diff --git a/src/third_party/stm32_cubeide/.cproject b/src/third_party/stm32_cubeide/.cproject
new file mode 100644
index 000000000..ce89776f4
--- /dev/null
+++ b/src/third_party/stm32_cubeide/.cproject
@@ -0,0 +1,423 @@
[423 lines of generated Eclipse CDT project XML not shown]
diff --git a/src/third_party/stm32_cubeide/.mxproject b/src/third_party/stm32_cubeide/.mxproject
new file mode 100644
index 000000000..eb55c281f
--- /dev/null
+++ b/src/third_party/stm32_cubeide/.mxproject
@@ -0,0 +1,103 @@
+[PreviousGenFiles]
+AdvancedFolderStructure=true
+HeaderFileListSize=37
+HeaderFiles#0=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/gpio.h
+HeaderFiles#1=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/adc.h
+HeaderFiles#2=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/crc.h
+HeaderFiles#3=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/dac.h
+HeaderFiles#4=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/dma.h
+HeaderFiles#5=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/dma2d.h
+HeaderFiles#6=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/Target/ffconf.h
+HeaderFiles#7=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/Target/bsp_driver_sd.h
+HeaderFiles#8=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/Target/sd_diskio.h
+HeaderFiles#9=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/App/fatfs.h
+HeaderFiles#10=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/fmc.h
+HeaderFiles#11=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/FreeRTOSConfig.h
+HeaderFiles#12=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/i2c.h
+HeaderFiles#13=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/iwdg.h
+HeaderFiles#14=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/jpeg.h
+HeaderFiles#15=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/jpeg_utils_conf.h
+HeaderFiles#16=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LIBJPEG/App/libjpeg.h
+HeaderFiles#17=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LIBJPEG/Target/jconfig.h
+HeaderFiles#18=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LIBJPEG/Target/jmorecfg.h
+HeaderFiles#19=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LIBJPEG/Target/jdata_conf.h
+HeaderFiles#20=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/ltdc.h
+HeaderFiles#21=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LWIP/App/lwip.h
+HeaderFiles#22=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LWIP/Target/lwipopts.h
+HeaderFiles#23=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LWIP/Target/ethernetif.h
+HeaderFiles#24=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/rng.h
+HeaderFiles#25=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/rtc.h
+HeaderFiles#26=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/sdmmc.h
+HeaderFiles#27=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/spi.h
+HeaderFiles#28=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/tim.h
+HeaderFiles#29=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/usart.h
+HeaderFiles#30=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/App/usb_device.h
+HeaderFiles#31=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/Target/usbd_conf.h
+HeaderFiles#32=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_desc.h
+HeaderFiles#33=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_cdc_if.h
+HeaderFiles#34=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/stm32f7xx_it.h
+HeaderFiles#35=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/stm32f7xx_hal_conf.h
+HeaderFiles#36=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc/main.h
+HeaderFolderListSize=9
+HeaderPath#0=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Inc
+HeaderPath#1=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/Target
+HeaderPath#2=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/App
+HeaderPath#3=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LIBJPEG/App
+HeaderPath#4=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LIBJPEG/Target
+HeaderPath#5=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LWIP/App
+HeaderPath#6=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LWIP/Target
+HeaderPath#7=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/App
+HeaderPath#8=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/Target
+HeaderFiles=;
+SourceFileListSize=32
+SourceFiles#0=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/gpio.c
+SourceFiles#1=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/adc.c
+SourceFiles#2=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/crc.c
+SourceFiles#3=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/dac.c
+SourceFiles#4=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/dma.c
+SourceFiles#5=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/dma2d.c
+SourceFiles#6=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/Target/bsp_driver_sd.c
+SourceFiles#7=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/Target/sd_diskio.c
+SourceFiles#8=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/App/fatfs.c
+SourceFiles#9=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/fmc.c
+SourceFiles#10=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/freertos.c
+SourceFiles#11=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/i2c.c
+SourceFiles#12=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/iwdg.c
+SourceFiles#13=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/jpeg.c
+SourceFiles#14=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LIBJPEG/App/libjpeg.c
+SourceFiles#15=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/ltdc.c
+SourceFiles#16=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LWIP/App/lwip.c
+SourceFiles#17=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LWIP/Target/ethernetif.c
+SourceFiles#18=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/rng.c
+SourceFiles#19=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/rtc.c
+SourceFiles#20=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/sdmmc.c
+SourceFiles#21=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/spi.c
+SourceFiles#22=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/tim.c
+SourceFiles#23=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/usart.c
+SourceFiles#24=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/App/usb_device.c
+SourceFiles#25=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/Target/usbd_conf.c
+SourceFiles#26=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_desc.c
+SourceFiles#27=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_cdc_if.c
+SourceFiles#28=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_it.c
+SourceFiles#29=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_hal_msp.c
+SourceFiles#30=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_hal_timebase_tim.c
+SourceFiles#31=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src/main.c
+SourceFolderListSize=8
+SourcePath#0=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/Core/Src
+SourcePath#1=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/Target
+SourcePath#2=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/FATFS/App
+SourcePath#3=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LIBJPEG/App
+SourcePath#4=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LWIP/App
+SourcePath#5=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/LWIP/Target
+SourcePath#6=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/App
+SourcePath#7=C:/work/eez/modular-psu-firmware/src/third_party/stm32_cubeide/USB_DEVICE/Target
+SourceFiles=;
+
+[PreviousLibFiles]
+LibFiles=Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_eth.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_cortex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_def.h;Drivers/STM32F7xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_exti.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_usb.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_adc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_adc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_crc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_crc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dac.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dac_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma2d.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_fmc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_sdram.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_iwdg.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_jpeg.h;Utilities/JPEG/jpeg_utils.h;Utilities/JPEG/jpeg_utils_conf_template.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_ltdc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_ltdc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dsi.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rng.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rtc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rtc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_sdmmc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_sd.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_spi.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_spi_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart_ex.h;Middlewares/Third_Party/FatFs/src/diskio.h;Middlewares/Third_Party/FatFs/src/ff.h;Middlewares/Third_Party/FatFs/src/ff_gen_drv.h;Middlewares/Third_Party/FatFs/src/integer.h;Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h;Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h;Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h;Middlewares/Third_Party/FreeRTOS/Source/include/FreeRTOS.h;Middlewares/Third_Party/FreeRTOS/Source/include/list.h;Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h;Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h;Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h;Middlewares/Third_Party/FreeRTOS/Source/include/portable.h;Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h;Middlewares/Third_Party/FreeRTOS/Source/include/queue.h;Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h;Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h;Mid
dlewares/Third_Party/FreeRTOS/Source/include/StackMacros.h;Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h;Middlewares/Third_Party/FreeRTOS/Source/include/task.h;Middlewares/Third_Party/FreeRTOS/Source/include/timers.h;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.h;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h;Middlewares/Third_Party/LibJPEG/include/jconfig_template.h;Middlewares/Third_Party/LibJPEG/include/jdct.h;Middlewares/Third_Party/LibJPEG/include/jerror.h;Middlewares/Third_Party/LibJPEG/include/jinclude.h;Middlewares/Third_Party/LibJPEG/include/jmemsys.h;Middlewares/Third_Party/LibJPEG/include/jmorecfg_template.h;Middlewares/Third_Party/LibJPEG/include/jpegint.h;Middlewares/Third_Party/LibJPEG/include/jpeglib.h;Middlewares/Third_Party/LibJPEG/include/jversion.h;Middlewares/Third_Party/LibJPEG/include/jdata_conf_template.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h;Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_core.h;Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ctlreq.h;Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_def.h;Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ioreq.h;Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Inc/usbd_cdc.h;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pc
d_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma2d.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_fmc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sdram.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_iwdg.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_jpeg.c;Utilities/JPEG/jpeg_utils.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dsi.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rng.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_sdmmc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sd.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c;Middlewares/Third_Party/FatFs/src/diskio.c;Middlewares/Third_Party/FatFs/src/ff.c;Middlewares/Third_Party/FatFs/src/ff_gen_drv.c;Middlewares/Third_Party/FatFs/src/option/syscall.c;Middlewares/Third_Party/FatFs/src/option/ccsbcs.c;Middlewares/Third_Party/FreeRTOS/Source/croutine.c;Middlewares/Third_Party/FreeRTOS/Source/event_groups.c;Middlewares/Third_Party/FreeRTOS/Source/list.c;Middlewares/Third_Party/FreeRTOS/Source/queue.c;Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c;Middlewares/Third_Party/FreeRTOS/Source/tasks.c;Middlewares/Third_Party/FreeRTOS/Source/timers.c;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.c;Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c;Middlewares/Third_Party/LibJPEG/source/jcapimin.c;Middlewares/Third_Party/LibJPEG/source/jcapistd.c;Middlewares/Third_Party/LibJPEG/source/jdapimin.c;Middlewares/Third_Party/LibJPEG/source/jdapistd.c;Middlewares/Third_Party/LibJPEG/source/jcomapi.c;Middlewares/Third_Party/LibJPEG/source/jcparam.c;Middlewares/Third_Party/LibJPEG/source/jctrans.c;Middlewares/Third_Party/LibJPEG/source/jdtrans.c;Middlewares/Third_Party/LibJPEG/source/jcinit.c;Middlewares/Third_Party/LibJPEG/source/jcmaster.c;Middlewares/Third_Party/LibJPEG/source/jcmainct.c;Middlewares/Third_Party/LibJPEG/source/jcprepct.c;Middlewares/Third_Party/LibJPEG/source/jccoefct.c;Middlewares/Third_Party/LibJPEG/source/jccolor.c;Middlewares/Third_Party/LibJPEG/source/jcsample.c;Middlewares/Third_Party/LibJPEG/source/jcdctmgr.c;Middlewares/Third_Party/LibJPEG/source/jfdctint.c;Middlewares/Third_Party/LibJPEG/source/jfdctfst.c;Middlewares/Third_Party/LibJPEG/source/jfdctflt.c;Middlewares/Third_Party/LibJPEG/source/jchuff.c;Middlewares/Third_Party/LibJPEG/source/jcarith.c;Middlewares/Third_Party/LibJPEG/source/jcmarker.c;Middlewares/Third_Party/LibJPEG/source/jdatadst.c;Middlewares/Third_Party/LibJPEG/source/jdmaster.c;Middlewares/Third_Party/LibJPEG/source/jdinput.c;Middlewares/Third_Party/LibJPEG/source/jdmainct.c;Middlewares/Third_Party/LibJPEG/sour
ce/jdcoefct.c;Middlewares/Third_Party/LibJPEG/source/jdpostct.c;Middlewares/Third_Party/LibJPEG/source/jdmarker.c;Middlewares/Third_Party/LibJPEG/source/jdhuff.c;Middlewares/Third_Party/LibJPEG/source/jdarith.c;Middlewares/Third_Party/LibJPEG/source/jddctmgr.c;Middlewares/Third_Party/LibJPEG/source/jidctint.c;Middlewares/Third_Party/LibJPEG/source/jidctfst.c;Middlewares/Third_Party/LibJPEG/source/jidctflt.c;Middlewares/Third_Party/LibJPEG/source/jdsample.c;Middlewares/Third_Party/LibJPEG/source/jdcolor.c;Middlewares/Third_Party/LibJPEG/source/jdmerge.c;Middlewares/Third_Party/LibJPEG/source/jquant1.c;Middlewares/Third_Party/LibJPEG/source/jquant2.c;Middlewares/Third_Party/LibJPEG/source/jdatasrc.c;Middlewares/Third_Party/LibJPEG/source/jaricom.c;Middlewares/Third_Party/LibJPEG/source/jerror.c;Middlewares/Third_Party/LibJPEG/source/jmemmgr.c;Middlewares/Third_Party/LibJPEG/source/jutils.c;Middlewares/Third_Party/LibJPEG/source/jmemnobs.c;Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c;Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c;Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c;Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c;Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c;Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c;Middlewares/Third_Party/LwIP/src/netif/ethernet.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c;Middlewares/Third_Party/LwIP/src/netif/slipif.c;Middlewares/Third_Party/LwIP/src/netif/zepif.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c;Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_eth.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_cortex.h;Drivers/S
TM32F7xx_HAL_Driver/Inc/stm32f7xx_hal.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_def.h;Drivers/STM32F7xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_exti.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_usb.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_adc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_adc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_crc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_crc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dac.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dac_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma2d.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_fmc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_sdram.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_iwdg.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_jpeg.h;Utilities/JPEG/jpeg_utils.h;Utilities/JPEG/jpeg_utils_conf_template.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_ltdc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_ltdc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dsi.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rng.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rtc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rtc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_sdmmc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_sd.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_spi.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_spi_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart_ex.h;Middlewares/Third_Party/FatFs/src/diskio.h;Middlewares/Third_Party/FatFs/src/ff.h;Middlewares/Third_Party/FatFs/src/ff_gen_drv.h;Middlewares/Third_Party/FatFs/src/integer.h;Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h;Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h;Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h;Middlewares/Third_Party/FreeRTOS/Source/include/FreeRTOS.h;Middlewares/Third_Party/FreeRTOS/Source/include/list.h;Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h;Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h;Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h;Middlewares/Third_Party/FreeRTOS/Source/include/portable.h;Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h;Middlewares/Third_Party/FreeRTOS/Source/include/queue.h;Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h;Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h;Middlewares/Third_Party/FreeRTOS/Source/include/StackMacros.h;Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h;Middlewares/Third_Party/FreeRTOS/Source/include/task.h;Middlewares/Third_Party/FreeRTOS/Source/include/timers.h;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.h;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h;Middlewares/Third_Party/LibJPEG/include/jconfig_template.h;Middlewares/Third_Party/LibJPEG/include/jdct.h;Middlewares/Third_Party/LibJPEG/include/jerror.h;Middlewares/Third_Party/LibJPEG/include/jinclude.h;Middlewares/Third_Party/LibJPEG/include/jmemsys.h;Middlewares/Third_Party/LibJPEG/include/jmorecfg
_template.h;Middlewares/Third_Party/LibJPEG/include/jpegint.h;Middlewares/Third_Party/LibJPEG/include/jpeglib.h;Middlewares/Third_Party/LibJPEG/include/jversion.h;Middlewares/Third_Party/LibJPEG/include/jdata_conf_template.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h;Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_core.h;Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ctlreq.h;Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_def.h;Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ioreq.h;Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Inc/usbd_cdc.h;Drivers/CMSIS/Device/ST/STM32F7xx/Include/stm32f769xx.h;Drivers/CMSIS/Device/ST/STM32F7xx/Include/stm32f7xx.h;Drivers/CMSIS/Device/ST/STM32F7xx/Include/system_stm32f7xx.h;Drivers/CMSIS/Device/ST/STM32F7xx/Source/Templates/system_stm32f7xx.c;Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h;Middlewares/Third_Party/LwIP/src/include/lwip/api.h;Middlewares/Third_Party/LwIP/src/include/lwip/arch.h;Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h;Middlewares/Third_Party/LwIP/src/include/lwip/debug.h;Middlewares/Third_Party/LwIP/src/include/lwip/def.h;Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/dns.h;Middlewares/Third_Party/LwIP/src/include/lwip/err.h;Middlewares/Third_Party/LwIP/src/include/lwip/errno.h;Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h;Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h;Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/inet.h;Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h;Middlewares/Third_Party/LwIP/src/include/lwip/init.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h;Mid
dlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/mem.h;Middlewares/Third_Party/LwIP/src/include/lwip/memp.h;Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h;Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h;Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h;Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h;Middlewares/Third_Party/LwIP/src/include/lwip/netif.h;Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h;Middlewares/Third_Party/LwIP/src/include/lwip/opt.h;Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h;Middlewares/Third_Party/LwIP/src/include/lwip/raw.h;Middlewares/Third_Party/LwIP/src/include/lwip/sio.h;Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h;Middlewares/Third_Party/LwIP/src/include/lwip/stats.h;Middlewares/Third_Party/LwIP/src/include/lwip/sys.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h;Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h;Middlewares/Third_Party/LwIP/src/include/lwip/udp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h;M
iddlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h;Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h;Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/etharp.h;Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h;Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/slipif.h;Middlewares/Third_Party/LwIP/src/include/netif/zepif.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h;Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h;Middlewares/Third_Party/LwIP/system/arch/bpstruct.h;Middlewares/Third_Party/LwIP/system/arch/cc.h;Middlewares/Third_Party/LwIP/system/arch/cpu.h;Middlewares/Third_Party/LwIP/system/arch/epstruct.h;Middlewares/Third_Party/LwIP/system/arch/init.h;Middlewares/Third_Party/LwIP/system/arch/lib.h;Middlewares/Third_Party/LwIP/system/arch/perf.h;Middlewares/Third_Party/LwIP/system/arch/sys_arch.h;Middlewares/Third_Party/LwIP/src/api/api_lib.c;Middlewares/Third_Party/LwIP/src/api/api_msg.c;Middlewares/Third_Party/LwIP/src/api/err.c;Middlewares/Third_Party/LwIP/src/api/if_api.c;Middlewares/Third_Party/LwIP/src/api/netbuf.c;Middlewares/Third_Party/LwIP/src/api/netdb.c;Middlewares/Third_Party/LwIP/src/api/netifapi.c;Middlewares/Third_Party/LwIP/src/api/sockets.c;Middlewares/Third_Party/LwIP/src/api/tcpip.c;Middlewares/Third_Party/LwIP/src/core/altcp.c;Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c;Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c;Middlewares/Third_Party/LwIP/src/core/def.c;Middlewares/Third_Party/LwIP/src/core/dn
s.c;Middlewares/Third_Party/LwIP/src/core/inet_chksum.c;Middlewares/Third_Party/LwIP/src/core/init.c;Middlewares/Third_Party/LwIP/src/core/ip.c;Middlewares/Third_Party/LwIP/src/core/mem.c;Middlewares/Third_Party/LwIP/src/core/memp.c;Middlewares/Third_Party/LwIP/src/core/netif.c;Middlewares/Third_Party/LwIP/src/core/pbuf.c;Middlewares/Third_Party/LwIP/src/core/raw.c;Middlewares/Third_Party/LwIP/src/core/stats.c;Middlewares/Third_Party/LwIP/src/core/sys.c;Middlewares/Third_Party/LwIP/src/core/tcp.c;Middlewares/Third_Party/LwIP/src/core/tcp_in.c;Middlewares/Third_Party/LwIP/src/core/tcp_out.c;Middlewares/Third_Party/LwIP/src/core/timeouts.c;Middlewares/Third_Party/LwIP/src/core/udp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c;Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c;Middlewares/Third_Party/LwIP/system/OS/sys_arch.c;Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c;Middlewares/Third_Party/LwIP/src/apps/sntp/sntp.c;Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h;Middlewares/Third_Party/LwIP/src/include/lwip/api.h;Middlewares/Third_Party/LwIP/src/include/lwip/arch.h;Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h;Middlewares/Third_Party/LwIP/src/include/lwip/debug.h;Middlewares/Third_Party/LwIP/src/include/lwip/def.h;Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/dns.h;Middlewares/Third_Party/LwIP/src/include/lwip/err.h;Middlewares/Third_Party/LwIP/src/include/lwip/errno.h;Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h;Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h;Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/inet.h;Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h;Middlewares/Third_Party/LwIP/src/include/lwip/init.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/mem.h;Middlewares/Third_Party/LwIP/src/include/lwip/memp.h;Middlewares/Third_Party/LwIP/src/include/lwip/mld6
.h;Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h;Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h;Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h;Middlewares/Third_Party/LwIP/src/include/lwip/netif.h;Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h;Middlewares/Third_Party/LwIP/src/include/lwip/opt.h;Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h;Middlewares/Third_Party/LwIP/src/include/lwip/raw.h;Middlewares/Third_Party/LwIP/src/include/lwip/sio.h;Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h;Middlewares/Third_Party/LwIP/src/include/lwip/stats.h;Middlewares/Third_Party/LwIP/src/include/lwip/sys.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h;Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h;Middlewares/Third_Party/LwIP/src/include/lwip/udp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/pr
iv/tcp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h;Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h;Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/etharp.h;Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h;Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/slipif.h;Middlewares/Third_Party/LwIP/src/include/netif/zepif.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h;Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h;Middlewares/Third_Party/LwIP/system/arch/bpstruct.h;Middlewares/Third_Party/LwIP/system/arch/cc.h;Middlewares/Third_Party/LwIP/system/arch/cpu.h;Middlewares/Third_Party/LwIP/system/arch/epstruct.h;Middlewares/Third_Party/LwIP/system/arch/init.h;Middlewares/Third_Party/LwIP/system/arch/lib.h;Middlewares/Third_Party/LwIP/system/arch/perf.h;Middlewares/Third_Party/LwIP/system/arch/sys_arch.h;Drivers/CMSIS/Include/cmsis_armcc.h;Drivers/CMSIS/Include/cmsis_armclang.h;Drivers/CMSIS/Include/cmsis_compiler.h;Drivers/CMSIS/Include/cmsis_gcc.h;Drivers/CMSIS/Include/cmsis_iccarm.h;Drivers/CMSIS/Include/cmsis_version.h;Drivers/CMSIS/Include/core_armv8mbl.h;Drivers/CMSIS/Include/core_armv8mml.h;Drivers/CMSIS/Include/core_cm0.h;Drivers/CMSIS/Include/core_cm0plus.h;Drivers/CMSIS/Include/core_cm1.h;Drivers/CMSIS/Include/core_cm23.h;Drivers/CMSIS/Include/core_cm3.h;Drivers/CMSIS/Include/core_cm33.h;Drivers/CMSIS/Include/core_cm4.h;Drivers/CMSIS/Include/core_cm7.h;Drivers/CMSIS/Include/core_sc000.h;Drivers/CMSIS/Include/core_sc300.h;Drivers/CMSIS/Include/mpu_armv7.h;Drivers/CMSIS/Include/mpu_armv8.h;Drivers/CMSIS/Include/tz_context.h; + +[PreviousUsedCubeIDEFiles] 
+SourceFiles=Core\Src\main.c;Core\Src\gpio.c;Core\Src\adc.c;Core\Src\crc.c;Core\Src\dac.c;Core\Src\dma.c;Core\Src\dma2d.c;FATFS\Target\bsp_driver_sd.c;FATFS\Target\sd_diskio.c;FATFS\App\fatfs.c;Core\Src\fmc.c;Core\Src\freertos.c;Core\Src\i2c.c;Core\Src\iwdg.c;Core\Src\jpeg.c;LIBJPEG\App\libjpeg.c;Core\Src\ltdc.c;LWIP\App\lwip.c;LWIP\Target\ethernetif.c;Core\Src\rng.c;Core\Src\rtc.c;Core\Src\sdmmc.c;Core\Src\spi.c;Core\Src\tim.c;Core\Src\usart.c;USB_DEVICE\App\usb_device.c;USB_DEVICE\Target\usbd_conf.c;USB_DEVICE\App\usbd_desc.c;USB_DEVICE\App\usbd_cdc_if.c;Core\Src\stm32f7xx_it.c;Core\Src\stm32f7xx_hal_msp.c;Core\Src\stm32f7xx_hal_timebase_tim.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma2d.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_fmc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sdram.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_iwdg.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_jpeg.c;Utilities/JPEG/jpeg_utils.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dsi.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rng.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_sdmmc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sd.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c;Middlewares/Third_Party/FatFs/src/diskio.c;Middlewares/Third_Party/FatFs/src/ff.c;Middlewares/Third_Party/FatFs/src/ff_gen_drv.c;Middlewares/Third_Party/FatFs/src/option/syscall.c;Middlewares/Third_Party/FatFs/src/option/ccsbcs.c;Middlewares/Third_Party/FreeRTOS/Source/croutine.c;Middlewares/Third_Party/FreeRTOS/Source/event_groups.c;Middlewares/Third_Party/FreeRTOS/Source/list.c;Middlewares/Third_Party/FreeRTOS/Source/queue.c;Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c;Middlewares/Third_Party/FreeRTOS/Source/tasks.c;Midd
lewares/Third_Party/FreeRTOS/Source/timers.c;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.c;Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c;Middlewares/Third_Party/LibJPEG/source/jcapimin.c;Middlewares/Third_Party/LibJPEG/source/jcapistd.c;Middlewares/Third_Party/LibJPEG/source/jdapimin.c;Middlewares/Third_Party/LibJPEG/source/jdapistd.c;Middlewares/Third_Party/LibJPEG/source/jcomapi.c;Middlewares/Third_Party/LibJPEG/source/jcparam.c;Middlewares/Third_Party/LibJPEG/source/jctrans.c;Middlewares/Third_Party/LibJPEG/source/jdtrans.c;Middlewares/Third_Party/LibJPEG/source/jcinit.c;Middlewares/Third_Party/LibJPEG/source/jcmaster.c;Middlewares/Third_Party/LibJPEG/source/jcmainct.c;Middlewares/Third_Party/LibJPEG/source/jcprepct.c;Middlewares/Third_Party/LibJPEG/source/jccoefct.c;Middlewares/Third_Party/LibJPEG/source/jccolor.c;Middlewares/Third_Party/LibJPEG/source/jcsample.c;Middlewares/Third_Party/LibJPEG/source/jcdctmgr.c;Middlewares/Third_Party/LibJPEG/source/jfdctint.c;Middlewares/Third_Party/LibJPEG/source/jfdctfst.c;Middlewares/Third_Party/LibJPEG/source/jfdctflt.c;Middlewares/Third_Party/LibJPEG/source/jchuff.c;Middlewares/Third_Party/LibJPEG/source/jcarith.c;Middlewares/Third_Party/LibJPEG/source/jcmarker.c;Middlewares/Third_Party/LibJPEG/source/jdatadst.c;Middlewares/Third_Party/LibJPEG/source/jdmaster.c;Middlewares/Third_Party/LibJPEG/source/jdinput.c;Middlewares/Third_Party/LibJPEG/source/jdmainct.c;Middlewares/Third_Party/LibJPEG/source/jdcoefct.c;Middlewares/Third_Party/LibJPEG/source/jdpostct.c;Middlewares/Third_Party/LibJPEG/source/jdmarker.c;Middlewares/Third_Party/LibJPEG/source/jdhuff.c;Middlewares/Third_Party/LibJPEG/source/jdarith.c;Middlewares/Third_Party/LibJPEG/source/jddctmgr.c;Middlewares/Third_Party/LibJPEG/source/jidctint.c;Middlewares/Third_Party/LibJPEG/source/jidctfst.c;Middlewares/Third_Party/LibJPEG/source/jidctflt.c;Middlewares/Third_Party/LibJPEG/source/jdsample.c;Middlewares/Third_Party/LibJPEG/source/jdcolor.c;Middlewares/Third_Party/LibJPEG/source/jdmerge.c;Middlewares/Third_Party/LibJPEG/source/jquant1.c;Middlewares/Third_Party/LibJPEG/source/jquant2.c;Middlewares/Third_Party/LibJPEG/source/jdatasrc.c;Middlewares/Third_Party/LibJPEG/source/jaricom.c;Middlewares/Third_Party/LibJPEG/source/jerror.c;Middlewares/Third_Party/LibJPEG/source/jmemmgr.c;Middlewares/Third_Party/LibJPEG/source/jutils.c;Middlewares/Third_Party/LibJPEG/source/jmemnobs.c;Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c;Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c;Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c;Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c;Middl
ewares/Third_Party/LwIP/src/netif/ppp/pppos.c;Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c;Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c;Middlewares/Third_Party/LwIP/src/netif/ethernet.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c;Middlewares/Third_Party/LwIP/src/netif/slipif.c;Middlewares/Third_Party/LwIP/src/netif/zepif.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c;Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c;Middlewares/Third_Party/LwIP/src/api/api_lib.c;Middlewares/Third_Party/LwIP/src/api/api_msg.c;Middlewares/Third_Party/LwIP/src/api/err.c;Middlewares/Third_Party/LwIP/src/api/if_api.c;Middlewares/Third_Party/LwIP/src/api/netbuf.c;Middlewares/Third_Party/LwIP/src/api/netdb.c;Middlewares/Third_Party/LwIP/src/api/netifapi.c;Middlewares/Third_Party/LwIP/src/api/sockets.c;Middlewares/Third_Party/LwIP/src/api/tcpip.c;Middlewares/Third_Party/LwIP/src/core/altcp.c;Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c;Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c;Middlewares/Third_Party/LwIP/src/core/def.c;Middlewares/Third_Party/LwIP/src/core/dns.c;Middlewares/Third_Party/LwIP/src/core/inet_chksum.c;Middlewares/Third_Party/LwIP/src/core/init.c;Middlewares/Third_Party/LwIP/src/core/ip.c;Middlewares/Third_Party/LwIP/src/core/mem.c;Middlewares/Third_Party/LwIP/src/core/memp.c;Middlewares/Third_Party/LwIP/src/core/netif.c;Middlewares/Third_Party/LwIP/src/core/pbuf.c;Middlewares/Third_Party/LwIP/src/core/raw.c;Middlewares/Third_Party/LwIP/src/core/stats.c;Middlewares/Third_Party/LwIP/src/core/sys.c;Middlewares/Third_Party/LwIP/src/core/tcp.c;Middlewares/Third_Party/LwIP/src/core/tcp_in.c;Middlewares/Third_Party/LwIP/src/core/tcp_out.c;Middlewares/Third_Party/LwIP/src/core/timeouts.c;Middlewares/Third_Party/LwIP/src/core/udp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c;Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c;Middlewares/Third_Party/LwIP/system/OS/sys_arch.c;Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c;Middlewares/Third_Party/LwIP/src/apps/sntp/sntp.c;Core\Src/system_stm32f7xx.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_fla
sh_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma2d.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_fmc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sdram.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_iwdg.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_jpeg.c;Utilities/JPEG/jpeg_utils.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dsi.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rng.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_sdmmc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sd.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c;Middlewares/Third_Party/FatFs/src/diskio.c;Middlewares/Third_Party/FatFs/src/ff.c;Middlewares/Third_Party/FatFs/src/ff_gen_drv.c;Middlewares/Third_Party/FatFs/src/option/syscall.c;Middlewares/Third_Party/FatFs/src/option/ccsbcs.c;Middlewares/Third_Party/FreeRTOS/Source/croutine.c;Middlewares/Third_Party/FreeRTOS/Source/event_groups.c;Middlewares/Third_Party/FreeRTOS/Source/list.c;Middlewares/Third_Party/FreeRTOS/Source/queue.c;Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c;Middlewares/Third_Party/FreeRTOS/Source/tasks.c;Middlewares/Third_Party/FreeRTOS/Source/timers.c;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.c;Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c;Middlewares/Third_Party/LibJPEG/source/jcapimin.c;Middlewares/Third_Party/LibJPEG/source/jcapistd.c;Middlewares/Third_Party/LibJPEG/source/jdapimin.c;Middlewares/Third_Party/LibJPEG/source/jdapistd.c;Middlewares/Third_Party/LibJPEG/source/jcomapi.c;Middlewares/Third_Party/LibJPEG/source/jcparam.c;Middlewares/Third_Party/LibJPEG/source/jctrans.c;Middlewares/Third_Party/LibJPEG/source/jdtrans.c;Middlewares/Third_Party/LibJPEG/source/jcinit.c;Middlewares/Third_Party/LibJPEG/source/jcmaster.c;Middlewares/Third_Party/LibJPEG/source/jcmainct.c;Middlewares/Third_Party/LibJPEG/source/jcprepct.c;Middlewares/Third_Party/LibJPEG/source/jccoefct.c;Middlewares/Third_Party/LibJPEG
/source/jccolor.c;Middlewares/Third_Party/LibJPEG/source/jcsample.c;Middlewares/Third_Party/LibJPEG/source/jcdctmgr.c;Middlewares/Third_Party/LibJPEG/source/jfdctint.c;Middlewares/Third_Party/LibJPEG/source/jfdctfst.c;Middlewares/Third_Party/LibJPEG/source/jfdctflt.c;Middlewares/Third_Party/LibJPEG/source/jchuff.c;Middlewares/Third_Party/LibJPEG/source/jcarith.c;Middlewares/Third_Party/LibJPEG/source/jcmarker.c;Middlewares/Third_Party/LibJPEG/source/jdatadst.c;Middlewares/Third_Party/LibJPEG/source/jdmaster.c;Middlewares/Third_Party/LibJPEG/source/jdinput.c;Middlewares/Third_Party/LibJPEG/source/jdmainct.c;Middlewares/Third_Party/LibJPEG/source/jdcoefct.c;Middlewares/Third_Party/LibJPEG/source/jdpostct.c;Middlewares/Third_Party/LibJPEG/source/jdmarker.c;Middlewares/Third_Party/LibJPEG/source/jdhuff.c;Middlewares/Third_Party/LibJPEG/source/jdarith.c;Middlewares/Third_Party/LibJPEG/source/jddctmgr.c;Middlewares/Third_Party/LibJPEG/source/jidctint.c;Middlewares/Third_Party/LibJPEG/source/jidctfst.c;Middlewares/Third_Party/LibJPEG/source/jidctflt.c;Middlewares/Third_Party/LibJPEG/source/jdsample.c;Middlewares/Third_Party/LibJPEG/source/jdcolor.c;Middlewares/Third_Party/LibJPEG/source/jdmerge.c;Middlewares/Third_Party/LibJPEG/source/jquant1.c;Middlewares/Third_Party/LibJPEG/source/jquant2.c;Middlewares/Third_Party/LibJPEG/source/jdatasrc.c;Middlewares/Third_Party/LibJPEG/source/jaricom.c;Middlewares/Third_Party/LibJPEG/source/jerror.c;Middlewares/Third_Party/LibJPEG/source/jmemmgr.c;Middlewares/Third_Party/LibJPEG/source/jutils.c;Middlewares/Third_Party/LibJPEG/source/jmemnobs.c;Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c;Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c;Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c;Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c;Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c;Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c;Middlewares/Third_Party/LwIP/src/netif/ethernet.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c;Middlewares/Third_Party/LwIP/src/netif/slipif.c;Middlewares/Third_Party/LwIP/src/netif/zepif.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c;Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c;Middlewares/Third_P
arty/LwIP/src/api/api_lib.c;Middlewares/Third_Party/LwIP/src/api/api_msg.c;Middlewares/Third_Party/LwIP/src/api/err.c;Middlewares/Third_Party/LwIP/src/api/if_api.c;Middlewares/Third_Party/LwIP/src/api/netbuf.c;Middlewares/Third_Party/LwIP/src/api/netdb.c;Middlewares/Third_Party/LwIP/src/api/netifapi.c;Middlewares/Third_Party/LwIP/src/api/sockets.c;Middlewares/Third_Party/LwIP/src/api/tcpip.c;Middlewares/Third_Party/LwIP/src/core/altcp.c;Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c;Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c;Middlewares/Third_Party/LwIP/src/core/def.c;Middlewares/Third_Party/LwIP/src/core/dns.c;Middlewares/Third_Party/LwIP/src/core/inet_chksum.c;Middlewares/Third_Party/LwIP/src/core/init.c;Middlewares/Third_Party/LwIP/src/core/ip.c;Middlewares/Third_Party/LwIP/src/core/mem.c;Middlewares/Third_Party/LwIP/src/core/memp.c;Middlewares/Third_Party/LwIP/src/core/netif.c;Middlewares/Third_Party/LwIP/src/core/pbuf.c;Middlewares/Third_Party/LwIP/src/core/raw.c;Middlewares/Third_Party/LwIP/src/core/stats.c;Middlewares/Third_Party/LwIP/src/core/sys.c;Middlewares/Third_Party/LwIP/src/core/tcp.c;Middlewares/Third_Party/LwIP/src/core/tcp_in.c;Middlewares/Third_Party/LwIP/src/core/tcp_out.c;Middlewares/Third_Party/LwIP/src/core/timeouts.c;Middlewares/Third_Party/LwIP/src/core/udp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c;Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c;Middlewares/Third_Party/LwIP/system/OS/sys_arch.c;Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c;Middlewares/Third_Party/LwIP/src/apps/sntp/sntp.c;Core\Src/system_stm32f7xx.c;Drivers/CMSIS/Device/ST/STM32F7xx/Source/Templates/system_stm32f7xx.c;;Middlewares/Third_Party/FatFs/src/diskio.c;Middlewares/Third_Party/FatFs/src/ff.c;Middlewares/Third_Party/FatFs/src/ff_gen_drv.c;Middlewares/Third_Party/FatFs/src/option/syscall.c;Middlewares/Third_Party/FatFs/src/option/ccsbcs.c;Middlewares/Third_Party/FreeRTOS/Source/croutine.c;Middlewares/Third_Party/FreeRTOS/Source/event_groups.c;Middlewares/Third_Party/FreeRTOS/Source/list.c;Middlewares/Third_Party/FreeRTOS/Source/queue.c;Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c;Middlewares/Third_Party/FreeRTOS/Source/tasks.c;Middlewares/Third_Party/FreeRTOS/Source/timers.c;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.c;Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c;Middlewares/Third_Party/LibJPEG/source/jcapimin.c;Middlewares/Third_Party/LibJPEG/source/jcapistd.c;Middlewares/Third_Party/LibJPEG/source/jdapimin.c;Middlewares/Third_Party/LibJPEG/source/jdapistd.c;Middlewares/Third_Party/LibJPEG/source/jcomapi.c;Middlewares/Third_Party/LibJPEG/source/jcparam.c;Middlewares/Third_Party/LibJPEG/so
urce/jctrans.c;Middlewares/Third_Party/LibJPEG/source/jdtrans.c;Middlewares/Third_Party/LibJPEG/source/jcinit.c;Middlewares/Third_Party/LibJPEG/source/jcmaster.c;Middlewares/Third_Party/LibJPEG/source/jcmainct.c;Middlewares/Third_Party/LibJPEG/source/jcprepct.c;Middlewares/Third_Party/LibJPEG/source/jccoefct.c;Middlewares/Third_Party/LibJPEG/source/jccolor.c;Middlewares/Third_Party/LibJPEG/source/jcsample.c;Middlewares/Third_Party/LibJPEG/source/jcdctmgr.c;Middlewares/Third_Party/LibJPEG/source/jfdctint.c;Middlewares/Third_Party/LibJPEG/source/jfdctfst.c;Middlewares/Third_Party/LibJPEG/source/jfdctflt.c;Middlewares/Third_Party/LibJPEG/source/jchuff.c;Middlewares/Third_Party/LibJPEG/source/jcarith.c;Middlewares/Third_Party/LibJPEG/source/jcmarker.c;Middlewares/Third_Party/LibJPEG/source/jdatadst.c;Middlewares/Third_Party/LibJPEG/source/jdmaster.c;Middlewares/Third_Party/LibJPEG/source/jdinput.c;Middlewares/Third_Party/LibJPEG/source/jdmainct.c;Middlewares/Third_Party/LibJPEG/source/jdcoefct.c;Middlewares/Third_Party/LibJPEG/source/jdpostct.c;Middlewares/Third_Party/LibJPEG/source/jdmarker.c;Middlewares/Third_Party/LibJPEG/source/jdhuff.c;Middlewares/Third_Party/LibJPEG/source/jdarith.c;Middlewares/Third_Party/LibJPEG/source/jddctmgr.c;Middlewares/Third_Party/LibJPEG/source/jidctint.c;Middlewares/Third_Party/LibJPEG/source/jidctfst.c;Middlewares/Third_Party/LibJPEG/source/jidctflt.c;Middlewares/Third_Party/LibJPEG/source/jdsample.c;Middlewares/Third_Party/LibJPEG/source/jdcolor.c;Middlewares/Third_Party/LibJPEG/source/jdmerge.c;Middlewares/Third_Party/LibJPEG/source/jquant1.c;Middlewares/Third_Party/LibJPEG/source/jquant2.c;Middlewares/Third_Party/LibJPEG/source/jdatasrc.c;Middlewares/Third_Party/LibJPEG/source/jaricom.c;Middlewares/Third_Party/LibJPEG/source/jerror.c;Middlewares/Third_Party/LibJPEG/source/jmemmgr.c;Middlewares/Third_Party/LibJPEG/source/jutils.c;Middlewares/Third_Party/LibJPEG/source/jmemnobs.c;Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c;Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c;Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c;Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c;Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c;Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c;Middlewares/Third_Party/LwIP/src/netif/ethernet.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c;Middlewares/Third_Party/LwIP/src/netif/slipif.c;Middlewares/Third_Party
/LwIP/src/netif/zepif.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c;Middlewares/Third_Party/LwIP/src/api/api_lib.c;Middlewares/Third_Party/LwIP/src/api/api_msg.c;Middlewares/Third_Party/LwIP/src/api/err.c;Middlewares/Third_Party/LwIP/src/api/if_api.c;Middlewares/Third_Party/LwIP/src/api/netbuf.c;Middlewares/Third_Party/LwIP/src/api/netdb.c;Middlewares/Third_Party/LwIP/src/api/netifapi.c;Middlewares/Third_Party/LwIP/src/api/sockets.c;Middlewares/Third_Party/LwIP/src/api/tcpip.c;Middlewares/Third_Party/LwIP/src/core/altcp.c;Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c;Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c;Middlewares/Third_Party/LwIP/src/core/def.c;Middlewares/Third_Party/LwIP/src/core/dns.c;Middlewares/Third_Party/LwIP/src/core/inet_chksum.c;Middlewares/Third_Party/LwIP/src/core/init.c;Middlewares/Third_Party/LwIP/src/core/ip.c;Middlewares/Third_Party/LwIP/src/core/mem.c;Middlewares/Third_Party/LwIP/src/core/memp.c;Middlewares/Third_Party/LwIP/src/core/netif.c;Middlewares/Third_Party/LwIP/src/core/pbuf.c;Middlewares/Third_Party/LwIP/src/core/raw.c;Middlewares/Third_Party/LwIP/src/core/stats.c;Middlewares/Third_Party/LwIP/src/core/sys.c;Middlewares/Third_Party/LwIP/src/core/tcp.c;Middlewares/Third_Party/LwIP/src/core/tcp_in.c;Middlewares/Third_Party/LwIP/src/core/tcp_out.c;Middlewares/Third_Party/LwIP/src/core/timeouts.c;Middlewares/Third_Party/LwIP/src/core/udp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c;Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c;Middlewares/Third_Party/LwIP/system/OS/sys_arch.c;Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c;Middlewares/Third_Party/LwIP/src/apps/sntp/sntp.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c;Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c;Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c; 
+HeaderPath=Middlewares\Third_Party\LwIP\src\include;Middlewares\Third_Party\LwIP\system;Middlewares\Third_Party\LwIP\src\include;Middlewares\Third_Party\LwIP\system;Drivers\STM32F7xx_HAL_Driver\Inc;Drivers\STM32F7xx_HAL_Driver\Inc\Legacy;Utilities\JPEG;Middlewares\Third_Party\FatFs\src;Middlewares\Third_Party\FreeRTOS\Source\include;Middlewares\Third_Party\FreeRTOS\Source\CMSIS_RTOS;Middlewares\Third_Party\FreeRTOS\Source\portable\GCC\ARM_CM7\r0p1;Middlewares\Third_Party\LibJPEG\include;Middlewares\Third_Party\LwIP\src\include\netif\ppp;Middlewares\ST\STM32_USB_Device_Library\Core\Inc;Middlewares\ST\STM32_USB_Device_Library\Class\CDC\Inc;Drivers\CMSIS\Device\ST\STM32F7xx\Include;Middlewares\Third_Party\LwIP\src\include\lwip;Middlewares\Third_Party\LwIP\src\include\lwip\apps;Middlewares\Third_Party\LwIP\src\include\lwip\priv;Middlewares\Third_Party\LwIP\src\include\lwip\prot;Middlewares\Third_Party\LwIP\src\include\netif;Middlewares\Third_Party\LwIP\src\include\compat\posix;Middlewares\Third_Party\LwIP\src\include\compat\posix\arpa;Middlewares\Third_Party\LwIP\src\include\compat\posix\net;Middlewares\Third_Party\LwIP\src\include\compat\posix\sys;Middlewares\Third_Party\LwIP\src\include\compat\stdc;Middlewares\Third_Party\LwIP\system\arch;Drivers\CMSIS\Include;Core\Inc;FATFS\Target;FATFS\App;LIBJPEG\App;LIBJPEG\Target;LWIP\App;LWIP\Target;USB_DEVICE\App;USB_DEVICE\Target; +CDefines=USE_HAL_DRIVER;STM32F769xx;USE_HAL_DRIVER;USE_HAL_DRIVER; + diff --git a/src/third_party/stm32_cubeide/.project b/src/third_party/stm32_cubeide/.project new file mode 100644 index 000000000..21a6650d1 --- /dev/null +++ b/src/third_party/stm32_cubeide/.project @@ -0,0 +1,51 @@ + + + bb3 + + + + + + org.eclipse.cdt.managedbuilder.core.genmakebuilder + clean,full,incremental, + + + + + org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder + full,incremental, + + + + + + com.st.stm32cube.ide.mcu.MCUProjectNature + com.st.stm32cube.ide.mcu.MCUCubeProjectNature + org.eclipse.cdt.core.cnature + com.st.stm32cube.ide.mcu.MCUCubeIdeServicesRevAProjectNature + com.st.stm32cube.ide.mcu.MCUAdvancedStructureProjectNature + com.st.stm32cube.ide.mcu.MCUEndUserDisabledTrustZoneProjectNature + com.st.stm32cube.ide.mcu.MCUSingleCpuProjectNature + com.st.stm32cube.ide.mcu.MCURootProjectNature + org.eclipse.cdt.managedbuilder.core.managedBuildNature + org.eclipse.cdt.managedbuilder.core.ScannerConfigNature + org.eclipse.cdt.core.ccnature + + + + eez + 2 + PARENT-2-PROJECT_LOC/eez + + + libscpi + 2 + PARENT-1-PROJECT_LOC/libscpi + + + micropython + 2 + PARENT-1-PROJECT_LOC/micropython + + + diff --git a/src/third_party/stm32_cubeide/.settings/com.st.stm32cube.ide.mcu.sfrview.prefs b/src/third_party/stm32_cubeide/.settings/com.st.stm32cube.ide.mcu.sfrview.prefs new file mode 100644 index 000000000..7ba0b8d58 --- /dev/null +++ b/src/third_party/stm32_cubeide/.settings/com.st.stm32cube.ide.mcu.sfrview.prefs @@ -0,0 +1,3 @@ +eclipse.preferences.version=1 +svd_custom_file_path= +svd_file_path=platform\:/plugin/com.st.stm32cube.ide.mcu.productdb.debug/resources/cmsis/STMicroelectronics_CMSIS_SVD/STM32F769.svd diff --git a/src/third_party/stm32_cubeide/.settings/language.settings.xml b/src/third_party/stm32_cubeide/.settings/language.settings.xml new file mode 100644 index 000000000..d8e6f0b91 --- /dev/null +++ b/src/third_party/stm32_cubeide/.settings/language.settings.xml @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/third_party/stm32_cubeide/Core/Inc/FreeRTOSConfig.h 
b/src/third_party/stm32_cubeide/Core/Inc/FreeRTOSConfig.h new file mode 100644 index 000000000..fa135f376 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/FreeRTOSConfig.h @@ -0,0 +1,139 @@ +/* USER CODE BEGIN Header */ +/* + * FreeRTOS Kernel V10.2.1 + * Portion Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Portion Copyright (C) 2019 StMicroelectronics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ +/* USER CODE END Header */ + +#ifndef FREERTOS_CONFIG_H +#define FREERTOS_CONFIG_H + +/*----------------------------------------------------------- + * Application specific definitions. + * + * These definitions should be adjusted for your particular hardware and + * application requirements. + * + * THESE PARAMETERS ARE DESCRIBED WITHIN THE 'CONFIGURATION' SECTION OF THE + * FreeRTOS API DOCUMENTATION AVAILABLE ON THE FreeRTOS.org WEB SITE. + * + * See http://www.freertos.org/a00110.html. + *----------------------------------------------------------*/ + +/* USER CODE BEGIN Includes */ +/* Section where include file can be added */ +/* USER CODE END Includes */ + +/* Ensure definitions are only used by the compiler, and not by the assembler. */ +#if defined(__ICCARM__) || defined(__CC_ARM) || defined(__GNUC__) + #include + extern uint32_t SystemCoreClock; +#endif +#define configENABLE_FPU 0 +#define configENABLE_MPU 0 + +#define configUSE_PREEMPTION 1 +#define configSUPPORT_STATIC_ALLOCATION 1 +#define configSUPPORT_DYNAMIC_ALLOCATION 1 +#define configUSE_IDLE_HOOK 0 +#define configUSE_TICK_HOOK 0 +#define configCPU_CLOCK_HZ ( SystemCoreClock ) +#define configTICK_RATE_HZ ((TickType_t)1000) +#define configMAX_PRIORITIES ( 7 ) +#define configMINIMAL_STACK_SIZE ((uint16_t)1024) +#define configTOTAL_HEAP_SIZE ((size_t)131072) +#define configMAX_TASK_NAME_LEN ( 16 ) +#define configUSE_16_BIT_TICKS 0 +#define configUSE_MUTEXES 1 +#define configQUEUE_REGISTRY_SIZE 8 +#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1 +/* USER CODE BEGIN MESSAGE_BUFFER_LENGTH_TYPE */ +/* Defaults to size_t for backward compatibility, but can be changed + if lengths will always be less than the number of bytes in a size_t. */ +#define configMESSAGE_BUFFER_LENGTH_TYPE size_t +/* USER CODE END MESSAGE_BUFFER_LENGTH_TYPE */ + +/* Co-routine definitions. 
*/ +#define configUSE_CO_ROUTINES 0 +#define configMAX_CO_ROUTINE_PRIORITIES ( 2 ) + +/* Set the following definitions to 1 to include the API function, or zero +to exclude the API function. */ +#define INCLUDE_vTaskPrioritySet 1 +#define INCLUDE_uxTaskPriorityGet 1 +#define INCLUDE_vTaskDelete 1 +#define INCLUDE_vTaskCleanUpResources 0 +#define INCLUDE_vTaskSuspend 1 +#define INCLUDE_vTaskDelayUntil 0 +#define INCLUDE_vTaskDelay 1 +#define INCLUDE_xTaskGetSchedulerState 1 + +/* Cortex-M specific definitions. */ +#ifdef __NVIC_PRIO_BITS + /* __BVIC_PRIO_BITS will be specified when CMSIS is being used. */ + #define configPRIO_BITS __NVIC_PRIO_BITS +#else + #define configPRIO_BITS 4 +#endif + +/* The lowest interrupt priority that can be used in a call to a "set priority" +function. */ +#define configLIBRARY_LOWEST_INTERRUPT_PRIORITY 15 + +/* The highest interrupt priority that can be used by any interrupt service +routine that makes calls to interrupt safe FreeRTOS API functions. DO NOT CALL +INTERRUPT SAFE FREERTOS API FUNCTIONS FROM ANY INTERRUPT THAT HAS A HIGHER +PRIORITY THAN THIS! (higher priorities are lower numeric values. */ +#define configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY 5 + +/* Interrupt priorities used by the kernel port layer itself. These are generic +to all Cortex-M ports, and do not rely on any particular library functions. */ +#define configKERNEL_INTERRUPT_PRIORITY ( configLIBRARY_LOWEST_INTERRUPT_PRIORITY << (8 - configPRIO_BITS) ) +/* !!!! configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to zero !!!! +See http://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html. */ +#define configMAX_SYSCALL_INTERRUPT_PRIORITY ( configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << (8 - configPRIO_BITS) ) + +/* Normal assert() semantics without relying on the provision of an assert.h +header file. */ +/* USER CODE BEGIN 1 */ +#define configASSERT( x ) if ((x) == 0) {taskDISABLE_INTERRUPTS(); for( ;; );} +/* USER CODE END 1 */ + +/* Definitions that map the FreeRTOS port interrupt handlers to their CMSIS +standard names. */ +#define vPortSVCHandler SVC_Handler +#define xPortPendSVHandler PendSV_Handler + +/* IMPORTANT: This define is commented when used with STM32Cube firmware, when the timebase source is SysTick, + to prevent overwriting SysTick_Handler defined within STM32Cube HAL */ + +#define xPortSysTickHandler SysTick_Handler + +/* USER CODE BEGIN Defines */ +#define configUSE_NEWLIB_REENTRANT 1 +/* USER CODE END Defines */ + +#endif /* FREERTOS_CONFIG_H */ diff --git a/src/third_party/stm32_cubeide/Core/Inc/adc.h b/src/third_party/stm32_cubeide/Core/Inc/adc.h new file mode 100644 index 000000000..f7a4ebdd0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/adc.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : ADC.h + * Description : This file provides code for the configuration + * of the ADC instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __adc_H +#define __adc_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern ADC_HandleTypeDef hadc1; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_ADC1_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ adc_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/crc.h b/src/third_party/stm32_cubeide/Core/Inc/crc.h new file mode 100644 index 000000000..a5b0a7a58 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/crc.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : CRC.h + * Description : This file provides code for the configuration + * of the CRC instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __crc_H +#define __crc_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern CRC_HandleTypeDef hcrc; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_CRC_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ crc_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/dac.h b/src/third_party/stm32_cubeide/Core/Inc/dac.h new file mode 100644 index 000000000..cda5388b9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/dac.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : DAC.h + * Description : This file provides code for the configuration + * of the DAC instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __dac_H +#define __dac_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern DAC_HandleTypeDef hdac; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_DAC_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ dac_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/dma.h b/src/third_party/stm32_cubeide/Core/Inc/dma.h new file mode 100644 index 000000000..a1219cd9c --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/dma.h @@ -0,0 +1,56 @@ +/** + ****************************************************************************** + * File Name : dma.h + * Description : This file contains all the function prototypes for + * the dma.c file + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __dma_H +#define __dma_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* DMA memory to memory transfer handles -------------------------------------*/ + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_DMA_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif + +#endif /* __dma_H */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/dma2d.h b/src/third_party/stm32_cubeide/Core/Inc/dma2d.h new file mode 100644 index 000000000..23cbf08ee --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/dma2d.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : DMA2D.h + * Description : This file provides code for the configuration + * of the DMA2D instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __dma2d_H +#define __dma2d_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern DMA2D_HandleTypeDef hdma2d; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_DMA2D_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ dma2d_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/fmc.h b/src/third_party/stm32_cubeide/Core/Inc/fmc.h new file mode 100644 index 000000000..e407568cf --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/fmc.h @@ -0,0 +1,60 @@ +/** + ****************************************************************************** + * File Name : FMC.h + * Description : This file provides code for the configuration + * of the FMC peripheral. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __FMC_H +#define __FMC_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern SDRAM_HandleTypeDef hsdram1; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_FMC_Init(void); +void HAL_SDRAM_MspInit(SDRAM_HandleTypeDef* hsdram); +void HAL_SDRAM_MspDeInit(SDRAM_HandleTypeDef* hsdram); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__FMC_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/gpio.h b/src/third_party/stm32_cubeide/Core/Inc/gpio.h new file mode 100644 index 000000000..95c08b22a --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/gpio.h @@ -0,0 +1,57 @@ +/** + ****************************************************************************** + * File Name : gpio.h + * Description : This file contains all the functions prototypes for + * the gpio + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __gpio_H +#define __gpio_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_GPIO_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ pinoutConfig_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/i2c.h b/src/third_party/stm32_cubeide/Core/Inc/i2c.h new file mode 100644 index 000000000..d1dcf7cb8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/i2c.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : I2C.h + * Description : This file provides code for the configuration + * of the I2C instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __i2c_H +#define __i2c_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern I2C_HandleTypeDef hi2c1; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_I2C1_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ i2c_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/iwdg.h b/src/third_party/stm32_cubeide/Core/Inc/iwdg.h new file mode 100644 index 000000000..e46768397 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/iwdg.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : IWDG.h + * Description : This file provides code for the configuration + * of the IWDG instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __iwdg_H +#define __iwdg_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern IWDG_HandleTypeDef hiwdg; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_IWDG_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ iwdg_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/jpeg.h b/src/third_party/stm32_cubeide/Core/Inc/jpeg.h new file mode 100644 index 000000000..5a8d7e8e0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/jpeg.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : JPEG.h + * Description : This file provides code for the configuration + * of the JPEG instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __jpeg_H +#define __jpeg_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern JPEG_HandleTypeDef hjpeg; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_JPEG_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ jpeg_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/jpeg_utils_conf.h b/src/third_party/stm32_cubeide/Core/Inc/jpeg_utils_conf.h new file mode 100644 index 000000000..334fb87a2 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/jpeg_utils_conf.h @@ -0,0 +1,73 @@ +/* + * jpeg_utils_conf.h + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * Modified 1997-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains additional configuration options that customize the + * JPEG HW configuration. Most users will not need to touch this file. + */ + + /* Define to prevent recursive inclusion -------------------------------------*/ + +#ifndef __JPEG_UTILS_CONF_H__ +#define __JPEG_UTILS_CONF_H__ + +/* Includes ------------------------------------------------------------------*/ + +#include "stm32f7xx_hal.h" +#include "stm32f7xx_hal_jpeg.h" + +/* Private define ------------------------------------------------------------*/ +/** @addtogroup JPEG_Private_Defines + * @{ + */ +/* RGB Color format definition for JPEG encoding/Decoding : Should not be modified*/ +#define JPEG_ARGB8888 0 /* ARGB8888 Color Format */ +#define JPEG_RGB888 1 /* RGB888 Color Format */ +#define JPEG_RGB565 2 /* RGB565 Color Format */ + +/* + * Define USE_JPEG_DECODER + */ + +#define USE_JPEG_DECODER 1 /* 1 or 0 ********* Value different from default value : 1 ********** */ +/* + * Define USE_JPEG_ENCODER + */ + +#define USE_JPEG_ENCODER 1 /* 1 or 0 ********* Value different from default value : 1 ********** */ + +/* + * Define JPEG_RGB_FORMAT + */ +#define JPEG_RGB_FORMAT JPEG_RGB565 /* JPEG_ARGB8888, JPEG_RGB888, JPEG_RGB565 ********* Value different from default value : 0 ********** */ + +/* + * Define JPEG_SWAP_RG + */ +#define JPEG_SWAP_RG 0 /* 0 or 1 ********* Value different from default value : 0 ********** */ + + + + + + + + + + + + + + + + + + +/* Exported macro ------------------------------------------------------------*/ +/* Exported functions ------------------------------------------------------- */ + +#endif /* __JPEG_UTILS_CONF_H__ */ diff --git a/src/third_party/stm32_cubeide/Core/Inc/ltdc.h b/src/third_party/stm32_cubeide/Core/Inc/ltdc.h new file mode 100644 index 000000000..18e4242d6 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/ltdc.h @@ -0,0 +1,58 @@ +/** + 
****************************************************************************** + * File Name : LTDC.h + * Description : This file provides code for the configuration + * of the LTDC instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __ltdc_H +#define __ltdc_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern LTDC_HandleTypeDef hltdc; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_LTDC_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ ltdc_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/main.h b/src/third_party/stm32_cubeide/Core/Inc/main.h new file mode 100644 index 000000000..02959c96b --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/main.h @@ -0,0 +1,130 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : main.h + * @brief : Header for main.c file. + * This file contains the common defines of the application. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __MAIN_H +#define __MAIN_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" +#include "stm32f7xx_hal.h" + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Exported types ------------------------------------------------------------*/ +/* USER CODE BEGIN ET */ + +/* USER CODE END ET */ + +/* Exported constants --------------------------------------------------------*/ +/* USER CODE BEGIN EC */ + +/* USER CODE END EC */ + +/* Exported macro ------------------------------------------------------------*/ +/* USER CODE BEGIN EM */ + +/* USER CODE END EM */ + +/* Exported functions prototypes ---------------------------------------------*/ +void Error_Handler(void); + +/* USER CODE BEGIN EFP */ + +/* USER CODE END EFP */ + +/* Private defines -----------------------------------------------------------*/ +#define SPI4_CSA_Pin GPIO_PIN_3 +#define SPI4_CSA_GPIO_Port GPIOE +#define SPI4_CSB_Pin GPIO_PIN_4 +#define SPI4_CSB_GPIO_Port GPIOE +#define DIN2_Pin GPIO_PIN_13 +#define DIN2_GPIO_Port GPIOC +#define UART_RX_DIN1_Pin GPIO_PIN_6 +#define UART_RX_DIN1_GPIO_Port GPIOF +#define NFAULT_Pin GPIO_PIN_2 +#define NFAULT_GPIO_Port GPIOB +#define TFT_BRIGHTNESS_Pin GPIO_PIN_15 +#define TFT_BRIGHTNESS_GPIO_Port GPIOB +#define SPI2_CSB_Pin GPIO_PIN_11 +#define SPI2_CSB_GPIO_Port GPIOD +#define SPI2_CSA_Pin GPIO_PIN_12 +#define SPI2_CSA_GPIO_Port GPIOD +#define OE_SYNC_Pin GPIO_PIN_13 +#define OE_SYNC_GPIO_Port GPIOD +#define PWR_DIRECT_Pin GPIO_PIN_2 +#define PWR_DIRECT_GPIO_Port GPIOG +#define PWR_SSTART_Pin GPIO_PIN_3 +#define PWR_SSTART_GPIO_Port GPIOG +#define ENC_A_Pin GPIO_PIN_6 +#define ENC_A_GPIO_Port GPIOC +#define ENC_B_Pin GPIO_PIN_7 +#define ENC_B_GPIO_Port GPIOC +#define MCLK_25_Pin GPIO_PIN_9 +#define MCLK_25_GPIO_Port GPIOC +#define SPI2_IRQ_Pin GPIO_PIN_8 +#define SPI2_IRQ_GPIO_Port GPIOA +#define SPI2_IRQ_EXTI_IRQn EXTI9_5_IRQn +#define SPI5_IRQ_Pin GPIO_PIN_15 +#define SPI5_IRQ_GPIO_Port GPIOA +#define SPI5_IRQ_EXTI_IRQn EXTI15_10_IRQn +#define SD_DETECT_Pin GPIO_PIN_11 +#define SD_DETECT_GPIO_Port GPIOC +#define SD_DETECT_EXTI_IRQn EXTI15_10_IRQn +#define USB_OTG_FS_OC_Pin GPIO_PIN_4 +#define USB_OTG_FS_OC_GPIO_Port GPIOD +#define USB_OTG_FS_PSO_Pin GPIO_PIN_5 +#define USB_OTG_FS_PSO_GPIO_Port GPIOD +#define IRQ_TOUCH_Pin GPIO_PIN_7 +#define IRQ_TOUCH_GPIO_Port GPIOD +#define USER_SW_Pin GPIO_PIN_9 +#define USER_SW_GPIO_Port GPIOG +#define SPI5_CSB_Pin GPIO_PIN_13 +#define SPI5_CSB_GPIO_Port GPIOG +#define SPI5_CSA_Pin GPIO_PIN_14 +#define SPI5_CSA_GPIO_Port GPIOG +#define UART_TX_DOUT1_Pin GPIO_PIN_4 +#define UART_TX_DOUT1_GPIO_Port GPIOB +#define DOUT2_Pin GPIO_PIN_5 +#define DOUT2_GPIO_Port GPIOB +#define SPI4_IRQ_Pin GPIO_PIN_9 +#define SPI4_IRQ_GPIO_Port GPIOB +#define SPI4_IRQ_EXTI_IRQn EXTI9_5_IRQn +#define ENC_SW_Pin GPIO_PIN_4 +#define ENC_SW_GPIO_Port GPIOI +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private 
defines */ + +#ifdef __cplusplus +} +#endif + +#endif /* __MAIN_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/rng.h b/src/third_party/stm32_cubeide/Core/Inc/rng.h new file mode 100644 index 000000000..b0d5d2685 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/rng.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : RNG.h + * Description : This file provides code for the configuration + * of the RNG instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __rng_H +#define __rng_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern RNG_HandleTypeDef hrng; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_RNG_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ rng_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/rtc.h b/src/third_party/stm32_cubeide/Core/Inc/rtc.h new file mode 100644 index 000000000..bf4272551 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/rtc.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : RTC.h + * Description : This file provides code for the configuration + * of the RTC instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __rtc_H +#define __rtc_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern RTC_HandleTypeDef hrtc; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_RTC_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ rtc_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/sdmmc.h b/src/third_party/stm32_cubeide/Core/Inc/sdmmc.h new file mode 100644 index 000000000..562548403 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/sdmmc.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : SDMMC.h + * Description : This file provides code for the configuration + * of the SDMMC instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __sdmmc_H +#define __sdmmc_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern SD_HandleTypeDef hsd1; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_SDMMC1_SD_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ sdmmc_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/spi.h b/src/third_party/stm32_cubeide/Core/Inc/spi.h new file mode 100644 index 000000000..0d59b10ae --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/spi.h @@ -0,0 +1,62 @@ +/** + ****************************************************************************** + * File Name : SPI.h + * Description : This file provides code for the configuration + * of the SPI instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __spi_H +#define __spi_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern SPI_HandleTypeDef hspi2; +extern SPI_HandleTypeDef hspi4; +extern SPI_HandleTypeDef hspi5; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_SPI2_Init(void); +void MX_SPI4_Init(void); +void MX_SPI5_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ spi_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/stm32f7xx_hal_conf.h b/src/third_party/stm32_cubeide/Core/Inc/stm32f7xx_hal_conf.h new file mode 100644 index 000000000..7ce3b40e3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/stm32f7xx_hal_conf.h @@ -0,0 +1,451 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_conf_template.h + * @author MCD Application Team + * @brief HAL configuration template file. + * This file should be copied to the application folder and renamed + * to stm32f7xx_hal_conf.h. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_CONF_H +#define __STM32F7xx_HAL_CONF_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ + +/* ########################## Module Selection ############################## */ +/** + * @brief This is the list of modules to be used in the HAL driver + */ +#define HAL_MODULE_ENABLED + + #define HAL_ADC_MODULE_ENABLED +/* #define HAL_CRYP_MODULE_ENABLED */ +/* #define HAL_CAN_MODULE_ENABLED */ +/* #define HAL_CEC_MODULE_ENABLED */ +#define HAL_CRC_MODULE_ENABLED +/* #define HAL_CRYP_MODULE_ENABLED */ +#define HAL_DAC_MODULE_ENABLED +/* #define HAL_DCMI_MODULE_ENABLED */ +#define HAL_DMA2D_MODULE_ENABLED +#define HAL_ETH_MODULE_ENABLED +/* #define HAL_NAND_MODULE_ENABLED */ +/* #define HAL_NOR_MODULE_ENABLED */ +/* #define HAL_SRAM_MODULE_ENABLED */ +#define HAL_SDRAM_MODULE_ENABLED +/* #define HAL_HASH_MODULE_ENABLED */ +/* #define HAL_I2S_MODULE_ENABLED */ +#define HAL_IWDG_MODULE_ENABLED +/* #define HAL_LPTIM_MODULE_ENABLED */ +#define HAL_LTDC_MODULE_ENABLED +/* #define HAL_QSPI_MODULE_ENABLED */ +#define HAL_RNG_MODULE_ENABLED +#define HAL_RTC_MODULE_ENABLED +/* #define HAL_SAI_MODULE_ENABLED */ +#define HAL_SD_MODULE_ENABLED +/* #define HAL_MMC_MODULE_ENABLED */ +/* #define HAL_SPDIFRX_MODULE_ENABLED */ +#define HAL_SPI_MODULE_ENABLED +#define HAL_TIM_MODULE_ENABLED +#define HAL_UART_MODULE_ENABLED +/* #define HAL_USART_MODULE_ENABLED */ +/* #define HAL_IRDA_MODULE_ENABLED */ +/* #define HAL_SMARTCARD_MODULE_ENABLED */ +/* #define HAL_WWDG_MODULE_ENABLED */ +#define HAL_PCD_MODULE_ENABLED +/* #define HAL_HCD_MODULE_ENABLED */ +/* #define HAL_DFSDM_MODULE_ENABLED */ +/* #define HAL_DSI_MODULE_ENABLED */ +#define HAL_JPEG_MODULE_ENABLED +/* #define HAL_MDIOS_MODULE_ENABLED */ +/* #define HAL_SMBUS_MODULE_ENABLED */ +/* #define HAL_EXTI_MODULE_ENABLED */ +#define HAL_GPIO_MODULE_ENABLED +#define HAL_EXTI_MODULE_ENABLED +#define HAL_DMA_MODULE_ENABLED +#define HAL_RCC_MODULE_ENABLED +#define HAL_FLASH_MODULE_ENABLED +#define HAL_PWR_MODULE_ENABLED +#define HAL_I2C_MODULE_ENABLED +#define HAL_CORTEX_MODULE_ENABLED + +/* ########################## HSE/HSI Values adaptation ##################### */ +/** + * @brief Adjust the value of External High Speed oscillator (HSE) used in your application. + * This value is used by the RCC HAL module to compute the system frequency + * (when HSE is used as system clock source, directly or through the PLL). + */ +#if !defined (HSE_VALUE) + #define HSE_VALUE ((uint32_t)25000000U) /*!< Value of the External oscillator in Hz */ +#endif /* HSE_VALUE */ + +#if !defined (HSE_STARTUP_TIMEOUT) + #define HSE_STARTUP_TIMEOUT ((uint32_t)100U) /*!< Time out for HSE start up, in ms */ +#endif /* HSE_STARTUP_TIMEOUT */ + +/** + * @brief Internal High Speed oscillator (HSI) value. + * This value is used by the RCC HAL module to compute the system frequency + * (when HSI is used as system clock source, directly or through the PLL). 
+ */ +#if !defined (HSI_VALUE) + #define HSI_VALUE ((uint32_t)16000000U) /*!< Value of the Internal oscillator in Hz*/ +#endif /* HSI_VALUE */ + +/** + * @brief Internal Low Speed oscillator (LSI) value. + */ +#if !defined (LSI_VALUE) + #define LSI_VALUE ((uint32_t)32000U) /*!< LSI Typical Value in Hz*/ +#endif /* LSI_VALUE */ /*!< Value of the Internal Low Speed oscillator in Hz + The real value may vary depending on the variations + in voltage and temperature. */ +/** + * @brief External Low Speed oscillator (LSE) value. + */ +#if !defined (LSE_VALUE) + #define LSE_VALUE ((uint32_t)32768U) /*!< Value of the External Low Speed oscillator in Hz */ +#endif /* LSE_VALUE */ + +#if !defined (LSE_STARTUP_TIMEOUT) + #define LSE_STARTUP_TIMEOUT ((uint32_t)5000U) /*!< Time out for LSE start up, in ms */ +#endif /* LSE_STARTUP_TIMEOUT */ + +/** + * @brief External clock source for I2S peripheral + * This value is used by the I2S HAL module to compute the I2S clock source + * frequency, this source is inserted directly through I2S_CKIN pad. + */ +#if !defined (EXTERNAL_CLOCK_VALUE) + #define EXTERNAL_CLOCK_VALUE ((uint32_t)12288000U) /*!< Value of the Internal oscillator in Hz*/ +#endif /* EXTERNAL_CLOCK_VALUE */ + +/* Tip: To avoid modifying this file each time you need to use different HSE, + === you can define the HSE value in your toolchain compiler preprocessor. */ + +/* ########################### System Configuration ######################### */ +/** + * @brief This is the HAL system configuration section + */ +#define VDD_VALUE ((uint32_t)3300U) /*!< Value of VDD in mv */ +#define TICK_INT_PRIORITY ((uint32_t)0U) /*!< tick interrupt priority */ +#define USE_RTOS 0U +#define PREFETCH_ENABLE 0U +#define ART_ACCLERATOR_ENABLE 0U /* To enable instruction cache and prefetch */ + +/* ########################## Assert Selection ############################## */ +/** + * @brief Uncomment the line below to expanse the "assert_param" macro in the + * HAL drivers code + */ + #define USE_FULL_ASSERT 1U + +/* ################## Ethernet peripheral configuration ##################### */ + +/* Section 1 : Ethernet peripheral configuration */ + +/* MAC ADDRESS: MAC_ADDR0:MAC_ADDR1:MAC_ADDR2:MAC_ADDR3:MAC_ADDR4:MAC_ADDR5 */ +#define MAC_ADDR0 2U +#define MAC_ADDR1 0U +#define MAC_ADDR2 0U +#define MAC_ADDR3 0U +#define MAC_ADDR4 0U +#define MAC_ADDR5 0U + +/* Definition of the Ethernet driver buffers size and count */ +#define ETH_RX_BUF_SIZE ETH_MAX_PACKET_SIZE /* buffer size for receive */ +#define ETH_TX_BUF_SIZE ETH_MAX_PACKET_SIZE /* buffer size for transmit */ +#define ETH_RXBUFNB ((uint32_t)4U) /* 4 Rx buffers of size ETH_RX_BUF_SIZE */ +#define ETH_TXBUFNB ((uint32_t)4U) /* 4 Tx buffers of size ETH_TX_BUF_SIZE */ + +/* Section 2: PHY configuration section */ + +/* DP83848_PHY_ADDRESS Address*/ +#define DP83848_PHY_ADDRESS 2 +/* PHY Reset delay these values are based on a 1 ms Systick interrupt*/ +#define PHY_RESET_DELAY ((uint32_t)0x000000FFU) +/* PHY Configuration delay */ +#define PHY_CONFIG_DELAY ((uint32_t)0x00000FFFU) + +#define PHY_READ_TO ((uint32_t)0x0000FFFFU) +#define PHY_WRITE_TO ((uint32_t)0x0000FFFFU) + +/* Section 3: Common PHY Registers */ + +#define PHY_BCR ((uint16_t)0x00U) /*!< Transceiver Basic Control Register */ +#define PHY_BSR ((uint16_t)0x01U) /*!< Transceiver Basic Status Register */ + +#define PHY_RESET ((uint16_t)0x8000U) /*!< PHY Reset */ +#define PHY_LOOPBACK ((uint16_t)0x4000U) /*!< Select loop-back mode */ +#define PHY_FULLDUPLEX_100M ((uint16_t)0x2100U) /*!< Set the 
full-duplex mode at 100 Mb/s */ +#define PHY_HALFDUPLEX_100M ((uint16_t)0x2000U) /*!< Set the half-duplex mode at 100 Mb/s */ +#define PHY_FULLDUPLEX_10M ((uint16_t)0x0100U) /*!< Set the full-duplex mode at 10 Mb/s */ +#define PHY_HALFDUPLEX_10M ((uint16_t)0x0000U) /*!< Set the half-duplex mode at 10 Mb/s */ +#define PHY_AUTONEGOTIATION ((uint16_t)0x1000U) /*!< Enable auto-negotiation function */ +#define PHY_RESTART_AUTONEGOTIATION ((uint16_t)0x0200U) /*!< Restart auto-negotiation function */ +#define PHY_POWERDOWN ((uint16_t)0x0800U) /*!< Select the power down mode */ +#define PHY_ISOLATE ((uint16_t)0x0400U) /*!< Isolate PHY from MII */ + +#define PHY_AUTONEGO_COMPLETE ((uint16_t)0x0020U) /*!< Auto-Negotiation process completed */ +#define PHY_LINKED_STATUS ((uint16_t)0x0004U) /*!< Valid link established */ +#define PHY_JABBER_DETECTION ((uint16_t)0x0002U) /*!< Jabber condition detected */ + +/* Section 4: Extended PHY Registers */ +#define PHY_SR ((uint16_t)0x1FU) /*!< PHY status register Offset */ +#define PHY_MICR ((uint16_t)0x11U) /*!< MII Interrupt Control Register */ +#define PHY_MISR ((uint16_t)0x12U) /*!< MII Interrupt Status and Misc. Control Register */ + +#define PHY_LINK_STATUS ((uint16_t)0x0001U) /*!< PHY Link mask */ +#define PHY_SPEED_STATUS ((uint16_t)0x0004U) /*!< PHY Speed mask */ +#define PHY_DUPLEX_STATUS ((uint16_t)0x0010U) /*!< PHY Duplex mask */ + +#define PHY_MICR_INT_EN ((uint16_t)0x0002U) /*!< PHY Enable interrupts */ +#define PHY_MICR_INT_OE ((uint16_t)0x0001U) /*!< PHY Enable output interrupt events */ + +#define PHY_MISR_LINK_INT_EN ((uint16_t)0x0020U) /*!< Enable Interrupt on change of link status */ +#define PHY_LINK_INTERRUPT ((uint16_t)0x2000U) /*!< PHY link status interrupt mask */ + +/* ################## SPI peripheral configuration ########################## */ + +/* CRC FEATURE: Use to activate CRC feature inside HAL SPI Driver +* Activated: CRC code is present inside driver +* Deactivated: CRC code cleaned from driver +*/ + +#define USE_SPI_CRC 0U + +/* Includes ------------------------------------------------------------------*/ +/** + * @brief Include module's header file + */ + +#ifdef HAL_RCC_MODULE_ENABLED + #include "stm32f7xx_hal_rcc.h" +#endif /* HAL_RCC_MODULE_ENABLED */ + +#ifdef HAL_EXTI_MODULE_ENABLED + #include "stm32f7xx_hal_exti.h" +#endif /* HAL_EXTI_MODULE_ENABLED */ + +#ifdef HAL_GPIO_MODULE_ENABLED + #include "stm32f7xx_hal_gpio.h" +#endif /* HAL_GPIO_MODULE_ENABLED */ + +#ifdef HAL_DMA_MODULE_ENABLED + #include "stm32f7xx_hal_dma.h" +#endif /* HAL_DMA_MODULE_ENABLED */ + +#ifdef HAL_CORTEX_MODULE_ENABLED + #include "stm32f7xx_hal_cortex.h" +#endif /* HAL_CORTEX_MODULE_ENABLED */ + +#ifdef HAL_ADC_MODULE_ENABLED + #include "stm32f7xx_hal_adc.h" +#endif /* HAL_ADC_MODULE_ENABLED */ + +#ifdef HAL_CAN_MODULE_ENABLED + #include "stm32f7xx_hal_can.h" +#endif /* HAL_CAN_MODULE_ENABLED */ + +#ifdef HAL_CEC_MODULE_ENABLED + #include "stm32f7xx_hal_cec.h" +#endif /* HAL_CEC_MODULE_ENABLED */ + +#ifdef HAL_CRC_MODULE_ENABLED + #include "stm32f7xx_hal_crc.h" +#endif /* HAL_CRC_MODULE_ENABLED */ + +#ifdef HAL_CRYP_MODULE_ENABLED + #include "stm32f7xx_hal_cryp.h" +#endif /* HAL_CRYP_MODULE_ENABLED */ + +#ifdef HAL_DMA2D_MODULE_ENABLED + #include "stm32f7xx_hal_dma2d.h" +#endif /* HAL_DMA2D_MODULE_ENABLED */ + +#ifdef HAL_DAC_MODULE_ENABLED + #include "stm32f7xx_hal_dac.h" +#endif /* HAL_DAC_MODULE_ENABLED */ + +#ifdef HAL_DCMI_MODULE_ENABLED + #include "stm32f7xx_hal_dcmi.h" +#endif /* HAL_DCMI_MODULE_ENABLED */ + +#ifdef 
HAL_ETH_MODULE_ENABLED + #include "stm32f7xx_hal_eth.h" +#endif /* HAL_ETH_MODULE_ENABLED */ + +#ifdef HAL_FLASH_MODULE_ENABLED + #include "stm32f7xx_hal_flash.h" +#endif /* HAL_FLASH_MODULE_ENABLED */ + +#ifdef HAL_SRAM_MODULE_ENABLED + #include "stm32f7xx_hal_sram.h" +#endif /* HAL_SRAM_MODULE_ENABLED */ + +#ifdef HAL_NOR_MODULE_ENABLED + #include "stm32f7xx_hal_nor.h" +#endif /* HAL_NOR_MODULE_ENABLED */ + +#ifdef HAL_NAND_MODULE_ENABLED + #include "stm32f7xx_hal_nand.h" +#endif /* HAL_NAND_MODULE_ENABLED */ + +#ifdef HAL_SDRAM_MODULE_ENABLED + #include "stm32f7xx_hal_sdram.h" +#endif /* HAL_SDRAM_MODULE_ENABLED */ + +#ifdef HAL_HASH_MODULE_ENABLED + #include "stm32f7xx_hal_hash.h" +#endif /* HAL_HASH_MODULE_ENABLED */ + +#ifdef HAL_I2C_MODULE_ENABLED + #include "stm32f7xx_hal_i2c.h" +#endif /* HAL_I2C_MODULE_ENABLED */ + +#ifdef HAL_I2S_MODULE_ENABLED + #include "stm32f7xx_hal_i2s.h" +#endif /* HAL_I2S_MODULE_ENABLED */ + +#ifdef HAL_IWDG_MODULE_ENABLED + #include "stm32f7xx_hal_iwdg.h" +#endif /* HAL_IWDG_MODULE_ENABLED */ + +#ifdef HAL_LPTIM_MODULE_ENABLED + #include "stm32f7xx_hal_lptim.h" +#endif /* HAL_LPTIM_MODULE_ENABLED */ + +#ifdef HAL_LTDC_MODULE_ENABLED + #include "stm32f7xx_hal_ltdc.h" +#endif /* HAL_LTDC_MODULE_ENABLED */ + +#ifdef HAL_PWR_MODULE_ENABLED + #include "stm32f7xx_hal_pwr.h" +#endif /* HAL_PWR_MODULE_ENABLED */ + +#ifdef HAL_QSPI_MODULE_ENABLED + #include "stm32f7xx_hal_qspi.h" +#endif /* HAL_QSPI_MODULE_ENABLED */ + +#ifdef HAL_RNG_MODULE_ENABLED + #include "stm32f7xx_hal_rng.h" +#endif /* HAL_RNG_MODULE_ENABLED */ + +#ifdef HAL_RTC_MODULE_ENABLED + #include "stm32f7xx_hal_rtc.h" +#endif /* HAL_RTC_MODULE_ENABLED */ + +#ifdef HAL_SAI_MODULE_ENABLED + #include "stm32f7xx_hal_sai.h" +#endif /* HAL_SAI_MODULE_ENABLED */ + +#ifdef HAL_SD_MODULE_ENABLED + #include "stm32f7xx_hal_sd.h" +#endif /* HAL_SD_MODULE_ENABLED */ + +#ifdef HAL_MMC_MODULE_ENABLED + #include "stm32f7xx_hal_mmc.h" +#endif /* HAL_MMC_MODULE_ENABLED */ + +#ifdef HAL_SPDIFRX_MODULE_ENABLED + #include "stm32f7xx_hal_spdifrx.h" +#endif /* HAL_SPDIFRX_MODULE_ENABLED */ + +#ifdef HAL_SPI_MODULE_ENABLED + #include "stm32f7xx_hal_spi.h" +#endif /* HAL_SPI_MODULE_ENABLED */ + +#ifdef HAL_TIM_MODULE_ENABLED + #include "stm32f7xx_hal_tim.h" +#endif /* HAL_TIM_MODULE_ENABLED */ + +#ifdef HAL_UART_MODULE_ENABLED + #include "stm32f7xx_hal_uart.h" +#endif /* HAL_UART_MODULE_ENABLED */ + +#ifdef HAL_USART_MODULE_ENABLED + #include "stm32f7xx_hal_usart.h" +#endif /* HAL_USART_MODULE_ENABLED */ + +#ifdef HAL_IRDA_MODULE_ENABLED + #include "stm32f7xx_hal_irda.h" +#endif /* HAL_IRDA_MODULE_ENABLED */ + +#ifdef HAL_SMARTCARD_MODULE_ENABLED + #include "stm32f7xx_hal_smartcard.h" +#endif /* HAL_SMARTCARD_MODULE_ENABLED */ + +#ifdef HAL_WWDG_MODULE_ENABLED + #include "stm32f7xx_hal_wwdg.h" +#endif /* HAL_WWDG_MODULE_ENABLED */ + +#ifdef HAL_PCD_MODULE_ENABLED + #include "stm32f7xx_hal_pcd.h" +#endif /* HAL_PCD_MODULE_ENABLED */ + +#ifdef HAL_HCD_MODULE_ENABLED + #include "stm32f7xx_hal_hcd.h" +#endif /* HAL_HCD_MODULE_ENABLED */ + +#ifdef HAL_DFSDM_MODULE_ENABLED + #include "stm32f7xx_hal_dfsdm.h" +#endif /* HAL_DFSDM_MODULE_ENABLED */ + +#ifdef HAL_DSI_MODULE_ENABLED + #include "stm32f7xx_hal_dsi.h" +#endif /* HAL_DSI_MODULE_ENABLED */ + +#ifdef HAL_JPEG_MODULE_ENABLED + #include "stm32f7xx_hal_jpeg.h" +#endif /* HAL_JPEG_MODULE_ENABLED */ + +#ifdef HAL_MDIOS_MODULE_ENABLED + #include "stm32f7xx_hal_mdios.h" +#endif /* HAL_MDIOS_MODULE_ENABLED */ + +#ifdef HAL_SMBUS_MODULE_ENABLED + #include "stm32f7xx_hal_smbus.h" 
+#endif /* HAL_SMBUS_MODULE_ENABLED */ + +/* Exported macro ------------------------------------------------------------*/ +#ifdef USE_FULL_ASSERT +/** + * @brief The assert_param macro is used for function's parameters check. + * @param expr: If expr is false, it calls assert_failed function + * which reports the name of the source file and the source + * line number of the call that failed. + * If expr is true, it returns no value. + * @retval None + */ + #define assert_param(expr) ((expr) ? (void)0U : assert_failed((uint8_t *)__FILE__, __LINE__)) +/* Exported functions ------------------------------------------------------- */ + void assert_failed(uint8_t* file, uint32_t line); +#else + #define assert_param(expr) ((void)0U) +#endif /* USE_FULL_ASSERT */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_CONF_H */ + + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/stm32f7xx_it.h b/src/third_party/stm32_cubeide/Core/Inc/stm32f7xx_it.h new file mode 100644 index 000000000..4aa4b00cb --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/stm32f7xx_it.h @@ -0,0 +1,77 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file stm32f7xx_it.h + * @brief This file contains the headers of the interrupt handlers. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_IT_H +#define __STM32F7xx_IT_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Exported types ------------------------------------------------------------*/ +/* USER CODE BEGIN ET */ + +/* USER CODE END ET */ + +/* Exported constants --------------------------------------------------------*/ +/* USER CODE BEGIN EC */ + +/* USER CODE END EC */ + +/* Exported macro ------------------------------------------------------------*/ +/* USER CODE BEGIN EM */ + +/* USER CODE END EM */ + +/* Exported functions prototypes ---------------------------------------------*/ +void NMI_Handler(void); +void HardFault_Handler(void); +void MemManage_Handler(void); +void BusFault_Handler(void); +void UsageFault_Handler(void); +void DebugMon_Handler(void); +void DMA1_Stream5_IRQHandler(void); +void ADC_IRQHandler(void); +void EXTI9_5_IRQHandler(void); +void TIM1_UP_TIM10_IRQHandler(void); +void EXTI15_10_IRQHandler(void); +void SDMMC1_IRQHandler(void); +void DMA2_Stream3_IRQHandler(void); +void ETH_IRQHandler(void); +void OTG_FS_IRQHandler(void); +void DMA2_Stream6_IRQHandler(void); +void UART7_IRQHandler(void); +/* USER CODE BEGIN EFP */ + +/* USER CODE END EFP */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_IT_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/tim.h b/src/third_party/stm32_cubeide/Core/Inc/tim.h new file mode 100644 index 000000000..ec2672176 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/tim.h @@ -0,0 +1,66 @@ +/** + ****************************************************************************** + * File Name : TIM.h + * Description : This file provides code for the configuration + * of the TIM instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __tim_H +#define __tim_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern TIM_HandleTypeDef htim3; +extern TIM_HandleTypeDef htim6; +extern TIM_HandleTypeDef htim8; +extern TIM_HandleTypeDef htim12; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_TIM3_Init(void); +void MX_TIM6_Init(void); +void MX_TIM8_Init(void); +void MX_TIM12_Init(void); + +void HAL_TIM_MspPostInit(TIM_HandleTypeDef *htim); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ tim_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Inc/usart.h b/src/third_party/stm32_cubeide/Core/Inc/usart.h new file mode 100644 index 000000000..e140d970d --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Inc/usart.h @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * File Name : USART.h + * Description : This file provides code for the configuration + * of the USART instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __usart_H +#define __usart_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern UART_HandleTypeDef huart7; + +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +void MX_UART7_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ + +#ifdef __cplusplus +} +#endif +#endif /*__ usart_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/adc.c b/src/third_party/stm32_cubeide/Core/Src/adc.c new file mode 100644 index 000000000..fc3c137ce --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/adc.c @@ -0,0 +1,107 @@ +/** + ****************************************************************************** + * File Name : ADC.c + * Description : This file provides code for the configuration + * of the ADC instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "adc.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +ADC_HandleTypeDef hadc1; + +/* ADC1 init function */ +void MX_ADC1_Init(void) +{ + ADC_ChannelConfTypeDef sConfig = {0}; + + /** Configure the global features of the ADC (Clock, Resolution, Data Alignment and number of conversion) + */ + hadc1.Instance = ADC1; + hadc1.Init.ClockPrescaler = ADC_CLOCK_SYNC_PCLK_DIV2; + hadc1.Init.Resolution = ADC_RESOLUTION_12B; + hadc1.Init.ScanConvMode = ADC_SCAN_DISABLE; + hadc1.Init.ContinuousConvMode = DISABLE; + hadc1.Init.DiscontinuousConvMode = DISABLE; + hadc1.Init.ExternalTrigConvEdge = ADC_EXTERNALTRIGCONVEDGE_NONE; + hadc1.Init.ExternalTrigConv = ADC_SOFTWARE_START; + hadc1.Init.DataAlign = ADC_DATAALIGN_RIGHT; + hadc1.Init.NbrOfConversion = 1; + hadc1.Init.DMAContinuousRequests = DISABLE; + hadc1.Init.EOCSelection = ADC_EOC_SINGLE_CONV; + if (HAL_ADC_Init(&hadc1) != HAL_OK) + { + Error_Handler(); + } + /** Configure for the selected ADC regular channel its corresponding rank in the sequencer and its sample time. + */ + sConfig.Channel = ADC_CHANNEL_VBAT; + sConfig.Rank = ADC_REGULAR_RANK_1; + sConfig.SamplingTime = ADC_SAMPLETIME_15CYCLES; + if (HAL_ADC_ConfigChannel(&hadc1, &sConfig) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_ADC_MspInit(ADC_HandleTypeDef* adcHandle) +{ + + if(adcHandle->Instance==ADC1) + { + /* USER CODE BEGIN ADC1_MspInit 0 */ + + /* USER CODE END ADC1_MspInit 0 */ + /* ADC1 clock enable */ + __HAL_RCC_ADC1_CLK_ENABLE(); + + /* ADC1 interrupt Init */ + HAL_NVIC_SetPriority(ADC_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(ADC_IRQn); + /* USER CODE BEGIN ADC1_MspInit 1 */ + + /* USER CODE END ADC1_MspInit 1 */ + } +} + +void HAL_ADC_MspDeInit(ADC_HandleTypeDef* adcHandle) +{ + + if(adcHandle->Instance==ADC1) + { + /* USER CODE BEGIN ADC1_MspDeInit 0 */ + + /* USER CODE END ADC1_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_ADC1_CLK_DISABLE(); + + /* ADC1 interrupt Deinit */ + HAL_NVIC_DisableIRQ(ADC_IRQn); + /* USER CODE BEGIN ADC1_MspDeInit 1 */ + + /* USER CODE END ADC1_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/crc.c b/src/third_party/stm32_cubeide/Core/Src/crc.c new file mode 100644 index 000000000..ff997d2a3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/crc.c @@ -0,0 +1,82 @@ +/** + ****************************************************************************** + * File Name : CRC.c + * Description : This file provides code for the configuration + * of the CRC instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "crc.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +CRC_HandleTypeDef hcrc; + +/* CRC init function */ +void MX_CRC_Init(void) +{ + + hcrc.Instance = CRC; + hcrc.Init.DefaultPolynomialUse = DEFAULT_POLYNOMIAL_ENABLE; + hcrc.Init.DefaultInitValueUse = DEFAULT_INIT_VALUE_ENABLE; + hcrc.Init.InputDataInversionMode = CRC_INPUTDATA_INVERSION_NONE; + hcrc.Init.OutputDataInversionMode = CRC_OUTPUTDATA_INVERSION_DISABLE; + hcrc.InputDataFormat = CRC_INPUTDATA_FORMAT_BYTES; + if (HAL_CRC_Init(&hcrc) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_CRC_MspInit(CRC_HandleTypeDef* crcHandle) +{ + + if(crcHandle->Instance==CRC) + { + /* USER CODE BEGIN CRC_MspInit 0 */ + + /* USER CODE END CRC_MspInit 0 */ + /* CRC clock enable */ + __HAL_RCC_CRC_CLK_ENABLE(); + /* USER CODE BEGIN CRC_MspInit 1 */ + + /* USER CODE END CRC_MspInit 1 */ + } +} + +void HAL_CRC_MspDeInit(CRC_HandleTypeDef* crcHandle) +{ + + if(crcHandle->Instance==CRC) + { + /* USER CODE BEGIN CRC_MspDeInit 0 */ + + /* USER CODE END CRC_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_CRC_CLK_DISABLE(); + /* USER CODE BEGIN CRC_MspDeInit 1 */ + + /* USER CODE END CRC_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/dac.c b/src/third_party/stm32_cubeide/Core/Src/dac.c new file mode 100644 index 000000000..6842cde47 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/dac.c @@ -0,0 +1,127 @@ +/** + ****************************************************************************** + * File Name : DAC.c + * Description : This file provides code for the configuration + * of the DAC instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "dac.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +DAC_HandleTypeDef hdac; +DMA_HandleTypeDef hdma_dac1; + +/* DAC init function */ +void MX_DAC_Init(void) +{ + DAC_ChannelConfTypeDef sConfig = {0}; + + /** DAC Initialization + */ + hdac.Instance = DAC; + if (HAL_DAC_Init(&hdac) != HAL_OK) + { + Error_Handler(); + } + /** DAC channel OUT1 config + */ + sConfig.DAC_Trigger = DAC_TRIGGER_T6_TRGO; + sConfig.DAC_OutputBuffer = DAC_OUTPUTBUFFER_ENABLE; + if (HAL_DAC_ConfigChannel(&hdac, &sConfig, DAC_CHANNEL_1) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_DAC_MspInit(DAC_HandleTypeDef* dacHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(dacHandle->Instance==DAC) + { + /* USER CODE BEGIN DAC_MspInit 0 */ + + /* USER CODE END DAC_MspInit 0 */ + /* DAC clock enable */ + __HAL_RCC_DAC_CLK_ENABLE(); + + __HAL_RCC_GPIOA_CLK_ENABLE(); + /**DAC GPIO Configuration + PA4 ------> DAC_OUT1 + */ + GPIO_InitStruct.Pin = GPIO_PIN_4; + GPIO_InitStruct.Mode = GPIO_MODE_ANALOG; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + /* DAC DMA Init */ + /* DAC1 Init */ + hdma_dac1.Instance = DMA1_Stream5; + hdma_dac1.Init.Channel = DMA_CHANNEL_7; + hdma_dac1.Init.Direction = DMA_MEMORY_TO_PERIPH; + hdma_dac1.Init.PeriphInc = DMA_PINC_DISABLE; + hdma_dac1.Init.MemInc = DMA_MINC_ENABLE; + hdma_dac1.Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE; + hdma_dac1.Init.MemDataAlignment = DMA_MDATAALIGN_BYTE; + hdma_dac1.Init.Mode = DMA_NORMAL; + hdma_dac1.Init.Priority = DMA_PRIORITY_LOW; + hdma_dac1.Init.FIFOMode = DMA_FIFOMODE_DISABLE; + if (HAL_DMA_Init(&hdma_dac1) != HAL_OK) + { + Error_Handler(); + } + + __HAL_LINKDMA(dacHandle,DMA_Handle1,hdma_dac1); + + /* USER CODE BEGIN DAC_MspInit 1 */ + + /* USER CODE END DAC_MspInit 1 */ + } +} + +void HAL_DAC_MspDeInit(DAC_HandleTypeDef* dacHandle) +{ + + if(dacHandle->Instance==DAC) + { + /* USER CODE BEGIN DAC_MspDeInit 0 */ + + /* USER CODE END DAC_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_DAC_CLK_DISABLE(); + + /**DAC GPIO Configuration + PA4 ------> DAC_OUT1 + */ + HAL_GPIO_DeInit(GPIOA, GPIO_PIN_4); + + /* DAC DMA DeInit */ + HAL_DMA_DeInit(dacHandle->DMA_Handle1); + /* USER CODE BEGIN DAC_MspDeInit 1 */ + + /* USER CODE END DAC_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/dma.c b/src/third_party/stm32_cubeide/Core/Src/dma.c new file mode 100644 index 000000000..58d25ff70 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/dma.c @@ -0,0 +1,70 @@ +/** + ****************************************************************************** + * File Name : dma.c + * Description : This file provides code for the configuration + * of all the requested memory to memory DMA transfers. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "dma.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/*----------------------------------------------------------------------------*/ +/* Configure DMA */ +/*----------------------------------------------------------------------------*/ + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/** + * Enable DMA controller clock + */ +void MX_DMA_Init(void) +{ + + /* DMA controller clock enable */ + __HAL_RCC_DMA1_CLK_ENABLE(); + __HAL_RCC_DMA2_CLK_ENABLE(); + + /* DMA interrupt init */ + /* DMA1_Stream5_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(DMA1_Stream5_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(DMA1_Stream5_IRQn); + /* DMA2_Stream3_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(DMA2_Stream3_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(DMA2_Stream3_IRQn); + /* DMA2_Stream6_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(DMA2_Stream6_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(DMA2_Stream6_IRQn); + +} + +/* USER CODE BEGIN 2 */ + +/* USER CODE END 2 */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/dma2d.c b/src/third_party/stm32_cubeide/Core/Src/dma2d.c new file mode 100644 index 000000000..bd613c4e1 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/dma2d.c @@ -0,0 +1,90 @@ +/** + ****************************************************************************** + * File Name : DMA2D.c + * Description : This file provides code for the configuration + * of the DMA2D instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "dma2d.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +DMA2D_HandleTypeDef hdma2d; + +/* DMA2D init function */ +void MX_DMA2D_Init(void) +{ + + hdma2d.Instance = DMA2D; + hdma2d.Init.Mode = DMA2D_M2M; + hdma2d.Init.ColorMode = DMA2D_OUTPUT_RGB565; + hdma2d.Init.OutputOffset = 0; + hdma2d.LayerCfg[1].InputOffset = 0; + hdma2d.LayerCfg[1].InputColorMode = DMA2D_INPUT_RGB565; + hdma2d.LayerCfg[1].AlphaMode = DMA2D_NO_MODIF_ALPHA; + hdma2d.LayerCfg[1].InputAlpha = 0; + hdma2d.LayerCfg[1].AlphaInverted = DMA2D_REGULAR_ALPHA; + hdma2d.LayerCfg[1].RedBlueSwap = DMA2D_RB_REGULAR; + if (HAL_DMA2D_Init(&hdma2d) != HAL_OK) + { + Error_Handler(); + } + if (HAL_DMA2D_ConfigLayer(&hdma2d, 1) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_DMA2D_MspInit(DMA2D_HandleTypeDef* dma2dHandle) +{ + + if(dma2dHandle->Instance==DMA2D) + { + /* USER CODE BEGIN DMA2D_MspInit 0 */ + + /* USER CODE END DMA2D_MspInit 0 */ + /* DMA2D clock enable */ + __HAL_RCC_DMA2D_CLK_ENABLE(); + /* USER CODE BEGIN DMA2D_MspInit 1 */ + + /* USER CODE END DMA2D_MspInit 1 */ + } +} + +void HAL_DMA2D_MspDeInit(DMA2D_HandleTypeDef* dma2dHandle) +{ + + if(dma2dHandle->Instance==DMA2D) + { + /* USER CODE BEGIN DMA2D_MspDeInit 0 */ + + /* USER CODE END DMA2D_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_DMA2D_CLK_DISABLE(); + /* USER CODE BEGIN DMA2D_MspDeInit 1 */ + + /* USER CODE END DMA2D_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/fmc.c b/src/third_party/stm32_cubeide/Core/Src/fmc.c new file mode 100644 index 000000000..e9afed0e9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/fmc.c @@ -0,0 +1,277 @@ +/** + ****************************************************************************** + * File Name : FMC.c + * Description : This file provides code for the configuration + * of the FMC peripheral. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "fmc.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +SDRAM_HandleTypeDef hsdram1; + +/* FMC initialization function */ +void MX_FMC_Init(void) +{ + FMC_SDRAM_TimingTypeDef SdramTiming = {0}; + + /** Perform the SDRAM1 memory initialization sequence + */ + hsdram1.Instance = FMC_SDRAM_DEVICE; + /* hsdram1.Init */ + hsdram1.Init.SDBank = FMC_SDRAM_BANK1; + hsdram1.Init.ColumnBitsNumber = FMC_SDRAM_COLUMN_BITS_NUM_8; + hsdram1.Init.RowBitsNumber = FMC_SDRAM_ROW_BITS_NUM_12; + hsdram1.Init.MemoryDataWidth = FMC_SDRAM_MEM_BUS_WIDTH_16; + hsdram1.Init.InternalBankNumber = FMC_SDRAM_INTERN_BANKS_NUM_4; + hsdram1.Init.CASLatency = FMC_SDRAM_CAS_LATENCY_3; + hsdram1.Init.WriteProtection = FMC_SDRAM_WRITE_PROTECTION_DISABLE; + hsdram1.Init.SDClockPeriod = FMC_SDRAM_CLOCK_PERIOD_2; + hsdram1.Init.ReadBurst = FMC_SDRAM_RBURST_DISABLE; + hsdram1.Init.ReadPipeDelay = FMC_SDRAM_RPIPE_DELAY_1; + /* SdramTiming */ + SdramTiming.LoadToActiveDelay = 2; + SdramTiming.ExitSelfRefreshDelay = 7; + SdramTiming.SelfRefreshTime = 4; + SdramTiming.RowCycleDelay = 7; + SdramTiming.WriteRecoveryTime = 3; + SdramTiming.RPDelay = 2; + SdramTiming.RCDDelay = 2; + + if (HAL_SDRAM_Init(&hsdram1, &SdramTiming) != HAL_OK) + { + Error_Handler( ); + } + +} + +static uint32_t FMC_Initialized = 0; + +static void HAL_FMC_MspInit(void){ + /* USER CODE BEGIN FMC_MspInit 0 */ + + /* USER CODE END FMC_MspInit 0 */ + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if (FMC_Initialized) { + return; + } + FMC_Initialized = 1; + + /* Peripheral clock enable */ + __HAL_RCC_FMC_CLK_ENABLE(); + + /** FMC GPIO Configuration + PF0 ------> FMC_A0 + PF1 ------> FMC_A1 + PF2 ------> FMC_A2 + PF3 ------> FMC_A3 + PF4 ------> FMC_A4 + PF5 ------> FMC_A5 + PH2 ------> FMC_SDCKE0 + PH3 ------> FMC_SDNE0 + PH5 ------> FMC_SDNWE + PF11 ------> FMC_SDNRAS + PF12 ------> FMC_A6 + PF13 ------> FMC_A7 + PF14 ------> FMC_A8 + PF15 ------> FMC_A9 + PG0 ------> FMC_A10 + PG1 ------> FMC_A11 + PE7 ------> FMC_D4 + PE8 ------> FMC_D5 + PE9 ------> FMC_D6 + PE10 ------> FMC_D7 + PE11 ------> FMC_D8 + PE12 ------> FMC_D9 + PE13 ------> FMC_D10 + PE14 ------> FMC_D11 + PE15 ------> FMC_D12 + PD8 ------> FMC_D13 + PD9 ------> FMC_D14 + PD10 ------> FMC_D15 + PD14 ------> FMC_D0 + PD15 ------> FMC_D1 + PG4 ------> FMC_BA0 + PG5 ------> FMC_BA1 + PG8 ------> FMC_SDCLK + PD0 ------> FMC_D2 + PD1 ------> FMC_D3 + PG15 ------> FMC_SDNCAS + PE0 ------> FMC_NBL0 + PE1 ------> FMC_NBL1 + */ + /* GPIO_InitStruct */ + GPIO_InitStruct.Pin = GPIO_PIN_0|GPIO_PIN_1|GPIO_PIN_2|GPIO_PIN_3 + |GPIO_PIN_4|GPIO_PIN_5|GPIO_PIN_11|GPIO_PIN_12 + |GPIO_PIN_13|GPIO_PIN_14|GPIO_PIN_15; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF12_FMC; + + HAL_GPIO_Init(GPIOF, &GPIO_InitStruct); + + /* GPIO_InitStruct */ + GPIO_InitStruct.Pin = GPIO_PIN_2|GPIO_PIN_3|GPIO_PIN_5; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF12_FMC; + + 
HAL_GPIO_Init(GPIOH, &GPIO_InitStruct); + + /* GPIO_InitStruct */ + GPIO_InitStruct.Pin = GPIO_PIN_0|GPIO_PIN_1|GPIO_PIN_4|GPIO_PIN_5 + |GPIO_PIN_8|GPIO_PIN_15; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF12_FMC; + + HAL_GPIO_Init(GPIOG, &GPIO_InitStruct); + + /* GPIO_InitStruct */ + GPIO_InitStruct.Pin = GPIO_PIN_7|GPIO_PIN_8|GPIO_PIN_9|GPIO_PIN_10 + |GPIO_PIN_11|GPIO_PIN_12|GPIO_PIN_13|GPIO_PIN_14 + |GPIO_PIN_15|GPIO_PIN_0|GPIO_PIN_1; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF12_FMC; + + HAL_GPIO_Init(GPIOE, &GPIO_InitStruct); + + /* GPIO_InitStruct */ + GPIO_InitStruct.Pin = GPIO_PIN_8|GPIO_PIN_9|GPIO_PIN_10|GPIO_PIN_14 + |GPIO_PIN_15|GPIO_PIN_0|GPIO_PIN_1; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF12_FMC; + + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /* USER CODE BEGIN FMC_MspInit 1 */ + + /* USER CODE END FMC_MspInit 1 */ +} + +void HAL_SDRAM_MspInit(SDRAM_HandleTypeDef* sdramHandle){ + /* USER CODE BEGIN SDRAM_MspInit 0 */ + + /* USER CODE END SDRAM_MspInit 0 */ + HAL_FMC_MspInit(); + /* USER CODE BEGIN SDRAM_MspInit 1 */ + + /* USER CODE END SDRAM_MspInit 1 */ +} + +static uint32_t FMC_DeInitialized = 0; + +static void HAL_FMC_MspDeInit(void){ + /* USER CODE BEGIN FMC_MspDeInit 0 */ + + /* USER CODE END FMC_MspDeInit 0 */ + if (FMC_DeInitialized) { + return; + } + FMC_DeInitialized = 1; + /* Peripheral clock enable */ + __HAL_RCC_FMC_CLK_DISABLE(); + + /** FMC GPIO Configuration + PF0 ------> FMC_A0 + PF1 ------> FMC_A1 + PF2 ------> FMC_A2 + PF3 ------> FMC_A3 + PF4 ------> FMC_A4 + PF5 ------> FMC_A5 + PH2 ------> FMC_SDCKE0 + PH3 ------> FMC_SDNE0 + PH5 ------> FMC_SDNWE + PF11 ------> FMC_SDNRAS + PF12 ------> FMC_A6 + PF13 ------> FMC_A7 + PF14 ------> FMC_A8 + PF15 ------> FMC_A9 + PG0 ------> FMC_A10 + PG1 ------> FMC_A11 + PE7 ------> FMC_D4 + PE8 ------> FMC_D5 + PE9 ------> FMC_D6 + PE10 ------> FMC_D7 + PE11 ------> FMC_D8 + PE12 ------> FMC_D9 + PE13 ------> FMC_D10 + PE14 ------> FMC_D11 + PE15 ------> FMC_D12 + PD8 ------> FMC_D13 + PD9 ------> FMC_D14 + PD10 ------> FMC_D15 + PD14 ------> FMC_D0 + PD15 ------> FMC_D1 + PG4 ------> FMC_BA0 + PG5 ------> FMC_BA1 + PG8 ------> FMC_SDCLK + PD0 ------> FMC_D2 + PD1 ------> FMC_D3 + PG15 ------> FMC_SDNCAS + PE0 ------> FMC_NBL0 + PE1 ------> FMC_NBL1 + */ + + HAL_GPIO_DeInit(GPIOF, GPIO_PIN_0|GPIO_PIN_1|GPIO_PIN_2|GPIO_PIN_3 + |GPIO_PIN_4|GPIO_PIN_5|GPIO_PIN_11|GPIO_PIN_12 + |GPIO_PIN_13|GPIO_PIN_14|GPIO_PIN_15); + + HAL_GPIO_DeInit(GPIOH, GPIO_PIN_2|GPIO_PIN_3|GPIO_PIN_5); + + HAL_GPIO_DeInit(GPIOG, GPIO_PIN_0|GPIO_PIN_1|GPIO_PIN_4|GPIO_PIN_5 + |GPIO_PIN_8|GPIO_PIN_15); + + HAL_GPIO_DeInit(GPIOE, GPIO_PIN_7|GPIO_PIN_8|GPIO_PIN_9|GPIO_PIN_10 + |GPIO_PIN_11|GPIO_PIN_12|GPIO_PIN_13|GPIO_PIN_14 + |GPIO_PIN_15|GPIO_PIN_0|GPIO_PIN_1); + + HAL_GPIO_DeInit(GPIOD, GPIO_PIN_8|GPIO_PIN_9|GPIO_PIN_10|GPIO_PIN_14 + |GPIO_PIN_15|GPIO_PIN_0|GPIO_PIN_1); + + /* USER CODE BEGIN FMC_MspDeInit 1 */ + + /* USER CODE END FMC_MspDeInit 1 */ +} + +void HAL_SDRAM_MspDeInit(SDRAM_HandleTypeDef* sdramHandle){ + /* USER CODE BEGIN SDRAM_MspDeInit 0 */ + + /* USER CODE END SDRAM_MspDeInit 0 */ + HAL_FMC_MspDeInit(); + /* USER CODE BEGIN SDRAM_MspDeInit 1 */ + + /* USER 
CODE END SDRAM_MspDeInit 1 */ +} +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/freertos.c b/src/third_party/stm32_cubeide/Core/Src/freertos.c new file mode 100644 index 000000000..d9e597634 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/freertos.c @@ -0,0 +1,145 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * File Name : freertos.c + * Description : Code for freertos applications + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "FreeRTOS.h" +#include "task.h" +#include "main.h" +#include "cmsis_os.h" + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN PTD */ + +/* USER CODE END PTD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ + +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ +/* USER CODE BEGIN Variables */ + +/* USER CODE END Variables */ +osThreadId defaultTaskHandle; + +/* Private function prototypes -----------------------------------------------*/ +/* USER CODE BEGIN FunctionPrototypes */ + +/* USER CODE END FunctionPrototypes */ + +void StartDefaultTask(void const * argument); + +extern void MX_LWIP_Init(void); +extern void MX_USB_DEVICE_Init(void); +void MX_FREERTOS_Init(void); /* (MISRA C 2004 rule 8.1) */ + +/* GetIdleTaskMemory prototype (linked to static allocation support) */ +void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); + +/* USER CODE BEGIN GET_IDLE_TASK_MEMORY */ +static StaticTask_t xIdleTaskTCBBuffer; +static StackType_t xIdleStack[configMINIMAL_STACK_SIZE]; + +void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ) +{ + *ppxIdleTaskTCBBuffer = &xIdleTaskTCBBuffer; + *ppxIdleTaskStackBuffer = &xIdleStack[0]; + *pulIdleTaskStackSize = configMINIMAL_STACK_SIZE; + /* place for user code */ +} +/* USER CODE END GET_IDLE_TASK_MEMORY */ + +/** + * @brief FreeRTOS initialization + * @param None + * @retval None + */ +void MX_FREERTOS_Init(void) { + /* USER CODE BEGIN Init */ + + /* USER CODE END Init */ + + /* USER CODE BEGIN RTOS_MUTEX */ + /* add mutexes, ... */ + /* USER CODE END RTOS_MUTEX */ + + /* USER CODE BEGIN RTOS_SEMAPHORES */ + /* add semaphores, ... */ + /* USER CODE END RTOS_SEMAPHORES */ + + /* USER CODE BEGIN RTOS_TIMERS */ + /* start timers, add new ones, ... */ + /* USER CODE END RTOS_TIMERS */ + + /* USER CODE BEGIN RTOS_QUEUES */ + /* add queues, ... */ + /* USER CODE END RTOS_QUEUES */ + + /* Create the thread(s) */ + /* definition and creation of defaultTask */ + osThreadDef(defaultTask, StartDefaultTask, osPriorityNormal, 0, 128); + defaultTaskHandle = osThreadCreate(osThread(defaultTask), NULL); + + /* USER CODE BEGIN RTOS_THREADS */ + /* add threads, ... */ + /* USER CODE END RTOS_THREADS */ + +} + +/* USER CODE BEGIN Header_StartDefaultTask */ +/** + * @brief Function implementing the defaultTask thread. 
+ * @param argument: Not used + * @retval None + */ +/* USER CODE END Header_StartDefaultTask */ +void StartDefaultTask(void const * argument) +{ + /* init code for LWIP */ + MX_LWIP_Init(); + + /* init code for USB_DEVICE */ + MX_USB_DEVICE_Init(); + /* USER CODE BEGIN StartDefaultTask */ + /* Infinite loop */ + for(;;) + { + osDelay(1); + } + /* USER CODE END StartDefaultTask */ +} + +/* Private application code --------------------------------------------------*/ +/* USER CODE BEGIN Application */ + +/* USER CODE END Application */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/gpio.c b/src/third_party/stm32_cubeide/Core/Src/gpio.c new file mode 100644 index 000000000..ff604afb4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/gpio.c @@ -0,0 +1,196 @@ +/** + ****************************************************************************** + * File Name : gpio.c + * Description : This file provides code for the configuration + * of all used GPIO pins. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "gpio.h" +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/*----------------------------------------------------------------------------*/ +/* Configure GPIO */ +/*----------------------------------------------------------------------------*/ +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/** Configure pins as + * Analog + * Input + * Output + * EVENT_OUT + * EXTI + * Free pins are configured automatically as Analog (this feature is enabled through + * the Code Generation settings) + PC9 ------> RCC_MCO_2 +*/ +void MX_GPIO_Init(void) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + + /* GPIO Ports Clock Enable */ + __HAL_RCC_GPIOE_CLK_ENABLE(); + __HAL_RCC_GPIOI_CLK_ENABLE(); + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOF_CLK_ENABLE(); + __HAL_RCC_GPIOH_CLK_ENABLE(); + __HAL_RCC_GPIOA_CLK_ENABLE(); + __HAL_RCC_GPIOB_CLK_ENABLE(); + __HAL_RCC_GPIOG_CLK_ENABLE(); + __HAL_RCC_GPIOD_CLK_ENABLE(); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(GPIOE, SPI4_CSA_Pin|SPI4_CSB_Pin, GPIO_PIN_RESET); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(GPIOD, SPI2_CSB_Pin|SPI2_CSA_Pin|OE_SYNC_Pin|USB_OTG_FS_PSO_Pin, GPIO_PIN_RESET); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(GPIOG, PWR_DIRECT_Pin|PWR_SSTART_Pin|SPI5_CSB_Pin|SPI5_CSA_Pin, GPIO_PIN_RESET); + + /*Configure GPIO pins : PEPin PEPin */ + GPIO_InitStruct.Pin = SPI4_CSA_Pin|SPI4_CSB_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + HAL_GPIO_Init(GPIOE, &GPIO_InitStruct); + + /*Configure GPIO pin : PI8 */ + GPIO_InitStruct.Pin = GPIO_PIN_8; + GPIO_InitStruct.Mode = GPIO_MODE_ANALOG; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(GPIOI, &GPIO_InitStruct); + + /*Configure GPIO pin : PtPin */ + GPIO_InitStruct.Pin = DIN2_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(DIN2_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pin : PtPin */ + GPIO_InitStruct.Pin = NFAULT_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_PULLUP; + HAL_GPIO_Init(NFAULT_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pins : PDPin PDPin */ + GPIO_InitStruct.Pin = SPI2_CSB_Pin|SPI2_CSA_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /*Configure GPIO pins : PDPin PDPin */ + GPIO_InitStruct.Pin = OE_SYNC_Pin|USB_OTG_FS_PSO_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /*Configure GPIO pins : PGPin PGPin */ + GPIO_InitStruct.Pin = PWR_DIRECT_Pin|PWR_SSTART_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_PULLDOWN; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + HAL_GPIO_Init(GPIOG, &GPIO_InitStruct); + + /*Configure GPIO pin : PtPin */ + GPIO_InitStruct.Pin = MCLK_25_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + 
GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF0_MCO; + HAL_GPIO_Init(MCLK_25_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pins : PAPin PAPin */ + GPIO_InitStruct.Pin = SPI2_IRQ_Pin|SPI5_IRQ_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_IT_RISING; + GPIO_InitStruct.Pull = GPIO_PULLUP; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + /*Configure GPIO pin : PA10 */ + GPIO_InitStruct.Pin = GPIO_PIN_10; + GPIO_InitStruct.Mode = GPIO_MODE_ANALOG; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + /*Configure GPIO pin : PC10 */ + GPIO_InitStruct.Pin = GPIO_PIN_10; + GPIO_InitStruct.Mode = GPIO_MODE_ANALOG; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(GPIOC, &GPIO_InitStruct); + + /*Configure GPIO pin : PtPin */ + GPIO_InitStruct.Pin = SD_DETECT_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_IT_FALLING; + GPIO_InitStruct.Pull = GPIO_PULLUP; + HAL_GPIO_Init(SD_DETECT_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pins : PDPin PDPin */ + GPIO_InitStruct.Pin = USB_OTG_FS_OC_Pin|IRQ_TOUCH_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /*Configure GPIO pin : PD6 */ + GPIO_InitStruct.Pin = GPIO_PIN_6; + GPIO_InitStruct.Mode = GPIO_MODE_ANALOG; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /*Configure GPIO pin : PtPin */ + GPIO_InitStruct.Pin = USER_SW_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_PULLUP; + HAL_GPIO_Init(USER_SW_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pins : PGPin PGPin */ + GPIO_InitStruct.Pin = SPI5_CSB_Pin|SPI5_CSA_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + HAL_GPIO_Init(GPIOG, &GPIO_InitStruct); + + /*Configure GPIO pin : PtPin */ + GPIO_InitStruct.Pin = SPI4_IRQ_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_IT_RISING; + GPIO_InitStruct.Pull = GPIO_PULLUP; + HAL_GPIO_Init(SPI4_IRQ_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pin : PtPin */ + GPIO_InitStruct.Pin = ENC_SW_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_PULLUP; + HAL_GPIO_Init(ENC_SW_GPIO_Port, &GPIO_InitStruct); + + /* EXTI interrupt init*/ + HAL_NVIC_SetPriority(EXTI9_5_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(EXTI9_5_IRQn); + + HAL_NVIC_SetPriority(EXTI15_10_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(EXTI15_10_IRQn); + +} + +/* USER CODE BEGIN 2 */ + +/* USER CODE END 2 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/i2c.c b/src/third_party/stm32_cubeide/Core/Src/i2c.c new file mode 100644 index 000000000..e94fd13ae --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/i2c.c @@ -0,0 +1,118 @@ +/** + ****************************************************************************** + * File Name : I2C.c + * Description : This file provides code for the configuration + * of the I2C instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "i2c.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +I2C_HandleTypeDef hi2c1; + +/* I2C1 init function */ +void MX_I2C1_Init(void) +{ + + hi2c1.Instance = I2C1; + hi2c1.Init.Timing = 0x6000030D; + hi2c1.Init.OwnAddress1 = 0; + hi2c1.Init.AddressingMode = I2C_ADDRESSINGMODE_7BIT; + hi2c1.Init.DualAddressMode = I2C_DUALADDRESS_DISABLE; + hi2c1.Init.OwnAddress2 = 0; + hi2c1.Init.OwnAddress2Masks = I2C_OA2_NOMASK; + hi2c1.Init.GeneralCallMode = I2C_GENERALCALL_DISABLE; + hi2c1.Init.NoStretchMode = I2C_NOSTRETCH_DISABLE; + if (HAL_I2C_Init(&hi2c1) != HAL_OK) + { + Error_Handler(); + } + /** Configure Analogue filter + */ + if (HAL_I2CEx_ConfigAnalogFilter(&hi2c1, I2C_ANALOGFILTER_ENABLE) != HAL_OK) + { + Error_Handler(); + } + /** Configure Digital filter + */ + if (HAL_I2CEx_ConfigDigitalFilter(&hi2c1, 0) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_I2C_MspInit(I2C_HandleTypeDef* i2cHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(i2cHandle->Instance==I2C1) + { + /* USER CODE BEGIN I2C1_MspInit 0 */ + + /* USER CODE END I2C1_MspInit 0 */ + + __HAL_RCC_GPIOB_CLK_ENABLE(); + /**I2C1 GPIO Configuration + PB6 ------> I2C1_SCL + PB7 ------> I2C1_SDA + */ + GPIO_InitStruct.Pin = GPIO_PIN_6|GPIO_PIN_7; + GPIO_InitStruct.Mode = GPIO_MODE_AF_OD; + GPIO_InitStruct.Pull = GPIO_PULLUP; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF4_I2C1; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + /* I2C1 clock enable */ + __HAL_RCC_I2C1_CLK_ENABLE(); + /* USER CODE BEGIN I2C1_MspInit 1 */ + + /* USER CODE END I2C1_MspInit 1 */ + } +} + +void HAL_I2C_MspDeInit(I2C_HandleTypeDef* i2cHandle) +{ + + if(i2cHandle->Instance==I2C1) + { + /* USER CODE BEGIN I2C1_MspDeInit 0 */ + + /* USER CODE END I2C1_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_I2C1_CLK_DISABLE(); + + /**I2C1 GPIO Configuration + PB6 ------> I2C1_SCL + PB7 ------> I2C1_SDA + */ + HAL_GPIO_DeInit(GPIOB, GPIO_PIN_6|GPIO_PIN_7); + + /* USER CODE BEGIN I2C1_MspDeInit 1 */ + + /* USER CODE END I2C1_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/iwdg.c b/src/third_party/stm32_cubeide/Core/Src/iwdg.c new file mode 100644 index 000000000..a83d0ca18 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/iwdg.c @@ -0,0 +1,48 @@ +/** + ****************************************************************************** + * File Name : IWDG.c + * Description : This file provides code for the configuration + * of the IWDG instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "iwdg.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +IWDG_HandleTypeDef hiwdg; + +/* IWDG init function */ +void MX_IWDG_Init(void) +{ + + hiwdg.Instance = IWDG; + hiwdg.Init.Prescaler = IWDG_PRESCALER_16; + hiwdg.Init.Window = 4095; + hiwdg.Init.Reload = 4095; + if (HAL_IWDG_Init(&hiwdg) != HAL_OK) + { + Error_Handler(); + } + +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/jpeg.c b/src/third_party/stm32_cubeide/Core/Src/jpeg.c new file mode 100644 index 000000000..6df3b17e0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/jpeg.c @@ -0,0 +1,77 @@ +/** + ****************************************************************************** + * File Name : JPEG.c + * Description : This file provides code for the configuration + * of the JPEG instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "jpeg.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +JPEG_HandleTypeDef hjpeg; + +/* JPEG init function */ +void MX_JPEG_Init(void) +{ + + hjpeg.Instance = JPEG; + if (HAL_JPEG_Init(&hjpeg) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_JPEG_MspInit(JPEG_HandleTypeDef* jpegHandle) +{ + + if(jpegHandle->Instance==JPEG) + { + /* USER CODE BEGIN JPEG_MspInit 0 */ + + /* USER CODE END JPEG_MspInit 0 */ + /* JPEG clock enable */ + __HAL_RCC_JPEG_CLK_ENABLE(); + /* USER CODE BEGIN JPEG_MspInit 1 */ + + /* USER CODE END JPEG_MspInit 1 */ + } +} + +void HAL_JPEG_MspDeInit(JPEG_HandleTypeDef* jpegHandle) +{ + + if(jpegHandle->Instance==JPEG) + { + /* USER CODE BEGIN JPEG_MspDeInit 0 */ + + /* USER CODE END JPEG_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_JPEG_CLK_DISABLE(); + /* USER CODE BEGIN JPEG_MspDeInit 1 */ + + /* USER CODE END JPEG_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/ltdc.c b/src/third_party/stm32_cubeide/Core/Src/ltdc.c new file mode 100644 index 000000000..545240677 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/ltdc.c @@ -0,0 +1,256 @@ +/** + ****************************************************************************** + * File Name : LTDC.c + * Description : This file provides code for the configuration + * of the LTDC instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "ltdc.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +LTDC_HandleTypeDef hltdc; + +/* LTDC init function */ +void MX_LTDC_Init(void) +{ + LTDC_LayerCfgTypeDef pLayerCfg = {0}; + + hltdc.Instance = LTDC; + hltdc.Init.HSPolarity = LTDC_HSPOLARITY_AH; + hltdc.Init.VSPolarity = LTDC_VSPOLARITY_AL; + hltdc.Init.DEPolarity = LTDC_DEPOLARITY_AL; + hltdc.Init.PCPolarity = LTDC_PCPOLARITY_IPC; + hltdc.Init.HorizontalSync = 50; + hltdc.Init.VerticalSync = 19; + hltdc.Init.AccumulatedHBP = 93; + hltdc.Init.AccumulatedVBP = 31; + hltdc.Init.AccumulatedActiveW = 573; + hltdc.Init.AccumulatedActiveH = 303; + hltdc.Init.TotalWidth = 581; + hltdc.Init.TotalHeigh = 311; + hltdc.Init.Backcolor.Blue = 0; + hltdc.Init.Backcolor.Green = 0; + hltdc.Init.Backcolor.Red = 0; + if (HAL_LTDC_Init(&hltdc) != HAL_OK) + { + Error_Handler(); + } + pLayerCfg.WindowX0 = 0; + pLayerCfg.WindowX1 = 480; + pLayerCfg.WindowY0 = 0; + pLayerCfg.WindowY1 = 272; + pLayerCfg.PixelFormat = LTDC_PIXEL_FORMAT_RGB565; + pLayerCfg.Alpha = 255; + pLayerCfg.Alpha0 = 255; + pLayerCfg.BlendingFactor1 = LTDC_BLENDING_FACTOR1_CA; + pLayerCfg.BlendingFactor2 = LTDC_BLENDING_FACTOR2_CA; + pLayerCfg.FBStartAdress = 0; + pLayerCfg.ImageWidth = 480; + pLayerCfg.ImageHeight = 272; + pLayerCfg.Backcolor.Blue = 0; + pLayerCfg.Backcolor.Green = 0; + pLayerCfg.Backcolor.Red = 0; + if (HAL_LTDC_ConfigLayer(&hltdc, &pLayerCfg, 0) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_LTDC_MspInit(LTDC_HandleTypeDef* ltdcHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(ltdcHandle->Instance==LTDC) + { + /* USER CODE BEGIN LTDC_MspInit 0 */ + + /* USER CODE END LTDC_MspInit 0 */ + /* LTDC clock enable */ + __HAL_RCC_LTDC_CLK_ENABLE(); + + __HAL_RCC_GPIOI_CLK_ENABLE(); + __HAL_RCC_GPIOF_CLK_ENABLE(); + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOH_CLK_ENABLE(); + __HAL_RCC_GPIOA_CLK_ENABLE(); + __HAL_RCC_GPIOB_CLK_ENABLE(); + __HAL_RCC_GPIOG_CLK_ENABLE(); + __HAL_RCC_GPIOD_CLK_ENABLE(); + /**LTDC GPIO Configuration + PI9 ------> LTDC_VSYNC + PI10 ------> LTDC_HSYNC + PI11 ------> LTDC_G6 + PF10 ------> LTDC_DE + PC0 ------> LTDC_R5 + PH4 ------> LTDC_G4 + PA5 ------> LTDC_R4 + PA6 ------> LTDC_G2 + PB0 ------> LTDC_R3 + PB1 ------> LTDC_R6 + PG6 ------> LTDC_R7 + PG7 ------> LTDC_CLK + PI0 ------> LTDC_G5 + PD3 ------> LTDC_G7 + PG10 ------> LTDC_G3 + PG11 ------> LTDC_B3 + PG12 ------> LTDC_B4 + PI5 ------> LTDC_B5 + PI6 ------> LTDC_B6 + PI7 ------> LTDC_B7 + */ + GPIO_InitStruct.Pin = GPIO_PIN_9|GPIO_PIN_10|GPIO_PIN_0|GPIO_PIN_5 + |GPIO_PIN_6|GPIO_PIN_7; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF14_LTDC; + HAL_GPIO_Init(GPIOI, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_11; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF9_LTDC; + HAL_GPIO_Init(GPIOI, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_10; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + 
GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF14_LTDC; + HAL_GPIO_Init(GPIOF, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_0; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF14_LTDC; + HAL_GPIO_Init(GPIOC, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_4; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF14_LTDC; + HAL_GPIO_Init(GPIOH, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_5|GPIO_PIN_6; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF14_LTDC; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_0|GPIO_PIN_1; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF9_LTDC; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_6|GPIO_PIN_7|GPIO_PIN_11; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF14_LTDC; + HAL_GPIO_Init(GPIOG, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_3; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF14_LTDC; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_10|GPIO_PIN_12; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF9_LTDC; + HAL_GPIO_Init(GPIOG, &GPIO_InitStruct); + + /* USER CODE BEGIN LTDC_MspInit 1 */ + + /* USER CODE END LTDC_MspInit 1 */ + } +} + +void HAL_LTDC_MspDeInit(LTDC_HandleTypeDef* ltdcHandle) +{ + + if(ltdcHandle->Instance==LTDC) + { + /* USER CODE BEGIN LTDC_MspDeInit 0 */ + + /* USER CODE END LTDC_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_LTDC_CLK_DISABLE(); + + /**LTDC GPIO Configuration + PI9 ------> LTDC_VSYNC + PI10 ------> LTDC_HSYNC + PI11 ------> LTDC_G6 + PF10 ------> LTDC_DE + PC0 ------> LTDC_R5 + PH4 ------> LTDC_G4 + PA5 ------> LTDC_R4 + PA6 ------> LTDC_G2 + PB0 ------> LTDC_R3 + PB1 ------> LTDC_R6 + PG6 ------> LTDC_R7 + PG7 ------> LTDC_CLK + PI0 ------> LTDC_G5 + PD3 ------> LTDC_G7 + PG10 ------> LTDC_G3 + PG11 ------> LTDC_B3 + PG12 ------> LTDC_B4 + PI5 ------> LTDC_B5 + PI6 ------> LTDC_B6 + PI7 ------> LTDC_B7 + */ + HAL_GPIO_DeInit(GPIOI, GPIO_PIN_9|GPIO_PIN_10|GPIO_PIN_11|GPIO_PIN_0 + |GPIO_PIN_5|GPIO_PIN_6|GPIO_PIN_7); + + HAL_GPIO_DeInit(GPIOF, GPIO_PIN_10); + + HAL_GPIO_DeInit(GPIOC, GPIO_PIN_0); + + HAL_GPIO_DeInit(GPIOH, GPIO_PIN_4); + + HAL_GPIO_DeInit(GPIOA, GPIO_PIN_5|GPIO_PIN_6); + + HAL_GPIO_DeInit(GPIOB, GPIO_PIN_0|GPIO_PIN_1); + + HAL_GPIO_DeInit(GPIOG, GPIO_PIN_6|GPIO_PIN_7|GPIO_PIN_10|GPIO_PIN_11 + |GPIO_PIN_12); + + HAL_GPIO_DeInit(GPIOD, GPIO_PIN_3); + + /* USER CODE BEGIN LTDC_MspDeInit 1 */ + + /* USER CODE END LTDC_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git 
a/src/third_party/stm32_cubeide/Core/Src/main.c b/src/third_party/stm32_cubeide/Core/Src/main.c new file mode 100644 index 000000000..f19e148d3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/main.c @@ -0,0 +1,225 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : main.c + * @brief : Main program body + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "cmsis_os.h" +#include "adc.h" +#include "crc.h" +#include "dac.h" +#include "dma.h" +#include "dma2d.h" +#include "fatfs.h" +#include "i2c.h" +#include "iwdg.h" +#include "jpeg.h" +#include "libjpeg.h" +#include "ltdc.h" +#include "lwip.h" +#include "rng.h" +#include "rtc.h" +#include "sdmmc.h" +#include "spi.h" +#include "tim.h" +#include "usart.h" +#include "usb_device.h" +#include "gpio.h" +#include "fmc.h" + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN PTD */ + +/* USER CODE END PTD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ + +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +void SystemClock_Config(void); +void MX_FREERTOS_Init(void); +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* Private user code ---------------------------------------------------------*/ +/* USER CODE BEGIN 0 */ +extern void debug_trace(const char *str, size_t len); + +int __io_putstr(char *ptr, int len) { + debug_trace(ptr, (size_t)len); + return 0; +} +/* USER CODE END 0 */ + +/** + * @brief The application entry point. 
+ * @retval int + */ + +/** + * @brief System Clock Configuration + * @retval None + */ +void SystemClock_Config(void) +{ + RCC_OscInitTypeDef RCC_OscInitStruct = {0}; + RCC_ClkInitTypeDef RCC_ClkInitStruct = {0}; + RCC_PeriphCLKInitTypeDef PeriphClkInitStruct = {0}; + + /** Configure LSE Drive Capability + */ + HAL_PWR_EnableBkUpAccess(); + __HAL_RCC_LSEDRIVE_CONFIG(RCC_LSEDRIVE_LOW); + /** Configure the main internal regulator output voltage + */ + __HAL_RCC_PWR_CLK_ENABLE(); + __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE1); + /** Initializes the CPU, AHB and APB busses clocks + */ + RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_LSI|RCC_OSCILLATORTYPE_HSE + |RCC_OSCILLATORTYPE_LSE; + RCC_OscInitStruct.HSEState = RCC_HSE_ON; + RCC_OscInitStruct.LSEState = RCC_LSE_ON; + RCC_OscInitStruct.LSIState = RCC_LSI_ON; + RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON; + RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSE; + RCC_OscInitStruct.PLL.PLLM = 25; + RCC_OscInitStruct.PLL.PLLN = 432; + RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV2; + RCC_OscInitStruct.PLL.PLLQ = 9; + if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) + { + Error_Handler(); + } + /** Activate the Over-Drive mode + */ + if (HAL_PWREx_EnableOverDrive() != HAL_OK) + { + Error_Handler(); + } + /** Initializes the CPU, AHB and APB busses clocks + */ + RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK|RCC_CLOCKTYPE_SYSCLK + |RCC_CLOCKTYPE_PCLK1|RCC_CLOCKTYPE_PCLK2; + RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK; + RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1; + RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV4; + RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV4; + + if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_7) != HAL_OK) + { + Error_Handler(); + } + PeriphClkInitStruct.PeriphClockSelection = RCC_PERIPHCLK_LTDC|RCC_PERIPHCLK_RTC + |RCC_PERIPHCLK_UART7|RCC_PERIPHCLK_I2C1 + |RCC_PERIPHCLK_SDMMC1|RCC_PERIPHCLK_CLK48; + PeriphClkInitStruct.PLLSAI.PLLSAIN = 200; + PeriphClkInitStruct.PLLSAI.PLLSAIR = 5; + PeriphClkInitStruct.PLLSAI.PLLSAIQ = 2; + PeriphClkInitStruct.PLLSAI.PLLSAIP = RCC_PLLSAIP_DIV2; + PeriphClkInitStruct.PLLSAIDivQ = 1; + PeriphClkInitStruct.PLLSAIDivR = RCC_PLLSAIDIVR_4; + PeriphClkInitStruct.RTCClockSelection = RCC_RTCCLKSOURCE_LSE; + PeriphClkInitStruct.Uart7ClockSelection = RCC_UART7CLKSOURCE_PCLK1; + PeriphClkInitStruct.I2c1ClockSelection = RCC_I2C1CLKSOURCE_PCLK1; + PeriphClkInitStruct.Clk48ClockSelection = RCC_CLK48SOURCE_PLL; + PeriphClkInitStruct.Sdmmc1ClockSelection = RCC_SDMMC1CLKSOURCE_CLK48; + if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInitStruct) != HAL_OK) + { + Error_Handler(); + } + HAL_RCC_MCOConfig(RCC_MCO2, RCC_MCO2SOURCE_HSE, RCC_MCODIV_1); + /** Enables the Clock Security System + */ + HAL_RCC_EnableCSS(); +} + +/* USER CODE BEGIN 4 */ + +/* USER CODE END 4 */ + + /** + * @brief Period elapsed callback in non blocking mode + * @note This function is called when TIM10 interrupt took place, inside + * HAL_TIM_IRQHandler(). It makes a direct call to HAL_IncTick() to increment + * a global variable "uwTick" used as application time base. + * @param htim : TIM handle + * @retval None + */ +void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim) +{ + /* USER CODE BEGIN Callback 0 */ + + /* USER CODE END Callback 0 */ + if (htim->Instance == TIM10) { + HAL_IncTick(); + } + /* USER CODE BEGIN Callback 1 */ + + /* USER CODE END Callback 1 */ +} + +/** + * @brief This function is executed in case of error occurrence. 
+ * @retval None + */ +void Error_Handler(void) +{ + /* USER CODE BEGIN Error_Handler_Debug */ + /* User can add his own implementation to report the HAL error return state */ + NVIC_SystemReset(); + /* USER CODE END Error_Handler_Debug */ +} + +#ifdef USE_FULL_ASSERT +/** + * @brief Reports the name of the source file and the source line number + * where the assert_param error has occurred. + * @param file: pointer to the source file name + * @param line: assert_param error line source number + * @retval None + */ +void assert_failed(uint8_t *file, uint32_t line) +{ + /* USER CODE BEGIN 6 */ + /* User can add his own implementation to report the file name and line number, + tex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */ + /* USER CODE END 6 */ +} +#endif /* USE_FULL_ASSERT */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/rng.c b/src/third_party/stm32_cubeide/Core/Src/rng.c new file mode 100644 index 000000000..5d23d554f --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/rng.c @@ -0,0 +1,77 @@ +/** + ****************************************************************************** + * File Name : RNG.c + * Description : This file provides code for the configuration + * of the RNG instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "rng.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +RNG_HandleTypeDef hrng; + +/* RNG init function */ +void MX_RNG_Init(void) +{ + + hrng.Instance = RNG; + if (HAL_RNG_Init(&hrng) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_RNG_MspInit(RNG_HandleTypeDef* rngHandle) +{ + + if(rngHandle->Instance==RNG) + { + /* USER CODE BEGIN RNG_MspInit 0 */ + + /* USER CODE END RNG_MspInit 0 */ + /* RNG clock enable */ + __HAL_RCC_RNG_CLK_ENABLE(); + /* USER CODE BEGIN RNG_MspInit 1 */ + + /* USER CODE END RNG_MspInit 1 */ + } +} + +void HAL_RNG_MspDeInit(RNG_HandleTypeDef* rngHandle) +{ + + if(rngHandle->Instance==RNG) + { + /* USER CODE BEGIN RNG_MspDeInit 0 */ + + /* USER CODE END RNG_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_RNG_CLK_DISABLE(); + /* USER CODE BEGIN RNG_MspDeInit 1 */ + + /* USER CODE END RNG_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/rtc.c b/src/third_party/stm32_cubeide/Core/Src/rtc.c new file mode 100644 index 000000000..121c125a2 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/rtc.c @@ -0,0 +1,87 @@ +/** + ****************************************************************************** + * File Name : RTC.c + * Description : This file provides code for the configuration + * of the RTC instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "rtc.h" + +/* USER CODE BEGIN 0 */ + +// hrtc.Init.AsynchPrediv = 124; // 25 000 000 / (25 * (124 + 1) * (7999 + 1)) = 1 + +/* USER CODE END 0 */ + +RTC_HandleTypeDef hrtc; + +/* RTC init function */ +void MX_RTC_Init(void) +{ + + /** Initialize RTC Only + */ + hrtc.Instance = RTC; + hrtc.Init.HourFormat = RTC_HOURFORMAT_24; + hrtc.Init.AsynchPrediv = 127; + hrtc.Init.SynchPrediv = 255; + hrtc.Init.OutPut = RTC_OUTPUT_DISABLE; + hrtc.Init.OutPutPolarity = RTC_OUTPUT_POLARITY_HIGH; + hrtc.Init.OutPutType = RTC_OUTPUT_TYPE_OPENDRAIN; + if (HAL_RTC_Init(&hrtc) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_RTC_MspInit(RTC_HandleTypeDef* rtcHandle) +{ + + if(rtcHandle->Instance==RTC) + { + /* USER CODE BEGIN RTC_MspInit 0 */ + + /* USER CODE END RTC_MspInit 0 */ + /* RTC clock enable */ + __HAL_RCC_RTC_ENABLE(); + /* USER CODE BEGIN RTC_MspInit 1 */ + + /* USER CODE END RTC_MspInit 1 */ + } +} + +void HAL_RTC_MspDeInit(RTC_HandleTypeDef* rtcHandle) +{ + + if(rtcHandle->Instance==RTC) + { + /* USER CODE BEGIN RTC_MspDeInit 0 */ + + /* USER CODE END RTC_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_RTC_DISABLE(); + /* USER CODE BEGIN RTC_MspDeInit 1 */ + + /* USER CODE END RTC_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/sdmmc.c b/src/third_party/stm32_cubeide/Core/Src/sdmmc.c new file mode 100644 index 000000000..6291043af --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/sdmmc.c @@ -0,0 +1,167 @@ +/** + ****************************************************************************** + * File Name : SDMMC.c + * Description : This file provides code for the configuration + * of the SDMMC instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "sdmmc.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +SD_HandleTypeDef hsd1; +DMA_HandleTypeDef hdma_sdmmc1_rx; +DMA_HandleTypeDef hdma_sdmmc1_tx; + +/* SDMMC1 init function */ + +void MX_SDMMC1_SD_Init(void) +{ + + hsd1.Instance = SDMMC1; + hsd1.Init.ClockEdge = SDMMC_CLOCK_EDGE_RISING; + hsd1.Init.ClockBypass = SDMMC_CLOCK_BYPASS_DISABLE; + hsd1.Init.ClockPowerSave = SDMMC_CLOCK_POWER_SAVE_DISABLE; + hsd1.Init.BusWide = SDMMC_BUS_WIDE_1B; + hsd1.Init.HardwareFlowControl = SDMMC_HARDWARE_FLOW_CONTROL_ENABLE; + hsd1.Init.ClockDiv = 0; + +} + +void HAL_SD_MspInit(SD_HandleTypeDef* sdHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(sdHandle->Instance==SDMMC1) + { + /* USER CODE BEGIN SDMMC1_MspInit 0 */ + + /* USER CODE END SDMMC1_MspInit 0 */ + /* SDMMC1 clock enable */ + __HAL_RCC_SDMMC1_CLK_ENABLE(); + + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOD_CLK_ENABLE(); + /**SDMMC1 GPIO Configuration + PC8 ------> SDMMC1_D0 + PC12 ------> SDMMC1_CK + PD2 ------> SDMMC1_CMD + */ + GPIO_InitStruct.Pin = GPIO_PIN_8|GPIO_PIN_12; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF12_SDMMC1; + HAL_GPIO_Init(GPIOC, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_2; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF12_SDMMC1; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /* SDMMC1 DMA Init */ + /* SDMMC1_RX Init */ + hdma_sdmmc1_rx.Instance = DMA2_Stream3; + hdma_sdmmc1_rx.Init.Channel = DMA_CHANNEL_4; + hdma_sdmmc1_rx.Init.Direction = DMA_PERIPH_TO_MEMORY; + hdma_sdmmc1_rx.Init.PeriphInc = DMA_PINC_DISABLE; + hdma_sdmmc1_rx.Init.MemInc = DMA_MINC_ENABLE; + hdma_sdmmc1_rx.Init.PeriphDataAlignment = DMA_PDATAALIGN_WORD; + hdma_sdmmc1_rx.Init.MemDataAlignment = DMA_MDATAALIGN_WORD; + hdma_sdmmc1_rx.Init.Mode = DMA_PFCTRL; + hdma_sdmmc1_rx.Init.Priority = DMA_PRIORITY_LOW; + hdma_sdmmc1_rx.Init.FIFOMode = DMA_FIFOMODE_ENABLE; + hdma_sdmmc1_rx.Init.FIFOThreshold = DMA_FIFO_THRESHOLD_FULL; + hdma_sdmmc1_rx.Init.MemBurst = DMA_MBURST_INC4; + hdma_sdmmc1_rx.Init.PeriphBurst = DMA_PBURST_INC4; + if (HAL_DMA_Init(&hdma_sdmmc1_rx) != HAL_OK) + { + Error_Handler(); + } + + __HAL_LINKDMA(sdHandle,hdmarx,hdma_sdmmc1_rx); + + /* SDMMC1_TX Init */ + hdma_sdmmc1_tx.Instance = DMA2_Stream6; + hdma_sdmmc1_tx.Init.Channel = DMA_CHANNEL_4; + hdma_sdmmc1_tx.Init.Direction = DMA_MEMORY_TO_PERIPH; + hdma_sdmmc1_tx.Init.PeriphInc = DMA_PINC_DISABLE; + hdma_sdmmc1_tx.Init.MemInc = DMA_MINC_ENABLE; + hdma_sdmmc1_tx.Init.PeriphDataAlignment = DMA_PDATAALIGN_WORD; + hdma_sdmmc1_tx.Init.MemDataAlignment = DMA_MDATAALIGN_WORD; + hdma_sdmmc1_tx.Init.Mode = DMA_PFCTRL; + hdma_sdmmc1_tx.Init.Priority = DMA_PRIORITY_LOW; + hdma_sdmmc1_tx.Init.FIFOMode = DMA_FIFOMODE_ENABLE; + hdma_sdmmc1_tx.Init.FIFOThreshold = DMA_FIFO_THRESHOLD_FULL; + hdma_sdmmc1_tx.Init.MemBurst = DMA_MBURST_INC4; + hdma_sdmmc1_tx.Init.PeriphBurst = DMA_PBURST_INC4; + if 
(HAL_DMA_Init(&hdma_sdmmc1_tx) != HAL_OK) + { + Error_Handler(); + } + + __HAL_LINKDMA(sdHandle,hdmatx,hdma_sdmmc1_tx); + + /* SDMMC1 interrupt Init */ + HAL_NVIC_SetPriority(SDMMC1_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(SDMMC1_IRQn); + /* USER CODE BEGIN SDMMC1_MspInit 1 */ + + /* USER CODE END SDMMC1_MspInit 1 */ + } +} + +void HAL_SD_MspDeInit(SD_HandleTypeDef* sdHandle) +{ + + if(sdHandle->Instance==SDMMC1) + { + /* USER CODE BEGIN SDMMC1_MspDeInit 0 */ + + /* USER CODE END SDMMC1_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_SDMMC1_CLK_DISABLE(); + + /**SDMMC1 GPIO Configuration + PC8 ------> SDMMC1_D0 + PC12 ------> SDMMC1_CK + PD2 ------> SDMMC1_CMD + */ + HAL_GPIO_DeInit(GPIOC, GPIO_PIN_8|GPIO_PIN_12); + + HAL_GPIO_DeInit(GPIOD, GPIO_PIN_2); + + /* SDMMC1 DMA DeInit */ + HAL_DMA_DeInit(sdHandle->hdmarx); + HAL_DMA_DeInit(sdHandle->hdmatx); + + /* SDMMC1 interrupt Deinit */ + HAL_NVIC_DisableIRQ(SDMMC1_IRQn); + /* USER CODE BEGIN SDMMC1_MspDeInit 1 */ + + /* USER CODE END SDMMC1_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/spi.c b/src/third_party/stm32_cubeide/Core/Src/spi.c new file mode 100644 index 000000000..27590bdd2 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/spi.c @@ -0,0 +1,261 @@ +/** + ****************************************************************************** + * File Name : SPI.c + * Description : This file provides code for the configuration + * of the SPI instances. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "spi.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +SPI_HandleTypeDef hspi2; +SPI_HandleTypeDef hspi4; +SPI_HandleTypeDef hspi5; + +/* SPI2 init function */ +void MX_SPI2_Init(void) +{ + + hspi2.Instance = SPI2; + hspi2.Init.Mode = SPI_MODE_MASTER; + hspi2.Init.Direction = SPI_DIRECTION_2LINES; + hspi2.Init.DataSize = SPI_DATASIZE_8BIT; + hspi2.Init.CLKPolarity = SPI_POLARITY_LOW; + hspi2.Init.CLKPhase = SPI_PHASE_1EDGE; + hspi2.Init.NSS = SPI_NSS_SOFT; + hspi2.Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_16; + hspi2.Init.FirstBit = SPI_FIRSTBIT_MSB; + hspi2.Init.TIMode = SPI_TIMODE_DISABLE; + hspi2.Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE; + hspi2.Init.CRCPolynomial = 7; + hspi2.Init.CRCLength = SPI_CRC_LENGTH_DATASIZE; + hspi2.Init.NSSPMode = SPI_NSS_PULSE_ENABLE; + if (HAL_SPI_Init(&hspi2) != HAL_OK) + { + Error_Handler(); + } + +} +/* SPI4 init function */ +void MX_SPI4_Init(void) +{ + + hspi4.Instance = SPI4; + hspi4.Init.Mode = SPI_MODE_MASTER; + hspi4.Init.Direction = SPI_DIRECTION_2LINES; + hspi4.Init.DataSize = SPI_DATASIZE_8BIT; + hspi4.Init.CLKPolarity = SPI_POLARITY_LOW; + hspi4.Init.CLKPhase = SPI_PHASE_1EDGE; + hspi4.Init.NSS = SPI_NSS_SOFT; + hspi4.Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_16; + hspi4.Init.FirstBit = SPI_FIRSTBIT_MSB; + hspi4.Init.TIMode = SPI_TIMODE_DISABLE; + hspi4.Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE; + hspi4.Init.CRCPolynomial = 7; + hspi4.Init.CRCLength = SPI_CRC_LENGTH_DATASIZE; + hspi4.Init.NSSPMode = SPI_NSS_PULSE_ENABLE; + if (HAL_SPI_Init(&hspi4) != HAL_OK) + { + Error_Handler(); + } + +} +/* SPI5 init function */ +void MX_SPI5_Init(void) +{ + + hspi5.Instance = SPI5; + hspi5.Init.Mode = SPI_MODE_MASTER; + hspi5.Init.Direction = SPI_DIRECTION_2LINES; + hspi5.Init.DataSize = SPI_DATASIZE_8BIT; + hspi5.Init.CLKPolarity = SPI_POLARITY_LOW; + hspi5.Init.CLKPhase = SPI_PHASE_1EDGE; + hspi5.Init.NSS = SPI_NSS_SOFT; + hspi5.Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_16; + hspi5.Init.FirstBit = SPI_FIRSTBIT_MSB; + hspi5.Init.TIMode = SPI_TIMODE_DISABLE; + hspi5.Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE; + hspi5.Init.CRCPolynomial = 7; + hspi5.Init.CRCLength = SPI_CRC_LENGTH_DATASIZE; + hspi5.Init.NSSPMode = SPI_NSS_PULSE_ENABLE; + if (HAL_SPI_Init(&hspi5) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_SPI_MspInit(SPI_HandleTypeDef* spiHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(spiHandle->Instance==SPI2) + { + /* USER CODE BEGIN SPI2_MspInit 0 */ + + /* USER CODE END SPI2_MspInit 0 */ + /* SPI2 clock enable */ + __HAL_RCC_SPI2_CLK_ENABLE(); + + __HAL_RCC_GPIOB_CLK_ENABLE(); + __HAL_RCC_GPIOI_CLK_ENABLE(); + /**SPI2 GPIO Configuration + PB14 ------> SPI2_MISO + PI1 ------> SPI2_SCK + PI3 ------> SPI2_MOSI + */ + GPIO_InitStruct.Pin = GPIO_PIN_14; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF5_SPI2; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_1|GPIO_PIN_3; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + 
GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF5_SPI2; + HAL_GPIO_Init(GPIOI, &GPIO_InitStruct); + + /* USER CODE BEGIN SPI2_MspInit 1 */ + + /* USER CODE END SPI2_MspInit 1 */ + } + else if(spiHandle->Instance==SPI4) + { + /* USER CODE BEGIN SPI4_MspInit 0 */ + + /* USER CODE END SPI4_MspInit 0 */ + /* SPI4 clock enable */ + __HAL_RCC_SPI4_CLK_ENABLE(); + + __HAL_RCC_GPIOE_CLK_ENABLE(); + /**SPI4 GPIO Configuration + PE2 ------> SPI4_SCK + PE5 ------> SPI4_MISO + PE6 ------> SPI4_MOSI + */ + GPIO_InitStruct.Pin = GPIO_PIN_2|GPIO_PIN_5|GPIO_PIN_6; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF5_SPI4; + HAL_GPIO_Init(GPIOE, &GPIO_InitStruct); + + /* USER CODE BEGIN SPI4_MspInit 1 */ + + /* USER CODE END SPI4_MspInit 1 */ + } + else if(spiHandle->Instance==SPI5) + { + /* USER CODE BEGIN SPI5_MspInit 0 */ + + /* USER CODE END SPI5_MspInit 0 */ + /* SPI5 clock enable */ + __HAL_RCC_SPI5_CLK_ENABLE(); + + __HAL_RCC_GPIOF_CLK_ENABLE(); + /**SPI5 GPIO Configuration + PF7 ------> SPI5_SCK + PF8 ------> SPI5_MISO + PF9 ------> SPI5_MOSI + */ + GPIO_InitStruct.Pin = GPIO_PIN_7|GPIO_PIN_8|GPIO_PIN_9; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF5_SPI5; + HAL_GPIO_Init(GPIOF, &GPIO_InitStruct); + + /* USER CODE BEGIN SPI5_MspInit 1 */ + + /* USER CODE END SPI5_MspInit 1 */ + } +} + +void HAL_SPI_MspDeInit(SPI_HandleTypeDef* spiHandle) +{ + + if(spiHandle->Instance==SPI2) + { + /* USER CODE BEGIN SPI2_MspDeInit 0 */ + + /* USER CODE END SPI2_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_SPI2_CLK_DISABLE(); + + /**SPI2 GPIO Configuration + PB14 ------> SPI2_MISO + PI1 ------> SPI2_SCK + PI3 ------> SPI2_MOSI + */ + HAL_GPIO_DeInit(GPIOB, GPIO_PIN_14); + + HAL_GPIO_DeInit(GPIOI, GPIO_PIN_1|GPIO_PIN_3); + + /* USER CODE BEGIN SPI2_MspDeInit 1 */ + + /* USER CODE END SPI2_MspDeInit 1 */ + } + else if(spiHandle->Instance==SPI4) + { + /* USER CODE BEGIN SPI4_MspDeInit 0 */ + + /* USER CODE END SPI4_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_SPI4_CLK_DISABLE(); + + /**SPI4 GPIO Configuration + PE2 ------> SPI4_SCK + PE5 ------> SPI4_MISO + PE6 ------> SPI4_MOSI + */ + HAL_GPIO_DeInit(GPIOE, GPIO_PIN_2|GPIO_PIN_5|GPIO_PIN_6); + + /* USER CODE BEGIN SPI4_MspDeInit 1 */ + + /* USER CODE END SPI4_MspDeInit 1 */ + } + else if(spiHandle->Instance==SPI5) + { + /* USER CODE BEGIN SPI5_MspDeInit 0 */ + + /* USER CODE END SPI5_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_SPI5_CLK_DISABLE(); + + /**SPI5 GPIO Configuration + PF7 ------> SPI5_SCK + PF8 ------> SPI5_MISO + PF9 ------> SPI5_MOSI + */ + HAL_GPIO_DeInit(GPIOF, GPIO_PIN_7|GPIO_PIN_8|GPIO_PIN_9); + + /* USER CODE BEGIN SPI5_MspDeInit 1 */ + + /* USER CODE END SPI5_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_hal_msp.c b/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_hal_msp.c new file mode 100644 index 000000000..bd322bf8e --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_hal_msp.c @@ -0,0 +1,86 @@ +/* USER CODE BEGIN Header */ +/** + 
****************************************************************************** + * File Name : stm32f7xx_hal_msp.c + * Description : This file provides code for the MSP Initialization + * and de-Initialization codes. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN TD */ + +/* USER CODE END TD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN Define */ + +/* USER CODE END Define */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN Macro */ + +/* USER CODE END Macro */ + +/* Private variables ---------------------------------------------------------*/ +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* External functions --------------------------------------------------------*/ +/* USER CODE BEGIN ExternalFunctions */ + +/* USER CODE END ExternalFunctions */ + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ +/** + * Initializes the Global MSP. + */ +void HAL_MspInit(void) +{ + /* USER CODE BEGIN MspInit 0 */ + + /* USER CODE END MspInit 0 */ + + __HAL_RCC_PWR_CLK_ENABLE(); + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + /* System interrupt init*/ + /* PendSV_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(PendSV_IRQn, 15, 0); + + /* USER CODE BEGIN MspInit 1 */ + + /* USER CODE END MspInit 1 */ +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_hal_timebase_tim.c b/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_hal_timebase_tim.c new file mode 100644 index 000000000..190d55b88 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_hal_timebase_tim.c @@ -0,0 +1,114 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file stm32f7xx_hal_timebase_TIM.c + * @brief HAL time base based on the hardware TIM. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" +#include "stm32f7xx_hal_tim.h" + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +TIM_HandleTypeDef htim10; +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** + * @brief This function configures the TIM10 as a time base source. + * The time source is configured to have 1ms time base with a dedicated + * Tick interrupt priority. + * @note This function is called automatically at the beginning of program after + * reset by HAL_Init() or at any time when clock is configured, by HAL_RCC_ClockConfig(). + * @param TickPriority: Tick interrupt priority. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_InitTick(uint32_t TickPriority) +{ + RCC_ClkInitTypeDef clkconfig; + uint32_t uwTimclock = 0; + uint32_t uwPrescalerValue = 0; + uint32_t pFLatency; + + /*Configure the TIM10 IRQ priority */ + HAL_NVIC_SetPriority(TIM1_UP_TIM10_IRQn, TickPriority ,0); + + /* Enable the TIM10 global Interrupt */ + HAL_NVIC_EnableIRQ(TIM1_UP_TIM10_IRQn); + + /* Enable TIM10 clock */ + __HAL_RCC_TIM10_CLK_ENABLE(); + + /* Get clock configuration */ + HAL_RCC_GetClockConfig(&clkconfig, &pFLatency); + + /* Compute TIM10 clock */ + uwTimclock = 2*HAL_RCC_GetPCLK2Freq(); + + /* Compute the prescaler value to have TIM10 counter clock equal to 1MHz */ + uwPrescalerValue = (uint32_t) ((uwTimclock / 1000000) - 1); + + /* Initialize TIM10 */ + htim10.Instance = TIM10; + + /* Initialize TIMx peripheral as follow: + + Period = [(TIM10CLK/1000) - 1]. to have a (1/1000) s time base. + + Prescaler = (uwTimclock/1000000 - 1) to have a 1MHz counter clock. + + ClockDivision = 0 + + Counter direction = Up + */ + htim10.Init.Period = (1000000 / 1000) - 1; + htim10.Init.Prescaler = uwPrescalerValue; + htim10.Init.ClockDivision = 0; + htim10.Init.CounterMode = TIM_COUNTERMODE_UP; + if(HAL_TIM_Base_Init(&htim10) == HAL_OK) + { + /* Start the TIM time Base generation in interrupt mode */ + return HAL_TIM_Base_Start_IT(&htim10); + } + + /* Return function status */ + return HAL_ERROR; +} + +/** + * @brief Suspend Tick increment. + * @note Disable the tick increment by disabling TIM10 update interrupt. + * @param None + * @retval None + */ +void HAL_SuspendTick(void) +{ + /* Disable TIM10 update Interrupt */ + __HAL_TIM_DISABLE_IT(&htim10, TIM_IT_UPDATE); +} + +/** + * @brief Resume Tick increment. + * @note Enable the tick increment by Enabling TIM10 update interrupt. 
+ * @param None + * @retval None + */ +void HAL_ResumeTick(void) +{ + /* Enable TIM10 Update interrupt */ + __HAL_TIM_ENABLE_IT(&htim10, TIM_IT_UPDATE); +} + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_it.c b/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_it.c new file mode 100644 index 000000000..767fc1899 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/stm32f7xx_it.c @@ -0,0 +1,337 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file stm32f7xx_it.c + * @brief Interrupt Service Routines. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "stm32f7xx_it.h" +#include "FreeRTOS.h" +#include "task.h" +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN TD */ + +/* USER CODE END TD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ + +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* Private user code ---------------------------------------------------------*/ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* External variables --------------------------------------------------------*/ +extern ETH_HandleTypeDef heth; +extern PCD_HandleTypeDef hpcd_USB_OTG_FS; +extern ADC_HandleTypeDef hadc1; +extern DMA_HandleTypeDef hdma_dac1; +extern DMA_HandleTypeDef hdma_sdmmc1_rx; +extern DMA_HandleTypeDef hdma_sdmmc1_tx; +extern SD_HandleTypeDef hsd1; +extern UART_HandleTypeDef huart7; +extern TIM_HandleTypeDef htim10; + +/* USER CODE BEGIN EV */ + +/* USER CODE END EV */ + +/******************************************************************************/ +/* Cortex-M7 Processor Interruption and Exception Handlers */ +/******************************************************************************/ +/** + * @brief This function handles Non maskable interrupt. + */ +void NMI_Handler(void) +{ + /* USER CODE BEGIN NonMaskableInt_IRQn 0 */ + + /* USER CODE END NonMaskableInt_IRQn 0 */ + HAL_RCC_NMI_IRQHandler(); + /* USER CODE BEGIN NonMaskableInt_IRQn 1 */ + + /* USER CODE END NonMaskableInt_IRQn 1 */ +} + +/** + * @brief This function handles Hard fault interrupt. + */ +void HardFault_Handler(void) +{ + /* USER CODE BEGIN HardFault_IRQn 0 */ + NVIC_SystemReset(); + /* USER CODE END HardFault_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_HardFault_IRQn 0 */ + /* USER CODE END W1_HardFault_IRQn 0 */ + } +} + +/** + * @brief This function handles Memory management fault. + */ +void MemManage_Handler(void) +{ + /* USER CODE BEGIN MemoryManagement_IRQn 0 */ + NVIC_SystemReset(); + /* USER CODE END MemoryManagement_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_MemoryManagement_IRQn 0 */ + /* USER CODE END W1_MemoryManagement_IRQn 0 */ + } +} + +/** + * @brief This function handles Pre-fetch fault, memory access fault. + */ +void BusFault_Handler(void) +{ + /* USER CODE BEGIN BusFault_IRQn 0 */ + NVIC_SystemReset(); + /* USER CODE END BusFault_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_BusFault_IRQn 0 */ + /* USER CODE END W1_BusFault_IRQn 0 */ + } +} + +/** + * @brief This function handles Undefined instruction or illegal state. 
+ */ +void UsageFault_Handler(void) +{ + /* USER CODE BEGIN UsageFault_IRQn 0 */ + NVIC_SystemReset(); + /* USER CODE END UsageFault_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_UsageFault_IRQn 0 */ + /* USER CODE END W1_UsageFault_IRQn 0 */ + } +} + +/** + * @brief This function handles Debug monitor. + */ +void DebugMon_Handler(void) +{ + /* USER CODE BEGIN DebugMonitor_IRQn 0 */ + + /* USER CODE END DebugMonitor_IRQn 0 */ + /* USER CODE BEGIN DebugMonitor_IRQn 1 */ + + /* USER CODE END DebugMonitor_IRQn 1 */ +} + +/******************************************************************************/ +/* STM32F7xx Peripheral Interrupt Handlers */ +/* Add here the Interrupt Handlers for the used peripherals. */ +/* For the available peripheral interrupt handler names, */ +/* please refer to the startup file (startup_stm32f7xx.s). */ +/******************************************************************************/ + +/** + * @brief This function handles DMA1 stream5 global interrupt. + */ +void DMA1_Stream5_IRQHandler(void) +{ + /* USER CODE BEGIN DMA1_Stream5_IRQn 0 */ + + /* USER CODE END DMA1_Stream5_IRQn 0 */ + HAL_DMA_IRQHandler(&hdma_dac1); + /* USER CODE BEGIN DMA1_Stream5_IRQn 1 */ + + /* USER CODE END DMA1_Stream5_IRQn 1 */ +} + +/** + * @brief This function handles ADC1, ADC2 and ADC3 global interrupts. + */ +void ADC_IRQHandler(void) +{ + /* USER CODE BEGIN ADC_IRQn 0 */ + + /* USER CODE END ADC_IRQn 0 */ + HAL_ADC_IRQHandler(&hadc1); + /* USER CODE BEGIN ADC_IRQn 1 */ + + /* USER CODE END ADC_IRQn 1 */ +} + +/** + * @brief This function handles EXTI line[9:5] interrupts. + */ +void EXTI9_5_IRQHandler(void) +{ + /* USER CODE BEGIN EXTI9_5_IRQn 0 */ + + /* USER CODE END EXTI9_5_IRQn 0 */ + HAL_GPIO_EXTI_IRQHandler(GPIO_PIN_8); + HAL_GPIO_EXTI_IRQHandler(GPIO_PIN_9); + /* USER CODE BEGIN EXTI9_5_IRQn 1 */ + + /* USER CODE END EXTI9_5_IRQn 1 */ +} + +/** + * @brief This function handles TIM1 update interrupt and TIM10 global interrupt. + */ +void TIM1_UP_TIM10_IRQHandler(void) +{ + /* USER CODE BEGIN TIM1_UP_TIM10_IRQn 0 */ + + /* USER CODE END TIM1_UP_TIM10_IRQn 0 */ + HAL_TIM_IRQHandler(&htim10); + /* USER CODE BEGIN TIM1_UP_TIM10_IRQn 1 */ + + /* USER CODE END TIM1_UP_TIM10_IRQn 1 */ +} + +/** + * @brief This function handles EXTI line[15:10] interrupts. + */ +void EXTI15_10_IRQHandler(void) +{ + /* USER CODE BEGIN EXTI15_10_IRQn 0 */ + + /* USER CODE END EXTI15_10_IRQn 0 */ + HAL_GPIO_EXTI_IRQHandler(GPIO_PIN_11); + HAL_GPIO_EXTI_IRQHandler(GPIO_PIN_15); + /* USER CODE BEGIN EXTI15_10_IRQn 1 */ + + /* USER CODE END EXTI15_10_IRQn 1 */ +} + +/** + * @brief This function handles SDMMC1 global interrupt. + */ +void SDMMC1_IRQHandler(void) +{ + /* USER CODE BEGIN SDMMC1_IRQn 0 */ + + /* USER CODE END SDMMC1_IRQn 0 */ + HAL_SD_IRQHandler(&hsd1); + /* USER CODE BEGIN SDMMC1_IRQn 1 */ + + /* USER CODE END SDMMC1_IRQn 1 */ +} + +/** + * @brief This function handles DMA2 stream3 global interrupt. + */ +void DMA2_Stream3_IRQHandler(void) +{ + /* USER CODE BEGIN DMA2_Stream3_IRQn 0 */ + + /* USER CODE END DMA2_Stream3_IRQn 0 */ + HAL_DMA_IRQHandler(&hdma_sdmmc1_rx); + /* USER CODE BEGIN DMA2_Stream3_IRQn 1 */ + + /* USER CODE END DMA2_Stream3_IRQn 1 */ +} + +/** + * @brief This function handles Ethernet global interrupt. 
+ */ +void ETH_IRQHandler(void) +{ + /* USER CODE BEGIN ETH_IRQn 0 */ + + /* USER CODE END ETH_IRQn 0 */ + HAL_ETH_IRQHandler(&heth); + /* USER CODE BEGIN ETH_IRQn 1 */ + + /* USER CODE END ETH_IRQn 1 */ +} + +/** + * @brief This function handles USB On The Go FS global interrupt. + */ +void OTG_FS_IRQHandler(void) +{ + /* USER CODE BEGIN OTG_FS_IRQn 0 */ + + /* USER CODE END OTG_FS_IRQn 0 */ + HAL_PCD_IRQHandler(&hpcd_USB_OTG_FS); + /* USER CODE BEGIN OTG_FS_IRQn 1 */ + + /* USER CODE END OTG_FS_IRQn 1 */ +} + +/** + * @brief This function handles DMA2 stream6 global interrupt. + */ +void DMA2_Stream6_IRQHandler(void) +{ + /* USER CODE BEGIN DMA2_Stream6_IRQn 0 */ + + /* USER CODE END DMA2_Stream6_IRQn 0 */ + HAL_DMA_IRQHandler(&hdma_sdmmc1_tx); + /* USER CODE BEGIN DMA2_Stream6_IRQn 1 */ + + /* USER CODE END DMA2_Stream6_IRQn 1 */ +} + +/** + * @brief This function handles UART7 global interrupt. + */ +void UART7_IRQHandler(void) +{ + /* USER CODE BEGIN UART7_IRQn 0 */ + + /* USER CODE END UART7_IRQn 0 */ + HAL_UART_IRQHandler(&huart7); + /* USER CODE BEGIN UART7_IRQn 1 */ + + /* USER CODE END UART7_IRQn 1 */ +} + +/* USER CODE BEGIN 1 */ +extern void byteFromSlave(); + +void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart) { + if (huart == &huart7) { + byteFromSlave(); + } +} +/* USER CODE END 1 */ +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/syscalls.c b/src/third_party/stm32_cubeide/Core/Src/syscalls.c new file mode 100644 index 000000000..4ec95844d --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/syscalls.c @@ -0,0 +1,159 @@ +/** + ****************************************************************************** + * @file syscalls.c + * @author Auto-generated by STM32CubeIDE + * @brief STM32CubeIDE Minimal System calls file + * + * For more information about which c-functions + * need which of these lowlevel functions + * please consult the Newlib libc-manual + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes */ +#include +#include +#include +#include +#include +#include +#include +#include + + +/* Variables */ +//#undef errno +extern int errno; +extern int __io_putchar(int ch) __attribute__((weak)); +extern int __io_getchar(void) __attribute__((weak)); + +register char * stack_ptr asm("sp"); + +char *__env[1] = { 0 }; +char **environ = __env; + + +/* Functions */ +void initialise_monitor_handles() +{ +} + +int _getpid(void) +{ + return 1; +} + +int _kill(int pid, int sig) +{ + errno = EINVAL; + return -1; +} + +void _exit (int status) +{ + _kill(status, -1); + while (1) {} /* Make sure we hang here */ +} + +__attribute__((weak)) int _read(int file, char *ptr, int len) +{ + int DataIdx; + + for (DataIdx = 0; DataIdx < len; DataIdx++) + { + *ptr++ = __io_getchar(); + } + +return len; +} + +__attribute__((weak)) int _write(int file, char *ptr, int len) +{ + int DataIdx; + + for (DataIdx = 0; DataIdx < len; DataIdx++) + { + __io_putchar(*ptr++); + } + return len; +} + +int _close(int file) +{ + return -1; +} + + +int _fstat(int file, struct stat *st) +{ + st->st_mode = S_IFCHR; + return 0; +} + +int _isatty(int file) +{ + return 1; +} + +int _lseek(int file, int ptr, int dir) +{ + return 0; +} + +int _open(char *path, int flags, ...) +{ + /* Pretend like we always fail */ + return -1; +} + +int _wait(int *status) +{ + errno = ECHILD; + return -1; +} + +int _unlink(char *name) +{ + errno = ENOENT; + return -1; +} + +int _times(struct tms *buf) +{ + return -1; +} + +int _stat(char *file, struct stat *st) +{ + st->st_mode = S_IFCHR; + return 0; +} + +int _link(char *old, char *new) +{ + errno = EMLINK; + return -1; +} + +int _fork(void) +{ + errno = EAGAIN; + return -1; +} + +int _execve(char *name, char **argv, char **env) +{ + errno = ENOMEM; + return -1; +} diff --git a/src/third_party/stm32_cubeide/Core/Src/sysmem.c b/src/third_party/stm32_cubeide/Core/Src/sysmem.c new file mode 100644 index 000000000..4665417ed --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/sysmem.c @@ -0,0 +1,58 @@ +/** + ****************************************************************************** + * @file sysmem.c + * @author Auto-generated by STM32CubeIDE + * @brief STM32CubeIDE Minimal System Memory calls file + * + * For more information about which c-functions + * need which of these lowlevel functions + * please consult the Newlib libc-manual + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes */ +#include +#include + +/* Variables */ +extern int errno; +register char * stack_ptr asm("sp"); + +/* Functions */ + +/** + _sbrk + Increase program data space. Malloc and related functions depend on this +**/ +caddr_t _sbrk(int incr) +{ + extern char end asm("end"); + static char *heap_end; + char *prev_heap_end; + + if (heap_end == 0) + heap_end = &end; + + prev_heap_end = heap_end; + if (heap_end + incr > stack_ptr) + { + errno = ENOMEM; + return (caddr_t) -1; + } + + heap_end += incr; + + return (caddr_t) prev_heap_end; +} + diff --git a/src/third_party/stm32_cubeide/Core/Src/system_stm32f7xx.c b/src/third_party/stm32_cubeide/Core/Src/system_stm32f7xx.c new file mode 100644 index 000000000..174364021 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/system_stm32f7xx.c @@ -0,0 +1,244 @@ +/** + ****************************************************************************** + * @file system_stm32f7xx.c + * @author MCD Application Team + * @brief CMSIS Cortex-M7 Device Peripheral Access Layer System Source File. + * + * This file provides two functions and one global variable to be called from + * user application: + * - SystemInit(): This function is called at startup just after reset and + * before branch to main program. This call is made inside + * the "startup_stm32f7xx.s" file. + * + * - SystemCoreClock variable: Contains the core clock (HCLK), it can be used + * by the user application to setup the SysTick + * timer or configure other parameters. + * + * - SystemCoreClockUpdate(): Updates the variable SystemCoreClock and must + * be called whenever the core clock is changed + * during program execution. + * + * + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2016 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/** @addtogroup CMSIS + * @{ + */ + +/** @addtogroup stm32f7xx_system + * @{ + */ + +/** @addtogroup STM32F7xx_System_Private_Includes + * @{ + */ + +#include "stm32f7xx.h" + +#if !defined (HSE_VALUE) + #define HSE_VALUE ((uint32_t)25000000) /*!< Default value of the External oscillator in Hz */ +#endif /* HSE_VALUE */ + +#if !defined (HSI_VALUE) + #define HSI_VALUE ((uint32_t)16000000) /*!< Value of the Internal oscillator in Hz*/ +#endif /* HSI_VALUE */ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_TypesDefinitions + * @{ + */ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_Defines + * @{ + */ + +/************************* Miscellaneous Configuration ************************/ + +/*!< Uncomment the following line if you need to relocate your vector Table in + Internal SRAM. */ +/* #define VECT_TAB_SRAM */ +#define VECT_TAB_OFFSET 0x00 /*!< Vector Table base offset field. + This value must be a multiple of 0x200. */ +/******************************************************************************/ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_Macros + * @{ + */ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_Variables + * @{ + */ + + /* This variable is updated in three ways: + 1) by calling CMSIS function SystemCoreClockUpdate() + 2) by calling HAL API function HAL_RCC_GetHCLKFreq() + 3) each time HAL_RCC_ClockConfig() is called to configure the system clock frequency + Note: If you use this function to configure the system clock; then there + is no need to call the 2 first functions listed above, since SystemCoreClock + variable is updated automatically. + */ + uint32_t SystemCoreClock = 16000000; + const uint8_t AHBPrescTable[16] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 6, 7, 8, 9}; + const uint8_t APBPrescTable[8] = {0, 0, 0, 0, 1, 2, 3, 4}; + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_FunctionPrototypes + * @{ + */ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_Functions + * @{ + */ + +/** + * @brief Setup the microcontroller system + * Initialize the Embedded Flash Interface, the PLL and update the + * SystemFrequency variable. + * @param None + * @retval None + */ +void SystemInit(void) +{ + /* FPU settings ------------------------------------------------------------*/ +#if (__FPU_PRESENT == 1) && (__FPU_USED == 1) + SCB->CPACR |= ((3UL << 10*2)|(3UL << 11*2)); /* set CP10 and CP11 Full Access */ +#endif + + /* Configure the Vector Table location add offset address ------------------*/ +#ifdef VECT_TAB_SRAM + SCB->VTOR = RAMDTCM_BASE | VECT_TAB_OFFSET; /* Vector Table Relocation in Internal SRAM */ +#else + SCB->VTOR = FLASH_BASE | VECT_TAB_OFFSET; /* Vector Table Relocation in Internal FLASH */ +#endif +} + +/** + * @brief Update SystemCoreClock variable according to Clock Register Values. + * The SystemCoreClock variable contains the core clock (HCLK), it can + * be used by the user application to setup the SysTick timer or configure + * other parameters. + * + * @note Each time the core clock (HCLK) changes, this function must be called + * to update SystemCoreClock variable value. 
Otherwise, any configuration + * based on this variable will be incorrect. + * + * @note - The system frequency computed by this function is not the real + * frequency in the chip. It is calculated based on the predefined + * constant and the selected clock source: + * + * - If SYSCLK source is HSI, SystemCoreClock will contain the HSI_VALUE(*) + * + * - If SYSCLK source is HSE, SystemCoreClock will contain the HSE_VALUE(**) + * + * - If SYSCLK source is PLL, SystemCoreClock will contain the HSE_VALUE(**) + * or HSI_VALUE(*) multiplied/divided by the PLL factors. + * + * (*) HSI_VALUE is a constant defined in stm32f7xx_hal_conf.h file (default value + * 16 MHz) but the real value may vary depending on the variations + * in voltage and temperature. + * + * (**) HSE_VALUE is a constant defined in stm32f7xx_hal_conf.h file (default value + * 25 MHz), user has to ensure that HSE_VALUE is same as the real + * frequency of the crystal used. Otherwise, this function may + * have wrong result. + * + * - The result of this function could be not correct when using fractional + * value for HSE crystal. + * + * @param None + * @retval None + */ +void SystemCoreClockUpdate(void) +{ + uint32_t tmp = 0, pllvco = 0, pllp = 2, pllsource = 0, pllm = 2; + + /* Get SYSCLK source -------------------------------------------------------*/ + tmp = RCC->CFGR & RCC_CFGR_SWS; + + switch (tmp) + { + case 0x00: /* HSI used as system clock source */ + SystemCoreClock = HSI_VALUE; + break; + case 0x04: /* HSE used as system clock source */ + SystemCoreClock = HSE_VALUE; + break; + case 0x08: /* PLL used as system clock source */ + + /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLL_M) * PLL_N + SYSCLK = PLL_VCO / PLL_P + */ + pllsource = (RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) >> 22; + pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; + + if (pllsource != 0) + { + /* HSE used as PLL clock source */ + pllvco = (HSE_VALUE / pllm) * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6); + } + else + { + /* HSI used as PLL clock source */ + pllvco = (HSI_VALUE / pllm) * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6); + } + + pllp = (((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >>16) + 1 ) *2; + SystemCoreClock = pllvco/pllp; + break; + default: + SystemCoreClock = HSI_VALUE; + break; + } + /* Compute HCLK frequency --------------------------------------------------*/ + /* Get HCLK prescaler */ + tmp = AHBPrescTable[((RCC->CFGR & RCC_CFGR_HPRE) >> 4)]; + /* HCLK frequency */ + SystemCoreClock >>= tmp; +} + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/tim.c b/src/third_party/stm32_cubeide/Core/Src/tim.c new file mode 100644 index 000000000..309c9cfe0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/tim.c @@ -0,0 +1,347 @@ +/** + ****************************************************************************** + * File Name : TIM.c + * Description : This file provides code for the configuration + * of the TIM instances. + ****************************************************************************** + * @attention + * + *
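For reference, the PLL arithmetic that SystemCoreClockUpdate() above applies (PLL_VCO = (source / PLLM) * PLLN, SYSCLK = PLL_VCO / PLLP, then HCLK = SYSCLK shifted by the AHB prescaler) can be checked with a small standalone helper. The factor values below are illustrative assumptions (25 MHz HSE, PLLM = 25, PLLN = 432, PLLP = 2), not values read from this project's RCC configuration:

    #include <stdint.h>

    /* Mirrors the PLL branch of SystemCoreClockUpdate(): VCO = (src / M) * N, SYSCLK = VCO / P. */
    static uint32_t sysclk_from_pll(uint32_t src_hz, uint32_t pllm, uint32_t plln, uint32_t pllp)
    {
        return ((src_hz / pllm) * plln) / pllp;
    }

    /* Example: sysclk_from_pll(25000000, 25, 432, 2) == 216000000, i.e. a 216 MHz SYSCLK. */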

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "tim.h" + +/* USER CODE BEGIN 0 */ + +/* + +htim6: + + 108,000,000 / (2249 + 1) = 48,000 -----> 48KHz + +*/ + +/* USER CODE END 0 */ + +TIM_HandleTypeDef htim3; +TIM_HandleTypeDef htim6; +TIM_HandleTypeDef htim8; +TIM_HandleTypeDef htim12; + +/* TIM3 init function */ +void MX_TIM3_Init(void) +{ + TIM_MasterConfigTypeDef sMasterConfig = {0}; + TIM_OC_InitTypeDef sConfigOC = {0}; + + htim3.Instance = TIM3; + htim3.Init.Prescaler = 0; + htim3.Init.CounterMode = TIM_COUNTERMODE_UP; + htim3.Init.Period = 4095; + htim3.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1; + htim3.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_ENABLE; + if (HAL_TIM_PWM_Init(&htim3) != HAL_OK) + { + Error_Handler(); + } + sMasterConfig.MasterOutputTrigger = TIM_TRGO_RESET; + sMasterConfig.MasterSlaveMode = TIM_MASTERSLAVEMODE_DISABLE; + if (HAL_TIMEx_MasterConfigSynchronization(&htim3, &sMasterConfig) != HAL_OK) + { + Error_Handler(); + } + sConfigOC.OCMode = TIM_OCMODE_PWM1; + sConfigOC.Pulse = 400; + sConfigOC.OCPolarity = TIM_OCPOLARITY_HIGH; + sConfigOC.OCFastMode = TIM_OCFAST_DISABLE; + if (HAL_TIM_PWM_ConfigChannel(&htim3, &sConfigOC, TIM_CHANNEL_2) != HAL_OK) + { + Error_Handler(); + } + HAL_TIM_MspPostInit(&htim3); + +} +/* TIM6 init function */ +void MX_TIM6_Init(void) +{ + TIM_MasterConfigTypeDef sMasterConfig = {0}; + + htim6.Instance = TIM6; + htim6.Init.Prescaler = 0; + htim6.Init.CounterMode = TIM_COUNTERMODE_UP; + htim6.Init.Period = 2249; + htim6.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_ENABLE; + if (HAL_TIM_Base_Init(&htim6) != HAL_OK) + { + Error_Handler(); + } + sMasterConfig.MasterOutputTrigger = TIM_TRGO_UPDATE; + sMasterConfig.MasterSlaveMode = TIM_MASTERSLAVEMODE_DISABLE; + if (HAL_TIMEx_MasterConfigSynchronization(&htim6, &sMasterConfig) != HAL_OK) + { + Error_Handler(); + } + +} +/* TIM8 init function */ +void MX_TIM8_Init(void) +{ + TIM_Encoder_InitTypeDef sConfig = {0}; + TIM_MasterConfigTypeDef sMasterConfig = {0}; + + htim8.Instance = TIM8; + htim8.Init.Prescaler = 0; + htim8.Init.CounterMode = TIM_COUNTERMODE_UP; + htim8.Init.Period = 65535; + htim8.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1; + htim8.Init.RepetitionCounter = 0; + htim8.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_DISABLE; + sConfig.EncoderMode = TIM_ENCODERMODE_TI12; + sConfig.IC1Polarity = TIM_ICPOLARITY_FALLING; + sConfig.IC1Selection = TIM_ICSELECTION_DIRECTTI; + sConfig.IC1Prescaler = TIM_ICPSC_DIV1; + sConfig.IC1Filter = 5; + sConfig.IC2Polarity = TIM_ICPOLARITY_FALLING; + sConfig.IC2Selection = TIM_ICSELECTION_DIRECTTI; + sConfig.IC2Prescaler = TIM_ICPSC_DIV1; + sConfig.IC2Filter = 5; + if (HAL_TIM_Encoder_Init(&htim8, &sConfig) != HAL_OK) + { + Error_Handler(); + } + sMasterConfig.MasterOutputTrigger = TIM_TRGO_RESET; + sMasterConfig.MasterOutputTrigger2 = TIM_TRGO2_RESET; + sMasterConfig.MasterSlaveMode = TIM_MASTERSLAVEMODE_DISABLE; + if (HAL_TIMEx_MasterConfigSynchronization(&htim8, &sMasterConfig) != HAL_OK) + { + Error_Handler(); + } + +} +/* TIM12 init function */ +void MX_TIM12_Init(void) +{ + TIM_OC_InitTypeDef sConfigOC = {0}; + + htim12.Instance = TIM12; + htim12.Init.Prescaler = 
3; + htim12.Init.CounterMode = TIM_COUNTERMODE_UP; + htim12.Init.Period = 999; + htim12.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1; + htim12.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_DISABLE; + if (HAL_TIM_PWM_Init(&htim12) != HAL_OK) + { + Error_Handler(); + } + sConfigOC.OCMode = TIM_OCMODE_PWM1; + sConfigOC.Pulse = 499; + sConfigOC.OCPolarity = TIM_OCPOLARITY_HIGH; + sConfigOC.OCFastMode = TIM_OCFAST_DISABLE; + if (HAL_TIM_PWM_ConfigChannel(&htim12, &sConfigOC, TIM_CHANNEL_2) != HAL_OK) + { + Error_Handler(); + } + HAL_TIM_MspPostInit(&htim12); + +} + +void HAL_TIM_PWM_MspInit(TIM_HandleTypeDef* tim_pwmHandle) +{ + + if(tim_pwmHandle->Instance==TIM3) + { + /* USER CODE BEGIN TIM3_MspInit 0 */ + + /* USER CODE END TIM3_MspInit 0 */ + /* TIM3 clock enable */ + __HAL_RCC_TIM3_CLK_ENABLE(); + /* USER CODE BEGIN TIM3_MspInit 1 */ + + /* USER CODE END TIM3_MspInit 1 */ + } + else if(tim_pwmHandle->Instance==TIM12) + { + /* USER CODE BEGIN TIM12_MspInit 0 */ + + /* USER CODE END TIM12_MspInit 0 */ + /* TIM12 clock enable */ + __HAL_RCC_TIM12_CLK_ENABLE(); + /* USER CODE BEGIN TIM12_MspInit 1 */ + + /* USER CODE END TIM12_MspInit 1 */ + } +} + +void HAL_TIM_Base_MspInit(TIM_HandleTypeDef* tim_baseHandle) +{ + + if(tim_baseHandle->Instance==TIM6) + { + /* USER CODE BEGIN TIM6_MspInit 0 */ + + /* USER CODE END TIM6_MspInit 0 */ + /* TIM6 clock enable */ + __HAL_RCC_TIM6_CLK_ENABLE(); + /* USER CODE BEGIN TIM6_MspInit 1 */ + + /* USER CODE END TIM6_MspInit 1 */ + } +} + +void HAL_TIM_Encoder_MspInit(TIM_HandleTypeDef* tim_encoderHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(tim_encoderHandle->Instance==TIM8) + { + /* USER CODE BEGIN TIM8_MspInit 0 */ + + /* USER CODE END TIM8_MspInit 0 */ + /* TIM8 clock enable */ + __HAL_RCC_TIM8_CLK_ENABLE(); + + __HAL_RCC_GPIOC_CLK_ENABLE(); + /**TIM8 GPIO Configuration + PC6 ------> TIM8_CH1 + PC7 ------> TIM8_CH2 + */ + GPIO_InitStruct.Pin = ENC_A_Pin|ENC_B_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_PULLUP; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + GPIO_InitStruct.Alternate = GPIO_AF3_TIM8; + HAL_GPIO_Init(GPIOC, &GPIO_InitStruct); + + /* USER CODE BEGIN TIM8_MspInit 1 */ + + /* USER CODE END TIM8_MspInit 1 */ + } +} +void HAL_TIM_MspPostInit(TIM_HandleTypeDef* timHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(timHandle->Instance==TIM3) + { + /* USER CODE BEGIN TIM3_MspPostInit 0 */ + + /* USER CODE END TIM3_MspPostInit 0 */ + __HAL_RCC_GPIOB_CLK_ENABLE(); + /**TIM3 GPIO Configuration + PB5 ------> TIM3_CH2 + */ + GPIO_InitStruct.Pin = DOUT2_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + GPIO_InitStruct.Alternate = GPIO_AF2_TIM3; + HAL_GPIO_Init(DOUT2_GPIO_Port, &GPIO_InitStruct); + + /* USER CODE BEGIN TIM3_MspPostInit 1 */ + + /* USER CODE END TIM3_MspPostInit 1 */ + } + else if(timHandle->Instance==TIM12) + { + /* USER CODE BEGIN TIM12_MspPostInit 0 */ + + /* USER CODE END TIM12_MspPostInit 0 */ + + __HAL_RCC_GPIOB_CLK_ENABLE(); + /**TIM12 GPIO Configuration + PB15 ------> TIM12_CH2 + */ + GPIO_InitStruct.Pin = TFT_BRIGHTNESS_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + GPIO_InitStruct.Alternate = GPIO_AF9_TIM12; + HAL_GPIO_Init(TFT_BRIGHTNESS_GPIO_Port, &GPIO_InitStruct); + + /* USER CODE BEGIN TIM12_MspPostInit 1 */ + + /* USER CODE END TIM12_MspPostInit 1 */ + } + +} + +void 
HAL_TIM_PWM_MspDeInit(TIM_HandleTypeDef* tim_pwmHandle) +{ + + if(tim_pwmHandle->Instance==TIM3) + { + /* USER CODE BEGIN TIM3_MspDeInit 0 */ + + /* USER CODE END TIM3_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_TIM3_CLK_DISABLE(); + /* USER CODE BEGIN TIM3_MspDeInit 1 */ + + /* USER CODE END TIM3_MspDeInit 1 */ + } + else if(tim_pwmHandle->Instance==TIM12) + { + /* USER CODE BEGIN TIM12_MspDeInit 0 */ + + /* USER CODE END TIM12_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_TIM12_CLK_DISABLE(); + /* USER CODE BEGIN TIM12_MspDeInit 1 */ + + /* USER CODE END TIM12_MspDeInit 1 */ + } +} + +void HAL_TIM_Base_MspDeInit(TIM_HandleTypeDef* tim_baseHandle) +{ + + if(tim_baseHandle->Instance==TIM6) + { + /* USER CODE BEGIN TIM6_MspDeInit 0 */ + + /* USER CODE END TIM6_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_TIM6_CLK_DISABLE(); + /* USER CODE BEGIN TIM6_MspDeInit 1 */ + + /* USER CODE END TIM6_MspDeInit 1 */ + } +} + +void HAL_TIM_Encoder_MspDeInit(TIM_HandleTypeDef* tim_encoderHandle) +{ + + if(tim_encoderHandle->Instance==TIM8) + { + /* USER CODE BEGIN TIM8_MspDeInit 0 */ + + /* USER CODE END TIM8_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_TIM8_CLK_DISABLE(); + + /**TIM8 GPIO Configuration + PC6 ------> TIM8_CH1 + PC7 ------> TIM8_CH2 + */ + HAL_GPIO_DeInit(GPIOC, ENC_A_Pin|ENC_B_Pin); + + /* USER CODE BEGIN TIM8_MspDeInit 1 */ + + /* USER CODE END TIM8_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Src/usart.c b/src/third_party/stm32_cubeide/Core/Src/usart.c new file mode 100644 index 000000000..430ee8221 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Src/usart.c @@ -0,0 +1,122 @@ +/** + ****************************************************************************** + * File Name : USART.c + * Description : This file provides code for the configuration + * of the USART instances. + ****************************************************************************** + * @attention + * + *
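The timer values in tim.c above follow the usual update-rate relation f_update = f_timer_clock / ((PSC + 1) * (ARR + 1)); the 48 kHz noted for TIM6 is 108 MHz / (1 * 2250). A small helper makes the same check easy for the other timers; the 108 MHz APB1 timer clock is taken from the comment above and assumed to apply to TIM12 as well:

    #include <stdint.h>

    /* Update frequency of an up-counting timer from its prescaler and auto-reload values. */
    static uint32_t tim_update_hz(uint32_t timer_clk_hz, uint32_t psc, uint32_t arr)
    {
        return timer_clk_hz / ((psc + 1U) * (arr + 1U));
    }

    /* tim_update_hz(108000000, 0, 2249) == 48000 -> TIM6, the 48 kHz noted above.
       tim_update_hz(108000000, 3, 999)  == 27000 -> TIM12, i.e. ~27 kHz PWM on TFT_BRIGHTNESS,
       assuming the same timer clock. */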

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "usart.h" + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +UART_HandleTypeDef huart7; + +/* UART7 init function */ +void MX_UART7_Init(void) +{ + + huart7.Instance = UART7; + huart7.Init.BaudRate = 115200; + huart7.Init.WordLength = UART_WORDLENGTH_9B; + huart7.Init.StopBits = UART_STOPBITS_1; + huart7.Init.Parity = UART_PARITY_EVEN; + huart7.Init.Mode = UART_MODE_TX_RX; + huart7.Init.HwFlowCtl = UART_HWCONTROL_NONE; + huart7.Init.OverSampling = UART_OVERSAMPLING_16; + huart7.Init.OneBitSampling = UART_ONE_BIT_SAMPLE_DISABLE; + huart7.AdvancedInit.AdvFeatureInit = UART_ADVFEATURE_NO_INIT; + if (HAL_UART_Init(&huart7) != HAL_OK) + { + Error_Handler(); + } + +} + +void HAL_UART_MspInit(UART_HandleTypeDef* uartHandle) +{ + + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(uartHandle->Instance==UART7) + { + /* USER CODE BEGIN UART7_MspInit 0 */ + + /* USER CODE END UART7_MspInit 0 */ + /* UART7 clock enable */ + __HAL_RCC_UART7_CLK_ENABLE(); + + __HAL_RCC_GPIOF_CLK_ENABLE(); + __HAL_RCC_GPIOB_CLK_ENABLE(); + /**UART7 GPIO Configuration + PF6 ------> UART7_RX + PB4 ------> UART7_TX + */ + GPIO_InitStruct.Pin = UART_RX_DIN1_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF8_UART7; + HAL_GPIO_Init(UART_RX_DIN1_GPIO_Port, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = UART_TX_DOUT1_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF12_UART7; + HAL_GPIO_Init(UART_TX_DOUT1_GPIO_Port, &GPIO_InitStruct); + + /* UART7 interrupt Init */ + HAL_NVIC_SetPriority(UART7_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(UART7_IRQn); + /* USER CODE BEGIN UART7_MspInit 1 */ + + /* USER CODE END UART7_MspInit 1 */ + } +} + +void HAL_UART_MspDeInit(UART_HandleTypeDef* uartHandle) +{ + + if(uartHandle->Instance==UART7) + { + /* USER CODE BEGIN UART7_MspDeInit 0 */ + + /* USER CODE END UART7_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_UART7_CLK_DISABLE(); + + /**UART7 GPIO Configuration + PF6 ------> UART7_RX + PB4 ------> UART7_TX + */ + HAL_GPIO_DeInit(UART_RX_DIN1_GPIO_Port, UART_RX_DIN1_Pin); + + HAL_GPIO_DeInit(UART_TX_DOUT1_GPIO_Port, UART_TX_DOUT1_Pin); + + /* UART7 interrupt Deinit */ + HAL_NVIC_DisableIRQ(UART7_IRQn); + /* USER CODE BEGIN UART7_MspDeInit 1 */ + + /* USER CODE END UART7_MspDeInit 1 */ + } +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Core/Startup/startup_stm32f769igtx.s b/src/third_party/stm32_cubeide/Core/Startup/startup_stm32f769igtx.s new file mode 100644 index 000000000..88aa2f235 --- /dev/null +++ b/src/third_party/stm32_cubeide/Core/Startup/startup_stm32f769igtx.s @@ -0,0 +1,621 @@ +/** + ****************************************************************************** + * @file startup_stm32f769xx.s + * @author MCD Application Team + * @brief STM32F769xx Devices vector table for GCC based 
toolchain. + * This module performs: + * - Set the initial SP + * - Set the initial PC == Reset_Handler, + * - Set the vector table entries with the exceptions ISR address + * - Branches to main in the C library (which eventually + * calls main()). + * After Reset the Cortex-M7 processor is in Thread mode, + * priority is Privileged, and the Stack is set to Main. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2016 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + + .syntax unified + .cpu cortex-m7 + .fpu softvfp + .thumb + +.global g_pfnVectors +.global Default_Handler + +/* start address for the initialization values of the .data section. +defined in linker script */ +.word _sidata +/* start address for the .data section. defined in linker script */ +.word _sdata +/* end address for the .data section. defined in linker script */ +.word _edata +/* start address for the .bss section. defined in linker script */ +.word _sbss +/* end address for the .bss section. defined in linker script */ +.word _ebss +/* stack used for SystemInit_ExtMemCtl; always internal RAM used */ + +/** + * @brief This is the code that gets called when the processor first + * starts execution following a reset event. Only the absolutely + * necessary set is performed, after which the application + * supplied main() routine is called. + * @param None + * @retval : None +*/ + + .section .text.Reset_Handler + .weak Reset_Handler + .type Reset_Handler, %function +Reset_Handler: + ldr sp, =_estack /* set stack pointer */ + +/* Copy the data segment initializers from flash to SRAM */ + movs r1, #0 + b LoopCopyDataInit + +CopyDataInit: + ldr r3, =_sidata + ldr r3, [r3, r1] + str r3, [r0, r1] + adds r1, r1, #4 + +LoopCopyDataInit: + ldr r0, =_sdata + ldr r3, =_edata + adds r2, r0, r1 + cmp r2, r3 + bcc CopyDataInit + ldr r2, =_sbss + b LoopFillZerobss +/* Zero fill the bss segment. */ +FillZerobss: + movs r3, #0 + str r3, [r2], #4 + +LoopFillZerobss: + ldr r3, = _ebss + cmp r2, r3 + bcc FillZerobss + +/* Call the clock system intitialization function.*/ + bl SystemInit +/* Call static constructors */ + bl __libc_init_array +/* Call the application's entry point.*/ + bl main + bx lr +.size Reset_Handler, .-Reset_Handler + +/** + * @brief This is the code that gets called when the processor receives an + * unexpected interrupt. This simply enters an infinite loop, preserving + * the system state for examination by a debugger. + * @param None + * @retval None +*/ + .section .text.Default_Handler,"ax",%progbits +Default_Handler: +Infinite_Loop: + b Infinite_Loop + .size Default_Handler, .-Default_Handler +/****************************************************************************** +* +* The minimal vector table for a Cortex M7. Note that the proper constructs +* must be placed on this to ensure that it ends up at physical address +* 0x0000.0000. 
+* +*******************************************************************************/ + .section .isr_vector,"a",%progbits + .type g_pfnVectors, %object + .size g_pfnVectors, .-g_pfnVectors + + +g_pfnVectors: + .word _estack + .word Reset_Handler + + .word NMI_Handler + .word HardFault_Handler + .word MemManage_Handler + .word BusFault_Handler + .word UsageFault_Handler + .word 0 + .word 0 + .word 0 + .word 0 + .word SVC_Handler + .word DebugMon_Handler + .word 0 + .word PendSV_Handler + .word SysTick_Handler + + /* External Interrupts */ + .word WWDG_IRQHandler /* Window WatchDog */ + .word PVD_IRQHandler /* PVD through EXTI Line detection */ + .word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */ + .word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ + .word FLASH_IRQHandler /* FLASH */ + .word RCC_IRQHandler /* RCC */ + .word EXTI0_IRQHandler /* EXTI Line0 */ + .word EXTI1_IRQHandler /* EXTI Line1 */ + .word EXTI2_IRQHandler /* EXTI Line2 */ + .word EXTI3_IRQHandler /* EXTI Line3 */ + .word EXTI4_IRQHandler /* EXTI Line4 */ + .word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ + .word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ + .word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ + .word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ + .word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ + .word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ + .word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ + .word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ + .word CAN1_TX_IRQHandler /* CAN1 TX */ + .word CAN1_RX0_IRQHandler /* CAN1 RX0 */ + .word CAN1_RX1_IRQHandler /* CAN1 RX1 */ + .word CAN1_SCE_IRQHandler /* CAN1 SCE */ + .word EXTI9_5_IRQHandler /* External Line[9:5]s */ + .word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */ + .word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */ + .word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */ + .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */ + .word TIM2_IRQHandler /* TIM2 */ + .word TIM3_IRQHandler /* TIM3 */ + .word TIM4_IRQHandler /* TIM4 */ + .word I2C1_EV_IRQHandler /* I2C1 Event */ + .word I2C1_ER_IRQHandler /* I2C1 Error */ + .word I2C2_EV_IRQHandler /* I2C2 Event */ + .word I2C2_ER_IRQHandler /* I2C2 Error */ + .word SPI1_IRQHandler /* SPI1 */ + .word SPI2_IRQHandler /* SPI2 */ + .word USART1_IRQHandler /* USART1 */ + .word USART2_IRQHandler /* USART2 */ + .word USART3_IRQHandler /* USART3 */ + .word EXTI15_10_IRQHandler /* External Line[15:10]s */ + .word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ + .word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */ + .word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ + .word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */ + .word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ + .word TIM8_CC_IRQHandler /* TIM8 Capture Compare */ + .word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ + .word FMC_IRQHandler /* FMC */ + .word SDMMC1_IRQHandler /* SDMMC1 */ + .word TIM5_IRQHandler /* TIM5 */ + .word SPI3_IRQHandler /* SPI3 */ + .word UART4_IRQHandler /* UART4 */ + .word UART5_IRQHandler /* UART5 */ + .word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */ + .word TIM7_IRQHandler /* TIM7 */ + .word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ + .word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ + .word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ + .word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ + .word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ + .word ETH_IRQHandler 
/* Ethernet */ + .word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */ + .word CAN2_TX_IRQHandler /* CAN2 TX */ + .word CAN2_RX0_IRQHandler /* CAN2 RX0 */ + .word CAN2_RX1_IRQHandler /* CAN2 RX1 */ + .word CAN2_SCE_IRQHandler /* CAN2 SCE */ + .word OTG_FS_IRQHandler /* USB OTG FS */ + .word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ + .word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ + .word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ + .word USART6_IRQHandler /* USART6 */ + .word I2C3_EV_IRQHandler /* I2C3 event */ + .word I2C3_ER_IRQHandler /* I2C3 error */ + .word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */ + .word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */ + .word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */ + .word OTG_HS_IRQHandler /* USB OTG HS */ + .word DCMI_IRQHandler /* DCMI */ + .word 0 /* Reserved */ + .word RNG_IRQHandler /* RNG */ + .word FPU_IRQHandler /* FPU */ + .word UART7_IRQHandler /* UART7 */ + .word UART8_IRQHandler /* UART8 */ + .word SPI4_IRQHandler /* SPI4 */ + .word SPI5_IRQHandler /* SPI5 */ + .word SPI6_IRQHandler /* SPI6 */ + .word SAI1_IRQHandler /* SAI1 */ + .word LTDC_IRQHandler /* LTDC */ + .word LTDC_ER_IRQHandler /* LTDC error */ + .word DMA2D_IRQHandler /* DMA2D */ + .word SAI2_IRQHandler /* SAI2 */ + .word QUADSPI_IRQHandler /* QUADSPI */ + .word LPTIM1_IRQHandler /* LPTIM1 */ + .word CEC_IRQHandler /* HDMI_CEC */ + .word I2C4_EV_IRQHandler /* I2C4 Event */ + .word I2C4_ER_IRQHandler /* I2C4 Error */ + .word SPDIF_RX_IRQHandler /* SPDIF_RX */ + .word DSI_IRQHandler /* DSI */ + .word DFSDM1_FLT0_IRQHandler /* DFSDM1 Filter 0 global Interrupt */ + .word DFSDM1_FLT1_IRQHandler /* DFSDM1 Filter 1 global Interrupt */ + .word DFSDM1_FLT2_IRQHandler /* DFSDM1 Filter 2 global Interrupt */ + .word DFSDM1_FLT3_IRQHandler /* DFSDM1 Filter 3 global Interrupt */ + .word SDMMC2_IRQHandler /* SDMMC2 */ + .word CAN3_TX_IRQHandler /* CAN3 TX */ + .word CAN3_RX0_IRQHandler /* CAN3 RX0 */ + .word CAN3_RX1_IRQHandler /* CAN3 RX1 */ + .word CAN3_SCE_IRQHandler /* CAN3 SCE */ + .word JPEG_IRQHandler /* JPEG */ + .word MDIOS_IRQHandler /* MDIOS */ + +/******************************************************************************* +* +* Provide weak aliases for each Exception handler to the Default_Handler. +* As they are weak aliases, any function with the same name will override +* this definition. 
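As the note above says, every entry in the vector table is only weakly aliased to Default_Handler, so a strong definition with the matching name anywhere in the application replaces it at link time. A minimal illustration follows; in a CubeMX project the generated stm32f7xx_it.c normally provides these handlers, and the body here is only a sketch:

    #include "tim.h"   /* declares htim6 (defined in tim.c above) and pulls in the HAL */

    /* Strong definition overrides the weak TIM6_DAC_IRQHandler alias from this startup file. */
    void TIM6_DAC_IRQHandler(void)
    {
        HAL_TIM_IRQHandler(&htim6);
    }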
+* +*******************************************************************************/ + .weak NMI_Handler + .thumb_set NMI_Handler,Default_Handler + + .weak HardFault_Handler + .thumb_set HardFault_Handler,Default_Handler + + .weak MemManage_Handler + .thumb_set MemManage_Handler,Default_Handler + + .weak BusFault_Handler + .thumb_set BusFault_Handler,Default_Handler + + .weak UsageFault_Handler + .thumb_set UsageFault_Handler,Default_Handler + + .weak SVC_Handler + .thumb_set SVC_Handler,Default_Handler + + .weak DebugMon_Handler + .thumb_set DebugMon_Handler,Default_Handler + + .weak PendSV_Handler + .thumb_set PendSV_Handler,Default_Handler + + .weak SysTick_Handler + .thumb_set SysTick_Handler,Default_Handler + + .weak WWDG_IRQHandler + .thumb_set WWDG_IRQHandler,Default_Handler + + .weak PVD_IRQHandler + .thumb_set PVD_IRQHandler,Default_Handler + + .weak TAMP_STAMP_IRQHandler + .thumb_set TAMP_STAMP_IRQHandler,Default_Handler + + .weak RTC_WKUP_IRQHandler + .thumb_set RTC_WKUP_IRQHandler,Default_Handler + + .weak FLASH_IRQHandler + .thumb_set FLASH_IRQHandler,Default_Handler + + .weak RCC_IRQHandler + .thumb_set RCC_IRQHandler,Default_Handler + + .weak EXTI0_IRQHandler + .thumb_set EXTI0_IRQHandler,Default_Handler + + .weak EXTI1_IRQHandler + .thumb_set EXTI1_IRQHandler,Default_Handler + + .weak EXTI2_IRQHandler + .thumb_set EXTI2_IRQHandler,Default_Handler + + .weak EXTI3_IRQHandler + .thumb_set EXTI3_IRQHandler,Default_Handler + + .weak EXTI4_IRQHandler + .thumb_set EXTI4_IRQHandler,Default_Handler + + .weak DMA1_Stream0_IRQHandler + .thumb_set DMA1_Stream0_IRQHandler,Default_Handler + + .weak DMA1_Stream1_IRQHandler + .thumb_set DMA1_Stream1_IRQHandler,Default_Handler + + .weak DMA1_Stream2_IRQHandler + .thumb_set DMA1_Stream2_IRQHandler,Default_Handler + + .weak DMA1_Stream3_IRQHandler + .thumb_set DMA1_Stream3_IRQHandler,Default_Handler + + .weak DMA1_Stream4_IRQHandler + .thumb_set DMA1_Stream4_IRQHandler,Default_Handler + + .weak DMA1_Stream5_IRQHandler + .thumb_set DMA1_Stream5_IRQHandler,Default_Handler + + .weak DMA1_Stream6_IRQHandler + .thumb_set DMA1_Stream6_IRQHandler,Default_Handler + + .weak ADC_IRQHandler + .thumb_set ADC_IRQHandler,Default_Handler + + .weak CAN1_TX_IRQHandler + .thumb_set CAN1_TX_IRQHandler,Default_Handler + + .weak CAN1_RX0_IRQHandler + .thumb_set CAN1_RX0_IRQHandler,Default_Handler + + .weak CAN1_RX1_IRQHandler + .thumb_set CAN1_RX1_IRQHandler,Default_Handler + + .weak CAN1_SCE_IRQHandler + .thumb_set CAN1_SCE_IRQHandler,Default_Handler + + .weak EXTI9_5_IRQHandler + .thumb_set EXTI9_5_IRQHandler,Default_Handler + + .weak TIM1_BRK_TIM9_IRQHandler + .thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler + + .weak TIM1_UP_TIM10_IRQHandler + .thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler + + .weak TIM1_TRG_COM_TIM11_IRQHandler + .thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler + + .weak TIM1_CC_IRQHandler + .thumb_set TIM1_CC_IRQHandler,Default_Handler + + .weak TIM2_IRQHandler + .thumb_set TIM2_IRQHandler,Default_Handler + + .weak TIM3_IRQHandler + .thumb_set TIM3_IRQHandler,Default_Handler + + .weak TIM4_IRQHandler + .thumb_set TIM4_IRQHandler,Default_Handler + + .weak I2C1_EV_IRQHandler + .thumb_set I2C1_EV_IRQHandler,Default_Handler + + .weak I2C1_ER_IRQHandler + .thumb_set I2C1_ER_IRQHandler,Default_Handler + + .weak I2C2_EV_IRQHandler + .thumb_set I2C2_EV_IRQHandler,Default_Handler + + .weak I2C2_ER_IRQHandler + .thumb_set I2C2_ER_IRQHandler,Default_Handler + + .weak SPI1_IRQHandler + .thumb_set SPI1_IRQHandler,Default_Handler + + 
.weak SPI2_IRQHandler + .thumb_set SPI2_IRQHandler,Default_Handler + + .weak USART1_IRQHandler + .thumb_set USART1_IRQHandler,Default_Handler + + .weak USART2_IRQHandler + .thumb_set USART2_IRQHandler,Default_Handler + + .weak USART3_IRQHandler + .thumb_set USART3_IRQHandler,Default_Handler + + .weak EXTI15_10_IRQHandler + .thumb_set EXTI15_10_IRQHandler,Default_Handler + + .weak RTC_Alarm_IRQHandler + .thumb_set RTC_Alarm_IRQHandler,Default_Handler + + .weak OTG_FS_WKUP_IRQHandler + .thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler + + .weak TIM8_BRK_TIM12_IRQHandler + .thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler + + .weak TIM8_UP_TIM13_IRQHandler + .thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler + + .weak TIM8_TRG_COM_TIM14_IRQHandler + .thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler + + .weak TIM8_CC_IRQHandler + .thumb_set TIM8_CC_IRQHandler,Default_Handler + + .weak DMA1_Stream7_IRQHandler + .thumb_set DMA1_Stream7_IRQHandler,Default_Handler + + .weak FMC_IRQHandler + .thumb_set FMC_IRQHandler,Default_Handler + + .weak SDMMC1_IRQHandler + .thumb_set SDMMC1_IRQHandler,Default_Handler + + .weak TIM5_IRQHandler + .thumb_set TIM5_IRQHandler,Default_Handler + + .weak SPI3_IRQHandler + .thumb_set SPI3_IRQHandler,Default_Handler + + .weak UART4_IRQHandler + .thumb_set UART4_IRQHandler,Default_Handler + + .weak UART5_IRQHandler + .thumb_set UART5_IRQHandler,Default_Handler + + .weak TIM6_DAC_IRQHandler + .thumb_set TIM6_DAC_IRQHandler,Default_Handler + + .weak TIM7_IRQHandler + .thumb_set TIM7_IRQHandler,Default_Handler + + .weak DMA2_Stream0_IRQHandler + .thumb_set DMA2_Stream0_IRQHandler,Default_Handler + + .weak DMA2_Stream1_IRQHandler + .thumb_set DMA2_Stream1_IRQHandler,Default_Handler + + .weak DMA2_Stream2_IRQHandler + .thumb_set DMA2_Stream2_IRQHandler,Default_Handler + + .weak DMA2_Stream3_IRQHandler + .thumb_set DMA2_Stream3_IRQHandler,Default_Handler + + .weak DMA2_Stream4_IRQHandler + .thumb_set DMA2_Stream4_IRQHandler,Default_Handler + + .weak DMA2_Stream4_IRQHandler + .thumb_set DMA2_Stream4_IRQHandler,Default_Handler + + .weak ETH_IRQHandler + .thumb_set ETH_IRQHandler,Default_Handler + + .weak ETH_WKUP_IRQHandler + .thumb_set ETH_WKUP_IRQHandler,Default_Handler + + .weak CAN2_TX_IRQHandler + .thumb_set CAN2_TX_IRQHandler,Default_Handler + + .weak CAN2_RX0_IRQHandler + .thumb_set CAN2_RX0_IRQHandler,Default_Handler + + .weak CAN2_RX1_IRQHandler + .thumb_set CAN2_RX1_IRQHandler,Default_Handler + + .weak CAN2_SCE_IRQHandler + .thumb_set CAN2_SCE_IRQHandler,Default_Handler + + .weak OTG_FS_IRQHandler + .thumb_set OTG_FS_IRQHandler,Default_Handler + + .weak DMA2_Stream5_IRQHandler + .thumb_set DMA2_Stream5_IRQHandler,Default_Handler + + .weak DMA2_Stream6_IRQHandler + .thumb_set DMA2_Stream6_IRQHandler,Default_Handler + + .weak DMA2_Stream7_IRQHandler + .thumb_set DMA2_Stream7_IRQHandler,Default_Handler + + .weak USART6_IRQHandler + .thumb_set USART6_IRQHandler,Default_Handler + + .weak I2C3_EV_IRQHandler + .thumb_set I2C3_EV_IRQHandler,Default_Handler + + .weak I2C3_ER_IRQHandler + .thumb_set I2C3_ER_IRQHandler,Default_Handler + + .weak OTG_HS_EP1_OUT_IRQHandler + .thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler + + .weak OTG_HS_EP1_IN_IRQHandler + .thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler + + .weak OTG_HS_WKUP_IRQHandler + .thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler + + .weak OTG_HS_IRQHandler + .thumb_set OTG_HS_IRQHandler,Default_Handler + + .weak DCMI_IRQHandler + .thumb_set DCMI_IRQHandler,Default_Handler + + .weak RNG_IRQHandler + 
.thumb_set RNG_IRQHandler,Default_Handler + + .weak FPU_IRQHandler + .thumb_set FPU_IRQHandler,Default_Handler + + .weak UART7_IRQHandler + .thumb_set UART7_IRQHandler,Default_Handler + + .weak UART8_IRQHandler + .thumb_set UART8_IRQHandler,Default_Handler + + .weak SPI4_IRQHandler + .thumb_set SPI4_IRQHandler,Default_Handler + + .weak SPI5_IRQHandler + .thumb_set SPI5_IRQHandler,Default_Handler + + .weak SPI6_IRQHandler + .thumb_set SPI6_IRQHandler,Default_Handler + + .weak SAI1_IRQHandler + .thumb_set SAI1_IRQHandler,Default_Handler + + .weak LTDC_IRQHandler + .thumb_set LTDC_IRQHandler,Default_Handler + + .weak LTDC_ER_IRQHandler + .thumb_set LTDC_ER_IRQHandler,Default_Handler + + .weak DMA2D_IRQHandler + .thumb_set DMA2D_IRQHandler,Default_Handler + + .weak SAI2_IRQHandler + .thumb_set SAI2_IRQHandler,Default_Handler + + .weak QUADSPI_IRQHandler + .thumb_set QUADSPI_IRQHandler,Default_Handler + + .weak LPTIM1_IRQHandler + .thumb_set LPTIM1_IRQHandler,Default_Handler + + .weak CEC_IRQHandler + .thumb_set CEC_IRQHandler,Default_Handler + + .weak I2C4_EV_IRQHandler + .thumb_set I2C4_EV_IRQHandler,Default_Handler + + .weak I2C4_ER_IRQHandler + .thumb_set I2C4_ER_IRQHandler,Default_Handler + + .weak SPDIF_RX_IRQHandler + .thumb_set SPDIF_RX_IRQHandler,Default_Handler + + .weak DSI_IRQHandler + .thumb_set DSI_IRQHandler,Default_Handler + + .weak DFSDM1_FLT0_IRQHandler + .thumb_set DFSDM1_FLT0_IRQHandler,Default_Handler + + .weak DFSDM1_FLT1_IRQHandler + .thumb_set DFSDM1_FLT1_IRQHandler,Default_Handler + + .weak DFSDM1_FLT2_IRQHandler + .thumb_set DFSDM1_FLT2_IRQHandler,Default_Handler + + .weak DFSDM1_FLT3_IRQHandler + .thumb_set DFSDM1_FLT3_IRQHandler,Default_Handler + + .weak SDMMC2_IRQHandler + .thumb_set SDMMC2_IRQHandler,Default_Handler + + .weak CAN3_TX_IRQHandler + .thumb_set CAN3_TX_IRQHandler,Default_Handler + + .weak CAN3_RX0_IRQHandler + .thumb_set CAN3_RX0_IRQHandler,Default_Handler + + .weak CAN3_RX1_IRQHandler + .thumb_set CAN3_RX1_IRQHandler,Default_Handler + + .weak CAN3_SCE_IRQHandler + .thumb_set CAN3_SCE_IRQHandler,Default_Handler + + .weak JPEG_IRQHandler + .thumb_set JPEG_IRQHandler,Default_Handler + + .weak MDIOS_IRQHandler + .thumb_set MDIOS_IRQHandler,Default_Handler + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Device/ST/STM32F7xx/Include/stm32f769xx.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Device/ST/STM32F7xx/Include/stm32f769xx.h new file mode 100644 index 000000000..88afec7cf --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Device/ST/STM32F7xx/Include/stm32f769xx.h @@ -0,0 +1,22098 @@ +/** + ****************************************************************************** + * @file stm32f769xx.h + * @author MCD Application Team + * @brief CMSIS Cortex-M7 Device Peripheral Access Layer Header File. + * + * This file contains: + * - Data structures and the address mapping for all peripherals + * - Peripheral's registers declarations and bits definition + * - Macros to access peripheral’s registers hardware + * + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2016 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/** @addtogroup CMSIS_Device + * @{ + */ + +/** @addtogroup stm32f769xx + * @{ + */ + +#ifndef __STM32F769xx_H +#define __STM32F769xx_H + +#ifdef __cplusplus + extern "C" { +#endif /* __cplusplus */ + +/** @addtogroup Configuration_section_for_CMSIS + * @{ + */ + +/** + * @brief STM32F7xx Interrupt Number Definition, according to the selected device + * in @ref Library_configuration_section + */ +typedef enum +{ +/****** Cortex-M7 Processor Exceptions Numbers ****************************************************************/ + NonMaskableInt_IRQn = -14, /*!< 2 Non Maskable Interrupt */ + MemoryManagement_IRQn = -12, /*!< 4 Cortex-M7 Memory Management Interrupt */ + BusFault_IRQn = -11, /*!< 5 Cortex-M7 Bus Fault Interrupt */ + UsageFault_IRQn = -10, /*!< 6 Cortex-M7 Usage Fault Interrupt */ + SVCall_IRQn = -5, /*!< 11 Cortex-M7 SV Call Interrupt */ + DebugMonitor_IRQn = -4, /*!< 12 Cortex-M7 Debug Monitor Interrupt */ + PendSV_IRQn = -2, /*!< 14 Cortex-M7 Pend SV Interrupt */ + SysTick_IRQn = -1, /*!< 15 Cortex-M7 System Tick Interrupt */ +/****** STM32 specific Interrupt Numbers **********************************************************************/ + WWDG_IRQn = 0, /*!< Window WatchDog Interrupt */ + PVD_IRQn = 1, /*!< PVD through EXTI Line detection Interrupt */ + TAMP_STAMP_IRQn = 2, /*!< Tamper and TimeStamp interrupts through the EXTI line */ + RTC_WKUP_IRQn = 3, /*!< RTC Wakeup interrupt through the EXTI line */ + FLASH_IRQn = 4, /*!< FLASH global Interrupt */ + RCC_IRQn = 5, /*!< RCC global Interrupt */ + EXTI0_IRQn = 6, /*!< EXTI Line0 Interrupt */ + EXTI1_IRQn = 7, /*!< EXTI Line1 Interrupt */ + EXTI2_IRQn = 8, /*!< EXTI Line2 Interrupt */ + EXTI3_IRQn = 9, /*!< EXTI Line3 Interrupt */ + EXTI4_IRQn = 10, /*!< EXTI Line4 Interrupt */ + DMA1_Stream0_IRQn = 11, /*!< DMA1 Stream 0 global Interrupt */ + DMA1_Stream1_IRQn = 12, /*!< DMA1 Stream 1 global Interrupt */ + DMA1_Stream2_IRQn = 13, /*!< DMA1 Stream 2 global Interrupt */ + DMA1_Stream3_IRQn = 14, /*!< DMA1 Stream 3 global Interrupt */ + DMA1_Stream4_IRQn = 15, /*!< DMA1 Stream 4 global Interrupt */ + DMA1_Stream5_IRQn = 16, /*!< DMA1 Stream 5 global Interrupt */ + DMA1_Stream6_IRQn = 17, /*!< DMA1 Stream 6 global Interrupt */ + ADC_IRQn = 18, /*!< ADC1, ADC2 and ADC3 global Interrupts */ + CAN1_TX_IRQn = 19, /*!< CAN1 TX Interrupt */ + CAN1_RX0_IRQn = 20, /*!< CAN1 RX0 Interrupt */ + CAN1_RX1_IRQn = 21, /*!< CAN1 RX1 Interrupt */ + CAN1_SCE_IRQn = 22, /*!< CAN1 SCE Interrupt */ + EXTI9_5_IRQn = 23, /*!< External Line[9:5] Interrupts */ + TIM1_BRK_TIM9_IRQn = 24, /*!< TIM1 Break interrupt and TIM9 global interrupt */ + TIM1_UP_TIM10_IRQn = 25, /*!< TIM1 Update Interrupt and TIM10 global interrupt */ + TIM1_TRG_COM_TIM11_IRQn = 26, /*!< TIM1 Trigger and Commutation Interrupt and TIM11 global interrupt */ + TIM1_CC_IRQn = 27, /*!< TIM1 Capture Compare Interrupt */ + TIM2_IRQn = 28, /*!< TIM2 global Interrupt */ + TIM3_IRQn = 29, /*!< TIM3 global Interrupt */ + TIM4_IRQn = 30, /*!< TIM4 global Interrupt */ + I2C1_EV_IRQn = 31, /*!< I2C1 Event Interrupt */ + I2C1_ER_IRQn = 32, /*!< I2C1 Error Interrupt */ + I2C2_EV_IRQn = 33, /*!< I2C2 Event Interrupt */ + I2C2_ER_IRQn = 34, /*!< I2C2 Error 
Interrupt */ + SPI1_IRQn = 35, /*!< SPI1 global Interrupt */ + SPI2_IRQn = 36, /*!< SPI2 global Interrupt */ + USART1_IRQn = 37, /*!< USART1 global Interrupt */ + USART2_IRQn = 38, /*!< USART2 global Interrupt */ + USART3_IRQn = 39, /*!< USART3 global Interrupt */ + EXTI15_10_IRQn = 40, /*!< External Line[15:10] Interrupts */ + RTC_Alarm_IRQn = 41, /*!< RTC Alarm (A and B) through EXTI Line Interrupt */ + OTG_FS_WKUP_IRQn = 42, /*!< USB OTG FS Wakeup through EXTI line interrupt */ + TIM8_BRK_TIM12_IRQn = 43, /*!< TIM8 Break Interrupt and TIM12 global interrupt */ + TIM8_UP_TIM13_IRQn = 44, /*!< TIM8 Update Interrupt and TIM13 global interrupt */ + TIM8_TRG_COM_TIM14_IRQn = 45, /*!< TIM8 Trigger and Commutation Interrupt and TIM14 global interrupt */ + TIM8_CC_IRQn = 46, /*!< TIM8 Capture Compare Interrupt */ + DMA1_Stream7_IRQn = 47, /*!< DMA1 Stream7 Interrupt */ + FMC_IRQn = 48, /*!< FMC global Interrupt */ + SDMMC1_IRQn = 49, /*!< SDMMC1 global Interrupt */ + TIM5_IRQn = 50, /*!< TIM5 global Interrupt */ + SPI3_IRQn = 51, /*!< SPI3 global Interrupt */ + UART4_IRQn = 52, /*!< UART4 global Interrupt */ + UART5_IRQn = 53, /*!< UART5 global Interrupt */ + TIM6_DAC_IRQn = 54, /*!< TIM6 global and DAC1&2 underrun error interrupts */ + TIM7_IRQn = 55, /*!< TIM7 global interrupt */ + DMA2_Stream0_IRQn = 56, /*!< DMA2 Stream 0 global Interrupt */ + DMA2_Stream1_IRQn = 57, /*!< DMA2 Stream 1 global Interrupt */ + DMA2_Stream2_IRQn = 58, /*!< DMA2 Stream 2 global Interrupt */ + DMA2_Stream3_IRQn = 59, /*!< DMA2 Stream 3 global Interrupt */ + DMA2_Stream4_IRQn = 60, /*!< DMA2 Stream 4 global Interrupt */ + ETH_IRQn = 61, /*!< Ethernet global Interrupt */ + ETH_WKUP_IRQn = 62, /*!< Ethernet Wakeup through EXTI line Interrupt */ + CAN2_TX_IRQn = 63, /*!< CAN2 TX Interrupt */ + CAN2_RX0_IRQn = 64, /*!< CAN2 RX0 Interrupt */ + CAN2_RX1_IRQn = 65, /*!< CAN2 RX1 Interrupt */ + CAN2_SCE_IRQn = 66, /*!< CAN2 SCE Interrupt */ + OTG_FS_IRQn = 67, /*!< USB OTG FS global Interrupt */ + DMA2_Stream5_IRQn = 68, /*!< DMA2 Stream 5 global interrupt */ + DMA2_Stream6_IRQn = 69, /*!< DMA2 Stream 6 global interrupt */ + DMA2_Stream7_IRQn = 70, /*!< DMA2 Stream 7 global interrupt */ + USART6_IRQn = 71, /*!< USART6 global interrupt */ + I2C3_EV_IRQn = 72, /*!< I2C3 event interrupt */ + I2C3_ER_IRQn = 73, /*!< I2C3 error interrupt */ + OTG_HS_EP1_OUT_IRQn = 74, /*!< USB OTG HS End Point 1 Out global interrupt */ + OTG_HS_EP1_IN_IRQn = 75, /*!< USB OTG HS End Point 1 In global interrupt */ + OTG_HS_WKUP_IRQn = 76, /*!< USB OTG HS Wakeup through EXTI interrupt */ + OTG_HS_IRQn = 77, /*!< USB OTG HS global interrupt */ + DCMI_IRQn = 78, /*!< DCMI global interrupt */ + RNG_IRQn = 80, /*!< RNG global interrupt */ + FPU_IRQn = 81, /*!< FPU global interrupt */ + UART7_IRQn = 82, /*!< UART7 global interrupt */ + UART8_IRQn = 83, /*!< UART8 global interrupt */ + SPI4_IRQn = 84, /*!< SPI4 global Interrupt */ + SPI5_IRQn = 85, /*!< SPI5 global Interrupt */ + SPI6_IRQn = 86, /*!< SPI6 global Interrupt */ + SAI1_IRQn = 87, /*!< SAI1 global Interrupt */ + LTDC_IRQn = 88, /*!< LTDC global Interrupt */ + LTDC_ER_IRQn = 89, /*!< LTDC Error global Interrupt */ + DMA2D_IRQn = 90, /*!< DMA2D global Interrupt */ + SAI2_IRQn = 91, /*!< SAI2 global Interrupt */ + QUADSPI_IRQn = 92, /*!< Quad SPI global interrupt */ + LPTIM1_IRQn = 93, /*!< LP TIM1 interrupt */ + CEC_IRQn = 94, /*!< HDMI-CEC global Interrupt */ + I2C4_EV_IRQn = 95, /*!< I2C4 Event Interrupt */ + I2C4_ER_IRQn = 96, /*!< I2C4 Error Interrupt */ + SPDIF_RX_IRQn = 97, /*!< 
SPDIF-RX global Interrupt */ + DSI_IRQn = 98, /*!< DSI global Interrupt */ + DFSDM1_FLT0_IRQn = 99, /*!< DFSDM1 Filter 0 global Interrupt */ + DFSDM1_FLT1_IRQn = 100, /*!< DFSDM1 Filter 1 global Interrupt */ + DFSDM1_FLT2_IRQn = 101, /*!< DFSDM1 Filter 2 global Interrupt */ + DFSDM1_FLT3_IRQn = 102, /*!< DFSDM1 Filter 3 global Interrupt */ + SDMMC2_IRQn = 103, /*!< SDMMC2 global Interrupt */ + CAN3_TX_IRQn = 104, /*!< CAN3 TX Interrupt */ + CAN3_RX0_IRQn = 105, /*!< CAN3 RX0 Interrupt */ + CAN3_RX1_IRQn = 106, /*!< CAN3 RX1 Interrupt */ + CAN3_SCE_IRQn = 107, /*!< CAN3 SCE Interrupt */ + JPEG_IRQn = 108, /*!< JPEG global Interrupt */ + MDIOS_IRQn = 109 /*!< MDIO Slave global Interrupt */ +} IRQn_Type; + +/** + * @} + */ + +/** + * @brief Configuration of the Cortex-M7 Processor and Core Peripherals + */ +#define __CM7_REV 0x0100U /*!< Cortex-M7 revision r1p0 */ +#define __MPU_PRESENT 1 /*!< CM7 provides an MPU */ +#define __NVIC_PRIO_BITS 4 /*!< CM7 uses 4 Bits for the Priority Levels */ +#define __Vendor_SysTickConfig 0 /*!< Set to 1 if different SysTick Config is used */ +#define __FPU_PRESENT 1 /*!< FPU present */ +#define __ICACHE_PRESENT 1 /*!< CM7 instruction cache present */ +#define __DCACHE_PRESENT 1 /*!< CM7 data cache present */ +#include "core_cm7.h" /*!< Cortex-M7 processor and core peripherals */ + + +#include "system_stm32f7xx.h" +#include <stdint.h> + +/** @addtogroup Peripheral_registers_structures + * @{ + */ + +/** + * @brief Analog to Digital Converter + */ + +typedef struct +{ + __IO uint32_t SR; /*!< ADC status register, Address offset: 0x00 */ + __IO uint32_t CR1; /*!< ADC control register 1, Address offset: 0x04 */ + __IO uint32_t CR2; /*!< ADC control register 2, Address offset: 0x08 */ + __IO uint32_t SMPR1; /*!< ADC sample time register 1, Address offset: 0x0C */ + __IO uint32_t SMPR2; /*!< ADC sample time register 2, Address offset: 0x10 */ + __IO uint32_t JOFR1; /*!< ADC injected channel data offset register 1, Address offset: 0x14 */ + __IO uint32_t JOFR2; /*!< ADC injected channel data offset register 2, Address offset: 0x18 */ + __IO uint32_t JOFR3; /*!< ADC injected channel data offset register 3, Address offset: 0x1C */ + __IO uint32_t JOFR4; /*!< ADC injected channel data offset register 4, Address offset: 0x20 */ + __IO uint32_t HTR; /*!< ADC watchdog higher threshold register, Address offset: 0x24 */ + __IO uint32_t LTR; /*!< ADC watchdog lower threshold register, Address offset: 0x28 */ + __IO uint32_t SQR1; /*!< ADC regular sequence register 1, Address offset: 0x2C */ + __IO uint32_t SQR2; /*!< ADC regular sequence register 2, Address offset: 0x30 */ + __IO uint32_t SQR3; /*!< ADC regular sequence register 3, Address offset: 0x34 */ + __IO uint32_t JSQR; /*!< ADC injected sequence register, Address offset: 0x38*/ + __IO uint32_t JDR1; /*!< ADC injected data register 1, Address offset: 0x3C */ + __IO uint32_t JDR2; /*!< ADC injected data register 2, Address offset: 0x40 */ + __IO uint32_t JDR3; /*!< ADC injected data register 3, Address offset: 0x44 */ + __IO uint32_t JDR4; /*!< ADC injected data register 4, Address offset: 0x48 */ + __IO uint32_t DR; /*!< ADC regular data register, Address offset: 0x4C */ +} ADC_TypeDef; + +typedef struct +{ + __IO uint32_t CSR; /*!< ADC Common status register, Address offset: ADC1 base address + 0x300 */ + __IO uint32_t CCR; /*!< ADC common control register, Address offset: ADC1 base address + 0x304 */ + __IO uint32_t CDR; /*!< ADC common regular data register for dual + AND triple modes, Address offset: ADC1 base address + 
0x308 */ +} ADC_Common_TypeDef; + + +/** + * @brief Controller Area Network TxMailBox + */ + +typedef struct +{ + __IO uint32_t TIR; /*!< CAN TX mailbox identifier register */ + __IO uint32_t TDTR; /*!< CAN mailbox data length control and time stamp register */ + __IO uint32_t TDLR; /*!< CAN mailbox data low register */ + __IO uint32_t TDHR; /*!< CAN mailbox data high register */ +} CAN_TxMailBox_TypeDef; + +/** + * @brief Controller Area Network FIFOMailBox + */ + +typedef struct +{ + __IO uint32_t RIR; /*!< CAN receive FIFO mailbox identifier register */ + __IO uint32_t RDTR; /*!< CAN receive FIFO mailbox data length control and time stamp register */ + __IO uint32_t RDLR; /*!< CAN receive FIFO mailbox data low register */ + __IO uint32_t RDHR; /*!< CAN receive FIFO mailbox data high register */ +} CAN_FIFOMailBox_TypeDef; + +/** + * @brief Controller Area Network FilterRegister + */ + +typedef struct +{ + __IO uint32_t FR1; /*!< CAN Filter bank register 1 */ + __IO uint32_t FR2; /*!< CAN Filter bank register 1 */ +} CAN_FilterRegister_TypeDef; + +/** + * @brief Controller Area Network + */ + +typedef struct +{ + __IO uint32_t MCR; /*!< CAN master control register, Address offset: 0x00 */ + __IO uint32_t MSR; /*!< CAN master status register, Address offset: 0x04 */ + __IO uint32_t TSR; /*!< CAN transmit status register, Address offset: 0x08 */ + __IO uint32_t RF0R; /*!< CAN receive FIFO 0 register, Address offset: 0x0C */ + __IO uint32_t RF1R; /*!< CAN receive FIFO 1 register, Address offset: 0x10 */ + __IO uint32_t IER; /*!< CAN interrupt enable register, Address offset: 0x14 */ + __IO uint32_t ESR; /*!< CAN error status register, Address offset: 0x18 */ + __IO uint32_t BTR; /*!< CAN bit timing register, Address offset: 0x1C */ + uint32_t RESERVED0[88]; /*!< Reserved, 0x020 - 0x17F */ + CAN_TxMailBox_TypeDef sTxMailBox[3]; /*!< CAN Tx MailBox, Address offset: 0x180 - 0x1AC */ + CAN_FIFOMailBox_TypeDef sFIFOMailBox[2]; /*!< CAN FIFO MailBox, Address offset: 0x1B0 - 0x1CC */ + uint32_t RESERVED1[12]; /*!< Reserved, 0x1D0 - 0x1FF */ + __IO uint32_t FMR; /*!< CAN filter master register, Address offset: 0x200 */ + __IO uint32_t FM1R; /*!< CAN filter mode register, Address offset: 0x204 */ + uint32_t RESERVED2; /*!< Reserved, 0x208 */ + __IO uint32_t FS1R; /*!< CAN filter scale register, Address offset: 0x20C */ + uint32_t RESERVED3; /*!< Reserved, 0x210 */ + __IO uint32_t FFA1R; /*!< CAN filter FIFO assignment register, Address offset: 0x214 */ + uint32_t RESERVED4; /*!< Reserved, 0x218 */ + __IO uint32_t FA1R; /*!< CAN filter activation register, Address offset: 0x21C */ + uint32_t RESERVED5[8]; /*!< Reserved, 0x220-0x23F */ + CAN_FilterRegister_TypeDef sFilterRegister[28]; /*!< CAN Filter Register, Address offset: 0x240-0x31C */ +} CAN_TypeDef; + +/** + * @brief HDMI-CEC + */ + +typedef struct +{ + __IO uint32_t CR; /*!< CEC control register, Address offset:0x00 */ + __IO uint32_t CFGR; /*!< CEC configuration register, Address offset:0x04 */ + __IO uint32_t TXDR; /*!< CEC Tx data register , Address offset:0x08 */ + __IO uint32_t RXDR; /*!< CEC Rx Data Register, Address offset:0x0C */ + __IO uint32_t ISR; /*!< CEC Interrupt and Status Register, Address offset:0x10 */ + __IO uint32_t IER; /*!< CEC interrupt enable register, Address offset:0x14 */ +}CEC_TypeDef; + +/** + * @brief CRC calculation unit + */ + +typedef struct +{ + __IO uint32_t DR; /*!< CRC Data register, Address offset: 0x00 */ + __IO uint8_t IDR; /*!< CRC Independent data register, Address offset: 0x04 */ + uint8_t RESERVED0; 
/*!< Reserved, 0x05 */ + uint16_t RESERVED1; /*!< Reserved, 0x06 */ + __IO uint32_t CR; /*!< CRC Control register, Address offset: 0x08 */ + uint32_t RESERVED2; /*!< Reserved, 0x0C */ + __IO uint32_t INIT; /*!< Initial CRC value register, Address offset: 0x10 */ + __IO uint32_t POL; /*!< CRC polynomial register, Address offset: 0x14 */ +} CRC_TypeDef; + +/** + * @brief Digital to Analog Converter + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DAC control register, Address offset: 0x00 */ + __IO uint32_t SWTRIGR; /*!< DAC software trigger register, Address offset: 0x04 */ + __IO uint32_t DHR12R1; /*!< DAC channel1 12-bit right-aligned data holding register, Address offset: 0x08 */ + __IO uint32_t DHR12L1; /*!< DAC channel1 12-bit left aligned data holding register, Address offset: 0x0C */ + __IO uint32_t DHR8R1; /*!< DAC channel1 8-bit right aligned data holding register, Address offset: 0x10 */ + __IO uint32_t DHR12R2; /*!< DAC channel2 12-bit right aligned data holding register, Address offset: 0x14 */ + __IO uint32_t DHR12L2; /*!< DAC channel2 12-bit left aligned data holding register, Address offset: 0x18 */ + __IO uint32_t DHR8R2; /*!< DAC channel2 8-bit right-aligned data holding register, Address offset: 0x1C */ + __IO uint32_t DHR12RD; /*!< Dual DAC 12-bit right-aligned data holding register, Address offset: 0x20 */ + __IO uint32_t DHR12LD; /*!< DUAL DAC 12-bit left aligned data holding register, Address offset: 0x24 */ + __IO uint32_t DHR8RD; /*!< DUAL DAC 8-bit right aligned data holding register, Address offset: 0x28 */ + __IO uint32_t DOR1; /*!< DAC channel1 data output register, Address offset: 0x2C */ + __IO uint32_t DOR2; /*!< DAC channel2 data output register, Address offset: 0x30 */ + __IO uint32_t SR; /*!< DAC status register, Address offset: 0x34 */ +} DAC_TypeDef; + +/** + * @brief DFSDM module registers + */ +typedef struct +{ + __IO uint32_t FLTCR1; /*!< DFSDM control register1, Address offset: 0x100 */ + __IO uint32_t FLTCR2; /*!< DFSDM control register2, Address offset: 0x104 */ + __IO uint32_t FLTISR; /*!< DFSDM interrupt and status register, Address offset: 0x108 */ + __IO uint32_t FLTICR; /*!< DFSDM interrupt flag clear register, Address offset: 0x10C */ + __IO uint32_t FLTJCHGR; /*!< DFSDM injected channel group selection register, Address offset: 0x110 */ + __IO uint32_t FLTFCR; /*!< DFSDM filter control register, Address offset: 0x114 */ + __IO uint32_t FLTJDATAR; /*!< DFSDM data register for injected group, Address offset: 0x118 */ + __IO uint32_t FLTRDATAR; /*!< DFSDM data register for regular group, Address offset: 0x11C */ + __IO uint32_t FLTAWHTR; /*!< DFSDM analog watchdog high threshold register, Address offset: 0x120 */ + __IO uint32_t FLTAWLTR; /*!< DFSDM analog watchdog low threshold register, Address offset: 0x124 */ + __IO uint32_t FLTAWSR; /*!< DFSDM analog watchdog status register Address offset: 0x128 */ + __IO uint32_t FLTAWCFR; /*!< DFSDM analog watchdog clear flag register Address offset: 0x12C */ + __IO uint32_t FLTEXMAX; /*!< DFSDM extreme detector maximum register, Address offset: 0x130 */ + __IO uint32_t FLTEXMIN; /*!< DFSDM extreme detector minimum register Address offset: 0x134 */ + __IO uint32_t FLTCNVTIMR; /*!< DFSDM conversion timer, Address offset: 0x138 */ +} DFSDM_Filter_TypeDef; + +/** + * @brief DFSDM channel configuration registers + */ +typedef struct +{ + __IO uint32_t CHCFGR1; /*!< DFSDM channel configuration register1, Address offset: 0x00 */ + __IO uint32_t CHCFGR2; /*!< DFSDM channel configuration register2, Address 
offset: 0x04 */ + __IO uint32_t CHAWSCDR; /*!< DFSDM channel analog watchdog and + short circuit detector register, Address offset: 0x08 */ + __IO uint32_t CHWDATAR; /*!< DFSDM channel watchdog filter data register, Address offset: 0x0C */ + __IO uint32_t CHDATINR; /*!< DFSDM channel data input register, Address offset: 0x10 */ +} DFSDM_Channel_TypeDef; + +/** + * @brief Debug MCU + */ + +typedef struct +{ + __IO uint32_t IDCODE; /*!< MCU device ID code, Address offset: 0x00 */ + __IO uint32_t CR; /*!< Debug MCU configuration register, Address offset: 0x04 */ + __IO uint32_t APB1FZ; /*!< Debug MCU APB1 freeze register, Address offset: 0x08 */ + __IO uint32_t APB2FZ; /*!< Debug MCU APB2 freeze register, Address offset: 0x0C */ +}DBGMCU_TypeDef; + +/** + * @brief DCMI + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DCMI control register 1, Address offset: 0x00 */ + __IO uint32_t SR; /*!< DCMI status register, Address offset: 0x04 */ + __IO uint32_t RISR; /*!< DCMI raw interrupt status register, Address offset: 0x08 */ + __IO uint32_t IER; /*!< DCMI interrupt enable register, Address offset: 0x0C */ + __IO uint32_t MISR; /*!< DCMI masked interrupt status register, Address offset: 0x10 */ + __IO uint32_t ICR; /*!< DCMI interrupt clear register, Address offset: 0x14 */ + __IO uint32_t ESCR; /*!< DCMI embedded synchronization code register, Address offset: 0x18 */ + __IO uint32_t ESUR; /*!< DCMI embedded synchronization unmask register, Address offset: 0x1C */ + __IO uint32_t CWSTRTR; /*!< DCMI crop window start, Address offset: 0x20 */ + __IO uint32_t CWSIZER; /*!< DCMI crop window size, Address offset: 0x24 */ + __IO uint32_t DR; /*!< DCMI data register, Address offset: 0x28 */ +} DCMI_TypeDef; + +/** + * @brief DMA Controller + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DMA stream x configuration register */ + __IO uint32_t NDTR; /*!< DMA stream x number of data register */ + __IO uint32_t PAR; /*!< DMA stream x peripheral address register */ + __IO uint32_t M0AR; /*!< DMA stream x memory 0 address register */ + __IO uint32_t M1AR; /*!< DMA stream x memory 1 address register */ + __IO uint32_t FCR; /*!< DMA stream x FIFO control register */ +} DMA_Stream_TypeDef; + +typedef struct +{ + __IO uint32_t LISR; /*!< DMA low interrupt status register, Address offset: 0x00 */ + __IO uint32_t HISR; /*!< DMA high interrupt status register, Address offset: 0x04 */ + __IO uint32_t LIFCR; /*!< DMA low interrupt flag clear register, Address offset: 0x08 */ + __IO uint32_t HIFCR; /*!< DMA high interrupt flag clear register, Address offset: 0x0C */ +} DMA_TypeDef; + +/** + * @brief DMA2D Controller + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DMA2D Control Register, Address offset: 0x00 */ + __IO uint32_t ISR; /*!< DMA2D Interrupt Status Register, Address offset: 0x04 */ + __IO uint32_t IFCR; /*!< DMA2D Interrupt Flag Clear Register, Address offset: 0x08 */ + __IO uint32_t FGMAR; /*!< DMA2D Foreground Memory Address Register, Address offset: 0x0C */ + __IO uint32_t FGOR; /*!< DMA2D Foreground Offset Register, Address offset: 0x10 */ + __IO uint32_t BGMAR; /*!< DMA2D Background Memory Address Register, Address offset: 0x14 */ + __IO uint32_t BGOR; /*!< DMA2D Background Offset Register, Address offset: 0x18 */ + __IO uint32_t FGPFCCR; /*!< DMA2D Foreground PFC Control Register, Address offset: 0x1C */ + __IO uint32_t FGCOLR; /*!< DMA2D Foreground Color Register, Address offset: 0x20 */ + __IO uint32_t BGPFCCR; /*!< DMA2D Background PFC Control Register, Address offset: 0x24 */ + __IO uint32_t 
BGCOLR; /*!< DMA2D Background Color Register, Address offset: 0x28 */ + __IO uint32_t FGCMAR; /*!< DMA2D Foreground CLUT Memory Address Register, Address offset: 0x2C */ + __IO uint32_t BGCMAR; /*!< DMA2D Background CLUT Memory Address Register, Address offset: 0x30 */ + __IO uint32_t OPFCCR; /*!< DMA2D Output PFC Control Register, Address offset: 0x34 */ + __IO uint32_t OCOLR; /*!< DMA2D Output Color Register, Address offset: 0x38 */ + __IO uint32_t OMAR; /*!< DMA2D Output Memory Address Register, Address offset: 0x3C */ + __IO uint32_t OOR; /*!< DMA2D Output Offset Register, Address offset: 0x40 */ + __IO uint32_t NLR; /*!< DMA2D Number of Line Register, Address offset: 0x44 */ + __IO uint32_t LWR; /*!< DMA2D Line Watermark Register, Address offset: 0x48 */ + __IO uint32_t AMTCR; /*!< DMA2D AHB Master Timer Configuration Register, Address offset: 0x4C */ + uint32_t RESERVED[236]; /*!< Reserved, 0x50-0x3FF */ + __IO uint32_t FGCLUT[256]; /*!< DMA2D Foreground CLUT, Address offset:400-7FF */ + __IO uint32_t BGCLUT[256]; /*!< DMA2D Background CLUT, Address offset:800-BFF */ +} DMA2D_TypeDef; + + +/** + * @brief Ethernet MAC + */ + +typedef struct +{ + __IO uint32_t MACCR; + __IO uint32_t MACFFR; + __IO uint32_t MACHTHR; + __IO uint32_t MACHTLR; + __IO uint32_t MACMIIAR; + __IO uint32_t MACMIIDR; + __IO uint32_t MACFCR; + __IO uint32_t MACVLANTR; /* 8 */ + uint32_t RESERVED0[2]; + __IO uint32_t MACRWUFFR; /* 11 */ + __IO uint32_t MACPMTCSR; + uint32_t RESERVED1; + __IO uint32_t MACDBGR; + __IO uint32_t MACSR; /* 15 */ + __IO uint32_t MACIMR; + __IO uint32_t MACA0HR; + __IO uint32_t MACA0LR; + __IO uint32_t MACA1HR; + __IO uint32_t MACA1LR; + __IO uint32_t MACA2HR; + __IO uint32_t MACA2LR; + __IO uint32_t MACA3HR; + __IO uint32_t MACA3LR; /* 24 */ + uint32_t RESERVED2[40]; + __IO uint32_t MMCCR; /* 65 */ + __IO uint32_t MMCRIR; + __IO uint32_t MMCTIR; + __IO uint32_t MMCRIMR; + __IO uint32_t MMCTIMR; /* 69 */ + uint32_t RESERVED3[14]; + __IO uint32_t MMCTGFSCCR; /* 84 */ + __IO uint32_t MMCTGFMSCCR; + uint32_t RESERVED4[5]; + __IO uint32_t MMCTGFCR; + uint32_t RESERVED5[10]; + __IO uint32_t MMCRFCECR; + __IO uint32_t MMCRFAECR; + uint32_t RESERVED6[10]; + __IO uint32_t MMCRGUFCR; + uint32_t RESERVED7[334]; + __IO uint32_t PTPTSCR; + __IO uint32_t PTPSSIR; + __IO uint32_t PTPTSHR; + __IO uint32_t PTPTSLR; + __IO uint32_t PTPTSHUR; + __IO uint32_t PTPTSLUR; + __IO uint32_t PTPTSAR; + __IO uint32_t PTPTTHR; + __IO uint32_t PTPTTLR; + __IO uint32_t RESERVED8; + __IO uint32_t PTPTSSR; + uint32_t RESERVED9[565]; + __IO uint32_t DMABMR; + __IO uint32_t DMATPDR; + __IO uint32_t DMARPDR; + __IO uint32_t DMARDLAR; + __IO uint32_t DMATDLAR; + __IO uint32_t DMASR; + __IO uint32_t DMAOMR; + __IO uint32_t DMAIER; + __IO uint32_t DMAMFBOCR; + __IO uint32_t DMARSWTR; + uint32_t RESERVED10[8]; + __IO uint32_t DMACHTDR; + __IO uint32_t DMACHRDR; + __IO uint32_t DMACHTBAR; + __IO uint32_t DMACHRBAR; +} ETH_TypeDef; + +/** + * @brief External Interrupt/Event Controller + */ + +typedef struct +{ + __IO uint32_t IMR; /*!< EXTI Interrupt mask register, Address offset: 0x00 */ + __IO uint32_t EMR; /*!< EXTI Event mask register, Address offset: 0x04 */ + __IO uint32_t RTSR; /*!< EXTI Rising trigger selection register, Address offset: 0x08 */ + __IO uint32_t FTSR; /*!< EXTI Falling trigger selection register, Address offset: 0x0C */ + __IO uint32_t SWIER; /*!< EXTI Software interrupt event register, Address offset: 0x10 */ + __IO uint32_t PR; /*!< EXTI Pending register, Address offset: 0x14 */ +} EXTI_TypeDef; + +/** 
+ * @brief FLASH Registers + */ + +typedef struct +{ + __IO uint32_t ACR; /*!< FLASH access control register, Address offset: 0x00 */ + __IO uint32_t KEYR; /*!< FLASH key register, Address offset: 0x04 */ + __IO uint32_t OPTKEYR; /*!< FLASH option key register, Address offset: 0x08 */ + __IO uint32_t SR; /*!< FLASH status register, Address offset: 0x0C */ + __IO uint32_t CR; /*!< FLASH control register, Address offset: 0x10 */ + __IO uint32_t OPTCR; /*!< FLASH option control register , Address offset: 0x14 */ + __IO uint32_t OPTCR1; /*!< FLASH option control register 1 , Address offset: 0x18 */ +} FLASH_TypeDef; + + + +/** + * @brief Flexible Memory Controller + */ + +typedef struct +{ + __IO uint32_t BTCR[8]; /*!< NOR/PSRAM chip-select control register(BCR) and chip-select timing register(BTR), Address offset: 0x00-1C */ +} FMC_Bank1_TypeDef; + +/** + * @brief Flexible Memory Controller Bank1E + */ + +typedef struct +{ + __IO uint32_t BWTR[7]; /*!< NOR/PSRAM write timing registers, Address offset: 0x104-0x11C */ +} FMC_Bank1E_TypeDef; + +/** + * @brief Flexible Memory Controller Bank3 + */ + +typedef struct +{ + __IO uint32_t PCR; /*!< NAND Flash control register, Address offset: 0x80 */ + __IO uint32_t SR; /*!< NAND Flash FIFO status and interrupt register, Address offset: 0x84 */ + __IO uint32_t PMEM; /*!< NAND Flash Common memory space timing register, Address offset: 0x88 */ + __IO uint32_t PATT; /*!< NAND Flash Attribute memory space timing register, Address offset: 0x8C */ + uint32_t RESERVED0; /*!< Reserved, 0x90 */ + __IO uint32_t ECCR; /*!< NAND Flash ECC result registers, Address offset: 0x94 */ +} FMC_Bank3_TypeDef; + +/** + * @brief Flexible Memory Controller Bank5_6 + */ + +typedef struct +{ + __IO uint32_t SDCR[2]; /*!< SDRAM Control registers , Address offset: 0x140-0x144 */ + __IO uint32_t SDTR[2]; /*!< SDRAM Timing registers , Address offset: 0x148-0x14C */ + __IO uint32_t SDCMR; /*!< SDRAM Command Mode register, Address offset: 0x150 */ + __IO uint32_t SDRTR; /*!< SDRAM Refresh Timer register, Address offset: 0x154 */ + __IO uint32_t SDSR; /*!< SDRAM Status register, Address offset: 0x158 */ +} FMC_Bank5_6_TypeDef; + + +/** + * @brief General Purpose I/O + */ + +typedef struct +{ + __IO uint32_t MODER; /*!< GPIO port mode register, Address offset: 0x00 */ + __IO uint32_t OTYPER; /*!< GPIO port output type register, Address offset: 0x04 */ + __IO uint32_t OSPEEDR; /*!< GPIO port output speed register, Address offset: 0x08 */ + __IO uint32_t PUPDR; /*!< GPIO port pull-up/pull-down register, Address offset: 0x0C */ + __IO uint32_t IDR; /*!< GPIO port input data register, Address offset: 0x10 */ + __IO uint32_t ODR; /*!< GPIO port output data register, Address offset: 0x14 */ + __IO uint32_t BSRR; /*!< GPIO port bit set/reset register, Address offset: 0x18 */ + __IO uint32_t LCKR; /*!< GPIO port configuration lock register, Address offset: 0x1C */ + __IO uint32_t AFR[2]; /*!< GPIO alternate function registers, Address offset: 0x20-0x24 */ +} GPIO_TypeDef; + +/** + * @brief System configuration controller + */ + +typedef struct +{ + __IO uint32_t MEMRMP; /*!< SYSCFG memory remap register, Address offset: 0x00 */ + __IO uint32_t PMC; /*!< SYSCFG peripheral mode configuration register, Address offset: 0x04 */ + __IO uint32_t EXTICR[4]; /*!< SYSCFG external interrupt configuration registers, Address offset: 0x08-0x14 */ + uint32_t RESERVED; /*!< Reserved, 0x18 */ + __IO uint32_t CBR; /*!< SYSCFG Class B register, Address offset: 0x1C */ + __IO uint32_t CMPCR; /*!< SYSCFG 
Compensation cell control register, Address offset: 0x20 */ +} SYSCFG_TypeDef; + +/** + * @brief Inter-integrated Circuit Interface + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< I2C Control register 1, Address offset: 0x00 */ + __IO uint32_t CR2; /*!< I2C Control register 2, Address offset: 0x04 */ + __IO uint32_t OAR1; /*!< I2C Own address 1 register, Address offset: 0x08 */ + __IO uint32_t OAR2; /*!< I2C Own address 2 register, Address offset: 0x0C */ + __IO uint32_t TIMINGR; /*!< I2C Timing register, Address offset: 0x10 */ + __IO uint32_t TIMEOUTR; /*!< I2C Timeout register, Address offset: 0x14 */ + __IO uint32_t ISR; /*!< I2C Interrupt and status register, Address offset: 0x18 */ + __IO uint32_t ICR; /*!< I2C Interrupt clear register, Address offset: 0x1C */ + __IO uint32_t PECR; /*!< I2C PEC register, Address offset: 0x20 */ + __IO uint32_t RXDR; /*!< I2C Receive data register, Address offset: 0x24 */ + __IO uint32_t TXDR; /*!< I2C Transmit data register, Address offset: 0x28 */ +} I2C_TypeDef; + +/** + * @brief Independent WATCHDOG + */ + +typedef struct +{ + __IO uint32_t KR; /*!< IWDG Key register, Address offset: 0x00 */ + __IO uint32_t PR; /*!< IWDG Prescaler register, Address offset: 0x04 */ + __IO uint32_t RLR; /*!< IWDG Reload register, Address offset: 0x08 */ + __IO uint32_t SR; /*!< IWDG Status register, Address offset: 0x0C */ + __IO uint32_t WINR; /*!< IWDG Window register, Address offset: 0x10 */ +} IWDG_TypeDef; + + +/** + * @brief LCD-TFT Display Controller + */ + +typedef struct +{ + uint32_t RESERVED0[2]; /*!< Reserved, 0x00-0x04 */ + __IO uint32_t SSCR; /*!< LTDC Synchronization Size Configuration Register, Address offset: 0x08 */ + __IO uint32_t BPCR; /*!< LTDC Back Porch Configuration Register, Address offset: 0x0C */ + __IO uint32_t AWCR; /*!< LTDC Active Width Configuration Register, Address offset: 0x10 */ + __IO uint32_t TWCR; /*!< LTDC Total Width Configuration Register, Address offset: 0x14 */ + __IO uint32_t GCR; /*!< LTDC Global Control Register, Address offset: 0x18 */ + uint32_t RESERVED1[2]; /*!< Reserved, 0x1C-0x20 */ + __IO uint32_t SRCR; /*!< LTDC Shadow Reload Configuration Register, Address offset: 0x24 */ + uint32_t RESERVED2[1]; /*!< Reserved, 0x28 */ + __IO uint32_t BCCR; /*!< LTDC Background Color Configuration Register, Address offset: 0x2C */ + uint32_t RESERVED3[1]; /*!< Reserved, 0x30 */ + __IO uint32_t IER; /*!< LTDC Interrupt Enable Register, Address offset: 0x34 */ + __IO uint32_t ISR; /*!< LTDC Interrupt Status Register, Address offset: 0x38 */ + __IO uint32_t ICR; /*!< LTDC Interrupt Clear Register, Address offset: 0x3C */ + __IO uint32_t LIPCR; /*!< LTDC Line Interrupt Position Configuration Register, Address offset: 0x40 */ + __IO uint32_t CPSR; /*!< LTDC Current Position Status Register, Address offset: 0x44 */ + __IO uint32_t CDSR; /*!< LTDC Current Display Status Register, Address offset: 0x48 */ +} LTDC_TypeDef; + +/** + * @brief LCD-TFT Display layer x Controller + */ + +typedef struct +{ + __IO uint32_t CR; /*!< LTDC Layerx Control Register Address offset: 0x84 */ + __IO uint32_t WHPCR; /*!< LTDC Layerx Window Horizontal Position Configuration Register Address offset: 0x88 */ + __IO uint32_t WVPCR; /*!< LTDC Layerx Window Vertical Position Configuration Register Address offset: 0x8C */ + __IO uint32_t CKCR; /*!< LTDC Layerx Color Keying Configuration Register Address offset: 0x90 */ + __IO uint32_t PFCR; /*!< LTDC Layerx Pixel Format Configuration Register Address offset: 0x94 */ + __IO uint32_t CACR; /*!< LTDC Layerx 
Constant Alpha Configuration Register Address offset: 0x98 */ + __IO uint32_t DCCR; /*!< LTDC Layerx Default Color Configuration Register Address offset: 0x9C */ + __IO uint32_t BFCR; /*!< LTDC Layerx Blending Factors Configuration Register Address offset: 0xA0 */ + uint32_t RESERVED0[2]; /*!< Reserved */ + __IO uint32_t CFBAR; /*!< LTDC Layerx Color Frame Buffer Address Register Address offset: 0xAC */ + __IO uint32_t CFBLR; /*!< LTDC Layerx Color Frame Buffer Length Register Address offset: 0xB0 */ + __IO uint32_t CFBLNR; /*!< LTDC Layerx ColorFrame Buffer Line Number Register Address offset: 0xB4 */ + uint32_t RESERVED1[3]; /*!< Reserved */ + __IO uint32_t CLUTWR; /*!< LTDC Layerx CLUT Write Register Address offset: 0x144 */ + +} LTDC_Layer_TypeDef; + +/** + * @brief Power Control + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< PWR power control register 1, Address offset: 0x00 */ + __IO uint32_t CSR1; /*!< PWR power control/status register 2, Address offset: 0x04 */ + __IO uint32_t CR2; /*!< PWR power control register 2, Address offset: 0x08 */ + __IO uint32_t CSR2; /*!< PWR power control/status register 2, Address offset: 0x0C */ +} PWR_TypeDef; + + +/** + * @brief Reset and Clock Control + */ + +typedef struct +{ + __IO uint32_t CR; /*!< RCC clock control register, Address offset: 0x00 */ + __IO uint32_t PLLCFGR; /*!< RCC PLL configuration register, Address offset: 0x04 */ + __IO uint32_t CFGR; /*!< RCC clock configuration register, Address offset: 0x08 */ + __IO uint32_t CIR; /*!< RCC clock interrupt register, Address offset: 0x0C */ + __IO uint32_t AHB1RSTR; /*!< RCC AHB1 peripheral reset register, Address offset: 0x10 */ + __IO uint32_t AHB2RSTR; /*!< RCC AHB2 peripheral reset register, Address offset: 0x14 */ + __IO uint32_t AHB3RSTR; /*!< RCC AHB3 peripheral reset register, Address offset: 0x18 */ + uint32_t RESERVED0; /*!< Reserved, 0x1C */ + __IO uint32_t APB1RSTR; /*!< RCC APB1 peripheral reset register, Address offset: 0x20 */ + __IO uint32_t APB2RSTR; /*!< RCC APB2 peripheral reset register, Address offset: 0x24 */ + uint32_t RESERVED1[2]; /*!< Reserved, 0x28-0x2C */ + __IO uint32_t AHB1ENR; /*!< RCC AHB1 peripheral clock register, Address offset: 0x30 */ + __IO uint32_t AHB2ENR; /*!< RCC AHB2 peripheral clock register, Address offset: 0x34 */ + __IO uint32_t AHB3ENR; /*!< RCC AHB3 peripheral clock register, Address offset: 0x38 */ + uint32_t RESERVED2; /*!< Reserved, 0x3C */ + __IO uint32_t APB1ENR; /*!< RCC APB1 peripheral clock enable register, Address offset: 0x40 */ + __IO uint32_t APB2ENR; /*!< RCC APB2 peripheral clock enable register, Address offset: 0x44 */ + uint32_t RESERVED3[2]; /*!< Reserved, 0x48-0x4C */ + __IO uint32_t AHB1LPENR; /*!< RCC AHB1 peripheral clock enable in low power mode register, Address offset: 0x50 */ + __IO uint32_t AHB2LPENR; /*!< RCC AHB2 peripheral clock enable in low power mode register, Address offset: 0x54 */ + __IO uint32_t AHB3LPENR; /*!< RCC AHB3 peripheral clock enable in low power mode register, Address offset: 0x58 */ + uint32_t RESERVED4; /*!< Reserved, 0x5C */ + __IO uint32_t APB1LPENR; /*!< RCC APB1 peripheral clock enable in low power mode register, Address offset: 0x60 */ + __IO uint32_t APB2LPENR; /*!< RCC APB2 peripheral clock enable in low power mode register, Address offset: 0x64 */ + uint32_t RESERVED5[2]; /*!< Reserved, 0x68-0x6C */ + __IO uint32_t BDCR; /*!< RCC Backup domain control register, Address offset: 0x70 */ + __IO uint32_t CSR; /*!< RCC clock control & status register, Address offset: 0x74 */ + uint32_t 
RESERVED6[2]; /*!< Reserved, 0x78-0x7C */ + __IO uint32_t SSCGR; /*!< RCC spread spectrum clock generation register, Address offset: 0x80 */ + __IO uint32_t PLLI2SCFGR; /*!< RCC PLLI2S configuration register, Address offset: 0x84 */ + __IO uint32_t PLLSAICFGR; /*!< RCC PLLSAI configuration register, Address offset: 0x88 */ + __IO uint32_t DCKCFGR1; /*!< RCC Dedicated Clocks configuration register1, Address offset: 0x8C */ + __IO uint32_t DCKCFGR2; /*!< RCC Dedicated Clocks configuration register 2, Address offset: 0x90 */ + +} RCC_TypeDef; + +/** + * @brief Real-Time Clock + */ + +typedef struct +{ + __IO uint32_t TR; /*!< RTC time register, Address offset: 0x00 */ + __IO uint32_t DR; /*!< RTC date register, Address offset: 0x04 */ + __IO uint32_t CR; /*!< RTC control register, Address offset: 0x08 */ + __IO uint32_t ISR; /*!< RTC initialization and status register, Address offset: 0x0C */ + __IO uint32_t PRER; /*!< RTC prescaler register, Address offset: 0x10 */ + __IO uint32_t WUTR; /*!< RTC wakeup timer register, Address offset: 0x14 */ + uint32_t reserved; /*!< Reserved */ + __IO uint32_t ALRMAR; /*!< RTC alarm A register, Address offset: 0x1C */ + __IO uint32_t ALRMBR; /*!< RTC alarm B register, Address offset: 0x20 */ + __IO uint32_t WPR; /*!< RTC write protection register, Address offset: 0x24 */ + __IO uint32_t SSR; /*!< RTC sub second register, Address offset: 0x28 */ + __IO uint32_t SHIFTR; /*!< RTC shift control register, Address offset: 0x2C */ + __IO uint32_t TSTR; /*!< RTC time stamp time register, Address offset: 0x30 */ + __IO uint32_t TSDR; /*!< RTC time stamp date register, Address offset: 0x34 */ + __IO uint32_t TSSSR; /*!< RTC time-stamp sub second register, Address offset: 0x38 */ + __IO uint32_t CALR; /*!< RTC calibration register, Address offset: 0x3C */ + __IO uint32_t TAMPCR; /*!< RTC tamper configuration register, Address offset: 0x40 */ + __IO uint32_t ALRMASSR; /*!< RTC alarm A sub second register, Address offset: 0x44 */ + __IO uint32_t ALRMBSSR; /*!< RTC alarm B sub second register, Address offset: 0x48 */ + __IO uint32_t OR; /*!< RTC option register, Address offset: 0x4C */ + __IO uint32_t BKP0R; /*!< RTC backup register 0, Address offset: 0x50 */ + __IO uint32_t BKP1R; /*!< RTC backup register 1, Address offset: 0x54 */ + __IO uint32_t BKP2R; /*!< RTC backup register 2, Address offset: 0x58 */ + __IO uint32_t BKP3R; /*!< RTC backup register 3, Address offset: 0x5C */ + __IO uint32_t BKP4R; /*!< RTC backup register 4, Address offset: 0x60 */ + __IO uint32_t BKP5R; /*!< RTC backup register 5, Address offset: 0x64 */ + __IO uint32_t BKP6R; /*!< RTC backup register 6, Address offset: 0x68 */ + __IO uint32_t BKP7R; /*!< RTC backup register 7, Address offset: 0x6C */ + __IO uint32_t BKP8R; /*!< RTC backup register 8, Address offset: 0x70 */ + __IO uint32_t BKP9R; /*!< RTC backup register 9, Address offset: 0x74 */ + __IO uint32_t BKP10R; /*!< RTC backup register 10, Address offset: 0x78 */ + __IO uint32_t BKP11R; /*!< RTC backup register 11, Address offset: 0x7C */ + __IO uint32_t BKP12R; /*!< RTC backup register 12, Address offset: 0x80 */ + __IO uint32_t BKP13R; /*!< RTC backup register 13, Address offset: 0x84 */ + __IO uint32_t BKP14R; /*!< RTC backup register 14, Address offset: 0x88 */ + __IO uint32_t BKP15R; /*!< RTC backup register 15, Address offset: 0x8C */ + __IO uint32_t BKP16R; /*!< RTC backup register 16, Address offset: 0x90 */ + __IO uint32_t BKP17R; /*!< RTC backup register 17, Address offset: 0x94 */ + __IO uint32_t BKP18R; /*!< RTC backup 
register 18, Address offset: 0x98 */ + __IO uint32_t BKP19R; /*!< RTC backup register 19, Address offset: 0x9C */ + __IO uint32_t BKP20R; /*!< RTC backup register 20, Address offset: 0xA0 */ + __IO uint32_t BKP21R; /*!< RTC backup register 21, Address offset: 0xA4 */ + __IO uint32_t BKP22R; /*!< RTC backup register 22, Address offset: 0xA8 */ + __IO uint32_t BKP23R; /*!< RTC backup register 23, Address offset: 0xAC */ + __IO uint32_t BKP24R; /*!< RTC backup register 24, Address offset: 0xB0 */ + __IO uint32_t BKP25R; /*!< RTC backup register 25, Address offset: 0xB4 */ + __IO uint32_t BKP26R; /*!< RTC backup register 26, Address offset: 0xB8 */ + __IO uint32_t BKP27R; /*!< RTC backup register 27, Address offset: 0xBC */ + __IO uint32_t BKP28R; /*!< RTC backup register 28, Address offset: 0xC0 */ + __IO uint32_t BKP29R; /*!< RTC backup register 29, Address offset: 0xC4 */ + __IO uint32_t BKP30R; /*!< RTC backup register 30, Address offset: 0xC8 */ + __IO uint32_t BKP31R; /*!< RTC backup register 31, Address offset: 0xCC */ +} RTC_TypeDef; + + +/** + * @brief Serial Audio Interface + */ + +typedef struct +{ + __IO uint32_t GCR; /*!< SAI global configuration register, Address offset: 0x00 */ +} SAI_TypeDef; + +typedef struct +{ + __IO uint32_t CR1; /*!< SAI block x configuration register 1, Address offset: 0x04 */ + __IO uint32_t CR2; /*!< SAI block x configuration register 2, Address offset: 0x08 */ + __IO uint32_t FRCR; /*!< SAI block x frame configuration register, Address offset: 0x0C */ + __IO uint32_t SLOTR; /*!< SAI block x slot register, Address offset: 0x10 */ + __IO uint32_t IMR; /*!< SAI block x interrupt mask register, Address offset: 0x14 */ + __IO uint32_t SR; /*!< SAI block x status register, Address offset: 0x18 */ + __IO uint32_t CLRFR; /*!< SAI block x clear flag register, Address offset: 0x1C */ + __IO uint32_t DR; /*!< SAI block x data register, Address offset: 0x20 */ +} SAI_Block_TypeDef; + +/** + * @brief SPDIF-RX Interface + */ + +typedef struct +{ + __IO uint32_t CR; /*!< Control register, Address offset: 0x00 */ + __IO uint32_t IMR; /*!< Interrupt mask register, Address offset: 0x04 */ + __IO uint32_t SR; /*!< Status register, Address offset: 0x08 */ + __IO uint32_t IFCR; /*!< Interrupt Flag Clear register, Address offset: 0x0C */ + __IO uint32_t DR; /*!< Data input register, Address offset: 0x10 */ + __IO uint32_t CSR; /*!< Channel Status register, Address offset: 0x14 */ + __IO uint32_t DIR; /*!< Debug Information register, Address offset: 0x18 */ +} SPDIFRX_TypeDef; + +/** + * @brief SD host Interface + */ + +typedef struct +{ + __IO uint32_t POWER; /*!< SDMMC power control register, Address offset: 0x00 */ + __IO uint32_t CLKCR; /*!< SDMMClock control register, Address offset: 0x04 */ + __IO uint32_t ARG; /*!< SDMMC argument register, Address offset: 0x08 */ + __IO uint32_t CMD; /*!< SDMMC command register, Address offset: 0x0C */ + __I uint32_t RESPCMD; /*!< SDMMC command response register, Address offset: 0x10 */ + __I uint32_t RESP1; /*!< SDMMC response 1 register, Address offset: 0x14 */ + __I uint32_t RESP2; /*!< SDMMC response 2 register, Address offset: 0x18 */ + __I uint32_t RESP3; /*!< SDMMC response 3 register, Address offset: 0x1C */ + __I uint32_t RESP4; /*!< SDMMC response 4 register, Address offset: 0x20 */ + __IO uint32_t DTIMER; /*!< SDMMC data timer register, Address offset: 0x24 */ + __IO uint32_t DLEN; /*!< SDMMC data length register, Address offset: 0x28 */ + __IO uint32_t DCTRL; /*!< SDMMC data control register, Address offset: 0x2C */ + __I 
uint32_t DCOUNT; /*!< SDMMC data counter register, Address offset: 0x30 */ + __I uint32_t STA; /*!< SDMMC status register, Address offset: 0x34 */ + __IO uint32_t ICR; /*!< SDMMC interrupt clear register, Address offset: 0x38 */ + __IO uint32_t MASK; /*!< SDMMC mask register, Address offset: 0x3C */ + uint32_t RESERVED0[2]; /*!< Reserved, 0x40-0x44 */ + __I uint32_t FIFOCNT; /*!< SDMMC FIFO counter register, Address offset: 0x48 */ + uint32_t RESERVED1[13]; /*!< Reserved, 0x4C-0x7C */ + __IO uint32_t FIFO; /*!< SDMMC data FIFO register, Address offset: 0x80 */ +} SDMMC_TypeDef; + +/** + * @brief Serial Peripheral Interface + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< SPI control register 1 (not used in I2S mode), Address offset: 0x00 */ + __IO uint32_t CR2; /*!< SPI control register 2, Address offset: 0x04 */ + __IO uint32_t SR; /*!< SPI status register, Address offset: 0x08 */ + __IO uint32_t DR; /*!< SPI data register, Address offset: 0x0C */ + __IO uint32_t CRCPR; /*!< SPI CRC polynomial register (not used in I2S mode), Address offset: 0x10 */ + __IO uint32_t RXCRCR; /*!< SPI RX CRC register (not used in I2S mode), Address offset: 0x14 */ + __IO uint32_t TXCRCR; /*!< SPI TX CRC register (not used in I2S mode), Address offset: 0x18 */ + __IO uint32_t I2SCFGR; /*!< SPI_I2S configuration register, Address offset: 0x1C */ + __IO uint32_t I2SPR; /*!< SPI_I2S prescaler register, Address offset: 0x20 */ +} SPI_TypeDef; + +/** + * @brief QUAD Serial Peripheral Interface + */ + +typedef struct +{ + __IO uint32_t CR; /*!< QUADSPI Control register, Address offset: 0x00 */ + __IO uint32_t DCR; /*!< QUADSPI Device Configuration register, Address offset: 0x04 */ + __IO uint32_t SR; /*!< QUADSPI Status register, Address offset: 0x08 */ + __IO uint32_t FCR; /*!< QUADSPI Flag Clear register, Address offset: 0x0C */ + __IO uint32_t DLR; /*!< QUADSPI Data Length register, Address offset: 0x10 */ + __IO uint32_t CCR; /*!< QUADSPI Communication Configuration register, Address offset: 0x14 */ + __IO uint32_t AR; /*!< QUADSPI Address register, Address offset: 0x18 */ + __IO uint32_t ABR; /*!< QUADSPI Alternate Bytes register, Address offset: 0x1C */ + __IO uint32_t DR; /*!< QUADSPI Data register, Address offset: 0x20 */ + __IO uint32_t PSMKR; /*!< QUADSPI Polling Status Mask register, Address offset: 0x24 */ + __IO uint32_t PSMAR; /*!< QUADSPI Polling Status Match register, Address offset: 0x28 */ + __IO uint32_t PIR; /*!< QUADSPI Polling Interval register, Address offset: 0x2C */ + __IO uint32_t LPTR; /*!< QUADSPI Low Power Timeout register, Address offset: 0x30 */ +} QUADSPI_TypeDef; + +/** + * @brief TIM + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< TIM control register 1, Address offset: 0x00 */ + __IO uint32_t CR2; /*!< TIM control register 2, Address offset: 0x04 */ + __IO uint32_t SMCR; /*!< TIM slave mode control register, Address offset: 0x08 */ + __IO uint32_t DIER; /*!< TIM DMA/interrupt enable register, Address offset: 0x0C */ + __IO uint32_t SR; /*!< TIM status register, Address offset: 0x10 */ + __IO uint32_t EGR; /*!< TIM event generation register, Address offset: 0x14 */ + __IO uint32_t CCMR1; /*!< TIM capture/compare mode register 1, Address offset: 0x18 */ + __IO uint32_t CCMR2; /*!< TIM capture/compare mode register 2, Address offset: 0x1C */ + __IO uint32_t CCER; /*!< TIM capture/compare enable register, Address offset: 0x20 */ + __IO uint32_t CNT; /*!< TIM counter register, Address offset: 0x24 */ + __IO uint32_t PSC; /*!< TIM prescaler, Address offset: 0x28 */ + __IO 
uint32_t ARR; /*!< TIM auto-reload register, Address offset: 0x2C */ + __IO uint32_t RCR; /*!< TIM repetition counter register, Address offset: 0x30 */ + __IO uint32_t CCR1; /*!< TIM capture/compare register 1, Address offset: 0x34 */ + __IO uint32_t CCR2; /*!< TIM capture/compare register 2, Address offset: 0x38 */ + __IO uint32_t CCR3; /*!< TIM capture/compare register 3, Address offset: 0x3C */ + __IO uint32_t CCR4; /*!< TIM capture/compare register 4, Address offset: 0x40 */ + __IO uint32_t BDTR; /*!< TIM break and dead-time register, Address offset: 0x44 */ + __IO uint32_t DCR; /*!< TIM DMA control register, Address offset: 0x48 */ + __IO uint32_t DMAR; /*!< TIM DMA address for full transfer, Address offset: 0x4C */ + __IO uint32_t OR; /*!< TIM option register, Address offset: 0x50 */ + __IO uint32_t CCMR3; /*!< TIM capture/compare mode register 3, Address offset: 0x54 */ + __IO uint32_t CCR5; /*!< TIM capture/compare mode register5, Address offset: 0x58 */ + __IO uint32_t CCR6; /*!< TIM capture/compare mode register6, Address offset: 0x5C */ + __IO uint32_t AF1; /*!< TIM Alternate function option register 1, Address offset: 0x60 */ + __IO uint32_t AF2; /*!< TIM Alternate function option register 2, Address offset: 0x64 */ + +} TIM_TypeDef; + +/** + * @brief LPTIMIMER + */ +typedef struct +{ + __IO uint32_t ISR; /*!< LPTIM Interrupt and Status register, Address offset: 0x00 */ + __IO uint32_t ICR; /*!< LPTIM Interrupt Clear register, Address offset: 0x04 */ + __IO uint32_t IER; /*!< LPTIM Interrupt Enable register, Address offset: 0x08 */ + __IO uint32_t CFGR; /*!< LPTIM Configuration register, Address offset: 0x0C */ + __IO uint32_t CR; /*!< LPTIM Control register, Address offset: 0x10 */ + __IO uint32_t CMP; /*!< LPTIM Compare register, Address offset: 0x14 */ + __IO uint32_t ARR; /*!< LPTIM Autoreload register, Address offset: 0x18 */ + __IO uint32_t CNT; /*!< LPTIM Counter register, Address offset: 0x1C */ +} LPTIM_TypeDef; + + +/** + * @brief Universal Synchronous Asynchronous Receiver Transmitter + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< USART Control register 1, Address offset: 0x00 */ + __IO uint32_t CR2; /*!< USART Control register 2, Address offset: 0x04 */ + __IO uint32_t CR3; /*!< USART Control register 3, Address offset: 0x08 */ + __IO uint32_t BRR; /*!< USART Baud rate register, Address offset: 0x0C */ + __IO uint32_t GTPR; /*!< USART Guard time and prescaler register, Address offset: 0x10 */ + __IO uint32_t RTOR; /*!< USART Receiver Time Out register, Address offset: 0x14 */ + __IO uint32_t RQR; /*!< USART Request register, Address offset: 0x18 */ + __IO uint32_t ISR; /*!< USART Interrupt and status register, Address offset: 0x1C */ + __IO uint32_t ICR; /*!< USART Interrupt flag Clear register, Address offset: 0x20 */ + __IO uint32_t RDR; /*!< USART Receive Data register, Address offset: 0x24 */ + __IO uint32_t TDR; /*!< USART Transmit Data register, Address offset: 0x28 */ +} USART_TypeDef; + + +/** + * @brief Window WATCHDOG + */ + +typedef struct +{ + __IO uint32_t CR; /*!< WWDG Control register, Address offset: 0x00 */ + __IO uint32_t CFR; /*!< WWDG Configuration register, Address offset: 0x04 */ + __IO uint32_t SR; /*!< WWDG Status register, Address offset: 0x08 */ +} WWDG_TypeDef; + + +/** + * @brief RNG + */ + +typedef struct +{ + __IO uint32_t CR; /*!< RNG control register, Address offset: 0x00 */ + __IO uint32_t SR; /*!< RNG status register, Address offset: 0x04 */ + __IO uint32_t DR; /*!< RNG data register, Address offset: 0x08 */ +} RNG_TypeDef; + 
+/** + * @} + */ + +/** + * @brief USB_OTG_Core_Registers + */ +typedef struct +{ + __IO uint32_t GOTGCTL; /*!< USB_OTG Control and Status Register 000h */ + __IO uint32_t GOTGINT; /*!< USB_OTG Interrupt Register 004h */ + __IO uint32_t GAHBCFG; /*!< Core AHB Configuration Register 008h */ + __IO uint32_t GUSBCFG; /*!< Core USB Configuration Register 00Ch */ + __IO uint32_t GRSTCTL; /*!< Core Reset Register 010h */ + __IO uint32_t GINTSTS; /*!< Core Interrupt Register 014h */ + __IO uint32_t GINTMSK; /*!< Core Interrupt Mask Register 018h */ + __IO uint32_t GRXSTSR; /*!< Receive Sts Q Read Register 01Ch */ + __IO uint32_t GRXSTSP; /*!< Receive Sts Q Read & POP Register 020h */ + __IO uint32_t GRXFSIZ; /*!< Receive FIFO Size Register 024h */ + __IO uint32_t DIEPTXF0_HNPTXFSIZ; /*!< EP0 / Non Periodic Tx FIFO Size Register 028h */ + __IO uint32_t HNPTXSTS; /*!< Non Periodic Tx FIFO/Queue Sts reg 02Ch */ + uint32_t Reserved30[2]; /*!< Reserved 030h */ + __IO uint32_t GCCFG; /*!< General Purpose IO Register 038h */ + __IO uint32_t CID; /*!< User ID Register 03Ch */ + uint32_t Reserved5[3]; /*!< Reserved 040h-048h */ + __IO uint32_t GHWCFG3; /*!< User HW config3 04Ch */ + uint32_t Reserved6; /*!< Reserved 050h */ + __IO uint32_t GLPMCFG; /*!< LPM Register 054h */ + uint32_t Reserved7; /*!< Reserved 058h */ + __IO uint32_t GDFIFOCFG; /*!< DFIFO Software Config Register 05Ch */ + uint32_t Reserved43[40]; /*!< Reserved 60h-0FFh */ + __IO uint32_t HPTXFSIZ; /*!< Host Periodic Tx FIFO Size Reg 100h */ + __IO uint32_t DIEPTXF[0x0F]; /*!< dev Periodic Transmit FIFO 104h-13Ch */ +} USB_OTG_GlobalTypeDef; + + +/** + * @brief USB_OTG_device_Registers + */ +typedef struct +{ + __IO uint32_t DCFG; /*!< dev Configuration Register 800h */ + __IO uint32_t DCTL; /*!< dev Control Register 804h */ + __IO uint32_t DSTS; /*!< dev Status Register (RO) 808h */ + uint32_t Reserved0C; /*!< Reserved 80Ch */ + __IO uint32_t DIEPMSK; /*!< dev IN Endpoint Mask 810h */ + __IO uint32_t DOEPMSK; /*!< dev OUT Endpoint Mask 814h */ + __IO uint32_t DAINT; /*!< dev All Endpoints Itr Reg 818h */ + __IO uint32_t DAINTMSK; /*!< dev All Endpoints Itr Mask 81Ch */ + uint32_t Reserved20; /*!< Reserved 820h */ + uint32_t Reserved9; /*!< Reserved 824h */ + __IO uint32_t DVBUSDIS; /*!< dev VBUS discharge Register 828h */ + __IO uint32_t DVBUSPULSE; /*!< dev VBUS Pulse Register 82Ch */ + __IO uint32_t DTHRCTL; /*!< dev threshold 830h */ + __IO uint32_t DIEPEMPMSK; /*!< dev empty msk 834h */ + __IO uint32_t DEACHINT; /*!< dedicated EP interrupt 838h */ + __IO uint32_t DEACHMSK; /*!< dedicated EP msk 83Ch */ + uint32_t Reserved40; /*!< dedicated EP mask 840h */ + __IO uint32_t DINEP1MSK; /*!< dedicated EP mask 844h */ + uint32_t Reserved44[15]; /*!< Reserved 844-87Ch */ + __IO uint32_t DOUTEP1MSK; /*!< dedicated EP msk 884h */ +} USB_OTG_DeviceTypeDef; + + +/** + * @brief USB_OTG_IN_Endpoint-Specific_Register + */ +typedef struct +{ + __IO uint32_t DIEPCTL; /*!< dev IN Endpoint Control Reg 900h + (ep_num * 20h) + 00h */ + uint32_t Reserved04; /*!< Reserved 900h + (ep_num * 20h) + 04h */ + __IO uint32_t DIEPINT; /*!< dev IN Endpoint Itr Reg 900h + (ep_num * 20h) + 08h */ + uint32_t Reserved0C; /*!< Reserved 900h + (ep_num * 20h) + 0Ch */ + __IO uint32_t DIEPTSIZ; /*!< IN Endpoint Txfer Size 900h + (ep_num * 20h) + 10h */ + __IO uint32_t DIEPDMA; /*!< IN Endpoint DMA Address Reg 900h + (ep_num * 20h) + 14h */ + __IO uint32_t DTXFSTS; /*!< IN Endpoint Tx FIFO Status Reg 900h + (ep_num * 20h) + 18h */ + uint32_t Reserved18; /*!< Reserved 
900h+(ep_num*20h)+1Ch-900h+ (ep_num * 20h) + 1Ch */ +} USB_OTG_INEndpointTypeDef; + + +/** + * @brief USB_OTG_OUT_Endpoint-Specific_Registers + */ +typedef struct +{ + __IO uint32_t DOEPCTL; /*!< dev OUT Endpoint Control Reg B00h + (ep_num * 20h) + 00h */ + uint32_t Reserved04; /*!< Reserved B00h + (ep_num * 20h) + 04h */ + __IO uint32_t DOEPINT; /*!< dev OUT Endpoint Itr Reg B00h + (ep_num * 20h) + 08h */ + uint32_t Reserved0C; /*!< Reserved B00h + (ep_num * 20h) + 0Ch */ + __IO uint32_t DOEPTSIZ; /*!< dev OUT Endpoint Txfer Size B00h + (ep_num * 20h) + 10h */ + __IO uint32_t DOEPDMA; /*!< dev OUT Endpoint DMA Address B00h + (ep_num * 20h) + 14h */ + uint32_t Reserved18[2]; /*!< Reserved B00h + (ep_num * 20h) + 18h - B00h + (ep_num * 20h) + 1Ch */ +} USB_OTG_OUTEndpointTypeDef; + + +/** + * @brief USB_OTG_Host_Mode_Register_Structures + */ +typedef struct +{ + __IO uint32_t HCFG; /*!< Host Configuration Register 400h */ + __IO uint32_t HFIR; /*!< Host Frame Interval Register 404h */ + __IO uint32_t HFNUM; /*!< Host Frame Nbr/Frame Remaining 408h */ + uint32_t Reserved40C; /*!< Reserved 40Ch */ + __IO uint32_t HPTXSTS; /*!< Host Periodic Tx FIFO/ Queue Status 410h */ + __IO uint32_t HAINT; /*!< Host All Channels Interrupt Register 414h */ + __IO uint32_t HAINTMSK; /*!< Host All Channels Interrupt Mask 418h */ +} USB_OTG_HostTypeDef; + +/** + * @brief USB_OTG_Host_Channel_Specific_Registers + */ +typedef struct +{ + __IO uint32_t HCCHAR; /*!< Host Channel Characteristics Register 500h */ + __IO uint32_t HCSPLT; /*!< Host Channel Split Control Register 504h */ + __IO uint32_t HCINT; /*!< Host Channel Interrupt Register 508h */ + __IO uint32_t HCINTMSK; /*!< Host Channel Interrupt Mask Register 50Ch */ + __IO uint32_t HCTSIZ; /*!< Host Channel Transfer Size Register 510h */ + __IO uint32_t HCDMA; /*!< Host Channel DMA Address Register 514h */ + uint32_t Reserved[2]; /*!< Reserved */ +} USB_OTG_HostChannelTypeDef; +/** + * @} + */ + +/** + * @brief JPEG Codec + */ +typedef struct +{ + __IO uint32_t CONFR0; /*!< JPEG Codec Control Register (JPEG_CONFR0), Address offset: 00h */ + __IO uint32_t CONFR1; /*!< JPEG Codec Control Register (JPEG_CONFR1), Address offset: 04h */ + __IO uint32_t CONFR2; /*!< JPEG Codec Control Register (JPEG_CONFR2), Address offset: 08h */ + __IO uint32_t CONFR3; /*!< JPEG Codec Control Register (JPEG_CONFR3), Address offset: 0Ch */ + __IO uint32_t CONFR4; /*!< JPEG Codec Control Register (JPEG_CONFR4), Address offset: 10h */ + __IO uint32_t CONFR5; /*!< JPEG Codec Control Register (JPEG_CONFR5), Address offset: 14h */ + __IO uint32_t CONFR6; /*!< JPEG Codec Control Register (JPEG_CONFR6), Address offset: 18h */ + __IO uint32_t CONFR7; /*!< JPEG Codec Control Register (JPEG_CONFR7), Address offset: 1Ch */ + uint32_t Reserved20[4]; /* Reserved Address offset: 20h-2Ch */ + __IO uint32_t CR; /*!< JPEG Control Register (JPEG_CR), Address offset: 30h */ + __IO uint32_t SR; /*!< JPEG Status Register (JPEG_SR), Address offset: 34h */ + __IO uint32_t CFR; /*!< JPEG Clear Flag Register (JPEG_CFR), Address offset: 38h */ + uint32_t Reserved3c; /* Reserved Address offset: 3Ch */ + __IO uint32_t DIR; /*!< JPEG Data Input Register (JPEG_DIR), Address offset: 40h */ + __IO uint32_t DOR; /*!< JPEG Data Output Register (JPEG_DOR), Address offset: 44h */ + uint32_t Reserved48[2]; /* Reserved Address offset: 48h-4Ch */ + __IO uint32_t QMEM0[16]; /*!< JPEG quantization tables 0, Address offset: 50h-8Ch */ + __IO uint32_t QMEM1[16]; /*!< JPEG quantization tables 1, Address offset: 90h-CCh 
*/ + __IO uint32_t QMEM2[16]; /*!< JPEG quantization tables 2, Address offset: D0h-10Ch */ + __IO uint32_t QMEM3[16]; /*!< JPEG quantization tables 3, Address offset: 110h-14Ch */ + __IO uint32_t HUFFMIN[16]; /*!< JPEG HuffMin tables, Address offset: 150h-18Ch */ + __IO uint32_t HUFFBASE[32]; /*!< JPEG HuffSymb tables, Address offset: 190h-20Ch */ + __IO uint32_t HUFFSYMB[84]; /*!< JPEG HUFFSYMB tables, Address offset: 210h-35Ch */ + __IO uint32_t DHTMEM[103]; /*!< JPEG DHTMem tables, Address offset: 360h-4F8h */ + uint32_t Reserved4FC; /* Reserved Address offset: 4FCh */ + __IO uint32_t HUFFENC_AC0[88]; /*!< JPEG encoder, AC Huffman table 0, Address offset: 500h-65Ch */ + __IO uint32_t HUFFENC_AC1[88]; /*!< JPEG encoder, AC Huffman table 1, Address offset: 660h-7BCh */ + __IO uint32_t HUFFENC_DC0[8]; /*!< JPEG encoder, DC Huffman table 0, Address offset: 7C0h-7DCh */ + __IO uint32_t HUFFENC_DC1[8]; /*!< JPEG encoder, DC Huffman table 1, Address offset: 7E0h-7FCh */ + +} JPEG_TypeDef; + +/** + * @brief MDIOS + */ + +typedef struct +{ + __IO uint32_t CR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 00h */ + __IO uint32_t WRFR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 04h */ + __IO uint32_t CWRFR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 08h */ + __IO uint32_t RDFR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 0Ch */ + __IO uint32_t CRDFR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 10h */ + __IO uint32_t SR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 14h */ + __IO uint32_t CLRFR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 18h */ + uint32_t RESERVED0[57]; /* Reserved Address offset: 1Ch */ + __IO uint32_t DINR0; /*!< MDIOS Input Data Register (MDIOS_DINR0), Address offset: 100h */ + __IO uint32_t DINR1; /*!< MDIOS Input Data Register (MDIOS_DINR1), Address offset: 104h */ + __IO uint32_t DINR2; /*!< MDIOS Input Data Register (MDIOS_DINR2), Address offset: 108h */ + __IO uint32_t DINR3; /*!< MDIOS Input Data Register (MDIOS_DINR3), Address offset: 10Ch */ + __IO uint32_t DINR4; /*!< MDIOS Input Data Register (MDIOS_DINR4), Address offset: 110h */ + __IO uint32_t DINR5; /*!< MDIOS Input Data Register (MDIOS_DINR5), Address offset: 114h */ + __IO uint32_t DINR6; /*!< MDIOS Input Data Register (MDIOS_DINR6), Address offset: 118h */ + __IO uint32_t DINR7; /*!< MDIOS Input Data Register (MDIOS_DINR7), Address offset: 11Ch */ + __IO uint32_t DINR8; /*!< MDIOS Input Data Register (MDIOS_DINR8), Address offset: 120h */ + __IO uint32_t DINR9; /*!< MDIOS Input Data Register (MDIOS_DINR9), Address offset: 124h */ + __IO uint32_t DINR10; /*!< MDIOS Input Data Register (MDIOS_DINR10), Address offset: 128h */ + __IO uint32_t DINR11; /*!< MDIOS Input Data Register (MDIOS_DINR11), Address offset: 12Ch */ + __IO uint32_t DINR12; /*!< MDIOS Input Data Register (MDIOS_DINR12), Address offset: 130h */ + __IO uint32_t DINR13; /*!< MDIOS Input Data Register (MDIOS_DINR13), Address offset: 134h */ + __IO uint32_t DINR14; /*!< MDIOS Input Data Register (MDIOS_DINR14), Address offset: 138h */ + __IO uint32_t DINR15; /*!< MDIOS Input Data Register (MDIOS_DINR15), Address offset: 13Ch */ + __IO uint32_t DINR16; /*!< MDIOS Input Data Register (MDIOS_DINR16), Address offset: 140h */ + __IO uint32_t DINR17; /*!< MDIOS Input Data Register (MDIOS_DINR17), Address offset: 144h */ + __IO uint32_t DINR18; /*!< MDIOS Input Data Register (MDIOS_DINR18), Address offset: 148h */ + __IO uint32_t 
DINR19; /*!< MDIOS Input Data Register (MDIOS_DINR19), Address offset: 14Ch */ + __IO uint32_t DINR20; /*!< MDIOS Input Data Register (MDIOS_DINR20), Address offset: 150h */ + __IO uint32_t DINR21; /*!< MDIOS Input Data Register (MDIOS_DINR21), Address offset: 154h */ + __IO uint32_t DINR22; /*!< MDIOS Input Data Register (MDIOS_DINR22), Address offset: 158h */ + __IO uint32_t DINR23; /*!< MDIOS Input Data Register (MDIOS_DINR23), Address offset: 15Ch */ + __IO uint32_t DINR24; /*!< MDIOS Input Data Register (MDIOS_DINR24), Address offset: 160h */ + __IO uint32_t DINR25; /*!< MDIOS Input Data Register (MDIOS_DINR25), Address offset: 164h */ + __IO uint32_t DINR26; /*!< MDIOS Input Data Register (MDIOS_DINR26), Address offset: 168h */ + __IO uint32_t DINR27; /*!< MDIOS Input Data Register (MDIOS_DINR27), Address offset: 16Ch */ + __IO uint32_t DINR28; /*!< MDIOS Input Data Register (MDIOS_DINR28), Address offset: 170h */ + __IO uint32_t DINR29; /*!< MDIOS Input Data Register (MDIOS_DINR29), Address offset: 174h */ + __IO uint32_t DINR30; /*!< MDIOS Input Data Register (MDIOS_DINR30), Address offset: 178h */ + __IO uint32_t DINR31; /*!< MDIOS Input Data Register (MDIOS_DINR31), Address offset: 17Ch */ + __IO uint32_t DOUTR0; /*!< MDIOS Output Data Register (MDIOS_DOUTR0), Address offset: 180h */ + __IO uint32_t DOUTR1; /*!< MDIOS Output Data Register (MDIOS_DOUTR1), Address offset: 184h */ + __IO uint32_t DOUTR2; /*!< MDIOS Output Data Register (MDIOS_DOUTR2), Address offset: 188h */ + __IO uint32_t DOUTR3; /*!< MDIOS Output Data Register (MDIOS_DOUTR3), Address offset: 18Ch */ + __IO uint32_t DOUTR4; /*!< MDIOS Output Data Register (MDIOS_DOUTR4), Address offset: 190h */ + __IO uint32_t DOUTR5; /*!< MDIOS Output Data Register (MDIOS_DOUTR5), Address offset: 194h */ + __IO uint32_t DOUTR6; /*!< MDIOS Output Data Register (MDIOS_DOUTR6), Address offset: 198h */ + __IO uint32_t DOUTR7; /*!< MDIOS Output Data Register (MDIOS_DOUTR7), Address offset: 19Ch */ + __IO uint32_t DOUTR8; /*!< MDIOS Output Data Register (MDIOS_DOUTR8), Address offset: 1A0h */ + __IO uint32_t DOUTR9; /*!< MDIOS Output Data Register (MDIOS_DOUTR9), Address offset: 1A4h */ + __IO uint32_t DOUTR10; /*!< MDIOS Output Data Register (MDIOS_DOUTR10), Address offset: 1A8h */ + __IO uint32_t DOUTR11; /*!< MDIOS Output Data Register (MDIOS_DOUTR11), Address offset: 1ACh */ + __IO uint32_t DOUTR12; /*!< MDIOS Output Data Register (MDIOS_DOUTR12), Address offset: 1B0h */ + __IO uint32_t DOUTR13; /*!< MDIOS Output Data Register (MDIOS_DOUTR13), Address offset: 1B4h */ + __IO uint32_t DOUTR14; /*!< MDIOS Output Data Register (MDIOS_DOUTR14), Address offset: 1B8h */ + __IO uint32_t DOUTR15; /*!< MDIOS Output Data Register (MDIOS_DOUTR15), Address offset: 1BCh */ + __IO uint32_t DOUTR16; /*!< MDIOS Output Data Register (MDIOS_DOUTR16), Address offset: 1C0h */ + __IO uint32_t DOUTR17; /*!< MDIOS Output Data Register (MDIOS_DOUTR17), Address offset: 1C4h */ + __IO uint32_t DOUTR18; /*!< MDIOS Output Data Register (MDIOS_DOUTR18), Address offset: 1C8h */ + __IO uint32_t DOUTR19; /*!< MDIOS Output Data Register (MDIOS_DOUTR19), Address offset: 1CCh */ + __IO uint32_t DOUTR20; /*!< MDIOS Output Data Register (MDIOS_DOUTR20), Address offset: 1D0h */ + __IO uint32_t DOUTR21; /*!< MDIOS Output Data Register (MDIOS_DOUTR21), Address offset: 1D4h */ + __IO uint32_t DOUTR22; /*!< MDIOS Output Data Register (MDIOS_DOUTR22), Address offset: 1D8h */ + __IO uint32_t DOUTR23; /*!< MDIOS Output Data Register (MDIOS_DOUTR23), Address offset: 1DCh */ + 
__IO uint32_t DOUTR24; /*!< MDIOS Output Data Register (MDIOS_DOUTR24), Address offset: 1E0h */ + __IO uint32_t DOUTR25; /*!< MDIOS Output Data Register (MDIOS_DOUTR25), Address offset: 1E4h */ + __IO uint32_t DOUTR26; /*!< MDIOS Output Data Register (MDIOS_DOUTR26), Address offset: 1E8h */ + __IO uint32_t DOUTR27; /*!< MDIOS Output Data Register (MDIOS_DOUTR27), Address offset: 1ECh */ + __IO uint32_t DOUTR28; /*!< MDIOS Output Data Register (MDIOS_DOUTR28), Address offset: 1F0h */ + __IO uint32_t DOUTR29; /*!< MDIOS Output Data Register (MDIOS_DOUTR29), Address offset: 1F4h */ + __IO uint32_t DOUTR30; /*!< MDIOS Output Data Register (MDIOS_DOUTR30), Address offset: 1F8h */ + __IO uint32_t DOUTR31; /*!< MDIOS Output Data Register (MDIOS_DOUTR31), Address offset: 1FCh */ +} MDIOS_TypeDef; + +/** + * @brief DSI Controller + */ + +typedef struct +{ + __IO uint32_t VR; /*!< DSI Host Version Register, Address offset: 0x00 */ + __IO uint32_t CR; /*!< DSI Host Control Register, Address offset: 0x04 */ + __IO uint32_t CCR; /*!< DSI HOST Clock Control Register, Address offset: 0x08 */ + __IO uint32_t LVCIDR; /*!< DSI Host LTDC VCID Register, Address offset: 0x0C */ + __IO uint32_t LCOLCR; /*!< DSI Host LTDC Color Coding Register, Address offset: 0x10 */ + __IO uint32_t LPCR; /*!< DSI Host LTDC Polarity Configuration Register, Address offset: 0x14 */ + __IO uint32_t LPMCR; /*!< DSI Host Low-Power Mode Configuration Register, Address offset: 0x18 */ + uint32_t RESERVED0[4]; /*!< Reserved, 0x1C - 0x2B */ + __IO uint32_t PCR; /*!< DSI Host Protocol Configuration Register, Address offset: 0x2C */ + __IO uint32_t GVCIDR; /*!< DSI Host Generic VCID Register, Address offset: 0x30 */ + __IO uint32_t MCR; /*!< DSI Host Mode Configuration Register, Address offset: 0x34 */ + __IO uint32_t VMCR; /*!< DSI Host Video Mode Configuration Register, Address offset: 0x38 */ + __IO uint32_t VPCR; /*!< DSI Host Video Packet Configuration Register, Address offset: 0x3C */ + __IO uint32_t VCCR; /*!< DSI Host Video Chunks Configuration Register, Address offset: 0x40 */ + __IO uint32_t VNPCR; /*!< DSI Host Video Null Packet Configuration Register, Address offset: 0x44 */ + __IO uint32_t VHSACR; /*!< DSI Host Video HSA Configuration Register, Address offset: 0x48 */ + __IO uint32_t VHBPCR; /*!< DSI Host Video HBP Configuration Register, Address offset: 0x4C */ + __IO uint32_t VLCR; /*!< DSI Host Video Line Configuration Register, Address offset: 0x50 */ + __IO uint32_t VVSACR; /*!< DSI Host Video VSA Configuration Register, Address offset: 0x54 */ + __IO uint32_t VVBPCR; /*!< DSI Host Video VBP Configuration Register, Address offset: 0x58 */ + __IO uint32_t VVFPCR; /*!< DSI Host Video VFP Configuration Register, Address offset: 0x5C */ + __IO uint32_t VVACR; /*!< DSI Host Video VA Configuration Register, Address offset: 0x60 */ + __IO uint32_t LCCR; /*!< DSI Host LTDC Command Configuration Register, Address offset: 0x64 */ + __IO uint32_t CMCR; /*!< DSI Host Command Mode Configuration Register, Address offset: 0x68 */ + __IO uint32_t GHCR; /*!< DSI Host Generic Header Configuration Register, Address offset: 0x6C */ + __IO uint32_t GPDR; /*!< DSI Host Generic Payload Data Register, Address offset: 0x70 */ + __IO uint32_t GPSR; /*!< DSI Host Generic Packet Status Register, Address offset: 0x74 */ + __IO uint32_t TCCR[6]; /*!< DSI Host Timeout Counter Configuration Register, Address offset: 0x78-0x8F */ + __IO uint32_t TDCR; /*!< DSI Host 3D Configuration Register, Address offset: 0x90 */ + __IO uint32_t CLCR; /*!< DSI Host 
Clock Lane Configuration Register, Address offset: 0x94 */ + __IO uint32_t CLTCR; /*!< DSI Host Clock Lane Timer Configuration Register, Address offset: 0x98 */ + __IO uint32_t DLTCR; /*!< DSI Host Data Lane Timer Configuration Register, Address offset: 0x9C */ + __IO uint32_t PCTLR; /*!< DSI Host PHY Control Register, Address offset: 0xA0 */ + __IO uint32_t PCONFR; /*!< DSI Host PHY Configuration Register, Address offset: 0xA4 */ + __IO uint32_t PUCR; /*!< DSI Host PHY ULPS Control Register, Address offset: 0xA8 */ + __IO uint32_t PTTCR; /*!< DSI Host PHY TX Triggers Configuration Register, Address offset: 0xAC */ + __IO uint32_t PSR; /*!< DSI Host PHY Status Register, Address offset: 0xB0 */ + uint32_t RESERVED1[2]; /*!< Reserved, 0xB4 - 0xBB */ + __IO uint32_t ISR[2]; /*!< DSI Host Interrupt & Status Register, Address offset: 0xBC-0xC3 */ + __IO uint32_t IER[2]; /*!< DSI Host Interrupt Enable Register, Address offset: 0xC4-0xCB */ + uint32_t RESERVED2[3]; /*!< Reserved, 0xD0 - 0xD7 */ + __IO uint32_t FIR[2]; /*!< DSI Host Force Interrupt Register, Address offset: 0xD8-0xDF */ + uint32_t RESERVED3[8]; /*!< Reserved, 0xE0 - 0xFF */ + __IO uint32_t VSCR; /*!< DSI Host Video Shadow Control Register, Address offset: 0x100 */ + uint32_t RESERVED4[2]; /*!< Reserved, 0x104 - 0x10B */ + __IO uint32_t LCVCIDR; /*!< DSI Host LTDC Current VCID Register, Address offset: 0x10C */ + __IO uint32_t LCCCR; /*!< DSI Host LTDC Current Color Coding Register, Address offset: 0x110 */ + uint32_t RESERVED5; /*!< Reserved, 0x114 */ + __IO uint32_t LPMCCR; /*!< DSI Host Low-power Mode Current Configuration Register, Address offset: 0x118 */ + uint32_t RESERVED6[7]; /*!< Reserved, 0x11C - 0x137 */ + __IO uint32_t VMCCR; /*!< DSI Host Video Mode Current Configuration Register, Address offset: 0x138 */ + __IO uint32_t VPCCR; /*!< DSI Host Video Packet Current Configuration Register, Address offset: 0x13C */ + __IO uint32_t VCCCR; /*!< DSI Host Video Chuncks Current Configuration Register, Address offset: 0x140 */ + __IO uint32_t VNPCCR; /*!< DSI Host Video Null Packet Current Configuration Register, Address offset: 0x144 */ + __IO uint32_t VHSACCR; /*!< DSI Host Video HSA Current Configuration Register, Address offset: 0x148 */ + __IO uint32_t VHBPCCR; /*!< DSI Host Video HBP Current Configuration Register, Address offset: 0x14C */ + __IO uint32_t VLCCR; /*!< DSI Host Video Line Current Configuration Register, Address offset: 0x150 */ + __IO uint32_t VVSACCR; /*!< DSI Host Video VSA Current Configuration Register, Address offset: 0x154 */ + __IO uint32_t VVBPCCR; /*!< DSI Host Video VBP Current Configuration Register, Address offset: 0x158 */ + __IO uint32_t VVFPCCR; /*!< DSI Host Video VFP Current Configuration Register, Address offset: 0x15C */ + __IO uint32_t VVACCR; /*!< DSI Host Video VA Current Configuration Register, Address offset: 0x160 */ + uint32_t RESERVED7[11]; /*!< Reserved, 0x164 - 0x18F */ + __IO uint32_t TDCCR; /*!< DSI Host 3D Current Configuration Register, Address offset: 0x190 */ + uint32_t RESERVED8[155]; /*!< Reserved, 0x194 - 0x3FF */ + __IO uint32_t WCFGR; /*!< DSI Wrapper Configuration Register, Address offset: 0x400 */ + __IO uint32_t WCR; /*!< DSI Wrapper Control Register, Address offset: 0x404 */ + __IO uint32_t WIER; /*!< DSI Wrapper Interrupt Enable Register, Address offset: 0x408 */ + __IO uint32_t WISR; /*!< DSI Wrapper Interrupt and Status Register, Address offset: 0x40C */ + __IO uint32_t WIFCR; /*!< DSI Wrapper Interrupt Flag Clear Register, Address offset: 0x410 */ + uint32_t 
RESERVED9; /*!< Reserved, 0x414 */ + __IO uint32_t WPCR[5]; /*!< DSI Wrapper PHY Configuration Register, Address offset: 0x418-0x42B */ + uint32_t RESERVED10; /*!< Reserved, 0x42C */ + __IO uint32_t WRPCR; /*!< DSI Wrapper Regulator and PLL Control Register, Address offset: 0x430 */ +} DSI_TypeDef; + +/** @addtogroup Peripheral_memory_map + * @{ + */ +#define RAMITCM_BASE 0x00000000UL /*!< Base address of : 16KB RAM reserved for CPU execution/instruction accessible over ITCM */ +#define FLASHITCM_BASE 0x00200000UL /*!< Base address of : (up to 2 MB) embedded FLASH memory accessible over ITCM */ +#define FLASHAXI_BASE 0x08000000UL /*!< Base address of : (up to 2 MB) embedded FLASH memory accessible over AXI */ +#define RAMDTCM_BASE 0x20000000UL /*!< Base address of : 128KB system data RAM accessible over DTCM */ +#define PERIPH_BASE 0x40000000UL /*!< Base address of : AHB/ABP Peripherals */ +#define BKPSRAM_BASE 0x40024000UL /*!< Base address of : Backup SRAM(4 KB) */ +#define QSPI_BASE 0x90000000UL /*!< Base address of : QSPI memories accessible over AXI */ +#define FMC_R_BASE 0xA0000000UL /*!< Base address of : FMC Control registers */ +#define QSPI_R_BASE 0xA0001000UL /*!< Base address of : QSPI Control registers */ +#define SRAM1_BASE 0x20020000UL /*!< Base address of : 368KB RAM1 accessible over AXI/AHB */ +#define SRAM2_BASE 0x2007C000UL /*!< Base address of : 16KB RAM2 accessible over AXI/AHB */ +#define FLASH_END 0x081FFFFFUL /*!< FLASH end address */ +#define FLASH_OTP_BASE 0x1FF0F000UL /*!< Base address of : (up to 1024 Bytes) embedded FLASH OTP Area */ +#define FLASH_OTP_END 0x1FF0F41FUL /*!< End address of : (up to 1024 Bytes) embedded FLASH OTP Area */ + +/* Legacy define */ +#define FLASH_BASE FLASHAXI_BASE + +/*!< Peripheral memory map */ +#define APB1PERIPH_BASE PERIPH_BASE +#define APB2PERIPH_BASE (PERIPH_BASE + 0x00010000UL) +#define AHB1PERIPH_BASE (PERIPH_BASE + 0x00020000UL) +#define AHB2PERIPH_BASE (PERIPH_BASE + 0x10000000UL) + +/*!< APB1 peripherals */ +#define TIM2_BASE (APB1PERIPH_BASE + 0x0000UL) +#define TIM3_BASE (APB1PERIPH_BASE + 0x0400UL) +#define TIM4_BASE (APB1PERIPH_BASE + 0x0800UL) +#define TIM5_BASE (APB1PERIPH_BASE + 0x0C00UL) +#define TIM6_BASE (APB1PERIPH_BASE + 0x1000UL) +#define TIM7_BASE (APB1PERIPH_BASE + 0x1400UL) +#define TIM12_BASE (APB1PERIPH_BASE + 0x1800UL) +#define TIM13_BASE (APB1PERIPH_BASE + 0x1C00UL) +#define TIM14_BASE (APB1PERIPH_BASE + 0x2000UL) +#define LPTIM1_BASE (APB1PERIPH_BASE + 0x2400UL) +#define RTC_BASE (APB1PERIPH_BASE + 0x2800UL) +#define WWDG_BASE (APB1PERIPH_BASE + 0x2C00UL) +#define IWDG_BASE (APB1PERIPH_BASE + 0x3000UL) +#define CAN3_BASE (APB1PERIPH_BASE + 0x3400UL) +#define SPI2_BASE (APB1PERIPH_BASE + 0x3800UL) +#define SPI3_BASE (APB1PERIPH_BASE + 0x3C00UL) +#define SPDIFRX_BASE (APB1PERIPH_BASE + 0x4000UL) +#define USART2_BASE (APB1PERIPH_BASE + 0x4400UL) +#define USART3_BASE (APB1PERIPH_BASE + 0x4800UL) +#define UART4_BASE (APB1PERIPH_BASE + 0x4C00UL) +#define UART5_BASE (APB1PERIPH_BASE + 0x5000UL) +#define I2C1_BASE (APB1PERIPH_BASE + 0x5400UL) +#define I2C2_BASE (APB1PERIPH_BASE + 0x5800UL) +#define I2C3_BASE (APB1PERIPH_BASE + 0x5C00UL) +#define I2C4_BASE (APB1PERIPH_BASE + 0x6000UL) +#define CAN1_BASE (APB1PERIPH_BASE + 0x6400UL) +#define CAN2_BASE (APB1PERIPH_BASE + 0x6800UL) +#define CEC_BASE (APB1PERIPH_BASE + 0x6C00UL) +#define PWR_BASE (APB1PERIPH_BASE + 0x7000UL) +#define DAC_BASE (APB1PERIPH_BASE + 0x7400UL) +#define UART7_BASE (APB1PERIPH_BASE + 0x7800UL) +#define UART8_BASE (APB1PERIPH_BASE + 
0x7C00UL) + +/*!< APB2 peripherals */ +#define TIM1_BASE (APB2PERIPH_BASE + 0x0000UL) +#define TIM8_BASE (APB2PERIPH_BASE + 0x0400UL) +#define USART1_BASE (APB2PERIPH_BASE + 0x1000UL) +#define USART6_BASE (APB2PERIPH_BASE + 0x1400UL) +#define SDMMC2_BASE (APB2PERIPH_BASE + 0x1C00UL) +#define ADC1_BASE (APB2PERIPH_BASE + 0x2000UL) +#define ADC2_BASE (APB2PERIPH_BASE + 0x2100UL) +#define ADC3_BASE (APB2PERIPH_BASE + 0x2200UL) +#define ADC_BASE (APB2PERIPH_BASE + 0x2300UL) +#define SDMMC1_BASE (APB2PERIPH_BASE + 0x2C00UL) +#define SPI1_BASE (APB2PERIPH_BASE + 0x3000UL) +#define SPI4_BASE (APB2PERIPH_BASE + 0x3400UL) +#define SYSCFG_BASE (APB2PERIPH_BASE + 0x3800UL) +#define EXTI_BASE (APB2PERIPH_BASE + 0x3C00UL) +#define TIM9_BASE (APB2PERIPH_BASE + 0x4000UL) +#define TIM10_BASE (APB2PERIPH_BASE + 0x4400UL) +#define TIM11_BASE (APB2PERIPH_BASE + 0x4800UL) +#define SPI5_BASE (APB2PERIPH_BASE + 0x5000UL) +#define SPI6_BASE (APB2PERIPH_BASE + 0x5400UL) +#define SAI1_BASE (APB2PERIPH_BASE + 0x5800UL) +#define SAI2_BASE (APB2PERIPH_BASE + 0x5C00UL) +#define SAI1_Block_A_BASE (SAI1_BASE + 0x004UL) +#define SAI1_Block_B_BASE (SAI1_BASE + 0x024UL) +#define SAI2_Block_A_BASE (SAI2_BASE + 0x004UL) +#define SAI2_Block_B_BASE (SAI2_BASE + 0x024UL) +#define LTDC_BASE (APB2PERIPH_BASE + 0x6800UL) +#define LTDC_Layer1_BASE (LTDC_BASE + 0x0084UL) +#define LTDC_Layer2_BASE (LTDC_BASE + 0x0104UL) +#define DSI_BASE (APB2PERIPH_BASE + 0x6C00UL) +#define DFSDM1_BASE (APB2PERIPH_BASE + 0x7400UL) +#define DFSDM1_Channel0_BASE (DFSDM1_BASE + 0x00UL) +#define DFSDM1_Channel1_BASE (DFSDM1_BASE + 0x20UL) +#define DFSDM1_Channel2_BASE (DFSDM1_BASE + 0x40UL) +#define DFSDM1_Channel3_BASE (DFSDM1_BASE + 0x60UL) +#define DFSDM1_Channel4_BASE (DFSDM1_BASE + 0x80UL) +#define DFSDM1_Channel5_BASE (DFSDM1_BASE + 0xA0UL) +#define DFSDM1_Channel6_BASE (DFSDM1_BASE + 0xC0UL) +#define DFSDM1_Channel7_BASE (DFSDM1_BASE + 0xE0UL) +#define DFSDM1_Filter0_BASE (DFSDM1_BASE + 0x100UL) +#define DFSDM1_Filter1_BASE (DFSDM1_BASE + 0x180UL) +#define DFSDM1_Filter2_BASE (DFSDM1_BASE + 0x200UL) +#define DFSDM1_Filter3_BASE (DFSDM1_BASE + 0x280UL) +#define MDIOS_BASE (APB2PERIPH_BASE + 0x7800UL) +/*!< AHB1 peripherals */ +#define GPIOA_BASE (AHB1PERIPH_BASE + 0x0000UL) +#define GPIOB_BASE (AHB1PERIPH_BASE + 0x0400UL) +#define GPIOC_BASE (AHB1PERIPH_BASE + 0x0800UL) +#define GPIOD_BASE (AHB1PERIPH_BASE + 0x0C00UL) +#define GPIOE_BASE (AHB1PERIPH_BASE + 0x1000UL) +#define GPIOF_BASE (AHB1PERIPH_BASE + 0x1400UL) +#define GPIOG_BASE (AHB1PERIPH_BASE + 0x1800UL) +#define GPIOH_BASE (AHB1PERIPH_BASE + 0x1C00UL) +#define GPIOI_BASE (AHB1PERIPH_BASE + 0x2000UL) +#define GPIOJ_BASE (AHB1PERIPH_BASE + 0x2400UL) +#define GPIOK_BASE (AHB1PERIPH_BASE + 0x2800UL) +#define CRC_BASE (AHB1PERIPH_BASE + 0x3000UL) +#define RCC_BASE (AHB1PERIPH_BASE + 0x3800UL) +#define FLASH_R_BASE (AHB1PERIPH_BASE + 0x3C00UL) +#define UID_BASE 0x1FF0F420UL /*!< Unique device ID register base address */ +#define FLASHSIZE_BASE 0x1FF0F442UL /*!< FLASH Size register base address */ +#define PACKAGE_BASE 0x1FF0F7E0UL /*!< Package size register base address */ +/* Legacy define */ +#define PACKAGESIZE_BASE PACKAGE_BASE + +#define DMA1_BASE (AHB1PERIPH_BASE + 0x6000UL) +#define DMA1_Stream0_BASE (DMA1_BASE + 0x010UL) +#define DMA1_Stream1_BASE (DMA1_BASE + 0x028UL) +#define DMA1_Stream2_BASE (DMA1_BASE + 0x040UL) +#define DMA1_Stream3_BASE (DMA1_BASE + 0x058UL) +#define DMA1_Stream4_BASE (DMA1_BASE + 0x070UL) +#define DMA1_Stream5_BASE (DMA1_BASE + 0x088UL) +#define 
DMA1_Stream6_BASE (DMA1_BASE + 0x0A0UL) +#define DMA1_Stream7_BASE (DMA1_BASE + 0x0B8UL) +#define DMA2_BASE (AHB1PERIPH_BASE + 0x6400UL) +#define DMA2_Stream0_BASE (DMA2_BASE + 0x010UL) +#define DMA2_Stream1_BASE (DMA2_BASE + 0x028UL) +#define DMA2_Stream2_BASE (DMA2_BASE + 0x040UL) +#define DMA2_Stream3_BASE (DMA2_BASE + 0x058UL) +#define DMA2_Stream4_BASE (DMA2_BASE + 0x070UL) +#define DMA2_Stream5_BASE (DMA2_BASE + 0x088UL) +#define DMA2_Stream6_BASE (DMA2_BASE + 0x0A0UL) +#define DMA2_Stream7_BASE (DMA2_BASE + 0x0B8UL) +#define ETH_BASE (AHB1PERIPH_BASE + 0x8000UL) +#define ETH_MAC_BASE (ETH_BASE) +#define ETH_MMC_BASE (ETH_BASE + 0x0100UL) +#define ETH_PTP_BASE (ETH_BASE + 0x0700UL) +#define ETH_DMA_BASE (ETH_BASE + 0x1000UL) +#define DMA2D_BASE (AHB1PERIPH_BASE + 0xB000UL) +/*!< AHB2 peripherals */ +#define DCMI_BASE (AHB2PERIPH_BASE + 0x50000UL) +#define JPEG_BASE (AHB2PERIPH_BASE + 0x51000UL) +#define RNG_BASE (AHB2PERIPH_BASE + 0x60800UL) +/*!< FMC Bankx registers base address */ +#define FMC_Bank1_R_BASE (FMC_R_BASE + 0x0000UL) +#define FMC_Bank1E_R_BASE (FMC_R_BASE + 0x0104UL) +#define FMC_Bank3_R_BASE (FMC_R_BASE + 0x0080UL) +#define FMC_Bank5_6_R_BASE (FMC_R_BASE + 0x0140UL) + +/* Debug MCU registers base address */ +#define DBGMCU_BASE 0xE0042000UL + +/*!< USB registers base address */ +#define USB_OTG_HS_PERIPH_BASE 0x40040000UL +#define USB_OTG_FS_PERIPH_BASE 0x50000000UL + +#define USB_OTG_GLOBAL_BASE 0x0000UL +#define USB_OTG_DEVICE_BASE 0x0800UL +#define USB_OTG_IN_ENDPOINT_BASE 0x0900UL +#define USB_OTG_OUT_ENDPOINT_BASE 0x0B00UL +#define USB_OTG_EP_REG_SIZE 0x0020UL +#define USB_OTG_HOST_BASE 0x0400UL +#define USB_OTG_HOST_PORT_BASE 0x0440UL +#define USB_OTG_HOST_CHANNEL_BASE 0x0500UL +#define USB_OTG_HOST_CHANNEL_SIZE 0x0020UL +#define USB_OTG_PCGCCTL_BASE 0x0E00UL +#define USB_OTG_FIFO_BASE 0x1000UL +#define USB_OTG_FIFO_SIZE 0x1000UL + +/** + * @} + */ + +/** @addtogroup Peripheral_declaration + * @{ + */ +#define TIM2 ((TIM_TypeDef *) TIM2_BASE) +#define TIM3 ((TIM_TypeDef *) TIM3_BASE) +#define TIM4 ((TIM_TypeDef *) TIM4_BASE) +#define TIM5 ((TIM_TypeDef *) TIM5_BASE) +#define TIM6 ((TIM_TypeDef *) TIM6_BASE) +#define TIM7 ((TIM_TypeDef *) TIM7_BASE) +#define TIM12 ((TIM_TypeDef *) TIM12_BASE) +#define TIM13 ((TIM_TypeDef *) TIM13_BASE) +#define TIM14 ((TIM_TypeDef *) TIM14_BASE) +#define LPTIM1 ((LPTIM_TypeDef *) LPTIM1_BASE) +#define RTC ((RTC_TypeDef *) RTC_BASE) +#define WWDG ((WWDG_TypeDef *) WWDG_BASE) +#define IWDG ((IWDG_TypeDef *) IWDG_BASE) +#define SPI2 ((SPI_TypeDef *) SPI2_BASE) +#define SPI3 ((SPI_TypeDef *) SPI3_BASE) +#define SPDIFRX ((SPDIFRX_TypeDef *) SPDIFRX_BASE) +#define USART2 ((USART_TypeDef *) USART2_BASE) +#define USART3 ((USART_TypeDef *) USART3_BASE) +#define UART4 ((USART_TypeDef *) UART4_BASE) +#define UART5 ((USART_TypeDef *) UART5_BASE) +#define I2C1 ((I2C_TypeDef *) I2C1_BASE) +#define I2C2 ((I2C_TypeDef *) I2C2_BASE) +#define I2C3 ((I2C_TypeDef *) I2C3_BASE) +#define I2C4 ((I2C_TypeDef *) I2C4_BASE) +#define CAN1 ((CAN_TypeDef *) CAN1_BASE) +#define CAN2 ((CAN_TypeDef *) CAN2_BASE) +#define CEC ((CEC_TypeDef *) CEC_BASE) +#define PWR ((PWR_TypeDef *) PWR_BASE) +#define DAC1 ((DAC_TypeDef *) DAC_BASE) +#define DAC ((DAC_TypeDef *) DAC_BASE) /* Kept for legacy purpose */ +#define UART7 ((USART_TypeDef *) UART7_BASE) +#define UART8 ((USART_TypeDef *) UART8_BASE) +#define TIM1 ((TIM_TypeDef *) TIM1_BASE) +#define TIM8 ((TIM_TypeDef *) TIM8_BASE) +#define USART1 ((USART_TypeDef *) USART1_BASE) +#define USART6 ((USART_TypeDef *) 
USART6_BASE) +#define ADC ((ADC_Common_TypeDef *) ADC_BASE) +#define ADC1 ((ADC_TypeDef *) ADC1_BASE) +#define ADC2 ((ADC_TypeDef *) ADC2_BASE) +#define ADC3 ((ADC_TypeDef *) ADC3_BASE) +#define ADC123_COMMON ((ADC_Common_TypeDef *) ADC_BASE) +#define SDMMC1 ((SDMMC_TypeDef *) SDMMC1_BASE) +#define SPI1 ((SPI_TypeDef *) SPI1_BASE) +#define SPI4 ((SPI_TypeDef *) SPI4_BASE) +#define SYSCFG ((SYSCFG_TypeDef *) SYSCFG_BASE) +#define EXTI ((EXTI_TypeDef *) EXTI_BASE) +#define TIM9 ((TIM_TypeDef *) TIM9_BASE) +#define TIM10 ((TIM_TypeDef *) TIM10_BASE) +#define TIM11 ((TIM_TypeDef *) TIM11_BASE) +#define SPI5 ((SPI_TypeDef *) SPI5_BASE) +#define SPI6 ((SPI_TypeDef *) SPI6_BASE) +#define SAI1 ((SAI_TypeDef *) SAI1_BASE) +#define SAI2 ((SAI_TypeDef *) SAI2_BASE) +#define SAI1_Block_A ((SAI_Block_TypeDef *)SAI1_Block_A_BASE) +#define SAI1_Block_B ((SAI_Block_TypeDef *)SAI1_Block_B_BASE) +#define SAI2_Block_A ((SAI_Block_TypeDef *)SAI2_Block_A_BASE) +#define SAI2_Block_B ((SAI_Block_TypeDef *)SAI2_Block_B_BASE) +#define LTDC ((LTDC_TypeDef *)LTDC_BASE) +#define LTDC_Layer1 ((LTDC_Layer_TypeDef *)LTDC_Layer1_BASE) +#define LTDC_Layer2 ((LTDC_Layer_TypeDef *)LTDC_Layer2_BASE) +#define GPIOA ((GPIO_TypeDef *) GPIOA_BASE) +#define GPIOB ((GPIO_TypeDef *) GPIOB_BASE) +#define GPIOC ((GPIO_TypeDef *) GPIOC_BASE) +#define GPIOD ((GPIO_TypeDef *) GPIOD_BASE) +#define GPIOE ((GPIO_TypeDef *) GPIOE_BASE) +#define GPIOF ((GPIO_TypeDef *) GPIOF_BASE) +#define GPIOG ((GPIO_TypeDef *) GPIOG_BASE) +#define GPIOH ((GPIO_TypeDef *) GPIOH_BASE) +#define GPIOI ((GPIO_TypeDef *) GPIOI_BASE) +#define GPIOJ ((GPIO_TypeDef *) GPIOJ_BASE) +#define GPIOK ((GPIO_TypeDef *) GPIOK_BASE) +#define CRC ((CRC_TypeDef *) CRC_BASE) +#define RCC ((RCC_TypeDef *) RCC_BASE) +#define FLASH ((FLASH_TypeDef *) FLASH_R_BASE) +#define DMA1 ((DMA_TypeDef *) DMA1_BASE) +#define DMA1_Stream0 ((DMA_Stream_TypeDef *) DMA1_Stream0_BASE) +#define DMA1_Stream1 ((DMA_Stream_TypeDef *) DMA1_Stream1_BASE) +#define DMA1_Stream2 ((DMA_Stream_TypeDef *) DMA1_Stream2_BASE) +#define DMA1_Stream3 ((DMA_Stream_TypeDef *) DMA1_Stream3_BASE) +#define DMA1_Stream4 ((DMA_Stream_TypeDef *) DMA1_Stream4_BASE) +#define DMA1_Stream5 ((DMA_Stream_TypeDef *) DMA1_Stream5_BASE) +#define DMA1_Stream6 ((DMA_Stream_TypeDef *) DMA1_Stream6_BASE) +#define DMA1_Stream7 ((DMA_Stream_TypeDef *) DMA1_Stream7_BASE) +#define DMA2 ((DMA_TypeDef *) DMA2_BASE) +#define DMA2_Stream0 ((DMA_Stream_TypeDef *) DMA2_Stream0_BASE) +#define DMA2_Stream1 ((DMA_Stream_TypeDef *) DMA2_Stream1_BASE) +#define DMA2_Stream2 ((DMA_Stream_TypeDef *) DMA2_Stream2_BASE) +#define DMA2_Stream3 ((DMA_Stream_TypeDef *) DMA2_Stream3_BASE) +#define DMA2_Stream4 ((DMA_Stream_TypeDef *) DMA2_Stream4_BASE) +#define DMA2_Stream5 ((DMA_Stream_TypeDef *) DMA2_Stream5_BASE) +#define DMA2_Stream6 ((DMA_Stream_TypeDef *) DMA2_Stream6_BASE) +#define DMA2_Stream7 ((DMA_Stream_TypeDef *) DMA2_Stream7_BASE) +#define ETH ((ETH_TypeDef *) ETH_BASE) +#define DMA2D ((DMA2D_TypeDef *)DMA2D_BASE) +#define DCMI ((DCMI_TypeDef *) DCMI_BASE) +#define RNG ((RNG_TypeDef *) RNG_BASE) +#define FMC_Bank1 ((FMC_Bank1_TypeDef *) FMC_Bank1_R_BASE) +#define FMC_Bank1E ((FMC_Bank1E_TypeDef *) FMC_Bank1E_R_BASE) +#define FMC_Bank3 ((FMC_Bank3_TypeDef *) FMC_Bank3_R_BASE) +#define FMC_Bank5_6 ((FMC_Bank5_6_TypeDef *) FMC_Bank5_6_R_BASE) +#define QUADSPI ((QUADSPI_TypeDef *) QSPI_R_BASE) +#define DBGMCU ((DBGMCU_TypeDef *) DBGMCU_BASE) +#define USB_OTG_FS ((USB_OTG_GlobalTypeDef *) USB_OTG_FS_PERIPH_BASE) +#define USB_OTG_HS 
((USB_OTG_GlobalTypeDef *) USB_OTG_HS_PERIPH_BASE) +#define CAN3 ((CAN_TypeDef *) CAN3_BASE) +#define SDMMC2 ((SDMMC_TypeDef *) SDMMC2_BASE) +#define MDIOS ((MDIOS_TypeDef *) MDIOS_BASE) +#define DFSDM1_Channel0 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel0_BASE) +#define DFSDM1_Channel1 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel1_BASE) +#define DFSDM1_Channel2 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel2_BASE) +#define DFSDM1_Channel3 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel3_BASE) +#define DFSDM1_Channel4 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel4_BASE) +#define DFSDM1_Channel5 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel5_BASE) +#define DFSDM1_Channel6 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel6_BASE) +#define DFSDM1_Channel7 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel7_BASE) +#define DFSDM1_Filter0 ((DFSDM_Filter_TypeDef *) DFSDM1_Filter0_BASE) +#define DFSDM1_Filter1 ((DFSDM_Filter_TypeDef *) DFSDM1_Filter1_BASE) +#define DFSDM1_Filter2 ((DFSDM_Filter_TypeDef *) DFSDM1_Filter2_BASE) +#define DFSDM1_Filter3 ((DFSDM_Filter_TypeDef *) DFSDM1_Filter3_BASE) +#define JPEG ((JPEG_TypeDef *) JPEG_BASE) +#define DSI ((DSI_TypeDef *)DSI_BASE) + +/** + * @} + */ + +/** @addtogroup Exported_constants + * @{ + */ + + /** @addtogroup Peripheral_Registers_Bits_Definition + * @{ + */ + +/******************************************************************************/ +/* Peripheral Registers_Bits_Definition */ +/******************************************************************************/ + +/******************************************************************************/ +/* */ +/* Analog to Digital Converter */ +/* */ +/******************************************************************************/ +#define VREFINT_CAL_ADDR_CMSIS ((uint16_t*) (0x1FF0F44A)) /*!
+ * © Copyright (c) 2016 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/** @addtogroup CMSIS + * @{ + */ + +/** @addtogroup stm32f7xx + * @{ + */ + +#ifndef __STM32F7xx_H +#define __STM32F7xx_H + +#ifdef __cplusplus + extern "C" { +#endif /* __cplusplus */ + +/** @addtogroup Library_configuration_section + * @{ + */ + +/** + * @brief STM32 Family + */ +#if !defined (STM32F7) +#define STM32F7 +#endif /* STM32F7 */ + +/* Uncomment the line below according to the target STM32 device used in your + application + */ +#if !defined (STM32F756xx) && !defined (STM32F746xx) && !defined (STM32F745xx) && !defined (STM32F765xx) && \ + !defined (STM32F767xx) && !defined (STM32F769xx) && !defined (STM32F777xx) && !defined (STM32F779xx) && \ + !defined (STM32F722xx) && !defined (STM32F723xx) && !defined (STM32F732xx) && !defined (STM32F733xx) && \ + !defined (STM32F730xx) && !defined (STM32F750xx) + + /* #define STM32F756xx */ /*!< STM32F756VG, STM32F756ZG, STM32F756ZG, STM32F756IG, STM32F756BG, + STM32F756NG Devices */ + /* #define STM32F746xx */ /*!< STM32F746VE, STM32F746VG, STM32F746ZE, STM32F746ZG, STM32F746IE, STM32F746IG, + STM32F746BE, STM32F746BG, STM32F746NE, STM32F746NG Devices */ + /* #define STM32F745xx */ /*!< STM32F745VE, STM32F745VG, STM32F745ZG, STM32F745ZE, STM32F745IE, STM32F745IG Devices */ + /* #define STM32F765xx */ /*!< STM32F765BI, STM32F765BG, STM32F765NI, STM32F765NG, STM32F765II, STM32F765IG, + STM32F765ZI, STM32F765ZG, STM32F765VI, STM32F765VG Devices */ + /* #define STM32F767xx */ /*!< STM32F767BG, STM32F767BI, STM32F767IG, STM32F767II, STM32F767NG, STM32F767NI, + STM32F767VG, STM32F767VI, STM32F767ZG, STM32F767ZI Devices */ + /* #define STM32F769xx */ /*!< STM32F769AG, STM32F769AI, STM32F769BG, STM32F769BI, STM32F769IG, STM32F769II, + STM32F769NG, STM32F769NI, STM32F768AI Devices */ + /* #define STM32F777xx */ /*!< STM32F777VI, STM32F777ZI, STM32F777II, STM32F777BI, STM32F777NI Devices */ + /* #define STM32F779xx */ /*!< STM32F779II, STM32F779BI, STM32F779NI, STM32F779AI, STM32F778AI Devices */ + /* #define STM32F722xx */ /*!< STM32F722IE, STM32F722ZE, STM32F722VE, STM32F722RE, STM32F722IC, STM32F722ZC, + STM32F722VC, STM32F722RC Devices */ + /* #define STM32F723xx */ /*!< STM32F723IE, STM32F723ZE, STM32F723VE, STM32F723IC, STM32F723ZC, STM32F723VC Devices */ + /* #define STM32F732xx */ /*!< STM32F732IE, STM32F732ZE, STM32F732VE, STM32F732RE Devices */ + /* #define STM32F733xx */ /*!< STM32F733IE, STM32F733ZE, STM32F733VE Devices */ + /* #define STM32F730xx */ /*!< STM32F730R, STM32F730V, STM32F730Z, STM32F730I Devices */ + /* #define STM32F750xx */ /*!< STM32F750V, STM32F750Z, STM32F750N Devices */ +#endif + +/* Tip: To avoid modifying this file each time you need to switch between these + devices, you can define the device in your toolchain compiler preprocessor. + */ + +#if !defined (USE_HAL_DRIVER) +/** + * @brief Comment the line below if you will not use the peripherals drivers. 
+ In this case, these drivers will not be included and the application code will + be based on direct access to peripherals registers + */ + /*#define USE_HAL_DRIVER */ +#endif /* USE_HAL_DRIVER */ + +/** + * @brief CMSIS Device version number V1.2.5 + */ +#define __STM32F7_CMSIS_VERSION_MAIN (0x01) /*!< [31:24] main version */ +#define __STM32F7_CMSIS_VERSION_SUB1 (0x02) /*!< [23:16] sub1 version */ +#define __STM32F7_CMSIS_VERSION_SUB2 (0x05) /*!< [15:8] sub2 version */ +#define __STM32F7_CMSIS_VERSION_RC (0x00) /*!< [7:0] release candidate */ +#define __STM32F7_CMSIS_VERSION ((__STM32F7_CMSIS_VERSION_MAIN << 24)\ + |(__STM32F7_CMSIS_VERSION_SUB1 << 16)\ + |(__STM32F7_CMSIS_VERSION_SUB2 << 8 )\ + |(__STM32F7_CMSIS_VERSION_RC)) +/** + * @} + */ + +/** @addtogroup Device_Included + * @{ + */ +#if defined(STM32F722xx) + #include "stm32f722xx.h" +#elif defined(STM32F723xx) + #include "stm32f723xx.h" +#elif defined(STM32F732xx) + #include "stm32f732xx.h" +#elif defined(STM32F733xx) + #include "stm32f733xx.h" +#elif defined(STM32F756xx) + #include "stm32f756xx.h" +#elif defined(STM32F746xx) + #include "stm32f746xx.h" +#elif defined(STM32F745xx) + #include "stm32f745xx.h" +#elif defined(STM32F765xx) + #include "stm32f765xx.h" +#elif defined(STM32F767xx) + #include "stm32f767xx.h" +#elif defined(STM32F769xx) + #include "stm32f769xx.h" +#elif defined(STM32F777xx) + #include "stm32f777xx.h" +#elif defined(STM32F779xx) + #include "stm32f779xx.h" +#elif defined(STM32F730xx) + #include "stm32f730xx.h" +#elif defined(STM32F750xx) + #include "stm32f750xx.h" +#else + #error "Please select first the target STM32F7xx device used in your application (in stm32f7xx.h file)" +#endif + +/** + * @} + */ + +/** @addtogroup Exported_types + * @{ + */ +typedef enum +{ + RESET = 0U, + SET = !RESET +} FlagStatus, ITStatus; + +typedef enum +{ + DISABLE = 0U, + ENABLE = !DISABLE +} FunctionalState; +#define IS_FUNCTIONAL_STATE(STATE) (((STATE) == DISABLE) || ((STATE) == ENABLE)) + +typedef enum +{ + SUCCESS = 0U, + ERROR = !SUCCESS +} ErrorStatus; + +/** + * @} + */ + +/** @addtogroup Exported_macro + * @{ + */ +#define SET_BIT(REG, BIT) ((REG) |= (BIT)) + +#define CLEAR_BIT(REG, BIT) ((REG) &= ~(BIT)) + +#define READ_BIT(REG, BIT) ((REG) & (BIT)) + +#define CLEAR_REG(REG) ((REG) = (0x0)) + +#define WRITE_REG(REG, VAL) ((REG) = (VAL)) + +#define READ_REG(REG) ((REG)) + +#define MODIFY_REG(REG, CLEARMASK, SETMASK) WRITE_REG((REG), (((READ_REG(REG)) & (~(CLEARMASK))) | (SETMASK))) + +#define POSITION_VAL(VAL) (__CLZ(__RBIT(VAL))) + +/** + * @} + */ + +#ifdef USE_HAL_DRIVER + #include "stm32f7xx_hal.h" +#endif /* USE_HAL_DRIVER */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __STM32F7xx_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Device/ST/STM32F7xx/Include/system_stm32f7xx.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Device/ST/STM32F7xx/Include/system_stm32f7xx.h new file mode 100644 index 000000000..140be1b60 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Device/ST/STM32F7xx/Include/system_stm32f7xx.h @@ -0,0 +1,123 @@ +/** + ****************************************************************************** + * @file system_stm32f7xx.h + * @author MCD Application Team + * @brief CMSIS Cortex-M7 Device System Source File for STM32F7xx devices. + ****************************************************************************** + * @attention + * + *
+ * © COPYRIGHT(c) 2016 STMicroelectronics
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of STMicroelectronics nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ****************************************************************************** + */ + +/** @addtogroup CMSIS + * @{ + */ + +/** @addtogroup stm32f7xx_system + * @{ + */ + +/** + * @brief Define to prevent recursive inclusion + */ +#ifndef __SYSTEM_STM32F7XX_H +#define __SYSTEM_STM32F7XX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/** @addtogroup STM32F7xx_System_Includes + * @{ + */ + +/** + * @} + */ + + +/** @addtogroup STM32F7xx_System_Exported_Variables + * @{ + */ + /* The SystemCoreClock variable is updated in three ways: + 1) by calling CMSIS function SystemCoreClockUpdate() + 2) by calling HAL API function HAL_RCC_GetSysClockFreq() + 3) each time HAL_RCC_ClockConfig() is called to configure the system clock frequency + Note: If you use this function to configure the system clock; then there + is no need to call the 2 first functions listed above, since SystemCoreClock + variable is updated automatically. 
+ */ +extern uint32_t SystemCoreClock; /*!< System Clock Frequency (Core Clock) */ + +extern const uint8_t AHBPrescTable[16]; /*!< AHB prescalers table values */ +extern const uint8_t APBPrescTable[8]; /*!< APB prescalers table values */ + + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Exported_Constants + * @{ + */ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Exported_Macros + * @{ + */ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Exported_Functions + * @{ + */ + +extern void SystemInit(void); +extern void SystemCoreClockUpdate(void); +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /*__SYSTEM_STM32F7XX_H */ + +/** + * @} + */ + +/** + * @} + */ +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_armcc.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_armcc.h new file mode 100644 index 000000000..4d9d0645d --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_armcc.h @@ -0,0 +1,865 @@ +/**************************************************************************//** + * @file cmsis_armcc.h + * @brief CMSIS compiler ARMCC (Arm Compiler 5) header file + * @version V5.0.4 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_ARMCC_H +#define __CMSIS_ARMCC_H + + +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 400677) + #error "Please use Arm Compiler Toolchain V4.0.677 or later!" 
+#endif + +/* CMSIS compiler control architecture macros */ +#if ((defined (__TARGET_ARCH_6_M ) && (__TARGET_ARCH_6_M == 1)) || \ + (defined (__TARGET_ARCH_6S_M ) && (__TARGET_ARCH_6S_M == 1)) ) + #define __ARM_ARCH_6M__ 1 +#endif + +#if (defined (__TARGET_ARCH_7_M ) && (__TARGET_ARCH_7_M == 1)) + #define __ARM_ARCH_7M__ 1 +#endif + +#if (defined (__TARGET_ARCH_7E_M) && (__TARGET_ARCH_7E_M == 1)) + #define __ARM_ARCH_7EM__ 1 +#endif + + /* __ARM_ARCH_8M_BASE__ not applicable */ + /* __ARM_ARCH_8M_MAIN__ not applicable */ + + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE static __forceinline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __declspec(noreturn) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed)) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT __packed struct +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION __packed union +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #define __UNALIGNED_UINT32(x) (*((__packed uint32_t *)(x))) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #define __UNALIGNED_UINT16_WRITE(addr, val) ((*((__packed uint16_t *)(addr))) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #define __UNALIGNED_UINT16_READ(addr) (*((const __packed uint16_t *)(addr))) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #define __UNALIGNED_UINT32_WRITE(addr, val) ((*((__packed uint32_t *)(addr))) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #define __UNALIGNED_UINT32_READ(addr) (*((const __packed uint32_t *)(addr))) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __enable_irq(); */ + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __disable_irq(); */ + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_INLINE uint32_t __get_CONTROL(void) +{ + register uint32_t __regControl __ASM("control"); + return(__regControl); +} + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_INLINE void __set_CONTROL(uint32_t control) +{ + register uint32_t __regControl __ASM("control"); + __regControl = control; +} + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_INLINE uint32_t __get_IPSR(void) +{ + register uint32_t __regIPSR __ASM("ipsr"); + return(__regIPSR); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. 
+ \return APSR Register value + */ +__STATIC_INLINE uint32_t __get_APSR(void) +{ + register uint32_t __regAPSR __ASM("apsr"); + return(__regAPSR); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_INLINE uint32_t __get_xPSR(void) +{ + register uint32_t __regXPSR __ASM("xpsr"); + return(__regXPSR); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_INLINE uint32_t __get_PSP(void) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + return(__regProcessStackPointer); +} + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + __regProcessStackPointer = topOfProcStack; +} + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_INLINE uint32_t __get_MSP(void) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + return(__regMainStackPointer); +} + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + __regMainStackPointer = topOfMainStack; +} + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_INLINE uint32_t __get_PRIMASK(void) +{ + register uint32_t __regPriMask __ASM("primask"); + return(__regPriMask); +} + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_INLINE void __set_PRIMASK(uint32_t priMask) +{ + register uint32_t __regPriMask __ASM("primask"); + __regPriMask = (priMask); +} + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __enable_fault_irq __enable_fiq + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __disable_fault_irq __disable_fiq + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_INLINE uint32_t __get_BASEPRI(void) +{ + register uint32_t __regBasePri __ASM("basepri"); + return(__regBasePri); +} + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_INLINE void __set_BASEPRI(uint32_t basePri) +{ + register uint32_t __regBasePri __ASM("basepri"); + __regBasePri = (basePri & 0xFFU); +} + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. 
+ \param [in] basePri Base Priority value to set + */ +__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + register uint32_t __regBasePriMax __ASM("basepri_max"); + __regBasePriMax = (basePri & 0xFFU); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ +__STATIC_INLINE uint32_t __get_FAULTMASK(void) +{ + register uint32_t __regFaultMask __ASM("faultmask"); + return(__regFaultMask); +} + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) +{ + register uint32_t __regFaultMask __ASM("faultmask"); + __regFaultMask = (faultMask & (uint32_t)1U); +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_INLINE uint32_t __get_FPSCR(void) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + return(__regfpscr); +#else + return(0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + __regfpscr = (fpscr); +#else + (void)fpscr; +#endif +} + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP __nop + + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI __wfi + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE __wfe + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV __sev + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() do {\ + __schedule_barrier();\ + __isb(0xF);\ + __schedule_barrier();\ + } while (0U) + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. 
+ */ +#define __DSB() do {\ + __schedule_barrier();\ + __dsb(0xF);\ + __schedule_barrier();\ + } while (0U) + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() do {\ + __schedule_barrier();\ + __dmb(0xF);\ + __schedule_barrier();\ + } while (0U) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV __rev + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value) +{ + rev16 r0, r0 + bx lr +} +#endif + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value) +{ + revsh r0, r0 + bx lr +} +#endif + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +#define __ROR __ror + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __breakpoint(value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + #define __RBIT __rbit +#else +__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value) +{ + uint32_t result; + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ + return result; +} +#endif + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ __clz + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr)) +#else + #define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXH(ptr) ((uint16_t) __ldrex(ptr)) +#else + #define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr)) +#else + #define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXB(value, ptr) __strex(value, ptr) +#else + #define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXH(value, ptr) __strex(value, ptr) +#else + #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXW(value, ptr) __strex(value, ptr) +#else + #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __clrex + + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. 
+ \param [in] value Value to rotate + \return Rotated value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +{ + rrx r0, r0 + bx lr +} +#endif + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRBT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRHT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRT(value, ptr) __strt(value, ptr) + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +#define __SADD8 __sadd8 +#define __QADD8 __qadd8 +#define __SHADD8 __shadd8 +#define __UADD8 __uadd8 +#define __UQADD8 __uqadd8 +#define __UHADD8 __uhadd8 +#define __SSUB8 __ssub8 +#define __QSUB8 __qsub8 +#define __SHSUB8 __shsub8 +#define __USUB8 __usub8 +#define __UQSUB8 __uqsub8 +#define __UHSUB8 __uhsub8 +#define __SADD16 __sadd16 +#define __QADD16 __qadd16 +#define __SHADD16 __shadd16 +#define __UADD16 __uadd16 +#define __UQADD16 __uqadd16 +#define __UHADD16 __uhadd16 +#define __SSUB16 __ssub16 +#define __QSUB16 __qsub16 +#define __SHSUB16 __shsub16 +#define __USUB16 __usub16 +#define __UQSUB16 __uqsub16 +#define __UHSUB16 __uhsub16 +#define __SASX __sasx +#define __QASX __qasx +#define __SHASX __shasx +#define __UASX __uasx +#define __UQASX __uqasx +#define __UHASX __uhasx +#define __SSAX __ssax +#define __QSAX __qsax +#define __SHSAX __shsax +#define __USAX __usax +#define __UQSAX __uqsax +#define __UHSAX __uhsax +#define __USAD8 __usad8 +#define __USADA8 __usada8 +#define __SSAT16 __ssat16 +#define __USAT16 __usat16 +#define __UXTB16 __uxtb16 +#define __UXTAB16 __uxtab16 +#define __SXTB16 __sxtb16 +#define __SXTAB16 __sxtab16 +#define __SMUAD __smuad +#define __SMUADX __smuadx +#define __SMLAD __smlad +#define __SMLADX __smladx +#define __SMLALD __smlald +#define __SMLALDX __smlaldx +#define __SMUSD __smusd +#define __SMUSDX __smusdx +#define __SMLSD __smlsd +#define __SMLSDX __smlsdx +#define __SMLSLD __smlsld +#define __SMLSLDX __smlsldx +#define __SEL __sel +#define __QADD __qadd +#define __QSUB __qsub + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +#define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \ + ((int64_t)(ARG3) << 32U) ) >> 32U)) + +#endif /* ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#endif /* __CMSIS_ARMCC_H */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_armclang.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_armclang.h new file mode 100644 index 000000000..162a400ea --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_armclang.h @@ -0,0 +1,1869 @@ +/**************************************************************************//** + * @file cmsis_armclang.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V5.0.4 + * @date 10. 
January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */ + +#ifndef __CMSIS_ARMCLANG_H +#define __CMSIS_ARMCLANG_H + +#pragma clang system_header /* treat file as system include file */ + +#ifndef __ARM_COMPAT_H +#include /* Compatibility header for Arm Compiler 5 intrinsics */ +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32 */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" 
+/*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __enable_irq(); see arm_compat.h */ + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __disable_irq(); see arm_compat.h */ + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). 
+ \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. 
+ \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __enable_fault_irq __enable_fiq /* see arm_compat.h */ + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __disable_fault_irq __disable_fiq /* see arm_compat.h */ + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. 
+ \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
+ \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. 
Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP __builtin_arm_nop + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. 
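+
+           Illustrative sketch (added for clarity; not part of the upstream CMSIS header): a typical
+           idle loop sleeps until the next interrupt; work_pending() stands for a hypothetical
+           application-level flag.
+           \code
+             while (!work_pending())
+             {
+               __WFI();    // sleep until an interrupt (e.g. SysTick) wakes the core
+             }
+           \endcode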
+ */ +#define __WFI __builtin_arm_wfi + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE __builtin_arm_wfe + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV __builtin_arm_sev + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __builtin_arm_isb(0xF); + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __builtin_arm_dsb(0xF); + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __builtin_arm_dmb(0xF); + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __builtin_bswap32(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __ROR(__REV(value), 16) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) (int16_t)__builtin_bswap16(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. 
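+
+           Illustrative sketch (added for clarity; not part of the upstream CMSIS header): __CLZ is
+           commonly used to locate the most significant set bit of a non-zero word.
+           \code
+             uint32_t mask = 0x00040000UL;
+             uint32_t msb  = 31U - __CLZ(mask);   // msb == 18
+           \endcode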
+ \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ (uint8_t)__builtin_clz + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __builtin_arm_ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __builtin_arm_usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. 
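+
+           Illustrative sketch (added for clarity; not part of the upstream CMSIS header): the
+           exclusive-access intrinsics declared above (__LDREXW/__STREXW) are normally used in a
+           retry loop to update a shared word atomically; counter is a hypothetical shared variable.
+           \code
+             extern volatile uint32_t counter;
+             uint32_t val;
+             do
+             {
+               val = __LDREXW(&counter);               // open exclusive access
+             } while (__STREXW(val + 1U, &counter));   // retry if another agent intervened
+           \endcode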
+ \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
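+
+           Illustrative sketch (added for clarity; not part of the upstream CMSIS header): saturation
+           is typically used to clamp a wide intermediate result into a narrower sample range, for
+           example a 32-bit accumulator into signed 16-bit audio samples.
+           \code
+             int32_t acc    = 40000;                      // outside the int16_t range
+             int16_t sample = (int16_t)__SSAT(acc, 16);   // clamped to 32767
+           \endcode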
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. 
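+
+           Illustrative sketch (added for clarity; not part of the upstream CMSIS header): the
+           load-acquire/store-release exclusives can implement a minimal spinlock; lock is a
+           hypothetical shared flag (0 = free, 1 = taken).
+           \code
+             extern volatile uint32_t lock;
+             while ((__LDAEX(&lock) != 0U) || (__STLEX(1U, &lock) != 0U))
+             {
+               // spin until the flag was observed free and our claim succeeded
+             }
+             // ... critical section ...
+             __STL(0U, &lock);   // release with store-release semantics
+           \endcode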
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + 
__ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqasx %0, %1, %2" : "=r" 
(result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#define __SSAT16(ARG1,ARG2) \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +#define __USAT16(ARG1,ARG2) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, 
uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), 
"r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +#if 0 +#define __PKHBT(ARG1,ARG2,ARG3) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __PKHTB(ARG1,ARG2,ARG3) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) +#endif + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#endif /* __CMSIS_ARMCLANG_H */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_compiler.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_compiler.h new file mode 100644 index 000000000..94212eb87 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_compiler.h @@ -0,0 +1,266 @@ +/**************************************************************************//** + * @file cmsis_compiler.h + * @brief CMSIS compiler generic header file + * @version V5.0.4 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_COMPILER_H +#define __CMSIS_COMPILER_H + +#include + +/* + * Arm Compiler 4/5 + */ +#if defined ( __CC_ARM ) + #include "cmsis_armcc.h" + + +/* + * Arm Compiler 6 (armclang) + */ +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #include "cmsis_armclang.h" + + +/* + * GNU Compiler + */ +#elif defined ( __GNUC__ ) + #include "cmsis_gcc.h" + + +/* + * IAR Compiler + */ +#elif defined ( __ICCARM__ ) + #include + + +/* + * TI Arm Compiler + */ +#elif defined ( __TI_ARM__ ) + #include + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + #define __NO_RETURN __attribute__((noreturn)) + #endif + #ifndef __USED + #define __USED __attribute__((used)) + #endif + #ifndef __WEAK + #define __WEAK __attribute__((weak)) + #endif + #ifndef __PACKED + #define __PACKED __attribute__((packed)) + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed)) + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed)) + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void*)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) + #endif + #ifndef __RESTRICT + #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. + #define __RESTRICT + #endif + + +/* + * TASKING Compiler + */ +#elif defined ( __TASKING__ ) + /* + * The CMSIS functions have been implemented as intrinsics in the compiler. + * Please use "carm -?i" to get an up to date list of all intrinsics, + * Including the CMSIS ones. 
+ */ + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + #define __NO_RETURN __attribute__((noreturn)) + #endif + #ifndef __USED + #define __USED __attribute__((used)) + #endif + #ifndef __WEAK + #define __WEAK __attribute__((weak)) + #endif + #ifndef __PACKED + #define __PACKED __packed__ + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __packed__ + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION union __packed__ + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + struct __packed__ T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #define __ALIGNED(x) __align(x) + #endif + #ifndef __RESTRICT + #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. + #define __RESTRICT + #endif + + +/* + * COSMIC Compiler + */ +#elif defined ( __CSMC__ ) + #include + + #ifndef __ASM + #define __ASM _asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + // NO RETURN is automatically detected hence no warning here + #define __NO_RETURN + #endif + #ifndef __USED + #warning No compiler specific solution for __USED. __USED is ignored. 
+ #define __USED + #endif + #ifndef __WEAK + #define __WEAK __weak + #endif + #ifndef __PACKED + #define __PACKED @packed + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT @packed struct + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION @packed union + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + @packed struct T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored. + #define __ALIGNED(x) + #endif + #ifndef __RESTRICT + #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. + #define __RESTRICT + #endif + + +#else + #error Unknown compiler. +#endif + + +#endif /* __CMSIS_COMPILER_H */ + diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_gcc.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_gcc.h new file mode 100644 index 000000000..2d9db15a5 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_gcc.h @@ -0,0 +1,2085 @@ +/**************************************************************************//** + * @file cmsis_gcc.h + * @brief CMSIS compiler GCC header file + * @version V5.0.4 + * @date 09. April 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_GCC_H +#define __CMSIS_GCC_H + +/* ignore some GCC warnings */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wsign-conversion" +#pragma GCC diagnostic ignored "-Wconversion" +#pragma GCC diagnostic ignored "-Wunused-parameter" + +/* Fallback for __has_builtin */ +#ifndef __has_builtin + #define __has_builtin(x) (0) +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. + Can only be executed in Privileged modes. 
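+
+           Illustrative sketch (added for clarity; not part of the upstream CMSIS header): the
+           simplest critical section disables and re-enables interrupts globally. Note that this
+           pattern unconditionally re-enables interrupts; a nesting-safe variant saves and restores
+           PRIMASK instead (see __get_PRIMASK/__set_PRIMASK below).
+           \code
+             __disable_irq();
+             // ... update state shared with interrupt handlers ...
+             __enable_irq();
+           \endcode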
+ */ +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). 
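+
+           Illustrative sketch (added for clarity; not part of the upstream CMSIS header): an RTOS
+           typically points PSP at the top of the first task's stack and then switches thread mode
+           to the process stack via CONTROL.SPSEL; task_stack and its size are hypothetical.
+           \code
+             extern uint32_t task_stack[256];
+             __set_PSP((uint32_t)&task_stack[256]);    // full-descending stack: start at the top
+             __set_CONTROL(__get_CONTROL() | 0x2U);    // CONTROL.SPSEL = 1: use PSP in thread mode
+             __ISB();                                  // ensure the new stack selection takes effect
+           \endcode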
+ \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) :: "memory"); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
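+
+           Illustrative sketch (added for clarity; not part of the upstream CMSIS header): saving and
+           restoring PRIMASK yields a critical section that can be nested safely, because the previous
+           masking state is reinstated instead of being unconditionally cleared.
+           \code
+             uint32_t primask = __get_PRIMASK();   // remember whether interrupts were already masked
+             __disable_irq();
+             // ... short critical section ...
+             __set_PRIMASK(primask);               // restore the previous state
+           \endcode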
+ \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) :: "memory"); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. 
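+
+           Illustrative sketch (added for clarity; not part of the upstream CMSIS header): unlike
+           PRIMASK, BASEPRI masks only interrupts at or below a chosen priority. Cortex-M devices
+           implement priorities in the most significant bits of the 8-bit field, so the value is
+           usually shifted by __NVIC_PRIO_BITS (provided by the device header; assumed here).
+           \code
+             __set_BASEPRI(5U << (8U - __NVIC_PRIO_BITS));   // mask priority 5 and all less urgent interrupts
+             // ... section protected from lower-priority interrupts ...
+             __set_BASEPRI(0U);                              // disable BASEPRI masking again
+           \endcode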
+ \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_get_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + return __builtin_arm_get_fpscr(); +#else + uint32_t result; + + __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); + return(result); +#endif +#else + return(0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_set_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + __builtin_arm_set_fpscr(fpscr); +#else + __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); +#endif +#else + (void)fpscr; +#endif +} + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. 
+ * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP() __ASM volatile ("nop") + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI() __ASM volatile ("wfi") + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE() __ASM volatile ("wfe") + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV() __ASM volatile ("sev") + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +__STATIC_FORCEINLINE void __ISB(void) +{ + __ASM volatile ("isb 0xF":::"memory"); +} + + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +__STATIC_FORCEINLINE void __DSB(void) +{ + __ASM volatile ("dsb 0xF":::"memory"); +} + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +__STATIC_FORCEINLINE void __DMB(void) +{ + __ASM volatile ("dmb 0xF":::"memory"); +} + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +{ +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + return __builtin_bswap32(value); +#else + uint32_t result; + + __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif +} + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +} + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. 
+ \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +{ +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + return (int16_t)__builtin_bswap16(value); +#else + int16_t result; + + __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif +} + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +{ + uint32_t result; + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) ); +#else + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ +#endif + return result; +} + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ (uint8_t)__builtin_clz + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return(result); +} + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); + return(result); +} + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +__STATIC_FORCEINLINE void __CLREX(void) +{ + __ASM volatile ("clrex" ::: "memory"); +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT(ARG1,ARG2) \ +__extension__ \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT(ARG1,ARG2) \ + __extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); + return(result); +} + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} 
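+
+/* Editorial illustration (added in this document for clarity; not part of the
+   upstream CMSIS cmsis_gcc.h): the __ARM_FEATURE_DSP intrinsics in this group
+   operate lane-wise on packed 8-bit or 16-bit values held in one 32-bit word.
+   A minimal usage sketch, assuming a core with the DSP extension (e.g. Cortex-M4/M7):
+
+     uint32_t a   = 0x01020304U;                         // four packed uint8_t lanes
+     uint32_t b   = 0x10203040U;
+     uint32_t sum = __UADD8(a, b);                       // per-lane add     -> 0x11223344
+     uint32_t sat = __UQADD8(0xF0F0F0F0U, 0x20202020U);  // lanes saturate   -> 0xFFFFFFFF
+     uint32_t hw  = __QADD16(0x7FFF0001U, 0x00010001U);  // signed halfwords -> 0x7FFF0002
+
+   In the last example the upper halfword saturates at 0x7FFF while the lower
+   halfword simply adds to 0x0002. The remaining 8-bit/16-bit variants follow the
+   same packed-lane pattern. */
+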
+ +__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, 
uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#define __SSAT16(ARG1,ARG2) \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +#define __USAT16(ARG1,ARG2) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif 
+ + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +#if 0 +#define __PKHBT(ARG1,ARG2,ARG3) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __PKHTB(ARG1,ARG2,ARG3) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : 
"r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) +#endif + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#pragma GCC diagnostic pop + +#endif /* __CMSIS_GCC_H */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_iccarm.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_iccarm.h new file mode 100644 index 000000000..11c4af0eb --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_iccarm.h @@ -0,0 +1,935 @@ +/**************************************************************************//** + * @file cmsis_iccarm.h + * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file + * @version V5.0.7 + * @date 19. June 2018 + ******************************************************************************/ + +//------------------------------------------------------------------------------ +// +// Copyright (c) 2017-2018 IAR Systems +// +// Licensed under the Apache License, Version 2.0 (the "License") +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//------------------------------------------------------------------------------ + + +#ifndef __CMSIS_ICCARM_H__ +#define __CMSIS_ICCARM_H__ + +#ifndef __ICCARM__ + #error This file should only be compiled by ICCARM +#endif + +#pragma system_include + +#define __IAR_FT _Pragma("inline=forced") __intrinsic + +#if (__VER__ >= 8000000) + #define __ICCARM_V8 1 +#else + #define __ICCARM_V8 0 +#endif + +#ifndef __ALIGNED + #if __ICCARM_V8 + #define __ALIGNED(x) __attribute__((aligned(x))) + #elif (__VER__ >= 7080000) + /* Needs IAR language extensions */ + #define __ALIGNED(x) __attribute__((aligned(x))) + #else + #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. + #define __ALIGNED(x) + #endif +#endif + + +/* Define compiler macros for CPU architecture, used in CMSIS 5. 
+ */ +#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ || __ARM_ARCH_8M_BASE__ || __ARM_ARCH_8M_MAIN__ +/* Macros already defined */ +#else + #if defined(__ARM8M_MAINLINE__) || defined(__ARM8EM_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #elif defined(__ARM8M_BASELINE__) + #define __ARM_ARCH_8M_BASE__ 1 + #elif defined(__ARM_ARCH_PROFILE) && __ARM_ARCH_PROFILE == 'M' + #if __ARM_ARCH == 6 + #define __ARM_ARCH_6M__ 1 + #elif __ARM_ARCH == 7 + #if __ARM_FEATURE_DSP + #define __ARM_ARCH_7EM__ 1 + #else + #define __ARM_ARCH_7M__ 1 + #endif + #endif /* __ARM_ARCH */ + #endif /* __ARM_ARCH_PROFILE == 'M' */ +#endif + +/* Alternativ core deduction for older ICCARM's */ +#if !defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__) && \ + !defined(__ARM_ARCH_8M_BASE__) && !defined(__ARM_ARCH_8M_MAIN__) + #if defined(__ARM6M__) && (__CORE__ == __ARM6M__) + #define __ARM_ARCH_6M__ 1 + #elif defined(__ARM7M__) && (__CORE__ == __ARM7M__) + #define __ARM_ARCH_7M__ 1 + #elif defined(__ARM7EM__) && (__CORE__ == __ARM7EM__) + #define __ARM_ARCH_7EM__ 1 + #elif defined(__ARM8M_BASELINE__) && (__CORE == __ARM8M_BASELINE__) + #define __ARM_ARCH_8M_BASE__ 1 + #elif defined(__ARM8M_MAINLINE__) && (__CORE == __ARM8M_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #elif defined(__ARM8EM_MAINLINE__) && (__CORE == __ARM8EM_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #else + #error "Unknown target." + #endif +#endif + + + +#if defined(__ARM_ARCH_6M__) && __ARM_ARCH_6M__==1 + #define __IAR_M0_FAMILY 1 +#elif defined(__ARM_ARCH_8M_BASE__) && __ARM_ARCH_8M_BASE__==1 + #define __IAR_M0_FAMILY 1 +#else + #define __IAR_M0_FAMILY 0 +#endif + + +#ifndef __ASM + #define __ASM __asm +#endif + +#ifndef __INLINE + #define __INLINE inline +#endif + +#ifndef __NO_RETURN + #if __ICCARM_V8 + #define __NO_RETURN __attribute__((__noreturn__)) + #else + #define __NO_RETURN _Pragma("object_attribute=__noreturn") + #endif +#endif + +#ifndef __PACKED + #if __ICCARM_V8 + #define __PACKED __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED __packed + #endif +#endif + +#ifndef __PACKED_STRUCT + #if __ICCARM_V8 + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED_STRUCT __packed struct + #endif +#endif + +#ifndef __PACKED_UNION + #if __ICCARM_V8 + #define __PACKED_UNION union __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED_UNION __packed union + #endif +#endif + +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif + +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif + +#ifndef __FORCEINLINE + #define __FORCEINLINE _Pragma("inline=forced") +#endif + +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE +#endif + +#ifndef __UNALIGNED_UINT16_READ +#pragma language=save +#pragma language=extended +__IAR_FT uint16_t __iar_uint16_read(void const *ptr) +{ + return *(__packed uint16_t*)(ptr); +} +#pragma language=restore +#define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) +#endif + + +#ifndef __UNALIGNED_UINT16_WRITE +#pragma language=save +#pragma language=extended +__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) +{ + *(__packed uint16_t*)(ptr) = val;; +} +#pragma language=restore +#define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32_READ +#pragma language=save +#pragma 
language=extended +__IAR_FT uint32_t __iar_uint32_read(void const *ptr) +{ + return *(__packed uint32_t*)(ptr); +} +#pragma language=restore +#define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) +#endif + +#ifndef __UNALIGNED_UINT32_WRITE +#pragma language=save +#pragma language=extended +__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) +{ + *(__packed uint32_t*)(ptr) = val;; +} +#pragma language=restore +#define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32 /* deprecated */ +#pragma language=save +#pragma language=extended +__packed struct __iar_u32 { uint32_t v; }; +#pragma language=restore +#define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v) +#endif + +#ifndef __USED + #if __ICCARM_V8 + #define __USED __attribute__((used)) + #else + #define __USED _Pragma("__root") + #endif +#endif + +#ifndef __WEAK + #if __ICCARM_V8 + #define __WEAK __attribute__((weak)) + #else + #define __WEAK _Pragma("__weak") + #endif +#endif + + +#ifndef __ICCARM_INTRINSICS_VERSION__ + #define __ICCARM_INTRINSICS_VERSION__ 0 +#endif + +#if __ICCARM_INTRINSICS_VERSION__ == 2 + + #if defined(__CLZ) + #undef __CLZ + #endif + #if defined(__REVSH) + #undef __REVSH + #endif + #if defined(__RBIT) + #undef __RBIT + #endif + #if defined(__SSAT) + #undef __SSAT + #endif + #if defined(__USAT) + #undef __USAT + #endif + + #include "iccarm_builtin.h" + + #define __disable_fault_irq __iar_builtin_disable_fiq + #define __disable_irq __iar_builtin_disable_interrupt + #define __enable_fault_irq __iar_builtin_enable_fiq + #define __enable_irq __iar_builtin_enable_interrupt + #define __arm_rsr __iar_builtin_rsr + #define __arm_wsr __iar_builtin_wsr + + + #define __get_APSR() (__arm_rsr("APSR")) + #define __get_BASEPRI() (__arm_rsr("BASEPRI")) + #define __get_CONTROL() (__arm_rsr("CONTROL")) + #define __get_FAULTMASK() (__arm_rsr("FAULTMASK")) + + #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + #define __get_FPSCR() (__arm_rsr("FPSCR")) + #define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", (VALUE))) + #else + #define __get_FPSCR() ( 0 ) + #define __set_FPSCR(VALUE) ((void)VALUE) + #endif + + #define __get_IPSR() (__arm_rsr("IPSR")) + #define __get_MSP() (__arm_rsr("MSP")) + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + #define __get_MSPLIM() (0U) + #else + #define __get_MSPLIM() (__arm_rsr("MSPLIM")) + #endif + #define __get_PRIMASK() (__arm_rsr("PRIMASK")) + #define __get_PSP() (__arm_rsr("PSP")) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __get_PSPLIM() (0U) + #else + #define __get_PSPLIM() (__arm_rsr("PSPLIM")) + #endif + + #define __get_xPSR() (__arm_rsr("xPSR")) + + #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE))) + #define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE))) + #define __set_CONTROL(VALUE) (__arm_wsr("CONTROL", (VALUE))) + #define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE))) + #define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE))) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure 
MSPLIM is RAZ/WI + #define __set_MSPLIM(VALUE) ((void)(VALUE)) + #else + #define __set_MSPLIM(VALUE) (__arm_wsr("MSPLIM", (VALUE))) + #endif + #define __set_PRIMASK(VALUE) (__arm_wsr("PRIMASK", (VALUE))) + #define __set_PSP(VALUE) (__arm_wsr("PSP", (VALUE))) + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __set_PSPLIM(VALUE) ((void)(VALUE)) + #else + #define __set_PSPLIM(VALUE) (__arm_wsr("PSPLIM", (VALUE))) + #endif + + #define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS")) + #define __TZ_set_CONTROL_NS(VALUE) (__arm_wsr("CONTROL_NS", (VALUE))) + #define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS")) + #define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE))) + #define __TZ_get_MSP_NS() (__arm_rsr("MSP_NS")) + #define __TZ_set_MSP_NS(VALUE) (__arm_wsr("MSP_NS", (VALUE))) + #define __TZ_get_SP_NS() (__arm_rsr("SP_NS")) + #define __TZ_set_SP_NS(VALUE) (__arm_wsr("SP_NS", (VALUE))) + #define __TZ_get_PRIMASK_NS() (__arm_rsr("PRIMASK_NS")) + #define __TZ_set_PRIMASK_NS(VALUE) (__arm_wsr("PRIMASK_NS", (VALUE))) + #define __TZ_get_BASEPRI_NS() (__arm_rsr("BASEPRI_NS")) + #define __TZ_set_BASEPRI_NS(VALUE) (__arm_wsr("BASEPRI_NS", (VALUE))) + #define __TZ_get_FAULTMASK_NS() (__arm_rsr("FAULTMASK_NS")) + #define __TZ_set_FAULTMASK_NS(VALUE)(__arm_wsr("FAULTMASK_NS", (VALUE))) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __TZ_get_PSPLIM_NS() (0U) + #define __TZ_set_PSPLIM_NS(VALUE) ((void)(VALUE)) + #else + #define __TZ_get_PSPLIM_NS() (__arm_rsr("PSPLIM_NS")) + #define __TZ_set_PSPLIM_NS(VALUE) (__arm_wsr("PSPLIM_NS", (VALUE))) + #endif + + #define __TZ_get_MSPLIM_NS() (__arm_rsr("MSPLIM_NS")) + #define __TZ_set_MSPLIM_NS(VALUE) (__arm_wsr("MSPLIM_NS", (VALUE))) + + #define __NOP __iar_builtin_no_operation + + #define __CLZ __iar_builtin_CLZ + #define __CLREX __iar_builtin_CLREX + + #define __DMB __iar_builtin_DMB + #define __DSB __iar_builtin_DSB + #define __ISB __iar_builtin_ISB + + #define __LDREXB __iar_builtin_LDREXB + #define __LDREXH __iar_builtin_LDREXH + #define __LDREXW __iar_builtin_LDREX + + #define __RBIT __iar_builtin_RBIT + #define __REV __iar_builtin_REV + #define __REV16 __iar_builtin_REV16 + + __IAR_FT int16_t __REVSH(int16_t val) + { + return (int16_t) __iar_builtin_REVSH(val); + } + + #define __ROR __iar_builtin_ROR + #define __RRX __iar_builtin_RRX + + #define __SEV __iar_builtin_SEV + + #if !__IAR_M0_FAMILY + #define __SSAT __iar_builtin_SSAT + #endif + + #define __STREXB __iar_builtin_STREXB + #define __STREXH __iar_builtin_STREXH + #define __STREXW __iar_builtin_STREX + + #if !__IAR_M0_FAMILY + #define __USAT __iar_builtin_USAT + #endif + + #define __WFE __iar_builtin_WFE + #define __WFI __iar_builtin_WFI + + #if __ARM_MEDIA__ + #define __SADD8 __iar_builtin_SADD8 + #define __QADD8 __iar_builtin_QADD8 + #define __SHADD8 __iar_builtin_SHADD8 + #define __UADD8 __iar_builtin_UADD8 + #define __UQADD8 __iar_builtin_UQADD8 + #define __UHADD8 __iar_builtin_UHADD8 + #define __SSUB8 __iar_builtin_SSUB8 + #define __QSUB8 __iar_builtin_QSUB8 + #define __SHSUB8 __iar_builtin_SHSUB8 + #define __USUB8 __iar_builtin_USUB8 + #define __UQSUB8 __iar_builtin_UQSUB8 + #define __UHSUB8 __iar_builtin_UHSUB8 + #define __SADD16 __iar_builtin_SADD16 + #define __QADD16 
__iar_builtin_QADD16 + #define __SHADD16 __iar_builtin_SHADD16 + #define __UADD16 __iar_builtin_UADD16 + #define __UQADD16 __iar_builtin_UQADD16 + #define __UHADD16 __iar_builtin_UHADD16 + #define __SSUB16 __iar_builtin_SSUB16 + #define __QSUB16 __iar_builtin_QSUB16 + #define __SHSUB16 __iar_builtin_SHSUB16 + #define __USUB16 __iar_builtin_USUB16 + #define __UQSUB16 __iar_builtin_UQSUB16 + #define __UHSUB16 __iar_builtin_UHSUB16 + #define __SASX __iar_builtin_SASX + #define __QASX __iar_builtin_QASX + #define __SHASX __iar_builtin_SHASX + #define __UASX __iar_builtin_UASX + #define __UQASX __iar_builtin_UQASX + #define __UHASX __iar_builtin_UHASX + #define __SSAX __iar_builtin_SSAX + #define __QSAX __iar_builtin_QSAX + #define __SHSAX __iar_builtin_SHSAX + #define __USAX __iar_builtin_USAX + #define __UQSAX __iar_builtin_UQSAX + #define __UHSAX __iar_builtin_UHSAX + #define __USAD8 __iar_builtin_USAD8 + #define __USADA8 __iar_builtin_USADA8 + #define __SSAT16 __iar_builtin_SSAT16 + #define __USAT16 __iar_builtin_USAT16 + #define __UXTB16 __iar_builtin_UXTB16 + #define __UXTAB16 __iar_builtin_UXTAB16 + #define __SXTB16 __iar_builtin_SXTB16 + #define __SXTAB16 __iar_builtin_SXTAB16 + #define __SMUAD __iar_builtin_SMUAD + #define __SMUADX __iar_builtin_SMUADX + #define __SMMLA __iar_builtin_SMMLA + #define __SMLAD __iar_builtin_SMLAD + #define __SMLADX __iar_builtin_SMLADX + #define __SMLALD __iar_builtin_SMLALD + #define __SMLALDX __iar_builtin_SMLALDX + #define __SMUSD __iar_builtin_SMUSD + #define __SMUSDX __iar_builtin_SMUSDX + #define __SMLSD __iar_builtin_SMLSD + #define __SMLSDX __iar_builtin_SMLSDX + #define __SMLSLD __iar_builtin_SMLSLD + #define __SMLSLDX __iar_builtin_SMLSLDX + #define __SEL __iar_builtin_SEL + #define __QADD __iar_builtin_QADD + #define __QSUB __iar_builtin_QSUB + #define __PKHBT __iar_builtin_PKHBT + #define __PKHTB __iar_builtin_PKHTB + #endif + +#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + + #if __IAR_M0_FAMILY + /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. */ + #define __CLZ __cmsis_iar_clz_not_active + #define __SSAT __cmsis_iar_ssat_not_active + #define __USAT __cmsis_iar_usat_not_active + #define __RBIT __cmsis_iar_rbit_not_active + #define __get_APSR __cmsis_iar_get_APSR_not_active + #endif + + + #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) + #define __get_FPSCR __cmsis_iar_get_FPSR_not_active + #define __set_FPSCR __cmsis_iar_set_FPSR_not_active + #endif + + #ifdef __INTRINSICS_INCLUDED + #error intrinsics.h is already included previously! + #endif + + #include <intrinsics.h> + + #if __IAR_M0_FAMILY + /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. 
*/ + #undef __CLZ + #undef __SSAT + #undef __USAT + #undef __RBIT + #undef __get_APSR + + __STATIC_INLINE uint8_t __CLZ(uint32_t data) + { + if (data == 0U) { return 32U; } + + uint32_t count = 0U; + uint32_t mask = 0x80000000U; + + while ((data & mask) == 0U) + { + count += 1U; + mask = mask >> 1U; + } + return count; + } + + __STATIC_INLINE uint32_t __RBIT(uint32_t v) + { + uint8_t sc = 31U; + uint32_t r = v; + for (v >>= 1U; v; v >>= 1U) + { + r <<= 1U; + r |= v & 1U; + sc--; + } + return (r << sc); + } + + __STATIC_INLINE uint32_t __get_APSR(void) + { + uint32_t res; + __asm("MRS %0,APSR" : "=r" (res)); + return res; + } + + #endif + + #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) + #undef __get_FPSCR + #undef __set_FPSCR + #define __get_FPSCR() (0) + #define __set_FPSCR(VALUE) ((void)VALUE) + #endif + + #pragma diag_suppress=Pe940 + #pragma diag_suppress=Pe177 + + #define __enable_irq __enable_interrupt + #define __disable_irq __disable_interrupt + #define __NOP __no_operation + + #define __get_xPSR __get_PSR + + #if (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0) + + __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr) + { + return __LDREX((unsigned long *)ptr); + } + + __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr) + { + return __STREX(value, (unsigned long *)ptr); + } + #endif + + + /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */ + #if (__CORTEX_M >= 0x03) + + __IAR_FT uint32_t __RRX(uint32_t value) + { + uint32_t result; + __ASM("RRX %0, %1" : "=r"(result) : "r" (value) : "cc"); + return(result); + } + + __IAR_FT void __set_BASEPRI_MAX(uint32_t value) + { + __asm volatile("MSR BASEPRI_MAX,%0"::"r" (value)); + } + + + #define __enable_fault_irq __enable_fiq + #define __disable_fault_irq __disable_fiq + + + #endif /* (__CORTEX_M >= 0x03) */ + + __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2) + { + return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2)); + } + + #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + + __IAR_FT uint32_t __get_MSPLIM(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,MSPLIM" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __set_MSPLIM(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR MSPLIM,%0" :: "r" (value)); + #endif + } + + __IAR_FT uint32_t __get_PSPLIM(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,PSPLIM" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __set_PSPLIM(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR PSPLIM,%0" :: "r" (value)); + #endif 
+ } + + __IAR_FT uint32_t __TZ_get_CONTROL_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,CONTROL_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_CONTROL_NS(uint32_t value) + { + __asm volatile("MSR CONTROL_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PSP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,PSP_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_PSP_NS(uint32_t value) + { + __asm volatile("MSR PSP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_MSP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,MSP_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_MSP_NS(uint32_t value) + { + __asm volatile("MSR MSP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_SP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,SP_NS" : "=r" (res)); + return res; + } + __IAR_FT void __TZ_set_SP_NS(uint32_t value) + { + __asm volatile("MSR SP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PRIMASK_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,PRIMASK_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_PRIMASK_NS(uint32_t value) + { + __asm volatile("MSR PRIMASK_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,BASEPRI_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_BASEPRI_NS(uint32_t value) + { + __asm volatile("MSR BASEPRI_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,FAULTMASK_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_FAULTMASK_NS(uint32_t value) + { + __asm volatile("MSR FAULTMASK_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PSPLIM_NS(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,PSPLIM_NS" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __TZ_set_PSPLIM_NS(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR PSPLIM_NS,%0" :: "r" (value)); + #endif + } + + __IAR_FT uint32_t __TZ_get_MSPLIM_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,MSPLIM_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_MSPLIM_NS(uint32_t value) + { + __asm volatile("MSR MSPLIM_NS,%0" :: "r" (value)); + } + + #endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ + +#endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + +#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value)) + +#if __IAR_M0_FAMILY + __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) + { + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; + } + + __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) + { + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; + } +#endif + +#if (__CORTEX_M >= 0x03) /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and 
core_cm4.h. */ + + __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr) + { + uint32_t res; + __ASM("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr) + { + uint32_t res; + __ASM("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDRT(volatile uint32_t *addr) + { + uint32_t res; + __ASM("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return res; + } + + __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr) + { + __ASM("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + } + + __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr) + { + __ASM("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + } + + __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr) + { + __ASM("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory"); + } + +#endif /* (__CORTEX_M >= 0x03) */ + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + + + __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDA(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr) + { + __ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr) + { + __ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr) + { + __ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXB %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXH %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEX %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + +#endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ + +#undef __IAR_FT +#undef __IAR_M0_FAMILY +#undef __ICCARM_V8 + +#pragma diag_default=Pe940 +#pragma diag_default=Pe177 + +#endif /* __CMSIS_ICCARM_H__ */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_version.h 
b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_version.h new file mode 100644 index 000000000..660f612aa --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/cmsis_version.h @@ -0,0 +1,39 @@ +/**************************************************************************//** + * @file cmsis_version.h + * @brief CMSIS Core(M) Version definitions + * @version V5.0.2 + * @date 19. April 2017 + ******************************************************************************/ +/* + * Copyright (c) 2009-2017 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CMSIS_VERSION_H +#define __CMSIS_VERSION_H + +/* CMSIS Version definitions */ +#define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */ +#define __CM_CMSIS_VERSION_SUB ( 1U) /*!< [15:0] CMSIS Core(M) sub version */ +#define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \ + __CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */ +#endif diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_armv8mbl.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_armv8mbl.h new file mode 100644 index 000000000..251e4ede3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_armv8mbl.h @@ -0,0 +1,1918 @@ +/**************************************************************************//** + * @file core_armv8mbl.h + * @brief CMSIS Armv8-M Baseline Core Peripheral Access Layer Header File + * @version V5.0.7 + * @date 22. June 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_ARMV8MBL_H_GENERIC +#define __CORE_ARMV8MBL_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMv8MBL + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS definitions */ +#define __ARMv8MBL_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv8MBL_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv8MBL_CMSIS_VERSION ((__ARMv8MBL_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv8MBL_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M ( 2U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MBL_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV8MBL_H_DEPENDANT +#define __CORE_ARMV8MBL_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv8MBL_REV + #define __ARMv8MBL_REV 0x0000U + #warning "__ARMv8MBL_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" 
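A minimal sketch (not part of the generated ST/ARM sources): the __CHECK_DEVICE_DEFINES block above only supplies defaults and emits #warnings; a real device header is expected to define these symbols itself before pulling in core_armv8mbl.h. The values below are illustrative only and would come from the silicon vendor's datasheet.
#define __ARMv8MBL_REV            0x0000U   /* core revision of the device (illustrative) */
#define __FPU_PRESENT             0U        /* Armv8-M Baseline has no FPU */
#define __MPU_PRESENT             1U        /* this example device implements an MPU */
#define __SAUREGION_PRESENT       0U        /* no SAU regions on this example part */
#define __VTOR_PRESENT            1U        /* VTOR is implemented */
#define __NVIC_PRIO_BITS          2U        /* interrupt priority bits actually implemented */
#define __Vendor_SysTickConfig    0U        /* use the standard CMSIS SysTick configuration */
#include "core_armv8mbl.h"                  /* with the defines above, the defaults and #warnings stay silent */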
+ #endif + + #ifndef __ETM_PRESENT + #define __ETM_PRESENT 0U + #warning "__ETM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MTB_PRESENT + #define __MTB_PRESENT 0U + #warning "__MTB_PRESENT not defined in device header file; using default!" + #endif + +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv8MBL */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint32_t IPR[124U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING 
Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ 
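A minimal usage sketch for the AIRCR definitions above (not part of the CMSIS sources; the helper name is illustrative, and SCB, __DSB and __NOP are the CMSIS symbols defined elsewhere in these headers). A system reset request is written through AIRCR together with the architectural 0x05FA VECTKEY, roughly:
static inline void example_request_system_reset(void)   /* name is illustrative only */
{
  __DSB();                                          /* complete outstanding memory accesses first */
  SCB->AIRCR = (0x5FAUL << SCB_AIRCR_VECTKEY_Pos)   /* VECTKEY 0x05FA is required for the write to take effect */
             | SCB_AIRCR_SYSRESETREQ_Msk;           /* assert the system reset request */
  __DSB();                                          /* make sure the write completes */
  for (;;) { __NOP(); }                             /* wait for the reset to occur */
}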
+#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + uint32_t RESERVED0[6U]; + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT 
CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. 
Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + uint32_t RESERVED0[7U]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: EN Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: EN Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define 
MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#endif +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined 
(__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED4[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR 
Position */
+#define CoreDebug_DCRSR_REGWnR_Msk         (1UL << CoreDebug_DCRSR_REGWnR_Pos)            /*!< CoreDebug DCRSR: REGWnR Mask */
+
+#define CoreDebug_DCRSR_REGSEL_Pos          0U                                             /*!< CoreDebug DCRSR: REGSEL Position */
+#define CoreDebug_DCRSR_REGSEL_Msk         (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/)      /*!< CoreDebug DCRSR: REGSEL Mask */
+
+/* Debug Exception and Monitor Control Register Definitions */
+#define CoreDebug_DEMCR_DWTENA_Pos          24U                                            /*!< CoreDebug DEMCR: DWTENA Position */
+#define CoreDebug_DEMCR_DWTENA_Msk         (1UL << CoreDebug_DEMCR_DWTENA_Pos)             /*!< CoreDebug DEMCR: DWTENA Mask */
+
+#define CoreDebug_DEMCR_VC_HARDERR_Pos      10U                                            /*!< CoreDebug DEMCR: VC_HARDERR Position */
+#define CoreDebug_DEMCR_VC_HARDERR_Msk     (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos)         /*!< CoreDebug DEMCR: VC_HARDERR Mask */
+
+#define CoreDebug_DEMCR_VC_CORERESET_Pos    0U                                             /*!< CoreDebug DEMCR: VC_CORERESET Position */
+#define CoreDebug_DEMCR_VC_CORERESET_Msk   (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/)   /*!< CoreDebug DEMCR: VC_CORERESET Mask */
+
+/* Debug Authentication Control Register Definitions */
+#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos  3U                                             /*!< CoreDebug DAUTHCTRL: INTSPNIDEN Position */
+#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos)     /*!< CoreDebug DAUTHCTRL: INTSPNIDEN Mask */
+
+#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos  2U                                             /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */
+#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos)     /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */
+
+#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos   1U                                             /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */
+#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk  (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos)      /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */
+
+#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos   0U                                             /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */
+#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk  (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/)  /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */
+
+/* Debug Security Control and Status Register Definitions */
+#define CoreDebug_DSCSR_CDS_Pos             16U                                            /*!< CoreDebug DSCSR: CDS Position */
+#define CoreDebug_DSCSR_CDS_Msk            (1UL << CoreDebug_DSCSR_CDS_Pos)                /*!< CoreDebug DSCSR: CDS Mask */
+
+#define CoreDebug_DSCSR_SBRSEL_Pos          1U                                             /*!< CoreDebug DSCSR: SBRSEL Position */
+#define CoreDebug_DSCSR_SBRSEL_Msk         (1UL << CoreDebug_DSCSR_SBRSEL_Pos)             /*!< CoreDebug DSCSR: SBRSEL Mask */
+
+#define CoreDebug_DSCSR_SBRSELEN_Pos        0U                                             /*!< CoreDebug DSCSR: SBRSELEN Position */
+#define CoreDebug_DSCSR_SBRSELEN_Msk       (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/)       /*!< CoreDebug DSCSR: SBRSELEN Mask */
+
+/*@} end of group CMSIS_CoreDebug */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field. This parameter is interpreted as a uint32_t type.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register. This parameter is interpreted as a uint32_t type.
+  \return           Masked and shifted bit field value.
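+
+  \par           Usage sketch (editor's illustration, not part of the original CMSIS header;
+                 SCB_CPUID_PARTNO and SCB_AIRCR_VECTKEY are field macros defined elsewhere in this file)
+  \code
+  uint32_t partno = _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID);    /* extract the PARTNO bit field from CPUID      */
+  uint32_t key    = _VAL2FLD(SCB_AIRCR_VECTKEY, 0x5FAUL);      /* position a value into the AIRCR VECTKEY field */
+  \endcode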
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and 
Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + 
\details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. 
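+
+  \par     Usage sketch (editor's illustration, not part of the original CMSIS header;
+           TIM2_IRQn stands for any device-specific interrupt number from the device header)
+  \code
+  NVIC_SetPriority(TIM2_IRQn, 2UL);               /* maps to __NVIC_SetPriority()                  */
+  uint32_t prio = NVIC_GetPriority(TIM2_IRQn);    /* maps to __NVIC_GetPriority(), reads back 2    */
+  \endcode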
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. 
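+
+  \par     Usage sketch (editor's illustration, not part of the original CMSIS header; TIM2_IRQn and
+           TIM2_IRQHandler_RAM stand for hypothetical device/application specific names)
+  \code
+  NVIC_SetVector(TIM2_IRQn, (uint32_t)TIM2_IRQHandler_RAM);   /* install the handler in the SRAM vector table */
+  NVIC_EnableIRQ(TIM2_IRQn);
+  \endcode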
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC_NS->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB_NS->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB_NS->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. 
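+
+  \par     Usage sketch (editor's illustration, not part of the original CMSIS header; assumes Secure
+           state and a hypothetical device-specific TIM2_IRQn routed to the Non-secure state)
+  \code
+  TZ_NVIC_SetPriority_NS(TIM2_IRQn, 1UL);                  /* set the Non-secure priority from Secure state */
+  uint32_t ns_prio = TZ_NVIC_GetPriority_NS(TIM2_IRQn);    /* read it back                                   */
+  \endcode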
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB_NS->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
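+
+  \par     Usage sketch (editor's illustration, not part of the original CMSIS header; SystemCoreClock
+           is the CMSIS system clock variable provided by the device support code)
+  \code
+  if (SysTick_Config(SystemCoreClock / 1000U) != 0U)   /* request a 1 ms tick                              */
+  {
+    /* reload value does not fit into the 24-bit SysTick counter: handle the error here                    */
+  }
+  \endcode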
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MBL_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_armv8mml.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_armv8mml.h new file mode 100644 index 000000000..3a3148ea3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_armv8mml.h @@ -0,0 +1,2927 @@ +/**************************************************************************//** + * @file core_armv8mml.h + * @brief CMSIS Armv8-M Mainline Core Peripheral Access Layer Header File + * @version V5.0.7 + * @date 06. July 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef __CORE_ARMV8MML_H_GENERIC
+#define __CORE_ARMV8MML_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+  \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions
+  CMSIS violates the following MISRA-C:2004 rules:
+
+   \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMv8MML + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS Armv8MML definitions */ +#define __ARMv8MML_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv8MML_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv8MML_CMSIS_VERSION ((__ARMv8MML_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv8MML_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (81U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) 
+ #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MML_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV8MML_H_DEPENDANT +#define __CORE_ARMV8MML_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv8MML_REV + #define __ARMv8MML_REV 0x0000U + #warning "__ARMv8MML_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv8MML */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
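+
+         Editor's note (illustrative, not part of the original CMSIS header): application code normally
+         reaches these registers through the NVIC pointer defined further below, for example to raise a
+         software-triggered interrupt (TIM2_IRQn is a hypothetical device-specific interrupt number):
+  \code
+  NVIC->STIR = (uint32_t)TIM2_IRQn & NVIC_STIR_INTID_Msk;
+  \endcode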
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED3[92U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + uint32_t RESERVED7[6U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ + __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ + __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ + __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */ + uint32_t RESERVED8[1U]; + __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define 
SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define 
SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define 
SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): 
MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) 
/*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << 
SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* Instruction Tightly-Coupled Memory Control Register Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ +#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ + +#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ +#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define 
SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */ +#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */ + +#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */ +#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/* AHBP Control Register Definitions */ +#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */ +#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */ + +#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */ +#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */ + +/* L1 Cache Control Register Definitions */ +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */ +#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */ + +#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ +#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ + +/* AHBS Control Register Definitions */ +#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ +#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ + +#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ +#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ + +#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ +#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ + +/* Auxiliary Bus Fault Status Register Definitions */ +#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ +#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */ + +#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/ +#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */ + +#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/ +#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */ + +#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/ +#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */ + +#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/ +#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */ + +#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/ +#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. 
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: 
TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 
(R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U 
/*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. 
Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) 
/*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: 
privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define 
FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED4[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U 
/*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << 
CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
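+ Illustrative usage of the definitions that follow: application code normally accesses the core peripherals through these struct pointers, for example reading SCB->CPUID to identify the core or polling SysTick->VAL, using the register names declared in this file.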
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} 
*/ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the 
required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
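+ \note Illustrative call (the IRQ name is a device-specific example): NVIC_SetPendingIRQ(WWDG_IRQn); — via the NVIC_SetPendingIRQ mapping above, this marks the interrupt pending so its handler runs once priority and enable state allow.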
+ */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
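+ \note Illustrative round trip with example values: given uint32_t preempt, sub; NVIC_DecodePriority(NVIC_EncodePriority(0x05UL, 1UL, 0UL), 0x05UL, &preempt, &sub); recovers the preemptive priority and subpriority encoded above.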
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
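+ Illustrative use of the function below: if (SCB_GetFPUType() != 0U) { /* FPU present; 2U indicates double + single precision */ }.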
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
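+ \note Illustrative call from secure state (SystemCoreClock is the usual CMSIS core clock variable): TZ_SysTick_Config_NS(SystemCoreClock / 1000U); configures a 1 ms non-secure tick.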
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MML_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm0.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm0.h new file mode 100644 index 000000000..f929bba07 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm0.h @@ -0,0 +1,949 @@ +/**************************************************************************//** + * @file core_cm0.h + * @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File + * @version V5.0.5 + * @date 28. May 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. 
All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM0_H_GENERIC +#define __CORE_CM0_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M0 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM0 definitions */ +#define __CM0_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM0_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM0_CMSIS_VERSION ((__CM0_CMSIS_VERSION_MAIN << 16U) | \ + __CM0_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (0U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0_H_DEPENDANT +#define __CORE_CM0_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0_REV + #define __CM0_REV 0x0000U + #warning "__CM0_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
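+ For example, a read-only status register is declared as __IM uint32_t and a read/write control register as __IOM uint32_t in the peripheral structures that follow (illustrative).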
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M0 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. 
[0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB 
AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define 
SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M0 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
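// Editor's note: illustrative usage sketch, not part of the generated CMSIS
// sources. The _VAL2FLD/_FLD2VAL helpers above combine a register value with a
// matching xxx_Pos/xxx_Msk pair, e.g. to decode fields of SCB->CPUID or to
// assemble a keyed AIRCR write.
static inline uint32_t cpuid_part_number(void)
{
  return _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID);   // 0xC20 is expected for a Cortex-M0
}

static inline uint32_t aircr_reset_value(void)
{
  return _VAL2FLD(SCB_AIRCR_VECTKEY, 0x5FAUL)      // VECTKEY must be written as 0x05FA
       | SCB_AIRCR_SYSRESETREQ_Msk;                // request a system reset
}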
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. 
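// Editor's note: illustrative usage sketch, not part of the generated CMSIS
// sources. Device code normally reaches these helpers through the NVIC_* macros
// mapped above; EXTI0_IRQn is a placeholder name from a hypothetical device header.
void exti0_irq_start(void)
{
  NVIC_ClearPendingIRQ(EXTI0_IRQn);   // drop any request latched while the IRQ was disabled
  NVIC_EnableIRQ(EXTI0_IRQn);         // the EXTI0 handler may now preempt thread code
}

void exti0_irq_stop(void)
{
  NVIC_DisableIRQ(EXTI0_IRQn);        // the __DSB/__ISB inside ensure the disable has taken effect
}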
+ \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). 
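// Editor's note: illustrative usage sketch, not part of the generated CMSIS
// sources. With __NVIC_PRIO_BITS == 2U (the Armv6-M value) NVIC_SetPriority()
// accepts 0..3, 0 being the highest priority. Negative numbers such as
// SysTick_IRQn address processor exceptions and land in SCB->SHP;
// EXTI0_IRQn is a placeholder device interrupt.
void priorities_init(void)
{
  NVIC_SetPriority(SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL);  // lowest priority for the tick
  NVIC_SetPriority(EXTI0_IRQn, 0U);                                 // highest priority for the pin IRQ
}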
+ */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + Address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)0x0U; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)0x0U; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
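// Editor's note: illustrative usage sketch, not part of the generated CMSIS
// sources. On this VTOR-less Cortex-M0 the table accessed by NVIC_SetVector()
// and NVIC_GetVector() sits at address 0, so runtime vector patching only works
// when the device can map SRAM to address 0. EXTI0_IRQn and exti0_trampoline
// are placeholder names.
void install_exti0_handler(void (*exti0_trampoline)(void))
{
  uint32_t previous = NVIC_GetVector(EXTI0_IRQn);           // current handler address
  (void)previous;                                           // e.g. keep it to chain the old handler
  NVIC_SetVector(EXTI0_IRQn, (uint32_t)exti0_trampoline);   // patch the SRAM-based table entry
}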
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm0plus.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm0plus.h new file mode 100644 index 000000000..424011ac3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm0plus.h @@ -0,0 +1,1083 @@ +/**************************************************************************//** + * @file core_cm0plus.h + * @brief CMSIS Cortex-M0+ Core Peripheral Access Layer Header File + * @version V5.0.6 + * @date 28. May 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM0PLUS_H_GENERIC +#define __CORE_CM0PLUS_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex-M0+ + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM0+ definitions */ +#define __CM0PLUS_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM0PLUS_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM0PLUS_CMSIS_VERSION ((__CM0PLUS_CMSIS_VERSION_MAIN << 16U) | \ + __CM0PLUS_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (0U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0PLUS_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0PLUS_H_DEPENDANT +#define __CORE_CM0PLUS_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0PLUS_REV + #define __CM0PLUS_REV 0x0000U + #warning "__CM0PLUS_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
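// Editor's note: illustrative sketch, not part of the generated CMSIS sources.
// These are the configuration macros the __CHECK_DEVICE_DEFINES block above
// expects the vendor device header to provide before core_cm0plus.h is included;
// the values shown are examples for a hypothetical M0+ part with MPU and VTOR.
#define __CM0PLUS_REV          0x0001U   // core revision
#define __MPU_PRESENT          1U        // MPU implemented
#define __VTOR_PRESENT         1U        // VTOR register implemented
#define __NVIC_PRIO_BITS       2U        // Armv6-M implements 2 priority bits
#define __Vendor_SysTickConfig 0U        // use the SysTick_Config() from this header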
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex-M0+ */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
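// Editor's note: illustrative usage sketch, not part of the generated CMSIS
// sources. The register unions above give named bit access to values returned
// by the register intrinsics from cmsis_compiler.h, e.g. __get_CONTROL().
static inline int in_unprivileged_thread_mode(void)
{
  CONTROL_Type ctrl;
  ctrl.w = __get_CONTROL();
  return (ctrl.b.nPRIV != 0U);    // nPRIV = 1: Thread mode runs unprivileged
}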
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Interrupt Control State Register Definitions */ +#define 
SCB_VTOR_TBLOFF_Pos 8U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0xFFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
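// Editor's note: illustrative usage sketch, not part of the generated CMSIS
// sources. SCB->SCR controls how WFI/WFE behave; what "deep sleep" means in
// terms of clocks and regulators is defined by the device, not by this header.
void enter_deep_sleep(void)
{
  SCB->SCR |= SCB_SCR_SLEEPDEEP_Msk;   // the next WFI enters the device's deep-sleep state
  __DSB();                             // make sure the SCR write completes first
  __WFI();                             // sleep until an interrupt (or wake event) occurs
}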
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: 
Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0+ Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M0+ header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
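// Editor's note: illustrative usage sketch, not part of the generated CMSIS
// sources; CMSIS ships ready-made helpers for this in mpu_armv7.h, which is
// included further down when __MPU_PRESENT is 1. This sketch programs MPU
// region 0 as a 32 KB read-only, never-execute region at 'base' (Armv6-M MPU
// encodings: AP = 0b111 is read-only, region size = 2^(SIZE+1) bytes).
#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
void mpu_protect_region0(uint32_t base)
{
  MPU->RNR  = 0U;                                              // select region 0
  MPU->RBAR = base & MPU_RBAR_ADDR_Msk;                        // region base (32 KB aligned)
  MPU->RASR = _VAL2FLD(MPU_RASR_AP, 7U)                        // read-only for all code
            | MPU_RASR_XN_Msk                                  // no instruction fetches
            | _VAL2FLD(MPU_RASR_SIZE, 14U)                     // 2^(14+1) = 32 KB
            | MPU_RASR_ENABLE_Msk;                             // region enabled
  MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;   // background map + MPU on
  __DSB();
  __ISB();                                                     // fetch with the new memory map
}
#endif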
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0+ */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. 
+ \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). 
+ */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; + +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ */
+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
+{
+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
+  {
+    return (1UL);                                                   /* Reload value impossible */
+  }
+
+  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */
+  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
+  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */
+  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |
+                   SysTick_CTRL_TICKINT_Msk   |
+                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */
+  return (0UL);                                                     /* Function successful */
+}
+
+#endif
+
+/*@} end of CMSIS_Core_SysTickFunctions */
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CM0PLUS_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm1.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm1.h
new file mode 100644
index 000000000..0ed678e3b
--- /dev/null
+++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm1.h
@@ -0,0 +1,976 @@
+/**************************************************************************//**
+ * @file     core_cm1.h
+ * @brief    CMSIS Cortex-M1 Core Peripheral Access Layer Header File
+ * @version  V1.0.0
+ * @date     23. July 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if   defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CM1_H_GENERIC
+#define __CORE_CM1_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+  \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions
+  CMSIS violates the following MISRA-C:2004 rules:
+
+   \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M1 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM1 definitions */ +#define __CM1_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM1_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM1_CMSIS_VERSION ((__CM1_CMSIS_VERSION_MAIN << 16U) | \ + __CM1_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (1U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM1_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM1_H_DEPENDANT +#define __CORE_CM1_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM1_REV + #define __CM1_REV 0x0100U + #warning "__CM1_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M1 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. 
[0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB 
AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_ITCMUAEN_Pos 4U /*!< ACTLR: Instruction TCM Upper Alias Enable Position */ +#define SCnSCB_ACTLR_ITCMUAEN_Msk (1UL << SCnSCB_ACTLR_ITCMUAEN_Pos) /*!< ACTLR: Instruction TCM Upper Alias Enable Mask */ + +#define SCnSCB_ACTLR_ITCMLAEN_Pos 3U /*!< ACTLR: Instruction TCM Lower Alias Enable Position */ +#define SCnSCB_ACTLR_ITCMLAEN_Msk (1UL << SCnSCB_ACTLR_ITCMLAEN_Pos) /*!< ACTLR: Instruction TCM Lower Alias Enable Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M1 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M1 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M1 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define 
_IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + Address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)0x0U; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)0x0U; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. 
+  \return          1  Function failed.
+  \note    When the variable __Vendor_SysTickConfig is set to 1, then the
+           function SysTick_Config is not included. In this case, the file device.h
+           must contain a vendor-specific implementation of this function.
+ */
+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
+{
+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
+  {
+    return (1UL);                                                   /* Reload value impossible */
+  }
+
+  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */
+  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
+  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */
+  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |
+                   SysTick_CTRL_TICKINT_Msk   |
+                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */
+  return (0UL);                                                     /* Function successful */
+}
+
+#endif
+
+/*@} end of CMSIS_Core_SysTickFunctions */
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CM1_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm23.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm23.h
new file mode 100644
index 000000000..acbc5dfea
--- /dev/null
+++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm23.h
@@ -0,0 +1,1993 @@
+/**************************************************************************//**
+ * @file     core_cm23.h
+ * @brief    CMSIS Cortex-M23 Core Peripheral Access Layer Header File
+ * @version  V5.0.7
+ * @date     22. June 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if   defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CM23_H_GENERIC
+#define __CORE_CM23_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+  \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions
+  CMSIS violates the following MISRA-C:2004 rules:
+
+   \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M23 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS definitions */ +#define __CM23_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM23_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM23_CMSIS_VERSION ((__CM23_CMSIS_VERSION_MAIN << 16U) | \ + __CM23_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (23U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM23_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM23_H_DEPENDANT +#define __CORE_CM23_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM23_REV + #define __CM23_REV 0x0000U + #warning "__CM23_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" 
+ #endif + + #ifndef __ETM_PRESENT + #define __ETM_PRESENT 0U + #warning "__ETM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MTB_PRESENT + #define __MTB_PRESENT 0U + #warning "__MTB_PRESENT not defined in device header file; using default!" + #endif + +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M23 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint32_t IPR[124U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING 
Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ 
+#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
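+  \details  A minimal bare-metal configuration sketch (illustrative only; SystemCoreClock is the
+            usual CMSIS global holding the core clock in Hz and is an assumption here):
+  \code
+    SysTick->LOAD = (SystemCoreClock / 1000U) - 1U;   // reload value for a 1 ms period
+    SysTick->VAL  = 0U;                               // clear the current counter value
+    SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |      // use the processor clock
+                    SysTick_CTRL_TICKINT_Msk   |      // enable the SysTick exception
+                    SysTick_CTRL_ENABLE_Msk;          // start the counter
+  \endcode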
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
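+  \details  For illustration (not part of the original header), the number of implemented
+            comparators can be read from the control register:
+  \code
+    uint32_t numcomp = (DWT->CTRL & DWT_CTRL_NUMCOMP_Msk) >> DWT_CTRL_NUMCOMP_Pos;
+  \endcode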
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + uint32_t RESERVED0[6U]; + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT 
CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
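+  \details  Illustrative sketch of selecting the SWO (NRZ) pin protocol and prescaler; the
+            trace clock and baud-rate variables are assumptions, and these registers are
+            normally programmed by a debugger:
+  \code
+    TPI->SPPR = 2U;                                  // TXMODE = 2: NRZ (SWO) protocol
+    TPI->ACPR = (trace_clk_hz / swo_baud) - 1U;      // hypothetical clock/baud values
+  \endcode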
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define 
TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2SS Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1SS Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */ + +/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte 
count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2SS Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1SS Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to 
access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + uint32_t RESERVED0[7U]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: EN Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: EN Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< 
MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#endif +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/*@} end of group 
CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED4[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 
16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register */ +#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< CoreDebug DEMCR: DWTENA Position */ +#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< CoreDebug DEMCR: DWTENA Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
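+  \details  Illustrative use of the field macros (an addition for clarity, not part of the
+            original header):
+  \code
+    uint32_t key  = _VAL2FLD(SCB_AIRCR_VECTKEY, 0x05FAUL);       // build a shifted field value
+    uint32_t pend = _FLD2VAL(SCB_ICSR_VECTPENDING, SCB->ICSR);   // extract a field from a register
+  \endcode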
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and 
Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else +/*#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping not available for Cortex-M23 */ +/*#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping not available for Cortex-M23 */ + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define 
__NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. 
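+  \details  Illustrative pairing with NVIC_SetPriority (TIM1_IRQn is a hypothetical
+            device-specific interrupt number):
+  \code
+    NVIC_SetPriority(TIM1_IRQn, 1U);                 // set the priority
+    uint32_t prio = NVIC_GetPriority(TIM1_IRQn);     // read it back, right-aligned
+  \endcode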
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. 
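+            An illustrative relocation sketch (symbol names are assumptions; requires VTOR):
+  \code
+    SCB->VTOR = (uint32_t)&vector_table_sram;               // hypothetical table copied to SRAM
+    NVIC_SetVector(TIM1_IRQn, (uint32_t)TIM1_IRQHandler);   // hypothetical IRQ and handler
+  \endcode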
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
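+  \details  Illustrative secure-side check (TIM1_IRQn is a hypothetical interrupt that has been
+            targeted to the Non-secure state):
+  \code
+    if (TZ_NVIC_GetPendingIRQ_NS(TIM1_IRQn) != 0UL)
+    {
+      TZ_NVIC_ClearPendingIRQ_NS(TIM1_IRQn);   // clear the Non-secure pending bit
+    }
+  \endcode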
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC_NS->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB_NS->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB_NS->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. 
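+
+           Usage sketch (illustrative only; WWDG_IRQn stands for any non-negative,
+           device-specific interrupt number from the device header):
+           \code
+           TZ_NVIC_SetPriority_NS(WWDG_IRQn, 2U);
+           uint32_t prio = TZ_NVIC_GetPriority_NS(WWDG_IRQn);   // reads back 2 when at least two priority bits are implemented
+           \endcode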
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB_NS->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
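+
+           Usage sketch (illustrative only; SystemCoreClock is the CMSIS core clock
+           variable in Hz, normally provided by the device's system_<device>.c):
+           \code
+           if (SysTick_Config(SystemCoreClock / 1000U) != 0U)   // request a 1 ms tick
+           {
+             // reload value does not fit into the 24-bit LOAD register
+           }
+           \endcode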
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM23_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm3.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm3.h new file mode 100644 index 000000000..74bff64be --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm3.h @@ -0,0 +1,1941 @@ +/**************************************************************************//** + * @file core_cm3.h + * @brief CMSIS Cortex-M3 Core Peripheral Access Layer Header File + * @version V5.0.8 + * @date 04. June 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM3_H_GENERIC +#define __CORE_CM3_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M3 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM3 definitions */ +#define __CM3_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM3_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM3_CMSIS_VERSION ((__CM3_CMSIS_VERSION_MAIN << 16U) | \ + __CM3_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (3U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM3_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM3_H_DEPENDANT +#define __CORE_CM3_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM3_REV + #define __CM3_REV 0x0200U + #warning "__CM3_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
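+
+   A short sketch of how these qualifiers are typically applied in a peripheral
+   register structure (illustrative only; MY_TIMER_Type is a made-up peripheral):
+   \code
+   typedef struct
+   {
+     __IM  uint32_t STATUS;   // read-only register
+     __OM  uint32_t CLEAR;    // write-only register
+     __IOM uint32_t CTRL;     // read/write register
+   } MY_TIMER_Type;
+   \endcode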
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M3 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t _reserved1:8; /*!< bit: 16..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
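+
+          Access sketch (illustrative only; NVIC is the peripheral instance pointer
+          defined further down in this header, and 5U stands for an arbitrary
+          device interrupt number):
+          \code
+          NVIC->ISER[0U] = (1UL << 5U);   // enable interrupt number 5 directly via the register block
+          \endcode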
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define 
SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#if defined (__CM3_REV) && (__CM3_REV < 0x0201U) /* core r2p1 */ +#define SCB_VTOR_TBLBASE_Pos 29U /*!< SCB VTOR: TBLBASE Position */ +#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */ + +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#else +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: 
PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask 
*/ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define 
SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 
1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ +#if defined (__CM3_REV) && (__CM3_REV >= 0x200U) + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +#else + uint32_t RESERVED1[1U]; +#endif +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
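+
+          Access sketch (illustrative only; SysTick is the instance pointer defined
+          further down in this header and the timer is assumed to be running):
+          \code
+          while ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == 0U)
+          {
+            // busy-wait for the next underflow; COUNTFLAG is cleared by the read of CTRL
+          }
+          \endcode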
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
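+
+          Output sketch (illustrative only, modeled on the ITM_SendChar helper further
+          down in this header; assumes the debugger has enabled the ITM and stimulus
+          port 0 via TCR/TER):
+          \code
+          if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && ((ITM->TER & 1UL) != 0UL))
+          {
+            while (ITM->PORT[0U].u32 == 0UL) { __NOP(); }
+            ITM->PORT[0U].u8 = (uint8_t)'A';
+          }
+          \endcode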
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define 
ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
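+
+          Cycle-counting sketch (illustrative only; assumes trace has been enabled by
+          setting the TRCENA bit in CoreDebug->DEMCR, both defined further down in
+          this header):
+          \code
+          DWT->CYCCNT = 0U;
+          DWT->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;
+          uint32_t start = DWT->CYCCNT;
+          // ... code to be measured ...
+          uint32_t cycles = DWT->CYCCNT - start;
+          \endcode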
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + 
+#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + 
+#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL 
<< TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: 
ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug 
DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: 
VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. 
+ \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. 
+ \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM3_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm33.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm33.h new file mode 100644 index 000000000..6cd2db77f --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm33.h @@ -0,0 +1,3002 @@ +/**************************************************************************//** + * @file core_cm33.h + * @brief CMSIS Cortex-M33 Core Peripheral Access Layer Header File + * @version V5.0.9 + * @date 06. 
July 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM33_H_GENERIC +#define __CORE_CM33_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M33 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM33 definitions */ +#define __CM33_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM33_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM33_CMSIS_VERSION ((__CM33_CMSIS_VERSION_MAIN << 16U) | \ + __CM33_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (33U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_PCS_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + 
#define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM33_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM33_H_DEPENDANT +#define __CORE_CM33_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM33_REV + #define __CM33_REV 0x0000U + #warning "__CM33_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M33 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED3[92U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + uint32_t RESERVED7[6U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ + __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ + __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ + __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */ + uint32_t RESERVED8[1U]; + __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define 
SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define 
SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define 
SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): 
MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) 
/*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << 
SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* Instruction Tightly-Coupled Memory Control Register Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ +#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ + +#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ +#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define 
SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */ +#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */ + +#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */ +#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/* AHBP Control Register Definitions */ +#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */ +#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */ + +#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */ +#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */ + +/* L1 Cache Control Register Definitions */ +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */ +#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */ + +#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ +#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ + +/* AHBS Control Register Definitions */ +#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ +#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ + +#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ +#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ + +#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ +#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ + +/* Auxiliary Bus Fault Status Register Definitions */ +#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ +#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */ + +#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/ +#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */ + +#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/ +#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */ + +#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/ +#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */ + +#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/ +#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */ + +#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/ +#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. 
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM 
TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 
(R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U 
/*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define 
TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2SS Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1SS Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */ + +/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte 
count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2SS Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1SS Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to 
access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define 
MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: 
privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define 
FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED4[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U 
/*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << 
CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} 
*/ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the 
required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << 8U) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM33_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm4.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm4.h new file mode 100644 index 000000000..7d5687353 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm4.h @@ -0,0 +1,2129 @@ +/**************************************************************************//** + * @file core_cm4.h + * @brief CMSIS Cortex-M4 Core Peripheral Access Layer Header File + * @version V5.0.8 + * @date 04. June 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. 
All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CM4_H_GENERIC
+#define __CORE_CM4_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+  \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions
+  CMSIS violates the following MISRA-C:2004 rules:
+
+   \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M4 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM4 definitions */ +#define __CM4_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM4_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM4_CMSIS_VERSION ((__CM4_CMSIS_VERSION_MAIN << 16U) | \ + __CM4_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (4U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM4_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM4_H_DEPENDANT +#define __CORE_CM4_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if 
defined __CHECK_DEVICE_DEFINES + #ifndef __CM4_REV + #define __CM4_REV 0x0000U + #warning "__CM4_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M4 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). 
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define 
SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define 
SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR 
Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define 
SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISOOFP_Pos 9U /*!< ACTLR: DISOOFP Position */ +#define SCnSCB_ACTLR_DISOOFP_Msk (1UL << SCnSCB_ACTLR_DISOOFP_Pos) /*!< ACTLR: DISOOFP Mask */ + +#define SCnSCB_ACTLR_DISFPCA_Pos 8U /*!< ACTLR: DISFPCA Position */ +#define SCnSCB_ACTLR_DISFPCA_Msk (1UL << SCnSCB_ACTLR_DISFPCA_Pos) /*!< ACTLR: DISFPCA Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define 
ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + 
+#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + 
+#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL 
<< TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: 
ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif /* defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << 
FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). 
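+ \note A minimal, illustrative sketch (not specific to any device): trace features such as the DWT and ITM
+ are typically switched on by setting the TRCENA bit in DEMCR using the definitions below.
+ \code
+   CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;   // enable the DWT and ITM trace units
+ \endcode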
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << 
CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
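+ \note A short usage sketch of both field macros; the SCB->AIRCR PRIGROUP field is used purely as an illustration.
+ \code
+   uint32_t grouping = _FLD2VAL(SCB_AIRCR_PRIGROUP, SCB->AIRCR);   // extract the PRIGROUP bit field
+   uint32_t regval   = _VAL2FLD(SCB_AIRCR_PRIGROUP, grouping);     // shift a value back into field position
+ \endcode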
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +#define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ +#define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ +#define EXC_RETURN_HANDLER_FPU (0xFFFFFFE1UL) /* return to Handler mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_MSP_FPU (0xFFFFFFE9UL) /* return to Thread mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_PSP_FPU (0xFFFFFFEDUL) /* return to Thread mode, uses PSP after return, restore floating-point state */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
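+ \note A minimal enable sequence, shown only as an illustration; TIM2_IRQn is a placeholder for any
+ device-specific interrupt number defined in the device header.
+ \code
+   NVIC_SetPriority(TIM2_IRQn, 5UL);   // smaller value means higher priority
+   NVIC_EnableIRQ(TIM2_IRQn);          // enable the interrupt in the NVIC
+ \endcode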
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. 
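+ \note An illustrative sketch combining this function with \ref NVIC_EncodePriority; UART4_IRQn is a
+ placeholder for a device-specific interrupt number.
+ \code
+   uint32_t prio = NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 1UL, 0UL);   // preempt level 1, sub-priority 0
+   NVIC_SetPriority(UART4_IRQn, prio);
+ \endcode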
+ \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. 
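+ A typical call for a 1 ms tick is sketched below; SystemCoreClock is assumed to be kept up to date
+ by the device's system file.
+ \code
+   if (SysTick_Config(SystemCoreClock / 1000UL) != 0UL)
+   {
+     while (1) { }   // reload value does not fit into 24 bits, stop here
+   }
+ \endcode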
+ \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. 
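+ \note A small polling sketch using the ITM helpers defined above; it assumes a debugger has enabled
+ the ITM and stimulus port 0.
+ \code
+   if (ITM_CheckChar() != 0)            // a character is waiting in ITM_RxBuffer
+   {
+     int32_t c = ITM_ReceiveChar();     // consume it
+     ITM_SendChar((uint32_t)c);         // echo it back over stimulus port 0
+   }
+ \endcode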
+ */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM4_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm7.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm7.h new file mode 100644 index 000000000..a14dc623b --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_cm7.h @@ -0,0 +1,2671 @@ +/**************************************************************************//** + * @file core_cm7.h + * @brief CMSIS Cortex-M7 Core Peripheral Access Layer Header File + * @version V5.0.8 + * @date 04. June 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM7_H_GENERIC +#define __CORE_CM7_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.<br>
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.<br>
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M7 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM7 definitions */ +#define __CM7_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM7_CMSIS_VERSION_SUB ( __CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM7_CMSIS_VERSION ((__CM7_CMSIS_VERSION_MAIN << 16U) | \ + __CM7_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (7U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM7_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM7_H_DEPENDANT +#define __CORE_CM7_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if 
defined __CHECK_DEVICE_DEFINES + #ifndef __CM7_REV + #define __CM7_REV 0x0000U + #warning "__CM7_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M7 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). 
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[1U]; + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED3[93U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP 
Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + uint32_t RESERVED7[6U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ + __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ + __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ + __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */ + uint32_t RESERVED8[1U]; + __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define 
SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: Branch prediction enable bit Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: Branch prediction enable bit Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: Instruction cache enable bit Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: Instruction cache enable bit Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: Cache enable bit Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: Cache enable bit Mask */ + +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB 
CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U 
/*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): 
PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << 
SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* Instruction Tightly-Coupled Memory Control Register Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk 
(0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ +#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ + +#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ +#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */ +#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */ + +#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */ +#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/* AHBP Control Register Definitions */ +#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */ +#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */ + +#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */ +#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */ + +/* L1 Cache Control Register Definitions */ +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */ +#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */ + +#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ +#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ + +/* AHBS Control Register Definitions */ +#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ +#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBSCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ + +#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ +#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBSCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ + +#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ +#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBSCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ + +/* Auxiliary Bus Fault Status Register Definitions */ +#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ +#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */ + +#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/ +#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */ + +#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/ +#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */ + +#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/ +#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */ + +#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/ +#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */ + +#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/ +#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask 
*/ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (1UL << SCnSCB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define SCnSCB_ACTLR_DISRAMODE_Pos 11U /*!< ACTLR: DISRAMODE Position */ +#define SCnSCB_ACTLR_DISRAMODE_Msk (1UL << SCnSCB_ACTLR_DISRAMODE_Pos) /*!< ACTLR: DISRAMODE Mask */ + +#define SCnSCB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define SCnSCB_ACTLR_FPEXCODIS_Msk (1UL << SCnSCB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define 
ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED3[981U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( W) Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define 
DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define 
DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk 
(0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) 
/*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif /* defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and FP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< 
FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and FP Feature Register 2 Definitions */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + 
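+/* Usage sketch (editorial note, not part of the upstream CMSIS header): the _Pos/_Msk
+   macros in this file are meant to be combined with the peripheral structs it declares.
+   A common example is starting the DWT cycle counter, which first requires trace to be
+   enabled through DEMCR:
+
+     CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;  // enable the DWT and ITM blocks
+     DWT->CYCCNT       = 0U;                          // reset the cycle counter
+     DWT->CTRL        |= DWT_CTRL_CYCCNTENA_Msk;      // start counting CPU cycles
+
+   CoreDebug, DWT and the two masks used here are all defined in this header. */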
+/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug 
DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
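+ \note   Illustrative example (not part of the original CMSIS header):
+         _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID) extracts the part-number field from CPUID, and
+         _VAL2FLD(FPU_FPDSCR_RMode, 1UL) produces a value already shifted into the RMode bit range.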
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +#define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ +#define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
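+ \note  Usage sketch (illustrative, not part of the original CMSIS header): a device interrupt is
+        typically configured with NVIC_SetPriority(TIM2_IRQn, 5U); NVIC_EnableIRQ(TIM2_IRQn);
+        where TIM2_IRQn stands for any device-specific IRQn_Type value from the device header.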
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ +#define EXC_RETURN_HANDLER_FPU (0xFFFFFFE1UL) /* return to Handler mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_MSP_FPU (0xFFFFFFE9UL) /* return to Thread mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_PSP_FPU (0xFFFFFFEDUL) /* return to Thread mode, uses PSP after return, restore floating-point state */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. 
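+ \note Only the least-significant __NVIC_PRIO_BITS bits of the priority value take effect;
+ they are stored in the most-significant bits of the byte-wide priority field.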
+ \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = SCB->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## Cache functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_CacheFunctions Cache Functions + \brief Functions that configure Instruction and Data cache. 
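+ \note  Usage sketch (illustrative, not part of the original CMSIS header): with the data cache
+        enabled, buffers shared with DMA peripherals normally need explicit maintenance, e.g.
+        SCB_CleanDCache_by_Addr((uint32_t *)tx_buf, size) before a DMA transfer reads tx_buf and
+        SCB_InvalidateDCache_by_Addr((uint32_t *)rx_buf, size) after a DMA transfer has written
+        rx_buf; tx_buf, rx_buf and size are placeholders and should be 32-byte aligned to match
+        the Cortex-M7 cache line length.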
+ @{ + */ + +/* Cache Size ID Register Macros */ +#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos) +#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) + + +/** + \brief Enable I-Cache + \details Turns on I-Cache + */ +__STATIC_INLINE void SCB_EnableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable I-Cache + \details Turns off I-Cache + */ +__STATIC_INLINE void SCB_DisableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */ + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate I-Cache + \details Invalidates I-Cache + */ +__STATIC_INLINE void SCB_InvalidateICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Enable D-Cache + \details Turns on D-Cache + */ +__STATIC_INLINE void SCB_EnableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + __DSB(); + + SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */ + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable D-Cache + \details Turns off D-Cache + */ +__STATIC_INLINE void SCB_DisableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ + __DSB(); + + SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate D-Cache + \details Invalidates D-Cache + */ +__STATIC_INLINE void SCB_InvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + 
__ISB(); + #endif +} + + +/** + \brief Clean D-Cache + \details Cleans D-Cache + */ +__STATIC_INLINE void SCB_CleanDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) | + ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean & Invalidate D-Cache + \details Cleans and Invalidates D-Cache + */ +__STATIC_INLINE void SCB_CleanInvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Invalidate by address + \details Invalidates D-Cache for the given address + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_INLINE void SCB_InvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + int32_t op_size = dsize; + uint32_t op_addr = (uint32_t)addr; + int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ + + __DSB(); + + while (op_size > 0) { + SCB->DCIMVAC = op_addr; + op_addr += (uint32_t)linesize; + op_size -= linesize; + } + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Clean by address + \details Cleans D-Cache for the given address + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_INLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + int32_t op_size = dsize; + uint32_t op_addr = (uint32_t) addr; + int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ + + __DSB(); + + while (op_size > 0) { + SCB->DCCMVAC = op_addr; + op_addr += (uint32_t)linesize; + op_size -= linesize; + } + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Clean and Invalidate by address + \details Cleans and invalidates D_Cache for the given address + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_INLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + int32_t op_size = dsize; + uint32_t op_addr = (uint32_t) addr; + int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ + + __DSB(); + + while (op_size > 0) { + SCB->DCCIMVAC = op_addr; + op_addr += (uint32_t)linesize; + op_size -= 
linesize; + } + + __DSB(); + __ISB(); + #endif +} + + +/*@} end of CMSIS_Core_CacheFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. 
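+ \note   Illustrative polling loop (not part of the original CMSIS header):
+         while (ITM_CheckChar() == 0) { __NOP(); } ch = ITM_ReceiveChar();
+         where ch is a caller-provided int32_t variable.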
+ */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM7_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */
diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_sc000.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_sc000.h new file mode 100644 index 000000000..9b67c92f3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_sc000.h @@ -0,0 +1,1022 @@
+/**************************************************************************//** + * @file core_sc000.h + * @brief CMSIS SC000 Core Peripheral Access Layer Header File + * @version V5.0.5 + * @date 28. May 2018 + ******************************************************************************/
+/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */
+ +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_SC000_H_GENERIC +#define __CORE_SC000_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif
+ +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup SC000 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS SC000 definitions */ +#define __SC000_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __SC000_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __SC000_CMSIS_VERSION ((__SC000_CMSIS_VERSION_MAIN << 16U) | \ + __SC000_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_SC (000U) /*!< Cortex secure core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC000_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_SC000_H_DEPENDANT +#define __CORE_SC000_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __SC000_REV + #define __SC000_REV 0x0000U + #warning "__SC000_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group SC000 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + uint32_t RESERVED1[154U]; + __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: 
TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
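+ \note  Illustrative use (not part of the original CMSIS header): SysTick_Config(SystemCoreClock / 1000U)
+        programs LOAD and VAL through this structure and enables the counter for a 1 ms tick,
+        provided __Vendor_SysTickConfig is 0 and assuming the CMSIS SystemCoreClock variable
+        holds the current core clock in Hz.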
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
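+ \note  Illustrative configuration sketch (not part of the original CMSIS header), with region_base,
+        size_code and attrs as placeholders:
+        MPU->RNR = 0UL; MPU->RBAR = region_base;
+        MPU->RASR = attrs | (size_code << MPU_RASR_SIZE_Pos) | MPU_RASR_ENABLE_Msk;
+        MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk; followed by __DSB() and __ISB().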
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ +} MPU_Type; + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + 
+#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief SC000 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the SC000 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else +/*#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping not available for SC000 */ +/*#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping not available for SC000 */ + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for SC000 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC000_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_sc300.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_sc300.h new file mode 100644 index 000000000..3e8a47109 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/core_sc300.h @@ -0,0 +1,1915 @@ +/**************************************************************************//** + * @file core_sc300.h + * @brief CMSIS SC300 Core Peripheral Access Layer Header File + * @version V5.0.6 + * @date 04. 
June 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef __CORE_SC300_H_GENERIC
+#define __CORE_SC300_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+  \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions
+  CMSIS violates the following MISRA-C:2004 rules:
+
+  \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup SC3000 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS SC300 definitions */ +#define __SC300_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __SC300_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __SC300_CMSIS_VERSION ((__SC300_CMSIS_VERSION_MAIN << 16U) | \ + __SC300_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_SC (300U) /*!< Cortex secure core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC300_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_SC300_H_DEPENDANT +#define __CORE_SC300_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __SC300_REV + #define __SC300_REV 0x0000U + #warning "__SC300_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group SC300 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t _reserved1:8; /*!< bit: 16..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED1[129U]; + __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ 
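Editor's note (illustrative, not part of the CMSIS header): the xxx_Pos/xxx_Msk pairs defined here are meant to be used either directly or through the _VAL2FLD()/_FLD2VAL() helpers defined earlier in this file. A minimal sketch, assuming the SCB pointer macro defined further down in this header; the function names are examples only.

    static inline uint32_t example_read_cpuid_implementer(void)
    {
      /* Extract the IMPLEMENTER field (0x41 for Arm) from SCB->CPUID. */
      return _FLD2VAL(SCB_CPUID_IMPLEMENTER, SCB->CPUID);
    }

    static inline uint32_t example_read_cpuid_variant(void)
    {
      /* Equivalent manual form using the mask/position macros directly. */
      return (SCB->CPUID & SCB_CPUID_VARIANT_Msk) >> SCB_CPUID_VARIANT_Pos;
    }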
+ +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLBASE_Pos 29U /*!< SCB VTOR: TBLBASE Position */ +#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */ + +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U 
/*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << 
SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_STKERR_Pos 
(SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + 
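Editor's note (illustrative, not part of the CMSIS header): a typical use of the HFSR/CFSR/BFAR/MMFAR definitions above is triaging an escalated HardFault. The sketch below is an example only; the function name is not CMSIS, and the SCB pointer macro is defined further down in this header.

    /* Minimal fault triage: check for an escalated configurable fault and
       capture the faulting address when the hardware marks it as valid. */
    void example_fault_report(void)
    {
      if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0UL)     /* configurable fault escalated to HardFault */
      {
        uint32_t cfsr = SCB->CFSR;
        if ((cfsr & SCB_CFSR_BFARVALID_Msk) != 0UL)
        {
          uint32_t bus_fault_address = SCB->BFAR;       /* address of a precise BusFault             */
          (void)bus_fault_address;
        }
        if ((cfsr & SCB_CFSR_MMARVALID_Msk) != 0UL)
        {
          uint32_t mem_fault_address = SCB->MMFAR;      /* address of a MemManage fault              */
          (void)mem_fault_address;
        }
        SCB->CFSR = cfsr;                               /* CFSR status bits are write-one-to-clear   */
      }
    }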
+#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + uint32_t RESERVED1[1U]; +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the 
Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). + */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk 
(1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + 
+#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + 
+#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL 
<< TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: 
ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define 
MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS 
Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + 
+#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. 
+ \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC300_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/mpu_armv7.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/mpu_armv7.h new file mode 100644 index 000000000..01422033d --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/mpu_armv7.h @@ -0,0 +1,270 @@ +/****************************************************************************** + * @file mpu_armv7.h + * @brief CMSIS MPU API for Armv7-M MPU + * @version V5.0.4 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2017-2018 Arm Limited. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_MPU_ARMV7_H +#define ARM_MPU_ARMV7_H + +#define ARM_MPU_REGION_SIZE_32B ((uint8_t)0x04U) ///!< MPU Region Size 32 Bytes +#define ARM_MPU_REGION_SIZE_64B ((uint8_t)0x05U) ///!< MPU Region Size 64 Bytes +#define ARM_MPU_REGION_SIZE_128B ((uint8_t)0x06U) ///!< MPU Region Size 128 Bytes +#define ARM_MPU_REGION_SIZE_256B ((uint8_t)0x07U) ///!< MPU Region Size 256 Bytes +#define ARM_MPU_REGION_SIZE_512B ((uint8_t)0x08U) ///!< MPU Region Size 512 Bytes +#define ARM_MPU_REGION_SIZE_1KB ((uint8_t)0x09U) ///!< MPU Region Size 1 KByte +#define ARM_MPU_REGION_SIZE_2KB ((uint8_t)0x0AU) ///!< MPU Region Size 2 KBytes +#define ARM_MPU_REGION_SIZE_4KB ((uint8_t)0x0BU) ///!< MPU Region Size 4 KBytes +#define ARM_MPU_REGION_SIZE_8KB ((uint8_t)0x0CU) ///!< MPU Region Size 8 KBytes +#define ARM_MPU_REGION_SIZE_16KB ((uint8_t)0x0DU) ///!< MPU Region Size 16 KBytes +#define ARM_MPU_REGION_SIZE_32KB ((uint8_t)0x0EU) ///!< MPU Region Size 32 KBytes +#define ARM_MPU_REGION_SIZE_64KB ((uint8_t)0x0FU) ///!< MPU Region Size 64 KBytes +#define ARM_MPU_REGION_SIZE_128KB ((uint8_t)0x10U) ///!< MPU Region Size 128 KBytes +#define ARM_MPU_REGION_SIZE_256KB ((uint8_t)0x11U) ///!< MPU Region Size 256 KBytes +#define ARM_MPU_REGION_SIZE_512KB ((uint8_t)0x12U) ///!< MPU Region Size 512 KBytes +#define ARM_MPU_REGION_SIZE_1MB ((uint8_t)0x13U) ///!< MPU Region Size 1 MByte +#define ARM_MPU_REGION_SIZE_2MB ((uint8_t)0x14U) ///!< MPU Region Size 2 MBytes +#define ARM_MPU_REGION_SIZE_4MB ((uint8_t)0x15U) ///!< MPU Region Size 4 MBytes +#define ARM_MPU_REGION_SIZE_8MB ((uint8_t)0x16U) ///!< MPU Region Size 8 MBytes +#define ARM_MPU_REGION_SIZE_16MB ((uint8_t)0x17U) ///!< MPU Region Size 16 MBytes +#define ARM_MPU_REGION_SIZE_32MB ((uint8_t)0x18U) ///!< MPU Region Size 32 MBytes +#define ARM_MPU_REGION_SIZE_64MB ((uint8_t)0x19U) ///!< MPU Region Size 64 MBytes +#define ARM_MPU_REGION_SIZE_128MB ((uint8_t)0x1AU) ///!< MPU Region Size 128 MBytes +#define ARM_MPU_REGION_SIZE_256MB ((uint8_t)0x1BU) ///!< MPU Region Size 256 MBytes +#define ARM_MPU_REGION_SIZE_512MB ((uint8_t)0x1CU) ///!< MPU Region Size 512 MBytes +#define ARM_MPU_REGION_SIZE_1GB ((uint8_t)0x1DU) ///!< MPU Region Size 1 GByte +#define ARM_MPU_REGION_SIZE_2GB ((uint8_t)0x1EU) ///!< MPU Region Size 2 GBytes +#define ARM_MPU_REGION_SIZE_4GB ((uint8_t)0x1FU) ///!< MPU Region Size 4 GBytes + +#define ARM_MPU_AP_NONE 0U ///!< MPU Access Permission no access +#define ARM_MPU_AP_PRIV 1U ///!< MPU Access Permission privileged access only +#define ARM_MPU_AP_URO 2U ///!< MPU Access Permission unprivileged access read-only +#define ARM_MPU_AP_FULL 3U ///!< MPU Access Permission full access +#define ARM_MPU_AP_PRO 5U ///!< MPU Access Permission privileged access 
read-only +#define ARM_MPU_AP_RO 6U ///!< MPU Access Permission read-only access + +/** MPU Region Base Address Register Value +* +* \param Region The region to be configured, number 0 to 15. +* \param BaseAddress The base address for the region. +*/ +#define ARM_MPU_RBAR(Region, BaseAddress) \ + (((BaseAddress) & MPU_RBAR_ADDR_Msk) | \ + ((Region) & MPU_RBAR_REGION_Msk) | \ + (MPU_RBAR_VALID_Msk)) + +/** +* MPU Memory Access Attributes +* +* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral. +* \param IsShareable Region is shareable between multiple bus masters. +* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache. +* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy. +*/ +#define ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable) \ + ((((TypeExtField ) << MPU_RASR_TEX_Pos) & MPU_RASR_TEX_Msk) | \ + (((IsShareable ) << MPU_RASR_S_Pos) & MPU_RASR_S_Msk) | \ + (((IsCacheable ) << MPU_RASR_C_Pos) & MPU_RASR_C_Msk) | \ + (((IsBufferable ) << MPU_RASR_B_Pos) & MPU_RASR_B_Msk)) + +/** +* MPU Region Attribute and Size Register Value +* +* \param DisableExec Instruction access disable bit, 1= disable instruction fetches. +* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode. +* \param AccessAttributes Memory access attribution, see \ref ARM_MPU_ACCESS_. +* \param SubRegionDisable Sub-region disable field. +* \param Size Region size of the region to be configured, for example 4K, 8K. +*/ +#define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \ + ((((DisableExec ) << MPU_RASR_XN_Pos) & MPU_RASR_XN_Msk) | \ + (((AccessPermission) << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | \ + (((AccessAttributes) ) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk))) + +/** +* MPU Region Attribute and Size Register Value +* +* \param DisableExec Instruction access disable bit, 1= disable instruction fetches. +* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode. +* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral. +* \param IsShareable Region is shareable between multiple bus masters. +* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache. +* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy. +* \param SubRegionDisable Sub-region disable field. +* \param Size Region size of the region to be configured, for example 4K, 8K. +*/ +#define ARM_MPU_RASR(DisableExec, AccessPermission, TypeExtField, IsShareable, IsCacheable, IsBufferable, SubRegionDisable, Size) \ + ARM_MPU_RASR_EX(DisableExec, AccessPermission, ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable), SubRegionDisable, Size) + +/** +* MPU Memory Access Attribute for strongly ordered memory. +* - TEX: 000b +* - Shareable +* - Non-cacheable +* - Non-bufferable +*/ +#define ARM_MPU_ACCESS_ORDERED ARM_MPU_ACCESS_(0U, 1U, 0U, 0U) + +/** +* MPU Memory Access Attribute for device memory. 
+* - TEX: 000b (if non-shareable) or 010b (if shareable) +* - Shareable or non-shareable +* - Non-cacheable +* - Bufferable (if shareable) or non-bufferable (if non-shareable) +* +* \param IsShareable Configures the device memory as shareable or non-shareable. +*/ +#define ARM_MPU_ACCESS_DEVICE(IsShareable) ((IsShareable) ? ARM_MPU_ACCESS_(0U, 1U, 0U, 1U) : ARM_MPU_ACCESS_(2U, 0U, 0U, 0U)) + +/** +* MPU Memory Access Attribute for normal memory. +* - TEX: 1BBb (reflecting outer cacheability rules) +* - Shareable or non-shareable +* - Cacheable or non-cacheable (reflecting inner cacheability rules) +* - Bufferable or non-bufferable (reflecting inner cacheability rules) +* +* \param OuterCp Configures the outer cache policy. +* \param InnerCp Configures the inner cache policy. +* \param IsShareable Configures the memory as shareable or non-shareable. +*/ +#define ARM_MPU_ACCESS_NORMAL(OuterCp, InnerCp, IsShareable) ARM_MPU_ACCESS_((4U | (OuterCp)), IsShareable, ((InnerCp) & 2U), ((InnerCp) & 1U)) + +/** +* MPU Memory Access Attribute non-cacheable policy. +*/ +#define ARM_MPU_CACHEP_NOCACHE 0U + +/** +* MPU Memory Access Attribute write-back, write and read allocate policy. +*/ +#define ARM_MPU_CACHEP_WB_WRA 1U + +/** +* MPU Memory Access Attribute write-through, no write allocate policy. +*/ +#define ARM_MPU_CACHEP_WT_NWA 2U + +/** +* MPU Memory Access Attribute write-back, no write allocate policy. +*/ +#define ARM_MPU_CACHEP_WB_NWA 3U + + +/** +* Struct for a single MPU Region +*/ +typedef struct { + uint32_t RBAR; //!< The region base address register value (RBAR) + uint32_t RASR; //!< The region attribute and size register value (RASR) \ref MPU_RASR +} ARM_MPU_Region_t; + +/** Enable the MPU. +* \param MPU_Control Default access permissions for unconfigured regions. +*/ +__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) +{ + __DSB(); + __ISB(); + MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif +} + +/** Disable the MPU. +*/ +__STATIC_INLINE void ARM_MPU_Disable(void) +{ + __DSB(); + __ISB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk; +} + +/** Clear and disable the given MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr) +{ + MPU->RNR = rnr; + MPU->RASR = 0U; +} + +/** Configure an MPU region. +* \param rbar Value for RBAR register. +* \param rsar Value for RSAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr) +{ + MPU->RBAR = rbar; + MPU->RASR = rasr; +} + +/** Configure the given MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rsar Value for RSAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr) +{ + MPU->RNR = rnr; + MPU->RBAR = rbar; + MPU->RASR = rasr; +} + +/** Memcopy with strictly ordered memory access, e.g. for register targets. +* \param dst Destination data is copied to. +* \param src Source data is copied from. +* \param len Amount of data words to be copied. +*/ +__STATIC_INLINE void orderedCpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) +{ + uint32_t i; + for (i = 0U; i < len; ++i) + { + dst[i] = src[i]; + } +} + +/** Load the given number of MPU regions from a table. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. 
+*/ +__STATIC_INLINE void ARM_MPU_Load(ARM_MPU_Region_t const* table, uint32_t cnt) +{ + const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; + while (cnt > MPU_TYPE_RALIASES) { + orderedCpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize); + table += MPU_TYPE_RALIASES; + cnt -= MPU_TYPE_RALIASES; + } + orderedCpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize); +} + +#endif diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/mpu_armv8.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/mpu_armv8.h new file mode 100644 index 000000000..62571da5b --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/mpu_armv8.h @@ -0,0 +1,333 @@ +/****************************************************************************** + * @file mpu_armv8.h + * @brief CMSIS MPU API for Armv8-M MPU + * @version V5.0.4 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2017-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_MPU_ARMV8_H +#define ARM_MPU_ARMV8_H + +/** \brief Attribute for device memory (outer only) */ +#define ARM_MPU_ATTR_DEVICE ( 0U ) + +/** \brief Attribute for non-cacheable, normal memory */ +#define ARM_MPU_ATTR_NON_CACHEABLE ( 4U ) + +/** \brief Attribute for normal memory (outer and inner) +* \param NT Non-Transient: Set to 1 for non-transient data. +* \param WB Write-Back: Set to 1 to use write-back update policy. +* \param RA Read Allocation: Set to 1 to use cache allocation on read miss. +* \param WA Write Allocation: Set to 1 to use cache allocation on write miss. +*/ +#define ARM_MPU_ATTR_MEMORY_(NT, WB, RA, WA) \ + (((NT & 1U) << 3U) | ((WB & 1U) << 2U) | ((RA & 1U) << 1U) | (WA & 1U)) + +/** \brief Device memory type non Gathering, non Re-ordering, non Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_nGnRnE (0U) + +/** \brief Device memory type non Gathering, non Re-ordering, Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_nGnRE (1U) + +/** \brief Device memory type non Gathering, Re-ordering, Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_nGRE (2U) + +/** \brief Device memory type Gathering, Re-ordering, Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_GRE (3U) + +/** \brief Memory Attribute +* \param O Outer memory attributes +* \param I O == ARM_MPU_ATTR_DEVICE: Device memory attributes, else: Inner memory attributes +*/ +#define ARM_MPU_ATTR(O, I) (((O & 0xFU) << 4U) | (((O & 0xFU) != 0U) ? 
(I & 0xFU) : ((I & 0x3U) << 2U))) + +/** \brief Normal memory non-shareable */ +#define ARM_MPU_SH_NON (0U) + +/** \brief Normal memory outer shareable */ +#define ARM_MPU_SH_OUTER (2U) + +/** \brief Normal memory inner shareable */ +#define ARM_MPU_SH_INNER (3U) + +/** \brief Memory access permissions +* \param RO Read-Only: Set to 1 for read-only memory. +* \param NP Non-Privileged: Set to 1 for non-privileged memory. +*/ +#define ARM_MPU_AP_(RO, NP) (((RO & 1U) << 1U) | (NP & 1U)) + +/** \brief Region Base Address Register value +* \param BASE The base address bits [31:5] of a memory region. The value is zero extended. Effective address gets 32 byte aligned. +* \param SH Defines the Shareability domain for this memory region. +* \param RO Read-Only: Set to 1 for a read-only memory region. +* \param NP Non-Privileged: Set to 1 for a non-privileged memory region. +* \oaram XN eXecute Never: Set to 1 for a non-executable memory region. +*/ +#define ARM_MPU_RBAR(BASE, SH, RO, NP, XN) \ + ((BASE & MPU_RBAR_BASE_Msk) | \ + ((SH << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | \ + ((ARM_MPU_AP_(RO, NP) << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | \ + ((XN << MPU_RBAR_XN_Pos) & MPU_RBAR_XN_Msk)) + +/** \brief Region Limit Address Register value +* \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended. +* \param IDX The attribute index to be associated with this memory region. +*/ +#define ARM_MPU_RLAR(LIMIT, IDX) \ + ((LIMIT & MPU_RLAR_LIMIT_Msk) | \ + ((IDX << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \ + (MPU_RLAR_EN_Msk)) + +/** +* Struct for a single MPU Region +*/ +typedef struct { + uint32_t RBAR; /*!< Region Base Address Register value */ + uint32_t RLAR; /*!< Region Limit Address Register value */ +} ARM_MPU_Region_t; + +/** Enable the MPU. +* \param MPU_Control Default access permissions for unconfigured regions. +*/ +__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) +{ + __DSB(); + __ISB(); + MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif +} + +/** Disable the MPU. +*/ +__STATIC_INLINE void ARM_MPU_Disable(void) +{ + __DSB(); + __ISB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk; +} + +#ifdef MPU_NS +/** Enable the Non-secure MPU. +* \param MPU_Control Default access permissions for unconfigured regions. +*/ +__STATIC_INLINE void ARM_MPU_Enable_NS(uint32_t MPU_Control) +{ + __DSB(); + __ISB(); + MPU_NS->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB_NS->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif +} + +/** Disable the Non-secure MPU. +*/ +__STATIC_INLINE void ARM_MPU_Disable_NS(void) +{ + __DSB(); + __ISB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB_NS->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU_NS->CTRL &= ~MPU_CTRL_ENABLE_Msk; +} +#endif + +/** Set the memory attribute encoding to the given MPU. +* \param mpu Pointer to the MPU to be configured. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. 
+*/ +__STATIC_INLINE void ARM_MPU_SetMemAttrEx(MPU_Type* mpu, uint8_t idx, uint8_t attr) +{ + const uint8_t reg = idx / 4U; + const uint32_t pos = ((idx % 4U) * 8U); + const uint32_t mask = 0xFFU << pos; + + if (reg >= (sizeof(mpu->MAIR) / sizeof(mpu->MAIR[0]))) { + return; // invalid index + } + + mpu->MAIR[reg] = ((mpu->MAIR[reg] & ~mask) | ((attr << pos) & mask)); +} + +/** Set the memory attribute encoding. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. +*/ +__STATIC_INLINE void ARM_MPU_SetMemAttr(uint8_t idx, uint8_t attr) +{ + ARM_MPU_SetMemAttrEx(MPU, idx, attr); +} + +#ifdef MPU_NS +/** Set the memory attribute encoding to the Non-secure MPU. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. +*/ +__STATIC_INLINE void ARM_MPU_SetMemAttr_NS(uint8_t idx, uint8_t attr) +{ + ARM_MPU_SetMemAttrEx(MPU_NS, idx, attr); +} +#endif + +/** Clear and disable the given MPU region of the given MPU. +* \param mpu Pointer to MPU to be used. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegionEx(MPU_Type* mpu, uint32_t rnr) +{ + mpu->RNR = rnr; + mpu->RLAR = 0U; +} + +/** Clear and disable the given MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr) +{ + ARM_MPU_ClrRegionEx(MPU, rnr); +} + +#ifdef MPU_NS +/** Clear and disable the given Non-secure MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion_NS(uint32_t rnr) +{ + ARM_MPU_ClrRegionEx(MPU_NS, rnr); +} +#endif + +/** Configure the given MPU region of the given MPU. +* \param mpu Pointer to MPU to be used. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegionEx(MPU_Type* mpu, uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + mpu->RNR = rnr; + mpu->RBAR = rbar; + mpu->RLAR = rlar; +} + +/** Configure the given MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + ARM_MPU_SetRegionEx(MPU, rnr, rbar, rlar); +} + +#ifdef MPU_NS +/** Configure the given Non-secure MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegion_NS(uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + ARM_MPU_SetRegionEx(MPU_NS, rnr, rbar, rlar); +} +#endif + +/** Memcopy with strictly ordered memory access, e.g. for register targets. +* \param dst Destination data is copied to. +* \param src Source data is copied from. +* \param len Amount of data words to be copied. +*/ +__STATIC_INLINE void orderedCpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) +{ + uint32_t i; + for (i = 0U; i < len; ++i) + { + dst[i] = src[i]; + } +} + +/** Load the given number of MPU regions from a table to the given MPU. +* \param mpu Pointer to the MPU registers to be used. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. 
+*/ +__STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; + if (cnt == 1U) { + mpu->RNR = rnr; + orderedCpy(&(mpu->RBAR), &(table->RBAR), rowWordSize); + } else { + uint32_t rnrBase = rnr & ~(MPU_TYPE_RALIASES-1U); + uint32_t rnrOffset = rnr % MPU_TYPE_RALIASES; + + mpu->RNR = rnrBase; + while ((rnrOffset + cnt) > MPU_TYPE_RALIASES) { + uint32_t c = MPU_TYPE_RALIASES - rnrOffset; + orderedCpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), c*rowWordSize); + table += c; + cnt -= c; + rnrOffset = 0U; + rnrBase += MPU_TYPE_RALIASES; + mpu->RNR = rnrBase; + } + + orderedCpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), cnt*rowWordSize); + } +} + +/** Load the given number of MPU regions from a table. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_Load(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + ARM_MPU_LoadEx(MPU, rnr, table, cnt); +} + +#ifdef MPU_NS +/** Load the given number of MPU regions from a table to the Non-secure MPU. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_Load_NS(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + ARM_MPU_LoadEx(MPU_NS, rnr, table, cnt); +} +#endif + +#endif + diff --git a/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/tz_context.h b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/tz_context.h new file mode 100644 index 000000000..0d09749f3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/CMSIS/Include/tz_context.h @@ -0,0 +1,70 @@ +/****************************************************************************** + * @file tz_context.h + * @brief Context Management for Armv8-M TrustZone + * @version V1.0.1 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2017-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef TZ_CONTEXT_H +#define TZ_CONTEXT_H + +#include + +#ifndef TZ_MODULEID_T +#define TZ_MODULEID_T +/// \details Data type that identifies secure software modules called by a process. +typedef uint32_t TZ_ModuleId_t; +#endif + +/// \details TZ Memory ID identifies an allocated memory slot. 
+typedef uint32_t TZ_MemoryId_t; + +/// Initialize secure context memory system +/// \return execution status (1: success, 0: error) +uint32_t TZ_InitContextSystem_S (void); + +/// Allocate context memory for calling secure software modules in TrustZone +/// \param[in] module identifies software modules called from non-secure mode +/// \return value != 0 id TrustZone memory slot identifier +/// \return value 0 no memory available or internal error +TZ_MemoryId_t TZ_AllocModuleContext_S (TZ_ModuleId_t module); + +/// Free context memory that was previously allocated with \ref TZ_AllocModuleContext_S +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_FreeModuleContext_S (TZ_MemoryId_t id); + +/// Load secure context (called on RTOS thread context switch) +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_LoadContext_S (TZ_MemoryId_t id); + +/// Store secure context (called on RTOS thread context switch) +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_StoreContext_S (TZ_MemoryId_t id); + +#endif // TZ_CONTEXT_H diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h new file mode 100644 index 000000000..90767ed33 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h @@ -0,0 +1,3776 @@ +/** + ****************************************************************************** + * @file stm32_hal_legacy.h + * @author MCD Application Team + * @brief This file contains aliases definition for the STM32Cube HAL constants + * macros and functions maintained for legacy purpose. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2019 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32_HAL_LEGACY +#define STM32_HAL_LEGACY + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup HAL_AES_Aliased_Defines HAL CRYP Aliased Defines maintained for legacy purpose + * @{ + */ +#define AES_FLAG_RDERR CRYP_FLAG_RDERR +#define AES_FLAG_WRERR CRYP_FLAG_WRERR +#define AES_CLEARFLAG_CCF CRYP_CLEARFLAG_CCF +#define AES_CLEARFLAG_RDERR CRYP_CLEARFLAG_RDERR +#define AES_CLEARFLAG_WRERR CRYP_CLEARFLAG_WRERR + +/** + * @} + */ + +/** @defgroup HAL_ADC_Aliased_Defines HAL ADC Aliased Defines maintained for legacy purpose + * @{ + */ +#define ADC_RESOLUTION12b ADC_RESOLUTION_12B +#define ADC_RESOLUTION10b ADC_RESOLUTION_10B +#define ADC_RESOLUTION8b ADC_RESOLUTION_8B +#define ADC_RESOLUTION6b ADC_RESOLUTION_6B +#define OVR_DATA_OVERWRITTEN ADC_OVR_DATA_OVERWRITTEN +#define OVR_DATA_PRESERVED ADC_OVR_DATA_PRESERVED +#define EOC_SINGLE_CONV ADC_EOC_SINGLE_CONV +#define EOC_SEQ_CONV ADC_EOC_SEQ_CONV +#define EOC_SINGLE_SEQ_CONV ADC_EOC_SINGLE_SEQ_CONV +#define REGULAR_GROUP ADC_REGULAR_GROUP +#define INJECTED_GROUP ADC_INJECTED_GROUP +#define REGULAR_INJECTED_GROUP ADC_REGULAR_INJECTED_GROUP +#define AWD_EVENT ADC_AWD_EVENT +#define AWD1_EVENT ADC_AWD1_EVENT +#define AWD2_EVENT ADC_AWD2_EVENT +#define AWD3_EVENT ADC_AWD3_EVENT +#define OVR_EVENT ADC_OVR_EVENT +#define JQOVF_EVENT ADC_JQOVF_EVENT +#define ALL_CHANNELS ADC_ALL_CHANNELS +#define REGULAR_CHANNELS ADC_REGULAR_CHANNELS +#define INJECTED_CHANNELS ADC_INJECTED_CHANNELS +#define SYSCFG_FLAG_SENSOR_ADC ADC_FLAG_SENSOR +#define SYSCFG_FLAG_VREF_ADC ADC_FLAG_VREFINT +#define ADC_CLOCKPRESCALER_PCLK_DIV1 ADC_CLOCK_SYNC_PCLK_DIV1 +#define ADC_CLOCKPRESCALER_PCLK_DIV2 ADC_CLOCK_SYNC_PCLK_DIV2 +#define ADC_CLOCKPRESCALER_PCLK_DIV4 ADC_CLOCK_SYNC_PCLK_DIV4 +#define ADC_CLOCKPRESCALER_PCLK_DIV6 ADC_CLOCK_SYNC_PCLK_DIV6 +#define ADC_CLOCKPRESCALER_PCLK_DIV8 ADC_CLOCK_SYNC_PCLK_DIV8 +#define ADC_EXTERNALTRIG0_T6_TRGO ADC_EXTERNALTRIGCONV_T6_TRGO +#define ADC_EXTERNALTRIG1_T21_CC2 ADC_EXTERNALTRIGCONV_T21_CC2 +#define ADC_EXTERNALTRIG2_T2_TRGO ADC_EXTERNALTRIGCONV_T2_TRGO +#define ADC_EXTERNALTRIG3_T2_CC4 ADC_EXTERNALTRIGCONV_T2_CC4 +#define ADC_EXTERNALTRIG4_T22_TRGO ADC_EXTERNALTRIGCONV_T22_TRGO +#define ADC_EXTERNALTRIG7_EXT_IT11 ADC_EXTERNALTRIGCONV_EXT_IT11 +#define ADC_CLOCK_ASYNC ADC_CLOCK_ASYNC_DIV1 +#define ADC_EXTERNALTRIG_EDGE_NONE ADC_EXTERNALTRIGCONVEDGE_NONE +#define ADC_EXTERNALTRIG_EDGE_RISING ADC_EXTERNALTRIGCONVEDGE_RISING +#define ADC_EXTERNALTRIG_EDGE_FALLING ADC_EXTERNALTRIGCONVEDGE_FALLING +#define ADC_EXTERNALTRIG_EDGE_RISINGFALLING ADC_EXTERNALTRIGCONVEDGE_RISINGFALLING +#define ADC_SAMPLETIME_2CYCLE_5 ADC_SAMPLETIME_2CYCLES_5 + +#define HAL_ADC_STATE_BUSY_REG HAL_ADC_STATE_REG_BUSY +#define HAL_ADC_STATE_BUSY_INJ HAL_ADC_STATE_INJ_BUSY +#define HAL_ADC_STATE_EOC_REG HAL_ADC_STATE_REG_EOC +#define HAL_ADC_STATE_EOC_INJ HAL_ADC_STATE_INJ_EOC +#define 
HAL_ADC_STATE_ERROR HAL_ADC_STATE_ERROR_INTERNAL +#define HAL_ADC_STATE_BUSY HAL_ADC_STATE_BUSY_INTERNAL +#define HAL_ADC_STATE_AWD HAL_ADC_STATE_AWD1 + +#if defined(STM32H7) +#define ADC_CHANNEL_VBAT_DIV4 ADC_CHANNEL_VBAT +#endif /* STM32H7 */ +/** + * @} + */ + +/** @defgroup HAL_CEC_Aliased_Defines HAL CEC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define __HAL_CEC_GET_IT __HAL_CEC_GET_FLAG + +/** + * @} + */ + +/** @defgroup HAL_COMP_Aliased_Defines HAL COMP Aliased Defines maintained for legacy purpose + * @{ + */ +#define COMP_WINDOWMODE_DISABLED COMP_WINDOWMODE_DISABLE +#define COMP_WINDOWMODE_ENABLED COMP_WINDOWMODE_ENABLE +#define COMP_EXTI_LINE_COMP1_EVENT COMP_EXTI_LINE_COMP1 +#define COMP_EXTI_LINE_COMP2_EVENT COMP_EXTI_LINE_COMP2 +#define COMP_EXTI_LINE_COMP3_EVENT COMP_EXTI_LINE_COMP3 +#define COMP_EXTI_LINE_COMP4_EVENT COMP_EXTI_LINE_COMP4 +#define COMP_EXTI_LINE_COMP5_EVENT COMP_EXTI_LINE_COMP5 +#define COMP_EXTI_LINE_COMP6_EVENT COMP_EXTI_LINE_COMP6 +#define COMP_EXTI_LINE_COMP7_EVENT COMP_EXTI_LINE_COMP7 +#if defined(STM32L0) +#define COMP_LPTIMCONNECTION_ENABLED ((uint32_t)0x00000003U) /*!< COMPX output generic naming: connected to LPTIM input 1 for COMP1, LPTIM input 2 for COMP2 */ +#endif +#define COMP_OUTPUT_COMP6TIM2OCREFCLR COMP_OUTPUT_COMP6_TIM2OCREFCLR +#if defined(STM32F373xC) || defined(STM32F378xx) +#define COMP_OUTPUT_TIM3IC1 COMP_OUTPUT_COMP1_TIM3IC1 +#define COMP_OUTPUT_TIM3OCREFCLR COMP_OUTPUT_COMP1_TIM3OCREFCLR +#endif /* STM32F373xC || STM32F378xx */ + +#if defined(STM32L0) || defined(STM32L4) +#define COMP_WINDOWMODE_ENABLE COMP_WINDOWMODE_COMP1_INPUT_PLUS_COMMON + +#define COMP_NONINVERTINGINPUT_IO1 COMP_INPUT_PLUS_IO1 +#define COMP_NONINVERTINGINPUT_IO2 COMP_INPUT_PLUS_IO2 +#define COMP_NONINVERTINGINPUT_IO3 COMP_INPUT_PLUS_IO3 +#define COMP_NONINVERTINGINPUT_IO4 COMP_INPUT_PLUS_IO4 +#define COMP_NONINVERTINGINPUT_IO5 COMP_INPUT_PLUS_IO5 +#define COMP_NONINVERTINGINPUT_IO6 COMP_INPUT_PLUS_IO6 + +#define COMP_INVERTINGINPUT_1_4VREFINT COMP_INPUT_MINUS_1_4VREFINT +#define COMP_INVERTINGINPUT_1_2VREFINT COMP_INPUT_MINUS_1_2VREFINT +#define COMP_INVERTINGINPUT_3_4VREFINT COMP_INPUT_MINUS_3_4VREFINT +#define COMP_INVERTINGINPUT_VREFINT COMP_INPUT_MINUS_VREFINT +#define COMP_INVERTINGINPUT_DAC1_CH1 COMP_INPUT_MINUS_DAC1_CH1 +#define COMP_INVERTINGINPUT_DAC1_CH2 COMP_INPUT_MINUS_DAC1_CH2 +#define COMP_INVERTINGINPUT_DAC1 COMP_INPUT_MINUS_DAC1_CH1 +#define COMP_INVERTINGINPUT_DAC2 COMP_INPUT_MINUS_DAC1_CH2 +#define COMP_INVERTINGINPUT_IO1 COMP_INPUT_MINUS_IO1 +#if defined(STM32L0) +/* Issue fixed on STM32L0 COMP driver: only 2 dedicated IO (IO1 and IO2), */ +/* IO2 was wrongly assigned to IO shared with DAC and IO3 was corresponding */ +/* to the second dedicated IO (only for COMP2). */ +#define COMP_INVERTINGINPUT_IO2 COMP_INPUT_MINUS_DAC1_CH2 +#define COMP_INVERTINGINPUT_IO3 COMP_INPUT_MINUS_IO2 +#else +#define COMP_INVERTINGINPUT_IO2 COMP_INPUT_MINUS_IO2 +#define COMP_INVERTINGINPUT_IO3 COMP_INPUT_MINUS_IO3 +#endif +#define COMP_INVERTINGINPUT_IO4 COMP_INPUT_MINUS_IO4 +#define COMP_INVERTINGINPUT_IO5 COMP_INPUT_MINUS_IO5 + +#define COMP_OUTPUTLEVEL_LOW COMP_OUTPUT_LEVEL_LOW +#define COMP_OUTPUTLEVEL_HIGH COMP_OUTPUT_LEVEL_HIGH + +/* Note: Literal "COMP_FLAG_LOCK" kept for legacy purpose. */ +/* To check COMP lock state, use macro "__HAL_COMP_IS_LOCKED()". 
*/ +#if defined(COMP_CSR_LOCK) +#define COMP_FLAG_LOCK COMP_CSR_LOCK +#elif defined(COMP_CSR_COMP1LOCK) +#define COMP_FLAG_LOCK COMP_CSR_COMP1LOCK +#elif defined(COMP_CSR_COMPxLOCK) +#define COMP_FLAG_LOCK COMP_CSR_COMPxLOCK +#endif + +#if defined(STM32L4) +#define COMP_BLANKINGSRCE_TIM1OC5 COMP_BLANKINGSRC_TIM1_OC5_COMP1 +#define COMP_BLANKINGSRCE_TIM2OC3 COMP_BLANKINGSRC_TIM2_OC3_COMP1 +#define COMP_BLANKINGSRCE_TIM3OC3 COMP_BLANKINGSRC_TIM3_OC3_COMP1 +#define COMP_BLANKINGSRCE_TIM3OC4 COMP_BLANKINGSRC_TIM3_OC4_COMP2 +#define COMP_BLANKINGSRCE_TIM8OC5 COMP_BLANKINGSRC_TIM8_OC5_COMP2 +#define COMP_BLANKINGSRCE_TIM15OC1 COMP_BLANKINGSRC_TIM15_OC1_COMP2 +#define COMP_BLANKINGSRCE_NONE COMP_BLANKINGSRC_NONE +#endif + +#if defined(STM32L0) +#define COMP_MODE_HIGHSPEED COMP_POWERMODE_MEDIUMSPEED +#define COMP_MODE_LOWSPEED COMP_POWERMODE_ULTRALOWPOWER +#else +#define COMP_MODE_HIGHSPEED COMP_POWERMODE_HIGHSPEED +#define COMP_MODE_MEDIUMSPEED COMP_POWERMODE_MEDIUMSPEED +#define COMP_MODE_LOWPOWER COMP_POWERMODE_LOWPOWER +#define COMP_MODE_ULTRALOWPOWER COMP_POWERMODE_ULTRALOWPOWER +#endif + +#endif +/** + * @} + */ + +/** @defgroup HAL_CORTEX_Aliased_Defines HAL CORTEX Aliased Defines maintained for legacy purpose + * @{ + */ +#define __HAL_CORTEX_SYSTICKCLK_CONFIG HAL_SYSTICK_CLKSourceConfig +/** + * @} + */ + +/** @defgroup HAL_CRC_Aliased_Defines HAL CRC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define CRC_OUTPUTDATA_INVERSION_DISABLED CRC_OUTPUTDATA_INVERSION_DISABLE +#define CRC_OUTPUTDATA_INVERSION_ENABLED CRC_OUTPUTDATA_INVERSION_ENABLE + +/** + * @} + */ + +/** @defgroup HAL_DAC_Aliased_Defines HAL DAC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define DAC1_CHANNEL_1 DAC_CHANNEL_1 +#define DAC1_CHANNEL_2 DAC_CHANNEL_2 +#define DAC2_CHANNEL_1 DAC_CHANNEL_1 +#define DAC_WAVE_NONE 0x00000000U +#define DAC_WAVE_NOISE DAC_CR_WAVE1_0 +#define DAC_WAVE_TRIANGLE DAC_CR_WAVE1_1 +#define DAC_WAVEGENERATION_NONE DAC_WAVE_NONE +#define DAC_WAVEGENERATION_NOISE DAC_WAVE_NOISE +#define DAC_WAVEGENERATION_TRIANGLE DAC_WAVE_TRIANGLE + +#if defined(STM32G4) || defined(STM32H7) +#define DAC_CHIPCONNECT_DISABLE DAC_CHIPCONNECT_EXTERNAL +#define DAC_CHIPCONNECT_ENABLE DAC_CHIPCONNECT_INTERNAL +#endif + +#if defined(STM32L1) || defined(STM32L4) || defined(STM32G0) || defined(STM32L5) || defined(STM32H7) || defined(STM32F4) +#define HAL_DAC_MSP_INIT_CB_ID HAL_DAC_MSPINIT_CB_ID +#define HAL_DAC_MSP_DEINIT_CB_ID HAL_DAC_MSPDEINIT_CB_ID +#endif + +/** + * @} + */ + +/** @defgroup HAL_DMA_Aliased_Defines HAL DMA Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_REMAPDMA_ADC_DMA_CH2 DMA_REMAP_ADC_DMA_CH2 +#define HAL_REMAPDMA_USART1_TX_DMA_CH4 DMA_REMAP_USART1_TX_DMA_CH4 +#define HAL_REMAPDMA_USART1_RX_DMA_CH5 DMA_REMAP_USART1_RX_DMA_CH5 +#define HAL_REMAPDMA_TIM16_DMA_CH4 DMA_REMAP_TIM16_DMA_CH4 +#define HAL_REMAPDMA_TIM17_DMA_CH2 DMA_REMAP_TIM17_DMA_CH2 +#define HAL_REMAPDMA_USART3_DMA_CH32 DMA_REMAP_USART3_DMA_CH32 +#define HAL_REMAPDMA_TIM16_DMA_CH6 DMA_REMAP_TIM16_DMA_CH6 +#define HAL_REMAPDMA_TIM17_DMA_CH7 DMA_REMAP_TIM17_DMA_CH7 +#define HAL_REMAPDMA_SPI2_DMA_CH67 DMA_REMAP_SPI2_DMA_CH67 +#define HAL_REMAPDMA_USART2_DMA_CH67 DMA_REMAP_USART2_DMA_CH67 +#define HAL_REMAPDMA_I2C1_DMA_CH76 DMA_REMAP_I2C1_DMA_CH76 +#define HAL_REMAPDMA_TIM1_DMA_CH6 DMA_REMAP_TIM1_DMA_CH6 +#define HAL_REMAPDMA_TIM2_DMA_CH7 DMA_REMAP_TIM2_DMA_CH7 +#define HAL_REMAPDMA_TIM3_DMA_CH6 DMA_REMAP_TIM3_DMA_CH6 + +#define IS_HAL_REMAPDMA IS_DMA_REMAP +#define 
__HAL_REMAPDMA_CHANNEL_ENABLE __HAL_DMA_REMAP_CHANNEL_ENABLE +#define __HAL_REMAPDMA_CHANNEL_DISABLE __HAL_DMA_REMAP_CHANNEL_DISABLE + +#if defined(STM32L4) + +#define HAL_DMAMUX1_REQUEST_GEN_EXTI0 HAL_DMAMUX1_REQ_GEN_EXTI0 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI1 HAL_DMAMUX1_REQ_GEN_EXTI1 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI2 HAL_DMAMUX1_REQ_GEN_EXTI2 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI3 HAL_DMAMUX1_REQ_GEN_EXTI3 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI4 HAL_DMAMUX1_REQ_GEN_EXTI4 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI5 HAL_DMAMUX1_REQ_GEN_EXTI5 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI6 HAL_DMAMUX1_REQ_GEN_EXTI6 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI7 HAL_DMAMUX1_REQ_GEN_EXTI7 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI8 HAL_DMAMUX1_REQ_GEN_EXTI8 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI9 HAL_DMAMUX1_REQ_GEN_EXTI9 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI10 HAL_DMAMUX1_REQ_GEN_EXTI10 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI11 HAL_DMAMUX1_REQ_GEN_EXTI11 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI12 HAL_DMAMUX1_REQ_GEN_EXTI12 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI13 HAL_DMAMUX1_REQ_GEN_EXTI13 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI14 HAL_DMAMUX1_REQ_GEN_EXTI14 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI15 HAL_DMAMUX1_REQ_GEN_EXTI15 +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH0_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH0_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH1_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH1_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH2_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH2_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH3_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH3_EVT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM1_OUT HAL_DMAMUX1_REQ_GEN_LPTIM1_OUT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM2_OUT HAL_DMAMUX1_REQ_GEN_LPTIM2_OUT +#define HAL_DMAMUX1_REQUEST_GEN_DSI_TE HAL_DMAMUX1_REQ_GEN_DSI_TE +#define HAL_DMAMUX1_REQUEST_GEN_DSI_EOT HAL_DMAMUX1_REQ_GEN_DSI_EOT +#define HAL_DMAMUX1_REQUEST_GEN_DMA2D_EOT HAL_DMAMUX1_REQ_GEN_DMA2D_EOT +#define HAL_DMAMUX1_REQUEST_GEN_LTDC_IT HAL_DMAMUX1_REQ_GEN_LTDC_IT + +#define HAL_DMAMUX_REQUEST_GEN_NO_EVENT HAL_DMAMUX_REQ_GEN_NO_EVENT +#define HAL_DMAMUX_REQUEST_GEN_RISING HAL_DMAMUX_REQ_GEN_RISING +#define HAL_DMAMUX_REQUEST_GEN_FALLING HAL_DMAMUX_REQ_GEN_FALLING +#define HAL_DMAMUX_REQUEST_GEN_RISING_FALLING HAL_DMAMUX_REQ_GEN_RISING_FALLING + +#if defined(STM32L4R5xx) || defined(STM32L4R9xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || defined(STM32L4S7xx) || defined(STM32L4S9xx) +#define DMA_REQUEST_DCMI_PSSI DMA_REQUEST_DCMI +#endif + +#endif /* STM32L4 */ + +#if defined(STM32G0) +#define DMA_REQUEST_DAC1_CHANNEL1 DMA_REQUEST_DAC1_CH1 +#define DMA_REQUEST_DAC1_CHANNEL2 DMA_REQUEST_DAC1_CH2 +#endif + +#if defined(STM32H7) + +#define DMA_REQUEST_DAC1 DMA_REQUEST_DAC1_CH1 +#define DMA_REQUEST_DAC2 DMA_REQUEST_DAC1_CH2 + +#define BDMA_REQUEST_LP_UART1_RX BDMA_REQUEST_LPUART1_RX +#define BDMA_REQUEST_LP_UART1_TX BDMA_REQUEST_LPUART1_TX + +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH0_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH0_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH1_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH1_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH2_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH2_EVT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM1_OUT HAL_DMAMUX1_REQ_GEN_LPTIM1_OUT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM2_OUT HAL_DMAMUX1_REQ_GEN_LPTIM2_OUT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM3_OUT HAL_DMAMUX1_REQ_GEN_LPTIM3_OUT +#define HAL_DMAMUX1_REQUEST_GEN_EXTI0 HAL_DMAMUX1_REQ_GEN_EXTI0 +#define HAL_DMAMUX1_REQUEST_GEN_TIM12_TRGO HAL_DMAMUX1_REQ_GEN_TIM12_TRGO + +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH0_EVT 
HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH0_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH1_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH1_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH2_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH2_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH3_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH3_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH4_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH4_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH5_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH5_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH6_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH6_EVT +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_RX_WKUP HAL_DMAMUX2_REQ_GEN_LPUART1_RX_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_TX_WKUP HAL_DMAMUX2_REQ_GEN_LPUART1_TX_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM2_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM2_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM2_OUT HAL_DMAMUX2_REQ_GEN_LPTIM2_OUT +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM3_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM3_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM3_OUT HAL_DMAMUX2_REQ_GEN_LPTIM3_OUT +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM4_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM4_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM5_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM5_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_I2C4_WKUP HAL_DMAMUX2_REQ_GEN_I2C4_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_SPI6_WKUP HAL_DMAMUX2_REQ_GEN_SPI6_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_COMP1_OUT HAL_DMAMUX2_REQ_GEN_COMP1_OUT +#define HAL_DMAMUX2_REQUEST_GEN_COMP2_OUT HAL_DMAMUX2_REQ_GEN_COMP2_OUT +#define HAL_DMAMUX2_REQUEST_GEN_RTC_WKUP HAL_DMAMUX2_REQ_GEN_RTC_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_EXTI0 HAL_DMAMUX2_REQ_GEN_EXTI0 +#define HAL_DMAMUX2_REQUEST_GEN_EXTI2 HAL_DMAMUX2_REQ_GEN_EXTI2 +#define HAL_DMAMUX2_REQUEST_GEN_I2C4_IT_EVT HAL_DMAMUX2_REQ_GEN_I2C4_IT_EVT +#define HAL_DMAMUX2_REQUEST_GEN_SPI6_IT HAL_DMAMUX2_REQ_GEN_SPI6_IT +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_TX_IT HAL_DMAMUX2_REQ_GEN_LPUART1_TX_IT +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_RX_IT HAL_DMAMUX2_REQ_GEN_LPUART1_RX_IT +#define HAL_DMAMUX2_REQUEST_GEN_ADC3_IT HAL_DMAMUX2_REQ_GEN_ADC3_IT +#define HAL_DMAMUX2_REQUEST_GEN_ADC3_AWD1_OUT HAL_DMAMUX2_REQ_GEN_ADC3_AWD1_OUT +#define HAL_DMAMUX2_REQUEST_GEN_BDMA_CH0_IT HAL_DMAMUX2_REQ_GEN_BDMA_CH0_IT +#define HAL_DMAMUX2_REQUEST_GEN_BDMA_CH1_IT HAL_DMAMUX2_REQ_GEN_BDMA_CH1_IT + +#define HAL_DMAMUX_REQUEST_GEN_NO_EVENT HAL_DMAMUX_REQ_GEN_NO_EVENT +#define HAL_DMAMUX_REQUEST_GEN_RISING HAL_DMAMUX_REQ_GEN_RISING +#define HAL_DMAMUX_REQUEST_GEN_FALLING HAL_DMAMUX_REQ_GEN_FALLING +#define HAL_DMAMUX_REQUEST_GEN_RISING_FALLING HAL_DMAMUX_REQ_GEN_RISING_FALLING + +#define DFSDM_FILTER_EXT_TRIG_LPTIM1 DFSDM_FILTER_EXT_TRIG_LPTIM1_OUT +#define DFSDM_FILTER_EXT_TRIG_LPTIM2 DFSDM_FILTER_EXT_TRIG_LPTIM2_OUT +#define DFSDM_FILTER_EXT_TRIG_LPTIM3 DFSDM_FILTER_EXT_TRIG_LPTIM3_OUT + +#define DAC_TRIGGER_LP1_OUT DAC_TRIGGER_LPTIM1_OUT +#define DAC_TRIGGER_LP2_OUT DAC_TRIGGER_LPTIM2_OUT + +#endif /* STM32H7 */ + +/** + * @} + */ + +/** @defgroup HAL_FLASH_Aliased_Defines HAL FLASH Aliased Defines maintained for legacy purpose + * @{ + */ + +#define TYPEPROGRAM_BYTE FLASH_TYPEPROGRAM_BYTE +#define TYPEPROGRAM_HALFWORD FLASH_TYPEPROGRAM_HALFWORD +#define TYPEPROGRAM_WORD FLASH_TYPEPROGRAM_WORD +#define TYPEPROGRAM_DOUBLEWORD FLASH_TYPEPROGRAM_DOUBLEWORD +#define TYPEERASE_SECTORS FLASH_TYPEERASE_SECTORS +#define TYPEERASE_PAGES FLASH_TYPEERASE_PAGES +#define TYPEERASE_PAGEERASE FLASH_TYPEERASE_PAGES +#define TYPEERASE_MASSERASE FLASH_TYPEERASE_MASSERASE +#define WRPSTATE_DISABLE OB_WRPSTATE_DISABLE +#define WRPSTATE_ENABLE OB_WRPSTATE_ENABLE 
+#define HAL_FLASH_TIMEOUT_VALUE FLASH_TIMEOUT_VALUE +#define OBEX_PCROP OPTIONBYTE_PCROP +#define OBEX_BOOTCONFIG OPTIONBYTE_BOOTCONFIG +#define PCROPSTATE_DISABLE OB_PCROP_STATE_DISABLE +#define PCROPSTATE_ENABLE OB_PCROP_STATE_ENABLE +#define TYPEERASEDATA_BYTE FLASH_TYPEERASEDATA_BYTE +#define TYPEERASEDATA_HALFWORD FLASH_TYPEERASEDATA_HALFWORD +#define TYPEERASEDATA_WORD FLASH_TYPEERASEDATA_WORD +#define TYPEPROGRAMDATA_BYTE FLASH_TYPEPROGRAMDATA_BYTE +#define TYPEPROGRAMDATA_HALFWORD FLASH_TYPEPROGRAMDATA_HALFWORD +#define TYPEPROGRAMDATA_WORD FLASH_TYPEPROGRAMDATA_WORD +#define TYPEPROGRAMDATA_FASTBYTE FLASH_TYPEPROGRAMDATA_FASTBYTE +#define TYPEPROGRAMDATA_FASTHALFWORD FLASH_TYPEPROGRAMDATA_FASTHALFWORD +#define TYPEPROGRAMDATA_FASTWORD FLASH_TYPEPROGRAMDATA_FASTWORD +#define PAGESIZE FLASH_PAGE_SIZE +#define TYPEPROGRAM_FASTBYTE FLASH_TYPEPROGRAM_BYTE +#define TYPEPROGRAM_FASTHALFWORD FLASH_TYPEPROGRAM_HALFWORD +#define TYPEPROGRAM_FASTWORD FLASH_TYPEPROGRAM_WORD +#define VOLTAGE_RANGE_1 FLASH_VOLTAGE_RANGE_1 +#define VOLTAGE_RANGE_2 FLASH_VOLTAGE_RANGE_2 +#define VOLTAGE_RANGE_3 FLASH_VOLTAGE_RANGE_3 +#define VOLTAGE_RANGE_4 FLASH_VOLTAGE_RANGE_4 +#define TYPEPROGRAM_FAST FLASH_TYPEPROGRAM_FAST +#define TYPEPROGRAM_FAST_AND_LAST FLASH_TYPEPROGRAM_FAST_AND_LAST +#define WRPAREA_BANK1_AREAA OB_WRPAREA_BANK1_AREAA +#define WRPAREA_BANK1_AREAB OB_WRPAREA_BANK1_AREAB +#define WRPAREA_BANK2_AREAA OB_WRPAREA_BANK2_AREAA +#define WRPAREA_BANK2_AREAB OB_WRPAREA_BANK2_AREAB +#define IWDG_STDBY_FREEZE OB_IWDG_STDBY_FREEZE +#define IWDG_STDBY_ACTIVE OB_IWDG_STDBY_RUN +#define IWDG_STOP_FREEZE OB_IWDG_STOP_FREEZE +#define IWDG_STOP_ACTIVE OB_IWDG_STOP_RUN +#define FLASH_ERROR_NONE HAL_FLASH_ERROR_NONE +#define FLASH_ERROR_RD HAL_FLASH_ERROR_RD +#define FLASH_ERROR_PG HAL_FLASH_ERROR_PROG +#define FLASH_ERROR_PGP HAL_FLASH_ERROR_PGS +#define FLASH_ERROR_WRP HAL_FLASH_ERROR_WRP +#define FLASH_ERROR_OPTV HAL_FLASH_ERROR_OPTV +#define FLASH_ERROR_OPTVUSR HAL_FLASH_ERROR_OPTVUSR +#define FLASH_ERROR_PROG HAL_FLASH_ERROR_PROG +#define FLASH_ERROR_OP HAL_FLASH_ERROR_OPERATION +#define FLASH_ERROR_PGA HAL_FLASH_ERROR_PGA +#define FLASH_ERROR_SIZE HAL_FLASH_ERROR_SIZE +#define FLASH_ERROR_SIZ HAL_FLASH_ERROR_SIZE +#define FLASH_ERROR_PGS HAL_FLASH_ERROR_PGS +#define FLASH_ERROR_MIS HAL_FLASH_ERROR_MIS +#define FLASH_ERROR_FAST HAL_FLASH_ERROR_FAST +#define FLASH_ERROR_FWWERR HAL_FLASH_ERROR_FWWERR +#define FLASH_ERROR_NOTZERO HAL_FLASH_ERROR_NOTZERO +#define FLASH_ERROR_OPERATION HAL_FLASH_ERROR_OPERATION +#define FLASH_ERROR_ERS HAL_FLASH_ERROR_ERS +#define OB_WDG_SW OB_IWDG_SW +#define OB_WDG_HW OB_IWDG_HW +#define OB_SDADC12_VDD_MONITOR_SET OB_SDACD_VDD_MONITOR_SET +#define OB_SDADC12_VDD_MONITOR_RESET OB_SDACD_VDD_MONITOR_RESET +#define OB_RAM_PARITY_CHECK_SET OB_SRAM_PARITY_SET +#define OB_RAM_PARITY_CHECK_RESET OB_SRAM_PARITY_RESET +#define IS_OB_SDADC12_VDD_MONITOR IS_OB_SDACD_VDD_MONITOR +#define OB_RDP_LEVEL0 OB_RDP_LEVEL_0 +#define OB_RDP_LEVEL1 OB_RDP_LEVEL_1 +#define OB_RDP_LEVEL2 OB_RDP_LEVEL_2 +#if defined(STM32G0) +#define OB_BOOT_LOCK_DISABLE OB_BOOT_ENTRY_FORCED_NONE +#define OB_BOOT_LOCK_ENABLE OB_BOOT_ENTRY_FORCED_FLASH +#else +#define OB_BOOT_ENTRY_FORCED_NONE OB_BOOT_LOCK_DISABLE +#define OB_BOOT_ENTRY_FORCED_FLASH OB_BOOT_LOCK_ENABLE +#endif +#if defined(STM32H7) +#define FLASH_FLAG_SNECCE_BANK1RR FLASH_FLAG_SNECCERR_BANK1 +#define FLASH_FLAG_DBECCE_BANK1RR FLASH_FLAG_DBECCERR_BANK1 +#define FLASH_FLAG_STRBER_BANK1R FLASH_FLAG_STRBERR_BANK1 +#define FLASH_FLAG_SNECCE_BANK2RR 
FLASH_FLAG_SNECCERR_BANK2 +#define FLASH_FLAG_DBECCE_BANK2RR FLASH_FLAG_DBECCERR_BANK2 +#define FLASH_FLAG_STRBER_BANK2R FLASH_FLAG_STRBERR_BANK2 +#define FLASH_FLAG_WDW FLASH_FLAG_WBNE +#define OB_WRP_SECTOR_All OB_WRP_SECTOR_ALL +#endif /* STM32H7 */ + +/** + * @} + */ + +/** @defgroup HAL_JPEG_Aliased_Macros HAL JPEG Aliased Macros maintained for legacy purpose + * @{ + */ + +#if defined(STM32H7) +#define __HAL_RCC_JPEG_CLK_ENABLE __HAL_RCC_JPGDECEN_CLK_ENABLE +#define __HAL_RCC_JPEG_CLK_DISABLE __HAL_RCC_JPGDECEN_CLK_DISABLE +#define __HAL_RCC_JPEG_FORCE_RESET __HAL_RCC_JPGDECRST_FORCE_RESET +#define __HAL_RCC_JPEG_RELEASE_RESET __HAL_RCC_JPGDECRST_RELEASE_RESET +#define __HAL_RCC_JPEG_CLK_SLEEP_ENABLE __HAL_RCC_JPGDEC_CLK_SLEEP_ENABLE +#define __HAL_RCC_JPEG_CLK_SLEEP_DISABLE __HAL_RCC_JPGDEC_CLK_SLEEP_DISABLE +#endif /* STM32H7 */ + +/** + * @} + */ + +/** @defgroup HAL_SYSCFG_Aliased_Defines HAL SYSCFG Aliased Defines maintained for legacy purpose + * @{ + */ + +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PA9 I2C_FASTMODEPLUS_PA9 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PA10 I2C_FASTMODEPLUS_PA10 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB6 I2C_FASTMODEPLUS_PB6 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB7 I2C_FASTMODEPLUS_PB7 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB8 I2C_FASTMODEPLUS_PB8 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB9 I2C_FASTMODEPLUS_PB9 +#define HAL_SYSCFG_FASTMODEPLUS_I2C1 I2C_FASTMODEPLUS_I2C1 +#define HAL_SYSCFG_FASTMODEPLUS_I2C2 I2C_FASTMODEPLUS_I2C2 +#define HAL_SYSCFG_FASTMODEPLUS_I2C3 I2C_FASTMODEPLUS_I2C3 +#if defined(STM32G4) + +#define HAL_SYSCFG_EnableIOAnalogSwitchBooster HAL_SYSCFG_EnableIOSwitchBooster +#define HAL_SYSCFG_DisableIOAnalogSwitchBooster HAL_SYSCFG_DisableIOSwitchBooster +#define HAL_SYSCFG_EnableIOAnalogSwitchVDD HAL_SYSCFG_EnableIOSwitchVDD +#define HAL_SYSCFG_DisableIOAnalogSwitchVDD HAL_SYSCFG_DisableIOSwitchVDD +#endif /* STM32G4 */ +/** + * @} + */ + + +/** @defgroup LL_FMC_Aliased_Defines LL FMC Aliased Defines maintained for compatibility purpose + * @{ + */ +#if defined(STM32L4) || defined(STM32F7) || defined(STM32H7) || defined(STM32G4) +#define FMC_NAND_PCC_WAIT_FEATURE_DISABLE FMC_NAND_WAIT_FEATURE_DISABLE +#define FMC_NAND_PCC_WAIT_FEATURE_ENABLE FMC_NAND_WAIT_FEATURE_ENABLE +#define FMC_NAND_PCC_MEM_BUS_WIDTH_8 FMC_NAND_MEM_BUS_WIDTH_8 +#define FMC_NAND_PCC_MEM_BUS_WIDTH_16 FMC_NAND_MEM_BUS_WIDTH_16 +#elif defined(STM32F1) || defined(STM32F2) || defined(STM32F3) || defined(STM32F4) +#define FMC_NAND_WAIT_FEATURE_DISABLE FMC_NAND_PCC_WAIT_FEATURE_DISABLE +#define FMC_NAND_WAIT_FEATURE_ENABLE FMC_NAND_PCC_WAIT_FEATURE_ENABLE +#define FMC_NAND_MEM_BUS_WIDTH_8 FMC_NAND_PCC_MEM_BUS_WIDTH_8 +#define FMC_NAND_MEM_BUS_WIDTH_16 FMC_NAND_PCC_MEM_BUS_WIDTH_16 +#endif +/** + * @} + */ + +/** @defgroup LL_FSMC_Aliased_Defines LL FSMC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define FSMC_NORSRAM_TYPEDEF FSMC_NORSRAM_TypeDef +#define FSMC_NORSRAM_EXTENDED_TYPEDEF FSMC_NORSRAM_EXTENDED_TypeDef +/** + * @} + */ + +/** @defgroup HAL_GPIO_Aliased_Macros HAL GPIO Aliased Macros maintained for legacy purpose + * @{ + */ +#define GET_GPIO_SOURCE GPIO_GET_INDEX +#define GET_GPIO_INDEX GPIO_GET_INDEX + +#if defined(STM32F4) +#define GPIO_AF12_SDMMC GPIO_AF12_SDIO +#define GPIO_AF12_SDMMC1 GPIO_AF12_SDIO +#endif + +#if defined(STM32F7) +#define GPIO_AF12_SDIO GPIO_AF12_SDMMC1 +#define GPIO_AF12_SDMMC GPIO_AF12_SDMMC1 +#endif + +#if defined(STM32L4) +#define GPIO_AF12_SDIO GPIO_AF12_SDMMC1 +#define GPIO_AF12_SDMMC GPIO_AF12_SDMMC1 +#endif + +#if 
defined(STM32H7) +#define GPIO_AF7_SDIO1 GPIO_AF7_SDMMC1 +#define GPIO_AF8_SDIO1 GPIO_AF8_SDMMC1 +#define GPIO_AF12_SDIO1 GPIO_AF12_SDMMC1 +#define GPIO_AF9_SDIO2 GPIO_AF9_SDMMC2 +#define GPIO_AF10_SDIO2 GPIO_AF10_SDMMC2 +#define GPIO_AF11_SDIO2 GPIO_AF11_SDMMC2 + +#if defined (STM32H743xx) || defined (STM32H753xx) || defined (STM32H750xx) || defined (STM32H742xx) || \ + defined (STM32H745xx) || defined (STM32H755xx) || defined (STM32H747xx) || defined (STM32H757xx) +#define GPIO_AF10_OTG2_HS GPIO_AF10_OTG2_FS +#define GPIO_AF10_OTG1_FS GPIO_AF10_OTG1_HS +#define GPIO_AF12_OTG2_FS GPIO_AF12_OTG1_FS +#endif /*STM32H743xx || STM32H753xx || STM32H750xx || STM32H742xx || STM32H745xx || STM32H755xx || STM32H747xx || STM32H757xx */ +#endif /* STM32H7 */ + +#define GPIO_AF0_LPTIM GPIO_AF0_LPTIM1 +#define GPIO_AF1_LPTIM GPIO_AF1_LPTIM1 +#define GPIO_AF2_LPTIM GPIO_AF2_LPTIM1 + +#if defined(STM32L0) || defined(STM32L4) || defined(STM32F4) || defined(STM32F2) || defined(STM32F7) || defined(STM32G4) || defined(STM32H7) +#define GPIO_SPEED_LOW GPIO_SPEED_FREQ_LOW +#define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_MEDIUM +#define GPIO_SPEED_FAST GPIO_SPEED_FREQ_HIGH +#define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_VERY_HIGH +#endif /* STM32L0 || STM32L4 || STM32F4 || STM32F2 || STM32F7 || STM32G4 || STM32H7*/ + +#if defined(STM32L1) + #define GPIO_SPEED_VERY_LOW GPIO_SPEED_FREQ_LOW + #define GPIO_SPEED_LOW GPIO_SPEED_FREQ_MEDIUM + #define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_HIGH + #define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_VERY_HIGH +#endif /* STM32L1 */ + +#if defined(STM32F0) || defined(STM32F3) || defined(STM32F1) + #define GPIO_SPEED_LOW GPIO_SPEED_FREQ_LOW + #define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_MEDIUM + #define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_HIGH +#endif /* STM32F0 || STM32F3 || STM32F1 */ + +#define GPIO_AF6_DFSDM GPIO_AF6_DFSDM1 +/** + * @} + */ + +/** @defgroup HAL_HRTIM_Aliased_Macros HAL HRTIM Aliased Macros maintained for legacy purpose + * @{ + */ +#define HRTIM_TIMDELAYEDPROTECTION_DISABLED HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DISABLED +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT1_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT1_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT2_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT2_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDBOTH_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDBOTH_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_BALANCED_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_BALANCED_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT1_DEEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT1_DEEV7 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT2_DEEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT2_DEEV7 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDBOTH_EEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDBOTH_EEV7 +#define HRTIM_TIMDELAYEDPROTECTION_BALANCED_EEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_BALANCED_EEV7 + +#define __HAL_HRTIM_SetCounter __HAL_HRTIM_SETCOUNTER +#define __HAL_HRTIM_GetCounter __HAL_HRTIM_GETCOUNTER +#define __HAL_HRTIM_SetPeriod __HAL_HRTIM_SETPERIOD +#define __HAL_HRTIM_GetPeriod __HAL_HRTIM_GETPERIOD +#define __HAL_HRTIM_SetClockPrescaler __HAL_HRTIM_SETCLOCKPRESCALER +#define __HAL_HRTIM_GetClockPrescaler __HAL_HRTIM_GETCLOCKPRESCALER +#define __HAL_HRTIM_SetCompare __HAL_HRTIM_SETCOMPARE +#define __HAL_HRTIM_GetCompare __HAL_HRTIM_GETCOMPARE + +#if defined(STM32G4) +#define HAL_HRTIM_ExternalEventCounterConfig HAL_HRTIM_ExtEventCounterConfig +#define HAL_HRTIM_ExternalEventCounterEnable HAL_HRTIM_ExtEventCounterEnable 
+#define HAL_HRTIM_ExternalEventCounterDisable HAL_HRTIM_ExtEventCounterDisable +#define HAL_HRTIM_ExternalEventCounterReset HAL_HRTIM_ExtEventCounterReset +#endif /* STM32G4 */ + +#if defined(STM32H7) +#define HRTIM_OUTPUTSET_TIMAEV1_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMAEV2_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMAEV3_TIMCCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMAEV4_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMAEV5_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMAEV6_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMAEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMAEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMAEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMBEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMBEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMBEV3_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMBEV4_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMBEV5_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMBEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMBEV7_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMBEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMBEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMCEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMCEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMCEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMCEV4_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMCEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMCEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMCEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMCEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMCEV9_TIMFCMP2 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMDEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMDEV2_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMDEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMDEV4_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMDEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMDEV6_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMDEV7_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMDEV8_TIMFCMP1 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMDEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMEEV1_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMEEV2_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMEEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMEEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMEEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMEEV6_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMEEV7_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMEEV8_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMEEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMFEV1_TIMACMP3 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMFEV2_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMFEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMFEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMFEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMFEV6_TIMDCMP3 
HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMFEV7_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMFEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMFEV9_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_9 + +#define HRTIM_OUTPUTRESET_TIMAEV1_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMAEV2_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMAEV3_TIMCCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMAEV4_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMAEV5_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMAEV6_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMAEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMAEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMAEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMBEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMBEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMBEV3_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMBEV4_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMBEV5_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMBEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMBEV7_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMBEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMBEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMCEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMCEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMCEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMCEV4_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMCEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMCEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMCEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMCEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMCEV9_TIMFCMP2 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMDEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMDEV2_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMDEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMDEV4_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMDEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMDEV6_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMDEV7_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMDEV8_TIMFCMP1 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMDEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMEEV1_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMEEV2_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMEEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMEEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMEEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMEEV6_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMEEV7_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMEEV8_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMEEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMFEV1_TIMACMP3 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMFEV2_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMFEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMFEV4_TIMCCMP1 
HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMFEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMFEV6_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMFEV7_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMFEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMFEV9_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_9 +#endif /* STM32H7 */ + +#if defined(STM32F3) +/** @brief Constants defining available sources associated to external events. + */ +#define HRTIM_EVENTSRC_1 (0x00000000U) +#define HRTIM_EVENTSRC_2 (HRTIM_EECR1_EE1SRC_0) +#define HRTIM_EVENTSRC_3 (HRTIM_EECR1_EE1SRC_1) +#define HRTIM_EVENTSRC_4 (HRTIM_EECR1_EE1SRC_1 | HRTIM_EECR1_EE1SRC_0) + +/** @brief Constants defining the events that can be selected to configure the + * set/reset crossbar of a timer output + */ +#define HRTIM_OUTPUTSET_TIMEV_1 (HRTIM_SET1R_TIMEVNT1) +#define HRTIM_OUTPUTSET_TIMEV_2 (HRTIM_SET1R_TIMEVNT2) +#define HRTIM_OUTPUTSET_TIMEV_3 (HRTIM_SET1R_TIMEVNT3) +#define HRTIM_OUTPUTSET_TIMEV_4 (HRTIM_SET1R_TIMEVNT4) +#define HRTIM_OUTPUTSET_TIMEV_5 (HRTIM_SET1R_TIMEVNT5) +#define HRTIM_OUTPUTSET_TIMEV_6 (HRTIM_SET1R_TIMEVNT6) +#define HRTIM_OUTPUTSET_TIMEV_7 (HRTIM_SET1R_TIMEVNT7) +#define HRTIM_OUTPUTSET_TIMEV_8 (HRTIM_SET1R_TIMEVNT8) +#define HRTIM_OUTPUTSET_TIMEV_9 (HRTIM_SET1R_TIMEVNT9) + +#define HRTIM_OUTPUTRESET_TIMEV_1 (HRTIM_RST1R_TIMEVNT1) +#define HRTIM_OUTPUTRESET_TIMEV_2 (HRTIM_RST1R_TIMEVNT2) +#define HRTIM_OUTPUTRESET_TIMEV_3 (HRTIM_RST1R_TIMEVNT3) +#define HRTIM_OUTPUTRESET_TIMEV_4 (HRTIM_RST1R_TIMEVNT4) +#define HRTIM_OUTPUTRESET_TIMEV_5 (HRTIM_RST1R_TIMEVNT5) +#define HRTIM_OUTPUTRESET_TIMEV_6 (HRTIM_RST1R_TIMEVNT6) +#define HRTIM_OUTPUTRESET_TIMEV_7 (HRTIM_RST1R_TIMEVNT7) +#define HRTIM_OUTPUTRESET_TIMEV_8 (HRTIM_RST1R_TIMEVNT8) +#define HRTIM_OUTPUTRESET_TIMEV_9 (HRTIM_RST1R_TIMEVNT9) + +/** @brief Constants defining the event filtering applied to external events + * by a timer + */ +#define HRTIM_TIMEVENTFILTER_NONE (0x00000000U) +#define HRTIM_TIMEVENTFILTER_BLANKINGCMP1 (HRTIM_EEFR1_EE1FLTR_0) +#define HRTIM_TIMEVENTFILTER_BLANKINGCMP2 (HRTIM_EEFR1_EE1FLTR_1) +#define HRTIM_TIMEVENTFILTER_BLANKINGCMP3 (HRTIM_EEFR1_EE1FLTR_1 | HRTIM_EEFR1_EE1FLTR_0) +#define HRTIM_TIMEVENTFILTER_BLANKINGCMP4 (HRTIM_EEFR1_EE1FLTR_2) +#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR1 (HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_0) +#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR2 (HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_1) +#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR3 (HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_1 | HRTIM_EEFR1_EE1FLTR_0) +#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR4 (HRTIM_EEFR1_EE1FLTR_3) +#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR5 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_0) +#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR6 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_1) +#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR7 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_1 | HRTIM_EEFR1_EE1FLTR_0) +#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR8 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_2) +#define HRTIM_TIMEVENTFILTER_WINDOWINGCMP2 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_0) +#define HRTIM_TIMEVENTFILTER_WINDOWINGCMP3 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_1) +#define HRTIM_TIMEVENTFILTER_WINDOWINGTIM (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_1 | HRTIM_EEFR1_EE1FLTR_0) + +/** @brief Constants defining the DLL calibration periods (in micro seconds) + */ +#define HRTIM_CALIBRATIONRATE_7300 
0x00000000U +#define HRTIM_CALIBRATIONRATE_910 (HRTIM_DLLCR_CALRTE_0) +#define HRTIM_CALIBRATIONRATE_114 (HRTIM_DLLCR_CALRTE_1) +#define HRTIM_CALIBRATIONRATE_14 (HRTIM_DLLCR_CALRTE_1 | HRTIM_DLLCR_CALRTE_0) + +#endif /* STM32F3 */ +/** + * @} + */ + +/** @defgroup HAL_I2C_Aliased_Defines HAL I2C Aliased Defines maintained for legacy purpose + * @{ + */ +#define I2C_DUALADDRESS_DISABLED I2C_DUALADDRESS_DISABLE +#define I2C_DUALADDRESS_ENABLED I2C_DUALADDRESS_ENABLE +#define I2C_GENERALCALL_DISABLED I2C_GENERALCALL_DISABLE +#define I2C_GENERALCALL_ENABLED I2C_GENERALCALL_ENABLE +#define I2C_NOSTRETCH_DISABLED I2C_NOSTRETCH_DISABLE +#define I2C_NOSTRETCH_ENABLED I2C_NOSTRETCH_ENABLE +#define I2C_ANALOGFILTER_ENABLED I2C_ANALOGFILTER_ENABLE +#define I2C_ANALOGFILTER_DISABLED I2C_ANALOGFILTER_DISABLE +#if defined(STM32F0) || defined(STM32F1) || defined(STM32F3) || defined(STM32G0) || defined(STM32L4) || defined(STM32L1) || defined(STM32F7) +#define HAL_I2C_STATE_MEM_BUSY_TX HAL_I2C_STATE_BUSY_TX +#define HAL_I2C_STATE_MEM_BUSY_RX HAL_I2C_STATE_BUSY_RX +#define HAL_I2C_STATE_MASTER_BUSY_TX HAL_I2C_STATE_BUSY_TX +#define HAL_I2C_STATE_MASTER_BUSY_RX HAL_I2C_STATE_BUSY_RX +#define HAL_I2C_STATE_SLAVE_BUSY_TX HAL_I2C_STATE_BUSY_TX +#define HAL_I2C_STATE_SLAVE_BUSY_RX HAL_I2C_STATE_BUSY_RX +#endif +/** + * @} + */ + +/** @defgroup HAL_IRDA_Aliased_Defines HAL IRDA Aliased Defines maintained for legacy purpose + * @{ + */ +#define IRDA_ONE_BIT_SAMPLE_DISABLED IRDA_ONE_BIT_SAMPLE_DISABLE +#define IRDA_ONE_BIT_SAMPLE_ENABLED IRDA_ONE_BIT_SAMPLE_ENABLE + +/** + * @} + */ + +/** @defgroup HAL_IWDG_Aliased_Defines HAL IWDG Aliased Defines maintained for legacy purpose + * @{ + */ +#define KR_KEY_RELOAD IWDG_KEY_RELOAD +#define KR_KEY_ENABLE IWDG_KEY_ENABLE +#define KR_KEY_EWA IWDG_KEY_WRITE_ACCESS_ENABLE +#define KR_KEY_DWA IWDG_KEY_WRITE_ACCESS_DISABLE +/** + * @} + */ + +/** @defgroup HAL_LPTIM_Aliased_Defines HAL LPTIM Aliased Defines maintained for legacy purpose + * @{ + */ + +#define LPTIM_CLOCKSAMPLETIME_DIRECTTRANSISTION LPTIM_CLOCKSAMPLETIME_DIRECTTRANSITION +#define LPTIM_CLOCKSAMPLETIME_2TRANSISTIONS LPTIM_CLOCKSAMPLETIME_2TRANSITIONS +#define LPTIM_CLOCKSAMPLETIME_4TRANSISTIONS LPTIM_CLOCKSAMPLETIME_4TRANSITIONS +#define LPTIM_CLOCKSAMPLETIME_8TRANSISTIONS LPTIM_CLOCKSAMPLETIME_8TRANSITIONS + +#define LPTIM_CLOCKPOLARITY_RISINGEDGE LPTIM_CLOCKPOLARITY_RISING +#define LPTIM_CLOCKPOLARITY_FALLINGEDGE LPTIM_CLOCKPOLARITY_FALLING +#define LPTIM_CLOCKPOLARITY_BOTHEDGES LPTIM_CLOCKPOLARITY_RISING_FALLING + +#define LPTIM_TRIGSAMPLETIME_DIRECTTRANSISTION LPTIM_TRIGSAMPLETIME_DIRECTTRANSITION +#define LPTIM_TRIGSAMPLETIME_2TRANSISTIONS LPTIM_TRIGSAMPLETIME_2TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_4TRANSISTIONS LPTIM_TRIGSAMPLETIME_4TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_8TRANSISTIONS LPTIM_TRIGSAMPLETIME_8TRANSITIONS + +/* The following 3 definition have also been present in a temporary version of lptim.h */ +/* They need to be renamed also to the right name, just in case */ +#define LPTIM_TRIGSAMPLETIME_2TRANSITION LPTIM_TRIGSAMPLETIME_2TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_4TRANSITION LPTIM_TRIGSAMPLETIME_4TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_8TRANSITION LPTIM_TRIGSAMPLETIME_8TRANSITIONS + +/** + * @} + */ + +/** @defgroup HAL_NAND_Aliased_Defines HAL NAND Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_NAND_Read_Page HAL_NAND_Read_Page_8b +#define HAL_NAND_Write_Page HAL_NAND_Write_Page_8b +#define HAL_NAND_Read_SpareArea HAL_NAND_Read_SpareArea_8b +#define 
HAL_NAND_Write_SpareArea HAL_NAND_Write_SpareArea_8b + +#define NAND_AddressTypedef NAND_AddressTypeDef + +#define __ARRAY_ADDRESS ARRAY_ADDRESS +#define __ADDR_1st_CYCLE ADDR_1ST_CYCLE +#define __ADDR_2nd_CYCLE ADDR_2ND_CYCLE +#define __ADDR_3rd_CYCLE ADDR_3RD_CYCLE +#define __ADDR_4th_CYCLE ADDR_4TH_CYCLE +/** + * @} + */ + +/** @defgroup HAL_NOR_Aliased_Defines HAL NOR Aliased Defines maintained for legacy purpose + * @{ + */ +#define NOR_StatusTypedef HAL_NOR_StatusTypeDef +#define NOR_SUCCESS HAL_NOR_STATUS_SUCCESS +#define NOR_ONGOING HAL_NOR_STATUS_ONGOING +#define NOR_ERROR HAL_NOR_STATUS_ERROR +#define NOR_TIMEOUT HAL_NOR_STATUS_TIMEOUT + +#define __NOR_WRITE NOR_WRITE +#define __NOR_ADDR_SHIFT NOR_ADDR_SHIFT +/** + * @} + */ + +/** @defgroup HAL_OPAMP_Aliased_Defines HAL OPAMP Aliased Defines maintained for legacy purpose + * @{ + */ + +#define OPAMP_NONINVERTINGINPUT_VP0 OPAMP_NONINVERTINGINPUT_IO0 +#define OPAMP_NONINVERTINGINPUT_VP1 OPAMP_NONINVERTINGINPUT_IO1 +#define OPAMP_NONINVERTINGINPUT_VP2 OPAMP_NONINVERTINGINPUT_IO2 +#define OPAMP_NONINVERTINGINPUT_VP3 OPAMP_NONINVERTINGINPUT_IO3 + +#define OPAMP_SEC_NONINVERTINGINPUT_VP0 OPAMP_SEC_NONINVERTINGINPUT_IO0 +#define OPAMP_SEC_NONINVERTINGINPUT_VP1 OPAMP_SEC_NONINVERTINGINPUT_IO1 +#define OPAMP_SEC_NONINVERTINGINPUT_VP2 OPAMP_SEC_NONINVERTINGINPUT_IO2 +#define OPAMP_SEC_NONINVERTINGINPUT_VP3 OPAMP_SEC_NONINVERTINGINPUT_IO3 + +#define OPAMP_INVERTINGINPUT_VM0 OPAMP_INVERTINGINPUT_IO0 +#define OPAMP_INVERTINGINPUT_VM1 OPAMP_INVERTINGINPUT_IO1 + +#define IOPAMP_INVERTINGINPUT_VM0 OPAMP_INVERTINGINPUT_IO0 +#define IOPAMP_INVERTINGINPUT_VM1 OPAMP_INVERTINGINPUT_IO1 + +#define OPAMP_SEC_INVERTINGINPUT_VM0 OPAMP_SEC_INVERTINGINPUT_IO0 +#define OPAMP_SEC_INVERTINGINPUT_VM1 OPAMP_SEC_INVERTINGINPUT_IO1 + +#define OPAMP_INVERTINGINPUT_VINM OPAMP_SEC_INVERTINGINPUT_IO1 + +#define OPAMP_PGACONNECT_NO OPAMP_PGA_CONNECT_INVERTINGINPUT_NO +#define OPAMP_PGACONNECT_VM0 OPAMP_PGA_CONNECT_INVERTINGINPUT_IO0 +#define OPAMP_PGACONNECT_VM1 OPAMP_PGA_CONNECT_INVERTINGINPUT_IO1 + +#if defined(STM32L1) || defined(STM32L4) || defined(STM32L5) || defined(STM32H7) +#define HAL_OPAMP_MSP_INIT_CB_ID HAL_OPAMP_MSPINIT_CB_ID +#define HAL_OPAMP_MSP_DEINIT_CB_ID HAL_OPAMP_MSPDEINIT_CB_ID +#endif + + +/** + * @} + */ + +/** @defgroup HAL_I2S_Aliased_Defines HAL I2S Aliased Defines maintained for legacy purpose + * @{ + */ +#define I2S_STANDARD_PHILLIPS I2S_STANDARD_PHILIPS + +#if defined(STM32H7) + #define I2S_IT_TXE I2S_IT_TXP + #define I2S_IT_RXNE I2S_IT_RXP + + #define I2S_FLAG_TXE I2S_FLAG_TXP + #define I2S_FLAG_RXNE I2S_FLAG_RXP +#endif + +#if defined(STM32F7) + #define I2S_CLOCK_SYSCLK I2S_CLOCK_PLL +#endif +/** + * @} + */ + +/** @defgroup HAL_PCCARD_Aliased_Defines HAL PCCARD Aliased Defines maintained for legacy purpose + * @{ + */ + +/* Compact Flash-ATA registers description */ +#define CF_DATA ATA_DATA +#define CF_SECTOR_COUNT ATA_SECTOR_COUNT +#define CF_SECTOR_NUMBER ATA_SECTOR_NUMBER +#define CF_CYLINDER_LOW ATA_CYLINDER_LOW +#define CF_CYLINDER_HIGH ATA_CYLINDER_HIGH +#define CF_CARD_HEAD ATA_CARD_HEAD +#define CF_STATUS_CMD ATA_STATUS_CMD +#define CF_STATUS_CMD_ALTERNATE ATA_STATUS_CMD_ALTERNATE +#define CF_COMMON_DATA_AREA ATA_COMMON_DATA_AREA + +/* Compact Flash-ATA commands */ +#define CF_READ_SECTOR_CMD ATA_READ_SECTOR_CMD +#define CF_WRITE_SECTOR_CMD ATA_WRITE_SECTOR_CMD +#define CF_ERASE_SECTOR_CMD ATA_ERASE_SECTOR_CMD +#define CF_IDENTIFY_CMD ATA_IDENTIFY_CMD + +#define PCCARD_StatusTypedef HAL_PCCARD_StatusTypeDef +#define 
PCCARD_SUCCESS HAL_PCCARD_STATUS_SUCCESS +#define PCCARD_ONGOING HAL_PCCARD_STATUS_ONGOING +#define PCCARD_ERROR HAL_PCCARD_STATUS_ERROR +#define PCCARD_TIMEOUT HAL_PCCARD_STATUS_TIMEOUT +/** + * @} + */ + +/** @defgroup HAL_RTC_Aliased_Defines HAL RTC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define FORMAT_BIN RTC_FORMAT_BIN +#define FORMAT_BCD RTC_FORMAT_BCD + +#define RTC_ALARMSUBSECONDMASK_None RTC_ALARMSUBSECONDMASK_NONE +#define RTC_TAMPERERASEBACKUP_DISABLED RTC_TAMPER_ERASE_BACKUP_DISABLE +#define RTC_TAMPERMASK_FLAG_DISABLED RTC_TAMPERMASK_FLAG_DISABLE +#define RTC_TAMPERMASK_FLAG_ENABLED RTC_TAMPERMASK_FLAG_ENABLE + +#define RTC_MASKTAMPERFLAG_DISABLED RTC_TAMPERMASK_FLAG_DISABLE +#define RTC_MASKTAMPERFLAG_ENABLED RTC_TAMPERMASK_FLAG_ENABLE +#define RTC_TAMPERERASEBACKUP_ENABLED RTC_TAMPER_ERASE_BACKUP_ENABLE +#define RTC_TAMPER1_2_INTERRUPT RTC_ALL_TAMPER_INTERRUPT +#define RTC_TAMPER1_2_3_INTERRUPT RTC_ALL_TAMPER_INTERRUPT + +#define RTC_TIMESTAMPPIN_PC13 RTC_TIMESTAMPPIN_DEFAULT +#define RTC_TIMESTAMPPIN_PA0 RTC_TIMESTAMPPIN_POS1 +#define RTC_TIMESTAMPPIN_PI8 RTC_TIMESTAMPPIN_POS1 +#define RTC_TIMESTAMPPIN_PC1 RTC_TIMESTAMPPIN_POS2 + +#define RTC_OUTPUT_REMAP_PC13 RTC_OUTPUT_REMAP_NONE +#define RTC_OUTPUT_REMAP_PB14 RTC_OUTPUT_REMAP_POS1 +#define RTC_OUTPUT_REMAP_PB2 RTC_OUTPUT_REMAP_POS1 + +#define RTC_TAMPERPIN_PC13 RTC_TAMPERPIN_DEFAULT +#define RTC_TAMPERPIN_PA0 RTC_TAMPERPIN_POS1 +#define RTC_TAMPERPIN_PI8 RTC_TAMPERPIN_POS1 + +#if defined(STM32H7) +#define RTC_TAMPCR_TAMPXE RTC_TAMPER_X +#define RTC_TAMPCR_TAMPXIE RTC_TAMPER_X_INTERRUPT + +#define RTC_TAMPER1_INTERRUPT RTC_IT_TAMP1 +#define RTC_TAMPER2_INTERRUPT RTC_IT_TAMP2 +#define RTC_TAMPER3_INTERRUPT RTC_IT_TAMP3 +#define RTC_ALL_TAMPER_INTERRUPT RTC_IT_TAMPALL +#endif /* STM32H7 */ + +/** + * @} + */ + + +/** @defgroup HAL_SMARTCARD_Aliased_Defines HAL SMARTCARD Aliased Defines maintained for legacy purpose + * @{ + */ +#define SMARTCARD_NACK_ENABLED SMARTCARD_NACK_ENABLE +#define SMARTCARD_NACK_DISABLED SMARTCARD_NACK_DISABLE + +#define SMARTCARD_ONEBIT_SAMPLING_DISABLED SMARTCARD_ONE_BIT_SAMPLE_DISABLE +#define SMARTCARD_ONEBIT_SAMPLING_ENABLED SMARTCARD_ONE_BIT_SAMPLE_ENABLE +#define SMARTCARD_ONEBIT_SAMPLING_DISABLE SMARTCARD_ONE_BIT_SAMPLE_DISABLE +#define SMARTCARD_ONEBIT_SAMPLING_ENABLE SMARTCARD_ONE_BIT_SAMPLE_ENABLE + +#define SMARTCARD_TIMEOUT_DISABLED SMARTCARD_TIMEOUT_DISABLE +#define SMARTCARD_TIMEOUT_ENABLED SMARTCARD_TIMEOUT_ENABLE + +#define SMARTCARD_LASTBIT_DISABLED SMARTCARD_LASTBIT_DISABLE +#define SMARTCARD_LASTBIT_ENABLED SMARTCARD_LASTBIT_ENABLE +/** + * @} + */ + + +/** @defgroup HAL_SMBUS_Aliased_Defines HAL SMBUS Aliased Defines maintained for legacy purpose + * @{ + */ +#define SMBUS_DUALADDRESS_DISABLED SMBUS_DUALADDRESS_DISABLE +#define SMBUS_DUALADDRESS_ENABLED SMBUS_DUALADDRESS_ENABLE +#define SMBUS_GENERALCALL_DISABLED SMBUS_GENERALCALL_DISABLE +#define SMBUS_GENERALCALL_ENABLED SMBUS_GENERALCALL_ENABLE +#define SMBUS_NOSTRETCH_DISABLED SMBUS_NOSTRETCH_DISABLE +#define SMBUS_NOSTRETCH_ENABLED SMBUS_NOSTRETCH_ENABLE +#define SMBUS_ANALOGFILTER_ENABLED SMBUS_ANALOGFILTER_ENABLE +#define SMBUS_ANALOGFILTER_DISABLED SMBUS_ANALOGFILTER_DISABLE +#define SMBUS_PEC_DISABLED SMBUS_PEC_DISABLE +#define SMBUS_PEC_ENABLED SMBUS_PEC_ENABLE +#define HAL_SMBUS_STATE_SLAVE_LISTEN HAL_SMBUS_STATE_LISTEN +/** + * @} + */ + +/** @defgroup HAL_SPI_Aliased_Defines HAL SPI Aliased Defines maintained for legacy purpose + * @{ + */ +#define SPI_TIMODE_DISABLED SPI_TIMODE_DISABLE 
+#define SPI_TIMODE_ENABLED SPI_TIMODE_ENABLE + +#define SPI_CRCCALCULATION_DISABLED SPI_CRCCALCULATION_DISABLE +#define SPI_CRCCALCULATION_ENABLED SPI_CRCCALCULATION_ENABLE + +#define SPI_NSS_PULSE_DISABLED SPI_NSS_PULSE_DISABLE +#define SPI_NSS_PULSE_ENABLED SPI_NSS_PULSE_ENABLE + +#if defined(STM32H7) + + #define SPI_FLAG_TXE SPI_FLAG_TXP + #define SPI_FLAG_RXNE SPI_FLAG_RXP + + #define SPI_IT_TXE SPI_IT_TXP + #define SPI_IT_RXNE SPI_IT_RXP + + #define SPI_FRLVL_EMPTY SPI_RX_FIFO_0PACKET + #define SPI_FRLVL_QUARTER_FULL SPI_RX_FIFO_1PACKET + #define SPI_FRLVL_HALF_FULL SPI_RX_FIFO_2PACKET + #define SPI_FRLVL_FULL SPI_RX_FIFO_3PACKET + +#endif /* STM32H7 */ + +/** + * @} + */ + +/** @defgroup HAL_TIM_Aliased_Defines HAL TIM Aliased Defines maintained for legacy purpose + * @{ + */ +#define CCER_CCxE_MASK TIM_CCER_CCxE_MASK +#define CCER_CCxNE_MASK TIM_CCER_CCxNE_MASK + +#define TIM_DMABase_CR1 TIM_DMABASE_CR1 +#define TIM_DMABase_CR2 TIM_DMABASE_CR2 +#define TIM_DMABase_SMCR TIM_DMABASE_SMCR +#define TIM_DMABase_DIER TIM_DMABASE_DIER +#define TIM_DMABase_SR TIM_DMABASE_SR +#define TIM_DMABase_EGR TIM_DMABASE_EGR +#define TIM_DMABase_CCMR1 TIM_DMABASE_CCMR1 +#define TIM_DMABase_CCMR2 TIM_DMABASE_CCMR2 +#define TIM_DMABase_CCER TIM_DMABASE_CCER +#define TIM_DMABase_CNT TIM_DMABASE_CNT +#define TIM_DMABase_PSC TIM_DMABASE_PSC +#define TIM_DMABase_ARR TIM_DMABASE_ARR +#define TIM_DMABase_RCR TIM_DMABASE_RCR +#define TIM_DMABase_CCR1 TIM_DMABASE_CCR1 +#define TIM_DMABase_CCR2 TIM_DMABASE_CCR2 +#define TIM_DMABase_CCR3 TIM_DMABASE_CCR3 +#define TIM_DMABase_CCR4 TIM_DMABASE_CCR4 +#define TIM_DMABase_BDTR TIM_DMABASE_BDTR +#define TIM_DMABase_DCR TIM_DMABASE_DCR +#define TIM_DMABase_DMAR TIM_DMABASE_DMAR +#define TIM_DMABase_OR1 TIM_DMABASE_OR1 +#define TIM_DMABase_CCMR3 TIM_DMABASE_CCMR3 +#define TIM_DMABase_CCR5 TIM_DMABASE_CCR5 +#define TIM_DMABase_CCR6 TIM_DMABASE_CCR6 +#define TIM_DMABase_OR2 TIM_DMABASE_OR2 +#define TIM_DMABase_OR3 TIM_DMABASE_OR3 +#define TIM_DMABase_OR TIM_DMABASE_OR + +#define TIM_EventSource_Update TIM_EVENTSOURCE_UPDATE +#define TIM_EventSource_CC1 TIM_EVENTSOURCE_CC1 +#define TIM_EventSource_CC2 TIM_EVENTSOURCE_CC2 +#define TIM_EventSource_CC3 TIM_EVENTSOURCE_CC3 +#define TIM_EventSource_CC4 TIM_EVENTSOURCE_CC4 +#define TIM_EventSource_COM TIM_EVENTSOURCE_COM +#define TIM_EventSource_Trigger TIM_EVENTSOURCE_TRIGGER +#define TIM_EventSource_Break TIM_EVENTSOURCE_BREAK +#define TIM_EventSource_Break2 TIM_EVENTSOURCE_BREAK2 + +#define TIM_DMABurstLength_1Transfer TIM_DMABURSTLENGTH_1TRANSFER +#define TIM_DMABurstLength_2Transfers TIM_DMABURSTLENGTH_2TRANSFERS +#define TIM_DMABurstLength_3Transfers TIM_DMABURSTLENGTH_3TRANSFERS +#define TIM_DMABurstLength_4Transfers TIM_DMABURSTLENGTH_4TRANSFERS +#define TIM_DMABurstLength_5Transfers TIM_DMABURSTLENGTH_5TRANSFERS +#define TIM_DMABurstLength_6Transfers TIM_DMABURSTLENGTH_6TRANSFERS +#define TIM_DMABurstLength_7Transfers TIM_DMABURSTLENGTH_7TRANSFERS +#define TIM_DMABurstLength_8Transfers TIM_DMABURSTLENGTH_8TRANSFERS +#define TIM_DMABurstLength_9Transfers TIM_DMABURSTLENGTH_9TRANSFERS +#define TIM_DMABurstLength_10Transfers TIM_DMABURSTLENGTH_10TRANSFERS +#define TIM_DMABurstLength_11Transfers TIM_DMABURSTLENGTH_11TRANSFERS +#define TIM_DMABurstLength_12Transfers TIM_DMABURSTLENGTH_12TRANSFERS +#define TIM_DMABurstLength_13Transfers TIM_DMABURSTLENGTH_13TRANSFERS +#define TIM_DMABurstLength_14Transfers TIM_DMABURSTLENGTH_14TRANSFERS +#define TIM_DMABurstLength_15Transfers TIM_DMABURSTLENGTH_15TRANSFERS +#define 
TIM_DMABurstLength_16Transfers TIM_DMABURSTLENGTH_16TRANSFERS +#define TIM_DMABurstLength_17Transfers TIM_DMABURSTLENGTH_17TRANSFERS +#define TIM_DMABurstLength_18Transfers TIM_DMABURSTLENGTH_18TRANSFERS + +#if defined(STM32L0) +#define TIM22_TI1_GPIO1 TIM22_TI1_GPIO +#define TIM22_TI1_GPIO2 TIM22_TI1_GPIO +#endif + +#if defined(STM32F3) +#define IS_TIM_HALL_INTERFACE_INSTANCE IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE +#endif + +#if defined(STM32H7) +#define TIM_TIM1_ETR_COMP1_OUT TIM_TIM1_ETR_COMP1 +#define TIM_TIM1_ETR_COMP2_OUT TIM_TIM1_ETR_COMP2 +#define TIM_TIM8_ETR_COMP1_OUT TIM_TIM8_ETR_COMP1 +#define TIM_TIM8_ETR_COMP2_OUT TIM_TIM8_ETR_COMP2 +#define TIM_TIM2_ETR_COMP1_OUT TIM_TIM2_ETR_COMP1 +#define TIM_TIM2_ETR_COMP2_OUT TIM_TIM2_ETR_COMP2 +#define TIM_TIM3_ETR_COMP1_OUT TIM_TIM3_ETR_COMP1 +#define TIM_TIM1_TI1_COMP1_OUT TIM_TIM1_TI1_COMP1 +#define TIM_TIM8_TI1_COMP2_OUT TIM_TIM8_TI1_COMP2 +#define TIM_TIM2_TI4_COMP1_OUT TIM_TIM2_TI4_COMP1 +#define TIM_TIM2_TI4_COMP2_OUT TIM_TIM2_TI4_COMP2 +#define TIM_TIM2_TI4_COMP1COMP2_OUT TIM_TIM2_TI4_COMP1_COMP2 +#define TIM_TIM3_TI1_COMP1_OUT TIM_TIM3_TI1_COMP1 +#define TIM_TIM3_TI1_COMP2_OUT TIM_TIM3_TI1_COMP2 +#define TIM_TIM3_TI1_COMP1COMP2_OUT TIM_TIM3_TI1_COMP1_COMP2 +#endif + +/** + * @} + */ + +/** @defgroup HAL_TSC_Aliased_Defines HAL TSC Aliased Defines maintained for legacy purpose + * @{ + */ +#define TSC_SYNC_POL_FALL TSC_SYNC_POLARITY_FALLING +#define TSC_SYNC_POL_RISE_HIGH TSC_SYNC_POLARITY_RISING +/** + * @} + */ + +/** @defgroup HAL_UART_Aliased_Defines HAL UART Aliased Defines maintained for legacy purpose + * @{ + */ +#define UART_ONEBIT_SAMPLING_DISABLED UART_ONE_BIT_SAMPLE_DISABLE +#define UART_ONEBIT_SAMPLING_ENABLED UART_ONE_BIT_SAMPLE_ENABLE +#define UART_ONE_BIT_SAMPLE_DISABLED UART_ONE_BIT_SAMPLE_DISABLE +#define UART_ONE_BIT_SAMPLE_ENABLED UART_ONE_BIT_SAMPLE_ENABLE + +#define __HAL_UART_ONEBIT_ENABLE __HAL_UART_ONE_BIT_SAMPLE_ENABLE +#define __HAL_UART_ONEBIT_DISABLE __HAL_UART_ONE_BIT_SAMPLE_DISABLE + +#define __DIV_SAMPLING16 UART_DIV_SAMPLING16 +#define __DIVMANT_SAMPLING16 UART_DIVMANT_SAMPLING16 +#define __DIVFRAQ_SAMPLING16 UART_DIVFRAQ_SAMPLING16 +#define __UART_BRR_SAMPLING16 UART_BRR_SAMPLING16 + +#define __DIV_SAMPLING8 UART_DIV_SAMPLING8 +#define __DIVMANT_SAMPLING8 UART_DIVMANT_SAMPLING8 +#define __DIVFRAQ_SAMPLING8 UART_DIVFRAQ_SAMPLING8 +#define __UART_BRR_SAMPLING8 UART_BRR_SAMPLING8 + +#define __DIV_LPUART UART_DIV_LPUART + +#define UART_WAKEUPMETHODE_IDLELINE UART_WAKEUPMETHOD_IDLELINE +#define UART_WAKEUPMETHODE_ADDRESSMARK UART_WAKEUPMETHOD_ADDRESSMARK + +/** + * @} + */ + + +/** @defgroup HAL_USART_Aliased_Defines HAL USART Aliased Defines maintained for legacy purpose + * @{ + */ + +#define USART_CLOCK_DISABLED USART_CLOCK_DISABLE +#define USART_CLOCK_ENABLED USART_CLOCK_ENABLE + +#define USARTNACK_ENABLED USART_NACK_ENABLE +#define USARTNACK_DISABLED USART_NACK_DISABLE +/** + * @} + */ + +/** @defgroup HAL_WWDG_Aliased_Defines HAL WWDG Aliased Defines maintained for legacy purpose + * @{ + */ +#define CFR_BASE WWDG_CFR_BASE + +/** + * @} + */ + +/** @defgroup HAL_CAN_Aliased_Defines HAL CAN Aliased Defines maintained for legacy purpose + * @{ + */ +#define CAN_FilterFIFO0 CAN_FILTER_FIFO0 +#define CAN_FilterFIFO1 CAN_FILTER_FIFO1 +#define CAN_IT_RQCP0 CAN_IT_TME +#define CAN_IT_RQCP1 CAN_IT_TME +#define CAN_IT_RQCP2 CAN_IT_TME +#define INAK_TIMEOUT CAN_TIMEOUT_VALUE +#define SLAK_TIMEOUT CAN_TIMEOUT_VALUE +#define CAN_TXSTATUS_FAILED ((uint8_t)0x00U) +#define CAN_TXSTATUS_OK ((uint8_t)0x01U) 
+#define CAN_TXSTATUS_PENDING ((uint8_t)0x02U) + +/** + * @} + */ + +/** @defgroup HAL_ETH_Aliased_Defines HAL ETH Aliased Defines maintained for legacy purpose + * @{ + */ + +#define VLAN_TAG ETH_VLAN_TAG +#define MIN_ETH_PAYLOAD ETH_MIN_ETH_PAYLOAD +#define MAX_ETH_PAYLOAD ETH_MAX_ETH_PAYLOAD +#define JUMBO_FRAME_PAYLOAD ETH_JUMBO_FRAME_PAYLOAD +#define MACMIIAR_CR_MASK ETH_MACMIIAR_CR_MASK +#define MACCR_CLEAR_MASK ETH_MACCR_CLEAR_MASK +#define MACFCR_CLEAR_MASK ETH_MACFCR_CLEAR_MASK +#define DMAOMR_CLEAR_MASK ETH_DMAOMR_CLEAR_MASK + +#define ETH_MMCCR 0x00000100U +#define ETH_MMCRIR 0x00000104U +#define ETH_MMCTIR 0x00000108U +#define ETH_MMCRIMR 0x0000010CU +#define ETH_MMCTIMR 0x00000110U +#define ETH_MMCTGFSCCR 0x0000014CU +#define ETH_MMCTGFMSCCR 0x00000150U +#define ETH_MMCTGFCR 0x00000168U +#define ETH_MMCRFCECR 0x00000194U +#define ETH_MMCRFAECR 0x00000198U +#define ETH_MMCRGUFCR 0x000001C4U + +#define ETH_MAC_TXFIFO_FULL 0x02000000U /* Tx FIFO full */ +#define ETH_MAC_TXFIFONOT_EMPTY 0x01000000U /* Tx FIFO not empty */ +#define ETH_MAC_TXFIFO_WRITE_ACTIVE 0x00400000U /* Tx FIFO write active */ +#define ETH_MAC_TXFIFO_IDLE 0x00000000U /* Tx FIFO read status: Idle */ +#define ETH_MAC_TXFIFO_READ 0x00100000U /* Tx FIFO read status: Read (transferring data to the MAC transmitter) */ +#define ETH_MAC_TXFIFO_WAITING 0x00200000U /* Tx FIFO read status: Waiting for TxStatus from MAC transmitter */ +#define ETH_MAC_TXFIFO_WRITING 0x00300000U /* Tx FIFO read status: Writing the received TxStatus or flushing the TxFIFO */ +#define ETH_MAC_TRANSMISSION_PAUSE 0x00080000U /* MAC transmitter in pause */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_IDLE 0x00000000U /* MAC transmit frame controller: Idle */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_WAITING 0x00020000U /* MAC transmit frame controller: Waiting for Status of previous frame or IFG/backoff period to be over */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_GENRATING_PCF 0x00040000U /* MAC transmit frame controller: Generating and transmitting a Pause control frame (in full duplex mode) */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_TRANSFERRING 0x00060000U /* MAC transmit frame controller: Transferring input frame for transmission */ +#define ETH_MAC_MII_TRANSMIT_ACTIVE 0x00010000U /* MAC MII transmit engine active */ +#define ETH_MAC_RXFIFO_EMPTY 0x00000000U /* Rx FIFO fill level: empty */ +#define ETH_MAC_RXFIFO_BELOW_THRESHOLD 0x00000100U /* Rx FIFO fill level: fill-level below flow-control de-activate threshold */ +#define ETH_MAC_RXFIFO_ABOVE_THRESHOLD 0x00000200U /* Rx FIFO fill level: fill-level above flow-control activate threshold */ +#define ETH_MAC_RXFIFO_FULL 0x00000300U /* Rx FIFO fill level: full */ +#if defined(STM32F1) +#else +#define ETH_MAC_READCONTROLLER_IDLE 0x00000000U /* Rx FIFO read controller IDLE state */ +#define ETH_MAC_READCONTROLLER_READING_DATA 0x00000020U /* Rx FIFO read controller Reading frame data */ +#define ETH_MAC_READCONTROLLER_READING_STATUS 0x00000040U /* Rx FIFO read controller Reading frame status (or time-stamp) */ +#endif +#define ETH_MAC_READCONTROLLER_FLUSHING 0x00000060U /* Rx FIFO read controller Flushing the frame data and status */ +#define ETH_MAC_RXFIFO_WRITE_ACTIVE 0x00000010U /* Rx FIFO write controller active */ +#define ETH_MAC_SMALL_FIFO_NOTACTIVE 0x00000000U /* MAC small FIFO read / write controllers not active */ +#define ETH_MAC_SMALL_FIFO_READ_ACTIVE 0x00000002U /* MAC small FIFO read controller active */ +#define ETH_MAC_SMALL_FIFO_WRITE_ACTIVE 0x00000004U /* MAC small FIFO write 
controller active */ +#define ETH_MAC_SMALL_FIFO_RW_ACTIVE 0x00000006U /* MAC small FIFO read / write controllers active */ +#define ETH_MAC_MII_RECEIVE_PROTOCOL_ACTIVE 0x00000001U /* MAC MII receive protocol engine active */ + +/** + * @} + */ + +/** @defgroup HAL_DCMI_Aliased_Defines HAL DCMI Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_DCMI_ERROR_OVF HAL_DCMI_ERROR_OVR +#define DCMI_IT_OVF DCMI_IT_OVR +#define DCMI_FLAG_OVFRI DCMI_FLAG_OVRRI +#define DCMI_FLAG_OVFMI DCMI_FLAG_OVRMI + +#define HAL_DCMI_ConfigCROP HAL_DCMI_ConfigCrop +#define HAL_DCMI_EnableCROP HAL_DCMI_EnableCrop +#define HAL_DCMI_DisableCROP HAL_DCMI_DisableCrop + +/** + * @} + */ + +#if defined(STM32L4) || defined(STM32F7) || defined(STM32F427xx) || defined(STM32F437xx) \ + || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) \ + || defined(STM32H7) +/** @defgroup HAL_DMA2D_Aliased_Defines HAL DMA2D Aliased Defines maintained for legacy purpose + * @{ + */ +#define DMA2D_ARGB8888 DMA2D_OUTPUT_ARGB8888 +#define DMA2D_RGB888 DMA2D_OUTPUT_RGB888 +#define DMA2D_RGB565 DMA2D_OUTPUT_RGB565 +#define DMA2D_ARGB1555 DMA2D_OUTPUT_ARGB1555 +#define DMA2D_ARGB4444 DMA2D_OUTPUT_ARGB4444 + +#define CM_ARGB8888 DMA2D_INPUT_ARGB8888 +#define CM_RGB888 DMA2D_INPUT_RGB888 +#define CM_RGB565 DMA2D_INPUT_RGB565 +#define CM_ARGB1555 DMA2D_INPUT_ARGB1555 +#define CM_ARGB4444 DMA2D_INPUT_ARGB4444 +#define CM_L8 DMA2D_INPUT_L8 +#define CM_AL44 DMA2D_INPUT_AL44 +#define CM_AL88 DMA2D_INPUT_AL88 +#define CM_L4 DMA2D_INPUT_L4 +#define CM_A8 DMA2D_INPUT_A8 +#define CM_A4 DMA2D_INPUT_A4 +/** + * @} + */ +#endif /* STM32L4 || STM32F7 || STM32F4 || STM32H7 */ + +/** @defgroup HAL_PPP_Aliased_Defines HAL PPP Aliased Defines maintained for legacy purpose + * @{ + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup HAL_CRYP_Aliased_Functions HAL CRYP Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_CRYP_ComputationCpltCallback HAL_CRYPEx_ComputationCpltCallback +/** + * @} + */ + +/** @defgroup HAL_HASH_Aliased_Functions HAL HASH Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_HASH_STATETypeDef HAL_HASH_StateTypeDef +#define HAL_HASHPhaseTypeDef HAL_HASH_PhaseTypeDef +#define HAL_HMAC_MD5_Finish HAL_HASH_MD5_Finish +#define HAL_HMAC_SHA1_Finish HAL_HASH_SHA1_Finish +#define HAL_HMAC_SHA224_Finish HAL_HASH_SHA224_Finish +#define HAL_HMAC_SHA256_Finish HAL_HASH_SHA256_Finish + +/*HASH Algorithm Selection*/ + +#define HASH_AlgoSelection_SHA1 HASH_ALGOSELECTION_SHA1 +#define HASH_AlgoSelection_SHA224 HASH_ALGOSELECTION_SHA224 +#define HASH_AlgoSelection_SHA256 HASH_ALGOSELECTION_SHA256 +#define HASH_AlgoSelection_MD5 HASH_ALGOSELECTION_MD5 + +#define HASH_AlgoMode_HASH HASH_ALGOMODE_HASH +#define HASH_AlgoMode_HMAC HASH_ALGOMODE_HMAC + +#define HASH_HMACKeyType_ShortKey HASH_HMAC_KEYTYPE_SHORTKEY +#define HASH_HMACKeyType_LongKey HASH_HMAC_KEYTYPE_LONGKEY + +#if defined(STM32L4) || defined(STM32F4) || defined(STM32F7) || defined(STM32H7) + +#define HAL_HASH_MD5_Accumulate HAL_HASH_MD5_Accmlt +#define HAL_HASH_MD5_Accumulate_End HAL_HASH_MD5_Accmlt_End +#define HAL_HASH_MD5_Accumulate_IT HAL_HASH_MD5_Accmlt_IT +#define HAL_HASH_MD5_Accumulate_End_IT HAL_HASH_MD5_Accmlt_End_IT + +#define HAL_HASH_SHA1_Accumulate HAL_HASH_SHA1_Accmlt +#define HAL_HASH_SHA1_Accumulate_End HAL_HASH_SHA1_Accmlt_End +#define HAL_HASH_SHA1_Accumulate_IT HAL_HASH_SHA1_Accmlt_IT +#define 
HAL_HASH_SHA1_Accumulate_End_IT HAL_HASH_SHA1_Accmlt_End_IT + +#define HAL_HASHEx_SHA224_Accumulate HAL_HASHEx_SHA224_Accmlt +#define HAL_HASHEx_SHA224_Accumulate_End HAL_HASHEx_SHA224_Accmlt_End +#define HAL_HASHEx_SHA224_Accumulate_IT HAL_HASHEx_SHA224_Accmlt_IT +#define HAL_HASHEx_SHA224_Accumulate_End_IT HAL_HASHEx_SHA224_Accmlt_End_IT + +#define HAL_HASHEx_SHA256_Accumulate HAL_HASHEx_SHA256_Accmlt +#define HAL_HASHEx_SHA256_Accumulate_End HAL_HASHEx_SHA256_Accmlt_End +#define HAL_HASHEx_SHA256_Accumulate_IT HAL_HASHEx_SHA256_Accmlt_IT +#define HAL_HASHEx_SHA256_Accumulate_End_IT HAL_HASHEx_SHA256_Accmlt_End_IT + +#endif /* STM32L4 || STM32F4 || STM32F7 || STM32H7 */ +/** + * @} + */ + +/** @defgroup HAL_Aliased_Functions HAL Generic Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_EnableDBGSleepMode HAL_DBGMCU_EnableDBGSleepMode +#define HAL_DisableDBGSleepMode HAL_DBGMCU_DisableDBGSleepMode +#define HAL_EnableDBGStopMode HAL_DBGMCU_EnableDBGStopMode +#define HAL_DisableDBGStopMode HAL_DBGMCU_DisableDBGStopMode +#define HAL_EnableDBGStandbyMode HAL_DBGMCU_EnableDBGStandbyMode +#define HAL_DisableDBGStandbyMode HAL_DBGMCU_DisableDBGStandbyMode +#define HAL_DBG_LowPowerConfig(Periph, cmd) (((cmd)==ENABLE)? HAL_DBGMCU_DBG_EnableLowPowerConfig(Periph) : HAL_DBGMCU_DBG_DisableLowPowerConfig(Periph)) +#define HAL_VREFINT_OutputSelect HAL_SYSCFG_VREFINT_OutputSelect +#define HAL_Lock_Cmd(cmd) (((cmd)==ENABLE) ? HAL_SYSCFG_Enable_Lock_VREFINT() : HAL_SYSCFG_Disable_Lock_VREFINT()) +#if defined(STM32L0) +#else +#define HAL_VREFINT_Cmd(cmd) (((cmd)==ENABLE)? HAL_SYSCFG_EnableVREFINT() : HAL_SYSCFG_DisableVREFINT()) +#endif +#define HAL_ADC_EnableBuffer_Cmd(cmd) (((cmd)==ENABLE) ? HAL_ADCEx_EnableVREFINT() : HAL_ADCEx_DisableVREFINT()) +#define HAL_ADC_EnableBufferSensor_Cmd(cmd) (((cmd)==ENABLE) ? HAL_ADCEx_EnableVREFINTTempSensor() : HAL_ADCEx_DisableVREFINTTempSensor()) +#if defined(STM32H7A3xx) || defined(STM32H7B3xx) || defined(STM32H7B0xx) || defined(STM32H7A3xxQ) || defined(STM32H7B3xxQ) || defined(STM32H7B0xxQ) +#define HAL_EnableSRDomainDBGStopMode HAL_EnableDomain3DBGStopMode +#define HAL_DisableSRDomainDBGStopMode HAL_DisableDomain3DBGStopMode +#define HAL_EnableSRDomainDBGStandbyMode HAL_EnableDomain3DBGStandbyMode +#define HAL_DisableSRDomainDBGStandbyMode HAL_DisableDomain3DBGStandbyMode +#endif /* STM32H7A3xx || STM32H7B3xx || STM32H7B0xx || STM32H7A3xxQ || STM32H7B3xxQ || STM32H7B0xxQ */ + +/** + * @} + */ + +/** @defgroup HAL_FLASH_Aliased_Functions HAL FLASH Aliased Functions maintained for legacy purpose + * @{ + */ +#define FLASH_HalfPageProgram HAL_FLASHEx_HalfPageProgram +#define FLASH_EnableRunPowerDown HAL_FLASHEx_EnableRunPowerDown +#define FLASH_DisableRunPowerDown HAL_FLASHEx_DisableRunPowerDown +#define HAL_DATA_EEPROMEx_Unlock HAL_FLASHEx_DATAEEPROM_Unlock +#define HAL_DATA_EEPROMEx_Lock HAL_FLASHEx_DATAEEPROM_Lock +#define HAL_DATA_EEPROMEx_Erase HAL_FLASHEx_DATAEEPROM_Erase +#define HAL_DATA_EEPROMEx_Program HAL_FLASHEx_DATAEEPROM_Program + + /** + * @} + */ + +/** @defgroup HAL_I2C_Aliased_Functions HAL I2C Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_I2CEx_AnalogFilter_Config HAL_I2CEx_ConfigAnalogFilter +#define HAL_I2CEx_DigitalFilter_Config HAL_I2CEx_ConfigDigitalFilter +#define HAL_FMPI2CEx_AnalogFilter_Config HAL_FMPI2CEx_ConfigAnalogFilter +#define HAL_FMPI2CEx_DigitalFilter_Config HAL_FMPI2CEx_ConfigDigitalFilter + +#define HAL_I2CFastModePlusConfig(SYSCFG_I2CFastModePlus, cmd) (((cmd)==ENABLE)? 
HAL_I2CEx_EnableFastModePlus(SYSCFG_I2CFastModePlus): HAL_I2CEx_DisableFastModePlus(SYSCFG_I2CFastModePlus)) + +#if defined(STM32H7) || defined(STM32WB) || defined(STM32G0) || defined(STM32F0) || defined(STM32F1) || defined(STM32F2) || defined(STM32F3) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) || defined(STM32L5) || defined(STM32G4) +#define HAL_I2C_Master_Sequential_Transmit_IT HAL_I2C_Master_Seq_Transmit_IT +#define HAL_I2C_Master_Sequential_Receive_IT HAL_I2C_Master_Seq_Receive_IT +#define HAL_I2C_Slave_Sequential_Transmit_IT HAL_I2C_Slave_Seq_Transmit_IT +#define HAL_I2C_Slave_Sequential_Receive_IT HAL_I2C_Slave_Seq_Receive_IT +#endif /* STM32H7 || STM32WB || STM32G0 || STM32F0 || STM32F1 || STM32F2 || STM32F3 || STM32F4 || STM32F7 || STM32L0 || STM32L4 || STM32L5 || STM32G4 */ +#if defined(STM32H7) || defined(STM32WB) || defined(STM32G0) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) || defined(STM32L5) || defined(STM32G4) +#define HAL_I2C_Master_Sequential_Transmit_DMA HAL_I2C_Master_Seq_Transmit_DMA +#define HAL_I2C_Master_Sequential_Receive_DMA HAL_I2C_Master_Seq_Receive_DMA +#define HAL_I2C_Slave_Sequential_Transmit_DMA HAL_I2C_Slave_Seq_Transmit_DMA +#define HAL_I2C_Slave_Sequential_Receive_DMA HAL_I2C_Slave_Seq_Receive_DMA +#endif /* STM32H7 || STM32WB || STM32G0 || STM32F4 || STM32F7 || STM32L0 || STM32L4 || STM32L5 || STM32G4 */ + +#if defined(STM32F4) +#define HAL_FMPI2C_Master_Sequential_Transmit_IT HAL_FMPI2C_Master_Seq_Transmit_IT +#define HAL_FMPI2C_Master_Sequential_Receive_IT HAL_FMPI2C_Master_Seq_Receive_IT +#define HAL_FMPI2C_Slave_Sequential_Transmit_IT HAL_FMPI2C_Slave_Seq_Transmit_IT +#define HAL_FMPI2C_Slave_Sequential_Receive_IT HAL_FMPI2C_Slave_Seq_Receive_IT +#define HAL_FMPI2C_Master_Sequential_Transmit_DMA HAL_FMPI2C_Master_Seq_Transmit_DMA +#define HAL_FMPI2C_Master_Sequential_Receive_DMA HAL_FMPI2C_Master_Seq_Receive_DMA +#define HAL_FMPI2C_Slave_Sequential_Transmit_DMA HAL_FMPI2C_Slave_Seq_Transmit_DMA +#define HAL_FMPI2C_Slave_Sequential_Receive_DMA HAL_FMPI2C_Slave_Seq_Receive_DMA +#endif /* STM32F4 */ + /** + * @} + */ + +/** @defgroup HAL_PWR_Aliased HAL PWR Aliased maintained for legacy purpose + * @{ + */ + +#if defined(STM32G0) +#define HAL_PWR_ConfigPVD HAL_PWREx_ConfigPVD +#define HAL_PWR_EnablePVD HAL_PWREx_EnablePVD +#define HAL_PWR_DisablePVD HAL_PWREx_DisablePVD +#define HAL_PWR_PVD_IRQHandler HAL_PWREx_PVD_IRQHandler +#endif +#define HAL_PWR_PVDConfig HAL_PWR_ConfigPVD +#define HAL_PWR_DisableBkUpReg HAL_PWREx_DisableBkUpReg +#define HAL_PWR_DisableFlashPowerDown HAL_PWREx_DisableFlashPowerDown +#define HAL_PWR_DisableVddio2Monitor HAL_PWREx_DisableVddio2Monitor +#define HAL_PWR_EnableBkUpReg HAL_PWREx_EnableBkUpReg +#define HAL_PWR_EnableFlashPowerDown HAL_PWREx_EnableFlashPowerDown +#define HAL_PWR_EnableVddio2Monitor HAL_PWREx_EnableVddio2Monitor +#define HAL_PWR_PVD_PVM_IRQHandler HAL_PWREx_PVD_PVM_IRQHandler +#define HAL_PWR_PVDLevelConfig HAL_PWR_ConfigPVD +#define HAL_PWR_Vddio2Monitor_IRQHandler HAL_PWREx_Vddio2Monitor_IRQHandler +#define HAL_PWR_Vddio2MonitorCallback HAL_PWREx_Vddio2MonitorCallback +#define HAL_PWREx_ActivateOverDrive HAL_PWREx_EnableOverDrive +#define HAL_PWREx_DeactivateOverDrive HAL_PWREx_DisableOverDrive +#define HAL_PWREx_DisableSDADCAnalog HAL_PWREx_DisableSDADC +#define HAL_PWREx_EnableSDADCAnalog HAL_PWREx_EnableSDADC +#define HAL_PWREx_PVMConfig HAL_PWREx_ConfigPVM + +#define PWR_MODE_NORMAL PWR_PVD_MODE_NORMAL +#define PWR_MODE_IT_RISING 
PWR_PVD_MODE_IT_RISING +#define PWR_MODE_IT_FALLING PWR_PVD_MODE_IT_FALLING +#define PWR_MODE_IT_RISING_FALLING PWR_PVD_MODE_IT_RISING_FALLING +#define PWR_MODE_EVENT_RISING PWR_PVD_MODE_EVENT_RISING +#define PWR_MODE_EVENT_FALLING PWR_PVD_MODE_EVENT_FALLING +#define PWR_MODE_EVENT_RISING_FALLING PWR_PVD_MODE_EVENT_RISING_FALLING + +#define CR_OFFSET_BB PWR_CR_OFFSET_BB +#define CSR_OFFSET_BB PWR_CSR_OFFSET_BB +#define PMODE_BIT_NUMBER VOS_BIT_NUMBER +#define CR_PMODE_BB CR_VOS_BB + +#define DBP_BitNumber DBP_BIT_NUMBER +#define PVDE_BitNumber PVDE_BIT_NUMBER +#define PMODE_BitNumber PMODE_BIT_NUMBER +#define EWUP_BitNumber EWUP_BIT_NUMBER +#define FPDS_BitNumber FPDS_BIT_NUMBER +#define ODEN_BitNumber ODEN_BIT_NUMBER +#define ODSWEN_BitNumber ODSWEN_BIT_NUMBER +#define MRLVDS_BitNumber MRLVDS_BIT_NUMBER +#define LPLVDS_BitNumber LPLVDS_BIT_NUMBER +#define BRE_BitNumber BRE_BIT_NUMBER + +#define PWR_MODE_EVT PWR_PVD_MODE_NORMAL + + /** + * @} + */ + +/** @defgroup HAL_SMBUS_Aliased_Functions HAL SMBUS Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_SMBUS_Slave_Listen_IT HAL_SMBUS_EnableListen_IT +#define HAL_SMBUS_SlaveAddrCallback HAL_SMBUS_AddrCallback +#define HAL_SMBUS_SlaveListenCpltCallback HAL_SMBUS_ListenCpltCallback +/** + * @} + */ + +/** @defgroup HAL_SPI_Aliased_Functions HAL SPI Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_SPI_FlushRxFifo HAL_SPIEx_FlushRxFifo +/** + * @} + */ + +/** @defgroup HAL_TIM_Aliased_Functions HAL TIM Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_TIM_DMADelayPulseCplt TIM_DMADelayPulseCplt +#define HAL_TIM_DMAError TIM_DMAError +#define HAL_TIM_DMACaptureCplt TIM_DMACaptureCplt +#define HAL_TIMEx_DMACommutationCplt TIMEx_DMACommutationCplt +#if defined(STM32H7) || defined(STM32G0) || defined(STM32F0) || defined(STM32F1) || defined(STM32F2) || defined(STM32F3) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) +#define HAL_TIM_SlaveConfigSynchronization HAL_TIM_SlaveConfigSynchro +#define HAL_TIM_SlaveConfigSynchronization_IT HAL_TIM_SlaveConfigSynchro_IT +#define HAL_TIMEx_CommutationCallback HAL_TIMEx_CommutCallback +#define HAL_TIMEx_ConfigCommutationEvent HAL_TIMEx_ConfigCommutEvent +#define HAL_TIMEx_ConfigCommutationEvent_IT HAL_TIMEx_ConfigCommutEvent_IT +#define HAL_TIMEx_ConfigCommutationEvent_DMA HAL_TIMEx_ConfigCommutEvent_DMA +#endif /* STM32H7 || STM32G0 || STM32F0 || STM32F1 || STM32F2 || STM32F3 || STM32F4 || STM32F7 || STM32L0 */ +/** + * @} + */ + +/** @defgroup HAL_UART_Aliased_Functions HAL UART Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_UART_WakeupCallback HAL_UARTEx_WakeupCallback +/** + * @} + */ + +/** @defgroup HAL_LTDC_Aliased_Functions HAL LTDC Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_LTDC_LineEvenCallback HAL_LTDC_LineEventCallback +#define HAL_LTDC_Relaod HAL_LTDC_Reload +#define HAL_LTDC_StructInitFromVideoConfig HAL_LTDCEx_StructInitFromVideoConfig +#define HAL_LTDC_StructInitFromAdaptedCommandConfig HAL_LTDCEx_StructInitFromAdaptedCommandConfig +/** + * @} + */ + + +/** @defgroup HAL_PPP_Aliased_Functions HAL PPP Aliased Functions maintained for legacy purpose + * @{ + */ + +/** + * @} + */ + +/* Exported macros ------------------------------------------------------------*/ + +/** @defgroup HAL_AES_Aliased_Macros HAL CRYP Aliased Macros maintained for legacy purpose + * @{ + */ +#define AES_IT_CC CRYP_IT_CC +#define AES_IT_ERR CRYP_IT_ERR +#define 
AES_FLAG_CCF CRYP_FLAG_CCF +/** + * @} + */ + +/** @defgroup HAL_Aliased_Macros HAL Generic Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_GET_BOOT_MODE __HAL_SYSCFG_GET_BOOT_MODE +#define __HAL_REMAPMEMORY_FLASH __HAL_SYSCFG_REMAPMEMORY_FLASH +#define __HAL_REMAPMEMORY_SYSTEMFLASH __HAL_SYSCFG_REMAPMEMORY_SYSTEMFLASH +#define __HAL_REMAPMEMORY_SRAM __HAL_SYSCFG_REMAPMEMORY_SRAM +#define __HAL_REMAPMEMORY_FMC __HAL_SYSCFG_REMAPMEMORY_FMC +#define __HAL_REMAPMEMORY_FMC_SDRAM __HAL_SYSCFG_REMAPMEMORY_FMC_SDRAM +#define __HAL_REMAPMEMORY_FSMC __HAL_SYSCFG_REMAPMEMORY_FSMC +#define __HAL_REMAPMEMORY_QUADSPI __HAL_SYSCFG_REMAPMEMORY_QUADSPI +#define __HAL_FMC_BANK __HAL_SYSCFG_FMC_BANK +#define __HAL_GET_FLAG __HAL_SYSCFG_GET_FLAG +#define __HAL_CLEAR_FLAG __HAL_SYSCFG_CLEAR_FLAG +#define __HAL_VREFINT_OUT_ENABLE __HAL_SYSCFG_VREFINT_OUT_ENABLE +#define __HAL_VREFINT_OUT_DISABLE __HAL_SYSCFG_VREFINT_OUT_DISABLE +#define __HAL_SYSCFG_SRAM2_WRP_ENABLE __HAL_SYSCFG_SRAM2_WRP_0_31_ENABLE + +#define SYSCFG_FLAG_VREF_READY SYSCFG_FLAG_VREFINT_READY +#define SYSCFG_FLAG_RC48 RCC_FLAG_HSI48 +#define IS_SYSCFG_FASTMODEPLUS_CONFIG IS_I2C_FASTMODEPLUS +#define UFB_MODE_BitNumber UFB_MODE_BIT_NUMBER +#define CMP_PD_BitNumber CMP_PD_BIT_NUMBER + +/** + * @} + */ + + +/** @defgroup HAL_ADC_Aliased_Macros HAL ADC Aliased Macros maintained for legacy purpose + * @{ + */ +#define __ADC_ENABLE __HAL_ADC_ENABLE +#define __ADC_DISABLE __HAL_ADC_DISABLE +#define __HAL_ADC_ENABLING_CONDITIONS ADC_ENABLING_CONDITIONS +#define __HAL_ADC_DISABLING_CONDITIONS ADC_DISABLING_CONDITIONS +#define __HAL_ADC_IS_ENABLED ADC_IS_ENABLE +#define __ADC_IS_ENABLED ADC_IS_ENABLE +#define __HAL_ADC_IS_SOFTWARE_START_REGULAR ADC_IS_SOFTWARE_START_REGULAR +#define __HAL_ADC_IS_SOFTWARE_START_INJECTED ADC_IS_SOFTWARE_START_INJECTED +#define __HAL_ADC_IS_CONVERSION_ONGOING_REGULAR_INJECTED ADC_IS_CONVERSION_ONGOING_REGULAR_INJECTED +#define __HAL_ADC_IS_CONVERSION_ONGOING_REGULAR ADC_IS_CONVERSION_ONGOING_REGULAR +#define __HAL_ADC_IS_CONVERSION_ONGOING_INJECTED ADC_IS_CONVERSION_ONGOING_INJECTED +#define __HAL_ADC_IS_CONVERSION_ONGOING ADC_IS_CONVERSION_ONGOING +#define __HAL_ADC_CLEAR_ERRORCODE ADC_CLEAR_ERRORCODE + +#define __HAL_ADC_GET_RESOLUTION ADC_GET_RESOLUTION +#define __HAL_ADC_JSQR_RK ADC_JSQR_RK +#define __HAL_ADC_CFGR_AWD1CH ADC_CFGR_AWD1CH_SHIFT +#define __HAL_ADC_CFGR_AWD23CR ADC_CFGR_AWD23CR +#define __HAL_ADC_CFGR_INJECT_AUTO_CONVERSION ADC_CFGR_INJECT_AUTO_CONVERSION +#define __HAL_ADC_CFGR_INJECT_CONTEXT_QUEUE ADC_CFGR_INJECT_CONTEXT_QUEUE +#define __HAL_ADC_CFGR_INJECT_DISCCONTINUOUS ADC_CFGR_INJECT_DISCCONTINUOUS +#define __HAL_ADC_CFGR_REG_DISCCONTINUOUS ADC_CFGR_REG_DISCCONTINUOUS +#define __HAL_ADC_CFGR_DISCONTINUOUS_NUM ADC_CFGR_DISCONTINUOUS_NUM +#define __HAL_ADC_CFGR_AUTOWAIT ADC_CFGR_AUTOWAIT +#define __HAL_ADC_CFGR_CONTINUOUS ADC_CFGR_CONTINUOUS +#define __HAL_ADC_CFGR_OVERRUN ADC_CFGR_OVERRUN +#define __HAL_ADC_CFGR_DMACONTREQ ADC_CFGR_DMACONTREQ +#define __HAL_ADC_CFGR_EXTSEL ADC_CFGR_EXTSEL_SET +#define __HAL_ADC_JSQR_JEXTSEL ADC_JSQR_JEXTSEL_SET +#define __HAL_ADC_OFR_CHANNEL ADC_OFR_CHANNEL +#define __HAL_ADC_DIFSEL_CHANNEL ADC_DIFSEL_CHANNEL +#define __HAL_ADC_CALFACT_DIFF_SET ADC_CALFACT_DIFF_SET +#define __HAL_ADC_CALFACT_DIFF_GET ADC_CALFACT_DIFF_GET +#define __HAL_ADC_TRX_HIGHTHRESHOLD ADC_TRX_HIGHTHRESHOLD + +#define __HAL_ADC_OFFSET_SHIFT_RESOLUTION ADC_OFFSET_SHIFT_RESOLUTION +#define __HAL_ADC_AWD1THRESHOLD_SHIFT_RESOLUTION ADC_AWD1THRESHOLD_SHIFT_RESOLUTION +#define 
__HAL_ADC_AWD23THRESHOLD_SHIFT_RESOLUTION ADC_AWD23THRESHOLD_SHIFT_RESOLUTION +#define __HAL_ADC_COMMON_REGISTER ADC_COMMON_REGISTER +#define __HAL_ADC_COMMON_CCR_MULTI ADC_COMMON_CCR_MULTI +#define __HAL_ADC_MULTIMODE_IS_ENABLED ADC_MULTIMODE_IS_ENABLE +#define __ADC_MULTIMODE_IS_ENABLED ADC_MULTIMODE_IS_ENABLE +#define __HAL_ADC_NONMULTIMODE_OR_MULTIMODEMASTER ADC_NONMULTIMODE_OR_MULTIMODEMASTER +#define __HAL_ADC_COMMON_ADC_OTHER ADC_COMMON_ADC_OTHER +#define __HAL_ADC_MULTI_SLAVE ADC_MULTI_SLAVE + +#define __HAL_ADC_SQR1_L ADC_SQR1_L_SHIFT +#define __HAL_ADC_JSQR_JL ADC_JSQR_JL_SHIFT +#define __HAL_ADC_JSQR_RK_JL ADC_JSQR_RK_JL +#define __HAL_ADC_CR1_DISCONTINUOUS_NUM ADC_CR1_DISCONTINUOUS_NUM +#define __HAL_ADC_CR1_SCAN ADC_CR1_SCAN_SET +#define __HAL_ADC_CONVCYCLES_MAX_RANGE ADC_CONVCYCLES_MAX_RANGE +#define __HAL_ADC_CLOCK_PRESCALER_RANGE ADC_CLOCK_PRESCALER_RANGE +#define __HAL_ADC_GET_CLOCK_PRESCALER ADC_GET_CLOCK_PRESCALER + +#define __HAL_ADC_SQR1 ADC_SQR1 +#define __HAL_ADC_SMPR1 ADC_SMPR1 +#define __HAL_ADC_SMPR2 ADC_SMPR2 +#define __HAL_ADC_SQR3_RK ADC_SQR3_RK +#define __HAL_ADC_SQR2_RK ADC_SQR2_RK +#define __HAL_ADC_SQR1_RK ADC_SQR1_RK +#define __HAL_ADC_CR2_CONTINUOUS ADC_CR2_CONTINUOUS +#define __HAL_ADC_CR1_DISCONTINUOUS ADC_CR1_DISCONTINUOUS +#define __HAL_ADC_CR1_SCANCONV ADC_CR1_SCANCONV +#define __HAL_ADC_CR2_EOCSelection ADC_CR2_EOCSelection +#define __HAL_ADC_CR2_DMAContReq ADC_CR2_DMAContReq +#define __HAL_ADC_JSQR ADC_JSQR + +#define __HAL_ADC_CHSELR_CHANNEL ADC_CHSELR_CHANNEL +#define __HAL_ADC_CFGR1_REG_DISCCONTINUOUS ADC_CFGR1_REG_DISCCONTINUOUS +#define __HAL_ADC_CFGR1_AUTOOFF ADC_CFGR1_AUTOOFF +#define __HAL_ADC_CFGR1_AUTOWAIT ADC_CFGR1_AUTOWAIT +#define __HAL_ADC_CFGR1_CONTINUOUS ADC_CFGR1_CONTINUOUS +#define __HAL_ADC_CFGR1_OVERRUN ADC_CFGR1_OVERRUN +#define __HAL_ADC_CFGR1_SCANDIR ADC_CFGR1_SCANDIR +#define __HAL_ADC_CFGR1_DMACONTREQ ADC_CFGR1_DMACONTREQ + +/** + * @} + */ + +/** @defgroup HAL_DAC_Aliased_Macros HAL DAC Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_DHR12R1_ALIGNEMENT DAC_DHR12R1_ALIGNMENT +#define __HAL_DHR12R2_ALIGNEMENT DAC_DHR12R2_ALIGNMENT +#define __HAL_DHR12RD_ALIGNEMENT DAC_DHR12RD_ALIGNMENT +#define IS_DAC_GENERATE_WAVE IS_DAC_WAVE + +/** + * @} + */ + +/** @defgroup HAL_DBGMCU_Aliased_Macros HAL DBGMCU Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_FREEZE_TIM1_DBGMCU __HAL_DBGMCU_FREEZE_TIM1 +#define __HAL_UNFREEZE_TIM1_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM1 +#define __HAL_FREEZE_TIM2_DBGMCU __HAL_DBGMCU_FREEZE_TIM2 +#define __HAL_UNFREEZE_TIM2_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM2 +#define __HAL_FREEZE_TIM3_DBGMCU __HAL_DBGMCU_FREEZE_TIM3 +#define __HAL_UNFREEZE_TIM3_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM3 +#define __HAL_FREEZE_TIM4_DBGMCU __HAL_DBGMCU_FREEZE_TIM4 +#define __HAL_UNFREEZE_TIM4_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM4 +#define __HAL_FREEZE_TIM5_DBGMCU __HAL_DBGMCU_FREEZE_TIM5 +#define __HAL_UNFREEZE_TIM5_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM5 +#define __HAL_FREEZE_TIM6_DBGMCU __HAL_DBGMCU_FREEZE_TIM6 +#define __HAL_UNFREEZE_TIM6_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM6 +#define __HAL_FREEZE_TIM7_DBGMCU __HAL_DBGMCU_FREEZE_TIM7 +#define __HAL_UNFREEZE_TIM7_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM7 +#define __HAL_FREEZE_TIM8_DBGMCU __HAL_DBGMCU_FREEZE_TIM8 +#define __HAL_UNFREEZE_TIM8_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM8 + +#define __HAL_FREEZE_TIM9_DBGMCU __HAL_DBGMCU_FREEZE_TIM9 +#define __HAL_UNFREEZE_TIM9_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM9 +#define __HAL_FREEZE_TIM10_DBGMCU __HAL_DBGMCU_FREEZE_TIM10 +#define 
__HAL_UNFREEZE_TIM10_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM10 +#define __HAL_FREEZE_TIM11_DBGMCU __HAL_DBGMCU_FREEZE_TIM11 +#define __HAL_UNFREEZE_TIM11_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM11 +#define __HAL_FREEZE_TIM12_DBGMCU __HAL_DBGMCU_FREEZE_TIM12 +#define __HAL_UNFREEZE_TIM12_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM12 +#define __HAL_FREEZE_TIM13_DBGMCU __HAL_DBGMCU_FREEZE_TIM13 +#define __HAL_UNFREEZE_TIM13_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM13 +#define __HAL_FREEZE_TIM14_DBGMCU __HAL_DBGMCU_FREEZE_TIM14 +#define __HAL_UNFREEZE_TIM14_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM14 +#define __HAL_FREEZE_CAN2_DBGMCU __HAL_DBGMCU_FREEZE_CAN2 +#define __HAL_UNFREEZE_CAN2_DBGMCU __HAL_DBGMCU_UNFREEZE_CAN2 + + +#define __HAL_FREEZE_TIM15_DBGMCU __HAL_DBGMCU_FREEZE_TIM15 +#define __HAL_UNFREEZE_TIM15_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM15 +#define __HAL_FREEZE_TIM16_DBGMCU __HAL_DBGMCU_FREEZE_TIM16 +#define __HAL_UNFREEZE_TIM16_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM16 +#define __HAL_FREEZE_TIM17_DBGMCU __HAL_DBGMCU_FREEZE_TIM17 +#define __HAL_UNFREEZE_TIM17_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM17 +#define __HAL_FREEZE_RTC_DBGMCU __HAL_DBGMCU_FREEZE_RTC +#define __HAL_UNFREEZE_RTC_DBGMCU __HAL_DBGMCU_UNFREEZE_RTC +#if defined(STM32H7) + #define __HAL_FREEZE_WWDG_DBGMCU __HAL_DBGMCU_FREEZE_WWDG1 + #define __HAL_UNFREEZE_WWDG_DBGMCU __HAL_DBGMCU_UnFreeze_WWDG1 + #define __HAL_FREEZE_IWDG_DBGMCU __HAL_DBGMCU_FREEZE_IWDG1 + #define __HAL_UNFREEZE_IWDG_DBGMCU __HAL_DBGMCU_UnFreeze_IWDG1 +#else + #define __HAL_FREEZE_WWDG_DBGMCU __HAL_DBGMCU_FREEZE_WWDG + #define __HAL_UNFREEZE_WWDG_DBGMCU __HAL_DBGMCU_UNFREEZE_WWDG + #define __HAL_FREEZE_IWDG_DBGMCU __HAL_DBGMCU_FREEZE_IWDG + #define __HAL_UNFREEZE_IWDG_DBGMCU __HAL_DBGMCU_UNFREEZE_IWDG +#endif /* STM32H7 */ +#define __HAL_FREEZE_I2C1_TIMEOUT_DBGMCU __HAL_DBGMCU_FREEZE_I2C1_TIMEOUT +#define __HAL_UNFREEZE_I2C1_TIMEOUT_DBGMCU __HAL_DBGMCU_UNFREEZE_I2C1_TIMEOUT +#define __HAL_FREEZE_I2C2_TIMEOUT_DBGMCU __HAL_DBGMCU_FREEZE_I2C2_TIMEOUT +#define __HAL_UNFREEZE_I2C2_TIMEOUT_DBGMCU __HAL_DBGMCU_UNFREEZE_I2C2_TIMEOUT +#define __HAL_FREEZE_I2C3_TIMEOUT_DBGMCU __HAL_DBGMCU_FREEZE_I2C3_TIMEOUT +#define __HAL_UNFREEZE_I2C3_TIMEOUT_DBGMCU __HAL_DBGMCU_UNFREEZE_I2C3_TIMEOUT +#define __HAL_FREEZE_CAN1_DBGMCU __HAL_DBGMCU_FREEZE_CAN1 +#define __HAL_UNFREEZE_CAN1_DBGMCU __HAL_DBGMCU_UNFREEZE_CAN1 +#define __HAL_FREEZE_LPTIM1_DBGMCU __HAL_DBGMCU_FREEZE_LPTIM1 +#define __HAL_UNFREEZE_LPTIM1_DBGMCU __HAL_DBGMCU_UNFREEZE_LPTIM1 +#define __HAL_FREEZE_LPTIM2_DBGMCU __HAL_DBGMCU_FREEZE_LPTIM2 +#define __HAL_UNFREEZE_LPTIM2_DBGMCU __HAL_DBGMCU_UNFREEZE_LPTIM2 + +/** + * @} + */ + +/** @defgroup HAL_COMP_Aliased_Macros HAL COMP Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined(STM32F3) +#define COMP_START __HAL_COMP_ENABLE +#define COMP_STOP __HAL_COMP_DISABLE +#define COMP_LOCK __HAL_COMP_LOCK + +#if defined(STM32F301x8) || defined(STM32F302x8) || defined(STM32F318xx) || defined(STM32F303x8) || defined(STM32F334x8) || defined(STM32F328xx) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? 
__HAL_COMP_COMP4_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP6_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP6_EXTI_CLEAR_FLAG()) +# endif +# if defined(STM32F302xE) || defined(STM32F302xC) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? 
__HAL_COMP_COMP4_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP6_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP6_EXTI_CLEAR_FLAG()) +# endif +# if defined(STM32F303xE) || defined(STM32F398xx) || defined(STM32F303xC) || defined(STM32F358xx) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? 
__HAL_COMP_COMP4_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP7_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP7_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP7_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP7_EXTI_CLEAR_FLAG()) +# endif +# if defined(STM32F373xC) ||defined(STM32F378xx) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? 
__HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP2_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP2_EXTI_CLEAR_FLAG()) +# endif +#else +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP2_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP2_EXTI_CLEAR_FLAG()) +#endif + +#define __HAL_COMP_GET_EXTI_LINE COMP_GET_EXTI_LINE + +#if defined(STM32L0) || defined(STM32L4) +/* Note: On these STM32 families, the only argument of this macro */ +/* is COMP_FLAG_LOCK. */ +/* This macro is replaced by __HAL_COMP_IS_LOCKED with only HAL handle */ +/* argument. 
*/ +#define __HAL_COMP_GET_FLAG(__HANDLE__, __FLAG__) (__HAL_COMP_IS_LOCKED(__HANDLE__)) +#endif +/** + * @} + */ + +#if defined(STM32L0) || defined(STM32L4) +/** @defgroup HAL_COMP_Aliased_Functions HAL COMP Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_COMP_Start_IT HAL_COMP_Start /* Function considered as legacy as EXTI event or IT configuration is done into HAL_COMP_Init() */ +#define HAL_COMP_Stop_IT HAL_COMP_Stop /* Function considered as legacy as EXTI event or IT configuration is done into HAL_COMP_Init() */ +/** + * @} + */ +#endif + +/** @defgroup HAL_DAC_Aliased_Macros HAL DAC Aliased Macros maintained for legacy purpose + * @{ + */ + +#define IS_DAC_WAVE(WAVE) (((WAVE) == DAC_WAVE_NONE) || \ + ((WAVE) == DAC_WAVE_NOISE)|| \ + ((WAVE) == DAC_WAVE_TRIANGLE)) + +/** + * @} + */ + +/** @defgroup HAL_FLASH_Aliased_Macros HAL FLASH Aliased Macros maintained for legacy purpose + * @{ + */ + +#define IS_WRPAREA IS_OB_WRPAREA +#define IS_TYPEPROGRAM IS_FLASH_TYPEPROGRAM +#define IS_TYPEPROGRAMFLASH IS_FLASH_TYPEPROGRAM +#define IS_TYPEERASE IS_FLASH_TYPEERASE +#define IS_NBSECTORS IS_FLASH_NBSECTORS +#define IS_OB_WDG_SOURCE IS_OB_IWDG_SOURCE + +/** + * @} + */ + +/** @defgroup HAL_I2C_Aliased_Macros HAL I2C Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_I2C_RESET_CR2 I2C_RESET_CR2 +#define __HAL_I2C_GENERATE_START I2C_GENERATE_START +#if defined(STM32F1) +#define __HAL_I2C_FREQ_RANGE I2C_FREQRANGE +#else +#define __HAL_I2C_FREQ_RANGE I2C_FREQ_RANGE +#endif /* STM32F1 */ +#define __HAL_I2C_RISE_TIME I2C_RISE_TIME +#define __HAL_I2C_SPEED_STANDARD I2C_SPEED_STANDARD +#define __HAL_I2C_SPEED_FAST I2C_SPEED_FAST +#define __HAL_I2C_SPEED I2C_SPEED +#define __HAL_I2C_7BIT_ADD_WRITE I2C_7BIT_ADD_WRITE +#define __HAL_I2C_7BIT_ADD_READ I2C_7BIT_ADD_READ +#define __HAL_I2C_10BIT_ADDRESS I2C_10BIT_ADDRESS +#define __HAL_I2C_10BIT_HEADER_WRITE I2C_10BIT_HEADER_WRITE +#define __HAL_I2C_10BIT_HEADER_READ I2C_10BIT_HEADER_READ +#define __HAL_I2C_MEM_ADD_MSB I2C_MEM_ADD_MSB +#define __HAL_I2C_MEM_ADD_LSB I2C_MEM_ADD_LSB +#define __HAL_I2C_FREQRANGE I2C_FREQRANGE +/** + * @} + */ + +/** @defgroup HAL_I2S_Aliased_Macros HAL I2S Aliased Macros maintained for legacy purpose + * @{ + */ + +#define IS_I2S_INSTANCE IS_I2S_ALL_INSTANCE +#define IS_I2S_INSTANCE_EXT IS_I2S_ALL_INSTANCE_EXT + +#if defined(STM32H7) + #define __HAL_I2S_CLEAR_FREFLAG __HAL_I2S_CLEAR_TIFREFLAG +#endif + +/** + * @} + */ + +/** @defgroup HAL_IRDA_Aliased_Macros HAL IRDA Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __IRDA_DISABLE __HAL_IRDA_DISABLE +#define __IRDA_ENABLE __HAL_IRDA_ENABLE + +#define __HAL_IRDA_GETCLOCKSOURCE IRDA_GETCLOCKSOURCE +#define __HAL_IRDA_MASK_COMPUTATION IRDA_MASK_COMPUTATION +#define __IRDA_GETCLOCKSOURCE IRDA_GETCLOCKSOURCE +#define __IRDA_MASK_COMPUTATION IRDA_MASK_COMPUTATION + +#define IS_IRDA_ONEBIT_SAMPLE IS_IRDA_ONE_BIT_SAMPLE + + +/** + * @} + */ + + +/** @defgroup HAL_IWDG_Aliased_Macros HAL IWDG Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_IWDG_ENABLE_WRITE_ACCESS IWDG_ENABLE_WRITE_ACCESS +#define __HAL_IWDG_DISABLE_WRITE_ACCESS IWDG_DISABLE_WRITE_ACCESS +/** + * @} + */ + + +/** @defgroup HAL_LPTIM_Aliased_Macros HAL LPTIM Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_LPTIM_ENABLE_INTERRUPT __HAL_LPTIM_ENABLE_IT +#define __HAL_LPTIM_DISABLE_INTERRUPT __HAL_LPTIM_DISABLE_IT +#define __HAL_LPTIM_GET_ITSTATUS __HAL_LPTIM_GET_IT_SOURCE + +/** + * @} + */ + + +/** 
@defgroup HAL_OPAMP_Aliased_Macros HAL OPAMP Aliased Macros maintained for legacy purpose + * @{ + */ +#define __OPAMP_CSR_OPAXPD OPAMP_CSR_OPAXPD +#define __OPAMP_CSR_S3SELX OPAMP_CSR_S3SELX +#define __OPAMP_CSR_S4SELX OPAMP_CSR_S4SELX +#define __OPAMP_CSR_S5SELX OPAMP_CSR_S5SELX +#define __OPAMP_CSR_S6SELX OPAMP_CSR_S6SELX +#define __OPAMP_CSR_OPAXCAL_L OPAMP_CSR_OPAXCAL_L +#define __OPAMP_CSR_OPAXCAL_H OPAMP_CSR_OPAXCAL_H +#define __OPAMP_CSR_OPAXLPM OPAMP_CSR_OPAXLPM +#define __OPAMP_CSR_ALL_SWITCHES OPAMP_CSR_ALL_SWITCHES +#define __OPAMP_CSR_ANAWSELX OPAMP_CSR_ANAWSELX +#define __OPAMP_CSR_OPAXCALOUT OPAMP_CSR_OPAXCALOUT +#define __OPAMP_OFFSET_TRIM_BITSPOSITION OPAMP_OFFSET_TRIM_BITSPOSITION +#define __OPAMP_OFFSET_TRIM_SET OPAMP_OFFSET_TRIM_SET + +/** + * @} + */ + + +/** @defgroup HAL_PWR_Aliased_Macros HAL PWR Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_PVD_EVENT_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_EVENT +#define __HAL_PVD_EVENT_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_EVENT +#define __HAL_PVD_EXTI_FALLINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE +#define __HAL_PVD_EXTI_FALLINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PVD_EXTI_RISINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE +#define __HAL_PVD_EXTI_RISINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE +#define __HAL_PVM_EVENT_DISABLE __HAL_PWR_PVM_EVENT_DISABLE +#define __HAL_PVM_EVENT_ENABLE __HAL_PWR_PVM_EVENT_ENABLE +#define __HAL_PVM_EXTI_FALLINGTRIGGER_DISABLE __HAL_PWR_PVM_EXTI_FALLINGTRIGGER_DISABLE +#define __HAL_PVM_EXTI_FALLINGTRIGGER_ENABLE __HAL_PWR_PVM_EXTI_FALLINGTRIGGER_ENABLE +#define __HAL_PVM_EXTI_RISINGTRIGGER_DISABLE __HAL_PWR_PVM_EXTI_RISINGTRIGGER_DISABLE +#define __HAL_PVM_EXTI_RISINGTRIGGER_ENABLE __HAL_PWR_PVM_EXTI_RISINGTRIGGER_ENABLE +#define __HAL_PWR_INTERNALWAKEUP_DISABLE HAL_PWREx_DisableInternalWakeUpLine +#define __HAL_PWR_INTERNALWAKEUP_ENABLE HAL_PWREx_EnableInternalWakeUpLine +#define __HAL_PWR_PULL_UP_DOWN_CONFIG_DISABLE HAL_PWREx_DisablePullUpPullDownConfig +#define __HAL_PWR_PULL_UP_DOWN_CONFIG_ENABLE HAL_PWREx_EnablePullUpPullDownConfig +#define __HAL_PWR_PVD_EXTI_CLEAR_EGDE_TRIGGER() do { __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE();__HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE(); } while(0) +#define __HAL_PWR_PVD_EXTI_EVENT_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_EVENT +#define __HAL_PWR_PVD_EXTI_EVENT_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_EVENT +#define __HAL_PWR_PVD_EXTI_FALLINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE +#define __HAL_PWR_PVD_EXTI_FALLINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PWR_PVD_EXTI_RISINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE +#define __HAL_PWR_PVD_EXTI_RISINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE +#define __HAL_PWR_PVD_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PWR_PVD_EXTI_SET_RISING_EDGE_TRIGGER __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE +#define __HAL_PWR_PVM_DISABLE() do { HAL_PWREx_DisablePVM1();HAL_PWREx_DisablePVM2();HAL_PWREx_DisablePVM3();HAL_PWREx_DisablePVM4(); } while(0) +#define __HAL_PWR_PVM_ENABLE() do { HAL_PWREx_EnablePVM1();HAL_PWREx_EnablePVM2();HAL_PWREx_EnablePVM3();HAL_PWREx_EnablePVM4(); } while(0) +#define __HAL_PWR_SRAM2CONTENT_PRESERVE_DISABLE HAL_PWREx_DisableSRAM2ContentRetention +#define __HAL_PWR_SRAM2CONTENT_PRESERVE_ENABLE HAL_PWREx_EnableSRAM2ContentRetention +#define __HAL_PWR_VDDIO2_DISABLE HAL_PWREx_DisableVddIO2 +#define __HAL_PWR_VDDIO2_ENABLE 
HAL_PWREx_EnableVddIO2 +#define __HAL_PWR_VDDIO2_EXTI_CLEAR_EGDE_TRIGGER __HAL_PWR_VDDIO2_EXTI_DISABLE_FALLING_EDGE +#define __HAL_PWR_VDDIO2_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_PWR_VDDIO2_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PWR_VDDUSB_DISABLE HAL_PWREx_DisableVddUSB +#define __HAL_PWR_VDDUSB_ENABLE HAL_PWREx_EnableVddUSB + +#if defined (STM32F4) +#define __HAL_PVD_EXTI_ENABLE_IT(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_ENABLE_IT() +#define __HAL_PVD_EXTI_DISABLE_IT(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_DISABLE_IT() +#define __HAL_PVD_EXTI_GET_FLAG(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_GET_FLAG() +#define __HAL_PVD_EXTI_CLEAR_FLAG(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_CLEAR_FLAG() +#define __HAL_PVD_EXTI_GENERATE_SWIT(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_GENERATE_SWIT() +#else +#define __HAL_PVD_EXTI_CLEAR_FLAG __HAL_PWR_PVD_EXTI_CLEAR_FLAG +#define __HAL_PVD_EXTI_DISABLE_IT __HAL_PWR_PVD_EXTI_DISABLE_IT +#define __HAL_PVD_EXTI_ENABLE_IT __HAL_PWR_PVD_EXTI_ENABLE_IT +#define __HAL_PVD_EXTI_GENERATE_SWIT __HAL_PWR_PVD_EXTI_GENERATE_SWIT +#define __HAL_PVD_EXTI_GET_FLAG __HAL_PWR_PVD_EXTI_GET_FLAG +#endif /* STM32F4 */ +/** + * @} + */ + + +/** @defgroup HAL_RCC_Aliased HAL RCC Aliased maintained for legacy purpose + * @{ + */ + +#define RCC_StopWakeUpClock_MSI RCC_STOP_WAKEUPCLOCK_MSI +#define RCC_StopWakeUpClock_HSI RCC_STOP_WAKEUPCLOCK_HSI + +#define HAL_RCC_CCSCallback HAL_RCC_CSSCallback +#define HAL_RC48_EnableBuffer_Cmd(cmd) (((cmd)==ENABLE) ? HAL_RCCEx_EnableHSI48_VREFINT() : HAL_RCCEx_DisableHSI48_VREFINT()) + +#define __ADC_CLK_DISABLE __HAL_RCC_ADC_CLK_DISABLE +#define __ADC_CLK_ENABLE __HAL_RCC_ADC_CLK_ENABLE +#define __ADC_CLK_SLEEP_DISABLE __HAL_RCC_ADC_CLK_SLEEP_DISABLE +#define __ADC_CLK_SLEEP_ENABLE __HAL_RCC_ADC_CLK_SLEEP_ENABLE +#define __ADC_FORCE_RESET __HAL_RCC_ADC_FORCE_RESET +#define __ADC_RELEASE_RESET __HAL_RCC_ADC_RELEASE_RESET +#define __ADC1_CLK_DISABLE __HAL_RCC_ADC1_CLK_DISABLE +#define __ADC1_CLK_ENABLE __HAL_RCC_ADC1_CLK_ENABLE +#define __ADC1_FORCE_RESET __HAL_RCC_ADC1_FORCE_RESET +#define __ADC1_RELEASE_RESET __HAL_RCC_ADC1_RELEASE_RESET +#define __ADC1_CLK_SLEEP_ENABLE __HAL_RCC_ADC1_CLK_SLEEP_ENABLE +#define __ADC1_CLK_SLEEP_DISABLE __HAL_RCC_ADC1_CLK_SLEEP_DISABLE +#define __ADC2_CLK_DISABLE __HAL_RCC_ADC2_CLK_DISABLE +#define __ADC2_CLK_ENABLE __HAL_RCC_ADC2_CLK_ENABLE +#define __ADC2_FORCE_RESET __HAL_RCC_ADC2_FORCE_RESET +#define __ADC2_RELEASE_RESET __HAL_RCC_ADC2_RELEASE_RESET +#define __ADC3_CLK_DISABLE __HAL_RCC_ADC3_CLK_DISABLE +#define __ADC3_CLK_ENABLE __HAL_RCC_ADC3_CLK_ENABLE +#define __ADC3_FORCE_RESET __HAL_RCC_ADC3_FORCE_RESET +#define __ADC3_RELEASE_RESET __HAL_RCC_ADC3_RELEASE_RESET +#define __AES_CLK_DISABLE __HAL_RCC_AES_CLK_DISABLE +#define __AES_CLK_ENABLE __HAL_RCC_AES_CLK_ENABLE +#define __AES_CLK_SLEEP_DISABLE __HAL_RCC_AES_CLK_SLEEP_DISABLE +#define __AES_CLK_SLEEP_ENABLE __HAL_RCC_AES_CLK_SLEEP_ENABLE +#define __AES_FORCE_RESET __HAL_RCC_AES_FORCE_RESET +#define __AES_RELEASE_RESET __HAL_RCC_AES_RELEASE_RESET +#define __CRYP_CLK_SLEEP_ENABLE __HAL_RCC_CRYP_CLK_SLEEP_ENABLE +#define __CRYP_CLK_SLEEP_DISABLE __HAL_RCC_CRYP_CLK_SLEEP_DISABLE +#define __CRYP_CLK_ENABLE __HAL_RCC_CRYP_CLK_ENABLE +#define __CRYP_CLK_DISABLE __HAL_RCC_CRYP_CLK_DISABLE +#define __CRYP_FORCE_RESET __HAL_RCC_CRYP_FORCE_RESET +#define __CRYP_RELEASE_RESET __HAL_RCC_CRYP_RELEASE_RESET +#define __AFIO_CLK_DISABLE __HAL_RCC_AFIO_CLK_DISABLE +#define __AFIO_CLK_ENABLE __HAL_RCC_AFIO_CLK_ENABLE +#define __AFIO_FORCE_RESET __HAL_RCC_AFIO_FORCE_RESET +#define 
__AFIO_RELEASE_RESET __HAL_RCC_AFIO_RELEASE_RESET +#define __AHB_FORCE_RESET __HAL_RCC_AHB_FORCE_RESET +#define __AHB_RELEASE_RESET __HAL_RCC_AHB_RELEASE_RESET +#define __AHB1_FORCE_RESET __HAL_RCC_AHB1_FORCE_RESET +#define __AHB1_RELEASE_RESET __HAL_RCC_AHB1_RELEASE_RESET +#define __AHB2_FORCE_RESET __HAL_RCC_AHB2_FORCE_RESET +#define __AHB2_RELEASE_RESET __HAL_RCC_AHB2_RELEASE_RESET +#define __AHB3_FORCE_RESET __HAL_RCC_AHB3_FORCE_RESET +#define __AHB3_RELEASE_RESET __HAL_RCC_AHB3_RELEASE_RESET +#define __APB1_FORCE_RESET __HAL_RCC_APB1_FORCE_RESET +#define __APB1_RELEASE_RESET __HAL_RCC_APB1_RELEASE_RESET +#define __APB2_FORCE_RESET __HAL_RCC_APB2_FORCE_RESET +#define __APB2_RELEASE_RESET __HAL_RCC_APB2_RELEASE_RESET +#define __BKP_CLK_DISABLE __HAL_RCC_BKP_CLK_DISABLE +#define __BKP_CLK_ENABLE __HAL_RCC_BKP_CLK_ENABLE +#define __BKP_FORCE_RESET __HAL_RCC_BKP_FORCE_RESET +#define __BKP_RELEASE_RESET __HAL_RCC_BKP_RELEASE_RESET +#define __CAN1_CLK_DISABLE __HAL_RCC_CAN1_CLK_DISABLE +#define __CAN1_CLK_ENABLE __HAL_RCC_CAN1_CLK_ENABLE +#define __CAN1_CLK_SLEEP_DISABLE __HAL_RCC_CAN1_CLK_SLEEP_DISABLE +#define __CAN1_CLK_SLEEP_ENABLE __HAL_RCC_CAN1_CLK_SLEEP_ENABLE +#define __CAN1_FORCE_RESET __HAL_RCC_CAN1_FORCE_RESET +#define __CAN1_RELEASE_RESET __HAL_RCC_CAN1_RELEASE_RESET +#define __CAN_CLK_DISABLE __HAL_RCC_CAN1_CLK_DISABLE +#define __CAN_CLK_ENABLE __HAL_RCC_CAN1_CLK_ENABLE +#define __CAN_FORCE_RESET __HAL_RCC_CAN1_FORCE_RESET +#define __CAN_RELEASE_RESET __HAL_RCC_CAN1_RELEASE_RESET +#define __CAN2_CLK_DISABLE __HAL_RCC_CAN2_CLK_DISABLE +#define __CAN2_CLK_ENABLE __HAL_RCC_CAN2_CLK_ENABLE +#define __CAN2_FORCE_RESET __HAL_RCC_CAN2_FORCE_RESET +#define __CAN2_RELEASE_RESET __HAL_RCC_CAN2_RELEASE_RESET +#define __CEC_CLK_DISABLE __HAL_RCC_CEC_CLK_DISABLE +#define __CEC_CLK_ENABLE __HAL_RCC_CEC_CLK_ENABLE +#define __COMP_CLK_DISABLE __HAL_RCC_COMP_CLK_DISABLE +#define __COMP_CLK_ENABLE __HAL_RCC_COMP_CLK_ENABLE +#define __COMP_FORCE_RESET __HAL_RCC_COMP_FORCE_RESET +#define __COMP_RELEASE_RESET __HAL_RCC_COMP_RELEASE_RESET +#define __COMP_CLK_SLEEP_ENABLE __HAL_RCC_COMP_CLK_SLEEP_ENABLE +#define __COMP_CLK_SLEEP_DISABLE __HAL_RCC_COMP_CLK_SLEEP_DISABLE +#define __CEC_FORCE_RESET __HAL_RCC_CEC_FORCE_RESET +#define __CEC_RELEASE_RESET __HAL_RCC_CEC_RELEASE_RESET +#define __CRC_CLK_DISABLE __HAL_RCC_CRC_CLK_DISABLE +#define __CRC_CLK_ENABLE __HAL_RCC_CRC_CLK_ENABLE +#define __CRC_CLK_SLEEP_DISABLE __HAL_RCC_CRC_CLK_SLEEP_DISABLE +#define __CRC_CLK_SLEEP_ENABLE __HAL_RCC_CRC_CLK_SLEEP_ENABLE +#define __CRC_FORCE_RESET __HAL_RCC_CRC_FORCE_RESET +#define __CRC_RELEASE_RESET __HAL_RCC_CRC_RELEASE_RESET +#define __DAC_CLK_DISABLE __HAL_RCC_DAC_CLK_DISABLE +#define __DAC_CLK_ENABLE __HAL_RCC_DAC_CLK_ENABLE +#define __DAC_FORCE_RESET __HAL_RCC_DAC_FORCE_RESET +#define __DAC_RELEASE_RESET __HAL_RCC_DAC_RELEASE_RESET +#define __DAC1_CLK_DISABLE __HAL_RCC_DAC1_CLK_DISABLE +#define __DAC1_CLK_ENABLE __HAL_RCC_DAC1_CLK_ENABLE +#define __DAC1_CLK_SLEEP_DISABLE __HAL_RCC_DAC1_CLK_SLEEP_DISABLE +#define __DAC1_CLK_SLEEP_ENABLE __HAL_RCC_DAC1_CLK_SLEEP_ENABLE +#define __DAC1_FORCE_RESET __HAL_RCC_DAC1_FORCE_RESET +#define __DAC1_RELEASE_RESET __HAL_RCC_DAC1_RELEASE_RESET +#define __DBGMCU_CLK_ENABLE __HAL_RCC_DBGMCU_CLK_ENABLE +#define __DBGMCU_CLK_DISABLE __HAL_RCC_DBGMCU_CLK_DISABLE +#define __DBGMCU_FORCE_RESET __HAL_RCC_DBGMCU_FORCE_RESET +#define __DBGMCU_RELEASE_RESET __HAL_RCC_DBGMCU_RELEASE_RESET +#define __DFSDM_CLK_DISABLE __HAL_RCC_DFSDM_CLK_DISABLE +#define __DFSDM_CLK_ENABLE 
__HAL_RCC_DFSDM_CLK_ENABLE +#define __DFSDM_CLK_SLEEP_DISABLE __HAL_RCC_DFSDM_CLK_SLEEP_DISABLE +#define __DFSDM_CLK_SLEEP_ENABLE __HAL_RCC_DFSDM_CLK_SLEEP_ENABLE +#define __DFSDM_FORCE_RESET __HAL_RCC_DFSDM_FORCE_RESET +#define __DFSDM_RELEASE_RESET __HAL_RCC_DFSDM_RELEASE_RESET +#define __DMA1_CLK_DISABLE __HAL_RCC_DMA1_CLK_DISABLE +#define __DMA1_CLK_ENABLE __HAL_RCC_DMA1_CLK_ENABLE +#define __DMA1_CLK_SLEEP_DISABLE __HAL_RCC_DMA1_CLK_SLEEP_DISABLE +#define __DMA1_CLK_SLEEP_ENABLE __HAL_RCC_DMA1_CLK_SLEEP_ENABLE +#define __DMA1_FORCE_RESET __HAL_RCC_DMA1_FORCE_RESET +#define __DMA1_RELEASE_RESET __HAL_RCC_DMA1_RELEASE_RESET +#define __DMA2_CLK_DISABLE __HAL_RCC_DMA2_CLK_DISABLE +#define __DMA2_CLK_ENABLE __HAL_RCC_DMA2_CLK_ENABLE +#define __DMA2_CLK_SLEEP_DISABLE __HAL_RCC_DMA2_CLK_SLEEP_DISABLE +#define __DMA2_CLK_SLEEP_ENABLE __HAL_RCC_DMA2_CLK_SLEEP_ENABLE +#define __DMA2_FORCE_RESET __HAL_RCC_DMA2_FORCE_RESET +#define __DMA2_RELEASE_RESET __HAL_RCC_DMA2_RELEASE_RESET +#define __ETHMAC_CLK_DISABLE __HAL_RCC_ETHMAC_CLK_DISABLE +#define __ETHMAC_CLK_ENABLE __HAL_RCC_ETHMAC_CLK_ENABLE +#define __ETHMAC_FORCE_RESET __HAL_RCC_ETHMAC_FORCE_RESET +#define __ETHMAC_RELEASE_RESET __HAL_RCC_ETHMAC_RELEASE_RESET +#define __ETHMACRX_CLK_DISABLE __HAL_RCC_ETHMACRX_CLK_DISABLE +#define __ETHMACRX_CLK_ENABLE __HAL_RCC_ETHMACRX_CLK_ENABLE +#define __ETHMACTX_CLK_DISABLE __HAL_RCC_ETHMACTX_CLK_DISABLE +#define __ETHMACTX_CLK_ENABLE __HAL_RCC_ETHMACTX_CLK_ENABLE +#define __FIREWALL_CLK_DISABLE __HAL_RCC_FIREWALL_CLK_DISABLE +#define __FIREWALL_CLK_ENABLE __HAL_RCC_FIREWALL_CLK_ENABLE +#define __FLASH_CLK_DISABLE __HAL_RCC_FLASH_CLK_DISABLE +#define __FLASH_CLK_ENABLE __HAL_RCC_FLASH_CLK_ENABLE +#define __FLASH_CLK_SLEEP_DISABLE __HAL_RCC_FLASH_CLK_SLEEP_DISABLE +#define __FLASH_CLK_SLEEP_ENABLE __HAL_RCC_FLASH_CLK_SLEEP_ENABLE +#define __FLASH_FORCE_RESET __HAL_RCC_FLASH_FORCE_RESET +#define __FLASH_RELEASE_RESET __HAL_RCC_FLASH_RELEASE_RESET +#define __FLITF_CLK_DISABLE __HAL_RCC_FLITF_CLK_DISABLE +#define __FLITF_CLK_ENABLE __HAL_RCC_FLITF_CLK_ENABLE +#define __FLITF_FORCE_RESET __HAL_RCC_FLITF_FORCE_RESET +#define __FLITF_RELEASE_RESET __HAL_RCC_FLITF_RELEASE_RESET +#define __FLITF_CLK_SLEEP_ENABLE __HAL_RCC_FLITF_CLK_SLEEP_ENABLE +#define __FLITF_CLK_SLEEP_DISABLE __HAL_RCC_FLITF_CLK_SLEEP_DISABLE +#define __FMC_CLK_DISABLE __HAL_RCC_FMC_CLK_DISABLE +#define __FMC_CLK_ENABLE __HAL_RCC_FMC_CLK_ENABLE +#define __FMC_CLK_SLEEP_DISABLE __HAL_RCC_FMC_CLK_SLEEP_DISABLE +#define __FMC_CLK_SLEEP_ENABLE __HAL_RCC_FMC_CLK_SLEEP_ENABLE +#define __FMC_FORCE_RESET __HAL_RCC_FMC_FORCE_RESET +#define __FMC_RELEASE_RESET __HAL_RCC_FMC_RELEASE_RESET +#define __FSMC_CLK_DISABLE __HAL_RCC_FSMC_CLK_DISABLE +#define __FSMC_CLK_ENABLE __HAL_RCC_FSMC_CLK_ENABLE +#define __GPIOA_CLK_DISABLE __HAL_RCC_GPIOA_CLK_DISABLE +#define __GPIOA_CLK_ENABLE __HAL_RCC_GPIOA_CLK_ENABLE +#define __GPIOA_CLK_SLEEP_DISABLE __HAL_RCC_GPIOA_CLK_SLEEP_DISABLE +#define __GPIOA_CLK_SLEEP_ENABLE __HAL_RCC_GPIOA_CLK_SLEEP_ENABLE +#define __GPIOA_FORCE_RESET __HAL_RCC_GPIOA_FORCE_RESET +#define __GPIOA_RELEASE_RESET __HAL_RCC_GPIOA_RELEASE_RESET +#define __GPIOB_CLK_DISABLE __HAL_RCC_GPIOB_CLK_DISABLE +#define __GPIOB_CLK_ENABLE __HAL_RCC_GPIOB_CLK_ENABLE +#define __GPIOB_CLK_SLEEP_DISABLE __HAL_RCC_GPIOB_CLK_SLEEP_DISABLE +#define __GPIOB_CLK_SLEEP_ENABLE __HAL_RCC_GPIOB_CLK_SLEEP_ENABLE +#define __GPIOB_FORCE_RESET __HAL_RCC_GPIOB_FORCE_RESET +#define __GPIOB_RELEASE_RESET __HAL_RCC_GPIOB_RELEASE_RESET +#define __GPIOC_CLK_DISABLE 
__HAL_RCC_GPIOC_CLK_DISABLE +#define __GPIOC_CLK_ENABLE __HAL_RCC_GPIOC_CLK_ENABLE +#define __GPIOC_CLK_SLEEP_DISABLE __HAL_RCC_GPIOC_CLK_SLEEP_DISABLE +#define __GPIOC_CLK_SLEEP_ENABLE __HAL_RCC_GPIOC_CLK_SLEEP_ENABLE +#define __GPIOC_FORCE_RESET __HAL_RCC_GPIOC_FORCE_RESET +#define __GPIOC_RELEASE_RESET __HAL_RCC_GPIOC_RELEASE_RESET +#define __GPIOD_CLK_DISABLE __HAL_RCC_GPIOD_CLK_DISABLE +#define __GPIOD_CLK_ENABLE __HAL_RCC_GPIOD_CLK_ENABLE +#define __GPIOD_CLK_SLEEP_DISABLE __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE +#define __GPIOD_CLK_SLEEP_ENABLE __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE +#define __GPIOD_FORCE_RESET __HAL_RCC_GPIOD_FORCE_RESET +#define __GPIOD_RELEASE_RESET __HAL_RCC_GPIOD_RELEASE_RESET +#define __GPIOE_CLK_DISABLE __HAL_RCC_GPIOE_CLK_DISABLE +#define __GPIOE_CLK_ENABLE __HAL_RCC_GPIOE_CLK_ENABLE +#define __GPIOE_CLK_SLEEP_DISABLE __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE +#define __GPIOE_CLK_SLEEP_ENABLE __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE +#define __GPIOE_FORCE_RESET __HAL_RCC_GPIOE_FORCE_RESET +#define __GPIOE_RELEASE_RESET __HAL_RCC_GPIOE_RELEASE_RESET +#define __GPIOF_CLK_DISABLE __HAL_RCC_GPIOF_CLK_DISABLE +#define __GPIOF_CLK_ENABLE __HAL_RCC_GPIOF_CLK_ENABLE +#define __GPIOF_CLK_SLEEP_DISABLE __HAL_RCC_GPIOF_CLK_SLEEP_DISABLE +#define __GPIOF_CLK_SLEEP_ENABLE __HAL_RCC_GPIOF_CLK_SLEEP_ENABLE +#define __GPIOF_FORCE_RESET __HAL_RCC_GPIOF_FORCE_RESET +#define __GPIOF_RELEASE_RESET __HAL_RCC_GPIOF_RELEASE_RESET +#define __GPIOG_CLK_DISABLE __HAL_RCC_GPIOG_CLK_DISABLE +#define __GPIOG_CLK_ENABLE __HAL_RCC_GPIOG_CLK_ENABLE +#define __GPIOG_CLK_SLEEP_DISABLE __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE +#define __GPIOG_CLK_SLEEP_ENABLE __HAL_RCC_GPIOG_CLK_SLEEP_ENABLE +#define __GPIOG_FORCE_RESET __HAL_RCC_GPIOG_FORCE_RESET +#define __GPIOG_RELEASE_RESET __HAL_RCC_GPIOG_RELEASE_RESET +#define __GPIOH_CLK_DISABLE __HAL_RCC_GPIOH_CLK_DISABLE +#define __GPIOH_CLK_ENABLE __HAL_RCC_GPIOH_CLK_ENABLE +#define __GPIOH_CLK_SLEEP_DISABLE __HAL_RCC_GPIOH_CLK_SLEEP_DISABLE +#define __GPIOH_CLK_SLEEP_ENABLE __HAL_RCC_GPIOH_CLK_SLEEP_ENABLE +#define __GPIOH_FORCE_RESET __HAL_RCC_GPIOH_FORCE_RESET +#define __GPIOH_RELEASE_RESET __HAL_RCC_GPIOH_RELEASE_RESET +#define __I2C1_CLK_DISABLE __HAL_RCC_I2C1_CLK_DISABLE +#define __I2C1_CLK_ENABLE __HAL_RCC_I2C1_CLK_ENABLE +#define __I2C1_CLK_SLEEP_DISABLE __HAL_RCC_I2C1_CLK_SLEEP_DISABLE +#define __I2C1_CLK_SLEEP_ENABLE __HAL_RCC_I2C1_CLK_SLEEP_ENABLE +#define __I2C1_FORCE_RESET __HAL_RCC_I2C1_FORCE_RESET +#define __I2C1_RELEASE_RESET __HAL_RCC_I2C1_RELEASE_RESET +#define __I2C2_CLK_DISABLE __HAL_RCC_I2C2_CLK_DISABLE +#define __I2C2_CLK_ENABLE __HAL_RCC_I2C2_CLK_ENABLE +#define __I2C2_CLK_SLEEP_DISABLE __HAL_RCC_I2C2_CLK_SLEEP_DISABLE +#define __I2C2_CLK_SLEEP_ENABLE __HAL_RCC_I2C2_CLK_SLEEP_ENABLE +#define __I2C2_FORCE_RESET __HAL_RCC_I2C2_FORCE_RESET +#define __I2C2_RELEASE_RESET __HAL_RCC_I2C2_RELEASE_RESET +#define __I2C3_CLK_DISABLE __HAL_RCC_I2C3_CLK_DISABLE +#define __I2C3_CLK_ENABLE __HAL_RCC_I2C3_CLK_ENABLE +#define __I2C3_CLK_SLEEP_DISABLE __HAL_RCC_I2C3_CLK_SLEEP_DISABLE +#define __I2C3_CLK_SLEEP_ENABLE __HAL_RCC_I2C3_CLK_SLEEP_ENABLE +#define __I2C3_FORCE_RESET __HAL_RCC_I2C3_FORCE_RESET +#define __I2C3_RELEASE_RESET __HAL_RCC_I2C3_RELEASE_RESET +#define __LCD_CLK_DISABLE __HAL_RCC_LCD_CLK_DISABLE +#define __LCD_CLK_ENABLE __HAL_RCC_LCD_CLK_ENABLE +#define __LCD_CLK_SLEEP_DISABLE __HAL_RCC_LCD_CLK_SLEEP_DISABLE +#define __LCD_CLK_SLEEP_ENABLE __HAL_RCC_LCD_CLK_SLEEP_ENABLE +#define __LCD_FORCE_RESET __HAL_RCC_LCD_FORCE_RESET +#define __LCD_RELEASE_RESET 
__HAL_RCC_LCD_RELEASE_RESET +#define __LPTIM1_CLK_DISABLE __HAL_RCC_LPTIM1_CLK_DISABLE +#define __LPTIM1_CLK_ENABLE __HAL_RCC_LPTIM1_CLK_ENABLE +#define __LPTIM1_CLK_SLEEP_DISABLE __HAL_RCC_LPTIM1_CLK_SLEEP_DISABLE +#define __LPTIM1_CLK_SLEEP_ENABLE __HAL_RCC_LPTIM1_CLK_SLEEP_ENABLE +#define __LPTIM1_FORCE_RESET __HAL_RCC_LPTIM1_FORCE_RESET +#define __LPTIM1_RELEASE_RESET __HAL_RCC_LPTIM1_RELEASE_RESET +#define __LPTIM2_CLK_DISABLE __HAL_RCC_LPTIM2_CLK_DISABLE +#define __LPTIM2_CLK_ENABLE __HAL_RCC_LPTIM2_CLK_ENABLE +#define __LPTIM2_CLK_SLEEP_DISABLE __HAL_RCC_LPTIM2_CLK_SLEEP_DISABLE +#define __LPTIM2_CLK_SLEEP_ENABLE __HAL_RCC_LPTIM2_CLK_SLEEP_ENABLE +#define __LPTIM2_FORCE_RESET __HAL_RCC_LPTIM2_FORCE_RESET +#define __LPTIM2_RELEASE_RESET __HAL_RCC_LPTIM2_RELEASE_RESET +#define __LPUART1_CLK_DISABLE __HAL_RCC_LPUART1_CLK_DISABLE +#define __LPUART1_CLK_ENABLE __HAL_RCC_LPUART1_CLK_ENABLE +#define __LPUART1_CLK_SLEEP_DISABLE __HAL_RCC_LPUART1_CLK_SLEEP_DISABLE +#define __LPUART1_CLK_SLEEP_ENABLE __HAL_RCC_LPUART1_CLK_SLEEP_ENABLE +#define __LPUART1_FORCE_RESET __HAL_RCC_LPUART1_FORCE_RESET +#define __LPUART1_RELEASE_RESET __HAL_RCC_LPUART1_RELEASE_RESET +#define __OPAMP_CLK_DISABLE __HAL_RCC_OPAMP_CLK_DISABLE +#define __OPAMP_CLK_ENABLE __HAL_RCC_OPAMP_CLK_ENABLE +#define __OPAMP_CLK_SLEEP_DISABLE __HAL_RCC_OPAMP_CLK_SLEEP_DISABLE +#define __OPAMP_CLK_SLEEP_ENABLE __HAL_RCC_OPAMP_CLK_SLEEP_ENABLE +#define __OPAMP_FORCE_RESET __HAL_RCC_OPAMP_FORCE_RESET +#define __OPAMP_RELEASE_RESET __HAL_RCC_OPAMP_RELEASE_RESET +#define __OTGFS_CLK_DISABLE __HAL_RCC_OTGFS_CLK_DISABLE +#define __OTGFS_CLK_ENABLE __HAL_RCC_OTGFS_CLK_ENABLE +#define __OTGFS_CLK_SLEEP_DISABLE __HAL_RCC_OTGFS_CLK_SLEEP_DISABLE +#define __OTGFS_CLK_SLEEP_ENABLE __HAL_RCC_OTGFS_CLK_SLEEP_ENABLE +#define __OTGFS_FORCE_RESET __HAL_RCC_OTGFS_FORCE_RESET +#define __OTGFS_RELEASE_RESET __HAL_RCC_OTGFS_RELEASE_RESET +#define __PWR_CLK_DISABLE __HAL_RCC_PWR_CLK_DISABLE +#define __PWR_CLK_ENABLE __HAL_RCC_PWR_CLK_ENABLE +#define __PWR_CLK_SLEEP_DISABLE __HAL_RCC_PWR_CLK_SLEEP_DISABLE +#define __PWR_CLK_SLEEP_ENABLE __HAL_RCC_PWR_CLK_SLEEP_ENABLE +#define __PWR_FORCE_RESET __HAL_RCC_PWR_FORCE_RESET +#define __PWR_RELEASE_RESET __HAL_RCC_PWR_RELEASE_RESET +#define __QSPI_CLK_DISABLE __HAL_RCC_QSPI_CLK_DISABLE +#define __QSPI_CLK_ENABLE __HAL_RCC_QSPI_CLK_ENABLE +#define __QSPI_CLK_SLEEP_DISABLE __HAL_RCC_QSPI_CLK_SLEEP_DISABLE +#define __QSPI_CLK_SLEEP_ENABLE __HAL_RCC_QSPI_CLK_SLEEP_ENABLE +#define __QSPI_FORCE_RESET __HAL_RCC_QSPI_FORCE_RESET +#define __QSPI_RELEASE_RESET __HAL_RCC_QSPI_RELEASE_RESET + +#if defined(STM32WB) +#define __HAL_RCC_QSPI_CLK_DISABLE __HAL_RCC_QUADSPI_CLK_DISABLE +#define __HAL_RCC_QSPI_CLK_ENABLE __HAL_RCC_QUADSPI_CLK_ENABLE +#define __HAL_RCC_QSPI_CLK_SLEEP_DISABLE __HAL_RCC_QUADSPI_CLK_SLEEP_DISABLE +#define __HAL_RCC_QSPI_CLK_SLEEP_ENABLE __HAL_RCC_QUADSPI_CLK_SLEEP_ENABLE +#define __HAL_RCC_QSPI_FORCE_RESET __HAL_RCC_QUADSPI_FORCE_RESET +#define __HAL_RCC_QSPI_RELEASE_RESET __HAL_RCC_QUADSPI_RELEASE_RESET +#define __HAL_RCC_QSPI_IS_CLK_ENABLED __HAL_RCC_QUADSPI_IS_CLK_ENABLED +#define __HAL_RCC_QSPI_IS_CLK_DISABLED __HAL_RCC_QUADSPI_IS_CLK_DISABLED +#define __HAL_RCC_QSPI_IS_CLK_SLEEP_ENABLED __HAL_RCC_QUADSPI_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_QSPI_IS_CLK_SLEEP_DISABLED __HAL_RCC_QUADSPI_IS_CLK_SLEEP_DISABLED +#define QSPI_IRQHandler QUADSPI_IRQHandler +#endif /* __HAL_RCC_QUADSPI_CLK_ENABLE */ + +#define __RNG_CLK_DISABLE __HAL_RCC_RNG_CLK_DISABLE +#define __RNG_CLK_ENABLE __HAL_RCC_RNG_CLK_ENABLE 
+#define __RNG_CLK_SLEEP_DISABLE __HAL_RCC_RNG_CLK_SLEEP_DISABLE +#define __RNG_CLK_SLEEP_ENABLE __HAL_RCC_RNG_CLK_SLEEP_ENABLE +#define __RNG_FORCE_RESET __HAL_RCC_RNG_FORCE_RESET +#define __RNG_RELEASE_RESET __HAL_RCC_RNG_RELEASE_RESET +#define __SAI1_CLK_DISABLE __HAL_RCC_SAI1_CLK_DISABLE +#define __SAI1_CLK_ENABLE __HAL_RCC_SAI1_CLK_ENABLE +#define __SAI1_CLK_SLEEP_DISABLE __HAL_RCC_SAI1_CLK_SLEEP_DISABLE +#define __SAI1_CLK_SLEEP_ENABLE __HAL_RCC_SAI1_CLK_SLEEP_ENABLE +#define __SAI1_FORCE_RESET __HAL_RCC_SAI1_FORCE_RESET +#define __SAI1_RELEASE_RESET __HAL_RCC_SAI1_RELEASE_RESET +#define __SAI2_CLK_DISABLE __HAL_RCC_SAI2_CLK_DISABLE +#define __SAI2_CLK_ENABLE __HAL_RCC_SAI2_CLK_ENABLE +#define __SAI2_CLK_SLEEP_DISABLE __HAL_RCC_SAI2_CLK_SLEEP_DISABLE +#define __SAI2_CLK_SLEEP_ENABLE __HAL_RCC_SAI2_CLK_SLEEP_ENABLE +#define __SAI2_FORCE_RESET __HAL_RCC_SAI2_FORCE_RESET +#define __SAI2_RELEASE_RESET __HAL_RCC_SAI2_RELEASE_RESET +#define __SDIO_CLK_DISABLE __HAL_RCC_SDIO_CLK_DISABLE +#define __SDIO_CLK_ENABLE __HAL_RCC_SDIO_CLK_ENABLE +#define __SDMMC_CLK_DISABLE __HAL_RCC_SDMMC_CLK_DISABLE +#define __SDMMC_CLK_ENABLE __HAL_RCC_SDMMC_CLK_ENABLE +#define __SDMMC_CLK_SLEEP_DISABLE __HAL_RCC_SDMMC_CLK_SLEEP_DISABLE +#define __SDMMC_CLK_SLEEP_ENABLE __HAL_RCC_SDMMC_CLK_SLEEP_ENABLE +#define __SDMMC_FORCE_RESET __HAL_RCC_SDMMC_FORCE_RESET +#define __SDMMC_RELEASE_RESET __HAL_RCC_SDMMC_RELEASE_RESET +#define __SPI1_CLK_DISABLE __HAL_RCC_SPI1_CLK_DISABLE +#define __SPI1_CLK_ENABLE __HAL_RCC_SPI1_CLK_ENABLE +#define __SPI1_CLK_SLEEP_DISABLE __HAL_RCC_SPI1_CLK_SLEEP_DISABLE +#define __SPI1_CLK_SLEEP_ENABLE __HAL_RCC_SPI1_CLK_SLEEP_ENABLE +#define __SPI1_FORCE_RESET __HAL_RCC_SPI1_FORCE_RESET +#define __SPI1_RELEASE_RESET __HAL_RCC_SPI1_RELEASE_RESET +#define __SPI2_CLK_DISABLE __HAL_RCC_SPI2_CLK_DISABLE +#define __SPI2_CLK_ENABLE __HAL_RCC_SPI2_CLK_ENABLE +#define __SPI2_CLK_SLEEP_DISABLE __HAL_RCC_SPI2_CLK_SLEEP_DISABLE +#define __SPI2_CLK_SLEEP_ENABLE __HAL_RCC_SPI2_CLK_SLEEP_ENABLE +#define __SPI2_FORCE_RESET __HAL_RCC_SPI2_FORCE_RESET +#define __SPI2_RELEASE_RESET __HAL_RCC_SPI2_RELEASE_RESET +#define __SPI3_CLK_DISABLE __HAL_RCC_SPI3_CLK_DISABLE +#define __SPI3_CLK_ENABLE __HAL_RCC_SPI3_CLK_ENABLE +#define __SPI3_CLK_SLEEP_DISABLE __HAL_RCC_SPI3_CLK_SLEEP_DISABLE +#define __SPI3_CLK_SLEEP_ENABLE __HAL_RCC_SPI3_CLK_SLEEP_ENABLE +#define __SPI3_FORCE_RESET __HAL_RCC_SPI3_FORCE_RESET +#define __SPI3_RELEASE_RESET __HAL_RCC_SPI3_RELEASE_RESET +#define __SRAM_CLK_DISABLE __HAL_RCC_SRAM_CLK_DISABLE +#define __SRAM_CLK_ENABLE __HAL_RCC_SRAM_CLK_ENABLE +#define __SRAM1_CLK_SLEEP_DISABLE __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE +#define __SRAM1_CLK_SLEEP_ENABLE __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE +#define __SRAM2_CLK_SLEEP_DISABLE __HAL_RCC_SRAM2_CLK_SLEEP_DISABLE +#define __SRAM2_CLK_SLEEP_ENABLE __HAL_RCC_SRAM2_CLK_SLEEP_ENABLE +#define __SWPMI1_CLK_DISABLE __HAL_RCC_SWPMI1_CLK_DISABLE +#define __SWPMI1_CLK_ENABLE __HAL_RCC_SWPMI1_CLK_ENABLE +#define __SWPMI1_CLK_SLEEP_DISABLE __HAL_RCC_SWPMI1_CLK_SLEEP_DISABLE +#define __SWPMI1_CLK_SLEEP_ENABLE __HAL_RCC_SWPMI1_CLK_SLEEP_ENABLE +#define __SWPMI1_FORCE_RESET __HAL_RCC_SWPMI1_FORCE_RESET +#define __SWPMI1_RELEASE_RESET __HAL_RCC_SWPMI1_RELEASE_RESET +#define __SYSCFG_CLK_DISABLE __HAL_RCC_SYSCFG_CLK_DISABLE +#define __SYSCFG_CLK_ENABLE __HAL_RCC_SYSCFG_CLK_ENABLE +#define __SYSCFG_CLK_SLEEP_DISABLE __HAL_RCC_SYSCFG_CLK_SLEEP_DISABLE +#define __SYSCFG_CLK_SLEEP_ENABLE __HAL_RCC_SYSCFG_CLK_SLEEP_ENABLE +#define __SYSCFG_FORCE_RESET __HAL_RCC_SYSCFG_FORCE_RESET 
+#define __SYSCFG_RELEASE_RESET __HAL_RCC_SYSCFG_RELEASE_RESET +#define __TIM1_CLK_DISABLE __HAL_RCC_TIM1_CLK_DISABLE +#define __TIM1_CLK_ENABLE __HAL_RCC_TIM1_CLK_ENABLE +#define __TIM1_CLK_SLEEP_DISABLE __HAL_RCC_TIM1_CLK_SLEEP_DISABLE +#define __TIM1_CLK_SLEEP_ENABLE __HAL_RCC_TIM1_CLK_SLEEP_ENABLE +#define __TIM1_FORCE_RESET __HAL_RCC_TIM1_FORCE_RESET +#define __TIM1_RELEASE_RESET __HAL_RCC_TIM1_RELEASE_RESET +#define __TIM10_CLK_DISABLE __HAL_RCC_TIM10_CLK_DISABLE +#define __TIM10_CLK_ENABLE __HAL_RCC_TIM10_CLK_ENABLE +#define __TIM10_FORCE_RESET __HAL_RCC_TIM10_FORCE_RESET +#define __TIM10_RELEASE_RESET __HAL_RCC_TIM10_RELEASE_RESET +#define __TIM11_CLK_DISABLE __HAL_RCC_TIM11_CLK_DISABLE +#define __TIM11_CLK_ENABLE __HAL_RCC_TIM11_CLK_ENABLE +#define __TIM11_FORCE_RESET __HAL_RCC_TIM11_FORCE_RESET +#define __TIM11_RELEASE_RESET __HAL_RCC_TIM11_RELEASE_RESET +#define __TIM12_CLK_DISABLE __HAL_RCC_TIM12_CLK_DISABLE +#define __TIM12_CLK_ENABLE __HAL_RCC_TIM12_CLK_ENABLE +#define __TIM12_FORCE_RESET __HAL_RCC_TIM12_FORCE_RESET +#define __TIM12_RELEASE_RESET __HAL_RCC_TIM12_RELEASE_RESET +#define __TIM13_CLK_DISABLE __HAL_RCC_TIM13_CLK_DISABLE +#define __TIM13_CLK_ENABLE __HAL_RCC_TIM13_CLK_ENABLE +#define __TIM13_FORCE_RESET __HAL_RCC_TIM13_FORCE_RESET +#define __TIM13_RELEASE_RESET __HAL_RCC_TIM13_RELEASE_RESET +#define __TIM14_CLK_DISABLE __HAL_RCC_TIM14_CLK_DISABLE +#define __TIM14_CLK_ENABLE __HAL_RCC_TIM14_CLK_ENABLE +#define __TIM14_FORCE_RESET __HAL_RCC_TIM14_FORCE_RESET +#define __TIM14_RELEASE_RESET __HAL_RCC_TIM14_RELEASE_RESET +#define __TIM15_CLK_DISABLE __HAL_RCC_TIM15_CLK_DISABLE +#define __TIM15_CLK_ENABLE __HAL_RCC_TIM15_CLK_ENABLE +#define __TIM15_CLK_SLEEP_DISABLE __HAL_RCC_TIM15_CLK_SLEEP_DISABLE +#define __TIM15_CLK_SLEEP_ENABLE __HAL_RCC_TIM15_CLK_SLEEP_ENABLE +#define __TIM15_FORCE_RESET __HAL_RCC_TIM15_FORCE_RESET +#define __TIM15_RELEASE_RESET __HAL_RCC_TIM15_RELEASE_RESET +#define __TIM16_CLK_DISABLE __HAL_RCC_TIM16_CLK_DISABLE +#define __TIM16_CLK_ENABLE __HAL_RCC_TIM16_CLK_ENABLE +#define __TIM16_CLK_SLEEP_DISABLE __HAL_RCC_TIM16_CLK_SLEEP_DISABLE +#define __TIM16_CLK_SLEEP_ENABLE __HAL_RCC_TIM16_CLK_SLEEP_ENABLE +#define __TIM16_FORCE_RESET __HAL_RCC_TIM16_FORCE_RESET +#define __TIM16_RELEASE_RESET __HAL_RCC_TIM16_RELEASE_RESET +#define __TIM17_CLK_DISABLE __HAL_RCC_TIM17_CLK_DISABLE +#define __TIM17_CLK_ENABLE __HAL_RCC_TIM17_CLK_ENABLE +#define __TIM17_CLK_SLEEP_DISABLE __HAL_RCC_TIM17_CLK_SLEEP_DISABLE +#define __TIM17_CLK_SLEEP_ENABLE __HAL_RCC_TIM17_CLK_SLEEP_ENABLE +#define __TIM17_FORCE_RESET __HAL_RCC_TIM17_FORCE_RESET +#define __TIM17_RELEASE_RESET __HAL_RCC_TIM17_RELEASE_RESET +#define __TIM2_CLK_DISABLE __HAL_RCC_TIM2_CLK_DISABLE +#define __TIM2_CLK_ENABLE __HAL_RCC_TIM2_CLK_ENABLE +#define __TIM2_CLK_SLEEP_DISABLE __HAL_RCC_TIM2_CLK_SLEEP_DISABLE +#define __TIM2_CLK_SLEEP_ENABLE __HAL_RCC_TIM2_CLK_SLEEP_ENABLE +#define __TIM2_FORCE_RESET __HAL_RCC_TIM2_FORCE_RESET +#define __TIM2_RELEASE_RESET __HAL_RCC_TIM2_RELEASE_RESET +#define __TIM3_CLK_DISABLE __HAL_RCC_TIM3_CLK_DISABLE +#define __TIM3_CLK_ENABLE __HAL_RCC_TIM3_CLK_ENABLE +#define __TIM3_CLK_SLEEP_DISABLE __HAL_RCC_TIM3_CLK_SLEEP_DISABLE +#define __TIM3_CLK_SLEEP_ENABLE __HAL_RCC_TIM3_CLK_SLEEP_ENABLE +#define __TIM3_FORCE_RESET __HAL_RCC_TIM3_FORCE_RESET +#define __TIM3_RELEASE_RESET __HAL_RCC_TIM3_RELEASE_RESET +#define __TIM4_CLK_DISABLE __HAL_RCC_TIM4_CLK_DISABLE +#define __TIM4_CLK_ENABLE __HAL_RCC_TIM4_CLK_ENABLE +#define __TIM4_CLK_SLEEP_DISABLE __HAL_RCC_TIM4_CLK_SLEEP_DISABLE 
+#define __TIM4_CLK_SLEEP_ENABLE __HAL_RCC_TIM4_CLK_SLEEP_ENABLE +#define __TIM4_FORCE_RESET __HAL_RCC_TIM4_FORCE_RESET +#define __TIM4_RELEASE_RESET __HAL_RCC_TIM4_RELEASE_RESET +#define __TIM5_CLK_DISABLE __HAL_RCC_TIM5_CLK_DISABLE +#define __TIM5_CLK_ENABLE __HAL_RCC_TIM5_CLK_ENABLE +#define __TIM5_CLK_SLEEP_DISABLE __HAL_RCC_TIM5_CLK_SLEEP_DISABLE +#define __TIM5_CLK_SLEEP_ENABLE __HAL_RCC_TIM5_CLK_SLEEP_ENABLE +#define __TIM5_FORCE_RESET __HAL_RCC_TIM5_FORCE_RESET +#define __TIM5_RELEASE_RESET __HAL_RCC_TIM5_RELEASE_RESET +#define __TIM6_CLK_DISABLE __HAL_RCC_TIM6_CLK_DISABLE +#define __TIM6_CLK_ENABLE __HAL_RCC_TIM6_CLK_ENABLE +#define __TIM6_CLK_SLEEP_DISABLE __HAL_RCC_TIM6_CLK_SLEEP_DISABLE +#define __TIM6_CLK_SLEEP_ENABLE __HAL_RCC_TIM6_CLK_SLEEP_ENABLE +#define __TIM6_FORCE_RESET __HAL_RCC_TIM6_FORCE_RESET +#define __TIM6_RELEASE_RESET __HAL_RCC_TIM6_RELEASE_RESET +#define __TIM7_CLK_DISABLE __HAL_RCC_TIM7_CLK_DISABLE +#define __TIM7_CLK_ENABLE __HAL_RCC_TIM7_CLK_ENABLE +#define __TIM7_CLK_SLEEP_DISABLE __HAL_RCC_TIM7_CLK_SLEEP_DISABLE +#define __TIM7_CLK_SLEEP_ENABLE __HAL_RCC_TIM7_CLK_SLEEP_ENABLE +#define __TIM7_FORCE_RESET __HAL_RCC_TIM7_FORCE_RESET +#define __TIM7_RELEASE_RESET __HAL_RCC_TIM7_RELEASE_RESET +#define __TIM8_CLK_DISABLE __HAL_RCC_TIM8_CLK_DISABLE +#define __TIM8_CLK_ENABLE __HAL_RCC_TIM8_CLK_ENABLE +#define __TIM8_CLK_SLEEP_DISABLE __HAL_RCC_TIM8_CLK_SLEEP_DISABLE +#define __TIM8_CLK_SLEEP_ENABLE __HAL_RCC_TIM8_CLK_SLEEP_ENABLE +#define __TIM8_FORCE_RESET __HAL_RCC_TIM8_FORCE_RESET +#define __TIM8_RELEASE_RESET __HAL_RCC_TIM8_RELEASE_RESET +#define __TIM9_CLK_DISABLE __HAL_RCC_TIM9_CLK_DISABLE +#define __TIM9_CLK_ENABLE __HAL_RCC_TIM9_CLK_ENABLE +#define __TIM9_FORCE_RESET __HAL_RCC_TIM9_FORCE_RESET +#define __TIM9_RELEASE_RESET __HAL_RCC_TIM9_RELEASE_RESET +#define __TSC_CLK_DISABLE __HAL_RCC_TSC_CLK_DISABLE +#define __TSC_CLK_ENABLE __HAL_RCC_TSC_CLK_ENABLE +#define __TSC_CLK_SLEEP_DISABLE __HAL_RCC_TSC_CLK_SLEEP_DISABLE +#define __TSC_CLK_SLEEP_ENABLE __HAL_RCC_TSC_CLK_SLEEP_ENABLE +#define __TSC_FORCE_RESET __HAL_RCC_TSC_FORCE_RESET +#define __TSC_RELEASE_RESET __HAL_RCC_TSC_RELEASE_RESET +#define __UART4_CLK_DISABLE __HAL_RCC_UART4_CLK_DISABLE +#define __UART4_CLK_ENABLE __HAL_RCC_UART4_CLK_ENABLE +#define __UART4_CLK_SLEEP_DISABLE __HAL_RCC_UART4_CLK_SLEEP_DISABLE +#define __UART4_CLK_SLEEP_ENABLE __HAL_RCC_UART4_CLK_SLEEP_ENABLE +#define __UART4_FORCE_RESET __HAL_RCC_UART4_FORCE_RESET +#define __UART4_RELEASE_RESET __HAL_RCC_UART4_RELEASE_RESET +#define __UART5_CLK_DISABLE __HAL_RCC_UART5_CLK_DISABLE +#define __UART5_CLK_ENABLE __HAL_RCC_UART5_CLK_ENABLE +#define __UART5_CLK_SLEEP_DISABLE __HAL_RCC_UART5_CLK_SLEEP_DISABLE +#define __UART5_CLK_SLEEP_ENABLE __HAL_RCC_UART5_CLK_SLEEP_ENABLE +#define __UART5_FORCE_RESET __HAL_RCC_UART5_FORCE_RESET +#define __UART5_RELEASE_RESET __HAL_RCC_UART5_RELEASE_RESET +#define __USART1_CLK_DISABLE __HAL_RCC_USART1_CLK_DISABLE +#define __USART1_CLK_ENABLE __HAL_RCC_USART1_CLK_ENABLE +#define __USART1_CLK_SLEEP_DISABLE __HAL_RCC_USART1_CLK_SLEEP_DISABLE +#define __USART1_CLK_SLEEP_ENABLE __HAL_RCC_USART1_CLK_SLEEP_ENABLE +#define __USART1_FORCE_RESET __HAL_RCC_USART1_FORCE_RESET +#define __USART1_RELEASE_RESET __HAL_RCC_USART1_RELEASE_RESET +#define __USART2_CLK_DISABLE __HAL_RCC_USART2_CLK_DISABLE +#define __USART2_CLK_ENABLE __HAL_RCC_USART2_CLK_ENABLE +#define __USART2_CLK_SLEEP_DISABLE __HAL_RCC_USART2_CLK_SLEEP_DISABLE +#define __USART2_CLK_SLEEP_ENABLE __HAL_RCC_USART2_CLK_SLEEP_ENABLE +#define __USART2_FORCE_RESET 
__HAL_RCC_USART2_FORCE_RESET +#define __USART2_RELEASE_RESET __HAL_RCC_USART2_RELEASE_RESET +#define __USART3_CLK_DISABLE __HAL_RCC_USART3_CLK_DISABLE +#define __USART3_CLK_ENABLE __HAL_RCC_USART3_CLK_ENABLE +#define __USART3_CLK_SLEEP_DISABLE __HAL_RCC_USART3_CLK_SLEEP_DISABLE +#define __USART3_CLK_SLEEP_ENABLE __HAL_RCC_USART3_CLK_SLEEP_ENABLE +#define __USART3_FORCE_RESET __HAL_RCC_USART3_FORCE_RESET +#define __USART3_RELEASE_RESET __HAL_RCC_USART3_RELEASE_RESET +#define __USART4_CLK_DISABLE __HAL_RCC_UART4_CLK_DISABLE +#define __USART4_CLK_ENABLE __HAL_RCC_UART4_CLK_ENABLE +#define __USART4_CLK_SLEEP_ENABLE __HAL_RCC_UART4_CLK_SLEEP_ENABLE +#define __USART4_CLK_SLEEP_DISABLE __HAL_RCC_UART4_CLK_SLEEP_DISABLE +#define __USART4_FORCE_RESET __HAL_RCC_UART4_FORCE_RESET +#define __USART4_RELEASE_RESET __HAL_RCC_UART4_RELEASE_RESET +#define __USART5_CLK_DISABLE __HAL_RCC_UART5_CLK_DISABLE +#define __USART5_CLK_ENABLE __HAL_RCC_UART5_CLK_ENABLE +#define __USART5_CLK_SLEEP_ENABLE __HAL_RCC_UART5_CLK_SLEEP_ENABLE +#define __USART5_CLK_SLEEP_DISABLE __HAL_RCC_UART5_CLK_SLEEP_DISABLE +#define __USART5_FORCE_RESET __HAL_RCC_UART5_FORCE_RESET +#define __USART5_RELEASE_RESET __HAL_RCC_UART5_RELEASE_RESET +#define __USART7_CLK_DISABLE __HAL_RCC_UART7_CLK_DISABLE +#define __USART7_CLK_ENABLE __HAL_RCC_UART7_CLK_ENABLE +#define __USART7_FORCE_RESET __HAL_RCC_UART7_FORCE_RESET +#define __USART7_RELEASE_RESET __HAL_RCC_UART7_RELEASE_RESET +#define __USART8_CLK_DISABLE __HAL_RCC_UART8_CLK_DISABLE +#define __USART8_CLK_ENABLE __HAL_RCC_UART8_CLK_ENABLE +#define __USART8_FORCE_RESET __HAL_RCC_UART8_FORCE_RESET +#define __USART8_RELEASE_RESET __HAL_RCC_UART8_RELEASE_RESET +#define __USB_CLK_DISABLE __HAL_RCC_USB_CLK_DISABLE +#define __USB_CLK_ENABLE __HAL_RCC_USB_CLK_ENABLE +#define __USB_FORCE_RESET __HAL_RCC_USB_FORCE_RESET +#define __USB_CLK_SLEEP_ENABLE __HAL_RCC_USB_CLK_SLEEP_ENABLE +#define __USB_CLK_SLEEP_DISABLE __HAL_RCC_USB_CLK_SLEEP_DISABLE +#define __USB_OTG_FS_CLK_DISABLE __HAL_RCC_USB_OTG_FS_CLK_DISABLE +#define __USB_OTG_FS_CLK_ENABLE __HAL_RCC_USB_OTG_FS_CLK_ENABLE +#define __USB_RELEASE_RESET __HAL_RCC_USB_RELEASE_RESET + +#if defined(STM32H7) +#define __HAL_RCC_WWDG_CLK_DISABLE __HAL_RCC_WWDG1_CLK_DISABLE +#define __HAL_RCC_WWDG_CLK_ENABLE __HAL_RCC_WWDG1_CLK_ENABLE +#define __HAL_RCC_WWDG_CLK_SLEEP_DISABLE __HAL_RCC_WWDG1_CLK_SLEEP_DISABLE +#define __HAL_RCC_WWDG_CLK_SLEEP_ENABLE __HAL_RCC_WWDG1_CLK_SLEEP_ENABLE + +#define __HAL_RCC_WWDG_FORCE_RESET ((void)0U) /* Not available on the STM32H7*/ +#define __HAL_RCC_WWDG_RELEASE_RESET ((void)0U) /* Not available on the STM32H7*/ + + +#define __HAL_RCC_WWDG_IS_CLK_ENABLED __HAL_RCC_WWDG1_IS_CLK_ENABLED +#define __HAL_RCC_WWDG_IS_CLK_DISABLED __HAL_RCC_WWDG1_IS_CLK_DISABLED +#endif + +#define __WWDG_CLK_DISABLE __HAL_RCC_WWDG_CLK_DISABLE +#define __WWDG_CLK_ENABLE __HAL_RCC_WWDG_CLK_ENABLE +#define __WWDG_CLK_SLEEP_DISABLE __HAL_RCC_WWDG_CLK_SLEEP_DISABLE +#define __WWDG_CLK_SLEEP_ENABLE __HAL_RCC_WWDG_CLK_SLEEP_ENABLE +#define __WWDG_FORCE_RESET __HAL_RCC_WWDG_FORCE_RESET +#define __WWDG_RELEASE_RESET __HAL_RCC_WWDG_RELEASE_RESET + +#define __TIM21_CLK_ENABLE __HAL_RCC_TIM21_CLK_ENABLE +#define __TIM21_CLK_DISABLE __HAL_RCC_TIM21_CLK_DISABLE +#define __TIM21_FORCE_RESET __HAL_RCC_TIM21_FORCE_RESET +#define __TIM21_RELEASE_RESET __HAL_RCC_TIM21_RELEASE_RESET +#define __TIM21_CLK_SLEEP_ENABLE __HAL_RCC_TIM21_CLK_SLEEP_ENABLE +#define __TIM21_CLK_SLEEP_DISABLE __HAL_RCC_TIM21_CLK_SLEEP_DISABLE +#define __TIM22_CLK_ENABLE __HAL_RCC_TIM22_CLK_ENABLE 
+#define __TIM22_CLK_DISABLE __HAL_RCC_TIM22_CLK_DISABLE +#define __TIM22_FORCE_RESET __HAL_RCC_TIM22_FORCE_RESET +#define __TIM22_RELEASE_RESET __HAL_RCC_TIM22_RELEASE_RESET +#define __TIM22_CLK_SLEEP_ENABLE __HAL_RCC_TIM22_CLK_SLEEP_ENABLE +#define __TIM22_CLK_SLEEP_DISABLE __HAL_RCC_TIM22_CLK_SLEEP_DISABLE +#define __CRS_CLK_DISABLE __HAL_RCC_CRS_CLK_DISABLE +#define __CRS_CLK_ENABLE __HAL_RCC_CRS_CLK_ENABLE +#define __CRS_CLK_SLEEP_DISABLE __HAL_RCC_CRS_CLK_SLEEP_DISABLE +#define __CRS_CLK_SLEEP_ENABLE __HAL_RCC_CRS_CLK_SLEEP_ENABLE +#define __CRS_FORCE_RESET __HAL_RCC_CRS_FORCE_RESET +#define __CRS_RELEASE_RESET __HAL_RCC_CRS_RELEASE_RESET +#define __RCC_BACKUPRESET_FORCE __HAL_RCC_BACKUPRESET_FORCE +#define __RCC_BACKUPRESET_RELEASE __HAL_RCC_BACKUPRESET_RELEASE + +#define __USB_OTG_FS_FORCE_RESET __HAL_RCC_USB_OTG_FS_FORCE_RESET +#define __USB_OTG_FS_RELEASE_RESET __HAL_RCC_USB_OTG_FS_RELEASE_RESET +#define __USB_OTG_FS_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE +#define __USB_OTG_FS_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE +#define __USB_OTG_HS_CLK_DISABLE __HAL_RCC_USB_OTG_HS_CLK_DISABLE +#define __USB_OTG_HS_CLK_ENABLE __HAL_RCC_USB_OTG_HS_CLK_ENABLE +#define __USB_OTG_HS_ULPI_CLK_ENABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE +#define __USB_OTG_HS_ULPI_CLK_DISABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE +#define __TIM9_CLK_SLEEP_ENABLE __HAL_RCC_TIM9_CLK_SLEEP_ENABLE +#define __TIM9_CLK_SLEEP_DISABLE __HAL_RCC_TIM9_CLK_SLEEP_DISABLE +#define __TIM10_CLK_SLEEP_ENABLE __HAL_RCC_TIM10_CLK_SLEEP_ENABLE +#define __TIM10_CLK_SLEEP_DISABLE __HAL_RCC_TIM10_CLK_SLEEP_DISABLE +#define __TIM11_CLK_SLEEP_ENABLE __HAL_RCC_TIM11_CLK_SLEEP_ENABLE +#define __TIM11_CLK_SLEEP_DISABLE __HAL_RCC_TIM11_CLK_SLEEP_DISABLE +#define __ETHMACPTP_CLK_SLEEP_ENABLE __HAL_RCC_ETHMACPTP_CLK_SLEEP_ENABLE +#define __ETHMACPTP_CLK_SLEEP_DISABLE __HAL_RCC_ETHMACPTP_CLK_SLEEP_DISABLE +#define __ETHMACPTP_CLK_ENABLE __HAL_RCC_ETHMACPTP_CLK_ENABLE +#define __ETHMACPTP_CLK_DISABLE __HAL_RCC_ETHMACPTP_CLK_DISABLE +#define __HASH_CLK_ENABLE __HAL_RCC_HASH_CLK_ENABLE +#define __HASH_FORCE_RESET __HAL_RCC_HASH_FORCE_RESET +#define __HASH_RELEASE_RESET __HAL_RCC_HASH_RELEASE_RESET +#define __HASH_CLK_SLEEP_ENABLE __HAL_RCC_HASH_CLK_SLEEP_ENABLE +#define __HASH_CLK_SLEEP_DISABLE __HAL_RCC_HASH_CLK_SLEEP_DISABLE +#define __HASH_CLK_DISABLE __HAL_RCC_HASH_CLK_DISABLE +#define __SPI5_CLK_ENABLE __HAL_RCC_SPI5_CLK_ENABLE +#define __SPI5_CLK_DISABLE __HAL_RCC_SPI5_CLK_DISABLE +#define __SPI5_FORCE_RESET __HAL_RCC_SPI5_FORCE_RESET +#define __SPI5_RELEASE_RESET __HAL_RCC_SPI5_RELEASE_RESET +#define __SPI5_CLK_SLEEP_ENABLE __HAL_RCC_SPI5_CLK_SLEEP_ENABLE +#define __SPI5_CLK_SLEEP_DISABLE __HAL_RCC_SPI5_CLK_SLEEP_DISABLE +#define __SPI6_CLK_ENABLE __HAL_RCC_SPI6_CLK_ENABLE +#define __SPI6_CLK_DISABLE __HAL_RCC_SPI6_CLK_DISABLE +#define __SPI6_FORCE_RESET __HAL_RCC_SPI6_FORCE_RESET +#define __SPI6_RELEASE_RESET __HAL_RCC_SPI6_RELEASE_RESET +#define __SPI6_CLK_SLEEP_ENABLE __HAL_RCC_SPI6_CLK_SLEEP_ENABLE +#define __SPI6_CLK_SLEEP_DISABLE __HAL_RCC_SPI6_CLK_SLEEP_DISABLE +#define __LTDC_CLK_ENABLE __HAL_RCC_LTDC_CLK_ENABLE +#define __LTDC_CLK_DISABLE __HAL_RCC_LTDC_CLK_DISABLE +#define __LTDC_FORCE_RESET __HAL_RCC_LTDC_FORCE_RESET +#define __LTDC_RELEASE_RESET __HAL_RCC_LTDC_RELEASE_RESET +#define __LTDC_CLK_SLEEP_ENABLE __HAL_RCC_LTDC_CLK_SLEEP_ENABLE +#define __ETHMAC_CLK_SLEEP_ENABLE __HAL_RCC_ETHMAC_CLK_SLEEP_ENABLE +#define __ETHMAC_CLK_SLEEP_DISABLE __HAL_RCC_ETHMAC_CLK_SLEEP_DISABLE +#define 
__ETHMACTX_CLK_SLEEP_ENABLE __HAL_RCC_ETHMACTX_CLK_SLEEP_ENABLE +#define __ETHMACTX_CLK_SLEEP_DISABLE __HAL_RCC_ETHMACTX_CLK_SLEEP_DISABLE +#define __ETHMACRX_CLK_SLEEP_ENABLE __HAL_RCC_ETHMACRX_CLK_SLEEP_ENABLE +#define __ETHMACRX_CLK_SLEEP_DISABLE __HAL_RCC_ETHMACRX_CLK_SLEEP_DISABLE +#define __TIM12_CLK_SLEEP_ENABLE __HAL_RCC_TIM12_CLK_SLEEP_ENABLE +#define __TIM12_CLK_SLEEP_DISABLE __HAL_RCC_TIM12_CLK_SLEEP_DISABLE +#define __TIM13_CLK_SLEEP_ENABLE __HAL_RCC_TIM13_CLK_SLEEP_ENABLE +#define __TIM13_CLK_SLEEP_DISABLE __HAL_RCC_TIM13_CLK_SLEEP_DISABLE +#define __TIM14_CLK_SLEEP_ENABLE __HAL_RCC_TIM14_CLK_SLEEP_ENABLE +#define __TIM14_CLK_SLEEP_DISABLE __HAL_RCC_TIM14_CLK_SLEEP_DISABLE +#define __BKPSRAM_CLK_ENABLE __HAL_RCC_BKPSRAM_CLK_ENABLE +#define __BKPSRAM_CLK_DISABLE __HAL_RCC_BKPSRAM_CLK_DISABLE +#define __BKPSRAM_CLK_SLEEP_ENABLE __HAL_RCC_BKPSRAM_CLK_SLEEP_ENABLE +#define __BKPSRAM_CLK_SLEEP_DISABLE __HAL_RCC_BKPSRAM_CLK_SLEEP_DISABLE +#define __CCMDATARAMEN_CLK_ENABLE __HAL_RCC_CCMDATARAMEN_CLK_ENABLE +#define __CCMDATARAMEN_CLK_DISABLE __HAL_RCC_CCMDATARAMEN_CLK_DISABLE +#define __USART6_CLK_ENABLE __HAL_RCC_USART6_CLK_ENABLE +#define __USART6_CLK_DISABLE __HAL_RCC_USART6_CLK_DISABLE +#define __USART6_FORCE_RESET __HAL_RCC_USART6_FORCE_RESET +#define __USART6_RELEASE_RESET __HAL_RCC_USART6_RELEASE_RESET +#define __USART6_CLK_SLEEP_ENABLE __HAL_RCC_USART6_CLK_SLEEP_ENABLE +#define __USART6_CLK_SLEEP_DISABLE __HAL_RCC_USART6_CLK_SLEEP_DISABLE +#define __SPI4_CLK_ENABLE __HAL_RCC_SPI4_CLK_ENABLE +#define __SPI4_CLK_DISABLE __HAL_RCC_SPI4_CLK_DISABLE +#define __SPI4_FORCE_RESET __HAL_RCC_SPI4_FORCE_RESET +#define __SPI4_RELEASE_RESET __HAL_RCC_SPI4_RELEASE_RESET +#define __SPI4_CLK_SLEEP_ENABLE __HAL_RCC_SPI4_CLK_SLEEP_ENABLE +#define __SPI4_CLK_SLEEP_DISABLE __HAL_RCC_SPI4_CLK_SLEEP_DISABLE +#define __GPIOI_CLK_ENABLE __HAL_RCC_GPIOI_CLK_ENABLE +#define __GPIOI_CLK_DISABLE __HAL_RCC_GPIOI_CLK_DISABLE +#define __GPIOI_FORCE_RESET __HAL_RCC_GPIOI_FORCE_RESET +#define __GPIOI_RELEASE_RESET __HAL_RCC_GPIOI_RELEASE_RESET +#define __GPIOI_CLK_SLEEP_ENABLE __HAL_RCC_GPIOI_CLK_SLEEP_ENABLE +#define __GPIOI_CLK_SLEEP_DISABLE __HAL_RCC_GPIOI_CLK_SLEEP_DISABLE +#define __GPIOJ_CLK_ENABLE __HAL_RCC_GPIOJ_CLK_ENABLE +#define __GPIOJ_CLK_DISABLE __HAL_RCC_GPIOJ_CLK_DISABLE +#define __GPIOJ_FORCE_RESET __HAL_RCC_GPIOJ_FORCE_RESET +#define __GPIOJ_RELEASE_RESET __HAL_RCC_GPIOJ_RELEASE_RESET +#define __GPIOJ_CLK_SLEEP_ENABLE __HAL_RCC_GPIOJ_CLK_SLEEP_ENABLE +#define __GPIOJ_CLK_SLEEP_DISABLE __HAL_RCC_GPIOJ_CLK_SLEEP_DISABLE +#define __GPIOK_CLK_ENABLE __HAL_RCC_GPIOK_CLK_ENABLE +#define __GPIOK_CLK_DISABLE __HAL_RCC_GPIOK_CLK_DISABLE +#define __GPIOK_RELEASE_RESET __HAL_RCC_GPIOK_RELEASE_RESET +#define __GPIOK_CLK_SLEEP_ENABLE __HAL_RCC_GPIOK_CLK_SLEEP_ENABLE +#define __GPIOK_CLK_SLEEP_DISABLE __HAL_RCC_GPIOK_CLK_SLEEP_DISABLE +#define __ETH_CLK_ENABLE __HAL_RCC_ETH_CLK_ENABLE +#define __ETH_CLK_DISABLE __HAL_RCC_ETH_CLK_DISABLE +#define __DCMI_CLK_ENABLE __HAL_RCC_DCMI_CLK_ENABLE +#define __DCMI_CLK_DISABLE __HAL_RCC_DCMI_CLK_DISABLE +#define __DCMI_FORCE_RESET __HAL_RCC_DCMI_FORCE_RESET +#define __DCMI_RELEASE_RESET __HAL_RCC_DCMI_RELEASE_RESET +#define __DCMI_CLK_SLEEP_ENABLE __HAL_RCC_DCMI_CLK_SLEEP_ENABLE +#define __DCMI_CLK_SLEEP_DISABLE __HAL_RCC_DCMI_CLK_SLEEP_DISABLE +#define __UART7_CLK_ENABLE __HAL_RCC_UART7_CLK_ENABLE +#define __UART7_CLK_DISABLE __HAL_RCC_UART7_CLK_DISABLE +#define __UART7_RELEASE_RESET __HAL_RCC_UART7_RELEASE_RESET +#define __UART7_FORCE_RESET 
__HAL_RCC_UART7_FORCE_RESET +#define __UART7_CLK_SLEEP_ENABLE __HAL_RCC_UART7_CLK_SLEEP_ENABLE +#define __UART7_CLK_SLEEP_DISABLE __HAL_RCC_UART7_CLK_SLEEP_DISABLE +#define __UART8_CLK_ENABLE __HAL_RCC_UART8_CLK_ENABLE +#define __UART8_CLK_DISABLE __HAL_RCC_UART8_CLK_DISABLE +#define __UART8_FORCE_RESET __HAL_RCC_UART8_FORCE_RESET +#define __UART8_RELEASE_RESET __HAL_RCC_UART8_RELEASE_RESET +#define __UART8_CLK_SLEEP_ENABLE __HAL_RCC_UART8_CLK_SLEEP_ENABLE +#define __UART8_CLK_SLEEP_DISABLE __HAL_RCC_UART8_CLK_SLEEP_DISABLE +#define __OTGHS_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE +#define __OTGHS_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE +#define __OTGHS_FORCE_RESET __HAL_RCC_USB_OTG_HS_FORCE_RESET +#define __OTGHS_RELEASE_RESET __HAL_RCC_USB_OTG_HS_RELEASE_RESET +#define __OTGHSULPI_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE +#define __OTGHSULPI_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE +#define __HAL_RCC_OTGHS_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE +#define __HAL_RCC_OTGHS_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE +#define __HAL_RCC_OTGHS_IS_CLK_SLEEP_ENABLED __HAL_RCC_USB_OTG_HS_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_OTGHS_IS_CLK_SLEEP_DISABLED __HAL_RCC_USB_OTG_HS_IS_CLK_SLEEP_DISABLED +#define __HAL_RCC_OTGHS_FORCE_RESET __HAL_RCC_USB_OTG_HS_FORCE_RESET +#define __HAL_RCC_OTGHS_RELEASE_RESET __HAL_RCC_USB_OTG_HS_RELEASE_RESET +#define __HAL_RCC_OTGHSULPI_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE +#define __HAL_RCC_OTGHSULPI_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE +#define __HAL_RCC_OTGHSULPI_IS_CLK_SLEEP_ENABLED __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_OTGHSULPI_IS_CLK_SLEEP_DISABLED __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_SLEEP_DISABLED +#define __SRAM3_CLK_SLEEP_ENABLE __HAL_RCC_SRAM3_CLK_SLEEP_ENABLE +#define __CAN2_CLK_SLEEP_ENABLE __HAL_RCC_CAN2_CLK_SLEEP_ENABLE +#define __CAN2_CLK_SLEEP_DISABLE __HAL_RCC_CAN2_CLK_SLEEP_DISABLE +#define __DAC_CLK_SLEEP_ENABLE __HAL_RCC_DAC_CLK_SLEEP_ENABLE +#define __DAC_CLK_SLEEP_DISABLE __HAL_RCC_DAC_CLK_SLEEP_DISABLE +#define __ADC2_CLK_SLEEP_ENABLE __HAL_RCC_ADC2_CLK_SLEEP_ENABLE +#define __ADC2_CLK_SLEEP_DISABLE __HAL_RCC_ADC2_CLK_SLEEP_DISABLE +#define __ADC3_CLK_SLEEP_ENABLE __HAL_RCC_ADC3_CLK_SLEEP_ENABLE +#define __ADC3_CLK_SLEEP_DISABLE __HAL_RCC_ADC3_CLK_SLEEP_DISABLE +#define __FSMC_FORCE_RESET __HAL_RCC_FSMC_FORCE_RESET +#define __FSMC_RELEASE_RESET __HAL_RCC_FSMC_RELEASE_RESET +#define __FSMC_CLK_SLEEP_ENABLE __HAL_RCC_FSMC_CLK_SLEEP_ENABLE +#define __FSMC_CLK_SLEEP_DISABLE __HAL_RCC_FSMC_CLK_SLEEP_DISABLE +#define __SDIO_FORCE_RESET __HAL_RCC_SDIO_FORCE_RESET +#define __SDIO_RELEASE_RESET __HAL_RCC_SDIO_RELEASE_RESET +#define __SDIO_CLK_SLEEP_DISABLE __HAL_RCC_SDIO_CLK_SLEEP_DISABLE +#define __SDIO_CLK_SLEEP_ENABLE __HAL_RCC_SDIO_CLK_SLEEP_ENABLE +#define __DMA2D_CLK_ENABLE __HAL_RCC_DMA2D_CLK_ENABLE +#define __DMA2D_CLK_DISABLE __HAL_RCC_DMA2D_CLK_DISABLE +#define __DMA2D_FORCE_RESET __HAL_RCC_DMA2D_FORCE_RESET +#define __DMA2D_RELEASE_RESET __HAL_RCC_DMA2D_RELEASE_RESET +#define __DMA2D_CLK_SLEEP_ENABLE __HAL_RCC_DMA2D_CLK_SLEEP_ENABLE +#define __DMA2D_CLK_SLEEP_DISABLE __HAL_RCC_DMA2D_CLK_SLEEP_DISABLE + +/* alias define maintained for legacy */ +#define __HAL_RCC_OTGFS_FORCE_RESET __HAL_RCC_USB_OTG_FS_FORCE_RESET +#define __HAL_RCC_OTGFS_RELEASE_RESET __HAL_RCC_USB_OTG_FS_RELEASE_RESET + +#define __ADC12_CLK_ENABLE __HAL_RCC_ADC12_CLK_ENABLE +#define __ADC12_CLK_DISABLE 
__HAL_RCC_ADC12_CLK_DISABLE +#define __ADC34_CLK_ENABLE __HAL_RCC_ADC34_CLK_ENABLE +#define __ADC34_CLK_DISABLE __HAL_RCC_ADC34_CLK_DISABLE +#define __DAC2_CLK_ENABLE __HAL_RCC_DAC2_CLK_ENABLE +#define __DAC2_CLK_DISABLE __HAL_RCC_DAC2_CLK_DISABLE +#define __TIM18_CLK_ENABLE __HAL_RCC_TIM18_CLK_ENABLE +#define __TIM18_CLK_DISABLE __HAL_RCC_TIM18_CLK_DISABLE +#define __TIM19_CLK_ENABLE __HAL_RCC_TIM19_CLK_ENABLE +#define __TIM19_CLK_DISABLE __HAL_RCC_TIM19_CLK_DISABLE +#define __TIM20_CLK_ENABLE __HAL_RCC_TIM20_CLK_ENABLE +#define __TIM20_CLK_DISABLE __HAL_RCC_TIM20_CLK_DISABLE +#define __HRTIM1_CLK_ENABLE __HAL_RCC_HRTIM1_CLK_ENABLE +#define __HRTIM1_CLK_DISABLE __HAL_RCC_HRTIM1_CLK_DISABLE +#define __SDADC1_CLK_ENABLE __HAL_RCC_SDADC1_CLK_ENABLE +#define __SDADC2_CLK_ENABLE __HAL_RCC_SDADC2_CLK_ENABLE +#define __SDADC3_CLK_ENABLE __HAL_RCC_SDADC3_CLK_ENABLE +#define __SDADC1_CLK_DISABLE __HAL_RCC_SDADC1_CLK_DISABLE +#define __SDADC2_CLK_DISABLE __HAL_RCC_SDADC2_CLK_DISABLE +#define __SDADC3_CLK_DISABLE __HAL_RCC_SDADC3_CLK_DISABLE + +#define __ADC12_FORCE_RESET __HAL_RCC_ADC12_FORCE_RESET +#define __ADC12_RELEASE_RESET __HAL_RCC_ADC12_RELEASE_RESET +#define __ADC34_FORCE_RESET __HAL_RCC_ADC34_FORCE_RESET +#define __ADC34_RELEASE_RESET __HAL_RCC_ADC34_RELEASE_RESET +#define __DAC2_FORCE_RESET __HAL_RCC_DAC2_FORCE_RESET +#define __DAC2_RELEASE_RESET __HAL_RCC_DAC2_RELEASE_RESET +#define __TIM18_FORCE_RESET __HAL_RCC_TIM18_FORCE_RESET +#define __TIM18_RELEASE_RESET __HAL_RCC_TIM18_RELEASE_RESET +#define __TIM19_FORCE_RESET __HAL_RCC_TIM19_FORCE_RESET +#define __TIM19_RELEASE_RESET __HAL_RCC_TIM19_RELEASE_RESET +#define __TIM20_FORCE_RESET __HAL_RCC_TIM20_FORCE_RESET +#define __TIM20_RELEASE_RESET __HAL_RCC_TIM20_RELEASE_RESET +#define __HRTIM1_FORCE_RESET __HAL_RCC_HRTIM1_FORCE_RESET +#define __HRTIM1_RELEASE_RESET __HAL_RCC_HRTIM1_RELEASE_RESET +#define __SDADC1_FORCE_RESET __HAL_RCC_SDADC1_FORCE_RESET +#define __SDADC2_FORCE_RESET __HAL_RCC_SDADC2_FORCE_RESET +#define __SDADC3_FORCE_RESET __HAL_RCC_SDADC3_FORCE_RESET +#define __SDADC1_RELEASE_RESET __HAL_RCC_SDADC1_RELEASE_RESET +#define __SDADC2_RELEASE_RESET __HAL_RCC_SDADC2_RELEASE_RESET +#define __SDADC3_RELEASE_RESET __HAL_RCC_SDADC3_RELEASE_RESET + +#define __ADC1_IS_CLK_ENABLED __HAL_RCC_ADC1_IS_CLK_ENABLED +#define __ADC1_IS_CLK_DISABLED __HAL_RCC_ADC1_IS_CLK_DISABLED +#define __ADC12_IS_CLK_ENABLED __HAL_RCC_ADC12_IS_CLK_ENABLED +#define __ADC12_IS_CLK_DISABLED __HAL_RCC_ADC12_IS_CLK_DISABLED +#define __ADC34_IS_CLK_ENABLED __HAL_RCC_ADC34_IS_CLK_ENABLED +#define __ADC34_IS_CLK_DISABLED __HAL_RCC_ADC34_IS_CLK_DISABLED +#define __CEC_IS_CLK_ENABLED __HAL_RCC_CEC_IS_CLK_ENABLED +#define __CEC_IS_CLK_DISABLED __HAL_RCC_CEC_IS_CLK_DISABLED +#define __CRC_IS_CLK_ENABLED __HAL_RCC_CRC_IS_CLK_ENABLED +#define __CRC_IS_CLK_DISABLED __HAL_RCC_CRC_IS_CLK_DISABLED +#define __DAC1_IS_CLK_ENABLED __HAL_RCC_DAC1_IS_CLK_ENABLED +#define __DAC1_IS_CLK_DISABLED __HAL_RCC_DAC1_IS_CLK_DISABLED +#define __DAC2_IS_CLK_ENABLED __HAL_RCC_DAC2_IS_CLK_ENABLED +#define __DAC2_IS_CLK_DISABLED __HAL_RCC_DAC2_IS_CLK_DISABLED +#define __DMA1_IS_CLK_ENABLED __HAL_RCC_DMA1_IS_CLK_ENABLED +#define __DMA1_IS_CLK_DISABLED __HAL_RCC_DMA1_IS_CLK_DISABLED +#define __DMA2_IS_CLK_ENABLED __HAL_RCC_DMA2_IS_CLK_ENABLED +#define __DMA2_IS_CLK_DISABLED __HAL_RCC_DMA2_IS_CLK_DISABLED +#define __FLITF_IS_CLK_ENABLED __HAL_RCC_FLITF_IS_CLK_ENABLED +#define __FLITF_IS_CLK_DISABLED __HAL_RCC_FLITF_IS_CLK_DISABLED +#define __FMC_IS_CLK_ENABLED __HAL_RCC_FMC_IS_CLK_ENABLED 
+#define __FMC_IS_CLK_DISABLED __HAL_RCC_FMC_IS_CLK_DISABLED +#define __GPIOA_IS_CLK_ENABLED __HAL_RCC_GPIOA_IS_CLK_ENABLED +#define __GPIOA_IS_CLK_DISABLED __HAL_RCC_GPIOA_IS_CLK_DISABLED +#define __GPIOB_IS_CLK_ENABLED __HAL_RCC_GPIOB_IS_CLK_ENABLED +#define __GPIOB_IS_CLK_DISABLED __HAL_RCC_GPIOB_IS_CLK_DISABLED +#define __GPIOC_IS_CLK_ENABLED __HAL_RCC_GPIOC_IS_CLK_ENABLED +#define __GPIOC_IS_CLK_DISABLED __HAL_RCC_GPIOC_IS_CLK_DISABLED +#define __GPIOD_IS_CLK_ENABLED __HAL_RCC_GPIOD_IS_CLK_ENABLED +#define __GPIOD_IS_CLK_DISABLED __HAL_RCC_GPIOD_IS_CLK_DISABLED +#define __GPIOE_IS_CLK_ENABLED __HAL_RCC_GPIOE_IS_CLK_ENABLED +#define __GPIOE_IS_CLK_DISABLED __HAL_RCC_GPIOE_IS_CLK_DISABLED +#define __GPIOF_IS_CLK_ENABLED __HAL_RCC_GPIOF_IS_CLK_ENABLED +#define __GPIOF_IS_CLK_DISABLED __HAL_RCC_GPIOF_IS_CLK_DISABLED +#define __GPIOG_IS_CLK_ENABLED __HAL_RCC_GPIOG_IS_CLK_ENABLED +#define __GPIOG_IS_CLK_DISABLED __HAL_RCC_GPIOG_IS_CLK_DISABLED +#define __GPIOH_IS_CLK_ENABLED __HAL_RCC_GPIOH_IS_CLK_ENABLED +#define __GPIOH_IS_CLK_DISABLED __HAL_RCC_GPIOH_IS_CLK_DISABLED +#define __HRTIM1_IS_CLK_ENABLED __HAL_RCC_HRTIM1_IS_CLK_ENABLED +#define __HRTIM1_IS_CLK_DISABLED __HAL_RCC_HRTIM1_IS_CLK_DISABLED +#define __I2C1_IS_CLK_ENABLED __HAL_RCC_I2C1_IS_CLK_ENABLED +#define __I2C1_IS_CLK_DISABLED __HAL_RCC_I2C1_IS_CLK_DISABLED +#define __I2C2_IS_CLK_ENABLED __HAL_RCC_I2C2_IS_CLK_ENABLED +#define __I2C2_IS_CLK_DISABLED __HAL_RCC_I2C2_IS_CLK_DISABLED +#define __I2C3_IS_CLK_ENABLED __HAL_RCC_I2C3_IS_CLK_ENABLED +#define __I2C3_IS_CLK_DISABLED __HAL_RCC_I2C3_IS_CLK_DISABLED +#define __PWR_IS_CLK_ENABLED __HAL_RCC_PWR_IS_CLK_ENABLED +#define __PWR_IS_CLK_DISABLED __HAL_RCC_PWR_IS_CLK_DISABLED +#define __SYSCFG_IS_CLK_ENABLED __HAL_RCC_SYSCFG_IS_CLK_ENABLED +#define __SYSCFG_IS_CLK_DISABLED __HAL_RCC_SYSCFG_IS_CLK_DISABLED +#define __SPI1_IS_CLK_ENABLED __HAL_RCC_SPI1_IS_CLK_ENABLED +#define __SPI1_IS_CLK_DISABLED __HAL_RCC_SPI1_IS_CLK_DISABLED +#define __SPI2_IS_CLK_ENABLED __HAL_RCC_SPI2_IS_CLK_ENABLED +#define __SPI2_IS_CLK_DISABLED __HAL_RCC_SPI2_IS_CLK_DISABLED +#define __SPI3_IS_CLK_ENABLED __HAL_RCC_SPI3_IS_CLK_ENABLED +#define __SPI3_IS_CLK_DISABLED __HAL_RCC_SPI3_IS_CLK_DISABLED +#define __SPI4_IS_CLK_ENABLED __HAL_RCC_SPI4_IS_CLK_ENABLED +#define __SPI4_IS_CLK_DISABLED __HAL_RCC_SPI4_IS_CLK_DISABLED +#define __SDADC1_IS_CLK_ENABLED __HAL_RCC_SDADC1_IS_CLK_ENABLED +#define __SDADC1_IS_CLK_DISABLED __HAL_RCC_SDADC1_IS_CLK_DISABLED +#define __SDADC2_IS_CLK_ENABLED __HAL_RCC_SDADC2_IS_CLK_ENABLED +#define __SDADC2_IS_CLK_DISABLED __HAL_RCC_SDADC2_IS_CLK_DISABLED +#define __SDADC3_IS_CLK_ENABLED __HAL_RCC_SDADC3_IS_CLK_ENABLED +#define __SDADC3_IS_CLK_DISABLED __HAL_RCC_SDADC3_IS_CLK_DISABLED +#define __SRAM_IS_CLK_ENABLED __HAL_RCC_SRAM_IS_CLK_ENABLED +#define __SRAM_IS_CLK_DISABLED __HAL_RCC_SRAM_IS_CLK_DISABLED +#define __TIM1_IS_CLK_ENABLED __HAL_RCC_TIM1_IS_CLK_ENABLED +#define __TIM1_IS_CLK_DISABLED __HAL_RCC_TIM1_IS_CLK_DISABLED +#define __TIM2_IS_CLK_ENABLED __HAL_RCC_TIM2_IS_CLK_ENABLED +#define __TIM2_IS_CLK_DISABLED __HAL_RCC_TIM2_IS_CLK_DISABLED +#define __TIM3_IS_CLK_ENABLED __HAL_RCC_TIM3_IS_CLK_ENABLED +#define __TIM3_IS_CLK_DISABLED __HAL_RCC_TIM3_IS_CLK_DISABLED +#define __TIM4_IS_CLK_ENABLED __HAL_RCC_TIM4_IS_CLK_ENABLED +#define __TIM4_IS_CLK_DISABLED __HAL_RCC_TIM4_IS_CLK_DISABLED +#define __TIM5_IS_CLK_ENABLED __HAL_RCC_TIM5_IS_CLK_ENABLED +#define __TIM5_IS_CLK_DISABLED __HAL_RCC_TIM5_IS_CLK_DISABLED +#define __TIM6_IS_CLK_ENABLED __HAL_RCC_TIM6_IS_CLK_ENABLED +#define 
__TIM6_IS_CLK_DISABLED __HAL_RCC_TIM6_IS_CLK_DISABLED +#define __TIM7_IS_CLK_ENABLED __HAL_RCC_TIM7_IS_CLK_ENABLED +#define __TIM7_IS_CLK_DISABLED __HAL_RCC_TIM7_IS_CLK_DISABLED +#define __TIM8_IS_CLK_ENABLED __HAL_RCC_TIM8_IS_CLK_ENABLED +#define __TIM8_IS_CLK_DISABLED __HAL_RCC_TIM8_IS_CLK_DISABLED +#define __TIM12_IS_CLK_ENABLED __HAL_RCC_TIM12_IS_CLK_ENABLED +#define __TIM12_IS_CLK_DISABLED __HAL_RCC_TIM12_IS_CLK_DISABLED +#define __TIM13_IS_CLK_ENABLED __HAL_RCC_TIM13_IS_CLK_ENABLED +#define __TIM13_IS_CLK_DISABLED __HAL_RCC_TIM13_IS_CLK_DISABLED +#define __TIM14_IS_CLK_ENABLED __HAL_RCC_TIM14_IS_CLK_ENABLED +#define __TIM14_IS_CLK_DISABLED __HAL_RCC_TIM14_IS_CLK_DISABLED +#define __TIM15_IS_CLK_ENABLED __HAL_RCC_TIM15_IS_CLK_ENABLED +#define __TIM15_IS_CLK_DISABLED __HAL_RCC_TIM15_IS_CLK_DISABLED +#define __TIM16_IS_CLK_ENABLED __HAL_RCC_TIM16_IS_CLK_ENABLED +#define __TIM16_IS_CLK_DISABLED __HAL_RCC_TIM16_IS_CLK_DISABLED +#define __TIM17_IS_CLK_ENABLED __HAL_RCC_TIM17_IS_CLK_ENABLED +#define __TIM17_IS_CLK_DISABLED __HAL_RCC_TIM17_IS_CLK_DISABLED +#define __TIM18_IS_CLK_ENABLED __HAL_RCC_TIM18_IS_CLK_ENABLED +#define __TIM18_IS_CLK_DISABLED __HAL_RCC_TIM18_IS_CLK_DISABLED +#define __TIM19_IS_CLK_ENABLED __HAL_RCC_TIM19_IS_CLK_ENABLED +#define __TIM19_IS_CLK_DISABLED __HAL_RCC_TIM19_IS_CLK_DISABLED +#define __TIM20_IS_CLK_ENABLED __HAL_RCC_TIM20_IS_CLK_ENABLED +#define __TIM20_IS_CLK_DISABLED __HAL_RCC_TIM20_IS_CLK_DISABLED +#define __TSC_IS_CLK_ENABLED __HAL_RCC_TSC_IS_CLK_ENABLED +#define __TSC_IS_CLK_DISABLED __HAL_RCC_TSC_IS_CLK_DISABLED +#define __UART4_IS_CLK_ENABLED __HAL_RCC_UART4_IS_CLK_ENABLED +#define __UART4_IS_CLK_DISABLED __HAL_RCC_UART4_IS_CLK_DISABLED +#define __UART5_IS_CLK_ENABLED __HAL_RCC_UART5_IS_CLK_ENABLED +#define __UART5_IS_CLK_DISABLED __HAL_RCC_UART5_IS_CLK_DISABLED +#define __USART1_IS_CLK_ENABLED __HAL_RCC_USART1_IS_CLK_ENABLED +#define __USART1_IS_CLK_DISABLED __HAL_RCC_USART1_IS_CLK_DISABLED +#define __USART2_IS_CLK_ENABLED __HAL_RCC_USART2_IS_CLK_ENABLED +#define __USART2_IS_CLK_DISABLED __HAL_RCC_USART2_IS_CLK_DISABLED +#define __USART3_IS_CLK_ENABLED __HAL_RCC_USART3_IS_CLK_ENABLED +#define __USART3_IS_CLK_DISABLED __HAL_RCC_USART3_IS_CLK_DISABLED +#define __USB_IS_CLK_ENABLED __HAL_RCC_USB_IS_CLK_ENABLED +#define __USB_IS_CLK_DISABLED __HAL_RCC_USB_IS_CLK_DISABLED +#define __WWDG_IS_CLK_ENABLED __HAL_RCC_WWDG_IS_CLK_ENABLED +#define __WWDG_IS_CLK_DISABLED __HAL_RCC_WWDG_IS_CLK_DISABLED + +#if defined(STM32L1) +#define __HAL_RCC_CRYP_CLK_DISABLE __HAL_RCC_AES_CLK_DISABLE +#define __HAL_RCC_CRYP_CLK_ENABLE __HAL_RCC_AES_CLK_ENABLE +#define __HAL_RCC_CRYP_CLK_SLEEP_DISABLE __HAL_RCC_AES_CLK_SLEEP_DISABLE +#define __HAL_RCC_CRYP_CLK_SLEEP_ENABLE __HAL_RCC_AES_CLK_SLEEP_ENABLE +#define __HAL_RCC_CRYP_FORCE_RESET __HAL_RCC_AES_FORCE_RESET +#define __HAL_RCC_CRYP_RELEASE_RESET __HAL_RCC_AES_RELEASE_RESET +#endif /* STM32L1 */ + +#if defined(STM32F4) +#define __HAL_RCC_SDMMC1_FORCE_RESET __HAL_RCC_SDIO_FORCE_RESET +#define __HAL_RCC_SDMMC1_RELEASE_RESET __HAL_RCC_SDIO_RELEASE_RESET +#define __HAL_RCC_SDMMC1_CLK_SLEEP_ENABLE __HAL_RCC_SDIO_CLK_SLEEP_ENABLE +#define __HAL_RCC_SDMMC1_CLK_SLEEP_DISABLE __HAL_RCC_SDIO_CLK_SLEEP_DISABLE +#define __HAL_RCC_SDMMC1_CLK_ENABLE __HAL_RCC_SDIO_CLK_ENABLE +#define __HAL_RCC_SDMMC1_CLK_DISABLE __HAL_RCC_SDIO_CLK_DISABLE +#define __HAL_RCC_SDMMC1_IS_CLK_ENABLED __HAL_RCC_SDIO_IS_CLK_ENABLED +#define __HAL_RCC_SDMMC1_IS_CLK_DISABLED __HAL_RCC_SDIO_IS_CLK_DISABLED +#define Sdmmc1ClockSelection SdioClockSelection +#define 
RCC_PERIPHCLK_SDMMC1 RCC_PERIPHCLK_SDIO +#define RCC_SDMMC1CLKSOURCE_CLK48 RCC_SDIOCLKSOURCE_CK48 +#define RCC_SDMMC1CLKSOURCE_SYSCLK RCC_SDIOCLKSOURCE_SYSCLK +#define __HAL_RCC_SDMMC1_CONFIG __HAL_RCC_SDIO_CONFIG +#define __HAL_RCC_GET_SDMMC1_SOURCE __HAL_RCC_GET_SDIO_SOURCE +#endif + +#if defined(STM32F7) || defined(STM32L4) +#define __HAL_RCC_SDIO_FORCE_RESET __HAL_RCC_SDMMC1_FORCE_RESET +#define __HAL_RCC_SDIO_RELEASE_RESET __HAL_RCC_SDMMC1_RELEASE_RESET +#define __HAL_RCC_SDIO_CLK_SLEEP_ENABLE __HAL_RCC_SDMMC1_CLK_SLEEP_ENABLE +#define __HAL_RCC_SDIO_CLK_SLEEP_DISABLE __HAL_RCC_SDMMC1_CLK_SLEEP_DISABLE +#define __HAL_RCC_SDIO_CLK_ENABLE __HAL_RCC_SDMMC1_CLK_ENABLE +#define __HAL_RCC_SDIO_CLK_DISABLE __HAL_RCC_SDMMC1_CLK_DISABLE +#define __HAL_RCC_SDIO_IS_CLK_ENABLED __HAL_RCC_SDMMC1_IS_CLK_ENABLED +#define __HAL_RCC_SDIO_IS_CLK_DISABLED __HAL_RCC_SDMMC1_IS_CLK_DISABLED +#define SdioClockSelection Sdmmc1ClockSelection +#define RCC_PERIPHCLK_SDIO RCC_PERIPHCLK_SDMMC1 +#define __HAL_RCC_SDIO_CONFIG __HAL_RCC_SDMMC1_CONFIG +#define __HAL_RCC_GET_SDIO_SOURCE __HAL_RCC_GET_SDMMC1_SOURCE +#endif + +#if defined(STM32F7) +#define RCC_SDIOCLKSOURCE_CLK48 RCC_SDMMC1CLKSOURCE_CLK48 +#define RCC_SDIOCLKSOURCE_SYSCLK RCC_SDMMC1CLKSOURCE_SYSCLK +#endif + +#if defined(STM32H7) +#define __HAL_RCC_USB_OTG_HS_CLK_ENABLE() __HAL_RCC_USB1_OTG_HS_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_HS_CLK_DISABLE() __HAL_RCC_USB1_OTG_HS_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_HS_FORCE_RESET() __HAL_RCC_USB1_OTG_HS_FORCE_RESET() +#define __HAL_RCC_USB_OTG_HS_RELEASE_RESET() __HAL_RCC_USB1_OTG_HS_RELEASE_RESET() +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE() __HAL_RCC_USB1_OTG_HS_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE() __HAL_RCC_USB1_OTG_HS_CLK_SLEEP_DISABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_SLEEP_DISABLE() + +#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() __HAL_RCC_USB2_OTG_FS_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_ENABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() __HAL_RCC_USB2_OTG_FS_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_DISABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() __HAL_RCC_USB2_OTG_FS_FORCE_RESET() +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() __HAL_RCC_USB2_OTG_FS_RELEASE_RESET() +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() __HAL_RCC_USB2_OTG_FS_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_SLEEP_ENABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() __HAL_RCC_USB2_OTG_FS_CLK_SLEEP_DISABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_SLEEP_DISABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_SLEEP_DISABLE() +#endif + +#define __HAL_RCC_I2SCLK __HAL_RCC_I2S_CONFIG +#define __HAL_RCC_I2SCLK_CONFIG __HAL_RCC_I2S_CONFIG + +#define __RCC_PLLSRC RCC_GET_PLL_OSCSOURCE + +#define IS_RCC_MSIRANGE IS_RCC_MSI_CLOCK_RANGE +#define IS_RCC_RTCCLK_SOURCE IS_RCC_RTCCLKSOURCE +#define IS_RCC_SYSCLK_DIV IS_RCC_HCLK +#define IS_RCC_HCLK_DIV IS_RCC_PCLK +#define IS_RCC_PERIPHCLK IS_RCC_PERIPHCLOCK + +#define RCC_IT_HSI14 RCC_IT_HSI14RDY + +#define RCC_IT_CSSLSE RCC_IT_LSECSS +#define RCC_IT_CSSHSE RCC_IT_CSS + 
+#define RCC_PLLMUL_3 RCC_PLL_MUL3 +#define RCC_PLLMUL_4 RCC_PLL_MUL4 +#define RCC_PLLMUL_6 RCC_PLL_MUL6 +#define RCC_PLLMUL_8 RCC_PLL_MUL8 +#define RCC_PLLMUL_12 RCC_PLL_MUL12 +#define RCC_PLLMUL_16 RCC_PLL_MUL16 +#define RCC_PLLMUL_24 RCC_PLL_MUL24 +#define RCC_PLLMUL_32 RCC_PLL_MUL32 +#define RCC_PLLMUL_48 RCC_PLL_MUL48 + +#define RCC_PLLDIV_2 RCC_PLL_DIV2 +#define RCC_PLLDIV_3 RCC_PLL_DIV3 +#define RCC_PLLDIV_4 RCC_PLL_DIV4 + +#define IS_RCC_MCOSOURCE IS_RCC_MCO1SOURCE +#define __HAL_RCC_MCO_CONFIG __HAL_RCC_MCO1_CONFIG +#define RCC_MCO_NODIV RCC_MCODIV_1 +#define RCC_MCO_DIV1 RCC_MCODIV_1 +#define RCC_MCO_DIV2 RCC_MCODIV_2 +#define RCC_MCO_DIV4 RCC_MCODIV_4 +#define RCC_MCO_DIV8 RCC_MCODIV_8 +#define RCC_MCO_DIV16 RCC_MCODIV_16 +#define RCC_MCO_DIV32 RCC_MCODIV_32 +#define RCC_MCO_DIV64 RCC_MCODIV_64 +#define RCC_MCO_DIV128 RCC_MCODIV_128 +#define RCC_MCOSOURCE_NONE RCC_MCO1SOURCE_NOCLOCK +#define RCC_MCOSOURCE_LSI RCC_MCO1SOURCE_LSI +#define RCC_MCOSOURCE_LSE RCC_MCO1SOURCE_LSE +#define RCC_MCOSOURCE_SYSCLK RCC_MCO1SOURCE_SYSCLK +#define RCC_MCOSOURCE_HSI RCC_MCO1SOURCE_HSI +#define RCC_MCOSOURCE_HSI14 RCC_MCO1SOURCE_HSI14 +#define RCC_MCOSOURCE_HSI48 RCC_MCO1SOURCE_HSI48 +#define RCC_MCOSOURCE_HSE RCC_MCO1SOURCE_HSE +#define RCC_MCOSOURCE_PLLCLK_DIV1 RCC_MCO1SOURCE_PLLCLK +#define RCC_MCOSOURCE_PLLCLK_NODIV RCC_MCO1SOURCE_PLLCLK +#define RCC_MCOSOURCE_PLLCLK_DIV2 RCC_MCO1SOURCE_PLLCLK_DIV2 + +#if defined(STM32L4) +#define RCC_RTCCLKSOURCE_NO_CLK RCC_RTCCLKSOURCE_NONE +#elif defined(STM32WB) || defined(STM32G0) || defined(STM32G4) || defined(STM32L5) +#else +#define RCC_RTCCLKSOURCE_NONE RCC_RTCCLKSOURCE_NO_CLK +#endif + +#define RCC_USBCLK_PLLSAI1 RCC_USBCLKSOURCE_PLLSAI1 +#define RCC_USBCLK_PLL RCC_USBCLKSOURCE_PLL +#define RCC_USBCLK_MSI RCC_USBCLKSOURCE_MSI +#define RCC_USBCLKSOURCE_PLLCLK RCC_USBCLKSOURCE_PLL +#define RCC_USBPLLCLK_DIV1 RCC_USBCLKSOURCE_PLL +#define RCC_USBPLLCLK_DIV1_5 RCC_USBCLKSOURCE_PLL_DIV1_5 +#define RCC_USBPLLCLK_DIV2 RCC_USBCLKSOURCE_PLL_DIV2 +#define RCC_USBPLLCLK_DIV3 RCC_USBCLKSOURCE_PLL_DIV3 + +#define HSION_BitNumber RCC_HSION_BIT_NUMBER +#define HSION_BITNUMBER RCC_HSION_BIT_NUMBER +#define HSEON_BitNumber RCC_HSEON_BIT_NUMBER +#define HSEON_BITNUMBER RCC_HSEON_BIT_NUMBER +#define MSION_BITNUMBER RCC_MSION_BIT_NUMBER +#define CSSON_BitNumber RCC_CSSON_BIT_NUMBER +#define CSSON_BITNUMBER RCC_CSSON_BIT_NUMBER +#define PLLON_BitNumber RCC_PLLON_BIT_NUMBER +#define PLLON_BITNUMBER RCC_PLLON_BIT_NUMBER +#define PLLI2SON_BitNumber RCC_PLLI2SON_BIT_NUMBER +#define I2SSRC_BitNumber RCC_I2SSRC_BIT_NUMBER +#define RTCEN_BitNumber RCC_RTCEN_BIT_NUMBER +#define RTCEN_BITNUMBER RCC_RTCEN_BIT_NUMBER +#define BDRST_BitNumber RCC_BDRST_BIT_NUMBER +#define BDRST_BITNUMBER RCC_BDRST_BIT_NUMBER +#define RTCRST_BITNUMBER RCC_RTCRST_BIT_NUMBER +#define LSION_BitNumber RCC_LSION_BIT_NUMBER +#define LSION_BITNUMBER RCC_LSION_BIT_NUMBER +#define LSEON_BitNumber RCC_LSEON_BIT_NUMBER +#define LSEON_BITNUMBER RCC_LSEON_BIT_NUMBER +#define LSEBYP_BITNUMBER RCC_LSEBYP_BIT_NUMBER +#define PLLSAION_BitNumber RCC_PLLSAION_BIT_NUMBER +#define TIMPRE_BitNumber RCC_TIMPRE_BIT_NUMBER +#define RMVF_BitNumber RCC_RMVF_BIT_NUMBER +#define RMVF_BITNUMBER RCC_RMVF_BIT_NUMBER +#define RCC_CR2_HSI14TRIM_BitNumber RCC_HSI14TRIM_BIT_NUMBER +#define CR_BYTE2_ADDRESS RCC_CR_BYTE2_ADDRESS +#define CIR_BYTE1_ADDRESS RCC_CIR_BYTE1_ADDRESS +#define CIR_BYTE2_ADDRESS RCC_CIR_BYTE2_ADDRESS +#define BDCR_BYTE0_ADDRESS RCC_BDCR_BYTE0_ADDRESS +#define DBP_TIMEOUT_VALUE RCC_DBP_TIMEOUT_VALUE +#define 
LSE_TIMEOUT_VALUE RCC_LSE_TIMEOUT_VALUE + +#define CR_HSION_BB RCC_CR_HSION_BB +#define CR_CSSON_BB RCC_CR_CSSON_BB +#define CR_PLLON_BB RCC_CR_PLLON_BB +#define CR_PLLI2SON_BB RCC_CR_PLLI2SON_BB +#define CR_MSION_BB RCC_CR_MSION_BB +#define CSR_LSION_BB RCC_CSR_LSION_BB +#define CSR_LSEON_BB RCC_CSR_LSEON_BB +#define CSR_LSEBYP_BB RCC_CSR_LSEBYP_BB +#define CSR_RTCEN_BB RCC_CSR_RTCEN_BB +#define CSR_RTCRST_BB RCC_CSR_RTCRST_BB +#define CFGR_I2SSRC_BB RCC_CFGR_I2SSRC_BB +#define BDCR_RTCEN_BB RCC_BDCR_RTCEN_BB +#define BDCR_BDRST_BB RCC_BDCR_BDRST_BB +#define CR_HSEON_BB RCC_CR_HSEON_BB +#define CSR_RMVF_BB RCC_CSR_RMVF_BB +#define CR_PLLSAION_BB RCC_CR_PLLSAION_BB +#define DCKCFGR_TIMPRE_BB RCC_DCKCFGR_TIMPRE_BB + +#define __HAL_RCC_CRS_ENABLE_FREQ_ERROR_COUNTER __HAL_RCC_CRS_FREQ_ERROR_COUNTER_ENABLE +#define __HAL_RCC_CRS_DISABLE_FREQ_ERROR_COUNTER __HAL_RCC_CRS_FREQ_ERROR_COUNTER_DISABLE +#define __HAL_RCC_CRS_ENABLE_AUTOMATIC_CALIB __HAL_RCC_CRS_AUTOMATIC_CALIB_ENABLE +#define __HAL_RCC_CRS_DISABLE_AUTOMATIC_CALIB __HAL_RCC_CRS_AUTOMATIC_CALIB_DISABLE +#define __HAL_RCC_CRS_CALCULATE_RELOADVALUE __HAL_RCC_CRS_RELOADVALUE_CALCULATE + +#define __HAL_RCC_GET_IT_SOURCE __HAL_RCC_GET_IT + +#define RCC_CRS_SYNCWARM RCC_CRS_SYNCWARN +#define RCC_CRS_TRIMOV RCC_CRS_TRIMOVF + +#define RCC_PERIPHCLK_CK48 RCC_PERIPHCLK_CLK48 +#define RCC_CK48CLKSOURCE_PLLQ RCC_CLK48CLKSOURCE_PLLQ +#define RCC_CK48CLKSOURCE_PLLSAIP RCC_CLK48CLKSOURCE_PLLSAIP +#define RCC_CK48CLKSOURCE_PLLI2SQ RCC_CLK48CLKSOURCE_PLLI2SQ +#define IS_RCC_CK48CLKSOURCE IS_RCC_CLK48CLKSOURCE +#define RCC_SDIOCLKSOURCE_CK48 RCC_SDIOCLKSOURCE_CLK48 + +#define __HAL_RCC_DFSDM_CLK_ENABLE __HAL_RCC_DFSDM1_CLK_ENABLE +#define __HAL_RCC_DFSDM_CLK_DISABLE __HAL_RCC_DFSDM1_CLK_DISABLE +#define __HAL_RCC_DFSDM_IS_CLK_ENABLED __HAL_RCC_DFSDM1_IS_CLK_ENABLED +#define __HAL_RCC_DFSDM_IS_CLK_DISABLED __HAL_RCC_DFSDM1_IS_CLK_DISABLED +#define __HAL_RCC_DFSDM_FORCE_RESET __HAL_RCC_DFSDM1_FORCE_RESET +#define __HAL_RCC_DFSDM_RELEASE_RESET __HAL_RCC_DFSDM1_RELEASE_RESET +#define __HAL_RCC_DFSDM_CLK_SLEEP_ENABLE __HAL_RCC_DFSDM1_CLK_SLEEP_ENABLE +#define __HAL_RCC_DFSDM_CLK_SLEEP_DISABLE __HAL_RCC_DFSDM1_CLK_SLEEP_DISABLE +#define __HAL_RCC_DFSDM_IS_CLK_SLEEP_ENABLED __HAL_RCC_DFSDM1_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_DFSDM_IS_CLK_SLEEP_DISABLED __HAL_RCC_DFSDM1_IS_CLK_SLEEP_DISABLED +#define DfsdmClockSelection Dfsdm1ClockSelection +#define RCC_PERIPHCLK_DFSDM RCC_PERIPHCLK_DFSDM1 +#define RCC_DFSDMCLKSOURCE_PCLK RCC_DFSDM1CLKSOURCE_PCLK2 +#define RCC_DFSDMCLKSOURCE_SYSCLK RCC_DFSDM1CLKSOURCE_SYSCLK +#define __HAL_RCC_DFSDM_CONFIG __HAL_RCC_DFSDM1_CONFIG +#define __HAL_RCC_GET_DFSDM_SOURCE __HAL_RCC_GET_DFSDM1_SOURCE +#define RCC_DFSDM1CLKSOURCE_PCLK RCC_DFSDM1CLKSOURCE_PCLK2 +#define RCC_SWPMI1CLKSOURCE_PCLK RCC_SWPMI1CLKSOURCE_PCLK1 +#define RCC_LPTIM1CLKSOURCE_PCLK RCC_LPTIM1CLKSOURCE_PCLK1 +#define RCC_LPTIM2CLKSOURCE_PCLK RCC_LPTIM2CLKSOURCE_PCLK1 + +#define RCC_DFSDM1AUDIOCLKSOURCE_I2SAPB1 RCC_DFSDM1AUDIOCLKSOURCE_I2S1 +#define RCC_DFSDM1AUDIOCLKSOURCE_I2SAPB2 RCC_DFSDM1AUDIOCLKSOURCE_I2S2 +#define RCC_DFSDM2AUDIOCLKSOURCE_I2SAPB1 RCC_DFSDM2AUDIOCLKSOURCE_I2S1 +#define RCC_DFSDM2AUDIOCLKSOURCE_I2SAPB2 RCC_DFSDM2AUDIOCLKSOURCE_I2S2 +#define RCC_DFSDM1CLKSOURCE_APB2 RCC_DFSDM1CLKSOURCE_PCLK2 +#define RCC_DFSDM2CLKSOURCE_APB2 RCC_DFSDM2CLKSOURCE_PCLK2 +#define RCC_FMPI2C1CLKSOURCE_APB RCC_FMPI2C1CLKSOURCE_PCLK1 + +/** + * @} + */ + +/** @defgroup HAL_RNG_Aliased_Macros HAL RNG Aliased Macros maintained for legacy purpose + * @{ + */ +#define 
HAL_RNG_ReadyCallback(__HANDLE__) HAL_RNG_ReadyDataCallback((__HANDLE__), uint32_t random32bit) + +/** + * @} + */ + +/** @defgroup HAL_RTC_Aliased_Macros HAL RTC Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined (STM32G0) || defined (STM32L5) || defined (STM32L412xx) || defined (STM32L422xx) || defined (STM32L4P5xx) || defined (STM32L4Q5xx) || defined (STM32G4) +#else +#define __HAL_RTC_CLEAR_FLAG __HAL_RTC_EXTI_CLEAR_FLAG +#endif +#define __HAL_RTC_DISABLE_IT __HAL_RTC_EXTI_DISABLE_IT +#define __HAL_RTC_ENABLE_IT __HAL_RTC_EXTI_ENABLE_IT + +#if defined (STM32F1) +#define __HAL_RTC_EXTI_CLEAR_FLAG(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_CLEAR_FLAG() + +#define __HAL_RTC_EXTI_ENABLE_IT(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_ENABLE_IT() + +#define __HAL_RTC_EXTI_DISABLE_IT(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_DISABLE_IT() + +#define __HAL_RTC_EXTI_GET_FLAG(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_GET_FLAG() + +#define __HAL_RTC_EXTI_GENERATE_SWIT(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_GENERATE_SWIT() +#else +#define __HAL_RTC_EXTI_CLEAR_FLAG(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_CLEAR_FLAG() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_CLEAR_FLAG() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_CLEAR_FLAG())) +#define __HAL_RTC_EXTI_ENABLE_IT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_ENABLE_IT() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_IT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_IT())) +#define __HAL_RTC_EXTI_DISABLE_IT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_DISABLE_IT() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_IT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_IT())) +#define __HAL_RTC_EXTI_GET_FLAG(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_GET_FLAG() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_GET_FLAG() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GET_FLAG())) +#define __HAL_RTC_EXTI_GENERATE_SWIT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_GENERATE_SWIT() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? 
__HAL_RTC_WAKEUPTIMER_EXTI_GENERATE_SWIT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GENERATE_SWIT())) +#endif /* STM32F1 */ + +#define IS_ALARM IS_RTC_ALARM +#define IS_ALARM_MASK IS_RTC_ALARM_MASK +#define IS_TAMPER IS_RTC_TAMPER +#define IS_TAMPER_ERASE_MODE IS_RTC_TAMPER_ERASE_MODE +#define IS_TAMPER_FILTER IS_RTC_TAMPER_FILTER +#define IS_TAMPER_INTERRUPT IS_RTC_TAMPER_INTERRUPT +#define IS_TAMPER_MASKFLAG_STATE IS_RTC_TAMPER_MASKFLAG_STATE +#define IS_TAMPER_PRECHARGE_DURATION IS_RTC_TAMPER_PRECHARGE_DURATION +#define IS_TAMPER_PULLUP_STATE IS_RTC_TAMPER_PULLUP_STATE +#define IS_TAMPER_SAMPLING_FREQ IS_RTC_TAMPER_SAMPLING_FREQ +#define IS_TAMPER_TIMESTAMPONTAMPER_DETECTION IS_RTC_TAMPER_TIMESTAMPONTAMPER_DETECTION +#define IS_TAMPER_TRIGGER IS_RTC_TAMPER_TRIGGER +#define IS_WAKEUP_CLOCK IS_RTC_WAKEUP_CLOCK +#define IS_WAKEUP_COUNTER IS_RTC_WAKEUP_COUNTER + +#define __RTC_WRITEPROTECTION_ENABLE __HAL_RTC_WRITEPROTECTION_ENABLE +#define __RTC_WRITEPROTECTION_DISABLE __HAL_RTC_WRITEPROTECTION_DISABLE + +/** + * @} + */ + +/** @defgroup HAL_SD_Aliased_Macros HAL SD Aliased Macros maintained for legacy purpose + * @{ + */ + +#define SD_OCR_CID_CSD_OVERWRIETE SD_OCR_CID_CSD_OVERWRITE +#define SD_CMD_SD_APP_STAUS SD_CMD_SD_APP_STATUS + +#if defined(STM32F4) || defined(STM32F2) +#define SD_SDMMC_DISABLED SD_SDIO_DISABLED +#define SD_SDMMC_FUNCTION_BUSY SD_SDIO_FUNCTION_BUSY +#define SD_SDMMC_FUNCTION_FAILED SD_SDIO_FUNCTION_FAILED +#define SD_SDMMC_UNKNOWN_FUNCTION SD_SDIO_UNKNOWN_FUNCTION +#define SD_CMD_SDMMC_SEN_OP_COND SD_CMD_SDIO_SEN_OP_COND +#define SD_CMD_SDMMC_RW_DIRECT SD_CMD_SDIO_RW_DIRECT +#define SD_CMD_SDMMC_RW_EXTENDED SD_CMD_SDIO_RW_EXTENDED +#define __HAL_SD_SDMMC_ENABLE __HAL_SD_SDIO_ENABLE +#define __HAL_SD_SDMMC_DISABLE __HAL_SD_SDIO_DISABLE +#define __HAL_SD_SDMMC_DMA_ENABLE __HAL_SD_SDIO_DMA_ENABLE +#define __HAL_SD_SDMMC_DMA_DISABLE __HAL_SD_SDIO_DMA_DISABL +#define __HAL_SD_SDMMC_ENABLE_IT __HAL_SD_SDIO_ENABLE_IT +#define __HAL_SD_SDMMC_DISABLE_IT __HAL_SD_SDIO_DISABLE_IT +#define __HAL_SD_SDMMC_GET_FLAG __HAL_SD_SDIO_GET_FLAG +#define __HAL_SD_SDMMC_CLEAR_FLAG __HAL_SD_SDIO_CLEAR_FLAG +#define __HAL_SD_SDMMC_GET_IT __HAL_SD_SDIO_GET_IT +#define __HAL_SD_SDMMC_CLEAR_IT __HAL_SD_SDIO_CLEAR_IT +#define SDMMC_STATIC_FLAGS SDIO_STATIC_FLAGS +#define SDMMC_CMD0TIMEOUT SDIO_CMD0TIMEOUT +#define SD_SDMMC_SEND_IF_COND SD_SDIO_SEND_IF_COND +/* alias CMSIS */ +#define SDMMC1_IRQn SDIO_IRQn +#define SDMMC1_IRQHandler SDIO_IRQHandler +#endif + +#if defined(STM32F7) || defined(STM32L4) +#define SD_SDIO_DISABLED SD_SDMMC_DISABLED +#define SD_SDIO_FUNCTION_BUSY SD_SDMMC_FUNCTION_BUSY +#define SD_SDIO_FUNCTION_FAILED SD_SDMMC_FUNCTION_FAILED +#define SD_SDIO_UNKNOWN_FUNCTION SD_SDMMC_UNKNOWN_FUNCTION +#define SD_CMD_SDIO_SEN_OP_COND SD_CMD_SDMMC_SEN_OP_COND +#define SD_CMD_SDIO_RW_DIRECT SD_CMD_SDMMC_RW_DIRECT +#define SD_CMD_SDIO_RW_EXTENDED SD_CMD_SDMMC_RW_EXTENDED +#define __HAL_SD_SDIO_ENABLE __HAL_SD_SDMMC_ENABLE +#define __HAL_SD_SDIO_DISABLE __HAL_SD_SDMMC_DISABLE +#define __HAL_SD_SDIO_DMA_ENABLE __HAL_SD_SDMMC_DMA_ENABLE +#define __HAL_SD_SDIO_DMA_DISABL __HAL_SD_SDMMC_DMA_DISABLE +#define __HAL_SD_SDIO_ENABLE_IT __HAL_SD_SDMMC_ENABLE_IT +#define __HAL_SD_SDIO_DISABLE_IT __HAL_SD_SDMMC_DISABLE_IT +#define __HAL_SD_SDIO_GET_FLAG __HAL_SD_SDMMC_GET_FLAG +#define __HAL_SD_SDIO_CLEAR_FLAG __HAL_SD_SDMMC_CLEAR_FLAG +#define __HAL_SD_SDIO_GET_IT __HAL_SD_SDMMC_GET_IT +#define __HAL_SD_SDIO_CLEAR_IT __HAL_SD_SDMMC_CLEAR_IT +#define SDIO_STATIC_FLAGS SDMMC_STATIC_FLAGS +#define 
SDIO_CMD0TIMEOUT SDMMC_CMD0TIMEOUT +#define SD_SDIO_SEND_IF_COND SD_SDMMC_SEND_IF_COND +/* alias CMSIS for compatibilities */ +#define SDIO_IRQn SDMMC1_IRQn +#define SDIO_IRQHandler SDMMC1_IRQHandler +#endif + +#if defined(STM32F7) || defined(STM32F4) || defined(STM32F2) || defined(STM32L4) || defined(STM32H7) +#define HAL_SD_CardCIDTypedef HAL_SD_CardCIDTypeDef +#define HAL_SD_CardCSDTypedef HAL_SD_CardCSDTypeDef +#define HAL_SD_CardStatusTypedef HAL_SD_CardStatusTypeDef +#define HAL_SD_CardStateTypedef HAL_SD_CardStateTypeDef +#endif + +#if defined(STM32H7) || defined(STM32L5) +#define HAL_MMCEx_Read_DMADoubleBuffer0CpltCallback HAL_MMCEx_Read_DMADoubleBuf0CpltCallback +#define HAL_MMCEx_Read_DMADoubleBuffer1CpltCallback HAL_MMCEx_Read_DMADoubleBuf1CpltCallback +#define HAL_MMCEx_Write_DMADoubleBuffer0CpltCallback HAL_MMCEx_Write_DMADoubleBuf0CpltCallback +#define HAL_MMCEx_Write_DMADoubleBuffer1CpltCallback HAL_MMCEx_Write_DMADoubleBuf1CpltCallback +#define HAL_SDEx_Read_DMADoubleBuffer0CpltCallback HAL_SDEx_Read_DMADoubleBuf0CpltCallback +#define HAL_SDEx_Read_DMADoubleBuffer1CpltCallback HAL_SDEx_Read_DMADoubleBuf1CpltCallback +#define HAL_SDEx_Write_DMADoubleBuffer0CpltCallback HAL_SDEx_Write_DMADoubleBuf0CpltCallback +#define HAL_SDEx_Write_DMADoubleBuffer1CpltCallback HAL_SDEx_Write_DMADoubleBuf1CpltCallback +#define HAL_SD_DriveTransciver_1_8V_Callback HAL_SD_DriveTransceiver_1_8V_Callback +#endif +/** + * @} + */ + +/** @defgroup HAL_SMARTCARD_Aliased_Macros HAL SMARTCARD Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __SMARTCARD_ENABLE_IT __HAL_SMARTCARD_ENABLE_IT +#define __SMARTCARD_DISABLE_IT __HAL_SMARTCARD_DISABLE_IT +#define __SMARTCARD_ENABLE __HAL_SMARTCARD_ENABLE +#define __SMARTCARD_DISABLE __HAL_SMARTCARD_DISABLE +#define __SMARTCARD_DMA_REQUEST_ENABLE __HAL_SMARTCARD_DMA_REQUEST_ENABLE +#define __SMARTCARD_DMA_REQUEST_DISABLE __HAL_SMARTCARD_DMA_REQUEST_DISABLE + +#define __HAL_SMARTCARD_GETCLOCKSOURCE SMARTCARD_GETCLOCKSOURCE +#define __SMARTCARD_GETCLOCKSOURCE SMARTCARD_GETCLOCKSOURCE + +#define IS_SMARTCARD_ONEBIT_SAMPLING IS_SMARTCARD_ONE_BIT_SAMPLE + +/** + * @} + */ + +/** @defgroup HAL_SMBUS_Aliased_Macros HAL SMBUS Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_SMBUS_RESET_CR1 SMBUS_RESET_CR1 +#define __HAL_SMBUS_RESET_CR2 SMBUS_RESET_CR2 +#define __HAL_SMBUS_GENERATE_START SMBUS_GENERATE_START +#define __HAL_SMBUS_GET_ADDR_MATCH SMBUS_GET_ADDR_MATCH +#define __HAL_SMBUS_GET_DIR SMBUS_GET_DIR +#define __HAL_SMBUS_GET_STOP_MODE SMBUS_GET_STOP_MODE +#define __HAL_SMBUS_GET_PEC_MODE SMBUS_GET_PEC_MODE +#define __HAL_SMBUS_GET_ALERT_ENABLED SMBUS_GET_ALERT_ENABLED +/** + * @} + */ + +/** @defgroup HAL_SPI_Aliased_Macros HAL SPI Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_SPI_1LINE_TX SPI_1LINE_TX +#define __HAL_SPI_1LINE_RX SPI_1LINE_RX +#define __HAL_SPI_RESET_CRC SPI_RESET_CRC + +/** + * @} + */ + +/** @defgroup HAL_UART_Aliased_Macros HAL UART Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_UART_GETCLOCKSOURCE UART_GETCLOCKSOURCE +#define __HAL_UART_MASK_COMPUTATION UART_MASK_COMPUTATION +#define __UART_GETCLOCKSOURCE UART_GETCLOCKSOURCE +#define __UART_MASK_COMPUTATION UART_MASK_COMPUTATION + +#define IS_UART_WAKEUPMETHODE IS_UART_WAKEUPMETHOD + +#define IS_UART_ONEBIT_SAMPLE IS_UART_ONE_BIT_SAMPLE +#define IS_UART_ONEBIT_SAMPLING IS_UART_ONE_BIT_SAMPLE + +/** + * @} + */ + + +/** @defgroup HAL_USART_Aliased_Macros HAL USART Aliased Macros maintained for 
legacy purpose + * @{ + */ + +#define __USART_ENABLE_IT __HAL_USART_ENABLE_IT +#define __USART_DISABLE_IT __HAL_USART_DISABLE_IT +#define __USART_ENABLE __HAL_USART_ENABLE +#define __USART_DISABLE __HAL_USART_DISABLE + +#define __HAL_USART_GETCLOCKSOURCE USART_GETCLOCKSOURCE +#define __USART_GETCLOCKSOURCE USART_GETCLOCKSOURCE + +/** + * @} + */ + +/** @defgroup HAL_USB_Aliased_Macros HAL USB Aliased Macros maintained for legacy purpose + * @{ + */ +#define USB_EXTI_LINE_WAKEUP USB_WAKEUP_EXTI_LINE + +#define USB_FS_EXTI_TRIGGER_RISING_EDGE USB_OTG_FS_WAKEUP_EXTI_RISING_EDGE +#define USB_FS_EXTI_TRIGGER_FALLING_EDGE USB_OTG_FS_WAKEUP_EXTI_FALLING_EDGE +#define USB_FS_EXTI_TRIGGER_BOTH_EDGE USB_OTG_FS_WAKEUP_EXTI_RISING_FALLING_EDGE +#define USB_FS_EXTI_LINE_WAKEUP USB_OTG_FS_WAKEUP_EXTI_LINE + +#define USB_HS_EXTI_TRIGGER_RISING_EDGE USB_OTG_HS_WAKEUP_EXTI_RISING_EDGE +#define USB_HS_EXTI_TRIGGER_FALLING_EDGE USB_OTG_HS_WAKEUP_EXTI_FALLING_EDGE +#define USB_HS_EXTI_TRIGGER_BOTH_EDGE USB_OTG_HS_WAKEUP_EXTI_RISING_FALLING_EDGE +#define USB_HS_EXTI_LINE_WAKEUP USB_OTG_HS_WAKEUP_EXTI_LINE + +#define __HAL_USB_EXTI_ENABLE_IT __HAL_USB_WAKEUP_EXTI_ENABLE_IT +#define __HAL_USB_EXTI_DISABLE_IT __HAL_USB_WAKEUP_EXTI_DISABLE_IT +#define __HAL_USB_EXTI_GET_FLAG __HAL_USB_WAKEUP_EXTI_GET_FLAG +#define __HAL_USB_EXTI_CLEAR_FLAG __HAL_USB_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_USB_EXTI_SET_RISING_EDGE_TRIGGER __HAL_USB_WAKEUP_EXTI_ENABLE_RISING_EDGE +#define __HAL_USB_EXTI_SET_FALLING_EDGE_TRIGGER __HAL_USB_WAKEUP_EXTI_ENABLE_FALLING_EDGE +#define __HAL_USB_EXTI_SET_FALLINGRISING_TRIGGER __HAL_USB_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE + +#define __HAL_USB_FS_EXTI_ENABLE_IT __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_IT +#define __HAL_USB_FS_EXTI_DISABLE_IT __HAL_USB_OTG_FS_WAKEUP_EXTI_DISABLE_IT +#define __HAL_USB_FS_EXTI_GET_FLAG __HAL_USB_OTG_FS_WAKEUP_EXTI_GET_FLAG +#define __HAL_USB_FS_EXTI_CLEAR_FLAG __HAL_USB_OTG_FS_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_USB_FS_EXTI_SET_RISING_EGDE_TRIGGER __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_RISING_EDGE +#define __HAL_USB_FS_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_FALLING_EDGE +#define __HAL_USB_FS_EXTI_SET_FALLINGRISING_TRIGGER __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE +#define __HAL_USB_FS_EXTI_GENERATE_SWIT __HAL_USB_OTG_FS_WAKEUP_EXTI_GENERATE_SWIT + +#define __HAL_USB_HS_EXTI_ENABLE_IT __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_IT +#define __HAL_USB_HS_EXTI_DISABLE_IT __HAL_USB_OTG_HS_WAKEUP_EXTI_DISABLE_IT +#define __HAL_USB_HS_EXTI_GET_FLAG __HAL_USB_OTG_HS_WAKEUP_EXTI_GET_FLAG +#define __HAL_USB_HS_EXTI_CLEAR_FLAG __HAL_USB_OTG_HS_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_USB_HS_EXTI_SET_RISING_EGDE_TRIGGER __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_RISING_EDGE +#define __HAL_USB_HS_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_FALLING_EDGE +#define __HAL_USB_HS_EXTI_SET_FALLINGRISING_TRIGGER __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE +#define __HAL_USB_HS_EXTI_GENERATE_SWIT __HAL_USB_OTG_HS_WAKEUP_EXTI_GENERATE_SWIT + +#define HAL_PCD_ActiveRemoteWakeup HAL_PCD_ActivateRemoteWakeup +#define HAL_PCD_DeActiveRemoteWakeup HAL_PCD_DeActivateRemoteWakeup + +#define HAL_PCD_SetTxFiFo HAL_PCDEx_SetTxFiFo +#define HAL_PCD_SetRxFiFo HAL_PCDEx_SetRxFiFo +/** + * @} + */ + +/** @defgroup HAL_TIM_Aliased_Macros HAL TIM Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_TIM_SetICPrescalerValue TIM_SET_ICPRESCALERVALUE +#define __HAL_TIM_ResetICPrescalerValue TIM_RESET_ICPRESCALERVALUE + +#define 
TIM_GET_ITSTATUS __HAL_TIM_GET_IT_SOURCE +#define TIM_GET_CLEAR_IT __HAL_TIM_CLEAR_IT + +#define __HAL_TIM_GET_ITSTATUS __HAL_TIM_GET_IT_SOURCE + +#define __HAL_TIM_DIRECTION_STATUS __HAL_TIM_IS_TIM_COUNTING_DOWN +#define __HAL_TIM_PRESCALER __HAL_TIM_SET_PRESCALER +#define __HAL_TIM_SetCounter __HAL_TIM_SET_COUNTER +#define __HAL_TIM_GetCounter __HAL_TIM_GET_COUNTER +#define __HAL_TIM_SetAutoreload __HAL_TIM_SET_AUTORELOAD +#define __HAL_TIM_GetAutoreload __HAL_TIM_GET_AUTORELOAD +#define __HAL_TIM_SetClockDivision __HAL_TIM_SET_CLOCKDIVISION +#define __HAL_TIM_GetClockDivision __HAL_TIM_GET_CLOCKDIVISION +#define __HAL_TIM_SetICPrescaler __HAL_TIM_SET_ICPRESCALER +#define __HAL_TIM_GetICPrescaler __HAL_TIM_GET_ICPRESCALER +#define __HAL_TIM_SetCompare __HAL_TIM_SET_COMPARE +#define __HAL_TIM_GetCompare __HAL_TIM_GET_COMPARE + +#define TIM_BREAKINPUTSOURCE_DFSDM TIM_BREAKINPUTSOURCE_DFSDM1 +/** + * @} + */ + +/** @defgroup HAL_ETH_Aliased_Macros HAL ETH Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_ETH_EXTI_ENABLE_IT __HAL_ETH_WAKEUP_EXTI_ENABLE_IT +#define __HAL_ETH_EXTI_DISABLE_IT __HAL_ETH_WAKEUP_EXTI_DISABLE_IT +#define __HAL_ETH_EXTI_GET_FLAG __HAL_ETH_WAKEUP_EXTI_GET_FLAG +#define __HAL_ETH_EXTI_CLEAR_FLAG __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_ETH_EXTI_SET_RISING_EGDE_TRIGGER __HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_EDGE_TRIGGER +#define __HAL_ETH_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLING_EDGE_TRIGGER +#define __HAL_ETH_EXTI_SET_FALLINGRISING_TRIGGER __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLINGRISING_TRIGGER + +#define ETH_PROMISCIOUSMODE_ENABLE ETH_PROMISCUOUS_MODE_ENABLE +#define ETH_PROMISCIOUSMODE_DISABLE ETH_PROMISCUOUS_MODE_DISABLE +#define IS_ETH_PROMISCIOUS_MODE IS_ETH_PROMISCUOUS_MODE +/** + * @} + */ + +/** @defgroup HAL_LTDC_Aliased_Macros HAL LTDC Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_LTDC_LAYER LTDC_LAYER +#define __HAL_LTDC_RELOAD_CONFIG __HAL_LTDC_RELOAD_IMMEDIATE_CONFIG +/** + * @} + */ + +/** @defgroup HAL_SAI_Aliased_Macros HAL SAI Aliased Macros maintained for legacy purpose + * @{ + */ +#define SAI_OUTPUTDRIVE_DISABLED SAI_OUTPUTDRIVE_DISABLE +#define SAI_OUTPUTDRIVE_ENABLED SAI_OUTPUTDRIVE_ENABLE +#define SAI_MASTERDIVIDER_ENABLED SAI_MASTERDIVIDER_ENABLE +#define SAI_MASTERDIVIDER_DISABLED SAI_MASTERDIVIDER_DISABLE +#define SAI_STREOMODE SAI_STEREOMODE +#define SAI_FIFOStatus_Empty SAI_FIFOSTATUS_EMPTY +#define SAI_FIFOStatus_Less1QuarterFull SAI_FIFOSTATUS_LESS1QUARTERFULL +#define SAI_FIFOStatus_1QuarterFull SAI_FIFOSTATUS_1QUARTERFULL +#define SAI_FIFOStatus_HalfFull SAI_FIFOSTATUS_HALFFULL +#define SAI_FIFOStatus_3QuartersFull SAI_FIFOSTATUS_3QUARTERFULL +#define SAI_FIFOStatus_Full SAI_FIFOSTATUS_FULL +#define IS_SAI_BLOCK_MONO_STREO_MODE IS_SAI_BLOCK_MONO_STEREO_MODE +#define SAI_SYNCHRONOUS_EXT SAI_SYNCHRONOUS_EXT_SAI1 +#define SAI_SYNCEXT_IN_ENABLE SAI_SYNCEXT_OUTBLOCKA_ENABLE +/** + * @} + */ + +/** @defgroup HAL_SPDIFRX_Aliased_Macros HAL SPDIFRX Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined(STM32H7) +#define HAL_SPDIFRX_ReceiveControlFlow HAL_SPDIFRX_ReceiveCtrlFlow +#define HAL_SPDIFRX_ReceiveControlFlow_IT HAL_SPDIFRX_ReceiveCtrlFlow_IT +#define HAL_SPDIFRX_ReceiveControlFlow_DMA HAL_SPDIFRX_ReceiveCtrlFlow_DMA +#endif +/** + * @} + */ + +/** @defgroup HAL_HRTIM_Aliased_Functions HAL HRTIM Aliased Functions maintained for legacy purpose + * @{ + */ +#if defined (STM32H7) || defined (STM32G4) || defined (STM32F3) +#define 
HAL_HRTIM_WaveformCounterStart_IT HAL_HRTIM_WaveformCountStart_IT +#define HAL_HRTIM_WaveformCounterStart_DMA HAL_HRTIM_WaveformCountStart_DMA +#define HAL_HRTIM_WaveformCounterStart HAL_HRTIM_WaveformCountStart +#define HAL_HRTIM_WaveformCounterStop_IT HAL_HRTIM_WaveformCountStop_IT +#define HAL_HRTIM_WaveformCounterStop_DMA HAL_HRTIM_WaveformCountStop_DMA +#define HAL_HRTIM_WaveformCounterStop HAL_HRTIM_WaveformCountStop +#endif +/** + * @} + */ + +/** @defgroup HAL_QSPI_Aliased_Macros HAL QSPI Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined (STM32L4) || defined (STM32F4) || defined (STM32F7) +#define HAL_QPSI_TIMEOUT_DEFAULT_VALUE HAL_QSPI_TIMEOUT_DEFAULT_VALUE +#endif /* STM32L4 || STM32F4 || STM32F7 */ +/** + * @} + */ + +/** @defgroup HAL_PPP_Aliased_Macros HAL PPP Aliased Macros maintained for legacy purpose + * @{ + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32_HAL_LEGACY */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal.h new file mode 100644 index 000000000..4df6fe312 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal.h @@ -0,0 +1,276 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal.h + * @author MCD Application Team + * @brief This file contains all the functions prototypes for the HAL + * module driver. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_H +#define __STM32F7xx_HAL_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_conf.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup HAL + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup HAL_Exported_Constants HAL Exported Constants + * @{ + */ + +/** @defgroup HAL_TICK_FREQ Tick Frequency + * @{ + */ +typedef enum +{ + HAL_TICK_FREQ_10HZ = 100U, + HAL_TICK_FREQ_100HZ = 10U, + HAL_TICK_FREQ_1KHZ = 1U, + HAL_TICK_FREQ_DEFAULT = HAL_TICK_FREQ_1KHZ +} HAL_TickFreqTypeDef; +/** + * @} + */ + +/** @defgroup SYSCFG_BootMode Boot Mode + * @{ + */ +#define SYSCFG_MEM_BOOT_ADD0 ((uint32_t)0x00000000U) +#define SYSCFG_MEM_BOOT_ADD1 SYSCFG_MEMRMP_MEM_BOOT +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup HAL_Exported_Macros HAL Exported Macros + * @{ + */ + +/** @brief Freeze/Unfreeze Peripherals in Debug mode + */ +#define __HAL_DBGMCU_FREEZE_TIM2() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM2_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM3() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM3_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM4() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM4_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM5() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM5_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM6() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM6_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM7() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM7_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM12() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM12_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM13() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM13_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM14() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM14_STOP)) +#define __HAL_DBGMCU_FREEZE_LPTIM1() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_LPTIM1_STOP)) +#define __HAL_DBGMCU_FREEZE_RTC() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_RTC_STOP)) +#define __HAL_DBGMCU_FREEZE_WWDG() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_WWDG_STOP)) +#define __HAL_DBGMCU_FREEZE_IWDG() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_IWDG_STOP)) +#define __HAL_DBGMCU_FREEZE_I2C1_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C1_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_I2C2_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C2_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_I2C3_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_I2C4_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C4_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_CAN1() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_CAN1_STOP)) +#define __HAL_DBGMCU_FREEZE_CAN2() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_CAN2_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM1() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM1_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM8() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM8_STOP)) +#define 
__HAL_DBGMCU_FREEZE_TIM9() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM9_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM10() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM10_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM11() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM11_STOP)) + +#define __HAL_DBGMCU_UNFREEZE_TIM2() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM2_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM3() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM3_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM4() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM4_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM5() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM5_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM6() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM6_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM7() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM7_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM12() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM12_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM13() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM13_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM14() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM14_STOP)) +#define __HAL_DBGMCU_UNFREEZE_LPTIM1() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_LPTIM1_STOP)) +#define __HAL_DBGMCU_UNFREEZE_RTC() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_RTC_STOP)) +#define __HAL_DBGMCU_UNFREEZE_WWDG() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_WWDG_STOP)) +#define __HAL_DBGMCU_UNFREEZE_IWDG() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_IWDG_STOP)) +#define __HAL_DBGMCU_UNFREEZE_I2C1_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C1_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_I2C2_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C2_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_I2C3_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_I2C4_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C4_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_CAN1() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_CAN1_STOP)) +#define __HAL_DBGMCU_UNFREEZE_CAN2() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_CAN2_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM1() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM1_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM8() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM8_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM9() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM9_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM10() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM10_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM11() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM11_STOP)) + + +/** @brief FMC (NOR/RAM) mapped at 0x60000000 and SDRAM mapped at 0xC0000000 + */ +#define __HAL_SYSCFG_REMAPMEMORY_FMC() (SYSCFG->MEMRMP &= ~(SYSCFG_MEMRMP_SWP_FMC)) + + +/** @brief FMC/SDRAM mapped at 0x60000000 (NOR/RAM) mapped at 0xC0000000 + */ +#define __HAL_SYSCFG_REMAPMEMORY_FMC_SDRAM() do {SYSCFG->MEMRMP &= ~(SYSCFG_MEMRMP_SWP_FMC);\ + SYSCFG->MEMRMP |= (SYSCFG_MEMRMP_SWP_FMC_0);\ + }while(0); +/** + * @brief Return the memory boot mapping as configured by user. + * @retval The boot mode as configured by user. The returned value can be one + * of the following values: + * @arg @ref SYSCFG_MEM_BOOT_ADD0 + * @arg @ref SYSCFG_MEM_BOOT_ADD1 + */ +#define __HAL_SYSCFG_GET_BOOT_MODE() READ_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_MEM_BOOT) + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +/** @brief SYSCFG Break Cortex-M7 Lockup lock. + * Enable and lock the connection of Cortex-M7 LOCKUP (Hardfault) output to TIM1/8 Break input. 
+ * @note The selected configuration is locked and can be unlocked only by system reset. + */ +#define __HAL_SYSCFG_BREAK_LOCKUP_LOCK() SET_BIT(SYSCFG->CBR, SYSCFG_CBR_CLL) + +/** @brief SYSCFG Break PVD lock. + * Enable and lock the PVD connection to Timer1/8 Break input, as well as the PVDE and PLS[2:0] in the PWR_CR1 register. + * @note The selected configuration is locked and can be unlocked only by system reset. + */ +#define __HAL_SYSCFG_BREAK_PVD_LOCK() SET_BIT(SYSCFG->CBR, SYSCFG_CBR_PVDL) +#endif /* STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @} + */ + +/** @defgroup HAL_Private_Macros HAL Private Macros + * @{ + */ +#define IS_TICKFREQ(FREQ) (((FREQ) == HAL_TICK_FREQ_10HZ) || \ + ((FREQ) == HAL_TICK_FREQ_100HZ) || \ + ((FREQ) == HAL_TICK_FREQ_1KHZ)) +/** + * @} + */ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup HAL_Exported_Functions + * @{ + */ +/** @addtogroup HAL_Exported_Functions_Group1 + * @{ + */ +/* Initialization and Configuration functions ******************************/ +HAL_StatusTypeDef HAL_Init(void); +HAL_StatusTypeDef HAL_DeInit(void); +void HAL_MspInit(void); +void HAL_MspDeInit(void); +HAL_StatusTypeDef HAL_InitTick(uint32_t TickPriority); +/** + * @} + */ + + /* Exported variables ---------------------------------------------------------*/ +/** @addtogroup HAL_Exported_Variables + * @{ + */ +extern __IO uint32_t uwTick; +extern uint32_t uwTickPrio; +extern HAL_TickFreqTypeDef uwTickFreq; +/** + * @} + */ + +/** @addtogroup HAL_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions ************************************************/ +void HAL_IncTick(void); +void HAL_Delay(uint32_t Delay); +uint32_t HAL_GetTick(void); +uint32_t HAL_GetTickPrio(void); +HAL_StatusTypeDef HAL_SetTickFreq(HAL_TickFreqTypeDef Freq); +HAL_TickFreqTypeDef HAL_GetTickFreq(void); +void HAL_SuspendTick(void); +void HAL_ResumeTick(void); +uint32_t HAL_GetHalVersion(void); +uint32_t HAL_GetREVID(void); +uint32_t HAL_GetDEVID(void); +uint32_t HAL_GetUIDw0(void); +uint32_t HAL_GetUIDw1(void); +uint32_t HAL_GetUIDw2(void); +void HAL_DBGMCU_EnableDBGSleepMode(void); +void HAL_DBGMCU_DisableDBGSleepMode(void); +void HAL_DBGMCU_EnableDBGStopMode(void); +void HAL_DBGMCU_DisableDBGStopMode(void); +void HAL_DBGMCU_EnableDBGStandbyMode(void); +void HAL_DBGMCU_DisableDBGStandbyMode(void); +void HAL_EnableCompensationCell(void); +void HAL_DisableCompensationCell(void); +void HAL_EnableFMCMemorySwapping(void); +void HAL_DisableFMCMemorySwapping(void); +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +void HAL_EnableMemorySwappingBank(void); +void HAL_DisableMemorySwappingBank(void); +#endif /* STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup HAL_Private_Variables HAL Private Variables + * @{ + */ +/** + * @} + */ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup HAL_Private_Constants HAL Private Constants + * @{ + */ +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** + * @} + */ + +/** + * @} + */ + 
+#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_adc.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_adc.h new file mode 100644 index 000000000..2d882c025 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_adc.h @@ -0,0 +1,959 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_adc.h + * @author MCD Application Team + * @brief Header file of ADC HAL extension module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_ADC_H +#define STM32F7xx_ADC_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup ADC + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup ADC_Exported_Types ADC Exported Types + * @{ + */ + +/** + * @brief Structure definition of ADC and regular group initialization + * @note Parameters of this structure are shared within 2 scopes: + * - Scope entire ADC (affects regular and injected groups): ClockPrescaler, Resolution, ScanConvMode, DataAlign, ScanConvMode, EOCSelection, LowPowerAutoWait, LowPowerAutoPowerOff, ChannelsBank. + * - Scope regular group: ContinuousConvMode, NbrOfConversion, DiscontinuousConvMode, NbrOfDiscConversion, ExternalTrigConvEdge, ExternalTrigConv. + * @note The setting of these parameters with function HAL_ADC_Init() is conditioned to ADC state. + * ADC state can be either: + * - For all parameters: ADC disabled + * - For all parameters except 'Resolution', 'ScanConvMode', 'DiscontinuousConvMode', 'NbrOfDiscConversion' : ADC enabled without conversion on going on regular group. + * - For parameters 'ExternalTrigConv' and 'ExternalTrigConvEdge': ADC enabled, even with conversion on going. + * If ADC is not in the appropriate state to modify some parameters, these parameters setting is bypassed + * without error reporting (as it can be the expected behaviour in case of intended action to update another parameter (which fullfills the ADC state condition) on the fly). + */ +typedef struct +{ + uint32_t ClockPrescaler; /*!< Select ADC clock prescaler. The clock is common for + all the ADCs. + This parameter can be a value of @ref ADC_ClockPrescaler */ + uint32_t Resolution; /*!< Configures the ADC resolution. + This parameter can be a value of @ref ADC_Resolution */ + uint32_t DataAlign; /*!< Specifies ADC data alignment to right (MSB on register bit 11 and LSB on register bit 0) (default setting) + or to left (if regular group: MSB on register bit 15 and LSB on register bit 4, if injected group (MSB kept as signed value due to potential negative value after offset application): MSB on register bit 14 and LSB on register bit 3). + This parameter can be a value of @ref ADC_Data_Align */ + uint32_t ScanConvMode; /*!< Configures the sequencer of regular and injected groups. + This parameter can be associated to parameter 'DiscontinuousConvMode' to have main sequence subdivided in successive parts. + If disabled: Conversion is performed in single mode (one channel converted, the one defined in rank 1). + Parameters 'NbrOfConversion' and 'InjectedNbrOfConversion' are discarded (equivalent to set to 1). + If enabled: Conversions are performed in sequence mode (multiple ranks defined by 'NbrOfConversion'/'InjectedNbrOfConversion' and each channel rank). + Scan direction is upward: from rank1 to rank 'n'. + This parameter can be a value of @ref ADC_Scan_mode. 
+ This parameter can be set to ENABLE or DISABLE */ + uint32_t EOCSelection; /*!< Specifies what EOC (End Of Conversion) flag is used for conversion by polling and interruption: end of conversion of each rank or complete sequence. + This parameter can be a value of @ref ADC_EOCSelection. + Note: For injected group, end of conversion (flag&IT) is raised only at the end of the sequence. + Therefore, if end of conversion is set to end of each conversion, injected group should not be used with interruption (HAL_ADCEx_InjectedStart_IT) + or polling (HAL_ADCEx_InjectedStart and HAL_ADCEx_InjectedPollForConversion). By the way, polling is still possible since driver will use an estimated timing for end of injected conversion. + Note: If overrun feature is intended to be used, use ADC in mode 'interruption' (function HAL_ADC_Start_IT() ) with parameter EOCSelection set to end of each conversion or in mode 'transfer by DMA' (function HAL_ADC_Start_DMA()). + If overrun feature is intended to be bypassed, use ADC in mode 'polling' or 'interruption' with parameter EOCSelection must be set to end of sequence */ + uint32_t ContinuousConvMode; /*!< Specifies whether the conversion is performed in single mode (one conversion) or continuous mode for regular group, + after the selected trigger occurred (software start or external trigger). + This parameter can be set to ENABLE or DISABLE. */ + uint32_t NbrOfConversion; /*!< Specifies the number of ranks that will be converted within the regular group sequencer. + To use regular group sequencer and convert several ranks, parameter 'ScanConvMode' must be enabled. + This parameter must be a number between Min_Data = 1 and Max_Data = 16. */ + FunctionalState DiscontinuousConvMode; /*!< Specifies whether the conversions sequence of regular group is performed in Complete-sequence/Discontinuous-sequence (main sequence subdivided in successive parts). + Discontinuous mode is used only if sequencer is enabled (parameter 'ScanConvMode'). If sequencer is disabled, this parameter is discarded. + Discontinuous mode can be enabled only if continuous mode is disabled. If continuous mode is enabled, this parameter setting is discarded. + This parameter can be set to ENABLE or DISABLE. */ + uint32_t NbrOfDiscConversion; /*!< Specifies the number of discontinuous conversions in which the main sequence of regular group (parameter NbrOfConversion) will be subdivided. + If parameter 'DiscontinuousConvMode' is disabled, this parameter is discarded. + This parameter must be a number between Min_Data = 1 and Max_Data = 8. */ + uint32_t ExternalTrigConv; /*!< Selects the external event used to trigger the conversion start of regular group. + If set to ADC_SOFTWARE_START, external triggers are disabled. + If set to external trigger source, triggering is on event rising edge by default. + This parameter can be a value of @ref ADC_External_trigger_Source_Regular */ + uint32_t ExternalTrigConvEdge; /*!< Selects the external trigger edge of regular group. + If trigger is set to ADC_SOFTWARE_START, this parameter is discarded. + This parameter can be a value of @ref ADC_External_trigger_edge_Regular */ + FunctionalState DMAContinuousRequests; /*!< Specifies whether the DMA requests are performed in one shot mode (DMA transfer stop when number of conversions is reached) + or in Continuous mode (DMA transfer unlimited, whatever number of conversions). + Note: In continuous mode, DMA must be configured in circular mode. 
Otherwise an overrun will be triggered when DMA buffer maximum pointer is reached. + Note: This parameter must be modified when no conversion is on going on both regular and injected groups (ADC disabled, or ADC enabled without continuous mode or external trigger that could launch a conversion). + This parameter can be set to ENABLE or DISABLE. */ +}ADC_InitTypeDef; + + + +/** + * @brief Structure definition of ADC channel for regular group + * @note The setting of these parameters with function HAL_ADC_ConfigChannel() is conditioned to ADC state. + * ADC can be either disabled or enabled without conversion on going on regular group. + */ +typedef struct +{ + uint32_t Channel; /*!< Specifies the channel to configure into ADC regular group. + This parameter can be a value of @ref ADC_channels */ + uint32_t Rank; /*!< Specifies the rank in the regular group sequencer. + This parameter must be a number between Min_Data = 1 and Max_Data = 16 + This parameter can be a value of @ref ADC_regular_rank */ + uint32_t SamplingTime; /*!< Sampling time value to be set for the selected channel. + Unit: ADC clock cycles + Conversion time is the addition of sampling time and processing time (12 ADC clock cycles at ADC resolution 12 bits, 11 cycles at 10 bits, 9 cycles at 8 bits, 7 cycles at 6 bits). + This parameter can be a value of @ref ADC_sampling_times + Caution: This parameter updates the parameter property of the channel, that can be used into regular and/or injected groups. + If this same channel has been previously configured in the other group (regular/injected), it will be updated to last setting. + Note: In case of usage of internal measurement channels (VrefInt/Vbat/TempSensor), + sampling time constraints must be respected (sampling time can be adjusted in function of ADC clock frequency and sampling time setting) + Refer to device datasheet for timings values, parameters TS_vrefint, TS_temp (values rough order: 4us min). */ + uint32_t Offset; /*!< Reserved for future use, can be set to 0 */ +}ADC_ChannelConfTypeDef; + +/** + * @brief ADC Configuration multi-mode structure definition + */ +typedef struct +{ + uint32_t WatchdogMode; /*!< Configures the ADC analog watchdog mode. + This parameter can be a value of @ref ADC_analog_watchdog_selection */ + uint32_t HighThreshold; /*!< Configures the ADC analog watchdog High threshold value. + This parameter must be a 12-bit value. */ + uint32_t LowThreshold; /*!< Configures the ADC analog watchdog High threshold value. + This parameter must be a 12-bit value. */ + uint32_t Channel; /*!< Configures ADC channel for the analog watchdog. + This parameter has an effect only if watchdog mode is configured on single channel + This parameter can be a value of @ref ADC_channels */ + FunctionalState ITMode; /*!< Specifies whether the analog watchdog is configured + is interrupt mode or in polling mode. 
+ This parameter can be set to ENABLE or DISABLE */ + uint32_t WatchdogNumber; /*!< Reserved for future use, can be set to 0 */ +}ADC_AnalogWDGConfTypeDef; + +/** + * @brief HAL ADC state machine: ADC states definition (bitfields) + */ +/* States of ADC global scope */ +#define HAL_ADC_STATE_RESET ((uint32_t)0x00000000U) /*!< ADC not yet initialized or disabled */ +#define HAL_ADC_STATE_READY ((uint32_t)0x00000001U) /*!< ADC peripheral ready for use */ +#define HAL_ADC_STATE_BUSY_INTERNAL ((uint32_t)0x00000002U) /*!< ADC is busy to internal process (initialization, calibration) */ +#define HAL_ADC_STATE_TIMEOUT ((uint32_t)0x00000004U) /*!< TimeOut occurrence */ + +/* States of ADC errors */ +#define HAL_ADC_STATE_ERROR_INTERNAL ((uint32_t)0x00000010U) /*!< Internal error occurrence */ +#define HAL_ADC_STATE_ERROR_CONFIG ((uint32_t)0x00000020U) /*!< Configuration error occurrence */ +#define HAL_ADC_STATE_ERROR_DMA ((uint32_t)0x00000040U) /*!< DMA error occurrence */ + +/* States of ADC group regular */ +#define HAL_ADC_STATE_REG_BUSY ((uint32_t)0x00000100U) /*!< A conversion on group regular is ongoing or can occur (either by continuous mode, + external trigger, low power auto power-on (if feature available), multimode ADC master control (if feature available)) */ +#define HAL_ADC_STATE_REG_EOC ((uint32_t)0x00000200U) /*!< Conversion data available on group regular */ +#define HAL_ADC_STATE_REG_OVR ((uint32_t)0x00000400U) /*!< Overrun occurrence */ + +/* States of ADC group injected */ +#define HAL_ADC_STATE_INJ_BUSY ((uint32_t)0x00001000U) /*!< A conversion on group injected is ongoing or can occur (either by auto-injection mode, + external trigger, low power auto power-on (if feature available), multimode ADC master control (if feature available)) */ +#define HAL_ADC_STATE_INJ_EOC ((uint32_t)0x00002000U) /*!< Conversion data available on group injected */ + +/* States of ADC analog watchdogs */ +#define HAL_ADC_STATE_AWD1 ((uint32_t)0x00010000U) /*!< Out-of-window occurrence of analog watchdog 1 */ +#define HAL_ADC_STATE_AWD2 ((uint32_t)0x00020000U) /*!< Not available on STM32F7 device: Out-of-window occurrence of analog watchdog 2 */ +#define HAL_ADC_STATE_AWD3 ((uint32_t)0x00040000U) /*!< Not available on STM32F7 device: Out-of-window occurrence of analog watchdog 3 */ + +/* States of ADC multi-mode */ +#define HAL_ADC_STATE_MULTIMODE_SLAVE ((uint32_t)0x00100000U) /*!< Not available on STM32F7 device: ADC in multimode slave state, controlled by another ADC master ( */ + + +/** + * @brief ADC handle Structure definition + */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) +typedef struct __ADC_HandleTypeDef +#else +typedef struct +#endif +{ + ADC_TypeDef *Instance; /*!< Register base address */ + + ADC_InitTypeDef Init; /*!< ADC required parameters */ + + __IO uint32_t NbrOfCurrentConversionRank; /*!< ADC number of current conversion rank */ + + DMA_HandleTypeDef *DMA_Handle; /*!< Pointer DMA Handler */ + + HAL_LockTypeDef Lock; /*!< ADC locking object */ + + __IO uint32_t State; /*!< ADC communication state */ + + __IO uint32_t ErrorCode; /*!< ADC Error code */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + void (* ConvCpltCallback)(struct __ADC_HandleTypeDef *hadc); /*!< ADC conversion complete callback */ + void (* ConvHalfCpltCallback)(struct __ADC_HandleTypeDef *hadc); /*!< ADC conversion DMA half-transfer callback */ + void (* LevelOutOfWindowCallback)(struct __ADC_HandleTypeDef *hadc); /*!< ADC analog watchdog 1 callback */ + void (* ErrorCallback)(struct __ADC_HandleTypeDef *hadc); 
/*!< ADC error callback */ + void (* InjectedConvCpltCallback)(struct __ADC_HandleTypeDef *hadc); /*!< ADC group injected conversion complete callback */ + void (* MspInitCallback)(struct __ADC_HandleTypeDef *hadc); /*!< ADC Msp Init callback */ + void (* MspDeInitCallback)(struct __ADC_HandleTypeDef *hadc); /*!< ADC Msp DeInit callback */ +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ +}ADC_HandleTypeDef; + + +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) +/** + * @brief HAL ADC Callback ID enumeration definition + */ +typedef enum +{ + HAL_ADC_CONVERSION_COMPLETE_CB_ID = 0x00U, /*!< ADC conversion complete callback ID */ + HAL_ADC_CONVERSION_HALF_CB_ID = 0x01U, /*!< ADC conversion DMA half-transfer callback ID */ + HAL_ADC_LEVEL_OUT_OF_WINDOW_1_CB_ID = 0x02U, /*!< ADC analog watchdog 1 callback ID */ + HAL_ADC_ERROR_CB_ID = 0x03U, /*!< ADC error callback ID */ + HAL_ADC_INJ_CONVERSION_COMPLETE_CB_ID = 0x04U, /*!< ADC group injected conversion complete callback ID */ + HAL_ADC_MSPINIT_CB_ID = 0x05U, /*!< ADC Msp Init callback ID */ + HAL_ADC_MSPDEINIT_CB_ID = 0x06U /*!< ADC Msp DeInit callback ID */ +} HAL_ADC_CallbackIDTypeDef; + +/** + * @brief HAL ADC Callback pointer definition + */ +typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to a ADC callback function */ + +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup ADC_Exported_Constants ADC Exported Constants + * @{ + */ + +/** @defgroup ADC_Error_Code ADC Error Code + * @{ + */ +#define HAL_ADC_ERROR_NONE ((uint32_t)0x00U) /*!< No error */ +#define HAL_ADC_ERROR_INTERNAL ((uint32_t)0x01U) /*!< ADC IP internal error: if problem of clocking, + enable/disable, erroneous state */ +#define HAL_ADC_ERROR_OVR ((uint32_t)0x02U) /*!< Overrun error */ +#define HAL_ADC_ERROR_DMA ((uint32_t)0x04U) /*!< DMA transfer error */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) +#define HAL_ADC_ERROR_INVALID_CALLBACK (0x10U) /*!< Invalid Callback error */ +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ +/** + * @} + */ + + +/** @defgroup ADC_ClockPrescaler ADC Clock Prescaler + * @{ + */ +#define ADC_CLOCK_SYNC_PCLK_DIV2 ((uint32_t)0x00000000U) +#define ADC_CLOCK_SYNC_PCLK_DIV4 ((uint32_t)ADC_CCR_ADCPRE_0) +#define ADC_CLOCK_SYNC_PCLK_DIV6 ((uint32_t)ADC_CCR_ADCPRE_1) +#define ADC_CLOCK_SYNC_PCLK_DIV8 ((uint32_t)ADC_CCR_ADCPRE) +/** + * @} + */ + +/** @defgroup ADC_delay_between_2_sampling_phases ADC Delay Between 2 Sampling Phases + * @{ + */ +#define ADC_TWOSAMPLINGDELAY_5CYCLES ((uint32_t)0x00000000U) +#define ADC_TWOSAMPLINGDELAY_6CYCLES ((uint32_t)ADC_CCR_DELAY_0) +#define ADC_TWOSAMPLINGDELAY_7CYCLES ((uint32_t)ADC_CCR_DELAY_1) +#define ADC_TWOSAMPLINGDELAY_8CYCLES ((uint32_t)(ADC_CCR_DELAY_1 | ADC_CCR_DELAY_0)) +#define ADC_TWOSAMPLINGDELAY_9CYCLES ((uint32_t)ADC_CCR_DELAY_2) +#define ADC_TWOSAMPLINGDELAY_10CYCLES ((uint32_t)(ADC_CCR_DELAY_2 | ADC_CCR_DELAY_0)) +#define ADC_TWOSAMPLINGDELAY_11CYCLES ((uint32_t)(ADC_CCR_DELAY_2 | ADC_CCR_DELAY_1)) +#define ADC_TWOSAMPLINGDELAY_12CYCLES ((uint32_t)(ADC_CCR_DELAY_2 | ADC_CCR_DELAY_1 | ADC_CCR_DELAY_0)) +#define ADC_TWOSAMPLINGDELAY_13CYCLES ((uint32_t)ADC_CCR_DELAY_3) +#define ADC_TWOSAMPLINGDELAY_14CYCLES ((uint32_t)(ADC_CCR_DELAY_3 | ADC_CCR_DELAY_0)) +#define ADC_TWOSAMPLINGDELAY_15CYCLES ((uint32_t)(ADC_CCR_DELAY_3 | ADC_CCR_DELAY_1)) +#define ADC_TWOSAMPLINGDELAY_16CYCLES ((uint32_t)(ADC_CCR_DELAY_3 | ADC_CCR_DELAY_1 | ADC_CCR_DELAY_0)) +#define 
ADC_TWOSAMPLINGDELAY_17CYCLES ((uint32_t)(ADC_CCR_DELAY_3 | ADC_CCR_DELAY_2)) +#define ADC_TWOSAMPLINGDELAY_18CYCLES ((uint32_t)(ADC_CCR_DELAY_3 | ADC_CCR_DELAY_2 | ADC_CCR_DELAY_0)) +#define ADC_TWOSAMPLINGDELAY_19CYCLES ((uint32_t)(ADC_CCR_DELAY_3 | ADC_CCR_DELAY_2 | ADC_CCR_DELAY_1)) +#define ADC_TWOSAMPLINGDELAY_20CYCLES ((uint32_t)ADC_CCR_DELAY) +/** + * @} + */ + +/** @defgroup ADC_Resolution ADC Resolution + * @{ + */ +#define ADC_RESOLUTION_12B ((uint32_t)0x00000000U) +#define ADC_RESOLUTION_10B ((uint32_t)ADC_CR1_RES_0) +#define ADC_RESOLUTION_8B ((uint32_t)ADC_CR1_RES_1) +#define ADC_RESOLUTION_6B ((uint32_t)ADC_CR1_RES) +/** + * @} + */ + +/** @defgroup ADC_External_trigger_edge_Regular ADC External Trigger Edge Regular + * @{ + */ +#define ADC_EXTERNALTRIGCONVEDGE_NONE ((uint32_t)0x00000000U) +#define ADC_EXTERNALTRIGCONVEDGE_RISING ((uint32_t)ADC_CR2_EXTEN_0) +#define ADC_EXTERNALTRIGCONVEDGE_FALLING ((uint32_t)ADC_CR2_EXTEN_1) +#define ADC_EXTERNALTRIGCONVEDGE_RISINGFALLING ((uint32_t)ADC_CR2_EXTEN) +/** + * @} + */ + +/** @defgroup ADC_External_trigger_Source_Regular ADC External Trigger Source Regular + * @{ + */ +/* Note: Parameter ADC_SOFTWARE_START is a software parameter used for */ +/* compatibility with other STM32 devices. */ + + +#define ADC_EXTERNALTRIGCONV_T1_CC1 ((uint32_t)0x00000000U) +#define ADC_EXTERNALTRIGCONV_T1_CC2 ((uint32_t)ADC_CR2_EXTSEL_0) +#define ADC_EXTERNALTRIGCONV_T1_CC3 ((uint32_t)ADC_CR2_EXTSEL_1) +#define ADC_EXTERNALTRIGCONV_T2_CC2 ((uint32_t)(ADC_CR2_EXTSEL_1 | ADC_CR2_EXTSEL_0)) +#define ADC_EXTERNALTRIGCONV_T5_TRGO ((uint32_t)ADC_CR2_EXTSEL_2) +#define ADC_EXTERNALTRIGCONV_T4_CC4 ((uint32_t)(ADC_CR2_EXTSEL_2 | ADC_CR2_EXTSEL_0)) +#define ADC_EXTERNALTRIGCONV_T3_CC4 ((uint32_t)(ADC_CR2_EXTSEL_2 | ADC_CR2_EXTSEL_1)) +#define ADC_EXTERNALTRIGCONV_T8_TRGO ((uint32_t)(ADC_CR2_EXTSEL_2 | ADC_CR2_EXTSEL_1 | ADC_CR2_EXTSEL_0)) +#define ADC_EXTERNALTRIGCONV_T8_TRGO2 ((uint32_t)ADC_CR2_EXTSEL_3) +#define ADC_EXTERNALTRIGCONV_T1_TRGO ((uint32_t)(ADC_CR2_EXTSEL_3 | ADC_CR2_EXTSEL_0)) +#define ADC_EXTERNALTRIGCONV_T1_TRGO2 ((uint32_t)(ADC_CR2_EXTSEL_3 | ADC_CR2_EXTSEL_1)) +#define ADC_EXTERNALTRIGCONV_T2_TRGO ((uint32_t)(ADC_CR2_EXTSEL_3 | ADC_CR2_EXTSEL_1 | ADC_CR2_EXTSEL_0)) +#define ADC_EXTERNALTRIGCONV_T4_TRGO ((uint32_t)(ADC_CR2_EXTSEL_3 | ADC_CR2_EXTSEL_2)) +#define ADC_EXTERNALTRIGCONV_T6_TRGO ((uint32_t)(ADC_CR2_EXTSEL_3 | ADC_CR2_EXTSEL_2 | ADC_CR2_EXTSEL_0)) + +#define ADC_EXTERNALTRIGCONV_EXT_IT11 ((uint32_t)ADC_CR2_EXTSEL) +#define ADC_SOFTWARE_START ((uint32_t)ADC_CR2_EXTSEL + 1) + +/** + * @} + */ + +/** @defgroup ADC_Data_Align ADC Data Align + * @{ + */ +#define ADC_DATAALIGN_RIGHT ((uint32_t)0x00000000U) +#define ADC_DATAALIGN_LEFT ((uint32_t)ADC_CR2_ALIGN) +/** + * @} + */ + +/** @defgroup ADC_Scan_mode ADC sequencer scan mode + * @{ + */ +#define ADC_SCAN_DISABLE ((uint32_t)0x00000000) /*!< Scan mode disabled */ +#define ADC_SCAN_ENABLE ((uint32_t)0x00000001) /*!< Scan mode enabled */ +/** + * @} + */ + +/** @defgroup ADC_regular_rank ADC group regular sequencer rank + * @{ + */ +#define ADC_REGULAR_RANK_1 ((uint32_t)0x00000001) /*!< ADC regular conversion rank 1 */ +#define ADC_REGULAR_RANK_2 ((uint32_t)0x00000002) /*!< ADC regular conversion rank 2 */ +#define ADC_REGULAR_RANK_3 ((uint32_t)0x00000003) /*!< ADC regular conversion rank 3 */ +#define ADC_REGULAR_RANK_4 ((uint32_t)0x00000004) /*!< ADC regular conversion rank 4 */ +#define ADC_REGULAR_RANK_5 ((uint32_t)0x00000005) /*!< ADC regular conversion rank 5 */ +#define 
ADC_REGULAR_RANK_6 ((uint32_t)0x00000006) /*!< ADC regular conversion rank 6 */ +#define ADC_REGULAR_RANK_7 ((uint32_t)0x00000007) /*!< ADC regular conversion rank 7 */ +#define ADC_REGULAR_RANK_8 ((uint32_t)0x00000008) /*!< ADC regular conversion rank 8 */ +#define ADC_REGULAR_RANK_9 ((uint32_t)0x00000009) /*!< ADC regular conversion rank 9 */ +#define ADC_REGULAR_RANK_10 ((uint32_t)0x0000000A) /*!< ADC regular conversion rank 10 */ +#define ADC_REGULAR_RANK_11 ((uint32_t)0x0000000B) /*!< ADC regular conversion rank 11 */ +#define ADC_REGULAR_RANK_12 ((uint32_t)0x0000000C) /*!< ADC regular conversion rank 12 */ +#define ADC_REGULAR_RANK_13 ((uint32_t)0x0000000D) /*!< ADC regular conversion rank 13 */ +#define ADC_REGULAR_RANK_14 ((uint32_t)0x0000000E) /*!< ADC regular conversion rank 14 */ +#define ADC_REGULAR_RANK_15 ((uint32_t)0x0000000F) /*!< ADC regular conversion rank 15 */ +#define ADC_REGULAR_RANK_16 ((uint32_t)0x00000010) /*!< ADC regular conversion rank 16 */ +/** + * @} + */ + +/** @defgroup ADC_channels ADC Common Channels + * @{ + */ +#define ADC_CHANNEL_0 ((uint32_t)0x00000000U) +#define ADC_CHANNEL_1 ((uint32_t)ADC_CR1_AWDCH_0) +#define ADC_CHANNEL_2 ((uint32_t)ADC_CR1_AWDCH_1) +#define ADC_CHANNEL_3 ((uint32_t)(ADC_CR1_AWDCH_1 | ADC_CR1_AWDCH_0)) +#define ADC_CHANNEL_4 ((uint32_t)ADC_CR1_AWDCH_2) +#define ADC_CHANNEL_5 ((uint32_t)(ADC_CR1_AWDCH_2 | ADC_CR1_AWDCH_0)) +#define ADC_CHANNEL_6 ((uint32_t)(ADC_CR1_AWDCH_2 | ADC_CR1_AWDCH_1)) +#define ADC_CHANNEL_7 ((uint32_t)(ADC_CR1_AWDCH_2 | ADC_CR1_AWDCH_1 | ADC_CR1_AWDCH_0)) +#define ADC_CHANNEL_8 ((uint32_t)ADC_CR1_AWDCH_3) +#define ADC_CHANNEL_9 ((uint32_t)(ADC_CR1_AWDCH_3 | ADC_CR1_AWDCH_0)) +#define ADC_CHANNEL_10 ((uint32_t)(ADC_CR1_AWDCH_3 | ADC_CR1_AWDCH_1)) +#define ADC_CHANNEL_11 ((uint32_t)(ADC_CR1_AWDCH_3 | ADC_CR1_AWDCH_1 | ADC_CR1_AWDCH_0)) +#define ADC_CHANNEL_12 ((uint32_t)(ADC_CR1_AWDCH_3 | ADC_CR1_AWDCH_2)) +#define ADC_CHANNEL_13 ((uint32_t)(ADC_CR1_AWDCH_3 | ADC_CR1_AWDCH_2 | ADC_CR1_AWDCH_0)) +#define ADC_CHANNEL_14 ((uint32_t)(ADC_CR1_AWDCH_3 | ADC_CR1_AWDCH_2 | ADC_CR1_AWDCH_1)) +#define ADC_CHANNEL_15 ((uint32_t)(ADC_CR1_AWDCH_3 | ADC_CR1_AWDCH_2 | ADC_CR1_AWDCH_1 | ADC_CR1_AWDCH_0)) +#define ADC_CHANNEL_16 ((uint32_t)ADC_CR1_AWDCH_4) +#define ADC_CHANNEL_17 ((uint32_t)(ADC_CR1_AWDCH_4 | ADC_CR1_AWDCH_0)) +#define ADC_CHANNEL_18 ((uint32_t)(ADC_CR1_AWDCH_4 | ADC_CR1_AWDCH_1)) + +#define ADC_INTERNAL_NONE 0x80000000U +#define ADC_CHANNEL_VREFINT ((uint32_t)ADC_CHANNEL_17) +#define ADC_CHANNEL_VBAT ((uint32_t)ADC_CHANNEL_18) +#define ADC_CHANNEL_TEMPSENSOR ((uint32_t)(ADC_CHANNEL_18 | 0x10000000U)) +/** + * @} + */ + +/** @defgroup ADC_sampling_times ADC Sampling Times + * @{ + */ +#define ADC_SAMPLETIME_3CYCLES ((uint32_t)0x00000000U) +#define ADC_SAMPLETIME_15CYCLES ((uint32_t)ADC_SMPR1_SMP10_0) +#define ADC_SAMPLETIME_28CYCLES ((uint32_t)ADC_SMPR1_SMP10_1) +#define ADC_SAMPLETIME_56CYCLES ((uint32_t)(ADC_SMPR1_SMP10_1 | ADC_SMPR1_SMP10_0)) +#define ADC_SAMPLETIME_84CYCLES ((uint32_t)ADC_SMPR1_SMP10_2) +#define ADC_SAMPLETIME_112CYCLES ((uint32_t)(ADC_SMPR1_SMP10_2 | ADC_SMPR1_SMP10_0)) +#define ADC_SAMPLETIME_144CYCLES ((uint32_t)(ADC_SMPR1_SMP10_2 | ADC_SMPR1_SMP10_1)) +#define ADC_SAMPLETIME_480CYCLES ((uint32_t)ADC_SMPR1_SMP10) +/** + * @} + */ + + /** @defgroup ADC_EOCSelection ADC EOC Selection + * @{ + */ +#define ADC_EOC_SEQ_CONV ((uint32_t)0x00000000U) +#define ADC_EOC_SINGLE_CONV ((uint32_t)0x00000001U) +#define ADC_EOC_SINGLE_SEQ_CONV ((uint32_t)0x00000002U) /*!< reserved for future use */ 
+/** + * @} + */ + +/** @defgroup ADC_Event_type ADC Event Type + * @{ + */ +#define ADC_AWD_EVENT ((uint32_t)ADC_FLAG_AWD) +#define ADC_OVR_EVENT ((uint32_t)ADC_FLAG_OVR) +/** + * @} + */ + +/** @defgroup ADC_analog_watchdog_selection ADC Analog Watchdog Selection + * @{ + */ +#define ADC_ANALOGWATCHDOG_SINGLE_REG ((uint32_t)(ADC_CR1_AWDSGL | ADC_CR1_AWDEN)) +#define ADC_ANALOGWATCHDOG_SINGLE_INJEC ((uint32_t)(ADC_CR1_AWDSGL | ADC_CR1_JAWDEN)) +#define ADC_ANALOGWATCHDOG_SINGLE_REGINJEC ((uint32_t)(ADC_CR1_AWDSGL | ADC_CR1_AWDEN | ADC_CR1_JAWDEN)) +#define ADC_ANALOGWATCHDOG_ALL_REG ((uint32_t)ADC_CR1_AWDEN) +#define ADC_ANALOGWATCHDOG_ALL_INJEC ((uint32_t)ADC_CR1_JAWDEN) +#define ADC_ANALOGWATCHDOG_ALL_REGINJEC ((uint32_t)(ADC_CR1_AWDEN | ADC_CR1_JAWDEN)) +#define ADC_ANALOGWATCHDOG_NONE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ADC_interrupts_definition ADC Interrupts Definition + * @{ + */ +#define ADC_IT_EOC ((uint32_t)ADC_CR1_EOCIE) +#define ADC_IT_AWD ((uint32_t)ADC_CR1_AWDIE) +#define ADC_IT_JEOC ((uint32_t)ADC_CR1_JEOCIE) +#define ADC_IT_OVR ((uint32_t)ADC_CR1_OVRIE) +/** + * @} + */ + +/** @defgroup ADC_flags_definition ADC Flags Definition + * @{ + */ +#define ADC_FLAG_AWD ((uint32_t)ADC_SR_AWD) +#define ADC_FLAG_EOC ((uint32_t)ADC_SR_EOC) +#define ADC_FLAG_JEOC ((uint32_t)ADC_SR_JEOC) +#define ADC_FLAG_JSTRT ((uint32_t)ADC_SR_JSTRT) +#define ADC_FLAG_STRT ((uint32_t)ADC_SR_STRT) +#define ADC_FLAG_OVR ((uint32_t)ADC_SR_OVR) +/** + * @} + */ + +/** @defgroup ADC_channels_type ADC Channels Type + * @{ + */ +#define ADC_ALL_CHANNELS ((uint32_t)0x00000001U) +#define ADC_REGULAR_CHANNELS ((uint32_t)0x00000002U) /*!< reserved for future use */ +#define ADC_INJECTED_CHANNELS ((uint32_t)0x00000003U) /*!< reserved for future use */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup ADC_Exported_Macros ADC Exported Macros + * @{ + */ + +/** @brief Reset ADC handle state + * @param __HANDLE__ ADC handle + * @retval None + */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) +#define __HAL_ADC_RESET_HANDLE_STATE(__HANDLE__) \ + do{ \ + (__HANDLE__)->State = HAL_ADC_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_ADC_RESET_HANDLE_STATE(__HANDLE__) \ + ((__HANDLE__)->State = HAL_ADC_STATE_RESET) +#endif + +/** + * @brief Enable the ADC peripheral. + * @param __HANDLE__ ADC handle + * @retval None + */ +#define __HAL_ADC_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR2 |= ADC_CR2_ADON) + +/** + * @brief Disable the ADC peripheral. + * @param __HANDLE__ ADC handle + * @retval None + */ +#define __HAL_ADC_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR2 &= ~ADC_CR2_ADON) + +/** + * @brief Enable the ADC end of conversion interrupt. + * @param __HANDLE__ specifies the ADC Handle. + * @param __INTERRUPT__ ADC Interrupt. + * @retval None + */ +#define __HAL_ADC_ENABLE_IT(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->CR1) |= (__INTERRUPT__)) + +/** + * @brief Disable the ADC end of conversion interrupt. + * @param __HANDLE__ specifies the ADC Handle. + * @param __INTERRUPT__ ADC interrupt. + * @retval None + */ +#define __HAL_ADC_DISABLE_IT(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->CR1) &= ~(__INTERRUPT__)) + +/** @brief Check if the specified ADC interrupt source is enabled or disabled. + * @param __HANDLE__ specifies the ADC Handle. + * @param __INTERRUPT__ specifies the ADC interrupt source to check. 
+ * @retval The new state of __IT__ (TRUE or FALSE). + */ +#define __HAL_ADC_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->CR1 & (__INTERRUPT__)) == (__INTERRUPT__)) + +/** + * @brief Clear the ADC's pending flags. + * @param __HANDLE__ specifies the ADC Handle. + * @param __FLAG__ ADC flag. + * @retval None + */ +#define __HAL_ADC_CLEAR_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->SR) = ~(__FLAG__)) + +/** + * @brief Get the selected ADC's flag status. + * @param __HANDLE__ specifies the ADC Handle. + * @param __FLAG__ ADC flag. + * @retval None + */ +#define __HAL_ADC_GET_FLAG(__HANDLE__, __FLAG__) ((((__HANDLE__)->Instance->SR) & (__FLAG__)) == (__FLAG__)) + +/** + * @} + */ + +/* Include ADC HAL Extension module */ +#include "stm32f7xx_hal_adc_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup ADC_Exported_Functions + * @{ + */ + +/** @addtogroup ADC_Exported_Functions_Group1 + * @{ + */ +/* Initialization/de-initialization functions ***********************************/ +HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADC_DeInit(ADC_HandleTypeDef *hadc); +void HAL_ADC_MspInit(ADC_HandleTypeDef* hadc); +void HAL_ADC_MspDeInit(ADC_HandleTypeDef* hadc); + +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) +/* Callbacks Register/UnRegister functions ***********************************/ +HAL_StatusTypeDef HAL_ADC_RegisterCallback(ADC_HandleTypeDef *hadc, HAL_ADC_CallbackIDTypeDef CallbackID, pADC_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_ADC_UnRegisterCallback(ADC_HandleTypeDef *hadc, HAL_ADC_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @addtogroup ADC_Exported_Functions_Group2 + * @{ + */ +/* I/O operation functions ******************************************************/ +HAL_StatusTypeDef HAL_ADC_Start(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADC_Stop(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADC_PollForConversion(ADC_HandleTypeDef* hadc, uint32_t Timeout); + +HAL_StatusTypeDef HAL_ADC_PollForEvent(ADC_HandleTypeDef* hadc, uint32_t EventType, uint32_t Timeout); + +HAL_StatusTypeDef HAL_ADC_Start_IT(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADC_Stop_IT(ADC_HandleTypeDef* hadc); + +void HAL_ADC_IRQHandler(ADC_HandleTypeDef* hadc); + +HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, uint32_t Length); +HAL_StatusTypeDef HAL_ADC_Stop_DMA(ADC_HandleTypeDef* hadc); + +uint32_t HAL_ADC_GetValue(ADC_HandleTypeDef* hadc); + +void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef* hadc); +void HAL_ADC_ConvHalfCpltCallback(ADC_HandleTypeDef* hadc); +void HAL_ADC_LevelOutOfWindowCallback(ADC_HandleTypeDef* hadc); +void HAL_ADC_ErrorCallback(ADC_HandleTypeDef *hadc); +/** + * @} + */ + +/** @addtogroup ADC_Exported_Functions_Group3 + * @{ + */ +/* Peripheral Control functions *************************************************/ +HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef* hadc, ADC_ChannelConfTypeDef* sConfig); +HAL_StatusTypeDef HAL_ADC_AnalogWDGConfig(ADC_HandleTypeDef* hadc, ADC_AnalogWDGConfTypeDef* AnalogWDGConfig); +/** + * @} + */ + +/** @addtogroup ADC_Exported_Functions_Group4 + * @{ + */ +/* Peripheral State functions ***************************************************/ +uint32_t HAL_ADC_GetState(ADC_HandleTypeDef* hadc); +uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); +/** + * @} + */ + +/** + * @} + */ + +/* Private types 
-------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup ADC_Private_Constants ADC Private Constants + * @{ + */ +/* Delay for ADC stabilization time. */ +/* Maximum delay is 1us (refer to device datasheet, parameter tSTAB). */ +/* Unit: us */ +#define ADC_STAB_DELAY_US ((uint32_t) 3U) +/* Delay for temperature sensor stabilization time. */ +/* Maximum delay is 10us (refer to device datasheet, parameter tSTART). */ +/* Unit: us */ +#define ADC_TEMPSENSOR_DELAY_US ((uint32_t) 10U) +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup ADC_Private_Macros ADC Private Macros + * @{ + */ +/* Macro reserved for internal HAL driver usage, not intended to be used in + code of final user */ + +/** + * @brief Verification of ADC state: enabled or disabled + * @param __HANDLE__ ADC handle + * @retval SET (ADC enabled) or RESET (ADC disabled) + */ +#define ADC_IS_ENABLE(__HANDLE__) \ + ((( ((__HANDLE__)->Instance->SR & ADC_SR_ADONS) == ADC_SR_ADONS ) \ + ) ? SET : RESET) + +/** + * @brief Test if conversion trigger of regular group is software start + * or external trigger. + * @param __HANDLE__ ADC handle + * @retval SET (software start) or RESET (external trigger) + */ +#define ADC_IS_SOFTWARE_START_REGULAR(__HANDLE__) \ + (((__HANDLE__)->Instance->CR2 & ADC_CR2_EXTEN) == RESET) + +/** + * @brief Test if conversion trigger of injected group is software start + * or external trigger. + * @param __HANDLE__ ADC handle + * @retval SET (software start) or RESET (external trigger) + */ +#define ADC_IS_SOFTWARE_START_INJECTED(__HANDLE__) \ + (((__HANDLE__)->Instance->CR2 & ADC_CR2_JEXTEN) == RESET) + +/** + * @brief Simultaneously clears and sets specific bits of the handle State + * @note: ADC_STATE_CLR_SET() macro is merely aliased to generic macro MODIFY_REG(), + * the first parameter is the ADC handle State, the second parameter is the + * bit field to clear, the third and last parameter is the bit field to set. 
+ * @retval None + */ +#define ADC_STATE_CLR_SET MODIFY_REG + +/** + * @brief Clear ADC error code (set it to error code: "no error") + * @param __HANDLE__ ADC handle + * @retval None + */ +#define ADC_CLEAR_ERRORCODE(__HANDLE__) \ + ((__HANDLE__)->ErrorCode = HAL_ADC_ERROR_NONE) + +#define IS_ADC_CHANNEL(CHANNEL) (((CHANNEL) <= ADC_CHANNEL_18) || \ + ((CHANNEL) == ADC_CHANNEL_TEMPSENSOR) || \ + ((CHANNEL) == ADC_INTERNAL_NONE)) +#define IS_ADC_CLOCKPRESCALER(__ADC_CLOCK__) (((__ADC_CLOCK__) == ADC_CLOCK_SYNC_PCLK_DIV2) || \ + ((__ADC_CLOCK__) == ADC_CLOCK_SYNC_PCLK_DIV4) || \ + ((__ADC_CLOCK__) == ADC_CLOCK_SYNC_PCLK_DIV6) || \ + ((__ADC_CLOCK__) == ADC_CLOCK_SYNC_PCLK_DIV8)) +#define IS_ADC_SAMPLING_DELAY(__DELAY__) (((__DELAY__) == ADC_TWOSAMPLINGDELAY_5CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_6CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_7CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_8CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_9CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_10CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_11CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_12CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_13CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_14CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_15CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_16CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_17CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_18CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_19CYCLES) || \ + ((__DELAY__) == ADC_TWOSAMPLINGDELAY_20CYCLES)) +#define IS_ADC_RESOLUTION(__RESOLUTION__) (((__RESOLUTION__) == ADC_RESOLUTION_12B) || \ + ((__RESOLUTION__) == ADC_RESOLUTION_10B) || \ + ((__RESOLUTION__) == ADC_RESOLUTION_8B) || \ + ((__RESOLUTION__) == ADC_RESOLUTION_6B)) +#define IS_ADC_EXT_TRIG_EDGE(__EDGE__) (((__EDGE__) == ADC_EXTERNALTRIGCONVEDGE_NONE) || \ + ((__EDGE__) == ADC_EXTERNALTRIGCONVEDGE_RISING) || \ + ((__EDGE__) == ADC_EXTERNALTRIGCONVEDGE_FALLING) || \ + ((__EDGE__) == ADC_EXTERNALTRIGCONVEDGE_RISINGFALLING)) +#define IS_ADC_EXT_TRIG(__REGTRIG__) (((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T1_CC1) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T1_CC2) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T1_CC3) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T2_CC2) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T5_TRGO) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T4_CC4) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T3_CC4) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T8_TRGO) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T8_TRGO2) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T1_TRGO) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T1_TRGO2) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T2_TRGO) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T4_TRGO) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_T6_TRGO) || \ + ((__REGTRIG__) == ADC_EXTERNALTRIGCONV_EXT_IT11) || \ + ((__REGTRIG__) == ADC_SOFTWARE_START)) +#define IS_ADC_DATA_ALIGN(__ALIGN__) (((__ALIGN__) == ADC_DATAALIGN_RIGHT) || \ + ((__ALIGN__) == ADC_DATAALIGN_LEFT)) + + +#define IS_ADC_SAMPLE_TIME(__TIME__) (((__TIME__) == ADC_SAMPLETIME_3CYCLES) || \ + ((__TIME__) == ADC_SAMPLETIME_15CYCLES) || \ + ((__TIME__) == ADC_SAMPLETIME_28CYCLES) || \ + ((__TIME__) == ADC_SAMPLETIME_56CYCLES) || \ + ((__TIME__) == ADC_SAMPLETIME_84CYCLES) || \ + ((__TIME__) == ADC_SAMPLETIME_112CYCLES) || \ + ((__TIME__) == ADC_SAMPLETIME_144CYCLES) || \ + ((__TIME__) == ADC_SAMPLETIME_480CYCLES)) +#define IS_ADC_EOCSelection(__EOCSelection__) 
(((__EOCSelection__) == ADC_EOC_SINGLE_CONV) || \ + ((__EOCSelection__) == ADC_EOC_SEQ_CONV) || \ + ((__EOCSelection__) == ADC_EOC_SINGLE_SEQ_CONV)) +#define IS_ADC_EVENT_TYPE(__EVENT__) (((__EVENT__) == ADC_AWD_EVENT) || \ + ((__EVENT__) == ADC_OVR_EVENT)) +#define IS_ADC_ANALOG_WATCHDOG(__WATCHDOG__) (((__WATCHDOG__) == ADC_ANALOGWATCHDOG_SINGLE_REG) || \ + ((__WATCHDOG__) == ADC_ANALOGWATCHDOG_SINGLE_INJEC) || \ + ((__WATCHDOG__) == ADC_ANALOGWATCHDOG_SINGLE_REGINJEC) || \ + ((__WATCHDOG__) == ADC_ANALOGWATCHDOG_ALL_REG) || \ + ((__WATCHDOG__) == ADC_ANALOGWATCHDOG_ALL_INJEC) || \ + ((__WATCHDOG__) == ADC_ANALOGWATCHDOG_ALL_REGINJEC) || \ + ((__WATCHDOG__) == ADC_ANALOGWATCHDOG_NONE)) +#define IS_ADC_CHANNELS_TYPE(CHANNEL_TYPE) (((CHANNEL_TYPE) == ADC_ALL_CHANNELS) || \ + ((CHANNEL_TYPE) == ADC_REGULAR_CHANNELS) || \ + ((CHANNEL_TYPE) == ADC_INJECTED_CHANNELS)) + +#define IS_ADC_REGULAR_RANK(__RANK__) (((__RANK__) == ADC_REGULAR_RANK_1 ) || \ + ((__RANK__) == ADC_REGULAR_RANK_2 ) || \ + ((__RANK__) == ADC_REGULAR_RANK_3 ) || \ + ((__RANK__) == ADC_REGULAR_RANK_4 ) || \ + ((__RANK__) == ADC_REGULAR_RANK_5 ) || \ + ((__RANK__) == ADC_REGULAR_RANK_6 ) || \ + ((__RANK__) == ADC_REGULAR_RANK_7 ) || \ + ((__RANK__) == ADC_REGULAR_RANK_8 ) || \ + ((__RANK__) == ADC_REGULAR_RANK_9 ) || \ + ((__RANK__) == ADC_REGULAR_RANK_10) || \ + ((__RANK__) == ADC_REGULAR_RANK_11) || \ + ((__RANK__) == ADC_REGULAR_RANK_12) || \ + ((__RANK__) == ADC_REGULAR_RANK_13) || \ + ((__RANK__) == ADC_REGULAR_RANK_14) || \ + ((__RANK__) == ADC_REGULAR_RANK_15) || \ + ((__RANK__) == ADC_REGULAR_RANK_16)) + +#define IS_ADC_SCAN_MODE(__SCAN_MODE__) (((__SCAN_MODE__) == ADC_SCAN_DISABLE) || \ + ((__SCAN_MODE__) == ADC_SCAN_ENABLE)) + +#define IS_ADC_THRESHOLD(__THRESHOLD__) ((__THRESHOLD__) <= ((uint32_t)0xFFF)) +#define IS_ADC_REGULAR_LENGTH(__LENGTH__) (((__LENGTH__) >= ((uint32_t)1)) && ((__LENGTH__) <= ((uint32_t)16))) +#define IS_ADC_REGULAR_DISC_NUMBER(__NUMBER__) (((__NUMBER__) >= ((uint32_t)1)) && ((__NUMBER__) <= ((uint32_t)8))) +#define IS_ADC_RANGE(__RESOLUTION__, __ADC_VALUE__) \ + ((((__RESOLUTION__) == ADC_RESOLUTION_12B) && ((__ADC_VALUE__) <= ((uint32_t)0x0FFF))) || \ + (((__RESOLUTION__) == ADC_RESOLUTION_10B) && ((__ADC_VALUE__) <= ((uint32_t)0x03FF))) || \ + (((__RESOLUTION__) == ADC_RESOLUTION_8B) && ((__ADC_VALUE__) <= ((uint32_t)0x00FF))) || \ + (((__RESOLUTION__) == ADC_RESOLUTION_6B) && ((__ADC_VALUE__) <= ((uint32_t)0x003F)))) + +/** + * @brief Set ADC Regular channel sequence length. + * @param _NbrOfConversion_ Regular channel sequence length. + * @retval None + */ +#define ADC_SQR1(_NbrOfConversion_) (((_NbrOfConversion_) - (uint8_t)1) << 20) + +/** + * @brief Set the ADC's sample time for channel numbers between 10 and 18. + * @param _SAMPLETIME_ Sample time parameter. + * @param _CHANNELNB_ Channel number. + * @retval None + */ +#define ADC_SMPR1(_SAMPLETIME_, _CHANNELNB_) ((_SAMPLETIME_) << (3 * (((uint32_t)((uint16_t)(_CHANNELNB_))) - 10))) + +/** + * @brief Set the ADC's sample time for channel numbers between 0 and 9. + * @param _SAMPLETIME_ Sample time parameter. + * @param _CHANNELNB_ Channel number. + * @retval None + */ +#define ADC_SMPR2(_SAMPLETIME_, _CHANNELNB_) ((_SAMPLETIME_) << (3 * ((uint32_t)((uint16_t)(_CHANNELNB_))))) + +/** + * @brief Set the selected regular channel rank for rank between 1 and 6. + * @param _CHANNELNB_ Channel number. + * @param _RANKNB_ Rank number. 
+ * @retval None + */ +#define ADC_SQR3_RK(_CHANNELNB_, _RANKNB_) (((uint32_t)((uint16_t)(_CHANNELNB_))) << (5 * ((_RANKNB_) - 1))) + +/** + * @brief Set the selected regular channel rank for rank between 7 and 12. + * @param _CHANNELNB_ Channel number. + * @param _RANKNB_ Rank number. + * @retval None + */ +#define ADC_SQR2_RK(_CHANNELNB_, _RANKNB_) (((uint32_t)((uint16_t)(_CHANNELNB_))) << (5 * ((_RANKNB_) - 7))) + +/** + * @brief Set the selected regular channel rank for rank between 13 and 16. + * @param _CHANNELNB_ Channel number. + * @param _RANKNB_ Rank number. + * @retval None + */ +#define ADC_SQR1_RK(_CHANNELNB_, _RANKNB_) (((uint32_t)((uint16_t)(_CHANNELNB_))) << (5 * ((_RANKNB_) - 13))) + +/** + * @brief Enable ADC continuous conversion mode. + * @param _CONTINUOUS_MODE_ Continuous mode. + * @retval None + */ +#define ADC_CR2_CONTINUOUS(_CONTINUOUS_MODE_) ((_CONTINUOUS_MODE_) << 1) + +/** + * @brief Configures the number of discontinuous conversions for the regular group channels. + * @param _NBR_DISCONTINUOUSCONV_ Number of discontinuous conversions. + * @retval None + */ +#define ADC_CR1_DISCONTINUOUS(_NBR_DISCONTINUOUSCONV_) (((_NBR_DISCONTINUOUSCONV_) - 1) << ADC_CR1_DISCNUM_Pos) + +/** + * @brief Enable ADC scan mode. + * @param _SCANCONV_MODE_ Scan conversion mode. + * @retval None + */ +#define ADC_CR1_SCANCONV(_SCANCONV_MODE_) ((_SCANCONV_MODE_) << 8) + +/** + * @brief Enable the ADC end of conversion selection. + * @param _EOCSelection_MODE_ End of conversion selection mode. + * @retval None + */ +#define ADC_CR2_EOCSelection(_EOCSelection_MODE_) ((_EOCSelection_MODE_) << 10) + +/** + * @brief Enable the ADC DMA continuous request. + * @param _DMAContReq_MODE_ DMA continuous request mode. + * @retval None + */ +#define ADC_CR2_DMAContReq(_DMAContReq_MODE_) ((_DMAContReq_MODE_) << 9) + +/** + * @brief Return resolution bits in CR1 register. + * @param __HANDLE__ ADC handle + * @retval None + */ +#define ADC_GET_RESOLUTION(__HANDLE__) (((__HANDLE__)->Instance->CR1) & ADC_CR1_RES) + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup ADC_Private_Functions ADC Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_ADC_H */ + + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_adc_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_adc_ex.h new file mode 100644 index 000000000..cef8f49c3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_adc_ex.h @@ -0,0 +1,356 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_adc.h + * @author MCD Application Team + * @brief Header file of ADC HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_ADC_EX_H +#define STM32F7xx_ADC_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup ADCEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup ADCEx_Exported_Types ADC Exported Types + * @{ + */ + +/** + * @brief ADC Configuration injected Channel structure definition + * @note Parameters of this structure are shared within 2 scopes: + * - Scope channel: InjectedChannel, InjectedRank, InjectedSamplingTime, InjectedOffset + * - Scope injected group (affects all channels of injected group): InjectedNbrOfConversion, InjectedDiscontinuousConvMode, + * AutoInjectedConv, ExternalTrigInjecConvEdge, ExternalTrigInjecConv. + * @note The setting of these parameters with function HAL_ADCEx_InjectedConfigChannel() is conditioned to ADC state. + * ADC state can be either: + * - For all parameters: ADC disabled + * - For all except parameters 'InjectedDiscontinuousConvMode' and 'AutoInjectedConv': ADC enabled without conversion on going on injected group. + * - For parameters 'ExternalTrigInjecConv' and 'ExternalTrigInjecConvEdge': ADC enabled, even with conversion on going on injected group. + */ +typedef struct +{ + uint32_t InjectedChannel; /*!< Selection of ADC channel to configure + This parameter can be a value of @ref ADC_channels + Note: Depending on devices, some channels may not be available on package pins. Refer to device datasheet for channels availability. */ + uint32_t InjectedRank; /*!< Rank in the injected group sequencer + This parameter must be a value of @ref ADCEx_injected_rank + Note: In case of need to disable a channel or change order of conversion sequencer, rank containing a previous channel setting can be overwritten by the new channel setting (or parameter number of conversions can be adjusted) */ + uint32_t InjectedSamplingTime; /*!< Sampling time value to be set for the selected channel. + Unit: ADC clock cycles + Conversion time is the addition of sampling time and processing time (12 ADC clock cycles at ADC resolution 12 bits, 11 cycles at 10 bits, 9 cycles at 8 bits, 7 cycles at 6 bits). + This parameter can be a value of @ref ADC_sampling_times + Caution: This parameter updates the parameter property of the channel, that can be used into regular and/or injected groups. + If this same channel has been previously configured in the other group (regular/injected), it will be updated to last setting. + Note: In case of usage of internal measurement channels (VrefInt/Vbat/TempSensor), + sampling time constraints must be respected (sampling time can be adjusted in function of ADC clock frequency and sampling time setting) + Refer to device datasheet for timings values, parameters TS_vrefint, TS_temp (values rough order: 4us min). */ + uint32_t InjectedOffset; /*!< Defines the offset to be subtracted from the raw converted data (for channels set on injected group only). 
+ Offset value must be a positive number. + Depending of ADC resolution selected (12, 10, 8 or 6 bits), + this parameter must be a number between Min_Data = 0x000 and Max_Data = 0xFFF, 0x3FF, 0xFF or 0x3F respectively. */ + uint32_t InjectedNbrOfConversion; /*!< Specifies the number of ranks that will be converted within the injected group sequencer. + To use the injected group sequencer and convert several ranks, parameter 'ScanConvMode' must be enabled. + This parameter must be a number between Min_Data = 1 and Max_Data = 4. + Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to + configure a channel on injected group can impact the configuration of other channels previously set. */ + FunctionalState InjectedDiscontinuousConvMode; /*!< Specifies whether the conversions sequence of injected group is performed in Complete-sequence/Discontinuous-sequence (main sequence subdivided in successive parts). + Discontinuous mode is used only if sequencer is enabled (parameter 'ScanConvMode'). If sequencer is disabled, this parameter is discarded. + Discontinuous mode can be enabled only if continuous mode is disabled. If continuous mode is enabled, this parameter setting is discarded. + This parameter can be set to ENABLE or DISABLE. + Note: For injected group, number of discontinuous ranks increment is fixed to one-by-one. + Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to + configure a channel on injected group can impact the configuration of other channels previously set. */ + FunctionalState AutoInjectedConv; /*!< Enables or disables the selected ADC automatic injected group conversion after regular one + This parameter can be set to ENABLE or DISABLE. + Note: To use Automatic injected conversion, discontinuous mode must be disabled ('DiscontinuousConvMode' and 'InjectedDiscontinuousConvMode' set to DISABLE) + Note: To use Automatic injected conversion, injected group external triggers must be disabled ('ExternalTrigInjecConv' set to ADC_SOFTWARE_START) + Note: In case of DMA used with regular group: if DMA configured in normal mode (single shot) JAUTO will be stopped upon DMA transfer complete. + To maintain JAUTO always enabled, DMA must be configured in circular mode. + Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to + configure a channel on injected group can impact the configuration of other channels previously set. */ + uint32_t ExternalTrigInjecConv; /*!< Selects the external event used to trigger the conversion start of injected group. + If set to ADC_INJECTED_SOFTWARE_START, external triggers are disabled. + If set to external trigger source, triggering is on event rising edge. + This parameter can be a value of @ref ADCEx_External_trigger_Source_Injected + Note: This parameter must be modified when ADC is disabled (before ADC start conversion or after ADC stop conversion). + If ADC is enabled, this parameter setting is bypassed without error reporting (as it can be the expected behaviour in case of another parameter update on the fly) + Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to + configure a channel on injected group can impact the configuration of other channels previously set. */ + uint32_t ExternalTrigInjecConvEdge; /*!< Selects the external trigger edge of injected group. 
+ This parameter can be a value of @ref ADCEx_External_trigger_edge_Injected. + If trigger is set to ADC_INJECTED_SOFTWARE_START, this parameter is discarded. + Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to + configure a channel on injected group can impact the configuration of other channels previously set. */ +}ADC_InjectionConfTypeDef; + +/** + * @brief ADC Configuration multi-mode structure definition + */ +typedef struct +{ + uint32_t Mode; /*!< Configures the ADC to operate in independent or multi mode. + This parameter can be a value of @ref ADCEx_Common_mode */ + uint32_t DMAAccessMode; /*!< Configures the Direct memory access mode for multi ADC mode. + This parameter can be a value of @ref ADCEx_Direct_memory_access_mode_for_multi_mode */ + uint32_t TwoSamplingDelay; /*!< Configures the Delay between 2 sampling phases. + This parameter can be a value of @ref ADC_delay_between_2_sampling_phases */ +}ADC_MultiModeTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup ADCEx_Exported_Constants ADC Exported Constants + * @{ + */ + +/** @defgroup ADCEx_Common_mode ADC Common Mode + * @{ + */ +#define ADC_MODE_INDEPENDENT ((uint32_t)0x00000000U) +#define ADC_DUALMODE_REGSIMULT_INJECSIMULT ((uint32_t)ADC_CCR_MULTI_0) +#define ADC_DUALMODE_REGSIMULT_ALTERTRIG ((uint32_t)ADC_CCR_MULTI_1) +#define ADC_DUALMODE_INJECSIMULT ((uint32_t)(ADC_CCR_MULTI_2 | ADC_CCR_MULTI_0)) +#define ADC_DUALMODE_REGSIMULT ((uint32_t)(ADC_CCR_MULTI_2 | ADC_CCR_MULTI_1)) +#define ADC_DUALMODE_INTERL ((uint32_t)(ADC_CCR_MULTI_2 | ADC_CCR_MULTI_1 | ADC_CCR_MULTI_0)) +#define ADC_DUALMODE_ALTERTRIG ((uint32_t)(ADC_CCR_MULTI_3 | ADC_CCR_MULTI_0)) +#define ADC_TRIPLEMODE_REGSIMULT_INJECSIMULT ((uint32_t)(ADC_CCR_MULTI_4 | ADC_CCR_MULTI_0)) +#define ADC_TRIPLEMODE_REGSIMULT_AlterTrig ((uint32_t)(ADC_CCR_MULTI_4 | ADC_CCR_MULTI_1)) +#define ADC_TRIPLEMODE_INJECSIMULT ((uint32_t)(ADC_CCR_MULTI_4 | ADC_CCR_MULTI_2 | ADC_CCR_MULTI_0)) +#define ADC_TRIPLEMODE_REGSIMULT ((uint32_t)(ADC_CCR_MULTI_4 | ADC_CCR_MULTI_2 | ADC_CCR_MULTI_1)) +#define ADC_TRIPLEMODE_INTERL ((uint32_t)(ADC_CCR_MULTI_4 | ADC_CCR_MULTI_2 | ADC_CCR_MULTI_1 | ADC_CCR_MULTI_0)) +#define ADC_TRIPLEMODE_ALTERTRIG ((uint32_t)(ADC_CCR_MULTI_4 | ADC_CCR_MULTI_3 | ADC_CCR_MULTI_0)) +/** + * @} + */ + +/** @defgroup ADCEx_Direct_memory_access_mode_for_multi_mode ADC Direct Memory Access Mode For Multi Mode + * @{ + */ +#define ADC_DMAACCESSMODE_DISABLED ((uint32_t)0x00000000U) /*!< DMA mode disabled */ +#define ADC_DMAACCESSMODE_1 ((uint32_t)ADC_CCR_DMA_0) /*!< DMA mode 1 enabled (2 / 3 half-words one by one - 1 then 2 then 3)*/ +#define ADC_DMAACCESSMODE_2 ((uint32_t)ADC_CCR_DMA_1) /*!< DMA mode 2 enabled (2 / 3 half-words by pairs - 2&1 then 1&3 then 3&2)*/ +#define ADC_DMAACCESSMODE_3 ((uint32_t)ADC_CCR_DMA) /*!< DMA mode 3 enabled (2 / 3 bytes by pairs - 2&1 then 1&3 then 3&2) */ +/** + * @} + */ + +/** @defgroup ADCEx_External_trigger_edge_Injected ADC External Trigger Edge Injected + * @{ + */ +#define ADC_EXTERNALTRIGINJECCONVEDGE_NONE ((uint32_t)0x00000000U) +#define ADC_EXTERNALTRIGINJECCONVEDGE_RISING ((uint32_t)ADC_CR2_JEXTEN_0) +#define ADC_EXTERNALTRIGINJECCONVEDGE_FALLING ((uint32_t)ADC_CR2_JEXTEN_1) +#define ADC_EXTERNALTRIGINJECCONVEDGE_RISINGFALLING ((uint32_t)ADC_CR2_JEXTEN) +/** + * @} + */ + +/** @defgroup ADCEx_External_trigger_Source_Injected ADC External Trigger Source Injected + * @{ + */ +#define 
ADC_EXTERNALTRIGINJECCONV_T1_TRGO ((uint32_t)0x00000000U) +#define ADC_EXTERNALTRIGINJECCONV_T1_CC4 ((uint32_t)ADC_CR2_JEXTSEL_0) +#define ADC_EXTERNALTRIGINJECCONV_T2_TRGO ((uint32_t)ADC_CR2_JEXTSEL_1) +#define ADC_EXTERNALTRIGINJECCONV_T2_CC1 ((uint32_t)(ADC_CR2_JEXTSEL_1 | ADC_CR2_JEXTSEL_0)) +#define ADC_EXTERNALTRIGINJECCONV_T3_CC4 ((uint32_t)ADC_CR2_JEXTSEL_2) +#define ADC_EXTERNALTRIGINJECCONV_T4_TRGO ((uint32_t)(ADC_CR2_JEXTSEL_2 | ADC_CR2_JEXTSEL_0)) + +#define ADC_EXTERNALTRIGINJECCONV_T8_CC4 ((uint32_t)(ADC_CR2_JEXTSEL_2 | ADC_CR2_JEXTSEL_1 | ADC_CR2_JEXTSEL_0)) +#define ADC_EXTERNALTRIGINJECCONV_T1_TRGO2 ((uint32_t)ADC_CR2_JEXTSEL_3) +#define ADC_EXTERNALTRIGINJECCONV_T8_TRGO ((uint32_t)(ADC_CR2_JEXTSEL_3 | ADC_CR2_JEXTSEL_0)) +#define ADC_EXTERNALTRIGINJECCONV_T8_TRGO2 ((uint32_t)(ADC_CR2_JEXTSEL_3 | ADC_CR2_JEXTSEL_1)) +#define ADC_EXTERNALTRIGINJECCONV_T3_CC3 ((uint32_t)(ADC_CR2_JEXTSEL_3 | ADC_CR2_JEXTSEL_1 | ADC_CR2_JEXTSEL_0)) +#define ADC_EXTERNALTRIGINJECCONV_T5_TRGO ((uint32_t)(ADC_CR2_JEXTSEL_3 | ADC_CR2_JEXTSEL_2)) +#define ADC_EXTERNALTRIGINJECCONV_T3_CC1 ((uint32_t)(ADC_CR2_JEXTSEL_3 | ADC_CR2_JEXTSEL_2 | ADC_CR2_JEXTSEL_0)) +#define ADC_EXTERNALTRIGINJECCONV_T6_TRGO ((uint32_t)(ADC_CR2_JEXTSEL_3 | ADC_CR2_JEXTSEL_2 | ADC_CR2_JEXTSEL_1)) +#define ADC_INJECTED_SOFTWARE_START ((uint32_t)ADC_CR2_JEXTSEL + 1) +/** + * @} + */ + +/** @defgroup ADCEx_injected_rank ADC Injected Channel Rank + * @{ + */ +#define ADC_INJECTED_RANK_1 ((uint32_t)0x00000001U) +#define ADC_INJECTED_RANK_2 ((uint32_t)0x00000002U) +#define ADC_INJECTED_RANK_3 ((uint32_t)0x00000003U) +#define ADC_INJECTED_RANK_4 ((uint32_t)0x00000004U) +/** + * @} + */ + +/** @defgroup ADCEx_channels ADC Specific Channels + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup ADC_Exported_Macros ADC Exported Macros + * @{ + */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup ADCEx_Exported_Functions + * @{ + */ + +/** @addtogroup ADCEx_Exported_Functions_Group1 + * @{ + */ + +/* I/O operation functions ******************************************************/ +HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADCEx_InjectedStop(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADCEx_InjectedPollForConversion(ADC_HandleTypeDef* hadc, uint32_t Timeout); +HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADCEx_InjectedStop_IT(ADC_HandleTypeDef* hadc); +uint32_t HAL_ADCEx_InjectedGetValue(ADC_HandleTypeDef* hadc, uint32_t InjectedRank); +HAL_StatusTypeDef HAL_ADCEx_MultiModeStart_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, uint32_t Length); +HAL_StatusTypeDef HAL_ADCEx_MultiModeStop_DMA(ADC_HandleTypeDef* hadc); +uint32_t HAL_ADCEx_MultiModeGetValue(ADC_HandleTypeDef* hadc); +void HAL_ADCEx_InjectedConvCpltCallback(ADC_HandleTypeDef* hadc); + +/* Peripheral Control functions *************************************************/ +HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc,ADC_InjectionConfTypeDef* sConfigInjected); +HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_MultiModeTypeDef* multimode); + +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants 
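/* Editorial note, not part of the ST sources: a minimal sketch of the injected-group
 * API declared above, configuring one software-started injected rank and polling it.
 * The ADC_InjectionConfTypeDef field names are taken from this header; the handle,
 * its prior initialisation and ADC_CHANNEL_5 are illustrative assumptions. */
extern ADC_HandleTypeDef hadc1;   /* assumed to have been set up with HAL_ADC_Init() */

static uint32_t read_one_injected_sample(void)
{
    ADC_InjectionConfTypeDef sInjConfig = {0};
    uint32_t value = 0U;

    sInjConfig.InjectedChannel               = ADC_CHANNEL_5;   /* assumed channel */
    sInjConfig.InjectedRank                  = ADC_INJECTED_RANK_1;
    sInjConfig.InjectedSamplingTime          = ADC_SAMPLETIME_56CYCLES;
    sInjConfig.InjectedOffset                = 0U;
    sInjConfig.InjectedNbrOfConversion       = 1U;
    sInjConfig.InjectedDiscontinuousConvMode = DISABLE;
    sInjConfig.AutoInjectedConv              = DISABLE;
    sInjConfig.ExternalTrigInjecConv         = ADC_INJECTED_SOFTWARE_START;
    if (HAL_ADCEx_InjectedConfigChannel(&hadc1, &sInjConfig) != HAL_OK) { return 0U; }

    /* Software-started injected conversion, polled to completion. */
    HAL_ADCEx_InjectedStart(&hadc1);
    if (HAL_ADCEx_InjectedPollForConversion(&hadc1, 10U) == HAL_OK)
    {
        value = HAL_ADCEx_InjectedGetValue(&hadc1, ADC_INJECTED_RANK_1);
    }
    HAL_ADCEx_InjectedStop(&hadc1);
    return value;
}
/* Dual/triple ADC operation follows the same pattern through ADC_MultiModeTypeDef,
 * HAL_ADCEx_MultiModeConfigChannel() and HAL_ADCEx_MultiModeStart_DMA(). */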
---------------------------------------------------------*/ +/** @defgroup ADCEx_Private_Constants ADC Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup ADCEx_Private_Macros ADC Private Macros + * @{ + */ + +#define IS_ADC_MODE(__MODE__) (((__MODE__) == ADC_MODE_INDEPENDENT) || \ + ((__MODE__) == ADC_DUALMODE_REGSIMULT_INJECSIMULT) || \ + ((__MODE__) == ADC_DUALMODE_REGSIMULT_ALTERTRIG) || \ + ((__MODE__) == ADC_DUALMODE_INJECSIMULT) || \ + ((__MODE__) == ADC_DUALMODE_REGSIMULT) || \ + ((__MODE__) == ADC_DUALMODE_INTERL) || \ + ((__MODE__) == ADC_DUALMODE_ALTERTRIG) || \ + ((__MODE__) == ADC_TRIPLEMODE_REGSIMULT_INJECSIMULT) || \ + ((__MODE__) == ADC_TRIPLEMODE_REGSIMULT_AlterTrig) || \ + ((__MODE__) == ADC_TRIPLEMODE_INJECSIMULT) || \ + ((__MODE__) == ADC_TRIPLEMODE_REGSIMULT) || \ + ((__MODE__) == ADC_TRIPLEMODE_INTERL) || \ + ((__MODE__) == ADC_TRIPLEMODE_ALTERTRIG)) +#define IS_ADC_DMA_ACCESS_MODE(__MODE__) (((__MODE__) == ADC_DMAACCESSMODE_DISABLED) || \ + ((__MODE__) == ADC_DMAACCESSMODE_1) || \ + ((__MODE__) == ADC_DMAACCESSMODE_2) || \ + ((__MODE__) == ADC_DMAACCESSMODE_3)) +#define IS_ADC_EXT_INJEC_TRIG_EDGE(__EDGE__) (((__EDGE__) == ADC_EXTERNALTRIGINJECCONVEDGE_NONE) || \ + ((__EDGE__) == ADC_EXTERNALTRIGINJECCONVEDGE_RISING) || \ + ((__EDGE__) == ADC_EXTERNALTRIGINJECCONVEDGE_FALLING) || \ + ((__EDGE__) == ADC_EXTERNALTRIGINJECCONVEDGE_RISINGFALLING)) +#define IS_ADC_EXT_INJEC_TRIG(__INJTRIG__) (((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T1_TRGO) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T1_CC4) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T2_TRGO) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T2_CC1) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T3_CC4) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T4_TRGO) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T8_CC4) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T1_TRGO2) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T8_TRGO) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T8_TRGO2) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T3_CC3) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T5_TRGO) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T3_CC1) || \ + ((__INJTRIG__) == ADC_EXTERNALTRIGINJECCONV_T6_TRGO) || \ + ((__INJTRIG__) == ADC_INJECTED_SOFTWARE_START)) +#define IS_ADC_INJECTED_RANK(__RANK__) (((__RANK__) == ADC_INJECTED_RANK_1) || \ + ((__RANK__) == ADC_INJECTED_RANK_2) || \ + ((__RANK__) == ADC_INJECTED_RANK_3) || \ + ((__RANK__) == ADC_INJECTED_RANK_4)) +#define IS_ADC_INJECTED_LENGTH(__LENGTH__) (((__LENGTH__) >= ((uint32_t)1)) && ((__LENGTH__) <= ((uint32_t)4))) + +/** + * @brief Set the selected injected Channel rank. + * @param _CHANNELNB_ Channel number. + * @param _RANKNB_ Rank number. + * @param _JSQR_JL_ Sequence length. 
+ * @retval None + */ +#define ADC_JSQR(_CHANNELNB_, _RANKNB_,_JSQR_JL_) (((uint32_t)((uint16_t)(_CHANNELNB_))) << (5 * (uint8_t)(((_RANKNB_) + 3) - (_JSQR_JL_)))) +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup ADCEx_Private_Functions ADC Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_ADC_EX_H */ + + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_cortex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_cortex.h new file mode 100644 index 000000000..c9162d4ed --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_cortex.h @@ -0,0 +1,406 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_cortex.h + * @author MCD Application Team + * @brief Header file of CORTEX HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_CORTEX_H +#define __STM32F7xx_HAL_CORTEX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup CORTEX + * @{ + */ +/* Exported types ------------------------------------------------------------*/ +/** @defgroup CORTEX_Exported_Types Cortex Exported Types + * @{ + */ + +#if (__MPU_PRESENT == 1) +/** @defgroup CORTEX_MPU_Region_Initialization_Structure_definition MPU Region Initialization Structure Definition + * @brief MPU Region initialization structure + * @{ + */ +typedef struct +{ + uint8_t Enable; /*!< Specifies the status of the region. + This parameter can be a value of @ref CORTEX_MPU_Region_Enable */ + uint8_t Number; /*!< Specifies the number of the region to protect. + This parameter can be a value of @ref CORTEX_MPU_Region_Number */ + uint32_t BaseAddress; /*!< Specifies the base address of the region to protect. */ + uint8_t Size; /*!< Specifies the size of the region to protect. + This parameter can be a value of @ref CORTEX_MPU_Region_Size */ + uint8_t SubRegionDisable; /*!< Specifies the number of the subregion protection to disable. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF */ + uint8_t TypeExtField; /*!< Specifies the TEX field level. + This parameter can be a value of @ref CORTEX_MPU_TEX_Levels */ + uint8_t AccessPermission; /*!< Specifies the region access permission type. + This parameter can be a value of @ref CORTEX_MPU_Region_Permission_Attributes */ + uint8_t DisableExec; /*!< Specifies the instruction access status. + This parameter can be a value of @ref CORTEX_MPU_Instruction_Access */ + uint8_t IsShareable; /*!< Specifies the shareability status of the protected region. + This parameter can be a value of @ref CORTEX_MPU_Access_Shareable */ + uint8_t IsCacheable; /*!< Specifies the cacheable status of the region protected. + This parameter can be a value of @ref CORTEX_MPU_Access_Cacheable */ + uint8_t IsBufferable; /*!< Specifies the bufferable status of the protected region. 
+ This parameter can be a value of @ref CORTEX_MPU_Access_Bufferable */ +}MPU_Region_InitTypeDef; +/** + * @} + */ +#endif /* __MPU_PRESENT */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup CORTEX_Exported_Constants CORTEX Exported Constants + * @{ + */ + +/** @defgroup CORTEX_Preemption_Priority_Group CORTEX Preemption Priority Group + * @{ + */ +#define NVIC_PRIORITYGROUP_0 ((uint32_t)0x00000007U) /*!< 0 bits for pre-emption priority + 4 bits for subpriority */ +#define NVIC_PRIORITYGROUP_1 ((uint32_t)0x00000006U) /*!< 1 bits for pre-emption priority + 3 bits for subpriority */ +#define NVIC_PRIORITYGROUP_2 ((uint32_t)0x00000005U) /*!< 2 bits for pre-emption priority + 2 bits for subpriority */ +#define NVIC_PRIORITYGROUP_3 ((uint32_t)0x00000004U) /*!< 3 bits for pre-emption priority + 1 bits for subpriority */ +#define NVIC_PRIORITYGROUP_4 ((uint32_t)0x00000003U) /*!< 4 bits for pre-emption priority + 0 bits for subpriority */ +/** + * @} + */ + +/** @defgroup CORTEX_SysTick_clock_source CORTEX _SysTick clock source + * @{ + */ +#define SYSTICK_CLKSOURCE_HCLK_DIV8 ((uint32_t)0x00000000U) +#define SYSTICK_CLKSOURCE_HCLK ((uint32_t)0x00000004U) + +/** + * @} + */ + +#if (__MPU_PRESENT == 1) +/** @defgroup CORTEX_MPU_HFNMI_PRIVDEF_Control MPU HFNMI and PRIVILEGED Access control + * @{ + */ +#define MPU_HFNMI_PRIVDEF_NONE ((uint32_t)0x00000000U) +#define MPU_HARDFAULT_NMI ((uint32_t)0x00000002U) +#define MPU_PRIVILEGED_DEFAULT ((uint32_t)0x00000004U) +#define MPU_HFNMI_PRIVDEF ((uint32_t)0x00000006U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Enable CORTEX MPU Region Enable + * @{ + */ +#define MPU_REGION_ENABLE ((uint8_t)0x01U) +#define MPU_REGION_DISABLE ((uint8_t)0x00U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Instruction_Access CORTEX MPU Instruction Access + * @{ + */ +#define MPU_INSTRUCTION_ACCESS_ENABLE ((uint8_t)0x00U) +#define MPU_INSTRUCTION_ACCESS_DISABLE ((uint8_t)0x01U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Access_Shareable CORTEX MPU Instruction Access Shareable + * @{ + */ +#define MPU_ACCESS_SHAREABLE ((uint8_t)0x01U) +#define MPU_ACCESS_NOT_SHAREABLE ((uint8_t)0x00U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Access_Cacheable CORTEX MPU Instruction Access Cacheable + * @{ + */ +#define MPU_ACCESS_CACHEABLE ((uint8_t)0x01U) +#define MPU_ACCESS_NOT_CACHEABLE ((uint8_t)0x00U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Access_Bufferable CORTEX MPU Instruction Access Bufferable + * @{ + */ +#define MPU_ACCESS_BUFFERABLE ((uint8_t)0x01U) +#define MPU_ACCESS_NOT_BUFFERABLE ((uint8_t)0x00U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_TEX_Levels MPU TEX Levels + * @{ + */ +#define MPU_TEX_LEVEL0 ((uint8_t)0x00U) +#define MPU_TEX_LEVEL1 ((uint8_t)0x01U) +#define MPU_TEX_LEVEL2 ((uint8_t)0x02U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Size CORTEX MPU Region Size + * @{ + */ +#define MPU_REGION_SIZE_32B ((uint8_t)0x04U) +#define MPU_REGION_SIZE_64B ((uint8_t)0x05U) +#define MPU_REGION_SIZE_128B ((uint8_t)0x06U) +#define MPU_REGION_SIZE_256B ((uint8_t)0x07U) +#define MPU_REGION_SIZE_512B ((uint8_t)0x08U) +#define MPU_REGION_SIZE_1KB ((uint8_t)0x09U) +#define MPU_REGION_SIZE_2KB ((uint8_t)0x0AU) +#define MPU_REGION_SIZE_4KB ((uint8_t)0x0BU) +#define MPU_REGION_SIZE_8KB ((uint8_t)0x0CU) +#define MPU_REGION_SIZE_16KB ((uint8_t)0x0DU) +#define MPU_REGION_SIZE_32KB ((uint8_t)0x0EU) +#define MPU_REGION_SIZE_64KB ((uint8_t)0x0FU) +#define MPU_REGION_SIZE_128KB ((uint8_t)0x10U) +#define 
MPU_REGION_SIZE_256KB ((uint8_t)0x11U) +#define MPU_REGION_SIZE_512KB ((uint8_t)0x12U) +#define MPU_REGION_SIZE_1MB ((uint8_t)0x13U) +#define MPU_REGION_SIZE_2MB ((uint8_t)0x14U) +#define MPU_REGION_SIZE_4MB ((uint8_t)0x15U) +#define MPU_REGION_SIZE_8MB ((uint8_t)0x16U) +#define MPU_REGION_SIZE_16MB ((uint8_t)0x17U) +#define MPU_REGION_SIZE_32MB ((uint8_t)0x18U) +#define MPU_REGION_SIZE_64MB ((uint8_t)0x19U) +#define MPU_REGION_SIZE_128MB ((uint8_t)0x1AU) +#define MPU_REGION_SIZE_256MB ((uint8_t)0x1BU) +#define MPU_REGION_SIZE_512MB ((uint8_t)0x1CU) +#define MPU_REGION_SIZE_1GB ((uint8_t)0x1DU) +#define MPU_REGION_SIZE_2GB ((uint8_t)0x1EU) +#define MPU_REGION_SIZE_4GB ((uint8_t)0x1FU) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Permission_Attributes CORTEX MPU Region Permission Attributes + * @{ + */ +#define MPU_REGION_NO_ACCESS ((uint8_t)0x00U) +#define MPU_REGION_PRIV_RW ((uint8_t)0x01U) +#define MPU_REGION_PRIV_RW_URO ((uint8_t)0x02U) +#define MPU_REGION_FULL_ACCESS ((uint8_t)0x03U) +#define MPU_REGION_PRIV_RO ((uint8_t)0x05U) +#define MPU_REGION_PRIV_RO_URO ((uint8_t)0x06U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Number CORTEX MPU Region Number + * @{ + */ +#define MPU_REGION_NUMBER0 ((uint8_t)0x00U) +#define MPU_REGION_NUMBER1 ((uint8_t)0x01U) +#define MPU_REGION_NUMBER2 ((uint8_t)0x02U) +#define MPU_REGION_NUMBER3 ((uint8_t)0x03U) +#define MPU_REGION_NUMBER4 ((uint8_t)0x04U) +#define MPU_REGION_NUMBER5 ((uint8_t)0x05U) +#define MPU_REGION_NUMBER6 ((uint8_t)0x06U) +#define MPU_REGION_NUMBER7 ((uint8_t)0x07U) +/** + * @} + */ +#endif /* __MPU_PRESENT */ + +/** + * @} + */ + + +/* Exported Macros -----------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup CORTEX_Exported_Functions + * @{ + */ + +/** @addtogroup CORTEX_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions *****************************/ +void HAL_NVIC_SetPriorityGrouping(uint32_t PriorityGroup); +void HAL_NVIC_SetPriority(IRQn_Type IRQn, uint32_t PreemptPriority, uint32_t SubPriority); +void HAL_NVIC_EnableIRQ(IRQn_Type IRQn); +void HAL_NVIC_DisableIRQ(IRQn_Type IRQn); +void HAL_NVIC_SystemReset(void); +uint32_t HAL_SYSTICK_Config(uint32_t TicksNumb); +/** + * @} + */ + +/** @addtogroup CORTEX_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions ***********************************************/ +#if (__MPU_PRESENT == 1) +void HAL_MPU_Enable(uint32_t MPU_Control); +void HAL_MPU_Disable(void); +void HAL_MPU_ConfigRegion(MPU_Region_InitTypeDef *MPU_Init); +#endif /* __MPU_PRESENT */ +uint32_t HAL_NVIC_GetPriorityGrouping(void); +void HAL_NVIC_GetPriority(IRQn_Type IRQn, uint32_t PriorityGroup, uint32_t* pPreemptPriority, uint32_t* pSubPriority); +uint32_t HAL_NVIC_GetPendingIRQ(IRQn_Type IRQn); +void HAL_NVIC_SetPendingIRQ(IRQn_Type IRQn); +void HAL_NVIC_ClearPendingIRQ(IRQn_Type IRQn); +uint32_t HAL_NVIC_GetActive(IRQn_Type IRQn); +void HAL_SYSTICK_CLKSourceConfig(uint32_t CLKSource); +void HAL_SYSTICK_IRQHandler(void); +void HAL_SYSTICK_Callback(void); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup CORTEX_Private_Macros 
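/* Editorial note, not part of the ST sources: a minimal sketch of the NVIC and MPU
 * services declared above. ADC_IRQn is the usual STM32F7 device-header IRQ name and
 * the base address is an illustrative assumption; real projects take both from their
 * own device header and memory map. */
static void cortex_config_sketch(void)
{
    MPU_Region_InitTypeDef MPU_InitStruct = {0};

    /* 4 bits of pre-emption priority, no sub-priority, then enable one device IRQ. */
    HAL_NVIC_SetPriorityGrouping(NVIC_PRIORITYGROUP_4);
    HAL_NVIC_SetPriority(ADC_IRQn, 5U, 0U);   /* assumed device IRQ */
    HAL_NVIC_EnableIRQ(ADC_IRQn);

    /* Mark a 32 KB SRAM window as full-access, non-cacheable (e.g. a DMA buffer). */
    HAL_MPU_Disable();
    MPU_InitStruct.Enable           = MPU_REGION_ENABLE;
    MPU_InitStruct.Number           = MPU_REGION_NUMBER0;
    MPU_InitStruct.BaseAddress      = 0x20010000U;            /* assumed address */
    MPU_InitStruct.Size             = MPU_REGION_SIZE_32KB;
    MPU_InitStruct.SubRegionDisable = 0x00U;
    MPU_InitStruct.TypeExtField     = MPU_TEX_LEVEL1;
    MPU_InitStruct.AccessPermission = MPU_REGION_FULL_ACCESS;
    MPU_InitStruct.DisableExec      = MPU_INSTRUCTION_ACCESS_DISABLE;
    MPU_InitStruct.IsShareable      = MPU_ACCESS_NOT_SHAREABLE;
    MPU_InitStruct.IsCacheable      = MPU_ACCESS_NOT_CACHEABLE;
    MPU_InitStruct.IsBufferable     = MPU_ACCESS_NOT_BUFFERABLE;
    HAL_MPU_ConfigRegion(&MPU_InitStruct);
    HAL_MPU_Enable(MPU_PRIVILEGED_DEFAULT);
}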
CORTEX Private Macros + * @{ + */ +#define IS_NVIC_PRIORITY_GROUP(GROUP) (((GROUP) == NVIC_PRIORITYGROUP_0) || \ + ((GROUP) == NVIC_PRIORITYGROUP_1) || \ + ((GROUP) == NVIC_PRIORITYGROUP_2) || \ + ((GROUP) == NVIC_PRIORITYGROUP_3) || \ + ((GROUP) == NVIC_PRIORITYGROUP_4)) + +#define IS_NVIC_PREEMPTION_PRIORITY(PRIORITY) ((PRIORITY) < 0x10U) + +#define IS_NVIC_SUB_PRIORITY(PRIORITY) ((PRIORITY) < 0x10U) + +#define IS_NVIC_DEVICE_IRQ(IRQ) ((IRQ) >= 0x00) + +#define IS_SYSTICK_CLK_SOURCE(SOURCE) (((SOURCE) == SYSTICK_CLKSOURCE_HCLK) || \ + ((SOURCE) == SYSTICK_CLKSOURCE_HCLK_DIV8)) + +#if (__MPU_PRESENT == 1) +#define IS_MPU_REGION_ENABLE(STATE) (((STATE) == MPU_REGION_ENABLE) || \ + ((STATE) == MPU_REGION_DISABLE)) + +#define IS_MPU_INSTRUCTION_ACCESS(STATE) (((STATE) == MPU_INSTRUCTION_ACCESS_ENABLE) || \ + ((STATE) == MPU_INSTRUCTION_ACCESS_DISABLE)) + +#define IS_MPU_ACCESS_SHAREABLE(STATE) (((STATE) == MPU_ACCESS_SHAREABLE) || \ + ((STATE) == MPU_ACCESS_NOT_SHAREABLE)) + +#define IS_MPU_ACCESS_CACHEABLE(STATE) (((STATE) == MPU_ACCESS_CACHEABLE) || \ + ((STATE) == MPU_ACCESS_NOT_CACHEABLE)) + +#define IS_MPU_ACCESS_BUFFERABLE(STATE) (((STATE) == MPU_ACCESS_BUFFERABLE) || \ + ((STATE) == MPU_ACCESS_NOT_BUFFERABLE)) + +#define IS_MPU_TEX_LEVEL(TYPE) (((TYPE) == MPU_TEX_LEVEL0) || \ + ((TYPE) == MPU_TEX_LEVEL1) || \ + ((TYPE) == MPU_TEX_LEVEL2)) + +#define IS_MPU_REGION_PERMISSION_ATTRIBUTE(TYPE) (((TYPE) == MPU_REGION_NO_ACCESS) || \ + ((TYPE) == MPU_REGION_PRIV_RW) || \ + ((TYPE) == MPU_REGION_PRIV_RW_URO) || \ + ((TYPE) == MPU_REGION_FULL_ACCESS) || \ + ((TYPE) == MPU_REGION_PRIV_RO) || \ + ((TYPE) == MPU_REGION_PRIV_RO_URO)) + +#define IS_MPU_REGION_NUMBER(NUMBER) (((NUMBER) == MPU_REGION_NUMBER0) || \ + ((NUMBER) == MPU_REGION_NUMBER1) || \ + ((NUMBER) == MPU_REGION_NUMBER2) || \ + ((NUMBER) == MPU_REGION_NUMBER3) || \ + ((NUMBER) == MPU_REGION_NUMBER4) || \ + ((NUMBER) == MPU_REGION_NUMBER5) || \ + ((NUMBER) == MPU_REGION_NUMBER6) || \ + ((NUMBER) == MPU_REGION_NUMBER7)) + +#define IS_MPU_REGION_SIZE(SIZE) (((SIZE) == MPU_REGION_SIZE_32B) || \ + ((SIZE) == MPU_REGION_SIZE_64B) || \ + ((SIZE) == MPU_REGION_SIZE_128B) || \ + ((SIZE) == MPU_REGION_SIZE_256B) || \ + ((SIZE) == MPU_REGION_SIZE_512B) || \ + ((SIZE) == MPU_REGION_SIZE_1KB) || \ + ((SIZE) == MPU_REGION_SIZE_2KB) || \ + ((SIZE) == MPU_REGION_SIZE_4KB) || \ + ((SIZE) == MPU_REGION_SIZE_8KB) || \ + ((SIZE) == MPU_REGION_SIZE_16KB) || \ + ((SIZE) == MPU_REGION_SIZE_32KB) || \ + ((SIZE) == MPU_REGION_SIZE_64KB) || \ + ((SIZE) == MPU_REGION_SIZE_128KB) || \ + ((SIZE) == MPU_REGION_SIZE_256KB) || \ + ((SIZE) == MPU_REGION_SIZE_512KB) || \ + ((SIZE) == MPU_REGION_SIZE_1MB) || \ + ((SIZE) == MPU_REGION_SIZE_2MB) || \ + ((SIZE) == MPU_REGION_SIZE_4MB) || \ + ((SIZE) == MPU_REGION_SIZE_8MB) || \ + ((SIZE) == MPU_REGION_SIZE_16MB) || \ + ((SIZE) == MPU_REGION_SIZE_32MB) || \ + ((SIZE) == MPU_REGION_SIZE_64MB) || \ + ((SIZE) == MPU_REGION_SIZE_128MB) || \ + ((SIZE) == MPU_REGION_SIZE_256MB) || \ + ((SIZE) == MPU_REGION_SIZE_512MB) || \ + ((SIZE) == MPU_REGION_SIZE_1GB) || \ + ((SIZE) == MPU_REGION_SIZE_2GB) || \ + ((SIZE) == MPU_REGION_SIZE_4GB)) + +#define IS_MPU_SUB_REGION_DISABLE(SUBREGION) ((SUBREGION) < (uint16_t)0x00FFU) +#endif /* __MPU_PRESENT */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_CORTEX_H */ + + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git 
a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_crc.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_crc.h new file mode 100644 index 000000000..ece879023 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_crc.h @@ -0,0 +1,344 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_crc.h + * @author MCD Application Team + * @brief Header file of CRC HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_CRC_H +#define STM32F7xx_HAL_CRC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup CRC + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup CRC_Exported_Types CRC Exported Types + * @{ + */ + +/** + * @brief CRC HAL State Structure definition + */ +typedef enum +{ + HAL_CRC_STATE_RESET = 0x00U, /*!< CRC not yet initialized or disabled */ + HAL_CRC_STATE_READY = 0x01U, /*!< CRC initialized and ready for use */ + HAL_CRC_STATE_BUSY = 0x02U, /*!< CRC internal process is ongoing */ + HAL_CRC_STATE_TIMEOUT = 0x03U, /*!< CRC timeout state */ + HAL_CRC_STATE_ERROR = 0x04U /*!< CRC error state */ +} HAL_CRC_StateTypeDef; + +/** + * @brief CRC Init Structure definition + */ +typedef struct +{ + uint8_t DefaultPolynomialUse; /*!< This parameter is a value of @ref CRC_Default_Polynomial and indicates if default polynomial is used. + If set to DEFAULT_POLYNOMIAL_ENABLE, resort to default + X^32 + X^26 + X^23 + X^22 + X^16 + X^12 + X^11 + X^10 +X^8 + X^7 + X^5 + X^4 + X^2+ X +1. + In that case, there is no need to set GeneratingPolynomial field. + If otherwise set to DEFAULT_POLYNOMIAL_DISABLE, GeneratingPolynomial and CRCLength fields must be set. */ + + uint8_t DefaultInitValueUse; /*!< This parameter is a value of @ref CRC_Default_InitValue_Use and indicates if default init value is used. + If set to DEFAULT_INIT_VALUE_ENABLE, resort to default + 0xFFFFFFFF value. In that case, there is no need to set InitValue field. + If otherwise set to DEFAULT_INIT_VALUE_DISABLE, InitValue field must be set. */ + + uint32_t GeneratingPolynomial; /*!< Set CRC generating polynomial as a 7, 8, 16 or 32-bit long value for a polynomial degree + respectively equal to 7, 8, 16 or 32. This field is written in normal representation, + e.g., for a polynomial of degree 7, X^7 + X^6 + X^5 + X^2 + 1 is written 0x65. + No need to specify it if DefaultPolynomialUse is set to DEFAULT_POLYNOMIAL_ENABLE. */ + + uint32_t CRCLength; /*!< This parameter is a value of @ref CRC_Polynomial_Sizes and indicates CRC length. + Value can be either one of + @arg @ref CRC_POLYLENGTH_32B (32-bit CRC), + @arg @ref CRC_POLYLENGTH_16B (16-bit CRC), + @arg @ref CRC_POLYLENGTH_8B (8-bit CRC), + @arg @ref CRC_POLYLENGTH_7B (7-bit CRC). */ + + uint32_t InitValue; /*!< Init value to initiate CRC computation. No need to specify it if DefaultInitValueUse + is set to DEFAULT_INIT_VALUE_ENABLE. */ + + uint32_t InputDataInversionMode; /*!< This parameter is a value of @ref CRCEx_Input_Data_Inversion and specifies input data inversion mode. 
+ Can be either one of the following values + @arg @ref CRC_INPUTDATA_INVERSION_NONE no input data inversion + @arg @ref CRC_INPUTDATA_INVERSION_BYTE byte-wise inversion, 0x1A2B3C4D becomes 0x58D43CB2 + @arg @ref CRC_INPUTDATA_INVERSION_HALFWORD halfword-wise inversion, 0x1A2B3C4D becomes 0xD458B23C + @arg @ref CRC_INPUTDATA_INVERSION_WORD word-wise inversion, 0x1A2B3C4D becomes 0xB23CD458 */ + + uint32_t OutputDataInversionMode; /*!< This parameter is a value of @ref CRCEx_Output_Data_Inversion and specifies output data (i.e. CRC) inversion mode. + Can be either + @arg @ref CRC_OUTPUTDATA_INVERSION_DISABLE no CRC inversion, + @arg @ref CRC_OUTPUTDATA_INVERSION_ENABLE CRC 0x11223344 is converted into 0x22CC4488 */ +} CRC_InitTypeDef; + +/** + * @brief CRC Handle Structure definition + */ +typedef struct +{ + CRC_TypeDef *Instance; /*!< Register base address */ + + CRC_InitTypeDef Init; /*!< CRC configuration parameters */ + + HAL_LockTypeDef Lock; /*!< CRC Locking object */ + + __IO HAL_CRC_StateTypeDef State; /*!< CRC communication state */ + + uint32_t InputDataFormat; /*!< This parameter is a value of @ref CRC_Input_Buffer_Format and specifies input data format. + Can be either + @arg @ref CRC_INPUTDATA_FORMAT_BYTES input data is a stream of bytes (8-bit data) + @arg @ref CRC_INPUTDATA_FORMAT_HALFWORDS input data is a stream of half-words (16-bit data) + @arg @ref CRC_INPUTDATA_FORMAT_WORDS input data is a stream of words (32-bit data) + + Note that constant CRC_INPUT_FORMAT_UNDEFINED is defined but an initialization error + must occur if InputBufferFormat is not one of the three values listed above */ +} CRC_HandleTypeDef; +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup CRC_Exported_Constants CRC Exported Constants + * @{ + */ + +/** @defgroup CRC_Default_Polynomial_Value Default CRC generating polynomial + * @{ + */ +#define DEFAULT_CRC32_POLY 0x04C11DB7U /*!< X^32 + X^26 + X^23 + X^22 + X^16 + X^12 + X^11 + X^10 +X^8 + X^7 + X^5 + X^4 + X^2+ X +1 */ +/** + * @} + */ + +/** @defgroup CRC_Default_InitValue Default CRC computation initialization value + * @{ + */ +#define DEFAULT_CRC_INITVALUE 0xFFFFFFFFU /*!< Initial CRC default value */ +/** + * @} + */ + +/** @defgroup CRC_Default_Polynomial Indicates whether or not default polynomial is used + * @{ + */ +#define DEFAULT_POLYNOMIAL_ENABLE ((uint8_t)0x00U) /*!< Enable default generating polynomial 0x04C11DB7 */ +#define DEFAULT_POLYNOMIAL_DISABLE ((uint8_t)0x01U) /*!< Disable default generating polynomial 0x04C11DB7 */ +/** + * @} + */ + +/** @defgroup CRC_Default_InitValue_Use Indicates whether or not default init value is used + * @{ + */ +#define DEFAULT_INIT_VALUE_ENABLE ((uint8_t)0x00U) /*!< Enable initial CRC default value */ +#define DEFAULT_INIT_VALUE_DISABLE ((uint8_t)0x01U) /*!< Disable initial CRC default value */ +/** + * @} + */ + +/** @defgroup CRC_Polynomial_Sizes Polynomial sizes to configure the peripheral + * @{ + */ +#define CRC_POLYLENGTH_32B 0x00000000U /*!< Resort to a 32-bit long generating polynomial */ +#define CRC_POLYLENGTH_16B CRC_CR_POLYSIZE_0 /*!< Resort to a 16-bit long generating polynomial */ +#define CRC_POLYLENGTH_8B CRC_CR_POLYSIZE_1 /*!< Resort to a 8-bit long generating polynomial */ +#define CRC_POLYLENGTH_7B CRC_CR_POLYSIZE /*!< Resort to a 7-bit long generating polynomial */ +/** + * @} + */ + +/** @defgroup CRC_Polynomial_Size_Definitions CRC polynomial possible sizes actual definitions + * @{ + */ +#define HAL_CRC_LENGTH_32B 
32U /*!< 32-bit long CRC */ +#define HAL_CRC_LENGTH_16B 16U /*!< 16-bit long CRC */ +#define HAL_CRC_LENGTH_8B 8U /*!< 8-bit long CRC */ +#define HAL_CRC_LENGTH_7B 7U /*!< 7-bit long CRC */ +/** + * @} + */ + +/** @defgroup CRC_Input_Buffer_Format Input Buffer Format + * @{ + */ +/* WARNING: CRC_INPUT_FORMAT_UNDEFINED is created for reference purposes but + * an error is triggered in HAL_CRC_Init() if InputDataFormat field is set + * to CRC_INPUT_FORMAT_UNDEFINED: the format MUST be defined by the user for + * the CRC APIs to provide a correct result */ +#define CRC_INPUTDATA_FORMAT_UNDEFINED 0x00000000U /*!< Undefined input data format */ +#define CRC_INPUTDATA_FORMAT_BYTES 0x00000001U /*!< Input data in byte format */ +#define CRC_INPUTDATA_FORMAT_HALFWORDS 0x00000002U /*!< Input data in half-word format */ +#define CRC_INPUTDATA_FORMAT_WORDS 0x00000003U /*!< Input data in word format */ +/** + * @} + */ + +/** @defgroup CRC_Aliases CRC API aliases + * @{ + */ +#define HAL_CRC_Input_Data_Reverse HAL_CRCEx_Input_Data_Reverse /*!< Aliased to HAL_CRCEx_Input_Data_Reverse for inter STM32 series compatibility */ +#define HAL_CRC_Output_Data_Reverse HAL_CRCEx_Output_Data_Reverse /*!< Aliased to HAL_CRCEx_Output_Data_Reverse for inter STM32 series compatibility */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup CRC_Exported_Macros CRC Exported Macros + * @{ + */ + +/** @brief Reset CRC handle state. + * @param __HANDLE__ CRC handle. + * @retval None + */ +#define __HAL_CRC_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_CRC_STATE_RESET) + +/** + * @brief Reset CRC Data Register. + * @param __HANDLE__ CRC handle + * @retval None + */ +#define __HAL_CRC_DR_RESET(__HANDLE__) ((__HANDLE__)->Instance->CR |= CRC_CR_RESET) + +/** + * @brief Set CRC INIT non-default value + * @param __HANDLE__ CRC handle + * @param __INIT__ 32-bit initial value + * @retval None + */ +#define __HAL_CRC_INITIALCRCVALUE_CONFIG(__HANDLE__, __INIT__) ((__HANDLE__)->Instance->INIT = (__INIT__)) + +/** + * @brief Store data in the Independent Data (ID) register. + * @param __HANDLE__ CRC handle + * @param __VALUE__ Value to be stored in the ID register + * @note Refer to the Reference Manual to get the authorized __VALUE__ length in bits + * @retval None + */ +#define __HAL_CRC_SET_IDR(__HANDLE__, __VALUE__) (WRITE_REG((__HANDLE__)->Instance->IDR, (__VALUE__))) + +/** + * @brief Return the data stored in the Independent Data (ID) register. 
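/* Editorial note, not part of the ST sources: a minimal sketch of computing a CRC
 * over a word buffer with the default CRC-32 polynomial, using the handle, init
 * structure and HAL_CRC_Init()/HAL_CRC_Calculate() functions of this header. The
 * function name and buffer are illustrative; the CRC peripheral clock is expected
 * to be enabled in HAL_CRC_MspInit(). */
static uint32_t crc32_of_words(uint32_t buf[], uint32_t len_words)
{
    CRC_HandleTypeDef hcrc = {0};

    hcrc.Instance                     = CRC;                            /* device-header instance */
    hcrc.Init.DefaultPolynomialUse    = DEFAULT_POLYNOMIAL_ENABLE;      /* 0x04C11DB7 */
    hcrc.Init.DefaultInitValueUse     = DEFAULT_INIT_VALUE_ENABLE;      /* 0xFFFFFFFF */
    hcrc.Init.InputDataInversionMode  = CRC_INPUTDATA_INVERSION_NONE;
    hcrc.Init.OutputDataInversionMode = CRC_OUTPUTDATA_INVERSION_DISABLE;
    hcrc.InputDataFormat              = CRC_INPUTDATA_FORMAT_WORDS;     /* must not stay UNDEFINED */
    if (HAL_CRC_Init(&hcrc) != HAL_OK) { return 0U; }

    /* HAL_CRC_Calculate() restarts from the init value; HAL_CRC_Accumulate()
     * would instead continue from the previously computed CRC. */
    return HAL_CRC_Calculate(&hcrc, buf, len_words);
}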
+ * @param __HANDLE__ CRC handle + * @note Refer to the Reference Manual to get the authorized __VALUE__ length in bits + * @retval Value of the ID register + */ +#define __HAL_CRC_GET_IDR(__HANDLE__) (((__HANDLE__)->Instance->IDR) & CRC_IDR_IDR) +/** + * @} + */ + + +/* Private macros --------------------------------------------------------*/ +/** @defgroup CRC_Private_Macros CRC Private Macros + * @{ + */ + +#define IS_DEFAULT_POLYNOMIAL(DEFAULT) (((DEFAULT) == DEFAULT_POLYNOMIAL_ENABLE) || \ + ((DEFAULT) == DEFAULT_POLYNOMIAL_DISABLE)) + + +#define IS_DEFAULT_INIT_VALUE(VALUE) (((VALUE) == DEFAULT_INIT_VALUE_ENABLE) || \ + ((VALUE) == DEFAULT_INIT_VALUE_DISABLE)) + +#define IS_CRC_POL_LENGTH(LENGTH) (((LENGTH) == CRC_POLYLENGTH_32B) || \ + ((LENGTH) == CRC_POLYLENGTH_16B) || \ + ((LENGTH) == CRC_POLYLENGTH_8B) || \ + ((LENGTH) == CRC_POLYLENGTH_7B)) + +#define IS_CRC_INPUTDATA_FORMAT(FORMAT) (((FORMAT) == CRC_INPUTDATA_FORMAT_BYTES) || \ + ((FORMAT) == CRC_INPUTDATA_FORMAT_HALFWORDS) || \ + ((FORMAT) == CRC_INPUTDATA_FORMAT_WORDS)) + +/** + * @} + */ + +/* Include CRC HAL Extended module */ +#include "stm32f7xx_hal_crc_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup CRC_Exported_Functions CRC Exported Functions + * @{ + */ + +/* Initialization and de-initialization functions ****************************/ +/** @defgroup CRC_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ +HAL_StatusTypeDef HAL_CRC_Init(CRC_HandleTypeDef *hcrc); +HAL_StatusTypeDef HAL_CRC_DeInit(CRC_HandleTypeDef *hcrc); +void HAL_CRC_MspInit(CRC_HandleTypeDef *hcrc); +void HAL_CRC_MspDeInit(CRC_HandleTypeDef *hcrc); +/** + * @} + */ + +/* Peripheral Control functions ***********************************************/ +/** @defgroup CRC_Exported_Functions_Group2 Peripheral Control functions + * @{ + */ +uint32_t HAL_CRC_Accumulate(CRC_HandleTypeDef *hcrc, uint32_t pBuffer[], uint32_t BufferLength); +uint32_t HAL_CRC_Calculate(CRC_HandleTypeDef *hcrc, uint32_t pBuffer[], uint32_t BufferLength); +/** + * @} + */ + +/* Peripheral State and Error functions ***************************************/ +/** @defgroup CRC_Exported_Functions_Group3 Peripheral State functions + * @{ + */ +HAL_CRC_StateTypeDef HAL_CRC_GetState(CRC_HandleTypeDef *hcrc); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_CRC_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_crc_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_crc_ex.h new file mode 100644 index 000000000..f2ebba1f3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_crc_ex.h @@ -0,0 +1,153 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_crc_ex.h + * @author MCD Application Team + * @brief Header file of CRC HAL extended module. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_CRC_EX_H +#define STM32F7xx_HAL_CRC_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup CRCEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup CRCEx_Exported_Constants CRC Extended Exported Constants + * @{ + */ + +/** @defgroup CRCEx_Input_Data_Inversion Input Data Inversion Modes + * @{ + */ +#define CRC_INPUTDATA_INVERSION_NONE 0x00000000U /*!< No input data inversion */ +#define CRC_INPUTDATA_INVERSION_BYTE CRC_CR_REV_IN_0 /*!< Byte-wise input data inversion */ +#define CRC_INPUTDATA_INVERSION_HALFWORD CRC_CR_REV_IN_1 /*!< HalfWord-wise input data inversion */ +#define CRC_INPUTDATA_INVERSION_WORD CRC_CR_REV_IN /*!< Word-wise input data inversion */ +/** + * @} + */ + +/** @defgroup CRCEx_Output_Data_Inversion Output Data Inversion Modes + * @{ + */ +#define CRC_OUTPUTDATA_INVERSION_DISABLE 0x00000000U /*!< No output data inversion */ +#define CRC_OUTPUTDATA_INVERSION_ENABLE CRC_CR_REV_OUT /*!< Bit-wise output data inversion */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup CRCEx_Exported_Macros CRC Extended Exported Macros + * @{ + */ + +/** + * @brief Set CRC output reversal + * @param __HANDLE__ CRC handle + * @retval None + */ +#define __HAL_CRC_OUTPUTREVERSAL_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= CRC_CR_REV_OUT) + +/** + * @brief Unset CRC output reversal + * @param __HANDLE__ CRC handle + * @retval None + */ +#define __HAL_CRC_OUTPUTREVERSAL_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~(CRC_CR_REV_OUT)) + +/** + * @brief Set CRC non-default polynomial + * @param __HANDLE__ CRC handle + * @param __POLYNOMIAL__ 7, 8, 16 or 32-bit polynomial + * @retval None + */ +#define __HAL_CRC_POLYNOMIAL_CONFIG(__HANDLE__, __POLYNOMIAL__) ((__HANDLE__)->Instance->POL = (__POLYNOMIAL__)) + +/** + * @} + */ + +/* Private macros --------------------------------------------------------*/ +/** @defgroup CRCEx_Private_Macros CRC Extended Private Macros + * @{ + */ + +#define IS_CRC_INPUTDATA_INVERSION_MODE(MODE) (((MODE) == CRC_INPUTDATA_INVERSION_NONE) || \ + ((MODE) == CRC_INPUTDATA_INVERSION_BYTE) || \ + ((MODE) == CRC_INPUTDATA_INVERSION_HALFWORD) || \ + ((MODE) == CRC_INPUTDATA_INVERSION_WORD)) + +#define IS_CRC_OUTPUTDATA_INVERSION_MODE(MODE) (((MODE) == CRC_OUTPUTDATA_INVERSION_DISABLE) || \ + ((MODE) == CRC_OUTPUTDATA_INVERSION_ENABLE)) + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup CRCEx_Exported_Functions + * @{ + */ + +/** @addtogroup CRCEx_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions ****************************/ +HAL_StatusTypeDef HAL_CRCEx_Polynomial_Set(CRC_HandleTypeDef *hcrc, uint32_t Pol, uint32_t 
PolyLength); +HAL_StatusTypeDef HAL_CRCEx_Input_Data_Reverse(CRC_HandleTypeDef *hcrc, uint32_t InputReverseMode); +HAL_StatusTypeDef HAL_CRCEx_Output_Data_Reverse(CRC_HandleTypeDef *hcrc, uint32_t OutputReverseMode); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_CRC_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dac.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dac.h new file mode 100644 index 000000000..b86205d5a --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dac.h @@ -0,0 +1,448 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dac.h + * @author MCD Application Team + * @brief Header file of DAC HAL module. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_DAC_H +#define __STM32F7xx_HAL_DAC_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup DAC + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup DAC_Exported_Types DAC Exported Types + * @{ + */ + +/** + * @brief HAL State structures definition + */ +typedef enum +{ + HAL_DAC_STATE_RESET = 0x00U, /*!< DAC not yet initialized or disabled */ + HAL_DAC_STATE_READY = 0x01U, /*!< DAC initialized and ready for use */ + HAL_DAC_STATE_BUSY = 0x02U, /*!< DAC internal processing is ongoing */ + HAL_DAC_STATE_TIMEOUT = 0x03U, /*!< DAC timeout state */ + HAL_DAC_STATE_ERROR = 0x04U /*!< DAC error state */ +}HAL_DAC_StateTypeDef; + +/** + * @brief DAC handle Structure definition + */ +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) +typedef struct __DAC_HandleTypeDef +#else +typedef struct +#endif +{ + DAC_TypeDef *Instance; /*!< Register base address */ + + __IO HAL_DAC_StateTypeDef State; /*!< DAC communication state */ + + HAL_LockTypeDef Lock; /*!< DAC locking object */ + + DMA_HandleTypeDef *DMA_Handle1; /*!< Pointer DMA handler for channel 1 */ + + DMA_HandleTypeDef *DMA_Handle2; /*!< Pointer DMA handler for channel 2 */ + + __IO uint32_t ErrorCode; /*!< DAC Error code */ +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + void (* ConvCpltCallbackCh1) (struct __DAC_HandleTypeDef *hdac); + void (* ConvHalfCpltCallbackCh1) (struct __DAC_HandleTypeDef *hdac); + void (* ErrorCallbackCh1) (struct __DAC_HandleTypeDef *hdac); + void (* DMAUnderrunCallbackCh1) (struct __DAC_HandleTypeDef *hdac); + void (* ConvCpltCallbackCh2) (struct __DAC_HandleTypeDef* hdac); + void (* ConvHalfCpltCallbackCh2) (struct __DAC_HandleTypeDef* hdac); + void (* ErrorCallbackCh2) (struct __DAC_HandleTypeDef* hdac); + void (* DMAUnderrunCallbackCh2) (struct __DAC_HandleTypeDef* hdac); + + void (* MspInitCallback) (struct __DAC_HandleTypeDef *hdac); + void (* MspDeInitCallback ) (struct __DAC_HandleTypeDef *hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + +}DAC_HandleTypeDef; + +/** + * @brief DAC Configuration regular Channel structure definition + */ +typedef struct +{ + uint32_t DAC_Trigger; /*!< Specifies the external trigger for the selected DAC channel. + This parameter can be a value of @ref DAC_trigger_selection */ + + uint32_t DAC_OutputBuffer; /*!< Specifies whether the DAC channel output buffer is enabled or disabled. 
+ This parameter can be a value of @ref DAC_output_buffer */ +}DAC_ChannelConfTypeDef; + +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) +/** + * @brief HAL DAC Callback ID enumeration definition + */ +typedef enum +{ + HAL_DAC_CH1_COMPLETE_CB_ID = 0x00U, /*!< DAC CH1 Complete Callback ID */ + HAL_DAC_CH1_HALF_COMPLETE_CB_ID = 0x01U, /*!< DAC CH1 half Complete Callback ID */ + HAL_DAC_CH1_ERROR_ID = 0x02U, /*!< DAC CH1 error Callback ID */ + HAL_DAC_CH1_UNDERRUN_CB_ID = 0x03U, /*!< DAC CH1 underrun Callback ID */ + HAL_DAC_CH2_COMPLETE_CB_ID = 0x04U, /*!< DAC CH2 Complete Callback ID */ + HAL_DAC_CH2_HALF_COMPLETE_CB_ID = 0x05U, /*!< DAC CH2 half Complete Callback ID */ + HAL_DAC_CH2_ERROR_ID = 0x06U, /*!< DAC CH2 error Callback ID */ + HAL_DAC_CH2_UNDERRUN_CB_ID = 0x07U, /*!< DAC CH2 underrun Callback ID */ + HAL_DAC_MSP_INIT_CB_ID = 0x08U, /*!< DAC MspInit Callback ID */ + HAL_DAC_MSP_DEINIT_CB_ID = 0x09U, /*!< DAC MspDeInit Callback ID */ + HAL_DAC_ALL_CB_ID = 0x0AU /*!< DAC All ID */ +}HAL_DAC_CallbackIDTypeDef; + +/** + * @brief HAL DAC Callback pointer definition + */ +typedef void (*pDAC_CallbackTypeDef)(DAC_HandleTypeDef *hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup DAC_Exported_Constants DAC Exported Constants + * @{ + */ + +/** @defgroup DAC_Error_Code DAC Error Code + * @{ + */ +#define HAL_DAC_ERROR_NONE 0x00U /*!< No error */ +#define HAL_DAC_ERROR_DMAUNDERRUNCH1 0x01U /*!< DAC channel1 DAM underrun error */ +#define HAL_DAC_ERROR_DMAUNDERRUNCH2 0x02U /*!< DAC channel2 DAM underrun error */ +#define HAL_DAC_ERROR_DMA 0x04U /*!< DMA error */ +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) +#define HAL_DAC_ERROR_INVALID_CALLBACK 0x10U /*!< Invalid callback error */ +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup DAC_trigger_selection DAC Trigger Selection + * @{ + */ + +#define DAC_TRIGGER_NONE ((uint32_t)0x00000000U) /*!< Conversion is automatic once the DAC1_DHRxxxx register + has been loaded, and not by external trigger */ +#define DAC_TRIGGER_T2_TRGO ((uint32_t)(DAC_CR_TSEL1_2 | DAC_CR_TEN1)) /*!< TIM2 TRGO selected as external conversion trigger for DAC channel */ +#define DAC_TRIGGER_T4_TRGO ((uint32_t)(DAC_CR_TSEL1_2 | DAC_CR_TSEL1_0 | DAC_CR_TEN1)) /*!< TIM4 TRGO selected as external conversion trigger for DAC channel */ +#define DAC_TRIGGER_T5_TRGO ((uint32_t)(DAC_CR_TSEL1_1 | DAC_CR_TSEL1_0 | DAC_CR_TEN1)) /*!< TIM5 TRGO selected as external conversion trigger for DAC channel */ +#define DAC_TRIGGER_T6_TRGO ((uint32_t)DAC_CR_TEN1) /*!< TIM6 TRGO selected as external conversion trigger for DAC channel */ +#define DAC_TRIGGER_T7_TRGO ((uint32_t)(DAC_CR_TSEL1_1 | DAC_CR_TEN1)) /*!< TIM7 TRGO selected as external conversion trigger for DAC channel */ +#define DAC_TRIGGER_T8_TRGO ((uint32_t)(DAC_CR_TSEL1_0 | DAC_CR_TEN1)) /*!< TIM8 TRGO selected as external conversion trigger for DAC channel */ + +#define DAC_TRIGGER_EXT_IT9 ((uint32_t)(DAC_CR_TSEL1_2 | DAC_CR_TSEL1_1 | DAC_CR_TEN1)) /*!< EXTI Line9 event selected as external conversion trigger for DAC channel */ +#define DAC_TRIGGER_SOFTWARE ((uint32_t)(DAC_CR_TSEL1 | DAC_CR_TEN1)) /*!< Conversion started by software trigger for DAC channel */ +/** + * @} + */ + +/** @defgroup DAC_output_buffer DAC Output Buffer + * @{ + */ +#define DAC_OUTPUTBUFFER_ENABLE ((uint32_t)0x00000000U) +#define DAC_OUTPUTBUFFER_DISABLE ((uint32_t)DAC_CR_BOFF1) +/** + * @} + */ + +/** @defgroup 
DAC_Channel_selection DAC Channel Selection + * @{ + */ +#define DAC_CHANNEL_1 ((uint32_t)0x00000000U) +#define DAC_CHANNEL_2 ((uint32_t)0x00000010U) +/** + * @} + */ + +/** @defgroup DAC_data_alignment DAC Data Alignment + * @{ + */ +#define DAC_ALIGN_12B_R ((uint32_t)0x00000000U) +#define DAC_ALIGN_12B_L ((uint32_t)0x00000004U) +#define DAC_ALIGN_8B_R ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup DAC_flags_definition DAC Flags Definition + * @{ + */ +#define DAC_FLAG_DMAUDR1 ((uint32_t)DAC_SR_DMAUDR1) +#define DAC_FLAG_DMAUDR2 ((uint32_t)DAC_SR_DMAUDR2) +/** + * @} + */ + +/** @defgroup DAC_IT_definition DAC IT Definition + * @{ + */ +#define DAC_IT_DMAUDR1 ((uint32_t)DAC_SR_DMAUDR1) +#define DAC_IT_DMAUDR2 ((uint32_t)DAC_SR_DMAUDR2) +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup DAC_Exported_Macros DAC Exported Macros + * @{ + */ + +/** @brief Reset DAC handle state + * @param __HANDLE__: specifies the DAC handle. + * @retval None + */ +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) +#define __HAL_DAC_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_DAC_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_DAC_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_DAC_STATE_RESET) +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + +/** @brief Enable the DAC channel + * @param __HANDLE__: specifies the DAC handle. + * @param __DAC_CHANNEL__: specifies the DAC channel + * @retval None + */ +#define __HAL_DAC_ENABLE(__HANDLE__, __DAC_CHANNEL__) \ +((__HANDLE__)->Instance->CR |= (DAC_CR_EN1 << (__DAC_CHANNEL__))) + +/** @brief Disable the DAC channel + * @param __HANDLE__: specifies the DAC handle + * @param __DAC_CHANNEL__: specifies the DAC channel. + * @retval None + */ +#define __HAL_DAC_DISABLE(__HANDLE__, __DAC_CHANNEL__) \ +((__HANDLE__)->Instance->CR &= ~(DAC_CR_EN1 << (__DAC_CHANNEL__))) + + +/** @brief Enable the DAC interrupt + * @param __HANDLE__: specifies the DAC handle + * @param __INTERRUPT__: specifies the DAC interrupt. + * @retval None + */ +#define __HAL_DAC_ENABLE_IT(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->CR) |= (__INTERRUPT__)) + +/** @brief Disable the DAC interrupt + * @param __HANDLE__: specifies the DAC handle + * @param __INTERRUPT__: specifies the DAC interrupt. + * @retval None + */ +#define __HAL_DAC_DISABLE_IT(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->CR) &= ~(__INTERRUPT__)) + +/** @brief Checks if the specified DAC interrupt source is enabled or disabled. + * @param __HANDLE__: DAC handle + * @param __INTERRUPT__: DAC interrupt source to check + * This parameter can be any combination of the following values: + * @arg DAC_IT_DMAUDR1: DAC channel 1 DMA underrun interrupt + * @arg DAC_IT_DMAUDR2: DAC channel 2 DMA underrun interrupt + * @retval State of interruption (SET or RESET) + */ +#define __HAL_DAC_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->CR & (__INTERRUPT__)) == (__INTERRUPT__)) + +/** @brief Get the selected DAC's flag status. + * @param __HANDLE__: specifies the DAC handle. + * @param __FLAG__: specifies the flag to clear. 
+ * This parameter can be any combination of the following values: + * @arg DAC_FLAG_DMAUDR1: DMA underrun 1 flag + * @arg DAC_FLAG_DMAUDR2: DMA underrun 2 flag + * @retval None + */ +#define __HAL_DAC_GET_FLAG(__HANDLE__, __FLAG__) ((((__HANDLE__)->Instance->SR) & (__FLAG__)) == (__FLAG__)) + +/** @brief Clear the DAC's flag. + * @param __HANDLE__: specifies the DAC handle. + * @param __FLAG__: specifies the flag to clear. + * This parameter can be any combination of the following values: + * @arg DAC_FLAG_DMAUDR1: DMA underrun 1 flag + * @arg DAC_FLAG_DMAUDR2: DMA underrun 2 flag + * @retval None + */ +#define __HAL_DAC_CLEAR_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->SR) = (__FLAG__)) +/** + * @} + */ + +/* Include DAC HAL Extension module */ +#include "stm32f7xx_hal_dac_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup DAC_Exported_Functions + * @{ + */ + +/** @addtogroup DAC_Exported_Functions_Group1 + * @{ + */ +/* Initialization/de-initialization functions *********************************/ +HAL_StatusTypeDef HAL_DAC_Init(DAC_HandleTypeDef* hdac); +HAL_StatusTypeDef HAL_DAC_DeInit(DAC_HandleTypeDef* hdac); +void HAL_DAC_MspInit(DAC_HandleTypeDef* hdac); +void HAL_DAC_MspDeInit(DAC_HandleTypeDef* hdac); +/** + * @} + */ + +/** @addtogroup DAC_Exported_Functions_Group2 + * @{ + */ +/* I/O operation functions ****************************************************/ +HAL_StatusTypeDef HAL_DAC_Start(DAC_HandleTypeDef* hdac, uint32_t Channel); +HAL_StatusTypeDef HAL_DAC_Stop(DAC_HandleTypeDef* hdac, uint32_t Channel); +HAL_StatusTypeDef HAL_DAC_Start_DMA(DAC_HandleTypeDef* hdac, uint32_t Channel, uint32_t* pData, uint32_t Length, uint32_t Alignment); +HAL_StatusTypeDef HAL_DAC_Stop_DMA(DAC_HandleTypeDef* hdac, uint32_t Channel); +uint32_t HAL_DAC_GetValue(DAC_HandleTypeDef* hdac, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup DAC_Exported_Functions_Group3 + * @{ + */ +/* Peripheral Control functions ***********************************************/ +HAL_StatusTypeDef HAL_DAC_ConfigChannel(DAC_HandleTypeDef* hdac, DAC_ChannelConfTypeDef* sConfig, uint32_t Channel); +HAL_StatusTypeDef HAL_DAC_SetValue(DAC_HandleTypeDef* hdac, uint32_t Channel, uint32_t Alignment, uint32_t Data); +/** + * @} + */ + +/** @addtogroup DAC_Exported_Functions_Group4 + * @{ + */ +/* Peripheral State functions *************************************************/ +HAL_DAC_StateTypeDef HAL_DAC_GetState(DAC_HandleTypeDef* hdac); +void HAL_DAC_IRQHandler(DAC_HandleTypeDef* hdac); +uint32_t HAL_DAC_GetError(DAC_HandleTypeDef *hdac); + +void HAL_DAC_ConvCpltCallbackCh1(DAC_HandleTypeDef* hdac); +void HAL_DAC_ConvHalfCpltCallbackCh1(DAC_HandleTypeDef* hdac); +void HAL_DAC_ErrorCallbackCh1(DAC_HandleTypeDef *hdac); +void HAL_DAC_DMAUnderrunCallbackCh1(DAC_HandleTypeDef *hdac); +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) +/* DAC callback registering/unregistering */ +HAL_StatusTypeDef HAL_DAC_RegisterCallback (DAC_HandleTypeDef *hdac, HAL_DAC_CallbackIDTypeDef CallbackID, pDAC_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_DAC_UnRegisterCallback (DAC_HandleTypeDef *hdac, HAL_DAC_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup 
DAC_Private_Constants DAC Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup DAC_Private_Macros DAC Private Macros + * @{ + */ +#define IS_DAC_DATA(DATA) ((DATA) <= 0xFFF0U) +#define IS_DAC_ALIGN(ALIGN) (((ALIGN) == DAC_ALIGN_12B_R) || \ + ((ALIGN) == DAC_ALIGN_12B_L) || \ + ((ALIGN) == DAC_ALIGN_8B_R)) +#define IS_DAC_CHANNEL(CHANNEL) (((CHANNEL) == DAC_CHANNEL_1) || \ + ((CHANNEL) == DAC_CHANNEL_2)) +#define IS_DAC_OUTPUT_BUFFER_STATE(STATE) (((STATE) == DAC_OUTPUTBUFFER_ENABLE) || \ + ((STATE) == DAC_OUTPUTBUFFER_DISABLE)) + +#define IS_DAC_TRIGGER(TRIGGER) (((TRIGGER) == DAC_TRIGGER_NONE) || \ + ((TRIGGER) == DAC_TRIGGER_T2_TRGO) || \ + ((TRIGGER) == DAC_TRIGGER_T8_TRGO) || \ + ((TRIGGER) == DAC_TRIGGER_T7_TRGO) || \ + ((TRIGGER) == DAC_TRIGGER_T5_TRGO) || \ + ((TRIGGER) == DAC_TRIGGER_T6_TRGO) || \ + ((TRIGGER) == DAC_TRIGGER_T4_TRGO) || \ + ((TRIGGER) == DAC_TRIGGER_EXT_IT9) || \ + ((TRIGGER) == DAC_TRIGGER_SOFTWARE)) + +/** @brief Set DHR12R1 alignment + * @param __ALIGNMENT__: specifies the DAC alignment + * @retval None + */ +#define DAC_DHR12R1_ALIGNMENT(__ALIGNMENT__) (((uint32_t)0x00000008U) + (__ALIGNMENT__)) + +/** @brief Set DHR12R2 alignment + * @param __ALIGNMENT__: specifies the DAC alignment + * @retval None + */ +#define DAC_DHR12R2_ALIGNMENT(__ALIGNMENT__) (((uint32_t)0x00000014U) + (__ALIGNMENT__)) + +/** @brief Set DHR12RD alignment + * @param __ALIGNMENT__: specifies the DAC alignment + * @retval None + */ +#define DAC_DHR12RD_ALIGNMENT(__ALIGNMENT__) (((uint32_t)0x00000020U) + (__ALIGNMENT__)) + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup DAC_Private_Functions DAC Private Functions + * @{ + */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /*__STM32F7xx_HAL_DAC_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dac_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dac_ex.h new file mode 100644 index 000000000..2e3650d06 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dac_ex.h @@ -0,0 +1,173 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dac.h + * @author MCD Application Team + * @brief Header file of DAC HAL Extension module. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_DAC_EX_H +#define __STM32F7xx_HAL_DAC_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup DACEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup DACEx_Exported_Constants DAC Exported Constants + * @{ + */ + +/** @defgroup DACEx_lfsrunmask_triangleamplitude DAC LFS Run Mask Triangle Amplitude + * @{ + */ +#define DAC_LFSRUNMASK_BIT0 ((uint32_t)0x00000000U) /*!< Unmask DAC channel LFSR bit0 for noise wave generation */ +#define DAC_LFSRUNMASK_BITS1_0 ((uint32_t)DAC_CR_MAMP1_0) /*!< Unmask DAC channel LFSR bit[1:0] for noise wave generation */ +#define DAC_LFSRUNMASK_BITS2_0 ((uint32_t)DAC_CR_MAMP1_1) /*!< Unmask DAC channel LFSR bit[2:0] for noise wave generation */ +#define DAC_LFSRUNMASK_BITS3_0 ((uint32_t)DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0)/*!< Unmask DAC channel LFSR bit[3:0] for noise wave generation */ +#define DAC_LFSRUNMASK_BITS4_0 ((uint32_t)DAC_CR_MAMP1_2) /*!< Unmask DAC channel LFSR bit[4:0] for noise wave generation */ +#define DAC_LFSRUNMASK_BITS5_0 ((uint32_t)DAC_CR_MAMP1_2 | DAC_CR_MAMP1_0) /*!< Unmask DAC channel LFSR bit[5:0] for noise wave generation */ +#define DAC_LFSRUNMASK_BITS6_0 ((uint32_t)DAC_CR_MAMP1_2 | DAC_CR_MAMP1_1) /*!< Unmask DAC channel LFSR bit[6:0] for noise wave generation */ +#define DAC_LFSRUNMASK_BITS7_0 ((uint32_t)DAC_CR_MAMP1_2 | DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Unmask DAC channel LFSR bit[7:0] for noise wave generation */ +#define DAC_LFSRUNMASK_BITS8_0 ((uint32_t)DAC_CR_MAMP1_3) /*!< Unmask DAC channel LFSR bit[8:0] for noise wave generation */ +#define DAC_LFSRUNMASK_BITS9_0 ((uint32_t)DAC_CR_MAMP1_3 | DAC_CR_MAMP1_0) /*!< Unmask DAC channel LFSR bit[9:0] for noise wave generation */ +#define DAC_LFSRUNMASK_BITS10_0 ((uint32_t)DAC_CR_MAMP1_3 | DAC_CR_MAMP1_1) /*!< Unmask DAC channel LFSR bit[10:0] for noise wave generation */ +#define DAC_LFSRUNMASK_BITS11_0 ((uint32_t)DAC_CR_MAMP1_3 | DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Unmask DAC channel LFSR bit[11:0] for noise wave generation */ +#define DAC_TRIANGLEAMPLITUDE_1 ((uint32_t)0x00000000U) /*!< Select max triangle amplitude of 1 */ +#define DAC_TRIANGLEAMPLITUDE_3 ((uint32_t)DAC_CR_MAMP1_0) /*!< Select max triangle amplitude of 3 */ +#define DAC_TRIANGLEAMPLITUDE_7 ((uint32_t)DAC_CR_MAMP1_1) /*!< Select max triangle amplitude of 7 */ +#define DAC_TRIANGLEAMPLITUDE_15 ((uint32_t)DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Select max triangle amplitude of 15 */ +#define DAC_TRIANGLEAMPLITUDE_31 ((uint32_t)DAC_CR_MAMP1_2) /*!< Select max triangle amplitude of 31 */ +#define DAC_TRIANGLEAMPLITUDE_63 ((uint32_t)DAC_CR_MAMP1_2 | DAC_CR_MAMP1_0) /*!< Select max triangle amplitude of 63 */ +#define DAC_TRIANGLEAMPLITUDE_127 ((uint32_t)DAC_CR_MAMP1_2 | DAC_CR_MAMP1_1) /*!< Select max triangle amplitude of 127 */ +#define DAC_TRIANGLEAMPLITUDE_255 
((uint32_t)DAC_CR_MAMP1_2 | DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Select max triangle amplitude of 255 */ +#define DAC_TRIANGLEAMPLITUDE_511 ((uint32_t)DAC_CR_MAMP1_3) /*!< Select max triangle amplitude of 511 */ +#define DAC_TRIANGLEAMPLITUDE_1023 ((uint32_t)DAC_CR_MAMP1_3 | DAC_CR_MAMP1_0) /*!< Select max triangle amplitude of 1023 */ +#define DAC_TRIANGLEAMPLITUDE_2047 ((uint32_t)DAC_CR_MAMP1_3 | DAC_CR_MAMP1_1) /*!< Select max triangle amplitude of 2047 */ +#define DAC_TRIANGLEAMPLITUDE_4095 ((uint32_t)DAC_CR_MAMP1_3 | DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Select max triangle amplitude of 4095 */ +/** + * @} + */ + +/** + * @} + */ +/* Exported macro ------------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup DACEx_Exported_Functions + * @{ + */ + +/** @addtogroup DACEx_Exported_Functions_Group1 + * @{ + */ +/* Extension features functions ***********************************************/ +uint32_t HAL_DACEx_DualGetValue(DAC_HandleTypeDef* hdac); +HAL_StatusTypeDef HAL_DACEx_TriangleWaveGenerate(DAC_HandleTypeDef* hdac, uint32_t Channel, uint32_t Amplitude); +HAL_StatusTypeDef HAL_DACEx_NoiseWaveGenerate(DAC_HandleTypeDef* hdac, uint32_t Channel, uint32_t Amplitude); +HAL_StatusTypeDef HAL_DACEx_DualSetValue(DAC_HandleTypeDef* hdac, uint32_t Alignment, uint32_t Data1, uint32_t Data2); + +void HAL_DACEx_ConvCpltCallbackCh2(DAC_HandleTypeDef* hdac); +void HAL_DACEx_ConvHalfCpltCallbackCh2(DAC_HandleTypeDef* hdac); +void HAL_DACEx_ErrorCallbackCh2(DAC_HandleTypeDef* hdac); +void HAL_DACEx_DMAUnderrunCallbackCh2(DAC_HandleTypeDef* hdac); +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup DACEx_Private_Constants DAC Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup DACEx_Private_Macros DAC Private Macros + * @{ + */ +#define IS_DAC_LFSR_UNMASK_TRIANGLE_AMPLITUDE(VALUE) (((VALUE) == DAC_LFSRUNMASK_BIT0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS1_0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS2_0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS3_0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS4_0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS5_0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS6_0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS7_0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS8_0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS9_0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS10_0) || \ + ((VALUE) == DAC_LFSRUNMASK_BITS11_0) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_1) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_3) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_7) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_15) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_31) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_63) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_127) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_255) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_511) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_1023) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_2047) || \ + ((VALUE) == DAC_TRIANGLEAMPLITUDE_4095)) +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup DACEx_Private_Functions DAC Private Functions + * @{ + */ +void DAC_DMAConvCpltCh2(DMA_HandleTypeDef *hdma); +void 
DAC_DMAErrorCh2(DMA_HandleTypeDef *hdma); +void DAC_DMAHalfConvCpltCh2(DMA_HandleTypeDef *hdma); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /*__STM32F7xx_HAL_DAC_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_def.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_def.h new file mode 100644 index 000000000..334b42c7d --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_def.h @@ -0,0 +1,205 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_def.h + * @author MCD Application Team + * @brief This file contains HAL common defines, enumeration, macros and + * structures definitions. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_DEF +#define __STM32F7xx_HAL_DEF + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx.h" +#include "Legacy/stm32_hal_legacy.h" +#include + +/* Exported types ------------------------------------------------------------*/ + +/** + * @brief HAL Status structures definition + */ +typedef enum +{ + HAL_OK = 0x00U, + HAL_ERROR = 0x01U, + HAL_BUSY = 0x02U, + HAL_TIMEOUT = 0x03U +} HAL_StatusTypeDef; + +/** + * @brief HAL Lock structures definition + */ +typedef enum +{ + HAL_UNLOCKED = 0x00U, + HAL_LOCKED = 0x01U +} HAL_LockTypeDef; + +/* Exported macro ------------------------------------------------------------*/ + +#define UNUSED(X) (void)X /* To avoid gcc/g++ warnings */ + +#define HAL_MAX_DELAY 0xFFFFFFFFU + +#define HAL_IS_BIT_SET(REG, BIT) (((REG) & (BIT)) == (BIT)) +#define HAL_IS_BIT_CLR(REG, BIT) (((REG) & (BIT)) == 0U) + +#define __HAL_LINKDMA(__HANDLE__, __PPP_DMA_FIELD__, __DMA_HANDLE__) \ + do{ \ + (__HANDLE__)->__PPP_DMA_FIELD__ = &(__DMA_HANDLE__); \ + (__DMA_HANDLE__).Parent = (__HANDLE__); \ + } while(0) + +/** @brief Reset the Handle's State field. + * @param __HANDLE__ specifies the Peripheral Handle. + * @note This macro can be used for the following purpose: + * - When the Handle is declared as local variable; before passing it as parameter + * to HAL_PPP_Init() for the first time, it is mandatory to use this macro + * to set to 0 the Handle's "State" field. + * Otherwise, "State" field may have any random value and the first time the function + * HAL_PPP_Init() is called, the low level hardware initialization will be missed + * (i.e. HAL_PPP_MspInit() will not be executed). + * - When there is a need to reconfigure the low level hardware: instead of calling + * HAL_PPP_DeInit() then HAL_PPP_Init(), user can make a call to this macro then HAL_PPP_Init(). + * In this later function, when the Handle's "State" field is set to 0, it will execute the function + * HAL_PPP_MspInit() which will reconfigure the low level hardware. 
+ * @retval None + */ +#define __HAL_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = 0U) + +#if (USE_RTOS == 1U) + /* Reserved for future use */ + #error "USE_RTOS should be 0 in the current HAL release" +#else + #define __HAL_LOCK(__HANDLE__) \ + do{ \ + if((__HANDLE__)->Lock == HAL_LOCKED) \ + { \ + return HAL_BUSY; \ + } \ + else \ + { \ + (__HANDLE__)->Lock = HAL_LOCKED; \ + } \ + }while (0U) + + #define __HAL_UNLOCK(__HANDLE__) \ + do{ \ + (__HANDLE__)->Lock = HAL_UNLOCKED; \ + }while (0U) +#endif /* USE_RTOS */ + +#if defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ + #ifndef __weak + #define __weak __attribute__((weak)) + #endif /* __weak */ + #ifndef __packed + #define __packed __attribute__((__packed__)) + #endif /* __packed */ +#endif /* __GNUC__ */ + + +/* Macro to get variable aligned on 4-bytes, for __ICCARM__ the directive "#pragma data_alignment=4" must be used instead */ +#if defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ + #ifndef __ALIGN_END + #define __ALIGN_END __attribute__ ((aligned (4))) + #endif /* __ALIGN_END */ + #ifndef __ALIGN_BEGIN + #define __ALIGN_BEGIN + #endif /* __ALIGN_BEGIN */ +#else + #ifndef __ALIGN_END + #define __ALIGN_END + #endif /* __ALIGN_END */ + #ifndef __ALIGN_BEGIN + #if defined (__CC_ARM) /* ARM Compiler */ + #define __ALIGN_BEGIN __align(4) + #elif defined (__ICCARM__) /* IAR Compiler */ + #define __ALIGN_BEGIN + #endif /* __CC_ARM */ + #endif /* __ALIGN_BEGIN */ +#endif /* __GNUC__ */ + +/* Macro to get variable aligned on 32-bytes,needed for cache maintenance purpose */ +#if defined (__GNUC__) /* GNU Compiler */ + #define ALIGN_32BYTES(buf) buf __attribute__ ((aligned (32))) +#elif defined (__ICCARM__) /* IAR Compiler */ + #define ALIGN_32BYTES(buf) _Pragma("data_alignment=32") buf +#elif defined (__CC_ARM) /* ARM Compiler */ + #define ALIGN_32BYTES(buf) __align(32) buf +#endif + +/** + * @brief __RAM_FUNC definition + */ +#if defined ( __CC_ARM ) +/* ARM Compiler + ------------ + RAM functions are defined using the toolchain options. + Functions that are executed in RAM should reside in a separate source module. + Using the 'Options for File' dialog you can simply change the 'Code / Const' + area of a module to a memory space in physical RAM. + Available memory areas are declared in the 'Target' tab of the 'Options for Target' + dialog. +*/ +#define __RAM_FUNC + +#elif defined ( __ICCARM__ ) +/* ICCARM Compiler + --------------- + RAM functions are defined using a specific toolchain keyword "__ramfunc". +*/ +#define __RAM_FUNC __ramfunc + +#elif defined ( __GNUC__ ) +/* GNU Compiler + ------------ + RAM functions are defined using a specific toolchain attribute + "__attribute__((section(".RamFunc")))". 
+*/ +#define __RAM_FUNC __attribute__((section(".RamFunc"))) + +#endif + +/** + * @brief __NOINLINE definition + */ +#if defined ( __CC_ARM ) || defined ( __GNUC__ ) +/* ARM & GNUCompiler + ---------------- +*/ +#define __NOINLINE __attribute__ ( (noinline) ) + +#elif defined ( __ICCARM__ ) +/* ICCARM Compiler + --------------- +*/ +#define __NOINLINE _Pragma("optimize = no_inline") + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* ___STM32F7xx_HAL_DEF */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma.h new file mode 100644 index 000000000..99a36e879 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma.h @@ -0,0 +1,749 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dma.h + * @author MCD Application Team + * @brief Header file of DMA HAL module. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_DMA_H +#define __STM32F7xx_HAL_DMA_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup DMA + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Types DMA Exported Types + * @brief DMA Exported Types + * @{ + */ + +/** + * @brief DMA Configuration Structure definition + */ +typedef struct +{ + uint32_t Channel; /*!< Specifies the channel used for the specified stream. + This parameter can be a value of @ref DMAEx_Channel_selection */ + + uint32_t Direction; /*!< Specifies if the data will be transferred from memory to peripheral, + from memory to memory or from peripheral to memory. + This parameter can be a value of @ref DMA_Data_transfer_direction */ + + uint32_t PeriphInc; /*!< Specifies whether the Peripheral address register should be incremented or not. + This parameter can be a value of @ref DMA_Peripheral_incremented_mode */ + + uint32_t MemInc; /*!< Specifies whether the memory address register should be incremented or not. + This parameter can be a value of @ref DMA_Memory_incremented_mode */ + + uint32_t PeriphDataAlignment; /*!< Specifies the Peripheral data width. + This parameter can be a value of @ref DMA_Peripheral_data_size */ + + uint32_t MemDataAlignment; /*!< Specifies the Memory data width. + This parameter can be a value of @ref DMA_Memory_data_size */ + + uint32_t Mode; /*!< Specifies the operation mode of the DMAy Streamx. + This parameter can be a value of @ref DMA_mode + @note The circular buffer mode cannot be used if the memory-to-memory + data transfer is configured on the selected Stream */ + + uint32_t Priority; /*!< Specifies the software priority for the DMAy Streamx. + This parameter can be a value of @ref DMA_Priority_level */ + + uint32_t FIFOMode; /*!< Specifies if the FIFO mode or Direct mode will be used for the specified stream. + This parameter can be a value of @ref DMA_FIFO_direct_mode + @note The Direct mode (FIFO mode disabled) cannot be used if the + memory-to-memory data transfer is configured on the selected stream */ + + uint32_t FIFOThreshold; /*!< Specifies the FIFO threshold level. + This parameter can be a value of @ref DMA_FIFO_threshold_level */ + + uint32_t MemBurst; /*!< Specifies the Burst transfer configuration for the memory transfers. + It specifies the amount of data to be transferred in a single non interruptible + transaction. + This parameter can be a value of @ref DMA_Memory_burst + @note The burst mode is possible only if the address Increment mode is enabled. */ + + uint32_t PeriphBurst; /*!< Specifies the Burst transfer configuration for the peripheral transfers. + It specifies the amount of data to be transferred in a single non interruptible + transaction. + This parameter can be a value of @ref DMA_Peripheral_burst + @note The burst mode is possible only if the address Increment mode is enabled. 
*/ +}DMA_InitTypeDef; + +/** + * @brief HAL DMA State structures definition + */ +typedef enum +{ + HAL_DMA_STATE_RESET = 0x00U, /*!< DMA not yet initialized or disabled */ + HAL_DMA_STATE_READY = 0x01U, /*!< DMA initialized and ready for use */ + HAL_DMA_STATE_BUSY = 0x02U, /*!< DMA process is ongoing */ + HAL_DMA_STATE_TIMEOUT = 0x03U, /*!< DMA timeout state */ + HAL_DMA_STATE_ERROR = 0x04U, /*!< DMA error state */ + HAL_DMA_STATE_ABORT = 0x05U, /*!< DMA Abort state */ +}HAL_DMA_StateTypeDef; + +/** + * @brief HAL DMA Error Code structure definition + */ +typedef enum +{ + HAL_DMA_FULL_TRANSFER = 0x00U, /*!< Full transfer */ + HAL_DMA_HALF_TRANSFER = 0x01U, /*!< Half Transfer */ +}HAL_DMA_LevelCompleteTypeDef; + +/** + * @brief HAL DMA Error Code structure definition + */ +typedef enum +{ + HAL_DMA_XFER_CPLT_CB_ID = 0x00U, /*!< Full transfer */ + HAL_DMA_XFER_HALFCPLT_CB_ID = 0x01U, /*!< Half Transfer */ + HAL_DMA_XFER_M1CPLT_CB_ID = 0x02U, /*!< M1 Full Transfer */ + HAL_DMA_XFER_M1HALFCPLT_CB_ID = 0x03U, /*!< M1 Half Transfer */ + HAL_DMA_XFER_ERROR_CB_ID = 0x04U, /*!< Error */ + HAL_DMA_XFER_ABORT_CB_ID = 0x05U, /*!< Abort */ + HAL_DMA_XFER_ALL_CB_ID = 0x06U /*!< All */ +}HAL_DMA_CallbackIDTypeDef; + +/** + * @brief DMA handle Structure definition + */ +typedef struct __DMA_HandleTypeDef +{ + DMA_Stream_TypeDef *Instance; /*!< Register base address */ + + DMA_InitTypeDef Init; /*!< DMA communication parameters */ + + HAL_LockTypeDef Lock; /*!< DMA locking object */ + + __IO HAL_DMA_StateTypeDef State; /*!< DMA transfer state */ + + void *Parent; /*!< Parent object state */ + + void (* XferCpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer complete callback */ + + void (* XferHalfCpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA Half transfer complete callback */ + + void (* XferM1CpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer complete Memory1 callback */ + + void (* XferM1HalfCpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer Half complete Memory1 callback */ + + void (* XferErrorCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer error callback */ + + void (* XferAbortCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer Abort callback */ + + __IO uint32_t ErrorCode; /*!< DMA Error code */ + + uint32_t StreamBaseAddress; /*!< DMA Stream Base Address */ + + uint32_t StreamIndex; /*!< DMA Stream Index */ + +}DMA_HandleTypeDef; + +/** + * @} + */ + + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Constants DMA Exported Constants + * @brief DMA Exported constants + * @{ + */ + +/** @defgroup DMA_Error_Code DMA Error Code + * @brief DMA Error Code + * @{ + */ +#define HAL_DMA_ERROR_NONE 0x00000000U /*!< No error */ +#define HAL_DMA_ERROR_TE 0x00000001U /*!< Transfer error */ +#define HAL_DMA_ERROR_FE 0x00000002U /*!< FIFO error */ +#define HAL_DMA_ERROR_DME 0x00000004U /*!< Direct Mode error */ +#define HAL_DMA_ERROR_TIMEOUT 0x00000020U /*!< Timeout error */ +#define HAL_DMA_ERROR_PARAM 0x00000040U /*!< Parameter error */ +#define HAL_DMA_ERROR_NO_XFER 0x00000080U /*!< Abort requested with no Xfer ongoing */ +#define HAL_DMA_ERROR_NOT_SUPPORTED 0x00000100U /*!< Not supported mode */ +/** + * @} + */ + +/** @defgroup DMA_Data_transfer_direction DMA Data transfer direction + * @brief DMA data transfer direction + * @{ + */ +#define DMA_PERIPH_TO_MEMORY 0x00000000U /*!< Peripheral to memory direction */ +#define DMA_MEMORY_TO_PERIPH 
DMA_SxCR_DIR_0 /*!< Memory to peripheral direction */ +#define DMA_MEMORY_TO_MEMORY DMA_SxCR_DIR_1 /*!< Memory to memory direction */ +/** + * @} + */ + +/** @defgroup DMA_Peripheral_incremented_mode DMA Peripheral incremented mode + * @brief DMA peripheral incremented mode + * @{ + */ +#define DMA_PINC_ENABLE DMA_SxCR_PINC /*!< Peripheral increment mode enable */ +#define DMA_PINC_DISABLE 0x00000000U /*!< Peripheral increment mode disable */ +/** + * @} + */ + +/** @defgroup DMA_Memory_incremented_mode DMA Memory incremented mode + * @brief DMA memory incremented mode + * @{ + */ +#define DMA_MINC_ENABLE DMA_SxCR_MINC /*!< Memory increment mode enable */ +#define DMA_MINC_DISABLE 0x00000000U /*!< Memory increment mode disable */ +/** + * @} + */ + +/** @defgroup DMA_Peripheral_data_size DMA Peripheral data size + * @brief DMA peripheral data size + * @{ + */ +#define DMA_PDATAALIGN_BYTE 0x00000000U /*!< Peripheral data alignment: Byte */ +#define DMA_PDATAALIGN_HALFWORD DMA_SxCR_PSIZE_0 /*!< Peripheral data alignment: HalfWord */ +#define DMA_PDATAALIGN_WORD DMA_SxCR_PSIZE_1 /*!< Peripheral data alignment: Word */ +/** + * @} + */ + +/** @defgroup DMA_Memory_data_size DMA Memory data size + * @brief DMA memory data size + * @{ + */ +#define DMA_MDATAALIGN_BYTE 0x00000000U /*!< Memory data alignment: Byte */ +#define DMA_MDATAALIGN_HALFWORD DMA_SxCR_MSIZE_0 /*!< Memory data alignment: HalfWord */ +#define DMA_MDATAALIGN_WORD DMA_SxCR_MSIZE_1 /*!< Memory data alignment: Word */ +/** + * @} + */ + +/** @defgroup DMA_mode DMA mode + * @brief DMA mode + * @{ + */ +#define DMA_NORMAL 0x00000000U /*!< Normal mode */ +#define DMA_CIRCULAR DMA_SxCR_CIRC /*!< Circular mode */ +#define DMA_PFCTRL DMA_SxCR_PFCTRL /*!< Peripheral flow control mode */ +/** + * @} + */ + +/** @defgroup DMA_Priority_level DMA Priority level + * @brief DMA priority levels + * @{ + */ +#define DMA_PRIORITY_LOW 0x00000000U /*!< Priority level: Low */ +#define DMA_PRIORITY_MEDIUM DMA_SxCR_PL_0 /*!< Priority level: Medium */ +#define DMA_PRIORITY_HIGH DMA_SxCR_PL_1 /*!< Priority level: High */ +#define DMA_PRIORITY_VERY_HIGH DMA_SxCR_PL /*!< Priority level: Very High */ +/** + * @} + */ + +/** @defgroup DMA_FIFO_direct_mode DMA FIFO direct mode + * @brief DMA FIFO direct mode + * @{ + */ +#define DMA_FIFOMODE_DISABLE 0x00000000U /*!< FIFO mode disable */ +#define DMA_FIFOMODE_ENABLE DMA_SxFCR_DMDIS /*!< FIFO mode enable */ +/** + * @} + */ + +/** @defgroup DMA_FIFO_threshold_level DMA FIFO threshold level + * @brief DMA FIFO level + * @{ + */ +#define DMA_FIFO_THRESHOLD_1QUARTERFULL 0x00000000U /*!< FIFO threshold 1 quart full configuration */ +#define DMA_FIFO_THRESHOLD_HALFFULL DMA_SxFCR_FTH_0 /*!< FIFO threshold half full configuration */ +#define DMA_FIFO_THRESHOLD_3QUARTERSFULL DMA_SxFCR_FTH_1 /*!< FIFO threshold 3 quarts full configuration */ +#define DMA_FIFO_THRESHOLD_FULL DMA_SxFCR_FTH /*!< FIFO threshold full configuration */ +/** + * @} + */ + +/** @defgroup DMA_Memory_burst DMA Memory burst + * @brief DMA memory burst + * @{ + */ +#define DMA_MBURST_SINGLE 0x00000000U +#define DMA_MBURST_INC4 DMA_SxCR_MBURST_0 +#define DMA_MBURST_INC8 DMA_SxCR_MBURST_1 +#define DMA_MBURST_INC16 DMA_SxCR_MBURST +/** + * @} + */ + +/** @defgroup DMA_Peripheral_burst DMA Peripheral burst + * @brief DMA peripheral burst + * @{ + */ +#define DMA_PBURST_SINGLE 0x00000000U +#define DMA_PBURST_INC4 DMA_SxCR_PBURST_0 +#define DMA_PBURST_INC8 DMA_SxCR_PBURST_1 +#define DMA_PBURST_INC16 DMA_SxCR_PBURST +/** + * @} + */ + +/** @defgroup 
DMA_interrupt_enable_definitions DMA interrupt enable definitions + * @brief DMA interrupts definition + * @{ + */ +#define DMA_IT_TC DMA_SxCR_TCIE +#define DMA_IT_HT DMA_SxCR_HTIE +#define DMA_IT_TE DMA_SxCR_TEIE +#define DMA_IT_DME DMA_SxCR_DMEIE +#define DMA_IT_FE 0x00000080U +/** + * @} + */ + +/** @defgroup DMA_flag_definitions DMA flag definitions + * @brief DMA flag definitions + * @{ + */ +#define DMA_FLAG_FEIF0_4 0x00000001U +#define DMA_FLAG_DMEIF0_4 0x00000004U +#define DMA_FLAG_TEIF0_4 0x00000008U +#define DMA_FLAG_HTIF0_4 0x00000010U +#define DMA_FLAG_TCIF0_4 0x00000020U +#define DMA_FLAG_FEIF1_5 0x00000040U +#define DMA_FLAG_DMEIF1_5 0x00000100U +#define DMA_FLAG_TEIF1_5 0x00000200U +#define DMA_FLAG_HTIF1_5 0x00000400U +#define DMA_FLAG_TCIF1_5 0x00000800U +#define DMA_FLAG_FEIF2_6 0x00010000U +#define DMA_FLAG_DMEIF2_6 0x00040000U +#define DMA_FLAG_TEIF2_6 0x00080000U +#define DMA_FLAG_HTIF2_6 0x00100000U +#define DMA_FLAG_TCIF2_6 0x00200000U +#define DMA_FLAG_FEIF3_7 0x00400000U +#define DMA_FLAG_DMEIF3_7 0x01000000U +#define DMA_FLAG_TEIF3_7 0x02000000U +#define DMA_FLAG_HTIF3_7 0x04000000U +#define DMA_FLAG_TCIF3_7 0x08000000U +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ + +/** @brief Reset DMA handle state + * @param __HANDLE__ specifies the DMA handle. + * @retval None + */ +#define __HAL_DMA_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_DMA_STATE_RESET) + +/** + * @brief Return the current DMA Stream FIFO filled level. + * @param __HANDLE__ DMA handle + * @retval The FIFO filling state. + * - DMA_FIFOStatus_Less1QuarterFull: when FIFO is less than 1 quarter-full + * and not empty. + * - DMA_FIFOStatus_1QuarterFull: if more than 1 quarter-full. + * - DMA_FIFOStatus_HalfFull: if more than 1 half-full. + * - DMA_FIFOStatus_3QuartersFull: if more than 3 quarters-full. + * - DMA_FIFOStatus_Empty: when FIFO is empty + * - DMA_FIFOStatus_Full: when FIFO is full + */ +#define __HAL_DMA_GET_FS(__HANDLE__) (((__HANDLE__)->Instance->FCR & (DMA_SxFCR_FS))) + +/** + * @brief Enable the specified DMA Stream. + * @param __HANDLE__ DMA handle + * @retval None + */ +#define __HAL_DMA_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= DMA_SxCR_EN) + +/** + * @brief Disable the specified DMA Stream. + * @param __HANDLE__ DMA handle + * @retval None + */ +#define __HAL_DMA_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~DMA_SxCR_EN) + +/* Interrupt & Flag management */ + +/** + * @brief Return the current DMA Stream transfer complete flag. + * @param __HANDLE__ DMA handle + * @retval The specified transfer complete flag index. + */ +#define __HAL_DMA_GET_TC_FLAG_INDEX(__HANDLE__) \ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? 
DMA_FLAG_TCIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_TCIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_TCIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_TCIF2_6 :\ + DMA_FLAG_TCIF3_7) + +/** + * @brief Return the current DMA Stream half transfer complete flag. + * @param __HANDLE__ DMA handle + * @retval The specified half transfer complete flag index. + */ +#define __HAL_DMA_GET_HT_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_HTIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_HTIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_HTIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_HTIF2_6 :\ + DMA_FLAG_HTIF3_7) + +/** + * @brief Return the current DMA Stream transfer error flag. + * @param __HANDLE__ DMA handle + * @retval The specified transfer error flag index. + */ +#define __HAL_DMA_GET_TE_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_TEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_TEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_TEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_TEIF2_6 :\ + DMA_FLAG_TEIF3_7) + +/** + * @brief Return the current DMA Stream FIFO error flag. + * @param __HANDLE__ DMA handle + * @retval The specified FIFO error flag index. + */ +#define __HAL_DMA_GET_FE_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? 
DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_FEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_FEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_FEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_FEIF2_6 :\ + DMA_FLAG_FEIF3_7) + +/** + * @brief Return the current DMA Stream direct mode error flag. + * @param __HANDLE__ DMA handle + * @retval The specified direct mode error flag index. + */ +#define __HAL_DMA_GET_DME_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_DMEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_DMEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_DMEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_DMEIF2_6 :\ + DMA_FLAG_DMEIF3_7) + +/** + * @brief Get the DMA Stream pending flags. + * @param __HANDLE__ DMA handle + * @param __FLAG__ Get the specified flag. + * This parameter can be any combination of the following values: + * @arg DMA_FLAG_TCIFx: Transfer complete flag. + * @arg DMA_FLAG_HTIFx: Half transfer complete flag. + * @arg DMA_FLAG_TEIFx: Transfer error flag. + * @arg DMA_FLAG_DMEIFx: Direct mode error flag. + * @arg DMA_FLAG_FEIFx: FIFO error flag. + * Where x can be 0_4, 1_5, 2_6 or 3_7 to select the DMA Stream flag. + * @retval The state of FLAG (SET or RESET). + */ +#define __HAL_DMA_GET_FLAG(__HANDLE__, __FLAG__)\ +(((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA2_Stream3)? (DMA2->HISR & (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream7)? (DMA2->LISR & (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream3)? (DMA1->HISR & (__FLAG__)) : (DMA1->LISR & (__FLAG__))) + +/** + * @brief Clear the DMA Stream pending flags. + * @param __HANDLE__ DMA handle + * @param __FLAG__ specifies the flag to clear. + * This parameter can be any combination of the following values: + * @arg DMA_FLAG_TCIFx: Transfer complete flag. + * @arg DMA_FLAG_HTIFx: Half transfer complete flag. + * @arg DMA_FLAG_TEIFx: Transfer error flag. + * @arg DMA_FLAG_DMEIFx: Direct mode error flag. + * @arg DMA_FLAG_FEIFx: FIFO error flag. + * Where x can be 0_4, 1_5, 2_6 or 3_7 to select the DMA Stream flag. + * @retval None + */ +#define __HAL_DMA_CLEAR_FLAG(__HANDLE__, __FLAG__) \ +(((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA2_Stream3)? 
(DMA2->HIFCR = (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream7)? (DMA2->LIFCR = (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream3)? (DMA1->HIFCR = (__FLAG__)) : (DMA1->LIFCR = (__FLAG__))) + +/** + * @brief Enable the specified DMA Stream interrupts. + * @param __HANDLE__ DMA handle + * @param __INTERRUPT__ specifies the DMA interrupt sources to be enabled or disabled. + * This parameter can be one of the following values: + * @arg DMA_IT_TC: Transfer complete interrupt mask. + * @arg DMA_IT_HT: Half transfer complete interrupt mask. + * @arg DMA_IT_TE: Transfer error interrupt mask. + * @arg DMA_IT_FE: FIFO error interrupt mask. + * @arg DMA_IT_DME: Direct mode error interrupt. + * @retval None + */ +#define __HAL_DMA_ENABLE_IT(__HANDLE__, __INTERRUPT__) (((__INTERRUPT__) != DMA_IT_FE)? \ +((__HANDLE__)->Instance->CR |= (__INTERRUPT__)) : ((__HANDLE__)->Instance->FCR |= (__INTERRUPT__))) + +/** + * @brief Disable the specified DMA Stream interrupts. + * @param __HANDLE__ DMA handle + * @param __INTERRUPT__ specifies the DMA interrupt sources to be enabled or disabled. + * This parameter can be one of the following values: + * @arg DMA_IT_TC: Transfer complete interrupt mask. + * @arg DMA_IT_HT: Half transfer complete interrupt mask. + * @arg DMA_IT_TE: Transfer error interrupt mask. + * @arg DMA_IT_FE: FIFO error interrupt mask. + * @arg DMA_IT_DME: Direct mode error interrupt. + * @retval None + */ +#define __HAL_DMA_DISABLE_IT(__HANDLE__, __INTERRUPT__) (((__INTERRUPT__) != DMA_IT_FE)? \ +((__HANDLE__)->Instance->CR &= ~(__INTERRUPT__)) : ((__HANDLE__)->Instance->FCR &= ~(__INTERRUPT__))) + +/** + * @brief Check whether the specified DMA Stream interrupt is enabled or not. + * @param __HANDLE__ DMA handle + * @param __INTERRUPT__ specifies the DMA interrupt source to check. + * This parameter can be one of the following values: + * @arg DMA_IT_TC: Transfer complete interrupt mask. + * @arg DMA_IT_HT: Half transfer complete interrupt mask. + * @arg DMA_IT_TE: Transfer error interrupt mask. + * @arg DMA_IT_FE: FIFO error interrupt mask. + * @arg DMA_IT_DME: Direct mode error interrupt. + * @retval The state of DMA_IT. + */ +#define __HAL_DMA_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((__INTERRUPT__) != DMA_IT_FE)? \ + ((__HANDLE__)->Instance->CR & (__INTERRUPT__)) : \ + ((__HANDLE__)->Instance->FCR & (__INTERRUPT__))) + +/** + * @brief Writes the number of data units to be transferred on the DMA Stream. + * @param __HANDLE__ DMA handle + * @param __COUNTER__ Number of data units to be transferred (from 0 to 65535) + * Number of data items depends only on the Peripheral data format. + * + * @note If Peripheral data format is Bytes: number of data units is equal + * to total number of bytes to be transferred. + * + * @note If Peripheral data format is Half-Word: number of data units is + * equal to total number of bytes to be transferred / 2. + * + * @note If Peripheral data format is Word: number of data units is equal + * to total number of bytes to be transferred / 4. + * + * @retval The number of remaining data units in the current DMAy Streamx transfer. + */ +#define __HAL_DMA_SET_COUNTER(__HANDLE__, __COUNTER__) ((__HANDLE__)->Instance->NDTR = (uint16_t)(__COUNTER__)) + +/** + * @brief Returns the number of remaining data units in the current DMAy Streamx transfer. + * @param __HANDLE__ DMA handle + * + * @retval The number of remaining data units in the current DMA Stream transfer. 
+ */ +#define __HAL_DMA_GET_COUNTER(__HANDLE__) ((__HANDLE__)->Instance->NDTR) + + +/* Include DMA HAL Extension module */ +#include "stm32f7xx_hal_dma_ex.h" + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Functions DMA Exported Functions + * @brief DMA Exported functions + * @{ + */ + +/** @defgroup DMA_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and de-initialization functions + * @{ + */ +HAL_StatusTypeDef HAL_DMA_Init(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_DeInit(DMA_HandleTypeDef *hdma); +/** + * @} + */ + +/** @defgroup DMA_Exported_Functions_Group2 I/O operation functions + * @brief I/O operation functions + * @{ + */ +HAL_StatusTypeDef HAL_DMA_Start (DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMA_Start_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_Abort_IT(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_PollForTransfer(DMA_HandleTypeDef *hdma, HAL_DMA_LevelCompleteTypeDef CompleteLevel, uint32_t Timeout); +void HAL_DMA_IRQHandler(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_RegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID, void (* pCallback)(DMA_HandleTypeDef *_hdma)); +HAL_StatusTypeDef HAL_DMA_UnRegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID); + +/** + * @} + */ + +/** @defgroup DMA_Exported_Functions_Group3 Peripheral State functions + * @brief Peripheral State functions + * @{ + */ +HAL_DMA_StateTypeDef HAL_DMA_GetState(DMA_HandleTypeDef *hdma); +uint32_t HAL_DMA_GetError(DMA_HandleTypeDef *hdma); +/** + * @} + */ +/** + * @} + */ +/* Private Constants -------------------------------------------------------------*/ +/** @defgroup DMA_Private_Constants DMA Private Constants + * @brief DMA private defines and constants + * @{ + */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup DMA_Private_Macros DMA Private Macros + * @brief DMA private macros + * @{ + */ +#define IS_DMA_DIRECTION(DIRECTION) (((DIRECTION) == DMA_PERIPH_TO_MEMORY ) || \ + ((DIRECTION) == DMA_MEMORY_TO_PERIPH) || \ + ((DIRECTION) == DMA_MEMORY_TO_MEMORY)) + +#define IS_DMA_BUFFER_SIZE(SIZE) (((SIZE) >= 0x01U) && ((SIZE) < 0x10000U)) + +#define IS_DMA_PERIPHERAL_INC_STATE(STATE) (((STATE) == DMA_PINC_ENABLE) || \ + ((STATE) == DMA_PINC_DISABLE)) + +#define IS_DMA_MEMORY_INC_STATE(STATE) (((STATE) == DMA_MINC_ENABLE) || \ + ((STATE) == DMA_MINC_DISABLE)) + +#define IS_DMA_PERIPHERAL_DATA_SIZE(SIZE) (((SIZE) == DMA_PDATAALIGN_BYTE) || \ + ((SIZE) == DMA_PDATAALIGN_HALFWORD) || \ + ((SIZE) == DMA_PDATAALIGN_WORD)) + +#define IS_DMA_MEMORY_DATA_SIZE(SIZE) (((SIZE) == DMA_MDATAALIGN_BYTE) || \ + ((SIZE) == DMA_MDATAALIGN_HALFWORD) || \ + ((SIZE) == DMA_MDATAALIGN_WORD )) + +#define IS_DMA_MODE(MODE) (((MODE) == DMA_NORMAL ) || \ + ((MODE) == DMA_CIRCULAR) || \ + ((MODE) == DMA_PFCTRL)) + +#define IS_DMA_PRIORITY(PRIORITY) (((PRIORITY) == DMA_PRIORITY_LOW ) || \ + ((PRIORITY) == DMA_PRIORITY_MEDIUM) || \ + ((PRIORITY) == DMA_PRIORITY_HIGH) || \ + ((PRIORITY) == DMA_PRIORITY_VERY_HIGH)) + +#define IS_DMA_FIFO_MODE_STATE(STATE) (((STATE) == DMA_FIFOMODE_DISABLE ) || \ + ((STATE) == DMA_FIFOMODE_ENABLE)) + +#define IS_DMA_FIFO_THRESHOLD(THRESHOLD) 
(((THRESHOLD) == DMA_FIFO_THRESHOLD_1QUARTERFULL ) || \ + ((THRESHOLD) == DMA_FIFO_THRESHOLD_HALFFULL) || \ + ((THRESHOLD) == DMA_FIFO_THRESHOLD_3QUARTERSFULL) || \ + ((THRESHOLD) == DMA_FIFO_THRESHOLD_FULL)) + +#define IS_DMA_MEMORY_BURST(BURST) (((BURST) == DMA_MBURST_SINGLE) || \ + ((BURST) == DMA_MBURST_INC4) || \ + ((BURST) == DMA_MBURST_INC8) || \ + ((BURST) == DMA_MBURST_INC16)) + +#define IS_DMA_PERIPHERAL_BURST(BURST) (((BURST) == DMA_PBURST_SINGLE) || \ + ((BURST) == DMA_PBURST_INC4) || \ + ((BURST) == DMA_PBURST_INC8) || \ + ((BURST) == DMA_PBURST_INC16)) +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup DMA_Private_Functions DMA Private Functions + * @brief DMA private functions + * @{ + */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_DMA_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma2d.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma2d.h new file mode 100644 index 000000000..31d5b1a16 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma2d.h @@ -0,0 +1,670 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dma2d.h + * @author MCD Application Team + * @brief Header file of DMA2D HAL module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_DMA2D_H +#define STM32F7xx_HAL_DMA2D_H + +#ifdef __cplusplus + extern "C" { +#endif + +#if defined (DMA2D) + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup DMA2D DMA2D + * @brief DMA2D HAL module driver + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup DMA2D_Exported_Types DMA2D Exported Types + * @{ + */ +#define MAX_DMA2D_LAYER 2U /*!< DMA2D maximum number of layers */ + +/** + * @brief DMA2D CLUT Structure definition + */ +typedef struct +{ + uint32_t *pCLUT; /*!< Configures the DMA2D CLUT memory address.*/ + + uint32_t CLUTColorMode; /*!< Configures the DMA2D CLUT color mode. + This parameter can be one value of @ref DMA2D_CLUT_CM. */ + + uint32_t Size; /*!< Configures the DMA2D CLUT size. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF.*/ +} DMA2D_CLUTCfgTypeDef; + +/** + * @brief DMA2D Init structure definition + */ +typedef struct +{ + uint32_t Mode; /*!< Configures the DMA2D transfer mode. + This parameter can be one value of @ref DMA2D_Mode. */ + + uint32_t ColorMode; /*!< Configures the color format of the output image. + This parameter can be one value of @ref DMA2D_Output_Color_Mode. */ + + uint32_t OutputOffset; /*!< Specifies the Offset value. + This parameter must be a number between Min_Data = 0x0000 and Max_Data = 0x3FFF. */ +#if defined (DMA2D_ALPHA_INV_RB_SWAP_SUPPORT) + uint32_t AlphaInverted; /*!< Select regular or inverted alpha value for the output pixel format converter. + This parameter can be one value of @ref DMA2D_Alpha_Inverted. */ + + uint32_t RedBlueSwap; /*!< Select regular mode (RGB or ARGB) or swap mode (BGR or ABGR) + for the output pixel format converter. + This parameter can be one value of @ref DMA2D_RB_Swap. */ + +#endif /* DMA2D_ALPHA_INV_RB_SWAP_SUPPORT */ + + + + +} DMA2D_InitTypeDef; + + +/** + * @brief DMA2D Layer structure definition + */ +typedef struct +{ + uint32_t InputOffset; /*!< Configures the DMA2D foreground or background offset. + This parameter must be a number between Min_Data = 0x0000 and Max_Data = 0x3FFF. */ + + uint32_t InputColorMode; /*!< Configures the DMA2D foreground or background color mode. + This parameter can be one value of @ref DMA2D_Input_Color_Mode. */ + + uint32_t AlphaMode; /*!< Configures the DMA2D foreground or background alpha mode. + This parameter can be one value of @ref DMA2D_Alpha_Mode. */ + + uint32_t InputAlpha; /*!< Specifies the DMA2D foreground or background alpha value and color value in case of A8 or A4 color mode. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF except for the color modes detailed below. 
+ @note In case of A8 or A4 color mode (ARGB), this parameter must be a number between + Min_Data = 0x00000000 and Max_Data = 0xFFFFFFFF where + - InputAlpha[24:31] is the alpha value ALPHA[0:7] + - InputAlpha[16:23] is the red value RED[0:7] + - InputAlpha[8:15] is the green value GREEN[0:7] + - InputAlpha[0:7] is the blue value BLUE[0:7]. */ +#if defined (DMA2D_ALPHA_INV_RB_SWAP_SUPPORT) + uint32_t AlphaInverted; /*!< Select regular or inverted alpha value. + This parameter can be one value of @ref DMA2D_Alpha_Inverted. */ + + uint32_t RedBlueSwap; /*!< Select regular mode (RGB or ARGB) or swap mode (BGR or ABGR). + This parameter can be one value of @ref DMA2D_RB_Swap. */ +#endif /* DMA2D_ALPHA_INV_RB_SWAP_SUPPORT */ + + +} DMA2D_LayerCfgTypeDef; + +/** + * @brief HAL DMA2D State structures definition + */ +typedef enum +{ + HAL_DMA2D_STATE_RESET = 0x00U, /*!< DMA2D not yet initialized or disabled */ + HAL_DMA2D_STATE_READY = 0x01U, /*!< Peripheral Initialized and ready for use */ + HAL_DMA2D_STATE_BUSY = 0x02U, /*!< An internal process is ongoing */ + HAL_DMA2D_STATE_TIMEOUT = 0x03U, /*!< Timeout state */ + HAL_DMA2D_STATE_ERROR = 0x04U, /*!< DMA2D state error */ + HAL_DMA2D_STATE_SUSPEND = 0x05U /*!< DMA2D process is suspended */ +}HAL_DMA2D_StateTypeDef; + +/** + * @brief DMA2D handle Structure definition + */ +typedef struct __DMA2D_HandleTypeDef +{ + DMA2D_TypeDef *Instance; /*!< DMA2D register base address. */ + + DMA2D_InitTypeDef Init; /*!< DMA2D communication parameters. */ + + void (* XferCpltCallback)(struct __DMA2D_HandleTypeDef * hdma2d); /*!< DMA2D transfer complete callback. */ + + void (* XferErrorCallback)(struct __DMA2D_HandleTypeDef * hdma2d); /*!< DMA2D transfer error callback. */ + +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) + void (* LineEventCallback)( struct __DMA2D_HandleTypeDef * hdma2d); /*!< DMA2D line event callback. */ + + void (* CLUTLoadingCpltCallback)( struct __DMA2D_HandleTypeDef * hdma2d); /*!< DMA2D CLUT loading completion callback. */ + + void (* MspInitCallback)( struct __DMA2D_HandleTypeDef * hdma2d); /*!< DMA2D Msp Init callback. */ + + void (* MspDeInitCallback)( struct __DMA2D_HandleTypeDef * hdma2d); /*!< DMA2D Msp DeInit callback. */ + +#endif /* (USE_HAL_DMA2D_REGISTER_CALLBACKS) */ + + DMA2D_LayerCfgTypeDef LayerCfg[MAX_DMA2D_LAYER]; /*!< DMA2D Layers parameters */ + + HAL_LockTypeDef Lock; /*!< DMA2D lock. */ + + __IO HAL_DMA2D_StateTypeDef State; /*!< DMA2D transfer state. */ + + __IO uint32_t ErrorCode; /*!< DMA2D error code. 
*/ +} DMA2D_HandleTypeDef; + +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) +/** + * @brief HAL DMA2D Callback pointer definition + */ +typedef void (*pDMA2D_CallbackTypeDef)(DMA2D_HandleTypeDef * hdma2d); /*!< Pointer to a DMA2D common callback function */ +#endif /* USE_HAL_DMA2D_REGISTER_CALLBACKS */ +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup DMA2D_Exported_Constants DMA2D Exported Constants + * @{ + */ + +/** @defgroup DMA2D_Error_Code DMA2D Error Code + * @{ + */ +#define HAL_DMA2D_ERROR_NONE 0x00000000U /*!< No error */ +#define HAL_DMA2D_ERROR_TE 0x00000001U /*!< Transfer error */ +#define HAL_DMA2D_ERROR_CE 0x00000002U /*!< Configuration error */ +#define HAL_DMA2D_ERROR_CAE 0x00000004U /*!< CLUT access error */ +#define HAL_DMA2D_ERROR_TIMEOUT 0x00000020U /*!< Timeout error */ +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) +#define HAL_DMA2D_ERROR_INVALID_CALLBACK 0x00000040U /*!< Invalid callback error */ +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup DMA2D_Mode DMA2D Mode + * @{ + */ +#define DMA2D_M2M 0x00000000U /*!< DMA2D memory to memory transfer mode */ +#define DMA2D_M2M_PFC DMA2D_CR_MODE_0 /*!< DMA2D memory to memory with pixel format conversion transfer mode */ +#define DMA2D_M2M_BLEND DMA2D_CR_MODE_1 /*!< DMA2D memory to memory with blending transfer mode */ +#define DMA2D_R2M DMA2D_CR_MODE /*!< DMA2D register to memory transfer mode */ +/** + * @} + */ + +/** @defgroup DMA2D_Output_Color_Mode DMA2D Output Color Mode + * @{ + */ +#define DMA2D_OUTPUT_ARGB8888 0x00000000U /*!< ARGB8888 DMA2D color mode */ +#define DMA2D_OUTPUT_RGB888 DMA2D_OPFCCR_CM_0 /*!< RGB888 DMA2D color mode */ +#define DMA2D_OUTPUT_RGB565 DMA2D_OPFCCR_CM_1 /*!< RGB565 DMA2D color mode */ +#define DMA2D_OUTPUT_ARGB1555 (DMA2D_OPFCCR_CM_0|DMA2D_OPFCCR_CM_1) /*!< ARGB1555 DMA2D color mode */ +#define DMA2D_OUTPUT_ARGB4444 DMA2D_OPFCCR_CM_2 /*!< ARGB4444 DMA2D color mode */ +/** + * @} + */ + +/** @defgroup DMA2D_Input_Color_Mode DMA2D Input Color Mode + * @{ + */ +#define DMA2D_INPUT_ARGB8888 0x00000000U /*!< ARGB8888 color mode */ +#define DMA2D_INPUT_RGB888 0x00000001U /*!< RGB888 color mode */ +#define DMA2D_INPUT_RGB565 0x00000002U /*!< RGB565 color mode */ +#define DMA2D_INPUT_ARGB1555 0x00000003U /*!< ARGB1555 color mode */ +#define DMA2D_INPUT_ARGB4444 0x00000004U /*!< ARGB4444 color mode */ +#define DMA2D_INPUT_L8 0x00000005U /*!< L8 color mode */ +#define DMA2D_INPUT_AL44 0x00000006U /*!< AL44 color mode */ +#define DMA2D_INPUT_AL88 0x00000007U /*!< AL88 color mode */ +#define DMA2D_INPUT_L4 0x00000008U /*!< L4 color mode */ +#define DMA2D_INPUT_A8 0x00000009U /*!< A8 color mode */ +#define DMA2D_INPUT_A4 0x0000000AU /*!< A4 color mode */ +/** + * @} + */ + +/** @defgroup DMA2D_Alpha_Mode DMA2D Alpha Mode + * @{ + */ +#define DMA2D_NO_MODIF_ALPHA 0x00000000U /*!< No modification of the alpha channel value */ +#define DMA2D_REPLACE_ALPHA 0x00000001U /*!< Replace original alpha channel value by programmed alpha value */ +#define DMA2D_COMBINE_ALPHA 0x00000002U /*!< Replace original alpha channel value by programmed alpha value + with original alpha channel value */ +/** + * @} + */ + +#if defined (DMA2D_ALPHA_INV_RB_SWAP_SUPPORT) +/** @defgroup DMA2D_Alpha_Inverted DMA2D Alpha Inversion + * @{ + */ +#define DMA2D_REGULAR_ALPHA 0x00000000U /*!< No modification of the alpha channel value */ +#define DMA2D_INVERTED_ALPHA 0x00000001U /*!< Invert the alpha channel value */ +/** + * @} + */ + +/** 
@defgroup DMA2D_RB_Swap DMA2D Red and Blue Swap + * @{ + */ +#define DMA2D_RB_REGULAR 0x00000000U /*!< Select regular mode (RGB or ARGB) */ +#define DMA2D_RB_SWAP 0x00000001U /*!< Select swap mode (BGR or ABGR) */ +/** + * @} + */ +#endif /* DMA2D_ALPHA_INV_RB_SWAP_SUPPORT */ + + + + + +/** @defgroup DMA2D_CLUT_CM DMA2D CLUT Color Mode + * @{ + */ +#define DMA2D_CCM_ARGB8888 0x00000000U /*!< ARGB8888 DMA2D CLUT color mode */ +#define DMA2D_CCM_RGB888 0x00000001U /*!< RGB888 DMA2D CLUT color mode */ +/** + * @} + */ + +/** @defgroup DMA2D_Interrupts DMA2D Interrupts + * @{ + */ +#define DMA2D_IT_CE DMA2D_CR_CEIE /*!< Configuration Error Interrupt */ +#define DMA2D_IT_CTC DMA2D_CR_CTCIE /*!< CLUT Transfer Complete Interrupt */ +#define DMA2D_IT_CAE DMA2D_CR_CAEIE /*!< CLUT Access Error Interrupt */ +#define DMA2D_IT_TW DMA2D_CR_TWIE /*!< Transfer Watermark Interrupt */ +#define DMA2D_IT_TC DMA2D_CR_TCIE /*!< Transfer Complete Interrupt */ +#define DMA2D_IT_TE DMA2D_CR_TEIE /*!< Transfer Error Interrupt */ +/** + * @} + */ + +/** @defgroup DMA2D_Flags DMA2D Flags + * @{ + */ +#define DMA2D_FLAG_CE DMA2D_ISR_CEIF /*!< Configuration Error Interrupt Flag */ +#define DMA2D_FLAG_CTC DMA2D_ISR_CTCIF /*!< CLUT Transfer Complete Interrupt Flag */ +#define DMA2D_FLAG_CAE DMA2D_ISR_CAEIF /*!< CLUT Access Error Interrupt Flag */ +#define DMA2D_FLAG_TW DMA2D_ISR_TWIF /*!< Transfer Watermark Interrupt Flag */ +#define DMA2D_FLAG_TC DMA2D_ISR_TCIF /*!< Transfer Complete Interrupt Flag */ +#define DMA2D_FLAG_TE DMA2D_ISR_TEIF /*!< Transfer Error Interrupt Flag */ +/** + * @} + */ + +/** @defgroup DMA2D_Aliases DMA2D API Aliases + * @{ + */ +#define HAL_DMA2D_DisableCLUT HAL_DMA2D_CLUTLoading_Abort /*!< Aliased to HAL_DMA2D_CLUTLoading_Abort for compatibility with legacy code */ +/** + * @} + */ + +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) +/** + * @brief HAL DMA2D common Callback ID enumeration definition + */ +typedef enum +{ + HAL_DMA2D_MSPINIT_CB_ID = 0x00U, /*!< DMA2D MspInit callback ID */ + HAL_DMA2D_MSPDEINIT_CB_ID = 0x01U, /*!< DMA2D MspDeInit callback ID */ + HAL_DMA2D_TRANSFERCOMPLETE_CB_ID = 0x02U, /*!< DMA2D transfer complete callback ID */ + HAL_DMA2D_TRANSFERERROR_CB_ID = 0x03U, /*!< DMA2D transfer error callback ID */ + HAL_DMA2D_LINEEVENT_CB_ID = 0x04U, /*!< DMA2D line event callback ID */ + HAL_DMA2D_CLUTLOADINGCPLT_CB_ID = 0x05U, /*!< DMA2D CLUT loading completion callback ID */ +}HAL_DMA2D_CallbackIDTypeDef; +#endif /* USE_HAL_DMA2D_REGISTER_CALLBACKS */ + + +/** + * @} + */ +/* Exported macros ------------------------------------------------------------*/ +/** @defgroup DMA2D_Exported_Macros DMA2D Exported Macros + * @{ + */ + +/** @brief Reset DMA2D handle state + * @param __HANDLE__ specifies the DMA2D handle. + * @retval None + */ +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) +#define __HAL_DMA2D_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->State = HAL_DMA2D_STATE_RESET;\ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + }while(0) +#else +#define __HAL_DMA2D_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_DMA2D_STATE_RESET) +#endif /* USE_HAL_DMA2D_REGISTER_CALLBACKS */ + + +/** + * @brief Enable the DMA2D. + * @param __HANDLE__ DMA2D handle + * @retval None. + */ +#define __HAL_DMA2D_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= DMA2D_CR_START) + + +/* Interrupt & Flag management */ +/** + * @brief Get the DMA2D pending flags. + * @param __HANDLE__ DMA2D handle + * @param __FLAG__ flag to check. 
+ * This parameter can be any combination of the following values: + * @arg DMA2D_FLAG_CE: Configuration error flag + * @arg DMA2D_FLAG_CTC: CLUT transfer complete flag + * @arg DMA2D_FLAG_CAE: CLUT access error flag + * @arg DMA2D_FLAG_TW: Transfer Watermark flag + * @arg DMA2D_FLAG_TC: Transfer complete flag + * @arg DMA2D_FLAG_TE: Transfer error flag + * @retval The state of FLAG. + */ +#define __HAL_DMA2D_GET_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->ISR & (__FLAG__)) + +/** + * @brief Clear the DMA2D pending flags. + * @param __HANDLE__ DMA2D handle + * @param __FLAG__ specifies the flag to clear. + * This parameter can be any combination of the following values: + * @arg DMA2D_FLAG_CE: Configuration error flag + * @arg DMA2D_FLAG_CTC: CLUT transfer complete flag + * @arg DMA2D_FLAG_CAE: CLUT access error flag + * @arg DMA2D_FLAG_TW: Transfer Watermark flag + * @arg DMA2D_FLAG_TC: Transfer complete flag + * @arg DMA2D_FLAG_TE: Transfer error flag + * @retval None + */ +#define __HAL_DMA2D_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->IFCR = (__FLAG__)) + +/** + * @brief Enable the specified DMA2D interrupts. + * @param __HANDLE__ DMA2D handle + * @param __INTERRUPT__ specifies the DMA2D interrupt sources to be enabled. + * This parameter can be any combination of the following values: + * @arg DMA2D_IT_CE: Configuration error interrupt mask + * @arg DMA2D_IT_CTC: CLUT transfer complete interrupt mask + * @arg DMA2D_IT_CAE: CLUT access error interrupt mask + * @arg DMA2D_IT_TW: Transfer Watermark interrupt mask + * @arg DMA2D_IT_TC: Transfer complete interrupt mask + * @arg DMA2D_IT_TE: Transfer error interrupt mask + * @retval None + */ +#define __HAL_DMA2D_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR |= (__INTERRUPT__)) + +/** + * @brief Disable the specified DMA2D interrupts. + * @param __HANDLE__ DMA2D handle + * @param __INTERRUPT__ specifies the DMA2D interrupt sources to be disabled. + * This parameter can be any combination of the following values: + * @arg DMA2D_IT_CE: Configuration error interrupt mask + * @arg DMA2D_IT_CTC: CLUT transfer complete interrupt mask + * @arg DMA2D_IT_CAE: CLUT access error interrupt mask + * @arg DMA2D_IT_TW: Transfer Watermark interrupt mask + * @arg DMA2D_IT_TC: Transfer complete interrupt mask + * @arg DMA2D_IT_TE: Transfer error interrupt mask + * @retval None + */ +#define __HAL_DMA2D_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR &= ~(__INTERRUPT__)) + +/** + * @brief Check whether the specified DMA2D interrupt source is enabled or not. + * @param __HANDLE__ DMA2D handle + * @param __INTERRUPT__ specifies the DMA2D interrupt source to check. + * This parameter can be one of the following values: + * @arg DMA2D_IT_CE: Configuration error interrupt mask + * @arg DMA2D_IT_CTC: CLUT transfer complete interrupt mask + * @arg DMA2D_IT_CAE: CLUT access error interrupt mask + * @arg DMA2D_IT_TW: Transfer Watermark interrupt mask + * @arg DMA2D_IT_TC: Transfer complete interrupt mask + * @arg DMA2D_IT_TE: Transfer error interrupt mask + * @retval The state of INTERRUPT source. 
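+  *
+  * @note  Usage sketch (editor's illustration, not part of the original ST header),
+  *        assuming a hypothetical DMA2D handle named hdma2d_example: unmask the
+  *        transfer-complete interrupt, then check and clear it from the interrupt
+  *        service routine (HAL_DMA2D_IRQHandler() performs the same checks internally):
+  * @code
+  *   extern DMA2D_HandleTypeDef hdma2d_example;
+  *
+  *   __HAL_DMA2D_ENABLE_IT(&hdma2d_example, DMA2D_IT_TC);          // unmask TC interrupt
+  *
+  *   // In the DMA2D interrupt service routine:
+  *   if ((__HAL_DMA2D_GET_FLAG(&hdma2d_example, DMA2D_FLAG_TC) != 0U) &&
+  *       (__HAL_DMA2D_GET_IT_SOURCE(&hdma2d_example, DMA2D_IT_TC) != 0U))
+  *   {
+  *     __HAL_DMA2D_CLEAR_FLAG(&hdma2d_example, DMA2D_FLAG_TC);     // acknowledge the flag
+  *   }
+  * @endcode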
+ */ +#define __HAL_DMA2D_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR & (__INTERRUPT__)) + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup DMA2D_Exported_Functions DMA2D Exported Functions + * @{ + */ + +/** @addtogroup DMA2D_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ + +/* Initialization and de-initialization functions *******************************/ +HAL_StatusTypeDef HAL_DMA2D_Init(DMA2D_HandleTypeDef *hdma2d); +HAL_StatusTypeDef HAL_DMA2D_DeInit (DMA2D_HandleTypeDef *hdma2d); +void HAL_DMA2D_MspInit(DMA2D_HandleTypeDef* hdma2d); +void HAL_DMA2D_MspDeInit(DMA2D_HandleTypeDef* hdma2d); +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_DMA2D_RegisterCallback(DMA2D_HandleTypeDef *hdma2d, HAL_DMA2D_CallbackIDTypeDef CallbackID, pDMA2D_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_DMA2D_UnRegisterCallback(DMA2D_HandleTypeDef *hdma2d, HAL_DMA2D_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_DMA2D_REGISTER_CALLBACKS */ + +/** + * @} + */ + + +/** @addtogroup DMA2D_Exported_Functions_Group2 IO operation functions + * @{ + */ + +/* IO operation functions *******************************************************/ +HAL_StatusTypeDef HAL_DMA2D_Start(DMA2D_HandleTypeDef *hdma2d, uint32_t pdata, uint32_t DstAddress, uint32_t Width, uint32_t Height); +HAL_StatusTypeDef HAL_DMA2D_BlendingStart(DMA2D_HandleTypeDef *hdma2d, uint32_t SrcAddress1, uint32_t SrcAddress2, uint32_t DstAddress, uint32_t Width, uint32_t Height); +HAL_StatusTypeDef HAL_DMA2D_Start_IT(DMA2D_HandleTypeDef *hdma2d, uint32_t pdata, uint32_t DstAddress, uint32_t Width, uint32_t Height); +HAL_StatusTypeDef HAL_DMA2D_BlendingStart_IT(DMA2D_HandleTypeDef *hdma2d, uint32_t SrcAddress1, uint32_t SrcAddress2, uint32_t DstAddress, uint32_t Width, uint32_t Height); +HAL_StatusTypeDef HAL_DMA2D_Suspend(DMA2D_HandleTypeDef *hdma2d); +HAL_StatusTypeDef HAL_DMA2D_Resume(DMA2D_HandleTypeDef *hdma2d); +HAL_StatusTypeDef HAL_DMA2D_Abort(DMA2D_HandleTypeDef *hdma2d); +HAL_StatusTypeDef HAL_DMA2D_EnableCLUT(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_DMA2D_CLUTLoad(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef CLUTCfg, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_DMA2D_CLUTLoad_IT(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef CLUTCfg, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_DMA2D_CLUTLoading_Abort(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_DMA2D_CLUTLoading_Suspend(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_DMA2D_CLUTLoading_Resume(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_DMA2D_PollForTransfer(DMA2D_HandleTypeDef *hdma2d, uint32_t Timeout); +void HAL_DMA2D_IRQHandler(DMA2D_HandleTypeDef *hdma2d); +void HAL_DMA2D_LineEventCallback(DMA2D_HandleTypeDef *hdma2d); +void HAL_DMA2D_CLUTLoadingCpltCallback(DMA2D_HandleTypeDef *hdma2d); + +/** + * @} + */ + +/** @addtogroup DMA2D_Exported_Functions_Group3 Peripheral Control functions + * @{ + */ + +/* Peripheral Control functions *************************************************/ +HAL_StatusTypeDef HAL_DMA2D_ConfigLayer(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_DMA2D_ConfigCLUT(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef CLUTCfg, uint32_t LayerIdx); +HAL_StatusTypeDef 
HAL_DMA2D_ProgramLineEvent(DMA2D_HandleTypeDef *hdma2d, uint32_t Line); +HAL_StatusTypeDef HAL_DMA2D_EnableDeadTime(DMA2D_HandleTypeDef *hdma2d); +HAL_StatusTypeDef HAL_DMA2D_DisableDeadTime(DMA2D_HandleTypeDef *hdma2d); +HAL_StatusTypeDef HAL_DMA2D_ConfigDeadTime(DMA2D_HandleTypeDef *hdma2d, uint8_t DeadTime); + +/** + * @} + */ + +/** @addtogroup DMA2D_Exported_Functions_Group4 Peripheral State and Error functions + * @{ + */ + +/* Peripheral State functions ***************************************************/ +HAL_DMA2D_StateTypeDef HAL_DMA2D_GetState(DMA2D_HandleTypeDef *hdma2d); +uint32_t HAL_DMA2D_GetError(DMA2D_HandleTypeDef *hdma2d); + +/** + * @} + */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ + +/** @addtogroup DMA2D_Private_Constants DMA2D Private Constants + * @{ + */ + +/** @defgroup DMA2D_Maximum_Line_WaterMark DMA2D Maximum Line Watermark + * @{ + */ +#define DMA2D_LINE_WATERMARK_MAX DMA2D_LWR_LW /*!< DMA2D maximum line watermark */ +/** + * @} + */ + +/** @defgroup DMA2D_Color_Value DMA2D Color Value + * @{ + */ +#define DMA2D_COLOR_VALUE 0x000000FFU /*!< Color value mask */ +/** + * @} + */ + +/** @defgroup DMA2D_Max_Layer DMA2D Maximum Number of Layers + * @{ + */ +#define DMA2D_MAX_LAYER 2U /*!< DMA2D maximum number of layers */ +/** + * @} + */ + +/** @defgroup DMA2D_Layers DMA2D Layers + * @{ + */ +#define DMA2D_BACKGROUND_LAYER 0x00000000U /*!< DMA2D Background Layer (layer 0) */ +#define DMA2D_FOREGROUND_LAYER 0x00000001U /*!< DMA2D Foreground Layer (layer 1) */ +/** + * @} + */ + +/** @defgroup DMA2D_Offset DMA2D Offset + * @{ + */ +#define DMA2D_OFFSET DMA2D_FGOR_LO /*!< maximum Line Offset */ +/** + * @} + */ + +/** @defgroup DMA2D_Size DMA2D Size + * @{ + */ +#define DMA2D_PIXEL (DMA2D_NLR_PL >> 16U) /*!< DMA2D maximum number of pixels per line */ +#define DMA2D_LINE DMA2D_NLR_NL /*!< DMA2D maximum number of lines */ +/** + * @} + */ + +/** @defgroup DMA2D_CLUT_Size DMA2D CLUT Size + * @{ + */ +#define DMA2D_CLUT_SIZE (DMA2D_FGPFCCR_CS >> 8U) /*!< DMA2D maximum CLUT size */ +/** + * @} + */ + +/** + * @} + */ + + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup DMA2D_Private_Macros DMA2D Private Macros + * @{ + */ +#define IS_DMA2D_LAYER(LAYER) (((LAYER) == DMA2D_BACKGROUND_LAYER) || ((LAYER) == DMA2D_FOREGROUND_LAYER)) + +#define IS_DMA2D_MODE(MODE) (((MODE) == DMA2D_M2M) || ((MODE) == DMA2D_M2M_PFC) || \ + ((MODE) == DMA2D_M2M_BLEND) || ((MODE) == DMA2D_R2M)) + +#define IS_DMA2D_CMODE(MODE_ARGB) (((MODE_ARGB) == DMA2D_OUTPUT_ARGB8888) || ((MODE_ARGB) == DMA2D_OUTPUT_RGB888) || \ + ((MODE_ARGB) == DMA2D_OUTPUT_RGB565) || ((MODE_ARGB) == DMA2D_OUTPUT_ARGB1555) || \ + ((MODE_ARGB) == DMA2D_OUTPUT_ARGB4444)) + +#define IS_DMA2D_COLOR(COLOR) ((COLOR) <= DMA2D_COLOR_VALUE) +#define IS_DMA2D_LINE(LINE) ((LINE) <= DMA2D_LINE) +#define IS_DMA2D_PIXEL(PIXEL) ((PIXEL) <= DMA2D_PIXEL) +#define IS_DMA2D_OFFSET(OOFFSET) ((OOFFSET) <= DMA2D_OFFSET) + +#define IS_DMA2D_INPUT_COLOR_MODE(INPUT_CM) (((INPUT_CM) == DMA2D_INPUT_ARGB8888) || ((INPUT_CM) == DMA2D_INPUT_RGB888) || \ + ((INPUT_CM) == DMA2D_INPUT_RGB565) || ((INPUT_CM) == DMA2D_INPUT_ARGB1555) || \ + ((INPUT_CM) == DMA2D_INPUT_ARGB4444) || ((INPUT_CM) == DMA2D_INPUT_L8) || \ + ((INPUT_CM) == DMA2D_INPUT_AL44) || ((INPUT_CM) == DMA2D_INPUT_AL88) || \ + ((INPUT_CM) == DMA2D_INPUT_L4) || ((INPUT_CM) == DMA2D_INPUT_A8) || \ + ((INPUT_CM) == DMA2D_INPUT_A4)) + +#define IS_DMA2D_ALPHA_MODE(AlphaMode) (((AlphaMode) == 
DMA2D_NO_MODIF_ALPHA) || \ + ((AlphaMode) == DMA2D_REPLACE_ALPHA) || \ + ((AlphaMode) == DMA2D_COMBINE_ALPHA)) + +#if defined (DMA2D_ALPHA_INV_RB_SWAP_SUPPORT) +#define IS_DMA2D_ALPHA_INVERTED(Alpha_Inverted) (((Alpha_Inverted) == DMA2D_REGULAR_ALPHA) || \ + ((Alpha_Inverted) == DMA2D_INVERTED_ALPHA)) + +#define IS_DMA2D_RB_SWAP(RB_Swap) (((RB_Swap) == DMA2D_RB_REGULAR) || \ + ((RB_Swap) == DMA2D_RB_SWAP)) +#endif /* DMA2D_ALPHA_INV_RB_SWAP_SUPPORT */ + + + + +#define IS_DMA2D_CLUT_CM(CLUT_CM) (((CLUT_CM) == DMA2D_CCM_ARGB8888) || ((CLUT_CM) == DMA2D_CCM_RGB888)) +#define IS_DMA2D_CLUT_SIZE(CLUT_SIZE) ((CLUT_SIZE) <= DMA2D_CLUT_SIZE) +#define IS_DMA2D_LINEWATERMARK(LineWatermark) ((LineWatermark) <= DMA2D_LINE_WATERMARK_MAX) +#define IS_DMA2D_IT(IT) (((IT) == DMA2D_IT_CTC) || ((IT) == DMA2D_IT_CAE) || \ + ((IT) == DMA2D_IT_TW) || ((IT) == DMA2D_IT_TC) || \ + ((IT) == DMA2D_IT_TE) || ((IT) == DMA2D_IT_CE)) +#define IS_DMA2D_GET_FLAG(FLAG) (((FLAG) == DMA2D_FLAG_CTC) || ((FLAG) == DMA2D_FLAG_CAE) || \ + ((FLAG) == DMA2D_FLAG_TW) || ((FLAG) == DMA2D_FLAG_TC) || \ + ((FLAG) == DMA2D_FLAG_TE) || ((FLAG) == DMA2D_FLAG_CE)) +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined (DMA2D) */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_DMA2D_H */ + + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma_ex.h new file mode 100644 index 000000000..d15ea505e --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma_ex.h @@ -0,0 +1,185 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dma_ex.h + * @author MCD Application Team + * @brief Header file of DMA HAL extension module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_DMA_EX_H +#define __STM32F7xx_HAL_DMA_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup DMAEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup DMAEx_Exported_Types DMAEx Exported Types + * @brief DMAEx Exported types + * @{ + */ + +/** + * @brief HAL DMA Memory definition + */ +typedef enum +{ + MEMORY0 = 0x00U, /*!< Memory 0 */ + MEMORY1 = 0x01U, /*!< Memory 1 */ + +}HAL_DMA_MemoryTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Constants DMA Exported Constants + * @brief DMA Exported constants + * @{ + */ + +/** @defgroup DMAEx_Channel_selection DMA Channel selection + * @brief DMAEx channel selection + * @{ + */ +#define DMA_CHANNEL_0 0x00000000U /*!< DMA Channel 0 */ +#define DMA_CHANNEL_1 0x02000000U /*!< DMA Channel 1 */ +#define DMA_CHANNEL_2 0x04000000U /*!< DMA Channel 2 */ +#define DMA_CHANNEL_3 0x06000000U /*!< DMA Channel 3 */ +#define DMA_CHANNEL_4 0x08000000U /*!< DMA Channel 4 */ +#define DMA_CHANNEL_5 0x0A000000U /*!< DMA Channel 5 */ +#define DMA_CHANNEL_6 0x0C000000U /*!< DMA Channel 6 */ +#define DMA_CHANNEL_7 0x0E000000U /*!< DMA Channel 7 */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define DMA_CHANNEL_8 0x10000000U /*!< DMA Channel 8 */ +#define DMA_CHANNEL_9 0x12000000U /*!< DMA Channel 9 */ +#define DMA_CHANNEL_10 0x14000000U /*!< DMA Channel 10*/ +#define DMA_CHANNEL_11 0x16000000U /*!< DMA Channel 11*/ +#define DMA_CHANNEL_12 0x18000000U /*!< DMA Channel 12*/ +#define DMA_CHANNEL_13 0x1A000000U /*!< DMA Channel 13*/ +#define DMA_CHANNEL_14 0x1C000000U /*!< DMA Channel 14*/ +#define DMA_CHANNEL_15 0x1E000000U /*!< DMA Channel 15*/ +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup DMAEx_Exported_Functions DMAEx Exported Functions + * @brief DMAEx Exported functions + * @{ + */ + +/** @defgroup DMAEx_Exported_Functions_Group1 Extended features functions + * @brief Extended features functions + * @{ + */ + +/* IO operation functions *******************************************************/ +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength); +HAL_StatusTypeDef 
HAL_DMAEx_ChangeMemory(DMA_HandleTypeDef *hdma, uint32_t Address, HAL_DMA_MemoryTypeDef memory); + +/** + * @} + */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup DMAEx_Private_Macros DMA Private Macros + * @brief DMAEx private macros + * @{ + */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define IS_DMA_CHANNEL(CHANNEL) (((CHANNEL) == DMA_CHANNEL_0) || \ + ((CHANNEL) == DMA_CHANNEL_1) || \ + ((CHANNEL) == DMA_CHANNEL_2) || \ + ((CHANNEL) == DMA_CHANNEL_3) || \ + ((CHANNEL) == DMA_CHANNEL_4) || \ + ((CHANNEL) == DMA_CHANNEL_5) || \ + ((CHANNEL) == DMA_CHANNEL_6) || \ + ((CHANNEL) == DMA_CHANNEL_7) || \ + ((CHANNEL) == DMA_CHANNEL_8) || \ + ((CHANNEL) == DMA_CHANNEL_9) || \ + ((CHANNEL) == DMA_CHANNEL_10) || \ + ((CHANNEL) == DMA_CHANNEL_11) || \ + ((CHANNEL) == DMA_CHANNEL_12) || \ + ((CHANNEL) == DMA_CHANNEL_13) || \ + ((CHANNEL) == DMA_CHANNEL_14) || \ + ((CHANNEL) == DMA_CHANNEL_15)) +#else +#define IS_DMA_CHANNEL(CHANNEL) (((CHANNEL) == DMA_CHANNEL_0) || \ + ((CHANNEL) == DMA_CHANNEL_1) || \ + ((CHANNEL) == DMA_CHANNEL_2) || \ + ((CHANNEL) == DMA_CHANNEL_3) || \ + ((CHANNEL) == DMA_CHANNEL_4) || \ + ((CHANNEL) == DMA_CHANNEL_5) || \ + ((CHANNEL) == DMA_CHANNEL_6) || \ + ((CHANNEL) == DMA_CHANNEL_7)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx*/ +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup DMAEx_Private_Functions DMAEx Private Functions + * @brief DMAEx Private functions + * @{ + */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_DMA_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dsi.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dsi.h new file mode 100644 index 000000000..38cb0e571 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dsi.h @@ -0,0 +1,1337 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dsi.h + * @author MCD Application Team + * @brief Header file of DSI HAL module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_DSI_H +#define STM32F7xx_HAL_DSI_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(DSI) +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup DSI DSI + * @brief DSI HAL module driver + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** + * @brief DSI Init Structure definition + */ +typedef struct +{ + uint32_t AutomaticClockLaneControl; /*!< Automatic clock lane control + This parameter can be any value of @ref DSI_Automatic_Clk_Lane_Control */ + + uint32_t TXEscapeCkdiv; /*!< TX Escape clock division + The values 0 and 1 stop the TX_ESC clock generation */ + + uint32_t NumberOfLanes; /*!< Number of lanes + This parameter can be any value of @ref DSI_Number_Of_Lanes */ + +} DSI_InitTypeDef; + +/** + * @brief DSI PLL Clock structure definition + */ +typedef struct +{ + uint32_t PLLNDIV; /*!< PLL Loop Division Factor + This parameter must be a value between 10 and 125 */ + + uint32_t PLLIDF; /*!< PLL Input Division Factor + This parameter can be any value of @ref DSI_PLL_IDF */ + + uint32_t PLLODF; /*!< PLL Output Division Factor + This parameter can be any value of @ref DSI_PLL_ODF */ + +} DSI_PLLInitTypeDef; + +/** + * @brief DSI Video mode configuration + */ +typedef struct +{ + uint32_t VirtualChannelID; /*!< Virtual channel ID */ + + uint32_t ColorCoding; /*!< Color coding for LTDC interface + This parameter can be any value of @ref DSI_Color_Coding */ + + uint32_t LooselyPacked; /*!< Enable or disable loosely packed stream (needed only when using + 18-bit configuration). 
+ This parameter can be any value of @ref DSI_LooselyPacked */ + + uint32_t Mode; /*!< Video mode type + This parameter can be any value of @ref DSI_Video_Mode_Type */ + + uint32_t PacketSize; /*!< Video packet size */ + + uint32_t NumberOfChunks; /*!< Number of chunks */ + + uint32_t NullPacketSize; /*!< Null packet size */ + + uint32_t HSPolarity; /*!< HSYNC pin polarity + This parameter can be any value of @ref DSI_HSYNC_Polarity */ + + uint32_t VSPolarity; /*!< VSYNC pin polarity + This parameter can be any value of @ref DSI_VSYNC_Active_Polarity */ + + uint32_t DEPolarity; /*!< Data Enable pin polarity + This parameter can be any value of @ref DSI_DATA_ENABLE_Polarity */ + + uint32_t HorizontalSyncActive; /*!< Horizontal synchronism active duration (in lane byte clock cycles) */ + + uint32_t HorizontalBackPorch; /*!< Horizontal back-porch duration (in lane byte clock cycles) */ + + uint32_t HorizontalLine; /*!< Horizontal line duration (in lane byte clock cycles) */ + + uint32_t VerticalSyncActive; /*!< Vertical synchronism active duration */ + + uint32_t VerticalBackPorch; /*!< Vertical back-porch duration */ + + uint32_t VerticalFrontPorch; /*!< Vertical front-porch duration */ + + uint32_t VerticalActive; /*!< Vertical active duration */ + + uint32_t LPCommandEnable; /*!< Low-power command enable + This parameter can be any value of @ref DSI_LP_Command */ + + uint32_t LPLargestPacketSize; /*!< The size, in bytes, of the low power largest packet that + can fit in a line during VSA, VBP and VFP regions */ + + uint32_t LPVACTLargestPacketSize; /*!< The size, in bytes, of the low power largest packet that + can fit in a line during VACT region */ + + uint32_t LPHorizontalFrontPorchEnable; /*!< Low-power horizontal front-porch enable + This parameter can be any value of @ref DSI_LP_HFP */ + + uint32_t LPHorizontalBackPorchEnable; /*!< Low-power horizontal back-porch enable + This parameter can be any value of @ref DSI_LP_HBP */ + + uint32_t LPVerticalActiveEnable; /*!< Low-power vertical active enable + This parameter can be any value of @ref DSI_LP_VACT */ + + uint32_t LPVerticalFrontPorchEnable; /*!< Low-power vertical front-porch enable + This parameter can be any value of @ref DSI_LP_VFP */ + + uint32_t LPVerticalBackPorchEnable; /*!< Low-power vertical back-porch enable + This parameter can be any value of @ref DSI_LP_VBP */ + + uint32_t LPVerticalSyncActiveEnable; /*!< Low-power vertical sync active enable + This parameter can be any value of @ref DSI_LP_VSYNC */ + + uint32_t FrameBTAAcknowledgeEnable; /*!< Frame bus-turn-around acknowledge enable + This parameter can be any value of @ref DSI_FBTA_acknowledge */ + +} DSI_VidCfgTypeDef; + +/** + * @brief DSI Adapted command mode configuration + */ +typedef struct +{ + uint32_t VirtualChannelID; /*!< Virtual channel ID */ + + uint32_t ColorCoding; /*!< Color coding for LTDC interface + This parameter can be any value of @ref DSI_Color_Coding */ + + uint32_t CommandSize; /*!< Maximum allowed size for an LTDC write memory command, measured in + pixels. 
This parameter can be any value between 0x00 and 0xFFFFU */ + + uint32_t TearingEffectSource; /*!< Tearing effect source + This parameter can be any value of @ref DSI_TearingEffectSource */ + + uint32_t TearingEffectPolarity; /*!< Tearing effect pin polarity + This parameter can be any value of @ref DSI_TearingEffectPolarity */ + + uint32_t HSPolarity; /*!< HSYNC pin polarity + This parameter can be any value of @ref DSI_HSYNC_Polarity */ + + uint32_t VSPolarity; /*!< VSYNC pin polarity + This parameter can be any value of @ref DSI_VSYNC_Active_Polarity */ + + uint32_t DEPolarity; /*!< Data Enable pin polarity + This parameter can be any value of @ref DSI_DATA_ENABLE_Polarity */ + + uint32_t VSyncPol; /*!< VSync edge on which the LTDC is halted + This parameter can be any value of @ref DSI_Vsync_Polarity */ + + uint32_t AutomaticRefresh; /*!< Automatic refresh mode + This parameter can be any value of @ref DSI_AutomaticRefresh */ + + uint32_t TEAcknowledgeRequest; /*!< Tearing Effect Acknowledge Request Enable + This parameter can be any value of @ref DSI_TE_AcknowledgeRequest */ + +} DSI_CmdCfgTypeDef; + +/** + * @brief DSI command transmission mode configuration + */ +typedef struct +{ + uint32_t LPGenShortWriteNoP; /*!< Generic Short Write Zero parameters Transmission + This parameter can be any value of @ref DSI_LP_LPGenShortWriteNoP */ + + uint32_t LPGenShortWriteOneP; /*!< Generic Short Write One parameter Transmission + This parameter can be any value of @ref DSI_LP_LPGenShortWriteOneP */ + + uint32_t LPGenShortWriteTwoP; /*!< Generic Short Write Two parameters Transmission + This parameter can be any value of @ref DSI_LP_LPGenShortWriteTwoP */ + + uint32_t LPGenShortReadNoP; /*!< Generic Short Read Zero parameters Transmission + This parameter can be any value of @ref DSI_LP_LPGenShortReadNoP */ + + uint32_t LPGenShortReadOneP; /*!< Generic Short Read One parameter Transmission + This parameter can be any value of @ref DSI_LP_LPGenShortReadOneP */ + + uint32_t LPGenShortReadTwoP; /*!< Generic Short Read Two parameters Transmission + This parameter can be any value of @ref DSI_LP_LPGenShortReadTwoP */ + + uint32_t LPGenLongWrite; /*!< Generic Long Write Transmission + This parameter can be any value of @ref DSI_LP_LPGenLongWrite */ + + uint32_t LPDcsShortWriteNoP; /*!< DCS Short Write Zero parameters Transmission + This parameter can be any value of @ref DSI_LP_LPDcsShortWriteNoP */ + + uint32_t LPDcsShortWriteOneP; /*!< DCS Short Write One parameter Transmission + This parameter can be any value of @ref DSI_LP_LPDcsShortWriteOneP */ + + uint32_t LPDcsShortReadNoP; /*!< DCS Short Read Zero parameters Transmission + This parameter can be any value of @ref DSI_LP_LPDcsShortReadNoP */ + + uint32_t LPDcsLongWrite; /*!< DCS Long Write Transmission + This parameter can be any value of @ref DSI_LP_LPDcsLongWrite */ + + uint32_t LPMaxReadPacket; /*!< Maximum Read Packet Size Transmission + This parameter can be any value of @ref DSI_LP_LPMaxReadPacket */ + + uint32_t AcknowledgeRequest; /*!< Acknowledge Request Enable + This parameter can be any value of @ref DSI_AcknowledgeRequest */ + +} DSI_LPCmdTypeDef; + +/** + * @brief DSI PHY Timings definition + */ +typedef struct +{ + uint32_t ClockLaneHS2LPTime; /*!< The maximum time that the D-PHY clock lane takes to go from high-speed + to low-power transmission */ + + uint32_t ClockLaneLP2HSTime; /*!< The maximum time that the D-PHY clock lane takes to go from low-power + to high-speed transmission */ + + uint32_t DataLaneHS2LPTime; /*!< The 
maximum time that the D-PHY data lanes takes to go from high-speed + to low-power transmission */ + + uint32_t DataLaneLP2HSTime; /*!< The maximum time that the D-PHY data lanes takes to go from low-power + to high-speed transmission */ + + uint32_t DataLaneMaxReadTime; /*!< The maximum time required to perform a read command */ + + uint32_t StopWaitTime; /*!< The minimum wait period to request a High-Speed transmission after the + Stop state */ + +} DSI_PHY_TimerTypeDef; + +/** + * @brief DSI HOST Timeouts definition + */ +typedef struct +{ + uint32_t TimeoutCkdiv; /*!< Time-out clock division */ + + uint32_t HighSpeedTransmissionTimeout; /*!< High-speed transmission time-out */ + + uint32_t LowPowerReceptionTimeout; /*!< Low-power reception time-out */ + + uint32_t HighSpeedReadTimeout; /*!< High-speed read time-out */ + + uint32_t LowPowerReadTimeout; /*!< Low-power read time-out */ + + uint32_t HighSpeedWriteTimeout; /*!< High-speed write time-out */ + + uint32_t HighSpeedWritePrespMode; /*!< High-speed write presp mode + This parameter can be any value of @ref DSI_HS_PrespMode */ + + uint32_t LowPowerWriteTimeout; /*!< Low-speed write time-out */ + + uint32_t BTATimeout; /*!< BTA time-out */ + +} DSI_HOST_TimeoutTypeDef; + +/** + * @brief DSI States Structure definition + */ +typedef enum +{ + HAL_DSI_STATE_RESET = 0x00U, + HAL_DSI_STATE_READY = 0x01U, + HAL_DSI_STATE_ERROR = 0x02U, + HAL_DSI_STATE_BUSY = 0x03U, + HAL_DSI_STATE_TIMEOUT = 0x04U +} HAL_DSI_StateTypeDef; + +/** + * @brief DSI Handle Structure definition + */ +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) +typedef struct __DSI_HandleTypeDef +#else +typedef struct +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ +{ + DSI_TypeDef *Instance; /*!< Register base address */ + DSI_InitTypeDef Init; /*!< DSI required parameters */ + HAL_LockTypeDef Lock; /*!< DSI peripheral status */ + __IO HAL_DSI_StateTypeDef State; /*!< DSI communication state */ + __IO uint32_t ErrorCode; /*!< DSI Error code */ + uint32_t ErrorMsk; /*!< DSI Error monitoring mask */ + +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) + void (* TearingEffectCallback)(struct __DSI_HandleTypeDef *hdsi); /*!< DSI Tearing Effect Callback */ + void (* EndOfRefreshCallback)(struct __DSI_HandleTypeDef *hdsi); /*!< DSI End Of Refresh Callback */ + void (* ErrorCallback)(struct __DSI_HandleTypeDef *hdsi); /*!< DSI Error Callback */ + + void (* MspInitCallback)(struct __DSI_HandleTypeDef *hdsi); /*!< DSI Msp Init callback */ + void (* MspDeInitCallback)(struct __DSI_HandleTypeDef *hdsi); /*!< DSI Msp DeInit callback */ + +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ + +} DSI_HandleTypeDef; + +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) +/** + * @brief HAL DSI Callback ID enumeration definition + */ +typedef enum +{ + HAL_DSI_MSPINIT_CB_ID = 0x00U, /*!< DSI MspInit callback ID */ + HAL_DSI_MSPDEINIT_CB_ID = 0x01U, /*!< DSI MspDeInit callback ID */ + + HAL_DSI_TEARING_EFFECT_CB_ID = 0x02U, /*!< DSI Tearing Effect Callback ID */ + HAL_DSI_ENDOF_REFRESH_CB_ID = 0x03U, /*!< DSI End Of Refresh Callback ID */ + HAL_DSI_ERROR_CB_ID = 0x04U /*!< DSI Error Callback ID */ + +} HAL_DSI_CallbackIDTypeDef; + +/** + * @brief HAL DSI Callback pointer definition + */ +typedef void (*pDSI_CallbackTypeDef)(DSI_HandleTypeDef *hdsi); /*!< pointer to an DSI callback function */ + +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup DSI_DCS_Command DSI DCS Command + * @{ + */ +#define DSI_ENTER_IDLE_MODE 0x39U +#define 
DSI_ENTER_INVERT_MODE 0x21U +#define DSI_ENTER_NORMAL_MODE 0x13U +#define DSI_ENTER_PARTIAL_MODE 0x12U +#define DSI_ENTER_SLEEP_MODE 0x10U +#define DSI_EXIT_IDLE_MODE 0x38U +#define DSI_EXIT_INVERT_MODE 0x20U +#define DSI_EXIT_SLEEP_MODE 0x11U +#define DSI_GET_3D_CONTROL 0x3FU +#define DSI_GET_ADDRESS_MODE 0x0BU +#define DSI_GET_BLUE_CHANNEL 0x08U +#define DSI_GET_DIAGNOSTIC_RESULT 0x0FU +#define DSI_GET_DISPLAY_MODE 0x0DU +#define DSI_GET_GREEN_CHANNEL 0x07U +#define DSI_GET_PIXEL_FORMAT 0x0CU +#define DSI_GET_POWER_MODE 0x0AU +#define DSI_GET_RED_CHANNEL 0x06U +#define DSI_GET_SCANLINE 0x45U +#define DSI_GET_SIGNAL_MODE 0x0EU +#define DSI_NOP 0x00U +#define DSI_READ_DDB_CONTINUE 0xA8U +#define DSI_READ_DDB_START 0xA1U +#define DSI_READ_MEMORY_CONTINUE 0x3EU +#define DSI_READ_MEMORY_START 0x2EU +#define DSI_SET_3D_CONTROL 0x3DU +#define DSI_SET_ADDRESS_MODE 0x36U +#define DSI_SET_COLUMN_ADDRESS 0x2AU +#define DSI_SET_DISPLAY_OFF 0x28U +#define DSI_SET_DISPLAY_ON 0x29U +#define DSI_SET_GAMMA_CURVE 0x26U +#define DSI_SET_PAGE_ADDRESS 0x2BU +#define DSI_SET_PARTIAL_COLUMNS 0x31U +#define DSI_SET_PARTIAL_ROWS 0x30U +#define DSI_SET_PIXEL_FORMAT 0x3AU +#define DSI_SET_SCROLL_AREA 0x33U +#define DSI_SET_SCROLL_START 0x37U +#define DSI_SET_TEAR_OFF 0x34U +#define DSI_SET_TEAR_ON 0x35U +#define DSI_SET_TEAR_SCANLINE 0x44U +#define DSI_SET_VSYNC_TIMING 0x40U +#define DSI_SOFT_RESET 0x01U +#define DSI_WRITE_LUT 0x2DU +#define DSI_WRITE_MEMORY_CONTINUE 0x3CU +#define DSI_WRITE_MEMORY_START 0x2CU +/** + * @} + */ + +/** @defgroup DSI_Video_Mode_Type DSI Video Mode Type + * @{ + */ +#define DSI_VID_MODE_NB_PULSES 0U +#define DSI_VID_MODE_NB_EVENTS 1U +#define DSI_VID_MODE_BURST 2U +/** + * @} + */ + +/** @defgroup DSI_Color_Mode DSI Color Mode + * @{ + */ +#define DSI_COLOR_MODE_FULL 0x00000000U +#define DSI_COLOR_MODE_EIGHT DSI_WCR_COLM +/** + * @} + */ + +/** @defgroup DSI_ShutDown DSI ShutDown + * @{ + */ +#define DSI_DISPLAY_ON 0x00000000U +#define DSI_DISPLAY_OFF DSI_WCR_SHTDN +/** + * @} + */ + +/** @defgroup DSI_LP_Command DSI LP Command + * @{ + */ +#define DSI_LP_COMMAND_DISABLE 0x00000000U +#define DSI_LP_COMMAND_ENABLE DSI_VMCR_LPCE +/** + * @} + */ + +/** @defgroup DSI_LP_HFP DSI LP HFP + * @{ + */ +#define DSI_LP_HFP_DISABLE 0x00000000U +#define DSI_LP_HFP_ENABLE DSI_VMCR_LPHFPE +/** + * @} + */ + +/** @defgroup DSI_LP_HBP DSI LP HBP + * @{ + */ +#define DSI_LP_HBP_DISABLE 0x00000000U +#define DSI_LP_HBP_ENABLE DSI_VMCR_LPHBPE +/** + * @} + */ + +/** @defgroup DSI_LP_VACT DSI LP VACT + * @{ + */ +#define DSI_LP_VACT_DISABLE 0x00000000U +#define DSI_LP_VACT_ENABLE DSI_VMCR_LPVAE +/** + * @} + */ + +/** @defgroup DSI_LP_VFP DSI LP VFP + * @{ + */ +#define DSI_LP_VFP_DISABLE 0x00000000U +#define DSI_LP_VFP_ENABLE DSI_VMCR_LPVFPE +/** + * @} + */ + +/** @defgroup DSI_LP_VBP DSI LP VBP + * @{ + */ +#define DSI_LP_VBP_DISABLE 0x00000000U +#define DSI_LP_VBP_ENABLE DSI_VMCR_LPVBPE +/** + * @} + */ + +/** @defgroup DSI_LP_VSYNC DSI LP VSYNC + * @{ + */ +#define DSI_LP_VSYNC_DISABLE 0x00000000U +#define DSI_LP_VSYNC_ENABLE DSI_VMCR_LPVSAE +/** + * @} + */ + +/** @defgroup DSI_FBTA_acknowledge DSI FBTA Acknowledge + * @{ + */ +#define DSI_FBTAA_DISABLE 0x00000000U +#define DSI_FBTAA_ENABLE DSI_VMCR_FBTAAE +/** + * @} + */ + +/** @defgroup DSI_TearingEffectSource DSI Tearing Effect Source + * @{ + */ +#define DSI_TE_DSILINK 0x00000000U +#define DSI_TE_EXTERNAL DSI_WCFGR_TESRC +/** + * @} + */ + +/** @defgroup DSI_TearingEffectPolarity DSI Tearing Effect Polarity + * @{ + */ +#define 
DSI_TE_RISING_EDGE 0x00000000U +#define DSI_TE_FALLING_EDGE DSI_WCFGR_TEPOL +/** + * @} + */ + +/** @defgroup DSI_Vsync_Polarity DSI Vsync Polarity + * @{ + */ +#define DSI_VSYNC_FALLING 0x00000000U +#define DSI_VSYNC_RISING DSI_WCFGR_VSPOL +/** + * @} + */ + +/** @defgroup DSI_AutomaticRefresh DSI Automatic Refresh + * @{ + */ +#define DSI_AR_DISABLE 0x00000000U +#define DSI_AR_ENABLE DSI_WCFGR_AR +/** + * @} + */ + +/** @defgroup DSI_TE_AcknowledgeRequest DSI TE Acknowledge Request + * @{ + */ +#define DSI_TE_ACKNOWLEDGE_DISABLE 0x00000000U +#define DSI_TE_ACKNOWLEDGE_ENABLE DSI_CMCR_TEARE +/** + * @} + */ + +/** @defgroup DSI_AcknowledgeRequest DSI Acknowledge Request + * @{ + */ +#define DSI_ACKNOWLEDGE_DISABLE 0x00000000U +#define DSI_ACKNOWLEDGE_ENABLE DSI_CMCR_ARE +/** + * @} + */ + +/** @defgroup DSI_LP_LPGenShortWriteNoP DSI LP LPGen Short Write NoP + * @{ + */ +#define DSI_LP_GSW0P_DISABLE 0x00000000U +#define DSI_LP_GSW0P_ENABLE DSI_CMCR_GSW0TX +/** + * @} + */ + +/** @defgroup DSI_LP_LPGenShortWriteOneP DSI LP LPGen Short Write OneP + * @{ + */ +#define DSI_LP_GSW1P_DISABLE 0x00000000U +#define DSI_LP_GSW1P_ENABLE DSI_CMCR_GSW1TX +/** + * @} + */ + +/** @defgroup DSI_LP_LPGenShortWriteTwoP DSI LP LPGen Short Write TwoP + * @{ + */ +#define DSI_LP_GSW2P_DISABLE 0x00000000U +#define DSI_LP_GSW2P_ENABLE DSI_CMCR_GSW2TX +/** + * @} + */ + +/** @defgroup DSI_LP_LPGenShortReadNoP DSI LP LPGen Short Read NoP + * @{ + */ +#define DSI_LP_GSR0P_DISABLE 0x00000000U +#define DSI_LP_GSR0P_ENABLE DSI_CMCR_GSR0TX +/** + * @} + */ + +/** @defgroup DSI_LP_LPGenShortReadOneP DSI LP LPGen Short Read OneP + * @{ + */ +#define DSI_LP_GSR1P_DISABLE 0x00000000U +#define DSI_LP_GSR1P_ENABLE DSI_CMCR_GSR1TX +/** + * @} + */ + +/** @defgroup DSI_LP_LPGenShortReadTwoP DSI LP LPGen Short Read TwoP + * @{ + */ +#define DSI_LP_GSR2P_DISABLE 0x00000000U +#define DSI_LP_GSR2P_ENABLE DSI_CMCR_GSR2TX +/** + * @} + */ + +/** @defgroup DSI_LP_LPGenLongWrite DSI LP LPGen LongWrite + * @{ + */ +#define DSI_LP_GLW_DISABLE 0x00000000U +#define DSI_LP_GLW_ENABLE DSI_CMCR_GLWTX +/** + * @} + */ + +/** @defgroup DSI_LP_LPDcsShortWriteNoP DSI LP LPDcs Short Write NoP + * @{ + */ +#define DSI_LP_DSW0P_DISABLE 0x00000000U +#define DSI_LP_DSW0P_ENABLE DSI_CMCR_DSW0TX +/** + * @} + */ + +/** @defgroup DSI_LP_LPDcsShortWriteOneP DSI LP LPDcs Short Write OneP + * @{ + */ +#define DSI_LP_DSW1P_DISABLE 0x00000000U +#define DSI_LP_DSW1P_ENABLE DSI_CMCR_DSW1TX +/** + * @} + */ + +/** @defgroup DSI_LP_LPDcsShortReadNoP DSI LP LPDcs Short Read NoP + * @{ + */ +#define DSI_LP_DSR0P_DISABLE 0x00000000U +#define DSI_LP_DSR0P_ENABLE DSI_CMCR_DSR0TX +/** + * @} + */ + +/** @defgroup DSI_LP_LPDcsLongWrite DSI LP LPDcs Long Write + * @{ + */ +#define DSI_LP_DLW_DISABLE 0x00000000U +#define DSI_LP_DLW_ENABLE DSI_CMCR_DLWTX +/** + * @} + */ + +/** @defgroup DSI_LP_LPMaxReadPacket DSI LP LPMax Read Packet + * @{ + */ +#define DSI_LP_MRDP_DISABLE 0x00000000U +#define DSI_LP_MRDP_ENABLE DSI_CMCR_MRDPS +/** + * @} + */ + +/** @defgroup DSI_HS_PrespMode DSI HS Presp Mode + * @{ + */ +#define DSI_HS_PM_DISABLE 0x00000000U +#define DSI_HS_PM_ENABLE DSI_TCCR3_PM +/** + * @} + */ + + +/** @defgroup DSI_Automatic_Clk_Lane_Control DSI Automatic Clk Lane Control + * @{ + */ +#define DSI_AUTO_CLK_LANE_CTRL_DISABLE 0x00000000U +#define DSI_AUTO_CLK_LANE_CTRL_ENABLE DSI_CLCR_ACR +/** + * @} + */ + +/** @defgroup DSI_Number_Of_Lanes DSI Number Of Lanes + * @{ + */ +#define DSI_ONE_DATA_LANE 0U +#define DSI_TWO_DATA_LANES 1U +/** + * @} + */ + +/** @defgroup 
DSI_FlowControl DSI Flow Control + * @{ + */ +#define DSI_FLOW_CONTROL_CRC_RX DSI_PCR_CRCRXE +#define DSI_FLOW_CONTROL_ECC_RX DSI_PCR_ECCRXE +#define DSI_FLOW_CONTROL_BTA DSI_PCR_BTAE +#define DSI_FLOW_CONTROL_EOTP_RX DSI_PCR_ETRXE +#define DSI_FLOW_CONTROL_EOTP_TX DSI_PCR_ETTXE +#define DSI_FLOW_CONTROL_ALL (DSI_FLOW_CONTROL_CRC_RX | DSI_FLOW_CONTROL_ECC_RX | \ + DSI_FLOW_CONTROL_BTA | DSI_FLOW_CONTROL_EOTP_RX | \ + DSI_FLOW_CONTROL_EOTP_TX) +/** + * @} + */ + +/** @defgroup DSI_Color_Coding DSI Color Coding + * @{ + */ +#define DSI_RGB565 0x00000000U /*!< The values 0x00000001 and 0x00000002 can also be used for the RGB565 color mode configuration */ +#define DSI_RGB666 0x00000003U /*!< The value 0x00000004 can also be used for the RGB666 color mode configuration */ +#define DSI_RGB888 0x00000005U +/** + * @} + */ + +/** @defgroup DSI_LooselyPacked DSI Loosely Packed + * @{ + */ +#define DSI_LOOSELY_PACKED_ENABLE DSI_LCOLCR_LPE +#define DSI_LOOSELY_PACKED_DISABLE 0x00000000U +/** + * @} + */ + +/** @defgroup DSI_HSYNC_Polarity DSI HSYNC Polarity + * @{ + */ +#define DSI_HSYNC_ACTIVE_HIGH 0x00000000U +#define DSI_HSYNC_ACTIVE_LOW DSI_LPCR_HSP +/** + * @} + */ + +/** @defgroup DSI_VSYNC_Active_Polarity DSI VSYNC Active Polarity + * @{ + */ +#define DSI_VSYNC_ACTIVE_HIGH 0x00000000U +#define DSI_VSYNC_ACTIVE_LOW DSI_LPCR_VSP +/** + * @} + */ + +/** @defgroup DSI_DATA_ENABLE_Polarity DSI DATA ENABLE Polarity + * @{ + */ +#define DSI_DATA_ENABLE_ACTIVE_HIGH 0x00000000U +#define DSI_DATA_ENABLE_ACTIVE_LOW DSI_LPCR_DEP +/** + * @} + */ + +/** @defgroup DSI_PLL_IDF DSI PLL IDF + * @{ + */ +#define DSI_PLL_IN_DIV1 0x00000001U +#define DSI_PLL_IN_DIV2 0x00000002U +#define DSI_PLL_IN_DIV3 0x00000003U +#define DSI_PLL_IN_DIV4 0x00000004U +#define DSI_PLL_IN_DIV5 0x00000005U +#define DSI_PLL_IN_DIV6 0x00000006U +#define DSI_PLL_IN_DIV7 0x00000007U +/** + * @} + */ + +/** @defgroup DSI_PLL_ODF DSI PLL ODF + * @{ + */ +#define DSI_PLL_OUT_DIV1 0x00000000U +#define DSI_PLL_OUT_DIV2 0x00000001U +#define DSI_PLL_OUT_DIV4 0x00000002U +#define DSI_PLL_OUT_DIV8 0x00000003U +/** + * @} + */ + +/** @defgroup DSI_Flags DSI Flags + * @{ + */ +#define DSI_FLAG_TE DSI_WISR_TEIF +#define DSI_FLAG_ER DSI_WISR_ERIF +#define DSI_FLAG_BUSY DSI_WISR_BUSY +#define DSI_FLAG_PLLLS DSI_WISR_PLLLS +#define DSI_FLAG_PLLL DSI_WISR_PLLLIF +#define DSI_FLAG_PLLU DSI_WISR_PLLUIF +#define DSI_FLAG_RRS DSI_WISR_RRS +#define DSI_FLAG_RR DSI_WISR_RRIF +/** + * @} + */ + +/** @defgroup DSI_Interrupts DSI Interrupts + * @{ + */ +#define DSI_IT_TE DSI_WIER_TEIE +#define DSI_IT_ER DSI_WIER_ERIE +#define DSI_IT_PLLL DSI_WIER_PLLLIE +#define DSI_IT_PLLU DSI_WIER_PLLUIE +#define DSI_IT_RR DSI_WIER_RRIE +/** + * @} + */ + +/** @defgroup DSI_SHORT_WRITE_PKT_Data_Type DSI SHORT WRITE PKT Data Type + * @{ + */ +#define DSI_DCS_SHORT_PKT_WRITE_P0 0x00000005U /*!< DCS short write, no parameters */ +#define DSI_DCS_SHORT_PKT_WRITE_P1 0x00000015U /*!< DCS short write, one parameter */ +#define DSI_GEN_SHORT_PKT_WRITE_P0 0x00000003U /*!< Generic short write, no parameters */ +#define DSI_GEN_SHORT_PKT_WRITE_P1 0x00000013U /*!< Generic short write, one parameter */ +#define DSI_GEN_SHORT_PKT_WRITE_P2 0x00000023U /*!< Generic short write, two parameters */ +/** + * @} + */ + +/** @defgroup DSI_LONG_WRITE_PKT_Data_Type DSI LONG WRITE PKT Data Type + * @{ + */ +#define DSI_DCS_LONG_PKT_WRITE 0x00000039U /*!< DCS long write */ +#define DSI_GEN_LONG_PKT_WRITE 0x00000029U /*!< Generic long write */ +/** + * @} + */ + +/** @defgroup 
DSI_SHORT_READ_PKT_Data_Type DSI SHORT READ PKT Data Type + * @{ + */ +#define DSI_DCS_SHORT_PKT_READ 0x00000006U /*!< DCS short read */ +#define DSI_GEN_SHORT_PKT_READ_P0 0x00000004U /*!< Generic short read, no parameters */ +#define DSI_GEN_SHORT_PKT_READ_P1 0x00000014U /*!< Generic short read, one parameter */ +#define DSI_GEN_SHORT_PKT_READ_P2 0x00000024U /*!< Generic short read, two parameters */ +/** + * @} + */ + +/** @defgroup DSI_Error_Data_Type DSI Error Data Type + * @{ + */ +#define HAL_DSI_ERROR_NONE 0U +#define HAL_DSI_ERROR_ACK 0x00000001U /*!< acknowledge errors */ +#define HAL_DSI_ERROR_PHY 0x00000002U /*!< PHY related errors */ +#define HAL_DSI_ERROR_TX 0x00000004U /*!< transmission error */ +#define HAL_DSI_ERROR_RX 0x00000008U /*!< reception error */ +#define HAL_DSI_ERROR_ECC 0x00000010U /*!< ECC errors */ +#define HAL_DSI_ERROR_CRC 0x00000020U /*!< CRC error */ +#define HAL_DSI_ERROR_PSE 0x00000040U /*!< Packet Size error */ +#define HAL_DSI_ERROR_EOT 0x00000080U /*!< End Of Transmission error */ +#define HAL_DSI_ERROR_OVF 0x00000100U /*!< FIFO overflow error */ +#define HAL_DSI_ERROR_GEN 0x00000200U /*!< Generic FIFO related errors */ +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) +#define HAL_DSI_ERROR_INVALID_CALLBACK 0x00000400U /*!< DSI Invalid Callback error */ +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup DSI_Lane_Group DSI Lane Group + * @{ + */ +#define DSI_CLOCK_LANE 0x00000000U +#define DSI_DATA_LANES 0x00000001U +/** + * @} + */ + +/** @defgroup DSI_Communication_Delay DSI Communication Delay + * @{ + */ +#define DSI_SLEW_RATE_HSTX 0x00000000U +#define DSI_SLEW_RATE_LPTX 0x00000001U +#define DSI_HS_DELAY 0x00000002U +/** + * @} + */ + +/** @defgroup DSI_CustomLane DSI CustomLane + * @{ + */ +#define DSI_SWAP_LANE_PINS 0x00000000U +#define DSI_INVERT_HS_SIGNAL 0x00000001U +/** + * @} + */ + +/** @defgroup DSI_Lane_Select DSI Lane Select + * @{ + */ +#define DSI_CLK_LANE 0x00000000U +#define DSI_DATA_LANE0 0x00000001U +#define DSI_DATA_LANE1 0x00000002U +/** + * @} + */ + +/** @defgroup DSI_PHY_Timing DSI PHY Timing + * @{ + */ +#define DSI_TCLK_POST 0x00000000U +#define DSI_TLPX_CLK 0x00000001U +#define DSI_THS_EXIT 0x00000002U +#define DSI_TLPX_DATA 0x00000003U +#define DSI_THS_ZERO 0x00000004U +#define DSI_THS_TRAIL 0x00000005U +#define DSI_THS_PREPARE 0x00000006U +#define DSI_TCLK_ZERO 0x00000007U +#define DSI_TCLK_PREPARE 0x00000008U +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** + * @brief Reset DSI handle state. + * @param __HANDLE__: DSI handle + * @retval None + */ +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) +#define __HAL_DSI_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->State = HAL_DSI_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_DSI_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_DSI_STATE_RESET) +#endif /*USE_HAL_DSI_REGISTER_CALLBACKS */ + +/** + * @brief Enables the DSI host. + * @param __HANDLE__ DSI handle + * @retval None. + */ +#define __HAL_DSI_ENABLE(__HANDLE__) do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT((__HANDLE__)->Instance->CR, DSI_CR_EN);\ + /* Delay after an DSI Host enabling */ \ + tmpreg = READ_BIT((__HANDLE__)->Instance->CR, DSI_CR_EN);\ + UNUSED(tmpreg); \ + } while(0U) + +/** + * @brief Disables the DSI host. + * @param __HANDLE__ DSI handle + * @retval None. 
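+ * @note For illustration only (not mandated by this header; assumes a + * DSI_HandleTypeDef variable hdsi that was initialized with HAL_DSI_Init()), + * the enable/disable macros in this section can be combined, for example to + * power the interface down: + * __HAL_DSI_WRAPPER_DISABLE(&hdsi); + * __HAL_DSI_DISABLE(&hdsi); + * __HAL_DSI_PLL_DISABLE(&hdsi); + * __HAL_DSI_REG_DISABLE(&hdsi);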
+ */ +#define __HAL_DSI_DISABLE(__HANDLE__) do { \ + __IO uint32_t tmpreg = 0x00U; \ + CLEAR_BIT((__HANDLE__)->Instance->CR, DSI_CR_EN);\ + /* Delay after an DSI Host disabling */ \ + tmpreg = READ_BIT((__HANDLE__)->Instance->CR, DSI_CR_EN);\ + UNUSED(tmpreg); \ + } while(0U) + +/** + * @brief Enables the DSI wrapper. + * @param __HANDLE__ DSI handle + * @retval None. + */ +#define __HAL_DSI_WRAPPER_ENABLE(__HANDLE__) do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT((__HANDLE__)->Instance->WCR, DSI_WCR_DSIEN);\ + /* Delay after an DSI warpper enabling */ \ + tmpreg = READ_BIT((__HANDLE__)->Instance->WCR, DSI_WCR_DSIEN);\ + UNUSED(tmpreg); \ + } while(0U) + +/** + * @brief Disable the DSI wrapper. + * @param __HANDLE__ DSI handle + * @retval None. + */ +#define __HAL_DSI_WRAPPER_DISABLE(__HANDLE__) do { \ + __IO uint32_t tmpreg = 0x00U; \ + CLEAR_BIT((__HANDLE__)->Instance->WCR, DSI_WCR_DSIEN);\ + /* Delay after an DSI warpper disabling*/ \ + tmpreg = READ_BIT((__HANDLE__)->Instance->WCR, DSI_WCR_DSIEN);\ + UNUSED(tmpreg); \ + } while(0U) + +/** + * @brief Enables the DSI PLL. + * @param __HANDLE__ DSI handle + * @retval None. + */ +#define __HAL_DSI_PLL_ENABLE(__HANDLE__) do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT((__HANDLE__)->Instance->WRPCR, DSI_WRPCR_PLLEN);\ + /* Delay after an DSI PLL enabling */ \ + tmpreg = READ_BIT((__HANDLE__)->Instance->WRPCR, DSI_WRPCR_PLLEN);\ + UNUSED(tmpreg); \ + } while(0U) + +/** + * @brief Disables the DSI PLL. + * @param __HANDLE__ DSI handle + * @retval None. + */ +#define __HAL_DSI_PLL_DISABLE(__HANDLE__) do { \ + __IO uint32_t tmpreg = 0x00U; \ + CLEAR_BIT((__HANDLE__)->Instance->WRPCR, DSI_WRPCR_PLLEN);\ + /* Delay after an DSI PLL disabling */ \ + tmpreg = READ_BIT((__HANDLE__)->Instance->WRPCR, DSI_WRPCR_PLLEN);\ + UNUSED(tmpreg); \ + } while(0U) + +/** + * @brief Enables the DSI regulator. + * @param __HANDLE__ DSI handle + * @retval None. + */ +#define __HAL_DSI_REG_ENABLE(__HANDLE__) do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT((__HANDLE__)->Instance->WRPCR, DSI_WRPCR_REGEN);\ + /* Delay after an DSI regulator enabling */ \ + tmpreg = READ_BIT((__HANDLE__)->Instance->WRPCR, DSI_WRPCR_REGEN);\ + UNUSED(tmpreg); \ + } while(0U) + +/** + * @brief Disables the DSI regulator. + * @param __HANDLE__ DSI handle + * @retval None. + */ +#define __HAL_DSI_REG_DISABLE(__HANDLE__) do { \ + __IO uint32_t tmpreg = 0x00U; \ + CLEAR_BIT((__HANDLE__)->Instance->WRPCR, DSI_WRPCR_REGEN);\ + /* Delay after an DSI regulator disabling */ \ + tmpreg = READ_BIT((__HANDLE__)->Instance->WRPCR, DSI_WRPCR_REGEN);\ + UNUSED(tmpreg); \ + } while(0U) + +/** + * @brief Get the DSI pending flags. + * @param __HANDLE__ DSI handle. + * @param __FLAG__ Get the specified flag. + * This parameter can be any combination of the following values: + * @arg DSI_FLAG_TE : Tearing Effect Interrupt Flag + * @arg DSI_FLAG_ER : End of Refresh Interrupt Flag + * @arg DSI_FLAG_BUSY : Busy Flag + * @arg DSI_FLAG_PLLLS: PLL Lock Status + * @arg DSI_FLAG_PLLL : PLL Lock Interrupt Flag + * @arg DSI_FLAG_PLLU : PLL Unlock Interrupt Flag + * @arg DSI_FLAG_RRS : Regulator Ready Flag + * @arg DSI_FLAG_RR : Regulator Ready Interrupt Flag + * @retval The state of FLAG (SET or RESET). + */ +#define __HAL_DSI_GET_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->WISR & (__FLAG__)) + +/** + * @brief Clears the DSI pending flags. + * @param __HANDLE__ DSI handle. + * @param __FLAG__ specifies the flag to clear. 
+ * This parameter can be any combination of the following values: + * @arg DSI_FLAG_TE : Tearing Effect Interrupt Flag + * @arg DSI_FLAG_ER : End of Refresh Interrupt Flag + * @arg DSI_FLAG_PLLL : PLL Lock Interrupt Flag + * @arg DSI_FLAG_PLLU : PLL Unlock Interrupt Flag + * @arg DSI_FLAG_RR : Regulator Ready Interrupt Flag + * @retval None + */ +#define __HAL_DSI_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->WIFCR = (__FLAG__)) + +/** + * @brief Enables the specified DSI interrupts. + * @param __HANDLE__ DSI handle. + * @param __INTERRUPT__ specifies the DSI interrupt sources to be enabled. + * This parameter can be any combination of the following values: + * @arg DSI_IT_TE : Tearing Effect Interrupt + * @arg DSI_IT_ER : End of Refresh Interrupt + * @arg DSI_IT_PLLL: PLL Lock Interrupt + * @arg DSI_IT_PLLU: PLL Unlock Interrupt + * @arg DSI_IT_RR : Regulator Ready Interrupt + * @retval None + */ +#define __HAL_DSI_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->WIER |= (__INTERRUPT__)) + +/** + * @brief Disables the specified DSI interrupts. + * @param __HANDLE__ DSI handle + * @param __INTERRUPT__ specifies the DSI interrupt sources to be disabled. + * This parameter can be any combination of the following values: + * @arg DSI_IT_TE : Tearing Effect Interrupt + * @arg DSI_IT_ER : End of Refresh Interrupt + * @arg DSI_IT_PLLL: PLL Lock Interrupt + * @arg DSI_IT_PLLU: PLL Unlock Interrupt + * @arg DSI_IT_RR : Regulator Ready Interrupt + * @retval None + */ +#define __HAL_DSI_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->WIER &= ~(__INTERRUPT__)) + +/** + * @brief Checks whether the specified DSI interrupt source is enabled or not. + * @param __HANDLE__ DSI handle + * @param __INTERRUPT__ specifies the DSI interrupt source to check. + * This parameter can be one of the following values: + * @arg DSI_IT_TE : Tearing Effect Interrupt + * @arg DSI_IT_ER : End of Refresh Interrupt + * @arg DSI_IT_PLLL: PLL Lock Interrupt + * @arg DSI_IT_PLLU: PLL Unlock Interrupt + * @arg DSI_IT_RR : Regulator Ready Interrupt + * @retval The state of INTERRUPT (SET or RESET). 
+ */ +#define __HAL_DSI_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->WIER & (__INTERRUPT__)) + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup DSI_Exported_Functions DSI Exported Functions + * @{ + */ +HAL_StatusTypeDef HAL_DSI_Init(DSI_HandleTypeDef *hdsi, DSI_PLLInitTypeDef *PLLInit); +HAL_StatusTypeDef HAL_DSI_DeInit(DSI_HandleTypeDef *hdsi); +void HAL_DSI_MspInit(DSI_HandleTypeDef *hdsi); +void HAL_DSI_MspDeInit(DSI_HandleTypeDef *hdsi); + +void HAL_DSI_IRQHandler(DSI_HandleTypeDef *hdsi); +void HAL_DSI_TearingEffectCallback(DSI_HandleTypeDef *hdsi); +void HAL_DSI_EndOfRefreshCallback(DSI_HandleTypeDef *hdsi); +void HAL_DSI_ErrorCallback(DSI_HandleTypeDef *hdsi); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_DSI_RegisterCallback(DSI_HandleTypeDef *hdsi, HAL_DSI_CallbackIDTypeDef CallbackID, + pDSI_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_DSI_UnRegisterCallback(DSI_HandleTypeDef *hdsi, HAL_DSI_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ + +HAL_StatusTypeDef HAL_DSI_SetGenericVCID(DSI_HandleTypeDef *hdsi, uint32_t VirtualChannelID); +HAL_StatusTypeDef HAL_DSI_ConfigVideoMode(DSI_HandleTypeDef *hdsi, DSI_VidCfgTypeDef *VidCfg); +HAL_StatusTypeDef HAL_DSI_ConfigAdaptedCommandMode(DSI_HandleTypeDef *hdsi, DSI_CmdCfgTypeDef *CmdCfg); +HAL_StatusTypeDef HAL_DSI_ConfigCommand(DSI_HandleTypeDef *hdsi, DSI_LPCmdTypeDef *LPCmd); +HAL_StatusTypeDef HAL_DSI_ConfigFlowControl(DSI_HandleTypeDef *hdsi, uint32_t FlowControl); +HAL_StatusTypeDef HAL_DSI_ConfigPhyTimer(DSI_HandleTypeDef *hdsi, DSI_PHY_TimerTypeDef *PhyTimers); +HAL_StatusTypeDef HAL_DSI_ConfigHostTimeouts(DSI_HandleTypeDef *hdsi, DSI_HOST_TimeoutTypeDef *HostTimeouts); +HAL_StatusTypeDef HAL_DSI_Start(DSI_HandleTypeDef *hdsi); +HAL_StatusTypeDef HAL_DSI_Stop(DSI_HandleTypeDef *hdsi); +HAL_StatusTypeDef HAL_DSI_Refresh(DSI_HandleTypeDef *hdsi); +HAL_StatusTypeDef HAL_DSI_ColorMode(DSI_HandleTypeDef *hdsi, uint32_t ColorMode); +HAL_StatusTypeDef HAL_DSI_Shutdown(DSI_HandleTypeDef *hdsi, uint32_t Shutdown); +HAL_StatusTypeDef HAL_DSI_ShortWrite(DSI_HandleTypeDef *hdsi, + uint32_t ChannelID, + uint32_t Mode, + uint32_t Param1, + uint32_t Param2); +HAL_StatusTypeDef HAL_DSI_LongWrite(DSI_HandleTypeDef *hdsi, + uint32_t ChannelID, + uint32_t Mode, + uint32_t NbParams, + uint32_t Param1, + uint8_t *ParametersTable); +HAL_StatusTypeDef HAL_DSI_Read(DSI_HandleTypeDef *hdsi, + uint32_t ChannelNbr, + uint8_t *Array, + uint32_t Size, + uint32_t Mode, + uint32_t DCSCmd, + uint8_t *ParametersTable); +HAL_StatusTypeDef HAL_DSI_EnterULPMData(DSI_HandleTypeDef *hdsi); +HAL_StatusTypeDef HAL_DSI_ExitULPMData(DSI_HandleTypeDef *hdsi); +HAL_StatusTypeDef HAL_DSI_EnterULPM(DSI_HandleTypeDef *hdsi); +HAL_StatusTypeDef HAL_DSI_ExitULPM(DSI_HandleTypeDef *hdsi); + +HAL_StatusTypeDef HAL_DSI_PatternGeneratorStart(DSI_HandleTypeDef *hdsi, uint32_t Mode, uint32_t Orientation); +HAL_StatusTypeDef HAL_DSI_PatternGeneratorStop(DSI_HandleTypeDef *hdsi); + +HAL_StatusTypeDef HAL_DSI_SetSlewRateAndDelayTuning(DSI_HandleTypeDef *hdsi, uint32_t CommDelay, uint32_t Lane, + uint32_t Value); +HAL_StatusTypeDef HAL_DSI_SetLowPowerRXFilter(DSI_HandleTypeDef *hdsi, uint32_t Frequency); +HAL_StatusTypeDef HAL_DSI_SetSDD(DSI_HandleTypeDef *hdsi, FunctionalState State); +HAL_StatusTypeDef HAL_DSI_SetLanePinsConfiguration(DSI_HandleTypeDef *hdsi, uint32_t CustomLane, 
uint32_t Lane, + FunctionalState State); +HAL_StatusTypeDef HAL_DSI_SetPHYTimings(DSI_HandleTypeDef *hdsi, uint32_t Timing, FunctionalState State, + uint32_t Value); +HAL_StatusTypeDef HAL_DSI_ForceTXStopMode(DSI_HandleTypeDef *hdsi, uint32_t Lane, FunctionalState State); +HAL_StatusTypeDef HAL_DSI_ForceRXLowPower(DSI_HandleTypeDef *hdsi, FunctionalState State); +HAL_StatusTypeDef HAL_DSI_ForceDataLanesInRX(DSI_HandleTypeDef *hdsi, FunctionalState State); +HAL_StatusTypeDef HAL_DSI_SetPullDown(DSI_HandleTypeDef *hdsi, FunctionalState State); +HAL_StatusTypeDef HAL_DSI_SetContentionDetectionOff(DSI_HandleTypeDef *hdsi, FunctionalState State); + +uint32_t HAL_DSI_GetError(DSI_HandleTypeDef *hdsi); +HAL_StatusTypeDef HAL_DSI_ConfigErrorMonitor(DSI_HandleTypeDef *hdsi, uint32_t ActiveErrors); +HAL_DSI_StateTypeDef HAL_DSI_GetState(DSI_HandleTypeDef *hdsi); +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/** @defgroup DSI_Private_Types DSI Private Types + * @{ + */ + +/** + * @} + */ + +/* Private defines -----------------------------------------------------------*/ +/** @defgroup DSI_Private_Defines DSI Private Defines + * @{ + */ + +/** + * @} + */ + +/* Private variables ---------------------------------------------------------*/ +/** @defgroup DSI_Private_Variables DSI Private Variables + * @{ + */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup DSI_Private_Constants DSI Private Constants + * @{ + */ +#define DSI_MAX_RETURN_PKT_SIZE (0x00000037U) /*!< Maximum return packet configuration */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup DSI_Private_Macros DSI Private Macros + * @{ + */ +#define IS_DSI_PLL_NDIV(NDIV) ((10U <= (NDIV)) && ((NDIV) <= 125U)) +#define IS_DSI_PLL_IDF(IDF) (((IDF) == DSI_PLL_IN_DIV1) || \ + ((IDF) == DSI_PLL_IN_DIV2) || \ + ((IDF) == DSI_PLL_IN_DIV3) || \ + ((IDF) == DSI_PLL_IN_DIV4) || \ + ((IDF) == DSI_PLL_IN_DIV5) || \ + ((IDF) == DSI_PLL_IN_DIV6) || \ + ((IDF) == DSI_PLL_IN_DIV7)) +#define IS_DSI_PLL_ODF(ODF) (((ODF) == DSI_PLL_OUT_DIV1) || \ + ((ODF) == DSI_PLL_OUT_DIV2) || \ + ((ODF) == DSI_PLL_OUT_DIV4) || \ + ((ODF) == DSI_PLL_OUT_DIV8)) +#define IS_DSI_AUTO_CLKLANE_CONTROL(AutoClkLane) (((AutoClkLane) == DSI_AUTO_CLK_LANE_CTRL_DISABLE) || ((AutoClkLane) == DSI_AUTO_CLK_LANE_CTRL_ENABLE)) +#define IS_DSI_NUMBER_OF_LANES(NumberOfLanes) (((NumberOfLanes) == DSI_ONE_DATA_LANE) || ((NumberOfLanes) == DSI_TWO_DATA_LANES)) +#define IS_DSI_FLOW_CONTROL(FlowControl) (((FlowControl) | DSI_FLOW_CONTROL_ALL) == DSI_FLOW_CONTROL_ALL) +#define IS_DSI_COLOR_CODING(ColorCoding) ((ColorCoding) <= 5U) +#define IS_DSI_LOOSELY_PACKED(LooselyPacked) (((LooselyPacked) == DSI_LOOSELY_PACKED_ENABLE) || ((LooselyPacked) == DSI_LOOSELY_PACKED_DISABLE)) +#define IS_DSI_DE_POLARITY(DataEnable) (((DataEnable) == DSI_DATA_ENABLE_ACTIVE_HIGH) || ((DataEnable) == DSI_DATA_ENABLE_ACTIVE_LOW)) +#define IS_DSI_VSYNC_POLARITY(VSYNC) (((VSYNC) == DSI_VSYNC_ACTIVE_HIGH) || ((VSYNC) == DSI_VSYNC_ACTIVE_LOW)) +#define IS_DSI_HSYNC_POLARITY(HSYNC) (((HSYNC) == DSI_HSYNC_ACTIVE_HIGH) || ((HSYNC) == DSI_HSYNC_ACTIVE_LOW)) +#define IS_DSI_VIDEO_MODE_TYPE(VideoModeType) (((VideoModeType) == DSI_VID_MODE_NB_PULSES) || \ + ((VideoModeType) == DSI_VID_MODE_NB_EVENTS) || \ + ((VideoModeType) == DSI_VID_MODE_BURST)) +#define IS_DSI_COLOR_MODE(ColorMode) (((ColorMode) == DSI_COLOR_MODE_FULL) || ((ColorMode) == 
DSI_COLOR_MODE_EIGHT)) +#define IS_DSI_SHUT_DOWN(ShutDown) (((ShutDown) == DSI_DISPLAY_ON) || ((ShutDown) == DSI_DISPLAY_OFF)) +#define IS_DSI_LP_COMMAND(LPCommand) (((LPCommand) == DSI_LP_COMMAND_DISABLE) || ((LPCommand) == DSI_LP_COMMAND_ENABLE)) +#define IS_DSI_LP_HFP(LPHFP) (((LPHFP) == DSI_LP_HFP_DISABLE) || ((LPHFP) == DSI_LP_HFP_ENABLE)) +#define IS_DSI_LP_HBP(LPHBP) (((LPHBP) == DSI_LP_HBP_DISABLE) || ((LPHBP) == DSI_LP_HBP_ENABLE)) +#define IS_DSI_LP_VACTIVE(LPVActive) (((LPVActive) == DSI_LP_VACT_DISABLE) || ((LPVActive) == DSI_LP_VACT_ENABLE)) +#define IS_DSI_LP_VFP(LPVFP) (((LPVFP) == DSI_LP_VFP_DISABLE) || ((LPVFP) == DSI_LP_VFP_ENABLE)) +#define IS_DSI_LP_VBP(LPVBP) (((LPVBP) == DSI_LP_VBP_DISABLE) || ((LPVBP) == DSI_LP_VBP_ENABLE)) +#define IS_DSI_LP_VSYNC(LPVSYNC) (((LPVSYNC) == DSI_LP_VSYNC_DISABLE) || ((LPVSYNC) == DSI_LP_VSYNC_ENABLE)) +#define IS_DSI_FBTAA(FrameBTAAcknowledge) (((FrameBTAAcknowledge) == DSI_FBTAA_DISABLE) || ((FrameBTAAcknowledge) == DSI_FBTAA_ENABLE)) +#define IS_DSI_TE_SOURCE(TESource) (((TESource) == DSI_TE_DSILINK) || ((TESource) == DSI_TE_EXTERNAL)) +#define IS_DSI_TE_POLARITY(TEPolarity) (((TEPolarity) == DSI_TE_RISING_EDGE) || ((TEPolarity) == DSI_TE_FALLING_EDGE)) +#define IS_DSI_AUTOMATIC_REFRESH(AutomaticRefresh) (((AutomaticRefresh) == DSI_AR_DISABLE) || ((AutomaticRefresh) == DSI_AR_ENABLE)) +#define IS_DSI_VS_POLARITY(VSPolarity) (((VSPolarity) == DSI_VSYNC_FALLING) || ((VSPolarity) == DSI_VSYNC_RISING)) +#define IS_DSI_TE_ACK_REQUEST(TEAcknowledgeRequest) (((TEAcknowledgeRequest) == DSI_TE_ACKNOWLEDGE_DISABLE) || ((TEAcknowledgeRequest) == DSI_TE_ACKNOWLEDGE_ENABLE)) +#define IS_DSI_ACK_REQUEST(AcknowledgeRequest) (((AcknowledgeRequest) == DSI_ACKNOWLEDGE_DISABLE) || ((AcknowledgeRequest) == DSI_ACKNOWLEDGE_ENABLE)) +#define IS_DSI_LP_GSW0P(LP_GSW0P) (((LP_GSW0P) == DSI_LP_GSW0P_DISABLE) || ((LP_GSW0P) == DSI_LP_GSW0P_ENABLE)) +#define IS_DSI_LP_GSW1P(LP_GSW1P) (((LP_GSW1P) == DSI_LP_GSW1P_DISABLE) || ((LP_GSW1P) == DSI_LP_GSW1P_ENABLE)) +#define IS_DSI_LP_GSW2P(LP_GSW2P) (((LP_GSW2P) == DSI_LP_GSW2P_DISABLE) || ((LP_GSW2P) == DSI_LP_GSW2P_ENABLE)) +#define IS_DSI_LP_GSR0P(LP_GSR0P) (((LP_GSR0P) == DSI_LP_GSR0P_DISABLE) || ((LP_GSR0P) == DSI_LP_GSR0P_ENABLE)) +#define IS_DSI_LP_GSR1P(LP_GSR1P) (((LP_GSR1P) == DSI_LP_GSR1P_DISABLE) || ((LP_GSR1P) == DSI_LP_GSR1P_ENABLE)) +#define IS_DSI_LP_GSR2P(LP_GSR2P) (((LP_GSR2P) == DSI_LP_GSR2P_DISABLE) || ((LP_GSR2P) == DSI_LP_GSR2P_ENABLE)) +#define IS_DSI_LP_GLW(LP_GLW) (((LP_GLW) == DSI_LP_GLW_DISABLE) || ((LP_GLW) == DSI_LP_GLW_ENABLE)) +#define IS_DSI_LP_DSW0P(LP_DSW0P) (((LP_DSW0P) == DSI_LP_DSW0P_DISABLE) || ((LP_DSW0P) == DSI_LP_DSW0P_ENABLE)) +#define IS_DSI_LP_DSW1P(LP_DSW1P) (((LP_DSW1P) == DSI_LP_DSW1P_DISABLE) || ((LP_DSW1P) == DSI_LP_DSW1P_ENABLE)) +#define IS_DSI_LP_DSR0P(LP_DSR0P) (((LP_DSR0P) == DSI_LP_DSR0P_DISABLE) || ((LP_DSR0P) == DSI_LP_DSR0P_ENABLE)) +#define IS_DSI_LP_DLW(LP_DLW) (((LP_DLW) == DSI_LP_DLW_DISABLE) || ((LP_DLW) == DSI_LP_DLW_ENABLE)) +#define IS_DSI_LP_MRDP(LP_MRDP) (((LP_MRDP) == DSI_LP_MRDP_DISABLE) || ((LP_MRDP) == DSI_LP_MRDP_ENABLE)) +#define IS_DSI_SHORT_WRITE_PACKET_TYPE(MODE) (((MODE) == DSI_DCS_SHORT_PKT_WRITE_P0) || \ + ((MODE) == DSI_DCS_SHORT_PKT_WRITE_P1) || \ + ((MODE) == DSI_GEN_SHORT_PKT_WRITE_P0) || \ + ((MODE) == DSI_GEN_SHORT_PKT_WRITE_P1) || \ + ((MODE) == DSI_GEN_SHORT_PKT_WRITE_P2)) +#define IS_DSI_LONG_WRITE_PACKET_TYPE(MODE) (((MODE) == DSI_DCS_LONG_PKT_WRITE) || \ + ((MODE) == DSI_GEN_LONG_PKT_WRITE)) +#define 
IS_DSI_READ_PACKET_TYPE(MODE) (((MODE) == DSI_DCS_SHORT_PKT_READ) || \ + ((MODE) == DSI_GEN_SHORT_PKT_READ_P0) || \ + ((MODE) == DSI_GEN_SHORT_PKT_READ_P1) || \ + ((MODE) == DSI_GEN_SHORT_PKT_READ_P2)) +#define IS_DSI_COMMUNICATION_DELAY(CommDelay) (((CommDelay) == DSI_SLEW_RATE_HSTX) || ((CommDelay) == DSI_SLEW_RATE_LPTX) || ((CommDelay) == DSI_HS_DELAY)) +#define IS_DSI_LANE_GROUP(Lane) (((Lane) == DSI_CLOCK_LANE) || ((Lane) == DSI_DATA_LANES)) +#define IS_DSI_CUSTOM_LANE(CustomLane) (((CustomLane) == DSI_SWAP_LANE_PINS) || ((CustomLane) == DSI_INVERT_HS_SIGNAL)) +#define IS_DSI_LANE(Lane) (((Lane) == DSI_CLOCK_LANE) || ((Lane) == DSI_DATA_LANE0) || ((Lane) == DSI_DATA_LANE1)) +#define IS_DSI_PHY_TIMING(Timing) (((Timing) == DSI_TCLK_POST ) || \ + ((Timing) == DSI_TLPX_CLK ) || \ + ((Timing) == DSI_THS_EXIT ) || \ + ((Timing) == DSI_TLPX_DATA ) || \ + ((Timing) == DSI_THS_ZERO ) || \ + ((Timing) == DSI_THS_TRAIL ) || \ + ((Timing) == DSI_THS_PREPARE ) || \ + ((Timing) == DSI_TCLK_ZERO ) || \ + ((Timing) == DSI_TCLK_PREPARE)) + +/** + * @} + */ + +/* Private functions prototypes ----------------------------------------------*/ +/** @defgroup DSI_Private_Functions_Prototypes DSI Private Functions Prototypes + * @{ + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup DSI_Private_Functions DSI Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* DSI */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_DSI_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_eth.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_eth.h new file mode 100644 index 000000000..fbc7b3547 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_eth.h @@ -0,0 +1,2217 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_eth.h + * @author MCD Application Team + * @brief Header file of ETH HAL module. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_ETH_H +#define __STM32F7xx_HAL_ETH_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +#if defined (ETH) + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup ETH + * @{ + */ + +/** @addtogroup ETH_Private_Macros + * @{ + */ +#define IS_ETH_PHY_ADDRESS(ADDRESS) ((ADDRESS) <= 0x20) +#define IS_ETH_AUTONEGOTIATION(CMD) (((CMD) == ETH_AUTONEGOTIATION_ENABLE) || \ + ((CMD) == ETH_AUTONEGOTIATION_DISABLE)) +#define IS_ETH_SPEED(SPEED) (((SPEED) == ETH_SPEED_10M) || \ + ((SPEED) == ETH_SPEED_100M)) +#define IS_ETH_DUPLEX_MODE(MODE) (((MODE) == ETH_MODE_FULLDUPLEX) || \ + ((MODE) == ETH_MODE_HALFDUPLEX)) +#define IS_ETH_RX_MODE(MODE) (((MODE) == ETH_RXPOLLING_MODE) || \ + ((MODE) == ETH_RXINTERRUPT_MODE)) +#define IS_ETH_CHECKSUM_MODE(MODE) (((MODE) == ETH_CHECKSUM_BY_HARDWARE) || \ + ((MODE) == ETH_CHECKSUM_BY_SOFTWARE)) +#define IS_ETH_MEDIA_INTERFACE(MODE) (((MODE) == ETH_MEDIA_INTERFACE_MII) || \ + ((MODE) == ETH_MEDIA_INTERFACE_RMII)) +#define IS_ETH_WATCHDOG(CMD) (((CMD) == ETH_WATCHDOG_ENABLE) || \ + ((CMD) == ETH_WATCHDOG_DISABLE)) +#define IS_ETH_JABBER(CMD) (((CMD) == ETH_JABBER_ENABLE) || \ + ((CMD) == ETH_JABBER_DISABLE)) +#define IS_ETH_INTER_FRAME_GAP(GAP) (((GAP) == ETH_INTERFRAMEGAP_96BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_88BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_80BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_72BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_64BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_56BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_48BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_40BIT)) +#define IS_ETH_CARRIER_SENSE(CMD) (((CMD) == ETH_CARRIERSENCE_ENABLE) || \ + ((CMD) == ETH_CARRIERSENCE_DISABLE)) +#define IS_ETH_RECEIVE_OWN(CMD) (((CMD) == ETH_RECEIVEOWN_ENABLE) || \ + ((CMD) == ETH_RECEIVEOWN_DISABLE)) +#define IS_ETH_LOOPBACK_MODE(CMD) (((CMD) == ETH_LOOPBACKMODE_ENABLE) || \ + ((CMD) == ETH_LOOPBACKMODE_DISABLE)) +#define IS_ETH_CHECKSUM_OFFLOAD(CMD) (((CMD) == ETH_CHECKSUMOFFLAOD_ENABLE) || \ + ((CMD) == ETH_CHECKSUMOFFLAOD_DISABLE)) +#define IS_ETH_RETRY_TRANSMISSION(CMD) (((CMD) == ETH_RETRYTRANSMISSION_ENABLE) || \ + ((CMD) == ETH_RETRYTRANSMISSION_DISABLE)) +#define IS_ETH_AUTOMATIC_PADCRC_STRIP(CMD) (((CMD) == ETH_AUTOMATICPADCRCSTRIP_ENABLE) || \ + ((CMD) == ETH_AUTOMATICPADCRCSTRIP_DISABLE)) +#define IS_ETH_BACKOFF_LIMIT(LIMIT) (((LIMIT) == ETH_BACKOFFLIMIT_10) || \ + ((LIMIT) == ETH_BACKOFFLIMIT_8) || \ + ((LIMIT) == ETH_BACKOFFLIMIT_4) || \ + ((LIMIT) == ETH_BACKOFFLIMIT_1)) +#define IS_ETH_DEFERRAL_CHECK(CMD) (((CMD) == ETH_DEFFERRALCHECK_ENABLE) || \ + ((CMD) == ETH_DEFFERRALCHECK_DISABLE)) +#define IS_ETH_RECEIVE_ALL(CMD) (((CMD) == ETH_RECEIVEALL_ENABLE) || \ + ((CMD) == ETH_RECEIVEAll_DISABLE)) +#define IS_ETH_SOURCE_ADDR_FILTER(CMD) (((CMD) == ETH_SOURCEADDRFILTER_NORMAL_ENABLE) || \ + ((CMD) == ETH_SOURCEADDRFILTER_INVERSE_ENABLE) || \ + ((CMD) == ETH_SOURCEADDRFILTER_DISABLE)) +#define IS_ETH_CONTROL_FRAMES(PASS) (((PASS) == ETH_PASSCONTROLFRAMES_BLOCKALL) || \ + ((PASS) == 
ETH_PASSCONTROLFRAMES_FORWARDALL) || \ + ((PASS) == ETH_PASSCONTROLFRAMES_FORWARDPASSEDADDRFILTER)) +#define IS_ETH_BROADCAST_FRAMES_RECEPTION(CMD) (((CMD) == ETH_BROADCASTFRAMESRECEPTION_ENABLE) || \ + ((CMD) == ETH_BROADCASTFRAMESRECEPTION_DISABLE)) +#define IS_ETH_DESTINATION_ADDR_FILTER(FILTER) (((FILTER) == ETH_DESTINATIONADDRFILTER_NORMAL) || \ + ((FILTER) == ETH_DESTINATIONADDRFILTER_INVERSE)) +#define IS_ETH_PROMISCUOUS_MODE(CMD) (((CMD) == ETH_PROMISCUOUS_MODE_ENABLE) || \ + ((CMD) == ETH_PROMISCUOUS_MODE_DISABLE)) +#define IS_ETH_MULTICAST_FRAMES_FILTER(FILTER) (((FILTER) == ETH_MULTICASTFRAMESFILTER_PERFECTHASHTABLE) || \ + ((FILTER) == ETH_MULTICASTFRAMESFILTER_HASHTABLE) || \ + ((FILTER) == ETH_MULTICASTFRAMESFILTER_PERFECT) || \ + ((FILTER) == ETH_MULTICASTFRAMESFILTER_NONE)) +#define IS_ETH_UNICAST_FRAMES_FILTER(FILTER) (((FILTER) == ETH_UNICASTFRAMESFILTER_PERFECTHASHTABLE) || \ + ((FILTER) == ETH_UNICASTFRAMESFILTER_HASHTABLE) || \ + ((FILTER) == ETH_UNICASTFRAMESFILTER_PERFECT)) +#define IS_ETH_PAUSE_TIME(TIME) ((TIME) <= 0xFFFF) +#define IS_ETH_ZEROQUANTA_PAUSE(CMD) (((CMD) == ETH_ZEROQUANTAPAUSE_ENABLE) || \ + ((CMD) == ETH_ZEROQUANTAPAUSE_DISABLE)) +#define IS_ETH_PAUSE_LOW_THRESHOLD(THRESHOLD) (((THRESHOLD) == ETH_PAUSELOWTHRESHOLD_MINUS4) || \ + ((THRESHOLD) == ETH_PAUSELOWTHRESHOLD_MINUS28) || \ + ((THRESHOLD) == ETH_PAUSELOWTHRESHOLD_MINUS144) || \ + ((THRESHOLD) == ETH_PAUSELOWTHRESHOLD_MINUS256)) +#define IS_ETH_UNICAST_PAUSE_FRAME_DETECT(CMD) (((CMD) == ETH_UNICASTPAUSEFRAMEDETECT_ENABLE) || \ + ((CMD) == ETH_UNICASTPAUSEFRAMEDETECT_DISABLE)) +#define IS_ETH_RECEIVE_FLOWCONTROL(CMD) (((CMD) == ETH_RECEIVEFLOWCONTROL_ENABLE) || \ + ((CMD) == ETH_RECEIVEFLOWCONTROL_DISABLE)) +#define IS_ETH_TRANSMIT_FLOWCONTROL(CMD) (((CMD) == ETH_TRANSMITFLOWCONTROL_ENABLE) || \ + ((CMD) == ETH_TRANSMITFLOWCONTROL_DISABLE)) +#define IS_ETH_VLAN_TAG_COMPARISON(COMPARISON) (((COMPARISON) == ETH_VLANTAGCOMPARISON_12BIT) || \ + ((COMPARISON) == ETH_VLANTAGCOMPARISON_16BIT)) +#define IS_ETH_VLAN_TAG_IDENTIFIER(IDENTIFIER) ((IDENTIFIER) <= 0xFFFF) +#define IS_ETH_MAC_ADDRESS0123(ADDRESS) (((ADDRESS) == ETH_MAC_ADDRESS0) || \ + ((ADDRESS) == ETH_MAC_ADDRESS1) || \ + ((ADDRESS) == ETH_MAC_ADDRESS2) || \ + ((ADDRESS) == ETH_MAC_ADDRESS3)) +#define IS_ETH_MAC_ADDRESS123(ADDRESS) (((ADDRESS) == ETH_MAC_ADDRESS1) || \ + ((ADDRESS) == ETH_MAC_ADDRESS2) || \ + ((ADDRESS) == ETH_MAC_ADDRESS3)) +#define IS_ETH_MAC_ADDRESS_FILTER(FILTER) (((FILTER) == ETH_MAC_ADDRESSFILTER_SA) || \ + ((FILTER) == ETH_MAC_ADDRESSFILTER_DA)) +#define IS_ETH_MAC_ADDRESS_MASK(MASK) (((MASK) == ETH_MAC_ADDRESSMASK_BYTE6) || \ + ((MASK) == ETH_MAC_ADDRESSMASK_BYTE5) || \ + ((MASK) == ETH_MAC_ADDRESSMASK_BYTE4) || \ + ((MASK) == ETH_MAC_ADDRESSMASK_BYTE3) || \ + ((MASK) == ETH_MAC_ADDRESSMASK_BYTE2) || \ + ((MASK) == ETH_MAC_ADDRESSMASK_BYTE1)) +#define IS_ETH_DROP_TCPIP_CHECKSUM_FRAME(CMD) (((CMD) == ETH_DROPTCPIPCHECKSUMERRORFRAME_ENABLE) || \ + ((CMD) == ETH_DROPTCPIPCHECKSUMERRORFRAME_DISABLE)) +#define IS_ETH_RECEIVE_STORE_FORWARD(CMD) (((CMD) == ETH_RECEIVESTOREFORWARD_ENABLE) || \ + ((CMD) == ETH_RECEIVESTOREFORWARD_DISABLE)) +#define IS_ETH_FLUSH_RECEIVE_FRAME(CMD) (((CMD) == ETH_FLUSHRECEIVEDFRAME_ENABLE) || \ + ((CMD) == ETH_FLUSHRECEIVEDFRAME_DISABLE)) +#define IS_ETH_TRANSMIT_STORE_FORWARD(CMD) (((CMD) == ETH_TRANSMITSTOREFORWARD_ENABLE) || \ + ((CMD) == ETH_TRANSMITSTOREFORWARD_DISABLE)) +#define IS_ETH_TRANSMIT_THRESHOLD_CONTROL(THRESHOLD) (((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_64BYTES) || \ + 
((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_128BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_192BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_256BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_40BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_32BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_24BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_16BYTES)) +#define IS_ETH_FORWARD_ERROR_FRAMES(CMD) (((CMD) == ETH_FORWARDERRORFRAMES_ENABLE) || \ + ((CMD) == ETH_FORWARDERRORFRAMES_DISABLE)) +#define IS_ETH_FORWARD_UNDERSIZED_GOOD_FRAMES(CMD) (((CMD) == ETH_FORWARDUNDERSIZEDGOODFRAMES_ENABLE) || \ + ((CMD) == ETH_FORWARDUNDERSIZEDGOODFRAMES_DISABLE)) +#define IS_ETH_RECEIVE_THRESHOLD_CONTROL(THRESHOLD) (((THRESHOLD) == ETH_RECEIVEDTHRESHOLDCONTROL_64BYTES) || \ + ((THRESHOLD) == ETH_RECEIVEDTHRESHOLDCONTROL_32BYTES) || \ + ((THRESHOLD) == ETH_RECEIVEDTHRESHOLDCONTROL_96BYTES) || \ + ((THRESHOLD) == ETH_RECEIVEDTHRESHOLDCONTROL_128BYTES)) +#define IS_ETH_SECOND_FRAME_OPERATE(CMD) (((CMD) == ETH_SECONDFRAMEOPERARTE_ENABLE) || \ + ((CMD) == ETH_SECONDFRAMEOPERARTE_DISABLE)) +#define IS_ETH_ADDRESS_ALIGNED_BEATS(CMD) (((CMD) == ETH_ADDRESSALIGNEDBEATS_ENABLE) || \ + ((CMD) == ETH_ADDRESSALIGNEDBEATS_DISABLE)) +#define IS_ETH_FIXED_BURST(CMD) (((CMD) == ETH_FIXEDBURST_ENABLE) || \ + ((CMD) == ETH_FIXEDBURST_DISABLE)) +#define IS_ETH_RXDMA_BURST_LENGTH(LENGTH) (((LENGTH) == ETH_RXDMABURSTLENGTH_1BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_2BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_8BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_16BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_32BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_4BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_8BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_16BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_32BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_64BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_128BEAT)) +#define IS_ETH_TXDMA_BURST_LENGTH(LENGTH) (((LENGTH) == ETH_TXDMABURSTLENGTH_1BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_2BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_8BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_16BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_32BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_4BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_8BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_16BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_32BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_64BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_128BEAT)) +#define IS_ETH_DMA_DESC_SKIP_LENGTH(LENGTH) ((LENGTH) <= 0x1F) +#define IS_ETH_DMA_ARBITRATION_ROUNDROBIN_RXTX(RATIO) (((RATIO) == ETH_DMAARBITRATION_ROUNDROBIN_RXTX_1_1) || \ + ((RATIO) == ETH_DMAARBITRATION_ROUNDROBIN_RXTX_2_1) || \ + ((RATIO) == ETH_DMAARBITRATION_ROUNDROBIN_RXTX_3_1) || \ + ((RATIO) == ETH_DMAARBITRATION_ROUNDROBIN_RXTX_4_1) || \ + ((RATIO) == ETH_DMAARBITRATION_RXPRIORTX)) +#define IS_ETH_DMATXDESC_GET_FLAG(FLAG) (((FLAG) == ETH_DMATXDESC_OWN) || \ + ((FLAG) == ETH_DMATXDESC_IC) || \ + ((FLAG) == ETH_DMATXDESC_LS) || \ + ((FLAG) == ETH_DMATXDESC_FS) || \ + ((FLAG) == ETH_DMATXDESC_DC) || \ + ((FLAG) == ETH_DMATXDESC_DP) || \ + ((FLAG) == ETH_DMATXDESC_TTSE) || \ + ((FLAG) == ETH_DMATXDESC_TER) || \ + ((FLAG) == ETH_DMATXDESC_TCH) || \ + ((FLAG) == ETH_DMATXDESC_TTSS) || \ + ((FLAG) == ETH_DMATXDESC_IHE) || \ + 
((FLAG) == ETH_DMATXDESC_ES) || \ + ((FLAG) == ETH_DMATXDESC_JT) || \ + ((FLAG) == ETH_DMATXDESC_FF) || \ + ((FLAG) == ETH_DMATXDESC_PCE) || \ + ((FLAG) == ETH_DMATXDESC_LCA) || \ + ((FLAG) == ETH_DMATXDESC_NC) || \ + ((FLAG) == ETH_DMATXDESC_LCO) || \ + ((FLAG) == ETH_DMATXDESC_EC) || \ + ((FLAG) == ETH_DMATXDESC_VF) || \ + ((FLAG) == ETH_DMATXDESC_CC) || \ + ((FLAG) == ETH_DMATXDESC_ED) || \ + ((FLAG) == ETH_DMATXDESC_UF) || \ + ((FLAG) == ETH_DMATXDESC_DB)) +#define IS_ETH_DMA_TXDESC_SEGMENT(SEGMENT) (((SEGMENT) == ETH_DMATXDESC_LASTSEGMENTS) || \ + ((SEGMENT) == ETH_DMATXDESC_FIRSTSEGMENT)) +#define IS_ETH_DMA_TXDESC_CHECKSUM(CHECKSUM) (((CHECKSUM) == ETH_DMATXDESC_CHECKSUMBYPASS) || \ + ((CHECKSUM) == ETH_DMATXDESC_CHECKSUMIPV4HEADER) || \ + ((CHECKSUM) == ETH_DMATXDESC_CHECKSUMTCPUDPICMPSEGMENT) || \ + ((CHECKSUM) == ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL)) +#define IS_ETH_DMATXDESC_BUFFER_SIZE(SIZE) ((SIZE) <= 0x1FFF) +#define IS_ETH_DMARXDESC_GET_FLAG(FLAG) (((FLAG) == ETH_DMARXDESC_OWN) || \ + ((FLAG) == ETH_DMARXDESC_AFM) || \ + ((FLAG) == ETH_DMARXDESC_ES) || \ + ((FLAG) == ETH_DMARXDESC_DE) || \ + ((FLAG) == ETH_DMARXDESC_SAF) || \ + ((FLAG) == ETH_DMARXDESC_LE) || \ + ((FLAG) == ETH_DMARXDESC_OE) || \ + ((FLAG) == ETH_DMARXDESC_VLAN) || \ + ((FLAG) == ETH_DMARXDESC_FS) || \ + ((FLAG) == ETH_DMARXDESC_LS) || \ + ((FLAG) == ETH_DMARXDESC_IPV4HCE) || \ + ((FLAG) == ETH_DMARXDESC_LC) || \ + ((FLAG) == ETH_DMARXDESC_FT) || \ + ((FLAG) == ETH_DMARXDESC_RWT) || \ + ((FLAG) == ETH_DMARXDESC_RE) || \ + ((FLAG) == ETH_DMARXDESC_DBE) || \ + ((FLAG) == ETH_DMARXDESC_CE) || \ + ((FLAG) == ETH_DMARXDESC_MAMPCE)) +#define IS_ETH_DMA_RXDESC_BUFFER(BUFFER) (((BUFFER) == ETH_DMARXDESC_BUFFER1) || \ + ((BUFFER) == ETH_DMARXDESC_BUFFER2)) +#define IS_ETH_PMT_GET_FLAG(FLAG) (((FLAG) == ETH_PMT_FLAG_WUFR) || \ + ((FLAG) == ETH_PMT_FLAG_MPR)) +#define IS_ETH_DMA_FLAG(FLAG) ((((FLAG) & (uint32_t)0xC7FE1800) == 0x00) && ((FLAG) != 0x00)) +#define IS_ETH_DMA_GET_FLAG(FLAG) (((FLAG) == ETH_DMA_FLAG_TST) || ((FLAG) == ETH_DMA_FLAG_PMT) || \ + ((FLAG) == ETH_DMA_FLAG_MMC) || ((FLAG) == ETH_DMA_FLAG_DATATRANSFERERROR) || \ + ((FLAG) == ETH_DMA_FLAG_READWRITEERROR) || ((FLAG) == ETH_DMA_FLAG_ACCESSERROR) || \ + ((FLAG) == ETH_DMA_FLAG_NIS) || ((FLAG) == ETH_DMA_FLAG_AIS) || \ + ((FLAG) == ETH_DMA_FLAG_ER) || ((FLAG) == ETH_DMA_FLAG_FBE) || \ + ((FLAG) == ETH_DMA_FLAG_ET) || ((FLAG) == ETH_DMA_FLAG_RWT) || \ + ((FLAG) == ETH_DMA_FLAG_RPS) || ((FLAG) == ETH_DMA_FLAG_RBU) || \ + ((FLAG) == ETH_DMA_FLAG_R) || ((FLAG) == ETH_DMA_FLAG_TU) || \ + ((FLAG) == ETH_DMA_FLAG_RO) || ((FLAG) == ETH_DMA_FLAG_TJT) || \ + ((FLAG) == ETH_DMA_FLAG_TBU) || ((FLAG) == ETH_DMA_FLAG_TPS) || \ + ((FLAG) == ETH_DMA_FLAG_T)) +#define IS_ETH_MAC_IT(IT) ((((IT) & (uint32_t)0xFFFFFDF1) == 0x00) && ((IT) != 0x00)) +#define IS_ETH_MAC_GET_IT(IT) (((IT) == ETH_MAC_IT_TST) || ((IT) == ETH_MAC_IT_MMCT) || \ + ((IT) == ETH_MAC_IT_MMCR) || ((IT) == ETH_MAC_IT_MMC) || \ + ((IT) == ETH_MAC_IT_PMT)) +#define IS_ETH_MAC_GET_FLAG(FLAG) (((FLAG) == ETH_MAC_FLAG_TST) || ((FLAG) == ETH_MAC_FLAG_MMCT) || \ + ((FLAG) == ETH_MAC_FLAG_MMCR) || ((FLAG) == ETH_MAC_FLAG_MMC) || \ + ((FLAG) == ETH_MAC_FLAG_PMT)) +#define IS_ETH_DMA_IT(IT) ((((IT) & (uint32_t)0xC7FE1800) == 0x00) && ((IT) != 0x00)) +#define IS_ETH_DMA_GET_IT(IT) (((IT) == ETH_DMA_IT_TST) || ((IT) == ETH_DMA_IT_PMT) || \ + ((IT) == ETH_DMA_IT_MMC) || ((IT) == ETH_DMA_IT_NIS) || \ + ((IT) == ETH_DMA_IT_AIS) || ((IT) == ETH_DMA_IT_ER) || \ + ((IT) == ETH_DMA_IT_FBE) || ((IT) == ETH_DMA_IT_ET) || \ 
+ ((IT) == ETH_DMA_IT_RWT) || ((IT) == ETH_DMA_IT_RPS) || \ + ((IT) == ETH_DMA_IT_RBU) || ((IT) == ETH_DMA_IT_R) || \ + ((IT) == ETH_DMA_IT_TU) || ((IT) == ETH_DMA_IT_RO) || \ + ((IT) == ETH_DMA_IT_TJT) || ((IT) == ETH_DMA_IT_TBU) || \ + ((IT) == ETH_DMA_IT_TPS) || ((IT) == ETH_DMA_IT_T)) +#define IS_ETH_DMA_GET_OVERFLOW(OVERFLOW) (((OVERFLOW) == ETH_DMA_OVERFLOW_RXFIFOCOUNTER) || \ + ((OVERFLOW) == ETH_DMA_OVERFLOW_MISSEDFRAMECOUNTER)) +#define IS_ETH_MMC_IT(IT) (((((IT) & (uint32_t)0xFFDF3FFF) == 0x00) || (((IT) & (uint32_t)0xEFFDFF9F) == 0x00)) && \ + ((IT) != 0x00)) +#define IS_ETH_MMC_GET_IT(IT) (((IT) == ETH_MMC_IT_TGF) || ((IT) == ETH_MMC_IT_TGFMSC) || \ + ((IT) == ETH_MMC_IT_TGFSC) || ((IT) == ETH_MMC_IT_RGUF) || \ + ((IT) == ETH_MMC_IT_RFAE) || ((IT) == ETH_MMC_IT_RFCE)) +#define IS_ETH_ENHANCED_DESCRIPTOR_FORMAT(CMD) (((CMD) == ETH_DMAENHANCEDDESCRIPTOR_ENABLE) || \ + ((CMD) == ETH_DMAENHANCEDDESCRIPTOR_DISABLE)) + + +/** + * @} + */ + +/** @addtogroup ETH_Private_Defines + * @{ + */ +/* Delay to wait when writing to some Ethernet registers */ +#define ETH_REG_WRITE_DELAY ((uint32_t)0x00000001U) + +/* Ethernet Errors */ +#define ETH_SUCCESS ((uint32_t)0U) +#define ETH_ERROR ((uint32_t)1U) + +/* Ethernet DMA Tx descriptors Collision Count Shift */ +#define ETH_DMATXDESC_COLLISION_COUNTSHIFT ((uint32_t)3U) + +/* Ethernet DMA Tx descriptors Buffer2 Size Shift */ +#define ETH_DMATXDESC_BUFFER2_SIZESHIFT ((uint32_t)16U) + +/* Ethernet DMA Rx descriptors Frame Length Shift */ +#define ETH_DMARXDESC_FRAME_LENGTHSHIFT ((uint32_t)16U) + +/* Ethernet DMA Rx descriptors Buffer2 Size Shift */ +#define ETH_DMARXDESC_BUFFER2_SIZESHIFT ((uint32_t)16U) + +/* Ethernet DMA Rx descriptors Frame length Shift */ +#define ETH_DMARXDESC_FRAMELENGTHSHIFT ((uint32_t)16) + +/* Ethernet MAC address offsets */ +#define ETH_MAC_ADDR_HBASE (uint32_t)(ETH_MAC_BASE + (uint32_t)0x40U) /* Ethernet MAC address high offset */ +#define ETH_MAC_ADDR_LBASE (uint32_t)(ETH_MAC_BASE + (uint32_t)0x44U) /* Ethernet MAC address low offset */ + +/* Ethernet MACMIIAR register Mask */ +#define ETH_MACMIIAR_CR_MASK ((uint32_t)0xFFFFFFE3U) + +/* Ethernet MACCR register Mask */ +#define ETH_MACCR_CLEAR_MASK ((uint32_t)0xFF20810FU) + +/* Ethernet MACFCR register Mask */ +#define ETH_MACFCR_CLEAR_MASK ((uint32_t)0x0000FF41U) + +/* Ethernet DMAOMR register Mask */ +#define ETH_DMAOMR_CLEAR_MASK ((uint32_t)0xF8DE3F23U) + +/* Ethernet Remote Wake-up frame register length */ +#define ETH_WAKEUP_REGISTER_LENGTH 8U + +/* Ethernet Missed frames counter Shift */ +#define ETH_DMA_RX_OVERFLOW_MISSEDFRAMES_COUNTERSHIFT 17U + /** + * @} + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup ETH_Exported_Types ETH Exported Types + * @{ + */ + +/** + * @brief HAL State structures definition + */ +typedef enum +{ + HAL_ETH_STATE_RESET = 0x00U, /*!< Peripheral not yet Initialized or disabled */ + HAL_ETH_STATE_READY = 0x01U, /*!< Peripheral Initialized and ready for use */ + HAL_ETH_STATE_BUSY = 0x02U, /*!< an internal process is ongoing */ + HAL_ETH_STATE_BUSY_TX = 0x12U, /*!< Data Transmission process is ongoing */ + HAL_ETH_STATE_BUSY_RX = 0x22U, /*!< Data Reception process is ongoing */ + HAL_ETH_STATE_BUSY_TX_RX = 0x32U, /*!< Data Transmission and Reception process is ongoing */ + HAL_ETH_STATE_BUSY_WR = 0x42U, /*!< Write process is ongoing */ + HAL_ETH_STATE_BUSY_RD = 0x82U, /*!< Read process is ongoing */ + HAL_ETH_STATE_TIMEOUT = 0x03U, /*!< Timeout state */ + HAL_ETH_STATE_ERROR = 0x04U 
/*!< Reception process is ongoing */ +}HAL_ETH_StateTypeDef; + +/** + * @brief ETH Init Structure definition + */ + +typedef struct +{ + uint32_t AutoNegotiation; /*!< Selects or not the AutoNegotiation mode for the external PHY + The AutoNegotiation allows an automatic setting of the Speed (10/100Mbps) + and the mode (half/full-duplex). + This parameter can be a value of @ref ETH_AutoNegotiation */ + + uint32_t Speed; /*!< Sets the Ethernet speed: 10/100 Mbps. + This parameter can be a value of @ref ETH_Speed */ + + uint32_t DuplexMode; /*!< Selects the MAC duplex mode: Half-Duplex or Full-Duplex mode + This parameter can be a value of @ref ETH_Duplex_Mode */ + + uint16_t PhyAddress; /*!< Ethernet PHY address. + This parameter must be a number between Min_Data = 0 and Max_Data = 32 */ + + uint8_t *MACAddr; /*!< MAC Address of used Hardware: must be pointer on an array of 6 bytes */ + + uint32_t RxMode; /*!< Selects the Ethernet Rx mode: Polling mode, Interrupt mode. + This parameter can be a value of @ref ETH_Rx_Mode */ + + uint32_t ChecksumMode; /*!< Selects if the checksum is check by hardware or by software. + This parameter can be a value of @ref ETH_Checksum_Mode */ + + uint32_t MediaInterface ; /*!< Selects the media-independent interface or the reduced media-independent interface. + This parameter can be a value of @ref ETH_Media_Interface */ + +} ETH_InitTypeDef; + + + /** + * @brief ETH MAC Configuration Structure definition + */ + +typedef struct +{ + uint32_t Watchdog; /*!< Selects or not the Watchdog timer + When enabled, the MAC allows no more then 2048 bytes to be received. + When disabled, the MAC can receive up to 16384 bytes. + This parameter can be a value of @ref ETH_Watchdog */ + + uint32_t Jabber; /*!< Selects or not Jabber timer + When enabled, the MAC allows no more then 2048 bytes to be sent. + When disabled, the MAC can send up to 16384 bytes. + This parameter can be a value of @ref ETH_Jabber */ + + uint32_t InterFrameGap; /*!< Selects the minimum IFG between frames during transmission. + This parameter can be a value of @ref ETH_Inter_Frame_Gap */ + + uint32_t CarrierSense; /*!< Selects or not the Carrier Sense. + This parameter can be a value of @ref ETH_Carrier_Sense */ + + uint32_t ReceiveOwn; /*!< Selects or not the ReceiveOwn, + ReceiveOwn allows the reception of frames when the TX_EN signal is asserted + in Half-Duplex mode. + This parameter can be a value of @ref ETH_Receive_Own */ + + uint32_t LoopbackMode; /*!< Selects or not the internal MAC MII Loopback mode. + This parameter can be a value of @ref ETH_Loop_Back_Mode */ + + uint32_t ChecksumOffload; /*!< Selects or not the IPv4 checksum checking for received frame payloads' TCP/UDP/ICMP headers. + This parameter can be a value of @ref ETH_Checksum_Offload */ + + uint32_t RetryTransmission; /*!< Selects or not the MAC attempt retries transmission, based on the settings of BL, + when a collision occurs (Half-Duplex mode). + This parameter can be a value of @ref ETH_Retry_Transmission */ + + uint32_t AutomaticPadCRCStrip; /*!< Selects or not the Automatic MAC Pad/CRC Stripping. + This parameter can be a value of @ref ETH_Automatic_Pad_CRC_Strip */ + + uint32_t BackOffLimit; /*!< Selects the BackOff limit value. + This parameter can be a value of @ref ETH_Back_Off_Limit */ + + uint32_t DeferralCheck; /*!< Selects or not the deferral check function (Half-Duplex mode). 
+ This parameter can be a value of @ref ETH_Deferral_Check */ + + uint32_t ReceiveAll; /*!< Selects or not all frames reception by the MAC (No filtering). + This parameter can be a value of @ref ETH_Receive_All */ + + uint32_t SourceAddrFilter; /*!< Selects the Source Address Filter mode. + This parameter can be a value of @ref ETH_Source_Addr_Filter */ + + uint32_t PassControlFrames; /*!< Sets the forwarding mode of the control frames (including unicast and multicast PAUSE frames) + This parameter can be a value of @ref ETH_Pass_Control_Frames */ + + uint32_t BroadcastFramesReception; /*!< Selects or not the reception of Broadcast Frames. + This parameter can be a value of @ref ETH_Broadcast_Frames_Reception */ + + uint32_t DestinationAddrFilter; /*!< Sets the destination filter mode for both unicast and multicast frames. + This parameter can be a value of @ref ETH_Destination_Addr_Filter */ + + uint32_t PromiscuousMode; /*!< Selects or not the Promiscuous Mode + This parameter can be a value of @ref ETH_Promiscuous_Mode */ + + uint32_t MulticastFramesFilter; /*!< Selects the Multicast Frames filter mode: None/HashTableFilter/PerfectFilter/PerfectHashTableFilter. + This parameter can be a value of @ref ETH_Multicast_Frames_Filter */ + + uint32_t UnicastFramesFilter; /*!< Selects the Unicast Frames filter mode: HashTableFilter/PerfectFilter/PerfectHashTableFilter. + This parameter can be a value of @ref ETH_Unicast_Frames_Filter */ + + uint32_t HashTableHigh; /*!< This field holds the higher 32 bits of Hash table. + This parameter must be a number between Min_Data = 0x0 and Max_Data = 0xFFFFFFFF */ + + uint32_t HashTableLow; /*!< This field holds the lower 32 bits of Hash table. + This parameter must be a number between Min_Data = 0x0 and Max_Data = 0xFFFFFFFF */ + + uint32_t PauseTime; /*!< This field holds the value to be used in the Pause Time field in the transmit control frame. + This parameter must be a number between Min_Data = 0x0 and Max_Data = 0xFFFF */ + + uint32_t ZeroQuantaPause; /*!< Selects or not the automatic generation of Zero-Quanta Pause Control frames. + This parameter can be a value of @ref ETH_Zero_Quanta_Pause */ + + uint32_t PauseLowThreshold; /*!< This field configures the threshold of the PAUSE to be checked for + automatic retransmission of PAUSE Frame. + This parameter can be a value of @ref ETH_Pause_Low_Threshold */ + + uint32_t UnicastPauseFrameDetect; /*!< Selects or not the MAC detection of the Pause frames (with MAC Address0 + unicast address and unique multicast address). + This parameter can be a value of @ref ETH_Unicast_Pause_Frame_Detect */ + + uint32_t ReceiveFlowControl; /*!< Enables or disables the MAC to decode the received Pause frame and + disable its transmitter for a specified time (Pause Time) + This parameter can be a value of @ref ETH_Receive_Flow_Control */ + + uint32_t TransmitFlowControl; /*!< Enables or disables the MAC to transmit Pause frames (Full-Duplex mode) + or the MAC back-pressure operation (Half-Duplex mode) + This parameter can be a value of @ref ETH_Transmit_Flow_Control */ + + uint32_t VLANTagComparison; /*!< Selects the 12-bit VLAN identifier or the complete 16-bit VLAN tag for + comparison and filtering. 
+ This parameter can be a value of @ref ETH_VLAN_Tag_Comparison */ + + uint32_t VLANTagIdentifier; /*!< Holds the VLAN tag identifier for receive frames */ + +} ETH_MACInitTypeDef; + + +/** + * @brief ETH DMA Configuration Structure definition + */ + +typedef struct +{ + uint32_t DropTCPIPChecksumErrorFrame; /*!< Selects or not the Dropping of TCP/IP Checksum Error Frames. + This parameter can be a value of @ref ETH_Drop_TCP_IP_Checksum_Error_Frame */ + + uint32_t ReceiveStoreForward; /*!< Enables or disables the Receive store and forward mode. + This parameter can be a value of @ref ETH_Receive_Store_Forward */ + + uint32_t FlushReceivedFrame; /*!< Enables or disables the flushing of received frames. + This parameter can be a value of @ref ETH_Flush_Received_Frame */ + + uint32_t TransmitStoreForward; /*!< Enables or disables Transmit store and forward mode. + This parameter can be a value of @ref ETH_Transmit_Store_Forward */ + + uint32_t TransmitThresholdControl; /*!< Selects or not the Transmit Threshold Control. + This parameter can be a value of @ref ETH_Transmit_Threshold_Control */ + + uint32_t ForwardErrorFrames; /*!< Selects or not the forward to the DMA of erroneous frames. + This parameter can be a value of @ref ETH_Forward_Error_Frames */ + + uint32_t ForwardUndersizedGoodFrames; /*!< Enables or disables the Rx FIFO to forward Undersized frames (frames with no Error + and length less than 64 bytes) including pad-bytes and CRC) + This parameter can be a value of @ref ETH_Forward_Undersized_Good_Frames */ + + uint32_t ReceiveThresholdControl; /*!< Selects the threshold level of the Receive FIFO. + This parameter can be a value of @ref ETH_Receive_Threshold_Control */ + + uint32_t SecondFrameOperate; /*!< Selects or not the Operate on second frame mode, which allows the DMA to process a second + frame of Transmit data even before obtaining the status for the first frame. + This parameter can be a value of @ref ETH_Second_Frame_Operate */ + + uint32_t AddressAlignedBeats; /*!< Enables or disables the Address Aligned Beats. + This parameter can be a value of @ref ETH_Address_Aligned_Beats */ + + uint32_t FixedBurst; /*!< Enables or disables the AHB Master interface fixed burst transfers. + This parameter can be a value of @ref ETH_Fixed_Burst */ + + uint32_t RxDMABurstLength; /*!< Indicates the maximum number of beats to be transferred in one Rx DMA transaction. + This parameter can be a value of @ref ETH_Rx_DMA_Burst_Length */ + + uint32_t TxDMABurstLength; /*!< Indicates the maximum number of beats to be transferred in one Tx DMA transaction. + This parameter can be a value of @ref ETH_Tx_DMA_Burst_Length */ + + uint32_t EnhancedDescriptorFormat; /*!< Enables the enhanced descriptor format. + This parameter can be a value of @ref ETH_DMA_Enhanced_descriptor_format */ + + uint32_t DescriptorSkipLength; /*!< Specifies the number of word to skip between two unchained descriptors (Ring mode) + This parameter must be a number between Min_Data = 0 and Max_Data = 32 */ + + uint32_t DMAArbitration; /*!< Selects the DMA Tx/Rx arbitration. 
+ This parameter can be a value of @ref ETH_DMA_Arbitration */ +} ETH_DMAInitTypeDef; + + +/** + * @brief ETH DMA Descriptors data structure definition + */ + +typedef struct +{ + __IO uint32_t Status; /*!< Status */ + + uint32_t ControlBufferSize; /*!< Control and Buffer1, Buffer2 lengths */ + + uint32_t Buffer1Addr; /*!< Buffer1 address pointer */ + + uint32_t Buffer2NextDescAddr; /*!< Buffer2 or next descriptor address pointer */ + + /*!< Enhanced Ethernet DMA PTP Descriptors */ + uint32_t ExtendedStatus; /*!< Extended status for PTP receive descriptor */ + + uint32_t Reserved1; /*!< Reserved */ + + uint32_t TimeStampLow; /*!< Time Stamp Low value for transmit and receive */ + + uint32_t TimeStampHigh; /*!< Time Stamp High value for transmit and receive */ + +} ETH_DMADescTypeDef; + + +/** + * @brief Received Frame Informations structure definition + */ +typedef struct +{ + ETH_DMADescTypeDef *FSRxDesc; /*!< First Segment Rx Desc */ + + ETH_DMADescTypeDef *LSRxDesc; /*!< Last Segment Rx Desc */ + + uint32_t SegCount; /*!< Segment count */ + + uint32_t length; /*!< Frame length */ + + uint32_t buffer; /*!< Frame buffer */ + +} ETH_DMARxFrameInfos; + + +/** + * @brief ETH Handle Structure definition + */ + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +typedef struct __ETH_HandleTypeDef +#else +typedef struct +#endif +{ + ETH_TypeDef *Instance; /*!< Register base address */ + + ETH_InitTypeDef Init; /*!< Ethernet Init Configuration */ + + uint32_t LinkStatus; /*!< Ethernet link status */ + + ETH_DMADescTypeDef *RxDesc; /*!< Rx descriptor to Get */ + + ETH_DMADescTypeDef *TxDesc; /*!< Tx descriptor to Set */ + + ETH_DMARxFrameInfos RxFrameInfos; /*!< last Rx frame infos */ + + __IO HAL_ETH_StateTypeDef State; /*!< ETH communication state */ + + HAL_LockTypeDef Lock; /*!< ETH Lock */ + + #if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + + void (* TxCpltCallback) ( struct __ETH_HandleTypeDef * heth); /*!< ETH Tx Complete Callback */ + void (* RxCpltCallback) ( struct __ETH_HandleTypeDef * heth); /*!< ETH Rx Complete Callback */ + void (* DMAErrorCallback) ( struct __ETH_HandleTypeDef * heth); /*!< DMA Error Callback */ + void (* MspInitCallback) ( struct __ETH_HandleTypeDef * heth); /*!< ETH Msp Init callback */ + void (* MspDeInitCallback) ( struct __ETH_HandleTypeDef * heth); /*!< ETH Msp DeInit callback */ + +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +} ETH_HandleTypeDef; + + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +/** + * @brief HAL ETH Callback ID enumeration definition + */ +typedef enum +{ + HAL_ETH_MSPINIT_CB_ID = 0x00U, /*!< ETH MspInit callback ID */ + HAL_ETH_MSPDEINIT_CB_ID = 0x01U, /*!< ETH MspDeInit callback ID */ + HAL_ETH_TX_COMPLETE_CB_ID = 0x02U, /*!< ETH Tx Complete Callback ID */ + HAL_ETH_RX_COMPLETE_CB_ID = 0x03U, /*!< ETH Rx Complete Callback ID */ + HAL_ETH_DMA_ERROR_CB_ID = 0x04U, /*!< ETH DMA Error Callback ID */ + +}HAL_ETH_CallbackIDTypeDef; + +/** + * @brief HAL ETH Callback pointer definition + */ +typedef void (*pETH_CallbackTypeDef)(ETH_HandleTypeDef * heth); /*!< pointer to an ETH callback function */ + +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + /** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup ETH_Exported_Constants ETH Exported Constants + * @{ + */ + +/** @defgroup ETH_Buffers_setting ETH Buffers setting + * @{ + */ +#define ETH_MAX_PACKET_SIZE ((uint32_t)1524U) /*!< ETH_HEADER + ETH_EXTRA + ETH_VLAN_TAG + ETH_MAX_ETH_PAYLOAD + ETH_CRC */ +#define ETH_HEADER ((uint32_t)14U) /*!< 6 byte 
Dest addr, 6 byte Src addr, 2 byte length/type */ +#define ETH_CRC ((uint32_t)4U) /*!< Ethernet CRC */ +#define ETH_EXTRA ((uint32_t)2U) /*!< Extra bytes in some cases */ +#define ETH_VLAN_TAG ((uint32_t)4U) /*!< optional 802.1q VLAN Tag */ +#define ETH_MIN_ETH_PAYLOAD ((uint32_t)46U) /*!< Minimum Ethernet payload size */ +#define ETH_MAX_ETH_PAYLOAD ((uint32_t)1500U) /*!< Maximum Ethernet payload size */ +#define ETH_JUMBO_FRAME_PAYLOAD ((uint32_t)9000U) /*!< Jumbo frame payload size */ + + /* Ethernet driver receive buffers are organized in a chained linked-list, when + an Ethernet packet is received, the Rx-DMA will transfer the packet from RxFIFO + to the driver receive buffers memory. + + Depending on the size of the received Ethernet packet and the size of + each Ethernet driver receive buffer, the received packet can take one or more + Ethernet driver receive buffer. + + In below are defined the size of one Ethernet driver receive buffer ETH_RX_BUF_SIZE + and the total count of the driver receive buffers ETH_RXBUFNB. + + The configured value for ETH_RX_BUF_SIZE and ETH_RXBUFNB are only provided as + example, they can be reconfigured in the application layer to fit the application + needs */ + +/* Here we configure each Ethernet driver receive buffer to fit the Max size Ethernet + packet */ +#ifndef ETH_RX_BUF_SIZE + #define ETH_RX_BUF_SIZE ETH_MAX_PACKET_SIZE +#endif + +/* 5 Ethernet driver receive buffers are used (in a chained linked list)*/ +#ifndef ETH_RXBUFNB + #define ETH_RXBUFNB ((uint32_t)5U) /* 5 Rx buffers of size ETH_RX_BUF_SIZE */ +#endif + + + /* Ethernet driver transmit buffers are organized in a chained linked-list, when + an Ethernet packet is transmitted, Tx-DMA will transfer the packet from the + driver transmit buffers memory to the TxFIFO. + + Depending on the size of the Ethernet packet to be transmitted and the size of + each Ethernet driver transmit buffer, the packet to be transmitted can take + one or more Ethernet driver transmit buffer. + + In below are defined the size of one Ethernet driver transmit buffer ETH_TX_BUF_SIZE + and the total count of the driver transmit buffers ETH_TXBUFNB. 
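+ + As with the receive buffers above, these defaults can be overridden by the + application. For illustration only (arbitrary example values, assuming a + project that wants four 1536-byte buffers per direction), since all four + symbols are guarded by #ifndef in this header they may simply be defined + before this header is included, e.g.: + + #define ETH_RX_BUF_SIZE ((uint32_t)1536U) + #define ETH_RXBUFNB ((uint32_t)4U) + #define ETH_TX_BUF_SIZE ((uint32_t)1536U) + #define ETH_TXBUFNB ((uint32_t)4U)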
+ + The configured value for ETH_TX_BUF_SIZE and ETH_TXBUFNB are only provided as + example, they can be reconfigured in the application layer to fit the application + needs */ + +/* Here we configure each Ethernet driver transmit buffer to fit the Max size Ethernet + packet */ +#ifndef ETH_TX_BUF_SIZE + #define ETH_TX_BUF_SIZE ETH_MAX_PACKET_SIZE +#endif + +/* 5 Ethernet driver transmit buffers are used (in a chained linked list)*/ +#ifndef ETH_TXBUFNB + #define ETH_TXBUFNB ((uint32_t)5U) /* 5 Tx buffers of size ETH_TX_BUF_SIZE */ +#endif + + /** + * @} + */ + +/** @defgroup ETH_DMA_TX_Descriptor ETH DMA TX Descriptor + * @{ + */ + +/* + DMA Tx Descriptor + ----------------------------------------------------------------------------------------------- + TDES0 | OWN(31) | CTRL[30:26] | Reserved[25:24] | CTRL[23:20] | Reserved[19:17] | Status[16:0] | + ----------------------------------------------------------------------------------------------- + TDES1 | Reserved[31:29] | Buffer2 ByteCount[28:16] | Reserved[15:13] | Buffer1 ByteCount[12:0] | + ----------------------------------------------------------------------------------------------- + TDES2 | Buffer1 Address [31:0] | + ----------------------------------------------------------------------------------------------- + TDES3 | Buffer2 Address [31:0] / Next Descriptor Address [31:0] | + ----------------------------------------------------------------------------------------------- +*/ + +/** + * @brief Bit definition of TDES0 register: DMA Tx descriptor status register + */ +#define ETH_DMATXDESC_OWN ((uint32_t)0x80000000U) /*!< OWN bit: descriptor is owned by DMA engine */ +#define ETH_DMATXDESC_IC ((uint32_t)0x40000000U) /*!< Interrupt on Completion */ +#define ETH_DMATXDESC_LS ((uint32_t)0x20000000U) /*!< Last Segment */ +#define ETH_DMATXDESC_FS ((uint32_t)0x10000000U) /*!< First Segment */ +#define ETH_DMATXDESC_DC ((uint32_t)0x08000000U) /*!< Disable CRC */ +#define ETH_DMATXDESC_DP ((uint32_t)0x04000000U) /*!< Disable Padding */ +#define ETH_DMATXDESC_TTSE ((uint32_t)0x02000000U) /*!< Transmit Time Stamp Enable */ +#define ETH_DMATXDESC_CIC ((uint32_t)0x00C00000U) /*!< Checksum Insertion Control: 4 cases */ +#define ETH_DMATXDESC_CIC_BYPASS ((uint32_t)0x00000000U) /*!< Do Nothing: Checksum Engine is bypassed */ +#define ETH_DMATXDESC_CIC_IPV4HEADER ((uint32_t)0x00400000U) /*!< IPV4 header Checksum Insertion */ +#define ETH_DMATXDESC_CIC_TCPUDPICMP_SEGMENT ((uint32_t)0x00800000U) /*!< TCP/UDP/ICMP Checksum Insertion calculated over segment only */ +#define ETH_DMATXDESC_CIC_TCPUDPICMP_FULL ((uint32_t)0x00C00000U) /*!< TCP/UDP/ICMP Checksum Insertion fully calculated */ +#define ETH_DMATXDESC_TER ((uint32_t)0x00200000U) /*!< Transmit End of Ring */ +#define ETH_DMATXDESC_TCH ((uint32_t)0x00100000U) /*!< Second Address Chained */ +#define ETH_DMATXDESC_TTSS ((uint32_t)0x00020000U) /*!< Tx Time Stamp Status */ +#define ETH_DMATXDESC_IHE ((uint32_t)0x00010000U) /*!< IP Header Error */ +#define ETH_DMATXDESC_ES ((uint32_t)0x00008000U) /*!< Error summary: OR of the following bits: UE || ED || EC || LCO || NC || LCA || FF || JT */ +#define ETH_DMATXDESC_JT ((uint32_t)0x00004000U) /*!< Jabber Timeout */ +#define ETH_DMATXDESC_FF ((uint32_t)0x00002000U) /*!< Frame Flushed: DMA/MTL flushed the frame due to SW flush */ +#define ETH_DMATXDESC_PCE ((uint32_t)0x00001000U) /*!< Payload Checksum Error */ +#define ETH_DMATXDESC_LCA ((uint32_t)0x00000800U) /*!< Loss of Carrier: carrier lost during transmission */ +#define ETH_DMATXDESC_NC 
((uint32_t)0x00000400U) /*!< No Carrier: no carrier signal from the transceiver */ +#define ETH_DMATXDESC_LCO ((uint32_t)0x00000200U) /*!< Late Collision: transmission aborted due to collision */ +#define ETH_DMATXDESC_EC ((uint32_t)0x00000100U) /*!< Excessive Collision: transmission aborted after 16 collisions */ +#define ETH_DMATXDESC_VF ((uint32_t)0x00000080U) /*!< VLAN Frame */ +#define ETH_DMATXDESC_CC ((uint32_t)0x00000078U) /*!< Collision Count */ +#define ETH_DMATXDESC_ED ((uint32_t)0x00000004U) /*!< Excessive Deferral */ +#define ETH_DMATXDESC_UF ((uint32_t)0x00000002U) /*!< Underflow Error: late data arrival from the memory */ +#define ETH_DMATXDESC_DB ((uint32_t)0x00000001U) /*!< Deferred Bit */ + +/** + * @brief Bit definition of TDES1 register + */ +#define ETH_DMATXDESC_TBS2 ((uint32_t)0x1FFF0000U) /*!< Transmit Buffer2 Size */ +#define ETH_DMATXDESC_TBS1 ((uint32_t)0x00001FFFU) /*!< Transmit Buffer1 Size */ + +/** + * @brief Bit definition of TDES2 register + */ +#define ETH_DMATXDESC_B1AP ((uint32_t)0xFFFFFFFFU) /*!< Buffer1 Address Pointer */ + +/** + * @brief Bit definition of TDES3 register + */ +#define ETH_DMATXDESC_B2AP ((uint32_t)0xFFFFFFFFU) /*!< Buffer2 Address Pointer */ + + /*--------------------------------------------------------------------------------------------- + TDES6 | Transmit Time Stamp Low [31:0] | + ----------------------------------------------------------------------------------------------- + TDES7 | Transmit Time Stamp High [31:0] | + ----------------------------------------------------------------------------------------------*/ + +/* Bit definition of TDES6 register */ + #define ETH_DMAPTPTXDESC_TTSL ((uint32_t)0xFFFFFFFFU) /* Transmit Time Stamp Low */ + +/* Bit definition of TDES7 register */ + #define ETH_DMAPTPTXDESC_TTSH ((uint32_t)0xFFFFFFFFU) /* Transmit Time Stamp High */ + +/** + * @} + */ +/** @defgroup ETH_DMA_RX_Descriptor ETH DMA RX Descriptor + * @{ + */ + +/* + DMA Rx Descriptor + -------------------------------------------------------------------------------------------------------------------- + RDES0 | OWN(31) | Status [30:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES1 | CTRL(31) | Reserved[30:29] | Buffer2 ByteCount[28:16] | CTRL[15:14] | Reserved(13) | Buffer1 ByteCount[12:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES2 | Buffer1 Address [31:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES3 | Buffer2 Address [31:0] / Next Descriptor Address [31:0] | + --------------------------------------------------------------------------------------------------------------------- +*/ + +/** + * @brief Bit definition of RDES0 register: DMA Rx descriptor status register + */ +#define ETH_DMARXDESC_OWN ((uint32_t)0x80000000U) /*!< OWN bit: descriptor is owned by DMA engine */ +#define ETH_DMARXDESC_AFM ((uint32_t)0x40000000U) /*!< DA Filter Fail for the rx frame */ +#define ETH_DMARXDESC_FL ((uint32_t)0x3FFF0000U) /*!< Receive descriptor frame length */ +#define ETH_DMARXDESC_ES ((uint32_t)0x00008000U) /*!< Error summary: OR of the following bits: DE || OE || IPC || LC || RWT || RE || CE */ +#define ETH_DMARXDESC_DE ((uint32_t)0x00004000U) /*!< Descriptor error: no more descriptors for receive frame */ +#define ETH_DMARXDESC_SAF ((uint32_t)0x00002000U) /*!< SA Filter Fail for the received 
frame */ +#define ETH_DMARXDESC_LE ((uint32_t)0x00001000U) /*!< Frame size not matching with length field */ +#define ETH_DMARXDESC_OE ((uint32_t)0x00000800U) /*!< Overflow Error: Frame was damaged due to buffer overflow */ +#define ETH_DMARXDESC_VLAN ((uint32_t)0x00000400U) /*!< VLAN Tag: received frame is a VLAN frame */ +#define ETH_DMARXDESC_FS ((uint32_t)0x00000200U) /*!< First descriptor of the frame */ +#define ETH_DMARXDESC_LS ((uint32_t)0x00000100U) /*!< Last descriptor of the frame */ +#define ETH_DMARXDESC_IPV4HCE ((uint32_t)0x00000080U) /*!< IPC Checksum Error: Rx Ipv4 header checksum error */ +#define ETH_DMARXDESC_LC ((uint32_t)0x00000040U) /*!< Late collision occurred during reception */ +#define ETH_DMARXDESC_FT ((uint32_t)0x00000020U) /*!< Frame type - Ethernet, otherwise 802.3 */ +#define ETH_DMARXDESC_RWT ((uint32_t)0x00000010U) /*!< Receive Watchdog Timeout: watchdog timer expired during reception */ +#define ETH_DMARXDESC_RE ((uint32_t)0x00000008U) /*!< Receive error: error reported by MII interface */ +#define ETH_DMARXDESC_DBE ((uint32_t)0x00000004U) /*!< Dribble bit error: frame contains non int multiple of 8 bits */ +#define ETH_DMARXDESC_CE ((uint32_t)0x00000002U) /*!< CRC error */ +#define ETH_DMARXDESC_MAMPCE ((uint32_t)0x00000001U) /*!< Rx MAC Address/Payload Checksum Error: Rx MAC address matched/ Rx Payload Checksum Error */ + +/** + * @brief Bit definition of RDES1 register + */ +#define ETH_DMARXDESC_DIC ((uint32_t)0x80000000U) /*!< Disable Interrupt on Completion */ +#define ETH_DMARXDESC_RBS2 ((uint32_t)0x1FFF0000U) /*!< Receive Buffer2 Size */ +#define ETH_DMARXDESC_RER ((uint32_t)0x00008000U) /*!< Receive End of Ring */ +#define ETH_DMARXDESC_RCH ((uint32_t)0x00004000U) /*!< Second Address Chained */ +#define ETH_DMARXDESC_RBS1 ((uint32_t)0x00001FFFU) /*!< Receive Buffer1 Size */ + +/** + * @brief Bit definition of RDES2 register + */ +#define ETH_DMARXDESC_B1AP ((uint32_t)0xFFFFFFFFU) /*!< Buffer1 Address Pointer */ + +/** + * @brief Bit definition of RDES3 register + */ +#define ETH_DMARXDESC_B2AP ((uint32_t)0xFFFFFFFFU) /*!< Buffer2 Address Pointer */ + +/*--------------------------------------------------------------------------------------------------------------------- + RDES4 | Reserved[31:15] | Extended Status [14:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES5 | Reserved[31:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES6 | Receive Time Stamp Low [31:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES7 | Receive Time Stamp High [31:0] | + --------------------------------------------------------------------------------------------------------------------*/ + +/* Bit definition of RDES4 register */ +#define ETH_DMAPTPRXDESC_PTPV ((uint32_t)0x00002000U) /* PTP Version */ +#define ETH_DMAPTPRXDESC_PTPFT ((uint32_t)0x00001000U) /* PTP Frame Type */ +#define ETH_DMAPTPRXDESC_PTPMT ((uint32_t)0x00000F00U) /* PTP Message Type */ +#define ETH_DMAPTPRXDESC_PTPMT_SYNC ((uint32_t)0x00000100U) /* SYNC message (all clock types) */ +#define ETH_DMAPTPRXDESC_PTPMT_FOLLOWUP ((uint32_t)0x00000200U) /* FollowUp message (all clock types) */ +#define ETH_DMAPTPRXDESC_PTPMT_DELAYREQ ((uint32_t)0x00000300U) /* DelayReq message (all clock types) */ +#define ETH_DMAPTPRXDESC_PTPMT_DELAYRESP ((uint32_t)0x00000400U) /* 
DelayResp message (all clock types) */ +#define ETH_DMAPTPRXDESC_PTPMT_PDELAYREQ_ANNOUNCE ((uint32_t)0x00000500U) /* PdelayReq message (peer-to-peer transparent clock) or Announce message (Ordinary or Boundary clock) */ +#define ETH_DMAPTPRXDESC_PTPMT_PDELAYRESP_MANAG ((uint32_t)0x00000600U) /* PdelayResp message (peer-to-peer transparent clock) or Management message (Ordinary or Boundary clock) */ +#define ETH_DMAPTPRXDESC_PTPMT_PDELAYRESPFOLLOWUP_SIGNAL ((uint32_t)0x00000700U) /* PdelayRespFollowUp message (peer-to-peer transparent clock) or Signaling message (Ordinary or Boundary clock) */ +#define ETH_DMAPTPRXDESC_IPV6PR ((uint32_t)0x00000080U) /* IPv6 Packet Received */ +#define ETH_DMAPTPRXDESC_IPV4PR ((uint32_t)0x00000040U) /* IPv4 Packet Received */ +#define ETH_DMAPTPRXDESC_IPCB ((uint32_t)0x00000020U) /* IP Checksum Bypassed */ +#define ETH_DMAPTPRXDESC_IPPE ((uint32_t)0x00000010U) /* IP Payload Error */ +#define ETH_DMAPTPRXDESC_IPHE ((uint32_t)0x00000008U) /* IP Header Error */ +#define ETH_DMAPTPRXDESC_IPPT ((uint32_t)0x00000007U) /* IP Payload Type */ +#define ETH_DMAPTPRXDESC_IPPT_UDP ((uint32_t)0x00000001U) /* UDP payload encapsulated in the IP datagram */ +#define ETH_DMAPTPRXDESC_IPPT_TCP ((uint32_t)0x00000002U) /* TCP payload encapsulated in the IP datagram */ +#define ETH_DMAPTPRXDESC_IPPT_ICMP ((uint32_t)0x00000003U) /* ICMP payload encapsulated in the IP datagram */ + +/* Bit definition of RDES6 register */ +#define ETH_DMAPTPRXDESC_RTSL ((uint32_t)0xFFFFFFFFU) /* Receive Time Stamp Low */ + +/* Bit definition of RDES7 register */ +#define ETH_DMAPTPRXDESC_RTSH ((uint32_t)0xFFFFFFFFU) /* Receive Time Stamp High */ +/** + * @} + */ + /** @defgroup ETH_AutoNegotiation ETH AutoNegotiation + * @{ + */ +#define ETH_AUTONEGOTIATION_ENABLE ((uint32_t)0x00000001U) +#define ETH_AUTONEGOTIATION_DISABLE ((uint32_t)0x00000000U) + +/** + * @} + */ +/** @defgroup ETH_Speed ETH Speed + * @{ + */ +#define ETH_SPEED_10M ((uint32_t)0x00000000U) +#define ETH_SPEED_100M ((uint32_t)0x00004000U) + +/** + * @} + */ +/** @defgroup ETH_Duplex_Mode ETH Duplex Mode + * @{ + */ +#define ETH_MODE_FULLDUPLEX ((uint32_t)0x00000800U) +#define ETH_MODE_HALFDUPLEX ((uint32_t)0x00000000U) +/** + * @} + */ +/** @defgroup ETH_Rx_Mode ETH Rx Mode + * @{ + */ +#define ETH_RXPOLLING_MODE ((uint32_t)0x00000000U) +#define ETH_RXINTERRUPT_MODE ((uint32_t)0x00000001U) +/** + * @} + */ + +/** @defgroup ETH_Checksum_Mode ETH Checksum Mode + * @{ + */ +#define ETH_CHECKSUM_BY_HARDWARE ((uint32_t)0x00000000U) +#define ETH_CHECKSUM_BY_SOFTWARE ((uint32_t)0x00000001U) +/** + * @} + */ + +/** @defgroup ETH_Media_Interface ETH Media Interface + * @{ + */ +#define ETH_MEDIA_INTERFACE_MII ((uint32_t)0x00000000U) +#define ETH_MEDIA_INTERFACE_RMII ((uint32_t)SYSCFG_PMC_MII_RMII_SEL) +/** + * @} + */ + +/** @defgroup ETH_Watchdog ETH Watchdog + * @{ + */ +#define ETH_WATCHDOG_ENABLE ((uint32_t)0x00000000U) +#define ETH_WATCHDOG_DISABLE ((uint32_t)0x00800000U) +/** + * @} + */ + +/** @defgroup ETH_Jabber ETH Jabber + * @{ + */ +#define ETH_JABBER_ENABLE ((uint32_t)0x00000000U) +#define ETH_JABBER_DISABLE ((uint32_t)0x00400000U) +/** + * @} + */ + +/** @defgroup ETH_Inter_Frame_Gap ETH Inter Frame Gap + * @{ + */ +#define ETH_INTERFRAMEGAP_96BIT ((uint32_t)0x00000000U) /*!< minimum IFG between frames during transmission is 96Bit */ +#define ETH_INTERFRAMEGAP_88BIT ((uint32_t)0x00020000U) /*!< minimum IFG between frames during transmission is 88Bit */ +#define ETH_INTERFRAMEGAP_80BIT ((uint32_t)0x00040000U) /*!< minimum IFG 
between frames during transmission is 80Bit */ +#define ETH_INTERFRAMEGAP_72BIT ((uint32_t)0x00060000U) /*!< minimum IFG between frames during transmission is 72Bit */ +#define ETH_INTERFRAMEGAP_64BIT ((uint32_t)0x00080000U) /*!< minimum IFG between frames during transmission is 64Bit */ +#define ETH_INTERFRAMEGAP_56BIT ((uint32_t)0x000A0000U) /*!< minimum IFG between frames during transmission is 56Bit */ +#define ETH_INTERFRAMEGAP_48BIT ((uint32_t)0x000C0000U) /*!< minimum IFG between frames during transmission is 48Bit */ +#define ETH_INTERFRAMEGAP_40BIT ((uint32_t)0x000E0000U) /*!< minimum IFG between frames during transmission is 40Bit */ +/** + * @} + */ + +/** @defgroup ETH_Carrier_Sense ETH Carrier Sense + * @{ + */ +#define ETH_CARRIERSENCE_ENABLE ((uint32_t)0x00000000U) +#define ETH_CARRIERSENCE_DISABLE ((uint32_t)0x00010000U) +/** + * @} + */ + +/** @defgroup ETH_Receive_Own ETH Receive Own + * @{ + */ +#define ETH_RECEIVEOWN_ENABLE ((uint32_t)0x00000000U) +#define ETH_RECEIVEOWN_DISABLE ((uint32_t)0x00002000U) +/** + * @} + */ + +/** @defgroup ETH_Loop_Back_Mode ETH Loop Back Mode + * @{ + */ +#define ETH_LOOPBACKMODE_ENABLE ((uint32_t)0x00001000U) +#define ETH_LOOPBACKMODE_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Checksum_Offload ETH Checksum Offload + * @{ + */ +#define ETH_CHECKSUMOFFLAOD_ENABLE ((uint32_t)0x00000400U) +#define ETH_CHECKSUMOFFLAOD_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Retry_Transmission ETH Retry Transmission + * @{ + */ +#define ETH_RETRYTRANSMISSION_ENABLE ((uint32_t)0x00000000U) +#define ETH_RETRYTRANSMISSION_DISABLE ((uint32_t)0x00000200U) +/** + * @} + */ + +/** @defgroup ETH_Automatic_Pad_CRC_Strip ETH Automatic Pad CRC Strip + * @{ + */ +#define ETH_AUTOMATICPADCRCSTRIP_ENABLE ((uint32_t)0x00000080U) +#define ETH_AUTOMATICPADCRCSTRIP_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Back_Off_Limit ETH Back Off Limit + * @{ + */ +#define ETH_BACKOFFLIMIT_10 ((uint32_t)0x00000000U) +#define ETH_BACKOFFLIMIT_8 ((uint32_t)0x00000020U) +#define ETH_BACKOFFLIMIT_4 ((uint32_t)0x00000040U) +#define ETH_BACKOFFLIMIT_1 ((uint32_t)0x00000060U) +/** + * @} + */ + +/** @defgroup ETH_Deferral_Check ETH Deferral Check + * @{ + */ +#define ETH_DEFFERRALCHECK_ENABLE ((uint32_t)0x00000010U) +#define ETH_DEFFERRALCHECK_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Receive_All ETH Receive All + * @{ + */ +#define ETH_RECEIVEALL_ENABLE ((uint32_t)0x80000000U) +#define ETH_RECEIVEAll_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Source_Addr_Filter ETH Source Addr Filter + * @{ + */ +#define ETH_SOURCEADDRFILTER_NORMAL_ENABLE ((uint32_t)0x00000200U) +#define ETH_SOURCEADDRFILTER_INVERSE_ENABLE ((uint32_t)0x00000300U) +#define ETH_SOURCEADDRFILTER_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Pass_Control_Frames ETH Pass Control Frames + * @{ + */ +#define ETH_PASSCONTROLFRAMES_BLOCKALL ((uint32_t)0x00000040U) /*!< MAC filters all control frames from reaching the application */ +#define ETH_PASSCONTROLFRAMES_FORWARDALL ((uint32_t)0x00000080U) /*!< MAC forwards all control frames to application even if they fail the Address Filter */ +#define ETH_PASSCONTROLFRAMES_FORWARDPASSEDADDRFILTER ((uint32_t)0x000000C0U) /*!< MAC forwards control frames that pass the Address Filter. 
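The auto-negotiation, speed, duplex, checksum and media-interface words above are consumed through the ETH_InitTypeDef fields declared in the first half of this header. A hedged bring-up sketch for a 100 Mbit full-duplex RMII board, reusing the pools sketched earlier; the field names follow the stock F7 HAL, ETH is the MAC instance from the device header, and the MAC address is only an example:

static ETH_HandleTypeDef heth;
static uint8_t MACAddr[6] = { 0x00, 0x80, 0xE1, 0x00, 0x00, 0x01 };

static void eth_bringup(void)
{
  heth.Instance             = ETH;                     /* MAC instance from the device header */
  heth.Init.MACAddr         = MACAddr;
  heth.Init.AutoNegotiation = ETH_AUTONEGOTIATION_ENABLE;
  heth.Init.Speed           = ETH_SPEED_100M;          /* fallback if auto-negotiation is disabled */
  heth.Init.DuplexMode      = ETH_MODE_FULLDUPLEX;
  heth.Init.MediaInterface  = ETH_MEDIA_INTERFACE_RMII;
  heth.Init.RxMode          = ETH_RXINTERRUPT_MODE;
  heth.Init.ChecksumMode    = ETH_CHECKSUM_BY_HARDWARE;
  heth.Init.PhyAddress      = 0x00U;                   /* depends on the PHY address straps */

  if (HAL_ETH_Init(&heth) != HAL_OK)
  {
    return;                                            /* PHY or clock problem */
  }

  /* chain the descriptor lists to the buffer pools declared earlier */
  HAL_ETH_DMATxDescListInit(&heth, DMATxDscrTab, &Tx_Buff[0][0], ETH_TXBUFNB);
  HAL_ETH_DMARxDescListInit(&heth, DMARxDscrTab, &Rx_Buff[0][0], ETH_RXBUFNB);
  HAL_ETH_Start(&heth);                                /* enable MAC and DMA */
}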
*/ +/** + * @} + */ + +/** @defgroup ETH_Broadcast_Frames_Reception ETH Broadcast Frames Reception + * @{ + */ +#define ETH_BROADCASTFRAMESRECEPTION_ENABLE ((uint32_t)0x00000000U) +#define ETH_BROADCASTFRAMESRECEPTION_DISABLE ((uint32_t)0x00000020U) +/** + * @} + */ + +/** @defgroup ETH_Destination_Addr_Filter ETH Destination Addr Filter + * @{ + */ +#define ETH_DESTINATIONADDRFILTER_NORMAL ((uint32_t)0x00000000U) +#define ETH_DESTINATIONADDRFILTER_INVERSE ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup ETH_Promiscuous_Mode ETH Promiscuous Mode + * @{ + */ +#define ETH_PROMISCUOUS_MODE_ENABLE ((uint32_t)0x00000001U) +#define ETH_PROMISCUOUS_MODE_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Multicast_Frames_Filter ETH Multicast Frames Filter + * @{ + */ +#define ETH_MULTICASTFRAMESFILTER_PERFECTHASHTABLE ((uint32_t)0x00000404U) +#define ETH_MULTICASTFRAMESFILTER_HASHTABLE ((uint32_t)0x00000004U) +#define ETH_MULTICASTFRAMESFILTER_PERFECT ((uint32_t)0x00000000U) +#define ETH_MULTICASTFRAMESFILTER_NONE ((uint32_t)0x00000010U) +/** + * @} + */ + +/** @defgroup ETH_Unicast_Frames_Filter ETH Unicast Frames Filter + * @{ + */ +#define ETH_UNICASTFRAMESFILTER_PERFECTHASHTABLE ((uint32_t)0x00000402U) +#define ETH_UNICASTFRAMESFILTER_HASHTABLE ((uint32_t)0x00000002U) +#define ETH_UNICASTFRAMESFILTER_PERFECT ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Zero_Quanta_Pause ETH Zero Quanta Pause + * @{ + */ +#define ETH_ZEROQUANTAPAUSE_ENABLE ((uint32_t)0x00000000U) +#define ETH_ZEROQUANTAPAUSE_DISABLE ((uint32_t)0x00000080U) +/** + * @} + */ + +/** @defgroup ETH_Pause_Low_Threshold ETH Pause Low Threshold + * @{ + */ +#define ETH_PAUSELOWTHRESHOLD_MINUS4 ((uint32_t)0x00000000U) /*!< Pause time minus 4 slot times */ +#define ETH_PAUSELOWTHRESHOLD_MINUS28 ((uint32_t)0x00000010U) /*!< Pause time minus 28 slot times */ +#define ETH_PAUSELOWTHRESHOLD_MINUS144 ((uint32_t)0x00000020U) /*!< Pause time minus 144 slot times */ +#define ETH_PAUSELOWTHRESHOLD_MINUS256 ((uint32_t)0x00000030U) /*!< Pause time minus 256 slot times */ +/** + * @} + */ + +/** @defgroup ETH_Unicast_Pause_Frame_Detect ETH Unicast Pause Frame Detect + * @{ + */ +#define ETH_UNICASTPAUSEFRAMEDETECT_ENABLE ((uint32_t)0x00000008U) +#define ETH_UNICASTPAUSEFRAMEDETECT_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Receive_Flow_Control ETH Receive Flow Control + * @{ + */ +#define ETH_RECEIVEFLOWCONTROL_ENABLE ((uint32_t)0x00000004U) +#define ETH_RECEIVEFLOWCONTROL_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Transmit_Flow_Control ETH Transmit Flow Control + * @{ + */ +#define ETH_TRANSMITFLOWCONTROL_ENABLE ((uint32_t)0x00000002U) +#define ETH_TRANSMITFLOWCONTROL_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_VLAN_Tag_Comparison ETH VLAN Tag Comparison + * @{ + */ +#define ETH_VLANTAGCOMPARISON_12BIT ((uint32_t)0x00010000U) +#define ETH_VLANTAGCOMPARISON_16BIT ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_MAC_addresses ETH MAC addresses + * @{ + */ +#define ETH_MAC_ADDRESS0 ((uint32_t)0x00000000U) +#define ETH_MAC_ADDRESS1 ((uint32_t)0x00000008U) +#define ETH_MAC_ADDRESS2 ((uint32_t)0x00000010U) +#define ETH_MAC_ADDRESS3 ((uint32_t)0x00000018U) +/** + * @} + */ + +/** @defgroup ETH_MAC_addresses_filter_SA_DA ETH MAC addresses filter SA DA + * @{ + */ +#define ETH_MAC_ADDRESSFILTER_SA ((uint32_t)0x00000000U) +#define ETH_MAC_ADDRESSFILTER_DA ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup 
ETH_MAC_addresses_filter_Mask_bytes ETH MAC addresses filter Mask bytes + * @{ + */ +#define ETH_MAC_ADDRESSMASK_BYTE6 ((uint32_t)0x20000000U) /*!< Mask MAC Address high reg bits [15:8] */ +#define ETH_MAC_ADDRESSMASK_BYTE5 ((uint32_t)0x10000000U) /*!< Mask MAC Address high reg bits [7:0] */ +#define ETH_MAC_ADDRESSMASK_BYTE4 ((uint32_t)0x08000000U) /*!< Mask MAC Address low reg bits [31:24] */ +#define ETH_MAC_ADDRESSMASK_BYTE3 ((uint32_t)0x04000000U) /*!< Mask MAC Address low reg bits [23:16] */ +#define ETH_MAC_ADDRESSMASK_BYTE2 ((uint32_t)0x02000000U) /*!< Mask MAC Address low reg bits [15:8] */ +#define ETH_MAC_ADDRESSMASK_BYTE1 ((uint32_t)0x01000000U) /*!< Mask MAC Address low reg bits [70] */ +/** + * @} + */ + +/** @defgroup ETH_Drop_TCP_IP_Checksum_Error_Frame ETH Drop TCP IP Checksum Error Frame + * @{ + */ +#define ETH_DROPTCPIPCHECKSUMERRORFRAME_ENABLE ((uint32_t)0x00000000U) +#define ETH_DROPTCPIPCHECKSUMERRORFRAME_DISABLE ((uint32_t)0x04000000U) +/** + * @} + */ + +/** @defgroup ETH_Receive_Store_Forward ETH Receive Store Forward + * @{ + */ +#define ETH_RECEIVESTOREFORWARD_ENABLE ((uint32_t)0x02000000U) +#define ETH_RECEIVESTOREFORWARD_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Flush_Received_Frame ETH Flush Received Frame + * @{ + */ +#define ETH_FLUSHRECEIVEDFRAME_ENABLE ((uint32_t)0x00000000U) +#define ETH_FLUSHRECEIVEDFRAME_DISABLE ((uint32_t)0x01000000U) +/** + * @} + */ + +/** @defgroup ETH_Transmit_Store_Forward ETH Transmit Store Forward + * @{ + */ +#define ETH_TRANSMITSTOREFORWARD_ENABLE ((uint32_t)0x00200000U) +#define ETH_TRANSMITSTOREFORWARD_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Transmit_Threshold_Control ETH Transmit Threshold Control + * @{ + */ +#define ETH_TRANSMITTHRESHOLDCONTROL_64BYTES ((uint32_t)0x00000000U) /*!< threshold level of the MTL Transmit FIFO is 64 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_128BYTES ((uint32_t)0x00004000U) /*!< threshold level of the MTL Transmit FIFO is 128 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_192BYTES ((uint32_t)0x00008000U) /*!< threshold level of the MTL Transmit FIFO is 192 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_256BYTES ((uint32_t)0x0000C000U) /*!< threshold level of the MTL Transmit FIFO is 256 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_40BYTES ((uint32_t)0x00010000U) /*!< threshold level of the MTL Transmit FIFO is 40 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_32BYTES ((uint32_t)0x00014000U) /*!< threshold level of the MTL Transmit FIFO is 32 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_24BYTES ((uint32_t)0x00018000U) /*!< threshold level of the MTL Transmit FIFO is 24 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_16BYTES ((uint32_t)0x0001C000U) /*!< threshold level of the MTL Transmit FIFO is 16 Bytes */ +/** + * @} + */ + +/** @defgroup ETH_Forward_Error_Frames ETH Forward Error Frames + * @{ + */ +#define ETH_FORWARDERRORFRAMES_ENABLE ((uint32_t)0x00000080U) +#define ETH_FORWARDERRORFRAMES_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Forward_Undersized_Good_Frames ETH Forward Undersized Good Frames + * @{ + */ +#define ETH_FORWARDUNDERSIZEDGOODFRAMES_ENABLE ((uint32_t)0x00000040U) +#define ETH_FORWARDUNDERSIZEDGOODFRAMES_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Receive_Threshold_Control ETH Receive Threshold Control + * @{ + */ +#define ETH_RECEIVEDTHRESHOLDCONTROL_64BYTES ((uint32_t)0x00000000U) /*!< threshold level of the MTL Receive FIFO is 64 Bytes */ +#define 
ETH_RECEIVEDTHRESHOLDCONTROL_32BYTES ((uint32_t)0x00000008U) /*!< threshold level of the MTL Receive FIFO is 32 Bytes */ +#define ETH_RECEIVEDTHRESHOLDCONTROL_96BYTES ((uint32_t)0x00000010U) /*!< threshold level of the MTL Receive FIFO is 96 Bytes */ +#define ETH_RECEIVEDTHRESHOLDCONTROL_128BYTES ((uint32_t)0x00000018U) /*!< threshold level of the MTL Receive FIFO is 128 Bytes */ +/** + * @} + */ + +/** @defgroup ETH_Second_Frame_Operate ETH Second Frame Operate + * @{ + */ +#define ETH_SECONDFRAMEOPERARTE_ENABLE ((uint32_t)0x00000004U) +#define ETH_SECONDFRAMEOPERARTE_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Address_Aligned_Beats ETH Address Aligned Beats + * @{ + */ +#define ETH_ADDRESSALIGNEDBEATS_ENABLE ((uint32_t)0x02000000U) +#define ETH_ADDRESSALIGNEDBEATS_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Fixed_Burst ETH Fixed Burst + * @{ + */ +#define ETH_FIXEDBURST_ENABLE ((uint32_t)0x00010000U) +#define ETH_FIXEDBURST_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Rx_DMA_Burst_Length ETH Rx DMA Burst Length + * @{ + */ +#define ETH_RXDMABURSTLENGTH_1BEAT ((uint32_t)0x00020000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 1 */ +#define ETH_RXDMABURSTLENGTH_2BEAT ((uint32_t)0x00040000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 2 */ +#define ETH_RXDMABURSTLENGTH_4BEAT ((uint32_t)0x00080000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 4 */ +#define ETH_RXDMABURSTLENGTH_8BEAT ((uint32_t)0x00100000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 8 */ +#define ETH_RXDMABURSTLENGTH_16BEAT ((uint32_t)0x00200000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 16 */ +#define ETH_RXDMABURSTLENGTH_32BEAT ((uint32_t)0x00400000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 32 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_4BEAT ((uint32_t)0x01020000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 4 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_8BEAT ((uint32_t)0x01040000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 8 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_16BEAT ((uint32_t)0x01080000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 16 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_32BEAT ((uint32_t)0x01100000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 32 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_64BEAT ((uint32_t)0x01200000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 64 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_128BEAT ((uint32_t)0x01400000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 128 */ +/** + * @} + */ + +/** @defgroup ETH_Tx_DMA_Burst_Length ETH Tx DMA Burst Length + * @{ + */ +#define ETH_TXDMABURSTLENGTH_1BEAT ((uint32_t)0x00000100U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 1 */ +#define ETH_TXDMABURSTLENGTH_2BEAT ((uint32_t)0x00000200U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 2 */ +#define ETH_TXDMABURSTLENGTH_4BEAT ((uint32_t)0x00000400U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 4 */ +#define ETH_TXDMABURSTLENGTH_8BEAT ((uint32_t)0x00000800U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 8 */ +#define 
ETH_TXDMABURSTLENGTH_16BEAT ((uint32_t)0x00001000U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 16 */ +#define ETH_TXDMABURSTLENGTH_32BEAT ((uint32_t)0x00002000U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 32 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_4BEAT ((uint32_t)0x01000100U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 4 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_8BEAT ((uint32_t)0x01000200U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 8 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_16BEAT ((uint32_t)0x01000400U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 16 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_32BEAT ((uint32_t)0x01000800U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 32 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_64BEAT ((uint32_t)0x01001000U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 64 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_128BEAT ((uint32_t)0x01002000U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 128 */ +/** + * @} + */ + +/** @defgroup ETH_DMA_Enhanced_descriptor_format ETH DMA Enhanced descriptor format + * @{ + */ +#define ETH_DMAENHANCEDDESCRIPTOR_ENABLE ((uint32_t)0x00000080U) +#define ETH_DMAENHANCEDDESCRIPTOR_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_DMA_Arbitration ETH DMA Arbitration + * @{ + */ +#define ETH_DMAARBITRATION_ROUNDROBIN_RXTX_1_1 ((uint32_t)0x00000000U) +#define ETH_DMAARBITRATION_ROUNDROBIN_RXTX_2_1 ((uint32_t)0x00004000U) +#define ETH_DMAARBITRATION_ROUNDROBIN_RXTX_3_1 ((uint32_t)0x00008000U) +#define ETH_DMAARBITRATION_ROUNDROBIN_RXTX_4_1 ((uint32_t)0x0000C000U) +#define ETH_DMAARBITRATION_RXPRIORTX ((uint32_t)0x00000002U) +/** + * @} + */ + +/** @defgroup ETH_DMA_Tx_descriptor_segment ETH DMA Tx descriptor segment + * @{ + */ +#define ETH_DMATXDESC_LASTSEGMENTS ((uint32_t)0x40000000U) /*!< Last Segment */ +#define ETH_DMATXDESC_FIRSTSEGMENT ((uint32_t)0x20000000U) /*!< First Segment */ +/** + * @} + */ + +/** @defgroup ETH_DMA_Tx_descriptor_Checksum_Insertion_Control ETH DMA Tx descriptor Checksum Insertion Control + * @{ + */ +#define ETH_DMATXDESC_CHECKSUMBYPASS ((uint32_t)0x00000000U) /*!< Checksum engine bypass */ +#define ETH_DMATXDESC_CHECKSUMIPV4HEADER ((uint32_t)0x00400000U) /*!< IPv4 header checksum insertion */ +#define ETH_DMATXDESC_CHECKSUMTCPUDPICMPSEGMENT ((uint32_t)0x00800000U) /*!< TCP/UDP/ICMP checksum insertion. 
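In the stock driver, HAL_ETH_Init() programs the MAC and DMA with default values; when the store-and-forward, burst-length or arbitration words above need to differ from those defaults, they are written into an ETH_DMAInitTypeDef (declared earlier in this header) and applied with HAL_ETH_ConfigDMA(), prototyped at the end of this file. A hedged sketch only: the member names are those of the stock F7 HAL and should be checked against the structure definition above.

static void eth_dma_tuning(void)
{
  ETH_DMAInitTypeDef dmaconf = {0};  /* zero leaves the remaining members at their 64-byte threshold defaults */

  dmaconf.DropTCPIPChecksumErrorFrame = ETH_DROPTCPIPCHECKSUMERRORFRAME_ENABLE;
  dmaconf.ReceiveStoreForward         = ETH_RECEIVESTOREFORWARD_ENABLE;
  dmaconf.TransmitStoreForward        = ETH_TRANSMITSTOREFORWARD_ENABLE;
  dmaconf.FlushReceivedFrame          = ETH_FLUSHRECEIVEDFRAME_ENABLE;
  dmaconf.ForwardErrorFrames          = ETH_FORWARDERRORFRAMES_DISABLE;
  dmaconf.ForwardUndersizedGoodFrames = ETH_FORWARDUNDERSIZEDGOODFRAMES_DISABLE;
  dmaconf.SecondFrameOperate          = ETH_SECONDFRAMEOPERARTE_ENABLE;
  dmaconf.AddressAlignedBeats         = ETH_ADDRESSALIGNEDBEATS_ENABLE;
  dmaconf.FixedBurst                  = ETH_FIXEDBURST_ENABLE;
  dmaconf.RxDMABurstLength            = ETH_RXDMABURSTLENGTH_32BEAT;
  dmaconf.TxDMABurstLength            = ETH_TXDMABURSTLENGTH_32BEAT;
  dmaconf.EnhancedDescriptorFormat    = ETH_DMAENHANCEDDESCRIPTOR_ENABLE;
  dmaconf.DMAArbitration              = ETH_DMAARBITRATION_ROUNDROBIN_RXTX_1_1;

  (void)HAL_ETH_ConfigDMA(&heth, &dmaconf);            /* heth from the bring-up sketch above */
}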
Pseudo header checksum is assumed to be present */ +#define ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL ((uint32_t)0x00C00000U) /*!< TCP/UDP/ICMP checksum fully in hardware including pseudo header */ +/** + * @} + */ + +/** @defgroup ETH_DMA_Rx_descriptor_buffers ETH DMA Rx descriptor buffers + * @{ + */ +#define ETH_DMARXDESC_BUFFER1 ((uint32_t)0x00000000U) /*!< DMA Rx Desc Buffer1 */ +#define ETH_DMARXDESC_BUFFER2 ((uint32_t)0x00000001U) /*!< DMA Rx Desc Buffer2 */ +/** + * @} + */ + +/** @defgroup ETH_PMT_Flags ETH PMT Flags + * @{ + */ +#define ETH_PMT_FLAG_WUFFRPR ((uint32_t)0x80000000U) /*!< Wake-Up Frame Filter Register Pointer Reset */ +#define ETH_PMT_FLAG_WUFR ((uint32_t)0x00000040U) /*!< Wake-Up Frame Received */ +#define ETH_PMT_FLAG_MPR ((uint32_t)0x00000020U) /*!< Magic Packet Received */ +/** + * @} + */ + +/** @defgroup ETH_MMC_Tx_Interrupts ETH MMC Tx Interrupts + * @{ + */ +#define ETH_MMC_IT_TGF ((uint32_t)0x00200000U) /*!< When Tx good frame counter reaches half the maximum value */ +#define ETH_MMC_IT_TGFMSC ((uint32_t)0x00008000U) /*!< When Tx good multi col counter reaches half the maximum value */ +#define ETH_MMC_IT_TGFSC ((uint32_t)0x00004000U) /*!< When Tx good single col counter reaches half the maximum value */ +/** + * @} + */ + +/** @defgroup ETH_MMC_Rx_Interrupts ETH MMC Rx Interrupts + * @{ + */ +#define ETH_MMC_IT_RGUF ((uint32_t)0x10020000U) /*!< When Rx good unicast frames counter reaches half the maximum value */ +#define ETH_MMC_IT_RFAE ((uint32_t)0x10000040U) /*!< When Rx alignment error counter reaches half the maximum value */ +#define ETH_MMC_IT_RFCE ((uint32_t)0x10000020U) /*!< When Rx crc error counter reaches half the maximum value */ +/** + * @} + */ + +/** @defgroup ETH_MAC_Flags ETH MAC Flags + * @{ + */ +#define ETH_MAC_FLAG_TST ((uint32_t)0x00000200U) /*!< Time stamp trigger flag (on MAC) */ +#define ETH_MAC_FLAG_MMCT ((uint32_t)0x00000040U) /*!< MMC transmit flag */ +#define ETH_MAC_FLAG_MMCR ((uint32_t)0x00000020U) /*!< MMC receive flag */ +#define ETH_MAC_FLAG_MMC ((uint32_t)0x00000010U) /*!< MMC flag (on MAC) */ +#define ETH_MAC_FLAG_PMT ((uint32_t)0x00000008U) /*!< PMT flag (on MAC) */ +/** + * @} + */ + +/** @defgroup ETH_DMA_Flags ETH DMA Flags + * @{ + */ +#define ETH_DMA_FLAG_TST ((uint32_t)0x20000000U) /*!< Time-stamp trigger interrupt (on DMA) */ +#define ETH_DMA_FLAG_PMT ((uint32_t)0x10000000U) /*!< PMT interrupt (on DMA) */ +#define ETH_DMA_FLAG_MMC ((uint32_t)0x08000000U) /*!< MMC interrupt (on DMA) */ +#define ETH_DMA_FLAG_DATATRANSFERERROR ((uint32_t)0x00800000U) /*!< Error bits 0-Rx DMA, 1-Tx DMA */ +#define ETH_DMA_FLAG_READWRITEERROR ((uint32_t)0x01000000U) /*!< Error bits 0-write transfer, 1-read transfer */ +#define ETH_DMA_FLAG_ACCESSERROR ((uint32_t)0x02000000U) /*!< Error bits 0-data buffer, 1-desc. 
access */ +#define ETH_DMA_FLAG_NIS ((uint32_t)0x00010000U) /*!< Normal interrupt summary flag */ +#define ETH_DMA_FLAG_AIS ((uint32_t)0x00008000U) /*!< Abnormal interrupt summary flag */ +#define ETH_DMA_FLAG_ER ((uint32_t)0x00004000U) /*!< Early receive flag */ +#define ETH_DMA_FLAG_FBE ((uint32_t)0x00002000U) /*!< Fatal bus error flag */ +#define ETH_DMA_FLAG_ET ((uint32_t)0x00000400U) /*!< Early transmit flag */ +#define ETH_DMA_FLAG_RWT ((uint32_t)0x00000200U) /*!< Receive watchdog timeout flag */ +#define ETH_DMA_FLAG_RPS ((uint32_t)0x00000100U) /*!< Receive process stopped flag */ +#define ETH_DMA_FLAG_RBU ((uint32_t)0x00000080U) /*!< Receive buffer unavailable flag */ +#define ETH_DMA_FLAG_R ((uint32_t)0x00000040U) /*!< Receive flag */ +#define ETH_DMA_FLAG_TU ((uint32_t)0x00000020U) /*!< Underflow flag */ +#define ETH_DMA_FLAG_RO ((uint32_t)0x00000010U) /*!< Overflow flag */ +#define ETH_DMA_FLAG_TJT ((uint32_t)0x00000008U) /*!< Transmit jabber timeout flag */ +#define ETH_DMA_FLAG_TBU ((uint32_t)0x00000004U) /*!< Transmit buffer unavailable flag */ +#define ETH_DMA_FLAG_TPS ((uint32_t)0x00000002U) /*!< Transmit process stopped flag */ +#define ETH_DMA_FLAG_T ((uint32_t)0x00000001U) /*!< Transmit flag */ +/** + * @} + */ + +/** @defgroup ETH_MAC_Interrupts ETH MAC Interrupts + * @{ + */ +#define ETH_MAC_IT_TST ((uint32_t)0x00000200U) /*!< Time stamp trigger interrupt (on MAC) */ +#define ETH_MAC_IT_MMCT ((uint32_t)0x00000040U) /*!< MMC transmit interrupt */ +#define ETH_MAC_IT_MMCR ((uint32_t)0x00000020U) /*!< MMC receive interrupt */ +#define ETH_MAC_IT_MMC ((uint32_t)0x00000010U) /*!< MMC interrupt (on MAC) */ +#define ETH_MAC_IT_PMT ((uint32_t)0x00000008U) /*!< PMT interrupt (on MAC) */ +/** + * @} + */ + +/** @defgroup ETH_DMA_Interrupts ETH DMA Interrupts + * @{ + */ +#define ETH_DMA_IT_TST ((uint32_t)0x20000000U) /*!< Time-stamp trigger interrupt (on DMA) */ +#define ETH_DMA_IT_PMT ((uint32_t)0x10000000U) /*!< PMT interrupt (on DMA) */ +#define ETH_DMA_IT_MMC ((uint32_t)0x08000000U) /*!< MMC interrupt (on DMA) */ +#define ETH_DMA_IT_NIS ((uint32_t)0x00010000U) /*!< Normal interrupt summary */ +#define ETH_DMA_IT_AIS ((uint32_t)0x00008000U) /*!< Abnormal interrupt summary */ +#define ETH_DMA_IT_ER ((uint32_t)0x00004000U) /*!< Early receive interrupt */ +#define ETH_DMA_IT_FBE ((uint32_t)0x00002000U) /*!< Fatal bus error interrupt */ +#define ETH_DMA_IT_ET ((uint32_t)0x00000400U) /*!< Early transmit interrupt */ +#define ETH_DMA_IT_RWT ((uint32_t)0x00000200U) /*!< Receive watchdog timeout interrupt */ +#define ETH_DMA_IT_RPS ((uint32_t)0x00000100U) /*!< Receive process stopped interrupt */ +#define ETH_DMA_IT_RBU ((uint32_t)0x00000080U) /*!< Receive buffer unavailable interrupt */ +#define ETH_DMA_IT_R ((uint32_t)0x00000040U) /*!< Receive interrupt */ +#define ETH_DMA_IT_TU ((uint32_t)0x00000020U) /*!< Underflow interrupt */ +#define ETH_DMA_IT_RO ((uint32_t)0x00000010U) /*!< Overflow interrupt */ +#define ETH_DMA_IT_TJT ((uint32_t)0x00000008U) /*!< Transmit jabber timeout interrupt */ +#define ETH_DMA_IT_TBU ((uint32_t)0x00000004U) /*!< Transmit buffer unavailable interrupt */ +#define ETH_DMA_IT_TPS ((uint32_t)0x00000002U) /*!< Transmit process stopped interrupt */ +#define ETH_DMA_IT_T ((uint32_t)0x00000001U) /*!< Transmit interrupt */ +/** + * @} + */ + +/** @defgroup ETH_DMA_transmit_process_state ETH DMA transmit process state + * @{ + */ +#define ETH_DMA_TRANSMITPROCESS_STOPPED ((uint32_t)0x00000000U) /*!< Stopped - Reset or Stop Tx Command issued */ +#define 
ETH_DMA_TRANSMITPROCESS_FETCHING ((uint32_t)0x00100000U) /*!< Running - fetching the Tx descriptor */ +#define ETH_DMA_TRANSMITPROCESS_WAITING ((uint32_t)0x00200000U) /*!< Running - waiting for status */ +#define ETH_DMA_TRANSMITPROCESS_READING ((uint32_t)0x00300000U) /*!< Running - reading the data from host memory */ +#define ETH_DMA_TRANSMITPROCESS_SUSPENDED ((uint32_t)0x00600000U) /*!< Suspended - Tx Descriptor unavailable */ +#define ETH_DMA_TRANSMITPROCESS_CLOSING ((uint32_t)0x00700000U) /*!< Running - closing Rx descriptor */ + +/** + * @} + */ + + +/** @defgroup ETH_DMA_receive_process_state ETH DMA receive process state + * @{ + */ +#define ETH_DMA_RECEIVEPROCESS_STOPPED ((uint32_t)0x00000000U) /*!< Stopped - Reset or Stop Rx Command issued */ +#define ETH_DMA_RECEIVEPROCESS_FETCHING ((uint32_t)0x00020000U) /*!< Running - fetching the Rx descriptor */ +#define ETH_DMA_RECEIVEPROCESS_WAITING ((uint32_t)0x00060000U) /*!< Running - waiting for packet */ +#define ETH_DMA_RECEIVEPROCESS_SUSPENDED ((uint32_t)0x00080000U) /*!< Suspended - Rx Descriptor unavailable */ +#define ETH_DMA_RECEIVEPROCESS_CLOSING ((uint32_t)0x000A0000U) /*!< Running - closing descriptor */ +#define ETH_DMA_RECEIVEPROCESS_QUEUING ((uint32_t)0x000E0000U) /*!< Running - queuing the receive frame into host memory */ + +/** + * @} + */ + +/** @defgroup ETH_DMA_overflow ETH DMA overflow + * @{ + */ +#define ETH_DMA_OVERFLOW_RXFIFOCOUNTER ((uint32_t)0x10000000U) /*!< Overflow bit for FIFO overflow counter */ +#define ETH_DMA_OVERFLOW_MISSEDFRAMECOUNTER ((uint32_t)0x00010000U) /*!< Overflow bit for missed frame counter */ +/** + * @} + */ + +/** @defgroup ETH_EXTI_LINE_WAKEUP ETH EXTI LINE WAKEUP + * @{ + */ +#define ETH_EXTI_LINE_WAKEUP ((uint32_t)0x00080000U) /*!< External interrupt line 19 Connected to the ETH EXTI Line */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup ETH_Exported_Macros ETH Exported Macros + * @brief macros to handle interrupts and specific clock configurations + * @{ + */ + +/** @brief Reset ETH handle state + * @param __HANDLE__ specifies the ETH handle. + * @retval None + */ +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +#define __HAL_ETH_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->State = HAL_ETH_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_ETH_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_ETH_STATE_RESET) +#endif /*USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @brief Checks whether the specified Ethernet DMA Tx Desc flag is set or not. + * @param __HANDLE__ ETH Handle + * @param __FLAG__ specifies the flag of TDES0 to check. + * @retval the ETH_DMATxDescFlag (SET or RESET). + */ +#define __HAL_ETH_DMATXDESC_GET_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->TxDesc->Status & (__FLAG__) == (__FLAG__)) + +/** + * @brief Checks whether the specified Ethernet DMA Rx Desc flag is set or not. + * @param __HANDLE__ ETH Handle + * @param __FLAG__ specifies the flag of RDES0 to check. + * @retval the ETH_DMATxDescFlag (SET or RESET). + */ +#define __HAL_ETH_DMARXDESC_GET_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->RxDesc->Status & (__FLAG__) == (__FLAG__)) + +/** + * @brief Enables the specified DMA Rx Desc receive interrupt. 
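One caveat with the two GET_FLAG macros above: in C the == operator binds more tightly than &, so the expression groups as Status & ((__FLAG__) == (__FLAG__)) and, as written, effectively tests only bit 0 of the descriptor word. Application code that needs a particular TDES0/RDES0 bit therefore usually masks it explicitly, as in this sketch for polling the current receive descriptor (Status and RxDesc are the same members the macros use; the 16-bit shift extracts the RDES0 frame-length field described above):

ETH_DMADescTypeDef *rxdesc = heth.RxDesc;                        /* heth from the earlier sketches    */

if (((rxdesc->Status & ETH_DMARXDESC_OWN) == 0U) &&              /* descriptor handed back to the CPU */
    ((rxdesc->Status & ETH_DMARXDESC_ES)  == 0U) &&              /* no error summary                  */
    ((rxdesc->Status & (ETH_DMARXDESC_FS | ETH_DMARXDESC_LS)) ==
                       (ETH_DMARXDESC_FS | ETH_DMARXDESC_LS)))   /* frame fits in a single buffer     */
{
  uint32_t len = (rxdesc->Status & ETH_DMARXDESC_FL) >> 16U;     /* includes the 4 CRC bytes unless CRC stripping is enabled */
  /* ... copy len bytes out of the attached receive buffer ... */
  rxdesc->Status |= ETH_DMARXDESC_OWN;                           /* give the descriptor back to the DMA */
}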
+ * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMARXDESC_ENABLE_IT(__HANDLE__) ((__HANDLE__)->RxDesc->ControlBufferSize &=(~(uint32_t)ETH_DMARXDESC_DIC)) + +/** + * @brief Disables the specified DMA Rx Desc receive interrupt. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMARXDESC_DISABLE_IT(__HANDLE__) ((__HANDLE__)->RxDesc->ControlBufferSize |= ETH_DMARXDESC_DIC) + +/** + * @brief Set the specified DMA Rx Desc Own bit. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMARXDESC_SET_OWN_BIT(__HANDLE__) ((__HANDLE__)->RxDesc->Status |= ETH_DMARXDESC_OWN) + +/** + * @brief Returns the specified Ethernet DMA Tx Desc collision count. + * @param __HANDLE__ ETH Handle + * @retval The Transmit descriptor collision counter value. + */ +#define __HAL_ETH_DMATXDESC_GET_COLLISION_COUNT(__HANDLE__) (((__HANDLE__)->TxDesc->Status & ETH_DMATXDESC_CC) >> ETH_DMATXDESC_COLLISION_COUNTSHIFT) + +/** + * @brief Set the specified DMA Tx Desc Own bit. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_SET_OWN_BIT(__HANDLE__) ((__HANDLE__)->TxDesc->Status |= ETH_DMATXDESC_OWN) + +/** + * @brief Enables the specified DMA Tx Desc Transmit interrupt. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_ENABLE_IT(__HANDLE__) ((__HANDLE__)->TxDesc->Status |= ETH_DMATXDESC_IC) + +/** + * @brief Disables the specified DMA Tx Desc Transmit interrupt. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_DISABLE_IT(__HANDLE__) ((__HANDLE__)->TxDesc->Status &= ~ETH_DMATXDESC_IC) + +/** + * @brief Selects the specified Ethernet DMA Tx Desc Checksum Insertion. + * @param __HANDLE__ ETH Handle + * @param __CHECKSUM__ specifies is the DMA Tx desc checksum insertion. + * This parameter can be one of the following values: + * @arg ETH_DMATXDESC_CHECKSUMBYPASS : Checksum bypass + * @arg ETH_DMATXDESC_CHECKSUMIPV4HEADER : IPv4 header checksum + * @arg ETH_DMATXDESC_CHECKSUMTCPUDPICMPSEGMENT : TCP/UDP/ICMP checksum. Pseudo header checksum is assumed to be present + * @arg ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL : TCP/UDP/ICMP checksum fully in hardware including pseudo header + * @retval None + */ +#define __HAL_ETH_DMATXDESC_CHECKSUM_INSERTION(__HANDLE__, __CHECKSUM__) ((__HANDLE__)->TxDesc->Status |= (__CHECKSUM__)) + +/** + * @brief Enables the DMA Tx Desc CRC. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_CRC_ENABLE(__HANDLE__) ((__HANDLE__)->TxDesc->Status &= ~ETH_DMATXDESC_DC) + +/** + * @brief Disables the DMA Tx Desc CRC. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_CRC_DISABLE(__HANDLE__) ((__HANDLE__)->TxDesc->Status |= ETH_DMATXDESC_DC) + +/** + * @brief Enables the DMA Tx Desc padding for frame shorter than 64 bytes. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_SHORT_FRAME_PADDING_ENABLE(__HANDLE__) ((__HANDLE__)->TxDesc->Status &= ~ETH_DMATXDESC_DP) + +/** + * @brief Disables the DMA Tx Desc padding for frame shorter than 64 bytes. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_SHORT_FRAME_PADDING_DISABLE(__HANDLE__) ((__HANDLE__)->TxDesc->Status |= ETH_DMATXDESC_DP) + +/** + * @brief Enables the specified Ethernet MAC interrupts. + * @param __HANDLE__ ETH Handle + * @param __INTERRUPT__ specifies the Ethernet MAC interrupt sources to be + * enabled or disabled. 
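The per-descriptor control macros above act on whatever descriptor the handle's TxDesc member currently points to, so they are applied just before a frame is queued. A hedged sketch using full hardware checksum insertion and a per-frame transmit interrupt; HAL_ETH_TransmitFrame() is prototyped at the end of this header and framelength stands for whatever the application just wrote into the attached buffer:

uint32_t framelength = 60U;                             /* example: a minimal 60-byte frame           */
/* copy the outgoing frame into the buffer attached to heth.TxDesc, then: */
__HAL_ETH_DMATXDESC_CHECKSUM_INSERTION(&heth, ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL);
__HAL_ETH_DMATXDESC_ENABLE_IT(&heth);                   /* Transmit interrupt once this frame is sent */
if (HAL_ETH_TransmitFrame(&heth, framelength) != HAL_OK)
{
  /* frame could not be queued, e.g. the descriptor is still owned by the DMA */
}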
+ * This parameter can be any combination of the following values: + * @arg ETH_MAC_IT_TST : Time stamp trigger interrupt + * @arg ETH_MAC_IT_PMT : PMT interrupt + * @retval None + */ +#define __HAL_ETH_MAC_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->MACIMR |= (__INTERRUPT__)) + +/** + * @brief Disables the specified Ethernet MAC interrupts. + * @param __HANDLE__ ETH Handle + * @param __INTERRUPT__ specifies the Ethernet MAC interrupt sources to be + * enabled or disabled. + * This parameter can be any combination of the following values: + * @arg ETH_MAC_IT_TST : Time stamp trigger interrupt + * @arg ETH_MAC_IT_PMT : PMT interrupt + * @retval None + */ +#define __HAL_ETH_MAC_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->MACIMR &= ~(__INTERRUPT__)) + +/** + * @brief Initiate a Pause Control Frame (Full-duplex only). + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_INITIATE_PAUSE_CONTROL_FRAME(__HANDLE__) ((__HANDLE__)->Instance->MACFCR |= ETH_MACFCR_FCBBPA) + +/** + * @brief Checks whether the Ethernet flow control busy bit is set or not. + * @param __HANDLE__ ETH Handle + * @retval The new state of flow control busy status bit (SET or RESET). + */ +#define __HAL_ETH_GET_FLOW_CONTROL_BUSY_STATUS(__HANDLE__) (((__HANDLE__)->Instance->MACFCR & ETH_MACFCR_FCBBPA) == ETH_MACFCR_FCBBPA) + +/** + * @brief Enables the MAC Back Pressure operation activation (Half-duplex only). + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_BACK_PRESSURE_ACTIVATION_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MACFCR |= ETH_MACFCR_FCBBPA) + +/** + * @brief Disables the MAC BackPressure operation activation (Half-duplex only). + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_BACK_PRESSURE_ACTIVATION_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MACFCR &= ~ETH_MACFCR_FCBBPA) + +/** + * @brief Checks whether the specified Ethernet MAC flag is set or not. + * @param __HANDLE__ ETH Handle + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg ETH_MAC_FLAG_TST : Time stamp trigger flag + * @arg ETH_MAC_FLAG_MMCT : MMC transmit flag + * @arg ETH_MAC_FLAG_MMCR : MMC receive flag + * @arg ETH_MAC_FLAG_MMC : MMC flag + * @arg ETH_MAC_FLAG_PMT : PMT flag + * @retval The state of Ethernet MAC flag. + */ +#define __HAL_ETH_MAC_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->MACSR &( __FLAG__)) == ( __FLAG__)) + +/** + * @brief Enables the specified Ethernet DMA interrupts. + * @param __HANDLE__ ETH Handle + * @param __INTERRUPT__ specifies the Ethernet DMA interrupt sources to be + * enabled @ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMAIER |= (__INTERRUPT__)) + +/** + * @brief Disables the specified Ethernet DMA interrupts. + * @param __HANDLE__ ETH Handle + * @param __INTERRUPT__ specifies the Ethernet DMA interrupt sources to be + * disabled. @ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMAIER &= ~(__INTERRUPT__)) + +/** + * @brief Clears the Ethernet DMA IT pending bit. + * @param __HANDLE__ ETH Handle + * @param __INTERRUPT__ specifies the interrupt pending bit to clear. 
@ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_CLEAR_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMASR =(__INTERRUPT__)) + +/** + * @brief Checks whether the specified Ethernet DMA flag is set or not. +* @param __HANDLE__ ETH Handle + * @param __FLAG__ specifies the flag to check. @ref ETH_DMA_Flags + * @retval The new state of ETH_DMA_FLAG (SET or RESET). + */ +#define __HAL_ETH_DMA_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->DMASR &( __FLAG__)) == ( __FLAG__)) + +/** + * @brief Checks whether the specified Ethernet DMA flag is set or not. + * @param __HANDLE__ ETH Handle + * @param __FLAG__ specifies the flag to clear. @ref ETH_DMA_Flags + * @retval The new state of ETH_DMA_FLAG (SET or RESET). + */ +#define __HAL_ETH_DMA_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->DMASR = (__FLAG__)) + +/** + * @brief Checks whether the specified Ethernet DMA overflow flag is set or not. + * @param __HANDLE__ ETH Handle + * @param __OVERFLOW__ specifies the DMA overflow flag to check. + * This parameter can be one of the following values: + * @arg ETH_DMA_OVERFLOW_RXFIFOCOUNTER : Overflow for FIFO Overflows Counter + * @arg ETH_DMA_OVERFLOW_MISSEDFRAMECOUNTER : Overflow for Buffer Unavailable Missed Frame Counter + * @retval The state of Ethernet DMA overflow Flag (SET or RESET). + */ +#define __HAL_ETH_GET_DMA_OVERFLOW_STATUS(__HANDLE__, __OVERFLOW__) (((__HANDLE__)->Instance->DMAMFBOCR & (__OVERFLOW__)) == (__OVERFLOW__)) + +/** + * @brief Set the DMA Receive status watchdog timer register value + * @param __HANDLE__ ETH Handle + * @param __VALUE__ DMA Receive status watchdog timer register value + * @retval None + */ +#define __HAL_ETH_SET_RECEIVE_WATCHDOG_TIMER(__HANDLE__, __VALUE__) ((__HANDLE__)->Instance->DMARSWTR = (__VALUE__)) + +/** + * @brief Enables any unicast packet filtered by the MAC address + * recognition to be a wake-up frame. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_GLOBAL_UNICAST_WAKEUP_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR |= ETH_MACPMTCSR_GU) + +/** + * @brief Disables any unicast packet filtered by the MAC address + * recognition to be a wake-up frame. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_GLOBAL_UNICAST_WAKEUP_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR &= ~ETH_MACPMTCSR_GU) + +/** + * @brief Enables the MAC Wake-Up Frame Detection. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_WAKEUP_FRAME_DETECTION_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR |= ETH_MACPMTCSR_WFE) + +/** + * @brief Disables the MAC Wake-Up Frame Detection. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_WAKEUP_FRAME_DETECTION_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR &= ~ETH_MACPMTCSR_WFE) + +/** + * @brief Enables the MAC Magic Packet Detection. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MAGIC_PACKET_DETECTION_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR |= ETH_MACPMTCSR_MPE) + +/** + * @brief Disables the MAC Magic Packet Detection. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MAGIC_PACKET_DETECTION_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR &= ~ETH_MACPMTCSR_WFE) + +/** + * @brief Enables the MAC Power Down. 
+ * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_POWER_DOWN_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR |= ETH_MACPMTCSR_PD) + +/** + * @brief Disables the MAC Power Down. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_POWER_DOWN_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR &= ~ETH_MACPMTCSR_PD) + +/** + * @brief Checks whether the specified Ethernet PMT flag is set or not. + * @param __HANDLE__ ETH Handle. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg ETH_PMT_FLAG_WUFFRPR : Wake-Up Frame Filter Register Pointer Reset + * @arg ETH_PMT_FLAG_WUFR : Wake-Up Frame Received + * @arg ETH_PMT_FLAG_MPR : Magic Packet Received + * @retval The new state of Ethernet PMT Flag (SET or RESET). + */ +#define __HAL_ETH_GET_PMT_FLAG_STATUS(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->MACPMTCSR &( __FLAG__)) == ( __FLAG__)) + +/** + * @brief Preset and Initialize the MMC counters to almost-full value: 0xFFFF_FFF0 (full - 16) + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MMC_COUNTER_FULL_PRESET(__HANDLE__) ((__HANDLE__)->Instance->MMCCR |= (ETH_MMCCR_MCFHP | ETH_MMCCR_MCP)) + +/** + * @brief Preset and Initialize the MMC counters to almost-half value: 0x7FFF_FFF0 (half - 16) + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MMC_COUNTER_HALF_PRESET(__HANDLE__) do{(__HANDLE__)->Instance->MMCCR &= ~ETH_MMCCR_MCFHP;\ + (__HANDLE__)->Instance->MMCCR |= ETH_MMCCR_MCP;} while (0) + +/** + * @brief Enables the MMC Counter Freeze. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MMC_COUNTER_FREEZE_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR |= ETH_MMCCR_MCF) + +/** + * @brief Disables the MMC Counter Freeze. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MMC_COUNTER_FREEZE_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR &= ~ETH_MMCCR_MCF) + +/** + * @brief Enables the MMC Reset On Read. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_ETH_MMC_RESET_ONREAD_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR |= ETH_MMCCR_ROR) + +/** + * @brief Disables the MMC Reset On Read. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_ETH_MMC_RESET_ONREAD_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR &= ~ETH_MMCCR_ROR) + +/** + * @brief Enables the MMC Counter Stop Rollover. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_ETH_MMC_COUNTER_ROLLOVER_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR &= ~ETH_MMCCR_CSR) + +/** + * @brief Disables the MMC Counter Stop Rollover. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_ETH_MMC_COUNTER_ROLLOVER_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR |= ETH_MMCCR_CSR) + +/** + * @brief Resets the MMC Counters. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MMC_COUNTERS_RESET(__HANDLE__) ((__HANDLE__)->Instance->MMCCR |= ETH_MMCCR_CR) + +/** + * @brief Enables the specified Ethernet MMC Rx interrupts. + * @param __HANDLE__ ETH Handle. + * @param __INTERRUPT__ specifies the Ethernet MMC interrupt sources to be enabled or disabled. 
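Combined with the EXTI line 19 helpers defined a little further down, the power-management macros above let the MAC sit in power-down and wake the system on a magic packet. A hedged sketch; heth is the handle from the earlier sketches and the interrupt-side check would normally live in the ETH wake-up IRQ handler:

__HAL_ETH_MAGIC_PACKET_DETECTION_ENABLE(&heth);         /* arm magic-packet detection                 */
__HAL_ETH_WAKEUP_EXTI_ENABLE_IT();                      /* unmask the ETH wake-up EXTI line           */
__HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_EDGE_TRIGGER();
__HAL_ETH_POWER_DOWN_ENABLE(&heth);                     /* MAC ignores normal traffic until woken     */

/* in the ETH wake-up interrupt handler: */
if (__HAL_ETH_GET_PMT_FLAG_STATUS(&heth, ETH_PMT_FLAG_MPR))
{
  __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG();                   /* clear the pending EXTI request             */
  /* the MAC has left power-down; normal reception resumes */
}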
+ * This parameter can be one of the following values: + * @arg ETH_MMC_IT_RGUF : When Rx good unicast frames counter reaches half the maximum value + * @arg ETH_MMC_IT_RFAE : When Rx alignment error counter reaches half the maximum value + * @arg ETH_MMC_IT_RFCE : When Rx crc error counter reaches half the maximum value + * @retval None + */ +#define __HAL_ETH_MMC_RX_IT_ENABLE(__HANDLE__, __INTERRUPT__) (__HANDLE__)->Instance->MMCRIMR &= ~((__INTERRUPT__) & 0xEFFFFFFF) +/** + * @brief Disables the specified Ethernet MMC Rx interrupts. + * @param __HANDLE__ ETH Handle. + * @param __INTERRUPT__ specifies the Ethernet MMC interrupt sources to be enabled or disabled. + * This parameter can be one of the following values: + * @arg ETH_MMC_IT_RGUF : When Rx good unicast frames counter reaches half the maximum value + * @arg ETH_MMC_IT_RFAE : When Rx alignment error counter reaches half the maximum value + * @arg ETH_MMC_IT_RFCE : When Rx crc error counter reaches half the maximum value + * @retval None + */ +#define __HAL_ETH_MMC_RX_IT_DISABLE(__HANDLE__, __INTERRUPT__) (__HANDLE__)->Instance->MMCRIMR |= ((__INTERRUPT__) & 0xEFFFFFFF) +/** + * @brief Enables the specified Ethernet MMC Tx interrupts. + * @param __HANDLE__ ETH Handle. + * @param __INTERRUPT__ specifies the Ethernet MMC interrupt sources to be enabled or disabled. + * This parameter can be one of the following values: + * @arg ETH_MMC_IT_TGF : When Tx good frame counter reaches half the maximum value + * @arg ETH_MMC_IT_TGFMSC: When Tx good multi col counter reaches half the maximum value + * @arg ETH_MMC_IT_TGFSC : When Tx good single col counter reaches half the maximum value + * @retval None + */ +#define __HAL_ETH_MMC_TX_IT_ENABLE(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->MMCRIMR &= ~ (__INTERRUPT__)) + +/** + * @brief Disables the specified Ethernet MMC Tx interrupts. + * @param __HANDLE__ ETH Handle. + * @param __INTERRUPT__ specifies the Ethernet MMC interrupt sources to be enabled or disabled. + * This parameter can be one of the following values: + * @arg ETH_MMC_IT_TGF : When Tx good frame counter reaches half the maximum value + * @arg ETH_MMC_IT_TGFMSC: When Tx good multi col counter reaches half the maximum value + * @arg ETH_MMC_IT_TGFSC : When Tx good single col counter reaches half the maximum value + * @retval None + */ +#define __HAL_ETH_MMC_TX_IT_DISABLE(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->MMCRIMR |= (__INTERRUPT__)) + +/** + * @brief Enables the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_IT() EXTI->IMR |= (ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Disables the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_DISABLE_IT() EXTI->IMR &= ~(ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Enable event on ETH External event line. + * @retval None. + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_EVENT() EXTI->EMR |= (ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Disable event on ETH External event line + * @retval None. + */ +#define __HAL_ETH_WAKEUP_EXTI_DISABLE_EVENT() EXTI->EMR &= ~(ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Get flag of the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_GET_FLAG() EXTI->PR & (ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Clear flag of the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG() EXTI->PR = (ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Enables rising edge trigger to the ETH External interrupt line. 
+ * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_EDGE_TRIGGER() EXTI->RTSR |= ETH_EXTI_LINE_WAKEUP + +/** + * @brief Disables the rising edge trigger to the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_DISABLE_RISING_EDGE_TRIGGER() EXTI->RTSR &= ~(ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Enables falling edge trigger to the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLING_EDGE_TRIGGER() EXTI->FTSR |= (ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Disables falling edge trigger to the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_DISABLE_FALLING_EDGE_TRIGGER() EXTI->FTSR &= ~(ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Enables rising/falling edge trigger to the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLINGRISING_TRIGGER() do{EXTI->RTSR |= ETH_EXTI_LINE_WAKEUP;\ + EXTI->FTSR |= ETH_EXTI_LINE_WAKEUP;\ + }while(0) + +/** + * @brief Disables rising/falling edge trigger to the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_DISABLE_FALLINGRISING_TRIGGER() do{EXTI->RTSR &= ~(ETH_EXTI_LINE_WAKEUP);\ + EXTI->FTSR &= ~(ETH_EXTI_LINE_WAKEUP);\ + }while(0) + +/** + * @brief Generate a Software interrupt on selected EXTI line. + * @retval None. + */ +#define __HAL_ETH_WAKEUP_EXTI_GENERATE_SWIT() EXTI->SWIER|= ETH_EXTI_LINE_WAKEUP + +/** + * @} + */ +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup ETH_Exported_Functions + * @{ + */ + +/* Initialization and de-initialization functions ****************************/ + +/** @addtogroup ETH_Exported_Functions_Group1 + * @{ + */ +HAL_StatusTypeDef HAL_ETH_Init(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_DeInit(ETH_HandleTypeDef *heth); +void HAL_ETH_MspInit(ETH_HandleTypeDef *heth); +void HAL_ETH_MspDeInit(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_DMATxDescListInit(ETH_HandleTypeDef *heth, ETH_DMADescTypeDef *DMATxDescTab, uint8_t* TxBuff, uint32_t TxBuffCount); +HAL_StatusTypeDef HAL_ETH_DMARxDescListInit(ETH_HandleTypeDef *heth, ETH_DMADescTypeDef *DMARxDescTab, uint8_t *RxBuff, uint32_t RxBuffCount); +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_ETH_RegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID, pETH_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ +/* IO operation functions ****************************************************/ + +/** @addtogroup ETH_Exported_Functions_Group2 + * @{ + */ +HAL_StatusTypeDef HAL_ETH_TransmitFrame(ETH_HandleTypeDef *heth, uint32_t FrameLength); +HAL_StatusTypeDef HAL_ETH_GetReceivedFrame(ETH_HandleTypeDef *heth); +/* Communication with PHY functions*/ +HAL_StatusTypeDef HAL_ETH_ReadPHYRegister(ETH_HandleTypeDef *heth, uint16_t PHYReg, uint32_t *RegValue); +HAL_StatusTypeDef HAL_ETH_WritePHYRegister(ETH_HandleTypeDef *heth, uint16_t PHYReg, uint32_t RegValue); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_ETH_GetReceivedFrame_IT(ETH_HandleTypeDef *heth); +void HAL_ETH_IRQHandler(ETH_HandleTypeDef *heth); +/* Callback in non blocking modes (Interrupt) */ +void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef 
*heth); +void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth); +/** + * @} + */ + +/* Peripheral Control functions **********************************************/ + +/** @addtogroup ETH_Exported_Functions_Group3 + * @{ + */ + +HAL_StatusTypeDef HAL_ETH_Start(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_Stop(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_ConfigMAC(ETH_HandleTypeDef *heth, ETH_MACInitTypeDef *macconf); +HAL_StatusTypeDef HAL_ETH_ConfigDMA(ETH_HandleTypeDef *heth, ETH_DMAInitTypeDef *dmaconf); +/** + * @} + */ + +/* Peripheral State functions ************************************************/ + +/** @addtogroup ETH_Exported_Functions_Group4 + * @{ + */ +HAL_ETH_StateTypeDef HAL_ETH_GetState(ETH_HandleTypeDef *heth); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* ETH */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_ETH_H */ + + + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_exti.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_exti.h new file mode 100644 index 000000000..e64414ffc --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_exti.h @@ -0,0 +1,319 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_exti.h + * @author MCD Application Team + * @brief Header file of EXTI HAL module. + ****************************************************************************** + * @attention + * + *
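Illustrative usage note (not part of the vendor file above): the legacy F7 ETH driver just declared is handle-based, and PHY management goes through HAL_ETH_ReadPHYRegister/HAL_ETH_WritePHYRegister. A minimal link-status sketch follows; the external handle heth, its initialisation (e.g. a CubeMX-generated MX_ETH_Init()), the umbrella include, and the PHY register layout (IEEE 802.3 register 1, link bit 2, as on most PHYs) are assumptions of the example, not something the header mandates.

/* Minimal sketch: poll the PHY Basic Status Register for link-up. */
#include "stm32f7xx_hal.h"

extern ETH_HandleTypeDef heth;            /* handle owned by the application */

static int phy_link_is_up(void)
{
    uint32_t bsr = 0U;
    /* Register 1 is the IEEE 802.3 Basic Status Register on most PHYs;
       the PHY address itself is taken from heth.Init at init time. */
    if (HAL_ETH_ReadPHYRegister(&heth, 0x01U, &bsr) != HAL_OK) {
        return 0;                         /* MDIO access failed */
    }
    return (bsr & 0x0004U) != 0U;         /* bit 2 = link status */
}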
© Copyright (c) 2018 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_EXTI_H +#define STM32F7xx_HAL_EXTI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup EXTI EXTI + * @brief EXTI HAL module driver + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup EXTI_Exported_Types EXTI Exported Types + * @{ + */ +typedef enum +{ + HAL_EXTI_COMMON_CB_ID = 0x00U +} EXTI_CallbackIDTypeDef; + +/** + * @brief EXTI Handle structure definition + */ +typedef struct +{ + uint32_t Line; /*!< Exti line number */ + void (* PendingCallback)(void); /*!< Exti pending callback */ +} EXTI_HandleTypeDef; + +/** + * @brief EXTI Configuration structure definition + */ +typedef struct +{ + uint32_t Line; /*!< The Exti line to be configured. This parameter + can be a value of @ref EXTI_Line */ + uint32_t Mode; /*!< The Exit Mode to be configured for a core. + This parameter can be a combination of @ref EXTI_Mode */ + uint32_t Trigger; /*!< The Exti Trigger to be configured. This parameter + can be a value of @ref EXTI_Trigger */ + uint32_t GPIOSel; /*!< The Exti GPIO multiplexer selection to be configured. + This parameter is only possible for line 0 to 15. It + can be a value of @ref EXTI_GPIOSel */ +} EXTI_ConfigTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup EXTI_Exported_Constants EXTI Exported Constants + * @{ + */ + +/** @defgroup EXTI_Line EXTI Line + * @{ + */ +#define EXTI_LINE_0 (EXTI_GPIO | 0x00u) /*!< External interrupt line 0 */ +#define EXTI_LINE_1 (EXTI_GPIO | 0x01u) /*!< External interrupt line 1 */ +#define EXTI_LINE_2 (EXTI_GPIO | 0x02u) /*!< External interrupt line 2 */ +#define EXTI_LINE_3 (EXTI_GPIO | 0x03u) /*!< External interrupt line 3 */ +#define EXTI_LINE_4 (EXTI_GPIO | 0x04u) /*!< External interrupt line 4 */ +#define EXTI_LINE_5 (EXTI_GPIO | 0x05u) /*!< External interrupt line 5 */ +#define EXTI_LINE_6 (EXTI_GPIO | 0x06u) /*!< External interrupt line 6 */ +#define EXTI_LINE_7 (EXTI_GPIO | 0x07u) /*!< External interrupt line 7 */ +#define EXTI_LINE_8 (EXTI_GPIO | 0x08u) /*!< External interrupt line 8 */ +#define EXTI_LINE_9 (EXTI_GPIO | 0x09u) /*!< External interrupt line 9 */ +#define EXTI_LINE_10 (EXTI_GPIO | 0x0Au) /*!< External interrupt line 10 */ +#define EXTI_LINE_11 (EXTI_GPIO | 0x0Bu) /*!< External interrupt line 11 */ +#define EXTI_LINE_12 (EXTI_GPIO | 0x0Cu) /*!< External interrupt line 12 */ +#define EXTI_LINE_13 (EXTI_GPIO | 0x0Du) /*!< External interrupt line 13 */ +#define EXTI_LINE_14 (EXTI_GPIO | 0x0Eu) /*!< External interrupt line 14 */ +#define EXTI_LINE_15 (EXTI_GPIO | 0x0Fu) /*!< External interrupt line 15 */ +#define EXTI_LINE_16 (EXTI_CONFIG | 0x10u) /*!< External interrupt line 16 Connected to the PVD Output */ +#define EXTI_LINE_17 (EXTI_CONFIG | 0x11u) /*!< External interrupt line 17 Connected to the RTC Alarm event */ +#define EXTI_LINE_18 (EXTI_CONFIG | 0x12u) /*!< External interrupt 
line 18 Connected to the USB OTG FS Wakeup from suspend event */ +#if defined(ETH) +#define EXTI_LINE_19 (EXTI_CONFIG | 0x13u) /*!< External interrupt line 19 Connected to the Ethernet Wakeup event */ +#else +#define EXTI_LINE_19 (EXTI_RESERVED | 0x13u) /*!< No interrupt supported in this line */ +#endif /* ETH */ +#define EXTI_LINE_20 (EXTI_CONFIG | 0x14u) /*!< External interrupt line 20 Connected to the USB OTG HS (configured in FS) Wakeup event */ +#define EXTI_LINE_21 (EXTI_CONFIG | 0x15u) /*!< External interrupt line 21 Connected to the RTC Tamper and Time Stamp events */ +#define EXTI_LINE_22 (EXTI_CONFIG | 0x16u) /*!< External interrupt line 22 Connected to the RTC Wakeup event */ +#define EXTI_LINE_23 (EXTI_CONFIG | 0x17u) /*!< External interrupt line 23 Connected to the LPTIM Wakeup event */ +#if defined(EXTI_IMR_IM24) +#define EXTI_LINE_24 (EXTI_CONFIG | 0x18u) /*!< External interrupt line 24 Connected to the MDIO Slave global Interrupt Wakeup event */ +#endif /* EXTI_IMR_IM24 */ +/** + * @} + */ + +/** @defgroup EXTI_Mode EXTI Mode + * @{ + */ +#define EXTI_MODE_NONE 0x00000000u +#define EXTI_MODE_INTERRUPT 0x00000001u +#define EXTI_MODE_EVENT 0x00000002u +/** + * @} + */ + +/** @defgroup EXTI_Trigger EXTI Trigger + * @{ + */ + +#define EXTI_TRIGGER_NONE 0x00000000u +#define EXTI_TRIGGER_RISING 0x00000001u +#define EXTI_TRIGGER_FALLING 0x00000002u +#define EXTI_TRIGGER_RISING_FALLING (EXTI_TRIGGER_RISING | EXTI_TRIGGER_FALLING) +/** + * @} + */ + +/** @defgroup EXTI_GPIOSel EXTI GPIOSel + * @brief + * @{ + */ +#define EXTI_GPIOA 0x00000000u +#define EXTI_GPIOB 0x00000001u +#define EXTI_GPIOC 0x00000002u +#define EXTI_GPIOD 0x00000003u +#define EXTI_GPIOE 0x00000004u +#define EXTI_GPIOF 0x00000005u +#define EXTI_GPIOG 0x00000006u +#define EXTI_GPIOH 0x00000007u +#define EXTI_GPIOI 0x00000008u +#define EXTI_GPIOJ 0x00000009u +#if defined (GPIOK) +#define EXTI_GPIOK 0x0000000Au +#endif /* GPIOK */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup EXTI_Exported_Macros EXTI Exported Macros + * @{ + */ + +/** + * @} + */ + +/* Private constants --------------------------------------------------------*/ +/** @defgroup EXTI_Private_Constants EXTI Private Constants + * @{ + */ +/** + * @brief EXTI Line property definition + */ +#define EXTI_PROPERTY_SHIFT 24u +#define EXTI_CONFIG (0x02uL << EXTI_PROPERTY_SHIFT) +#define EXTI_GPIO ((0x04uL << EXTI_PROPERTY_SHIFT) | EXTI_CONFIG) +#define EXTI_RESERVED (0x08uL << EXTI_PROPERTY_SHIFT) +#define EXTI_PROPERTY_MASK (EXTI_CONFIG | EXTI_GPIO) + +/** + * @brief EXTI bit usage + */ +#define EXTI_PIN_MASK 0x0000001Fu + +/** + * @brief EXTI Mask for interrupt & event mode + */ +#define EXTI_MODE_MASK (EXTI_MODE_EVENT | EXTI_MODE_INTERRUPT) + +/** + * @brief EXTI Mask for trigger possibilities + */ +#define EXTI_TRIGGER_MASK (EXTI_TRIGGER_RISING | EXTI_TRIGGER_FALLING) + +/** + * @brief EXTI Line number + */ +#if defined(EXTI_IMR_IM24) +#define EXTI_LINE_NB 25u +#else +#define EXTI_LINE_NB 24u +#endif /* EXTI_IMR_IM24 */ + + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup EXTI_Private_Macros EXTI Private Macros + * @{ + */ +#define IS_EXTI_LINE(__LINE__) ((((__LINE__) & ~(EXTI_PROPERTY_MASK | EXTI_PIN_MASK)) == 0x00u) && \ + ((((__LINE__) & EXTI_PROPERTY_MASK) == EXTI_CONFIG) || \ + (((__LINE__) & EXTI_PROPERTY_MASK) == EXTI_GPIO)) && \ + (((__LINE__) & EXTI_PIN_MASK) < EXTI_LINE_NB)) + +#define IS_EXTI_MODE(__LINE__) 
((((__LINE__) & EXTI_MODE_MASK) != 0x00u) && \ + (((__LINE__) & ~EXTI_MODE_MASK) == 0x00u)) + +#define IS_EXTI_TRIGGER(__LINE__) (((__LINE__) & ~EXTI_TRIGGER_MASK) == 0x00u) + +#define IS_EXTI_PENDING_EDGE(__LINE__) (((__LINE__) == EXTI_TRIGGER_FALLING) || \ + ((__LINE__) == EXTI_TRIGGER_RISING) || \ + ((__LINE__) == EXTI_TRIGGER_RISING_FALLING)) + +#define IS_EXTI_CONFIG_LINE(__LINE__) (((__LINE__) & EXTI_CONFIG) != 0x00u) + +#if defined (GPIOK) +#define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ + ((__PORT__) == EXTI_GPIOB) || \ + ((__PORT__) == EXTI_GPIOC) || \ + ((__PORT__) == EXTI_GPIOD) || \ + ((__PORT__) == EXTI_GPIOE) || \ + ((__PORT__) == EXTI_GPIOF) || \ + ((__PORT__) == EXTI_GPIOG) || \ + ((__PORT__) == EXTI_GPIOH) || \ + ((__PORT__) == EXTI_GPIOI) || \ + ((__PORT__) == EXTI_GPIOJ) || \ + ((__PORT__) == EXTI_GPIOK)) +#else +#define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ + ((__PORT__) == EXTI_GPIOB) || \ + ((__PORT__) == EXTI_GPIOC) || \ + ((__PORT__) == EXTI_GPIOD) || \ + ((__PORT__) == EXTI_GPIOE) || \ + ((__PORT__) == EXTI_GPIOF) || \ + ((__PORT__) == EXTI_GPIOG) || \ + ((__PORT__) == EXTI_GPIOH) || \ + ((__PORT__) == EXTI_GPIOI) || \ + ((__PORT__) == EXTI_GPIOJ)) +#endif /* GPIOK */ + +#define IS_EXTI_GPIO_PIN(__PIN__) ((__PIN__) < 16U) +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup EXTI_Exported_Functions EXTI Exported Functions + * @brief EXTI Exported Functions + * @{ + */ + +/** @defgroup EXTI_Exported_Functions_Group1 Configuration functions + * @brief Configuration functions + * @{ + */ +/* Configuration functions ****************************************************/ +HAL_StatusTypeDef HAL_EXTI_SetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig); +HAL_StatusTypeDef HAL_EXTI_GetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig); +HAL_StatusTypeDef HAL_EXTI_ClearConfigLine(EXTI_HandleTypeDef *hexti); +HAL_StatusTypeDef HAL_EXTI_RegisterCallback(EXTI_HandleTypeDef *hexti, EXTI_CallbackIDTypeDef CallbackID, void (*pPendingCbfn)(void)); +HAL_StatusTypeDef HAL_EXTI_GetHandle(EXTI_HandleTypeDef *hexti, uint32_t ExtiLine); +/** + * @} + */ + +/** @defgroup EXTI_Exported_Functions_Group2 IO operation functions + * @brief IO operation functions + * @{ + */ +/* IO operation functions *****************************************************/ +void HAL_EXTI_IRQHandler(EXTI_HandleTypeDef *hexti); +uint32_t HAL_EXTI_GetPending(EXTI_HandleTypeDef *hexti, uint32_t Edge); +void HAL_EXTI_ClearPending(EXTI_HandleTypeDef *hexti, uint32_t Edge); +void HAL_EXTI_GenerateSWI(EXTI_HandleTypeDef *hexti); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_EXTI_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash.h new file mode 100644 index 000000000..ed073e939 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash.h @@ -0,0 +1,418 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_flash.h + * @author MCD Application Team + * @brief Header file of FLASH HAL module. 
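Illustrative usage note (not part of the vendor file): the handle-based EXTI API declared in stm32f7xx_hal_exti.h above is driven through EXTI_ConfigTypeDef plus HAL_EXTI_SetConfigLine. A minimal sketch follows; the choice of PC13 on line 13, the callback name, and the NVIC wiring are illustrative assumptions, and the pin must still be configured as a GPIO input with its port clock enabled.

/* Minimal sketch: falling-edge interrupt on EXTI line 13 routed from port C. */
#include "stm32f7xx_hal.h"

static EXTI_HandleTypeDef hexti13;

static void button_pressed(void) { /* application callback */ }

void exti13_setup(void)
{
    EXTI_ConfigTypeDef cfg = {
        .Line    = EXTI_LINE_13,
        .Mode    = EXTI_MODE_INTERRUPT,
        .Trigger = EXTI_TRIGGER_FALLING,
        .GPIOSel = EXTI_GPIOC,            /* multiplex PC13 onto line 13 */
    };
    HAL_EXTI_GetHandle(&hexti13, EXTI_LINE_13);
    HAL_EXTI_SetConfigLine(&hexti13, &cfg);
    HAL_EXTI_RegisterCallback(&hexti13, HAL_EXTI_COMMON_CB_ID, button_pressed);
    /* The line still has to be enabled in the NVIC (EXTI15_10_IRQn on this
       family), and HAL_EXTI_IRQHandler(&hexti13) called from that ISR. */
}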
+ ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_FLASH_H +#define __STM32F7xx_HAL_FLASH_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup FLASH + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Types FLASH Exported Types + * @{ + */ + +/** + * @brief FLASH Procedure structure definition + */ +typedef enum +{ + FLASH_PROC_NONE = 0U, + FLASH_PROC_SECTERASE, + FLASH_PROC_MASSERASE, + FLASH_PROC_PROGRAM +} FLASH_ProcedureTypeDef; + + +/** + * @brief FLASH handle Structure definition + */ +typedef struct +{ + __IO FLASH_ProcedureTypeDef ProcedureOnGoing; /* Internal variable to indicate which procedure is ongoing or not in IT context */ + + __IO uint32_t NbSectorsToErase; /* Internal variable to save the remaining sectors to erase in IT context */ + + __IO uint8_t VoltageForErase; /* Internal variable to provide voltage range selected by user in IT context */ + + __IO uint32_t Sector; /* Internal variable to define the current sector which is erasing */ + + __IO uint32_t Address; /* Internal variable to save address selected for program */ + + HAL_LockTypeDef Lock; /* FLASH locking object */ + + __IO uint32_t ErrorCode; /* FLASH error code */ + +}FLASH_ProcessTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Constants FLASH Exported Constants + * @{ + */ + +/** @defgroup FLASH_Error_Code FLASH Error Code + * @brief FLASH Error Code + * @{ + */ +#define HAL_FLASH_ERROR_NONE ((uint32_t)0x00000000U) /*!< No error */ +#define HAL_FLASH_ERROR_ERS ((uint32_t)0x00000002U) /*!< Programming Sequence error */ +#define HAL_FLASH_ERROR_PGP ((uint32_t)0x00000004U) /*!< Programming Parallelism error */ +#define HAL_FLASH_ERROR_PGA ((uint32_t)0x00000008U) /*!< Programming Alignment error */ +#define HAL_FLASH_ERROR_WRP ((uint32_t)0x00000010U) /*!< Write protection error */ +#define HAL_FLASH_ERROR_OPERATION ((uint32_t)0x00000020U) /*!< Operation Error */ +#define HAL_FLASH_ERROR_RD ((uint32_t)0x00000040U) /*!< Read Protection Error */ +/** + * @} + */ + +/** @defgroup FLASH_Type_Program FLASH Type Program + * @{ + */ +#define FLASH_TYPEPROGRAM_BYTE ((uint32_t)0x00U) /*!< Program byte (8-bit) at a specified address */ +#define FLASH_TYPEPROGRAM_HALFWORD ((uint32_t)0x01U) /*!< Program a half-word (16-bit) at a specified address */ +#define FLASH_TYPEPROGRAM_WORD ((uint32_t)0x02U) /*!< Program a word (32-bit) at a specified address */ +#define FLASH_TYPEPROGRAM_DOUBLEWORD ((uint32_t)0x03U) /*!< Program a double word (64-bit) at a specified address */ +/** + * @} + */ + +/** @defgroup FLASH_Flag_definition FLASH Flag definition + * @brief Flag definition + * @{ + */ +#define FLASH_FLAG_EOP FLASH_SR_EOP /*!< FLASH End of Operation flag */ +#define FLASH_FLAG_OPERR FLASH_SR_OPERR /*!< FLASH operation Error flag */ +#define FLASH_FLAG_WRPERR FLASH_SR_WRPERR /*!< FLASH Write 
protected error flag */ +#define FLASH_FLAG_PGAERR FLASH_SR_PGAERR /*!< FLASH Programming Alignment error flag */ +#define FLASH_FLAG_PGPERR FLASH_SR_PGPERR /*!< FLASH Programming Parallelism error flag */ +#define FLASH_FLAG_ERSERR FLASH_SR_ERSERR /*!< FLASH Erasing Sequence error flag */ +#define FLASH_FLAG_BSY FLASH_SR_BSY /*!< FLASH Busy flag */ + +#if defined (FLASH_OPTCR2_PCROP) +#define FLASH_FLAG_RDERR FLASH_SR_RDERR /*!< FLASH Read protection error flag */ +#define FLASH_FLAG_ALL_ERRORS (FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_ERSERR | FLASH_FLAG_RDERR) +#else +#define FLASH_FLAG_ALL_ERRORS (FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_ERSERR) +#endif /* FLASH_OPTCR2_PCROP */ +/** + * @} + */ + +/** @defgroup FLASH_Interrupt_definition FLASH Interrupt definition + * @brief FLASH Interrupt definition + * @{ + */ +#define FLASH_IT_EOP FLASH_CR_EOPIE /*!< End of FLASH Operation Interrupt source */ +#define FLASH_IT_ERR ((uint32_t)0x02000000U) /*!< Error Interrupt source */ +/** + * @} + */ + +/** @defgroup FLASH_Program_Parallelism FLASH Program Parallelism + * @{ + */ +#define FLASH_PSIZE_BYTE ((uint32_t)0x00000000U) +#define FLASH_PSIZE_HALF_WORD ((uint32_t)FLASH_CR_PSIZE_0) +#define FLASH_PSIZE_WORD ((uint32_t)FLASH_CR_PSIZE_1) +#define FLASH_PSIZE_DOUBLE_WORD ((uint32_t)FLASH_CR_PSIZE) +#define CR_PSIZE_MASK ((uint32_t)0xFFFFFCFFU) +/** + * @} + */ + +/** @defgroup FLASH_Keys FLASH Keys + * @{ + */ +#define FLASH_KEY1 ((uint32_t)0x45670123U) +#define FLASH_KEY2 ((uint32_t)0xCDEF89ABU) +#define FLASH_OPT_KEY1 ((uint32_t)0x08192A3BU) +#define FLASH_OPT_KEY2 ((uint32_t)0x4C5D6E7FU) +/** + * @} + */ + +/** @defgroup FLASH_Sectors FLASH Sectors + * @{ + */ +#if (FLASH_SECTOR_TOTAL == 2) +#define FLASH_SECTOR_0 ((uint32_t)0U) /*!< Sector Number 0 */ +#define FLASH_SECTOR_1 ((uint32_t)1U) /*!< Sector Number 1 */ +#elif (FLASH_SECTOR_TOTAL == 4) +#define FLASH_SECTOR_0 ((uint32_t)0U) /*!< Sector Number 0 */ +#define FLASH_SECTOR_1 ((uint32_t)1U) /*!< Sector Number 1 */ +#define FLASH_SECTOR_2 ((uint32_t)2U) /*!< Sector Number 2 */ +#define FLASH_SECTOR_3 ((uint32_t)3U) /*!< Sector Number 3 */ +#else +#define FLASH_SECTOR_0 ((uint32_t)0U) /*!< Sector Number 0 */ +#define FLASH_SECTOR_1 ((uint32_t)1U) /*!< Sector Number 1 */ +#define FLASH_SECTOR_2 ((uint32_t)2U) /*!< Sector Number 2 */ +#define FLASH_SECTOR_3 ((uint32_t)3U) /*!< Sector Number 3 */ +#define FLASH_SECTOR_4 ((uint32_t)4U) /*!< Sector Number 4 */ +#define FLASH_SECTOR_5 ((uint32_t)5U) /*!< Sector Number 5 */ +#define FLASH_SECTOR_6 ((uint32_t)6U) /*!< Sector Number 6 */ +#define FLASH_SECTOR_7 ((uint32_t)7U) /*!< Sector Number 7 */ +#endif /* FLASH_SECTOR_TOTAL */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Macros FLASH Exported Macros + * @{ + */ +/** + * @brief Set the FLASH Latency. + * @param __LATENCY__ FLASH Latency + * The value of this parameter depend on device used within the same series + * @retval none + */ +#define __HAL_FLASH_SET_LATENCY(__LATENCY__) \ + MODIFY_REG(FLASH->ACR, FLASH_ACR_LATENCY, (uint32_t)(__LATENCY__)) + +/** + * @brief Get the FLASH Latency. + * @retval FLASH Latency + * The value of this parameter depend on device used within the same series + */ +#define __HAL_FLASH_GET_LATENCY() (READ_BIT((FLASH->ACR), FLASH_ACR_LATENCY)) + +/** + * @brief Enable the FLASH prefetch buffer. 
+ * @retval none + */ +#define __HAL_FLASH_PREFETCH_BUFFER_ENABLE() (FLASH->ACR |= FLASH_ACR_PRFTEN) + +/** + * @brief Disable the FLASH prefetch buffer. + * @retval none + */ +#define __HAL_FLASH_PREFETCH_BUFFER_DISABLE() (FLASH->ACR &= (~FLASH_ACR_PRFTEN)) + +/** + * @brief Enable the FLASH Adaptive Real-Time memory accelerator. + * @note The ART accelerator is available only for flash access on ITCM interface. + * @retval none + */ +#define __HAL_FLASH_ART_ENABLE() SET_BIT(FLASH->ACR, FLASH_ACR_ARTEN) + +/** + * @brief Disable the FLASH Adaptive Real-Time memory accelerator. + * @retval none + */ +#define __HAL_FLASH_ART_DISABLE() CLEAR_BIT(FLASH->ACR, FLASH_ACR_ARTEN) + +/** + * @brief Resets the FLASH Adaptive Real-Time memory accelerator. + * @note This function must be used only when the Adaptive Real-Time memory accelerator + * is disabled. + * @retval None + */ +#define __HAL_FLASH_ART_RESET() (FLASH->ACR |= FLASH_ACR_ARTRST) + +/** + * @brief Enable the specified FLASH interrupt. + * @param __INTERRUPT__ FLASH interrupt + * This parameter can be any combination of the following values: + * @arg FLASH_IT_EOP: End of FLASH Operation Interrupt + * @arg FLASH_IT_ERR: Error Interrupt + * @retval none + */ +#define __HAL_FLASH_ENABLE_IT(__INTERRUPT__) (FLASH->CR |= (__INTERRUPT__)) + +/** + * @brief Disable the specified FLASH interrupt. + * @param __INTERRUPT__ FLASH interrupt + * This parameter can be any combination of the following values: + * @arg FLASH_IT_EOP: End of FLASH Operation Interrupt + * @arg FLASH_IT_ERR: Error Interrupt + * @retval none + */ +#define __HAL_FLASH_DISABLE_IT(__INTERRUPT__) (FLASH->CR &= ~(uint32_t)(__INTERRUPT__)) + +/** + * @brief Get the specified FLASH flag status. + * @param __FLAG__ specifies the FLASH flag to check. + * This parameter can be one of the following values: + * @arg FLASH_FLAG_EOP : FLASH End of Operation flag + * @arg FLASH_FLAG_OPERR : FLASH operation Error flag + * @arg FLASH_FLAG_WRPERR: FLASH Write protected error flag + * @arg FLASH_FLAG_PGAERR: FLASH Programming Alignment error flag + * @arg FLASH_FLAG_PGPERR: FLASH Programming Parallelism error flag + * @arg FLASH_FLAG_ERSERR : FLASH Erasing Sequence error flag + * @arg FLASH_FLAG_BSY : FLASH Busy flag + * @retval The new state of __FLAG__ (SET or RESET). + */ +#define __HAL_FLASH_GET_FLAG(__FLAG__) ((FLASH->SR & (__FLAG__))) + +/** + * @brief Clear the specified FLASH flag. + * @param __FLAG__ specifies the FLASH flags to clear. 
+ * This parameter can be any combination of the following values: + * @arg FLASH_FLAG_EOP : FLASH End of Operation flag + * @arg FLASH_FLAG_OPERR : FLASH operation Error flag + * @arg FLASH_FLAG_WRPERR: FLASH Write protected error flag + * @arg FLASH_FLAG_PGAERR: FLASH Programming Alignment error flag + * @arg FLASH_FLAG_PGPERR: FLASH Programming Parallelism error flag + * @arg FLASH_FLAG_ERSERR : FLASH Erasing Sequence error flag + * @retval none + */ +#define __HAL_FLASH_CLEAR_FLAG(__FLAG__) (FLASH->SR = (__FLAG__)) +/** + * @} + */ + +/* Include FLASH HAL Extension module */ +#include "stm32f7xx_hal_flash_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup FLASH_Exported_Functions + * @{ + */ +/** @addtogroup FLASH_Exported_Functions_Group1 + * @{ + */ +/* Program operation functions ***********************************************/ +HAL_StatusTypeDef HAL_FLASH_Program(uint32_t TypeProgram, uint32_t Address, uint64_t Data); +HAL_StatusTypeDef HAL_FLASH_Program_IT(uint32_t TypeProgram, uint32_t Address, uint64_t Data); +/* FLASH IRQ handler method */ +void HAL_FLASH_IRQHandler(void); +/* Callbacks in non blocking modes */ +void HAL_FLASH_EndOfOperationCallback(uint32_t ReturnValue); +void HAL_FLASH_OperationErrorCallback(uint32_t ReturnValue); +/** + * @} + */ + +/** @addtogroup FLASH_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions **********************************************/ +HAL_StatusTypeDef HAL_FLASH_Unlock(void); +HAL_StatusTypeDef HAL_FLASH_Lock(void); +HAL_StatusTypeDef HAL_FLASH_OB_Unlock(void); +HAL_StatusTypeDef HAL_FLASH_OB_Lock(void); +/* Option bytes control */ +HAL_StatusTypeDef HAL_FLASH_OB_Launch(void); +/** + * @} + */ + +/** @addtogroup FLASH_Exported_Functions_Group3 + * @{ + */ +/* Peripheral State functions ************************************************/ +uint32_t HAL_FLASH_GetError(void); +HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup FLASH_Private_Variables FLASH Private Variables + * @{ + */ + +/** + * @} + */ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup FLASH_Private_Constants FLASH Private Constants + * @{ + */ + +/** + * @brief OPTCR register byte 1 (Bits[15:8]) base address + */ +#define OPTCR_BYTE1_ADDRESS ((uint32_t)0x40023C15) + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup FLASH_Private_Macros FLASH Private Macros + * @{ + */ + +/** @defgroup FLASH_IS_FLASH_Definitions FLASH Private macros to check input parameters + * @{ + */ +#define IS_FLASH_TYPEPROGRAM(VALUE)(((VALUE) == FLASH_TYPEPROGRAM_BYTE) || \ + ((VALUE) == FLASH_TYPEPROGRAM_HALFWORD) || \ + ((VALUE) == FLASH_TYPEPROGRAM_WORD) || \ + ((VALUE) == FLASH_TYPEPROGRAM_DOUBLEWORD)) +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup FLASH_Private_Functions FLASH Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_FLASH_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git 
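Illustrative usage note (not part of the vendor file): HAL_FLASH_Program from stm32f7xx_hal_flash.h above is normally bracketed by HAL_FLASH_Unlock/HAL_FLASH_Lock. The following single-word sketch assumes the destination address lies in an already-erased sector; the wrapper name is a placeholder.

/* Minimal sketch: program one 32-bit word into internal flash. */
#include "stm32f7xx_hal.h"

HAL_StatusTypeDef flash_write_word(uint32_t address, uint32_t value)
{
    HAL_StatusTypeDef status;

    status = HAL_FLASH_Unlock();          /* open the FLASH control register */
    if (status != HAL_OK) {
        return status;
    }
    status = HAL_FLASH_Program(FLASH_TYPEPROGRAM_WORD, address, (uint64_t)value);
    (void)HAL_FLASH_Lock();               /* always re-lock, even on error   */
    return status;
}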
a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash_ex.h new file mode 100644 index 000000000..572d7e500 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash_ex.h @@ -0,0 +1,700 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_flash_ex.h + * @author MCD Application Team + * @brief Header file of FLASH HAL Extension module. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_FLASH_EX_H +#define __STM32F7xx_HAL_FLASH_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup FLASHEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup FLASHEx_Exported_Types FLASH Exported Types + * @{ + */ + +/** + * @brief FLASH Erase structure definition + */ +typedef struct +{ + uint32_t TypeErase; /*!< Mass erase or sector Erase. + This parameter can be a value of @ref FLASHEx_Type_Erase */ + +#if defined (FLASH_OPTCR_nDBANK) + uint32_t Banks; /*!< Select banks to erase when Mass erase is enabled. + This parameter must be a value of @ref FLASHEx_Banks */ +#endif /* FLASH_OPTCR_nDBANK */ + + uint32_t Sector; /*!< Initial FLASH sector to erase when Mass erase is disabled + This parameter must be a value of @ref FLASHEx_Sectors */ + + uint32_t NbSectors; /*!< Number of sectors to be erased. + This parameter must be a value between 1 and (max number of sectors - value of Initial sector)*/ + + uint32_t VoltageRange;/*!< The device voltage range which defines the erase parallelism + This parameter must be a value of @ref FLASHEx_Voltage_Range */ + +} FLASH_EraseInitTypeDef; + +/** + * @brief FLASH Option Bytes Program structure definition + */ +typedef struct +{ + uint32_t OptionType; /*!< Option byte to be configured. + This parameter can be a value of @ref FLASHEx_Option_Type */ + + uint32_t WRPState; /*!< Write protection activation or deactivation. + This parameter can be a value of @ref FLASHEx_WRP_State */ + + uint32_t WRPSector; /*!< Specifies the sector(s) to be write protected. + The value of this parameter depend on device used within the same series */ + + uint32_t RDPLevel; /*!< Set the read protection level. + This parameter can be a value of @ref FLASHEx_Option_Bytes_Read_Protection */ + + uint32_t BORLevel; /*!< Set the BOR Level. + This parameter can be a value of @ref FLASHEx_BOR_Reset_Level */ + + uint32_t USERConfig; /*!< Program the FLASH User Option Byte: WWDG_SW / IWDG_SW / RST_STOP / RST_STDBY / + IWDG_FREEZE_STOP / IWDG_FREEZE_SANDBY / nDBANK / nDBOOT. + nDBANK / nDBOOT are only available for STM32F76xxx/STM32F77xxx devices */ + + uint32_t BootAddr0; /*!< Boot base address when Boot pin = 0. + This parameter can be a value of @ref FLASHEx_Boot_Address */ + + uint32_t BootAddr1; /*!< Boot base address when Boot pin = 1. + This parameter can be a value of @ref FLASHEx_Boot_Address */ + +#if defined (FLASH_OPTCR2_PCROP) + uint32_t PCROPSector; /*!< Set the PCROP sector. + This parameter can be a value of @ref FLASHEx_Option_Bytes_PCROP_Sectors */ + + uint32_t PCROPRdp; /*!< Set the PCROP_RDP option. 
+ This parameter can be a value of @ref FLASHEx_Option_Bytes_PCROP_RDP */ +#endif /* FLASH_OPTCR2_PCROP */ + +} FLASH_OBProgramInitTypeDef; + +/** + * @} + */ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup FLASHEx_Exported_Constants FLASH Exported Constants + * @{ + */ + +/** @defgroup FLASHEx_Type_Erase FLASH Type Erase + * @{ + */ +#define FLASH_TYPEERASE_SECTORS ((uint32_t)0x00U) /*!< Sectors erase only */ +#define FLASH_TYPEERASE_MASSERASE ((uint32_t)0x01U) /*!< Flash Mass erase activation */ +/** + * @} + */ + +/** @defgroup FLASHEx_Voltage_Range FLASH Voltage Range + * @{ + */ +#define FLASH_VOLTAGE_RANGE_1 ((uint32_t)0x00U) /*!< Device operating range: 1.8V to 2.1V */ +#define FLASH_VOLTAGE_RANGE_2 ((uint32_t)0x01U) /*!< Device operating range: 2.1V to 2.7V */ +#define FLASH_VOLTAGE_RANGE_3 ((uint32_t)0x02U) /*!< Device operating range: 2.7V to 3.6V */ +#define FLASH_VOLTAGE_RANGE_4 ((uint32_t)0x03U) /*!< Device operating range: 2.7V to 3.6V + External Vpp */ +/** + * @} + */ + +/** @defgroup FLASHEx_WRP_State FLASH WRP State + * @{ + */ +#define OB_WRPSTATE_DISABLE ((uint32_t)0x00U) /*!< Disable the write protection of the desired bank 1 sectors */ +#define OB_WRPSTATE_ENABLE ((uint32_t)0x01U) /*!< Enable the write protection of the desired bank 1 sectors */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Type FLASH Option Type + * @{ + */ +#define OPTIONBYTE_WRP ((uint32_t)0x01U) /*!< WRP option byte configuration */ +#define OPTIONBYTE_RDP ((uint32_t)0x02U) /*!< RDP option byte configuration */ +#define OPTIONBYTE_USER ((uint32_t)0x04U) /*!< USER option byte configuration */ +#define OPTIONBYTE_BOR ((uint32_t)0x08U) /*!< BOR option byte configuration */ +#define OPTIONBYTE_BOOTADDR_0 ((uint32_t)0x10U) /*!< Boot 0 Address configuration */ +#define OPTIONBYTE_BOOTADDR_1 ((uint32_t)0x20U) /*!< Boot 1 Address configuration */ +#if defined (FLASH_OPTCR2_PCROP) +#define OPTIONBYTE_PCROP ((uint32_t)0x40U) /*!< PCROP configuration */ +#define OPTIONBYTE_PCROP_RDP ((uint32_t)0x80U) /*!< PCROP_RDP configuration */ +#endif /* FLASH_OPTCR2_PCROP */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_Read_Protection FLASH Option Bytes Read Protection + * @{ + */ +#define OB_RDP_LEVEL_0 ((uint8_t)0xAAU) +#define OB_RDP_LEVEL_1 ((uint8_t)0x55U) +#define OB_RDP_LEVEL_2 ((uint8_t)0xCCU) /*!< Warning: When enabling read protection level 2 + it s no more possible to go back to level 1 or 0 */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_WWatchdog FLASH Option Bytes WWatchdog + * @{ + */ +#define OB_WWDG_SW ((uint32_t)0x10U) /*!< Software WWDG selected */ +#define OB_WWDG_HW ((uint32_t)0x00U) /*!< Hardware WWDG selected */ +/** + * @} + */ + + +/** @defgroup FLASHEx_Option_Bytes_IWatchdog FLASH Option Bytes IWatchdog + * @{ + */ +#define OB_IWDG_SW ((uint32_t)0x20U) /*!< Software IWDG selected */ +#define OB_IWDG_HW ((uint32_t)0x00U) /*!< Hardware IWDG selected */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_nRST_STOP FLASH Option Bytes nRST_STOP + * @{ + */ +#define OB_STOP_NO_RST ((uint32_t)0x40U) /*!< No reset generated when entering in STOP */ +#define OB_STOP_RST ((uint32_t)0x00U) /*!< Reset generated when entering in STOP */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_nRST_STDBY FLASH Option Bytes nRST_STDBY + * @{ + */ +#define OB_STDBY_NO_RST ((uint32_t)0x80U) /*!< No reset generated when entering in STANDBY */ +#define OB_STDBY_RST ((uint32_t)0x00U) /*!< Reset generated when entering in STANDBY */ +/** + 
* @} + */ + +/** @defgroup FLASHEx_Option_Bytes_IWDG_FREEZE_STOP FLASH IWDG Counter Freeze in STOP + * @{ + */ +#define OB_IWDG_STOP_FREEZE ((uint32_t)0x00000000U) /*!< Freeze IWDG counter in STOP mode */ +#define OB_IWDG_STOP_ACTIVE ((uint32_t)0x80000000U) /*!< IWDG counter active in STOP mode */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_IWDG_FREEZE_SANDBY FLASH IWDG Counter Freeze in STANDBY + * @{ + */ +#define OB_IWDG_STDBY_FREEZE ((uint32_t)0x00000000U) /*!< Freeze IWDG counter in STANDBY mode */ +#define OB_IWDG_STDBY_ACTIVE ((uint32_t)0x40000000U) /*!< IWDG counter active in STANDBY mode */ +/** + * @} + */ + +/** @defgroup FLASHEx_BOR_Reset_Level FLASH BOR Reset Level + * @{ + */ +#define OB_BOR_LEVEL3 ((uint32_t)0x00U) /*!< Supply voltage ranges from 2.70 to 3.60 V */ +#define OB_BOR_LEVEL2 ((uint32_t)0x04U) /*!< Supply voltage ranges from 2.40 to 2.70 V */ +#define OB_BOR_LEVEL1 ((uint32_t)0x08U) /*!< Supply voltage ranges from 2.10 to 2.40 V */ +#define OB_BOR_OFF ((uint32_t)0x0CU) /*!< Supply voltage ranges from 1.62 to 2.10 V */ +/** + * @} + */ + +#if defined (FLASH_OPTCR_nDBOOT) +/** @defgroup FLASHEx_Option_Bytes_nDBOOT FLASH Option Bytes nDBOOT + * @{ + */ +#define OB_DUAL_BOOT_DISABLE ((uint32_t)0x10000000U) /* !< Dual Boot disable. Boot according to boot address option */ +#define OB_DUAL_BOOT_ENABLE ((uint32_t)0x00000000U) /* !< Dual Boot enable. Boot always from system memory if boot address in flash + (Dual bank Boot mode), or RAM if Boot address option in RAM */ +/** + * @} + */ +#endif /* FLASH_OPTCR_nDBOOT */ + +#if defined (FLASH_OPTCR_nDBANK) +/** @defgroup FLASHEx_Option_Bytes_nDBank FLASH Single Bank or Dual Bank + * @{ + */ +#define OB_NDBANK_SINGLE_BANK ((uint32_t)0x20000000U) /*!< NDBANK bit is set : Single Bank mode */ +#define OB_NDBANK_DUAL_BANK ((uint32_t)0x00000000U) /*!< NDBANK bit is reset : Dual Bank mode */ +/** + * @} + */ +#endif /* FLASH_OPTCR_nDBANK */ + +/** @defgroup FLASHEx_Boot_Address FLASH Boot Address + * @{ + */ +#define OB_BOOTADDR_ITCM_RAM ((uint32_t)0x0000U) /*!< Boot from ITCM RAM (0x00000000) */ +#define OB_BOOTADDR_SYSTEM ((uint32_t)0x0040U) /*!< Boot from System memory bootloader (0x00100000) */ +#define OB_BOOTADDR_ITCM_FLASH ((uint32_t)0x0080U) /*!< Boot from Flash on ITCM interface (0x00200000) */ +#define OB_BOOTADDR_AXIM_FLASH ((uint32_t)0x2000U) /*!< Boot from Flash on AXIM interface (0x08000000) */ +#define OB_BOOTADDR_DTCM_RAM ((uint32_t)0x8000U) /*!< Boot from DTCM RAM (0x20000000) */ +#define OB_BOOTADDR_SRAM1 ((uint32_t)0x8004U) /*!< Boot from SRAM1 (0x20010000) */ +#if (SRAM2_BASE == 0x2003C000U) +#define OB_BOOTADDR_SRAM2 ((uint32_t)0x800FU) /*!< Boot from SRAM2 (0x2003C000) */ +#else +#define OB_BOOTADDR_SRAM2 ((uint32_t)0x8013U) /*!< Boot from SRAM2 (0x2004C000) */ +#endif /* SRAM2_BASE == 0x2003C000U */ +/** + * @} + */ + +/** @defgroup FLASH_Latency FLASH Latency + * @{ + */ +#define FLASH_LATENCY_0 FLASH_ACR_LATENCY_0WS /*!< FLASH Zero Latency cycle */ +#define FLASH_LATENCY_1 FLASH_ACR_LATENCY_1WS /*!< FLASH One Latency cycle */ +#define FLASH_LATENCY_2 FLASH_ACR_LATENCY_2WS /*!< FLASH Two Latency cycles */ +#define FLASH_LATENCY_3 FLASH_ACR_LATENCY_3WS /*!< FLASH Three Latency cycles */ +#define FLASH_LATENCY_4 FLASH_ACR_LATENCY_4WS /*!< FLASH Four Latency cycles */ +#define FLASH_LATENCY_5 FLASH_ACR_LATENCY_5WS /*!< FLASH Five Latency cycles */ +#define FLASH_LATENCY_6 FLASH_ACR_LATENCY_6WS /*!< FLASH Six Latency cycles */ +#define FLASH_LATENCY_7 FLASH_ACR_LATENCY_7WS /*!< FLASH Seven Latency 
cycles */ +#define FLASH_LATENCY_8 FLASH_ACR_LATENCY_8WS /*!< FLASH Eight Latency cycles */ +#define FLASH_LATENCY_9 FLASH_ACR_LATENCY_9WS /*!< FLASH Nine Latency cycles */ +#define FLASH_LATENCY_10 FLASH_ACR_LATENCY_10WS /*!< FLASH Ten Latency cycles */ +#define FLASH_LATENCY_11 FLASH_ACR_LATENCY_11WS /*!< FLASH Eleven Latency cycles */ +#define FLASH_LATENCY_12 FLASH_ACR_LATENCY_12WS /*!< FLASH Twelve Latency cycles */ +#define FLASH_LATENCY_13 FLASH_ACR_LATENCY_13WS /*!< FLASH Thirteen Latency cycles */ +#define FLASH_LATENCY_14 FLASH_ACR_LATENCY_14WS /*!< FLASH Fourteen Latency cycles */ +#define FLASH_LATENCY_15 FLASH_ACR_LATENCY_15WS /*!< FLASH Fifteen Latency cycles */ +/** + * @} + */ + +#if defined (FLASH_OPTCR_nDBANK) +/** @defgroup FLASHEx_Banks FLASH Banks + * @{ + */ +#define FLASH_BANK_1 ((uint32_t)0x01U) /*!< Bank 1 */ +#define FLASH_BANK_2 ((uint32_t)0x02U) /*!< Bank 2 */ +#define FLASH_BANK_BOTH ((uint32_t)(FLASH_BANK_1 | FLASH_BANK_2)) /*!< Bank1 and Bank2 */ +/** + * @} + */ +#endif /* FLASH_OPTCR_nDBANK */ + +/** @defgroup FLASHEx_MassErase_bit FLASH Mass Erase bit + * @{ + */ +#if defined (FLASH_OPTCR_nDBANK) +#define FLASH_MER_BIT (FLASH_CR_MER1 | FLASH_CR_MER2) /*!< 2 MER bits */ +#else +#define FLASH_MER_BIT (FLASH_CR_MER) /*!< only 1 MER bit */ +#endif /* FLASH_OPTCR_nDBANK */ +/** + * @} + */ + +/** @defgroup FLASHEx_Sectors FLASH Sectors + * @{ + */ +#if (FLASH_SECTOR_TOTAL == 24) +#define FLASH_SECTOR_8 ((uint32_t)8U) /*!< Sector Number 8 */ +#define FLASH_SECTOR_9 ((uint32_t)9U) /*!< Sector Number 9 */ +#define FLASH_SECTOR_10 ((uint32_t)10U) /*!< Sector Number 10 */ +#define FLASH_SECTOR_11 ((uint32_t)11U) /*!< Sector Number 11 */ +#define FLASH_SECTOR_12 ((uint32_t)12U) /*!< Sector Number 12 */ +#define FLASH_SECTOR_13 ((uint32_t)13U) /*!< Sector Number 13 */ +#define FLASH_SECTOR_14 ((uint32_t)14U) /*!< Sector Number 14 */ +#define FLASH_SECTOR_15 ((uint32_t)15U) /*!< Sector Number 15 */ +#define FLASH_SECTOR_16 ((uint32_t)16U) /*!< Sector Number 16 */ +#define FLASH_SECTOR_17 ((uint32_t)17U) /*!< Sector Number 17 */ +#define FLASH_SECTOR_18 ((uint32_t)18U) /*!< Sector Number 18 */ +#define FLASH_SECTOR_19 ((uint32_t)19U) /*!< Sector Number 19 */ +#define FLASH_SECTOR_20 ((uint32_t)20U) /*!< Sector Number 20 */ +#define FLASH_SECTOR_21 ((uint32_t)21U) /*!< Sector Number 21 */ +#define FLASH_SECTOR_22 ((uint32_t)22U) /*!< Sector Number 22 */ +#define FLASH_SECTOR_23 ((uint32_t)23U) /*!< Sector Number 23 */ +#endif /* FLASH_SECTOR_TOTAL == 24 */ +/** + * @} + */ + +#if (FLASH_SECTOR_TOTAL == 24) +/** @defgroup FLASHEx_Option_Bytes_Write_Protection FLASH Option Bytes Write Protection + * @note For Single Bank mode, use OB_WRP_SECTOR_x defines: In fact, in FLASH_OPTCR register, + * nWRP[11:0] bits contain the value of the write-protection option bytes for sectors 0 to 11. + * For Dual Bank mode, use OB_WRP_DB_SECTOR_x defines: In fact, in FLASH_OPTCR register, + * nWRP[11:0] bits are divided on two groups, one group dedicated for bank 1 and + * a second one dedicated for bank 2 (nWRP[i] activates Write protection on sector 2*i and 2*i+1). + * This behavior is applicable only for STM32F76xxx / STM32F77xxx devices. 
+ * @{ + */ +/* Single Bank Sectors */ +#define OB_WRP_SECTOR_0 ((uint32_t)0x00010000U) /*!< Write protection of Single Bank Sector0 */ +#define OB_WRP_SECTOR_1 ((uint32_t)0x00020000U) /*!< Write protection of Single Bank Sector1 */ +#define OB_WRP_SECTOR_2 ((uint32_t)0x00040000U) /*!< Write protection of Single Bank Sector2 */ +#define OB_WRP_SECTOR_3 ((uint32_t)0x00080000U) /*!< Write protection of Single Bank Sector3 */ +#define OB_WRP_SECTOR_4 ((uint32_t)0x00100000U) /*!< Write protection of Single Bank Sector4 */ +#define OB_WRP_SECTOR_5 ((uint32_t)0x00200000U) /*!< Write protection of Single Bank Sector5 */ +#define OB_WRP_SECTOR_6 ((uint32_t)0x00400000U) /*!< Write protection of Single Bank Sector6 */ +#define OB_WRP_SECTOR_7 ((uint32_t)0x00800000U) /*!< Write protection of Single Bank Sector7 */ +#define OB_WRP_SECTOR_8 ((uint32_t)0x01000000U) /*!< Write protection of Single Bank Sector8 */ +#define OB_WRP_SECTOR_9 ((uint32_t)0x02000000U) /*!< Write protection of Single Bank Sector9 */ +#define OB_WRP_SECTOR_10 ((uint32_t)0x04000000U) /*!< Write protection of Single Bank Sector10 */ +#define OB_WRP_SECTOR_11 ((uint32_t)0x08000000U) /*!< Write protection of Single Bank Sector11 */ +#define OB_WRP_SECTOR_All ((uint32_t)0x0FFF0000U) /*!< Write protection of all Sectors for Single Bank Flash */ + +/* Dual Bank Sectors */ +#define OB_WRP_DB_SECTOR_0 ((uint32_t)0x00010000U) /*!< Write protection of Dual Bank Sector0 */ +#define OB_WRP_DB_SECTOR_1 ((uint32_t)0x00010000U) /*!< Write protection of Dual Bank Sector1 */ +#define OB_WRP_DB_SECTOR_2 ((uint32_t)0x00020000U) /*!< Write protection of Dual Bank Sector2 */ +#define OB_WRP_DB_SECTOR_3 ((uint32_t)0x00020000U) /*!< Write protection of Dual Bank Sector3 */ +#define OB_WRP_DB_SECTOR_4 ((uint32_t)0x00040000U) /*!< Write protection of Dual Bank Sector4 */ +#define OB_WRP_DB_SECTOR_5 ((uint32_t)0x00040000U) /*!< Write protection of Dual Bank Sector5 */ +#define OB_WRP_DB_SECTOR_6 ((uint32_t)0x00080000U) /*!< Write protection of Dual Bank Sector6 */ +#define OB_WRP_DB_SECTOR_7 ((uint32_t)0x00080000U) /*!< Write protection of Dual Bank Sector7 */ +#define OB_WRP_DB_SECTOR_8 ((uint32_t)0x00100000U) /*!< Write protection of Dual Bank Sector8 */ +#define OB_WRP_DB_SECTOR_9 ((uint32_t)0x00100000U) /*!< Write protection of Dual Bank Sector9 */ +#define OB_WRP_DB_SECTOR_10 ((uint32_t)0x00200000U) /*!< Write protection of Dual Bank Sector10 */ +#define OB_WRP_DB_SECTOR_11 ((uint32_t)0x00200000U) /*!< Write protection of Dual Bank Sector11 */ +#define OB_WRP_DB_SECTOR_12 ((uint32_t)0x00400000U) /*!< Write protection of Dual Bank Sector12 */ +#define OB_WRP_DB_SECTOR_13 ((uint32_t)0x00400000U) /*!< Write protection of Dual Bank Sector13 */ +#define OB_WRP_DB_SECTOR_14 ((uint32_t)0x00800000U) /*!< Write protection of Dual Bank Sector14 */ +#define OB_WRP_DB_SECTOR_15 ((uint32_t)0x00800000U) /*!< Write protection of Dual Bank Sector15 */ +#define OB_WRP_DB_SECTOR_16 ((uint32_t)0x01000000U) /*!< Write protection of Dual Bank Sector16 */ +#define OB_WRP_DB_SECTOR_17 ((uint32_t)0x01000000U) /*!< Write protection of Dual Bank Sector17 */ +#define OB_WRP_DB_SECTOR_18 ((uint32_t)0x02000000U) /*!< Write protection of Dual Bank Sector18 */ +#define OB_WRP_DB_SECTOR_19 ((uint32_t)0x02000000U) /*!< Write protection of Dual Bank Sector19 */ +#define OB_WRP_DB_SECTOR_20 ((uint32_t)0x04000000U) /*!< Write protection of Dual Bank Sector20 */ +#define OB_WRP_DB_SECTOR_21 ((uint32_t)0x04000000U) /*!< Write protection of Dual Bank Sector21 */ +#define 
OB_WRP_DB_SECTOR_22 ((uint32_t)0x08000000U) /*!< Write protection of Dual Bank Sector22 */ +#define OB_WRP_DB_SECTOR_23 ((uint32_t)0x08000000U) /*!< Write protection of Dual Bank Sector23 */ +#define OB_WRP_DB_SECTOR_All ((uint32_t)0x0FFF0000U) /*!< Write protection of all Sectors for Dual Bank Flash */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 24 */ + +#if (FLASH_SECTOR_TOTAL == 8) +/** @defgroup FLASHEx_Option_Bytes_Write_Protection FLASH Option Bytes Write Protection + * @{ + */ +#define OB_WRP_SECTOR_0 ((uint32_t)0x00010000U) /*!< Write protection of Sector0 */ +#define OB_WRP_SECTOR_1 ((uint32_t)0x00020000U) /*!< Write protection of Sector1 */ +#define OB_WRP_SECTOR_2 ((uint32_t)0x00040000U) /*!< Write protection of Sector2 */ +#define OB_WRP_SECTOR_3 ((uint32_t)0x00080000U) /*!< Write protection of Sector3 */ +#define OB_WRP_SECTOR_4 ((uint32_t)0x00100000U) /*!< Write protection of Sector4 */ +#define OB_WRP_SECTOR_5 ((uint32_t)0x00200000U) /*!< Write protection of Sector5 */ +#define OB_WRP_SECTOR_6 ((uint32_t)0x00400000U) /*!< Write protection of Sector6 */ +#define OB_WRP_SECTOR_7 ((uint32_t)0x00800000U) /*!< Write protection of Sector7 */ +#define OB_WRP_SECTOR_All ((uint32_t)0x00FF0000U) /*!< Write protection of all Sectors */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 8 */ + +#if (FLASH_SECTOR_TOTAL == 4) +/** @defgroup FLASHEx_Option_Bytes_Write_Protection FLASH Option Bytes Write Protection + * @{ + */ +#define OB_WRP_SECTOR_0 ((uint32_t)0x00010000U) /*!< Write protection of Sector0 */ +#define OB_WRP_SECTOR_1 ((uint32_t)0x00020000U) /*!< Write protection of Sector1 */ +#define OB_WRP_SECTOR_2 ((uint32_t)0x00040000U) /*!< Write protection of Sector2 */ +#define OB_WRP_SECTOR_3 ((uint32_t)0x00080000U) /*!< Write protection of Sector3 */ +#define OB_WRP_SECTOR_All ((uint32_t)0x000F0000U) /*!< Write protection of all Sectors */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 4 */ + +#if (FLASH_SECTOR_TOTAL == 2) +/** @defgroup FLASHEx_Option_Bytes_Write_Protection FLASH Option Bytes Write Protection + * @{ + */ +#define OB_WRP_SECTOR_0 ((uint32_t)0x00010000U) /*!< Write protection of Sector0 */ +#define OB_WRP_SECTOR_1 ((uint32_t)0x00020000U) /*!< Write protection of Sector1 */ +#define OB_WRP_SECTOR_All ((uint32_t)0x00030000U) /*!< Write protection of all Sectors */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 2 */ + +#if defined (FLASH_OPTCR2_PCROP) +#if (FLASH_SECTOR_TOTAL == 8) +/** @defgroup FLASHEx_Option_Bytes_PCROP_Sectors FLASH Option Bytes PCROP Sectors + * @{ + */ +#define OB_PCROP_SECTOR_0 ((uint32_t)0x00000001U) /*!< PC Readout protection of Sector0 */ +#define OB_PCROP_SECTOR_1 ((uint32_t)0x00000002U) /*!< PC Readout protection of Sector1 */ +#define OB_PCROP_SECTOR_2 ((uint32_t)0x00000004U) /*!< PC Readout protection of Sector2 */ +#define OB_PCROP_SECTOR_3 ((uint32_t)0x00000008U) /*!< PC Readout protection of Sector3 */ +#define OB_PCROP_SECTOR_4 ((uint32_t)0x00000010U) /*!< PC Readout protection of Sector4 */ +#define OB_PCROP_SECTOR_5 ((uint32_t)0x00000020U) /*!< PC Readout protection of Sector5 */ +#define OB_PCROP_SECTOR_6 ((uint32_t)0x00000040U) /*!< PC Readout protection of Sector6 */ +#define OB_PCROP_SECTOR_7 ((uint32_t)0x00000080U) /*!< PC Readout protection of Sector7 */ +#define OB_PCROP_SECTOR_All ((uint32_t)0x000000FFU) /*!< PC Readout protection of all Sectors */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 8 */ + +#if (FLASH_SECTOR_TOTAL == 4) +/** @defgroup FLASHEx_Option_Bytes_PCROP_Sectors FLASH Option Bytes PCROP 
Sectors + * @{ + */ +#define OB_PCROP_SECTOR_0 ((uint32_t)0x00000001U) /*!< PC Readout protection of Sector0 */ +#define OB_PCROP_SECTOR_1 ((uint32_t)0x00000002U) /*!< PC Readout protection of Sector1 */ +#define OB_PCROP_SECTOR_2 ((uint32_t)0x00000004U) /*!< PC Readout protection of Sector2 */ +#define OB_PCROP_SECTOR_3 ((uint32_t)0x00000008U) /*!< PC Readout protection of Sector3 */ +#define OB_PCROP_SECTOR_All ((uint32_t)0x0000000FU) /*!< PC Readout protection of all Sectors */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 4 */ + +/** @defgroup FLASHEx_Option_Bytes_PCROP_RDP FLASH Option Bytes PCROP_RDP Bit + * @{ + */ +#define OB_PCROP_RDP_ENABLE ((uint32_t)0x80000000U) /*!< PCROP_RDP Enable */ +#define OB_PCROP_RDP_DISABLE ((uint32_t)0x00000000U) /*!< PCROP_RDP Disable */ +/** + * @} + */ +#endif /* FLASH_OPTCR2_PCROP */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Macros FLASH Exported Macros + * @{ + */ +/** + * @brief Calculate the FLASH Boot Base Adress (BOOT_ADD0 or BOOT_ADD1) + * @note Returned value BOOT_ADDx[15:0] corresponds to boot address [29:14]. + * @param __ADDRESS__ FLASH Boot Address (in the range 0x0000 0000 to 0x2004 FFFF with a granularity of 16KB) + * @retval The FLASH Boot Base Adress + */ +#define __HAL_FLASH_CALC_BOOT_BASE_ADR(__ADDRESS__) ((__ADDRESS__) >> 14) + /** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup FLASHEx_Exported_Functions + * @{ + */ + +/** @addtogroup FLASHEx_Exported_Functions_Group1 + * @{ + */ +/* Extension Program operation functions *************************************/ +HAL_StatusTypeDef HAL_FLASHEx_Erase(FLASH_EraseInitTypeDef *pEraseInit, uint32_t *SectorError); +HAL_StatusTypeDef HAL_FLASHEx_Erase_IT(FLASH_EraseInitTypeDef *pEraseInit); +HAL_StatusTypeDef HAL_FLASHEx_OBProgram(FLASH_OBProgramInitTypeDef *pOBInit); +void HAL_FLASHEx_OBGetConfig(FLASH_OBProgramInitTypeDef *pOBInit); + +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup FLASHEx_Private_Macros FLASH Private Macros + * @{ + */ + +/** @defgroup FLASHEx_IS_FLASH_Definitions FLASH Private macros to check input parameters + * @{ + */ + +#define IS_FLASH_TYPEERASE(VALUE)(((VALUE) == FLASH_TYPEERASE_SECTORS) || \ + ((VALUE) == FLASH_TYPEERASE_MASSERASE)) + +#define IS_VOLTAGERANGE(RANGE)(((RANGE) == FLASH_VOLTAGE_RANGE_1) || \ + ((RANGE) == FLASH_VOLTAGE_RANGE_2) || \ + ((RANGE) == FLASH_VOLTAGE_RANGE_3) || \ + ((RANGE) == FLASH_VOLTAGE_RANGE_4)) + +#define IS_WRPSTATE(VALUE)(((VALUE) == OB_WRPSTATE_DISABLE) || \ + ((VALUE) == OB_WRPSTATE_ENABLE)) + +#if defined (FLASH_OPTCR2_PCROP) +#define IS_OPTIONBYTE(VALUE)(((VALUE) <= (OPTIONBYTE_WRP | OPTIONBYTE_RDP | OPTIONBYTE_USER |\ + OPTIONBYTE_BOR | OPTIONBYTE_BOOTADDR_0 | OPTIONBYTE_BOOTADDR_1 |\ + OPTIONBYTE_PCROP | OPTIONBYTE_PCROP_RDP))) +#else +#define IS_OPTIONBYTE(VALUE)(((VALUE) <= (OPTIONBYTE_WRP | OPTIONBYTE_RDP | OPTIONBYTE_USER |\ + OPTIONBYTE_BOR | OPTIONBYTE_BOOTADDR_0 | OPTIONBYTE_BOOTADDR_1))) +#endif /* FLASH_OPTCR2_PCROP */ + +#define IS_OB_BOOT_ADDRESS(ADDRESS) ((ADDRESS) <= 0x8013) + +#define IS_OB_RDP_LEVEL(LEVEL) (((LEVEL) == 
OB_RDP_LEVEL_0) ||\ + ((LEVEL) == OB_RDP_LEVEL_1) ||\ + ((LEVEL) == OB_RDP_LEVEL_2)) + +#define IS_OB_WWDG_SOURCE(SOURCE) (((SOURCE) == OB_WWDG_SW) || ((SOURCE) == OB_WWDG_HW)) + +#define IS_OB_IWDG_SOURCE(SOURCE) (((SOURCE) == OB_IWDG_SW) || ((SOURCE) == OB_IWDG_HW)) + +#define IS_OB_STOP_SOURCE(SOURCE) (((SOURCE) == OB_STOP_NO_RST) || ((SOURCE) == OB_STOP_RST)) + +#define IS_OB_STDBY_SOURCE(SOURCE) (((SOURCE) == OB_STDBY_NO_RST) || ((SOURCE) == OB_STDBY_RST)) + +#define IS_OB_IWDG_STOP_FREEZE(FREEZE) (((FREEZE) == OB_IWDG_STOP_FREEZE) || ((FREEZE) == OB_IWDG_STOP_ACTIVE)) + +#define IS_OB_IWDG_STDBY_FREEZE(FREEZE) (((FREEZE) == OB_IWDG_STDBY_FREEZE) || ((FREEZE) == OB_IWDG_STDBY_ACTIVE)) + +#define IS_OB_BOR_LEVEL(LEVEL) (((LEVEL) == OB_BOR_LEVEL1) || ((LEVEL) == OB_BOR_LEVEL2) ||\ + ((LEVEL) == OB_BOR_LEVEL3) || ((LEVEL) == OB_BOR_OFF)) + +#define IS_FLASH_LATENCY(LATENCY) (((LATENCY) == FLASH_LATENCY_0) || \ + ((LATENCY) == FLASH_LATENCY_1) || \ + ((LATENCY) == FLASH_LATENCY_2) || \ + ((LATENCY) == FLASH_LATENCY_3) || \ + ((LATENCY) == FLASH_LATENCY_4) || \ + ((LATENCY) == FLASH_LATENCY_5) || \ + ((LATENCY) == FLASH_LATENCY_6) || \ + ((LATENCY) == FLASH_LATENCY_7) || \ + ((LATENCY) == FLASH_LATENCY_8) || \ + ((LATENCY) == FLASH_LATENCY_9) || \ + ((LATENCY) == FLASH_LATENCY_10) || \ + ((LATENCY) == FLASH_LATENCY_11) || \ + ((LATENCY) == FLASH_LATENCY_12) || \ + ((LATENCY) == FLASH_LATENCY_13) || \ + ((LATENCY) == FLASH_LATENCY_14) || \ + ((LATENCY) == FLASH_LATENCY_15)) + +#define IS_FLASH_ADDRESS(ADDRESS) ((((ADDRESS) >= FLASH_BASE) && ((ADDRESS) <= FLASH_END)) || \ + (((ADDRESS) >= FLASH_OTP_BASE) && ((ADDRESS) <= FLASH_OTP_END))) +#define IS_FLASH_NBSECTORS(NBSECTORS) (((NBSECTORS) != 0U) && ((NBSECTORS) <= FLASH_SECTOR_TOTAL)) + +#if (FLASH_SECTOR_TOTAL == 8) +#define IS_FLASH_SECTOR(SECTOR) (((SECTOR) == FLASH_SECTOR_0) || ((SECTOR) == FLASH_SECTOR_1) ||\ + ((SECTOR) == FLASH_SECTOR_2) || ((SECTOR) == FLASH_SECTOR_3) ||\ + ((SECTOR) == FLASH_SECTOR_4) || ((SECTOR) == FLASH_SECTOR_5) ||\ + ((SECTOR) == FLASH_SECTOR_6) || ((SECTOR) == FLASH_SECTOR_7)) + +#define IS_OB_WRP_SECTOR(SECTOR) ((((SECTOR) & 0xFF00FFFFU) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* FLASH_SECTOR_TOTAL == 8 */ + +#if (FLASH_SECTOR_TOTAL == 24) +#define IS_FLASH_SECTOR(SECTOR) (((SECTOR) == FLASH_SECTOR_0) || ((SECTOR) == FLASH_SECTOR_1) ||\ + ((SECTOR) == FLASH_SECTOR_2) || ((SECTOR) == FLASH_SECTOR_3) ||\ + ((SECTOR) == FLASH_SECTOR_4) || ((SECTOR) == FLASH_SECTOR_5) ||\ + ((SECTOR) == FLASH_SECTOR_6) || ((SECTOR) == FLASH_SECTOR_7) ||\ + ((SECTOR) == FLASH_SECTOR_8) || ((SECTOR) == FLASH_SECTOR_9) ||\ + ((SECTOR) == FLASH_SECTOR_10) || ((SECTOR) == FLASH_SECTOR_11) ||\ + ((SECTOR) == FLASH_SECTOR_12) || ((SECTOR) == FLASH_SECTOR_13) ||\ + ((SECTOR) == FLASH_SECTOR_14) || ((SECTOR) == FLASH_SECTOR_15) ||\ + ((SECTOR) == FLASH_SECTOR_16) || ((SECTOR) == FLASH_SECTOR_17) ||\ + ((SECTOR) == FLASH_SECTOR_18) || ((SECTOR) == FLASH_SECTOR_19) ||\ + ((SECTOR) == FLASH_SECTOR_20) || ((SECTOR) == FLASH_SECTOR_21) ||\ + ((SECTOR) == FLASH_SECTOR_22) || ((SECTOR) == FLASH_SECTOR_23)) + +#define IS_OB_WRP_SECTOR(SECTOR) ((((SECTOR) & 0xF000FFFFU) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* FLASH_SECTOR_TOTAL == 24 */ + +#if (FLASH_SECTOR_TOTAL == 4) +#define IS_FLASH_SECTOR(SECTOR) (((SECTOR) == FLASH_SECTOR_0) || ((SECTOR) == FLASH_SECTOR_1) ||\ + ((SECTOR) == FLASH_SECTOR_2) || ((SECTOR) == FLASH_SECTOR_3)) + +#define IS_OB_WRP_SECTOR(SECTOR) ((((SECTOR) & 0xFFF0FFFFU) == 0x00000000U) && 
((SECTOR) != 0x00000000U)) +#endif /* FLASH_SECTOR_TOTAL == 4 */ + +#if (FLASH_SECTOR_TOTAL == 2) +#define IS_FLASH_SECTOR(SECTOR) (((SECTOR) == FLASH_SECTOR_0) || ((SECTOR) == FLASH_SECTOR_1)) + +#define IS_OB_WRP_SECTOR(SECTOR) ((((SECTOR) & 0xFFFCFFFFU) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* FLASH_SECTOR_TOTAL == 2 */ + +#if defined (FLASH_OPTCR_nDBANK) +#define IS_OB_NDBANK(VALUE) (((VALUE) == OB_NDBANK_SINGLE_BANK) || \ + ((VALUE) == OB_NDBANK_DUAL_BANK)) + +#define IS_FLASH_BANK(BANK) (((BANK) == FLASH_BANK_1) || \ + ((BANK) == FLASH_BANK_2) || \ + ((BANK) == FLASH_BANK_BOTH)) +#endif /* FLASH_OPTCR_nDBANK */ + +#if defined (FLASH_OPTCR_nDBOOT) +#define IS_OB_NDBOOT(VALUE) (((VALUE) == OB_DUAL_BOOT_DISABLE) || \ + ((VALUE) == OB_DUAL_BOOT_ENABLE)) +#endif /* FLASH_OPTCR_nDBOOT */ + +#if defined (FLASH_OPTCR2_PCROP) +#define IS_OB_PCROP_SECTOR(SECTOR) (((SECTOR) & (uint32_t)0xFFFFFF00U) == 0x00000000U) +#define IS_OB_PCROP_RDP_VALUE(VALUE) (((VALUE) == OB_PCROP_RDP_DISABLE) || \ + ((VALUE) == OB_PCROP_RDP_ENABLE)) +#endif /* FLASH_OPTCR2_PCROP */ + +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup FLASHEx_Private_Functions FLASH Private Functions + * @{ + */ +void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_FLASH_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio.h new file mode 100644 index 000000000..46a64983d --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio.h @@ -0,0 +1,309 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_gpio.h + * @author MCD Application Team + * @brief Header file of GPIO HAL module. + ****************************************************************************** + * @attention + * + *
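Illustrative usage note (not part of the vendor file): sector erase through stm32f7xx_hal_flash_ex.h above goes via FLASH_EraseInitTypeDef and HAL_FLASHEx_Erase. The sketch below erases one sector; the voltage-range value and wrapper name are illustrative, and on dual-bank devices (FLASH_OPTCR_nDBANK) the Banks field must also be filled in.

/* Minimal sketch: erase a single flash sector. */
#include "stm32f7xx_hal.h"

HAL_StatusTypeDef flash_erase_sector(uint32_t sector)
{
    FLASH_EraseInitTypeDef erase = {0};
    uint32_t faulty_sector = 0U;          /* receives the failing sector, if any */
    HAL_StatusTypeDef status;

    erase.TypeErase    = FLASH_TYPEERASE_SECTORS;
    erase.Sector       = sector;          /* e.g. FLASH_SECTOR_7 */
    erase.NbSectors    = 1U;
    erase.VoltageRange = FLASH_VOLTAGE_RANGE_3;   /* 2.7 V .. 3.6 V supply */

    HAL_FLASH_Unlock();
    status = HAL_FLASHEx_Erase(&erase, &faulty_sector);
    HAL_FLASH_Lock();
    return status;
}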
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_GPIO_H +#define __STM32F7xx_HAL_GPIO_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup GPIO + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup GPIO_Exported_Types GPIO Exported Types + * @{ + */ + +/** + * @brief GPIO Init structure definition + */ +typedef struct +{ + uint32_t Pin; /*!< Specifies the GPIO pins to be configured. + This parameter can be any value of @ref GPIO_pins_define */ + + uint32_t Mode; /*!< Specifies the operating mode for the selected pins. + This parameter can be a value of @ref GPIO_mode_define */ + + uint32_t Pull; /*!< Specifies the Pull-up or Pull-Down activation for the selected pins. + This parameter can be a value of @ref GPIO_pull_define */ + + uint32_t Speed; /*!< Specifies the speed for the selected pins. + This parameter can be a value of @ref GPIO_speed_define */ + + uint32_t Alternate; /*!< Peripheral to be connected to the selected pins. + This parameter can be a value of @ref GPIO_Alternate_function_selection */ +}GPIO_InitTypeDef; + +/** + * @brief GPIO Bit SET and Bit RESET enumeration + */ +typedef enum +{ + GPIO_PIN_RESET = 0, + GPIO_PIN_SET +}GPIO_PinState; +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup GPIO_Exported_Constants GPIO Exported Constants + * @{ + */ + +/** @defgroup GPIO_pins_define GPIO pins define + * @{ + */ +#define GPIO_PIN_0 ((uint16_t)0x0001U) /* Pin 0 selected */ +#define GPIO_PIN_1 ((uint16_t)0x0002U) /* Pin 1 selected */ +#define GPIO_PIN_2 ((uint16_t)0x0004U) /* Pin 2 selected */ +#define GPIO_PIN_3 ((uint16_t)0x0008U) /* Pin 3 selected */ +#define GPIO_PIN_4 ((uint16_t)0x0010U) /* Pin 4 selected */ +#define GPIO_PIN_5 ((uint16_t)0x0020U) /* Pin 5 selected */ +#define GPIO_PIN_6 ((uint16_t)0x0040U) /* Pin 6 selected */ +#define GPIO_PIN_7 ((uint16_t)0x0080U) /* Pin 7 selected */ +#define GPIO_PIN_8 ((uint16_t)0x0100U) /* Pin 8 selected */ +#define GPIO_PIN_9 ((uint16_t)0x0200U) /* Pin 9 selected */ +#define GPIO_PIN_10 ((uint16_t)0x0400U) /* Pin 10 selected */ +#define GPIO_PIN_11 ((uint16_t)0x0800U) /* Pin 11 selected */ +#define GPIO_PIN_12 ((uint16_t)0x1000U) /* Pin 12 selected */ +#define GPIO_PIN_13 ((uint16_t)0x2000U) /* Pin 13 selected */ +#define GPIO_PIN_14 ((uint16_t)0x4000U) /* Pin 14 selected */ +#define GPIO_PIN_15 ((uint16_t)0x8000U) /* Pin 15 selected */ +#define GPIO_PIN_All ((uint16_t)0xFFFFU) /* All pins selected */ + +#define GPIO_PIN_MASK ((uint32_t)0x0000FFFFU) /* PIN mask for assert test */ +/** + * @} + */ + +/** @defgroup GPIO_mode_define GPIO mode define + * @brief GPIO Configuration Mode + * Elements values convention: 0xX0yz00YZ + * - X : GPIO mode or EXTI Mode + * - y : External IT or Event trigger detection + * - z : IO configuration on External IT or Event + * - Y : Output type (Push Pull or Open Drain) + * - Z : IO Direction mode (Input, 
Output, Alternate or Analog) + * @{ + */ +#define GPIO_MODE_INPUT ((uint32_t)0x00000000U) /*!< Input Floating Mode */ +#define GPIO_MODE_OUTPUT_PP ((uint32_t)0x00000001U) /*!< Output Push Pull Mode */ +#define GPIO_MODE_OUTPUT_OD ((uint32_t)0x00000011U) /*!< Output Open Drain Mode */ +#define GPIO_MODE_AF_PP ((uint32_t)0x00000002U) /*!< Alternate Function Push Pull Mode */ +#define GPIO_MODE_AF_OD ((uint32_t)0x00000012U) /*!< Alternate Function Open Drain Mode */ + +#define GPIO_MODE_ANALOG ((uint32_t)0x00000003U) /*!< Analog Mode */ + +#define GPIO_MODE_IT_RISING ((uint32_t)0x10110000U) /*!< External Interrupt Mode with Rising edge trigger detection */ +#define GPIO_MODE_IT_FALLING ((uint32_t)0x10210000U) /*!< External Interrupt Mode with Falling edge trigger detection */ +#define GPIO_MODE_IT_RISING_FALLING ((uint32_t)0x10310000U) /*!< External Interrupt Mode with Rising/Falling edge trigger detection */ + +#define GPIO_MODE_EVT_RISING ((uint32_t)0x10120000U) /*!< External Event Mode with Rising edge trigger detection */ +#define GPIO_MODE_EVT_FALLING ((uint32_t)0x10220000U) /*!< External Event Mode with Falling edge trigger detection */ +#define GPIO_MODE_EVT_RISING_FALLING ((uint32_t)0x10320000U) /*!< External Event Mode with Rising/Falling edge trigger detection */ +/** + * @} + */ + +/** @defgroup GPIO_speed_define GPIO speed define + * @brief GPIO Output Maximum frequency + * @{ + */ +#define GPIO_SPEED_FREQ_LOW ((uint32_t)0x00000000U) /*!< Low speed */ +#define GPIO_SPEED_FREQ_MEDIUM ((uint32_t)0x00000001U) /*!< Medium speed */ +#define GPIO_SPEED_FREQ_HIGH ((uint32_t)0x00000002U) /*!< Fast speed */ +#define GPIO_SPEED_FREQ_VERY_HIGH ((uint32_t)0x00000003U) /*!< High speed */ +/** + * @} + */ + + /** @defgroup GPIO_pull_define GPIO pull define + * @brief GPIO Pull-Up or Pull-Down Activation + * @{ + */ +#define GPIO_NOPULL ((uint32_t)0x00000000U) /*!< No Pull-up or Pull-down activation */ +#define GPIO_PULLUP ((uint32_t)0x00000001U) /*!< Pull-up activation */ +#define GPIO_PULLDOWN ((uint32_t)0x00000002U) /*!< Pull-down activation */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup GPIO_Exported_Macros GPIO Exported Macros + * @{ + */ + +/** + * @brief Checks whether the specified EXTI line flag is set or not. + * @param __EXTI_LINE__ specifies the EXTI line flag to check. + * This parameter can be GPIO_PIN_x where x can be(0..15) + * @retval The new state of __EXTI_LINE__ (SET or RESET). + */ +#define __HAL_GPIO_EXTI_GET_FLAG(__EXTI_LINE__) (EXTI->PR & (__EXTI_LINE__)) + +/** + * @brief Clears the EXTI's line pending flags. + * @param __EXTI_LINE__ specifies the EXTI lines flags to clear. + * This parameter can be any combination of GPIO_PIN_x where x can be (0..15) + * @retval None + */ +#define __HAL_GPIO_EXTI_CLEAR_FLAG(__EXTI_LINE__) (EXTI->PR = (__EXTI_LINE__)) + +/** + * @brief Checks whether the specified EXTI line is asserted or not. + * @param __EXTI_LINE__ specifies the EXTI line to check. + * This parameter can be GPIO_PIN_x where x can be(0..15) + * @retval The new state of __EXTI_LINE__ (SET or RESET). + */ +#define __HAL_GPIO_EXTI_GET_IT(__EXTI_LINE__) (EXTI->PR & (__EXTI_LINE__)) + +/** + * @brief Clears the EXTI's line pending bits. + * @param __EXTI_LINE__ specifies the EXTI lines to clear. 
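
For reference, a minimal usage sketch of the GPIO_InitTypeDef fields and the mode/pull/speed constants documented above (illustrative editorial example only, not part of the generated sources; the port, pin and IRQ choices are arbitrary assumptions, and clock/NVIC setup normally lives in the CubeMX-generated gpio.c and stm32f7xx_it.c):

#include "stm32f7xx_hal.h"

/* Configure PG1 as a push-pull output and PB14 as a falling-edge EXTI input.
   Ports, pins and the IRQ line are arbitrary example choices. */
static void example_gpio_config(void)
{
  GPIO_InitTypeDef init = {0};

  __HAL_RCC_GPIOG_CLK_ENABLE();                 /* clock the port before configuring it */
  init.Pin   = GPIO_PIN_1;
  init.Mode  = GPIO_MODE_OUTPUT_PP;
  init.Pull  = GPIO_NOPULL;
  init.Speed = GPIO_SPEED_FREQ_LOW;
  HAL_GPIO_Init(GPIOG, &init);
  HAL_GPIO_WritePin(GPIOG, GPIO_PIN_1, GPIO_PIN_SET);

  __HAL_RCC_GPIOB_CLK_ENABLE();
  init.Pin  = GPIO_PIN_14;
  init.Mode = GPIO_MODE_IT_FALLING;             /* EXTI interrupt on falling edge */
  init.Pull = GPIO_PULLUP;
  HAL_GPIO_Init(GPIOB, &init);
  HAL_NVIC_SetPriority(EXTI15_10_IRQn, 5, 0);
  HAL_NVIC_EnableIRQ(EXTI15_10_IRQn);
}

/* EXTI lines 10..15 share one vector; the HAL handler clears the pending bit
   via the EXTI clear macros defined in this header and then calls
   HAL_GPIO_EXTI_Callback(). */
void EXTI15_10_IRQHandler(void)
{
  HAL_GPIO_EXTI_IRQHandler(GPIO_PIN_14);
}
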
+ * This parameter can be any combination of GPIO_PIN_x where x can be (0..15) + * @retval None + */ +#define __HAL_GPIO_EXTI_CLEAR_IT(__EXTI_LINE__) (EXTI->PR = (__EXTI_LINE__)) + +/** + * @brief Generates a Software interrupt on selected EXTI line. + * @param __EXTI_LINE__ specifies the EXTI line to check. + * This parameter can be GPIO_PIN_x where x can be(0..15) + * @retval None + */ +#define __HAL_GPIO_EXTI_GENERATE_SWIT(__EXTI_LINE__) (EXTI->SWIER |= (__EXTI_LINE__)) +/** + * @} + */ + +/* Include GPIO HAL Extension module */ +#include "stm32f7xx_hal_gpio_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup GPIO_Exported_Functions + * @{ + */ + +/** @addtogroup GPIO_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions *****************************/ +void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init); +void HAL_GPIO_DeInit(GPIO_TypeDef *GPIOx, uint32_t GPIO_Pin); +/** + * @} + */ + +/** @addtogroup GPIO_Exported_Functions_Group2 + * @{ + */ +/* IO operation functions *****************************************************/ +GPIO_PinState HAL_GPIO_ReadPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); +void HAL_GPIO_WritePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin, GPIO_PinState PinState); +void HAL_GPIO_TogglePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); +HAL_StatusTypeDef HAL_GPIO_LockPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); +void HAL_GPIO_EXTI_IRQHandler(uint16_t GPIO_Pin); +void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin); + +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup GPIO_Private_Constants GPIO Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup GPIO_Private_Macros GPIO Private Macros + * @{ + */ +#define IS_GPIO_PIN_ACTION(ACTION) (((ACTION) == GPIO_PIN_RESET) || ((ACTION) == GPIO_PIN_SET)) +#define IS_GPIO_PIN(__PIN__) ((((uint32_t)(__PIN__) & GPIO_PIN_MASK) != 0x00U)) +#define IS_GPIO_MODE(MODE) (((MODE) == GPIO_MODE_INPUT) ||\ + ((MODE) == GPIO_MODE_OUTPUT_PP) ||\ + ((MODE) == GPIO_MODE_OUTPUT_OD) ||\ + ((MODE) == GPIO_MODE_AF_PP) ||\ + ((MODE) == GPIO_MODE_AF_OD) ||\ + ((MODE) == GPIO_MODE_IT_RISING) ||\ + ((MODE) == GPIO_MODE_IT_FALLING) ||\ + ((MODE) == GPIO_MODE_IT_RISING_FALLING) ||\ + ((MODE) == GPIO_MODE_EVT_RISING) ||\ + ((MODE) == GPIO_MODE_EVT_FALLING) ||\ + ((MODE) == GPIO_MODE_EVT_RISING_FALLING) ||\ + ((MODE) == GPIO_MODE_ANALOG)) +#define IS_GPIO_SPEED(SPEED) (((SPEED) == GPIO_SPEED_LOW) || ((SPEED) == GPIO_SPEED_MEDIUM) || \ + ((SPEED) == GPIO_SPEED_FAST) || ((SPEED) == GPIO_SPEED_HIGH)) +#define IS_GPIO_PULL(PULL) (((PULL) == GPIO_NOPULL) || ((PULL) == GPIO_PULLUP) || \ + ((PULL) == GPIO_PULLDOWN)) +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup GPIO_Private_Functions GPIO Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_GPIO_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio_ex.h 
b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio_ex.h new file mode 100644 index 000000000..f93badbdd --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio_ex.h @@ -0,0 +1,658 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_gpio_ex.h + * @author MCD Application Team + * @brief Header file of GPIO HAL Extension module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_GPIO_EX_H +#define __STM32F7xx_HAL_GPIO_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup GPIOEx GPIOEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup GPIOEx_Exported_Constants GPIO Exported Constants + * @{ + */ + +/** @defgroup GPIO_Alternate_function_selection GPIO Alternate Function Selection + * @{ + */ +/*--------------- STM32F74xxx/STM32F75xxx/STM32F76xxx/STM32F77xxx -------------*/ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || defined (STM32F767xx) ||\ + defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00U) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00U) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00U) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00U) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01U) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01U) /* TIM2 Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF1_UART5 ((uint8_t)0x01U) /* UART5 Alternate Function mapping */ +#define GPIO_AF1_I2C4 ((uint8_t)0x01U) /* I2C4 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02U) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02U) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02U) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03U) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03U) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03U) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03U) /* TIM11 Alternate Function mapping */ +#define GPIO_AF3_LPTIM1 ((uint8_t)0x03U) /* LPTIM1 Alternate Function mapping */ +#define GPIO_AF3_CEC ((uint8_t)0x03U) /* CEC Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF3_DFSDM1 ((uint8_t)0x03U) /* DFSDM1 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04U) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04U) /* 
I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04U) /* I2C3 Alternate Function mapping */ +#define GPIO_AF4_I2C4 ((uint8_t)0x04U) /* I2C4 Alternate Function mapping */ +#define GPIO_AF4_CEC ((uint8_t)0x04U) /* CEC Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF4_USART1 ((uint8_t)0x04) /* USART1 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05U) /* SPI1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05U) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05U) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05U) /* SPI4 Alternate Function mapping */ +#define GPIO_AF5_SPI5 ((uint8_t)0x05U) /* SPI5 Alternate Function mapping */ +#define GPIO_AF5_SPI6 ((uint8_t)0x05U) /* SPI6 Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06U) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_SAI1 ((uint8_t)0x06U) /* SAI1 Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF6_UART4 ((uint8_t)0x06U) /* UART4 Alternate Function mapping */ +#define GPIO_AF6_DFSDM1 ((uint8_t)0x06U) /* DFSDM1 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07U) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07U) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07U) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_UART5 ((uint8_t)0x07U) /* UART5 Alternate Function mapping */ +#define GPIO_AF7_SPDIFRX ((uint8_t)0x07U) /* SPDIF-RX Alternate Function mapping */ +#define GPIO_AF7_SPI2 ((uint8_t)0x07U) /* SPI2 Alternate Function mapping */ +#define GPIO_AF7_SPI3 ((uint8_t)0x07U) /* SPI3 Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF7_SPI6 ((uint8_t)0x07U) /* SPI6 Alternate Function mapping */ +#define GPIO_AF7_DFSDM1 ((uint8_t)0x07U) /* DFSDM1 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_UART4 ((uint8_t)0x08U) /* UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08U) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_USART6 ((uint8_t)0x08U) /* USART6 Alternate Function mapping */ +#define GPIO_AF8_UART7 ((uint8_t)0x08U) /* UART7 Alternate Function mapping */ +#define GPIO_AF8_UART8 ((uint8_t)0x08U) /* UART8 Alternate Function mapping */ +#define GPIO_AF8_SPDIFRX ((uint8_t)0x08U) /* SPIDIF-RX Alternate Function mapping */ +#define GPIO_AF8_SAI2 ((uint8_t)0x08U) /* SAI2 Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF8_SPI6 ((uint8_t)0x08U) /* SPI6 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09U) /* CAN1 Alternate Function 
mapping */ +#define GPIO_AF9_CAN2 ((uint8_t)0x09U) /* CAN2 Alternate Function mapping */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09U) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09U) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09U) /* TIM14 Alternate Function mapping */ +#define GPIO_AF9_QUADSPI ((uint8_t)0x09U) /* QUADSPI Alternate Function mapping */ +#if defined(STM32F746xx) || defined(STM32F756xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) || defined(STM32F750xx) +#define GPIO_AF9_LTDC ((uint8_t)0x09U) /* LCD-TFT Alternate Function mapping */ +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined(STM32F746xx) || defined(STM32F756xx) || defined(STM32F765xx) || defined(STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) || defined(STM32F750xx) +#define GPIO_AF9_FMC ((uint8_t)0x09U) /* FMC Alternate Function mapping */ +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0xAU) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_OTG_HS ((uint8_t)0xAU) /* OTG_HS Alternate Function mapping */ +#define GPIO_AF10_QUADSPI ((uint8_t)0xAU) /* QUADSPI Alternate Function mapping */ +#define GPIO_AF10_SAI2 ((uint8_t)0xAU) /* SAI2 Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF10_DFSDM1 ((uint8_t)0x0AU) /* DFSDM1 Alternate Function mapping */ +#define GPIO_AF10_SDMMC2 ((uint8_t)0x0AU) /* SDMMC2 Alternate Function mapping */ +#define GPIO_AF10_LTDC ((uint8_t)0x0AU) /* LCD-TFT Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 11 selection + */ +#define GPIO_AF11_ETH ((uint8_t)0x0BU) /* ETHERNET Alternate Function mapping */ +#if defined(STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define GPIO_AF11_CAN3 ((uint8_t)0x0BU) /* CAN3 Alternate Function mapping */ +#define GPIO_AF11_SDMMC2 ((uint8_t)0x0BU) /* SDMMC2 Alternate Function mapping */ +#define GPIO_AF11_I2C4 ((uint8_t)0x0BU) /* I2C4 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_FMC ((uint8_t)0xCU) /* FMC Alternate Function mapping */ +#define GPIO_AF12_OTG_HS_FS ((uint8_t)0xCU) /* OTG HS configured in FS, Alternate Function mapping */ +#define GPIO_AF12_SDMMC1 ((uint8_t)0xCU) /* SDMMC1 Alternate Function mapping */ +#if defined(STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define GPIO_AF12_MDIOS ((uint8_t)0xCU) /* SDMMC1 Alternate Function mapping */ +#define GPIO_AF12_UART7 ((uint8_t)0xCU) /* UART7 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 13 selection + */ +#define GPIO_AF13_DCMI ((uint8_t)0x0DU) /* DCMI Alternate Function mapping */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define GPIO_AF13_DSI ((uint8_t)0x0DU) /* DSI Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if 
defined(STM32F746xx) || defined(STM32F756xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) || defined(STM32F750xx) +#define GPIO_AF13_LTDC ((uint8_t)0x0DU) /* LTDC Alternate Function mapping */ + +/** + * @brief AF 14 selection + */ +#define GPIO_AF14_LTDC ((uint8_t)0x0EU) /* LCD-TFT Alternate Function mapping */ +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0FU) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/*----------------------------------------------------------------------------*/ + +/*---------------------------- STM32F72xxx/STM32F73xxx -----------------------*/ +#if defined(STM32F722xx) || defined(STM32F723xx) || defined(STM32F732xx) || defined(STM32F733xx) || defined(STM32F730xx) + /** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00U) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00U) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00U) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00U) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01U) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01U) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02U) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02U) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02U) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03U) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03U) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03U) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03U) /* TIM11 Alternate Function mapping */ +#define GPIO_AF3_LPTIM1 ((uint8_t)0x03U) /* LPTIM1 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04U) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04U) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04U) /* I2C3 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05U) /* SPI1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05U) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05U) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05U) /* SPI4 Alternate Function mapping */ +#define GPIO_AF5_SPI5 ((uint8_t)0x05U) /* SPI5 Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06U) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_SAI1 ((uint8_t)0x06U) /* SAI1 Alternate Function mapping */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07U) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07U) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07U) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_UART5 ((uint8_t)0x07U) /* UART5 
Alternate Function mapping */ +#define GPIO_AF7_SPI2 ((uint8_t)0x07U) /* SPI2 Alternate Function mapping */ +#define GPIO_AF7_SPI3 ((uint8_t)0x07U) /* SPI3 Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_UART4 ((uint8_t)0x08U) /* UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08U) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_USART6 ((uint8_t)0x08U) /* USART6 Alternate Function mapping */ +#define GPIO_AF8_UART7 ((uint8_t)0x08U) /* UART7 Alternate Function mapping */ +#define GPIO_AF8_UART8 ((uint8_t)0x08U) /* UART8 Alternate Function mapping */ +#define GPIO_AF8_SAI2 ((uint8_t)0x08U) /* SAI2 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09U) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09U) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09U) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09U) /* TIM14 Alternate Function mapping */ +#define GPIO_AF9_QUADSPI ((uint8_t)0x09U) /* QUADSPI Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0xAU) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_OTG_HS ((uint8_t)0xAU) /* OTG_HS Alternate Function mapping */ +#define GPIO_AF10_QUADSPI ((uint8_t)0xAU) /* QUADSPI Alternate Function mapping */ +#define GPIO_AF10_SAI2 ((uint8_t)0xAU) /* SAI2 Alternate Function mapping */ +#define GPIO_AF10_SDMMC2 ((uint8_t)0x0AU) /* SDMMC2 Alternate Function mapping */ + +/** + * @brief AF 11 selection + */ +#define GPIO_AF11_SDMMC2 ((uint8_t)0x0BU) /* SDMMC2 Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_FMC ((uint8_t)0xCU) /* FMC Alternate Function mapping */ +#define GPIO_AF12_OTG_HS_FS ((uint8_t)0xCU) /* OTG HS configured in FS, Alternate Function mapping */ +#define GPIO_AF12_SDMMC1 ((uint8_t)0xCU) /* SDMMC1 Alternate Function mapping */ + +/** + * @brief AF 13 selection + */ +#define GPIO_AF13_RNG ((uint8_t)0x0DU) /* RNG Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0FU) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ +/*----------------------------------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup GPIOEx_Exported_Macros GPIO Exported Macros + * @{ + */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup GPIOEx_Exported_Functions GPIO Exported Functions + * @{ + */ +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup GPIOEx_Private_Constants GPIO Private Constants + * @{ + */ + +/** + * @brief GPIO pin available on the platform + */ +/* Defines the available pins per GPIOs */ +#define GPIOA_PIN_AVAILABLE GPIO_PIN_All +#define GPIOB_PIN_AVAILABLE GPIO_PIN_All +#define GPIOC_PIN_AVAILABLE GPIO_PIN_All +#define GPIOD_PIN_AVAILABLE GPIO_PIN_All +#define GPIOE_PIN_AVAILABLE GPIO_PIN_All +#define GPIOF_PIN_AVAILABLE GPIO_PIN_All +#define GPIOG_PIN_AVAILABLE GPIO_PIN_All +#define 
GPIOI_PIN_AVAILABLE GPIO_PIN_All +#define GPIOJ_PIN_AVAILABLE GPIO_PIN_All +#define GPIOH_PIN_AVAILABLE GPIO_PIN_All +#define GPIOK_PIN_AVAILABLE (GPIO_PIN_0 | GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_3 | GPIO_PIN_4 | \ + GPIO_PIN_5 | GPIO_PIN_6 | GPIO_PIN_7) + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup GPIOEx_Private_Macros GPIO Private Macros + * @{ + */ +/** @defgroup GPIOEx_Get_Port_Index GPIO Get Port Index + * @{ + */ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U :\ + ((__GPIOx__) == (GPIOD))? 3U :\ + ((__GPIOx__) == (GPIOE))? 4U :\ + ((__GPIOx__) == (GPIOF))? 5U :\ + ((__GPIOx__) == (GPIOG))? 6U :\ + ((__GPIOx__) == (GPIOH))? 7U :\ + ((__GPIOx__) == (GPIOI))? 8U :\ + ((__GPIOx__) == (GPIOJ))? 9U : 10U) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U :\ + ((__GPIOx__) == (GPIOD))? 3U :\ + ((__GPIOx__) == (GPIOE))? 4U :\ + ((__GPIOx__) == (GPIOF))? 5U :\ + ((__GPIOx__) == (GPIOG))? 6U :\ + ((__GPIOx__) == (GPIOH))? 7U : 8U) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ +/** + * @} + */ + +#define IS_GPIO_PIN_AVAILABLE(__INSTANCE__,__PIN__) \ + ((((__INSTANCE__) == GPIOA) && (((__PIN__) & (GPIOA_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOA_PIN_AVAILABLE)) == (GPIOA_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOB) && (((__PIN__) & (GPIOB_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOB_PIN_AVAILABLE)) == (GPIOB_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOC) && (((__PIN__) & (GPIOC_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOC_PIN_AVAILABLE)) == (GPIOC_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOD) && (((__PIN__) & (GPIOD_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOD_PIN_AVAILABLE)) == (GPIOD_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOE) && (((__PIN__) & (GPIOE_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOE_PIN_AVAILABLE)) == (GPIOE_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOF) && (((__PIN__) & (GPIOF_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOF_PIN_AVAILABLE)) == (GPIOF_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOG) && (((__PIN__) & (GPIOG_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOG_PIN_AVAILABLE)) == (GPIOG_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOI) && (((__PIN__) & (GPIOI_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOI_PIN_AVAILABLE)) == (GPIOI_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOJ) && (((__PIN__) & (GPIOJ_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOJ_PIN_AVAILABLE)) == (GPIOJ_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOK) && (((__PIN__) & (GPIOK_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOK_PIN_AVAILABLE)) == (GPIOK_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOH) && (((__PIN__) & (GPIOH_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOH_PIN_AVAILABLE)) == (GPIOH_PIN_AVAILABLE)))) +/** @defgroup GPIOEx_IS_Alternat_function_selection GPIO Check 
Alternate Function + * @{ + */ +#if defined(STM32F756xx) || defined(STM32F746xx) || defined(STM32F750xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF4_I2C4) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM14) || ((AF) == GPIO_AF9_QUADSPI) || \ + ((AF) == GPIO_AF9_LTDC) || ((AF) == GPIO_AF10_OTG_FS) || \ + ((AF) == GPIO_AF10_OTG_HS) || ((AF) == GPIO_AF10_SAI2) || \ + ((AF) == GPIO_AF10_QUADSPI) || ((AF) == GPIO_AF11_ETH) || \ + ((AF) == GPIO_AF12_OTG_HS_FS) || ((AF) == GPIO_AF12_SDMMC1) || \ + ((AF) == GPIO_AF12_FMC) || ((AF) == GPIO_AF15_EVENTOUT) || \ + ((AF) == GPIO_AF13_DCMI) || ((AF) == GPIO_AF14_LTDC)) +#elif defined(STM32F745xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF4_I2C4) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM14) || ((AF) == GPIO_AF9_QUADSPI) || \ + ((AF) == GPIO_AF13_DCMI) || ((AF) == GPIO_AF10_OTG_FS) || \ + ((AF) == GPIO_AF10_OTG_HS) || ((AF) == GPIO_AF10_SAI2) || \ + ((AF) == GPIO_AF10_QUADSPI) || 
((AF) == GPIO_AF11_ETH) || \ + ((AF) == GPIO_AF12_OTG_HS_FS) || ((AF) == GPIO_AF12_SDMMC1) || \ + ((AF) == GPIO_AF12_FMC) || ((AF) == GPIO_AF15_EVENTOUT)) +#elif defined(STM32F767xx) || defined(STM32F777xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF4_I2C4) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM14) || ((AF) == GPIO_AF9_QUADSPI) || \ + ((AF) == GPIO_AF10_OTG_FS) || ((AF) == GPIO_AF9_LTDC) || \ + ((AF) == GPIO_AF10_OTG_HS) || ((AF) == GPIO_AF10_SAI2) || \ + ((AF) == GPIO_AF10_QUADSPI) || ((AF) == GPIO_AF11_ETH) || \ + ((AF) == GPIO_AF10_SDMMC2) || ((AF) == GPIO_AF11_SDMMC2) || \ + ((AF) == GPIO_AF11_CAN3) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDMMC1) || ((AF) == GPIO_AF12_FMC) || \ + ((AF) == GPIO_AF15_EVENTOUT) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF14_LTDC)) +#elif defined(STM32F769xx) || defined(STM32F779xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF4_I2C4) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM12) || \ 
+ ((AF) == GPIO_AF9_TIM14) || ((AF) == GPIO_AF9_QUADSPI) || \ + ((AF) == GPIO_AF9_LTDC) || ((AF) == GPIO_AF10_OTG_FS) || \ + ((AF) == GPIO_AF10_OTG_HS) || ((AF) == GPIO_AF10_SAI2) || \ + ((AF) == GPIO_AF10_QUADSPI) || ((AF) == GPIO_AF11_ETH) || \ + ((AF) == GPIO_AF10_SDMMC2) || ((AF) == GPIO_AF11_SDMMC2) || \ + ((AF) == GPIO_AF11_CAN3) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDMMC1) || ((AF) == GPIO_AF12_FMC) || \ + ((AF) == GPIO_AF15_EVENTOUT) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF14_LTDC) || ((AF) == GPIO_AF13_DSI)) +#elif defined(STM32F765xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF4_I2C4) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM14) || ((AF) == GPIO_AF9_QUADSPI) || \ + ((AF) == GPIO_AF10_OTG_HS) || ((AF) == GPIO_AF10_SAI2) || \ + ((AF) == GPIO_AF10_QUADSPI) || ((AF) == GPIO_AF11_ETH) || \ + ((AF) == GPIO_AF10_SDMMC2) || ((AF) == GPIO_AF11_SDMMC2) || \ + ((AF) == GPIO_AF11_CAN3) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDMMC1) || ((AF) == GPIO_AF12_FMC) || \ + ((AF) == GPIO_AF15_EVENTOUT) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF10_OTG_FS)) +#elif defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF5_SPI1) || \ + ((AF) == GPIO_AF5_SPI2) || ((AF) == GPIO_AF5_SPI3) || \ + ((AF) == GPIO_AF5_SPI4) || ((AF) == GPIO_AF5_SPI5) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == 
GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM14) || \ + ((AF) == GPIO_AF9_QUADSPI) || ((AF) == GPIO_AF10_OTG_HS) || \ + ((AF) == GPIO_AF10_SAI2) || ((AF) == GPIO_AF10_QUADSPI) || \ + ((AF) == GPIO_AF10_SDMMC2) || ((AF) == GPIO_AF11_SDMMC2) || \ + ((AF) == GPIO_AF12_OTG_HS_FS) || ((AF) == GPIO_AF12_SDMMC1) || \ + ((AF) == GPIO_AF12_FMC) || ((AF) == GPIO_AF15_EVENTOUT) || \ + ((AF) == GPIO_AF10_OTG_FS)) +#endif /* STM32F756xx || STM32F746xx || STM32F750xx */ +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup GPIOEx_Private_Functions GPIO Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_GPIO_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c.h new file mode 100644 index 000000000..172284340 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c.h @@ -0,0 +1,782 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_i2c.h + * @author MCD Application Team + * @brief Header file of I2C HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_I2C_H +#define STM32F7xx_HAL_I2C_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup I2C + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup I2C_Exported_Types I2C Exported Types + * @{ + */ + +/** @defgroup I2C_Configuration_Structure_definition I2C Configuration Structure definition + * @brief I2C Configuration Structure definition + * @{ + */ +typedef struct +{ + uint32_t Timing; /*!< Specifies the I2C_TIMINGR_register value. + This parameter calculated by referring to I2C initialization + section in Reference manual */ + + uint32_t OwnAddress1; /*!< Specifies the first device own address. + This parameter can be a 7-bit or 10-bit address. */ + + uint32_t AddressingMode; /*!< Specifies if 7-bit or 10-bit addressing mode is selected. + This parameter can be a value of @ref I2C_ADDRESSING_MODE */ + + uint32_t DualAddressMode; /*!< Specifies if dual addressing mode is selected. + This parameter can be a value of @ref I2C_DUAL_ADDRESSING_MODE */ + + uint32_t OwnAddress2; /*!< Specifies the second device own address if dual addressing mode is selected + This parameter can be a 7-bit address. */ + + uint32_t OwnAddress2Masks; /*!< Specifies the acknowledge mask address second device own address if dual addressing mode is selected + This parameter can be a value of @ref I2C_OWN_ADDRESS2_MASKS */ + + uint32_t GeneralCallMode; /*!< Specifies if general call mode is selected. + This parameter can be a value of @ref I2C_GENERAL_CALL_ADDRESSING_MODE */ + + uint32_t NoStretchMode; /*!< Specifies if nostretch mode is selected. + This parameter can be a value of @ref I2C_NOSTRETCH_MODE */ + +} I2C_InitTypeDef; + +/** + * @} + */ + +/** @defgroup HAL_state_structure_definition HAL state structure definition + * @brief HAL State structure definition + * @note HAL I2C State value coding follow below described bitmap :\n + * b7-b6 Error information\n + * 00 : No Error\n + * 01 : Abort (Abort user request on going)\n + * 10 : Timeout\n + * 11 : Error\n + * b5 Peripheral initialization status\n + * 0 : Reset (peripheral not initialized)\n + * 1 : Init done (peripheral initialized and ready to use. 
HAL I2C Init function called)\n + * b4 (not used)\n + * x : Should be set to 0\n + * b3\n + * 0 : Ready or Busy (No Listen mode ongoing)\n + * 1 : Listen (peripheral in Address Listen Mode)\n + * b2 Intrinsic process state\n + * 0 : Ready\n + * 1 : Busy (peripheral busy with some configuration or internal operations)\n + * b1 Rx state\n + * 0 : Ready (no Rx operation ongoing)\n + * 1 : Busy (Rx operation ongoing)\n + * b0 Tx state\n + * 0 : Ready (no Tx operation ongoing)\n + * 1 : Busy (Tx operation ongoing) + * @{ + */ +typedef enum +{ + HAL_I2C_STATE_RESET = 0x00U, /*!< Peripheral is not yet Initialized */ + HAL_I2C_STATE_READY = 0x20U, /*!< Peripheral Initialized and ready for use */ + HAL_I2C_STATE_BUSY = 0x24U, /*!< An internal process is ongoing */ + HAL_I2C_STATE_BUSY_TX = 0x21U, /*!< Data Transmission process is ongoing */ + HAL_I2C_STATE_BUSY_RX = 0x22U, /*!< Data Reception process is ongoing */ + HAL_I2C_STATE_LISTEN = 0x28U, /*!< Address Listen Mode is ongoing */ + HAL_I2C_STATE_BUSY_TX_LISTEN = 0x29U, /*!< Address Listen Mode and Data Transmission + process is ongoing */ + HAL_I2C_STATE_BUSY_RX_LISTEN = 0x2AU, /*!< Address Listen Mode and Data Reception + process is ongoing */ + HAL_I2C_STATE_ABORT = 0x60U, /*!< Abort user request ongoing */ + HAL_I2C_STATE_TIMEOUT = 0xA0U, /*!< Timeout state */ + HAL_I2C_STATE_ERROR = 0xE0U /*!< Error */ + +} HAL_I2C_StateTypeDef; + +/** + * @} + */ + +/** @defgroup HAL_mode_structure_definition HAL mode structure definition + * @brief HAL Mode structure definition + * @note HAL I2C Mode value coding follow below described bitmap :\n + * b7 (not used)\n + * x : Should be set to 0\n + * b6\n + * 0 : None\n + * 1 : Memory (HAL I2C communication is in Memory Mode)\n + * b5\n + * 0 : None\n + * 1 : Slave (HAL I2C communication is in Slave Mode)\n + * b4\n + * 0 : None\n + * 1 : Master (HAL I2C communication is in Master Mode)\n + * b3-b2-b1-b0 (not used)\n + * xxxx : Should be set to 0000 + * @{ + */ +typedef enum +{ + HAL_I2C_MODE_NONE = 0x00U, /*!< No I2C communication on going */ + HAL_I2C_MODE_MASTER = 0x10U, /*!< I2C communication is in Master Mode */ + HAL_I2C_MODE_SLAVE = 0x20U, /*!< I2C communication is in Slave Mode */ + HAL_I2C_MODE_MEM = 0x40U /*!< I2C communication is in Memory Mode */ + +} HAL_I2C_ModeTypeDef; + +/** + * @} + */ + +/** @defgroup I2C_Error_Code_definition I2C Error Code definition + * @brief I2C Error Code definition + * @{ + */ +#define HAL_I2C_ERROR_NONE (0x00000000U) /*!< No error */ +#define HAL_I2C_ERROR_BERR (0x00000001U) /*!< BERR error */ +#define HAL_I2C_ERROR_ARLO (0x00000002U) /*!< ARLO error */ +#define HAL_I2C_ERROR_AF (0x00000004U) /*!< ACKF error */ +#define HAL_I2C_ERROR_OVR (0x00000008U) /*!< OVR error */ +#define HAL_I2C_ERROR_DMA (0x00000010U) /*!< DMA transfer error */ +#define HAL_I2C_ERROR_TIMEOUT (0x00000020U) /*!< Timeout error */ +#define HAL_I2C_ERROR_SIZE (0x00000040U) /*!< Size Management error */ +#define HAL_I2C_ERROR_DMA_PARAM (0x00000080U) /*!< DMA Parameter Error */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) +#define HAL_I2C_ERROR_INVALID_CALLBACK (0x00000100U) /*!< Invalid Callback error */ +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +#define HAL_I2C_ERROR_INVALID_PARAM (0x00000200U) /*!< Invalid Parameters error */ +/** + * @} + */ + +/** @defgroup I2C_handle_Structure_definition I2C handle Structure definition + * @brief I2C handle Structure definition + * @{ + */ +typedef struct __I2C_HandleTypeDef +{ + I2C_TypeDef *Instance; /*!< I2C registers base address */ + + I2C_InitTypeDef 
Init; /*!< I2C communication parameters */ + + uint8_t *pBuffPtr; /*!< Pointer to I2C transfer buffer */ + + uint16_t XferSize; /*!< I2C transfer size */ + + __IO uint16_t XferCount; /*!< I2C transfer counter */ + + __IO uint32_t XferOptions; /*!< I2C sequantial transfer options, this parameter can + be a value of @ref I2C_XFEROPTIONS */ + + __IO uint32_t PreviousState; /*!< I2C communication Previous state */ + + HAL_StatusTypeDef(*XferISR)(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, uint32_t ITSources); /*!< I2C transfer IRQ handler function pointer */ + + DMA_HandleTypeDef *hdmatx; /*!< I2C Tx DMA handle parameters */ + + DMA_HandleTypeDef *hdmarx; /*!< I2C Rx DMA handle parameters */ + + HAL_LockTypeDef Lock; /*!< I2C locking object */ + + __IO HAL_I2C_StateTypeDef State; /*!< I2C communication state */ + + __IO HAL_I2C_ModeTypeDef Mode; /*!< I2C communication mode */ + + __IO uint32_t ErrorCode; /*!< I2C Error code */ + + __IO uint32_t AddrEventCount; /*!< I2C Address Event counter */ + +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + void (* MasterTxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Master Tx Transfer completed callback */ + void (* MasterRxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Master Rx Transfer completed callback */ + void (* SlaveTxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Slave Tx Transfer completed callback */ + void (* SlaveRxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Slave Rx Transfer completed callback */ + void (* ListenCpltCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Listen Complete callback */ + void (* MemTxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Memory Tx Transfer completed callback */ + void (* MemRxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Memory Rx Transfer completed callback */ + void (* ErrorCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Error callback */ + void (* AbortCpltCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Abort callback */ + + void (* AddrCallback)(struct __I2C_HandleTypeDef *hi2c, uint8_t TransferDirection, uint16_t AddrMatchCode); /*!< I2C Slave Address Match callback */ + + void (* MspInitCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Msp Init callback */ + void (* MspDeInitCallback)(struct __I2C_HandleTypeDef *hi2c); /*!< I2C Msp DeInit callback */ + +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +} I2C_HandleTypeDef; + +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) +/** + * @brief HAL I2C Callback ID enumeration definition + */ +typedef enum +{ + HAL_I2C_MASTER_TX_COMPLETE_CB_ID = 0x00U, /*!< I2C Master Tx Transfer completed callback ID */ + HAL_I2C_MASTER_RX_COMPLETE_CB_ID = 0x01U, /*!< I2C Master Rx Transfer completed callback ID */ + HAL_I2C_SLAVE_TX_COMPLETE_CB_ID = 0x02U, /*!< I2C Slave Tx Transfer completed callback ID */ + HAL_I2C_SLAVE_RX_COMPLETE_CB_ID = 0x03U, /*!< I2C Slave Rx Transfer completed callback ID */ + HAL_I2C_LISTEN_COMPLETE_CB_ID = 0x04U, /*!< I2C Listen Complete callback ID */ + HAL_I2C_MEM_TX_COMPLETE_CB_ID = 0x05U, /*!< I2C Memory Tx Transfer callback ID */ + HAL_I2C_MEM_RX_COMPLETE_CB_ID = 0x06U, /*!< I2C Memory Rx Transfer completed callback ID */ + HAL_I2C_ERROR_CB_ID = 0x07U, /*!< I2C Error callback ID */ + HAL_I2C_ABORT_CB_ID = 0x08U, /*!< I2C Abort callback ID */ + + HAL_I2C_MSPINIT_CB_ID = 0x09U, /*!< I2C Msp Init callback ID */ + HAL_I2C_MSPDEINIT_CB_ID = 0x0AU /*!< I2C Msp DeInit callback ID */ + +} HAL_I2C_CallbackIDTypeDef; + +/** + * @brief HAL I2C Callback pointer 
definition + */ +typedef void (*pI2C_CallbackTypeDef)(I2C_HandleTypeDef *hi2c); /*!< pointer to an I2C callback function */ +typedef void (*pI2C_AddrCallbackTypeDef)(I2C_HandleTypeDef *hi2c, uint8_t TransferDirection, uint16_t AddrMatchCode); /*!< pointer to an I2C Address Match callback function */ + +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** + * @} + */ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup I2C_Exported_Constants I2C Exported Constants + * @{ + */ + +/** @defgroup I2C_XFEROPTIONS I2C Sequential Transfer Options + * @{ + */ +#define I2C_FIRST_FRAME ((uint32_t)I2C_SOFTEND_MODE) +#define I2C_FIRST_AND_NEXT_FRAME ((uint32_t)(I2C_RELOAD_MODE | I2C_SOFTEND_MODE)) +#define I2C_NEXT_FRAME ((uint32_t)(I2C_RELOAD_MODE | I2C_SOFTEND_MODE)) +#define I2C_FIRST_AND_LAST_FRAME ((uint32_t)I2C_AUTOEND_MODE) +#define I2C_LAST_FRAME ((uint32_t)I2C_AUTOEND_MODE) +#define I2C_LAST_FRAME_NO_STOP ((uint32_t)I2C_SOFTEND_MODE) + +/* List of XferOptions in usage of : + * 1- Restart condition in all use cases (direction change or not) + */ +#define I2C_OTHER_FRAME (0x000000AAU) +#define I2C_OTHER_AND_LAST_FRAME (0x0000AA00U) +/** + * @} + */ + +/** @defgroup I2C_ADDRESSING_MODE I2C Addressing Mode + * @{ + */ +#define I2C_ADDRESSINGMODE_7BIT (0x00000001U) +#define I2C_ADDRESSINGMODE_10BIT (0x00000002U) +/** + * @} + */ + +/** @defgroup I2C_DUAL_ADDRESSING_MODE I2C Dual Addressing Mode + * @{ + */ +#define I2C_DUALADDRESS_DISABLE (0x00000000U) +#define I2C_DUALADDRESS_ENABLE I2C_OAR2_OA2EN +/** + * @} + */ + +/** @defgroup I2C_OWN_ADDRESS2_MASKS I2C Own Address2 Masks + * @{ + */ +#define I2C_OA2_NOMASK ((uint8_t)0x00U) +#define I2C_OA2_MASK01 ((uint8_t)0x01U) +#define I2C_OA2_MASK02 ((uint8_t)0x02U) +#define I2C_OA2_MASK03 ((uint8_t)0x03U) +#define I2C_OA2_MASK04 ((uint8_t)0x04U) +#define I2C_OA2_MASK05 ((uint8_t)0x05U) +#define I2C_OA2_MASK06 ((uint8_t)0x06U) +#define I2C_OA2_MASK07 ((uint8_t)0x07U) +/** + * @} + */ + +/** @defgroup I2C_GENERAL_CALL_ADDRESSING_MODE I2C General Call Addressing Mode + * @{ + */ +#define I2C_GENERALCALL_DISABLE (0x00000000U) +#define I2C_GENERALCALL_ENABLE I2C_CR1_GCEN +/** + * @} + */ + +/** @defgroup I2C_NOSTRETCH_MODE I2C No-Stretch Mode + * @{ + */ +#define I2C_NOSTRETCH_DISABLE (0x00000000U) +#define I2C_NOSTRETCH_ENABLE I2C_CR1_NOSTRETCH +/** + * @} + */ + +/** @defgroup I2C_MEMORY_ADDRESS_SIZE I2C Memory Address Size + * @{ + */ +#define I2C_MEMADD_SIZE_8BIT (0x00000001U) +#define I2C_MEMADD_SIZE_16BIT (0x00000002U) +/** + * @} + */ + +/** @defgroup I2C_XFERDIRECTION I2C Transfer Direction Master Point of View + * @{ + */ +#define I2C_DIRECTION_TRANSMIT (0x00000000U) +#define I2C_DIRECTION_RECEIVE (0x00000001U) +/** + * @} + */ + +/** @defgroup I2C_RELOAD_END_MODE I2C Reload End Mode + * @{ + */ +#define I2C_RELOAD_MODE I2C_CR2_RELOAD +#define I2C_AUTOEND_MODE I2C_CR2_AUTOEND +#define I2C_SOFTEND_MODE (0x00000000U) +/** + * @} + */ + +/** @defgroup I2C_START_STOP_MODE I2C Start or Stop Mode + * @{ + */ +#define I2C_NO_STARTSTOP (0x00000000U) +#define I2C_GENERATE_STOP (uint32_t)(0x80000000U | I2C_CR2_STOP) +#define I2C_GENERATE_START_READ (uint32_t)(0x80000000U | I2C_CR2_START | I2C_CR2_RD_WRN) +#define I2C_GENERATE_START_WRITE (uint32_t)(0x80000000U | I2C_CR2_START) +/** + * @} + */ + +/** @defgroup I2C_Interrupt_configuration_definition I2C Interrupt configuration definition + * @brief I2C Interrupt definition + * Elements values convention: 0xXXXXXXXX + * - XXXXXXXX : Interrupt control 
mask + * @{ + */ +#define I2C_IT_ERRI I2C_CR1_ERRIE +#define I2C_IT_TCI I2C_CR1_TCIE +#define I2C_IT_STOPI I2C_CR1_STOPIE +#define I2C_IT_NACKI I2C_CR1_NACKIE +#define I2C_IT_ADDRI I2C_CR1_ADDRIE +#define I2C_IT_RXI I2C_CR1_RXIE +#define I2C_IT_TXI I2C_CR1_TXIE +/** + * @} + */ + +/** @defgroup I2C_Flag_definition I2C Flag definition + * @{ + */ +#define I2C_FLAG_TXE I2C_ISR_TXE +#define I2C_FLAG_TXIS I2C_ISR_TXIS +#define I2C_FLAG_RXNE I2C_ISR_RXNE +#define I2C_FLAG_ADDR I2C_ISR_ADDR +#define I2C_FLAG_AF I2C_ISR_NACKF +#define I2C_FLAG_STOPF I2C_ISR_STOPF +#define I2C_FLAG_TC I2C_ISR_TC +#define I2C_FLAG_TCR I2C_ISR_TCR +#define I2C_FLAG_BERR I2C_ISR_BERR +#define I2C_FLAG_ARLO I2C_ISR_ARLO +#define I2C_FLAG_OVR I2C_ISR_OVR +#define I2C_FLAG_PECERR I2C_ISR_PECERR +#define I2C_FLAG_TIMEOUT I2C_ISR_TIMEOUT +#define I2C_FLAG_ALERT I2C_ISR_ALERT +#define I2C_FLAG_BUSY I2C_ISR_BUSY +#define I2C_FLAG_DIR I2C_ISR_DIR +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ + +/** @defgroup I2C_Exported_Macros I2C Exported Macros + * @{ + */ + +/** @brief Reset I2C handle state. + * @param __HANDLE__ specifies the I2C Handle. + * @retval None + */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) +#define __HAL_I2C_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->State = HAL_I2C_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_I2C_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_I2C_STATE_RESET) +#endif + +/** @brief Enable the specified I2C interrupt. + * @param __HANDLE__ specifies the I2C Handle. + * @param __INTERRUPT__ specifies the interrupt source to enable. + * This parameter can be one of the following values: + * @arg @ref I2C_IT_ERRI Errors interrupt enable + * @arg @ref I2C_IT_TCI Transfer complete interrupt enable + * @arg @ref I2C_IT_STOPI STOP detection interrupt enable + * @arg @ref I2C_IT_NACKI NACK received interrupt enable + * @arg @ref I2C_IT_ADDRI Address match interrupt enable + * @arg @ref I2C_IT_RXI RX interrupt enable + * @arg @ref I2C_IT_TXI TX interrupt enable + * + * @retval None + */ +#define __HAL_I2C_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR1 |= (__INTERRUPT__)) + +/** @brief Disable the specified I2C interrupt. + * @param __HANDLE__ specifies the I2C Handle. + * @param __INTERRUPT__ specifies the interrupt source to disable. + * This parameter can be one of the following values: + * @arg @ref I2C_IT_ERRI Errors interrupt enable + * @arg @ref I2C_IT_TCI Transfer complete interrupt enable + * @arg @ref I2C_IT_STOPI STOP detection interrupt enable + * @arg @ref I2C_IT_NACKI NACK received interrupt enable + * @arg @ref I2C_IT_ADDRI Address match interrupt enable + * @arg @ref I2C_IT_RXI RX interrupt enable + * @arg @ref I2C_IT_TXI TX interrupt enable + * + * @retval None + */ +#define __HAL_I2C_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR1 &= (~(__INTERRUPT__))) + +/** @brief Check whether the specified I2C interrupt source is enabled or not. + * @param __HANDLE__ specifies the I2C Handle. + * @param __INTERRUPT__ specifies the I2C interrupt source to check. 
+ * This parameter can be one of the following values: + * @arg @ref I2C_IT_ERRI Errors interrupt enable + * @arg @ref I2C_IT_TCI Transfer complete interrupt enable + * @arg @ref I2C_IT_STOPI STOP detection interrupt enable + * @arg @ref I2C_IT_NACKI NACK received interrupt enable + * @arg @ref I2C_IT_ADDRI Address match interrupt enable + * @arg @ref I2C_IT_RXI RX interrupt enable + * @arg @ref I2C_IT_TXI TX interrupt enable + * + * @retval The new state of __INTERRUPT__ (SET or RESET). + */ +#define __HAL_I2C_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((((__HANDLE__)->Instance->CR1 & (__INTERRUPT__)) == (__INTERRUPT__)) ? SET : RESET) + +/** @brief Check whether the specified I2C flag is set or not. + * @param __HANDLE__ specifies the I2C Handle. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg @ref I2C_FLAG_TXE Transmit data register empty + * @arg @ref I2C_FLAG_TXIS Transmit interrupt status + * @arg @ref I2C_FLAG_RXNE Receive data register not empty + * @arg @ref I2C_FLAG_ADDR Address matched (slave mode) + * @arg @ref I2C_FLAG_AF Acknowledge failure received flag + * @arg @ref I2C_FLAG_STOPF STOP detection flag + * @arg @ref I2C_FLAG_TC Transfer complete (master mode) + * @arg @ref I2C_FLAG_TCR Transfer complete reload + * @arg @ref I2C_FLAG_BERR Bus error + * @arg @ref I2C_FLAG_ARLO Arbitration lost + * @arg @ref I2C_FLAG_OVR Overrun/Underrun + * @arg @ref I2C_FLAG_PECERR PEC error in reception + * @arg @ref I2C_FLAG_TIMEOUT Timeout or Tlow detection flag + * @arg @ref I2C_FLAG_ALERT SMBus alert + * @arg @ref I2C_FLAG_BUSY Bus busy + * @arg @ref I2C_FLAG_DIR Transfer direction (slave mode) + * + * @retval The new state of __FLAG__ (SET or RESET). + */ +#define I2C_FLAG_MASK (0x0001FFFFU) +#define __HAL_I2C_GET_FLAG(__HANDLE__, __FLAG__) (((((__HANDLE__)->Instance->ISR) & (__FLAG__)) == (__FLAG__)) ? SET : RESET) + +/** @brief Clear the I2C pending flags which are cleared by writing 1 in a specific bit. + * @param __HANDLE__ specifies the I2C Handle. + * @param __FLAG__ specifies the flag to clear. + * This parameter can be any combination of the following values: + * @arg @ref I2C_FLAG_TXE Transmit data register empty + * @arg @ref I2C_FLAG_ADDR Address matched (slave mode) + * @arg @ref I2C_FLAG_AF Acknowledge failure received flag + * @arg @ref I2C_FLAG_STOPF STOP detection flag + * @arg @ref I2C_FLAG_BERR Bus error + * @arg @ref I2C_FLAG_ARLO Arbitration lost + * @arg @ref I2C_FLAG_OVR Overrun/Underrun + * @arg @ref I2C_FLAG_PECERR PEC error in reception + * @arg @ref I2C_FLAG_TIMEOUT Timeout or Tlow detection flag + * @arg @ref I2C_FLAG_ALERT SMBus alert + * + * @retval None + */ +#define __HAL_I2C_CLEAR_FLAG(__HANDLE__, __FLAG__) (((__FLAG__) == I2C_FLAG_TXE) ? ((__HANDLE__)->Instance->ISR |= (__FLAG__)) \ + : ((__HANDLE__)->Instance->ICR = (__FLAG__))) + +/** @brief Enable the specified I2C peripheral. + * @param __HANDLE__ specifies the I2C Handle. + * @retval None + */ +#define __HAL_I2C_ENABLE(__HANDLE__) (SET_BIT((__HANDLE__)->Instance->CR1, I2C_CR1_PE)) + +/** @brief Disable the specified I2C peripheral. + * @param __HANDLE__ specifies the I2C Handle. + * @retval None + */ +#define __HAL_I2C_DISABLE(__HANDLE__) (CLEAR_BIT((__HANDLE__)->Instance->CR1, I2C_CR1_PE)) + +/** @brief Generate a Non-Acknowledge I2C peripheral in Slave mode. + * @param __HANDLE__ specifies the I2C Handle. 
+ * @retval None + */ +#define __HAL_I2C_GENERATE_NACK(__HANDLE__) (SET_BIT((__HANDLE__)->Instance->CR2, I2C_CR2_NACK)) +/** + * @} + */ + +/* Include I2C HAL Extended module */ +#include "stm32f7xx_hal_i2c_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup I2C_Exported_Functions + * @{ + */ + +/** @addtogroup I2C_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ +/* Initialization and de-initialization functions******************************/ +HAL_StatusTypeDef HAL_I2C_Init(I2C_HandleTypeDef *hi2c); +HAL_StatusTypeDef HAL_I2C_DeInit(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MspInit(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MspDeInit(I2C_HandleTypeDef *hi2c); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_I2C_RegisterCallback(I2C_HandleTypeDef *hi2c, HAL_I2C_CallbackIDTypeDef CallbackID, pI2C_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_I2C_UnRegisterCallback(I2C_HandleTypeDef *hi2c, HAL_I2C_CallbackIDTypeDef CallbackID); + +HAL_StatusTypeDef HAL_I2C_RegisterAddrCallback(I2C_HandleTypeDef *hi2c, pI2C_AddrCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_I2C_UnRegisterAddrCallback(I2C_HandleTypeDef *hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @addtogroup I2C_Exported_Functions_Group2 Input and Output operation functions + * @{ + */ +/* IO operation functions ****************************************************/ +/******* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_Master_Receive(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_Slave_Transmit(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_Slave_Receive(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_Mem_Write(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_Mem_Read(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_IsDeviceReady(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint32_t Trials, uint32_t Timeout); + +/******* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Master_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Slave_Transmit_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Slave_Receive_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Mem_Write_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Mem_Read_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size); + +HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t XferOptions); 
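/* --- Illustrative usage note (editor's sketch, not part of the ST header) ---
 * A minimal blocking master access built only from the prototypes declared
 * above. The handle, the 7-bit slave address 0x48 and the register offset
 * 0x00 are assumptions for the example.
 */
static HAL_StatusTypeDef example_i2c_read_one_byte(I2C_HandleTypeDef *hi2c, uint8_t *value)
{
  const uint16_t dev_addr = (uint16_t)(0x48U << 1);  /* HAL expects the 7-bit address pre-shifted left */

  /* Probe the slave first: up to 3 trials with a 10 ms timeout each. */
  if (HAL_I2C_IsDeviceReady(hi2c, dev_addr, 3U, 10U) != HAL_OK)
  {
    return HAL_ERROR;
  }

  /* Read one byte from register 0x00 using an 8-bit memory address. */
  return HAL_I2C_Mem_Read(hi2c, dev_addr, 0x00U, I2C_MEMADD_SIZE_8BIT, value, 1U, 100U);
}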
+HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Transmit_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Receive_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_EnableListen_IT(I2C_HandleTypeDef *hi2c); +HAL_StatusTypeDef HAL_I2C_DisableListen_IT(I2C_HandleTypeDef *hi2c); +HAL_StatusTypeDef HAL_I2C_Master_Abort_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress); + +/******* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Master_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Slave_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Slave_Receive_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Mem_Write_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Mem_Read_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size); + +HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t XferOptions); +/** + * @} + */ + +/** @addtogroup I2C_IRQ_Handler_and_Callbacks IRQ Handler and Callbacks + * @{ + */ +/******* I2C IRQHandler and Callbacks used in non blocking modes (Interrupt and DMA) */ +void HAL_I2C_EV_IRQHandler(I2C_HandleTypeDef *hi2c); +void HAL_I2C_ER_IRQHandler(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MasterTxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MasterRxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_SlaveTxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_SlaveRxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_AddrCallback(I2C_HandleTypeDef *hi2c, uint8_t TransferDirection, uint16_t AddrMatchCode); +void HAL_I2C_ListenCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MemTxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MemRxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_ErrorCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_AbortCpltCallback(I2C_HandleTypeDef *hi2c); +/** + * @} + */ + +/** @addtogroup I2C_Exported_Functions_Group3 Peripheral State, Mode and Error functions + * @{ + */ +/* Peripheral State, Mode and Error functions *********************************/ +HAL_I2C_StateTypeDef HAL_I2C_GetState(I2C_HandleTypeDef *hi2c); +HAL_I2C_ModeTypeDef HAL_I2C_GetMode(I2C_HandleTypeDef *hi2c); +uint32_t HAL_I2C_GetError(I2C_HandleTypeDef *hi2c); + +/** + * @} + */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup I2C_Private_Constants I2C Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private 
macros ------------------------------------------------------------*/ +/** @defgroup I2C_Private_Macro I2C Private Macros + * @{ + */ + +#define IS_I2C_ADDRESSING_MODE(MODE) (((MODE) == I2C_ADDRESSINGMODE_7BIT) || \ + ((MODE) == I2C_ADDRESSINGMODE_10BIT)) + +#define IS_I2C_DUAL_ADDRESS(ADDRESS) (((ADDRESS) == I2C_DUALADDRESS_DISABLE) || \ + ((ADDRESS) == I2C_DUALADDRESS_ENABLE)) + +#define IS_I2C_OWN_ADDRESS2_MASK(MASK) (((MASK) == I2C_OA2_NOMASK) || \ + ((MASK) == I2C_OA2_MASK01) || \ + ((MASK) == I2C_OA2_MASK02) || \ + ((MASK) == I2C_OA2_MASK03) || \ + ((MASK) == I2C_OA2_MASK04) || \ + ((MASK) == I2C_OA2_MASK05) || \ + ((MASK) == I2C_OA2_MASK06) || \ + ((MASK) == I2C_OA2_MASK07)) + +#define IS_I2C_GENERAL_CALL(CALL) (((CALL) == I2C_GENERALCALL_DISABLE) || \ + ((CALL) == I2C_GENERALCALL_ENABLE)) + +#define IS_I2C_NO_STRETCH(STRETCH) (((STRETCH) == I2C_NOSTRETCH_DISABLE) || \ + ((STRETCH) == I2C_NOSTRETCH_ENABLE)) + +#define IS_I2C_MEMADD_SIZE(SIZE) (((SIZE) == I2C_MEMADD_SIZE_8BIT) || \ + ((SIZE) == I2C_MEMADD_SIZE_16BIT)) + +#define IS_TRANSFER_MODE(MODE) (((MODE) == I2C_RELOAD_MODE) || \ + ((MODE) == I2C_AUTOEND_MODE) || \ + ((MODE) == I2C_SOFTEND_MODE)) + +#define IS_TRANSFER_REQUEST(REQUEST) (((REQUEST) == I2C_GENERATE_STOP) || \ + ((REQUEST) == I2C_GENERATE_START_READ) || \ + ((REQUEST) == I2C_GENERATE_START_WRITE) || \ + ((REQUEST) == I2C_NO_STARTSTOP)) + +#define IS_I2C_TRANSFER_OPTIONS_REQUEST(REQUEST) (((REQUEST) == I2C_FIRST_FRAME) || \ + ((REQUEST) == I2C_FIRST_AND_NEXT_FRAME) || \ + ((REQUEST) == I2C_NEXT_FRAME) || \ + ((REQUEST) == I2C_FIRST_AND_LAST_FRAME) || \ + ((REQUEST) == I2C_LAST_FRAME) || \ + ((REQUEST) == I2C_LAST_FRAME_NO_STOP) || \ + IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(REQUEST)) + +#define IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(REQUEST) (((REQUEST) == I2C_OTHER_FRAME) || \ + ((REQUEST) == I2C_OTHER_AND_LAST_FRAME)) + +#define I2C_RESET_CR2(__HANDLE__) ((__HANDLE__)->Instance->CR2 &= (uint32_t)~((uint32_t)(I2C_CR2_SADD | I2C_CR2_HEAD10R | I2C_CR2_NBYTES | I2C_CR2_RELOAD | I2C_CR2_RD_WRN))) + +#define I2C_GET_ADDR_MATCH(__HANDLE__) ((uint16_t)(((__HANDLE__)->Instance->ISR & I2C_ISR_ADDCODE) >> 16U)) +#define I2C_GET_DIR(__HANDLE__) ((uint8_t)(((__HANDLE__)->Instance->ISR & I2C_ISR_DIR) >> 16U)) +#define I2C_GET_STOP_MODE(__HANDLE__) ((__HANDLE__)->Instance->CR2 & I2C_CR2_AUTOEND) +#define I2C_GET_OWN_ADDRESS1(__HANDLE__) ((uint16_t)((__HANDLE__)->Instance->OAR1 & I2C_OAR1_OA1)) +#define I2C_GET_OWN_ADDRESS2(__HANDLE__) ((uint16_t)((__HANDLE__)->Instance->OAR2 & I2C_OAR2_OA2)) + +#define IS_I2C_OWN_ADDRESS1(ADDRESS1) ((ADDRESS1) <= 0x000003FFU) +#define IS_I2C_OWN_ADDRESS2(ADDRESS2) ((ADDRESS2) <= (uint16_t)0x00FFU) + +#define I2C_MEM_ADD_MSB(__ADDRESS__) ((uint8_t)((uint16_t)(((uint16_t)((__ADDRESS__) & (uint16_t)(0xFF00U))) >> 8U))) +#define I2C_MEM_ADD_LSB(__ADDRESS__) ((uint8_t)((uint16_t)((__ADDRESS__) & (uint16_t)(0x00FFU)))) + +#define I2C_GENERATE_START(__ADDMODE__,__ADDRESS__) (((__ADDMODE__) == I2C_ADDRESSINGMODE_7BIT) ? (uint32_t)((((uint32_t)(__ADDRESS__) & (I2C_CR2_SADD)) | (I2C_CR2_START) | (I2C_CR2_AUTOEND)) & (~I2C_CR2_RD_WRN)) : \ + (uint32_t)((((uint32_t)(__ADDRESS__) & (I2C_CR2_SADD)) | (I2C_CR2_ADD10) | (I2C_CR2_START)) & (~I2C_CR2_RD_WRN))) + +#define I2C_CHECK_FLAG(__ISR__, __FLAG__) ((((__ISR__) & ((__FLAG__) & I2C_FLAG_MASK)) == ((__FLAG__) & I2C_FLAG_MASK)) ? SET : RESET) +#define I2C_CHECK_IT_SOURCE(__CR1__, __IT__) ((((__CR1__) & (__IT__)) == (__IT__)) ? 
SET : RESET) +/** + * @} + */ + +/* Private Functions ---------------------------------------------------------*/ +/** @defgroup I2C_Private_Functions I2C Private Functions + * @{ + */ +/* Private functions are defined in stm32f7xx_hal_i2c.c file */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* STM32F7xx_HAL_I2C_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c_ex.h new file mode 100644 index 000000000..f45be67b7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c_ex.h @@ -0,0 +1,208 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_i2c_ex.h + * @author MCD Application Team + * @brief Header file of I2C HAL Extended module. + ****************************************************************************** + * @attention + * + *
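/* --- Illustrative usage note (editor's sketch, not part of the ST headers) ---
 * How the XferOptions values declared in stm32f7xx_hal_i2c.h above combine
 * for a repeated-start "write register index, then read" sequence in
 * interrupt mode. Handle, address and buffers are assumptions; this is one
 * possible wiring, with the read issued from the weak Tx-complete callback.
 */
static uint8_t example_reg_index = 0x00U;
static uint8_t example_rx_buf[2];

static void example_i2c_start_write_then_read(I2C_HandleTypeDef *hi2c)
{
  /* First frame: send the register index and keep the bus (SOFTEND, no STOP). */
  (void)HAL_I2C_Master_Seq_Transmit_IT(hi2c, (uint16_t)(0x48U << 1),
                                       &example_reg_index, 1U, I2C_FIRST_FRAME);
}

void HAL_I2C_MasterTxCpltCallback(I2C_HandleTypeDef *hi2c)
{
  /* Last frame: repeated START for the read, then an automatic STOP (AUTOEND). */
  (void)HAL_I2C_Master_Seq_Receive_IT(hi2c, (uint16_t)(0x48U << 1),
                                      example_rx_buf, 2U, I2C_LAST_FRAME);
}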

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_I2C_EX_H +#define STM32F7xx_HAL_I2C_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup I2CEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup I2CEx_Exported_Constants I2C Extended Exported Constants + * @{ + */ + +/** @defgroup I2CEx_Analog_Filter I2C Extended Analog Filter + * @{ + */ +#define I2C_ANALOGFILTER_ENABLE 0x00000000U +#define I2C_ANALOGFILTER_DISABLE I2C_CR1_ANFOFF +/** + * @} + */ + +/** @defgroup I2CEx_FastModePlus I2C Extended Fast Mode Plus + * @{ + */ +#define I2C_FMP_NOT_SUPPORTED 0xAAAA0000U /*!< Fast Mode Plus not supported */ +#if defined(SYSCFG_PMC_I2C_PB6_FMP) +#define I2C_FASTMODEPLUS_PB6 SYSCFG_PMC_I2C_PB6_FMP /*!< Enable Fast Mode Plus on PB6 */ +#define I2C_FASTMODEPLUS_PB7 SYSCFG_PMC_I2C_PB7_FMP /*!< Enable Fast Mode Plus on PB7 */ +#else +#define I2C_FASTMODEPLUS_PB6 (uint32_t)(0x00000004U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus PB6 not supported */ +#define I2C_FASTMODEPLUS_PB7 (uint32_t)(0x00000008U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus PB7 not supported */ +#endif +#if defined(SYSCFG_PMC_I2C_PB8_FMP) +#define I2C_FASTMODEPLUS_PB8 SYSCFG_PMC_I2C_PB8_FMP /*!< Enable Fast Mode Plus on PB8 */ +#define I2C_FASTMODEPLUS_PB9 SYSCFG_PMC_I2C_PB9_FMP /*!< Enable Fast Mode Plus on PB9 */ +#else +#define I2C_FASTMODEPLUS_PB8 (uint32_t)(0x00000010U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus PB8 not supported */ +#define I2C_FASTMODEPLUS_PB9 (uint32_t)(0x00000012U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus PB9 not supported */ +#endif +#if defined(SYSCFG_PMC_I2C1_FMP) +#define I2C_FASTMODEPLUS_I2C1 SYSCFG_PMC_I2C1_FMP /*!< Enable Fast Mode Plus on I2C1 pins */ +#else +#define I2C_FASTMODEPLUS_I2C1 (uint32_t)(0x00000100U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus I2C1 not supported */ +#endif +#if defined(SYSCFG_PMC_I2C2_FMP) +#define I2C_FASTMODEPLUS_I2C2 SYSCFG_PMC_I2C2_FMP /*!< Enable Fast Mode Plus on I2C2 pins */ +#else +#define I2C_FASTMODEPLUS_I2C2 (uint32_t)(0x00000200U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus I2C2 not supported */ +#endif +#if defined(SYSCFG_PMC_I2C3_FMP) +#define I2C_FASTMODEPLUS_I2C3 SYSCFG_PMC_I2C3_FMP /*!< Enable Fast Mode Plus on I2C3 pins */ +#else +#define I2C_FASTMODEPLUS_I2C3 (uint32_t)(0x00000400U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus I2C3 not supported */ +#endif +#if defined(SYSCFG_PMC_I2C4_FMP) +#define I2C_FASTMODEPLUS_I2C4 SYSCFG_PMC_I2C4_FMP /*!< Enable Fast Mode Plus on I2C4 pins */ +#else +#define I2C_FASTMODEPLUS_I2C4 (uint32_t)(0x00000800U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus I2C4 not supported */ +#endif +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup 
I2CEx_Exported_Functions I2C Extended Exported Functions + * @{ + */ + +/** @addtogroup I2CEx_Exported_Functions_Group1 Extended features functions + * @brief Extended features functions + * @{ + */ + +/* Peripheral Control functions ************************************************/ +HAL_StatusTypeDef HAL_I2CEx_ConfigAnalogFilter(I2C_HandleTypeDef *hi2c, uint32_t AnalogFilter); +HAL_StatusTypeDef HAL_I2CEx_ConfigDigitalFilter(I2C_HandleTypeDef *hi2c, uint32_t DigitalFilter); +#if (defined(SYSCFG_PMC_I2C_PB6_FMP) || defined(SYSCFG_PMC_I2C_PB7_FMP)) || (defined(SYSCFG_PMC_I2C_PB8_FMP) || defined(SYSCFG_PMC_I2C_PB9_FMP)) || (defined(SYSCFG_PMC_I2C1_FMP)) || (defined(SYSCFG_PMC_I2C2_FMP)) || defined(SYSCFG_PMC_I2C3_FMP) || defined(SYSCFG_PMC_I2C4_FMP) +void HAL_I2CEx_EnableFastModePlus(uint32_t ConfigFastModePlus); +void HAL_I2CEx_DisableFastModePlus(uint32_t ConfigFastModePlus); +#endif + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup I2CEx_Private_Constants I2C Extended Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup I2CEx_Private_Macro I2C Extended Private Macros + * @{ + */ +#define IS_I2C_ANALOG_FILTER(FILTER) (((FILTER) == I2C_ANALOGFILTER_ENABLE) || \ + ((FILTER) == I2C_ANALOGFILTER_DISABLE)) + +#define IS_I2C_DIGITAL_FILTER(FILTER) ((FILTER) <= 0x0000000FU) + +#if (defined(SYSCFG_PMC_I2C1_FMP) && defined(SYSCFG_PMC_I2C2_FMP) && defined(SYSCFG_PMC_I2C3_FMP) && defined(SYSCFG_PMC_I2C4_FMP)) +#define IS_I2C_FASTMODEPLUS(__CONFIG__) ((((__CONFIG__) & I2C_FASTMODEPLUS_PB6) == I2C_FASTMODEPLUS_PB6) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB7) == I2C_FASTMODEPLUS_PB7) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB8) == I2C_FASTMODEPLUS_PB8) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB9) == I2C_FASTMODEPLUS_PB9) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C1) == I2C_FASTMODEPLUS_I2C1) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C2) == I2C_FASTMODEPLUS_I2C2) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C3) == I2C_FASTMODEPLUS_I2C3) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C4) == I2C_FASTMODEPLUS_I2C4)) +#elif defined(SYSCFG_PMC_I2C1_FMP) && defined(SYSCFG_PMC_I2C2_FMP) && defined(SYSCFG_PMC_I2C3_FMP) +#define IS_I2C_FASTMODEPLUS(__CONFIG__) ((((__CONFIG__) & I2C_FASTMODEPLUS_PB6) == I2C_FASTMODEPLUS_PB6) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB7) == I2C_FASTMODEPLUS_PB7) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB8) == I2C_FASTMODEPLUS_PB8) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB9) == I2C_FASTMODEPLUS_PB9) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C1) == I2C_FASTMODEPLUS_I2C1) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C2) == I2C_FASTMODEPLUS_I2C2) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C3) == I2C_FASTMODEPLUS_I2C3)) +#elif defined(SYSCFG_PMC_I2C1_FMP) && defined(SYSCFG_PMC_I2C2_FMP) +#define IS_I2C_FASTMODEPLUS(__CONFIG__) ((((__CONFIG__) & I2C_FASTMODEPLUS_PB6) == I2C_FASTMODEPLUS_PB6) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB7) == I2C_FASTMODEPLUS_PB7) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB8) == I2C_FASTMODEPLUS_PB8) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB9) == I2C_FASTMODEPLUS_PB9) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C1) == I2C_FASTMODEPLUS_I2C1) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C2) == I2C_FASTMODEPLUS_I2C2)) +#elif defined(SYSCFG_PMC_I2C1_FMP) +#define IS_I2C_FASTMODEPLUS(__CONFIG__) ((((__CONFIG__) & I2C_FASTMODEPLUS_PB6) == I2C_FASTMODEPLUS_PB6) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB7) == 
I2C_FASTMODEPLUS_PB7) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB8) == I2C_FASTMODEPLUS_PB8) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB9) == I2C_FASTMODEPLUS_PB9) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C1) == I2C_FASTMODEPLUS_I2C1)) +#endif /* SYSCFG_PMC_I2C1_FMP && SYSCFG_PMC_I2C2_FMP && SYSCFG_PMC_I2C3_FMP && SYSCFG_PMC_I2C4_FMP */ + + + +/** + * @} + */ + +/* Private Functions ---------------------------------------------------------*/ +/** @defgroup I2CEx_Private_Functions I2C Extended Private Functions + * @{ + */ +/* Private functions are defined in stm32f7xx_hal_i2c_ex.c file */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_I2C_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_iwdg.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_iwdg.h new file mode 100644 index 000000000..7ceb39568 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_iwdg.h @@ -0,0 +1,242 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_iwdg.h + * @author MCD Application Team + * @brief Header file of IWDG HAL module. + ****************************************************************************** + * @attention + * + *
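/* --- Illustrative usage note (editor's sketch, not part of the ST headers) ---
 * The extended filter API declared in stm32f7xx_hal_i2c_ex.h above is
 * normally called right after HAL_I2C_Init(); the handle is an assumption.
 */
static void example_i2c_config_filters(I2C_HandleTypeDef *hi2c)
{
  /* Analog noise filter on, digital filter off (0 DNF clock ticks). */
  (void)HAL_I2CEx_ConfigAnalogFilter(hi2c, I2C_ANALOGFILTER_ENABLE);
  (void)HAL_I2CEx_ConfigDigitalFilter(hi2c, 0U);
}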

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_IWDG_H +#define STM32F7xx_HAL_IWDG_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup IWDG IWDG + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup IWDG_Exported_Types IWDG Exported Types + * @{ + */ + +/** + * @brief IWDG Init structure definition + */ +typedef struct +{ + uint32_t Prescaler; /*!< Select the prescaler of the IWDG. + This parameter can be a value of @ref IWDG_Prescaler */ + + uint32_t Reload; /*!< Specifies the IWDG down-counter reload value. + This parameter must be a number between Min_Data = 0 and Max_Data = 0x0FFF */ + + uint32_t Window; /*!< Specifies the window value to be compared to the down-counter. + This parameter must be a number between Min_Data = 0 and Max_Data = 0x0FFF */ + +} IWDG_InitTypeDef; + +/** + * @brief IWDG Handle Structure definition + */ +typedef struct +{ + IWDG_TypeDef *Instance; /*!< Register base address */ + + IWDG_InitTypeDef Init; /*!< IWDG required parameters */ +} IWDG_HandleTypeDef; + + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup IWDG_Exported_Constants IWDG Exported Constants + * @{ + */ + +/** @defgroup IWDG_Prescaler IWDG Prescaler + * @{ + */ +#define IWDG_PRESCALER_4 0x00000000u /*!< IWDG prescaler set to 4 */ +#define IWDG_PRESCALER_8 IWDG_PR_PR_0 /*!< IWDG prescaler set to 8 */ +#define IWDG_PRESCALER_16 IWDG_PR_PR_1 /*!< IWDG prescaler set to 16 */ +#define IWDG_PRESCALER_32 (IWDG_PR_PR_1 | IWDG_PR_PR_0) /*!< IWDG prescaler set to 32 */ +#define IWDG_PRESCALER_64 IWDG_PR_PR_2 /*!< IWDG prescaler set to 64 */ +#define IWDG_PRESCALER_128 (IWDG_PR_PR_2 | IWDG_PR_PR_0) /*!< IWDG prescaler set to 128 */ +#define IWDG_PRESCALER_256 (IWDG_PR_PR_2 | IWDG_PR_PR_1) /*!< IWDG prescaler set to 256 */ + +/** + * @} + */ + +/** @defgroup IWDG_Window_option IWDG Window option + * @{ + */ +#define IWDG_WINDOW_DISABLE IWDG_WINR_WIN +/** + * @} + */ + + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup IWDG_Exported_Macros IWDG Exported Macros + * @{ + */ + +/** + * @brief Enable the IWDG peripheral. + * @param __HANDLE__ IWDG handle + * @retval None + */ +#define __HAL_IWDG_START(__HANDLE__) WRITE_REG((__HANDLE__)->Instance->KR, IWDG_KEY_ENABLE) + +/** + * @brief Reload IWDG counter with value defined in the reload register + * (write access to IWDG_PR, IWDG_RLR and IWDG_WINR registers disabled). 
+ * @param __HANDLE__ IWDG handle + * @retval None + */ +#define __HAL_IWDG_RELOAD_COUNTER(__HANDLE__) WRITE_REG((__HANDLE__)->Instance->KR, IWDG_KEY_RELOAD) + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup IWDG_Exported_Functions IWDG Exported Functions + * @{ + */ + +/** @defgroup IWDG_Exported_Functions_Group1 Initialization and Start functions + * @{ + */ +/* Initialization/Start functions ********************************************/ +HAL_StatusTypeDef HAL_IWDG_Init(IWDG_HandleTypeDef *hiwdg); +/** + * @} + */ + +/** @defgroup IWDG_Exported_Functions_Group2 IO operation functions + * @{ + */ +/* I/O operation functions ****************************************************/ +HAL_StatusTypeDef HAL_IWDG_Refresh(IWDG_HandleTypeDef *hiwdg); +/** + * @} + */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup IWDG_Private_Constants IWDG Private Constants + * @{ + */ + +/** + * @brief IWDG Key Register BitMask + */ +#define IWDG_KEY_RELOAD 0x0000AAAAu /*!< IWDG Reload Counter Enable */ +#define IWDG_KEY_ENABLE 0x0000CCCCu /*!< IWDG Peripheral Enable */ +#define IWDG_KEY_WRITE_ACCESS_ENABLE 0x00005555u /*!< IWDG KR Write Access Enable */ +#define IWDG_KEY_WRITE_ACCESS_DISABLE 0x00000000u /*!< IWDG KR Write Access Disable */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup IWDG_Private_Macros IWDG Private Macros + * @{ + */ + +/** + * @brief Enable write access to IWDG_PR, IWDG_RLR and IWDG_WINR registers. + * @param __HANDLE__ IWDG handle + * @retval None + */ +#define IWDG_ENABLE_WRITE_ACCESS(__HANDLE__) WRITE_REG((__HANDLE__)->Instance->KR, IWDG_KEY_WRITE_ACCESS_ENABLE) + +/** + * @brief Disable write access to IWDG_PR, IWDG_RLR and IWDG_WINR registers. + * @param __HANDLE__ IWDG handle + * @retval None + */ +#define IWDG_DISABLE_WRITE_ACCESS(__HANDLE__) WRITE_REG((__HANDLE__)->Instance->KR, IWDG_KEY_WRITE_ACCESS_DISABLE) + +/** + * @brief Check IWDG prescaler value. + * @param __PRESCALER__ IWDG prescaler value + * @retval None + */ +#define IS_IWDG_PRESCALER(__PRESCALER__) (((__PRESCALER__) == IWDG_PRESCALER_4) || \ + ((__PRESCALER__) == IWDG_PRESCALER_8) || \ + ((__PRESCALER__) == IWDG_PRESCALER_16) || \ + ((__PRESCALER__) == IWDG_PRESCALER_32) || \ + ((__PRESCALER__) == IWDG_PRESCALER_64) || \ + ((__PRESCALER__) == IWDG_PRESCALER_128)|| \ + ((__PRESCALER__) == IWDG_PRESCALER_256)) + +/** + * @brief Check IWDG reload value. + * @param __RELOAD__ IWDG reload value + * @retval None + */ +#define IS_IWDG_RELOAD(__RELOAD__) ((__RELOAD__) <= IWDG_RLR_RL) + +/** + * @brief Check IWDG window value. 
+ * @param __WINDOW__ IWDG window value + * @retval None + */ +#define IS_IWDG_WINDOW(__WINDOW__) ((__WINDOW__) <= IWDG_WINR_WIN) + + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_IWDG_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_jpeg.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_jpeg.h new file mode 100644 index 000000000..52022146d --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_jpeg.h @@ -0,0 +1,652 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_jpeg.h + * @author MCD Application Team + * @brief Header file of JPEG HAL module. + ****************************************************************************** + * @attention + * + *
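/* --- Illustrative usage note (editor's sketch, not part of the ST headers) ---
 * Typical use of the IWDG API declared in stm32f7xx_hal_iwdg.h above. With
 * the ~32 kHz LSI, prescaler 32 and reload 4095 give a timeout of roughly
 * 4 s; the handle name and timeout choice are assumptions.
 */
static IWDG_HandleTypeDef example_hiwdg;

static void example_iwdg_start(void)
{
  example_hiwdg.Instance       = IWDG;
  example_hiwdg.Init.Prescaler = IWDG_PRESCALER_32;
  example_hiwdg.Init.Reload    = 4095U;                /* maximum 12-bit reload value */
  example_hiwdg.Init.Window    = IWDG_WINDOW_DISABLE;  /* no window check             */

  /* HAL_IWDG_Init() also starts the watchdog; it cannot be stopped until reset. */
  (void)HAL_IWDG_Init(&example_hiwdg);
}

static void example_iwdg_kick(void)
{
  /* Call periodically (main loop or a low-priority task) before the timeout expires. */
  (void)HAL_IWDG_Refresh(&example_hiwdg);
}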

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_JPEG_H +#define STM32F7xx_HAL_JPEG_H + +#ifdef __cplusplus +extern "C" { +#endif +#if defined (JPEG) +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup JPEG + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup JPEG_Exported_Types JPEG Exported Types + * @{ + */ + +/** @defgroup JPEG_Configuration_Structure_definition JPEG Configuration for encoding Structure definition + * @brief JPEG encoding configuration Structure definition + * @{ + */ +typedef struct +{ + uint32_t ColorSpace; /*!< Image Color space : gray-scale, YCBCR, RGB or CMYK + This parameter can be a value of @ref JPEG_ColorSpace */ + + uint32_t ChromaSubsampling; /*!< Chroma Subsampling in case of YCBCR or CMYK color space, 0-> 4:4:4 , 1-> 4:2:2, 2 -> 4:1:1, 3 -> 4:2:0 + This parameter can be a value of @ref JPEG_ChromaSubsampling */ + + uint32_t ImageHeight; /*!< Image height : number of lines */ + + uint32_t ImageWidth; /*!< Image width : number of pixels per line */ + + uint32_t ImageQuality; /*!< Quality of the JPEG encoding : from 1 to 100 */ + +} JPEG_ConfTypeDef; +/** + * @} + */ + +/** @defgroup HAL_JPEG_state_structure_definition HAL JPEG state structure definition + * @brief HAL JPEG State structure definition + * @{ + */ +typedef enum +{ + HAL_JPEG_STATE_RESET = 0x00U, /*!< JPEG not yet initialized or disabled */ + HAL_JPEG_STATE_READY = 0x01U, /*!< JPEG initialized and ready for use */ + HAL_JPEG_STATE_BUSY = 0x02U, /*!< JPEG internal processing is ongoing */ + HAL_JPEG_STATE_BUSY_ENCODING = 0x03U, /*!< JPEG encoding processing is ongoing */ + HAL_JPEG_STATE_BUSY_DECODING = 0x04U, /*!< JPEG decoding processing is ongoing */ + HAL_JPEG_STATE_TIMEOUT = 0x05U, /*!< JPEG timeout state */ + HAL_JPEG_STATE_ERROR = 0x06U /*!< JPEG error state */ +} HAL_JPEG_STATETypeDef; + +/** + * @} + */ + + +/** @defgroup JPEG_handle_Structure_definition JPEG handle Structure definition + * @brief JPEG handle Structure definition + * @{ + */ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) +typedef struct __JPEG_HandleTypeDef +#else +typedef struct +#endif /* (USE_HAL_JPEG_REGISTER_CALLBACKS) */ +{ + JPEG_TypeDef *Instance; /*!< JPEG peripheral register base address */ + + JPEG_ConfTypeDef Conf; /*!< Current JPEG encoding/decoding parameters */ + + uint8_t *pJpegInBuffPtr; /*!< Pointer to JPEG processing (encoding, decoding,...) input buffer */ + + uint8_t *pJpegOutBuffPtr; /*!< Pointer to JPEG processing (encoding, decoding,...) 
output buffer */ + + __IO uint32_t JpegInCount; /*!< Internal Counter of input data */ + + __IO uint32_t JpegOutCount; /*!< Internal Counter of output data */ + + uint32_t InDataLength; /*!< Input Buffer Length in Bytes */ + + uint32_t OutDataLength; /*!< Output Buffer Length in Bytes */ + + DMA_HandleTypeDef *hdmain; /*!< JPEG In DMA handle parameters */ + + DMA_HandleTypeDef *hdmaout; /*!< JPEG Out DMA handle parameters */ + + uint8_t CustomQuanTable; /*!< If set to 1 specify that user customized quantization tables are used */ + + uint8_t *QuantTable0; /*!< Basic Quantization Table for component 0 */ + + uint8_t *QuantTable1; /*!< Basic Quantization Table for component 1 */ + + uint8_t *QuantTable2; /*!< Basic Quantization Table for component 2 */ + + uint8_t *QuantTable3; /*!< Basic Quantization Table for component 3 */ + + HAL_LockTypeDef Lock; /*!< JPEG locking object */ + + __IO HAL_JPEG_STATETypeDef State; /*!< JPEG peripheral state */ + + __IO uint32_t ErrorCode; /*!< JPEG Error code */ + + __IO uint32_t Context; /*!< JPEG Internal context */ + +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + void (*InfoReadyCallback)(struct __JPEG_HandleTypeDef *hjpeg, + JPEG_ConfTypeDef *pInfo); /*!< JPEG Info ready callback */ + void (*EncodeCpltCallback)(struct __JPEG_HandleTypeDef + *hjpeg); /*!< JPEG Encode complete callback */ + void (*DecodeCpltCallback)(struct __JPEG_HandleTypeDef + *hjpeg); /*!< JPEG Decode complete callback */ + void (*ErrorCallback)(struct __JPEG_HandleTypeDef + *hjpeg); /*!< JPEG Error callback */ + void (*GetDataCallback)(struct __JPEG_HandleTypeDef *hjpeg, + uint32_t NbDecodedData); /*!< JPEG Get Data callback */ + void (*DataReadyCallback)(struct __JPEG_HandleTypeDef *hjpeg, uint8_t *pDataOut, + uint32_t OutDataLength); /*!< JPEG Data ready callback */ + + void (* MspInitCallback)(struct __JPEG_HandleTypeDef *hjpeg); /*!< JPEG Msp Init callback */ + void (* MspDeInitCallback)(struct __JPEG_HandleTypeDef + *hjpeg); /*!< JPEG Msp DeInit callback */ + + +#endif /* (USE_HAL_JPEG_REGISTER_CALLBACKS) */ + + +} JPEG_HandleTypeDef; +/** + * @} + */ + + +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) +/** @defgroup HAL_JPEG_Callback_ID_enumeration_definition HAL JPEG Callback ID enumeration definition + * @brief HAL JPEG Callback ID enumeration definition + * @{ + */ +typedef enum +{ + HAL_JPEG_ENCODE_CPLT_CB_ID = 0x01U, /*!< JPEG Encode Complete callback ID */ + HAL_JPEG_DECODE_CPLT_CB_ID = 0x02U, /*!< JPEG Decode Complete callback ID */ + HAL_JPEG_ERROR_CB_ID = 0x03U, /*!< JPEG Error callback ID */ + + HAL_JPEG_MSPINIT_CB_ID = 0x04U, /*!< JPEG MspInit callback ID */ + HAL_JPEG_MSPDEINIT_CB_ID = 0x05U /*!< JPEG MspDeInit callback ID */ + +} HAL_JPEG_CallbackIDTypeDef; +/** + * @} + */ + +/** @defgroup HAL_JPEG_Callback_pointer_definition HAL JPEG Callback pointer definition + * @brief HAL JPEG Callback pointer definition + * @{ + */ +typedef void (*pJPEG_CallbackTypeDef)(JPEG_HandleTypeDef *hjpeg); /*!< pointer to a common JPEG callback function */ +typedef void (*pJPEG_InfoReadyCallbackTypeDef)(JPEG_HandleTypeDef *hjpeg, + JPEG_ConfTypeDef *pInfo); /*!< pointer to an Info ready JPEG callback function */ +typedef void (*pJPEG_GetDataCallbackTypeDef)(JPEG_HandleTypeDef *hjpeg, + uint32_t NbDecodedData); /*!< pointer to a Get data JPEG callback function */ +typedef void (*pJPEG_DataReadyCallbackTypeDef)(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataOut, + uint32_t OutDataLength); /*!< pointer to a Data ready JPEG callback function */ +/** + * @} + */ + +#endif /* 
USE_HAL_JPEG_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup JPEG_Exported_Constants JPEG Exported Constants + * @{ + */ + +/** @defgroup JPEG_Error_Code_definition JPEG Error Code definition + * @brief JPEG Error Code definition + * @{ + */ + +#define HAL_JPEG_ERROR_NONE ((uint32_t)0x00000000U) /*!< No error */ +#define HAL_JPEG_ERROR_HUFF_TABLE ((uint32_t)0x00000001U) /*!< HUffman Table programming error */ +#define HAL_JPEG_ERROR_QUANT_TABLE ((uint32_t)0x00000002U) /*!< Quantization Table programming error */ +#define HAL_JPEG_ERROR_DMA ((uint32_t)0x00000004U) /*!< DMA transfer error */ +#define HAL_JPEG_ERROR_TIMEOUT ((uint32_t)0x00000008U) /*!< Timeout error */ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) +#define HAL_JPEG_ERROR_INVALID_CALLBACK ((uint32_t)0x00000010U) /*!< Invalid Callback error */ +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup JPEG_Quantization_Table_Size JPEG Quantization Table Size + * @brief JPEG Quantization Table Size + * @{ + */ +#define JPEG_QUANT_TABLE_SIZE ((uint32_t)64U) /*!< JPEG Quantization Table Size in bytes */ +/** + * @} + */ + + +/** @defgroup JPEG_ColorSpace JPEG ColorSpace + * @brief JPEG Color Space + * @{ + */ +#define JPEG_GRAYSCALE_COLORSPACE ((uint32_t)0x00000000U) +#define JPEG_YCBCR_COLORSPACE JPEG_CONFR1_COLORSPACE_0 +#define JPEG_CMYK_COLORSPACE JPEG_CONFR1_COLORSPACE + + +/** + * @} + */ + + +/** @defgroup JPEG_ChromaSubsampling JPEG Chrominance Sampling + * @brief JPEG Chrominance Sampling + * @{ + */ +#define JPEG_444_SUBSAMPLING ((uint32_t)0x00000000U) /*!< Chroma Subsampling 4:4:4 */ +#define JPEG_420_SUBSAMPLING ((uint32_t)0x00000001U) /*!< Chroma Subsampling 4:2:0 */ +#define JPEG_422_SUBSAMPLING ((uint32_t)0x00000002U) /*!< Chroma Subsampling 4:2:2 */ + +/** + * @} + */ + +/** @defgroup JPEG_ImageQuality JPEG Image Quality + * @brief JPEG Min and Max Image Quality + * @{ + */ +#define JPEG_IMAGE_QUALITY_MIN ((uint32_t)1U) /*!< Minimum JPEG quality */ +#define JPEG_IMAGE_QUALITY_MAX ((uint32_t)100U) /*!< Maximum JPEG quality */ + +/** + * @} + */ + +/** @defgroup JPEG_Interrupt_configuration_definition JPEG Interrupt configuration definition + * @brief JPEG Interrupt definition + * @{ + */ +#define JPEG_IT_IFT ((uint32_t)JPEG_CR_IFTIE) /*!< Input FIFO Threshold Interrupt */ +#define JPEG_IT_IFNF ((uint32_t)JPEG_CR_IFNFIE) /*!< Input FIFO Not Full Interrupt */ +#define JPEG_IT_OFT ((uint32_t)JPEG_CR_OFTIE) /*!< Output FIFO Threshold Interrupt */ +#define JPEG_IT_OFNE ((uint32_t)JPEG_CR_OFTIE) /*!< Output FIFO Not Empty Interrupt */ +#define JPEG_IT_EOC ((uint32_t)JPEG_CR_EOCIE) /*!< End of Conversion Interrupt */ +#define JPEG_IT_HPD ((uint32_t)JPEG_CR_HPDIE) /*!< Header Parsing Done Interrupt */ +/** + * @} + */ + +/** @defgroup JPEG_Flag_definition JPEG Flag definition + * @brief JPEG Flags definition + * @{ + */ +#define JPEG_FLAG_IFTF ((uint32_t)JPEG_SR_IFTF) /*!< Input FIFO is not full and is bellow its threshold flag */ +#define JPEG_FLAG_IFNFF ((uint32_t)JPEG_SR_IFNFF) /*!< Input FIFO Not Full Flag, a data can be written */ +#define JPEG_FLAG_OFTF ((uint32_t)JPEG_SR_OFTF) /*!< Output FIFO is not empty and has reach its threshold */ +#define JPEG_FLAG_OFNEF ((uint32_t)JPEG_SR_OFNEF) /*!< Output FIFO is not empty, a data is available */ +#define JPEG_FLAG_EOCF ((uint32_t)JPEG_SR_EOCF) /*!< JPEG Codec core has finished the encoding or the decoding process and than last data has been sent to the output 
FIFO */ +#define JPEG_FLAG_HPDF ((uint32_t)JPEG_SR_HPDF) /*!< JPEG Codec has finished the parsing of the headers and the internal registers have been updated */ +#define JPEG_FLAG_COF ((uint32_t)JPEG_SR_COF) /*!< JPEG Codec operation on going flag*/ + +#define JPEG_FLAG_ALL ((uint32_t)0x000000FEU) /*!< JPEG Codec All previous flag*/ +/** + * @} + */ + +/** @defgroup JPEG_PROCESS_PAUSE_RESUME_definition JPEG Process Pause Resume definition + * @brief JPEG process pause, resume definition + * @{ + */ +#define JPEG_PAUSE_RESUME_INPUT ((uint32_t)0x00000001U) /*!< Pause/Resume Input FIFO Xfer*/ +#define JPEG_PAUSE_RESUME_OUTPUT ((uint32_t)0x00000002U) /*!< Pause/Resume Output FIFO Xfer*/ +#define JPEG_PAUSE_RESUME_INPUT_OUTPUT ((uint32_t)0x00000003U) /*!< Pause/Resume Input and Output FIFO Xfer*/ +/** + * @} + */ + +/** + * @} + */ +/* Exported macro ------------------------------------------------------------*/ + +/** @defgroup JPEG_Exported_Macros JPEG Exported Macros + * @{ + */ + +/** @brief Reset JPEG handle state + * @param __HANDLE__ specifies the JPEG handle. + * @retval None + */ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) +#define __HAL_JPEG_RESET_HANDLE_STATE(__HANDLE__) do{\ + (__HANDLE__)->State = HAL_JPEG_STATE_RESET;\ + (__HANDLE__)->MspInitCallback = NULL;\ + (__HANDLE__)->MspDeInitCallback = NULL;\ + }while(0) +#else +#define __HAL_JPEG_RESET_HANDLE_STATE(__HANDLE__) ( (__HANDLE__)->State = HAL_JPEG_STATE_RESET) +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + +/** + * @brief Enable the JPEG peripheral. + * @param __HANDLE__ specifies the JPEG handle. + * @retval None + */ +#define __HAL_JPEG_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= JPEG_CR_JCEN) + +/** + * @brief Disable the JPEG peripheral. + * @param __HANDLE__ specifies the JPEG handle. + * @retval None + */ +#define __HAL_JPEG_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~JPEG_CR_JCEN) + + +/** + * @brief Check the specified JPEG status flag. + * @param __HANDLE__ specifies the JPEG handle. + * @param __FLAG__ specifies the flag to check + * This parameter can be one of the following values: + * @arg JPEG_FLAG_IFTF : The input FIFO is not full and is bellow its threshold flag + * @arg JPEG_FLAG_IFNFF : The input FIFO Not Full Flag, a data can be written + * @arg JPEG_FLAG_OFTF : The output FIFO is not empty and has reach its threshold + * @arg JPEG_FLAG_OFNEF : The output FIFO is not empty, a data is available + * @arg JPEG_FLAG_EOCF : JPEG Codec core has finished the encoding or the decoding process + * and than last data has been sent to the output FIFO + * @arg JPEG_FLAG_HPDF : JPEG Codec has finished the parsing of the headers + * and the internal registers have been updated + * @arg JPEG_FLAG_COF : JPEG Codec operation on going flag + * + * @retval __HAL_JPEG_GET_FLAG : returns The new state of __FLAG__ (TRUE or FALSE) + */ + +#define __HAL_JPEG_GET_FLAG(__HANDLE__,__FLAG__) (((__HANDLE__)->Instance->SR & (__FLAG__))) + +/** + * @brief Clear the specified JPEG status flag. + * @param __HANDLE__ specifies the JPEG handle. 
+ * @param __FLAG__ specifies the flag to clear + * This parameter can be one of the following values: + * @arg JPEG_FLAG_EOCF : JPEG Codec core has finished the encoding or the decoding process + * and than last data has been sent to the output FIFO + * @arg JPEG_FLAG_HPDF : JPEG Codec has finished the parsing of the headers + * @retval None + */ + +#define __HAL_JPEG_CLEAR_FLAG(__HANDLE__,__FLAG__) (((__HANDLE__)->Instance->CFR |= ((__FLAG__) & (JPEG_FLAG_EOCF | JPEG_FLAG_HPDF)))) + + +/** + * @brief Enable Interrupt. + * @param __HANDLE__ specifies the JPEG handle. + * @param __INTERRUPT__ specifies the interrupt to enable + * This parameter can be one of the following values: + * @arg JPEG_IT_IFT : Input FIFO Threshold Interrupt + * @arg JPEG_IT_IFNF : Input FIFO Not Full Interrupt + * @arg JPEG_IT_OFT : Output FIFO Threshold Interrupt + * @arg JPEG_IT_OFNE : Output FIFO Not empty Interrupt + * @arg JPEG_IT_EOC : End of Conversion Interrupt + * @arg JPEG_IT_HPD : Header Parsing Done Interrupt + * + * @retval No retrun + */ +#define __HAL_JPEG_ENABLE_IT(__HANDLE__,__INTERRUPT__) ((__HANDLE__)->Instance->CR |= (__INTERRUPT__) ) + +/** + * @brief Disable Interrupt. + * @param __HANDLE__ specifies the JPEG handle. + * @param __INTERRUPT__ specifies the interrupt to disable + * This parameter can be one of the following values: + * @arg JPEG_IT_IFT : Input FIFO Threshold Interrupt + * @arg JPEG_IT_IFNF : Input FIFO Not Full Interrupt + * @arg JPEG_IT_OFT : Output FIFO Threshold Interrupt + * @arg JPEG_IT_OFNE : Output FIFO Not empty Interrupt + * @arg JPEG_IT_EOC : End of Conversion Interrupt + * @arg JPEG_IT_HPD : Header Parsing Done Interrupt + * + * @note To disable an IT we must use MODIFY_REG macro to avoid writing "1" to the FIFO flush bits + * located in the same IT enable register (CR register). + * @retval No retrun + */ +#define __HAL_JPEG_DISABLE_IT(__HANDLE__,__INTERRUPT__) MODIFY_REG((__HANDLE__)->Instance->CR, (__INTERRUPT__), 0UL) + + +/** + * @brief Get Interrupt state. + * @param __HANDLE__ specifies the JPEG handle. 
+ * @param __INTERRUPT__ specifies the interrupt to check + * This parameter can be one of the following values: + * @arg JPEG_IT_IFT : Input FIFO Threshold Interrupt + * @arg JPEG_IT_IFNF : Input FIFO Not Full Interrupt + * @arg JPEG_IT_OFT : Output FIFO Threshold Interrupt + * @arg JPEG_IT_OFNE : Output FIFO Not empty Interrupt + * @arg JPEG_IT_EOC : End of Conversion Interrupt + * @arg JPEG_IT_HPD : Header Parsing Done Interrupt + * + * @retval returns The new state of __INTERRUPT__ (Enabled or disabled) + */ +#define __HAL_JPEG_GET_IT_SOURCE(__HANDLE__,__INTERRUPT__) ((__HANDLE__)->Instance->CR & (__INTERRUPT__)) + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup JPEG_Exported_Functions + * @{ + */ + +/** @addtogroup JPEG_Exported_Functions_Group1 + * @{ + */ +/* Initialization/de-initialization functions ********************************/ +HAL_StatusTypeDef HAL_JPEG_Init(JPEG_HandleTypeDef *hjpeg); +HAL_StatusTypeDef HAL_JPEG_DeInit(JPEG_HandleTypeDef *hjpeg); +void HAL_JPEG_MspInit(JPEG_HandleTypeDef *hjpeg); +void HAL_JPEG_MspDeInit(JPEG_HandleTypeDef *hjpeg); + +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_JPEG_RegisterCallback(JPEG_HandleTypeDef *hjpeg, HAL_JPEG_CallbackIDTypeDef CallbackID, + pJPEG_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_JPEG_UnRegisterCallback(JPEG_HandleTypeDef *hjpeg, HAL_JPEG_CallbackIDTypeDef CallbackID); + +HAL_StatusTypeDef HAL_JPEG_RegisterInfoReadyCallback(JPEG_HandleTypeDef *hjpeg, + pJPEG_InfoReadyCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_JPEG_UnRegisterInfoReadyCallback(JPEG_HandleTypeDef *hjpeg); + +HAL_StatusTypeDef HAL_JPEG_RegisterGetDataCallback(JPEG_HandleTypeDef *hjpeg, pJPEG_GetDataCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_JPEG_UnRegisterGetDataCallback(JPEG_HandleTypeDef *hjpeg); + +HAL_StatusTypeDef HAL_JPEG_RegisterDataReadyCallback(JPEG_HandleTypeDef *hjpeg, + pJPEG_DataReadyCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_JPEG_UnRegisterDataReadyCallback(JPEG_HandleTypeDef *hjpeg); + +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @addtogroup JPEG_Exported_Functions_Group2 + * @{ + */ +/* Encoding/Decoding Configuration functions ********************************/ +HAL_StatusTypeDef HAL_JPEG_ConfigEncoding(JPEG_HandleTypeDef *hjpeg, JPEG_ConfTypeDef *pConf); +HAL_StatusTypeDef HAL_JPEG_GetInfo(JPEG_HandleTypeDef *hjpeg, JPEG_ConfTypeDef *pInfo); +HAL_StatusTypeDef HAL_JPEG_EnableHeaderParsing(JPEG_HandleTypeDef *hjpeg); +HAL_StatusTypeDef HAL_JPEG_DisableHeaderParsing(JPEG_HandleTypeDef *hjpeg); +HAL_StatusTypeDef HAL_JPEG_SetUserQuantTables(JPEG_HandleTypeDef *hjpeg, uint8_t *QTable0, uint8_t *QTable1, + uint8_t *QTable2, uint8_t *QTable3); + +/** + * @} + */ + +/** @addtogroup JPEG_Exported_Functions_Group3 + * @{ + */ +/* JPEG processing functions **************************************/ +HAL_StatusTypeDef HAL_JPEG_Encode(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataInMCU, uint32_t InDataLength, + uint8_t *pDataOut, uint32_t OutDataLength, uint32_t Timeout); +HAL_StatusTypeDef HAL_JPEG_Decode(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataIn, uint32_t InDataLength, + uint8_t *pDataOutMCU, uint32_t OutDataLength, uint32_t Timeout); +HAL_StatusTypeDef HAL_JPEG_Encode_IT(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataInMCU, uint32_t InDataLength, + uint8_t *pDataOut, uint32_t OutDataLength); +HAL_StatusTypeDef HAL_JPEG_Decode_IT(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataIn, uint32_t InDataLength, + uint8_t 
*pDataOutMCU, uint32_t OutDataLength); +HAL_StatusTypeDef HAL_JPEG_Encode_DMA(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataInMCU, uint32_t InDataLength, + uint8_t *pDataOut, uint32_t OutDataLength); +HAL_StatusTypeDef HAL_JPEG_Decode_DMA(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataIn, uint32_t InDataLength, + uint8_t *pDataOutMCU, uint32_t OutDataLength); +HAL_StatusTypeDef HAL_JPEG_Pause(JPEG_HandleTypeDef *hjpeg, uint32_t XferSelection); +HAL_StatusTypeDef HAL_JPEG_Resume(JPEG_HandleTypeDef *hjpeg, uint32_t XferSelection); +void HAL_JPEG_ConfigInputBuffer(JPEG_HandleTypeDef *hjpeg, uint8_t *pNewInputBuffer, uint32_t InDataLength); +void HAL_JPEG_ConfigOutputBuffer(JPEG_HandleTypeDef *hjpeg, uint8_t *pNewOutputBuffer, uint32_t OutDataLength); +HAL_StatusTypeDef HAL_JPEG_Abort(JPEG_HandleTypeDef *hjpeg); + +/** + * @} + */ + +/** @addtogroup JPEG_Exported_Functions_Group4 + * @{ + */ +/* JPEG Decode/Encode callback functions ********************************************************/ +void HAL_JPEG_InfoReadyCallback(JPEG_HandleTypeDef *hjpeg, JPEG_ConfTypeDef *pInfo); +void HAL_JPEG_EncodeCpltCallback(JPEG_HandleTypeDef *hjpeg); +void HAL_JPEG_DecodeCpltCallback(JPEG_HandleTypeDef *hjpeg); +void HAL_JPEG_ErrorCallback(JPEG_HandleTypeDef *hjpeg); +void HAL_JPEG_GetDataCallback(JPEG_HandleTypeDef *hjpeg, uint32_t NbDecodedData); +void HAL_JPEG_DataReadyCallback(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataOut, uint32_t OutDataLength); + +/** + * @} + */ + +/** @addtogroup JPEG_Exported_Functions_Group5 + * @{ + */ +/* JPEG IRQ handler management ******************************************************/ +void HAL_JPEG_IRQHandler(JPEG_HandleTypeDef *hjpeg); + +/** + * @} + */ + +/** @addtogroup JPEG_Exported_Functions_Group6 + * @{ + */ +/* Peripheral State and Error functions ************************************************/ +HAL_JPEG_STATETypeDef HAL_JPEG_GetState(JPEG_HandleTypeDef *hjpeg); +uint32_t HAL_JPEG_GetError(JPEG_HandleTypeDef *hjpeg); + +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/** @defgroup JPEG_Private_Types JPEG Private Types + * @{ + */ + +/** + * @} + */ + +/* Private defines -----------------------------------------------------------*/ +/** @defgroup JPEG_Private_Defines JPEG Private Defines + * @{ + */ + +/** + * @} + */ + +/* Private variables ---------------------------------------------------------*/ +/** @defgroup JPEG_Private_Variables JPEG Private Variables + * @{ + */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup JPEG_Private_Constants JPEG Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup JPEG_Private_Macros JPEG Private Macros + * @{ + */ + +#define IS_JPEG_CHROMASUBSAMPLING(SUBSAMPLING) (((SUBSAMPLING) == JPEG_444_SUBSAMPLING) || \ + ((SUBSAMPLING) == JPEG_420_SUBSAMPLING) || \ + ((SUBSAMPLING) == JPEG_422_SUBSAMPLING)) + +#define IS_JPEG_IMAGE_QUALITY(NUMBER) (((NUMBER) >= JPEG_IMAGE_QUALITY_MIN) && ((NUMBER) <= JPEG_IMAGE_QUALITY_MAX)) + +#define IS_JPEG_COLORSPACE(COLORSPACE) (((COLORSPACE) == JPEG_GRAYSCALE_COLORSPACE) || \ + ((COLORSPACE) == JPEG_YCBCR_COLORSPACE) || \ + ((COLORSPACE) == JPEG_CMYK_COLORSPACE)) + +#define IS_JPEG_PAUSE_RESUME_STATE(VALUE) (((VALUE) == JPEG_PAUSE_RESUME_INPUT) || \ + ((VALUE) == JPEG_PAUSE_RESUME_OUTPUT)|| \ + ((VALUE) == JPEG_PAUSE_RESUME_INPUT_OUTPUT)) + +/** + * @} + */ + +/* Private functions 
prototypes ----------------------------------------------*/ +/** @defgroup JPEG_Private_Functions_Prototypes JPEG Private Functions Prototypes + * @{ + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup JPEG_Private_Functions JPEG Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* JPEG */ +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_JPEG_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_ltdc.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_ltdc.h new file mode 100644 index 000000000..0d68f21e1 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_ltdc.h @@ -0,0 +1,689 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_ltdc.h + * @author MCD Application Team + * @brief Header file of LTDC HAL module. + ****************************************************************************** + * @attention + * + *
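/* --- Illustrative usage note (editor's sketch, not part of the ST headers) ---
 * A minimal blocking decode with the JPEG codec declared in
 * stm32f7xx_hal_jpeg.h above. Buffer names, sizes and the 100 ms timeout are
 * assumptions; hjpeg->Instance is assumed to be set to JPEG by the caller,
 * and real code sizes the output buffer from the HAL_JPEG_GetInfo() data
 * delivered in HAL_JPEG_InfoReadyCallback().
 */
static HAL_StatusTypeDef example_jpeg_decode(JPEG_HandleTypeDef *hjpeg,
                                             uint8_t *jpeg_in, uint32_t jpeg_in_size,
                                             uint8_t *mcu_out, uint32_t mcu_out_size)
{
  if (HAL_JPEG_Init(hjpeg) != HAL_OK)
  {
    return HAL_ERROR;
  }

  /* Feed the compressed stream and collect decoded MCU blocks in polling mode. */
  return HAL_JPEG_Decode(hjpeg, jpeg_in, jpeg_in_size, mcu_out, mcu_out_size, 100U);
}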

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_LTDC_H +#define STM32F7xx_HAL_LTDC_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined (LTDC) + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup LTDC LTDC + * @brief LTDC HAL module driver + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup LTDC_Exported_Types LTDC Exported Types + * @{ + */ +#define MAX_LAYER 2U + +/** + * @brief LTDC color structure definition + */ +typedef struct +{ + uint8_t Blue; /*!< Configures the blue value. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF. */ + + uint8_t Green; /*!< Configures the green value. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF. */ + + uint8_t Red; /*!< Configures the red value. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF. */ + + uint8_t Reserved; /*!< Reserved 0xFF */ +} LTDC_ColorTypeDef; + +/** + * @brief LTDC Init structure definition + */ +typedef struct +{ + uint32_t HSPolarity; /*!< configures the horizontal synchronization polarity. + This parameter can be one value of @ref LTDC_HS_POLARITY */ + + uint32_t VSPolarity; /*!< configures the vertical synchronization polarity. + This parameter can be one value of @ref LTDC_VS_POLARITY */ + + uint32_t DEPolarity; /*!< configures the data enable polarity. + This parameter can be one of value of @ref LTDC_DE_POLARITY */ + + uint32_t PCPolarity; /*!< configures the pixel clock polarity. + This parameter can be one of value of @ref LTDC_PC_POLARITY */ + + uint32_t HorizontalSync; /*!< configures the number of Horizontal synchronization width. + This parameter must be a number between Min_Data = 0x000 and Max_Data = 0xFFF. */ + + uint32_t VerticalSync; /*!< configures the number of Vertical synchronization height. + This parameter must be a number between Min_Data = 0x000 and Max_Data = 0x7FF. */ + + uint32_t AccumulatedHBP; /*!< configures the accumulated horizontal back porch width. + This parameter must be a number between Min_Data = LTDC_HorizontalSync and Max_Data = 0xFFF. */ + + uint32_t AccumulatedVBP; /*!< configures the accumulated vertical back porch height. + This parameter must be a number between Min_Data = LTDC_VerticalSync and Max_Data = 0x7FF. */ + + uint32_t AccumulatedActiveW; /*!< configures the accumulated active width. + This parameter must be a number between Min_Data = LTDC_AccumulatedHBP and Max_Data = 0xFFF. */ + + uint32_t AccumulatedActiveH; /*!< configures the accumulated active height. + This parameter must be a number between Min_Data = LTDC_AccumulatedVBP and Max_Data = 0x7FF. */ + + uint32_t TotalWidth; /*!< configures the total width. + This parameter must be a number between Min_Data = LTDC_AccumulatedActiveW and Max_Data = 0xFFF. */ + + uint32_t TotalHeigh; /*!< configures the total height. + This parameter must be a number between Min_Data = LTDC_AccumulatedActiveH and Max_Data = 0x7FF. 
*/ + + LTDC_ColorTypeDef Backcolor; /*!< Configures the background color. */ +} LTDC_InitTypeDef; + +/** + * @brief LTDC Layer structure definition + */ +typedef struct +{ + uint32_t WindowX0; /*!< Configures the Window Horizontal Start Position. + This parameter must be a number between Min_Data = 0x000 and Max_Data = 0xFFF. */ + + uint32_t WindowX1; /*!< Configures the Window Horizontal Stop Position. + This parameter must be a number between Min_Data = 0x000 and Max_Data = 0xFFF. */ + + uint32_t WindowY0; /*!< Configures the Window vertical Start Position. + This parameter must be a number between Min_Data = 0x000 and Max_Data = 0x7FF. */ + + uint32_t WindowY1; /*!< Configures the Window vertical Stop Position. + This parameter must be a number between Min_Data = 0x0000 and Max_Data = 0x7FF. */ + + uint32_t PixelFormat; /*!< Specifies the pixel format. + This parameter can be one of value of @ref LTDC_Pixelformat */ + + uint32_t Alpha; /*!< Specifies the constant alpha used for blending. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF. */ + + uint32_t Alpha0; /*!< Configures the default alpha value. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF. */ + + uint32_t BlendingFactor1; /*!< Select the blending factor 1. + This parameter can be one of value of @ref LTDC_BlendingFactor1 */ + + uint32_t BlendingFactor2; /*!< Select the blending factor 2. + This parameter can be one of value of @ref LTDC_BlendingFactor2 */ + + uint32_t FBStartAdress; /*!< Configures the color frame buffer address */ + + uint32_t ImageWidth; /*!< Configures the color frame buffer line length. + This parameter must be a number between Min_Data = 0x0000 and Max_Data = 0x1FFF. */ + + uint32_t ImageHeight; /*!< Specifies the number of line in frame buffer. + This parameter must be a number between Min_Data = 0x000 and Max_Data = 0x7FF. */ + + LTDC_ColorTypeDef Backcolor; /*!< Configures the layer background color. 
*/ +} LTDC_LayerCfgTypeDef; + +/** + * @brief HAL LTDC State structures definition + */ +typedef enum +{ + HAL_LTDC_STATE_RESET = 0x00U, /*!< LTDC not yet initialized or disabled */ + HAL_LTDC_STATE_READY = 0x01U, /*!< LTDC initialized and ready for use */ + HAL_LTDC_STATE_BUSY = 0x02U, /*!< LTDC internal process is ongoing */ + HAL_LTDC_STATE_TIMEOUT = 0x03U, /*!< LTDC Timeout state */ + HAL_LTDC_STATE_ERROR = 0x04U /*!< LTDC state error */ +} HAL_LTDC_StateTypeDef; + +/** + * @brief LTDC handle Structure definition + */ +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) +typedef struct __LTDC_HandleTypeDef +#else +typedef struct +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ +{ + LTDC_TypeDef *Instance; /*!< LTDC Register base address */ + + LTDC_InitTypeDef Init; /*!< LTDC parameters */ + + LTDC_LayerCfgTypeDef LayerCfg[MAX_LAYER]; /*!< LTDC Layers parameters */ + + HAL_LockTypeDef Lock; /*!< LTDC Lock */ + + __IO HAL_LTDC_StateTypeDef State; /*!< LTDC state */ + + __IO uint32_t ErrorCode; /*!< LTDC Error code */ + +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) + void (* LineEventCallback)(struct __LTDC_HandleTypeDef *hltdc); /*!< LTDC Line Event Callback */ + void (* ReloadEventCallback)(struct __LTDC_HandleTypeDef *hltdc); /*!< LTDC Reload Event Callback */ + void (* ErrorCallback)(struct __LTDC_HandleTypeDef *hltdc); /*!< LTDC Error Callback */ + + void (* MspInitCallback)(struct __LTDC_HandleTypeDef *hltdc); /*!< LTDC Msp Init callback */ + void (* MspDeInitCallback)(struct __LTDC_HandleTypeDef *hltdc); /*!< LTDC Msp DeInit callback */ + +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ + + +} LTDC_HandleTypeDef; + +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) +/** + * @brief HAL LTDC Callback ID enumeration definition + */ +typedef enum +{ + HAL_LTDC_MSPINIT_CB_ID = 0x00U, /*!< LTDC MspInit callback ID */ + HAL_LTDC_MSPDEINIT_CB_ID = 0x01U, /*!< LTDC MspDeInit callback ID */ + + HAL_LTDC_LINE_EVENT_CB_ID = 0x02U, /*!< LTDC Line Event Callback ID */ + HAL_LTDC_RELOAD_EVENT_CB_ID = 0x03U, /*!< LTDC Reload Callback ID */ + HAL_LTDC_ERROR_CB_ID = 0x04U /*!< LTDC Error Callback ID */ + +} HAL_LTDC_CallbackIDTypeDef; + +/** + * @brief HAL LTDC Callback pointer definition + */ +typedef void (*pLTDC_CallbackTypeDef)(LTDC_HandleTypeDef *hltdc); /*!< pointer to an LTDC callback function */ + +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup LTDC_Exported_Constants LTDC Exported Constants + * @{ + */ + +/** @defgroup LTDC_Error_Code LTDC Error Code + * @{ + */ +#define HAL_LTDC_ERROR_NONE 0x00000000U /*!< LTDC No error */ +#define HAL_LTDC_ERROR_TE 0x00000001U /*!< LTDC Transfer error */ +#define HAL_LTDC_ERROR_FU 0x00000002U /*!< LTDC FIFO Underrun */ +#define HAL_LTDC_ERROR_TIMEOUT 0x00000020U /*!< LTDC Timeout error */ +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) +#define HAL_LTDC_ERROR_INVALID_CALLBACK 0x00000040U /*!< LTDC Invalid Callback error */ +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup LTDC_Layer LTDC Layer + * @{ + */ +#define LTDC_LAYER_1 0x00000000U /*!< LTDC Layer 1 */ +#define LTDC_LAYER_2 0x00000001U /*!< LTDC Layer 2 */ +/** + * @} + */ + +/** @defgroup LTDC_HS_POLARITY LTDC HS POLARITY + * @{ + */ +#define LTDC_HSPOLARITY_AL 0x00000000U /*!< Horizontal Synchronization is active low. */ +#define LTDC_HSPOLARITY_AH LTDC_GCR_HSPOL /*!< Horizontal Synchronization is active high. 
*/ +/** + * @} + */ + +/** @defgroup LTDC_VS_POLARITY LTDC VS POLARITY + * @{ + */ +#define LTDC_VSPOLARITY_AL 0x00000000U /*!< Vertical Synchronization is active low. */ +#define LTDC_VSPOLARITY_AH LTDC_GCR_VSPOL /*!< Vertical Synchronization is active high. */ +/** + * @} + */ + +/** @defgroup LTDC_DE_POLARITY LTDC DE POLARITY + * @{ + */ +#define LTDC_DEPOLARITY_AL 0x00000000U /*!< Data Enable, is active low. */ +#define LTDC_DEPOLARITY_AH LTDC_GCR_DEPOL /*!< Data Enable, is active high. */ +/** + * @} + */ + +/** @defgroup LTDC_PC_POLARITY LTDC PC POLARITY + * @{ + */ +#define LTDC_PCPOLARITY_IPC 0x00000000U /*!< input pixel clock. */ +#define LTDC_PCPOLARITY_IIPC LTDC_GCR_PCPOL /*!< inverted input pixel clock. */ +/** + * @} + */ + +/** @defgroup LTDC_SYNC LTDC SYNC + * @{ + */ +#define LTDC_HORIZONTALSYNC (LTDC_SSCR_HSW >> 16U) /*!< Horizontal synchronization width. */ +#define LTDC_VERTICALSYNC LTDC_SSCR_VSH /*!< Vertical synchronization height. */ +/** + * @} + */ + +/** @defgroup LTDC_BACK_COLOR LTDC BACK COLOR + * @{ + */ +#define LTDC_COLOR 0x000000FFU /*!< Color mask */ +/** + * @} + */ + +/** @defgroup LTDC_BlendingFactor1 LTDC Blending Factor1 + * @{ + */ +#define LTDC_BLENDING_FACTOR1_CA 0x00000400U /*!< Blending factor : Cte Alpha */ +#define LTDC_BLENDING_FACTOR1_PAxCA 0x00000600U /*!< Blending factor : Cte Alpha x Pixel Alpha*/ +/** + * @} + */ + +/** @defgroup LTDC_BlendingFactor2 LTDC Blending Factor2 + * @{ + */ +#define LTDC_BLENDING_FACTOR2_CA 0x00000005U /*!< Blending factor : Cte Alpha */ +#define LTDC_BLENDING_FACTOR2_PAxCA 0x00000007U /*!< Blending factor : Cte Alpha x Pixel Alpha*/ +/** + * @} + */ + +/** @defgroup LTDC_Pixelformat LTDC Pixel format + * @{ + */ +#define LTDC_PIXEL_FORMAT_ARGB8888 0x00000000U /*!< ARGB8888 LTDC pixel format */ +#define LTDC_PIXEL_FORMAT_RGB888 0x00000001U /*!< RGB888 LTDC pixel format */ +#define LTDC_PIXEL_FORMAT_RGB565 0x00000002U /*!< RGB565 LTDC pixel format */ +#define LTDC_PIXEL_FORMAT_ARGB1555 0x00000003U /*!< ARGB1555 LTDC pixel format */ +#define LTDC_PIXEL_FORMAT_ARGB4444 0x00000004U /*!< ARGB4444 LTDC pixel format */ +#define LTDC_PIXEL_FORMAT_L8 0x00000005U /*!< L8 LTDC pixel format */ +#define LTDC_PIXEL_FORMAT_AL44 0x00000006U /*!< AL44 LTDC pixel format */ +#define LTDC_PIXEL_FORMAT_AL88 0x00000007U /*!< AL88 LTDC pixel format */ +/** + * @} + */ + +/** @defgroup LTDC_Alpha LTDC Alpha + * @{ + */ +#define LTDC_ALPHA LTDC_LxCACR_CONSTA /*!< LTDC Constant Alpha mask */ +/** + * @} + */ + +/** @defgroup LTDC_LAYER_Config LTDC LAYER Config + * @{ + */ +#define LTDC_STOPPOSITION (LTDC_LxWHPCR_WHSPPOS >> 16U) /*!< LTDC Layer stop position */ +#define LTDC_STARTPOSITION LTDC_LxWHPCR_WHSTPOS /*!< LTDC Layer start position */ + +#define LTDC_COLOR_FRAME_BUFFER LTDC_LxCFBLR_CFBLL /*!< LTDC Layer Line length */ +#define LTDC_LINE_NUMBER LTDC_LxCFBLNR_CFBLNBR /*!< LTDC Layer Line number */ +/** + * @} + */ + +/** @defgroup LTDC_Interrupts LTDC Interrupts + * @{ + */ +#define LTDC_IT_LI LTDC_IER_LIE /*!< LTDC Line Interrupt */ +#define LTDC_IT_FU LTDC_IER_FUIE /*!< LTDC FIFO Underrun Interrupt */ +#define LTDC_IT_TE LTDC_IER_TERRIE /*!< LTDC Transfer Error Interrupt */ +#define LTDC_IT_RR LTDC_IER_RRIE /*!< LTDC Register Reload Interrupt */ +/** + * @} + */ + +/** @defgroup LTDC_Flags LTDC Flags + * @{ + */ +#define LTDC_FLAG_LI LTDC_ISR_LIF /*!< LTDC Line Interrupt Flag */ +#define LTDC_FLAG_FU LTDC_ISR_FUIF /*!< LTDC FIFO Underrun interrupt Flag */ +#define LTDC_FLAG_TE LTDC_ISR_TERRIF /*!< LTDC Transfer Error interrupt Flag 
*/ +#define LTDC_FLAG_RR LTDC_ISR_RRIF /*!< LTDC Register Reload interrupt Flag */ +/** + * @} + */ + +/** @defgroup LTDC_Reload_Type LTDC Reload Type + * @{ + */ +#define LTDC_RELOAD_IMMEDIATE LTDC_SRCR_IMR /*!< Immediate Reload */ +#define LTDC_RELOAD_VERTICAL_BLANKING LTDC_SRCR_VBR /*!< Vertical Blanking Reload */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup LTDC_Exported_Macros LTDC Exported Macros + * @{ + */ + +/** @brief Reset LTDC handle state. + * @param __HANDLE__ LTDC handle + * @retval None + */ +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) +#define __HAL_LTDC_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->State = HAL_LTDC_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_LTDC_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_LTDC_STATE_RESET) +#endif /*USE_HAL_LTDC_REGISTER_CALLBACKS */ + +/** + * @brief Enable the LTDC. + * @param __HANDLE__ LTDC handle + * @retval None. + */ +#define __HAL_LTDC_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->GCR |= LTDC_GCR_LTDCEN) + +/** + * @brief Disable the LTDC. + * @param __HANDLE__ LTDC handle + * @retval None. + */ +#define __HAL_LTDC_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->GCR &= ~(LTDC_GCR_LTDCEN)) + +/** + * @brief Enable the LTDC Layer. + * @param __HANDLE__ LTDC handle + * @param __LAYER__ Specify the layer to be enabled. + * This parameter can be LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1). + * @retval None. + */ +#define __HAL_LTDC_LAYER_ENABLE(__HANDLE__, __LAYER__) ((LTDC_LAYER((__HANDLE__), (__LAYER__)))->CR |= (uint32_t)LTDC_LxCR_LEN) + +/** + * @brief Disable the LTDC Layer. + * @param __HANDLE__ LTDC handle + * @param __LAYER__ Specify the layer to be disabled. + * This parameter can be LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1). + * @retval None. + */ +#define __HAL_LTDC_LAYER_DISABLE(__HANDLE__, __LAYER__) ((LTDC_LAYER((__HANDLE__), (__LAYER__)))->CR &= ~(uint32_t)LTDC_LxCR_LEN) + +/** + * @brief Reload immediately all LTDC Layers. + * @param __HANDLE__ LTDC handle + * @retval None. + */ +#define __HAL_LTDC_RELOAD_IMMEDIATE_CONFIG(__HANDLE__) ((__HANDLE__)->Instance->SRCR |= LTDC_SRCR_IMR) + +/** + * @brief Reload during vertical blanking period all LTDC Layers. + * @param __HANDLE__ LTDC handle + * @retval None. + */ +#define __HAL_LTDC_VERTICAL_BLANKING_RELOAD_CONFIG(__HANDLE__) ((__HANDLE__)->Instance->SRCR |= LTDC_SRCR_VBR) + +/* Interrupt & Flag management */ +/** + * @brief Get the LTDC pending flags. + * @param __HANDLE__ LTDC handle + * @param __FLAG__ Get the specified flag. + * This parameter can be any combination of the following values: + * @arg LTDC_FLAG_LI: Line Interrupt flag + * @arg LTDC_FLAG_FU: FIFO Underrun Interrupt flag + * @arg LTDC_FLAG_TE: Transfer Error interrupt flag + * @arg LTDC_FLAG_RR: Register Reload Interrupt Flag + * @retval The state of FLAG (SET or RESET). + */ +#define __HAL_LTDC_GET_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->ISR & (__FLAG__)) + +/** + * @brief Clears the LTDC pending flags. + * @param __HANDLE__ LTDC handle + * @param __FLAG__ Specify the flag to clear. 
+ * This parameter can be any combination of the following values: + * @arg LTDC_FLAG_LI: Line Interrupt flag + * @arg LTDC_FLAG_FU: FIFO Underrun Interrupt flag + * @arg LTDC_FLAG_TE: Transfer Error interrupt flag + * @arg LTDC_FLAG_RR: Register Reload Interrupt Flag + * @retval None + */ +#define __HAL_LTDC_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->ICR = (__FLAG__)) + +/** + * @brief Enables the specified LTDC interrupts. + * @param __HANDLE__ LTDC handle + * @param __INTERRUPT__ Specify the LTDC interrupt sources to be enabled. + * This parameter can be any combination of the following values: + * @arg LTDC_IT_LI: Line Interrupt flag + * @arg LTDC_IT_FU: FIFO Underrun Interrupt flag + * @arg LTDC_IT_TE: Transfer Error interrupt flag + * @arg LTDC_IT_RR: Register Reload Interrupt Flag + * @retval None + */ +#define __HAL_LTDC_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->IER |= (__INTERRUPT__)) + +/** + * @brief Disables the specified LTDC interrupts. + * @param __HANDLE__ LTDC handle + * @param __INTERRUPT__ Specify the LTDC interrupt sources to be disabled. + * This parameter can be any combination of the following values: + * @arg LTDC_IT_LI: Line Interrupt flag + * @arg LTDC_IT_FU: FIFO Underrun Interrupt flag + * @arg LTDC_IT_TE: Transfer Error interrupt flag + * @arg LTDC_IT_RR: Register Reload Interrupt Flag + * @retval None + */ +#define __HAL_LTDC_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->IER &= ~(__INTERRUPT__)) + +/** + * @brief Check whether the specified LTDC interrupt has occurred or not. + * @param __HANDLE__ LTDC handle + * @param __INTERRUPT__ Specify the LTDC interrupt source to check. + * This parameter can be one of the following values: + * @arg LTDC_IT_LI: Line Interrupt flag + * @arg LTDC_IT_FU: FIFO Underrun Interrupt flag + * @arg LTDC_IT_TE: Transfer Error interrupt flag + * @arg LTDC_IT_RR: Register Reload Interrupt Flag + * @retval The state of INTERRUPT (SET or RESET). 
+ */ +#define __HAL_LTDC_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->IER & (__INTERRUPT__)) +/** + * @} + */ + +/* Include LTDC HAL Extension module */ +#include "stm32f7xx_hal_ltdc_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup LTDC_Exported_Functions + * @{ + */ +/** @addtogroup LTDC_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions *****************************/ +HAL_StatusTypeDef HAL_LTDC_Init(LTDC_HandleTypeDef *hltdc); +HAL_StatusTypeDef HAL_LTDC_DeInit(LTDC_HandleTypeDef *hltdc); +void HAL_LTDC_MspInit(LTDC_HandleTypeDef *hltdc); +void HAL_LTDC_MspDeInit(LTDC_HandleTypeDef *hltdc); +void HAL_LTDC_ErrorCallback(LTDC_HandleTypeDef *hltdc); +void HAL_LTDC_LineEventCallback(LTDC_HandleTypeDef *hltdc); +void HAL_LTDC_ReloadEventCallback(LTDC_HandleTypeDef *hltdc); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_LTDC_RegisterCallback(LTDC_HandleTypeDef *hltdc, HAL_LTDC_CallbackIDTypeDef CallbackID, pLTDC_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_LTDC_UnRegisterCallback(LTDC_HandleTypeDef *hltdc, HAL_LTDC_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @addtogroup LTDC_Exported_Functions_Group2 + * @{ + */ +/* IO operation functions *****************************************************/ +void HAL_LTDC_IRQHandler(LTDC_HandleTypeDef *hltdc); +/** + * @} + */ + +/** @addtogroup LTDC_Exported_Functions_Group3 + * @{ + */ +/* Peripheral Control functions ***********************************************/ +HAL_StatusTypeDef HAL_LTDC_ConfigLayer(LTDC_HandleTypeDef *hltdc, LTDC_LayerCfgTypeDef *pLayerCfg, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetWindowSize(LTDC_HandleTypeDef *hltdc, uint32_t XSize, uint32_t YSize, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetWindowPosition(LTDC_HandleTypeDef *hltdc, uint32_t X0, uint32_t Y0, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetPixelFormat(LTDC_HandleTypeDef *hltdc, uint32_t Pixelformat, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetAlpha(LTDC_HandleTypeDef *hltdc, uint32_t Alpha, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetAddress(LTDC_HandleTypeDef *hltdc, uint32_t Address, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetPitch(LTDC_HandleTypeDef *hltdc, uint32_t LinePitchInPixels, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_ConfigColorKeying(LTDC_HandleTypeDef *hltdc, uint32_t RGBValue, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_ConfigCLUT(LTDC_HandleTypeDef *hltdc, uint32_t *pCLUT, uint32_t CLUTSize, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_EnableColorKeying(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_DisableColorKeying(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_EnableCLUT(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_DisableCLUT(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_ProgramLineEvent(LTDC_HandleTypeDef *hltdc, uint32_t Line); +HAL_StatusTypeDef HAL_LTDC_EnableDither(LTDC_HandleTypeDef *hltdc); +HAL_StatusTypeDef HAL_LTDC_DisableDither(LTDC_HandleTypeDef *hltdc); +HAL_StatusTypeDef HAL_LTDC_Reload(LTDC_HandleTypeDef *hltdc, uint32_t ReloadType); +HAL_StatusTypeDef HAL_LTDC_ConfigLayer_NoReload(LTDC_HandleTypeDef *hltdc, LTDC_LayerCfgTypeDef *pLayerCfg, uint32_t LayerIdx); +HAL_StatusTypeDef 
HAL_LTDC_SetWindowSize_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t XSize, uint32_t YSize, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetWindowPosition_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t X0, uint32_t Y0, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetPixelFormat_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t Pixelformat, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetAlpha_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t Alpha, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetAddress_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t Address, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_SetPitch_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t LinePitchInPixels, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_ConfigColorKeying_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t RGBValue, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_EnableColorKeying_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_DisableColorKeying_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_EnableCLUT_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx); +HAL_StatusTypeDef HAL_LTDC_DisableCLUT_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx); + +/** + * @} + */ + +/** @addtogroup LTDC_Exported_Functions_Group4 + * @{ + */ +/* Peripheral State functions *************************************************/ +HAL_LTDC_StateTypeDef HAL_LTDC_GetState(LTDC_HandleTypeDef *hltdc); +uint32_t HAL_LTDC_GetError(LTDC_HandleTypeDef *hltdc); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup LTDC_Private_Macros LTDC Private Macros + * @{ + */ +#define LTDC_LAYER(__HANDLE__, __LAYER__) ((LTDC_Layer_TypeDef *)((uint32_t)(((uint32_t)((__HANDLE__)->Instance)) + 0x84U + (0x80U*(__LAYER__))))) +#define IS_LTDC_LAYER(__LAYER__) ((__LAYER__) < MAX_LAYER) +#define IS_LTDC_HSPOL(__HSPOL__) (((__HSPOL__) == LTDC_HSPOLARITY_AL) || ((__HSPOL__) == LTDC_HSPOLARITY_AH)) +#define IS_LTDC_VSPOL(__VSPOL__) (((__VSPOL__) == LTDC_VSPOLARITY_AL) || ((__VSPOL__) == LTDC_VSPOLARITY_AH)) +#define IS_LTDC_DEPOL(__DEPOL__) (((__DEPOL__) == LTDC_DEPOLARITY_AL) || ((__DEPOL__) == LTDC_DEPOLARITY_AH)) +#define IS_LTDC_PCPOL(__PCPOL__) (((__PCPOL__) == LTDC_PCPOLARITY_IPC) || ((__PCPOL__) == LTDC_PCPOLARITY_IIPC)) +#define IS_LTDC_HSYNC(__HSYNC__) ((__HSYNC__) <= LTDC_HORIZONTALSYNC) +#define IS_LTDC_VSYNC(__VSYNC__) ((__VSYNC__) <= LTDC_VERTICALSYNC) +#define IS_LTDC_AHBP(__AHBP__) ((__AHBP__) <= LTDC_HORIZONTALSYNC) +#define IS_LTDC_AVBP(__AVBP__) ((__AVBP__) <= LTDC_VERTICALSYNC) +#define IS_LTDC_AAW(__AAW__) ((__AAW__) <= LTDC_HORIZONTALSYNC) +#define IS_LTDC_AAH(__AAH__) ((__AAH__) <= LTDC_VERTICALSYNC) +#define IS_LTDC_TOTALW(__TOTALW__) ((__TOTALW__) <= LTDC_HORIZONTALSYNC) +#define IS_LTDC_TOTALH(__TOTALH__) ((__TOTALH__) <= LTDC_VERTICALSYNC) +#define IS_LTDC_BLUEVALUE(__BBLUE__) ((__BBLUE__) <= LTDC_COLOR) +#define IS_LTDC_GREENVALUE(__BGREEN__) ((__BGREEN__) <= LTDC_COLOR) +#define IS_LTDC_REDVALUE(__BRED__) ((__BRED__) <= LTDC_COLOR) +#define IS_LTDC_BLENDING_FACTOR1(__BLENDING_FACTOR1__) (((__BLENDING_FACTOR1__) == LTDC_BLENDING_FACTOR1_CA) || \ + ((__BLENDING_FACTOR1__) == LTDC_BLENDING_FACTOR1_PAxCA)) +#define 
IS_LTDC_BLENDING_FACTOR2(__BLENDING_FACTOR1__) (((__BLENDING_FACTOR1__) == LTDC_BLENDING_FACTOR2_CA) || \ + ((__BLENDING_FACTOR1__) == LTDC_BLENDING_FACTOR2_PAxCA)) +#define IS_LTDC_PIXEL_FORMAT(__PIXEL_FORMAT__) (((__PIXEL_FORMAT__) == LTDC_PIXEL_FORMAT_ARGB8888) || ((__PIXEL_FORMAT__) == LTDC_PIXEL_FORMAT_RGB888) || \ + ((__PIXEL_FORMAT__) == LTDC_PIXEL_FORMAT_RGB565) || ((__PIXEL_FORMAT__) == LTDC_PIXEL_FORMAT_ARGB1555) || \ + ((__PIXEL_FORMAT__) == LTDC_PIXEL_FORMAT_ARGB4444) || ((__PIXEL_FORMAT__) == LTDC_PIXEL_FORMAT_L8) || \ + ((__PIXEL_FORMAT__) == LTDC_PIXEL_FORMAT_AL44) || ((__PIXEL_FORMAT__) == LTDC_PIXEL_FORMAT_AL88)) +#define IS_LTDC_ALPHA(__ALPHA__) ((__ALPHA__) <= LTDC_ALPHA) +#define IS_LTDC_HCONFIGST(__HCONFIGST__) ((__HCONFIGST__) <= LTDC_STARTPOSITION) +#define IS_LTDC_HCONFIGSP(__HCONFIGSP__) ((__HCONFIGSP__) <= LTDC_STOPPOSITION) +#define IS_LTDC_VCONFIGST(__VCONFIGST__) ((__VCONFIGST__) <= LTDC_STARTPOSITION) +#define IS_LTDC_VCONFIGSP(__VCONFIGSP__) ((__VCONFIGSP__) <= LTDC_STOPPOSITION) +#define IS_LTDC_CFBP(__CFBP__) ((__CFBP__) <= LTDC_COLOR_FRAME_BUFFER) +#define IS_LTDC_CFBLL(__CFBLL__) ((__CFBLL__) <= LTDC_COLOR_FRAME_BUFFER) +#define IS_LTDC_CFBLNBR(__CFBLNBR__) ((__CFBLNBR__) <= LTDC_LINE_NUMBER) +#define IS_LTDC_LIPOS(__LIPOS__) ((__LIPOS__) <= 0x7FFU) +#define IS_LTDC_RELOAD(__RELOADTYPE__) (((__RELOADTYPE__) == LTDC_RELOAD_IMMEDIATE) || ((__RELOADTYPE__) == LTDC_RELOAD_VERTICAL_BLANKING)) +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup LTDC_Private_Functions LTDC Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* LTDC */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_LTDC_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_ltdc_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_ltdc_ex.h new file mode 100644 index 000000000..f13712ed8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_ltdc_ex.h @@ -0,0 +1,85 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_ltdc_ex.h + * @author MCD Application Team + * @brief Header file of LTDC HAL Extension module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_LTDC_EX_H +#define STM32F7xx_HAL_LTDC_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined (LTDC) && defined (DSI) + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" +#include "stm32f7xx_hal_dsi.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup LTDCEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/* Exported macro ------------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup LTDCEx_Exported_Functions + * @{ + */ + +/** @addtogroup LTDCEx_Exported_Functions_Group1 + * @{ + */ +HAL_StatusTypeDef HAL_LTDCEx_StructInitFromVideoConfig(LTDC_HandleTypeDef *hltdc, DSI_VidCfgTypeDef *VidCfg); +HAL_StatusTypeDef HAL_LTDCEx_StructInitFromAdaptedCommandConfig(LTDC_HandleTypeDef *hltdc, DSI_CmdCfgTypeDef *CmdCfg); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* LTDC && DSI */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_LTDC_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd.h new file mode 100644 index 000000000..650dfd513 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd.h @@ -0,0 +1,436 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pcd.h + * @author MCD Application Team + * @brief Header file of PCD HAL module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_PCD_H +#define STM32F7xx_HAL_PCD_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_ll_usb.h" + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup PCD + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup PCD_Exported_Types PCD Exported Types + * @{ + */ + +/** + * @brief PCD State structure definition + */ +typedef enum +{ + HAL_PCD_STATE_RESET = 0x00, + HAL_PCD_STATE_READY = 0x01, + HAL_PCD_STATE_ERROR = 0x02, + HAL_PCD_STATE_BUSY = 0x03, + HAL_PCD_STATE_TIMEOUT = 0x04 +} PCD_StateTypeDef; + +/* Device LPM suspend state */ +typedef enum +{ + LPM_L0 = 0x00, /* on */ + LPM_L1 = 0x01, /* LPM L1 sleep */ + LPM_L2 = 0x02, /* suspend */ + LPM_L3 = 0x03, /* off */ +} PCD_LPM_StateTypeDef; + +typedef enum +{ + PCD_LPM_L0_ACTIVE = 0x00, /* on */ + PCD_LPM_L1_ACTIVE = 0x01, /* LPM L1 sleep */ +} PCD_LPM_MsgTypeDef; + +typedef enum +{ + PCD_BCD_ERROR = 0xFF, + PCD_BCD_CONTACT_DETECTION = 0xFE, + PCD_BCD_STD_DOWNSTREAM_PORT = 0xFD, + PCD_BCD_CHARGING_DOWNSTREAM_PORT = 0xFC, + PCD_BCD_DEDICATED_CHARGING_PORT = 0xFB, + PCD_BCD_DISCOVERY_COMPLETED = 0x00, + +} PCD_BCD_MsgTypeDef; + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +typedef USB_OTG_GlobalTypeDef PCD_TypeDef; +typedef USB_OTG_CfgTypeDef PCD_InitTypeDef; +typedef USB_OTG_EPTypeDef PCD_EPTypeDef; +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +/** + * @brief PCD Handle Structure definition + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +typedef struct __PCD_HandleTypeDef +#else +typedef struct +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + PCD_TypeDef *Instance; /*!< Register base address */ + PCD_InitTypeDef Init; /*!< PCD required parameters */ + __IO uint8_t USB_Address; /*!< USB Address */ + PCD_EPTypeDef IN_ep[16]; /*!< IN endpoint parameters */ + PCD_EPTypeDef OUT_ep[16]; /*!< OUT endpoint parameters */ + HAL_LockTypeDef Lock; /*!< PCD peripheral status */ + __IO PCD_StateTypeDef State; /*!< PCD communication state */ + __IO uint32_t ErrorCode; /*!< PCD Error code */ + uint32_t Setup[12]; /*!< Setup packet buffer */ + PCD_LPM_StateTypeDef LPM_State; /*!< LPM State */ + uint32_t BESL; + + + uint32_t lpm_active; /*!< Enable or disable the Link Power Management . 
+ This parameter can be set to ENABLE or DISABLE */ + void *pData; /*!< Pointer to upper stack Handler */ + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + void (* SOFCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD SOF callback */ + void (* SetupStageCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Setup Stage callback */ + void (* ResetCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Reset callback */ + void (* SuspendCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Suspend callback */ + void (* ResumeCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Resume callback */ + void (* ConnectCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Connect callback */ + void (* DisconnectCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Disconnect callback */ + + void (* DataOutStageCallback)(struct __PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< USB OTG PCD Data OUT Stage callback */ + void (* DataInStageCallback)(struct __PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< USB OTG PCD Data IN Stage callback */ + void (* ISOOUTIncompleteCallback)(struct __PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< USB OTG PCD ISO OUT Incomplete callback */ + void (* ISOINIncompleteCallback)(struct __PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< USB OTG PCD ISO IN Incomplete callback */ + void (* LPMCallback)(struct __PCD_HandleTypeDef *hpcd, PCD_LPM_MsgTypeDef msg); /*!< USB OTG PCD LPM callback */ + + void (* MspInitCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Msp Init callback */ + void (* MspDeInitCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Msp DeInit callback */ +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +} PCD_HandleTypeDef; + +/** + * @} + */ + +/* Include PCD HAL Extended module */ +#include "stm32f7xx_hal_pcd_ex.h" + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup PCD_Exported_Constants PCD Exported Constants + * @{ + */ + +/** @defgroup PCD_Speed PCD Speed + * @{ + */ +#define PCD_SPEED_HIGH USBD_HS_SPEED +#define PCD_SPEED_HIGH_IN_FULL USBD_HSINFS_SPEED +#define PCD_SPEED_FULL USBD_FS_SPEED +/** + * @} + */ + +/** @defgroup PCD_PHY_Module PCD PHY Module + * @{ + */ +#define PCD_PHY_ULPI 1U +#define PCD_PHY_EMBEDDED 2U +#define PCD_PHY_UTMI 3U +/** + * @} + */ + +/** @defgroup PCD_Error_Code_definition PCD Error Code definition + * @brief PCD Error Code definition + * @{ + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +#define HAL_PCD_ERROR_INVALID_CALLBACK (0x00000010U) /*!< Invalid Callback error */ +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup PCD_Exported_Macros PCD Exported Macros + * @brief macros to handle interrupts and specific clock configurations + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +#define __HAL_PCD_ENABLE(__HANDLE__) (void)USB_EnableGlobalInt ((__HANDLE__)->Instance) +#define __HAL_PCD_DISABLE(__HANDLE__) (void)USB_DisableGlobalInt ((__HANDLE__)->Instance) + +#define __HAL_PCD_GET_FLAG(__HANDLE__, __INTERRUPT__) ((USB_ReadInterrupts((__HANDLE__)->Instance) & (__INTERRUPT__)) == (__INTERRUPT__)) +#define __HAL_PCD_CLEAR_FLAG(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->GINTSTS) &= (__INTERRUPT__)) +#define __HAL_PCD_IS_INVALID_INTERRUPT(__HANDLE__) (USB_ReadInterrupts((__HANDLE__)->Instance) == 0U) + + +#define __HAL_PCD_UNGATE_PHYCLOCK(__HANDLE__) *(__IO 
uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE) &= \ + ~(USB_OTG_PCGCCTL_STOPCLK) + +#define __HAL_PCD_GATE_PHYCLOCK(__HANDLE__) *(__IO uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE) |= USB_OTG_PCGCCTL_STOPCLK + +#define __HAL_PCD_IS_PHY_SUSPENDED(__HANDLE__) ((*(__IO uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE)) & 0x10U) + +#define __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_IT() EXTI->IMR |= (USB_OTG_HS_WAKEUP_EXTI_LINE) +#define __HAL_USB_OTG_HS_WAKEUP_EXTI_DISABLE_IT() EXTI->IMR &= ~(USB_OTG_HS_WAKEUP_EXTI_LINE) +#define __HAL_USB_OTG_HS_WAKEUP_EXTI_GET_FLAG() EXTI->PR & (USB_OTG_HS_WAKEUP_EXTI_LINE) +#define __HAL_USB_OTG_HS_WAKEUP_EXTI_CLEAR_FLAG() EXTI->PR = (USB_OTG_HS_WAKEUP_EXTI_LINE) + +#define __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_RISING_EDGE() \ + do { \ + EXTI->FTSR &= ~(USB_OTG_HS_WAKEUP_EXTI_LINE); \ + EXTI->RTSR |= USB_OTG_HS_WAKEUP_EXTI_LINE; \ + } while(0U) +#define __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_IT() EXTI->IMR |= USB_OTG_FS_WAKEUP_EXTI_LINE +#define __HAL_USB_OTG_FS_WAKEUP_EXTI_DISABLE_IT() EXTI->IMR &= ~(USB_OTG_FS_WAKEUP_EXTI_LINE) +#define __HAL_USB_OTG_FS_WAKEUP_EXTI_GET_FLAG() EXTI->PR & (USB_OTG_FS_WAKEUP_EXTI_LINE) +#define __HAL_USB_OTG_FS_WAKEUP_EXTI_CLEAR_FLAG() EXTI->PR = USB_OTG_FS_WAKEUP_EXTI_LINE + +#define __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_RISING_EDGE() \ + do { \ + EXTI->FTSR &= ~(USB_OTG_FS_WAKEUP_EXTI_LINE); \ + EXTI->RTSR |= USB_OTG_FS_WAKEUP_EXTI_LINE; \ + } while(0U) +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup PCD_Exported_Functions PCD Exported Functions + * @{ + */ + +/* Initialization/de-initialization functions ********************************/ +/** @addtogroup PCD_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ +HAL_StatusTypeDef HAL_PCD_Init(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_DeInit(PCD_HandleTypeDef *hpcd); +void HAL_PCD_MspInit(PCD_HandleTypeDef *hpcd); +void HAL_PCD_MspDeInit(PCD_HandleTypeDef *hpcd); + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +/** @defgroup HAL_PCD_Callback_ID_enumeration_definition HAL USB OTG PCD Callback ID enumeration definition + * @brief HAL USB OTG PCD Callback ID enumeration definition + * @{ + */ +typedef enum +{ + HAL_PCD_SOF_CB_ID = 0x01, /*!< USB PCD SOF callback ID */ + HAL_PCD_SETUPSTAGE_CB_ID = 0x02, /*!< USB PCD Setup Stage callback ID */ + HAL_PCD_RESET_CB_ID = 0x03, /*!< USB PCD Reset callback ID */ + HAL_PCD_SUSPEND_CB_ID = 0x04, /*!< USB PCD Suspend callback ID */ + HAL_PCD_RESUME_CB_ID = 0x05, /*!< USB PCD Resume callback ID */ + HAL_PCD_CONNECT_CB_ID = 0x06, /*!< USB PCD Connect callback ID */ + HAL_PCD_DISCONNECT_CB_ID = 0x07, /*!< USB PCD Disconnect callback ID */ + + HAL_PCD_MSPINIT_CB_ID = 0x08, /*!< USB PCD MspInit callback ID */ + HAL_PCD_MSPDEINIT_CB_ID = 0x09 /*!< USB PCD MspDeInit callback ID */ + +} HAL_PCD_CallbackIDTypeDef; +/** + * @} + */ + +/** @defgroup HAL_PCD_Callback_pointer_definition HAL USB OTG PCD Callback pointer definition + * @brief HAL USB OTG PCD Callback pointer definition + * @{ + */ + +typedef void (*pPCD_CallbackTypeDef)(PCD_HandleTypeDef *hpcd); /*!< pointer to a common USB OTG PCD callback function */ +typedef void (*pPCD_DataOutStageCallbackTypeDef)(PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< pointer to USB OTG PCD Data OUT Stage callback */ +typedef void 
(*pPCD_DataInStageCallbackTypeDef)(PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< pointer to USB OTG PCD Data IN Stage callback */ +typedef void (*pPCD_IsoOutIncpltCallbackTypeDef)(PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< pointer to USB OTG PCD ISO OUT Incomplete callback */ +typedef void (*pPCD_IsoInIncpltCallbackTypeDef)(PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< pointer to USB OTG PCD ISO IN Incomplete callback */ +typedef void (*pPCD_LpmCallbackTypeDef)(PCD_HandleTypeDef *hpcd, PCD_LPM_MsgTypeDef msg); /*!< pointer to USB OTG PCD LPM callback */ + +/** + * @} + */ + +HAL_StatusTypeDef HAL_PCD_RegisterCallback(PCD_HandleTypeDef *hpcd, HAL_PCD_CallbackIDTypeDef CallbackID, pPCD_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_PCD_UnRegisterCallback(PCD_HandleTypeDef *hpcd, HAL_PCD_CallbackIDTypeDef CallbackID); + +HAL_StatusTypeDef HAL_PCD_RegisterDataOutStageCallback(PCD_HandleTypeDef *hpcd, pPCD_DataOutStageCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_PCD_UnRegisterDataOutStageCallback(PCD_HandleTypeDef *hpcd); + +HAL_StatusTypeDef HAL_PCD_RegisterDataInStageCallback(PCD_HandleTypeDef *hpcd, pPCD_DataInStageCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_PCD_UnRegisterDataInStageCallback(PCD_HandleTypeDef *hpcd); + +HAL_StatusTypeDef HAL_PCD_RegisterIsoOutIncpltCallback(PCD_HandleTypeDef *hpcd, pPCD_IsoOutIncpltCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_PCD_UnRegisterIsoOutIncpltCallback(PCD_HandleTypeDef *hpcd); + +HAL_StatusTypeDef HAL_PCD_RegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd, pPCD_IsoInIncpltCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_PCD_UnRegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd); + +HAL_StatusTypeDef HAL_PCD_RegisterLpmCallback(PCD_HandleTypeDef *hpcd, pPCD_LpmCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_PCD_UnRegisterLpmCallback(PCD_HandleTypeDef *hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +/** + * @} + */ + +/* I/O operation functions ***************************************************/ +/* Non-Blocking mode: Interrupt */ +/** @addtogroup PCD_Exported_Functions_Group2 Input and Output operation functions + * @{ + */ +HAL_StatusTypeDef HAL_PCD_Start(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_Stop(PCD_HandleTypeDef *hpcd); +void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd); + +void HAL_PCD_SOFCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_SetupStageCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_ResetCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_SuspendCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_ResumeCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_ConnectCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_DisconnectCallback(PCD_HandleTypeDef *hpcd); + +void HAL_PCD_DataOutStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum); +void HAL_PCD_DataInStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum); +void HAL_PCD_ISOOUTIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum); +void HAL_PCD_ISOINIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum); +/** + * @} + */ + +/* Peripheral Control functions **********************************************/ +/** @addtogroup PCD_Exported_Functions_Group3 Peripheral Control functions + * @{ + */ +HAL_StatusTypeDef HAL_PCD_DevConnect(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_DevDisconnect(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_SetAddress(PCD_HandleTypeDef *hpcd, uint8_t address); +HAL_StatusTypeDef HAL_PCD_EP_Open(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint16_t ep_mps, uint8_t ep_type); +HAL_StatusTypeDef 
HAL_PCD_EP_Close(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +HAL_StatusTypeDef HAL_PCD_EP_Receive(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint8_t *pBuf, uint32_t len); +HAL_StatusTypeDef HAL_PCD_EP_Transmit(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint8_t *pBuf, uint32_t len); +uint32_t HAL_PCD_EP_GetRxCount(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +HAL_StatusTypeDef HAL_PCD_EP_SetStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +HAL_StatusTypeDef HAL_PCD_EP_ClrStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +HAL_StatusTypeDef HAL_PCD_EP_Flush(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +HAL_StatusTypeDef HAL_PCD_ActivateRemoteWakeup(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_DeActivateRemoteWakeup(PCD_HandleTypeDef *hpcd); +/** + * @} + */ + +/* Peripheral State functions ************************************************/ +/** @addtogroup PCD_Exported_Functions_Group4 Peripheral State functions + * @{ + */ +PCD_StateTypeDef HAL_PCD_GetState(PCD_HandleTypeDef *hpcd); +/** + * @} + */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup PCD_Private_Constants PCD Private Constants + * @{ + */ +/** @defgroup USB_EXTI_Line_Interrupt USB EXTI line interrupt + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +#define USB_OTG_FS_WAKEUP_EXTI_LINE (0x1U << 18) /*!< USB FS EXTI Line WakeUp Interrupt */ +#define USB_OTG_HS_WAKEUP_EXTI_LINE (0x1U << 20) /*!< USB HS EXTI Line WakeUp Interrupt */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/** + * @} + */ +/** + * @} + */ + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +#ifndef USB_OTG_DOEPINT_OTEPSPR +#define USB_OTG_DOEPINT_OTEPSPR (0x1UL << 5) /*!< Status Phase Received interrupt */ +#endif + +#ifndef USB_OTG_DOEPMSK_OTEPSPRM +#define USB_OTG_DOEPMSK_OTEPSPRM (0x1UL << 5) /*!< Setup Packet Received interrupt mask */ +#endif + +#ifndef USB_OTG_DOEPINT_NAK +#define USB_OTG_DOEPINT_NAK (0x1UL << 13) /*!< NAK interrupt */ +#endif + +#ifndef USB_OTG_DOEPMSK_NAKM +#define USB_OTG_DOEPMSK_NAKM (0x1UL << 13) /*!< OUT Packet NAK interrupt mask */ +#endif + +#ifndef USB_OTG_DOEPINT_STPKTRX +#define USB_OTG_DOEPINT_STPKTRX (0x1UL << 15) /*!< Setup Packet Received interrupt */ +#endif + +#ifndef USB_OTG_DOEPMSK_NYETM +#define USB_OTG_DOEPMSK_NYETM (0x1UL << 14) /*!< Setup Packet Received interrupt mask */ +#endif +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup PCD_Private_Macros PCD Private Macros + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_PCD_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd_ex.h new file mode 100644 index 000000000..8f9da2812 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd_ex.h @@ -0,0 +1,86 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pcd_ex.h + * @author MCD Application Team + * @brief Header file of PCD HAL Extension module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_PCD_EX_H +#define STM32F7xx_HAL_PCD_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup PCDEx + * @{ + */ +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/* Exported macros -----------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup PCDEx_Exported_Functions PCDEx Exported Functions + * @{ + */ +/** @addtogroup PCDEx_Exported_Functions_Group1 Peripheral Control functions + * @{ + */ + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +HAL_StatusTypeDef HAL_PCDEx_SetTxFiFo(PCD_HandleTypeDef *hpcd, uint8_t fifo, uint16_t size); +HAL_StatusTypeDef HAL_PCDEx_SetRxFiFo(PCD_HandleTypeDef *hpcd, uint16_t size); +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +HAL_StatusTypeDef HAL_PCDEx_ActivateLPM(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCDEx_DeActivateLPM(PCD_HandleTypeDef *hpcd); + +void HAL_PCDEx_LPM_Callback(PCD_HandleTypeDef *hpcd, PCD_LPM_MsgTypeDef msg); +void HAL_PCDEx_BCD_Callback(PCD_HandleTypeDef *hpcd, PCD_BCD_MsgTypeDef msg); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +#ifdef __cplusplus +} +#endif + + +#endif /* STM32F7xx_HAL_PCD_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr.h new file mode 100644 index 000000000..79cb04809 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr.h @@ -0,0 +1,404 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pwr.h + * @author MCD Application Team + * @brief Header file of PWR HAL module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_PWR_H +#define __STM32F7xx_HAL_PWR_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup PWR + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup PWR_Exported_Types PWR Exported Types + * @{ + */ + +/** + * @brief PWR PVD configuration structure definition + */ +typedef struct +{ + uint32_t PVDLevel; /*!< PVDLevel: Specifies the PVD detection level. + This parameter can be a value of @ref PWR_PVD_detection_level */ + + uint32_t Mode; /*!< Mode: Specifies the operating mode for the selected pins. + This parameter can be a value of @ref PWR_PVD_Mode */ +}PWR_PVDTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup PWR_Exported_Constants PWR Exported Constants + * @{ + */ + +/** @defgroup PWR_PVD_detection_level PWR PVD detection level + * @{ + */ +#define PWR_PVDLEVEL_0 PWR_CR1_PLS_LEV0 +#define PWR_PVDLEVEL_1 PWR_CR1_PLS_LEV1 +#define PWR_PVDLEVEL_2 PWR_CR1_PLS_LEV2 +#define PWR_PVDLEVEL_3 PWR_CR1_PLS_LEV3 +#define PWR_PVDLEVEL_4 PWR_CR1_PLS_LEV4 +#define PWR_PVDLEVEL_5 PWR_CR1_PLS_LEV5 +#define PWR_PVDLEVEL_6 PWR_CR1_PLS_LEV6 +#define PWR_PVDLEVEL_7 PWR_CR1_PLS_LEV7/* External input analog voltage + (Compare internally to VREFINT) */ + +/** + * @} + */ + +/** @defgroup PWR_PVD_Mode PWR PVD Mode + * @{ + */ +#define PWR_PVD_MODE_NORMAL ((uint32_t)0x00000000U) /*!< basic mode is used */ +#define PWR_PVD_MODE_IT_RISING ((uint32_t)0x00010001U) /*!< External Interrupt Mode with Rising edge trigger detection */ +#define PWR_PVD_MODE_IT_FALLING ((uint32_t)0x00010002U) /*!< External Interrupt Mode with Falling edge trigger detection */ +#define PWR_PVD_MODE_IT_RISING_FALLING ((uint32_t)0x00010003U) /*!< External Interrupt Mode with Rising/Falling edge trigger detection */ +#define PWR_PVD_MODE_EVENT_RISING ((uint32_t)0x00020001U) /*!< Event Mode with Rising edge trigger detection */ +#define PWR_PVD_MODE_EVENT_FALLING ((uint32_t)0x00020002U) /*!< Event Mode with Falling edge trigger detection */ +#define PWR_PVD_MODE_EVENT_RISING_FALLING ((uint32_t)0x00020003U) /*!< Event Mode with Rising/Falling edge trigger detection */ +/** + * @} + */ + +/** @defgroup PWR_Regulator_state_in_STOP_mode PWR Regulator state in SLEEP/STOP mode + * @{ + */ +#define PWR_MAINREGULATOR_ON ((uint32_t)0x00000000U) +#define PWR_LOWPOWERREGULATOR_ON PWR_CR1_LPDS +/** + * @} + */ + +/** @defgroup PWR_SLEEP_mode_entry PWR SLEEP mode entry + * @{ + */ +#define PWR_SLEEPENTRY_WFI ((uint8_t)0x01U) +#define PWR_SLEEPENTRY_WFE ((uint8_t)0x02U) +/** + * @} + */ + +/** @defgroup PWR_STOP_mode_entry PWR STOP mode entry + * @{ + */ +#define PWR_STOPENTRY_WFI ((uint8_t)0x01U) +#define PWR_STOPENTRY_WFE ((uint8_t)0x02U) +/** + * @} + */ + +/** @defgroup PWR_Regulator_Voltage_Scale PWR Regulator Voltage Scale + * @{ + */ +#define PWR_REGULATOR_VOLTAGE_SCALE1 PWR_CR1_VOS +#define 
PWR_REGULATOR_VOLTAGE_SCALE2 PWR_CR1_VOS_1 +#define PWR_REGULATOR_VOLTAGE_SCALE3 PWR_CR1_VOS_0 +/** + * @} + */ + +/** @defgroup PWR_Flag PWR Flag + * @{ + */ +#define PWR_FLAG_WU PWR_CSR1_WUIF +#define PWR_FLAG_SB PWR_CSR1_SBF +#define PWR_FLAG_PVDO PWR_CSR1_PVDO +#define PWR_FLAG_BRR PWR_CSR1_BRR +#define PWR_FLAG_VOSRDY PWR_CSR1_VOSRDY +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup PWR_Exported_Macro PWR Exported Macro + * @{ + */ + +/** @brief macros configure the main internal regulator output voltage. + * @param __REGULATOR__ specifies the regulator output voltage to achieve + * a tradeoff between performance and power consumption when the device does + * not operate at the maximum frequency (refer to the datasheets for more details). + * This parameter can be one of the following values: + * @arg PWR_REGULATOR_VOLTAGE_SCALE1: Regulator voltage output Scale 1 mode + * @arg PWR_REGULATOR_VOLTAGE_SCALE2: Regulator voltage output Scale 2 mode + * @arg PWR_REGULATOR_VOLTAGE_SCALE3: Regulator voltage output Scale 3 mode + * @retval None + */ +#define __HAL_PWR_VOLTAGESCALING_CONFIG(__REGULATOR__) do { \ + __IO uint32_t tmpreg; \ + MODIFY_REG(PWR->CR1, PWR_CR1_VOS, (__REGULATOR__)); \ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(PWR->CR1, PWR_CR1_VOS); \ + UNUSED(tmpreg); \ + } while(0) + +/** @brief Check PWR flag is set or not. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg PWR_FLAG_WU: Wake Up flag. This flag indicates that a wakeup event + * was received on the internal wakeup line in standby mode (RTC alarm (Alarm A or Alarm B), + * RTC Tamper event, RTC TimeStamp event or RTC Wakeup)). + * @arg PWR_FLAG_SB: StandBy flag. This flag indicates that the system was + * resumed from StandBy mode. + * @arg PWR_FLAG_PVDO: PVD Output. This flag is valid only if PVD is enabled + * by the HAL_PWR_EnablePVD() function. The PVD is stopped by Standby mode + * For this reason, this bit is equal to 0 after Standby or reset + * until the PVDE bit is set. + * @arg PWR_FLAG_BRR: Backup regulator ready flag. This bit is not reset + * when the device wakes up from Standby mode or by a system reset + * or power reset. + * @arg PWR_FLAG_VOSRDY: This flag indicates that the Regulator voltage + * scaling output selection is ready. + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_PWR_GET_FLAG(__FLAG__) ((PWR->CSR1 & (__FLAG__)) == (__FLAG__)) + +/** @brief Clear the PWR's pending flags. + * @param __FLAG__ specifies the flag to clear. + * This parameter can be one of the following values: + * @arg PWR_FLAG_SB: StandBy flag + */ +#define __HAL_PWR_CLEAR_FLAG(__FLAG__) (PWR->CR1 |= (__FLAG__) << 2) + +/** + * @brief Enable the PVD Exti Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_IT() (EXTI->IMR |= (PWR_EXTI_LINE_PVD)) + +/** + * @brief Disable the PVD EXTI Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_IT() (EXTI->IMR &= ~(PWR_EXTI_LINE_PVD)) + +/** + * @brief Enable event on PVD Exti Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_EVENT() (EXTI->EMR |= (PWR_EXTI_LINE_PVD)) + +/** + * @brief Disable event on PVD Exti Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_EVENT() (EXTI->EMR &= ~(PWR_EXTI_LINE_PVD)) + +/** + * @brief Enable the PVD Extended Interrupt Rising Trigger. + * @retval None. 
+ */ +#define __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE() SET_BIT(EXTI->RTSR, PWR_EXTI_LINE_PVD) + +/** + * @brief Disable the PVD Extended Interrupt Rising Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE() CLEAR_BIT(EXTI->RTSR, PWR_EXTI_LINE_PVD) + +/** + * @brief Enable the PVD Extended Interrupt Falling Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE() SET_BIT(EXTI->FTSR, PWR_EXTI_LINE_PVD) + + +/** + * @brief Disable the PVD Extended Interrupt Falling Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE() CLEAR_BIT(EXTI->FTSR, PWR_EXTI_LINE_PVD) + + +/** + * @brief PVD EXTI line configuration: set rising & falling edge trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_RISING_FALLING_EDGE() __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE();__HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE(); + +/** + * @brief Disable the PVD Extended Interrupt Rising & Falling Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_RISING_FALLING_EDGE() __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE();__HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE(); + +/** + * @brief checks whether the specified PVD Exti interrupt flag is set or not. + * @retval EXTI PVD Line Status. + */ +#define __HAL_PWR_PVD_EXTI_GET_FLAG() (EXTI->PR & (PWR_EXTI_LINE_PVD)) + +/** + * @brief Clear the PVD Exti flag. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_CLEAR_FLAG() (EXTI->PR = (PWR_EXTI_LINE_PVD)) + +/** + * @brief Generates a Software interrupt on PVD EXTI line. + * @retval None + */ +#define __HAL_PWR_PVD_EXTI_GENERATE_SWIT() (EXTI->SWIER |= (PWR_EXTI_LINE_PVD)) + +/** + * @} + */ + +/* Include PWR HAL Extension module */ +#include "stm32f7xx_hal_pwr_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup PWR_Exported_Functions PWR Exported Functions + * @{ + */ + +/** @addtogroup PWR_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ +/* Initialization and de-initialization functions *****************************/ +void HAL_PWR_DeInit(void); +void HAL_PWR_EnableBkUpAccess(void); +void HAL_PWR_DisableBkUpAccess(void); +/** + * @} + */ + +/** @addtogroup PWR_Exported_Functions_Group2 Peripheral Control functions + * @{ + */ +/* Peripheral Control functions **********************************************/ +/* PVD configuration */ +void HAL_PWR_ConfigPVD(PWR_PVDTypeDef *sConfigPVD); +void HAL_PWR_EnablePVD(void); +void HAL_PWR_DisablePVD(void); + +/* WakeUp pins configuration */ +void HAL_PWR_EnableWakeUpPin(uint32_t WakeUpPinPolarity); +void HAL_PWR_DisableWakeUpPin(uint32_t WakeUpPinx); + +/* Low Power modes entry */ +void HAL_PWR_EnterSTOPMode(uint32_t Regulator, uint8_t STOPEntry); +void HAL_PWR_EnterSLEEPMode(uint32_t Regulator, uint8_t SLEEPEntry); +void HAL_PWR_EnterSTANDBYMode(void); + +/* Power PVD IRQ Handler */ +void HAL_PWR_PVD_IRQHandler(void); +void HAL_PWR_PVDCallback(void); + +/* Cortex System Control functions *******************************************/ +void HAL_PWR_EnableSleepOnExit(void); +void HAL_PWR_DisableSleepOnExit(void); +void HAL_PWR_EnableSEVOnPend(void); +void HAL_PWR_DisableSEVOnPend(void); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup PWR_Private_Constants PWR Private Constants 
+ * @{ + */ + +/** @defgroup PWR_PVD_EXTI_Line PWR PVD EXTI Line + * @{ + */ +#define PWR_EXTI_LINE_PVD ((uint32_t)EXTI_IMR_IM16) /*!< External interrupt line 16 Connected to the PVD EXTI Line */ +/** + * @} + */ + +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup PWR_Private_Macros PWR Private Macros + * @{ + */ + +/** @defgroup PWR_IS_PWR_Definitions PWR Private macros to check input parameters + * @{ + */ +#define IS_PWR_PVD_LEVEL(LEVEL) (((LEVEL) == PWR_PVDLEVEL_0) || ((LEVEL) == PWR_PVDLEVEL_1)|| \ + ((LEVEL) == PWR_PVDLEVEL_2) || ((LEVEL) == PWR_PVDLEVEL_3)|| \ + ((LEVEL) == PWR_PVDLEVEL_4) || ((LEVEL) == PWR_PVDLEVEL_5)|| \ + ((LEVEL) == PWR_PVDLEVEL_6) || ((LEVEL) == PWR_PVDLEVEL_7)) +#define IS_PWR_PVD_MODE(MODE) (((MODE) == PWR_PVD_MODE_IT_RISING)|| ((MODE) == PWR_PVD_MODE_IT_FALLING) || \ + ((MODE) == PWR_PVD_MODE_IT_RISING_FALLING) || ((MODE) == PWR_PVD_MODE_EVENT_RISING) || \ + ((MODE) == PWR_PVD_MODE_EVENT_FALLING) || ((MODE) == PWR_PVD_MODE_EVENT_RISING_FALLING) || \ + ((MODE) == PWR_PVD_MODE_NORMAL)) +#define IS_PWR_REGULATOR(REGULATOR) (((REGULATOR) == PWR_MAINREGULATOR_ON) || \ + ((REGULATOR) == PWR_LOWPOWERREGULATOR_ON)) +#define IS_PWR_SLEEP_ENTRY(ENTRY) (((ENTRY) == PWR_SLEEPENTRY_WFI) || ((ENTRY) == PWR_SLEEPENTRY_WFE)) +#define IS_PWR_STOP_ENTRY(ENTRY) (((ENTRY) == PWR_STOPENTRY_WFI) || ((ENTRY) == PWR_STOPENTRY_WFE)) +#define IS_PWR_REGULATOR_VOLTAGE(VOLTAGE) (((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE1) || \ + ((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE2) || \ + ((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE3)) + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* __STM32F7xx_HAL_PWR_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr_ex.h new file mode 100644 index 000000000..698bde6ea --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr_ex.h @@ -0,0 +1,262 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pwr_ex.h + * @author MCD Application Team + * @brief Header file of PWR HAL Extension module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics.
+ * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_PWR_EX_H +#define __STM32F7xx_HAL_PWR_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup PWREx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup PWREx_Exported_Constants PWREx Exported Constants + * @{ + */ +/** @defgroup PWREx_WakeUp_Pins PWREx Wake Up Pins + * @{ + */ +#define PWR_WAKEUP_PIN1 PWR_CSR2_EWUP1 +#define PWR_WAKEUP_PIN2 PWR_CSR2_EWUP2 +#define PWR_WAKEUP_PIN3 PWR_CSR2_EWUP3 +#define PWR_WAKEUP_PIN4 PWR_CSR2_EWUP4 +#define PWR_WAKEUP_PIN5 PWR_CSR2_EWUP5 +#define PWR_WAKEUP_PIN6 PWR_CSR2_EWUP6 +#define PWR_WAKEUP_PIN1_HIGH PWR_CSR2_EWUP1 +#define PWR_WAKEUP_PIN2_HIGH PWR_CSR2_EWUP2 +#define PWR_WAKEUP_PIN3_HIGH PWR_CSR2_EWUP3 +#define PWR_WAKEUP_PIN4_HIGH PWR_CSR2_EWUP4 +#define PWR_WAKEUP_PIN5_HIGH PWR_CSR2_EWUP5 +#define PWR_WAKEUP_PIN6_HIGH PWR_CSR2_EWUP6 +#define PWR_WAKEUP_PIN1_LOW (uint32_t)((PWR_CR2_WUPP1<<6) | PWR_CSR2_EWUP1) +#define PWR_WAKEUP_PIN2_LOW (uint32_t)((PWR_CR2_WUPP2<<6) | PWR_CSR2_EWUP2) +#define PWR_WAKEUP_PIN3_LOW (uint32_t)((PWR_CR2_WUPP3<<6) | PWR_CSR2_EWUP3) +#define PWR_WAKEUP_PIN4_LOW (uint32_t)((PWR_CR2_WUPP4<<6) | PWR_CSR2_EWUP4) +#define PWR_WAKEUP_PIN5_LOW (uint32_t)((PWR_CR2_WUPP5<<6) | PWR_CSR2_EWUP5) +#define PWR_WAKEUP_PIN6_LOW (uint32_t)((PWR_CR2_WUPP6<<6) | PWR_CSR2_EWUP6) + +/** + * @} + */ + +/** @defgroup PWREx_Regulator_state_in_UnderDrive_mode PWREx Regulator state in UnderDrive mode + * @{ + */ +#define PWR_MAINREGULATOR_UNDERDRIVE_ON PWR_CR1_MRUDS +#define PWR_LOWPOWERREGULATOR_UNDERDRIVE_ON ((uint32_t)(PWR_CR1_LPDS | PWR_CR1_LPUDS)) +/** + * @} + */ + +/** @defgroup PWREx_Over_Under_Drive_Flag PWREx Over Under Drive Flag + * @{ + */ +#define PWR_FLAG_ODRDY PWR_CSR1_ODRDY +#define PWR_FLAG_ODSWRDY PWR_CSR1_ODSWRDY +#define PWR_FLAG_UDRDY PWR_CSR1_UDRDY +/** + * @} + */ + +/** @defgroup PWREx_Wakeup_Pins_Flag PWREx Wake Up Pin Flags + * @{ + */ +#define PWR_WAKEUP_PIN_FLAG1 PWR_CSR2_WUPF1 +#define PWR_WAKEUP_PIN_FLAG2 PWR_CSR2_WUPF2 +#define PWR_WAKEUP_PIN_FLAG3 PWR_CSR2_WUPF3 +#define PWR_WAKEUP_PIN_FLAG4 PWR_CSR2_WUPF4 +#define PWR_WAKEUP_PIN_FLAG5 PWR_CSR2_WUPF5 +#define PWR_WAKEUP_PIN_FLAG6 PWR_CSR2_WUPF6 +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup PWREx_Exported_Macro PWREx Exported Macro + * @{ + */ +/** @brief Macros to enable or disable the Over drive mode. + */ +#define __HAL_PWR_OVERDRIVE_ENABLE() (PWR->CR1 |= (uint32_t)PWR_CR1_ODEN) +#define __HAL_PWR_OVERDRIVE_DISABLE() (PWR->CR1 &= (uint32_t)(~PWR_CR1_ODEN)) + +/** @brief Macros to enable or disable the Over drive switching. 
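+ * @note Illustrative usage sketch (editorial addition, not part of the original
+ *       ST header): the over-drive entry sequence sets ODEN, waits for ODRDY,
+ *       then sets ODSWEN and waits for ODSWRDY. The HAL_PWREx_EnableOverDrive()
+ *       function declared below in this header performs the same sequence with
+ *       timeout handling.
+ * @code
+ *   __HAL_RCC_PWR_CLK_ENABLE();                                 // allow PWR register access
+ *   __HAL_PWR_OVERDRIVE_ENABLE();
+ *   while (__HAL_PWR_GET_ODRUDR_FLAG(PWR_FLAG_ODRDY) == 0U) { } // add a timeout in real code
+ *   __HAL_PWR_OVERDRIVESWITCHING_ENABLE();
+ *   while (__HAL_PWR_GET_ODRUDR_FLAG(PWR_FLAG_ODSWRDY) == 0U) { }
+ * @endcode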
+ */ +#define __HAL_PWR_OVERDRIVESWITCHING_ENABLE() (PWR->CR1 |= (uint32_t)PWR_CR1_ODSWEN) +#define __HAL_PWR_OVERDRIVESWITCHING_DISABLE() (PWR->CR1 &= (uint32_t)(~PWR_CR1_ODSWEN)) + +/** @brief Macros to enable or disable the Under drive mode. + * @note This mode is enabled only with STOP low power mode. + * In this mode, the 1.2V domain is preserved in reduced leakage mode. This + * mode is only available when the main regulator or the low power regulator + * is in low voltage mode. + * @note If the Under-drive mode was enabled, it is automatically disabled after + * exiting Stop mode. + * When the voltage regulator operates in Under-drive mode, an additional + * startup delay is induced when waking up from Stop mode. + */ +#define __HAL_PWR_UNDERDRIVE_ENABLE() (PWR->CR1 |= (uint32_t)PWR_CR1_UDEN) +#define __HAL_PWR_UNDERDRIVE_DISABLE() (PWR->CR1 &= (uint32_t)(~PWR_CR1_UDEN)) + +/** @brief Check PWR flag is set or not. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg PWR_FLAG_ODRDY: This flag indicates that the Over-drive mode + * is ready + * @arg PWR_FLAG_ODSWRDY: This flag indicates that the Over-drive mode + * switching is ready + * @arg PWR_FLAG_UDRDY: This flag indicates that the Under-drive mode + * is enabled in Stop mode + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_PWR_GET_ODRUDR_FLAG(__FLAG__) ((PWR->CSR1 & (__FLAG__)) == (__FLAG__)) + +/** @brief Clear the Under-Drive Ready flag. + */ +#define __HAL_PWR_CLEAR_ODRUDR_FLAG() (PWR->CSR1 |= PWR_FLAG_UDRDY) + +/** @brief Check Wake Up flag is set or not. + * @param __WUFLAG__ specifies the Wake Up flag to check. + * This parameter can be one of the following values: + * @arg PWR_WAKEUP_PIN_FLAG1: Wakeup Pin Flag for PA0 + * @arg PWR_WAKEUP_PIN_FLAG2: Wakeup Pin Flag for PA2 + * @arg PWR_WAKEUP_PIN_FLAG3: Wakeup Pin Flag for PC1 + * @arg PWR_WAKEUP_PIN_FLAG4: Wakeup Pin Flag for PC13 + * @arg PWR_WAKEUP_PIN_FLAG5: Wakeup Pin Flag for PI8 + * @arg PWR_WAKEUP_PIN_FLAG6: Wakeup Pin Flag for PI11 + */ +#define __HAL_PWR_GET_WAKEUP_FLAG(__WUFLAG__) (PWR->CSR2 & (__WUFLAG__)) + +/** @brief Clear the WakeUp pins flags. + * @param __WUFLAG__ specifies the Wake Up pin flag to clear. 
+ * This parameter can be one of the following values: + * @arg PWR_WAKEUP_PIN_FLAG1: Wakeup Pin Flag for PA0 + * @arg PWR_WAKEUP_PIN_FLAG2: Wakeup Pin Flag for PA2 + * @arg PWR_WAKEUP_PIN_FLAG3: Wakeup Pin Flag for PC1 + * @arg PWR_WAKEUP_PIN_FLAG4: Wakeup Pin Flag for PC13 + * @arg PWR_WAKEUP_PIN_FLAG5: Wakeup Pin Flag for PI8 + * @arg PWR_WAKEUP_PIN_FLAG6: Wakeup Pin Flag for PI11 + */ +#define __HAL_PWR_CLEAR_WAKEUP_FLAG(__WUFLAG__) (PWR->CR2 |= (__WUFLAG__)) +/** + * @} + */ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup PWREx_Exported_Functions PWREx Exported Functions + * @{ + */ + +/** @addtogroup PWREx_Exported_Functions_Group1 + * @{ + */ +uint32_t HAL_PWREx_GetVoltageRange(void); +HAL_StatusTypeDef HAL_PWREx_ControlVoltageScaling(uint32_t VoltageScaling); + +void HAL_PWREx_EnableFlashPowerDown(void); +void HAL_PWREx_DisableFlashPowerDown(void); +HAL_StatusTypeDef HAL_PWREx_EnableBkUpReg(void); +HAL_StatusTypeDef HAL_PWREx_DisableBkUpReg(void); + +void HAL_PWREx_EnableMainRegulatorLowVoltage(void); +void HAL_PWREx_DisableMainRegulatorLowVoltage(void); +void HAL_PWREx_EnableLowRegulatorLowVoltage(void); +void HAL_PWREx_DisableLowRegulatorLowVoltage(void); + +HAL_StatusTypeDef HAL_PWREx_EnableOverDrive(void); +HAL_StatusTypeDef HAL_PWREx_DisableOverDrive(void); +HAL_StatusTypeDef HAL_PWREx_EnterUnderDriveSTOPMode(uint32_t Regulator, uint8_t STOPEntry); + +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup PWREx_Private_Macros PWREx Private Macros + * @{ + */ + +/** @defgroup PWREx_IS_PWR_Definitions PWREx Private macros to check input parameters + * @{ + */ +#define IS_PWR_REGULATOR_UNDERDRIVE(REGULATOR) (((REGULATOR) == PWR_MAINREGULATOR_UNDERDRIVE_ON) || \ + ((REGULATOR) == PWR_LOWPOWERREGULATOR_UNDERDRIVE_ON)) +#define IS_PWR_WAKEUP_PIN(__PIN__) (((__PIN__) == PWR_WAKEUP_PIN1) || \ + ((__PIN__) == PWR_WAKEUP_PIN2) || \ + ((__PIN__) == PWR_WAKEUP_PIN3) || \ + ((__PIN__) == PWR_WAKEUP_PIN4) || \ + ((__PIN__) == PWR_WAKEUP_PIN5) || \ + ((__PIN__) == PWR_WAKEUP_PIN6) || \ + ((__PIN__) == PWR_WAKEUP_PIN1_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN2_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN3_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN4_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN5_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN6_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN1_LOW) || \ + ((__PIN__) == PWR_WAKEUP_PIN2_LOW) || \ + ((__PIN__) == PWR_WAKEUP_PIN3_LOW) || \ + ((__PIN__) == PWR_WAKEUP_PIN4_LOW) || \ + ((__PIN__) == PWR_WAKEUP_PIN5_LOW) || \ + ((__PIN__) == PWR_WAKEUP_PIN6_LOW)) +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* __STM32F7xx_HAL_PWR_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc.h new file mode 100644 index 000000000..1681d79b2 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc.h @@ -0,0 +1,1310 @@ +/** + 
****************************************************************************** + * @file stm32f7xx_hal_rcc.h + * @author MCD Application Team + * @brief Header file of RCC HAL module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics.
+ * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_RCC_H +#define __STM32F7xx_HAL_RCC_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/* Include RCC HAL Extended module */ +/* (include on top of file since RCC structures are defined in extended file) */ +#include "stm32f7xx_hal_rcc_ex.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup RCC + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup RCC_Exported_Types RCC Exported Types + * @{ + */ + +/** + * @brief RCC Internal/External Oscillator (HSE, HSI, LSE and LSI) configuration structure definition + */ +typedef struct +{ + uint32_t OscillatorType; /*!< The oscillators to be configured. + This parameter can be a value of @ref RCC_Oscillator_Type */ + + uint32_t HSEState; /*!< The new state of the HSE. + This parameter can be a value of @ref RCC_HSE_Config */ + + uint32_t LSEState; /*!< The new state of the LSE. + This parameter can be a value of @ref RCC_LSE_Config */ + + uint32_t HSIState; /*!< The new state of the HSI. + This parameter can be a value of @ref RCC_HSI_Config */ + + uint32_t HSICalibrationValue; /*!< The HSI calibration trimming value (default is RCC_HSICALIBRATION_DEFAULT). + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0x1F */ + + uint32_t LSIState; /*!< The new state of the LSI. + This parameter can be a value of @ref RCC_LSI_Config */ + + RCC_PLLInitTypeDef PLL; /*!< PLL structure parameters */ + +}RCC_OscInitTypeDef; + +/** + * @brief RCC System, AHB and APB busses clock configuration structure definition + */ +typedef struct +{ + uint32_t ClockType; /*!< The clock to be configured. + This parameter can be a value of @ref RCC_System_Clock_Type */ + + uint32_t SYSCLKSource; /*!< The clock source (SYSCLKS) used as system clock. + This parameter can be a value of @ref RCC_System_Clock_Source */ + + uint32_t AHBCLKDivider; /*!< The AHB clock (HCLK) divider. This clock is derived from the system clock (SYSCLK). + This parameter can be a value of @ref RCC_AHB_Clock_Source */ + + uint32_t APB1CLKDivider; /*!< The APB1 clock (PCLK1) divider. This clock is derived from the AHB clock (HCLK). + This parameter can be a value of @ref RCC_APB1_APB2_Clock_Source */ + + uint32_t APB2CLKDivider; /*!< The APB2 clock (PCLK2) divider. This clock is derived from the AHB clock (HCLK). 
+ This parameter can be a value of @ref RCC_APB1_APB2_Clock_Source */ + +}RCC_ClkInitTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RCC_Exported_Constants RCC Exported Constants + * @{ + */ + +/** @defgroup RCC_Oscillator_Type Oscillator Type + * @{ + */ +#define RCC_OSCILLATORTYPE_NONE ((uint32_t)0x00000000U) +#define RCC_OSCILLATORTYPE_HSE ((uint32_t)0x00000001U) +#define RCC_OSCILLATORTYPE_HSI ((uint32_t)0x00000002U) +#define RCC_OSCILLATORTYPE_LSE ((uint32_t)0x00000004U) +#define RCC_OSCILLATORTYPE_LSI ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup RCC_HSE_Config RCC HSE Config + * @{ + */ +#define RCC_HSE_OFF ((uint32_t)0x00000000U) +#define RCC_HSE_ON RCC_CR_HSEON +#define RCC_HSE_BYPASS ((uint32_t)(RCC_CR_HSEBYP | RCC_CR_HSEON)) +/** + * @} + */ + +/** @defgroup RCC_LSE_Config RCC LSE Config + * @{ + */ +#define RCC_LSE_OFF ((uint32_t)0x00000000U) +#define RCC_LSE_ON RCC_BDCR_LSEON +#define RCC_LSE_BYPASS ((uint32_t)(RCC_BDCR_LSEBYP | RCC_BDCR_LSEON)) +/** + * @} + */ + +/** @defgroup RCC_HSI_Config RCC HSI Config + * @{ + */ +#define RCC_HSI_OFF ((uint32_t)0x00000000U) +#define RCC_HSI_ON RCC_CR_HSION + +#define RCC_HSICALIBRATION_DEFAULT ((uint32_t)0x10U) /* Default HSI calibration trimming value */ +/** + * @} + */ + +/** @defgroup RCC_LSI_Config RCC LSI Config + * @{ + */ +#define RCC_LSI_OFF ((uint32_t)0x00000000U) +#define RCC_LSI_ON RCC_CSR_LSION +/** + * @} + */ + +/** @defgroup RCC_PLL_Config RCC PLL Config + * @{ + */ +#define RCC_PLL_NONE ((uint32_t)0x00000000U) +#define RCC_PLL_OFF ((uint32_t)0x00000001U) +#define RCC_PLL_ON ((uint32_t)0x00000002U) +/** + * @} + */ + +/** @defgroup RCC_PLLP_Clock_Divider PLLP Clock Divider + * @{ + */ +#define RCC_PLLP_DIV2 ((uint32_t)0x00000002U) +#define RCC_PLLP_DIV4 ((uint32_t)0x00000004U) +#define RCC_PLLP_DIV6 ((uint32_t)0x00000006U) +#define RCC_PLLP_DIV8 ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup RCC_PLL_Clock_Source PLL Clock Source + * @{ + */ +#define RCC_PLLSOURCE_HSI RCC_PLLCFGR_PLLSRC_HSI +#define RCC_PLLSOURCE_HSE RCC_PLLCFGR_PLLSRC_HSE +/** + * @} + */ + +/** @defgroup RCC_System_Clock_Type RCC System Clock Type + * @{ + */ +#define RCC_CLOCKTYPE_SYSCLK ((uint32_t)0x00000001U) +#define RCC_CLOCKTYPE_HCLK ((uint32_t)0x00000002U) +#define RCC_CLOCKTYPE_PCLK1 ((uint32_t)0x00000004U) +#define RCC_CLOCKTYPE_PCLK2 ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup RCC_System_Clock_Source RCC System Clock Source + * @{ + */ +#define RCC_SYSCLKSOURCE_HSI RCC_CFGR_SW_HSI +#define RCC_SYSCLKSOURCE_HSE RCC_CFGR_SW_HSE +#define RCC_SYSCLKSOURCE_PLLCLK RCC_CFGR_SW_PLL +/** + * @} + */ + + +/** @defgroup RCC_System_Clock_Source_Status System Clock Source Status + * @{ + */ +#define RCC_SYSCLKSOURCE_STATUS_HSI RCC_CFGR_SWS_HSI /*!< HSI used as system clock */ +#define RCC_SYSCLKSOURCE_STATUS_HSE RCC_CFGR_SWS_HSE /*!< HSE used as system clock */ +#define RCC_SYSCLKSOURCE_STATUS_PLLCLK RCC_CFGR_SWS_PLL /*!< PLL used as system clock */ +/** + * @} + */ + +/** @defgroup RCC_AHB_Clock_Source RCC AHB Clock Source + * @{ + */ +#define RCC_SYSCLK_DIV1 RCC_CFGR_HPRE_DIV1 +#define RCC_SYSCLK_DIV2 RCC_CFGR_HPRE_DIV2 +#define RCC_SYSCLK_DIV4 RCC_CFGR_HPRE_DIV4 +#define RCC_SYSCLK_DIV8 RCC_CFGR_HPRE_DIV8 +#define RCC_SYSCLK_DIV16 RCC_CFGR_HPRE_DIV16 +#define RCC_SYSCLK_DIV64 RCC_CFGR_HPRE_DIV64 +#define RCC_SYSCLK_DIV128 RCC_CFGR_HPRE_DIV128 +#define RCC_SYSCLK_DIV256 RCC_CFGR_HPRE_DIV256 +#define RCC_SYSCLK_DIV512 RCC_CFGR_HPRE_DIV512 
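+/* Illustrative usage sketch (editorial addition, not part of the original ST
+   header): the AHB/APB prescaler constants in this section are typically written
+   into an RCC_ClkInitTypeDef and applied with HAL_RCC_ClockConfig(). The flash
+   latency shown (FLASH_LATENCY_7) is an assumption that depends on the target
+   HCLK frequency and supply voltage.
+
+   RCC_ClkInitTypeDef clk = {0};
+   clk.ClockType      = RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK |
+                        RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2;
+   clk.SYSCLKSource   = RCC_SYSCLKSOURCE_PLLCLK;
+   clk.AHBCLKDivider  = RCC_SYSCLK_DIV1;    // HCLK  = SYSCLK
+   clk.APB1CLKDivider = RCC_HCLK_DIV4;      // PCLK1 = HCLK / 4
+   clk.APB2CLKDivider = RCC_HCLK_DIV2;      // PCLK2 = HCLK / 2
+   if (HAL_RCC_ClockConfig(&clk, FLASH_LATENCY_7) != HAL_OK)
+   {
+     // handle configuration error
+   }
+*/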
+/** + * @} + */ + +/** @defgroup RCC_APB1_APB2_Clock_Source RCC APB1/APB2 Clock Source + * @{ + */ +#define RCC_HCLK_DIV1 RCC_CFGR_PPRE1_DIV1 +#define RCC_HCLK_DIV2 RCC_CFGR_PPRE1_DIV2 +#define RCC_HCLK_DIV4 RCC_CFGR_PPRE1_DIV4 +#define RCC_HCLK_DIV8 RCC_CFGR_PPRE1_DIV8 +#define RCC_HCLK_DIV16 RCC_CFGR_PPRE1_DIV16 +/** + * @} + */ + +/** @defgroup RCC_RTC_Clock_Source RCC RTC Clock Source + * @{ + */ +#define RCC_RTCCLKSOURCE_NO_CLK ((uint32_t)0x00000000U) +#define RCC_RTCCLKSOURCE_LSE ((uint32_t)0x00000100U) +#define RCC_RTCCLKSOURCE_LSI ((uint32_t)0x00000200U) +#define RCC_RTCCLKSOURCE_HSE_DIVX ((uint32_t)0x00000300U) +#define RCC_RTCCLKSOURCE_HSE_DIV2 ((uint32_t)0x00020300U) +#define RCC_RTCCLKSOURCE_HSE_DIV3 ((uint32_t)0x00030300U) +#define RCC_RTCCLKSOURCE_HSE_DIV4 ((uint32_t)0x00040300U) +#define RCC_RTCCLKSOURCE_HSE_DIV5 ((uint32_t)0x00050300U) +#define RCC_RTCCLKSOURCE_HSE_DIV6 ((uint32_t)0x00060300U) +#define RCC_RTCCLKSOURCE_HSE_DIV7 ((uint32_t)0x00070300U) +#define RCC_RTCCLKSOURCE_HSE_DIV8 ((uint32_t)0x00080300U) +#define RCC_RTCCLKSOURCE_HSE_DIV9 ((uint32_t)0x00090300U) +#define RCC_RTCCLKSOURCE_HSE_DIV10 ((uint32_t)0x000A0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV11 ((uint32_t)0x000B0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV12 ((uint32_t)0x000C0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV13 ((uint32_t)0x000D0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV14 ((uint32_t)0x000E0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV15 ((uint32_t)0x000F0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV16 ((uint32_t)0x00100300U) +#define RCC_RTCCLKSOURCE_HSE_DIV17 ((uint32_t)0x00110300U) +#define RCC_RTCCLKSOURCE_HSE_DIV18 ((uint32_t)0x00120300U) +#define RCC_RTCCLKSOURCE_HSE_DIV19 ((uint32_t)0x00130300U) +#define RCC_RTCCLKSOURCE_HSE_DIV20 ((uint32_t)0x00140300U) +#define RCC_RTCCLKSOURCE_HSE_DIV21 ((uint32_t)0x00150300U) +#define RCC_RTCCLKSOURCE_HSE_DIV22 ((uint32_t)0x00160300U) +#define RCC_RTCCLKSOURCE_HSE_DIV23 ((uint32_t)0x00170300U) +#define RCC_RTCCLKSOURCE_HSE_DIV24 ((uint32_t)0x00180300U) +#define RCC_RTCCLKSOURCE_HSE_DIV25 ((uint32_t)0x00190300U) +#define RCC_RTCCLKSOURCE_HSE_DIV26 ((uint32_t)0x001A0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV27 ((uint32_t)0x001B0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV28 ((uint32_t)0x001C0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV29 ((uint32_t)0x001D0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV30 ((uint32_t)0x001E0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV31 ((uint32_t)0x001F0300U) +/** + * @} + */ + + + +/** @defgroup RCC_MCO_Index RCC MCO Index + * @{ + */ +#define RCC_MCO1 ((uint32_t)0x00000000U) +#define RCC_MCO2 ((uint32_t)0x00000001U) +/** + * @} + */ + +/** @defgroup RCC_MCO1_Clock_Source RCC MCO1 Clock Source + * @{ + */ +#define RCC_MCO1SOURCE_HSI ((uint32_t)0x00000000U) +#define RCC_MCO1SOURCE_LSE RCC_CFGR_MCO1_0 +#define RCC_MCO1SOURCE_HSE RCC_CFGR_MCO1_1 +#define RCC_MCO1SOURCE_PLLCLK RCC_CFGR_MCO1 +/** + * @} + */ + +/** @defgroup RCC_MCO2_Clock_Source RCC MCO2 Clock Source + * @{ + */ +#define RCC_MCO2SOURCE_SYSCLK ((uint32_t)0x00000000U) +#define RCC_MCO2SOURCE_PLLI2SCLK RCC_CFGR_MCO2_0 +#define RCC_MCO2SOURCE_HSE RCC_CFGR_MCO2_1 +#define RCC_MCO2SOURCE_PLLCLK RCC_CFGR_MCO2 +/** + * @} + */ + +/** @defgroup RCC_MCOx_Clock_Prescaler RCC MCO1 Clock Prescaler + * @{ + */ +#define RCC_MCODIV_1 ((uint32_t)0x00000000U) +#define RCC_MCODIV_2 RCC_CFGR_MCO1PRE_2 +#define RCC_MCODIV_3 ((uint32_t)RCC_CFGR_MCO1PRE_0 | RCC_CFGR_MCO1PRE_2) +#define RCC_MCODIV_4 ((uint32_t)RCC_CFGR_MCO1PRE_1 | RCC_CFGR_MCO1PRE_2) +#define RCC_MCODIV_5 RCC_CFGR_MCO1PRE +/** + * @} + */ + +/** @defgroup 
RCC_Interrupt RCC Interrupt + * @{ + */ +#define RCC_IT_LSIRDY ((uint8_t)0x01U) +#define RCC_IT_LSERDY ((uint8_t)0x02U) +#define RCC_IT_HSIRDY ((uint8_t)0x04U) +#define RCC_IT_HSERDY ((uint8_t)0x08U) +#define RCC_IT_PLLRDY ((uint8_t)0x10U) +#define RCC_IT_PLLI2SRDY ((uint8_t)0x20U) +#define RCC_IT_PLLSAIRDY ((uint8_t)0x40U) +#define RCC_IT_CSS ((uint8_t)0x80U) +/** + * @} + */ + +/** @defgroup RCC_Flag RCC Flags + * Elements values convention: 0XXYYYYYb + * - YYYYY : Flag position in the register + * - 0XX : Register index + * - 01: CR register + * - 10: BDCR register + * - 11: CSR register + * @{ + */ +/* Flags in the CR register */ +#define RCC_FLAG_HSIRDY ((uint8_t)0x21U) +#define RCC_FLAG_HSERDY ((uint8_t)0x31U) +#define RCC_FLAG_PLLRDY ((uint8_t)0x39U) +#define RCC_FLAG_PLLI2SRDY ((uint8_t)0x3BU) +#define RCC_FLAG_PLLSAIRDY ((uint8_t)0x3CU) + +/* Flags in the BDCR register */ +#define RCC_FLAG_LSERDY ((uint8_t)0x41U) + +/* Flags in the CSR register */ +#define RCC_FLAG_LSIRDY ((uint8_t)0x61U) +#define RCC_FLAG_BORRST ((uint8_t)0x79U) +#define RCC_FLAG_PINRST ((uint8_t)0x7AU) +#define RCC_FLAG_PORRST ((uint8_t)0x7BU) +#define RCC_FLAG_SFTRST ((uint8_t)0x7CU) +#define RCC_FLAG_IWDGRST ((uint8_t)0x7DU) +#define RCC_FLAG_WWDGRST ((uint8_t)0x7EU) +#define RCC_FLAG_LPWRRST ((uint8_t)0x7FU) +/** + * @} + */ + +/** @defgroup RCC_LSEDrive_Configuration RCC LSE Drive configurations + * @{ + */ +#define RCC_LSEDRIVE_LOW ((uint32_t)0x00000000U) +#define RCC_LSEDRIVE_MEDIUMLOW RCC_BDCR_LSEDRV_1 +#define RCC_LSEDRIVE_MEDIUMHIGH RCC_BDCR_LSEDRV_0 +#define RCC_LSEDRIVE_HIGH RCC_BDCR_LSEDRV +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup RCC_Exported_Macros RCC Exported Macros + * @{ + */ + +/** @defgroup RCC_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable + * @brief Enable or disable the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_CRC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_DMA1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_CRC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CRCEN)) +#define __HAL_RCC_DMA1_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DMA1EN)) + +/** + * @} + */ + +/** @defgroup RCC_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
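+ * @note Illustrative usage sketch (editorial addition, not part of the original
+ *       ST header): the PWR peripheral clock must be enabled before any PWR
+ *       register access, for example before configuring the PVD.
+ * @code
+ *   __HAL_RCC_PWR_CLK_ENABLE();
+ *   // PWR registers can now be accessed, e.g. HAL_PWR_EnablePVD()
+ * @endcode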
+ * @{ + */ +#define __HAL_RCC_WWDG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_WWDGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_WWDGEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_PWR_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_PWREN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_PWREN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_WWDG_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_WWDGEN)) +#define __HAL_RCC_PWR_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_PWREN)) +/** + * @} + */ + +/** @defgroup RCC_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB2) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_SYSCFG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SYSCFGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SYSCFGEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SYSCFG_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SYSCFGEN)) + +/** + * @} + */ + +/** @defgroup RCC_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) +#define __HAL_RCC_DMA1_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA1EN)) != RESET) + +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +#define __HAL_RCC_DMA1_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA1EN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB1_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_WWDG_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_WWDGEN)) != RESET) +#define __HAL_RCC_PWR_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_PWREN)) != RESET) + +#define __HAL_RCC_WWDG_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_WWDGEN)) == RESET) +#define __HAL_RCC_PWR_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_PWREN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB2_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status + * @brief EGet the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_SYSCFG_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SYSCFGEN)) != RESET) +#define __HAL_RCC_SYSCFG_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SYSCFGEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_Peripheral_Clock_Force_Release RCC Peripheral Clock Force Release + * @brief Force or release AHB peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) +#define __HAL_RCC_DMA1_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_DMA1RST)) + +#define __HAL_RCC_AHB1_RELEASE_RESET() (RCC->AHB1RSTR = 0x00U) +#define __HAL_RCC_CRC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_CRCRST)) +#define __HAL_RCC_DMA1_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_DMA1RST)) +/** + * @} + */ + +/** @defgroup RCC_APB1_Force_Release_Reset APB1 Force Release Reset + * @brief Force or release APB1 peripheral reset. + * @{ + */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_WWDG_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_WWDGRST)) +#define __HAL_RCC_PWR_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_PWRRST)) + +#define __HAL_RCC_APB1_RELEASE_RESET() (RCC->APB1RSTR = 0x00U) +#define __HAL_RCC_WWDG_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_WWDGRST)) +#define __HAL_RCC_PWR_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_PWRRST)) +/** + * @} + */ + +/** @defgroup RCC_APB2_Force_Release_Reset APB2 Force Release Reset + * @brief Force or release APB2 peripheral reset. + * @{ + */ +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_SYSCFG_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SYSCFGRST)) + +#define __HAL_RCC_APB2_RELEASE_RESET() (RCC->APB2RSTR = 0x00U) +#define __HAL_RCC_SYSCFG_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SYSCFGRST)) + +/** + * @} + */ + +/** @defgroup RCC_Peripheral_Clock_Sleep_Enable_Disable RCC Peripheral Clock Sleep Enable Disable + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_CRC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_DMA1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DMA1LPEN)) + +#define __HAL_RCC_CRC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_DMA1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DMA1LPEN)) + +/** @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + */ +#define __HAL_RCC_WWDG_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_WWDGLPEN)) +#define __HAL_RCC_PWR_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_PWRLPEN)) + +#define __HAL_RCC_WWDG_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_WWDGLPEN)) +#define __HAL_RCC_PWR_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_PWRLPEN)) + +/** @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. 
+ * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + */ +#define __HAL_RCC_SYSCFG_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SYSCFGLPEN)) +#define __HAL_RCC_SYSCFG_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SYSCFGLPEN)) + +/** + * @} + */ + +/** @defgroup RCC_AHB1_Clock_Sleep_Enable_Disable_Status AHB1 Peripheral Clock Sleep Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_CRC_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_CRCLPEN)) != RESET) +#define __HAL_RCC_DMA1_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA1LPEN)) != RESET) + +#define __HAL_RCC_CRC_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_CRCLPEN)) == RESET) +#define __HAL_RCC_DMA1_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA1LPEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB1_Clock_Sleep_Enable_Disable_Status APB1 Peripheral Clock Sleep Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_WWDG_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_WWDGLPEN)) != RESET) +#define __HAL_RCC_PWR_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_PWRLPEN)) != RESET) + +#define __HAL_RCC_WWDG_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_WWDGLPEN)) == RESET) +#define __HAL_RCC_PWR_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_PWRLPEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB2_Clock_Sleep_Enable_Disable_Status APB2 Peripheral Clock Sleep Enable Disable Status + * @brief Get the enable or disable status of the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_SYSCFG_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SYSCFGLPEN)) != RESET) +#define __HAL_RCC_SYSCFG_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SYSCFGLPEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_HSI_Configuration HSI Configuration + * @{ + */ + +/** @brief Macros to enable or disable the Internal High Speed oscillator (HSI). + * @note The HSI is stopped by hardware when entering STOP and STANDBY modes. + * It is used (enabled by hardware) as system clock source after startup + * from Reset, wakeup from STOP and STANDBY mode, or in case of failure + * of the HSE used directly or indirectly as system clock (if the Clock + * Security System CSS is enabled). + * @note HSI can not be stopped if it is used as system clock source. In this case, + * you have to select another source of the system clock then stop the HSI. 
+ * @note After enabling the HSI, the application software should wait on HSIRDY + * flag to be set indicating that HSI clock is stable and can be used as + * system clock source. + * @note When the HSI is stopped, HSIRDY flag goes low after 6 HSI oscillator + * clock cycles. + */ +#define __HAL_RCC_HSI_ENABLE() (RCC->CR |= (RCC_CR_HSION)) +#define __HAL_RCC_HSI_DISABLE() (RCC->CR &= ~(RCC_CR_HSION)) + +/** @brief Macro to adjust the Internal High Speed oscillator (HSI) calibration value. + * @note The calibration is used to compensate for the variations in voltage + * and temperature that influence the frequency of the internal HSI RC. + * @param __HSICALIBRATIONVALUE__ specifies the calibration trimming value. + * (default is RCC_HSICALIBRATION_DEFAULT). + */ +#define __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(__HSICALIBRATIONVALUE__) (MODIFY_REG(RCC->CR,\ + RCC_CR_HSITRIM, (uint32_t)(__HSICALIBRATIONVALUE__) << RCC_CR_HSITRIM_Pos)) +/** + * @} + */ + +/** @defgroup RCC_LSI_Configuration LSI Configuration + * @{ + */ + +/** @brief Macros to enable or disable the Internal Low Speed oscillator (LSI). + * @note After enabling the LSI, the application software should wait on + * LSIRDY flag to be set indicating that LSI clock is stable and can + * be used to clock the IWDG and/or the RTC. + * @note LSI can not be disabled if the IWDG is running. + * @note When the LSI is stopped, LSIRDY flag goes low after 6 LSI oscillator + * clock cycles. + */ +#define __HAL_RCC_LSI_ENABLE() (RCC->CSR |= (RCC_CSR_LSION)) +#define __HAL_RCC_LSI_DISABLE() (RCC->CSR &= ~(RCC_CSR_LSION)) +/** + * @} + */ + +/** @defgroup RCC_HSE_Configuration HSE Configuration + * @{ + */ +/** + * @brief Macro to configure the External High Speed oscillator (HSE). + * @note Transitions HSE Bypass to HSE On and HSE On to HSE Bypass are not + * supported by this macro. User should request a transition to HSE Off + * first and then HSE On or HSE Bypass. + * @note After enabling the HSE (RCC_HSE_ON or RCC_HSE_Bypass), the application + * software should wait on HSERDY flag to be set indicating that HSE clock + * is stable and can be used to clock the PLL and/or system clock. + * @note HSE state can not be changed if it is used directly or through the + * PLL as system clock. In this case, you have to select another source + * of the system clock then change the HSE state (ex. disable it). + * @note The HSE is stopped by hardware when entering STOP and STANDBY modes. + * @note This function reset the CSSON bit, so if the clock security system(CSS) + * was previously enabled you have to enable it again after calling this + * function. + * @param __STATE__ specifies the new state of the HSE. + * This parameter can be one of the following values: + * @arg RCC_HSE_OFF: turn OFF the HSE oscillator, HSERDY flag goes low after + * 6 HSE oscillator clock cycles. + * @arg RCC_HSE_ON: turn ON the HSE oscillator. + * @arg RCC_HSE_BYPASS: HSE oscillator bypassed with external clock. 
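+ * @note Illustrative usage sketch (editorial addition, not part of the original
+ *       ST header): after requesting RCC_HSE_ON, poll the HSERDY flag before
+ *       using the HSE as PLL or system clock source.
+ * @code
+ *   __HAL_RCC_HSE_CONFIG(RCC_HSE_ON);
+ *   while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == 0U) { }  // add a timeout in real code
+ * @endcode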
+ */ +#define __HAL_RCC_HSE_CONFIG(__STATE__) \ + do { \ + if ((__STATE__) == RCC_HSE_ON) \ + { \ + SET_BIT(RCC->CR, RCC_CR_HSEON); \ + } \ + else if ((__STATE__) == RCC_HSE_OFF) \ + { \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON); \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEBYP); \ + } \ + else if ((__STATE__) == RCC_HSE_BYPASS) \ + { \ + SET_BIT(RCC->CR, RCC_CR_HSEBYP); \ + SET_BIT(RCC->CR, RCC_CR_HSEON); \ + } \ + else \ + { \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON); \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEBYP); \ + } \ + } while(0) +/** + * @} + */ + +/** @defgroup RCC_LSE_Configuration LSE Configuration + * @{ + */ + +/** + * @brief Macro to configure the External Low Speed oscillator (LSE). + * @note Transitions LSE Bypass to LSE On and LSE On to LSE Bypass are not supported by this macro. + * User should request a transition to LSE Off first and then LSE On or LSE Bypass. + * @note As the LSE is in the Backup domain and write access is denied to + * this domain after reset, you have to enable write access using + * HAL_PWR_EnableBkUpAccess() function before to configure the LSE + * (to be done once after reset). + * @note After enabling the LSE (RCC_LSE_ON or RCC_LSE_BYPASS), the application + * software should wait on LSERDY flag to be set indicating that LSE clock + * is stable and can be used to clock the RTC. + * @param __STATE__ specifies the new state of the LSE. + * This parameter can be one of the following values: + * @arg RCC_LSE_OFF: turn OFF the LSE oscillator, LSERDY flag goes low after + * 6 LSE oscillator clock cycles. + * @arg RCC_LSE_ON: turn ON the LSE oscillator. + * @arg RCC_LSE_BYPASS: LSE oscillator bypassed with external clock. + */ +#define __HAL_RCC_LSE_CONFIG(__STATE__) \ + do { \ + if((__STATE__) == RCC_LSE_ON) \ + { \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + } \ + else if((__STATE__) == RCC_LSE_OFF) \ + { \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ + } \ + else if((__STATE__) == RCC_LSE_BYPASS) \ + { \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + } \ + else \ + { \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ + } \ + } while(0) +/** + * @} + */ + +/** @defgroup RCC_Internal_RTC_Clock_Configuration RTC Clock Configuration + * @{ + */ + +/** @brief Macros to enable or disable the RTC clock. + * @note These macros must be used only after the RTC clock source was selected. + */ +#define __HAL_RCC_RTC_ENABLE() (RCC->BDCR |= (RCC_BDCR_RTCEN)) +#define __HAL_RCC_RTC_DISABLE() (RCC->BDCR &= ~(RCC_BDCR_RTCEN)) + +/** @brief Macros to configure the RTC clock (RTCCLK). + * @note As the RTC clock configuration bits are in the Backup domain and write + * access is denied to this domain after reset, you have to enable write + * access using the Power Backup Access macro before to configure + * the RTC clock source (to be done once after reset). + * @note Once the RTC clock is configured it can't be changed unless the + * Backup domain is reset using __HAL_RCC_BackupReset_RELEASE() macro, or by + * a Power On Reset (POR). + * @param __RTCCLKSource__ specifies the RTC clock source. + * This parameter can be one of the following values: + @arg @ref RCC_RTCCLKSOURCE_NO_CLK: No clock selected as RTC clock. + * @arg @ref RCC_RTCCLKSOURCE_LSE: LSE selected as RTC clock. + * @arg @ref RCC_RTCCLKSOURCE_LSI: LSI selected as RTC clock. 
+ * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX: HSE clock divided by x selected + * as RTC clock, where x:[2,31] + * @note If the LSE or LSI is used as RTC clock source, the RTC continues to + * work in STOP and STANDBY modes, and can be used as wakeup source. + * However, when the HSE clock is used as RTC clock source, the RTC + * cannot be used in STOP and STANDBY modes. + * @note The maximum input clock frequency for RTC is 1MHz (when using HSE as + * RTC clock source). + */ +#define __HAL_RCC_RTC_CLKPRESCALER(__RTCCLKSource__) (((__RTCCLKSource__) & RCC_BDCR_RTCSEL) == RCC_BDCR_RTCSEL) ? \ + MODIFY_REG(RCC->CFGR, RCC_CFGR_RTCPRE, ((__RTCCLKSource__) & 0xFFFFCFF)) : CLEAR_BIT(RCC->CFGR, RCC_CFGR_RTCPRE) + +#define __HAL_RCC_RTC_CONFIG(__RTCCLKSource__) do { __HAL_RCC_RTC_CLKPRESCALER(__RTCCLKSource__); \ + RCC->BDCR |= ((__RTCCLKSource__) & 0x00000FFF); \ + } while (0) + +/** @brief Macro to get the RTC clock source. + * @retval The clock source can be one of the following values: + * @arg @ref RCC_RTCCLKSOURCE_NO_CLK No clock selected as RTC clock + * @arg @ref RCC_RTCCLKSOURCE_LSE LSE selected as RTC clock + * @arg @ref RCC_RTCCLKSOURCE_LSI LSI selected as RTC clock + * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX HSE divided by X selected as RTC clock (X can be retrieved thanks to @ref __HAL_RCC_GET_RTC_HSE_PRESCALER() + */ +#define __HAL_RCC_GET_RTC_SOURCE() (READ_BIT(RCC->BDCR, RCC_BDCR_RTCSEL)) + +/** + * @brief Get the RTC and HSE clock divider (RTCPRE). + * @retval Returned value can be one of the following values: + * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX: HSE clock divided by x selected + * as RTC clock, where x:[2,31] + */ +#define __HAL_RCC_GET_RTC_HSE_PRESCALER() (READ_BIT(RCC->CFGR, RCC_CFGR_RTCPRE) | RCC_BDCR_RTCSEL) + +/** @brief Macros to force or release the Backup domain reset. + * @note This function resets the RTC peripheral (including the backup registers) + * and the RTC clock source selection in RCC_CSR register. + * @note The BKPSRAM is not affected by this reset. + */ +#define __HAL_RCC_BACKUPRESET_FORCE() (RCC->BDCR |= (RCC_BDCR_BDRST)) +#define __HAL_RCC_BACKUPRESET_RELEASE() (RCC->BDCR &= ~(RCC_BDCR_BDRST)) +/** + * @} + */ + +/** @defgroup RCC_PLL_Configuration PLL Configuration + * @{ + */ + +/** @brief Macros to enable or disable the main PLL. + * @note After enabling the main PLL, the application software should wait on + * PLLRDY flag to be set indicating that PLL clock is stable and can + * be used as system clock source. + * @note The main PLL can not be disabled if it is used as system clock source + * @note The main PLL is disabled by hardware when entering STOP and STANDBY modes. + */ +#define __HAL_RCC_PLL_ENABLE() SET_BIT(RCC->CR, RCC_CR_PLLON) +#define __HAL_RCC_PLL_DISABLE() CLEAR_BIT(RCC->CR, RCC_CR_PLLON) + +/** @brief Macro to configure the PLL clock source. + * @note This function must be used only when the main PLL is disabled. + * @param __PLLSOURCE__ specifies the PLL entry clock source. + * This parameter can be one of the following values: + * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry + * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry + * + */ +#define __HAL_RCC_PLL_PLLSOURCE_CONFIG(__PLLSOURCE__) MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, (__PLLSOURCE__)) + +/** @brief Macro to configure the PLL multiplication factor. + * @note This function must be used only when the main PLL is disabled. 
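+ * @note Illustrative usage sketch (editorial addition, not part of the original
+ *       ST header): with an assumed 8 MHz HSE, a PLLM value of 4 yields the
+ *       recommended 2 MHz VCO input frequency.
+ * @code
+ *   __HAL_RCC_PLL_DISABLE();
+ *   __HAL_RCC_PLL_PLLSOURCE_CONFIG(RCC_PLLSOURCE_HSE);
+ *   __HAL_RCC_PLL_PLLM_CONFIG(4);
+ * @endcode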
+ * @param __PLLM__ specifies the division factor for PLL VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 2 MHz to limit PLL jitter. + * + */ +#define __HAL_RCC_PLL_PLLM_CONFIG(__PLLM__) MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, (__PLLM__)) +/** + * @} + */ + +/** @defgroup RCC_PLL_I2S_Configuration PLL I2S Configuration + * @{ + */ + +/** @brief Macro to configure the I2S clock source (I2SCLK). + * @note This function must be called before enabling the I2S APB clock. + * @param __SOURCE__ specifies the I2S clock source. + * This parameter can be one of the following values: + * @arg RCC_I2SCLKSOURCE_PLLI2S: PLLI2S clock used as I2S clock source. + * @arg RCC_I2SCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin + * used as I2S clock source. + */ +#define __HAL_RCC_I2S_CONFIG(__SOURCE__) do {RCC->CFGR &= ~(RCC_CFGR_I2SSRC); \ + RCC->CFGR |= (__SOURCE__); \ + }while(0) + +/** @brief Macros to enable or disable the PLLI2S. + * @note The PLLI2S is disabled by hardware when entering STOP and STANDBY modes. + */ +#define __HAL_RCC_PLLI2S_ENABLE() (RCC->CR |= (RCC_CR_PLLI2SON)) +#define __HAL_RCC_PLLI2S_DISABLE() (RCC->CR &= ~(RCC_CR_PLLI2SON)) +/** + * @} + */ + +/** @defgroup RCC_Get_Clock_source Get Clock source + * @{ + */ +/** + * @brief Macro to configure the system clock source. + * @param __RCC_SYSCLKSOURCE__ specifies the system clock source. + * This parameter can be one of the following values: + * - RCC_SYSCLKSOURCE_HSI: HSI oscillator is used as system clock source. + * - RCC_SYSCLKSOURCE_HSE: HSE oscillator is used as system clock source. + * - RCC_SYSCLKSOURCE_PLLCLK: PLL output is used as system clock source. + */ +#define __HAL_RCC_SYSCLK_CONFIG(__RCC_SYSCLKSOURCE__) MODIFY_REG(RCC->CFGR, RCC_CFGR_SW, (__RCC_SYSCLKSOURCE__)) + +/** @brief Macro to get the clock source used as system clock. + * @retval The clock source used as system clock. The returned value can be one + * of the following: + * - RCC_SYSCLKSOURCE_STATUS_HSI: HSI used as system clock. + * - RCC_SYSCLKSOURCE_STATUS_HSE: HSE used as system clock. + * - RCC_SYSCLKSOURCE_STATUS_PLLCLK: PLL used as system clock. + */ +#define __HAL_RCC_GET_SYSCLK_SOURCE() (RCC->CFGR & RCC_CFGR_SWS) + +/** + * @brief Macro to configures the External Low Speed oscillator (LSE) drive capability. + * @note As the LSE is in the Backup domain and write access is denied to + * this domain after reset, you have to enable write access using + * HAL_PWR_EnableBkUpAccess() function before to configure the LSE + * (to be done once after reset). + * @param __RCC_LSEDRIVE__ specifies the new state of the LSE drive capability. + * This parameter can be one of the following values: + * @arg RCC_LSEDRIVE_LOW: LSE oscillator low drive capability. + * @arg RCC_LSEDRIVE_MEDIUMLOW: LSE oscillator medium low drive capability. + * @arg RCC_LSEDRIVE_MEDIUMHIGH: LSE oscillator medium high drive capability. + * @arg RCC_LSEDRIVE_HIGH: LSE oscillator high drive capability. + * @retval None + */ +#define __HAL_RCC_LSEDRIVE_CONFIG(__RCC_LSEDRIVE__) \ + (MODIFY_REG(RCC->BDCR, RCC_BDCR_LSEDRV, (uint32_t)(__RCC_LSEDRIVE__) )) + +/** @brief Macro to get the oscillator used as PLL clock source. + * @retval The oscillator used as PLL clock source. 
The returned value can be one + * of the following: + * - RCC_PLLSOURCE_HSI: HSI oscillator is used as PLL clock source. + * - RCC_PLLSOURCE_HSE: HSE oscillator is used as PLL clock source. + */ +#define __HAL_RCC_GET_PLL_OSCSOURCE() ((uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC)) +/** + * @} + */ + +/** @defgroup RCCEx_MCOx_Clock_Config RCC Extended MCOx Clock Config + * @{ + */ + +/** @brief Macro to configure the MCO1 clock. + * @param __MCOCLKSOURCE__ specifies the MCO clock source. + * This parameter can be one of the following values: + * @arg RCC_MCO1SOURCE_HSI: HSI clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_LSE: LSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_HSE: HSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_PLLCLK: main PLL clock selected as MCO1 source + * @param __MCODIV__ specifies the MCO clock prescaler. + * This parameter can be one of the following values: + * @arg RCC_MCODIV_1: no division applied to MCOx clock + * @arg RCC_MCODIV_2: division by 2 applied to MCOx clock + * @arg RCC_MCODIV_3: division by 3 applied to MCOx clock + * @arg RCC_MCODIV_4: division by 4 applied to MCOx clock + * @arg RCC_MCODIV_5: division by 5 applied to MCOx clock + */ + +#define __HAL_RCC_MCO1_CONFIG(__MCOCLKSOURCE__, __MCODIV__) \ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO1 | RCC_CFGR_MCO1PRE), ((__MCOCLKSOURCE__) | (__MCODIV__))) + +/** @brief Macro to configure the MCO2 clock. + * @param __MCOCLKSOURCE__ specifies the MCO clock source. + * This parameter can be one of the following values: + * @arg RCC_MCO2SOURCE_SYSCLK: System clock (SYSCLK) selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLI2SCLK: PLLI2S clock selected as MCO2 source + * @arg RCC_MCO2SOURCE_HSE: HSE clock selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLCLK: main PLL clock selected as MCO2 source + * @param __MCODIV__ specifies the MCO clock prescaler. + * This parameter can be one of the following values: + * @arg RCC_MCODIV_1: no division applied to MCOx clock + * @arg RCC_MCODIV_2: division by 2 applied to MCOx clock + * @arg RCC_MCODIV_3: division by 3 applied to MCOx clock + * @arg RCC_MCODIV_4: division by 4 applied to MCOx clock + * @arg RCC_MCODIV_5: division by 5 applied to MCOx clock + */ + +#define __HAL_RCC_MCO2_CONFIG(__MCOCLKSOURCE__, __MCODIV__) \ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO2 | RCC_CFGR_MCO2PRE), ((__MCOCLKSOURCE__) | ((__MCODIV__) << 3))); +/** + * @} + */ + +/** @defgroup RCC_Flags_Interrupts_Management Flags Interrupts Management + * @brief macros to manage the specified RCC Flags and interrupts. + * @{ + */ + +/** @brief Enable RCC interrupt (Perform Byte access to RCC_CIR[14:8] bits to enable + * the selected interrupts). + * @param __INTERRUPT__ specifies the RCC interrupt sources to be enabled. + * This parameter can be any combination of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. + * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + */ +#define __HAL_RCC_ENABLE_IT(__INTERRUPT__) (*(__IO uint8_t *) RCC_CIR_BYTE1_ADDRESS |= (__INTERRUPT__)) + +/** @brief Disable RCC interrupt (Perform Byte access to RCC_CIR[14:8] bits to disable + * the selected interrupts). + * @param __INTERRUPT__ specifies the RCC interrupt sources to be disabled. + * This parameter can be any combination of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. 
+ * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + */ +#define __HAL_RCC_DISABLE_IT(__INTERRUPT__) (*(__IO uint8_t *) RCC_CIR_BYTE1_ADDRESS &= (uint8_t)(~(__INTERRUPT__))) + +/** @brief Clear the RCC's interrupt pending bits (Perform Byte access to RCC_CIR[23:16] + * bits to clear the selected interrupt pending bits. + * @param __INTERRUPT__ specifies the interrupt pending bit to clear. + * This parameter can be any combination of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. + * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + * @arg RCC_IT_CSS: Clock Security System interrupt + */ +#define __HAL_RCC_CLEAR_IT(__INTERRUPT__) (*(__IO uint8_t *) RCC_CIR_BYTE2_ADDRESS = (__INTERRUPT__)) + +/** @brief Check the RCC's interrupt has occurred or not. + * @param __INTERRUPT__ specifies the RCC interrupt source to check. + * This parameter can be one of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. + * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + * @arg RCC_IT_CSS: Clock Security System interrupt + * @retval The new state of __INTERRUPT__ (TRUE or FALSE). + */ +#define __HAL_RCC_GET_IT(__INTERRUPT__) ((RCC->CIR & (__INTERRUPT__)) == (__INTERRUPT__)) + +/** @brief Set RMVF bit to clear the reset flags: RCC_FLAG_PINRST, RCC_FLAG_PORRST, + * RCC_FLAG_SFTRST, RCC_FLAG_IWDGRST, RCC_FLAG_WWDGRST and RCC_FLAG_LPWRRST. + */ +#define __HAL_RCC_CLEAR_RESET_FLAGS() (RCC->CSR |= RCC_CSR_RMVF) + +/** @brief Check RCC flag is set or not. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg RCC_FLAG_HSIRDY: HSI oscillator clock ready. + * @arg RCC_FLAG_HSERDY: HSE oscillator clock ready. + * @arg RCC_FLAG_PLLRDY: Main PLL clock ready. + * @arg RCC_FLAG_PLLI2SRDY: PLLI2S clock ready. + * @arg RCC_FLAG_LSERDY: LSE oscillator clock ready. + * @arg RCC_FLAG_LSIRDY: LSI oscillator clock ready. + * @arg RCC_FLAG_BORRST: POR/PDR or BOR reset. + * @arg RCC_FLAG_PINRST: Pin reset. + * @arg RCC_FLAG_PORRST: POR/PDR reset. + * @arg RCC_FLAG_SFTRST: Software reset. + * @arg RCC_FLAG_IWDGRST: Independent Watchdog reset. + * @arg RCC_FLAG_WWDGRST: Window Watchdog reset. + * @arg RCC_FLAG_LPWRRST: Low Power reset. + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define RCC_FLAG_MASK ((uint8_t)0x1F) +#define __HAL_RCC_GET_FLAG(__FLAG__) (((((((__FLAG__) >> 5) == 1)? RCC->CR :((((__FLAG__) >> 5) == 2) ? RCC->BDCR :((((__FLAG__) >> 5) == 3)? RCC->CSR :RCC->CIR))) & ((uint32_t)1 << ((__FLAG__) & RCC_FLAG_MASK)))!= 0)? 
1 : 0) + +/** + * @} + */ + +/** + * @} + */ + +/* Include RCC HAL Extension module */ +#include "stm32f7xx_hal_rcc_ex.h" + +/* Exported functions --------------------------------------------------------*/ + /** @addtogroup RCC_Exported_Functions + * @{ + */ + +/** @addtogroup RCC_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions ******************************/ +HAL_StatusTypeDef HAL_RCC_DeInit(void); +HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct); +HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t FLatency); +/** + * @} + */ + +/** @addtogroup RCC_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions ************************************************/ +void HAL_RCC_MCOConfig(uint32_t RCC_MCOx, uint32_t RCC_MCOSource, uint32_t RCC_MCODiv); +void HAL_RCC_EnableCSS(void); +void HAL_RCC_DisableCSS(void); +uint32_t HAL_RCC_GetSysClockFreq(void); +uint32_t HAL_RCC_GetHCLKFreq(void); +uint32_t HAL_RCC_GetPCLK1Freq(void); +uint32_t HAL_RCC_GetPCLK2Freq(void); +void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct); +void HAL_RCC_GetClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t *pFLatency); + +/* CSS NMI IRQ handler */ +void HAL_RCC_NMI_IRQHandler(void); + +/* User Callbacks in non blocking mode (IT mode) */ +void HAL_RCC_CSSCallback(void); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup RCC_Private_Constants RCC Private Constants + * @{ + */ +#define HSE_TIMEOUT_VALUE HSE_STARTUP_TIMEOUT +#define HSI_TIMEOUT_VALUE ((uint32_t)2) /* 2 ms */ +#define LSI_TIMEOUT_VALUE ((uint32_t)2) /* 2 ms */ +#define PLL_TIMEOUT_VALUE ((uint32_t)2) /* 2 ms */ +#define CLOCKSWITCH_TIMEOUT_VALUE ((uint32_t)5000) /* 5 s */ +#define PLLI2S_TIMEOUT_VALUE 100U /* Timeout value fixed to 100 ms */ +#define PLLSAI_TIMEOUT_VALUE 100U /* Timeout value fixed to 100 ms */ + +/** @defgroup RCC_BitAddress_Alias RCC BitAddress Alias + * @brief RCC registers bit address alias + * @{ + */ +/* CIR register byte 2 (Bits[15:8]) base address */ +#define RCC_CIR_BYTE1_ADDRESS ((uint32_t)(RCC_BASE + 0x0C + 0x01)) + +/* CIR register byte 3 (Bits[23:16]) base address */ +#define RCC_CIR_BYTE2_ADDRESS ((uint32_t)(RCC_BASE + 0x0C + 0x02)) + +#define RCC_DBP_TIMEOUT_VALUE ((uint32_t)100) +#define RCC_LSE_TIMEOUT_VALUE LSE_STARTUP_TIMEOUT +/** + * @} + */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @addtogroup RCC_Private_Macros RCC Private Macros + * @{ + */ + +/** @defgroup RCC_IS_RCC_Definitions RCC Private macros to check input parameters + * @{ + */ +#define IS_RCC_OSCILLATORTYPE(OSCILLATOR) ((OSCILLATOR) <= 15) + +#define IS_RCC_HSE(HSE) (((HSE) == RCC_HSE_OFF) || ((HSE) == RCC_HSE_ON) || \ + ((HSE) == RCC_HSE_BYPASS)) + +#define IS_RCC_LSE(LSE) (((LSE) == RCC_LSE_OFF) || ((LSE) == RCC_LSE_ON) || \ + ((LSE) == RCC_LSE_BYPASS)) + +#define IS_RCC_HSI(HSI) (((HSI) == RCC_HSI_OFF) || ((HSI) == RCC_HSI_ON)) + +#define IS_RCC_LSI(LSI) (((LSI) == RCC_LSI_OFF) || ((LSI) == RCC_LSI_ON)) + +#define IS_RCC_PLL(PLL) (((PLL) == RCC_PLL_NONE) ||((PLL) == RCC_PLL_OFF) || ((PLL) == RCC_PLL_ON)) + +#define IS_RCC_PLLSOURCE(SOURCE) (((SOURCE) == RCC_PLLSOURCE_HSI) || \ + ((SOURCE) == RCC_PLLSOURCE_HSE)) + 
+#define IS_RCC_SYSCLKSOURCE(SOURCE) (((SOURCE) == RCC_SYSCLKSOURCE_HSI) || \ + ((SOURCE) == RCC_SYSCLKSOURCE_HSE) || \ + ((SOURCE) == RCC_SYSCLKSOURCE_PLLCLK)) +#define IS_RCC_PLLM_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 63)) + +#define IS_RCC_PLLN_VALUE(VALUE) ((50 <= (VALUE)) && ((VALUE) <= 432)) + +#define IS_RCC_PLLP_VALUE(VALUE) (((VALUE) == RCC_PLLP_DIV2) || ((VALUE) == RCC_PLLP_DIV4) || \ + ((VALUE) == RCC_PLLP_DIV6) || ((VALUE) == RCC_PLLP_DIV8)) +#define IS_RCC_PLLQ_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 15)) + +#define IS_RCC_HCLK(HCLK) (((HCLK) == RCC_SYSCLK_DIV1) || ((HCLK) == RCC_SYSCLK_DIV2) || \ + ((HCLK) == RCC_SYSCLK_DIV4) || ((HCLK) == RCC_SYSCLK_DIV8) || \ + ((HCLK) == RCC_SYSCLK_DIV16) || ((HCLK) == RCC_SYSCLK_DIV64) || \ + ((HCLK) == RCC_SYSCLK_DIV128) || ((HCLK) == RCC_SYSCLK_DIV256) || \ + ((HCLK) == RCC_SYSCLK_DIV512)) + +#define IS_RCC_CLOCKTYPE(CLK) ((1 <= (CLK)) && ((CLK) <= 15)) + +#define IS_RCC_PCLK(PCLK) (((PCLK) == RCC_HCLK_DIV1) || ((PCLK) == RCC_HCLK_DIV2) || \ + ((PCLK) == RCC_HCLK_DIV4) || ((PCLK) == RCC_HCLK_DIV8) || \ + ((PCLK) == RCC_HCLK_DIV16)) + +#define IS_RCC_MCO(MCOX) (((MCOX) == RCC_MCO1) || ((MCOX) == RCC_MCO2)) + + +#define IS_RCC_MCO1SOURCE(SOURCE) (((SOURCE) == RCC_MCO1SOURCE_HSI) || ((SOURCE) == RCC_MCO1SOURCE_LSE) || \ + ((SOURCE) == RCC_MCO1SOURCE_HSE) || ((SOURCE) == RCC_MCO1SOURCE_PLLCLK)) + +#define IS_RCC_MCO2SOURCE(SOURCE) (((SOURCE) == RCC_MCO2SOURCE_SYSCLK) || ((SOURCE) == RCC_MCO2SOURCE_PLLI2SCLK)|| \ + ((SOURCE) == RCC_MCO2SOURCE_HSE) || ((SOURCE) == RCC_MCO2SOURCE_PLLCLK)) + +#define IS_RCC_MCODIV(DIV) (((DIV) == RCC_MCODIV_1) || ((DIV) == RCC_MCODIV_2) || \ + ((DIV) == RCC_MCODIV_3) || ((DIV) == RCC_MCODIV_4) || \ + ((DIV) == RCC_MCODIV_5)) +#define IS_RCC_CALIBRATION_VALUE(VALUE) ((VALUE) <= 0x1F) + +#define IS_RCC_RTCCLKSOURCE(SOURCE) (((SOURCE) == RCC_RTCCLKSOURCE_LSE) || ((SOURCE) == RCC_RTCCLKSOURCE_LSI) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV2) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV3) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV4) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV5) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV6) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV7) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV8) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV9) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV10) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV11) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV12) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV13) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV14) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV15) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV16) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV17) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV18) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV19) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV20) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV21) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV22) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV23) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV24) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV25) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV26) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV27) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV28) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV29) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV30) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV31)) + + +#define IS_RCC_LSE_DRIVE(DRIVE) (((DRIVE) == RCC_LSEDRIVE_LOW) || \ + ((DRIVE) == RCC_LSEDRIVE_MEDIUMLOW) || \ + ((DRIVE) == RCC_LSEDRIVE_MEDIUMHIGH) || \ + ((DRIVE) == RCC_LSEDRIVE_HIGH)) +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + 
*/ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_RCC_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc_ex.h new file mode 100644 index 000000000..0a7580d56 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc_ex.h @@ -0,0 +1,3524 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rcc_ex.h + * @author MCD Application Team + * @brief Header file of RCC HAL Extension module. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_RCC_EX_H +#define __STM32F7xx_HAL_RCC_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup RCCEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup RCCEx_Exported_Types RCCEx Exported Types + * @{ + */ + +/** + * @brief RCC PLL configuration structure definition + */ +typedef struct +{ + uint32_t PLLState; /*!< The new state of the PLL. + This parameter can be a value of @ref RCC_PLL_Config */ + + uint32_t PLLSource; /*!< RCC_PLLSource: PLL entry clock source. + This parameter must be a value of @ref RCC_PLL_Clock_Source */ + + uint32_t PLLM; /*!< PLLM: Division factor for PLL VCO input clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 63 */ + + uint32_t PLLN; /*!< PLLN: Multiplication factor for PLL VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432 */ + + uint32_t PLLP; /*!< PLLP: Division factor for main system clock (SYSCLK). + This parameter must be a value of @ref RCC_PLLP_Clock_Divider */ + + uint32_t PLLQ; /*!< PLLQ: Division factor for OTG FS, SDMMC and RNG clocks. + This parameter must be a number between Min_Data = 2 and Max_Data = 15 */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + uint32_t PLLR; /*!< PLLR: Division factor for DSI clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 7 */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +}RCC_PLLInitTypeDef; + +/** + * @brief PLLI2S Clock structure definition + */ +typedef struct +{ + uint32_t PLLI2SN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + uint32_t PLLI2SR; /*!< Specifies the division factor for I2S clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + uint32_t PLLI2SQ; /*!< Specifies the division factor for SAI1 clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter will be used only when PLLI2S is selected as Clock Source SAI */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || defined (STM32F767xx) || \ + defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) + uint32_t PLLI2SP; /*!< Specifies the division factor for SPDIF-RX clock. + This parameter must be a value of @ref RCCEx_PLLI2SP_Clock_Divider. 
+ This parameter will be used only when PLLI2S is selected as Clock Source SPDIF-RX */ +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +}RCC_PLLI2SInitTypeDef; + +/** + * @brief PLLSAI Clock structure definition + */ +typedef struct +{ + uint32_t PLLSAIN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432. + This parameter will be used only when PLLSAI is selected as Clock Source SAI or LTDC */ + + uint32_t PLLSAIQ; /*!< Specifies the division factor for SAI1 clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter will be used only when PLLSAI is selected as Clock Source SAI or LTDC */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || defined (STM32F767xx) || \ + defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) + uint32_t PLLSAIR; /*!< specifies the division factor for LTDC clock + This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter will be used only when PLLSAI is selected as Clock Source LTDC */ +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + + uint32_t PLLSAIP; /*!< Specifies the division factor for 48MHz clock. + This parameter must be a value of @ref RCCEx_PLLSAIP_Clock_Divider + This parameter will be used only when PLLSAI is disabled */ +}RCC_PLLSAIInitTypeDef; + +/** + * @brief RCC extended clocks structure definition + */ +typedef struct +{ + uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. + This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ + + RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + RCC_PLLSAIInitTypeDef PLLSAI; /*!< PLL SAI structure parameters. + This parameter will be used only when PLLI2S is selected as Clock Source SAI or LTDC */ + + uint32_t PLLI2SDivQ; /*!< Specifies the PLLI2S division factor for SAI1 clock. + This parameter must be a number between Min_Data = 1 and Max_Data = 32 + This parameter will be used only when PLLI2S is selected as Clock Source SAI */ + + uint32_t PLLSAIDivQ; /*!< Specifies the PLLI2S division factor for SAI1 clock. + This parameter must be a number between Min_Data = 1 and Max_Data = 32 + This parameter will be used only when PLLSAI is selected as Clock Source SAI */ + + uint32_t PLLSAIDivR; /*!< Specifies the PLLSAI division factor for LTDC clock. + This parameter must be one value of @ref RCCEx_PLLSAI_DIVR */ + + uint32_t RTCClockSelection; /*!< Specifies RTC Clock source Selection. + This parameter can be a value of @ref RCC_RTC_Clock_Source */ + + uint32_t I2sClockSelection; /*!< Specifies I2S Clock source Selection. + This parameter can be a value of @ref RCCEx_I2S_Clock_Source */ + + uint32_t TIMPresSelection; /*!< Specifies TIM Clock Prescalers Selection. 
+ This parameter can be a value of @ref RCCEx_TIM_Prescaler_Selection */ + + uint32_t Sai1ClockSelection; /*!< Specifies SAI1 Clock Prescalers Selection + This parameter can be a value of @ref RCCEx_SAI1_Clock_Source */ + + uint32_t Sai2ClockSelection; /*!< Specifies SAI2 Clock Prescalers Selection + This parameter can be a value of @ref RCCEx_SAI2_Clock_Source */ + + uint32_t Usart1ClockSelection; /*!< USART1 clock source + This parameter can be a value of @ref RCCEx_USART1_Clock_Source */ + + uint32_t Usart2ClockSelection; /*!< USART2 clock source + This parameter can be a value of @ref RCCEx_USART2_Clock_Source */ + + uint32_t Usart3ClockSelection; /*!< USART3 clock source + This parameter can be a value of @ref RCCEx_USART3_Clock_Source */ + + uint32_t Uart4ClockSelection; /*!< UART4 clock source + This parameter can be a value of @ref RCCEx_UART4_Clock_Source */ + + uint32_t Uart5ClockSelection; /*!< UART5 clock source + This parameter can be a value of @ref RCCEx_UART5_Clock_Source */ + + uint32_t Usart6ClockSelection; /*!< USART6 clock source + This parameter can be a value of @ref RCCEx_USART6_Clock_Source */ + + uint32_t Uart7ClockSelection; /*!< UART7 clock source + This parameter can be a value of @ref RCCEx_UART7_Clock_Source */ + + uint32_t Uart8ClockSelection; /*!< UART8 clock source + This parameter can be a value of @ref RCCEx_UART8_Clock_Source */ + + uint32_t I2c1ClockSelection; /*!< I2C1 clock source + This parameter can be a value of @ref RCCEx_I2C1_Clock_Source */ + + uint32_t I2c2ClockSelection; /*!< I2C2 clock source + This parameter can be a value of @ref RCCEx_I2C2_Clock_Source */ + + uint32_t I2c3ClockSelection; /*!< I2C3 clock source + This parameter can be a value of @ref RCCEx_I2C3_Clock_Source */ + + uint32_t I2c4ClockSelection; /*!< I2C4 clock source + This parameter can be a value of @ref RCCEx_I2C4_Clock_Source */ + + uint32_t Lptim1ClockSelection; /*!< Specifies LPTIM1 clock source + This parameter can be a value of @ref RCCEx_LPTIM1_Clock_Source */ + + uint32_t CecClockSelection; /*!< CEC clock source + This parameter can be a value of @ref RCCEx_CEC_Clock_Source */ + + uint32_t Clk48ClockSelection; /*!< Specifies 48Mhz clock source used by USB OTG FS, RNG and SDMMC + This parameter can be a value of @ref RCCEx_CLK48_Clock_Source */ + + uint32_t Sdmmc1ClockSelection; /*!< SDMMC1 clock source + This parameter can be a value of @ref RCCEx_SDMMC1_Clock_Source */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) + uint32_t Sdmmc2ClockSelection; /*!< SDMMC2 clock source + This parameter can be a value of @ref RCCEx_SDMMC2_Clock_Source */ +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + uint32_t Dfsdm1ClockSelection; /*!< DFSDM1 clock source + This parameter can be a value of @ref RCCEx_DFSDM1_Kernel_Clock_Source */ + + uint32_t Dfsdm1AudioClockSelection; /*!< DFSDM1 clock source + This parameter can be a value of @ref RCCEx_DFSDM1_AUDIO_Clock_Source */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +}RCC_PeriphCLKInitTypeDef; +/** + * @} + */ + +/* Exported constants 
--------------------------------------------------------*/ +/** @defgroup RCCEx_Exported_Constants RCCEx Exported Constants + * @{ + */ + +/** @defgroup RCCEx_Periph_Clock_Selection RCC Periph Clock Selection + * @{ + */ +#define RCC_PERIPHCLK_I2S ((uint32_t)0x00000001U) +#if defined(STM32F746xx) || defined(STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define RCC_PERIPHCLK_LTDC ((uint32_t)0x00000008U) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#define RCC_PERIPHCLK_TIM ((uint32_t)0x00000010U) +#define RCC_PERIPHCLK_RTC ((uint32_t)0x00000020U) +#define RCC_PERIPHCLK_USART1 ((uint32_t)0x00000040U) +#define RCC_PERIPHCLK_USART2 ((uint32_t)0x00000080U) +#define RCC_PERIPHCLK_USART3 ((uint32_t)0x00000100U) +#define RCC_PERIPHCLK_UART4 ((uint32_t)0x00000200U) +#define RCC_PERIPHCLK_UART5 ((uint32_t)0x00000400U) +#define RCC_PERIPHCLK_USART6 ((uint32_t)0x00000800U) +#define RCC_PERIPHCLK_UART7 ((uint32_t)0x00001000U) +#define RCC_PERIPHCLK_UART8 ((uint32_t)0x00002000U) +#define RCC_PERIPHCLK_I2C1 ((uint32_t)0x00004000U) +#define RCC_PERIPHCLK_I2C2 ((uint32_t)0x00008000U) +#define RCC_PERIPHCLK_I2C3 ((uint32_t)0x00010000U) +#define RCC_PERIPHCLK_I2C4 ((uint32_t)0x00020000U) +#define RCC_PERIPHCLK_LPTIM1 ((uint32_t)0x00040000U) +#define RCC_PERIPHCLK_SAI1 ((uint32_t)0x00080000U) +#define RCC_PERIPHCLK_SAI2 ((uint32_t)0x00100000U) +#define RCC_PERIPHCLK_CLK48 ((uint32_t)0x00200000U) +#define RCC_PERIPHCLK_CEC ((uint32_t)0x00400000U) +#define RCC_PERIPHCLK_SDMMC1 ((uint32_t)0x00800000U) +#define RCC_PERIPHCLK_SPDIFRX ((uint32_t)0x01000000U) +#define RCC_PERIPHCLK_PLLI2S ((uint32_t)0x02000000U) +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define RCC_PERIPHCLK_SDMMC2 ((uint32_t)0x04000000U) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define RCC_PERIPHCLK_DFSDM1 ((uint32_t)0x08000000U) +#define RCC_PERIPHCLK_DFSDM1_AUDIO ((uint32_t)0x10000000U) +#endif /* STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @} + */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || defined (STM32F767xx) || \ + defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +/** @defgroup RCCEx_PLLI2SP_Clock_Divider RCCEx PLLI2SP Clock Divider + * @{ + */ +#define RCC_PLLI2SP_DIV2 ((uint32_t)0x00000000U) +#define RCC_PLLI2SP_DIV4 ((uint32_t)0x00000001U) +#define RCC_PLLI2SP_DIV6 ((uint32_t)0x00000002U) +#define RCC_PLLI2SP_DIV8 ((uint32_t)0x00000003U) +/** + * @} + */ +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @defgroup RCCEx_PLLSAIP_Clock_Divider RCCEx PLLSAIP Clock Divider + * @{ + */ +#define RCC_PLLSAIP_DIV2 ((uint32_t)0x00000000U) +#define RCC_PLLSAIP_DIV4 ((uint32_t)0x00000001U) +#define RCC_PLLSAIP_DIV6 ((uint32_t)0x00000002U) +#define RCC_PLLSAIP_DIV8 
((uint32_t)0x00000003U) +/** + * @} + */ + +/** @defgroup RCCEx_PLLSAI_DIVR RCCEx PLLSAI DIVR + * @{ + */ +#define RCC_PLLSAIDIVR_2 ((uint32_t)0x00000000U) +#define RCC_PLLSAIDIVR_4 RCC_DCKCFGR1_PLLSAIDIVR_0 +#define RCC_PLLSAIDIVR_8 RCC_DCKCFGR1_PLLSAIDIVR_1 +#define RCC_PLLSAIDIVR_16 RCC_DCKCFGR1_PLLSAIDIVR +/** + * @} + */ + +/** @defgroup RCCEx_I2S_Clock_Source RCCEx I2S Clock Source + * @{ + */ +#define RCC_I2SCLKSOURCE_PLLI2S ((uint32_t)0x00000000U) +#define RCC_I2SCLKSOURCE_EXT RCC_CFGR_I2SSRC + +/** + * @} + */ + +/** @defgroup RCCEx_SAI1_Clock_Source RCCEx SAI1 Clock Source + * @{ + */ +#define RCC_SAI1CLKSOURCE_PLLSAI ((uint32_t)0x00000000U) +#define RCC_SAI1CLKSOURCE_PLLI2S RCC_DCKCFGR1_SAI1SEL_0 +#define RCC_SAI1CLKSOURCE_PIN RCC_DCKCFGR1_SAI1SEL_1 +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define RCC_SAI1CLKSOURCE_PLLSRC RCC_DCKCFGR1_SAI1SEL +#endif /* STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/** + * @} + */ + +/** @defgroup RCCEx_SAI2_Clock_Source RCCEx SAI2 Clock Source + * @{ + */ +#define RCC_SAI2CLKSOURCE_PLLSAI ((uint32_t)0x00000000U) +#define RCC_SAI2CLKSOURCE_PLLI2S RCC_DCKCFGR1_SAI2SEL_0 +#define RCC_SAI2CLKSOURCE_PIN RCC_DCKCFGR1_SAI2SEL_1 +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define RCC_SAI2CLKSOURCE_PLLSRC RCC_DCKCFGR1_SAI2SEL +#endif /* STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/** + * @} + */ + +/** @defgroup RCCEx_CEC_Clock_Source RCCEx CEC Clock Source + * @{ + */ +#define RCC_CECCLKSOURCE_LSE ((uint32_t)0x00000000U) +#define RCC_CECCLKSOURCE_HSI RCC_DCKCFGR2_CECSEL /* CEC clock is HSI/488*/ +/** + * @} + */ + +/** @defgroup RCCEx_USART1_Clock_Source RCCEx USART1 Clock Source + * @{ + */ +#define RCC_USART1CLKSOURCE_PCLK2 ((uint32_t)0x00000000U) +#define RCC_USART1CLKSOURCE_SYSCLK RCC_DCKCFGR2_USART1SEL_0 +#define RCC_USART1CLKSOURCE_HSI RCC_DCKCFGR2_USART1SEL_1 +#define RCC_USART1CLKSOURCE_LSE RCC_DCKCFGR2_USART1SEL +/** + * @} + */ + +/** @defgroup RCCEx_USART2_Clock_Source RCCEx USART2 Clock Source + * @{ + */ +#define RCC_USART2CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_USART2CLKSOURCE_SYSCLK RCC_DCKCFGR2_USART2SEL_0 +#define RCC_USART2CLKSOURCE_HSI RCC_DCKCFGR2_USART2SEL_1 +#define RCC_USART2CLKSOURCE_LSE RCC_DCKCFGR2_USART2SEL +/** + * @} + */ + +/** @defgroup RCCEx_USART3_Clock_Source RCCEx USART3 Clock Source + * @{ + */ +#define RCC_USART3CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_USART3CLKSOURCE_SYSCLK RCC_DCKCFGR2_USART3SEL_0 +#define RCC_USART3CLKSOURCE_HSI RCC_DCKCFGR2_USART3SEL_1 +#define RCC_USART3CLKSOURCE_LSE RCC_DCKCFGR2_USART3SEL +/** + * @} + */ + +/** @defgroup RCCEx_UART4_Clock_Source RCCEx UART4 Clock Source + * @{ + */ +#define RCC_UART4CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_UART4CLKSOURCE_SYSCLK RCC_DCKCFGR2_UART4SEL_0 +#define RCC_UART4CLKSOURCE_HSI RCC_DCKCFGR2_UART4SEL_1 +#define RCC_UART4CLKSOURCE_LSE RCC_DCKCFGR2_UART4SEL +/** + * @} + */ + +/** @defgroup RCCEx_UART5_Clock_Source RCCEx UART5 Clock Source + * @{ + */ +#define RCC_UART5CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_UART5CLKSOURCE_SYSCLK RCC_DCKCFGR2_UART5SEL_0 +#define RCC_UART5CLKSOURCE_HSI RCC_DCKCFGR2_UART5SEL_1 +#define RCC_UART5CLKSOURCE_LSE RCC_DCKCFGR2_UART5SEL +/** + * @} + */ + +/** @defgroup RCCEx_USART6_Clock_Source RCCEx USART6 Clock Source + * @{ + */ +#define 
RCC_USART6CLKSOURCE_PCLK2 ((uint32_t)0x00000000U) +#define RCC_USART6CLKSOURCE_SYSCLK RCC_DCKCFGR2_USART6SEL_0 +#define RCC_USART6CLKSOURCE_HSI RCC_DCKCFGR2_USART6SEL_1 +#define RCC_USART6CLKSOURCE_LSE RCC_DCKCFGR2_USART6SEL +/** + * @} + */ + +/** @defgroup RCCEx_UART7_Clock_Source RCCEx UART7 Clock Source + * @{ + */ +#define RCC_UART7CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_UART7CLKSOURCE_SYSCLK RCC_DCKCFGR2_UART7SEL_0 +#define RCC_UART7CLKSOURCE_HSI RCC_DCKCFGR2_UART7SEL_1 +#define RCC_UART7CLKSOURCE_LSE RCC_DCKCFGR2_UART7SEL +/** + * @} + */ + +/** @defgroup RCCEx_UART8_Clock_Source RCCEx UART8 Clock Source + * @{ + */ +#define RCC_UART8CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_UART8CLKSOURCE_SYSCLK RCC_DCKCFGR2_UART8SEL_0 +#define RCC_UART8CLKSOURCE_HSI RCC_DCKCFGR2_UART8SEL_1 +#define RCC_UART8CLKSOURCE_LSE RCC_DCKCFGR2_UART8SEL +/** + * @} + */ + +/** @defgroup RCCEx_I2C1_Clock_Source RCCEx I2C1 Clock Source + * @{ + */ +#define RCC_I2C1CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_I2C1CLKSOURCE_SYSCLK RCC_DCKCFGR2_I2C1SEL_0 +#define RCC_I2C1CLKSOURCE_HSI RCC_DCKCFGR2_I2C1SEL_1 +/** + * @} + */ + +/** @defgroup RCCEx_I2C2_Clock_Source RCCEx I2C2 Clock Source + * @{ + */ +#define RCC_I2C2CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_I2C2CLKSOURCE_SYSCLK RCC_DCKCFGR2_I2C2SEL_0 +#define RCC_I2C2CLKSOURCE_HSI RCC_DCKCFGR2_I2C2SEL_1 + +/** + * @} + */ + +/** @defgroup RCCEx_I2C3_Clock_Source RCCEx I2C3 Clock Source + * @{ + */ +#define RCC_I2C3CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_I2C3CLKSOURCE_SYSCLK RCC_DCKCFGR2_I2C3SEL_0 +#define RCC_I2C3CLKSOURCE_HSI RCC_DCKCFGR2_I2C3SEL_1 +/** + * @} + */ + +/** @defgroup RCCEx_I2C4_Clock_Source RCCEx I2C4 Clock Source + * @{ + */ +#define RCC_I2C4CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_I2C4CLKSOURCE_SYSCLK RCC_DCKCFGR2_I2C4SEL_0 +#define RCC_I2C4CLKSOURCE_HSI RCC_DCKCFGR2_I2C4SEL_1 +/** + * @} + */ + +/** @defgroup RCCEx_LPTIM1_Clock_Source RCCEx LPTIM1 Clock Source + * @{ + */ +#define RCC_LPTIM1CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_LPTIM1CLKSOURCE_LSI RCC_DCKCFGR2_LPTIM1SEL_0 +#define RCC_LPTIM1CLKSOURCE_HSI RCC_DCKCFGR2_LPTIM1SEL_1 +#define RCC_LPTIM1CLKSOURCE_LSE RCC_DCKCFGR2_LPTIM1SEL + +/** + * @} + */ + +/** @defgroup RCCEx_CLK48_Clock_Source RCCEx CLK48 Clock Source + * @{ + */ +#define RCC_CLK48SOURCE_PLL ((uint32_t)0x00000000U) +#define RCC_CLK48SOURCE_PLLSAIP RCC_DCKCFGR2_CK48MSEL +/** + * @} + */ + +/** @defgroup RCCEx_TIM_Prescaler_Selection RCCEx TIM Prescaler Selection + * @{ + */ +#define RCC_TIMPRES_DESACTIVATED ((uint32_t)0x00000000U) +#define RCC_TIMPRES_ACTIVATED RCC_DCKCFGR1_TIMPRE +/** + * @} + */ + +/** @defgroup RCCEx_SDMMC1_Clock_Source RCCEx SDMMC1 Clock Source + * @{ + */ +#define RCC_SDMMC1CLKSOURCE_CLK48 ((uint32_t)0x00000000U) +#define RCC_SDMMC1CLKSOURCE_SYSCLK RCC_DCKCFGR2_SDMMC1SEL +/** + * @} + */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +/** @defgroup RCCEx_SDMMC2_Clock_Source RCCEx SDMMC2 Clock Source + * @{ + */ +#define RCC_SDMMC2CLKSOURCE_CLK48 ((uint32_t)0x00000000U) +#define RCC_SDMMC2CLKSOURCE_SYSCLK RCC_DCKCFGR2_SDMMC2SEL +/** + * @} + */ +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined 
(STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +/** @defgroup RCCEx_DFSDM1_Kernel_Clock_Source RCCEx DFSDM1 Kernel Clock Source + * @{ + */ +#define RCC_DFSDM1CLKSOURCE_PCLK2 ((uint32_t)0x00000000U) +#define RCC_DFSDM1CLKSOURCE_SYSCLK RCC_DCKCFGR1_DFSDM1SEL +/** + * @} + */ + +/** @defgroup RCCEx_DFSDM1_AUDIO_Clock_Source RCCEx DFSDM1 AUDIO Clock Source + * @{ + */ +#define RCC_DFSDM1AUDIOCLKSOURCE_SAI1 ((uint32_t)0x00000000U) +#define RCC_DFSDM1AUDIOCLKSOURCE_SAI2 RCC_DCKCFGR1_ADFSDM1SEL +/** + * @} + */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#if defined (STM32F769xx) || defined (STM32F779xx) +/** @defgroup RCCEx_DSI_Clock_Source RCC DSI Clock Source + * @{ + */ +#define RCC_DSICLKSOURCE_DSIPHY ((uint32_t)0x00000000U) +#define RCC_DSICLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR2_DSISEL) +/** + * @} + */ +#endif /* STM32F769xx || STM32F779xx */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup RCCEx_Exported_Macros RCCEx Exported Macros + * @{ + */ +/** @defgroup RCCEx_Peripheral_Clock_Enable_Disable RCCEx_Peripheral_Clock_Enable_Disable + * @brief Enables or disables the AHB/APB peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ + +/** @brief Enables or disables the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#define __HAL_RCC_BKPSRAM_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_DTCMRAMEN_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DTCMRAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DTCMRAMEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_DMA2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USB_OTG_HS_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOA_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOAEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOAEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOB_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOBEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, 
RCC_AHB1ENR_GPIOBEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOF_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOH_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOHEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOHEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + UNUSED(tmpreg); \ + } while(0) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_GPIOJ_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOJEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOJEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOK_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOKEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOKEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_DMA2D_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2DEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2DEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#define __HAL_RCC_BKPSRAM_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_BKPSRAMEN)) +#define __HAL_RCC_DTCMRAMEN_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DTCMRAMEN)) +#define __HAL_RCC_DMA2_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DMA2EN)) +#define __HAL_RCC_USB_OTG_HS_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_OTGHSEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_OTGHSULPIEN)) +#define __HAL_RCC_GPIOA_CLK_DISABLE() 
(RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOAEN)) +#define __HAL_RCC_GPIOB_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOBEN)) +#define __HAL_RCC_GPIOC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOCEN)) +#define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) +#define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) +#define __HAL_RCC_GPIOF_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOFEN)) +#define __HAL_RCC_GPIOG_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOGEN)) +#define __HAL_RCC_GPIOH_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOHEN)) +#define __HAL_RCC_GPIOI_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOIEN)) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_GPIOJ_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOJEN)) +#define __HAL_RCC_GPIOK_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOKEN)) +#define __HAL_RCC_DMA2D_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DMA2DEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +/** + * @brief Enable ETHERNET clock. + */ +#define __HAL_RCC_ETHMAC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ETHMACTX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ETHMACRX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ETHMACPTP_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ETH_CLK_ENABLE() do { \ + __HAL_RCC_ETHMAC_CLK_ENABLE(); \ + __HAL_RCC_ETHMACTX_CLK_ENABLE(); \ + __HAL_RCC_ETHMACRX_CLK_ENABLE(); \ + } while(0) +/** + * @brief Disable ETHERNET clock. 
+ */ +#define __HAL_RCC_ETHMAC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACEN)) +#define __HAL_RCC_ETHMACTX_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACTXEN)) +#define __HAL_RCC_ETHMACRX_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACRXEN)) +#define __HAL_RCC_ETHMACPTP_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACPTPEN)) +#define __HAL_RCC_ETH_CLK_DISABLE() do { \ + __HAL_RCC_ETHMACTX_CLK_DISABLE(); \ + __HAL_RCC_ETHMACRX_CLK_DISABLE(); \ + __HAL_RCC_ETHMAC_CLK_DISABLE(); \ + } while(0) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Enable or disable the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DCMI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + UNUSED(tmpreg); \ + } while(0) +#define __HAL_RCC_DCMI_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_DCMIEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_JPEG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_JPEGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_JPEGEN);\ + UNUSED(tmpreg); \ + } while(0) +#define __HAL_RCC_JPEG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_JPEGEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#define __HAL_RCC_RNG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_OTGFSEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_OTGFSEN);\ + UNUSED(tmpreg); \ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ + } while(0) + +#define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_RNGEN)) + +#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) +#if defined(STM32F756xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_CRYP_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_HASH_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_CRYP_CLK_DISABLE() (RCC->AHB2ENR &= 
~(RCC_AHB2ENR_CRYPEN)) +#define __HAL_RCC_HASH_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_HASHEN)) +#endif /* STM32F756x || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_AES_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_AESEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_AESEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_AES_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_AESEN)) +#endif /* STM32F732xx || STM32F733xx || STM32F730xx */ + +/** @brief Enables or disables the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#define __HAL_RCC_FMC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_QSPI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_FMC_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_FMCEN)) +#define __HAL_RCC_QSPI_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_QSPIEN)) + +/** @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ */ +#define __HAL_RCC_TIM2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM5EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM7_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM12_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM13_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM14_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_LPTIM1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, 
RCC_APB1ENR_CAN3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN3EN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#define __HAL_RCC_SPI2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SPI3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USART2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USART3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_UART4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_UART5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_I2C1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_I2C2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_I2C3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_CAN1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_DAC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_UART7_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_UART8_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + /* Delay after an RCC peripheral 
clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + UNUSED(tmpreg); \ + } while(0) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPDIFRXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPDIFRXEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_I2C4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C4EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_CAN2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_CEC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CECEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CECEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) +#define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) +#define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) +#define __HAL_RCC_TIM5_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM5EN)) +#define __HAL_RCC_TIM6_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM6EN)) +#define __HAL_RCC_TIM7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM7EN)) +#define __HAL_RCC_TIM12_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM12EN)) +#define __HAL_RCC_TIM13_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM13EN)) +#define __HAL_RCC_TIM14_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM14EN)) +#define __HAL_RCC_LPTIM1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_LPTIM1EN)) +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_RTCEN)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN3EN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI2EN)) +#define __HAL_RCC_SPI3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI3EN)) +#define __HAL_RCC_USART2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_USART2EN)) +#define __HAL_RCC_USART3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_USART3EN)) +#define __HAL_RCC_UART4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART4EN)) +#define __HAL_RCC_UART5_CLK_DISABLE() 
(RCC->APB1ENR &= ~(RCC_APB1ENR_UART5EN)) +#define __HAL_RCC_I2C1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C1EN)) +#define __HAL_RCC_I2C2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C2EN)) +#define __HAL_RCC_I2C3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C3EN)) +#define __HAL_RCC_CAN1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN1EN)) +#define __HAL_RCC_DAC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_DACEN)) +#define __HAL_RCC_UART7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART7EN)) +#define __HAL_RCC_UART8_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART8EN)) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_CLK_DISABLE()(RCC->APB1ENR &= ~(RCC_APB1ENR_SPDIFRXEN)) +#define __HAL_RCC_I2C4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C4EN)) +#define __HAL_RCC_CAN2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN2EN)) +#define __HAL_RCC_CEC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CECEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || || STM32F750xx */ + +/** @brief Enable or disable the High Speed APB (APB2) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#define __HAL_RCC_TIM1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM8_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USART1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_USART1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_USART1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USART6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_USART6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_USART6EN);\ + UNUSED(tmpreg); \ + } while(0) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDMMC2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDMMC2EN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || || STM32F730xx */ + +#define __HAL_RCC_ADC1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define 
__HAL_RCC_ADC2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ADC3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SDMMC1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDMMC1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDMMC1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SPI1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SPI4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM9_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM9EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM9EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM10_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM11_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM11EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM11EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SPI5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SPI6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI6EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SAI1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SAI2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_LTDCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_LTDCEN);\ + 
UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DSIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DSIEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F769xx || STM32F779xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_MDIO_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_MDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_MDIOEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_OTGPHYC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_OTGPHYCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_OTGPHYCEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +#define __HAL_RCC_TIM1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM1EN)) +#define __HAL_RCC_TIM8_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM8EN)) +#define __HAL_RCC_USART1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_USART1EN)) +#define __HAL_RCC_USART6_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_USART6EN)) +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDMMC2EN)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#define __HAL_RCC_ADC1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC1EN)) +#define __HAL_RCC_ADC2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC2EN)) +#define __HAL_RCC_ADC3_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC3EN)) +#define __HAL_RCC_SDMMC1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDMMC1EN)) +#define __HAL_RCC_SPI1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI1EN)) +#define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) +#define __HAL_RCC_TIM9_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM9EN)) +#define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) +#define __HAL_RCC_TIM11_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM11EN)) +#define __HAL_RCC_SPI5_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI5EN)) +#define __HAL_RCC_SPI6_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI6EN)) +#define __HAL_RCC_SAI1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SAI1EN)) +#define __HAL_RCC_SAI2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SAI2EN)) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || 
defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_LTDCEN)) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DSIEN)) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DFSDM1EN)) +#define __HAL_RCC_MDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_MDIOEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_OTGPHYC_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_OTGPHYCEN)) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +/** + * @} + */ + +/** @defgroup RCCEx_Peripheral_Clock_Enable_Disable_Status Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB/APB peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ + +/** @brief Get the enable or disable status of the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#define __HAL_RCC_BKPSRAM_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) != RESET) +#define __HAL_RCC_DTCMRAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DTCMRAMEN)) != RESET) +#define __HAL_RCC_DMA2_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2EN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) != RESET) +#define __HAL_RCC_GPIOA_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOAEN)) != RESET) +#define __HAL_RCC_GPIOB_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOBEN)) != RESET) +#define __HAL_RCC_GPIOC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOCEN)) != RESET) +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#define __HAL_RCC_GPIOF_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) != RESET) +#define __HAL_RCC_GPIOG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) != RESET) +#define __HAL_RCC_GPIOH_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOHEN)) != RESET) +#define __HAL_RCC_GPIOI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) != RESET) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_GPIOJ_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOJEN)) != RESET) +#define __HAL_RCC_GPIOK_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOKEN)) != RESET) +#define __HAL_RCC_DMA2D_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2DEN)) != RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || 
STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#define __HAL_RCC_BKPSRAM_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) == RESET) +#define __HAL_RCC_DTCMRAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DTCMRAMEN)) == RESET) +#define __HAL_RCC_DMA2_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2EN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) == RESET) +#define __HAL_RCC_GPIOA_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOAEN)) == RESET) +#define __HAL_RCC_GPIOB_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOBEN)) == RESET) +#define __HAL_RCC_GPIOC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOCEN)) == RESET) +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#define __HAL_RCC_GPIOF_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) == RESET) +#define __HAL_RCC_GPIOG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) == RESET) +#define __HAL_RCC_GPIOH_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOHEN)) == RESET) +#define __HAL_RCC_GPIOI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) == RESET) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_GPIOJ_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOJEN)) == RESET) +#define __HAL_RCC_GPIOK_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOKEN)) == RESET) +#define __HAL_RCC_DMA2D_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2DEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +/** + * @brief Enable ETHERNET clock. + */ +#define __HAL_RCC_ETHMAC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) != RESET) +#define __HAL_RCC_ETHMACTX_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACTXEN)) != RESET) +#define __HAL_RCC_ETHMACRX_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACRXEN)) != RESET) +#define __HAL_RCC_ETHMACPTP_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACPTPEN)) != RESET) +#define __HAL_RCC_ETH_IS_CLK_ENABLED() (__HAL_RCC_ETHMAC_IS_CLK_ENABLED() && \ + __HAL_RCC_ETHMACTX_IS_CLK_ENABLED() && \ + __HAL_RCC_ETHMACRX_IS_CLK_ENABLED()) + +/** + * @brief Disable ETHERNET clock. 
+ */ +#define __HAL_RCC_ETHMAC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) == RESET) +#define __HAL_RCC_ETHMACTX_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACTXEN)) == RESET) +#define __HAL_RCC_ETHMACRX_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACRXEN)) == RESET) +#define __HAL_RCC_ETHMACPTP_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACPTPEN)) == RESET) +#define __HAL_RCC_ETH_IS_CLK_DISABLED() (__HAL_RCC_ETHMAC_IS_CLK_DISABLED() && \ + __HAL_RCC_ETHMACTX_IS_CLK_DISABLED() && \ + __HAL_RCC_ETHMACRX_IS_CLK_DISABLED()) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Get the enable or disable status of the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) + +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) +#define __HAL_RCC_USB_IS_OTG_FS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) + +#if defined(STM32F756xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_CRYP_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) != RESET) +#define __HAL_RCC_HASH_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) != RESET) +#define __HAL_RCC_CRYP_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) == RESET) +#define __HAL_RCC_HASH_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) == RESET) +#endif /* STM32F756xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_AES_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_AESEN)) != RESET) +#define __HAL_RCC_AES_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_AESEN)) == RESET) +#endif /* STM32F732xx || STM32F733xx || STM32F730xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DCMI_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) != RESET) +#define __HAL_RCC_DCMI_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_JPEG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_JPEGEN)) != RESET) +#define __HAL_RCC_JPEG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_JPEGEN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** @brief Get the enable or disable status of the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ */ +#define __HAL_RCC_FMC_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FMCEN)) != RESET) +#define __HAL_RCC_QSPI_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) != RESET) + +#define __HAL_RCC_FMC_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FMCEN)) == RESET) +#define __HAL_RCC_QSPI_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) == RESET) + +/** @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_TIM5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM5EN)) != RESET) +#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) +#define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) +#define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) +#define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) != RESET) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN3EN)) != RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI2EN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_USART2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART2EN)) != RESET) +#define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) +#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) +#define __HAL_RCC_I2C1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C1EN)) != RESET) +#define __HAL_RCC_I2C2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C2EN)) != RESET) +#define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) +#define __HAL_RCC_CAN1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) +#define __HAL_RCC_UART7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) != RESET) +#define __HAL_RCC_UART8_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) != RESET) + +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_TIM5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM5EN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) +#define __HAL_RCC_TIM7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) == RESET) 
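+/* A minimal usage sketch for the clock enable/status macros in this group (illustrative
+ * only; example_i2c1_msp_init is a hypothetical name, not part of the HAL API). The gate
+ * is opened once in the peripheral MSP init, and the *_IS_CLK_ENABLED() form can serve as
+ * a cheap sanity check before the peripheral registers are touched:
+ *
+ *   void example_i2c1_msp_init(void)
+ *   {
+ *       __HAL_RCC_I2C1_CLK_ENABLE();            // open the APB1 clock gate for I2C1
+ *       if (__HAL_RCC_I2C1_IS_CLK_ENABLED())    // reads back the I2C1EN bit in RCC->APB1ENR
+ *       {
+ *           // safe to configure I2C1 registers from here on
+ *       }
+ *   }
+ */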
+#define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) +#define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) +#define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) == RESET) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN3EN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI2EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_USART2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART2EN)) == RESET) +#define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) +#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) +#define __HAL_RCC_I2C1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C1EN)) == RESET) +#define __HAL_RCC_I2C2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C2EN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) +#define __HAL_RCC_CAN1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +#define __HAL_RCC_UART7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) == RESET) +#define __HAL_RCC_UART8_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) == RESET) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPDIFRXEN)) != RESET) +#define __HAL_RCC_CAN2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) != RESET) +#define __HAL_RCC_CEC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CECEN)) != RESET) +#define __HAL_RCC_I2C4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C4EN)) != RESET) + +#define __HAL_RCC_SPDIFRX_IS_CLK_DISABLED()((RCC->APB1ENR & (RCC_APB1ENR_SPDIFRXEN)) == RESET) +#define __HAL_RCC_CAN2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) == RESET) +#define __HAL_RCC_CEC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CECEN)) == RESET) +#define __HAL_RCC_I2C4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C4EN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCEN)) != RESET) +#define __HAL_RCC_RTC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCEN)) == RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx 
|| STM32F730xx */ + +/** @brief Get the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#define __HAL_RCC_TIM1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM1EN)) != RESET) +#define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) +#define __HAL_RCC_USART1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART1EN)) != RESET) +#define __HAL_RCC_USART6_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART6EN)) != RESET) +#define __HAL_RCC_ADC1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC1EN)) != RESET) +#define __HAL_RCC_ADC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) != RESET) +#define __HAL_RCC_ADC3_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) != RESET) +#define __HAL_RCC_SDMMC1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDMMC1EN)) != RESET) +#define __HAL_RCC_SPI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI1EN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM9_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM9EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) +#define __HAL_RCC_TIM11_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM11EN)) != RESET) +#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) +#define __HAL_RCC_SPI6_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI6EN)) != RESET) +#define __HAL_RCC_SAI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) != RESET) +#define __HAL_RCC_SAI2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI2EN)) != RESET) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_LTDCEN)) != RESET) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DSIEN)) != RESET) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDMMC2EN)) != RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM1EN)) != RESET) +#define __HAL_RCC_MDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_MDIOEN)) != RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_OTGPHYC_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_OTGPHYCEN)) != RESET) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +#define __HAL_RCC_TIM1_IS_CLK_DISABLED() 
((RCC->APB2ENR & (RCC_APB2ENR_TIM1EN)) == RESET) +#define __HAL_RCC_TIM8_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) == RESET) +#define __HAL_RCC_USART1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART1EN)) == RESET) +#define __HAL_RCC_USART6_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART6EN)) == RESET) +#define __HAL_RCC_ADC1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC1EN)) == RESET) +#define __HAL_RCC_ADC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) == RESET) +#define __HAL_RCC_ADC3_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) == RESET) +#define __HAL_RCC_SDMMC1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDMMC1EN)) == RESET) +#define __HAL_RCC_SPI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI1EN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM9_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM9EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +#define __HAL_RCC_TIM11_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM11EN)) == RESET) +#define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) +#define __HAL_RCC_SPI6_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI6EN)) == RESET) +#define __HAL_RCC_SAI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) == RESET) +#define __HAL_RCC_SAI2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI2EN)) == RESET) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_LTDCEN)) == RESET) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DSIEN)) == RESET) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDMMC2EN)) == RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM1EN)) == RESET) +#define __HAL_RCC_MDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_MDIOEN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_OTGPHYC_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_OTGPHYCEN)) == RESET) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +/** + * @} + */ + +/** @defgroup RCCEx_Force_Release_Peripheral_Reset RCCEx Force Release Peripheral Reset + * @brief Forces or releases AHB/APB peripheral reset. + * @{ + */ + +/** @brief Force or release AHB1 peripheral reset. 
+ */ +#define __HAL_RCC_DMA2_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_DMA2RST)) +#define __HAL_RCC_USB_OTG_HS_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_OTGHRST)) +#define __HAL_RCC_GPIOA_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOARST)) +#define __HAL_RCC_GPIOB_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOBRST)) +#define __HAL_RCC_GPIOC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOCRST)) +#define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_GPIOF_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOGRST)) +#define __HAL_RCC_GPIOH_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOHRST)) +#define __HAL_RCC_GPIOI_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOIRST)) + +#define __HAL_RCC_DMA2_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_DMA2RST)) +#define __HAL_RCC_USB_OTG_HS_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_OTGHRST)) +#define __HAL_RCC_GPIOA_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOARST)) +#define __HAL_RCC_GPIOB_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOBRST)) +#define __HAL_RCC_GPIOC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOCRST)) +#define __HAL_RCC_GPIOD_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_GPIOF_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOGRST)) +#define __HAL_RCC_GPIOH_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOHRST)) +#define __HAL_RCC_GPIOI_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOIRST)) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DMA2D_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_DMA2DRST)) +#define __HAL_RCC_ETHMAC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_ETHMACRST)) +#define __HAL_RCC_GPIOJ_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOJRST)) +#define __HAL_RCC_GPIOK_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOKRST)) + +#define __HAL_RCC_DMA2D_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_DMA2DRST)) +#define __HAL_RCC_ETHMAC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_ETHMACRST)) +#define __HAL_RCC_GPIOJ_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOJRST)) +#define __HAL_RCC_GPIOK_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOKRST)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Force or release AHB2 peripheral reset. 
+ */ +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_RNG_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_RNGRST)) +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) + +#define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) +#define __HAL_RCC_RNG_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_RNGRST)) +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_OTGFSRST)) + +#if defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_JPEG_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_JPEGRST)) +#define __HAL_RCC_JPEG_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_JPEGRST)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#if defined(STM32F756xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_CRYP_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_CRYPRST)) +#define __HAL_RCC_HASH_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_HASHRST)) +#define __HAL_RCC_CRYP_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_CRYPRST)) +#define __HAL_RCC_HASH_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_HASHRST)) +#endif /* STM32F756xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_AES_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_AESRST)) +#define __HAL_RCC_AES_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_AESRST)) +#endif /* STM32F732xx || STM32F733xx || STM32F730xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DCMI_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_DCMIRST)) +#define __HAL_RCC_DCMI_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_DCMIRST)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Force or release AHB3 peripheral reset + */ +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_FMC_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_FMCRST)) +#define __HAL_RCC_QSPI_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_QSPIRST)) + +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) +#define __HAL_RCC_FMC_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_FMCRST)) +#define __HAL_RCC_QSPI_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_QSPIRST)) + +/** @brief Force or release APB1 peripheral reset. 
+ */ +#define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_TIM5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM5RST)) +#define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_LPTIM1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_LPTIM1RST)) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN3RST)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI2RST)) +#define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_USART2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_USART2RST)) +#define __HAL_RCC_USART3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_USART3RST)) +#define __HAL_RCC_UART4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART5RST)) +#define __HAL_RCC_I2C1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C1RST)) +#define __HAL_RCC_I2C2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C2RST)) +#define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) +#define __HAL_RCC_CAN1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_DAC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_DACRST)) +#define __HAL_RCC_UART7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART7RST)) +#define __HAL_RCC_UART8_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART8RST)) + +#define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_TIM5_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM5RST)) +#define __HAL_RCC_TIM6_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_LPTIM1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_LPTIM1RST)) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN3RST)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI2RST)) +#define __HAL_RCC_SPI3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_USART2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_USART2RST)) +#define 
__HAL_RCC_USART3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_USART3RST)) +#define __HAL_RCC_UART4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART5RST)) +#define __HAL_RCC_I2C1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C1RST)) +#define __HAL_RCC_I2C2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C2RST)) +#define __HAL_RCC_I2C3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C3RST)) +#define __HAL_RCC_CAN1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_DAC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_DACRST)) +#define __HAL_RCC_UART7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART7RST)) +#define __HAL_RCC_UART8_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART8RST)) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPDIFRXRST)) +#define __HAL_RCC_I2C4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C4RST)) +#define __HAL_RCC_CAN2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN2RST)) +#define __HAL_RCC_CEC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CECRST)) + +#define __HAL_RCC_SPDIFRX_RELEASE_RESET()(RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPDIFRXRST)) +#define __HAL_RCC_I2C4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C4RST)) +#define __HAL_RCC_CAN2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN2RST)) +#define __HAL_RCC_CEC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CECRST)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Force or release APB2 peripheral reset. 
+ */ +#define __HAL_RCC_TIM1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM1RST)) +#define __HAL_RCC_TIM8_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM8RST)) +#define __HAL_RCC_USART1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_USART1RST)) +#define __HAL_RCC_USART6_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_USART6RST)) +#define __HAL_RCC_ADC_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_ADCRST)) +#define __HAL_RCC_SDMMC1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDMMC1RST)) +#define __HAL_RCC_SPI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI1RST)) +#define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM9_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM9RST)) +#define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_TIM11_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM11RST)) +#define __HAL_RCC_SPI5_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI5RST)) +#define __HAL_RCC_SPI6_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI6RST)) +#define __HAL_RCC_SAI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI1RST)) +#define __HAL_RCC_SAI2_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI2RST)) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_LTDCRST)) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_OTGPHYC_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_OTGPHYCRST)) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +#define __HAL_RCC_TIM1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM1RST)) +#define __HAL_RCC_TIM8_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM8RST)) +#define __HAL_RCC_USART1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_USART1RST)) +#define __HAL_RCC_USART6_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_USART6RST)) +#define __HAL_RCC_ADC_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_ADCRST)) +#define __HAL_RCC_SDMMC1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDMMC1RST)) +#define __HAL_RCC_SPI1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI1RST)) +#define __HAL_RCC_SPI4_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM9_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM9RST)) +#define __HAL_RCC_TIM10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_TIM11_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM11RST)) +#define __HAL_RCC_SPI5_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI5RST)) +#define __HAL_RCC_SPI6_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI6RST)) +#define __HAL_RCC_SAI1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI1RST)) +#define __HAL_RCC_SAI2_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI2RST)) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_LTDCRST)) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define 
__HAL_RCC_OTGPHYC_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_OTGPHYCRST)) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_DSIRST)) +#define __HAL_RCC_DSI_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_DSIRST)) +#endif /* STM32F769xx || STM32F779xx */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDMMC2RST)) +#define __HAL_RCC_SDMMC2_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDMMC2RST)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_DFSDM1RST)) +#define __HAL_RCC_MDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_MDIORST)) +#define __HAL_RCC_DFSDM1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_DFSDM1RST)) +#define __HAL_RCC_MDIO_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_MDIORST)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/** + * @} + */ + +/** @defgroup RCCEx_Peripheral_Clock_Sleep_Enable_Disable RCCEx Peripheral Clock Sleep Enable Disable + * @brief Enables or disables the AHB/APB peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ + +/** @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. 
+ */ +#define __HAL_RCC_FLITF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_AXI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_AXILPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) +#define __HAL_RCC_SRAM2_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM2LPEN)) +#define __HAL_RCC_BKPSRAM_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_BKPSRAMLPEN)) +#define __HAL_RCC_DTCM_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DTCMLPEN)) +#define __HAL_RCC_DMA2_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DMA2LPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_OTGHSLPEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_OTGHSULPILPEN)) +#define __HAL_RCC_GPIOA_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOALPEN)) +#define __HAL_RCC_GPIOB_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOBLPEN)) +#define __HAL_RCC_GPIOC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOCLPEN)) +#define __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_GPIOH_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOHLPEN)) +#define __HAL_RCC_GPIOI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOILPEN)) + +#define __HAL_RCC_FLITF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_AXI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_AXILPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM1LPEN)) +#define __HAL_RCC_SRAM2_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM2LPEN)) +#define __HAL_RCC_BKPSRAM_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_BKPSRAMLPEN)) +#define __HAL_RCC_DTCM_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DTCMLPEN)) +#define __HAL_RCC_DMA2_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DMA2LPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_OTGHSLPEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_OTGHSULPILPEN)) +#define __HAL_RCC_GPIOA_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOALPEN)) +#define __HAL_RCC_GPIOB_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOBLPEN)) +#define __HAL_RCC_GPIOC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOCLPEN)) +#define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_GPIOH_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOHLPEN)) +#define __HAL_RCC_GPIOI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOILPEN)) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) 
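+/* A minimal sketch of how the AHB1 *_CLK_SLEEP_DISABLE() macros defined above are
+ * typically used (illustrative only; lowpower_trim_ahb1_sleep_clocks is a hypothetical
+ * name): clocks that are not needed while the core is in SLEEP mode are gated off once,
+ * before the first wait-for-interrupt, to reduce dynamic power:
+ *
+ *   static void lowpower_trim_ahb1_sleep_clocks(void)
+ *   {
+ *       __HAL_RCC_DMA2_CLK_SLEEP_DISABLE();     // DMA2 not used by this application in SLEEP
+ *       __HAL_RCC_GPIOF_CLK_SLEEP_DISABLE();    // unused ports stay gated during SLEEP
+ *       __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE();
+ *   }
+ */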
+#define __HAL_RCC_DMA2D_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DMA2DLPEN)) +#define __HAL_RCC_ETHMAC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACLPEN)) +#define __HAL_RCC_ETHMACTX_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACTXLPEN)) +#define __HAL_RCC_ETHMACRX_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACRXLPEN)) +#define __HAL_RCC_ETHMACPTP_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACPTPLPEN)) +#define __HAL_RCC_GPIOJ_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOJLPEN)) +#define __HAL_RCC_GPIOK_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOKLPEN)) + +#define __HAL_RCC_DMA2D_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DMA2DLPEN)) +#define __HAL_RCC_ETHMAC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACLPEN)) +#define __HAL_RCC_ETHMACTX_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACTXLPEN)) +#define __HAL_RCC_ETHMACRX_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACRXLPEN)) +#define __HAL_RCC_ETHMACPTP_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACPTPLPEN)) +#define __HAL_RCC_GPIOJ_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOJLPEN)) +#define __HAL_RCC_GPIOK_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOKLPEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Enable or disable the AHB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ */ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DCMI_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_DCMILPEN)) +#define __HAL_RCC_DCMI_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_DCMILPEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_JPEG_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_JPEGLPEN)) +#define __HAL_RCC_JPEG_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_JPEGLPEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#define __HAL_RCC_RNG_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_RNGLPEN)) +#define __HAL_RCC_RNG_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_RNGLPEN)) + +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_OTGFSLPEN)) +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_OTGFSLPEN)) + +#if defined(STM32F756xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_CRYP_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_CRYPLPEN)) +#define __HAL_RCC_HASH_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_HASHLPEN)) + +#define __HAL_RCC_CRYP_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_CRYPLPEN)) +#define __HAL_RCC_HASH_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_HASHLPEN)) +#endif /* STM32F756xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_AES_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_AESLPEN)) +#define __HAL_RCC_AES_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_AESLPEN)) +#endif /* STM32F732xx || STM32F733xx || STM32F730xx */ + +/** @brief Enable or disable the AHB3 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + */ +#define __HAL_RCC_FMC_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_FMCLPEN)) +#define __HAL_RCC_FMC_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_FMCLPEN)) + +#define __HAL_RCC_QSPI_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_QSPILPEN)) +#define __HAL_RCC_QSPI_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_QSPILPEN)) + +/** @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ */ +#define __HAL_RCC_TIM2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_TIM5_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM5LPEN)) +#define __HAL_RCC_TIM6_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM14LPEN)) +#define __HAL_RCC_LPTIM1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_LPTIM1LPEN)) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN3LPEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI2LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_USART2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_USART2LPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_USART3LPEN)) +#define __HAL_RCC_UART4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART5LPEN)) +#define __HAL_RCC_I2C1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C1LPEN)) +#define __HAL_RCC_I2C2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C2LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C3LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_DACLPEN)) +#define __HAL_RCC_UART7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART7LPEN)) +#define __HAL_RCC_UART8_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART8LPEN)) + +#define __HAL_RCC_TIM2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_TIM5_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM5LPEN)) +#define __HAL_RCC_TIM6_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM14LPEN)) +#define __HAL_RCC_LPTIM1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_LPTIM1LPEN)) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN3LPEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define 
__HAL_RCC_SPI2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI2LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_USART2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_USART2LPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_USART3LPEN)) +#define __HAL_RCC_UART4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART5LPEN)) +#define __HAL_RCC_I2C1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C1LPEN)) +#define __HAL_RCC_I2C2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C2LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C3LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_DACLPEN)) +#define __HAL_RCC_UART7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART7LPEN)) +#define __HAL_RCC_UART8_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART8LPEN)) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_RTCLPEN)) +#define __HAL_RCC_RTC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_RTCLPEN)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPDIFRXLPEN)) +#define __HAL_RCC_I2C4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C4LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN2LPEN)) +#define __HAL_RCC_CEC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CECLPEN)) + +#define __HAL_RCC_SPDIFRX_CLK_SLEEP_DISABLE()(RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPDIFRXLPEN)) +#define __HAL_RCC_I2C4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C4LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN2LPEN)) +#define __HAL_RCC_CEC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CECLPEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ */ +#define __HAL_RCC_TIM1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM1LPEN)) +#define __HAL_RCC_TIM8_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM8LPEN)) +#define __HAL_RCC_USART1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_USART1LPEN)) +#define __HAL_RCC_USART6_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_USART6LPEN)) +#define __HAL_RCC_ADC1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC1LPEN)) +#define __HAL_RCC_ADC2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC2LPEN)) +#define __HAL_RCC_ADC3_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC3LPEN)) +#define __HAL_RCC_SDMMC1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDMMC1LPEN)) +#define __HAL_RCC_SPI1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI1LPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM9_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM9LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_TIM11_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM11LPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI5LPEN)) +#define __HAL_RCC_SAI1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SAI1LPEN)) +#define __HAL_RCC_SAI2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SAI2LPEN)) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_LTDCLPEN)) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#define __HAL_RCC_TIM1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM1LPEN)) +#define __HAL_RCC_TIM8_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM8LPEN)) +#define __HAL_RCC_USART1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_USART1LPEN)) +#define __HAL_RCC_USART6_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_USART6LPEN)) +#define __HAL_RCC_ADC1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC1LPEN)) +#define __HAL_RCC_ADC2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC2LPEN)) +#define __HAL_RCC_ADC3_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC3LPEN)) +#define __HAL_RCC_SDMMC1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDMMC1LPEN)) +#define __HAL_RCC_SPI1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI1LPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM9_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM9LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_TIM11_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM11LPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI5LPEN)) +#define __HAL_RCC_SAI1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SAI1LPEN)) +#define __HAL_RCC_SAI2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SAI2LPEN)) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx)|| defined (STM32F750xx) +#define __HAL_RCC_LTDC_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_LTDCLPEN)) +#endif /* 
STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_DSILPEN)) +#define __HAL_RCC_DSI_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_DSILPEN)) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_DFSDM1LPEN)) +#define __HAL_RCC_MDIO_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_MDIOLPEN)) +#define __HAL_RCC_DFSDM1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_DFSDM1LPEN)) +#define __HAL_RCC_MDIO_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_MDIOLPEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDMMC2LPEN)) +#define __HAL_RCC_SDMMC2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDMMC2LPEN)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPI6_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI6LPEN)) +#define __HAL_RCC_SPI6_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI6LPEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +/** + * @} + */ + +/** @defgroup RCC_Clock_Sleep_Enable_Disable_Status AHB/APB Peripheral Clock Sleep Enable Disable Status + * @brief Get the enable or disable status of the AHB/APB peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ + +/** @brief Get the enable or disable status of the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
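+ * @note A minimal usage sketch (illustrative only):
+ * @code
+ *   // Query whether DMA2 keeps its clock while the core is in Sleep mode.
+ *   if (__HAL_RCC_DMA2_IS_CLK_SLEEP_ENABLED())
+ *   {
+ *     // DMA2 transfers can continue during Sleep mode (hypothetical application hook).
+ *   }
+ * @endcode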
+ */ +#define __HAL_RCC_FLITF_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_FLITFLPEN)) != RESET) +#define __HAL_RCC_AXI_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_AXILPEN)) != RESET) +#define __HAL_RCC_SRAM1_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_SRAM1LPEN)) != RESET) +#define __HAL_RCC_SRAM2_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_SRAM2LPEN)) != RESET) +#define __HAL_RCC_BKPSRAM_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_BKPSRAMLPEN)) != RESET) +#define __HAL_RCC_DTCM_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DTCMLPEN)) != RESET) +#define __HAL_RCC_DMA2_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA2LPEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_OTGHSLPEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_OTGHSULPILPEN)) != RESET) +#define __HAL_RCC_GPIOA_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOALPEN)) != RESET) +#define __HAL_RCC_GPIOB_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOBLPEN)) != RESET) +#define __HAL_RCC_GPIOC_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOCLPEN)) != RESET) +#define __HAL_RCC_GPIOD_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIODLPEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOELPEN)) != RESET) +#define __HAL_RCC_GPIOF_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOFLPEN)) != RESET) +#define __HAL_RCC_GPIOG_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOGLPEN)) != RESET) +#define __HAL_RCC_GPIOH_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOHLPEN)) != RESET) +#define __HAL_RCC_GPIOI_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOILPEN)) != RESET) + +#define __HAL_RCC_FLITF_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_FLITFLPEN)) == RESET) +#define __HAL_RCC_AXI_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_AXILPEN)) == RESET) +#define __HAL_RCC_SRAM1_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_SRAM1LPEN)) == RESET) +#define __HAL_RCC_SRAM2_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_SRAM2LPEN)) == RESET) +#define __HAL_RCC_BKPSRAM_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_BKPSRAMLPEN)) == RESET) +#define __HAL_RCC_DTCM_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DTCMLPEN)) == RESET) +#define __HAL_RCC_DMA2_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA2LPEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_OTGHSLPEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_OTGHSULPILPEN)) == RESET) +#define __HAL_RCC_GPIOA_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOALPEN)) == RESET) +#define __HAL_RCC_GPIOB_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOBLPEN)) == RESET) +#define __HAL_RCC_GPIOC_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOCLPEN)) == RESET) +#define __HAL_RCC_GPIOD_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIODLPEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOELPEN)) == RESET) +#define __HAL_RCC_GPIOF_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOFLPEN)) == RESET) +#define __HAL_RCC_GPIOG_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & 
(RCC_AHB1LPENR_GPIOGLPEN)) == RESET) +#define __HAL_RCC_GPIOH_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOHLPEN)) == RESET) +#define __HAL_RCC_GPIOI_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOILPEN)) == RESET) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DMA2D_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA2DLPEN)) != RESET) +#define __HAL_RCC_ETHMAC_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACLPEN)) != RESET) +#define __HAL_RCC_ETHMACTX_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACTXLPEN)) != RESET) +#define __HAL_RCC_ETHMACRX_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACRXLPEN)) != RESET) +#define __HAL_RCC_ETHMACPTP_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACPTPLPEN)) != RESET) +#define __HAL_RCC_GPIOJ_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOJLPEN)) != RESET) +#define __HAL_RCC_GPIOK_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOKLPEN)) != RESET) + +#define __HAL_RCC_DMA2D_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA2DLPEN)) == RESET) +#define __HAL_RCC_ETHMAC_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACLPEN)) == RESET) +#define __HAL_RCC_ETHMACTX_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACTXLPEN)) == RESET) +#define __HAL_RCC_ETHMACRX_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACRXLPEN)) == RESET) +#define __HAL_RCC_ETHMACPTP_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACPTPLPEN)) == RESET) +#define __HAL_RCC_GPIOJ_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOJLPEN)) == RESET) +#define __HAL_RCC_GPIOK_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOKLPEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Get the enable or disable status of the AHB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
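+ * @note A minimal usage sketch (illustrative only):
+ * @code
+ *   // Read back whether the RNG and USB OTG FS clocks stay enabled in Sleep mode.
+ *   uint32_t rng_clk_kept    = __HAL_RCC_RNG_IS_CLK_SLEEP_ENABLED();
+ *   uint32_t otg_fs_clk_kept = __HAL_RCC_USB_OTG_FS_IS_CLK_SLEEP_ENABLED();
+ * @endcode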
+ */ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DCMI_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_DCMILPEN)) != RESET) +#define __HAL_RCC_DCMI_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_DCMILPEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F767xx) || defined(STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_JPEG_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_JPEGLPEN)) != RESET) +#define __HAL_RCC_JPEG_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_JPEGLPEN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#define __HAL_RCC_RNG_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_RNGLPEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_RNGLPEN)) == RESET) + +#define __HAL_RCC_USB_OTG_FS_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_OTGFSLPEN)) != RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_OTGFSLPEN)) == RESET) + +#if defined(STM32F756xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_CRYP_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_CRYPLPEN)) != RESET) +#define __HAL_RCC_HASH_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_HASHLPEN)) != RESET) + +#define __HAL_RCC_CRYP_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_CRYPLPEN)) == RESET) +#define __HAL_RCC_HASH_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_HASHLPEN)) == RESET) +#endif /* STM32F756xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_AES_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_AESLPEN)) != RESET) +#define __HAL_RCC_AES_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_AESLPEN)) == RESET) +#endif /* STM32F732xx || STM32F733xx || STM32F730xx */ + +/** @brief Get the enable or disable status of the AHB3 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + */ +#define __HAL_RCC_FMC_IS_CLK_SLEEP_ENABLED() ((RCC->AHB3LPENR & (RCC_AHB3LPENR_FMCLPEN)) != RESET) +#define __HAL_RCC_FMC_IS_CLK_SLEEP_DISABLED() ((RCC->AHB3LPENR & (RCC_AHB3LPENR_FMCLPEN)) == RESET) + +#define __HAL_RCC_QSPI_IS_CLK_SLEEP_ENABLED() ((RCC->AHB3LPENR & (RCC_AHB3LPENR_QSPILPEN)) != RESET) +#define __HAL_RCC_QSPI_IS_CLK_SLEEP_DISABLED() ((RCC->AHB3LPENR & (RCC_AHB3LPENR_QSPILPEN)) == RESET) + +/** @brief Get the enable or disable status of the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
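+ * @note A minimal usage sketch (illustrative only):
+ * @code
+ *   // I2C1 keeps its clock in Sleep mode unless it was explicitly gated (enabled is the default).
+ *   if (__HAL_RCC_I2C1_IS_CLK_SLEEP_ENABLED())
+ *   {
+ *     // hypothetical application hook
+ *   }
+ * @endcode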
+ */ +#define __HAL_RCC_TIM2_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM2LPEN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM3LPEN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM4LPEN)) != RESET) +#define __HAL_RCC_TIM5_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM5LPEN)) != RESET) +#define __HAL_RCC_TIM6_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM6LPEN)) != RESET) +#define __HAL_RCC_TIM7_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM7LPEN)) != RESET) +#define __HAL_RCC_TIM12_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM12LPEN)) != RESET) +#define __HAL_RCC_TIM13_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM13LPEN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM14LPEN)) != RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_LPTIM1LPEN)) != RESET) +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_RTCLPEN)) != RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN3LPEN)) != RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_SPI2LPEN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_SPI3LPEN)) != RESET) +#define __HAL_RCC_USART2_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_USART2LPEN)) != RESET) +#define __HAL_RCC_USART3_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_USART3LPEN)) != RESET) +#define __HAL_RCC_UART4_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART4LPEN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART5LPEN)) != RESET) +#define __HAL_RCC_I2C1_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C1LPEN)) != RESET) +#define __HAL_RCC_I2C2_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C2LPEN)) != RESET) +#define __HAL_RCC_I2C3_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C3LPEN)) != RESET) +#define __HAL_RCC_CAN1_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN1LPEN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_DACLPEN)) != RESET) +#define __HAL_RCC_UART7_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART7LPEN)) != RESET) +#define __HAL_RCC_UART8_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART8LPEN)) != RESET) + +#define __HAL_RCC_TIM2_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM2LPEN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM3LPEN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM4LPEN)) == RESET) +#define __HAL_RCC_TIM5_IS_CLK_SLEEP_DISABLED() 
((RCC->APB1LPENR & (RCC_APB1LPENR_TIM5LPEN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM6LPEN)) == RESET) +#define __HAL_RCC_TIM7_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM7LPEN)) == RESET) +#define __HAL_RCC_TIM12_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM12LPEN)) == RESET) +#define __HAL_RCC_TIM13_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM13LPEN)) == RESET) +#define __HAL_RCC_TIM14_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM14LPEN)) == RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_LPTIM1LPEN)) == RESET) +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_RTCLPEN)) == RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN3LPEN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_SPI2LPEN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_SPI3LPEN)) == RESET) +#define __HAL_RCC_USART2_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_USART2LPEN)) == RESET) +#define __HAL_RCC_USART3_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_USART3LPEN)) == RESET) +#define __HAL_RCC_UART4_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART4LPEN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART5LPEN)) == RESET) +#define __HAL_RCC_I2C1_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C1LPEN)) == RESET) +#define __HAL_RCC_I2C2_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C2LPEN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C3LPEN)) == RESET) +#define __HAL_RCC_CAN1_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN1LPEN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_DACLPEN)) == RESET) +#define __HAL_RCC_UART7_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART7LPEN)) == RESET) +#define __HAL_RCC_UART8_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART8LPEN)) == RESET) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_SPDIFRXLPEN)) != RESET) +#define __HAL_RCC_I2C4_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C4LPEN)) != RESET) +#define __HAL_RCC_CAN2_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN2LPEN)) != RESET) +#define __HAL_RCC_CEC_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CECLPEN)) != RESET) + +#define 
__HAL_RCC_SPDIFRX_IS_CLK_SLEEP_DISABLED()((RCC->APB1LPENR & (RCC_APB1LPENR_SPDIFRXLPEN)) == RESET) +#define __HAL_RCC_I2C4_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C4LPEN)) == RESET) +#define __HAL_RCC_CAN2_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN2LPEN)) == RESET) +#define __HAL_RCC_CEC_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CECLPEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Get the enable or disable status of the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + */ +#define __HAL_RCC_TIM1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM1LPEN)) != RESET) +#define __HAL_RCC_TIM8_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM8LPEN)) != RESET) +#define __HAL_RCC_USART1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_USART1LPEN)) != RESET) +#define __HAL_RCC_USART6_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_USART6LPEN)) != RESET) +#define __HAL_RCC_ADC1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC1LPEN)) != RESET) +#define __HAL_RCC_ADC2_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC2LPEN)) != RESET) +#define __HAL_RCC_ADC3_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC3LPEN)) != RESET) +#define __HAL_RCC_SDMMC1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SDMMC1LPEN)) != RESET) +#define __HAL_RCC_SPI1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI1LPEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI4LPEN)) != RESET) +#define __HAL_RCC_TIM9_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM9LPEN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM10LPEN)) != RESET) +#define __HAL_RCC_TIM11_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM11LPEN)) != RESET) +#define __HAL_RCC_SPI5_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI5LPEN)) != RESET) +#define __HAL_RCC_SAI1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SAI1LPEN)) != RESET) +#define __HAL_RCC_SAI2_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SAI2LPEN)) != RESET) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_LTDCLPEN)) != RESET) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_DSILPEN)) != RESET) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SDMMC2LPEN)) != RESET) +#endif /* STM32F722xx || STM32F723xx 
|| STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_DFSDM1LPEN)) != RESET) +#define __HAL_RCC_MDIO_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_MDIOLPEN)) != RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#define __HAL_RCC_TIM1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM1LPEN)) == RESET) +#define __HAL_RCC_TIM8_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM8LPEN)) == RESET) +#define __HAL_RCC_USART1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_USART1LPEN)) == RESET) +#define __HAL_RCC_USART6_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_USART6LPEN)) == RESET) +#define __HAL_RCC_ADC1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC1LPEN)) == RESET) +#define __HAL_RCC_ADC2_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC2LPEN)) == RESET) +#define __HAL_RCC_ADC3_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC3LPEN)) == RESET) +#define __HAL_RCC_SDMMC1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SDMMC1LPEN)) == RESET) +#define __HAL_RCC_SPI1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI1LPEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI4LPEN)) == RESET) +#define __HAL_RCC_TIM9_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM9LPEN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM10LPEN)) == RESET) +#define __HAL_RCC_TIM11_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM11LPEN)) == RESET) +#define __HAL_RCC_SPI5_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI5LPEN)) == RESET) +#define __HAL_RCC_SAI1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SAI1LPEN)) == RESET) +#define __HAL_RCC_SAI2_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SAI2LPEN)) == RESET) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_LTDCLPEN)) == RESET) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_DSILPEN)) == RESET) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SDMMC2LPEN)) == RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_DFSDM1LPEN)) == RESET) +#define 
__HAL_RCC_MDIO_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_MDIOLPEN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPI6_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI6LPEN)) != RESET) +#define __HAL_RCC_SPI6_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI6LPEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +/** + * @} + */ + +/*------------------------------- PLL Configuration --------------------------*/ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +/** @brief Macro to configure the main PLL clock source, multiplication and division factors. + * @note This function must be used only when the main PLL is disabled. + * @param __RCC_PLLSource__ specifies the PLL entry clock source. + * This parameter can be one of the following values: + * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry + * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry + * @note This clock source (RCC_PLLSource) is common for the main PLL and PLLI2S. + * @param __PLLM__ specifies the division factor for PLL VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 2 MHz to limit PLL jitter. + * @param __PLLN__ specifies the multiplication factor for PLL VCO output clock + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLN parameter correctly to ensure that the VCO + * output frequency is between 100 and 432 MHz. + * @param __PLLP__ specifies the division factor for main system clock (SYSCLK) + * This parameter must be a number in the range {2, 4, 6, or 8}. + * @note You have to set the PLLP parameter correctly to not exceed 216 MHz on + * the System clock frequency. + * @param __PLLQ__ specifies the division factor for OTG FS, SDMMC and RNG clocks + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @note If the USB OTG FS is used in your application, you have to set the + * PLLQ parameter correctly to have 48 MHz clock for the USB. However, + * the SDMMC and RNG need a frequency lower than or equal to 48 MHz to work + * correctly. + * @param __PLLR__ specifies the division factor for DSI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + */ +#define __HAL_RCC_PLL_CONFIG(__RCC_PLLSource__, __PLLM__, __PLLN__, __PLLP__, __PLLQ__,__PLLR__) \ + (RCC->PLLCFGR = ((__RCC_PLLSource__) | (__PLLM__) | \ + ((__PLLN__) << RCC_PLLCFGR_PLLN_Pos) | \ + ((((__PLLP__) >> 1) -1) << RCC_PLLCFGR_PLLP_Pos) | \ + ((__PLLQ__) << RCC_PLLCFGR_PLLQ_Pos) | \ + ((__PLLR__) << RCC_PLLCFGR_PLLR_Pos))) +#else +/** @brief Macro to configure the main PLL clock source, multiplication and division factors. + * @note This function must be used only when the main PLL is disabled. + * @param __RCC_PLLSource__ specifies the PLL entry clock source. 
+ * This parameter can be one of the following values: + * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry + * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry + * @note This clock source (RCC_PLLSource) is common for the main PLL and PLLI2S. + * @param __PLLM__ specifies the division factor for PLL VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 2 MHz to limit PLL jitter. + * @param __PLLN__ specifies the multiplication factor for PLL VCO output clock + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLN parameter correctly to ensure that the VCO + * output frequency is between 100 and 432 MHz. + * @param __PLLP__ specifies the division factor for main system clock (SYSCLK) + * This parameter must be a number in the range {2, 4, 6, or 8}. + * @note You have to set the PLLP parameter correctly to not exceed 216 MHz on + * the System clock frequency. + * @param __PLLQ__ specifies the division factor for OTG FS, SDMMC and RNG clocks + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @note If the USB OTG FS is used in your application, you have to set the + * PLLQ parameter correctly to have 48 MHz clock for the USB. However, + * the SDMMC and RNG need a frequency lower than or equal to 48 MHz to work + * correctly. + */ +#define __HAL_RCC_PLL_CONFIG(__RCC_PLLSource__, __PLLM__, __PLLN__, __PLLP__, __PLLQ__) \ + (RCC->PLLCFGR = (0x20000000 | (__RCC_PLLSource__) | (__PLLM__)| \ + ((__PLLN__) << RCC_PLLCFGR_PLLN_Pos) | \ + ((((__PLLP__) >> 1) -1) << RCC_PLLCFGR_PLLP_Pos) | \ + ((__PLLQ__) << RCC_PLLCFGR_PLLQ_Pos))) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/*---------------------------------------------------------------------------------------------*/ + +/** @brief Macro to configure the Timers clocks prescalers + * @param __PRESC__ specifies the Timers clocks prescalers selection + * This parameter can be one of the following values: + * @arg RCC_TIMPRES_DESACTIVATED: The Timers kernels clocks prescaler is + * equal to HPRE if PPREx is corresponding to division by 1 or 2, + * else it is equal to [(HPRE * PPREx) / 2] if PPREx is corresponding to + * division by 4 or more. + * @arg RCC_TIMPRES_ACTIVATED: The Timers kernels clocks prescaler is + * equal to HPRE if PPREx is corresponding to division by 1, 2 or 4, + * else it is equal to [(HPRE * PPREx) / 4] if PPREx is corresponding + * to division by 8 or more. + */ +#define __HAL_RCC_TIMCLKPRESCALER(__PRESC__) do {RCC->DCKCFGR1 &= ~(RCC_DCKCFGR1_TIMPRE);\ + RCC->DCKCFGR1 |= (__PRESC__); \ + }while(0) + +/** @brief Macros to Enable or Disable the PLLISAI. + * @note The PLLSAI is disabled by hardware when entering STOP and STANDBY modes. + */ +#define __HAL_RCC_PLLSAI_ENABLE() (RCC->CR |= (RCC_CR_PLLSAION)) +#define __HAL_RCC_PLLSAI_DISABLE() (RCC->CR &= ~(RCC_CR_PLLSAION)) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +/** @brief Macro to configure the PLLSAI clock multiplication and division factors. + * @note This function must be used only when the PLLSAI is disabled. 
+ * @note PLLSAI clock source is common with the main PLL (configured in + * RCC_PLLConfig function ) + * @param __PLLSAIN__ specifies the multiplication factor for PLLSAI VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * @param __PLLSAIP__ specifies the division factor for USB, RNG, SDMMC clocks + * This parameter can be a value of @ref RCCEx_PLLSAIP_Clock_Divider. + * @param __PLLSAIQ__ specifies the division factor for SAI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + */ +#define __HAL_RCC_PLLSAI_CONFIG(__PLLSAIN__, __PLLSAIP__, __PLLSAIQ__) \ + (RCC->PLLSAICFGR = ((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) |\ + ((__PLLSAIP__) << RCC_PLLSAICFGR_PLLSAIP_Pos) |\ + ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos)) + +/** @brief Macro to configure the PLLI2S clock multiplication and division factors. + * @note This macro must be used only when the PLLI2S is disabled. + * @note PLLI2S clock source is common with the main PLL (configured in + * HAL_RCC_ClockConfig() API) + * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * @param __PLLI2SQ__ specifies the division factor for SAI clock. + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @param __PLLI2SR__ specifies the division factor for I2S clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note You have to set the PLLI2SR parameter correctly to not exceed 192 MHz + * on the I2S clock frequency. + */ +#define __HAL_RCC_PLLI2S_CONFIG(__PLLI2SN__, __PLLI2SQ__, __PLLI2SR__) \ + (RCC->PLLI2SCFGR = ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((__PLLI2SQ__) << RCC_PLLI2SCFGR_PLLI2SQ_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos)) +#else +/** @brief Macro to configure the PLLSAI clock multiplication and division factors. + * @note This function must be used only when the PLLSAI is disabled. + * @note PLLSAI clock source is common with the main PLL (configured in + * RCC_PLLConfig function ) + * @param __PLLSAIN__ specifies the multiplication factor for PLLSAI VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * @param __PLLSAIP__ specifies the division factor for USB, RNG, SDMMC clocks + * This parameter can be a value of @ref RCCEx_PLLSAIP_Clock_Divider. + * @param __PLLSAIQ__ specifies the division factor for SAI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @param __PLLSAIR__ specifies the division factor for LTDC clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. 
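+ * @note A minimal configuration sketch (illustrative values; RCC_PLLSAIP_DIV4 is assumed to be
+ *       one of the @ref RCCEx_PLLSAIP_Clock_Divider values):
+ * @code
+ *   __HAL_RCC_PLLSAI_DISABLE();                            // PLLSAI must be disabled before it is reconfigured
+ *   __HAL_RCC_PLLSAI_CONFIG(192, RCC_PLLSAIP_DIV4, 4, 5);  // N = 192, Q = 4, R = 5
+ *   __HAL_RCC_PLLSAI_ENABLE();
+ * @endcode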
+ */ +#define __HAL_RCC_PLLSAI_CONFIG(__PLLSAIN__, __PLLSAIP__, __PLLSAIQ__, __PLLSAIR__) \ + (RCC->PLLSAICFGR = ((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) |\ + ((__PLLSAIP__) << RCC_PLLSAICFGR_PLLSAIP_Pos) |\ + ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos) |\ + ((__PLLSAIR__) << RCC_PLLSAICFGR_PLLSAIR_Pos)) + +/** @brief Macro to configure the PLLI2S clock multiplication and division factors. + * @note This macro must be used only when the PLLI2S is disabled. + * @note PLLI2S clock source is common with the main PLL (configured in + * HAL_RCC_ClockConfig() API) + * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * @param __PLLI2SP__ specifies the division factor for SPDDIF-RX clock. + * This parameter can be a value of @ref RCCEx_PLLI2SP_Clock_Divider. + * @param __PLLI2SQ__ specifies the division factor for SAI clock. + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @param __PLLI2SR__ specifies the division factor for I2S clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note You have to set the PLLI2SR parameter correctly to not exceed 192 MHz + * on the I2S clock frequency. + */ +#define __HAL_RCC_PLLI2S_CONFIG(__PLLI2SN__, __PLLI2SP__, __PLLI2SQ__, __PLLI2SR__) \ + (RCC->PLLI2SCFGR = ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((__PLLI2SP__) << RCC_PLLI2SCFGR_PLLI2SP_Pos) |\ + ((__PLLI2SQ__) << RCC_PLLI2SCFGR_PLLI2SQ_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ + +/** @brief Macro to configure the SAI clock Divider coming from PLLI2S. + * @note This function must be called before enabling the PLLI2S. + * @param __PLLI2SDivQ__ specifies the PLLI2S division factor for SAI1 clock . + * This parameter must be a number between 1 and 32. + * SAI1 clock frequency = f(PLLI2SQ) / __PLLI2SDivQ__ + */ +#define __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(__PLLI2SDivQ__) (MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_PLLI2SDIVQ, (__PLLI2SDivQ__)-1)) + +/** @brief Macro to configure the SAI clock Divider coming from PLLSAI. + * @note This function must be called before enabling the PLLSAI. + * @param __PLLSAIDivQ__ specifies the PLLSAI division factor for SAI1 clock . + * This parameter must be a number between Min_Data = 1 and Max_Data = 32. + * SAI1 clock frequency = f(PLLSAIQ) / __PLLSAIDivQ__ + */ +#define __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(__PLLSAIDivQ__) (MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_PLLSAIDIVQ, ((__PLLSAIDivQ__)-1)<<8)) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +/** @brief Macro to configure the LTDC clock Divider coming from PLLSAI. + * @note This function must be called before enabling the PLLSAI. + * @param __PLLSAIDivR__ specifies the PLLSAI division factor for LTDC clock . + * This parameter can be a value of @ref RCCEx_PLLSAI_DIVR. 
+ * LTDC clock frequency = f(PLLSAIR) / __PLLSAIDivR__ + */ +#define __HAL_RCC_PLLSAI_PLLSAICLKDIVR_CONFIG(__PLLSAIDivR__)\ + MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_PLLSAIDIVR, (uint32_t)(__PLLSAIDivR__)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Macro to configure SAI1 clock source selection. + * @note This function must be called before enabling PLLSAI, PLLI2S and + * the SAI clock. + * @param __SOURCE__ specifies the SAI1 clock source. + * This parameter can be one of the following values: + * @arg RCC_SAI1CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PIN: External clock mapped on the I2S_CKIN pin + * used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock + * used as SAI1 clock. + * @note The RCC_SAI1CLKSOURCE_PLLSRC value is only available with STM32F767/769/777/779xx Devices + */ +#define __HAL_RCC_SAI1_CONFIG(__SOURCE__)\ + MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_SAI1SEL, (uint32_t)(__SOURCE__)) + +/** @brief Macro to get the SAI1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_SAI1CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PIN: External clock mapped on the I2S_CKIN pin + * used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock + * used as SAI1 clock. + * @note The RCC_SAI1CLKSOURCE_PLLSRC value is only available with STM32F767/769/777/779xx Devices + */ +#define __HAL_RCC_GET_SAI1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR1, RCC_DCKCFGR1_SAI1SEL))) + +/** @brief Macro to configure SAI2 clock source selection. + * @note This function must be called before enabling PLLSAI, PLLI2S and + * the SAI clock. + * @param __SOURCE__ specifies the SAI2 clock source. + * This parameter can be one of the following values: + * @arg RCC_SAI2CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PIN: External clock mapped on the I2S_CKIN pin + * used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock + * used as SAI2 clock. + * @note The RCC_SAI2CLKSOURCE_PLLSRC value is only available with STM32F767/769/777/779xx Devices + */ +#define __HAL_RCC_SAI2_CONFIG(__SOURCE__)\ + MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_SAI2SEL, (uint32_t)(__SOURCE__)) + + +/** @brief Macro to get the SAI2 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_SAI2CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PIN: External clock mapped on the I2S_CKIN pin + * used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock + * used as SAI2 clock. 
+ * @note The RCC_SAI2CLKSOURCE_PLLSRC value is only available with STM32F767/769/777/779xx Devices + */ +#define __HAL_RCC_GET_SAI2_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR1, RCC_DCKCFGR1_SAI2SEL))) + + +/** @brief Enable PLLSAI_RDY interrupt. + */ +#define __HAL_RCC_PLLSAI_ENABLE_IT() (RCC->CIR |= (RCC_CIR_PLLSAIRDYIE)) + +/** @brief Disable PLLSAI_RDY interrupt. + */ +#define __HAL_RCC_PLLSAI_DISABLE_IT() (RCC->CIR &= ~(RCC_CIR_PLLSAIRDYIE)) + +/** @brief Clear the PLLSAI RDY interrupt pending bits. + */ +#define __HAL_RCC_PLLSAI_CLEAR_IT() (RCC->CIR |= (RCC_CIR_PLLSAIRDYF)) + +/** @brief Check the PLLSAI RDY interrupt has occurred or not. + * @retval The new state (TRUE or FALSE). + */ +#define __HAL_RCC_PLLSAI_GET_IT() ((RCC->CIR & (RCC_CIR_PLLSAIRDYIE)) == (RCC_CIR_PLLSAIRDYIE)) + +/** @brief Check PLLSAI RDY flag is set or not. + * @retval The new state (TRUE or FALSE). + */ +#define __HAL_RCC_PLLSAI_GET_FLAG() ((RCC->CR & (RCC_CR_PLLSAIRDY)) == (RCC_CR_PLLSAIRDY)) + +/** @brief Macro to Get I2S clock source selection. + * @retval The clock source can be one of the following values: + * @arg RCC_I2SCLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. + * @arg RCC_I2SCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as I2S clock source + */ +#define __HAL_RCC_GET_I2SCLKSOURCE() (READ_BIT(RCC->CFGR, RCC_CFGR_I2SSRC)) + +/** @brief Macro to configure the I2C1 clock (I2C1CLK). + * + * @param __I2C1_CLKSOURCE__ specifies the I2C1 clock source. + * This parameter can be one of the following values: + * @arg RCC_I2C1CLKSOURCE_PCLK1: PCLK1 selected as I2C1 clock + * @arg RCC_I2C1CLKSOURCE_HSI: HSI selected as I2C1 clock + * @arg RCC_I2C1CLKSOURCE_SYSCLK: System Clock selected as I2C1 clock + */ +#define __HAL_RCC_I2C1_CONFIG(__I2C1_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C1SEL, (uint32_t)(__I2C1_CLKSOURCE__)) + +/** @brief Macro to get the I2C1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_I2C1CLKSOURCE_PCLK1: PCLK1 selected as I2C1 clock + * @arg RCC_I2C1CLKSOURCE_HSI: HSI selected as I2C1 clock + * @arg RCC_I2C1CLKSOURCE_SYSCLK: System Clock selected as I2C1 clock + */ +#define __HAL_RCC_GET_I2C1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C1SEL))) + +/** @brief Macro to configure the I2C2 clock (I2C2CLK). + * + * @param __I2C2_CLKSOURCE__ specifies the I2C2 clock source. + * This parameter can be one of the following values: + * @arg RCC_I2C2CLKSOURCE_PCLK1: PCLK1 selected as I2C2 clock + * @arg RCC_I2C2CLKSOURCE_HSI: HSI selected as I2C2 clock + * @arg RCC_I2C2CLKSOURCE_SYSCLK: System Clock selected as I2C2 clock + */ +#define __HAL_RCC_I2C2_CONFIG(__I2C2_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C2SEL, (uint32_t)(__I2C2_CLKSOURCE__)) + +/** @brief Macro to get the I2C2 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_I2C2CLKSOURCE_PCLK1: PCLK1 selected as I2C2 clock + * @arg RCC_I2C2CLKSOURCE_HSI: HSI selected as I2C2 clock + * @arg RCC_I2C2CLKSOURCE_SYSCLK: System Clock selected as I2C2 clock + */ +#define __HAL_RCC_GET_I2C2_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C2SEL))) + +/** @brief Macro to configure the I2C3 clock (I2C3CLK). + * + * @param __I2C3_CLKSOURCE__ specifies the I2C3 clock source. 
+ * This parameter can be one of the following values: + * @arg RCC_I2C3CLKSOURCE_PCLK1: PCLK1 selected as I2C3 clock + * @arg RCC_I2C3CLKSOURCE_HSI: HSI selected as I2C3 clock + * @arg RCC_I2C3CLKSOURCE_SYSCLK: System Clock selected as I2C3 clock + */ +#define __HAL_RCC_I2C3_CONFIG(__I2C3_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C3SEL, (uint32_t)(__I2C3_CLKSOURCE__)) + +/** @brief macro to get the I2C3 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_I2C3CLKSOURCE_PCLK1: PCLK1 selected as I2C3 clock + * @arg RCC_I2C3CLKSOURCE_HSI: HSI selected as I2C3 clock + * @arg RCC_I2C3CLKSOURCE_SYSCLK: System Clock selected as I2C3 clock + */ +#define __HAL_RCC_GET_I2C3_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C3SEL))) + +/** @brief Macro to configure the I2C4 clock (I2C4CLK). + * + * @param __I2C4_CLKSOURCE__ specifies the I2C4 clock source. + * This parameter can be one of the following values: + * @arg RCC_I2C4CLKSOURCE_PCLK1: PCLK1 selected as I2C4 clock + * @arg RCC_I2C4CLKSOURCE_HSI: HSI selected as I2C4 clock + * @arg RCC_I2C4CLKSOURCE_SYSCLK: System Clock selected as I2C4 clock + */ +#define __HAL_RCC_I2C4_CONFIG(__I2C4_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C4SEL, (uint32_t)(__I2C4_CLKSOURCE__)) + +/** @brief macro to get the I2C4 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_I2C4CLKSOURCE_PCLK1: PCLK1 selected as I2C4 clock + * @arg RCC_I2C4CLKSOURCE_HSI: HSI selected as I2C4 clock + * @arg RCC_I2C4CLKSOURCE_SYSCLK: System Clock selected as I2C4 clock + */ +#define __HAL_RCC_GET_I2C4_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C4SEL))) + +/** @brief Macro to configure the USART1 clock (USART1CLK). + * + * @param __USART1_CLKSOURCE__ specifies the USART1 clock source. + * This parameter can be one of the following values: + * @arg RCC_USART1CLKSOURCE_PCLK2: PCLK2 selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_HSI: HSI selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_SYSCLK: System Clock selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_LSE: LSE selected as USART1 clock + */ +#define __HAL_RCC_USART1_CONFIG(__USART1_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_USART1SEL, (uint32_t)(__USART1_CLKSOURCE__)) + +/** @brief macro to get the USART1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_USART1CLKSOURCE_PCLK2: PCLK2 selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_HSI: HSI selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_SYSCLK: System Clock selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_LSE: LSE selected as USART1 clock + */ +#define __HAL_RCC_GET_USART1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_USART1SEL))) + +/** @brief Macro to configure the USART2 clock (USART2CLK). + * + * @param __USART2_CLKSOURCE__ specifies the USART2 clock source. + * This parameter can be one of the following values: + * @arg RCC_USART2CLKSOURCE_PCLK1: PCLK1 selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_HSI: HSI selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_SYSCLK: System Clock selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_LSE: LSE selected as USART2 clock + */ +#define __HAL_RCC_USART2_CONFIG(__USART2_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_USART2SEL, (uint32_t)(__USART2_CLKSOURCE__)) + +/** @brief macro to get the USART2 clock source. 
+ * @retval The clock source can be one of the following values: + * @arg RCC_USART2CLKSOURCE_PCLK1: PCLK1 selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_HSI: HSI selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_SYSCLK: System Clock selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_LSE: LSE selected as USART2 clock + */ +#define __HAL_RCC_GET_USART2_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_USART2SEL))) + +/** @brief Macro to configure the USART3 clock (USART3CLK). + * + * @param __USART3_CLKSOURCE__ specifies the USART3 clock source. + * This parameter can be one of the following values: + * @arg RCC_USART3CLKSOURCE_PCLK1: PCLK1 selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_HSI: HSI selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_SYSCLK: System Clock selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_LSE: LSE selected as USART3 clock + */ +#define __HAL_RCC_USART3_CONFIG(__USART3_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_USART3SEL, (uint32_t)(__USART3_CLKSOURCE__)) + +/** @brief macro to get the USART3 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_USART3CLKSOURCE_PCLK1: PCLK1 selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_HSI: HSI selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_SYSCLK: System Clock selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_LSE: LSE selected as USART3 clock + */ +#define __HAL_RCC_GET_USART3_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_USART3SEL))) + + /** @brief Macro to configure the UART4 clock (UART4CLK). + * + * @param __UART4_CLKSOURCE__ specifies the UART4 clock source. + * This parameter can be one of the following values: + * @arg RCC_UART4CLKSOURCE_PCLK1: PCLK1 selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_HSI: HSI selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_SYSCLK: System Clock selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_LSE: LSE selected as UART4 clock + */ +#define __HAL_RCC_UART4_CONFIG(__UART4_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_UART4SEL, (uint32_t)(__UART4_CLKSOURCE__)) + +/** @brief macro to get the UART4 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_UART4CLKSOURCE_PCLK1: PCLK1 selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_HSI: HSI selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_SYSCLK: System Clock selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_LSE: LSE selected as UART4 clock + */ +#define __HAL_RCC_GET_UART4_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_UART4SEL))) + + /** @brief Macro to configure the UART5 clock (UART5CLK). + * + * @param __UART5_CLKSOURCE__ specifies the UART5 clock source. + * This parameter can be one of the following values: + * @arg RCC_UART5CLKSOURCE_PCLK1: PCLK1 selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_HSI: HSI selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_SYSCLK: System Clock selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_LSE: LSE selected as UART5 clock + */ +#define __HAL_RCC_UART5_CONFIG(__UART5_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_UART5SEL, (uint32_t)(__UART5_CLKSOURCE__)) + +/** @brief macro to get the UART5 clock source. 
+ * @retval The clock source can be one of the following values: + * @arg RCC_UART5CLKSOURCE_PCLK1: PCLK1 selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_HSI: HSI selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_SYSCLK: System Clock selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_LSE: LSE selected as UART5 clock + */ +#define __HAL_RCC_GET_UART5_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_UART5SEL))) + + /** @brief Macro to configure the USART6 clock (USART6CLK). + * + * @param __USART6_CLKSOURCE__ specifies the USART6 clock source. + * This parameter can be one of the following values: + * @arg RCC_USART6CLKSOURCE_PCLK1: PCLK1 selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_HSI: HSI selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_SYSCLK: System Clock selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_LSE: LSE selected as USART6 clock + */ +#define __HAL_RCC_USART6_CONFIG(__USART6_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_USART6SEL, (uint32_t)(__USART6_CLKSOURCE__)) + +/** @brief macro to get the USART6 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_USART6CLKSOURCE_PCLK1: PCLK1 selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_HSI: HSI selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_SYSCLK: System Clock selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_LSE: LSE selected as USART6 clock + */ +#define __HAL_RCC_GET_USART6_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_USART6SEL))) + + /** @brief Macro to configure the UART7 clock (UART7CLK). + * + * @param __UART7_CLKSOURCE__ specifies the UART7 clock source. + * This parameter can be one of the following values: + * @arg RCC_UART7CLKSOURCE_PCLK1: PCLK1 selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_HSI: HSI selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_SYSCLK: System Clock selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_LSE: LSE selected as UART7 clock + */ +#define __HAL_RCC_UART7_CONFIG(__UART7_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_UART7SEL, (uint32_t)(__UART7_CLKSOURCE__)) + +/** @brief macro to get the UART7 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_UART7CLKSOURCE_PCLK1: PCLK1 selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_HSI: HSI selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_SYSCLK: System Clock selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_LSE: LSE selected as UART7 clock + */ +#define __HAL_RCC_GET_UART7_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_UART7SEL))) + +/** @brief Macro to configure the UART8 clock (UART8CLK). + * + * @param __UART8_CLKSOURCE__ specifies the UART8 clock source. + * This parameter can be one of the following values: + * @arg RCC_UART8CLKSOURCE_PCLK1: PCLK1 selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_HSI: HSI selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_SYSCLK: System Clock selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_LSE: LSE selected as UART8 clock + */ +#define __HAL_RCC_UART8_CONFIG(__UART8_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_UART8SEL, (uint32_t)(__UART8_CLKSOURCE__)) + +/** @brief macro to get the UART8 clock source. 
+ * @retval The clock source can be one of the following values: + * @arg RCC_UART8CLKSOURCE_PCLK1: PCLK1 selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_HSI: HSI selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_SYSCLK: System Clock selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_LSE: LSE selected as UART8 clock + */ +#define __HAL_RCC_GET_UART8_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_UART8SEL))) + +/** @brief Macro to configure the LPTIM1 clock (LPTIM1CLK). + * + * @param __LPTIM1_CLKSOURCE__ specifies the LPTIM1 clock source. + * This parameter can be one of the following values: + * @arg RCC_LPTIM1CLKSOURCE_PCLK1: PCLK selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_HSI: HSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSI: LSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSE: LSE selected as LPTIM1 clock + */ +#define __HAL_RCC_LPTIM1_CONFIG(__LPTIM1_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL, (uint32_t)(__LPTIM1_CLKSOURCE__)) + +/** @brief macro to get the LPTIM1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_LPTIM1CLKSOURCE_PCLK1: PCLK selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_HSI: HSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSI: LSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSE: LSE selected as LPTIM1 clock + */ +#define __HAL_RCC_GET_LPTIM1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL))) + +/** @brief Macro to configure the CEC clock (CECCLK). + * + * @param __CEC_CLKSOURCE__ specifies the CEC clock source. + * This parameter can be one of the following values: + * @arg RCC_CECCLKSOURCE_LSE: LSE selected as CEC clock + * @arg RCC_CECCLKSOURCE_HSI: HSI divided by 488 selected as CEC clock + */ +#define __HAL_RCC_CEC_CONFIG(__CEC_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CECSEL, (uint32_t)(__CEC_CLKSOURCE__)) + +/** @brief macro to get the CEC clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_CECCLKSOURCE_LSE: LSE selected as CEC clock + * @arg RCC_CECCLKSOURCE_HSI: HSI selected as CEC clock + */ +#define __HAL_RCC_GET_CEC_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_CECSEL))) + +/** @brief Macro to configure the CLK48 source (CLK48CLK). + * + * @param __CLK48_SOURCE__ specifies the CLK48 clock source. + * This parameter can be one of the following values: + * @arg RCC_CLK48SOURCE_PLL: PLL selected as CLK48 source + * @arg RCC_CLK48SOURCE_PLLSAIP: PLLSAIP selected as CLK48 source + */ +#define __HAL_RCC_CLK48_CONFIG(__CLK48_SOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, (uint32_t)(__CLK48_SOURCE__)) + +/** @brief macro to get the CLK48 source. + * @retval The clock source can be one of the following values: + * @arg RCC_CLK48SOURCE_PLL: PLL used as CLK48 source + * @arg RCC_CLK48SOURCE_PLLSAIP: PLLSAIP used as CLK48 source + */ +#define __HAL_RCC_GET_CLK48_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL))) + +/** @brief Macro to configure the SDMMC1 clock (SDMMC1CLK). + * + * @param __SDMMC1_CLKSOURCE__ specifies the SDMMC1 clock source. 
+ * This parameter can be one of the following values: + * @arg RCC_SDMMC1CLKSOURCE_CLK48: CLK48 selected as SDMMC clock + * @arg RCC_SDMMC1CLKSOURCE_SYSCLK: SYSCLK selected as SDMMC clock + */ +#define __HAL_RCC_SDMMC1_CONFIG(__SDMMC1_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SDMMC1SEL, (uint32_t)(__SDMMC1_CLKSOURCE__)) + +/** @brief macro to get the SDMMC1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_SDMMC1CLKSOURCE_CLK48: CLK48 selected as SDMMC1 clock + * @arg RCC_SDMMC1CLKSOURCE_SYSCLK: SYSCLK selected as SDMMC1 clock + */ +#define __HAL_RCC_GET_SDMMC1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_SDMMC1SEL))) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +/** @brief Macro to configure the SDMMC2 clock (SDMMC2CLK). + * @param __SDMMC2_CLKSOURCE__ specifies the SDMMC2 clock source. + * This parameter can be one of the following values: + * @arg RCC_SDMMC2CLKSOURCE_CLK48: CLK48 selected as SDMMC2 clock + * @arg RCC_SDMMC2CLKSOURCE_SYSCLK: SYSCLK selected as SDMMC2 clock + */ +#define __HAL_RCC_SDMMC2_CONFIG(__SDMMC2_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SDMMC2SEL, (uint32_t)(__SDMMC2_CLKSOURCE__)) + +/** @brief macro to get the SDMMC2 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_SDMMC2CLKSOURCE_CLK48: CLK48 selected as SDMMC2 clock + * @arg RCC_SDMMC2CLKSOURCE_SYSCLK: SYSCLK selected as SDMMC2 clock + */ +#define __HAL_RCC_GET_SDMMC2_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_SDMMC2SEL))) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +/** @brief Macro to configure the DFSDM1 clock + * @param __DFSDM1_CLKSOURCE__ specifies the DFSDM1 clock source. + * This parameter can be one of the following values: + * @arg RCC_DFSDM1CLKSOURCE_PCLK2: PCLK2 Clock selected as DFSDM clock + * @arg RCC_DFSDMCLKSOURCE_SYSCLK: System Clock selected as DFSDM clock + */ +#define __HAL_RCC_DFSDM1_CONFIG(__DFSDM1_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_DFSDM1SEL, (uint32_t)(__DFSDM1_CLKSOURCE__)) + +/** @brief Macro to get the DFSDM1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_DFSDM1CLKSOURCE_PCLK2: PCLK2 Clock selected as DFSDM1 clock + * @arg RCC_DFSDM1CLKSOURCE_SYSCLK: System Clock selected as DFSDM1 clock + */ +#define __HAL_RCC_GET_DFSDM1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR1, RCC_DCKCFGR1_DFSDM1SEL))) + +/** @brief Macro to configure the DFSDM1 Audio clock + * @param __DFSDM1AUDIO_CLKSOURCE__ specifies the DFSDM1 Audio clock source. + * This parameter can be one of the following values: + * @arg RCC_DFSDM1AUDIOCLKSOURCE_SAI1: SAI1 Clock selected as DFSDM1 Audio clock + * @arg RCC_DFSDM1AUDIOCLKSOURCE_SAI2: SAI2 Clock selected as DFSDM1 Audio clock + */ +#define __HAL_RCC_DFSDM1AUDIO_CONFIG(__DFSDM1AUDIO_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_ADFSDM1SEL, (uint32_t)(__DFSDM1AUDIO_CLKSOURCE__)) + +/** @brief Macro to get the DFSDM1 Audio clock source. 
+ * @retval The clock source can be one of the following values: + * @arg RCC_DFSDM1AUDIOCLKSOURCE_SAI1: SAI1 Clock selected as DFSDM1 Audio clock + * @arg RCC_DFSDM1AUDIOCLKSOURCE_SAI2: SAI2 Clock selected as DFSDM1 Audio clock + */ +#define __HAL_RCC_GET_DFSDM1AUDIO_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR1, RCC_DCKCFGR1_ADFSDM1SEL))) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#if defined (STM32F769xx) || defined (STM32F779xx) +/** @brief Macro to configure the DSI clock. + * @param __DSI_CLKSOURCE__ specifies the DSI clock source. + * This parameter can be one of the following values: + * @arg RCC_DSICLKSOURCE_PLLR: PLLR output used as DSI clock. + * @arg RCC_DSICLKSOURCE_DSIPHY: DSI-PHY output used as DSI clock. + */ +#define __HAL_RCC_DSI_CONFIG(__DSI_CLKSOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_DSISEL, (uint32_t)(__DSI_CLKSOURCE__))) + +/** @brief Macro to Get the DSI clock. + * @retval The clock source can be one of the following values: + * @arg RCC_DSICLKSOURCE_PLLR: PLLR output used as DSI clock. + * @arg RCC_DSICLKSOURCE_DSIPHY: DSI-PHY output used as DSI clock. + */ +#define __HAL_RCC_GET_DSI_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_DSISEL)) +#endif /* STM32F769xx || STM32F779xx */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup RCCEx_Exported_Functions_Group1 + * @{ + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit); +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit); +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk); +HAL_StatusTypeDef HAL_RCCEx_EnablePLLI2S(RCC_PLLI2SInitTypeDef *PLLI2SInit); +HAL_StatusTypeDef HAL_RCCEx_DisablePLLI2S(void); +HAL_StatusTypeDef HAL_RCCEx_EnablePLLSAI(RCC_PLLSAIInitTypeDef *PLLSAIInit); +HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/** @addtogroup RCCEx_Private_Macros RCCEx Private Macros + * @{ + */ +/** @defgroup RCCEx_IS_RCC_Definitions RCC Private macros to check input parameters + * @{ + */ +#if defined(STM32F756xx) || defined(STM32F746xx) || defined(STM32F750xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) \ + ((((SELECTION) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || \ + (((SELECTION) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC) || \ + (((SELECTION) & RCC_PERIPHCLK_TIM) == RCC_PERIPHCLK_TIM) || \ + (((SELECTION) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) || \ + (((SELECTION) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) || \ + (((SELECTION) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) || \ + (((SELECTION) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) || \ + (((SELECTION) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) || \ + (((SELECTION) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) || \ + (((SELECTION) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) || \ + (((SELECTION) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C4) == RCC_PERIPHCLK_I2C4) || \ + (((SELECTION) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) || \ + (((SELECTION) & RCC_PERIPHCLK_CLK48) == 
RCC_PERIPHCLK_CLK48) || \ + (((SELECTION) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) || \ + (((SELECTION) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) || \ + (((SELECTION) & RCC_PERIPHCLK_RTC) == RCC_PERIPHCLK_RTC)) +#elif defined(STM32F745xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) \ + ((((SELECTION) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || \ + (((SELECTION) & RCC_PERIPHCLK_TIM) == RCC_PERIPHCLK_TIM) || \ + (((SELECTION) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) || \ + (((SELECTION) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) || \ + (((SELECTION) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) || \ + (((SELECTION) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) || \ + (((SELECTION) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) || \ + (((SELECTION) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) || \ + (((SELECTION) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) || \ + (((SELECTION) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C4) == RCC_PERIPHCLK_I2C4) || \ + (((SELECTION) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) || \ + (((SELECTION) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) || \ + (((SELECTION) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) || \ + (((SELECTION) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) || \ + (((SELECTION) & RCC_PERIPHCLK_RTC) == RCC_PERIPHCLK_RTC)) +#elif defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) \ + ((((SELECTION) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || \ + (((SELECTION) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC) || \ + (((SELECTION) & RCC_PERIPHCLK_TIM) == RCC_PERIPHCLK_TIM) || \ + (((SELECTION) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) || \ + (((SELECTION) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) || \ + (((SELECTION) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) || \ + (((SELECTION) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) || \ + (((SELECTION) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) || \ + (((SELECTION) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) || \ + (((SELECTION) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) || \ + (((SELECTION) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C4) == RCC_PERIPHCLK_I2C4) || \ + (((SELECTION) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) || \ + (((SELECTION) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) || \ + (((SELECTION) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC2) == RCC_PERIPHCLK_SDMMC2) || \ + (((SELECTION) & RCC_PERIPHCLK_DFSDM1) == RCC_PERIPHCLK_DFSDM1) || \ + 
(((SELECTION) & RCC_PERIPHCLK_DFSDM1_AUDIO) == RCC_PERIPHCLK_DFSDM1_AUDIO) || \ + (((SELECTION) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) || \ + (((SELECTION) & RCC_PERIPHCLK_RTC) == RCC_PERIPHCLK_RTC)) +#elif defined (STM32F765xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) \ + ((((SELECTION) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || \ + (((SELECTION) & RCC_PERIPHCLK_TIM) == RCC_PERIPHCLK_TIM) || \ + (((SELECTION) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) || \ + (((SELECTION) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) || \ + (((SELECTION) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) || \ + (((SELECTION) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) || \ + (((SELECTION) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) || \ + (((SELECTION) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) || \ + (((SELECTION) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) || \ + (((SELECTION) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C4) == RCC_PERIPHCLK_I2C4) || \ + (((SELECTION) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) || \ + (((SELECTION) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) || \ + (((SELECTION) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC2) == RCC_PERIPHCLK_SDMMC2) || \ + (((SELECTION) & RCC_PERIPHCLK_DFSDM1) == RCC_PERIPHCLK_DFSDM1) || \ + (((SELECTION) & RCC_PERIPHCLK_DFSDM1_AUDIO) == RCC_PERIPHCLK_DFSDM1_AUDIO) || \ + (((SELECTION) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) || \ + (((SELECTION) & RCC_PERIPHCLK_RTC) == RCC_PERIPHCLK_RTC)) +#elif defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) \ + ((((SELECTION) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || \ + (((SELECTION) & RCC_PERIPHCLK_TIM) == RCC_PERIPHCLK_TIM) || \ + (((SELECTION) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) || \ + (((SELECTION) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) || \ + (((SELECTION) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) || \ + (((SELECTION) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) || \ + (((SELECTION) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) || \ + (((SELECTION) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) || \ + (((SELECTION) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) || \ + (((SELECTION) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) || \ + (((SELECTION) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) || \ + (((SELECTION) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC2) == RCC_PERIPHCLK_SDMMC2) || \ + (((SELECTION) & RCC_PERIPHCLK_RTC) == RCC_PERIPHCLK_RTC)) +#endif /* STM32F746xx || STM32F756xx || 
STM32F750xx */ +#define IS_RCC_PLLI2SN_VALUE(VALUE) ((50 <= (VALUE)) && ((VALUE) <= 432)) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || defined (STM32F767xx) || \ + defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define IS_RCC_PLLI2SP_VALUE(VALUE) (((VALUE) == RCC_PLLI2SP_DIV2) ||\ + ((VALUE) == RCC_PLLI2SP_DIV4) ||\ + ((VALUE) == RCC_PLLI2SP_DIV6) ||\ + ((VALUE) == RCC_PLLI2SP_DIV8)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#define IS_RCC_PLLI2SQ_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 15)) +#define IS_RCC_PLLI2SR_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 7)) + +#define IS_RCC_PLLSAIN_VALUE(VALUE) ((50 <= (VALUE)) && ((VALUE) <= 432)) +#define IS_RCC_PLLSAIP_VALUE(VALUE) (((VALUE) == RCC_PLLSAIP_DIV2) ||\ + ((VALUE) == RCC_PLLSAIP_DIV4) ||\ + ((VALUE) == RCC_PLLSAIP_DIV6) ||\ + ((VALUE) == RCC_PLLSAIP_DIV8)) +#define IS_RCC_PLLSAIQ_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 15)) +#define IS_RCC_PLLSAIR_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 7)) + +#define IS_RCC_PLLSAI_DIVQ_VALUE(VALUE) ((1 <= (VALUE)) && ((VALUE) <= 32)) + +#define IS_RCC_PLLI2S_DIVQ_VALUE(VALUE) ((1 <= (VALUE)) && ((VALUE) <= 32)) + +#define IS_RCC_PLLSAI_DIVR_VALUE(VALUE) (((VALUE) == RCC_PLLSAIDIVR_2) ||\ + ((VALUE) == RCC_PLLSAIDIVR_4) ||\ + ((VALUE) == RCC_PLLSAIDIVR_8) ||\ + ((VALUE) == RCC_PLLSAIDIVR_16)) +#define IS_RCC_I2SCLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SCLKSOURCE_PLLI2S) || \ + ((SOURCE) == RCC_I2SCLKSOURCE_EXT)) + +#define IS_RCC_SDMMC1CLKSOURCE(SOURCE) (((SOURCE) == RCC_SDMMC1CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_SDMMC1CLKSOURCE_CLK48)) + +#define IS_RCC_CECCLKSOURCE(SOURCE) (((SOURCE) == RCC_CECCLKSOURCE_HSI) || \ + ((SOURCE) == RCC_CECCLKSOURCE_LSE)) +#define IS_RCC_USART1CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_USART1CLKSOURCE_PCLK2) || \ + ((SOURCE) == RCC_USART1CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_USART1CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_USART1CLKSOURCE_HSI)) + +#define IS_RCC_USART2CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_USART2CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_USART2CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_USART2CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_USART2CLKSOURCE_HSI)) +#define IS_RCC_USART3CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_USART3CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_USART3CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_USART3CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_USART3CLKSOURCE_HSI)) + +#define IS_RCC_UART4CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_UART4CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_UART4CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_UART4CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_UART4CLKSOURCE_HSI)) + +#define IS_RCC_UART5CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_UART5CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_UART5CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_UART5CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_UART5CLKSOURCE_HSI)) + +#define IS_RCC_USART6CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_USART6CLKSOURCE_PCLK2) || \ + ((SOURCE) == RCC_USART6CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_USART6CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_USART6CLKSOURCE_HSI)) + +#define IS_RCC_UART7CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_UART7CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_UART7CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_UART7CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_UART7CLKSOURCE_HSI)) + +#define IS_RCC_UART8CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_UART8CLKSOURCE_PCLK1) || \ + 
((SOURCE) == RCC_UART8CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_UART8CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_UART8CLKSOURCE_HSI)) +#define IS_RCC_I2C1CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_I2C1CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_I2C1CLKSOURCE_SYSCLK)|| \ + ((SOURCE) == RCC_I2C1CLKSOURCE_HSI)) +#define IS_RCC_I2C2CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_I2C2CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_I2C2CLKSOURCE_SYSCLK)|| \ + ((SOURCE) == RCC_I2C2CLKSOURCE_HSI)) + +#define IS_RCC_I2C3CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_I2C3CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_I2C3CLKSOURCE_SYSCLK)|| \ + ((SOURCE) == RCC_I2C3CLKSOURCE_HSI)) +#define IS_RCC_I2C4CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_I2C4CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_I2C4CLKSOURCE_SYSCLK)|| \ + ((SOURCE) == RCC_I2C4CLKSOURCE_HSI)) +#define IS_RCC_LPTIM1CLK(SOURCE) \ + (((SOURCE) == RCC_LPTIM1CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_LSI) || \ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_HSI) || \ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_LSE)) +#define IS_RCC_CLK48SOURCE(SOURCE) \ + (((SOURCE) == RCC_CLK48SOURCE_PLLSAIP) || \ + ((SOURCE) == RCC_CLK48SOURCE_PLL)) +#define IS_RCC_TIMPRES(VALUE) \ + (((VALUE) == RCC_TIMPRES_DESACTIVATED) || \ + ((VALUE) == RCC_TIMPRES_ACTIVATED)) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F745xx) ||\ + defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F730xx) || defined (STM32F750xx) +#define IS_RCC_SAI1CLKSOURCE(SOURCE) (((SOURCE) == RCC_SAI1CLKSOURCE_PLLSAI) || \ + ((SOURCE) == RCC_SAI1CLKSOURCE_PLLI2S) || \ + ((SOURCE) == RCC_SAI1CLKSOURCE_PIN)) +#define IS_RCC_SAI2CLKSOURCE(SOURCE) (((SOURCE) == RCC_SAI2CLKSOURCE_PLLSAI) || \ + ((SOURCE) == RCC_SAI2CLKSOURCE_PLLI2S) || \ + ((SOURCE) == RCC_SAI2CLKSOURCE_PIN)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F745xx || STM32F746xx || STM32F756xx || STM32F750xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define IS_RCC_PLLR_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 7)) + +#define IS_RCC_SAI1CLKSOURCE(SOURCE) (((SOURCE) == RCC_SAI1CLKSOURCE_PLLSAI) || \ + ((SOURCE) == RCC_SAI1CLKSOURCE_PLLI2S) || \ + ((SOURCE) == RCC_SAI1CLKSOURCE_PIN) || \ + ((SOURCE) == RCC_SAI1CLKSOURCE_PLLSRC)) + +#define IS_RCC_SAI2CLKSOURCE(SOURCE) (((SOURCE) == RCC_SAI2CLKSOURCE_PLLSAI) || \ + ((SOURCE) == RCC_SAI2CLKSOURCE_PLLI2S) || \ + ((SOURCE) == RCC_SAI2CLKSOURCE_PIN) || \ + ((SOURCE) == RCC_SAI2CLKSOURCE_PLLSRC)) + +#define IS_RCC_DFSDM1CLKSOURCE(SOURCE) (((SOURCE) == RCC_DFSDM1CLKSOURCE_PCLK2) || \ + ((SOURCE) == RCC_DFSDM1CLKSOURCE_SYSCLK)) + +#define IS_RCC_DFSDM1AUDIOCLKSOURCE(SOURCE) (((SOURCE) == RCC_DFSDM1AUDIOCLKSOURCE_SAI1) || \ + ((SOURCE) == RCC_DFSDM1AUDIOCLKSOURCE_SAI2)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define IS_RCC_SDMMC2CLKSOURCE(SOURCE) (((SOURCE) == RCC_SDMMC2CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_SDMMC2CLKSOURCE_CLK48)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F767xx) || defined (STM32F769xx) || defined 
(STM32F777xx) || defined (STM32F779xx) +#define IS_RCC_DSIBYTELANECLKSOURCE(SOURCE) (((SOURCE) == RCC_DSICLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_DSICLKSOURCE_DSIPHY)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_RCC_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rng.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rng.h new file mode 100644 index 000000000..3245aa70c --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rng.h @@ -0,0 +1,360 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rng.h + * @author MCD Application Team + * @brief Header file of RNG HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_RNG_H +#define STM32F7xx_HAL_RNG_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +#if defined (RNG) + +/** @defgroup RNG RNG + * @brief RNG HAL module driver + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup RNG_Exported_Types RNG Exported Types + * @{ + */ + +/** @defgroup RNG_Exported_Types_Group1 RNG Init Structure definition + * @{ + */ + +/** + * @} + */ + +/** @defgroup RNG_Exported_Types_Group2 RNG State Structure definition + * @{ + */ +typedef enum +{ + HAL_RNG_STATE_RESET = 0x00U, /*!< RNG not yet initialized or disabled */ + HAL_RNG_STATE_READY = 0x01U, /*!< RNG initialized and ready for use */ + HAL_RNG_STATE_BUSY = 0x02U, /*!< RNG internal process is ongoing */ + HAL_RNG_STATE_TIMEOUT = 0x03U, /*!< RNG timeout state */ + HAL_RNG_STATE_ERROR = 0x04U /*!< RNG error state */ + +} HAL_RNG_StateTypeDef; + +/** + * @} + */ + +/** @defgroup RNG_Exported_Types_Group3 RNG Handle Structure definition + * @{ + */ +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) +typedef struct __RNG_HandleTypeDef +#else +typedef struct +#endif /* (USE_HAL_RNG_REGISTER_CALLBACKS) */ +{ + RNG_TypeDef *Instance; /*!< Register base address */ + + HAL_LockTypeDef Lock; /*!< RNG locking object */ + + __IO HAL_RNG_StateTypeDef State; /*!< RNG communication state */ + + __IO uint32_t ErrorCode; /*!< RNG Error code */ + + uint32_t RandomNumber; /*!< Last Generated RNG Data */ + +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) + void (* ReadyDataCallback)(struct __RNG_HandleTypeDef *hrng, uint32_t random32bit); /*!< RNG Data Ready Callback */ + void (* ErrorCallback)(struct __RNG_HandleTypeDef *hrng); /*!< RNG Error Callback */ + + void (* MspInitCallback)(struct __RNG_HandleTypeDef *hrng); /*!< RNG Msp Init callback */ + void (* MspDeInitCallback)(struct __RNG_HandleTypeDef *hrng); /*!< RNG Msp DeInit callback */ +#endif /* USE_HAL_RNG_REGISTER_CALLBACKS */ + +} RNG_HandleTypeDef; + +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) +/** + * @brief HAL RNG Callback ID enumeration definition + */ +typedef enum +{ + HAL_RNG_ERROR_CB_ID = 0x00U, /*!< RNG Error Callback ID */ + + HAL_RNG_MSPINIT_CB_ID = 0x01U, /*!< RNG MspInit callback ID */ + HAL_RNG_MSPDEINIT_CB_ID = 0x02U /*!< RNG MspDeInit callback ID */ + +} HAL_RNG_CallbackIDTypeDef; + +/** + * @brief HAL RNG Callback pointer definition + */ +typedef void (*pRNG_CallbackTypeDef)(RNG_HandleTypeDef *hrng); /*!< pointer to a common RNG callback function */ +typedef void (*pRNG_ReadyDataCallbackTypeDef)(RNG_HandleTypeDef *hrng, uint32_t random32bit); /*!< pointer to an RNG Data Ready specific callback function */ + +#endif /* USE_HAL_RNG_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RNG_Exported_Constants RNG Exported Constants + * @{ + */ + +/** @defgroup RNG_Exported_Constants_Group1 RNG Interrupt definition 
+ * @{ + */ +#define RNG_IT_DRDY RNG_SR_DRDY /*!< Data Ready interrupt */ +#define RNG_IT_CEI RNG_SR_CEIS /*!< Clock error interrupt */ +#define RNG_IT_SEI RNG_SR_SEIS /*!< Seed error interrupt */ +/** + * @} + */ + +/** @defgroup RNG_Exported_Constants_Group2 RNG Flag definition + * @{ + */ +#define RNG_FLAG_DRDY RNG_SR_DRDY /*!< Data ready */ +#define RNG_FLAG_CECS RNG_SR_CECS /*!< Clock error current status */ +#define RNG_FLAG_SECS RNG_SR_SECS /*!< Seed error current status */ +/** + * @} + */ + +/** @defgroup RNG_Error_Definition RNG Error Definition + * @{ + */ +#define HAL_RNG_ERROR_NONE 0x00000000U /*!< No error */ +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) +#define HAL_RNG_ERROR_INVALID_CALLBACK 0x00000001U /*!< Invalid Callback error */ +#endif /* USE_HAL_RNG_REGISTER_CALLBACKS */ +#define HAL_RNG_ERROR_TIMEOUT 0x00000002U /*!< Timeout error */ +#define HAL_RNG_ERROR_BUSY 0x00000004U /*!< Busy error */ +#define HAL_RNG_ERROR_SEED 0x00000008U /*!< Seed error */ +#define HAL_RNG_ERROR_CLOCK 0x00000010U /*!< Clock error */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup RNG_Exported_Macros RNG Exported Macros + * @{ + */ + +/** @brief Reset RNG handle state + * @param __HANDLE__ RNG Handle + * @retval None + */ +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) +#define __HAL_RNG_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->State = HAL_RNG_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0U) +#else +#define __HAL_RNG_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_RNG_STATE_RESET) +#endif /*USE_HAL_RNG_REGISTER_CALLBACKS */ + +/** + * @brief Enables the RNG peripheral. + * @param __HANDLE__ RNG Handle + * @retval None + */ +#define __HAL_RNG_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= RNG_CR_RNGEN) + +/** + * @brief Disables the RNG peripheral. + * @param __HANDLE__ RNG Handle + * @retval None + */ +#define __HAL_RNG_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~RNG_CR_RNGEN) + +/** + * @brief Check the selected RNG flag status. + * @param __HANDLE__ RNG Handle + * @param __FLAG__ RNG flag + * This parameter can be one of the following values: + * @arg RNG_FLAG_DRDY: Data ready + * @arg RNG_FLAG_CECS: Clock error current status + * @arg RNG_FLAG_SECS: Seed error current status + * @retval The new state of __FLAG__ (SET or RESET). + */ +#define __HAL_RNG_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->SR & (__FLAG__)) == (__FLAG__)) + +/** + * @brief Clears the selected RNG flag status. + * @param __HANDLE__ RNG handle + * @param __FLAG__ RNG flag to clear + * @note WARNING: This is a dummy macro for HAL code alignment, + * flags RNG_FLAG_DRDY, RNG_FLAG_CECS and RNG_FLAG_SECS are read-only. + * @retval None + */ +#define __HAL_RNG_CLEAR_FLAG(__HANDLE__, __FLAG__) /* dummy macro */ + +/** + * @brief Enables the RNG interrupts. + * @param __HANDLE__ RNG Handle + * @retval None + */ +#define __HAL_RNG_ENABLE_IT(__HANDLE__) ((__HANDLE__)->Instance->CR |= RNG_CR_IE) + +/** + * @brief Disables the RNG interrupts. + * @param __HANDLE__ RNG Handle + * @retval None + */ +#define __HAL_RNG_DISABLE_IT(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~RNG_CR_IE) + +/** + * @brief Checks whether the specified RNG interrupt has occurred or not. + * @param __HANDLE__ RNG Handle + * @param __INTERRUPT__ specifies the RNG interrupt status flag to check. 
+ * This parameter can be one of the following values: + * @arg RNG_IT_DRDY: Data ready interrupt + * @arg RNG_IT_CEI: Clock error interrupt + * @arg RNG_IT_SEI: Seed error interrupt + * @retval The new state of __INTERRUPT__ (SET or RESET). + */ +#define __HAL_RNG_GET_IT(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->SR & (__INTERRUPT__)) == (__INTERRUPT__)) + +/** + * @brief Clear the RNG interrupt status flags. + * @param __HANDLE__ RNG Handle + * @param __INTERRUPT__ specifies the RNG interrupt status flag to clear. + * This parameter can be one of the following values: + * @arg RNG_IT_CEI: Clock error interrupt + * @arg RNG_IT_SEI: Seed error interrupt + * @note RNG_IT_DRDY flag is read-only, reading RNG_DR register automatically clears RNG_IT_DRDY. + * @retval None + */ +#define __HAL_RNG_CLEAR_IT(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->SR) = ~(__INTERRUPT__)) + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup RNG_Exported_Functions RNG Exported Functions + * @{ + */ + +/** @defgroup RNG_Exported_Functions_Group1 Initialization and configuration functions + * @{ + */ +HAL_StatusTypeDef HAL_RNG_Init(RNG_HandleTypeDef *hrng); +HAL_StatusTypeDef HAL_RNG_DeInit(RNG_HandleTypeDef *hrng); +void HAL_RNG_MspInit(RNG_HandleTypeDef *hrng); +void HAL_RNG_MspDeInit(RNG_HandleTypeDef *hrng); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_RNG_RegisterCallback(RNG_HandleTypeDef *hrng, HAL_RNG_CallbackIDTypeDef CallbackID, pRNG_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_RNG_UnRegisterCallback(RNG_HandleTypeDef *hrng, HAL_RNG_CallbackIDTypeDef CallbackID); + +HAL_StatusTypeDef HAL_RNG_RegisterReadyDataCallback(RNG_HandleTypeDef *hrng, pRNG_ReadyDataCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_RNG_UnRegisterReadyDataCallback(RNG_HandleTypeDef *hrng); +#endif /* USE_HAL_RNG_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup RNG_Exported_Functions_Group2 Peripheral Control functions + * @{ + */ +uint32_t HAL_RNG_GetRandomNumber(RNG_HandleTypeDef *hrng); /* Obsolete, use HAL_RNG_GenerateRandomNumber() instead */ +uint32_t HAL_RNG_GetRandomNumber_IT(RNG_HandleTypeDef *hrng); /* Obsolete, use HAL_RNG_GenerateRandomNumber_IT() instead */ +HAL_StatusTypeDef HAL_RNG_GenerateRandomNumber(RNG_HandleTypeDef *hrng, uint32_t *random32bit); +HAL_StatusTypeDef HAL_RNG_GenerateRandomNumber_IT(RNG_HandleTypeDef *hrng); +uint32_t HAL_RNG_ReadLastRandomNumber(RNG_HandleTypeDef *hrng); + +void HAL_RNG_IRQHandler(RNG_HandleTypeDef *hrng); +void HAL_RNG_ErrorCallback(RNG_HandleTypeDef *hrng); +void HAL_RNG_ReadyDataCallback(RNG_HandleTypeDef *hrng, uint32_t random32bit); + +/** + * @} + */ + +/** @defgroup RNG_Exported_Functions_Group3 Peripheral State functions + * @{ + */ +HAL_RNG_StateTypeDef HAL_RNG_GetState(RNG_HandleTypeDef *hrng); +uint32_t HAL_RNG_GetError(RNG_HandleTypeDef *hrng); +/** + * @} + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup RNG_Private_Macros RNG Private Macros + * @{ + */ +#define IS_RNG_IT(IT) (((IT) == RNG_IT_CEI) || \ + ((IT) == RNG_IT_SEI)) + +#define IS_RNG_FLAG(FLAG) (((FLAG) == RNG_FLAG_DRDY) || \ + ((FLAG) == RNG_FLAG_CECS) || \ + ((FLAG) == RNG_FLAG_SECS)) + +/** + * @} + */ + +/** + * @} + */ + +#endif /* RNG */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* STM32F7xx_HAL_RNG_H */ + 
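The RNG header above only declares the driver API. For orientation, here is a minimal usage sketch; it is not part of the generated sources, and the handle name hrng and wrapper function rng_get_word are illustrative assumptions — only the HAL_RNG_* calls and types come from the declarations above.

/* Illustrative sketch only -- not part of the generated header. */
#include "stm32f7xx_hal.h"

static RNG_HandleTypeDef hrng;                  /* handle name is an assumption */

HAL_StatusTypeDef rng_get_word(uint32_t *out)   /* wrapper name is an assumption */
{
    /* Lazily initialize on first use; HAL_RNG_Init() invokes HAL_RNG_MspInit(),
       where the RNG peripheral clock is normally enabled. */
    if (hrng.State == HAL_RNG_STATE_RESET)
    {
        hrng.Instance = RNG;                    /* RNG instance comes from the device header */
        if (HAL_RNG_Init(&hrng) != HAL_OK)
        {
            return HAL_ERROR;
        }
    }
    /* Blocking generation of one 32-bit word; HAL_RNG_GenerateRandomNumber_IT()
       together with HAL_RNG_ReadyDataCallback() is the non-blocking alternative. */
    return HAL_RNG_GenerateRandomNumber(&hrng, out);
}

The sketch deliberately uses HAL_RNG_GenerateRandomNumber() rather than HAL_RNG_GetRandomNumber(), which the header itself marks as obsolete.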
+/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rtc.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rtc.h new file mode 100644 index 000000000..87feafad8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rtc.h @@ -0,0 +1,857 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rtc.h + * @author MCD Application Team + * @brief Header file of RTC HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_RTC_H +#define __STM32F7xx_HAL_RTC_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup RTC + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup RTC_Exported_Types RTC Exported Types + * @{ + */ + +/** + * @brief HAL State structures definition + */ +typedef enum +{ + HAL_RTC_STATE_RESET = 0x00U, /*!< RTC not yet initialized or disabled */ + HAL_RTC_STATE_READY = 0x01U, /*!< RTC initialized and ready for use */ + HAL_RTC_STATE_BUSY = 0x02U, /*!< RTC process is ongoing */ + HAL_RTC_STATE_TIMEOUT = 0x03U, /*!< RTC timeout state */ + HAL_RTC_STATE_ERROR = 0x04U /*!< RTC error state */ + +}HAL_RTCStateTypeDef; + +/** + * @brief RTC Configuration Structure definition + */ +typedef struct +{ + uint32_t HourFormat; /*!< Specifies the RTC Hour Format. + This parameter can be a value of @ref RTC_Hour_Formats */ + + uint32_t AsynchPrediv; /*!< Specifies the RTC Asynchronous Predivider value. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0x7F */ + + uint32_t SynchPrediv; /*!< Specifies the RTC Synchronous Predivider value. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0x7FFF */ + + uint32_t OutPut; /*!< Specifies which signal will be routed to the RTC output. + This parameter can be a value of @ref RTCEx_Output_selection_Definitions */ + + uint32_t OutPutPolarity; /*!< Specifies the polarity of the output signal. + This parameter can be a value of @ref RTC_Output_Polarity_Definitions */ + + uint32_t OutPutType; /*!< Specifies the RTC Output Pin mode. + This parameter can be a value of @ref RTC_Output_Type_ALARM_OUT */ +}RTC_InitTypeDef; + +/** + * @brief RTC Time structure definition + */ +typedef struct +{ + uint8_t Hours; /*!< Specifies the RTC Time Hour. + This parameter must be a number between Min_Data = 0 and Max_Data = 12 if the RTC_HourFormat_12 is selected. + This parameter must be a number between Min_Data = 0 and Max_Data = 23 if the RTC_HourFormat_24 is selected */ + + uint8_t Minutes; /*!< Specifies the RTC Time Minutes. + This parameter must be a number between Min_Data = 0 and Max_Data = 59 */ + + uint8_t Seconds; /*!< Specifies the RTC Time Seconds. + This parameter must be a number between Min_Data = 0 and Max_Data = 59 */ + + uint32_t SubSeconds; /*!< Specifies the RTC_SSR RTC Sub Second register content. + This parameter corresponds to a time unit range between [0-1] Second + with [1 Sec / SecondFraction +1] granularity */ + + uint32_t SecondFraction; /*!< Specifies the range or granularity of Sub Second register content + corresponding to Synchronous pre-scaler factor value (PREDIV_S) + This parameter corresponds to a time unit range between [0-1] Second + with [1 Sec / SecondFraction +1] granularity. + This field will be used only by HAL_RTC_GetTime function */ + + uint8_t TimeFormat; /*!< Specifies the RTC AM/PM Time. 
+ This parameter can be a value of @ref RTC_AM_PM_Definitions */ + + uint32_t DayLightSaving; /*!< Specifies RTC_DayLightSaveOperation: the value of hour adjustment. + This parameter can be a value of @ref RTC_DayLightSaving_Definitions */ + + uint32_t StoreOperation; /*!< Specifies RTC_StoreOperation value to be written in the BKP bit + in CR register to store the operation. + This parameter can be a value of @ref RTC_StoreOperation_Definitions */ +}RTC_TimeTypeDef; + +/** + * @brief RTC Date structure definition + */ +typedef struct +{ + uint8_t WeekDay; /*!< Specifies the RTC Date WeekDay. + This parameter can be a value of @ref RTC_WeekDay_Definitions */ + + uint8_t Month; /*!< Specifies the RTC Date Month (in BCD format). + This parameter can be a value of @ref RTC_Month_Date_Definitions */ + + uint8_t Date; /*!< Specifies the RTC Date. + This parameter must be a number between Min_Data = 1 and Max_Data = 31 */ + + uint8_t Year; /*!< Specifies the RTC Date Year. + This parameter must be a number between Min_Data = 0 and Max_Data = 99 */ + +}RTC_DateTypeDef; + +/** + * @brief RTC Alarm structure definition + */ +typedef struct +{ + RTC_TimeTypeDef AlarmTime; /*!< Specifies the RTC Alarm Time members */ + + uint32_t AlarmMask; /*!< Specifies the RTC Alarm Masks. + This parameter can be a value of @ref RTC_AlarmMask_Definitions */ + + uint32_t AlarmSubSecondMask; /*!< Specifies the RTC Alarm SubSeconds Masks. + This parameter can be a value of @ref RTC_Alarm_Sub_Seconds_Masks_Definitions */ + + uint32_t AlarmDateWeekDaySel; /*!< Specifies the RTC Alarm is on Date or WeekDay. + This parameter can be a value of @ref RTC_AlarmDateWeekDay_Definitions */ + + uint8_t AlarmDateWeekDay; /*!< Specifies the RTC Alarm Date/WeekDay. + If the Alarm Date is selected, this parameter must be set to a value in the 1-31 range. + If the Alarm WeekDay is selected, this parameter can be a value of @ref RTC_WeekDay_Definitions */ + + uint32_t Alarm; /*!< Specifies the alarm . 
+ This parameter can be a value of @ref RTC_Alarms_Definitions */ +}RTC_AlarmTypeDef; + +/** + * @brief RTC Handle Structure definition + */ +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) +typedef struct __RTC_HandleTypeDef +#else +typedef struct +#endif /* (USE_HAL_RTC_REGISTER_CALLBACKS) */ +{ + RTC_TypeDef *Instance; /*!< Register base address */ + + RTC_InitTypeDef Init; /*!< RTC required parameters */ + + HAL_LockTypeDef Lock; /*!< RTC locking object */ + + __IO HAL_RTCStateTypeDef State; /*!< Time communication state */ + +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) + void (* AlarmAEventCallback) ( struct __RTC_HandleTypeDef * hrtc); /*!< RTC Alarm A Event callback */ + + void (* AlarmBEventCallback) ( struct __RTC_HandleTypeDef * hrtc); /*!< RTC Alarm B Event callback */ + + void (* TimeStampEventCallback) ( struct __RTC_HandleTypeDef * hrtc); /*!< RTC TimeStamp Event callback */ + + void (* WakeUpTimerEventCallback) ( struct __RTC_HandleTypeDef * hrtc); /*!< RTC WakeUpTimer Event callback */ + + void (* Tamper1EventCallback) ( struct __RTC_HandleTypeDef * hrtc); /*!< RTC Tamper 1 Event callback */ + + void (* Tamper2EventCallback) ( struct __RTC_HandleTypeDef * hrtc); /*!< RTC Tamper 2 Event callback */ + + void (* Tamper3EventCallback) ( struct __RTC_HandleTypeDef * hrtc); /*!< RTC Tamper 3 Event callback */ + + void (* MspInitCallback) ( struct __RTC_HandleTypeDef * hrtc); /*!< RTC Msp Init callback */ + + void (* MspDeInitCallback) ( struct __RTC_HandleTypeDef * hrtc); /*!< RTC Msp DeInit callback */ + +#endif /* (USE_HAL_RTC_REGISTER_CALLBACKS) */ + +}RTC_HandleTypeDef; + +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) +/** + * @brief HAL RTC Callback ID enumeration definition + */ +typedef enum +{ + HAL_RTC_ALARM_A_EVENT_CB_ID = 0x00u, /*!< RTC Alarm A Event Callback ID */ + HAL_RTC_ALARM_B_EVENT_CB_ID = 0x01u, /*!< RTC Alarm B Event Callback ID */ + HAL_RTC_TIMESTAMP_EVENT_CB_ID = 0x02u, /*!< RTC TimeStamp Event Callback ID */ + HAL_RTC_WAKEUPTIMER_EVENT_CB_ID = 0x03u, /*!< RTC Wake-Up Timer Event Callback ID */ + HAL_RTC_TAMPER1_EVENT_CB_ID = 0x04u, /*!< RTC Tamper 1 Callback ID */ + HAL_RTC_TAMPER2_EVENT_CB_ID = 0x05u, /*!< RTC Tamper 2 Callback ID */ + HAL_RTC_TAMPER3_EVENT_CB_ID = 0x06u, /*!< RTC Tamper 3 Callback ID */ + HAL_RTC_MSPINIT_CB_ID = 0x0Eu, /*!< RTC Msp Init callback ID */ + HAL_RTC_MSPDEINIT_CB_ID = 0x0Fu /*!< RTC Msp DeInit callback ID */ +}HAL_RTC_CallbackIDTypeDef; + +/** + * @brief HAL RTC Callback pointer definition + */ +typedef void (*pRTC_CallbackTypeDef)(RTC_HandleTypeDef * hrtc); /*!< pointer to an RTC callback function */ +#endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RTC_Exported_Constants RTC Exported Constants + * @{ + */ + +/** @defgroup RTC_Hour_Formats RTC Hour Formats + * @{ + */ +#define RTC_HOURFORMAT_24 0x00000000U +#define RTC_HOURFORMAT_12 0x00000040U +/** + * @} + */ + + +/** @defgroup RTC_Output_Polarity_Definitions RTC Output Polarity Definitions + * @{ + */ +#define RTC_OUTPUT_POLARITY_HIGH 0x00000000U +#define RTC_OUTPUT_POLARITY_LOW 0x00100000U +/** + * @} + */ + +/** @defgroup RTC_Output_Type_ALARM_OUT RTC Output Type ALARM OUT + * @{ + */ +#define RTC_OUTPUT_TYPE_OPENDRAIN 0x00000000U +#define RTC_OUTPUT_TYPE_PUSHPULL RTC_OR_ALARMTYPE /* 0x00000008 */ +/** + * @} + */ + +/** @defgroup RTC_AM_PM_Definitions RTC AM PM Definitions + * @{ + */ +#define RTC_HOURFORMAT12_AM ((uint8_t)0x00U) +#define RTC_HOURFORMAT12_PM 
((uint8_t)0x40U) +/** + * @} + */ + +/** @defgroup RTC_DayLightSaving_Definitions RTC DayLight Saving Definitions + * @{ + */ +#define RTC_DAYLIGHTSAVING_SUB1H 0x00020000U +#define RTC_DAYLIGHTSAVING_ADD1H 0x00010000U +#define RTC_DAYLIGHTSAVING_NONE 0x00000000U +/** + * @} + */ + +/** @defgroup RTC_StoreOperation_Definitions RTC Store Operation Definitions + * @{ + */ +#define RTC_STOREOPERATION_RESET 0x00000000U +#define RTC_STOREOPERATION_SET 0x00040000U +/** + * @} + */ + +/** @defgroup RTC_Input_parameter_format_definitions RTC Input Parameter Format Definitions + * @{ + */ +#define RTC_FORMAT_BIN 0x00000000U +#define RTC_FORMAT_BCD 0x00000001U +/** + * @} + */ + +/** @defgroup RTC_Month_Date_Definitions RTC Month Date Definitions + * @{ + */ +/* Coded in BCD format */ +#define RTC_MONTH_JANUARY ((uint8_t)0x01U) +#define RTC_MONTH_FEBRUARY ((uint8_t)0x02U) +#define RTC_MONTH_MARCH ((uint8_t)0x03U) +#define RTC_MONTH_APRIL ((uint8_t)0x04U) +#define RTC_MONTH_MAY ((uint8_t)0x05U) +#define RTC_MONTH_JUNE ((uint8_t)0x06U) +#define RTC_MONTH_JULY ((uint8_t)0x07U) +#define RTC_MONTH_AUGUST ((uint8_t)0x08U) +#define RTC_MONTH_SEPTEMBER ((uint8_t)0x09U) +#define RTC_MONTH_OCTOBER ((uint8_t)0x10U) +#define RTC_MONTH_NOVEMBER ((uint8_t)0x11U) +#define RTC_MONTH_DECEMBER ((uint8_t)0x12U) +/** + * @} + */ + +/** @defgroup RTC_WeekDay_Definitions RTC WeekDay Definitions + * @{ + */ +#define RTC_WEEKDAY_MONDAY ((uint8_t)0x01U) +#define RTC_WEEKDAY_TUESDAY ((uint8_t)0x02U) +#define RTC_WEEKDAY_WEDNESDAY ((uint8_t)0x03U) +#define RTC_WEEKDAY_THURSDAY ((uint8_t)0x04U) +#define RTC_WEEKDAY_FRIDAY ((uint8_t)0x05U) +#define RTC_WEEKDAY_SATURDAY ((uint8_t)0x06U) +#define RTC_WEEKDAY_SUNDAY ((uint8_t)0x07U) +/** + * @} + */ + +/** @defgroup RTC_AlarmDateWeekDay_Definitions RTC Alarm Date WeekDay Definitions + * @{ + */ +#define RTC_ALARMDATEWEEKDAYSEL_DATE 0x00000000U +#define RTC_ALARMDATEWEEKDAYSEL_WEEKDAY 0x40000000U +/** + * @} + */ + +/** @defgroup RTC_AlarmMask_Definitions RTC Alarm Mask Definitions + * @{ + */ +#define RTC_ALARMMASK_NONE 0x00000000U +#define RTC_ALARMMASK_DATEWEEKDAY RTC_ALRMAR_MSK4 +#define RTC_ALARMMASK_HOURS RTC_ALRMAR_MSK3 +#define RTC_ALARMMASK_MINUTES RTC_ALRMAR_MSK2 +#define RTC_ALARMMASK_SECONDS RTC_ALRMAR_MSK1 +#define RTC_ALARMMASK_ALL 0x80808080U +/** + * @} + */ + +/** @defgroup RTC_Alarms_Definitions RTC Alarms Definitions + * @{ + */ +#define RTC_ALARM_A RTC_CR_ALRAE +#define RTC_ALARM_B RTC_CR_ALRBE +/** + * @} + */ + +/** @defgroup RTC_Alarm_Sub_Seconds_Masks_Definitions RTC Alarm Sub Seconds Masks Definitions + * @{ + */ +#define RTC_ALARMSUBSECONDMASK_ALL 0x00000000U /*!< All Alarm SS fields are masked. + There is no comparison on sub seconds + for Alarm */ +#define RTC_ALARMSUBSECONDMASK_SS14_1 0x01000000U /*!< SS[14:1] are don't care in Alarm + comparison. Only SS[0] is compared. */ +#define RTC_ALARMSUBSECONDMASK_SS14_2 0x02000000U /*!< SS[14:2] are don't care in Alarm + comparison. Only SS[1:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_3 0x03000000U /*!< SS[14:3] are don't care in Alarm + comparison. Only SS[2:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_4 0x04000000U /*!< SS[14:4] are don't care in Alarm + comparison. Only SS[3:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_5 0x05000000U /*!< SS[14:5] are don't care in Alarm + comparison. Only SS[4:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_6 0x06000000U /*!< SS[14:6] are don't care in Alarm + comparison. 
Only SS[5:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_7 0x07000000U /*!< SS[14:7] are don't care in Alarm + comparison. Only SS[6:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_8 0x08000000U /*!< SS[14:8] are don't care in Alarm + comparison. Only SS[7:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_9 0x09000000U /*!< SS[14:9] are don't care in Alarm + comparison. Only SS[8:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_10 0x0A000000U /*!< SS[14:10] are don't care in Alarm + comparison. Only SS[9:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_11 0x0B000000U /*!< SS[14:11] are don't care in Alarm + comparison. Only SS[10:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_12 0x0C000000U /*!< SS[14:12] are don't care in Alarm + comparison.Only SS[11:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14_13 0x0D000000U /*!< SS[14:13] are don't care in Alarm + comparison. Only SS[12:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_SS14 0x0E000000U /*!< SS[14] is don't care in Alarm + comparison.Only SS[13:0] are compared */ +#define RTC_ALARMSUBSECONDMASK_NONE 0x0F000000U /*!< SS[14:0] are compared and must match + to activate alarm. */ +/** + * @} + */ + +/** @defgroup RTC_Interrupts_Definitions RTC Interrupts Definitions + * @{ + */ +#define RTC_IT_TS RTC_CR_TSIE +#define RTC_IT_WUT RTC_CR_WUTIE +#define RTC_IT_ALRA RTC_CR_ALRAIE +#define RTC_IT_ALRB RTC_CR_ALRBIE +#define RTC_IT_TAMP RTC_TAMPCR_TAMPIE /* Used only to Enable the Tamper Interrupt */ +#define RTC_IT_TAMP1 RTC_TAMPCR_TAMP1IE +#define RTC_IT_TAMP2 RTC_TAMPCR_TAMP2IE +#define RTC_IT_TAMP3 RTC_TAMPCR_TAMP3IE +/** + * @} + */ + +/** @defgroup RTC_Flags_Definitions RTC Flags Definitions + * @{ + */ +#define RTC_FLAG_RECALPF RTC_ISR_RECALPF +#define RTC_FLAG_TAMP3F RTC_ISR_TAMP3F +#define RTC_FLAG_TAMP2F RTC_ISR_TAMP2F +#define RTC_FLAG_TAMP1F RTC_ISR_TAMP1F +#define RTC_FLAG_TSOVF RTC_ISR_TSOVF +#define RTC_FLAG_TSF RTC_ISR_TSF +#define RTC_FLAG_ITSF RTC_ISR_ITSF +#define RTC_FLAG_WUTF RTC_ISR_WUTF +#define RTC_FLAG_ALRBF RTC_ISR_ALRBF +#define RTC_FLAG_ALRAF RTC_ISR_ALRAF +#define RTC_FLAG_INITF RTC_ISR_INITF +#define RTC_FLAG_RSF RTC_ISR_RSF +#define RTC_FLAG_INITS RTC_ISR_INITS +#define RTC_FLAG_SHPF RTC_ISR_SHPF +#define RTC_FLAG_WUTWF RTC_ISR_WUTWF +#define RTC_FLAG_ALRBWF RTC_ISR_ALRBWF +#define RTC_FLAG_ALRAWF RTC_ISR_ALRAWF +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup RTC_Exported_Macros RTC Exported Macros + * @{ + */ + +/** @brief Reset RTC handle state + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) +#define __HAL_RTC_RESET_HANDLE_STATE(__HANDLE__) do{\ + (__HANDLE__)->State = HAL_RTC_STATE_RESET;\ + (__HANDLE__)->MspInitCallback = NULL;\ + (__HANDLE__)->MspDeInitCallback = NULL;\ + }while(0u) +#else +#define __HAL_RTC_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_RTC_STATE_RESET) +#endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ + +/** + * @brief Disable the write protection for RTC registers. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_WRITEPROTECTION_DISABLE(__HANDLE__) \ + do{ \ + (__HANDLE__)->Instance->WPR = 0xCAU; \ + (__HANDLE__)->Instance->WPR = 0x53U; \ + } while(0U) + +/** + * @brief Enable the write protection for RTC registers. + * @param __HANDLE__ specifies the RTC handle. 
+ * @retval None + */ +#define __HAL_RTC_WRITEPROTECTION_ENABLE(__HANDLE__) \ + do{ \ + (__HANDLE__)->Instance->WPR = 0xFFU; \ + } while(0U) + +/** + * @brief Enable the RTC ALARMA peripheral. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_ALARMA_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= (RTC_CR_ALRAE)) + +/** + * @brief Disable the RTC ALARMA peripheral. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_ALARMA_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~(RTC_CR_ALRAE)) + +/** + * @brief Enable the RTC ALARMB peripheral. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_ALARMB_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= (RTC_CR_ALRBE)) + +/** + * @brief Disable the RTC ALARMB peripheral. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_ALARMB_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~(RTC_CR_ALRBE)) + +/** + * @brief Enable the RTC Alarm interrupt. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC Alarm interrupt sources to be enabled or disabled. + * This parameter can be any combination of the following values: + * @arg RTC_IT_ALRA: Alarm A interrupt + * @arg RTC_IT_ALRB: Alarm B interrupt + * @retval None + */ +#define __HAL_RTC_ALARM_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR |= (__INTERRUPT__)) + +/** + * @brief Disable the RTC Alarm interrupt. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC Alarm interrupt sources to be enabled or disabled. + * This parameter can be any combination of the following values: + * @arg RTC_IT_ALRA: Alarm A interrupt + * @arg RTC_IT_ALRB: Alarm B interrupt + * @retval None + */ +#define __HAL_RTC_ALARM_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR &= ~(__INTERRUPT__)) + +/** + * @brief Check whether the specified RTC Alarm interrupt has occurred or not. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC Alarm interrupt to check. + * This parameter can be: + * @arg RTC_IT_ALRA: Alarm A interrupt + * @arg RTC_IT_ALRB: Alarm B interrupt + * @retval None + */ +#define __HAL_RTC_ALARM_GET_IT(__HANDLE__, __INTERRUPT__) ((((((__HANDLE__)->Instance->ISR)& ((__INTERRUPT__)>> 4U)) & 0x0000FFFFU) != RESET)? SET : RESET) + +/** + * @brief Get the selected RTC Alarm's flag status. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC Alarm Flag to check. + * This parameter can be: + * @arg RTC_FLAG_ALRAF + * @arg RTC_FLAG_ALRBF + * @arg RTC_FLAG_ALRAWF + * @arg RTC_FLAG_ALRBWF + * @retval None + */ +#define __HAL_RTC_ALARM_GET_FLAG(__HANDLE__, __FLAG__) (((((__HANDLE__)->Instance->ISR) & (__FLAG__)) != RESET)? SET : RESET) + +/** + * @brief Clear the RTC Alarm's pending flags. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC Alarm Flag sources to be enabled or disabled. + * This parameter can be: + * @arg RTC_FLAG_ALRAF + * @arg RTC_FLAG_ALRBF + * @retval None + */ +#define __HAL_RTC_ALARM_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->ISR) = (~(((__FLAG__) | RTC_ISR_INIT)& 0x0000FFFFU)|((__HANDLE__)->Instance->ISR & RTC_ISR_INIT)) + +/** + * @brief Check whether the specified RTC Alarm interrupt has been enabled or not. + * @param __HANDLE__ specifies the RTC handle. 
+ * @param __INTERRUPT__ specifies the RTC Alarm interrupt sources to check. + * This parameter can be: + * @arg RTC_IT_ALRA: Alarm A interrupt + * @arg RTC_IT_ALRB: Alarm B interrupt + * @retval None + */ +#define __HAL_RTC_ALARM_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((((__HANDLE__)->Instance->CR) & (__INTERRUPT__)) != RESET) ? SET : RESET) + +/** + * @brief Enable interrupt on the RTC Alarm associated Exti line. + * @retval None + */ +#define __HAL_RTC_ALARM_EXTI_ENABLE_IT() (EXTI->IMR |= RTC_EXTI_LINE_ALARM_EVENT) + +/** + * @brief Disable interrupt on the RTC Alarm associated Exti line. + * @retval None + */ +#define __HAL_RTC_ALARM_EXTI_DISABLE_IT() (EXTI->IMR &= ~(RTC_EXTI_LINE_ALARM_EVENT)) + +/** + * @brief Enable event on the RTC Alarm associated Exti line. + * @retval None. + */ +#define __HAL_RTC_ALARM_EXTI_ENABLE_EVENT() (EXTI->EMR |= RTC_EXTI_LINE_ALARM_EVENT) + +/** + * @brief Disable event on the RTC Alarm associated Exti line. + * @retval None. + */ +#define __HAL_RTC_ALARM_EXTI_DISABLE_EVENT() (EXTI->EMR &= ~(RTC_EXTI_LINE_ALARM_EVENT)) + +/** + * @brief Enable falling edge trigger on the RTC Alarm associated Exti line. + * @retval None. + */ +#define __HAL_RTC_ALARM_EXTI_ENABLE_FALLING_EDGE() (EXTI->FTSR |= RTC_EXTI_LINE_ALARM_EVENT) + +/** + * @brief Disable falling edge trigger on the RTC Alarm associated Exti line. + * @retval None. + */ +#define __HAL_RTC_ALARM_EXTI_DISABLE_FALLING_EDGE() (EXTI->FTSR &= ~(RTC_EXTI_LINE_ALARM_EVENT)) + +/** + * @brief Enable rising edge trigger on the RTC Alarm associated Exti line. + * @retval None. + */ +#define __HAL_RTC_ALARM_EXTI_ENABLE_RISING_EDGE() (EXTI->RTSR |= RTC_EXTI_LINE_ALARM_EVENT) + +/** + * @brief Disable rising edge trigger on the RTC Alarm associated Exti line. + * @retval None. + */ +#define __HAL_RTC_ALARM_EXTI_DISABLE_RISING_EDGE() (EXTI->RTSR &= ~(RTC_EXTI_LINE_ALARM_EVENT)) + +/** + * @brief Enable rising & falling edge trigger on the RTC Alarm associated Exti line. + * @retval None. + */ +#define __HAL_RTC_ALARM_EXTI_ENABLE_RISING_FALLING_EDGE() __HAL_RTC_ALARM_EXTI_ENABLE_RISING_EDGE();__HAL_RTC_ALARM_EXTI_ENABLE_FALLING_EDGE(); + +/** + * @brief Disable rising & falling edge trigger on the RTC Alarm associated Exti line. + * @retval None. + */ +#define __HAL_RTC_ALARM_EXTI_DISABLE_RISING_FALLING_EDGE() __HAL_RTC_ALARM_EXTI_DISABLE_RISING_EDGE();__HAL_RTC_ALARM_EXTI_DISABLE_FALLING_EDGE(); + +/** + * @brief Check whether the RTC Alarm associated Exti line interrupt flag is set or not. + * @retval Line Status. + */ +#define __HAL_RTC_ALARM_EXTI_GET_FLAG() (EXTI->PR & RTC_EXTI_LINE_ALARM_EVENT) + +/** + * @brief Clear the RTC Alarm associated Exti line flag. + * @retval None. + */ +#define __HAL_RTC_ALARM_EXTI_CLEAR_FLAG() (EXTI->PR = RTC_EXTI_LINE_ALARM_EVENT) + +/** + * @brief Generate a Software interrupt on RTC Alarm associated Exti line. + * @retval None. 
+ */ +#define __HAL_RTC_ALARM_EXTI_GENERATE_SWIT() (EXTI->SWIER |= RTC_EXTI_LINE_ALARM_EVENT) +/** + * @} + */ + +/* Include RTC HAL Extension module */ +#include "stm32f7xx_hal_rtc_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup RTC_Exported_Functions + * @{ + */ + +/** @addtogroup RTC_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions ****************************/ +HAL_StatusTypeDef HAL_RTC_Init(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTC_DeInit(RTC_HandleTypeDef *hrtc); +void HAL_RTC_MspInit(RTC_HandleTypeDef *hrtc); +void HAL_RTC_MspDeInit(RTC_HandleTypeDef *hrtc); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_RTC_RegisterCallback(RTC_HandleTypeDef *hrtc, HAL_RTC_CallbackIDTypeDef CallbackID, pRTC_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_RTC_UnRegisterCallback(RTC_HandleTypeDef *hrtc, HAL_RTC_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @addtogroup RTC_Exported_Functions_Group2 + * @{ + */ +/* RTC Time and Date functions ************************************************/ +HAL_StatusTypeDef HAL_RTC_SetTime(RTC_HandleTypeDef *hrtc, RTC_TimeTypeDef *sTime, uint32_t Format); +HAL_StatusTypeDef HAL_RTC_GetTime(RTC_HandleTypeDef *hrtc, RTC_TimeTypeDef *sTime, uint32_t Format); +HAL_StatusTypeDef HAL_RTC_SetDate(RTC_HandleTypeDef *hrtc, RTC_DateTypeDef *sDate, uint32_t Format); +HAL_StatusTypeDef HAL_RTC_GetDate(RTC_HandleTypeDef *hrtc, RTC_DateTypeDef *sDate, uint32_t Format); +/** + * @} + */ + +/** @addtogroup RTC_Exported_Functions_Group3 + * @{ + */ +/* RTC Alarm functions ********************************************************/ +HAL_StatusTypeDef HAL_RTC_SetAlarm(RTC_HandleTypeDef *hrtc, RTC_AlarmTypeDef *sAlarm, uint32_t Format); +HAL_StatusTypeDef HAL_RTC_SetAlarm_IT(RTC_HandleTypeDef *hrtc, RTC_AlarmTypeDef *sAlarm, uint32_t Format); +HAL_StatusTypeDef HAL_RTC_DeactivateAlarm(RTC_HandleTypeDef *hrtc, uint32_t Alarm); +HAL_StatusTypeDef HAL_RTC_GetAlarm(RTC_HandleTypeDef *hrtc, RTC_AlarmTypeDef *sAlarm, uint32_t Alarm, uint32_t Format); +void HAL_RTC_AlarmIRQHandler(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTC_PollForAlarmAEvent(RTC_HandleTypeDef *hrtc, uint32_t Timeout); +void HAL_RTC_AlarmAEventCallback(RTC_HandleTypeDef *hrtc); +/** + * @} + */ + +/** @addtogroup RTC_Exported_Functions_Group4 + * @{ + */ +/* Peripheral Control functions ***********************************************/ +HAL_StatusTypeDef HAL_RTC_WaitForSynchro(RTC_HandleTypeDef* hrtc); +/** + * @} + */ + +/** @addtogroup RTC_Exported_Functions_Group5 + * @{ + */ +/* Peripheral State functions *************************************************/ +HAL_RTCStateTypeDef HAL_RTC_GetState(RTC_HandleTypeDef *hrtc); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup RTC_Private_Constants RTC Private Constants + * @{ + */ +/* Masks Definition */ +#define RTC_TR_RESERVED_MASK 0x007F7F7FU +#define RTC_DR_RESERVED_MASK 0x00FFFF3FU +#define RTC_INIT_MASK 0xFFFFFFFFU +#define RTC_RSF_MASK 0xFFFFFF5FU + +#define RTC_TIMEOUT_VALUE 1000U + +#define RTC_EXTI_LINE_ALARM_EVENT EXTI_IMR_IM17 /*!< External interrupt line 
17 Connected to the RTC Alarm event */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup RTC_Private_Macros RTC Private Macros + * @{ + */ + +/** @defgroup RTC_IS_RTC_Definitions RTC Private macros to check input parameters + * @{ + */ +#define IS_RTC_HOUR_FORMAT(__FORMAT__) (((__FORMAT__) == RTC_HOURFORMAT_12) || \ + ((__FORMAT__) == RTC_HOURFORMAT_24)) +#define IS_RTC_OUTPUT_POL(__POL__) (((__POL__) == RTC_OUTPUT_POLARITY_HIGH) || \ + ((__POL__) == RTC_OUTPUT_POLARITY_LOW)) +#define IS_RTC_OUTPUT_TYPE(__TYPE__) (((__TYPE__) == RTC_OUTPUT_TYPE_OPENDRAIN) || \ + ((__TYPE__) == RTC_OUTPUT_TYPE_PUSHPULL)) +#define IS_RTC_ASYNCH_PREDIV(__PREDIV__) ((__PREDIV__) <= 0x7FU) +#define IS_RTC_SYNCH_PREDIV(__PREDIV__) ((__PREDIV__) <= 0x7FFFU) +#define IS_RTC_HOUR12(__HOUR__) (((__HOUR__) > 0U) && ((__HOUR__) <= 12U)) +#define IS_RTC_HOUR24(__HOUR__) ((__HOUR__) <= 23U) +#define IS_RTC_MINUTES(__MINUTES__) ((__MINUTES__) <= 59U) +#define IS_RTC_SECONDS(__SECONDS__) ((__SECONDS__) <= 59U) +#define IS_RTC_HOURFORMAT12(__PM__) (((__PM__) == RTC_HOURFORMAT12_AM) || ((__PM__) == RTC_HOURFORMAT12_PM)) +#define IS_RTC_DAYLIGHT_SAVING(__SAVE__) (((__SAVE__) == RTC_DAYLIGHTSAVING_SUB1H) || \ + ((__SAVE__) == RTC_DAYLIGHTSAVING_ADD1H) || \ + ((__SAVE__) == RTC_DAYLIGHTSAVING_NONE)) +#define IS_RTC_STORE_OPERATION(__OPERATION__) (((__OPERATION__) == RTC_STOREOPERATION_RESET) || \ + ((__OPERATION__) == RTC_STOREOPERATION_SET)) +#define IS_RTC_FORMAT(__FORMAT__) (((__FORMAT__) == RTC_FORMAT_BIN) || ((__FORMAT__) == RTC_FORMAT_BCD)) +#define IS_RTC_YEAR(__YEAR__) ((__YEAR__) <= 99U) +#define IS_RTC_MONTH(__MONTH__) (((__MONTH__) >= 1U) && ((__MONTH__) <= 12U)) +#define IS_RTC_DATE(__DATE__) (((__DATE__) >= 1U) && ((__DATE__) <= 31U)) +#define IS_RTC_WEEKDAY(__WEEKDAY__) (((__WEEKDAY__) == RTC_WEEKDAY_MONDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_TUESDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_WEDNESDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_THURSDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_FRIDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_SATURDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_SUNDAY)) + +#define IS_RTC_ALARM_DATE_WEEKDAY_DATE(__DATE__) (((__DATE__) >0U) && ((__DATE__) <= 31U)) +#define IS_RTC_ALARM_DATE_WEEKDAY_WEEKDAY(__WEEKDAY__) (((__WEEKDAY__) == RTC_WEEKDAY_MONDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_TUESDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_WEDNESDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_THURSDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_FRIDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_SATURDAY) || \ + ((__WEEKDAY__) == RTC_WEEKDAY_SUNDAY)) +#define IS_RTC_ALARM_DATE_WEEKDAY_SEL(__SEL__) (((__SEL__) == RTC_ALARMDATEWEEKDAYSEL_DATE) || \ + ((__SEL__) == RTC_ALARMDATEWEEKDAYSEL_WEEKDAY)) +#define IS_RTC_ALARM_MASK(__MASK__) (((__MASK__) & 0x7F7F7F7FU) == (uint32_t)RESET) +#define IS_RTC_ALARM(__ALARM__) (((__ALARM__) == RTC_ALARM_A) || ((__ALARM__) == RTC_ALARM_B)) +#define IS_RTC_ALARM_SUB_SECOND_VALUE(__VALUE__) ((__VALUE__) <= 0x00007FFFU) +#define IS_RTC_ALARM_SUB_SECOND_MASK(__MASK__) (((__MASK__) == RTC_ALARMSUBSECONDMASK_ALL) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_1) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_2) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_3) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_4) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_5) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_6) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_7) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_8) 
|| \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_9) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_10) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_11) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_12) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14_13) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_SS14) || \ + ((__MASK__) == RTC_ALARMSUBSECONDMASK_NONE)) + +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup RTC_Private_Functions RTC Private Functions + * @{ + */ +HAL_StatusTypeDef RTC_EnterInitMode(RTC_HandleTypeDef* hrtc); +uint8_t RTC_ByteToBcd2(uint8_t Value); +uint8_t RTC_Bcd2ToByte(uint8_t Value); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_RTC_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rtc_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rtc_ex.h new file mode 100644 index 000000000..073a2f060 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rtc_ex.h @@ -0,0 +1,1017 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rtc_ex.h + * @author MCD Application Team + * @brief Header file of RTC HAL Extension module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_RTC_EX_H +#define __STM32F7xx_HAL_RTC_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup RTCEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup RTCEx_Exported_Types RTCEx Exported Types + * @{ + */ + +/** + * @brief RTC Tamper structure definition + */ +typedef struct +{ + uint32_t Tamper; /*!< Specifies the Tamper Pin. + This parameter can be a value of @ref RTCEx_Tamper_Pins_Definitions */ + + uint32_t Interrupt; /*!< Specifies the Tamper Interrupt. + This parameter can be a value of @ref RTCEx_Tamper_Interrupt_Definitions */ + + uint32_t Trigger; /*!< Specifies the Tamper Trigger. + This parameter can be a value of @ref RTCEx_Tamper_Trigger_Definitions */ + + uint32_t NoErase; /*!< Specifies the Tamper no erase mode. + This parameter can be a value of @ref RTCEx_Tamper_EraseBackUp_Definitions */ + + uint32_t MaskFlag; /*!< Specifies the Tamper Flag masking. + This parameter can be a value of @ref RTCEx_Tamper_MaskFlag_Definitions */ + + uint32_t Filter; /*!< Specifies the RTC Filter Tamper. + This parameter can be a value of @ref RTCEx_Tamper_Filter_Definitions */ + + uint32_t SamplingFrequency; /*!< Specifies the sampling frequency. + This parameter can be a value of @ref RTCEx_Tamper_Sampling_Frequencies_Definitions */ + + uint32_t PrechargeDuration; /*!< Specifies the Precharge Duration . + This parameter can be a value of @ref RTCEx_Tamper_Pin_Precharge_Duration_Definitions */ + + uint32_t TamperPullUp; /*!< Specifies the Tamper PullUp . + This parameter can be a value of @ref RTCEx_Tamper_Pull_UP_Definitions */ + + uint32_t TimeStampOnTamperDetection; /*!< Specifies the TimeStampOnTamperDetection. 
+ This parameter can be a value of @ref RTCEx_Tamper_TimeStampOnTamperDetection_Definitions */ +}RTC_TamperTypeDef; +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RTCEx_Exported_Constants RTCEx Exported Constants + * @{ + */ + +/** @defgroup RTCEx_Output_selection_Definitions RTCEx Output selection Definitions + * @{ + */ +#define RTC_OUTPUT_DISABLE ((uint32_t)0x00000000U) +#define RTC_OUTPUT_ALARMA ((uint32_t)0x00200000U) +#define RTC_OUTPUT_ALARMB ((uint32_t)0x00400000U) +#define RTC_OUTPUT_WAKEUP ((uint32_t)0x00600000U) +/** + * @} + */ + +/** @defgroup RTCEx_Backup_Registers_Definitions RTC Backup Registers Definitions + * @{ + */ +#define RTC_BKP_DR0 ((uint32_t)0x00000000U) +#define RTC_BKP_DR1 ((uint32_t)0x00000001U) +#define RTC_BKP_DR2 ((uint32_t)0x00000002U) +#define RTC_BKP_DR3 ((uint32_t)0x00000003U) +#define RTC_BKP_DR4 ((uint32_t)0x00000004U) +#define RTC_BKP_DR5 ((uint32_t)0x00000005U) +#define RTC_BKP_DR6 ((uint32_t)0x00000006U) +#define RTC_BKP_DR7 ((uint32_t)0x00000007U) +#define RTC_BKP_DR8 ((uint32_t)0x00000008U) +#define RTC_BKP_DR9 ((uint32_t)0x00000009U) +#define RTC_BKP_DR10 ((uint32_t)0x0000000AU) +#define RTC_BKP_DR11 ((uint32_t)0x0000000BU) +#define RTC_BKP_DR12 ((uint32_t)0x0000000CU) +#define RTC_BKP_DR13 ((uint32_t)0x0000000DU) +#define RTC_BKP_DR14 ((uint32_t)0x0000000EU) +#define RTC_BKP_DR15 ((uint32_t)0x0000000FU) +#define RTC_BKP_DR16 ((uint32_t)0x00000010U) +#define RTC_BKP_DR17 ((uint32_t)0x00000011U) +#define RTC_BKP_DR18 ((uint32_t)0x00000012U) +#define RTC_BKP_DR19 ((uint32_t)0x00000013U) +#define RTC_BKP_DR20 ((uint32_t)0x00000014U) +#define RTC_BKP_DR21 ((uint32_t)0x00000015U) +#define RTC_BKP_DR22 ((uint32_t)0x00000016U) +#define RTC_BKP_DR23 ((uint32_t)0x00000017U) +#define RTC_BKP_DR24 ((uint32_t)0x00000018U) +#define RTC_BKP_DR25 ((uint32_t)0x00000019U) +#define RTC_BKP_DR26 ((uint32_t)0x0000001AU) +#define RTC_BKP_DR27 ((uint32_t)0x0000001BU) +#define RTC_BKP_DR28 ((uint32_t)0x0000001CU) +#define RTC_BKP_DR29 ((uint32_t)0x0000001DU) +#define RTC_BKP_DR30 ((uint32_t)0x0000001EU) +#define RTC_BKP_DR31 ((uint32_t)0x0000001FU) +/** + * @} + */ + +/** @defgroup RTCEx_Time_Stamp_Edges_definitions RTCEx Time Stamp Edges definitions + * @{ + */ +#define RTC_TIMESTAMPEDGE_RISING ((uint32_t)0x00000000U) +#define RTC_TIMESTAMPEDGE_FALLING ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup RTCEx_Tamper_Pins_Definitions RTCEx Tamper Pins Definitions + * @{ + */ +#define RTC_TAMPER_1 RTC_TAMPCR_TAMP1E +#define RTC_TAMPER_2 RTC_TAMPCR_TAMP2E +#define RTC_TAMPER_3 RTC_TAMPCR_TAMP3E +/** + * @} + */ + +/** @defgroup RTCEx_Tamper_Interrupt_Definitions RTCEx Tamper Interrupt Definitions + * @{ + */ +#define RTC_TAMPER1_INTERRUPT RTC_TAMPCR_TAMP1IE +#define RTC_TAMPER2_INTERRUPT RTC_TAMPCR_TAMP2IE +#define RTC_TAMPER3_INTERRUPT RTC_TAMPCR_TAMP3IE +#define RTC_ALL_TAMPER_INTERRUPT RTC_TAMPCR_TAMPIE +/** + * @} + */ + +/** @defgroup RTCEx_TimeStamp_Pin_Selection RTCEx TimeStamp Pin Selection + * @{ + */ +#define RTC_TIMESTAMPPIN_DEFAULT ((uint32_t)0x00000000U) +#define RTC_TIMESTAMPPIN_POS1 ((uint32_t)0x00000002U) +#define RTC_TIMESTAMPPIN_POS2 ((uint32_t)0x00000004U) +/** + * @} + */ + +/** @defgroup RTCEx_Tamper_Trigger_Definitions RTCEx Tamper Trigger Definitions + * @{ + */ +#define RTC_TAMPERTRIGGER_RISINGEDGE ((uint32_t)0x00000000U) +#define RTC_TAMPERTRIGGER_FALLINGEDGE ((uint32_t)0x00000002U) +#define RTC_TAMPERTRIGGER_LOWLEVEL RTC_TAMPERTRIGGER_RISINGEDGE +#define 
RTC_TAMPERTRIGGER_HIGHLEVEL RTC_TAMPERTRIGGER_FALLINGEDGE +/** + * @} + */ + + /** @defgroup RTCEx_Tamper_EraseBackUp_Definitions RTCEx Tamper EraseBackUp Definitions +* @{ +*/ +#define RTC_TAMPER_ERASE_BACKUP_ENABLE ((uint32_t)0x00000000U) +#define RTC_TAMPER_ERASE_BACKUP_DISABLE ((uint32_t)0x00020000U) +/** + * @} + */ + +/** @defgroup RTCEx_Tamper_MaskFlag_Definitions RTCEx Tamper MaskFlag Definitions + * @{ + */ +#define RTC_TAMPERMASK_FLAG_DISABLE ((uint32_t)0x00000000U) +#define RTC_TAMPERMASK_FLAG_ENABLE ((uint32_t)0x00040000U) +/** + * @} + */ + +/** @defgroup RTCEx_Tamper_Filter_Definitions RTCEx Tamper Filter Definitions + * @{ + */ +#define RTC_TAMPERFILTER_DISABLE ((uint32_t)0x00000000U) /*!< Tamper filter is disabled */ + +#define RTC_TAMPERFILTER_2SAMPLE ((uint32_t)0x00000800U) /*!< Tamper is activated after 2 + consecutive samples at the active level */ +#define RTC_TAMPERFILTER_4SAMPLE ((uint32_t)0x00001000U) /*!< Tamper is activated after 4 + consecutive samples at the active level */ +#define RTC_TAMPERFILTER_8SAMPLE ((uint32_t)0x00001800U) /*!< Tamper is activated after 8 + consecutive samples at the active leve. */ +/** + * @} + */ + +/** @defgroup RTCEx_Tamper_Sampling_Frequencies_Definitions RTCEx Tamper Sampling Frequencies Definitions + * @{ + */ +#define RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV32768 ((uint32_t)0x00000000U) /*!< Each of the tamper inputs are sampled + with a frequency = RTCCLK / 32768 */ +#define RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV16384 ((uint32_t)0x00000100U) /*!< Each of the tamper inputs are sampled + with a frequency = RTCCLK / 16384 */ +#define RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV8192 ((uint32_t)0x00000200U) /*!< Each of the tamper inputs are sampled + with a frequency = RTCCLK / 8192 */ +#define RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV4096 ((uint32_t)0x00000300U) /*!< Each of the tamper inputs are sampled + with a frequency = RTCCLK / 4096 */ +#define RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV2048 ((uint32_t)0x00000400U) /*!< Each of the tamper inputs are sampled + with a frequency = RTCCLK / 2048 */ +#define RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV1024 ((uint32_t)0x00000500U) /*!< Each of the tamper inputs are sampled + with a frequency = RTCCLK / 1024 */ +#define RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV512 ((uint32_t)0x00000600U) /*!< Each of the tamper inputs are sampled + with a frequency = RTCCLK / 512 */ +#define RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV256 ((uint32_t)0x00000700U) /*!< Each of the tamper inputs are sampled + with a frequency = RTCCLK / 256 */ +/** + * @} + */ + +/** @defgroup RTCEx_Tamper_Pin_Precharge_Duration_Definitions RTCEx Tamper Pin Precharge Duration Definitions + * @{ + */ +#define RTC_TAMPERPRECHARGEDURATION_1RTCCLK ((uint32_t)0x00000000U) /*!< Tamper pins are pre-charged before + sampling during 1 RTCCLK cycle */ +#define RTC_TAMPERPRECHARGEDURATION_2RTCCLK ((uint32_t)0x00002000U) /*!< Tamper pins are pre-charged before + sampling during 2 RTCCLK cycles */ +#define RTC_TAMPERPRECHARGEDURATION_4RTCCLK ((uint32_t)0x00004000U) /*!< Tamper pins are pre-charged before + sampling during 4 RTCCLK cycles */ +#define RTC_TAMPERPRECHARGEDURATION_8RTCCLK ((uint32_t)0x00006000U) /*!< Tamper pins are pre-charged before + sampling during 8 RTCCLK cycles */ +/** + * @} + */ + +/** @defgroup RTCEx_Tamper_TimeStampOnTamperDetection_Definitions RTCEx Tamper TimeStampOnTamperDetection Definitions + * @{ + */ +#define RTC_TIMESTAMPONTAMPERDETECTION_ENABLE ((uint32_t)RTC_TAMPCR_TAMPTS) /*!< TimeStamp on Tamper Detection event saved */ +#define 
RTC_TIMESTAMPONTAMPERDETECTION_DISABLE ((uint32_t)0x00000000U) /*!< TimeStamp on Tamper Detection event is not saved */ +/** + * @} + */ + +/** @defgroup RTCEx_Tamper_Pull_UP_Definitions RTCEx Tamper Pull UP Definitions + * @{ + */ +#define RTC_TAMPER_PULLUP_ENABLE ((uint32_t)0x00000000U) /*!< Tamper pins are pre-charged before sampling */ +#define RTC_TAMPER_PULLUP_DISABLE ((uint32_t)RTC_TAMPCR_TAMPPUDIS) /*!< Tamper pins are not pre-charged before sampling */ +/** + * @} + */ + +/** @defgroup RTCEx_Wakeup_Timer_Definitions RTCEx Wakeup Timer Definitions + * @{ + */ +#define RTC_WAKEUPCLOCK_RTCCLK_DIV16 ((uint32_t)0x00000000U) +#define RTC_WAKEUPCLOCK_RTCCLK_DIV8 ((uint32_t)0x00000001U) +#define RTC_WAKEUPCLOCK_RTCCLK_DIV4 ((uint32_t)0x00000002U) +#define RTC_WAKEUPCLOCK_RTCCLK_DIV2 ((uint32_t)0x00000003U) +#define RTC_WAKEUPCLOCK_CK_SPRE_16BITS ((uint32_t)0x00000004U) +#define RTC_WAKEUPCLOCK_CK_SPRE_17BITS ((uint32_t)0x00000006U) +/** + * @} + */ + +/** @defgroup RTCEx_Smooth_calib_period_Definitions RTCEx Smooth calib period Definitions + * @{ + */ +#define RTC_SMOOTHCALIB_PERIOD_32SEC ((uint32_t)0x00000000U) /*!< If RTCCLK = 32768 Hz, Smooth calibration + period is 32s, else 2exp20 RTCCLK seconds */ +#define RTC_SMOOTHCALIB_PERIOD_16SEC ((uint32_t)0x00002000U) /*!< If RTCCLK = 32768 Hz, Smooth calibration + period is 16s, else 2exp19 RTCCLK seconds */ +#define RTC_SMOOTHCALIB_PERIOD_8SEC ((uint32_t)0x00004000U) /*!< If RTCCLK = 32768 Hz, Smooth calibration + period is 8s, else 2exp18 RTCCLK seconds */ +/** + * @} + */ + +/** @defgroup RTCEx_Smooth_calib_Plus_pulses_Definitions RTCEx Smooth calib Plus pulses Definitions + * @{ + */ +#define RTC_SMOOTHCALIB_PLUSPULSES_SET ((uint32_t)0x00008000U) /*!< The number of RTCCLK pulses added + during an X-second window = Y - CALM[8:0] + with Y = 512, 256, 128 when X = 32, 16, 8 */ +#define RTC_SMOOTHCALIB_PLUSPULSES_RESET ((uint32_t)0x00000000U) /*!< The number of RTCCLK pulses subtracted + during a 32-second window = CALM[8:0] */ +/** + * @} + */ + +/** @defgroup RTCEx_Add_1_Second_Parameter_Definitions RTCEx Add 1 Second Parameter Definitions + * @{ + */ +#define RTC_SHIFTADD1S_RESET ((uint32_t)0x00000000U) +#define RTC_SHIFTADD1S_SET ((uint32_t)0x80000000U) +/** + * @} + */ + + /** @defgroup RTCEx_Calib_Output_selection_Definitions RTCEx Calib Output selection Definitions + * @{ + */ +#define RTC_CALIBOUTPUT_512HZ ((uint32_t)0x00000000U) +#define RTC_CALIBOUTPUT_1HZ ((uint32_t)0x00080000U) +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup RTCEx_Exported_Macros RTCEx Exported Macros + * @{ + */ + +/** + * @brief Enable the RTC WakeUp Timer peripheral. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_WAKEUPTIMER_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= (RTC_CR_WUTE)) + +/** + * @brief Disable the RTC WakeUp Timer peripheral. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_WAKEUPTIMER_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~(RTC_CR_WUTE)) + +/** + * @brief Enable the RTC WakeUpTimer interrupt. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC WakeUpTimer interrupt sources to be enabled or disabled.
+ * This parameter can be: + * @arg RTC_IT_WUT: WakeUpTimer interrupt + * @retval None + */ +#define __HAL_RTC_WAKEUPTIMER_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR |= (__INTERRUPT__)) + +/** + * @brief Disable the RTC WakeUpTimer interrupt. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC WakeUpTimer interrupt sources to be enabled or disabled. + * This parameter can be: + * @arg RTC_IT_WUT: WakeUpTimer interrupt + * @retval None + */ +#define __HAL_RTC_WAKEUPTIMER_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR &= ~(__INTERRUPT__)) + +/** + * @brief Check whether the specified RTC WakeUpTimer interrupt has occurred or not. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC WakeUpTimer interrupt sources to check. + * This parameter can be: + * @arg RTC_IT_WUT: WakeUpTimer interrupt + * @retval None + */ +#define __HAL_RTC_WAKEUPTIMER_GET_IT(__HANDLE__, __INTERRUPT__) (((((__HANDLE__)->Instance->ISR) & ((__INTERRUPT__)>> 4)) != RESET) ? SET : RESET) + +/** + * @brief Check whether the specified RTC Wake Up timer interrupt has been enabled or not. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC Wake Up timer interrupt sources to check. + * This parameter can be: + * @arg RTC_IT_WUT: WakeUpTimer interrupt + * @retval None + */ +#define __HAL_RTC_WAKEUPTIMER_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((((__HANDLE__)->Instance->CR) & (__INTERRUPT__)) != RESET) ? SET : RESET) + +/** + * @brief Get the selected RTC WakeUpTimer's flag status. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC WakeUpTimer Flag is pending or not. + * This parameter can be: + * @arg RTC_FLAG_WUTF + * @arg RTC_FLAG_WUTWF + * @retval None + */ +#define __HAL_RTC_WAKEUPTIMER_GET_FLAG(__HANDLE__, __FLAG__) (((((__HANDLE__)->Instance->ISR) & (__FLAG__)) != RESET) ? SET : RESET) + +/** + * @brief Clear the RTC Wake Up timer's pending flags. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC WakeUpTimer Flag to clear. + * This parameter can be: + * @arg RTC_FLAG_WUTF + * @retval None + */ +#define __HAL_RTC_WAKEUPTIMER_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->ISR) = (~((__FLAG__) | RTC_ISR_INIT)|((__HANDLE__)->Instance->ISR & RTC_ISR_INIT)) + +/** + * @brief Enable the RTC Tamper1 input detection. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_TAMPER1_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->TAMPCR |= (RTC_TAMPCR_TAMP1E)) + +/** + * @brief Disable the RTC Tamper1 input detection. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_TAMPER1_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->TAMPCR &= ~(RTC_TAMPCR_TAMP1E)) + +/** + * @brief Enable the RTC Tamper2 input detection. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_TAMPER2_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->TAMPCR |= (RTC_TAMPCR_TAMP2E)) + +/** + * @brief Disable the RTC Tamper2 input detection. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_TAMPER2_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->TAMPCR &= ~(RTC_TAMPCR_TAMP2E)) + +/** + * @brief Enable the RTC Tamper3 input detection. + * @param __HANDLE__ specifies the RTC handle. 
+ * @retval None + */ +#define __HAL_RTC_TAMPER3_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->TAMPCR |= (RTC_TAMPCR_TAMP3E)) + +/** + * @brief Disable the RTC Tamper3 input detection. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_TAMPER3_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->TAMPCR &= ~(RTC_TAMPCR_TAMP3E)) + +/** + * @brief Check whether the specified RTC Tamper interrupt has occurred or not. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC Tamper interrupt to check. + * This parameter can be: + * @arg RTC_IT_TAMP: All tampers interrupts + * @arg RTC_IT_TAMP1: Tamper1 interrupt + * @arg RTC_IT_TAMP2: Tamper2 interrupt + * @arg RTC_IT_TAMP3: Tamper3 interrupt + * @retval None + */ +#define __HAL_RTC_TAMPER_GET_IT(__HANDLE__, __INTERRUPT__) (((__INTERRUPT__) == RTC_IT_TAMP1) ? (((((__HANDLE__)->Instance->ISR) & ((__INTERRUPT__)>> 3)) != RESET) ? SET : RESET) : \ + ((__INTERRUPT__) == RTC_IT_TAMP2) ? (((((__HANDLE__)->Instance->ISR) & ((__INTERRUPT__)>> 5)) != RESET) ? SET : RESET) : \ + (((((__HANDLE__)->Instance->ISR) & ((__INTERRUPT__)>> 7)) != RESET) ? SET : RESET)) + +/** + * @brief Check whether the specified RTC Tamper interrupt has been enabled or not. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC Tamper interrupt source to check. + * This parameter can be: + * @arg RTC_IT_TAMP: All tampers interrupts + * @arg RTC_IT_TAMP1: Tamper1 interrupt + * @arg RTC_IT_TAMP2: Tamper2 interrupt + * @arg RTC_IT_TAMP3: Tamper3 interrupt + * @retval None + */ +#define __HAL_RTC_TAMPER_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((((__HANDLE__)->Instance->TAMPCR) & (__INTERRUPT__)) != RESET) ? SET : RESET) + +/** + * @brief Get the selected RTC Tamper's flag status. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC Tamper Flag is pending or not. + * This parameter can be: + * @arg RTC_FLAG_TAMP1F: Tamper1 flag + * @arg RTC_FLAG_TAMP2F: Tamper2 flag + * @arg RTC_FLAG_TAMP3F: Tamper3 flag + * @retval None + */ +#define __HAL_RTC_TAMPER_GET_FLAG(__HANDLE__, __FLAG__) (((((__HANDLE__)->Instance->ISR) & (__FLAG__)) != RESET) ? SET : RESET) + +/** + * @brief Clear the RTC Tamper's pending flags. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC Tamper Flag sources to clear. + * This parameter can be: + * @arg RTC_FLAG_TAMP1F: Tamper1 flag + * @arg RTC_FLAG_TAMP2F: Tamper2 flag + * @arg RTC_FLAG_TAMP3F: Tamper3 flag + * @retval None + */ +#define __HAL_RTC_TAMPER_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->ISR) = (~((__FLAG__) | RTC_ISR_INIT)|((__HANDLE__)->Instance->ISR & RTC_ISR_INIT)) + +/** + * @brief Enable the RTC TimeStamp peripheral. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_TIMESTAMP_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= (RTC_CR_TSE)) + +/** + * @brief Disable the RTC TimeStamp peripheral. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_TIMESTAMP_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~(RTC_CR_TSE)) + +/** + * @brief Enable the RTC TimeStamp interrupt. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC TimeStamp interrupt sources to be enabled or disabled. 
+ * This parameter can be: + * @arg RTC_IT_TS: TimeStamp interrupt + * @retval None + */ +#define __HAL_RTC_TIMESTAMP_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR |= (__INTERRUPT__)) + +/** + * @brief Disable the RTC TimeStamp interrupt. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC TimeStamp interrupt sources to be enabled or disabled. + * This parameter can be: + * @arg RTC_IT_TS: TimeStamp interrupt + * @retval None + */ +#define __HAL_RTC_TIMESTAMP_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR &= ~(__INTERRUPT__)) + +/** + * @brief Check whether the specified RTC TimeStamp interrupt has occurred or not. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC TimeStamp interrupt sources to check. + * This parameter can be: + * @arg RTC_IT_TS: TimeStamp interrupt + * @retval None + */ +#define __HAL_RTC_TIMESTAMP_GET_IT(__HANDLE__, __INTERRUPT__) (((((__HANDLE__)->Instance->ISR) & ((__INTERRUPT__)>> 4)) != RESET) ? SET : RESET) + +/** + * @brief Check whether the specified RTC Time Stamp interrupt has been enabled or not. + * @param __HANDLE__ specifies the RTC handle. + * @param __INTERRUPT__ specifies the RTC Time Stamp interrupt source to check. + * This parameter can be: + * @arg RTC_IT_TS: TimeStamp interrupt + * @retval None + */ +#define __HAL_RTC_TIMESTAMP_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((((__HANDLE__)->Instance->CR) & (__INTERRUPT__)) != RESET) ? SET : RESET) + +/** + * @brief Get the selected RTC TimeStamp's flag status. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC TimeStamp Flag is pending or not. + * This parameter can be: + * @arg RTC_FLAG_TSF + * @arg RTC_FLAG_TSOVF + * @retval None + */ +#define __HAL_RTC_TIMESTAMP_GET_FLAG(__HANDLE__, __FLAG__) (((((__HANDLE__)->Instance->ISR) & (__FLAG__)) != RESET) ? SET : RESET) + +/** + * @brief Clear the RTC Time Stamp's pending flags. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC Alarm Flag sources to clear. + * This parameter can be: + * @arg RTC_FLAG_TSF + * @arg RTC_FLAG_TSOVF + * @retval None + */ +#define __HAL_RTC_TIMESTAMP_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->ISR) = (~((__FLAG__) | RTC_ISR_INIT)|((__HANDLE__)->Instance->ISR & RTC_ISR_INIT)) + +/** + * @brief Enable the RTC internal TimeStamp peripheral. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_INTERNAL_TIMESTAMP_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= (RTC_CR_ITSE)) + +/** + * @brief Disable the RTC internal TimeStamp peripheral. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_INTERNAL_TIMESTAMP_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~(RTC_CR_ITSE)) + +/** + * @brief Get the selected RTC Internal Time Stamp's flag status. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC Internal Time Stamp Flag is pending or not. + * This parameter can be: + * @arg RTC_FLAG_ITSF + * @retval None + */ +#define __HAL_RTC_INTERNAL_TIMESTAMP_GET_FLAG(__HANDLE__, __FLAG__) (((((__HANDLE__)->Instance->ISR) & (__FLAG__)) != RESET) ? SET : RESET) + +/** + * @brief Clear the RTC Internal Time Stamp's pending flags. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC Internal Time Stamp Flag source to clear. 
+ * This parameter can be: + * @arg RTC_FLAG_ITSF + * @retval None + */ +#define __HAL_RTC_INTERNAL_TIMESTAMP_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->ISR) = (~(((__FLAG__) | RTC_ISR_INIT)& 0x0003FFFF)|((__HANDLE__)->Instance->ISR & RTC_ISR_INIT)) + +/** + * @brief Enable the RTC calibration output. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_CALIBRATION_OUTPUT_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= (RTC_CR_COE)) + +/** + * @brief Disable the calibration output. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_CALIBRATION_OUTPUT_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~(RTC_CR_COE)) + +/** + * @brief Enable the clock reference detection. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_CLOCKREF_DETECTION_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= (RTC_CR_REFCKON)) + +/** + * @brief Disable the clock reference detection. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_CLOCKREF_DETECTION_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~(RTC_CR_REFCKON)) + +/** + * @brief Get the selected RTC shift operation's flag status. + * @param __HANDLE__ specifies the RTC handle. + * @param __FLAG__ specifies the RTC shift operation Flag is pending or not. + * This parameter can be: + * @arg RTC_FLAG_SHPF + * @retval None + */ +#define __HAL_RTC_SHIFT_GET_FLAG(__HANDLE__, __FLAG__) (((((__HANDLE__)->Instance->ISR) & (__FLAG__)) != RESET) ? SET : RESET) + +/** + * @brief Enable interrupt on the RTC WakeUp Timer associated Exti line. + * @retval None + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_IT() (EXTI->IMR |= RTC_EXTI_LINE_WAKEUPTIMER_EVENT) + +/** + * @brief Disable interrupt on the RTC WakeUp Timer associated Exti line. + * @retval None + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_IT() (EXTI->IMR &= ~(RTC_EXTI_LINE_WAKEUPTIMER_EVENT)) + +/** + * @brief Enable event on the RTC WakeUp Timer associated Exti line. + * @retval None. + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_EVENT() (EXTI->EMR |= RTC_EXTI_LINE_WAKEUPTIMER_EVENT) + +/** + * @brief Disable event on the RTC WakeUp Timer associated Exti line. + * @retval None. + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_EVENT() (EXTI->EMR &= ~(RTC_EXTI_LINE_WAKEUPTIMER_EVENT)) + +/** + * @brief Enable falling edge trigger on the RTC WakeUp Timer associated Exti line. + * @retval None. + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_FALLING_EDGE() (EXTI->FTSR |= RTC_EXTI_LINE_WAKEUPTIMER_EVENT) + +/** + * @brief Disable falling edge trigger on the RTC WakeUp Timer associated Exti line. + * @retval None. + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_FALLING_EDGE() (EXTI->FTSR &= ~(RTC_EXTI_LINE_WAKEUPTIMER_EVENT)) + +/** + * @brief Enable rising edge trigger on the RTC WakeUp Timer associated Exti line. + * @retval None. + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_RISING_EDGE() (EXTI->RTSR |= RTC_EXTI_LINE_WAKEUPTIMER_EVENT) + +/** + * @brief Disable rising edge trigger on the RTC WakeUp Timer associated Exti line. + * @retval None. + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_RISING_EDGE() (EXTI->RTSR &= ~(RTC_EXTI_LINE_WAKEUPTIMER_EVENT)) + +/** + * @brief Enable rising & falling edge trigger on the RTC WakeUp Timer associated Exti line. + * @retval None. 
+ */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_RISING_FALLING_EDGE() __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_RISING_EDGE();__HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_FALLING_EDGE(); + +/** + * @brief Disable rising & falling edge trigger on the RTC WakeUp Timer associated Exti line. + * This parameter can be: + * @retval None. + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_RISING_FALLING_EDGE() __HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_RISING_EDGE();__HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_FALLING_EDGE(); + +/** + * @brief Check whether the RTC WakeUp Timer associated Exti line interrupt flag is set or not. + * @retval Line Status. + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_GET_FLAG() (EXTI->PR & RTC_EXTI_LINE_WAKEUPTIMER_EVENT) + +/** + * @brief Clear the RTC WakeUp Timer associated Exti line flag. + * @retval None. + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_CLEAR_FLAG() (EXTI->PR = RTC_EXTI_LINE_WAKEUPTIMER_EVENT) + +/** + * @brief Generate a Software interrupt on the RTC WakeUp Timer associated Exti line. + * @retval None. + */ +#define __HAL_RTC_WAKEUPTIMER_EXTI_GENERATE_SWIT() (EXTI->SWIER |= RTC_EXTI_LINE_WAKEUPTIMER_EVENT) + +/** + * @brief Enable interrupt on the RTC Tamper and Timestamp associated Exti line. + * @retval None + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_IT() (EXTI->IMR |= RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT) + +/** + * @brief Disable interrupt on the RTC Tamper and Timestamp associated Exti line. + * @retval None + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_IT() (EXTI->IMR &= ~(RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT)) + +/** + * @brief Enable event on the RTC Tamper and Timestamp associated Exti line. + * @retval None. + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_EVENT() (EXTI->EMR |= RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT) + +/** + * @brief Disable event on the RTC Tamper and Timestamp associated Exti line. + * @retval None. + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_EVENT() (EXTI->EMR &= ~(RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT)) + +/** + * @brief Enable falling edge trigger on the RTC Tamper and Timestamp associated Exti line. + * @retval None. + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_FALLING_EDGE() (EXTI->FTSR |= RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT) + +/** + * @brief Disable falling edge trigger on the RTC Tamper and Timestamp associated Exti line. + * @retval None. + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_FALLING_EDGE() (EXTI->FTSR &= ~(RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT)) + +/** + * @brief Enable rising edge trigger on the RTC Tamper and Timestamp associated Exti line. + * @retval None. + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_RISING_EDGE() (EXTI->RTSR |= RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT) + +/** + * @brief Disable rising edge trigger on the RTC Tamper and Timestamp associated Exti line. + * @retval None. + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_RISING_EDGE() (EXTI->RTSR &= ~(RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT)) + +/** + * @brief Enable rising & falling edge trigger on the RTC Tamper and Timestamp associated Exti line. + * @retval None. + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_RISING_FALLING_EDGE() __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_RISING_EDGE();__HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_FALLING_EDGE(); + +/** + * @brief Disable rising & falling edge trigger on the RTC Tamper and Timestamp associated Exti line. + * This parameter can be: + * @retval None. 
+ */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_RISING_FALLING_EDGE() __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_RISING_EDGE();__HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_FALLING_EDGE(); + +/** + * @brief Check whether the RTC Tamper and Timestamp associated Exti line interrupt flag is set or not. + * @retval Line Status. + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GET_FLAG() (EXTI->PR & RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT) + +/** + * @brief Clear the RTC Tamper and Timestamp associated Exti line flag. + * @retval None. + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_CLEAR_FLAG() (EXTI->PR = RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT) + +/** + * @brief Generate a Software interrupt on the RTC Tamper and Timestamp associated Exti line + * @retval None. + */ +#define __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GENERATE_SWIT() (EXTI->SWIER |= RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT) + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup RTCEx_Exported_Functions RTCEx Exported Functions + * @{ + */ + +/** @addtogroup RTCEx_Exported_Functions_Group1 + * @{ + */ + +/* RTC TimeStamp and Tamper functions *****************************************/ +HAL_StatusTypeDef HAL_RTCEx_SetTimeStamp(RTC_HandleTypeDef *hrtc, uint32_t TimeStampEdge, uint32_t RTC_TimeStampPin); +HAL_StatusTypeDef HAL_RTCEx_SetTimeStamp_IT(RTC_HandleTypeDef *hrtc, uint32_t TimeStampEdge, uint32_t RTC_TimeStampPin); +HAL_StatusTypeDef HAL_RTCEx_DeactivateTimeStamp(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTCEx_SetInternalTimeStamp(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTCEx_DeactivateInternalTimeStamp(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTCEx_GetTimeStamp(RTC_HandleTypeDef *hrtc, RTC_TimeTypeDef *sTimeStamp, RTC_DateTypeDef *sTimeStampDate, uint32_t Format); + +HAL_StatusTypeDef HAL_RTCEx_SetTamper(RTC_HandleTypeDef *hrtc, RTC_TamperTypeDef* sTamper); +HAL_StatusTypeDef HAL_RTCEx_SetTamper_IT(RTC_HandleTypeDef *hrtc, RTC_TamperTypeDef* sTamper); +HAL_StatusTypeDef HAL_RTCEx_DeactivateTamper(RTC_HandleTypeDef *hrtc, uint32_t Tamper); +void HAL_RTCEx_TamperTimeStampIRQHandler(RTC_HandleTypeDef *hrtc); + +void HAL_RTCEx_Tamper1EventCallback(RTC_HandleTypeDef *hrtc); +void HAL_RTCEx_Tamper2EventCallback(RTC_HandleTypeDef *hrtc); +void HAL_RTCEx_Tamper3EventCallback(RTC_HandleTypeDef *hrtc); +void HAL_RTCEx_TimeStampEventCallback(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTCEx_PollForTimeStampEvent(RTC_HandleTypeDef *hrtc, uint32_t Timeout); +HAL_StatusTypeDef HAL_RTCEx_PollForTamper1Event(RTC_HandleTypeDef *hrtc, uint32_t Timeout); +HAL_StatusTypeDef HAL_RTCEx_PollForTamper2Event(RTC_HandleTypeDef *hrtc, uint32_t Timeout); +HAL_StatusTypeDef HAL_RTCEx_PollForTamper3Event(RTC_HandleTypeDef *hrtc, uint32_t Timeout); +/** + * @} + */ + +/** @addtogroup RTCEx_Exported_Functions_Group2 + * @{ + */ +/* RTC Wake-up functions ******************************************************/ +HAL_StatusTypeDef HAL_RTCEx_SetWakeUpTimer(RTC_HandleTypeDef *hrtc, uint32_t WakeUpCounter, uint32_t WakeUpClock); +HAL_StatusTypeDef HAL_RTCEx_SetWakeUpTimer_IT(RTC_HandleTypeDef *hrtc, uint32_t WakeUpCounter, uint32_t WakeUpClock); +uint32_t HAL_RTCEx_DeactivateWakeUpTimer(RTC_HandleTypeDef *hrtc); +uint32_t HAL_RTCEx_GetWakeUpTimer(RTC_HandleTypeDef *hrtc); +void HAL_RTCEx_WakeUpTimerIRQHandler(RTC_HandleTypeDef *hrtc); +void HAL_RTCEx_WakeUpTimerEventCallback(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTCEx_PollForWakeUpTimerEvent(RTC_HandleTypeDef *hrtc, uint32_t Timeout); 
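The wake-up timer prototypes above are normally used together with the __HAL_RTC_WAKEUPTIMER_* macros defined earlier in this header. The following is a minimal usage sketch and is not part of the ST sources: it assumes the usual CubeMX handle name hrtc (already initialised via HAL_RTC_Init()), the standard CMSIS vector name RTC_WKUP_IRQHandler, a 1 Hz ck_spre clock so that the period is (WakeUpCounter + 1) seconds, and an arbitrarily chosen NVIC priority.

#include "stm32f7xx_hal.h"

extern RTC_HandleTypeDef hrtc;   /* assumed: CubeMX-style handle, initialised elsewhere */

/* Arm a periodic wake-up interrupt every 10 s using the 1 Hz ck_spre clock:
   period = (WakeUpCounter + 1) * 1 s, so a counter of 9 gives 10 s. */
static void rtc_wakeup_start_10s(void)
{
  /* In this HAL version HAL_RTCEx_SetWakeUpTimer_IT() is expected to configure EXTI
     line 22 itself; otherwise the __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_IT() and
     __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_RISING_EDGE() macros above can be called first. */
  HAL_NVIC_SetPriority(RTC_WKUP_IRQn, 5U, 0U);   /* priority value is an arbitrary choice */
  HAL_NVIC_EnableIRQ(RTC_WKUP_IRQn);

  if (HAL_RTCEx_SetWakeUpTimer_IT(&hrtc, 9U, RTC_WAKEUPCLOCK_CK_SPRE_16BITS) != HAL_OK)
  {
    /* application-specific error handling */
  }
}

/* CMSIS interrupt handler: the HAL clears the WUTF flag and the EXTI pending bit,
   then calls the callback below. */
void RTC_WKUP_IRQHandler(void)
{
  HAL_RTCEx_WakeUpTimerIRQHandler(&hrtc);
}

/* Override of the weak HAL_RTCEx_WakeUpTimerEventCallback() declared above. */
void HAL_RTCEx_WakeUpTimerEventCallback(RTC_HandleTypeDef *handle)
{
  (void)handle;
  /* periodic work goes here (keep it short: this runs in interrupt context) */
}

/* Stop the periodic interrupt when it is no longer needed. */
static void rtc_wakeup_stop(void)
{
  (void)HAL_RTCEx_DeactivateWakeUpTimer(&hrtc);
}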
+/** + * @} + */ + +/** @addtogroup RTCEx_Exported_Functions_Group3 + * @{ + */ +/* Extension Control functions ************************************************/ +void HAL_RTCEx_BKUPWrite(RTC_HandleTypeDef *hrtc, uint32_t BackupRegister, uint32_t Data); +uint32_t HAL_RTCEx_BKUPRead(RTC_HandleTypeDef *hrtc, uint32_t BackupRegister); + +HAL_StatusTypeDef HAL_RTCEx_SetSmoothCalib(RTC_HandleTypeDef *hrtc, uint32_t SmoothCalibPeriod, uint32_t SmoothCalibPlusPulses, uint32_t SmouthCalibMinusPulsesValue); +HAL_StatusTypeDef HAL_RTCEx_SetSynchroShift(RTC_HandleTypeDef *hrtc, uint32_t ShiftAdd1S, uint32_t ShiftSubFS); +HAL_StatusTypeDef HAL_RTCEx_SetCalibrationOutPut(RTC_HandleTypeDef *hrtc, uint32_t CalibOutput); +HAL_StatusTypeDef HAL_RTCEx_DeactivateCalibrationOutPut(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTCEx_SetRefClock(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTCEx_DeactivateRefClock(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTCEx_EnableBypassShadow(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTCEx_DisableBypassShadow(RTC_HandleTypeDef *hrtc); +/** + * @} + */ + +/** @addtogroup RTCEx_Exported_Functions_Group4 + * @{ + */ +/* Extension RTC features functions *******************************************/ +void HAL_RTCEx_AlarmBEventCallback(RTC_HandleTypeDef *hrtc); +HAL_StatusTypeDef HAL_RTCEx_PollForAlarmBEvent(RTC_HandleTypeDef *hrtc, uint32_t Timeout); +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup RTCEx_Private_Constants RTCEx Private Constants + * @{ + */ +#define RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT ((uint32_t)EXTI_IMR_IM21) /*!< External interrupt line 21 Connected to the RTC Tamper and Time Stamp events */ +#define RTC_EXTI_LINE_WAKEUPTIMER_EVENT ((uint32_t)EXTI_IMR_IM22) /*!< External interrupt line 22 Connected to the RTC Wake-up event */ +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup RTCEx_Private_Constants RTCEx Private Constants + * @{ + */ +/* Masks Definition */ +#define RTC_TAMPCR_TAMPXE ((uint32_t) (RTC_TAMPCR_TAMP3E | RTC_TAMPCR_TAMP2E | RTC_TAMPCR_TAMP1E)) +#define RTC_TAMPCR_TAMPXIE ((uint32_t) (RTC_TAMPER1_INTERRUPT | RTC_TAMPER2_INTERRUPT | RTC_TAMPER3_INTERRUPT | RTC_ALL_TAMPER_INTERRUPT)) +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup RTCEx_Private_Macros RTCEx Private Macros + * @{ + */ + +/** @defgroup RTCEx_IS_RTC_Definitions Private macros to check input parameters + * @{ + */ +#define IS_RTC_OUTPUT(__OUTPUT__) (((__OUTPUT__) == RTC_OUTPUT_DISABLE) || \ + ((__OUTPUT__) == RTC_OUTPUT_ALARMA) || \ + ((__OUTPUT__) == RTC_OUTPUT_ALARMB) || \ + ((__OUTPUT__) == RTC_OUTPUT_WAKEUP)) +#define IS_RTC_BKP(__BKP__) ((__BKP__) < (uint32_t) RTC_BKP_NUMBER) +#define IS_TIMESTAMP_EDGE(__EDGE__) (((__EDGE__) == RTC_TIMESTAMPEDGE_RISING) || \ + ((__EDGE__) == RTC_TIMESTAMPEDGE_FALLING)) +#define IS_RTC_TAMPER(__TAMPER__) ((((__TAMPER__) & ((uint32_t)(0xFFFFFFFFU ^ RTC_TAMPCR_TAMPXE))) == 0x00U) && ((__TAMPER__) != (uint32_t)RESET)) + +#define IS_RTC_TAMPER_INTERRUPT(__INTERRUPT__) ((((__INTERRUPT__) & (uint32_t)(0xFFFFFFFFU ^ RTC_TAMPCR_TAMPXIE)) == 0x00U) && ((__INTERRUPT__) != (uint32_t)RESET)) + +#define IS_RTC_TIMESTAMP_PIN(__PIN__) (((__PIN__) == 
RTC_TIMESTAMPPIN_DEFAULT) || \ + ((__PIN__) == RTC_TIMESTAMPPIN_POS1) || \ + ((__PIN__) == RTC_TIMESTAMPPIN_POS2)) +#define IS_RTC_TAMPER_TRIGGER(__TRIGGER__) (((__TRIGGER__) == RTC_TAMPERTRIGGER_RISINGEDGE) || \ + ((__TRIGGER__) == RTC_TAMPERTRIGGER_FALLINGEDGE) || \ + ((__TRIGGER__) == RTC_TAMPERTRIGGER_LOWLEVEL) || \ + ((__TRIGGER__) == RTC_TAMPERTRIGGER_HIGHLEVEL)) +#define IS_RTC_TAMPER_ERASE_MODE(__MODE__) (((__MODE__) == RTC_TAMPER_ERASE_BACKUP_ENABLE) || \ + ((__MODE__) == RTC_TAMPER_ERASE_BACKUP_DISABLE)) +#define IS_RTC_TAMPER_MASKFLAG_STATE(__STATE__) (((__STATE__) == RTC_TAMPERMASK_FLAG_ENABLE) || \ + ((__STATE__) == RTC_TAMPERMASK_FLAG_DISABLE)) +#define IS_RTC_TAMPER_FILTER(__FILTER__) (((__FILTER__) == RTC_TAMPERFILTER_DISABLE) || \ + ((__FILTER__) == RTC_TAMPERFILTER_2SAMPLE) || \ + ((__FILTER__) == RTC_TAMPERFILTER_4SAMPLE) || \ + ((__FILTER__) == RTC_TAMPERFILTER_8SAMPLE)) +#define IS_RTC_TAMPER_SAMPLING_FREQ(__FREQ__) (((__FREQ__) == RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV32768)|| \ + ((__FREQ__) == RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV16384)|| \ + ((__FREQ__) == RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV8192) || \ + ((__FREQ__) == RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV4096) || \ + ((__FREQ__) == RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV2048) || \ + ((__FREQ__) == RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV1024) || \ + ((__FREQ__) == RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV512) || \ + ((__FREQ__) == RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV256)) +#define IS_RTC_TAMPER_PRECHARGE_DURATION(__DURATION__) (((__DURATION__) == RTC_TAMPERPRECHARGEDURATION_1RTCCLK) || \ + ((__DURATION__) == RTC_TAMPERPRECHARGEDURATION_2RTCCLK) || \ + ((__DURATION__) == RTC_TAMPERPRECHARGEDURATION_4RTCCLK) || \ + ((__DURATION__) == RTC_TAMPERPRECHARGEDURATION_8RTCCLK)) +#define IS_RTC_TAMPER_TIMESTAMPONTAMPER_DETECTION(__DETECTION__) (((__DETECTION__) == RTC_TIMESTAMPONTAMPERDETECTION_ENABLE) || \ + ((__DETECTION__) == RTC_TIMESTAMPONTAMPERDETECTION_DISABLE)) +#define IS_RTC_TAMPER_PULLUP_STATE(__STATE__) (((__STATE__) == RTC_TAMPER_PULLUP_ENABLE) || \ + ((__STATE__) == RTC_TAMPER_PULLUP_DISABLE)) +#define IS_RTC_WAKEUP_CLOCK(__CLOCK__) (((__CLOCK__) == RTC_WAKEUPCLOCK_RTCCLK_DIV16) || \ + ((__CLOCK__) == RTC_WAKEUPCLOCK_RTCCLK_DIV8) || \ + ((__CLOCK__) == RTC_WAKEUPCLOCK_RTCCLK_DIV4) || \ + ((__CLOCK__) == RTC_WAKEUPCLOCK_RTCCLK_DIV2) || \ + ((__CLOCK__) == RTC_WAKEUPCLOCK_CK_SPRE_16BITS) || \ + ((__CLOCK__) == RTC_WAKEUPCLOCK_CK_SPRE_17BITS)) + +#define IS_RTC_WAKEUP_COUNTER(__COUNTER__) ((__COUNTER__) <= 0xFFFF) +#define IS_RTC_SMOOTH_CALIB_PERIOD(__PERIOD__) (((__PERIOD__) == RTC_SMOOTHCALIB_PERIOD_32SEC) || \ + ((__PERIOD__) == RTC_SMOOTHCALIB_PERIOD_16SEC) || \ + ((__PERIOD__) == RTC_SMOOTHCALIB_PERIOD_8SEC)) +#define IS_RTC_SMOOTH_CALIB_PLUS(__PLUS__) (((__PLUS__) == RTC_SMOOTHCALIB_PLUSPULSES_SET) || \ + ((__PLUS__) == RTC_SMOOTHCALIB_PLUSPULSES_RESET)) +#define IS_RTC_SMOOTH_CALIB_MINUS(__VALUE__) ((__VALUE__) <= 0x000001FF) +#define IS_RTC_SHIFT_ADD1S(__SEL__) (((__SEL__) == RTC_SHIFTADD1S_RESET) || \ + ((__SEL__) == RTC_SHIFTADD1S_SET)) +#define IS_RTC_SHIFT_SUBFS(__FS__) ((__FS__) <= 0x00007FFF) +#define IS_RTC_CALIB_OUTPUT(__OUTPUT__) (((__OUTPUT__) == RTC_CALIBOUTPUT_512HZ) || \ + ((__OUTPUT__) == RTC_CALIBOUTPUT_1HZ)) +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_RTC_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_sd.h 
b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_sd.h new file mode 100644 index 000000000..305f83aa8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_sd.h @@ -0,0 +1,761 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_sd.h + * @author MCD Application Team + * @brief Header file of SD HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_SD_H +#define STM32F7xx_HAL_SD_H + +#ifdef __cplusplus + extern "C" { +#endif + +#if defined(SDMMC1) + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_ll_sdmmc.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup SD SD + * @brief SD HAL module driver + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup SD_Exported_Types SD Exported Types + * @{ + */ + +/** @defgroup SD_Exported_Types_Group1 SD State enumeration structure + * @{ + */ +typedef enum +{ + HAL_SD_STATE_RESET = 0x00000000U, /*!< SD not yet initialized or disabled */ + HAL_SD_STATE_READY = 0x00000001U, /*!< SD initialized and ready for use */ + HAL_SD_STATE_TIMEOUT = 0x00000002U, /*!< SD Timeout state */ + HAL_SD_STATE_BUSY = 0x00000003U, /*!< SD process ongoing */ + HAL_SD_STATE_PROGRAMMING = 0x00000004U, /*!< SD Programming State */ + HAL_SD_STATE_RECEIVING = 0x00000005U, /*!< SD Receiving State */ + HAL_SD_STATE_TRANSFER = 0x00000006U, /*!< SD Transfert State */ + HAL_SD_STATE_ERROR = 0x0000000FU /*!< SD is in error state */ +}HAL_SD_StateTypeDef; +/** + * @} + */ + +/** @defgroup SD_Exported_Types_Group2 SD Card State enumeration structure + * @{ + */ +typedef uint32_t HAL_SD_CardStateTypeDef; + +#define HAL_SD_CARD_READY 0x00000001U /*!< Card state is ready */ +#define HAL_SD_CARD_IDENTIFICATION 0x00000002U /*!< Card is in identification state */ +#define HAL_SD_CARD_STANDBY 0x00000003U /*!< Card is in standby state */ +#define HAL_SD_CARD_TRANSFER 0x00000004U /*!< Card is in transfer state */ +#define HAL_SD_CARD_SENDING 0x00000005U /*!< Card is sending an operation */ +#define HAL_SD_CARD_RECEIVING 0x00000006U /*!< Card is receiving operation information */ +#define HAL_SD_CARD_PROGRAMMING 0x00000007U /*!< Card is in programming state */ +#define HAL_SD_CARD_DISCONNECTED 0x00000008U /*!< Card is disconnected */ +#define HAL_SD_CARD_ERROR 0x000000FFU /*!< Card response Error */ +/** + * @} + */ + +/** @defgroup SD_Exported_Types_Group3 SD Handle Structure definition + * @{ + */ +#define SD_InitTypeDef SDMMC_InitTypeDef +#define SD_TypeDef SDMMC_TypeDef + +/** + * @brief SD Card Information Structure definition + */ +typedef struct +{ + uint32_t CardType; /*!< Specifies the card Type */ + + uint32_t CardVersion; /*!< Specifies the card version */ + + uint32_t Class; /*!< Specifies the class of the card class */ + + uint32_t RelCardAdd; /*!< Specifies the Relative Card Address */ + + uint32_t BlockNbr; /*!< Specifies the Card Capacity in blocks */ + + uint32_t BlockSize; /*!< Specifies one block size in bytes */ + + uint32_t LogBlockNbr; /*!< Specifies the Card logical Capacity in blocks */ + + uint32_t LogBlockSize; /*!< Specifies logical block size in bytes */ + +}HAL_SD_CardInfoTypeDef; + +/** + * @brief SD handle Structure definition + */ +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) +typedef struct __SD_HandleTypeDef +#else +typedef struct +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ +{ + SD_TypeDef *Instance; /*!< 
SD registers base address */ + + SD_InitTypeDef Init; /*!< SD required parameters */ + + HAL_LockTypeDef Lock; /*!< SD locking object */ + + uint8_t *pTxBuffPtr; /*!< Pointer to SD Tx transfer Buffer */ + + uint32_t TxXferSize; /*!< SD Tx Transfer size */ + + uint8_t *pRxBuffPtr; /*!< Pointer to SD Rx transfer Buffer */ + + uint32_t RxXferSize; /*!< SD Rx Transfer size */ + + __IO uint32_t Context; /*!< SD transfer context */ + + __IO HAL_SD_StateTypeDef State; /*!< SD card State */ + + __IO uint32_t ErrorCode; /*!< SD Card Error codes */ + + DMA_HandleTypeDef *hdmatx; /*!< SD Tx DMA handle parameters */ + + DMA_HandleTypeDef *hdmarx; /*!< SD Rx DMA handle parameters */ + + HAL_SD_CardInfoTypeDef SdCard; /*!< SD Card information */ + + uint32_t CSD[4]; /*!< SD card specific data table */ + + uint32_t CID[4]; /*!< SD card identification number table */ + +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + void (* TxCpltCallback) (struct __SD_HandleTypeDef *hsd); + void (* RxCpltCallback) (struct __SD_HandleTypeDef *hsd); + void (* ErrorCallback) (struct __SD_HandleTypeDef *hsd); + void (* AbortCpltCallback) (struct __SD_HandleTypeDef *hsd); + + void (* MspInitCallback) (struct __SD_HandleTypeDef *hsd); + void (* MspDeInitCallback) (struct __SD_HandleTypeDef *hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ +}SD_HandleTypeDef; + +/** + * @} + */ + +/** @defgroup SD_Exported_Types_Group4 Card Specific Data: CSD Register + * @{ + */ +typedef struct +{ + __IO uint8_t CSDStruct; /*!< CSD structure */ + __IO uint8_t SysSpecVersion; /*!< System specification version */ + __IO uint8_t Reserved1; /*!< Reserved */ + __IO uint8_t TAAC; /*!< Data read access time 1 */ + __IO uint8_t NSAC; /*!< Data read access time 2 in CLK cycles */ + __IO uint8_t MaxBusClkFrec; /*!< Max. bus clock frequency */ + __IO uint16_t CardComdClasses; /*!< Card command classes */ + __IO uint8_t RdBlockLen; /*!< Max. read data block length */ + __IO uint8_t PartBlockRead; /*!< Partial blocks for read allowed */ + __IO uint8_t WrBlockMisalign; /*!< Write block misalignment */ + __IO uint8_t RdBlockMisalign; /*!< Read block misalignment */ + __IO uint8_t DSRImpl; /*!< DSR implemented */ + __IO uint8_t Reserved2; /*!< Reserved */ + __IO uint32_t DeviceSize; /*!< Device Size */ + __IO uint8_t MaxRdCurrentVDDMin; /*!< Max. read current @ VDD min */ + __IO uint8_t MaxRdCurrentVDDMax; /*!< Max. read current @ VDD max */ + __IO uint8_t MaxWrCurrentVDDMin; /*!< Max. write current @ VDD min */ + __IO uint8_t MaxWrCurrentVDDMax; /*!< Max. write current @ VDD max */ + __IO uint8_t DeviceSizeMul; /*!< Device size multiplier */ + __IO uint8_t EraseGrSize; /*!< Erase group size */ + __IO uint8_t EraseGrMul; /*!< Erase group size multiplier */ + __IO uint8_t WrProtectGrSize; /*!< Write protect group size */ + __IO uint8_t WrProtectGrEnable; /*!< Write protect group enable */ + __IO uint8_t ManDeflECC; /*!< Manufacturer default ECC */ + __IO uint8_t WrSpeedFact; /*!< Write speed factor */ + __IO uint8_t MaxWrBlockLen; /*!< Max. 
write data block length */ + __IO uint8_t WriteBlockPaPartial; /*!< Partial blocks for write allowed */ + __IO uint8_t Reserved3; /*!< Reserved */ + __IO uint8_t ContentProtectAppli; /*!< Content protection application */ + __IO uint8_t FileFormatGroup; /*!< File format group */ + __IO uint8_t CopyFlag; /*!< Copy flag (OTP) */ + __IO uint8_t PermWrProtect; /*!< Permanent write protection */ + __IO uint8_t TempWrProtect; /*!< Temporary write protection */ + __IO uint8_t FileFormat; /*!< File format */ + __IO uint8_t ECC; /*!< ECC code */ + __IO uint8_t CSD_CRC; /*!< CSD CRC */ + __IO uint8_t Reserved4; /*!< Always 1 */ +}HAL_SD_CardCSDTypeDef; +/** + * @} + */ + +/** @defgroup SD_Exported_Types_Group5 Card Identification Data: CID Register + * @{ + */ +typedef struct +{ + __IO uint8_t ManufacturerID; /*!< Manufacturer ID */ + __IO uint16_t OEM_AppliID; /*!< OEM/Application ID */ + __IO uint32_t ProdName1; /*!< Product Name part1 */ + __IO uint8_t ProdName2; /*!< Product Name part2 */ + __IO uint8_t ProdRev; /*!< Product Revision */ + __IO uint32_t ProdSN; /*!< Product Serial Number */ + __IO uint8_t Reserved1; /*!< Reserved1 */ + __IO uint16_t ManufactDate; /*!< Manufacturing Date */ + __IO uint8_t CID_CRC; /*!< CID CRC */ + __IO uint8_t Reserved2; /*!< Always 1 */ + +}HAL_SD_CardCIDTypeDef; +/** + * @} + */ + +/** @defgroup SD_Exported_Types_Group6 SD Card Status returned by ACMD13 + * @{ + */ +typedef struct +{ + __IO uint8_t DataBusWidth; /*!< Shows the currently defined data bus width */ + __IO uint8_t SecuredMode; /*!< Card is in secured mode of operation */ + __IO uint16_t CardType; /*!< Carries information about card type */ + __IO uint32_t ProtectedAreaSize; /*!< Carries information about the capacity of protected area */ + __IO uint8_t SpeedClass; /*!< Carries information about the speed class of the card */ + __IO uint8_t PerformanceMove; /*!< Carries information about the card's performance move */ + __IO uint8_t AllocationUnitSize; /*!< Carries information about the card's allocation unit size */ + __IO uint16_t EraseSize; /*!< Determines the number of AUs to be erased in one operation */ + __IO uint8_t EraseTimeout; /*!< Determines the timeout for any number of AU erase */ + __IO uint8_t EraseOffset; /*!< Carries information about the erase offset */ + +}HAL_SD_CardStatusTypeDef; +/** + * @} + */ + +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) +/** @defgroup SD_Exported_Types_Group7 SD Callback ID enumeration definition + * @{ + */ +typedef enum +{ + HAL_SD_TX_CPLT_CB_ID = 0x00U, /*!< SD Tx Complete Callback ID */ + HAL_SD_RX_CPLT_CB_ID = 0x01U, /*!< SD Rx Complete Callback ID */ + HAL_SD_ERROR_CB_ID = 0x02U, /*!< SD Error Callback ID */ + HAL_SD_ABORT_CB_ID = 0x03U, /*!< SD Abort Callback ID */ + + HAL_SD_MSP_INIT_CB_ID = 0x10U, /*!< SD MspInit Callback ID */ + HAL_SD_MSP_DEINIT_CB_ID = 0x11U /*!< SD MspDeInit Callback ID */ +}HAL_SD_CallbackIDTypeDef; +/** + * @} + */ + +/** @defgroup SD_Exported_Types_Group8 SD Callback pointer definition + * @{ + */ +typedef void (*pSD_CallbackTypeDef) (SD_HandleTypeDef *hsd); +/** + * @} + */ +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup SD_Exported_Constants Exported Constants + * @{ + */ + +#define BLOCKSIZE 512U /*!< Block size is 512 bytes */ + +/** @defgroup SD_Exported_Constansts_Group1 SD Error status enumeration Structure definition + * @{ + */ +#define HAL_SD_ERROR_NONE 
SDMMC_ERROR_NONE /*!< No error */ +#define HAL_SD_ERROR_CMD_CRC_FAIL SDMMC_ERROR_CMD_CRC_FAIL /*!< Command response received (but CRC check failed) */ +#define HAL_SD_ERROR_DATA_CRC_FAIL SDMMC_ERROR_DATA_CRC_FAIL /*!< Data block sent/received (CRC check failed) */ +#define HAL_SD_ERROR_CMD_RSP_TIMEOUT SDMMC_ERROR_CMD_RSP_TIMEOUT /*!< Command response timeout */ +#define HAL_SD_ERROR_DATA_TIMEOUT SDMMC_ERROR_DATA_TIMEOUT /*!< Data timeout */ +#define HAL_SD_ERROR_TX_UNDERRUN SDMMC_ERROR_TX_UNDERRUN /*!< Transmit FIFO underrun */ +#define HAL_SD_ERROR_RX_OVERRUN SDMMC_ERROR_RX_OVERRUN /*!< Receive FIFO overrun */ +#define HAL_SD_ERROR_ADDR_MISALIGNED SDMMC_ERROR_ADDR_MISALIGNED /*!< Misaligned address */ +#define HAL_SD_ERROR_BLOCK_LEN_ERR SDMMC_ERROR_BLOCK_LEN_ERR /*!< Transferred block length is not allowed for the card or the + number of transferred bytes does not match the block length */ +#define HAL_SD_ERROR_ERASE_SEQ_ERR SDMMC_ERROR_ERASE_SEQ_ERR /*!< An error in the sequence of erase command occurs */ +#define HAL_SD_ERROR_BAD_ERASE_PARAM SDMMC_ERROR_BAD_ERASE_PARAM /*!< An invalid selection for erase groups */ +#define HAL_SD_ERROR_WRITE_PROT_VIOLATION SDMMC_ERROR_WRITE_PROT_VIOLATION /*!< Attempt to program a write protect block */ +#define HAL_SD_ERROR_LOCK_UNLOCK_FAILED SDMMC_ERROR_LOCK_UNLOCK_FAILED /*!< Sequence or password error has been detected in unlock + command or if there was an attempt to access a locked card */ +#define HAL_SD_ERROR_COM_CRC_FAILED SDMMC_ERROR_COM_CRC_FAILED /*!< CRC check of the previous command failed */ +#define HAL_SD_ERROR_ILLEGAL_CMD SDMMC_ERROR_ILLEGAL_CMD /*!< Command is not legal for the card state */ +#define HAL_SD_ERROR_CARD_ECC_FAILED SDMMC_ERROR_CARD_ECC_FAILED /*!< Card internal ECC was applied but failed to correct the data */ +#define HAL_SD_ERROR_CC_ERR SDMMC_ERROR_CC_ERR /*!< Internal card controller error */ +#define HAL_SD_ERROR_GENERAL_UNKNOWN_ERR SDMMC_ERROR_GENERAL_UNKNOWN_ERR /*!< General or unknown error */ +#define HAL_SD_ERROR_STREAM_READ_UNDERRUN SDMMC_ERROR_STREAM_READ_UNDERRUN /*!< The card could not sustain data reading in stream rmode */ +#define HAL_SD_ERROR_STREAM_WRITE_OVERRUN SDMMC_ERROR_STREAM_WRITE_OVERRUN /*!< The card could not sustain data programming in stream mode */ +#define HAL_SD_ERROR_CID_CSD_OVERWRITE SDMMC_ERROR_CID_CSD_OVERWRITE /*!< CID/CSD overwrite error */ +#define HAL_SD_ERROR_WP_ERASE_SKIP SDMMC_ERROR_WP_ERASE_SKIP /*!< Only partial address space was erased */ +#define HAL_SD_ERROR_CARD_ECC_DISABLED SDMMC_ERROR_CARD_ECC_DISABLED /*!< Command has been executed without using internal ECC */ +#define HAL_SD_ERROR_ERASE_RESET SDMMC_ERROR_ERASE_RESET /*!< Erase sequence was cleared before executing because an out + of erase sequence command was received */ +#define HAL_SD_ERROR_AKE_SEQ_ERR SDMMC_ERROR_AKE_SEQ_ERR /*!< Error in sequence of authentication */ +#define HAL_SD_ERROR_INVALID_VOLTRANGE SDMMC_ERROR_INVALID_VOLTRANGE /*!< Error in case of invalid voltage range */ +#define HAL_SD_ERROR_ADDR_OUT_OF_RANGE SDMMC_ERROR_ADDR_OUT_OF_RANGE /*!< Error when addressed block is out of range */ +#define HAL_SD_ERROR_REQUEST_NOT_APPLICABLE SDMMC_ERROR_REQUEST_NOT_APPLICABLE /*!< Error when command request is not applicable */ +#define HAL_SD_ERROR_PARAM SDMMC_ERROR_INVALID_PARAMETER /*!< the used parameter is not valid */ +#define HAL_SD_ERROR_UNSUPPORTED_FEATURE SDMMC_ERROR_UNSUPPORTED_FEATURE /*!< Error when feature is not insupported */ +#define HAL_SD_ERROR_BUSY SDMMC_ERROR_BUSY /*!< Error when transfer 
process is busy */ +#define HAL_SD_ERROR_DMA SDMMC_ERROR_DMA /*!< Error while DMA transfer */ +#define HAL_SD_ERROR_TIMEOUT SDMMC_ERROR_TIMEOUT /*!< Timeout error */ + +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) +#define HAL_SD_ERROR_INVALID_CALLBACK SDMMC_ERROR_INVALID_PARAMETER /*!< Invalid callback error */ +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup SD_Exported_Constansts_Group2 SD context enumeration + * @{ + */ +#define SD_CONTEXT_NONE 0x00000000U /*!< None */ +#define SD_CONTEXT_READ_SINGLE_BLOCK 0x00000001U /*!< Read single block operation */ +#define SD_CONTEXT_READ_MULTIPLE_BLOCK 0x00000002U /*!< Read multiple blocks operation */ +#define SD_CONTEXT_WRITE_SINGLE_BLOCK 0x00000010U /*!< Write single block operation */ +#define SD_CONTEXT_WRITE_MULTIPLE_BLOCK 0x00000020U /*!< Write multiple blocks operation */ +#define SD_CONTEXT_IT 0x00000008U /*!< Process in Interrupt mode */ +#define SD_CONTEXT_DMA 0x00000080U /*!< Process in DMA mode */ + +/** + * @} + */ + +/** @defgroup SD_Exported_Constansts_Group3 SD Supported Memory Cards + * @{ + */ +#define CARD_SDSC 0x00000000U /*!< SD Standard Capacity <2Go */ +#define CARD_SDHC_SDXC 0x00000001U /*!< SD High Capacity <32Go, SD Extended Capacity <2To */ +#define CARD_SECURED 0x00000003U + +/** + * @} + */ + +/** @defgroup SD_Exported_Constansts_Group4 SD Supported Version + * @{ + */ +#define CARD_V1_X 0x00000000U +#define CARD_V2_X 0x00000001U +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup SD_Exported_macros SD Exported Macros + * @brief macros to handle interrupts and specific clock configurations + * @{ + */ +/** @brief Reset SD handle state. + * @param __HANDLE__ : SD handle. + * @retval None + */ +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) +#define __HAL_SD_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_SD_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_SD_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_SD_STATE_RESET) +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + +/** + * @brief Enable the SD device. + * @retval None + */ +#define __HAL_SD_ENABLE(__HANDLE__) __SDMMC_ENABLE((__HANDLE__)->Instance) + +/** + * @brief Disable the SD device. + * @retval None + */ +#define __HAL_SD_DISABLE(__HANDLE__) __SDMMC_DISABLE((__HANDLE__)->Instance) + +/** + * @brief Enable the SDMMC DMA transfer. + * @retval None + */ +#define __HAL_SD_DMA_ENABLE(__HANDLE__) __SDMMC_DMA_ENABLE((__HANDLE__)->Instance) + +/** + * @brief Disable the SDMMC DMA transfer. + * @retval None + */ +#define __HAL_SD_DMA_DISABLE(__HANDLE__) __SDMMC_DMA_DISABLE((__HANDLE__)->Instance) + +/** + * @brief Enable the SD device interrupt. + * @param __HANDLE__: SD Handle + * @param __INTERRUPT__: specifies the SDMMC interrupt sources to be enabled. 
+ * This parameter can be one or a combination of the following values: + * @arg SDMMC_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDMMC_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDMMC_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDMMC_IT_DTIMEOUT: Data timeout interrupt + * @arg SDMMC_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDMMC_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDMMC_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDMMC_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDMMC_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDACT: Command transfer in progress interrupt + * @arg SDMMC_IT_TXACT: Data transmit in progress interrupt + * @arg SDMMC_IT_RXACT: Data receive in progress interrupt + * @arg SDMMC_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt + * @arg SDMMC_IT_RXFIFOHF: Receive FIFO Half Full interrupt + * @arg SDMMC_IT_TXFIFOF: Transmit FIFO full interrupt + * @arg SDMMC_IT_RXFIFOF: Receive FIFO full interrupt + * @arg SDMMC_IT_TXFIFOE: Transmit FIFO empty interrupt + * @arg SDMMC_IT_RXFIFOE: Receive FIFO empty interrupt + * @arg SDMMC_IT_TXDAVL: Data available in transmit FIFO interrupt + * @arg SDMMC_IT_RXDAVL: Data available in receive FIFO interrupt + * @arg SDMMC_IT_SDIOIT: SDIO interrupt received interrupt + * @retval None + */ +#define __HAL_SD_ENABLE_IT(__HANDLE__, __INTERRUPT__) __SDMMC_ENABLE_IT((__HANDLE__)->Instance, (__INTERRUPT__)) + +/** + * @brief Disable the SD device interrupt. + * @param __HANDLE__: SD Handle + * @param __INTERRUPT__: specifies the SDMMC interrupt sources to be disabled. 
+ * This parameter can be one or a combination of the following values: + * @arg SDMMC_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDMMC_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDMMC_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDMMC_IT_DTIMEOUT: Data timeout interrupt + * @arg SDMMC_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDMMC_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDMMC_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDMMC_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDMMC_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDACT: Command transfer in progress interrupt + * @arg SDMMC_IT_TXACT: Data transmit in progress interrupt + * @arg SDMMC_IT_RXACT: Data receive in progress interrupt + * @arg SDMMC_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt + * @arg SDMMC_IT_RXFIFOHF: Receive FIFO Half Full interrupt + * @arg SDMMC_IT_TXFIFOF: Transmit FIFO full interrupt + * @arg SDMMC_IT_RXFIFOF: Receive FIFO full interrupt + * @arg SDMMC_IT_TXFIFOE: Transmit FIFO empty interrupt + * @arg SDMMC_IT_RXFIFOE: Receive FIFO empty interrupt + * @arg SDMMC_IT_TXDAVL: Data available in transmit FIFO interrupt + * @arg SDMMC_IT_RXDAVL: Data available in receive FIFO interrupt + * @arg SDMMC_IT_SDIOIT: SDIO interrupt received interrupt + * @retval None + */ +#define __HAL_SD_DISABLE_IT(__HANDLE__, __INTERRUPT__) __SDMMC_DISABLE_IT((__HANDLE__)->Instance, (__INTERRUPT__)) + +/** + * @brief Check whether the specified SD flag is set or not. + * @param __HANDLE__: SD Handle + * @param __FLAG__: specifies the flag to check. + * This parameter can be one of the following values: + * @arg SDMMC_FLAG_CCRCFAIL: Command response received (CRC check failed) + * @arg SDMMC_FLAG_DCRCFAIL: Data block sent/received (CRC check failed) + * @arg SDMMC_FLAG_CTIMEOUT: Command response timeout + * @arg SDMMC_FLAG_DTIMEOUT: Data timeout + * @arg SDMMC_FLAG_TXUNDERR: Transmit FIFO underrun error + * @arg SDMMC_FLAG_RXOVERR: Received FIFO overrun error + * @arg SDMMC_FLAG_CMDREND: Command response received (CRC check passed) + * @arg SDMMC_FLAG_CMDSENT: Command sent (no response required) + * @arg SDMMC_FLAG_DATAEND: Data end (data counter, DATACOUNT, is zero) + * @arg SDMMC_FLAG_DBCKEND: Data block sent/received (CRC check passed) + * @arg SDMMC_FLAG_CMDACT: Command transfer in progress + * @arg SDMMC_FLAG_TXACT: Data transmit in progress + * @arg SDMMC_FLAG_RXACT: Data receive in progress + * @arg SDMMC_FLAG_TXFIFOHE: Transmit FIFO Half Empty + * @arg SDMMC_FLAG_RXFIFOHF: Receive FIFO Half Full + * @arg SDMMC_FLAG_TXFIFOF: Transmit FIFO full + * @arg SDMMC_FLAG_RXFIFOF: Receive FIFO full + * @arg SDMMC_FLAG_TXFIFOE: Transmit FIFO empty + * @arg SDMMC_FLAG_RXFIFOE: Receive FIFO empty + * @arg SDMMC_FLAG_TXDAVL: Data available in transmit FIFO + * @arg SDMMC_FLAG_RXDAVL: Data available in receive FIFO + * @arg SDMMC_FLAG_SDIOIT: SDIO interrupt received + * @retval The new state of SD FLAG (SET or RESET). + */ +#define __HAL_SD_GET_FLAG(__HANDLE__, __FLAG__) __SDMMC_GET_FLAG((__HANDLE__)->Instance, (__FLAG__)) + +/** + * @brief Clear the SD's pending flags. + * @param __HANDLE__: SD Handle + * @param __FLAG__: specifies the flag to clear. 
+ * This parameter can be one or a combination of the following values: + * @arg SDMMC_FLAG_CCRCFAIL: Command response received (CRC check failed) + * @arg SDMMC_FLAG_DCRCFAIL: Data block sent/received (CRC check failed) + * @arg SDMMC_FLAG_CTIMEOUT: Command response timeout + * @arg SDMMC_FLAG_DTIMEOUT: Data timeout + * @arg SDMMC_FLAG_TXUNDERR: Transmit FIFO underrun error + * @arg SDMMC_FLAG_RXOVERR: Received FIFO overrun error + * @arg SDMMC_FLAG_CMDREND: Command response received (CRC check passed) + * @arg SDMMC_FLAG_CMDSENT: Command sent (no response required) + * @arg SDMMC_FLAG_DATAEND: Data end (data counter, DATACOUNT, is zero) + * @arg SDMMC_FLAG_DBCKEND: Data block sent/received (CRC check passed) + * @arg SDMMC_FLAG_SDIOIT: SDIO interrupt received + * @retval None + */ +#define __HAL_SD_CLEAR_FLAG(__HANDLE__, __FLAG__) __SDMMC_CLEAR_FLAG((__HANDLE__)->Instance, (__FLAG__)) + +/** + * @brief Check whether the specified SD interrupt has occurred or not. + * @param __HANDLE__: SD Handle + * @param __INTERRUPT__: specifies the SDMMC interrupt source to check. + * This parameter can be one of the following values: + * @arg SDMMC_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDMMC_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDMMC_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDMMC_IT_DTIMEOUT: Data timeout interrupt + * @arg SDMMC_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDMMC_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDMMC_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDMMC_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDMMC_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDACT: Command transfer in progress interrupt + * @arg SDMMC_IT_TXACT: Data transmit in progress interrupt + * @arg SDMMC_IT_RXACT: Data receive in progress interrupt + * @arg SDMMC_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt + * @arg SDMMC_IT_RXFIFOHF: Receive FIFO Half Full interrupt + * @arg SDMMC_IT_TXFIFOF: Transmit FIFO full interrupt + * @arg SDMMC_IT_RXFIFOF: Receive FIFO full interrupt + * @arg SDMMC_IT_TXFIFOE: Transmit FIFO empty interrupt + * @arg SDMMC_IT_RXFIFOE: Receive FIFO empty interrupt + * @arg SDMMC_IT_TXDAVL: Data available in transmit FIFO interrupt + * @arg SDMMC_IT_RXDAVL: Data available in receive FIFO interrupt + * @arg SDMMC_IT_SDIOIT: SDIO interrupt received interrupt + * @retval The new state of SD IT (SET or RESET). + */ +#define __HAL_SD_GET_IT(__HANDLE__, __INTERRUPT__) __SDMMC_GET_IT((__HANDLE__)->Instance, (__INTERRUPT__)) + +/** + * @brief Clear the SD's interrupt pending bits. + * @param __HANDLE__: SD Handle + * @param __INTERRUPT__: specifies the interrupt pending bit to clear. 
+ * This parameter can be one or a combination of the following values: + * @arg SDMMC_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDMMC_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDMMC_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDMMC_IT_DTIMEOUT: Data timeout interrupt + * @arg SDMMC_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDMMC_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDMMC_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDMMC_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDMMC_IT_SDIOIT: SDIO interrupt received interrupt + * @retval None + */ +#define __HAL_SD_CLEAR_IT(__HANDLE__, __INTERRUPT__) __SDMMC_CLEAR_IT((__HANDLE__)->Instance, (__INTERRUPT__)) + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup SD_Exported_Functions SD Exported Functions + * @{ + */ + +/** @defgroup SD_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ +HAL_StatusTypeDef HAL_SD_Init(SD_HandleTypeDef *hsd); +HAL_StatusTypeDef HAL_SD_InitCard(SD_HandleTypeDef *hsd); +HAL_StatusTypeDef HAL_SD_DeInit (SD_HandleTypeDef *hsd); +void HAL_SD_MspInit(SD_HandleTypeDef *hsd); +void HAL_SD_MspDeInit(SD_HandleTypeDef *hsd); +/** + * @} + */ + +/** @defgroup SD_Exported_Functions_Group2 Input and Output operation functions + * @{ + */ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_SD_ReadBlocks(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks, uint32_t Timeout); +HAL_StatusTypeDef HAL_SD_WriteBlocks(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks, uint32_t Timeout); +HAL_StatusTypeDef HAL_SD_Erase(SD_HandleTypeDef *hsd, uint32_t BlockStartAdd, uint32_t BlockEndAdd); +/* Non-Blocking mode: IT */ +HAL_StatusTypeDef HAL_SD_ReadBlocks_IT(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks); +HAL_StatusTypeDef HAL_SD_WriteBlocks_IT(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_SD_ReadBlocks_DMA(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks); +HAL_StatusTypeDef HAL_SD_WriteBlocks_DMA(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks); + +void HAL_SD_IRQHandler(SD_HandleTypeDef *hsd); + +/* Callback in non blocking modes (DMA) */ +void HAL_SD_TxCpltCallback(SD_HandleTypeDef *hsd); +void HAL_SD_RxCpltCallback(SD_HandleTypeDef *hsd); +void HAL_SD_ErrorCallback(SD_HandleTypeDef *hsd); +void HAL_SD_AbortCallback(SD_HandleTypeDef *hsd); + +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) +/* SD callback registering/unregistering */ +HAL_StatusTypeDef HAL_SD_RegisterCallback (SD_HandleTypeDef *hsd, HAL_SD_CallbackIDTypeDef CallbackId, pSD_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_SD_UnRegisterCallback(SD_HandleTypeDef *hsd, HAL_SD_CallbackIDTypeDef CallbackId); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup SD_Exported_Functions_Group3 Peripheral Control functions + * @{ + */ +HAL_StatusTypeDef HAL_SD_ConfigWideBusOperation(SD_HandleTypeDef *hsd, uint32_t WideMode); +/** + * @} + */ + +/** @defgroup SD_Exported_Functions_Group4 SD card related functions + * @{ + 
*/ +HAL_StatusTypeDef HAL_SD_SendSDStatus(SD_HandleTypeDef *hsd, uint32_t *pSDstatus); +HAL_SD_CardStateTypeDef HAL_SD_GetCardState(SD_HandleTypeDef *hsd); +HAL_StatusTypeDef HAL_SD_GetCardCID(SD_HandleTypeDef *hsd, HAL_SD_CardCIDTypeDef *pCID); +HAL_StatusTypeDef HAL_SD_GetCardCSD(SD_HandleTypeDef *hsd, HAL_SD_CardCSDTypeDef *pCSD); +HAL_StatusTypeDef HAL_SD_GetCardStatus(SD_HandleTypeDef *hsd, HAL_SD_CardStatusTypeDef *pStatus); +HAL_StatusTypeDef HAL_SD_GetCardInfo(SD_HandleTypeDef *hsd, HAL_SD_CardInfoTypeDef *pCardInfo); +/** + * @} + */ + +/** @defgroup SD_Exported_Functions_Group5 Peripheral State and Errors functions + * @{ + */ +HAL_SD_StateTypeDef HAL_SD_GetState(SD_HandleTypeDef *hsd); +uint32_t HAL_SD_GetError(SD_HandleTypeDef *hsd); +/** + * @} + */ + +/** @defgroup SD_Exported_Functions_Group6 Perioheral Abort management + * @{ + */ +HAL_StatusTypeDef HAL_SD_Abort(SD_HandleTypeDef *hsd); +HAL_StatusTypeDef HAL_SD_Abort_IT(SD_HandleTypeDef *hsd); +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/** @defgroup SD_Private_Types SD Private Types + * @{ + */ + +/** + * @} + */ + +/* Private defines -----------------------------------------------------------*/ +/** @defgroup SD_Private_Defines SD Private Defines + * @{ + */ + +/** + * @} + */ + +/* Private variables ---------------------------------------------------------*/ +/** @defgroup SD_Private_Variables SD Private Variables + * @{ + */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup SD_Private_Constants SD Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup SD_Private_Macros SD Private Macros + * @{ + */ + +/** + * @} + */ + +/* Private functions prototypes ----------------------------------------------*/ +/** @defgroup SD_Private_Functions_Prototypes SD Private Functions Prototypes + * @{ + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup SD_Private_Functions SD Private Functions + * @{ + */ + +/** + * @} + */ + + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* SDMMC1 */ + +#ifdef __cplusplus +} +#endif + + +#endif /* STM32F7xx_HAL_SD_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_sdram.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_sdram.h new file mode 100644 index 000000000..334f31631 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_sdram.h @@ -0,0 +1,226 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_sdram.h + * @author MCD Application Team + * @brief Header file of SDRAM HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_SDRAM_H +#define __STM32F7xx_HAL_SDRAM_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_ll_fmc.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup SDRAM + * @{ + */ + +/* Exported typedef ----------------------------------------------------------*/ + +/** @defgroup SDRAM_Exported_Types SDRAM Exported Types + * @{ + */ + +/** + * @brief HAL SDRAM State structure definition + */ +typedef enum +{ + HAL_SDRAM_STATE_RESET = 0x00U, /*!< SDRAM not yet initialized or disabled */ + HAL_SDRAM_STATE_READY = 0x01U, /*!< SDRAM initialized and ready for use */ + HAL_SDRAM_STATE_BUSY = 0x02U, /*!< SDRAM internal process is ongoing */ + HAL_SDRAM_STATE_ERROR = 0x03U, /*!< SDRAM error state */ + HAL_SDRAM_STATE_WRITE_PROTECTED = 0x04U, /*!< SDRAM device write protected */ + HAL_SDRAM_STATE_PRECHARGED = 0x05U /*!< SDRAM device precharged */ + +}HAL_SDRAM_StateTypeDef; + +/** + * @brief SDRAM handle Structure definition + */ +#if (USE_HAL_SDRAM_REGISTER_CALLBACKS == 1) +typedef struct __SDRAM_HandleTypeDef +#else +typedef struct +#endif /* USE_HAL_SDRAM_REGISTER_CALLBACKS */ +{ + FMC_SDRAM_TypeDef *Instance; /*!< Register base address */ + + FMC_SDRAM_InitTypeDef Init; /*!< SDRAM device configuration parameters */ + + __IO HAL_SDRAM_StateTypeDef State; /*!< SDRAM access state */ + + HAL_LockTypeDef Lock; /*!< SDRAM locking object */ + + DMA_HandleTypeDef *hdma; /*!< Pointer DMA handler */ + +#if (USE_HAL_SDRAM_REGISTER_CALLBACKS == 1) + void (* MspInitCallback) ( struct __SDRAM_HandleTypeDef * hsdram); /*!< SDRAM Msp Init callback */ + void (* MspDeInitCallback) ( struct __SDRAM_HandleTypeDef * hsdram); /*!< SDRAM Msp DeInit callback */ + void (* RefreshErrorCallback) ( struct __SDRAM_HandleTypeDef * hsdram); /*!< SDRAM Refresh Error callback */ + void (* DmaXferCpltCallback) ( DMA_HandleTypeDef * hdma); /*!< SDRAM DMA Xfer Complete callback */ + void (* DmaXferErrorCallback) ( DMA_HandleTypeDef * hdma); /*!< SDRAM DMA Xfer Error callback */ +#endif +} SDRAM_HandleTypeDef; + +#if (USE_HAL_SDRAM_REGISTER_CALLBACKS == 1) +/** + * @brief HAL SDRAM Callback ID enumeration definition + */ +typedef enum +{ + HAL_SDRAM_MSP_INIT_CB_ID = 0x00U, /*!< SDRAM MspInit Callback ID */ + HAL_SDRAM_MSP_DEINIT_CB_ID = 0x01U, /*!< SDRAM MspDeInit Callback ID */ + HAL_SDRAM_REFRESH_ERR_CB_ID = 0x02U, /*!< SDRAM Refresh Error Callback ID */ + HAL_SDRAM_DMA_XFER_CPLT_CB_ID = 0x03U, /*!< SDRAM DMA Xfer Complete Callback ID */ + HAL_SDRAM_DMA_XFER_ERR_CB_ID = 0x04U /*!< SDRAM DMA Xfer Error Callback ID */ +}HAL_SDRAM_CallbackIDTypeDef; + +/** + * @brief HAL SDRAM Callback pointer definition + */ +typedef void (*pSDRAM_CallbackTypeDef)(SDRAM_HandleTypeDef *hsdram); +typedef void (*pSDRAM_DmaCallbackTypeDef)(DMA_HandleTypeDef *hdma); +#endif +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/* Exported macro ------------------------------------------------------------*/ + +/** @defgroup SDRAM_Exported_Macros SDRAM Exported 
Macros + * @{ + */ + +/** @brief Reset SDRAM handle state + * @param __HANDLE__ specifies the SDRAM handle. + * @retval None + */ +#if (USE_HAL_SDRAM_REGISTER_CALLBACKS == 1) +#define __HAL_SDRAM_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_SDRAM_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_SDRAM_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_SDRAM_STATE_RESET) +#endif +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup SDRAM_Exported_Functions SDRAM Exported Functions + * @{ + */ + +/** @addtogroup SDRAM_Exported_Functions_Group1 + * @{ + */ + +/* Initialization/de-initialization functions *********************************/ +HAL_StatusTypeDef HAL_SDRAM_Init(SDRAM_HandleTypeDef *hsdram, FMC_SDRAM_TimingTypeDef *Timing); +HAL_StatusTypeDef HAL_SDRAM_DeInit(SDRAM_HandleTypeDef *hsdram); +void HAL_SDRAM_MspInit(SDRAM_HandleTypeDef *hsdram); +void HAL_SDRAM_MspDeInit(SDRAM_HandleTypeDef *hsdram); + +void HAL_SDRAM_IRQHandler(SDRAM_HandleTypeDef *hsdram); +void HAL_SDRAM_RefreshErrorCallback(SDRAM_HandleTypeDef *hsdram); +void HAL_SDRAM_DMA_XferCpltCallback(DMA_HandleTypeDef *hdma); +void HAL_SDRAM_DMA_XferErrorCallback(DMA_HandleTypeDef *hdma); + +/** + * @} + */ + +/** @addtogroup SDRAM_Exported_Functions_Group2 + * @{ + */ +/* I/O operation functions ****************************************************/ +HAL_StatusTypeDef HAL_SDRAM_Read_8b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint8_t *pDstBuffer, uint32_t BufferSize); +HAL_StatusTypeDef HAL_SDRAM_Write_8b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint8_t *pSrcBuffer, uint32_t BufferSize); +HAL_StatusTypeDef HAL_SDRAM_Read_16b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint16_t *pDstBuffer, uint32_t BufferSize); +HAL_StatusTypeDef HAL_SDRAM_Write_16b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint16_t *pSrcBuffer, uint32_t BufferSize); +HAL_StatusTypeDef HAL_SDRAM_Read_32b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint32_t *pDstBuffer, uint32_t BufferSize); +HAL_StatusTypeDef HAL_SDRAM_Write_32b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint32_t *pSrcBuffer, uint32_t BufferSize); + +HAL_StatusTypeDef HAL_SDRAM_Read_DMA(SDRAM_HandleTypeDef *hsdram, uint32_t * pAddress, uint32_t *pDstBuffer, uint32_t BufferSize); +HAL_StatusTypeDef HAL_SDRAM_Write_DMA(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint32_t *pSrcBuffer, uint32_t BufferSize); + +#if (USE_HAL_SDRAM_REGISTER_CALLBACKS == 1) +/* SDRAM callback registering/unregistering */ +HAL_StatusTypeDef HAL_SDRAM_RegisterCallback(SDRAM_HandleTypeDef *hsdram, HAL_SDRAM_CallbackIDTypeDef CallbackId, pSDRAM_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_SDRAM_UnRegisterCallback(SDRAM_HandleTypeDef *hsdram, HAL_SDRAM_CallbackIDTypeDef CallbackId); +HAL_StatusTypeDef HAL_SDRAM_RegisterDmaCallback(SDRAM_HandleTypeDef *hsdram, HAL_SDRAM_CallbackIDTypeDef CallbackId, pSDRAM_DmaCallbackTypeDef pCallback); +#endif + +/** + * @} + */ + +/** @addtogroup SDRAM_Exported_Functions_Group3 + * @{ + */ +/* SDRAM Control functions *****************************************************/ +HAL_StatusTypeDef HAL_SDRAM_WriteProtection_Enable(SDRAM_HandleTypeDef *hsdram); +HAL_StatusTypeDef HAL_SDRAM_WriteProtection_Disable(SDRAM_HandleTypeDef *hsdram); +HAL_StatusTypeDef HAL_SDRAM_SendCommand(SDRAM_HandleTypeDef *hsdram, FMC_SDRAM_CommandTypeDef *Command, uint32_t Timeout); 
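+/* Illustrative usage sketch for the command/refresh functions declared around this
+   point (HAL_SDRAM_SendCommand above, HAL_SDRAM_ProgramRefreshRate below). It assumes
+   an SDRAM device wired to FMC bank 2 and a CubeMX-style handle named hsdram2; the
+   mode-register value and refresh count are device- and clock-dependent placeholders,
+   not values taken from this project:
+
+   FMC_SDRAM_CommandTypeDef cmd;
+
+   cmd.CommandMode            = FMC_SDRAM_CMD_CLK_ENABLE;      // start the SDRAM clock
+   cmd.CommandTarget          = FMC_SDRAM_CMD_TARGET_BANK2;
+   cmd.AutoRefreshNumber      = 1;
+   cmd.ModeRegisterDefinition = 0;
+   HAL_SDRAM_SendCommand(&hsdram2, &cmd, HAL_MAX_DELAY);
+   HAL_Delay(1);                                               // wait >100 us after clock enable
+
+   cmd.CommandMode = FMC_SDRAM_CMD_PALL;                       // precharge all banks
+   HAL_SDRAM_SendCommand(&hsdram2, &cmd, HAL_MAX_DELAY);
+
+   cmd.CommandMode       = FMC_SDRAM_CMD_AUTOREFRESH_MODE;     // issue auto-refresh cycles
+   cmd.AutoRefreshNumber = 8;
+   HAL_SDRAM_SendCommand(&hsdram2, &cmd, HAL_MAX_DELAY);
+
+   cmd.CommandMode            = FMC_SDRAM_CMD_LOAD_MODE;       // program the SDRAM mode register
+   cmd.ModeRegisterDefinition = 0x0231;                        // placeholder: burst/CAS settings per datasheet
+   HAL_SDRAM_SendCommand(&hsdram2, &cmd, HAL_MAX_DELAY);
+
+   HAL_SDRAM_ProgramRefreshRate(&hsdram2, 1386);               // placeholder refresh counter for the FMC clock
+*/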
+HAL_StatusTypeDef HAL_SDRAM_ProgramRefreshRate(SDRAM_HandleTypeDef *hsdram, uint32_t RefreshRate); +HAL_StatusTypeDef HAL_SDRAM_SetAutoRefreshNumber(SDRAM_HandleTypeDef *hsdram, uint32_t AutoRefreshNumber); +uint32_t HAL_SDRAM_GetModeStatus(SDRAM_HandleTypeDef *hsdram); + +/** + * @} + */ + +/** @addtogroup SDRAM_Exported_Functions_Group4 + * @{ + */ +/* SDRAM State functions ********************************************************/ +HAL_SDRAM_StateTypeDef HAL_SDRAM_GetState(SDRAM_HandleTypeDef *hsdram); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_SDRAM_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_spi.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_spi.h new file mode 100644 index 000000000..4d5be75d8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_spi.h @@ -0,0 +1,846 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_spi.h + * @author MCD Application Team + * @brief Header file of SPI HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_SPI_H +#define STM32F7xx_HAL_SPI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup SPI + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup SPI_Exported_Types SPI Exported Types + * @{ + */ + +/** + * @brief SPI Configuration Structure definition + */ +typedef struct +{ + uint32_t Mode; /*!< Specifies the SPI operating mode. + This parameter can be a value of @ref SPI_Mode */ + + uint32_t Direction; /*!< Specifies the SPI bidirectional mode state. + This parameter can be a value of @ref SPI_Direction */ + + uint32_t DataSize; /*!< Specifies the SPI data size. + This parameter can be a value of @ref SPI_Data_Size */ + + uint32_t CLKPolarity; /*!< Specifies the serial clock steady state. + This parameter can be a value of @ref SPI_Clock_Polarity */ + + uint32_t CLKPhase; /*!< Specifies the clock active edge for the bit capture. + This parameter can be a value of @ref SPI_Clock_Phase */ + + uint32_t NSS; /*!< Specifies whether the NSS signal is managed by + hardware (NSS pin) or by software using the SSI bit. + This parameter can be a value of @ref SPI_Slave_Select_management */ + + uint32_t BaudRatePrescaler; /*!< Specifies the Baud Rate prescaler value which will be + used to configure the transmit and receive SCK clock. + This parameter can be a value of @ref SPI_BaudRate_Prescaler + @note The communication clock is derived from the master + clock. The slave clock does not need to be set. */ + + uint32_t FirstBit; /*!< Specifies whether data transfers start from MSB or LSB bit. + This parameter can be a value of @ref SPI_MSB_LSB_transmission */ + + uint32_t TIMode; /*!< Specifies if the TI mode is enabled or not. + This parameter can be a value of @ref SPI_TI_mode */ + + uint32_t CRCCalculation; /*!< Specifies if the CRC calculation is enabled or not. + This parameter can be a value of @ref SPI_CRC_Calculation */ + + uint32_t CRCPolynomial; /*!< Specifies the polynomial used for the CRC calculation. + This parameter must be an odd number between Min_Data = 1 and Max_Data = 65535 */ + + uint32_t CRCLength; /*!< Specifies the CRC Length used for the CRC calculation. + CRC Length is only used with Data8 and Data16, not other data size + This parameter can be a value of @ref SPI_CRC_length */ + + uint32_t NSSPMode; /*!< Specifies whether the NSSP signal is enabled or not . + This parameter can be a value of @ref SPI_NSSP_Mode + This mode is activated by the NSSP bit in the SPIx_CR2 register and + it takes effect only if the SPI interface is configured as Motorola SPI + master (FRF=0) with capture on the first edge (SPIx_CR1 CPHA = 0, + CPOL setting is ignored).. 
*/ +} SPI_InitTypeDef; + +/** + * @brief HAL SPI State structure definition + */ +typedef enum +{ + HAL_SPI_STATE_RESET = 0x00U, /*!< Peripheral not Initialized */ + HAL_SPI_STATE_READY = 0x01U, /*!< Peripheral Initialized and ready for use */ + HAL_SPI_STATE_BUSY = 0x02U, /*!< an internal process is ongoing */ + HAL_SPI_STATE_BUSY_TX = 0x03U, /*!< Data Transmission process is ongoing */ + HAL_SPI_STATE_BUSY_RX = 0x04U, /*!< Data Reception process is ongoing */ + HAL_SPI_STATE_BUSY_TX_RX = 0x05U, /*!< Data Transmission and Reception process is ongoing */ + HAL_SPI_STATE_ERROR = 0x06U, /*!< SPI error state */ + HAL_SPI_STATE_ABORT = 0x07U /*!< SPI abort is ongoing */ +} HAL_SPI_StateTypeDef; + +/** + * @brief SPI handle Structure definition + */ +typedef struct __SPI_HandleTypeDef +{ + SPI_TypeDef *Instance; /*!< SPI registers base address */ + + SPI_InitTypeDef Init; /*!< SPI communication parameters */ + + uint8_t *pTxBuffPtr; /*!< Pointer to SPI Tx transfer Buffer */ + + uint16_t TxXferSize; /*!< SPI Tx Transfer size */ + + __IO uint16_t TxXferCount; /*!< SPI Tx Transfer Counter */ + + uint8_t *pRxBuffPtr; /*!< Pointer to SPI Rx transfer Buffer */ + + uint16_t RxXferSize; /*!< SPI Rx Transfer size */ + + __IO uint16_t RxXferCount; /*!< SPI Rx Transfer Counter */ + + uint32_t CRCSize; /*!< SPI CRC size used for the transfer */ + + void (*RxISR)(struct __SPI_HandleTypeDef *hspi); /*!< function pointer on Rx ISR */ + + void (*TxISR)(struct __SPI_HandleTypeDef *hspi); /*!< function pointer on Tx ISR */ + + DMA_HandleTypeDef *hdmatx; /*!< SPI Tx DMA Handle parameters */ + + DMA_HandleTypeDef *hdmarx; /*!< SPI Rx DMA Handle parameters */ + + HAL_LockTypeDef Lock; /*!< Locking object */ + + __IO HAL_SPI_StateTypeDef State; /*!< SPI communication state */ + + __IO uint32_t ErrorCode; /*!< SPI Error code */ + +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + void (* TxCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Tx Completed callback */ + void (* RxCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Rx Completed callback */ + void (* TxRxCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI TxRx Completed callback */ + void (* TxHalfCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Tx Half Completed callback */ + void (* RxHalfCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Rx Half Completed callback */ + void (* TxRxHalfCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI TxRx Half Completed callback */ + void (* ErrorCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Error callback */ + void (* AbortCpltCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Abort callback */ + void (* MspInitCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Msp Init callback */ + void (* MspDeInitCallback)(struct __SPI_HandleTypeDef *hspi); /*!< SPI Msp DeInit callback */ + +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} SPI_HandleTypeDef; + +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) +/** + * @brief HAL SPI Callback ID enumeration definition + */ +typedef enum +{ + HAL_SPI_TX_COMPLETE_CB_ID = 0x00U, /*!< SPI Tx Completed callback ID */ + HAL_SPI_RX_COMPLETE_CB_ID = 0x01U, /*!< SPI Rx Completed callback ID */ + HAL_SPI_TX_RX_COMPLETE_CB_ID = 0x02U, /*!< SPI TxRx Completed callback ID */ + HAL_SPI_TX_HALF_COMPLETE_CB_ID = 0x03U, /*!< SPI Tx Half Completed callback ID */ + HAL_SPI_RX_HALF_COMPLETE_CB_ID = 0x04U, /*!< SPI Rx Half Completed callback ID */ + HAL_SPI_TX_RX_HALF_COMPLETE_CB_ID = 0x05U, /*!< SPI TxRx Half Completed callback ID */ + HAL_SPI_ERROR_CB_ID = 
0x06U, /*!< SPI Error callback ID */ + HAL_SPI_ABORT_CB_ID = 0x07U, /*!< SPI Abort callback ID */ + HAL_SPI_MSPINIT_CB_ID = 0x08U, /*!< SPI Msp Init callback ID */ + HAL_SPI_MSPDEINIT_CB_ID = 0x09U /*!< SPI Msp DeInit callback ID */ + +} HAL_SPI_CallbackIDTypeDef; + +/** + * @brief HAL SPI Callback pointer definition + */ +typedef void (*pSPI_CallbackTypeDef)(SPI_HandleTypeDef *hspi); /*!< pointer to an SPI callback function */ + +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup SPI_Exported_Constants SPI Exported Constants + * @{ + */ + +/** @defgroup SPI_Error_Code SPI Error Code + * @{ + */ +#define HAL_SPI_ERROR_NONE (0x00000000U) /*!< No error */ +#define HAL_SPI_ERROR_MODF (0x00000001U) /*!< MODF error */ +#define HAL_SPI_ERROR_CRC (0x00000002U) /*!< CRC error */ +#define HAL_SPI_ERROR_OVR (0x00000004U) /*!< OVR error */ +#define HAL_SPI_ERROR_FRE (0x00000008U) /*!< FRE error */ +#define HAL_SPI_ERROR_DMA (0x00000010U) /*!< DMA transfer error */ +#define HAL_SPI_ERROR_FLAG (0x00000020U) /*!< Error on RXNE/TXE/BSY/FTLVL/FRLVL Flag */ +#define HAL_SPI_ERROR_ABORT (0x00000040U) /*!< Error during SPI Abort procedure */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) +#define HAL_SPI_ERROR_INVALID_CALLBACK (0x00000080U) /*!< Invalid Callback error */ +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup SPI_Mode SPI Mode + * @{ + */ +#define SPI_MODE_SLAVE (0x00000000U) +#define SPI_MODE_MASTER (SPI_CR1_MSTR | SPI_CR1_SSI) +/** + * @} + */ + +/** @defgroup SPI_Direction SPI Direction Mode + * @{ + */ +#define SPI_DIRECTION_2LINES (0x00000000U) +#define SPI_DIRECTION_2LINES_RXONLY SPI_CR1_RXONLY +#define SPI_DIRECTION_1LINE SPI_CR1_BIDIMODE +/** + * @} + */ + +/** @defgroup SPI_Data_Size SPI Data Size + * @{ + */ +#define SPI_DATASIZE_4BIT (0x00000300U) +#define SPI_DATASIZE_5BIT (0x00000400U) +#define SPI_DATASIZE_6BIT (0x00000500U) +#define SPI_DATASIZE_7BIT (0x00000600U) +#define SPI_DATASIZE_8BIT (0x00000700U) +#define SPI_DATASIZE_9BIT (0x00000800U) +#define SPI_DATASIZE_10BIT (0x00000900U) +#define SPI_DATASIZE_11BIT (0x00000A00U) +#define SPI_DATASIZE_12BIT (0x00000B00U) +#define SPI_DATASIZE_13BIT (0x00000C00U) +#define SPI_DATASIZE_14BIT (0x00000D00U) +#define SPI_DATASIZE_15BIT (0x00000E00U) +#define SPI_DATASIZE_16BIT (0x00000F00U) +/** + * @} + */ + +/** @defgroup SPI_Clock_Polarity SPI Clock Polarity + * @{ + */ +#define SPI_POLARITY_LOW (0x00000000U) +#define SPI_POLARITY_HIGH SPI_CR1_CPOL +/** + * @} + */ + +/** @defgroup SPI_Clock_Phase SPI Clock Phase + * @{ + */ +#define SPI_PHASE_1EDGE (0x00000000U) +#define SPI_PHASE_2EDGE SPI_CR1_CPHA +/** + * @} + */ + +/** @defgroup SPI_Slave_Select_management SPI Slave Select Management + * @{ + */ +#define SPI_NSS_SOFT SPI_CR1_SSM +#define SPI_NSS_HARD_INPUT (0x00000000U) +#define SPI_NSS_HARD_OUTPUT (SPI_CR2_SSOE << 16U) +/** + * @} + */ + +/** @defgroup SPI_NSSP_Mode SPI NSS Pulse Mode + * @{ + */ +#define SPI_NSS_PULSE_ENABLE SPI_CR2_NSSP +#define SPI_NSS_PULSE_DISABLE (0x00000000U) +/** + * @} + */ + +/** @defgroup SPI_BaudRate_Prescaler SPI BaudRate Prescaler + * @{ + */ +#define SPI_BAUDRATEPRESCALER_2 (0x00000000U) +#define SPI_BAUDRATEPRESCALER_4 (SPI_CR1_BR_0) +#define SPI_BAUDRATEPRESCALER_8 (SPI_CR1_BR_1) +#define SPI_BAUDRATEPRESCALER_16 (SPI_CR1_BR_1 | SPI_CR1_BR_0) +#define SPI_BAUDRATEPRESCALER_32 (SPI_CR1_BR_2) +#define SPI_BAUDRATEPRESCALER_64 (SPI_CR1_BR_2 | SPI_CR1_BR_0) +#define 
SPI_BAUDRATEPRESCALER_128 (SPI_CR1_BR_2 | SPI_CR1_BR_1) +#define SPI_BAUDRATEPRESCALER_256 (SPI_CR1_BR_2 | SPI_CR1_BR_1 | SPI_CR1_BR_0) +/** + * @} + */ + +/** @defgroup SPI_MSB_LSB_transmission SPI MSB LSB Transmission + * @{ + */ +#define SPI_FIRSTBIT_MSB (0x00000000U) +#define SPI_FIRSTBIT_LSB SPI_CR1_LSBFIRST +/** + * @} + */ + +/** @defgroup SPI_TI_mode SPI TI Mode + * @{ + */ +#define SPI_TIMODE_DISABLE (0x00000000U) +#define SPI_TIMODE_ENABLE SPI_CR2_FRF +/** + * @} + */ + +/** @defgroup SPI_CRC_Calculation SPI CRC Calculation + * @{ + */ +#define SPI_CRCCALCULATION_DISABLE (0x00000000U) +#define SPI_CRCCALCULATION_ENABLE SPI_CR1_CRCEN +/** + * @} + */ + +/** @defgroup SPI_CRC_length SPI CRC Length + * @{ + * This parameter can be one of the following values: + * SPI_CRC_LENGTH_DATASIZE: aligned with the data size + * SPI_CRC_LENGTH_8BIT : CRC 8bit + * SPI_CRC_LENGTH_16BIT : CRC 16bit + */ +#define SPI_CRC_LENGTH_DATASIZE (0x00000000U) +#define SPI_CRC_LENGTH_8BIT (0x00000001U) +#define SPI_CRC_LENGTH_16BIT (0x00000002U) +/** + * @} + */ + +/** @defgroup SPI_FIFO_reception_threshold SPI FIFO Reception Threshold + * @{ + * This parameter can be one of the following values: + * SPI_RXFIFO_THRESHOLD or SPI_RXFIFO_THRESHOLD_QF : + * RXNE event is generated if the FIFO + * level is greater or equal to 1/4(8-bits). + * SPI_RXFIFO_THRESHOLD_HF: RXNE event is generated if the FIFO + * level is greater or equal to 1/2(16 bits). */ +#define SPI_RXFIFO_THRESHOLD SPI_CR2_FRXTH +#define SPI_RXFIFO_THRESHOLD_QF SPI_CR2_FRXTH +#define SPI_RXFIFO_THRESHOLD_HF (0x00000000U) +/** + * @} + */ + +/** @defgroup SPI_Interrupt_definition SPI Interrupt Definition + * @{ + */ +#define SPI_IT_TXE SPI_CR2_TXEIE +#define SPI_IT_RXNE SPI_CR2_RXNEIE +#define SPI_IT_ERR SPI_CR2_ERRIE +/** + * @} + */ + +/** @defgroup SPI_Flags_definition SPI Flags Definition + * @{ + */ +#define SPI_FLAG_RXNE SPI_SR_RXNE /* SPI status flag: Rx buffer not empty flag */ +#define SPI_FLAG_TXE SPI_SR_TXE /* SPI status flag: Tx buffer empty flag */ +#define SPI_FLAG_BSY SPI_SR_BSY /* SPI status flag: Busy flag */ +#define SPI_FLAG_CRCERR SPI_SR_CRCERR /* SPI Error flag: CRC error flag */ +#define SPI_FLAG_MODF SPI_SR_MODF /* SPI Error flag: Mode fault flag */ +#define SPI_FLAG_OVR SPI_SR_OVR /* SPI Error flag: Overrun flag */ +#define SPI_FLAG_FRE SPI_SR_FRE /* SPI Error flag: TI mode frame format error flag */ +#define SPI_FLAG_FTLVL SPI_SR_FTLVL /* SPI fifo transmission level */ +#define SPI_FLAG_FRLVL SPI_SR_FRLVL /* SPI fifo reception level */ +#define SPI_FLAG_MASK (SPI_SR_RXNE | SPI_SR_TXE | SPI_SR_BSY | SPI_SR_CRCERR | SPI_SR_MODF | SPI_SR_OVR | SPI_SR_FRE | SPI_SR_FTLVL | SPI_SR_FRLVL) +/** + * @} + */ + +/** @defgroup SPI_transmission_fifo_status_level SPI Transmission FIFO Status Level + * @{ + */ +#define SPI_FTLVL_EMPTY (0x00000000U) +#define SPI_FTLVL_QUARTER_FULL (0x00000800U) +#define SPI_FTLVL_HALF_FULL (0x00001000U) +#define SPI_FTLVL_FULL (0x00001800U) + +/** + * @} + */ + +/** @defgroup SPI_reception_fifo_status_level SPI Reception FIFO Status Level + * @{ + */ +#define SPI_FRLVL_EMPTY (0x00000000U) +#define SPI_FRLVL_QUARTER_FULL (0x00000200U) +#define SPI_FRLVL_HALF_FULL (0x00000400U) +#define SPI_FRLVL_FULL (0x00000600U) +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup SPI_Exported_Macros SPI Exported Macros + * @{ + */ + +/** @brief Reset SPI handle state. + * @param __HANDLE__ specifies the SPI Handle. 
+ * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) +#define __HAL_SPI_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->State = HAL_SPI_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_SPI_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_SPI_STATE_RESET) +#endif + +/** @brief Enable the specified SPI interrupts. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @param __INTERRUPT__ specifies the interrupt source to enable. + * This parameter can be one of the following values: + * @arg SPI_IT_TXE: Tx buffer empty interrupt enable + * @arg SPI_IT_RXNE: RX buffer not empty interrupt enable + * @arg SPI_IT_ERR: Error interrupt enable + * @retval None + */ +#define __HAL_SPI_ENABLE_IT(__HANDLE__, __INTERRUPT__) SET_BIT((__HANDLE__)->Instance->CR2, (__INTERRUPT__)) + +/** @brief Disable the specified SPI interrupts. + * @param __HANDLE__ specifies the SPI handle. + * This parameter can be SPIx where x: 1, 2, or 3 to select the SPI peripheral. + * @param __INTERRUPT__ specifies the interrupt source to disable. + * This parameter can be one of the following values: + * @arg SPI_IT_TXE: Tx buffer empty interrupt enable + * @arg SPI_IT_RXNE: RX buffer not empty interrupt enable + * @arg SPI_IT_ERR: Error interrupt enable + * @retval None + */ +#define __HAL_SPI_DISABLE_IT(__HANDLE__, __INTERRUPT__) CLEAR_BIT((__HANDLE__)->Instance->CR2, (__INTERRUPT__)) + +/** @brief Check whether the specified SPI interrupt source is enabled or not. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @param __INTERRUPT__ specifies the SPI interrupt source to check. + * This parameter can be one of the following values: + * @arg SPI_IT_TXE: Tx buffer empty interrupt enable + * @arg SPI_IT_RXNE: RX buffer not empty interrupt enable + * @arg SPI_IT_ERR: Error interrupt enable + * @retval The new state of __IT__ (TRUE or FALSE). + */ +#define __HAL_SPI_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((((__HANDLE__)->Instance->CR2 & (__INTERRUPT__)) == (__INTERRUPT__)) ? SET : RESET) + +/** @brief Check whether the specified SPI flag is set or not. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg SPI_FLAG_RXNE: Receive buffer not empty flag + * @arg SPI_FLAG_TXE: Transmit buffer empty flag + * @arg SPI_FLAG_CRCERR: CRC error flag + * @arg SPI_FLAG_MODF: Mode fault flag + * @arg SPI_FLAG_OVR: Overrun flag + * @arg SPI_FLAG_BSY: Busy flag + * @arg SPI_FLAG_FRE: Frame format error flag + * @arg SPI_FLAG_FTLVL: SPI fifo transmission level + * @arg SPI_FLAG_FRLVL: SPI fifo reception level + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_SPI_GET_FLAG(__HANDLE__, __FLAG__) ((((__HANDLE__)->Instance->SR) & (__FLAG__)) == (__FLAG__)) + +/** @brief Clear the SPI CRCERR pending flag. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. 
+ * @retval None + */ +#define __HAL_SPI_CLEAR_CRCERRFLAG(__HANDLE__) ((__HANDLE__)->Instance->SR = (uint16_t)(~SPI_FLAG_CRCERR)) + +/** @brief Clear the SPI MODF pending flag. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define __HAL_SPI_CLEAR_MODFFLAG(__HANDLE__) \ + do{ \ + __IO uint32_t tmpreg_modf = 0x00U; \ + tmpreg_modf = (__HANDLE__)->Instance->SR; \ + CLEAR_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_SPE); \ + UNUSED(tmpreg_modf); \ + } while(0U) + +/** @brief Clear the SPI OVR pending flag. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define __HAL_SPI_CLEAR_OVRFLAG(__HANDLE__) \ + do{ \ + __IO uint32_t tmpreg_ovr = 0x00U; \ + tmpreg_ovr = (__HANDLE__)->Instance->DR; \ + tmpreg_ovr = (__HANDLE__)->Instance->SR; \ + UNUSED(tmpreg_ovr); \ + } while(0U) + +/** @brief Clear the SPI FRE pending flag. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define __HAL_SPI_CLEAR_FREFLAG(__HANDLE__) \ + do{ \ + __IO uint32_t tmpreg_fre = 0x00U; \ + tmpreg_fre = (__HANDLE__)->Instance->SR; \ + UNUSED(tmpreg_fre); \ + }while(0U) + +/** @brief Enable the SPI peripheral. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define __HAL_SPI_ENABLE(__HANDLE__) SET_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_SPE) + +/** @brief Disable the SPI peripheral. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define __HAL_SPI_DISABLE(__HANDLE__) CLEAR_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_SPE) + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup SPI_Private_Macros SPI Private Macros + * @{ + */ + +/** @brief Set the SPI transmit-only mode. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define SPI_1LINE_TX(__HANDLE__) SET_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_BIDIOE) + +/** @brief Set the SPI receive-only mode. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define SPI_1LINE_RX(__HANDLE__) CLEAR_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_BIDIOE) + +/** @brief Reset the CRC calculation of the SPI. + * @param __HANDLE__ specifies the SPI Handle. + * This parameter can be SPI where x: 1, 2, or 3 to select the SPI peripheral. + * @retval None + */ +#define SPI_RESET_CRC(__HANDLE__) do{CLEAR_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_CRCEN);\ + SET_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_CRCEN);}while(0U) + +/** @brief Check whether the specified SPI flag is set or not. + * @param __SR__ copy of SPI SR regsiter. + * @param __FLAG__ specifies the flag to check. 
+ * This parameter can be one of the following values: + * @arg SPI_FLAG_RXNE: Receive buffer not empty flag + * @arg SPI_FLAG_TXE: Transmit buffer empty flag + * @arg SPI_FLAG_CRCERR: CRC error flag + * @arg SPI_FLAG_MODF: Mode fault flag + * @arg SPI_FLAG_OVR: Overrun flag + * @arg SPI_FLAG_BSY: Busy flag + * @arg SPI_FLAG_FRE: Frame format error flag + * @arg SPI_FLAG_FTLVL: SPI fifo transmission level + * @arg SPI_FLAG_FRLVL: SPI fifo reception level + * @retval SET or RESET. + */ +#define SPI_CHECK_FLAG(__SR__, __FLAG__) ((((__SR__) & ((__FLAG__) & SPI_FLAG_MASK)) == ((__FLAG__) & SPI_FLAG_MASK)) ? SET : RESET) + +/** @brief Check whether the specified SPI Interrupt is set or not. + * @param __CR2__ copy of SPI CR2 regsiter. + * @param __INTERRUPT__ specifies the SPI interrupt source to check. + * This parameter can be one of the following values: + * @arg SPI_IT_TXE: Tx buffer empty interrupt enable + * @arg SPI_IT_RXNE: RX buffer not empty interrupt enable + * @arg SPI_IT_ERR: Error interrupt enable + * @retval SET or RESET. + */ +#define SPI_CHECK_IT_SOURCE(__CR2__, __INTERRUPT__) ((((__CR2__) & (__INTERRUPT__)) == (__INTERRUPT__)) ? SET : RESET) + +/** @brief Checks if SPI Mode parameter is in allowed range. + * @param __MODE__ specifies the SPI Mode. + * This parameter can be a value of @ref SPI_Mode + * @retval None + */ +#define IS_SPI_MODE(__MODE__) (((__MODE__) == SPI_MODE_SLAVE) || \ + ((__MODE__) == SPI_MODE_MASTER)) + +/** @brief Checks if SPI Direction Mode parameter is in allowed range. + * @param __MODE__ specifies the SPI Direction Mode. + * This parameter can be a value of @ref SPI_Direction + * @retval None + */ +#define IS_SPI_DIRECTION(__MODE__) (((__MODE__) == SPI_DIRECTION_2LINES) || \ + ((__MODE__) == SPI_DIRECTION_2LINES_RXONLY) || \ + ((__MODE__) == SPI_DIRECTION_1LINE)) + +/** @brief Checks if SPI Direction Mode parameter is 2 lines. + * @param __MODE__ specifies the SPI Direction Mode. + * @retval None + */ +#define IS_SPI_DIRECTION_2LINES(__MODE__) ((__MODE__) == SPI_DIRECTION_2LINES) + +/** @brief Checks if SPI Direction Mode parameter is 1 or 2 lines. + * @param __MODE__ specifies the SPI Direction Mode. + * @retval None + */ +#define IS_SPI_DIRECTION_2LINES_OR_1LINE(__MODE__) (((__MODE__) == SPI_DIRECTION_2LINES) || \ + ((__MODE__) == SPI_DIRECTION_1LINE)) + +/** @brief Checks if SPI Data Size parameter is in allowed range. + * @param __DATASIZE__ specifies the SPI Data Size. + * This parameter can be a value of @ref SPI_Data_Size + * @retval None + */ +#define IS_SPI_DATASIZE(__DATASIZE__) (((__DATASIZE__) == SPI_DATASIZE_16BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_15BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_14BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_13BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_12BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_11BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_10BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_9BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_8BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_7BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_6BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_5BIT) || \ + ((__DATASIZE__) == SPI_DATASIZE_4BIT)) + +/** @brief Checks if SPI Serial clock steady state parameter is in allowed range. + * @param __CPOL__ specifies the SPI serial clock steady state. 
+ * This parameter can be a value of @ref SPI_Clock_Polarity + * @retval None + */ +#define IS_SPI_CPOL(__CPOL__) (((__CPOL__) == SPI_POLARITY_LOW) || \ + ((__CPOL__) == SPI_POLARITY_HIGH)) + +/** @brief Checks if SPI Clock Phase parameter is in allowed range. + * @param __CPHA__ specifies the SPI Clock Phase. + * This parameter can be a value of @ref SPI_Clock_Phase + * @retval None + */ +#define IS_SPI_CPHA(__CPHA__) (((__CPHA__) == SPI_PHASE_1EDGE) || \ + ((__CPHA__) == SPI_PHASE_2EDGE)) + +/** @brief Checks if SPI Slave Select parameter is in allowed range. + * @param __NSS__ specifies the SPI Slave Select management parameter. + * This parameter can be a value of @ref SPI_Slave_Select_management + * @retval None + */ +#define IS_SPI_NSS(__NSS__) (((__NSS__) == SPI_NSS_SOFT) || \ + ((__NSS__) == SPI_NSS_HARD_INPUT) || \ + ((__NSS__) == SPI_NSS_HARD_OUTPUT)) + +/** @brief Checks if SPI NSS Pulse parameter is in allowed range. + * @param __NSSP__ specifies the SPI NSS Pulse Mode parameter. + * This parameter can be a value of @ref SPI_NSSP_Mode + * @retval None + */ +#define IS_SPI_NSSP(__NSSP__) (((__NSSP__) == SPI_NSS_PULSE_ENABLE) || \ + ((__NSSP__) == SPI_NSS_PULSE_DISABLE)) + +/** @brief Checks if SPI Baudrate prescaler parameter is in allowed range. + * @param __PRESCALER__ specifies the SPI Baudrate prescaler. + * This parameter can be a value of @ref SPI_BaudRate_Prescaler + * @retval None + */ +#define IS_SPI_BAUDRATE_PRESCALER(__PRESCALER__) (((__PRESCALER__) == SPI_BAUDRATEPRESCALER_2) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_4) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_8) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_16) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_32) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_64) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_128) || \ + ((__PRESCALER__) == SPI_BAUDRATEPRESCALER_256)) + +/** @brief Checks if SPI MSB LSB transmission parameter is in allowed range. + * @param __BIT__ specifies the SPI MSB LSB transmission (whether data transfer starts from MSB or LSB bit). + * This parameter can be a value of @ref SPI_MSB_LSB_transmission + * @retval None + */ +#define IS_SPI_FIRST_BIT(__BIT__) (((__BIT__) == SPI_FIRSTBIT_MSB) || \ + ((__BIT__) == SPI_FIRSTBIT_LSB)) + +/** @brief Checks if SPI TI mode parameter is in allowed range. + * @param __MODE__ specifies the SPI TI mode. + * This parameter can be a value of @ref SPI_TI_mode + * @retval None + */ +#define IS_SPI_TIMODE(__MODE__) (((__MODE__) == SPI_TIMODE_DISABLE) || \ + ((__MODE__) == SPI_TIMODE_ENABLE)) + +/** @brief Checks if SPI CRC calculation enabled state is in allowed range. + * @param __CALCULATION__ specifies the SPI CRC calculation enable state. + * This parameter can be a value of @ref SPI_CRC_Calculation + * @retval None + */ +#define IS_SPI_CRC_CALCULATION(__CALCULATION__) (((__CALCULATION__) == SPI_CRCCALCULATION_DISABLE) || \ + ((__CALCULATION__) == SPI_CRCCALCULATION_ENABLE)) + +/** @brief Checks if SPI CRC length is in allowed range. + * @param __LENGTH__ specifies the SPI CRC length. + * This parameter can be a value of @ref SPI_CRC_length + * @retval None + */ +#define IS_SPI_CRC_LENGTH(__LENGTH__) (((__LENGTH__) == SPI_CRC_LENGTH_DATASIZE) ||\ + ((__LENGTH__) == SPI_CRC_LENGTH_8BIT) || \ + ((__LENGTH__) == SPI_CRC_LENGTH_16BIT)) + +/** @brief Checks if SPI polynomial value to be used for the CRC calculation, is in allowed range. + * @param __POLYNOMIAL__ specifies the SPI polynomial value to be used for the CRC calculation. 
+ * This parameter must be a number between Min_Data = 0 and Max_Data = 65535 + * @retval None + */ +#define IS_SPI_CRC_POLYNOMIAL(__POLYNOMIAL__) (((__POLYNOMIAL__) >= 0x1U) && ((__POLYNOMIAL__) <= 0xFFFFU) && (((__POLYNOMIAL__)&0x1U) != 0U)) + +/** @brief Checks if DMA handle is valid. + * @param __HANDLE__ specifies a DMA Handle. + * @retval None + */ +#define IS_SPI_DMA_HANDLE(__HANDLE__) ((__HANDLE__) != NULL) + +/** + * @} + */ + +/* Include SPI HAL Extended module */ +#include "stm32f7xx_hal_spi_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup SPI_Exported_Functions + * @{ + */ + +/** @addtogroup SPI_Exported_Functions_Group1 + * @{ + */ +/* Initialization/de-initialization functions ********************************/ +HAL_StatusTypeDef HAL_SPI_Init(SPI_HandleTypeDef *hspi); +HAL_StatusTypeDef HAL_SPI_DeInit(SPI_HandleTypeDef *hspi); +void HAL_SPI_MspInit(SPI_HandleTypeDef *hspi); +void HAL_SPI_MspDeInit(SPI_HandleTypeDef *hspi); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) +HAL_StatusTypeDef HAL_SPI_RegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID, pSPI_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_SPI_UnRegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @addtogroup SPI_Exported_Functions_Group2 + * @{ + */ +/* I/O operation functions ***************************************************/ +HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, uint16_t Size, + uint32_t Timeout); +HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_SPI_Receive_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size); +HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size); +HAL_StatusTypeDef HAL_SPI_DMAPause(SPI_HandleTypeDef *hspi); +HAL_StatusTypeDef HAL_SPI_DMAResume(SPI_HandleTypeDef *hspi); +HAL_StatusTypeDef HAL_SPI_DMAStop(SPI_HandleTypeDef *hspi); +/* Transfer Abort functions */ +HAL_StatusTypeDef HAL_SPI_Abort(SPI_HandleTypeDef *hspi); +HAL_StatusTypeDef HAL_SPI_Abort_IT(SPI_HandleTypeDef *hspi); + +void HAL_SPI_IRQHandler(SPI_HandleTypeDef *hspi); +void HAL_SPI_TxCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_RxCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_TxRxCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_TxHalfCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_RxHalfCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_TxRxHalfCpltCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_ErrorCallback(SPI_HandleTypeDef *hspi); +void HAL_SPI_AbortCpltCallback(SPI_HandleTypeDef *hspi); +/** + * @} + */ + +/** @addtogroup SPI_Exported_Functions_Group3 + * @{ + */ +/* Peripheral State and Error functions 
***************************************/ +HAL_SPI_StateTypeDef HAL_SPI_GetState(SPI_HandleTypeDef *hspi); +uint32_t HAL_SPI_GetError(SPI_HandleTypeDef *hspi); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_SPI_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_spi_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_spi_ex.h new file mode 100644 index 000000000..63a0153a7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_spi_ex.h @@ -0,0 +1,75 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_spi_ex.h + * @author MCD Application Team + * @brief Header file of SPI HAL Extended module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics.
+ * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_SPI_EX_H +#define STM32F7xx_HAL_SPI_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup SPIEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/* Exported macros -----------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup SPIEx_Exported_Functions + * @{ + */ + +/* Initialization and de-initialization functions ****************************/ +/* IO operation functions *****************************************************/ +/** @addtogroup SPIEx_Exported_Functions_Group1 + * @{ + */ +HAL_StatusTypeDef HAL_SPIEx_FlushRxFifo(SPI_HandleTypeDef *hspi); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_SPI_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim.h new file mode 100644 index 000000000..c77e99c3d --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim.h @@ -0,0 +1,2272 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_tim.h + * @author MCD Application Team + * @brief Header file of TIM HAL module. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics.
+ * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_TIM_H +#define STM32F7xx_HAL_TIM_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup TIM + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup TIM_Exported_Types TIM Exported Types + * @{ + */ + +/** + * @brief TIM Time base Configuration Structure definition + */ +typedef struct +{ + uint32_t Prescaler; /*!< Specifies the prescaler value used to divide the TIM clock. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ + + uint32_t CounterMode; /*!< Specifies the counter mode. + This parameter can be a value of @ref TIM_Counter_Mode */ + + uint32_t Period; /*!< Specifies the period value to be loaded into the active + Auto-Reload Register at the next update event. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF. */ + + uint32_t ClockDivision; /*!< Specifies the clock division. + This parameter can be a value of @ref TIM_ClockDivision */ + + uint32_t RepetitionCounter; /*!< Specifies the repetition counter value. Each time the RCR downcounter + reaches zero, an update event is generated and counting restarts + from the RCR value (N). + This means in PWM mode that (N+1) corresponds to: + - the number of PWM periods in edge-aligned mode + - the number of half PWM period in center-aligned mode + GP timers: this parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF. + Advanced timers: this parameter must be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF. */ + + uint32_t AutoReloadPreload; /*!< Specifies the auto-reload preload. + This parameter can be a value of @ref TIM_AutoReloadPreload */ +} TIM_Base_InitTypeDef; + +/** + * @brief TIM Output Compare Configuration Structure definition + */ +typedef struct +{ + uint32_t OCMode; /*!< Specifies the TIM mode. + This parameter can be a value of @ref TIM_Output_Compare_and_PWM_modes */ + + uint32_t Pulse; /*!< Specifies the pulse value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ + + uint32_t OCPolarity; /*!< Specifies the output polarity. + This parameter can be a value of @ref TIM_Output_Compare_Polarity */ + + uint32_t OCNPolarity; /*!< Specifies the complementary output polarity. + This parameter can be a value of @ref TIM_Output_Compare_N_Polarity + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCFastMode; /*!< Specifies the Fast mode state. + This parameter can be a value of @ref TIM_Output_Fast_State + @note This parameter is valid only in PWM1 and PWM2 mode. */ + + + uint32_t OCIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_Idle_State + @note This parameter is valid only for timer instances supporting break feature. 
*/ + + uint32_t OCNIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_N_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ +} TIM_OC_InitTypeDef; + +/** + * @brief TIM One Pulse Mode Configuration Structure definition + */ +typedef struct +{ + uint32_t OCMode; /*!< Specifies the TIM mode. + This parameter can be a value of @ref TIM_Output_Compare_and_PWM_modes */ + + uint32_t Pulse; /*!< Specifies the pulse value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ + + uint32_t OCPolarity; /*!< Specifies the output polarity. + This parameter can be a value of @ref TIM_Output_Compare_Polarity */ + + uint32_t OCNPolarity; /*!< Specifies the complementary output polarity. + This parameter can be a value of @ref TIM_Output_Compare_N_Polarity + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCNIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_N_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t ICPolarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Input_Capture_Polarity */ + + uint32_t ICSelection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t ICFilter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_OnePulse_InitTypeDef; + +/** + * @brief TIM Input Capture Configuration Structure definition + */ +typedef struct +{ + uint32_t ICPolarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Input_Capture_Polarity */ + + uint32_t ICSelection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t ICPrescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t ICFilter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_IC_InitTypeDef; + +/** + * @brief TIM Encoder Configuration Structure definition + */ +typedef struct +{ + uint32_t EncoderMode; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Encoder_Mode */ + + uint32_t IC1Polarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Encoder_Input_Polarity */ + + uint32_t IC1Selection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t IC1Prescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t IC1Filter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + + uint32_t IC2Polarity; /*!< Specifies the active edge of the input signal. 
+ This parameter can be a value of @ref TIM_Encoder_Input_Polarity */ + + uint32_t IC2Selection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t IC2Prescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t IC2Filter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_Encoder_InitTypeDef; + +/** + * @brief Clock Configuration Handle Structure definition + */ +typedef struct +{ + uint32_t ClockSource; /*!< TIM clock sources + This parameter can be a value of @ref TIM_Clock_Source */ + uint32_t ClockPolarity; /*!< TIM clock polarity + This parameter can be a value of @ref TIM_Clock_Polarity */ + uint32_t ClockPrescaler; /*!< TIM clock prescaler + This parameter can be a value of @ref TIM_Clock_Prescaler */ + uint32_t ClockFilter; /*!< TIM clock filter + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_ClockConfigTypeDef; + +/** + * @brief TIM Clear Input Configuration Handle Structure definition + */ +typedef struct +{ + uint32_t ClearInputState; /*!< TIM clear Input state + This parameter can be ENABLE or DISABLE */ + uint32_t ClearInputSource; /*!< TIM clear Input sources + This parameter can be a value of @ref TIM_ClearInput_Source */ + uint32_t ClearInputPolarity; /*!< TIM Clear Input polarity + This parameter can be a value of @ref TIM_ClearInput_Polarity */ + uint32_t ClearInputPrescaler; /*!< TIM Clear Input prescaler + This parameter must be 0: When OCRef clear feature is used with ETR source, ETR prescaler must be off */ + uint32_t ClearInputFilter; /*!< TIM Clear Input filter + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_ClearInputConfigTypeDef; + +/** + * @brief TIM Master configuration Structure definition + * @note Advanced timers provide TRGO2 internal line which is redirected + * to the ADC + */ +typedef struct +{ + uint32_t MasterOutputTrigger; /*!< Trigger output (TRGO) selection + This parameter can be a value of @ref TIM_Master_Mode_Selection */ + uint32_t MasterOutputTrigger2; /*!< Trigger output2 (TRGO2) selection + This parameter can be a value of @ref TIM_Master_Mode_Selection_2 */ + uint32_t MasterSlaveMode; /*!< Master/slave mode selection + This parameter can be a value of @ref TIM_Master_Slave_Mode + @note When the Master/slave mode is enabled, the effect of + an event on the trigger input (TRGI) is delayed to allow a + perfect synchronization between the current timer and its + slaves (through TRGO). It is not mandatory in case of timer + synchronization mode. 
*/ +} TIM_MasterConfigTypeDef; + +/** + * @brief TIM Slave configuration Structure definition + */ +typedef struct +{ + uint32_t SlaveMode; /*!< Slave mode selection + This parameter can be a value of @ref TIM_Slave_Mode */ + uint32_t InputTrigger; /*!< Input Trigger source + This parameter can be a value of @ref TIM_Trigger_Selection */ + uint32_t TriggerPolarity; /*!< Input Trigger polarity + This parameter can be a value of @ref TIM_Trigger_Polarity */ + uint32_t TriggerPrescaler; /*!< Input trigger prescaler + This parameter can be a value of @ref TIM_Trigger_Prescaler */ + uint32_t TriggerFilter; /*!< Input trigger filter + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + +} TIM_SlaveConfigTypeDef; + +/** + * @brief TIM Break input(s) and Dead time configuration Structure definition + * @note 2 break inputs can be configured (BKIN and BKIN2) with configurable + * filter and polarity. + */ +typedef struct +{ + uint32_t OffStateRunMode; /*!< TIM off state in run mode + This parameter can be a value of @ref TIM_OSSR_Off_State_Selection_for_Run_mode_state */ + uint32_t OffStateIDLEMode; /*!< TIM off state in IDLE mode + This parameter can be a value of @ref TIM_OSSI_Off_State_Selection_for_Idle_mode_state */ + uint32_t LockLevel; /*!< TIM Lock level + This parameter can be a value of @ref TIM_Lock_level */ + uint32_t DeadTime; /*!< TIM dead Time + This parameter can be a number between Min_Data = 0x00 and Max_Data = 0xFF */ + uint32_t BreakState; /*!< TIM Break State + This parameter can be a value of @ref TIM_Break_Input_enable_disable */ + uint32_t BreakPolarity; /*!< TIM Break input polarity + This parameter can be a value of @ref TIM_Break_Polarity */ + uint32_t BreakFilter; /*!< Specifies the break input filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + uint32_t Break2State; /*!< TIM Break2 State + This parameter can be a value of @ref TIM_Break2_Input_enable_disable */ + uint32_t Break2Polarity; /*!< TIM Break2 input polarity + This parameter can be a value of @ref TIM_Break2_Polarity */ + uint32_t Break2Filter; /*!< TIM break2 input filter. 
+ This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + uint32_t AutomaticOutput; /*!< TIM Automatic Output Enable state + This parameter can be a value of @ref TIM_AOE_Bit_Set_Reset */ +} TIM_BreakDeadTimeConfigTypeDef; + +/** + * @brief HAL State structures definition + */ +typedef enum +{ + HAL_TIM_STATE_RESET = 0x00U, /*!< Peripheral not yet initialized or disabled */ + HAL_TIM_STATE_READY = 0x01U, /*!< Peripheral Initialized and ready for use */ + HAL_TIM_STATE_BUSY = 0x02U, /*!< An internal process is ongoing */ + HAL_TIM_STATE_TIMEOUT = 0x03U, /*!< Timeout state */ + HAL_TIM_STATE_ERROR = 0x04U /*!< Reception process is ongoing */ +} HAL_TIM_StateTypeDef; + +/** + * @brief HAL Active channel structures definition + */ +typedef enum +{ + HAL_TIM_ACTIVE_CHANNEL_1 = 0x01U, /*!< The active channel is 1 */ + HAL_TIM_ACTIVE_CHANNEL_2 = 0x02U, /*!< The active channel is 2 */ + HAL_TIM_ACTIVE_CHANNEL_3 = 0x04U, /*!< The active channel is 3 */ + HAL_TIM_ACTIVE_CHANNEL_4 = 0x08U, /*!< The active channel is 4 */ + HAL_TIM_ACTIVE_CHANNEL_5 = 0x10U, /*!< The active channel is 5 */ + HAL_TIM_ACTIVE_CHANNEL_6 = 0x20U, /*!< The active channel is 6 */ + HAL_TIM_ACTIVE_CHANNEL_CLEARED = 0x00U /*!< All active channels cleared */ +} HAL_TIM_ActiveChannel; + +/** + * @brief TIM Time Base Handle Structure definition + */ +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +typedef struct __TIM_HandleTypeDef +#else +typedef struct +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +{ + TIM_TypeDef *Instance; /*!< Register base address */ + TIM_Base_InitTypeDef Init; /*!< TIM Time Base required parameters */ + HAL_TIM_ActiveChannel Channel; /*!< Active channel */ + DMA_HandleTypeDef *hdma[7]; /*!< DMA Handlers array + This array is accessed by a @ref DMA_Handle_index */ + HAL_LockTypeDef Lock; /*!< Locking object */ + __IO HAL_TIM_StateTypeDef State; /*!< TIM operation state */ + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + void (* Base_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Base Msp Init Callback */ + void (* Base_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Base Msp DeInit Callback */ + void (* IC_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM IC Msp Init Callback */ + void (* IC_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM IC Msp DeInit Callback */ + void (* OC_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM OC Msp Init Callback */ + void (* OC_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM OC Msp DeInit Callback */ + void (* PWM_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM PWM Msp Init Callback */ + void (* PWM_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM PWM Msp DeInit Callback */ + void (* OnePulse_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM One Pulse Msp Init Callback */ + void (* OnePulse_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM One Pulse Msp DeInit Callback */ + void (* Encoder_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Encoder Msp Init Callback */ + void (* Encoder_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Encoder Msp DeInit Callback */ + void (* HallSensor_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Hall Sensor Msp Init Callback */ + void (* HallSensor_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Hall Sensor Msp DeInit Callback */ + void (* PeriodElapsedCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Period Elapsed Callback */ + void (* 
PeriodElapsedHalfCpltCallback)(struct __TIM_HandleTypeDef *htim);  /*!< TIM Period Elapsed half complete Callback */
+  void (* TriggerCallback)(struct __TIM_HandleTypeDef *htim);               /*!< TIM Trigger Callback */
+  void (* TriggerHalfCpltCallback)(struct __TIM_HandleTypeDef *htim);       /*!< TIM Trigger half complete Callback */
+  void (* IC_CaptureCallback)(struct __TIM_HandleTypeDef *htim);            /*!< TIM Input Capture Callback */
+  void (* IC_CaptureHalfCpltCallback)(struct __TIM_HandleTypeDef *htim);    /*!< TIM Input Capture half complete Callback */
+  void (* OC_DelayElapsedCallback)(struct __TIM_HandleTypeDef *htim);       /*!< TIM Output Compare Delay Elapsed Callback */
+  void (* PWM_PulseFinishedCallback)(struct __TIM_HandleTypeDef *htim);     /*!< TIM PWM Pulse Finished Callback */
+  void (* PWM_PulseFinishedHalfCpltCallback)(struct __TIM_HandleTypeDef *htim);  /*!< TIM PWM Pulse Finished half complete Callback */
+  void (* ErrorCallback)(struct __TIM_HandleTypeDef *htim);                 /*!< TIM Error Callback */
+  void (* CommutationCallback)(struct __TIM_HandleTypeDef *htim);           /*!< TIM Commutation Callback */
+  void (* CommutationHalfCpltCallback)(struct __TIM_HandleTypeDef *htim);   /*!< TIM Commutation half complete Callback */
+  void (* BreakCallback)(struct __TIM_HandleTypeDef *htim);                 /*!< TIM Break Callback */
+  void (* Break2Callback)(struct __TIM_HandleTypeDef *htim);                /*!< TIM Break2 Callback */
+#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */
+} TIM_HandleTypeDef;
+
+#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1)
+/**
+ * @brief  HAL TIM Callback ID enumeration definition
+ */
+typedef enum
+{
+   HAL_TIM_BASE_MSPINIT_CB_ID            = 0x00U   /*!< TIM Base MspInit Callback ID */
+  ,HAL_TIM_BASE_MSPDEINIT_CB_ID          = 0x01U   /*!< TIM Base MspDeInit Callback ID */
+  ,HAL_TIM_IC_MSPINIT_CB_ID              = 0x02U   /*!< TIM IC MspInit Callback ID */
+  ,HAL_TIM_IC_MSPDEINIT_CB_ID            = 0x03U   /*!< TIM IC MspDeInit Callback ID */
+  ,HAL_TIM_OC_MSPINIT_CB_ID              = 0x04U   /*!< TIM OC MspInit Callback ID */
+  ,HAL_TIM_OC_MSPDEINIT_CB_ID            = 0x05U   /*!< TIM OC MspDeInit Callback ID */
+  ,HAL_TIM_PWM_MSPINIT_CB_ID             = 0x06U   /*!< TIM PWM MspInit Callback ID */
+  ,HAL_TIM_PWM_MSPDEINIT_CB_ID           = 0x07U   /*!< TIM PWM MspDeInit Callback ID */
+  ,HAL_TIM_ONE_PULSE_MSPINIT_CB_ID       = 0x08U   /*!< TIM One Pulse MspInit Callback ID */
+  ,HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID     = 0x09U   /*!< TIM One Pulse MspDeInit Callback ID */
+  ,HAL_TIM_ENCODER_MSPINIT_CB_ID         = 0x0AU   /*!< TIM Encoder MspInit Callback ID */
+  ,HAL_TIM_ENCODER_MSPDEINIT_CB_ID       = 0x0BU   /*!< TIM Encoder MspDeInit Callback ID */
+  ,HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID     = 0x0CU   /*!< TIM Hall Sensor MspInit Callback ID */
+  ,HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID   = 0x0DU   /*!< TIM Hall Sensor MspDeInit Callback ID */
+  ,HAL_TIM_PERIOD_ELAPSED_CB_ID          = 0x0EU   /*!< TIM Period Elapsed Callback ID */
+  ,HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID     = 0x0FU   /*!< TIM Period Elapsed half complete Callback ID */
+  ,HAL_TIM_TRIGGER_CB_ID                 = 0x10U   /*!< TIM Trigger Callback ID */
+  ,HAL_TIM_TRIGGER_HALF_CB_ID            = 0x11U   /*!< TIM Trigger half complete Callback ID */
+
+  ,HAL_TIM_IC_CAPTURE_CB_ID              = 0x12U   /*!< TIM Input Capture Callback ID */
+  ,HAL_TIM_IC_CAPTURE_HALF_CB_ID         = 0x13U   /*!< TIM Input Capture half complete Callback ID */
+  ,HAL_TIM_OC_DELAY_ELAPSED_CB_ID        = 0x14U   /*!< TIM Output Compare Delay Elapsed Callback ID */
+  ,HAL_TIM_PWM_PULSE_FINISHED_CB_ID      = 0x15U   /*!< TIM PWM Pulse Finished Callback ID */
+  ,HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID = 0x16U   /*!< TIM PWM Pulse Finished half complete Callback ID */
+  ,HAL_TIM_ERROR_CB_ID                   = 0x17U   /*!< TIM Error Callback ID */
+
,HAL_TIM_COMMUTATION_CB_ID = 0x18U /*!< TIM Commutation Callback ID */ + ,HAL_TIM_COMMUTATION_HALF_CB_ID = 0x19U /*!< TIM Commutation half complete Callback ID */ + ,HAL_TIM_BREAK_CB_ID = 0x1AU /*!< TIM Break Callback ID */ + ,HAL_TIM_BREAK2_CB_ID = 0x1BU /*!< TIM Break2 Callback ID */ +} HAL_TIM_CallbackIDTypeDef; + +/** + * @brief HAL TIM Callback pointer definition + */ +typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to the TIM callback function */ + +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ +/* End of exported types -----------------------------------------------------*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup TIM_Exported_Constants TIM Exported Constants + * @{ + */ + +/** @defgroup TIM_ClearInput_Source TIM Clear Input Source + * @{ + */ +#define TIM_CLEARINPUTSOURCE_NONE 0x00000000U /*!< OCREF_CLR is disabled */ +#define TIM_CLEARINPUTSOURCE_ETR 0x00000001U /*!< OCREF_CLR is connected to ETRF input */ +/** + * @} + */ + +/** @defgroup TIM_DMA_Base_address TIM DMA Base Address + * @{ + */ +#define TIM_DMABASE_CR1 0x00000000U +#define TIM_DMABASE_CR2 0x00000001U +#define TIM_DMABASE_SMCR 0x00000002U +#define TIM_DMABASE_DIER 0x00000003U +#define TIM_DMABASE_SR 0x00000004U +#define TIM_DMABASE_EGR 0x00000005U +#define TIM_DMABASE_CCMR1 0x00000006U +#define TIM_DMABASE_CCMR2 0x00000007U +#define TIM_DMABASE_CCER 0x00000008U +#define TIM_DMABASE_CNT 0x00000009U +#define TIM_DMABASE_PSC 0x0000000AU +#define TIM_DMABASE_ARR 0x0000000BU +#define TIM_DMABASE_RCR 0x0000000CU +#define TIM_DMABASE_CCR1 0x0000000DU +#define TIM_DMABASE_CCR2 0x0000000EU +#define TIM_DMABASE_CCR3 0x0000000FU +#define TIM_DMABASE_CCR4 0x00000010U +#define TIM_DMABASE_BDTR 0x00000011U +#define TIM_DMABASE_DCR 0x00000012U +#define TIM_DMABASE_DMAR 0x00000013U +#define TIM_DMABASE_OR 0x00000014U +#define TIM_DMABASE_CCMR3 0x00000015U +#define TIM_DMABASE_CCR5 0x00000016U +#define TIM_DMABASE_CCR6 0x00000017U +#if defined(TIM_BREAK_INPUT_SUPPORT) +#define TIM_DMABASE_AF1 0x00000018U +#define TIM_DMABASE_AF2 0x00000019U +#endif /* TIM_BREAK_INPUT_SUPPORT */ +/** + * @} + */ + +/** @defgroup TIM_Event_Source TIM Event Source + * @{ + */ +#define TIM_EVENTSOURCE_UPDATE TIM_EGR_UG /*!< Reinitialize the counter and generates an update of the registers */ +#define TIM_EVENTSOURCE_CC1 TIM_EGR_CC1G /*!< A capture/compare event is generated on channel 1 */ +#define TIM_EVENTSOURCE_CC2 TIM_EGR_CC2G /*!< A capture/compare event is generated on channel 2 */ +#define TIM_EVENTSOURCE_CC3 TIM_EGR_CC3G /*!< A capture/compare event is generated on channel 3 */ +#define TIM_EVENTSOURCE_CC4 TIM_EGR_CC4G /*!< A capture/compare event is generated on channel 4 */ +#define TIM_EVENTSOURCE_COM TIM_EGR_COMG /*!< A commutation event is generated */ +#define TIM_EVENTSOURCE_TRIGGER TIM_EGR_TG /*!< A trigger event is generated */ +#define TIM_EVENTSOURCE_BREAK TIM_EGR_BG /*!< A break event is generated */ +#define TIM_EVENTSOURCE_BREAK2 TIM_EGR_B2G /*!< A break 2 event is generated */ +/** + * @} + */ + +/** @defgroup TIM_Input_Channel_Polarity TIM Input Channel polarity + * @{ + */ +#define TIM_INPUTCHANNELPOLARITY_RISING 0x00000000U /*!< Polarity for TIx source */ +#define TIM_INPUTCHANNELPOLARITY_FALLING TIM_CCER_CC1P /*!< Polarity for TIx source */ +#define TIM_INPUTCHANNELPOLARITY_BOTHEDGE (TIM_CCER_CC1P | TIM_CCER_CC1NP) /*!< Polarity for TIx source */ +/** + * @} + */ + +/** @defgroup TIM_ETR_Polarity TIM ETR Polarity + * @{ + */ 
+#define TIM_ETRPOLARITY_INVERTED TIM_SMCR_ETP /*!< Polarity for ETR source */ +#define TIM_ETRPOLARITY_NONINVERTED 0x00000000U /*!< Polarity for ETR source */ +/** + * @} + */ + +/** @defgroup TIM_ETR_Prescaler TIM ETR Prescaler + * @{ + */ +#define TIM_ETRPRESCALER_DIV1 0x00000000U /*!< No prescaler is used */ +#define TIM_ETRPRESCALER_DIV2 TIM_SMCR_ETPS_0 /*!< ETR input source is divided by 2 */ +#define TIM_ETRPRESCALER_DIV4 TIM_SMCR_ETPS_1 /*!< ETR input source is divided by 4 */ +#define TIM_ETRPRESCALER_DIV8 TIM_SMCR_ETPS /*!< ETR input source is divided by 8 */ +/** + * @} + */ + +/** @defgroup TIM_Counter_Mode TIM Counter Mode + * @{ + */ +#define TIM_COUNTERMODE_UP 0x00000000U /*!< Counter used as up-counter */ +#define TIM_COUNTERMODE_DOWN TIM_CR1_DIR /*!< Counter used as down-counter */ +#define TIM_COUNTERMODE_CENTERALIGNED1 TIM_CR1_CMS_0 /*!< Center-aligned mode 1 */ +#define TIM_COUNTERMODE_CENTERALIGNED2 TIM_CR1_CMS_1 /*!< Center-aligned mode 2 */ +#define TIM_COUNTERMODE_CENTERALIGNED3 TIM_CR1_CMS /*!< Center-aligned mode 3 */ +/** + * @} + */ + +/** @defgroup TIM_Update_Interrupt_Flag_Remap TIM Update Interrupt Flag Remap + * @{ + */ +#define TIM_UIFREMAP_DISABLE 0x00000000U /*!< Update interrupt flag remap disabled */ +#define TIM_UIFREMAP_ENABLE TIM_CR1_UIFREMAP /*!< Update interrupt flag remap enabled */ +/** + * @} + */ + +/** @defgroup TIM_ClockDivision TIM Clock Division + * @{ + */ +#define TIM_CLOCKDIVISION_DIV1 0x00000000U /*!< Clock division: tDTS=tCK_INT */ +#define TIM_CLOCKDIVISION_DIV2 TIM_CR1_CKD_0 /*!< Clock division: tDTS=2*tCK_INT */ +#define TIM_CLOCKDIVISION_DIV4 TIM_CR1_CKD_1 /*!< Clock division: tDTS=4*tCK_INT */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_State TIM Output Compare State + * @{ + */ +#define TIM_OUTPUTSTATE_DISABLE 0x00000000U /*!< Capture/Compare 1 output disabled */ +#define TIM_OUTPUTSTATE_ENABLE TIM_CCER_CC1E /*!< Capture/Compare 1 output enabled */ +/** + * @} + */ + +/** @defgroup TIM_AutoReloadPreload TIM Auto-Reload Preload + * @{ + */ +#define TIM_AUTORELOAD_PRELOAD_DISABLE 0x00000000U /*!< TIMx_ARR register is not buffered */ +#define TIM_AUTORELOAD_PRELOAD_ENABLE TIM_CR1_ARPE /*!< TIMx_ARR register is buffered */ + +/** + * @} + */ + +/** @defgroup TIM_Output_Fast_State TIM Output Fast State + * @{ + */ +#define TIM_OCFAST_DISABLE 0x00000000U /*!< Output Compare fast disable */ +#define TIM_OCFAST_ENABLE TIM_CCMR1_OC1FE /*!< Output Compare fast enable */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_N_State TIM Complementary Output Compare State + * @{ + */ +#define TIM_OUTPUTNSTATE_DISABLE 0x00000000U /*!< OCxN is disabled */ +#define TIM_OUTPUTNSTATE_ENABLE TIM_CCER_CC1NE /*!< OCxN is enabled */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_Polarity TIM Output Compare Polarity + * @{ + */ +#define TIM_OCPOLARITY_HIGH 0x00000000U /*!< Capture/Compare output polarity */ +#define TIM_OCPOLARITY_LOW TIM_CCER_CC1P /*!< Capture/Compare output polarity */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_N_Polarity TIM Complementary Output Compare Polarity + * @{ + */ +#define TIM_OCNPOLARITY_HIGH 0x00000000U /*!< Capture/Compare complementary output polarity */ +#define TIM_OCNPOLARITY_LOW TIM_CCER_CC1NP /*!< Capture/Compare complementary output polarity */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_Idle_State TIM Output Compare Idle State + * @{ + */ +#define TIM_OCIDLESTATE_SET TIM_CR2_OIS1 /*!< Output Idle state: OCx=1 when MOE=0 */ +#define TIM_OCIDLESTATE_RESET 0x00000000U /*!< Output 
Idle state: OCx=0 when MOE=0 */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_N_Idle_State TIM Complementary Output Compare Idle State + * @{ + */ +#define TIM_OCNIDLESTATE_SET TIM_CR2_OIS1N /*!< Complementary output Idle state: OCxN=1 when MOE=0 */ +#define TIM_OCNIDLESTATE_RESET 0x00000000U /*!< Complementary output Idle state: OCxN=0 when MOE=0 */ +/** + * @} + */ + +/** @defgroup TIM_Input_Capture_Polarity TIM Input Capture Polarity + * @{ + */ +#define TIM_ICPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Capture triggered by rising edge on timer input */ +#define TIM_ICPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Capture triggered by falling edge on timer input */ +#define TIM_ICPOLARITY_BOTHEDGE TIM_INPUTCHANNELPOLARITY_BOTHEDGE /*!< Capture triggered by both rising and falling edges on timer input*/ +/** + * @} + */ + +/** @defgroup TIM_Encoder_Input_Polarity TIM Encoder Input Polarity + * @{ + */ +#define TIM_ENCODERINPUTPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Encoder input with rising edge polarity */ +#define TIM_ENCODERINPUTPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Encoder input with falling edge polarity */ +/** + * @} + */ + +/** @defgroup TIM_Input_Capture_Selection TIM Input Capture Selection + * @{ + */ +#define TIM_ICSELECTION_DIRECTTI TIM_CCMR1_CC1S_0 /*!< TIM Input 1, 2, 3 or 4 is selected to be + connected to IC1, IC2, IC3 or IC4, respectively */ +#define TIM_ICSELECTION_INDIRECTTI TIM_CCMR1_CC1S_1 /*!< TIM Input 1, 2, 3 or 4 is selected to be + connected to IC2, IC1, IC4 or IC3, respectively */ +#define TIM_ICSELECTION_TRC TIM_CCMR1_CC1S /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to TRC */ +/** + * @} + */ + +/** @defgroup TIM_Input_Capture_Prescaler TIM Input Capture Prescaler + * @{ + */ +#define TIM_ICPSC_DIV1 0x00000000U /*!< Capture performed each time an edge is detected on the capture input */ +#define TIM_ICPSC_DIV2 TIM_CCMR1_IC1PSC_0 /*!< Capture performed once every 2 events */ +#define TIM_ICPSC_DIV4 TIM_CCMR1_IC1PSC_1 /*!< Capture performed once every 4 events */ +#define TIM_ICPSC_DIV8 TIM_CCMR1_IC1PSC /*!< Capture performed once every 8 events */ +/** + * @} + */ + +/** @defgroup TIM_One_Pulse_Mode TIM One Pulse Mode + * @{ + */ +#define TIM_OPMODE_SINGLE TIM_CR1_OPM /*!< Counter stops counting at the next update event */ +#define TIM_OPMODE_REPETITIVE 0x00000000U /*!< Counter is not stopped at update event */ +/** + * @} + */ + +/** @defgroup TIM_Encoder_Mode TIM Encoder Mode + * @{ + */ +#define TIM_ENCODERMODE_TI1 TIM_SMCR_SMS_0 /*!< Quadrature encoder mode 1, x2 mode, counts up/down on TI1FP1 edge depending on TI2FP2 level */ +#define TIM_ENCODERMODE_TI2 TIM_SMCR_SMS_1 /*!< Quadrature encoder mode 2, x2 mode, counts up/down on TI2FP2 edge depending on TI1FP1 level. */ +#define TIM_ENCODERMODE_TI12 (TIM_SMCR_SMS_1 | TIM_SMCR_SMS_0) /*!< Quadrature encoder mode 3, x4 mode, counts up/down on both TI1FP1 and TI2FP2 edges depending on the level of the other input. 
*/
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Interrupt_definition TIM interrupt Definition
+ * @{
+ */
+#define TIM_IT_UPDATE               TIM_DIER_UIE    /*!< Update interrupt */
+#define TIM_IT_CC1                  TIM_DIER_CC1IE  /*!< Capture/Compare 1 interrupt */
+#define TIM_IT_CC2                  TIM_DIER_CC2IE  /*!< Capture/Compare 2 interrupt */
+#define TIM_IT_CC3                  TIM_DIER_CC3IE  /*!< Capture/Compare 3 interrupt */
+#define TIM_IT_CC4                  TIM_DIER_CC4IE  /*!< Capture/Compare 4 interrupt */
+#define TIM_IT_COM                  TIM_DIER_COMIE  /*!< Commutation interrupt */
+#define TIM_IT_TRIGGER              TIM_DIER_TIE    /*!< Trigger interrupt */
+#define TIM_IT_BREAK                TIM_DIER_BIE    /*!< Break interrupt */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Commutation_Source TIM Commutation Source
+ * @{
+ */
+#define TIM_COMMUTATION_TRGI        TIM_CR2_CCUS    /*!< When Capture/compare control bits are preloaded, they are updated by setting the COMG bit or when a rising edge occurs on trigger input */
+#define TIM_COMMUTATION_SOFTWARE    0x00000000U     /*!< When Capture/compare control bits are preloaded, they are updated by setting the COMG bit */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_DMA_sources TIM DMA Sources
+ * @{
+ */
+#define TIM_DMA_UPDATE              TIM_DIER_UDE    /*!< DMA request is triggered by the update event */
+#define TIM_DMA_CC1                 TIM_DIER_CC1DE  /*!< DMA request is triggered by the capture/compare match 1 event */
+#define TIM_DMA_CC2                 TIM_DIER_CC2DE  /*!< DMA request is triggered by the capture/compare match 2 event */
+#define TIM_DMA_CC3                 TIM_DIER_CC3DE  /*!< DMA request is triggered by the capture/compare match 3 event */
+#define TIM_DMA_CC4                 TIM_DIER_CC4DE  /*!< DMA request is triggered by the capture/compare match 4 event */
+#define TIM_DMA_COM                 TIM_DIER_COMDE  /*!< DMA request is triggered by the commutation event */
+#define TIM_DMA_TRIGGER             TIM_DIER_TDE    /*!< DMA request is triggered by the trigger event */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Flag_definition TIM Flag Definition
+ * @{
+ */
+#define TIM_FLAG_UPDATE             TIM_SR_UIF      /*!< Update interrupt flag */
+#define TIM_FLAG_CC1                TIM_SR_CC1IF    /*!< Capture/Compare 1 interrupt flag */
+#define TIM_FLAG_CC2                TIM_SR_CC2IF    /*!< Capture/Compare 2 interrupt flag */
+#define TIM_FLAG_CC3                TIM_SR_CC3IF    /*!< Capture/Compare 3 interrupt flag */
+#define TIM_FLAG_CC4                TIM_SR_CC4IF    /*!< Capture/Compare 4 interrupt flag */
+#define TIM_FLAG_CC5                TIM_SR_CC5IF    /*!< Capture/Compare 5 interrupt flag */
+#define TIM_FLAG_CC6                TIM_SR_CC6IF    /*!< Capture/Compare 6 interrupt flag */
+#define TIM_FLAG_COM                TIM_SR_COMIF    /*!< Commutation interrupt flag */
+#define TIM_FLAG_TRIGGER            TIM_SR_TIF      /*!< Trigger interrupt flag */
+#define TIM_FLAG_BREAK              TIM_SR_BIF      /*!< Break interrupt flag */
+#define TIM_FLAG_BREAK2             TIM_SR_B2IF     /*!< Break 2 interrupt flag */
+#define TIM_FLAG_SYSTEM_BREAK       TIM_SR_SBIF     /*!< System Break interrupt flag */
+#define TIM_FLAG_CC1OF              TIM_SR_CC1OF    /*!< Capture 1 overcapture flag */
+#define TIM_FLAG_CC2OF              TIM_SR_CC2OF    /*!< Capture 2 overcapture flag */
+#define TIM_FLAG_CC3OF              TIM_SR_CC3OF    /*!< Capture 3 overcapture flag */
+#define TIM_FLAG_CC4OF              TIM_SR_CC4OF    /*!< Capture 4 overcapture flag */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Channel TIM Channel
+ * @{
+ */
+#define TIM_CHANNEL_1               0x00000000U     /*!< Capture/compare channel 1 identifier */
+#define TIM_CHANNEL_2               0x00000004U     /*!< Capture/compare channel 2 identifier */
+#define TIM_CHANNEL_3               0x00000008U     /*!< Capture/compare channel 3 identifier */
+#define TIM_CHANNEL_4               0x0000000CU     /*!< Capture/compare channel 4 identifier */
+#define TIM_CHANNEL_5               0x00000010U     /*!< Compare channel 5 identifier
*/ +#define TIM_CHANNEL_6 0x00000014U /*!< Compare channel 6 identifier */ +#define TIM_CHANNEL_ALL 0x0000003CU /*!< Global Capture/compare channel identifier */ +/** + * @} + */ + +/** @defgroup TIM_Clock_Source TIM Clock Source + * @{ + */ +#define TIM_CLOCKSOURCE_ETRMODE2 TIM_SMCR_ETPS_1 /*!< External clock source mode 2 */ +#define TIM_CLOCKSOURCE_INTERNAL TIM_SMCR_ETPS_0 /*!< Internal clock source */ +#define TIM_CLOCKSOURCE_ITR0 TIM_TS_ITR0 /*!< External clock source mode 1 (ITR0) */ +#define TIM_CLOCKSOURCE_ITR1 TIM_TS_ITR1 /*!< External clock source mode 1 (ITR1) */ +#define TIM_CLOCKSOURCE_ITR2 TIM_TS_ITR2 /*!< External clock source mode 1 (ITR2) */ +#define TIM_CLOCKSOURCE_ITR3 TIM_TS_ITR3 /*!< External clock source mode 1 (ITR3) */ +#define TIM_CLOCKSOURCE_TI1ED TIM_TS_TI1F_ED /*!< External clock source mode 1 (TTI1FP1 + edge detect.) */ +#define TIM_CLOCKSOURCE_TI1 TIM_TS_TI1FP1 /*!< External clock source mode 1 (TTI1FP1) */ +#define TIM_CLOCKSOURCE_TI2 TIM_TS_TI2FP2 /*!< External clock source mode 1 (TTI2FP2) */ +#define TIM_CLOCKSOURCE_ETRMODE1 TIM_TS_ETRF /*!< External clock source mode 1 (ETRF) */ +/** + * @} + */ + +/** @defgroup TIM_Clock_Polarity TIM Clock Polarity + * @{ + */ +#define TIM_CLOCKPOLARITY_INVERTED TIM_ETRPOLARITY_INVERTED /*!< Polarity for ETRx clock sources */ +#define TIM_CLOCKPOLARITY_NONINVERTED TIM_ETRPOLARITY_NONINVERTED /*!< Polarity for ETRx clock sources */ +#define TIM_CLOCKPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Polarity for TIx clock sources */ +#define TIM_CLOCKPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Polarity for TIx clock sources */ +#define TIM_CLOCKPOLARITY_BOTHEDGE TIM_INPUTCHANNELPOLARITY_BOTHEDGE /*!< Polarity for TIx clock sources */ +/** + * @} + */ + +/** @defgroup TIM_Clock_Prescaler TIM Clock Prescaler + * @{ + */ +#define TIM_CLOCKPRESCALER_DIV1 TIM_ETRPRESCALER_DIV1 /*!< No prescaler is used */ +#define TIM_CLOCKPRESCALER_DIV2 TIM_ETRPRESCALER_DIV2 /*!< Prescaler for External ETR Clock: Capture performed once every 2 events. */ +#define TIM_CLOCKPRESCALER_DIV4 TIM_ETRPRESCALER_DIV4 /*!< Prescaler for External ETR Clock: Capture performed once every 4 events. */ +#define TIM_CLOCKPRESCALER_DIV8 TIM_ETRPRESCALER_DIV8 /*!< Prescaler for External ETR Clock: Capture performed once every 8 events. */ +/** + * @} + */ + +/** @defgroup TIM_ClearInput_Polarity TIM Clear Input Polarity + * @{ + */ +#define TIM_CLEARINPUTPOLARITY_INVERTED TIM_ETRPOLARITY_INVERTED /*!< Polarity for ETRx pin */ +#define TIM_CLEARINPUTPOLARITY_NONINVERTED TIM_ETRPOLARITY_NONINVERTED /*!< Polarity for ETRx pin */ +/** + * @} + */ + +/** @defgroup TIM_ClearInput_Prescaler TIM Clear Input Prescaler + * @{ + */ +#define TIM_CLEARINPUTPRESCALER_DIV1 TIM_ETRPRESCALER_DIV1 /*!< No prescaler is used */ +#define TIM_CLEARINPUTPRESCALER_DIV2 TIM_ETRPRESCALER_DIV2 /*!< Prescaler for External ETR pin: Capture performed once every 2 events. */ +#define TIM_CLEARINPUTPRESCALER_DIV4 TIM_ETRPRESCALER_DIV4 /*!< Prescaler for External ETR pin: Capture performed once every 4 events. */ +#define TIM_CLEARINPUTPRESCALER_DIV8 TIM_ETRPRESCALER_DIV8 /*!< Prescaler for External ETR pin: Capture performed once every 8 events. 
*/ +/** + * @} + */ + +/** @defgroup TIM_OSSR_Off_State_Selection_for_Run_mode_state TIM OSSR OffState Selection for Run mode state + * @{ + */ +#define TIM_OSSR_ENABLE TIM_BDTR_OSSR /*!< When inactive, OC/OCN outputs are enabled (still controlled by the timer) */ +#define TIM_OSSR_DISABLE 0x00000000U /*!< When inactive, OC/OCN outputs are disabled (not controlled any longer by the timer) */ +/** + * @} + */ + +/** @defgroup TIM_OSSI_Off_State_Selection_for_Idle_mode_state TIM OSSI OffState Selection for Idle mode state + * @{ + */ +#define TIM_OSSI_ENABLE TIM_BDTR_OSSI /*!< When inactive, OC/OCN outputs are enabled (still controlled by the timer) */ +#define TIM_OSSI_DISABLE 0x00000000U /*!< When inactive, OC/OCN outputs are disabled (not controlled any longer by the timer) */ +/** + * @} + */ +/** @defgroup TIM_Lock_level TIM Lock level + * @{ + */ +#define TIM_LOCKLEVEL_OFF 0x00000000U /*!< LOCK OFF */ +#define TIM_LOCKLEVEL_1 TIM_BDTR_LOCK_0 /*!< LOCK Level 1 */ +#define TIM_LOCKLEVEL_2 TIM_BDTR_LOCK_1 /*!< LOCK Level 2 */ +#define TIM_LOCKLEVEL_3 TIM_BDTR_LOCK /*!< LOCK Level 3 */ +/** + * @} + */ + +/** @defgroup TIM_Break_Input_enable_disable TIM Break Input Enable + * @{ + */ +#define TIM_BREAK_ENABLE TIM_BDTR_BKE /*!< Break input BRK is enabled */ +#define TIM_BREAK_DISABLE 0x00000000U /*!< Break input BRK is disabled */ +/** + * @} + */ + +/** @defgroup TIM_Break_Polarity TIM Break Input Polarity + * @{ + */ +#define TIM_BREAKPOLARITY_LOW 0x00000000U /*!< Break input BRK is active low */ +#define TIM_BREAKPOLARITY_HIGH TIM_BDTR_BKP /*!< Break input BRK is active high */ +/** + * @} + */ + +/** @defgroup TIM_Break2_Input_enable_disable TIM Break input 2 Enable + * @{ + */ +#define TIM_BREAK2_DISABLE 0x00000000U /*!< Break input BRK2 is disabled */ +#define TIM_BREAK2_ENABLE TIM_BDTR_BK2E /*!< Break input BRK2 is enabled */ +/** + * @} + */ + +/** @defgroup TIM_Break2_Polarity TIM Break Input 2 Polarity + * @{ + */ +#define TIM_BREAK2POLARITY_LOW 0x00000000U /*!< Break input BRK2 is active low */ +#define TIM_BREAK2POLARITY_HIGH TIM_BDTR_BK2P /*!< Break input BRK2 is active high */ +/** + * @} + */ + +/** @defgroup TIM_AOE_Bit_Set_Reset TIM Automatic Output Enable + * @{ + */ +#define TIM_AUTOMATICOUTPUT_DISABLE 0x00000000U /*!< MOE can be set only by software */ +#define TIM_AUTOMATICOUTPUT_ENABLE TIM_BDTR_AOE /*!< MOE can be set by software or automatically at the next update event + (if none of the break inputs BRK and BRK2 is active) */ +/** + * @} + */ + +/** @defgroup TIM_Group_Channel5 Group Channel 5 and Channel 1, 2 or 3 + * @{ + */ +#define TIM_GROUPCH5_NONE 0x00000000U /* !< No effect of OC5REF on OC1REFC, OC2REFC and OC3REFC */ +#define TIM_GROUPCH5_OC1REFC TIM_CCR5_GC5C1 /* !< OC1REFC is the logical AND of OC1REFC and OC5REF */ +#define TIM_GROUPCH5_OC2REFC TIM_CCR5_GC5C2 /* !< OC2REFC is the logical AND of OC2REFC and OC5REF */ +#define TIM_GROUPCH5_OC3REFC TIM_CCR5_GC5C3 /* !< OC3REFC is the logical AND of OC3REFC and OC5REF */ +/** + * @} + */ + +/** @defgroup TIM_Master_Mode_Selection TIM Master Mode Selection + * @{ + */ +#define TIM_TRGO_RESET 0x00000000U /*!< TIMx_EGR.UG bit is used as trigger output (TRGO) */ +#define TIM_TRGO_ENABLE TIM_CR2_MMS_0 /*!< TIMx_CR1.CEN bit is used as trigger output (TRGO) */ +#define TIM_TRGO_UPDATE TIM_CR2_MMS_1 /*!< Update event is used as trigger output (TRGO) */ +#define TIM_TRGO_OC1 (TIM_CR2_MMS_1 | TIM_CR2_MMS_0) /*!< Capture or a compare match 1 is used as trigger output (TRGO) */ +#define TIM_TRGO_OC1REF TIM_CR2_MMS_2 /*!< 
OC1REF signal is used as trigger output (TRGO) */
+#define TIM_TRGO_OC2REF             (TIM_CR2_MMS_2 | TIM_CR2_MMS_0)                   /*!< OC2REF signal is used as trigger output (TRGO) */
+#define TIM_TRGO_OC3REF             (TIM_CR2_MMS_2 | TIM_CR2_MMS_1)                   /*!< OC3REF signal is used as trigger output (TRGO) */
+#define TIM_TRGO_OC4REF             (TIM_CR2_MMS_2 | TIM_CR2_MMS_1 | TIM_CR2_MMS_0)   /*!< OC4REF signal is used as trigger output (TRGO) */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Master_Mode_Selection_2 TIM Master Mode Selection 2 (TRGO2)
+ * @{
+ */
+#define TIM_TRGO2_RESET                          0x00000000U                                                          /*!< TIMx_EGR.UG bit is used as trigger output (TRGO2) */
+#define TIM_TRGO2_ENABLE                         TIM_CR2_MMS2_0                                                       /*!< TIMx_CR1.CEN bit is used as trigger output (TRGO2) */
+#define TIM_TRGO2_UPDATE                         TIM_CR2_MMS2_1                                                       /*!< Update event is used as trigger output (TRGO2) */
+#define TIM_TRGO2_OC1                            (TIM_CR2_MMS2_1 | TIM_CR2_MMS2_0)                                    /*!< Capture or a compare match 1 is used as trigger output (TRGO2) */
+#define TIM_TRGO2_OC1REF                         TIM_CR2_MMS2_2                                                       /*!< OC1REF signal is used as trigger output (TRGO2) */
+#define TIM_TRGO2_OC2REF                         (TIM_CR2_MMS2_2 | TIM_CR2_MMS2_0)                                    /*!< OC2REF signal is used as trigger output (TRGO2) */
+#define TIM_TRGO2_OC3REF                         (TIM_CR2_MMS2_2 | TIM_CR2_MMS2_1)                                    /*!< OC3REF signal is used as trigger output (TRGO2) */
+#define TIM_TRGO2_OC4REF                         (TIM_CR2_MMS2_2 | TIM_CR2_MMS2_1 | TIM_CR2_MMS2_0)                   /*!< OC4REF signal is used as trigger output (TRGO2) */
+#define TIM_TRGO2_OC5REF                         TIM_CR2_MMS2_3                                                       /*!< OC5REF signal is used as trigger output (TRGO2) */
+#define TIM_TRGO2_OC6REF                         (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_0)                                    /*!< OC6REF signal is used as trigger output (TRGO2) */
+#define TIM_TRGO2_OC4REF_RISINGFALLING           (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_1)                                    /*!< OC4REF rising or falling edges generate pulses on TRGO2 */
+#define TIM_TRGO2_OC6REF_RISINGFALLING           (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_1 | TIM_CR2_MMS2_0)                   /*!< OC6REF rising or falling edges generate pulses on TRGO2 */
+#define TIM_TRGO2_OC4REF_RISING_OC6REF_RISING    (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_2)                                    /*!< OC4REF or OC6REF rising edges generate pulses on TRGO2 */
+#define TIM_TRGO2_OC4REF_RISING_OC6REF_FALLING   (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_2 | TIM_CR2_MMS2_0)                   /*!< OC4REF rising or OC6REF falling edges generate pulses on TRGO2 */
+#define TIM_TRGO2_OC5REF_RISING_OC6REF_RISING    (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_2 | TIM_CR2_MMS2_1)                   /*!< OC5REF or OC6REF rising edges generate pulses on TRGO2 */
+#define TIM_TRGO2_OC5REF_RISING_OC6REF_FALLING   (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_2 | TIM_CR2_MMS2_1 | TIM_CR2_MMS2_0)  /*!< OC5REF rising or OC6REF falling edges generate pulses on TRGO2 */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Master_Slave_Mode TIM Master/Slave Mode
+ * @{
+ */
+#define TIM_MASTERSLAVEMODE_ENABLE          TIM_SMCR_MSM    /*!< Master/slave mode is selected */
+#define TIM_MASTERSLAVEMODE_DISABLE         0x00000000U     /*!< No action */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Slave_Mode TIM Slave mode
+ * @{
+ */
+#define TIM_SLAVEMODE_DISABLE                0x00000000U                                          /*!< Slave mode disabled */
+#define TIM_SLAVEMODE_RESET                  TIM_SMCR_SMS_2                                       /*!< Reset Mode */
+#define TIM_SLAVEMODE_GATED                  (TIM_SMCR_SMS_2 | TIM_SMCR_SMS_0)                    /*!< Gated Mode */
+#define TIM_SLAVEMODE_TRIGGER                (TIM_SMCR_SMS_2 | TIM_SMCR_SMS_1)                    /*!< Trigger Mode */
+#define TIM_SLAVEMODE_EXTERNAL1              (TIM_SMCR_SMS_2 | TIM_SMCR_SMS_1 | TIM_SMCR_SMS_0)   /*!< External Clock Mode 1 */
+#define TIM_SLAVEMODE_COMBINED_RESETTRIGGER  TIM_SMCR_SMS_3                                       /*!< Combined reset + trigger mode */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_Output_Compare_and_PWM_modes TIM Output Compare and PWM Modes
+ * @{
+ */
+#define TIM_OCMODE_TIMING                   0x00000000U                                          /*!< Frozen */
+#define TIM_OCMODE_ACTIVE TIM_CCMR1_OC1M_0 /*!< Set channel to active level on match */ +#define TIM_OCMODE_INACTIVE TIM_CCMR1_OC1M_1 /*!< Set channel to inactive level on match */ +#define TIM_OCMODE_TOGGLE (TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_0) /*!< Toggle */ +#define TIM_OCMODE_PWM1 (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_1) /*!< PWM mode 1 */ +#define TIM_OCMODE_PWM2 (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_0) /*!< PWM mode 2 */ +#define TIM_OCMODE_FORCED_ACTIVE (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_0) /*!< Force active level */ +#define TIM_OCMODE_FORCED_INACTIVE TIM_CCMR1_OC1M_2 /*!< Force inactive level */ +#define TIM_OCMODE_RETRIGERRABLE_OPM1 TIM_CCMR1_OC1M_3 /*!< Retrigerrable OPM mode 1 */ +#define TIM_OCMODE_RETRIGERRABLE_OPM2 (TIM_CCMR1_OC1M_3 | TIM_CCMR1_OC1M_0) /*!< Retrigerrable OPM mode 2 */ +#define TIM_OCMODE_COMBINED_PWM1 (TIM_CCMR1_OC1M_3 | TIM_CCMR1_OC1M_2) /*!< Combined PWM mode 1 */ +#define TIM_OCMODE_COMBINED_PWM2 (TIM_CCMR1_OC1M_3 | TIM_CCMR1_OC1M_0 | TIM_CCMR1_OC1M_2) /*!< Combined PWM mode 2 */ +#define TIM_OCMODE_ASSYMETRIC_PWM1 (TIM_CCMR1_OC1M_3 | TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_2) /*!< Asymmetric PWM mode 1 */ +#define TIM_OCMODE_ASSYMETRIC_PWM2 TIM_CCMR1_OC1M /*!< Asymmetric PWM mode 2 */ +/** + * @} + */ + +/** @defgroup TIM_Trigger_Selection TIM Trigger Selection + * @{ + */ +#define TIM_TS_ITR0 0x00000000U /*!< Internal Trigger 0 (ITR0) */ +#define TIM_TS_ITR1 TIM_SMCR_TS_0 /*!< Internal Trigger 1 (ITR1) */ +#define TIM_TS_ITR2 TIM_SMCR_TS_1 /*!< Internal Trigger 2 (ITR2) */ +#define TIM_TS_ITR3 (TIM_SMCR_TS_0 | TIM_SMCR_TS_1) /*!< Internal Trigger 3 (ITR3) */ +#define TIM_TS_TI1F_ED TIM_SMCR_TS_2 /*!< TI1 Edge Detector (TI1F_ED) */ +#define TIM_TS_TI1FP1 (TIM_SMCR_TS_0 | TIM_SMCR_TS_2) /*!< Filtered Timer Input 1 (TI1FP1) */ +#define TIM_TS_TI2FP2 (TIM_SMCR_TS_1 | TIM_SMCR_TS_2) /*!< Filtered Timer Input 2 (TI2FP2) */ +#define TIM_TS_ETRF (TIM_SMCR_TS_0 | TIM_SMCR_TS_1 | TIM_SMCR_TS_2) /*!< Filtered External Trigger input (ETRF) */ +#define TIM_TS_NONE 0x0000FFFFU /*!< No trigger selected */ +/** + * @} + */ + +/** @defgroup TIM_Trigger_Polarity TIM Trigger Polarity + * @{ + */ +#define TIM_TRIGGERPOLARITY_INVERTED TIM_ETRPOLARITY_INVERTED /*!< Polarity for ETRx trigger sources */ +#define TIM_TRIGGERPOLARITY_NONINVERTED TIM_ETRPOLARITY_NONINVERTED /*!< Polarity for ETRx trigger sources */ +#define TIM_TRIGGERPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Polarity for TIxFPx or TI1_ED trigger sources */ +#define TIM_TRIGGERPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Polarity for TIxFPx or TI1_ED trigger sources */ +#define TIM_TRIGGERPOLARITY_BOTHEDGE TIM_INPUTCHANNELPOLARITY_BOTHEDGE /*!< Polarity for TIxFPx or TI1_ED trigger sources */ +/** + * @} + */ + +/** @defgroup TIM_Trigger_Prescaler TIM Trigger Prescaler + * @{ + */ +#define TIM_TRIGGERPRESCALER_DIV1 TIM_ETRPRESCALER_DIV1 /*!< No prescaler is used */ +#define TIM_TRIGGERPRESCALER_DIV2 TIM_ETRPRESCALER_DIV2 /*!< Prescaler for External ETR Trigger: Capture performed once every 2 events. */ +#define TIM_TRIGGERPRESCALER_DIV4 TIM_ETRPRESCALER_DIV4 /*!< Prescaler for External ETR Trigger: Capture performed once every 4 events. */ +#define TIM_TRIGGERPRESCALER_DIV8 TIM_ETRPRESCALER_DIV8 /*!< Prescaler for External ETR Trigger: Capture performed once every 8 events. 
*/ +/** + * @} + */ + +/** @defgroup TIM_TI1_Selection TIM TI1 Input Selection + * @{ + */ +#define TIM_TI1SELECTION_CH1 0x00000000U /*!< The TIMx_CH1 pin is connected to TI1 input */ +#define TIM_TI1SELECTION_XORCOMBINATION TIM_CR2_TI1S /*!< The TIMx_CH1, CH2 and CH3 pins are connected to the TI1 input (XOR combination) */ +/** + * @} + */ + +/** @defgroup TIM_DMA_Burst_Length TIM DMA Burst Length + * @{ + */ +#define TIM_DMABURSTLENGTH_1TRANSFER 0x00000000U /*!< The transfer is done to 1 register starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_2TRANSFERS 0x00000100U /*!< The transfer is done to 2 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_3TRANSFERS 0x00000200U /*!< The transfer is done to 3 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_4TRANSFERS 0x00000300U /*!< The transfer is done to 4 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_5TRANSFERS 0x00000400U /*!< The transfer is done to 5 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_6TRANSFERS 0x00000500U /*!< The transfer is done to 6 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_7TRANSFERS 0x00000600U /*!< The transfer is done to 7 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_8TRANSFERS 0x00000700U /*!< The transfer is done to 8 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_9TRANSFERS 0x00000800U /*!< The transfer is done to 9 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_10TRANSFERS 0x00000900U /*!< The transfer is done to 10 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_11TRANSFERS 0x00000A00U /*!< The transfer is done to 11 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_12TRANSFERS 0x00000B00U /*!< The transfer is done to 12 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_13TRANSFERS 0x00000C00U /*!< The transfer is done to 13 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_14TRANSFERS 0x00000D00U /*!< The transfer is done to 14 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_15TRANSFERS 0x00000E00U /*!< The transfer is done to 15 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_16TRANSFERS 0x00000F00U /*!< The transfer is done to 16 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_17TRANSFERS 0x00001000U /*!< The transfer is done to 17 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_18TRANSFERS 0x00001100U /*!< The transfer is done to 18 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +/** + * @} + */ + +/** @defgroup DMA_Handle_index TIM DMA Handle Index + * @{ + */ +#define TIM_DMA_ID_UPDATE ((uint16_t) 0x0000) /*!< Index of the DMA handle used for Update DMA requests */ +#define TIM_DMA_ID_CC1 ((uint16_t) 0x0001) /*!< Index of the DMA handle used for Capture/Compare 1 DMA requests */ +#define TIM_DMA_ID_CC2 ((uint16_t) 0x0002) /*!< Index of the DMA handle used for Capture/Compare 2 DMA requests */ +#define TIM_DMA_ID_CC3 ((uint16_t) 0x0003) /*!< Index of the DMA handle used for Capture/Compare 3 DMA requests */ +#define TIM_DMA_ID_CC4 ((uint16_t) 0x0004) /*!< Index of the DMA handle used for Capture/Compare 4 DMA requests */ +#define TIM_DMA_ID_COMMUTATION ((uint16_t) 0x0005) /*!< Index of the DMA 
handle used for Commutation DMA requests */ +#define TIM_DMA_ID_TRIGGER ((uint16_t) 0x0006) /*!< Index of the DMA handle used for Trigger DMA requests */ +/** + * @} + */ + +/** @defgroup Channel_CC_State TIM Capture/Compare Channel State + * @{ + */ +#define TIM_CCx_ENABLE 0x00000001U /*!< Input or output channel is enabled */ +#define TIM_CCx_DISABLE 0x00000000U /*!< Input or output channel is disabled */ +#define TIM_CCxN_ENABLE 0x00000004U /*!< Complementary output channel is enabled */ +#define TIM_CCxN_DISABLE 0x00000000U /*!< Complementary output channel is enabled */ +/** + * @} + */ + +/** @defgroup TIM_Break_System TIM Break System + * @{ + */ +#define TIM_BREAK_SYSTEM_ECC SYSCFG_CFGR2_ECCL /*!< Enables and locks the ECC error signal with Break Input of TIM1/8/15/16/17 */ +#define TIM_BREAK_SYSTEM_PVD SYSCFG_CFGR2_PVDL /*!< Enables and locks the PVD connection with TIM1/8/15/16/17 Break Input and also the PVDE and PLS bits of the Power Control Interface */ +#define TIM_BREAK_SYSTEM_SRAM_PARITY_ERROR SYSCFG_CFGR2_SPL /*!< Enables and locks the SRAM_PARITY error signal with Break Input of TIM1/8/15/16/17 */ +#define TIM_BREAK_SYSTEM_LOCKUP SYSCFG_CFGR2_CLL /*!< Enables and locks the LOCKUP output of CortexM4 with Break Input of TIM1/8/15/16/17 */ +/** + * @} + */ + +/** + * @} + */ +/* End of exported constants -------------------------------------------------*/ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup TIM_Exported_Macros TIM Exported Macros + * @{ + */ + +/** @brief Reset TIM handle state. + * @param __HANDLE__ TIM handle. + * @retval None + */ +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +#define __HAL_TIM_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_TIM_STATE_RESET; \ + (__HANDLE__)->Base_MspInitCallback = NULL; \ + (__HANDLE__)->Base_MspDeInitCallback = NULL; \ + (__HANDLE__)->IC_MspInitCallback = NULL; \ + (__HANDLE__)->IC_MspDeInitCallback = NULL; \ + (__HANDLE__)->OC_MspInitCallback = NULL; \ + (__HANDLE__)->OC_MspDeInitCallback = NULL; \ + (__HANDLE__)->PWM_MspInitCallback = NULL; \ + (__HANDLE__)->PWM_MspDeInitCallback = NULL; \ + (__HANDLE__)->OnePulse_MspInitCallback = NULL; \ + (__HANDLE__)->OnePulse_MspDeInitCallback = NULL; \ + (__HANDLE__)->Encoder_MspInitCallback = NULL; \ + (__HANDLE__)->Encoder_MspDeInitCallback = NULL; \ + (__HANDLE__)->HallSensor_MspInitCallback = NULL; \ + (__HANDLE__)->HallSensor_MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_TIM_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_TIM_STATE_RESET) +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @brief Enable the TIM peripheral. + * @param __HANDLE__ TIM handle + * @retval None + */ +#define __HAL_TIM_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1|=(TIM_CR1_CEN)) + +/** + * @brief Enable the TIM main Output. + * @param __HANDLE__ TIM handle + * @retval None + */ +#define __HAL_TIM_MOE_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->BDTR|=(TIM_BDTR_MOE)) + +/** + * @brief Disable the TIM peripheral. + * @param __HANDLE__ TIM handle + * @retval None + */ +#define __HAL_TIM_DISABLE(__HANDLE__) \ + do { \ + if (((__HANDLE__)->Instance->CCER & TIM_CCER_CCxE_MASK) == 0UL) \ + { \ + if(((__HANDLE__)->Instance->CCER & TIM_CCER_CCxNE_MASK) == 0UL) \ + { \ + (__HANDLE__)->Instance->CR1 &= ~(TIM_CR1_CEN); \ + } \ + } \ + } while(0) + +/** + * @brief Disable the TIM main Output. 
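 *        (Editor's illustrative note, not part of the original header: user code typically
 *        calls this only after all PWM/OC channels have been stopped, e.g.
 *        __HAL_TIM_MOE_DISABLE(&htim1); where htim1 is a hypothetical TIM1 handle. As the
 *        note below states, the macro clears BDTR.MOE only once every CCxE/CCxNE bit in
 *        CCER is already cleared.)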
+ * @param __HANDLE__ TIM handle + * @retval None + * @note The Main Output Enable of a timer instance is disabled only if all the CCx and CCxN channels have been disabled + */ +#define __HAL_TIM_MOE_DISABLE(__HANDLE__) \ + do { \ + if (((__HANDLE__)->Instance->CCER & TIM_CCER_CCxE_MASK) == 0UL) \ + { \ + if(((__HANDLE__)->Instance->CCER & TIM_CCER_CCxNE_MASK) == 0UL) \ + { \ + (__HANDLE__)->Instance->BDTR &= ~(TIM_BDTR_MOE); \ + } \ + } \ + } while(0) + +/** + * @brief Disable the TIM main Output. + * @param __HANDLE__ TIM handle + * @retval None + * @note The Main Output Enable of a timer instance is disabled unconditionally + */ +#define __HAL_TIM_MOE_DISABLE_UNCONDITIONALLY(__HANDLE__) (__HANDLE__)->Instance->BDTR &= ~(TIM_BDTR_MOE) + +/** @brief Enable the specified TIM interrupt. + * @param __HANDLE__ specifies the TIM Handle. + * @param __INTERRUPT__ specifies the TIM interrupt source to enable. + * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval None + */ +#define __HAL_TIM_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DIER |= (__INTERRUPT__)) + +/** @brief Disable the specified TIM interrupt. + * @param __HANDLE__ specifies the TIM Handle. + * @param __INTERRUPT__ specifies the TIM interrupt source to disable. + * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval None + */ +#define __HAL_TIM_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DIER &= ~(__INTERRUPT__)) + +/** @brief Enable the specified DMA request. + * @param __HANDLE__ specifies the TIM Handle. + * @param __DMA__ specifies the TIM DMA request to enable. + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: Update DMA request + * @arg TIM_DMA_CC1: Capture/Compare 1 DMA request + * @arg TIM_DMA_CC2: Capture/Compare 2 DMA request + * @arg TIM_DMA_CC3: Capture/Compare 3 DMA request + * @arg TIM_DMA_CC4: Capture/Compare 4 DMA request + * @arg TIM_DMA_COM: Commutation DMA request + * @arg TIM_DMA_TRIGGER: Trigger DMA request + * @retval None + */ +#define __HAL_TIM_ENABLE_DMA(__HANDLE__, __DMA__) ((__HANDLE__)->Instance->DIER |= (__DMA__)) + +/** @brief Disable the specified DMA request. + * @param __HANDLE__ specifies the TIM Handle. + * @param __DMA__ specifies the TIM DMA request to disable. 
+ * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: Update DMA request + * @arg TIM_DMA_CC1: Capture/Compare 1 DMA request + * @arg TIM_DMA_CC2: Capture/Compare 2 DMA request + * @arg TIM_DMA_CC3: Capture/Compare 3 DMA request + * @arg TIM_DMA_CC4: Capture/Compare 4 DMA request + * @arg TIM_DMA_COM: Commutation DMA request + * @arg TIM_DMA_TRIGGER: Trigger DMA request + * @retval None + */ +#define __HAL_TIM_DISABLE_DMA(__HANDLE__, __DMA__) ((__HANDLE__)->Instance->DIER &= ~(__DMA__)) + +/** @brief Check whether the specified TIM interrupt flag is set or not. + * @param __HANDLE__ specifies the TIM Handle. + * @param __FLAG__ specifies the TIM interrupt flag to check. + * This parameter can be one of the following values: + * @arg TIM_FLAG_UPDATE: Update interrupt flag + * @arg TIM_FLAG_CC1: Capture/Compare 1 interrupt flag + * @arg TIM_FLAG_CC2: Capture/Compare 2 interrupt flag + * @arg TIM_FLAG_CC3: Capture/Compare 3 interrupt flag + * @arg TIM_FLAG_CC4: Capture/Compare 4 interrupt flag + * @arg TIM_FLAG_CC5: Compare 5 interrupt flag + * @arg TIM_FLAG_CC6: Compare 6 interrupt flag + * @arg TIM_FLAG_COM: Commutation interrupt flag + * @arg TIM_FLAG_TRIGGER: Trigger interrupt flag + * @arg TIM_FLAG_BREAK: Break interrupt flag + * @arg TIM_FLAG_BREAK2: Break 2 interrupt flag + * @arg TIM_FLAG_SYSTEM_BREAK: System Break interrupt flag + * @arg TIM_FLAG_CC1OF: Capture/Compare 1 overcapture flag + * @arg TIM_FLAG_CC2OF: Capture/Compare 2 overcapture flag + * @arg TIM_FLAG_CC3OF: Capture/Compare 3 overcapture flag + * @arg TIM_FLAG_CC4OF: Capture/Compare 4 overcapture flag + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_TIM_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->SR &(__FLAG__)) == (__FLAG__)) + +/** @brief Clear the specified TIM interrupt flag. + * @param __HANDLE__ specifies the TIM Handle. + * @param __FLAG__ specifies the TIM interrupt flag to clear. + * This parameter can be one of the following values: + * @arg TIM_FLAG_UPDATE: Update interrupt flag + * @arg TIM_FLAG_CC1: Capture/Compare 1 interrupt flag + * @arg TIM_FLAG_CC2: Capture/Compare 2 interrupt flag + * @arg TIM_FLAG_CC3: Capture/Compare 3 interrupt flag + * @arg TIM_FLAG_CC4: Capture/Compare 4 interrupt flag + * @arg TIM_FLAG_CC5: Compare 5 interrupt flag + * @arg TIM_FLAG_CC6: Compare 6 interrupt flag + * @arg TIM_FLAG_COM: Commutation interrupt flag + * @arg TIM_FLAG_TRIGGER: Trigger interrupt flag + * @arg TIM_FLAG_BREAK: Break interrupt flag + * @arg TIM_FLAG_BREAK2: Break 2 interrupt flag + * @arg TIM_FLAG_SYSTEM_BREAK: System Break interrupt flag + * @arg TIM_FLAG_CC1OF: Capture/Compare 1 overcapture flag + * @arg TIM_FLAG_CC2OF: Capture/Compare 2 overcapture flag + * @arg TIM_FLAG_CC3OF: Capture/Compare 3 overcapture flag + * @arg TIM_FLAG_CC4OF: Capture/Compare 4 overcapture flag + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_TIM_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->SR = ~(__FLAG__)) + +/** + * @brief Check whether the specified TIM interrupt source is enabled or not. + * @param __HANDLE__ TIM handle + * @param __INTERRUPT__ specifies the TIM interrupt source to check. 
+ * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval The state of TIM_IT (SET or RESET). + */ +#define __HAL_TIM_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((((__HANDLE__)->Instance->DIER & (__INTERRUPT__)) \ + == (__INTERRUPT__)) ? SET : RESET) + +/** @brief Clear the TIM interrupt pending bits. + * @param __HANDLE__ TIM handle + * @param __INTERRUPT__ specifies the interrupt pending bit to clear. + * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval None + */ +#define __HAL_TIM_CLEAR_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->SR = ~(__INTERRUPT__)) + +/** + * @brief Force a continuous copy of the update interrupt flag (UIF) into the timer counter register (bit 31). + * @note This allows both the counter value and a potential roll-over condition signalled by the UIFCPY flag to be read in an atomic way. + * @param __HANDLE__ TIM handle. + * @retval None + */ +#define __HAL_TIM_UIFREMAP_ENABLE(__HANDLE__) (((__HANDLE__)->Instance->CR1 |= TIM_CR1_UIFREMAP)) + +/** + * @brief Disable update interrupt flag (UIF) remapping. + * @param __HANDLE__ TIM handle. + * @retval None + */ +#define __HAL_TIM_UIFREMAP_DISABLE(__HANDLE__) (((__HANDLE__)->Instance->CR1 &= ~TIM_CR1_UIFREMAP)) + +/** + * @brief Get update interrupt flag (UIF) copy status. + * @param __COUNTER__ Counter value. + * @retval The state of UIFCPY (TRUE or FALSE). + */ +#define __HAL_TIM_GET_UIFCPY(__COUNTER__) (((__COUNTER__) & (TIM_CNT_UIFCPY)) == (TIM_CNT_UIFCPY)) + +/** + * @brief Indicates whether or not the TIM Counter is used as downcounter. + * @param __HANDLE__ TIM handle. + * @retval False (Counter used as upcounter) or True (Counter used as downcounter) + * @note This macro is particularly useful to get the counting mode when the timer operates in Center-aligned mode or Encoder +mode. + */ +#define __HAL_TIM_IS_TIM_COUNTING_DOWN(__HANDLE__) (((__HANDLE__)->Instance->CR1 &(TIM_CR1_DIR)) == (TIM_CR1_DIR)) + +/** + * @brief Set the TIM Prescaler on runtime. + * @param __HANDLE__ TIM handle. + * @param __PRESC__ specifies the Prescaler new value. + * @retval None + */ +#define __HAL_TIM_SET_PRESCALER(__HANDLE__, __PRESC__) ((__HANDLE__)->Instance->PSC = (__PRESC__)) + +/** + * @brief Set the TIM Counter Register value on runtime. + * Note Please check if the bit 31 of CNT register is used as UIF copy or not, this may affect the counter range in case of 32 bits counter TIM instance. + * Bit 31 of CNT can be enabled/disabled using __HAL_TIM_UIFREMAP_ENABLE()/__HAL_TIM_UIFREMAP_DISABLE() macros. + * @param __HANDLE__ TIM handle. + * @param __COUNTER__ specifies the Counter register new value.
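 * @note  (Editor's illustrative example, not part of the original header) Assuming a
 *        hypothetical initialised handle htim2, a typical call is
 *        __HAL_TIM_SET_COUNTER(&htim2, 0U); which simply writes the new value to TIMx_CNT.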
+ * @retval None + */ +#define __HAL_TIM_SET_COUNTER(__HANDLE__, __COUNTER__) ((__HANDLE__)->Instance->CNT = (__COUNTER__)) + +/** + * @brief Get the TIM Counter Register value on runtime. + * @param __HANDLE__ TIM handle. + * @retval 16-bit or 32-bit value of the timer counter register (TIMx_CNT) + */ +#define __HAL_TIM_GET_COUNTER(__HANDLE__) ((__HANDLE__)->Instance->CNT) + +/** + * @brief Set the TIM Autoreload Register value on runtime without calling another time any Init function. + * @param __HANDLE__ TIM handle. + * @param __AUTORELOAD__ specifies the Counter register new value. + * @retval None + */ +#define __HAL_TIM_SET_AUTORELOAD(__HANDLE__, __AUTORELOAD__) \ + do{ \ + (__HANDLE__)->Instance->ARR = (__AUTORELOAD__); \ + (__HANDLE__)->Init.Period = (__AUTORELOAD__); \ + } while(0) + +/** + * @brief Get the TIM Autoreload Register value on runtime. + * @param __HANDLE__ TIM handle. + * @retval 16-bit or 32-bit value of the timer auto-reload register(TIMx_ARR) + */ +#define __HAL_TIM_GET_AUTORELOAD(__HANDLE__) ((__HANDLE__)->Instance->ARR) + +/** + * @brief Set the TIM Clock Division value on runtime without calling another time any Init function. + * @param __HANDLE__ TIM handle. + * @param __CKD__ specifies the clock division value. + * This parameter can be one of the following value: + * @arg TIM_CLOCKDIVISION_DIV1: tDTS=tCK_INT + * @arg TIM_CLOCKDIVISION_DIV2: tDTS=2*tCK_INT + * @arg TIM_CLOCKDIVISION_DIV4: tDTS=4*tCK_INT + * @retval None + */ +#define __HAL_TIM_SET_CLOCKDIVISION(__HANDLE__, __CKD__) \ + do{ \ + (__HANDLE__)->Instance->CR1 &= (~TIM_CR1_CKD); \ + (__HANDLE__)->Instance->CR1 |= (__CKD__); \ + (__HANDLE__)->Init.ClockDivision = (__CKD__); \ + } while(0) + +/** + * @brief Get the TIM Clock Division value on runtime. + * @param __HANDLE__ TIM handle. + * @retval The clock division can be one of the following values: + * @arg TIM_CLOCKDIVISION_DIV1: tDTS=tCK_INT + * @arg TIM_CLOCKDIVISION_DIV2: tDTS=2*tCK_INT + * @arg TIM_CLOCKDIVISION_DIV4: tDTS=4*tCK_INT + */ +#define __HAL_TIM_GET_CLOCKDIVISION(__HANDLE__) ((__HANDLE__)->Instance->CR1 & TIM_CR1_CKD) + +/** + * @brief Set the TIM Input Capture prescaler on runtime without calling another time HAL_TIM_IC_ConfigChannel() function. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param __ICPSC__ specifies the Input Capture4 prescaler new value. + * This parameter can be one of the following values: + * @arg TIM_ICPSC_DIV1: no prescaler + * @arg TIM_ICPSC_DIV2: capture is done once every 2 events + * @arg TIM_ICPSC_DIV4: capture is done once every 4 events + * @arg TIM_ICPSC_DIV8: capture is done once every 8 events + * @retval None + */ +#define __HAL_TIM_SET_ICPRESCALER(__HANDLE__, __CHANNEL__, __ICPSC__) \ + do{ \ + TIM_RESET_ICPRESCALERVALUE((__HANDLE__), (__CHANNEL__)); \ + TIM_SET_ICPRESCALERVALUE((__HANDLE__), (__CHANNEL__), (__ICPSC__)); \ + } while(0) + +/** + * @brief Get the TIM Input Capture prescaler on runtime. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. 
+ * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: get input capture 1 prescaler value + * @arg TIM_CHANNEL_2: get input capture 2 prescaler value + * @arg TIM_CHANNEL_3: get input capture 3 prescaler value + * @arg TIM_CHANNEL_4: get input capture 4 prescaler value + * @retval The input capture prescaler can be one of the following values: + * @arg TIM_ICPSC_DIV1: no prescaler + * @arg TIM_ICPSC_DIV2: capture is done once every 2 events + * @arg TIM_ICPSC_DIV4: capture is done once every 4 events + * @arg TIM_ICPSC_DIV8: capture is done once every 8 events + */ +#define __HAL_TIM_GET_ICPRESCALER(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 & TIM_CCMR1_IC1PSC) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? (((__HANDLE__)->Instance->CCMR1 & TIM_CCMR1_IC2PSC) >> 8U) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 & TIM_CCMR2_IC3PSC) :\ + (((__HANDLE__)->Instance->CCMR2 & TIM_CCMR2_IC4PSC)) >> 8U) + +/** + * @brief Set the TIM Capture Compare Register value on runtime without calling another time ConfigChannel function. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @param __COMPARE__ specifies the Capture Compare register new value. + * @retval None + */ +#define __HAL_TIM_SET_COMPARE(__HANDLE__, __CHANNEL__, __COMPARE__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCR1 = (__COMPARE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCR2 = (__COMPARE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCR3 = (__COMPARE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCR4 = (__COMPARE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCR5 = (__COMPARE__)) :\ + ((__HANDLE__)->Instance->CCR6 = (__COMPARE__))) + +/** + * @brief Get the TIM Capture Compare Register value on runtime. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channel associated with the capture compare register + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: get capture/compare 1 register value + * @arg TIM_CHANNEL_2: get capture/compare 2 register value + * @arg TIM_CHANNEL_3: get capture/compare 3 register value + * @arg TIM_CHANNEL_4: get capture/compare 4 register value + * @arg TIM_CHANNEL_5: get capture/compare 5 register value + * @arg TIM_CHANNEL_6: get capture/compare 6 register value + * @retval 16-bit or 32-bit value of the capture/compare register (TIMx_CCRy) + */ +#define __HAL_TIM_GET_COMPARE(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCR1) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCR2) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCR3) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCR4) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCR5) :\ + ((__HANDLE__)->Instance->CCR6)) + +/** + * @brief Set the TIM Output compare preload. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. 
+ * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval None + */ +#define __HAL_TIM_ENABLE_OCxPRELOAD(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC1PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC2PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC3PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC4PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCMR3 |= TIM_CCMR3_OC5PE) :\ + ((__HANDLE__)->Instance->CCMR3 |= TIM_CCMR3_OC6PE)) + +/** + * @brief Reset the TIM Output compare preload. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval None + */ +#define __HAL_TIM_DISABLE_OCxPRELOAD(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC1PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC2PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC3PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC4PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCMR3 &= ~TIM_CCMR3_OC5PE) :\ + ((__HANDLE__)->Instance->CCMR3 &= ~TIM_CCMR3_OC6PE)) + +/** + * @brief Enable fast mode for a given channel. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @note When fast mode is enabled an active edge on the trigger input acts + * like a compare match on CCx output. Delay to sample the trigger + * input and to activate CCx output is reduced to 3 clock cycles. + * @note Fast mode acts only if the channel is configured in PWM1 or PWM2 mode. + * @retval None + */ +#define __HAL_TIM_ENABLE_OCxFAST(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC1FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC2FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC3FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC4FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCMR3 |= TIM_CCMR3_OC5FE) :\ + ((__HANDLE__)->Instance->CCMR3 |= TIM_CCMR3_OC6FE)) + +/** + * @brief Disable fast mode for a given channel. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. 
+ * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @note When fast mode is disabled CCx output behaves normally depending + * on counter and CCRx values even when the trigger is ON. The minimum + * delay to activate CCx output when an active edge occurs on the + * trigger input is 5 clock cycles. + * @retval None + */ +#define __HAL_TIM_DISABLE_OCxFAST(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC1FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC2FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC3FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC4FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCMR3 &= ~TIM_CCMR3_OC5FE) :\ + ((__HANDLE__)->Instance->CCMR3 &= ~TIM_CCMR3_OC6FE)) + +/** + * @brief Set the Update Request Source (URS) bit of the TIMx_CR1 register. + * @param __HANDLE__ TIM handle. + * @note When the URS bit of the TIMx_CR1 register is set, only counter + * overflow/underflow generates an update interrupt or DMA request (if + * enabled) + * @retval None + */ +#define __HAL_TIM_URS_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1|= TIM_CR1_URS) + +/** + * @brief Reset the Update Request Source (URS) bit of the TIMx_CR1 register. + * @param __HANDLE__ TIM handle. + * @note When the URS bit of the TIMx_CR1 register is reset, any of the + * following events generate an update interrupt or DMA request (if + * enabled): + * _ Counter overflow underflow + * _ Setting the UG bit + * _ Update generation through the slave mode controller + * @retval None + */ +#define __HAL_TIM_URS_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1&=~TIM_CR1_URS) + +/** + * @brief Set the TIM Capture x input polarity on runtime. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. 
+ * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param __POLARITY__ Polarity for TIx source + * @arg TIM_INPUTCHANNELPOLARITY_RISING: Rising Edge + * @arg TIM_INPUTCHANNELPOLARITY_FALLING: Falling Edge + * @arg TIM_INPUTCHANNELPOLARITY_BOTHEDGE: Rising and Falling Edge + * @retval None + */ +#define __HAL_TIM_SET_CAPTUREPOLARITY(__HANDLE__, __CHANNEL__, __POLARITY__) \ + do{ \ + TIM_RESET_CAPTUREPOLARITY((__HANDLE__), (__CHANNEL__)); \ + TIM_SET_CAPTUREPOLARITY((__HANDLE__), (__CHANNEL__), (__POLARITY__)); \ + }while(0) + +/** + * @} + */ +/* End of exported macros ----------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup TIM_Private_Constants TIM Private Constants + * @{ + */ +/* The counter of a timer instance is disabled only if all the CCx and CCxN + channels have been disabled */ +#define TIM_CCER_CCxE_MASK ((uint32_t)(TIM_CCER_CC1E | TIM_CCER_CC2E | TIM_CCER_CC3E | TIM_CCER_CC4E)) +#define TIM_CCER_CCxNE_MASK ((uint32_t)(TIM_CCER_CC1NE | TIM_CCER_CC2NE | TIM_CCER_CC3NE)) +/** + * @} + */ +/* End of private constants --------------------------------------------------*/ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup TIM_Private_Macros TIM Private Macros + * @{ + */ +#define IS_TIM_CLEARINPUT_SOURCE(__MODE__) (((__MODE__) == TIM_CLEARINPUTSOURCE_NONE) || \ + ((__MODE__) == TIM_CLEARINPUTSOURCE_ETR)) + +#if defined(TIM_AF1_BKINE)&&defined(TIM_AF2_BKINE) +#define IS_TIM_DMA_BASE(__BASE__) (((__BASE__) == TIM_DMABASE_CR1) || \ + ((__BASE__) == TIM_DMABASE_CR2) || \ + ((__BASE__) == TIM_DMABASE_SMCR) || \ + ((__BASE__) == TIM_DMABASE_DIER) || \ + ((__BASE__) == TIM_DMABASE_SR) || \ + ((__BASE__) == TIM_DMABASE_EGR) || \ + ((__BASE__) == TIM_DMABASE_CCMR1) || \ + ((__BASE__) == TIM_DMABASE_CCMR2) || \ + ((__BASE__) == TIM_DMABASE_CCER) || \ + ((__BASE__) == TIM_DMABASE_CNT) || \ + ((__BASE__) == TIM_DMABASE_PSC) || \ + ((__BASE__) == TIM_DMABASE_ARR) || \ + ((__BASE__) == TIM_DMABASE_RCR) || \ + ((__BASE__) == TIM_DMABASE_CCR1) || \ + ((__BASE__) == TIM_DMABASE_CCR2) || \ + ((__BASE__) == TIM_DMABASE_CCR3) || \ + ((__BASE__) == TIM_DMABASE_CCR4) || \ + ((__BASE__) == TIM_DMABASE_BDTR) || \ + ((__BASE__) == TIM_DMABASE_OR) || \ + ((__BASE__) == TIM_DMABASE_CCMR3) || \ + ((__BASE__) == TIM_DMABASE_CCR5) || \ + ((__BASE__) == TIM_DMABASE_CCR6) || \ + ((__BASE__) == TIM_DMABASE_AF1) || \ + ((__BASE__) == TIM_DMABASE_AF2)) +#else +#define IS_TIM_DMA_BASE(__BASE__) (((__BASE__) == TIM_DMABASE_CR1) || \ + ((__BASE__) == TIM_DMABASE_CR2) || \ + ((__BASE__) == TIM_DMABASE_SMCR) || \ + ((__BASE__) == TIM_DMABASE_DIER) || \ + ((__BASE__) == TIM_DMABASE_SR) || \ + ((__BASE__) == TIM_DMABASE_EGR) || \ + ((__BASE__) == TIM_DMABASE_CCMR1) || \ + ((__BASE__) == TIM_DMABASE_CCMR2) || \ + ((__BASE__) == TIM_DMABASE_CCER) || \ + ((__BASE__) == TIM_DMABASE_CNT) || \ + ((__BASE__) == TIM_DMABASE_PSC) || \ + ((__BASE__) == TIM_DMABASE_ARR) || \ + ((__BASE__) == TIM_DMABASE_RCR) || \ + ((__BASE__) == TIM_DMABASE_CCR1) || \ + ((__BASE__) == TIM_DMABASE_CCR2) || \ + ((__BASE__) == TIM_DMABASE_CCR3) || \ + ((__BASE__) == TIM_DMABASE_CCR4) || \ + ((__BASE__) == TIM_DMABASE_BDTR) || \ + ((__BASE__) == TIM_DMABASE_OR) || \ + ((__BASE__) == TIM_DMABASE_CCMR3) || \ + ((__BASE__) == 
TIM_DMABASE_CCR5) || \ + ((__BASE__) == TIM_DMABASE_CCR6)) +#endif /* TIM_AF1_BKINE && TIM_AF2_BKINE */ + +#define IS_TIM_EVENT_SOURCE(__SOURCE__) ((((__SOURCE__) & 0xFFFFFE00U) == 0x00000000U) && ((__SOURCE__) != 0x00000000U)) + +#define IS_TIM_COUNTER_MODE(__MODE__) (((__MODE__) == TIM_COUNTERMODE_UP) || \ + ((__MODE__) == TIM_COUNTERMODE_DOWN) || \ + ((__MODE__) == TIM_COUNTERMODE_CENTERALIGNED1) || \ + ((__MODE__) == TIM_COUNTERMODE_CENTERALIGNED2) || \ + ((__MODE__) == TIM_COUNTERMODE_CENTERALIGNED3)) + +#define IS_TIM_UIFREMAP_MODE(__MODE__) (((__MODE__) == TIM_UIFREMAP_DISABLE) || \ + ((__MODE__) == TIM_UIFREMAP_ENABLE)) + +#define IS_TIM_CLOCKDIVISION_DIV(__DIV__) (((__DIV__) == TIM_CLOCKDIVISION_DIV1) || \ + ((__DIV__) == TIM_CLOCKDIVISION_DIV2) || \ + ((__DIV__) == TIM_CLOCKDIVISION_DIV4)) + +#define IS_TIM_AUTORELOAD_PRELOAD(PRELOAD) (((PRELOAD) == TIM_AUTORELOAD_PRELOAD_DISABLE) || \ + ((PRELOAD) == TIM_AUTORELOAD_PRELOAD_ENABLE)) + +#define IS_TIM_FAST_STATE(__STATE__) (((__STATE__) == TIM_OCFAST_DISABLE) || \ + ((__STATE__) == TIM_OCFAST_ENABLE)) + +#define IS_TIM_OC_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_OCPOLARITY_HIGH) || \ + ((__POLARITY__) == TIM_OCPOLARITY_LOW)) + +#define IS_TIM_OCN_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_OCNPOLARITY_HIGH) || \ + ((__POLARITY__) == TIM_OCNPOLARITY_LOW)) + +#define IS_TIM_OCIDLE_STATE(__STATE__) (((__STATE__) == TIM_OCIDLESTATE_SET) || \ + ((__STATE__) == TIM_OCIDLESTATE_RESET)) + +#define IS_TIM_OCNIDLE_STATE(__STATE__) (((__STATE__) == TIM_OCNIDLESTATE_SET) || \ + ((__STATE__) == TIM_OCNIDLESTATE_RESET)) + +#define IS_TIM_ENCODERINPUT_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_ENCODERINPUTPOLARITY_RISING) || \ + ((__POLARITY__) == TIM_ENCODERINPUTPOLARITY_FALLING)) + +#define IS_TIM_IC_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_ICPOLARITY_RISING) || \ + ((__POLARITY__) == TIM_ICPOLARITY_FALLING) || \ + ((__POLARITY__) == TIM_ICPOLARITY_BOTHEDGE)) + +#define IS_TIM_IC_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_ICSELECTION_DIRECTTI) || \ + ((__SELECTION__) == TIM_ICSELECTION_INDIRECTTI) || \ + ((__SELECTION__) == TIM_ICSELECTION_TRC)) + +#define IS_TIM_IC_PRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_ICPSC_DIV1) || \ + ((__PRESCALER__) == TIM_ICPSC_DIV2) || \ + ((__PRESCALER__) == TIM_ICPSC_DIV4) || \ + ((__PRESCALER__) == TIM_ICPSC_DIV8)) + +#define IS_TIM_OPM_MODE(__MODE__) (((__MODE__) == TIM_OPMODE_SINGLE) || \ + ((__MODE__) == TIM_OPMODE_REPETITIVE)) + +#define IS_TIM_ENCODER_MODE(__MODE__) (((__MODE__) == TIM_ENCODERMODE_TI1) || \ + ((__MODE__) == TIM_ENCODERMODE_TI2) || \ + ((__MODE__) == TIM_ENCODERMODE_TI12)) + +#define IS_TIM_DMA_SOURCE(__SOURCE__) ((((__SOURCE__) & 0xFFFF80FFU) == 0x00000000U) && ((__SOURCE__) != 0x00000000U)) + +#define IS_TIM_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ + ((__CHANNEL__) == TIM_CHANNEL_2) || \ + ((__CHANNEL__) == TIM_CHANNEL_3) || \ + ((__CHANNEL__) == TIM_CHANNEL_4) || \ + ((__CHANNEL__) == TIM_CHANNEL_5) || \ + ((__CHANNEL__) == TIM_CHANNEL_6) || \ + ((__CHANNEL__) == TIM_CHANNEL_ALL)) + +#define IS_TIM_OPM_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ + ((__CHANNEL__) == TIM_CHANNEL_2)) + +#define IS_TIM_COMPLEMENTARY_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ + ((__CHANNEL__) == TIM_CHANNEL_2) || \ + ((__CHANNEL__) == TIM_CHANNEL_3)) + +#define IS_TIM_CLOCKSOURCE(__CLOCK__) (((__CLOCK__) == TIM_CLOCKSOURCE_INTERNAL) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ETRMODE2) || \ + ((__CLOCK__) ==
TIM_CLOCKSOURCE_ITR0) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR1) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR2) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR3) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_TI1ED) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_TI1) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_TI2) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ETRMODE1)) + +#define IS_TIM_CLOCKPOLARITY(__POLARITY__) (((__POLARITY__) == TIM_CLOCKPOLARITY_INVERTED) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_NONINVERTED) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_RISING) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_FALLING) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_BOTHEDGE)) + +#define IS_TIM_CLOCKPRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV1) || \ + ((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV2) || \ + ((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV4) || \ + ((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV8)) + +#define IS_TIM_CLOCKFILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_CLEARINPUT_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_CLEARINPUTPOLARITY_INVERTED) || \ + ((__POLARITY__) == TIM_CLEARINPUTPOLARITY_NONINVERTED)) + +#define IS_TIM_CLEARINPUT_PRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV1) || \ + ((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV2) || \ + ((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV4) || \ + ((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV8)) + +#define IS_TIM_CLEARINPUT_FILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_OSSR_STATE(__STATE__) (((__STATE__) == TIM_OSSR_ENABLE) || \ + ((__STATE__) == TIM_OSSR_DISABLE)) + +#define IS_TIM_OSSI_STATE(__STATE__) (((__STATE__) == TIM_OSSI_ENABLE) || \ + ((__STATE__) == TIM_OSSI_DISABLE)) + +#define IS_TIM_LOCK_LEVEL(__LEVEL__) (((__LEVEL__) == TIM_LOCKLEVEL_OFF) || \ + ((__LEVEL__) == TIM_LOCKLEVEL_1) || \ + ((__LEVEL__) == TIM_LOCKLEVEL_2) || \ + ((__LEVEL__) == TIM_LOCKLEVEL_3)) + +#define IS_TIM_BREAK_FILTER(__BRKFILTER__) ((__BRKFILTER__) <= 0xFUL) + + +#define IS_TIM_BREAK_STATE(__STATE__) (((__STATE__) == TIM_BREAK_ENABLE) || \ + ((__STATE__) == TIM_BREAK_DISABLE)) + +#define IS_TIM_BREAK_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_BREAKPOLARITY_LOW) || \ + ((__POLARITY__) == TIM_BREAKPOLARITY_HIGH)) + +#define IS_TIM_BREAK2_STATE(__STATE__) (((__STATE__) == TIM_BREAK2_ENABLE) || \ + ((__STATE__) == TIM_BREAK2_DISABLE)) + +#define IS_TIM_BREAK2_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_BREAK2POLARITY_LOW) || \ + ((__POLARITY__) == TIM_BREAK2POLARITY_HIGH)) + +#define IS_TIM_AUTOMATIC_OUTPUT_STATE(__STATE__) (((__STATE__) == TIM_AUTOMATICOUTPUT_ENABLE) || \ + ((__STATE__) == TIM_AUTOMATICOUTPUT_DISABLE)) + +#define IS_TIM_GROUPCH5(__OCREF__) ((((__OCREF__) & 0x1FFFFFFFU) == 0x00000000U)) + +#define IS_TIM_TRGO_SOURCE(__SOURCE__) (((__SOURCE__) == TIM_TRGO_RESET) || \ + ((__SOURCE__) == TIM_TRGO_ENABLE) || \ + ((__SOURCE__) == TIM_TRGO_UPDATE) || \ + ((__SOURCE__) == TIM_TRGO_OC1) || \ + ((__SOURCE__) == TIM_TRGO_OC1REF) || \ + ((__SOURCE__) == TIM_TRGO_OC2REF) || \ + ((__SOURCE__) == TIM_TRGO_OC3REF) || \ + ((__SOURCE__) == TIM_TRGO_OC4REF)) + +#define IS_TIM_TRGO2_SOURCE(__SOURCE__) (((__SOURCE__) == TIM_TRGO2_RESET) || \ + ((__SOURCE__) == TIM_TRGO2_ENABLE) || \ + ((__SOURCE__) == TIM_TRGO2_UPDATE) || \ + ((__SOURCE__) == TIM_TRGO2_OC1) || \ + ((__SOURCE__) == TIM_TRGO2_OC1REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC2REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC3REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC3REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC4REF) || \ + ((__SOURCE__) == 
TIM_TRGO2_OC5REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC6REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC4REF_RISINGFALLING) || \ + ((__SOURCE__) == TIM_TRGO2_OC6REF_RISINGFALLING) || \ + ((__SOURCE__) == TIM_TRGO2_OC4REF_RISING_OC6REF_RISING) || \ + ((__SOURCE__) == TIM_TRGO2_OC4REF_RISING_OC6REF_FALLING) || \ + ((__SOURCE__) == TIM_TRGO2_OC5REF_RISING_OC6REF_RISING) || \ + ((__SOURCE__) == TIM_TRGO2_OC5REF_RISING_OC6REF_FALLING)) + +#define IS_TIM_MSM_STATE(__STATE__) (((__STATE__) == TIM_MASTERSLAVEMODE_ENABLE) || \ + ((__STATE__) == TIM_MASTERSLAVEMODE_DISABLE)) + +#define IS_TIM_SLAVE_MODE(__MODE__) (((__MODE__) == TIM_SLAVEMODE_DISABLE) || \ + ((__MODE__) == TIM_SLAVEMODE_RESET) || \ + ((__MODE__) == TIM_SLAVEMODE_GATED) || \ + ((__MODE__) == TIM_SLAVEMODE_TRIGGER) || \ + ((__MODE__) == TIM_SLAVEMODE_EXTERNAL1) || \ + ((__MODE__) == TIM_SLAVEMODE_COMBINED_RESETTRIGGER)) + +#define IS_TIM_PWM_MODE(__MODE__) (((__MODE__) == TIM_OCMODE_PWM1) || \ + ((__MODE__) == TIM_OCMODE_PWM2) || \ + ((__MODE__) == TIM_OCMODE_COMBINED_PWM1) || \ + ((__MODE__) == TIM_OCMODE_COMBINED_PWM2) || \ + ((__MODE__) == TIM_OCMODE_ASSYMETRIC_PWM1) || \ + ((__MODE__) == TIM_OCMODE_ASSYMETRIC_PWM2)) + +#define IS_TIM_OC_MODE(__MODE__) (((__MODE__) == TIM_OCMODE_TIMING) || \ + ((__MODE__) == TIM_OCMODE_ACTIVE) || \ + ((__MODE__) == TIM_OCMODE_INACTIVE) || \ + ((__MODE__) == TIM_OCMODE_TOGGLE) || \ + ((__MODE__) == TIM_OCMODE_FORCED_ACTIVE) || \ + ((__MODE__) == TIM_OCMODE_FORCED_INACTIVE) || \ + ((__MODE__) == TIM_OCMODE_RETRIGERRABLE_OPM1) || \ + ((__MODE__) == TIM_OCMODE_RETRIGERRABLE_OPM2)) + +#define IS_TIM_TRIGGER_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_TS_ITR0) || \ + ((__SELECTION__) == TIM_TS_ITR1) || \ + ((__SELECTION__) == TIM_TS_ITR2) || \ + ((__SELECTION__) == TIM_TS_ITR3) || \ + ((__SELECTION__) == TIM_TS_TI1F_ED) || \ + ((__SELECTION__) == TIM_TS_TI1FP1) || \ + ((__SELECTION__) == TIM_TS_TI2FP2) || \ + ((__SELECTION__) == TIM_TS_ETRF)) + +#define IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_TS_ITR0) || \ + ((__SELECTION__) == TIM_TS_ITR1) || \ + ((__SELECTION__) == TIM_TS_ITR2) || \ + ((__SELECTION__) == TIM_TS_ITR3) || \ + ((__SELECTION__) == TIM_TS_NONE)) + +#define IS_TIM_TRIGGERPOLARITY(__POLARITY__) (((__POLARITY__) == TIM_TRIGGERPOLARITY_INVERTED ) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_NONINVERTED) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_RISING ) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_FALLING ) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_BOTHEDGE )) + +#define IS_TIM_TRIGGERPRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV1) || \ + ((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV2) || \ + ((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV4) || \ + ((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV8)) + +#define IS_TIM_TRIGGERFILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_TI1SELECTION(__TI1SELECTION__) (((__TI1SELECTION__) == TIM_TI1SELECTION_CH1) || \ + ((__TI1SELECTION__) == TIM_TI1SELECTION_XORCOMBINATION)) + +#define IS_TIM_DMA_LENGTH(__LENGTH__) (((__LENGTH__) == TIM_DMABURSTLENGTH_1TRANSFER) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_2TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_3TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_4TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_5TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_6TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_7TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_8TRANSFERS) || \ + ((__LENGTH__) == 
TIM_DMABURSTLENGTH_9TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_10TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_11TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_12TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_13TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_14TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_15TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_16TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_17TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_18TRANSFERS)) + +#define IS_TIM_IC_FILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_DEADTIME(__DEADTIME__) ((__DEADTIME__) <= 0xFFU) + +#define IS_TIM_BREAK_SYSTEM(__CONFIG__) (((__CONFIG__) == TIM_BREAK_SYSTEM_ECC) || \ + ((__CONFIG__) == TIM_BREAK_SYSTEM_PVD) || \ + ((__CONFIG__) == TIM_BREAK_SYSTEM_SRAM_PARITY_ERROR) || \ + ((__CONFIG__) == TIM_BREAK_SYSTEM_LOCKUP)) + +#define IS_TIM_SLAVEMODE_TRIGGER_ENABLED(__TRIGGER__) (((__TRIGGER__) == TIM_SLAVEMODE_TRIGGER) || \ + ((__TRIGGER__) == TIM_SLAVEMODE_COMBINED_RESETTRIGGER)) + +#define TIM_SET_ICPRESCALERVALUE(__HANDLE__, __CHANNEL__, __ICPSC__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 |= (__ICPSC__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 |= ((__ICPSC__) << 8U)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 |= (__ICPSC__)) :\ + ((__HANDLE__)->Instance->CCMR2 |= ((__ICPSC__) << 8U))) + +#define TIM_RESET_ICPRESCALERVALUE(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_IC3PSC) :\ + ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_IC4PSC)) + +#define TIM_SET_CAPTUREPOLARITY(__HANDLE__, __CHANNEL__, __POLARITY__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCER |= (__POLARITY__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCER |= ((__POLARITY__) << 4U)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCER |= ((__POLARITY__) << 8U)) :\ + ((__HANDLE__)->Instance->CCER |= (((__POLARITY__) << 12U)))) + +#define TIM_RESET_CAPTUREPOLARITY(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC1P | TIM_CCER_CC1NP)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC2P | TIM_CCER_CC2NP)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? 
((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC3P | TIM_CCER_CC3NP)) :\ + ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC4P | TIM_CCER_CC4NP))) + +/** + * @} + */ +/* End of private macros -----------------------------------------------------*/ + +/* Include TIM HAL Extended module */ +#include "stm32f7xx_hal_tim_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup TIM_Exported_Functions TIM Exported Functions + * @{ + */ + +/** @addtogroup TIM_Exported_Functions_Group1 TIM Time Base functions + * @brief Time Base functions + * @{ + */ +/* Time Base functions ********************************************************/ +HAL_StatusTypeDef HAL_TIM_Base_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_Base_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Base_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Base_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_Base_Start(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_Base_Stop(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_Base_Start_IT(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_Base_Stop_IT(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_Base_Stop_DMA(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group2 TIM Output Compare functions + * @brief TIM Output Compare functions + * @{ + */ +/* Timer Output Compare functions *********************************************/ +HAL_StatusTypeDef HAL_TIM_OC_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_OC_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OC_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OC_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_OC_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_OC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_OC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_OC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_OC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group3 TIM PWM functions + * @brief TIM PWM functions + * @{ + */ +/* Timer PWM functions ********************************************************/ +HAL_StatusTypeDef HAL_TIM_PWM_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_PWM_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_PWM_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_PWM_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_PWM_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_PWM_Stop_DMA(TIM_HandleTypeDef 
*htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group4 TIM Input Capture functions + * @brief TIM Input Capture functions + * @{ + */ +/* Timer Input Capture functions **********************************************/ +HAL_StatusTypeDef HAL_TIM_IC_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_IC_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_IC_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_IC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_IC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_IC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_IC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group5 TIM One Pulse functions + * @brief TIM One Pulse functions + * @{ + */ +/* Timer One Pulse functions **************************************************/ +HAL_StatusTypeDef HAL_TIM_OnePulse_Init(TIM_HandleTypeDef *htim, uint32_t OnePulseMode); +HAL_StatusTypeDef HAL_TIM_OnePulse_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OnePulse_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OnePulse_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group6 TIM Encoder functions + * @brief TIM Encoder functions + * @{ + */ +/* Timer Encoder functions ****************************************************/ +HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, TIM_Encoder_InitTypeDef *sConfig); +HAL_StatusTypeDef HAL_TIM_Encoder_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Encoder_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Encoder_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_Encoder_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData1, + uint32_t *pData2, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group7 TIM IRQ handler management + * @brief IRQ handler management + * @{ + */ +/* Interrupt Handler functions ***********************************************/ +void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group8 TIM Peripheral 
Control functions + * @brief Peripheral Control functions + * @{ + */ +/* Control functions *********************************************************/ +HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OC_InitTypeDef *sConfig, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OC_InitTypeDef *sConfig, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_IC_InitTypeDef *sConfig, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OnePulse_InitTypeDef *sConfig, + uint32_t OutputChannel, uint32_t InputChannel); +HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, TIM_ClearInputConfigTypeDef *sClearInputConfig, + uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, TIM_ClockConfigTypeDef *sClockSourceConfig); +HAL_StatusTypeDef HAL_TIM_ConfigTI1Input(TIM_HandleTypeDef *htim, uint32_t TI1_Selection); +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, TIM_SlaveConfigTypeDef *sSlaveConfig); +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro_IT(TIM_HandleTypeDef *htim, TIM_SlaveConfigTypeDef *sSlaveConfig); +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc); +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc); +HAL_StatusTypeDef HAL_TIM_GenerateEvent(TIM_HandleTypeDef *htim, uint32_t EventSource); +uint32_t HAL_TIM_ReadCapturedValue(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group9 TIM Callbacks functions + * @brief TIM Callbacks functions + * @{ + */ +/* Callback in non blocking modes (Interrupt and DMA) *************************/ +void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_PeriodElapsedHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_OC_DelayElapsedCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_CaptureHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_PulseFinishedCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_PulseFinishedHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_TriggerCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_TriggerHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_ErrorCallback(TIM_HandleTypeDef *htim); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_TIM_RegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID, + pTIM_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_TIM_UnRegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group10 TIM Peripheral State functions + * @brief Peripheral State functions + * @{ + */ +/* Peripheral State functions ************************************************/ +HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef 
HAL_TIM_OC_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_PWM_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** + * @} + */ +/* End of exported functions -------------------------------------------------*/ + +/* Private functions----------------------------------------------------------*/ +/** @defgroup TIM_Private_Functions TIM Private Functions + * @{ + */ +void TIM_Base_SetConfig(TIM_TypeDef *TIMx, TIM_Base_InitTypeDef *Structure); +void TIM_TI1_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, uint32_t TIM_ICFilter); +void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +void TIM_ETR_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ExtTRGPrescaler, + uint32_t TIM_ExtTRGPolarity, uint32_t ExtTRGFilter); + +void TIM_DMADelayPulseCplt(DMA_HandleTypeDef *hdma); +void TIM_DMADelayPulseHalfCplt(DMA_HandleTypeDef *hdma); +void TIM_DMAError(DMA_HandleTypeDef *hdma); +void TIM_DMACaptureCplt(DMA_HandleTypeDef *hdma); +void TIM_DMACaptureHalfCplt(DMA_HandleTypeDef *hdma); +void TIM_CCxChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelState); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +void TIM_ResetCallback(TIM_HandleTypeDef *htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ +/* End of private functions --------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_TIM_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim_ex.h new file mode 100644 index 000000000..2897aca4d --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim_ex.h @@ -0,0 +1,361 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_tim_ex.h + * @author MCD Application Team + * @brief Header file of TIM HAL Extended module. + ****************************************************************************** + * @attention + * + *
+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_TIM_EX_H +#define STM32F7xx_HAL_TIM_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup TIMEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Types TIM Extended Exported Types + * @{ + */ + +/** + * @brief TIM Hall sensor Configuration Structure definition + */ + +typedef struct +{ + uint32_t IC1Polarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Input_Capture_Polarity */ + + uint32_t IC1Prescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t IC1Filter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + + uint32_t Commutation_Delay; /*!< Specifies the pulse value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ +} TIM_HallSensor_InitTypeDef; +#if defined(TIM_BREAK_INPUT_SUPPORT) + +/** + * @brief TIM Break/Break2 input configuration + */ +typedef struct +{ + uint32_t Source; /*!< Specifies the source of the timer break input. + This parameter can be a value of @ref TIMEx_Break_Input_Source */ + uint32_t Enable; /*!< Specifies whether or not the break input source is enabled. + This parameter can be a value of @ref TIMEx_Break_Input_Source_Enable */ + uint32_t Polarity; /*!< Specifies the break input source polarity. 
+ This parameter can be a value of @ref TIMEx_Break_Input_Source_Polarity + Not relevant when analog watchdog output of the DFSDM1 used as break input source */ +} +TIMEx_BreakInputConfigTypeDef; + +#endif /* TIM_BREAK_INPUT_SUPPORT */ +/** + * @} + */ +/* End of exported types -----------------------------------------------------*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Constants TIM Extended Exported Constants + * @{ + */ + +/** @defgroup TIMEx_Remap TIM Extended Remapping + * @{ + */ +#define TIM_TIM2_TIM8_TRGO (0x00000000U) +#define TIM_TIM2_ETH_PTP (0x00000400U) +#define TIM_TIM2_USBFS_SOF (0x00000800U) +#define TIM_TIM2_USBHS_SOF (0x00000C00U) +#define TIM_TIM5_GPIO (0x00000000U) +#define TIM_TIM5_LSI (0x00000040U) +#define TIM_TIM5_LSE (0x00000080U) +#define TIM_TIM5_RTC (0x000000C0U) +#define TIM_TIM11_GPIO (0x00000000U) +#define TIM_TIM11_SPDIFRX (0x00000001U) +#define TIM_TIM11_HSE (0x00000002U) +#define TIM_TIM11_MCO1 (0x00000003U) +/** + * @} + */ +#if defined(TIM_BREAK_INPUT_SUPPORT) + +/** @defgroup TIMEx_Break_Input TIM Extended Break input + * @{ + */ +#define TIM_BREAKINPUT_BRK 0x00000001U /* !< Timer break input */ +#define TIM_BREAKINPUT_BRK2 0x00000002U /* !< Timer break2 input */ +/** + * @} + */ + +/** @defgroup TIMEx_Break_Input_Source TIM Extended Break input source + * @{ + */ +#define TIM_BREAKINPUTSOURCE_BKIN ((uint32_t)0x00000001U) /* !< An external source (GPIO) is connected to the BKIN pin */ +#define TIM_BREAKINPUTSOURCE_DFSDM1 ((uint32_t)0x00000008U) /* !< The analog watchdog output of the DFSDM1 peripheral is connected to the break input */ +/** + * @} + */ + +/** @defgroup TIMEx_Break_Input_Source_Enable TIM Extended Break input source enabling + * @{ + */ +#define TIM_BREAKINPUTSOURCE_DISABLE 0x00000000U /* !< Break input source is disabled */ +#define TIM_BREAKINPUTSOURCE_ENABLE 0x00000001U /* !< Break input source is enabled */ +/** + * @} + */ + +/** @defgroup TIMEx_Break_Input_Source_Polarity TIM Extended Break input polarity + * @{ + */ +#define TIM_BREAKINPUTSOURCE_POLARITY_LOW 0x00000001U /* !< Break input source is active low */ +#define TIM_BREAKINPUTSOURCE_POLARITY_HIGH 0x00000000U /* !< Break input source is active_high */ +/** + * @} + */ +#endif /* TIM_BREAK_INPUT_SUPPORT */ + +/** + * @} + */ +/* End of exported constants -------------------------------------------------*/ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Macros TIM Extended Exported Macros + * @{ + */ + +/** + * @} + */ +/* End of exported macro -----------------------------------------------------*/ + +/* Private macro -------------------------------------------------------------*/ +/** @defgroup TIMEx_Private_Macros TIM Extended Private Macros + * @{ + */ +#define IS_TIM_REMAP(__TIM_REMAP__) (((__TIM_REMAP__) == TIM_TIM2_TIM8_TRGO)||\ + ((__TIM_REMAP__) == TIM_TIM2_ETH_PTP) ||\ + ((__TIM_REMAP__) == TIM_TIM2_USBFS_SOF)||\ + ((__TIM_REMAP__) == TIM_TIM2_USBHS_SOF)||\ + ((__TIM_REMAP__) == TIM_TIM5_GPIO) ||\ + ((__TIM_REMAP__) == TIM_TIM5_LSI) ||\ + ((__TIM_REMAP__) == TIM_TIM5_LSE) ||\ + ((__TIM_REMAP__) == TIM_TIM5_RTC) ||\ + ((__TIM_REMAP__) == TIM_TIM11_GPIO) ||\ + ((__TIM_REMAP__) == TIM_TIM11_SPDIFRX) ||\ + ((__TIM_REMAP__) == TIM_TIM11_HSE) ||\ + ((__TIM_REMAP__) == TIM_TIM11_MCO1)) +#if defined(TIM_BREAK_INPUT_SUPPORT) + +#define IS_TIM_BREAKINPUT(__BREAKINPUT__) (((__BREAKINPUT__) == TIM_BREAKINPUT_BRK) || \ + ((__BREAKINPUT__) 
== TIM_BREAKINPUT_BRK2)) + +#define IS_TIM_BREAKINPUTSOURCE(__SOURCE__) (((__SOURCE__) == TIM_BREAKINPUTSOURCE_BKIN) || \ + ((__SOURCE__) == TIM_BREAKINPUTSOURCE_DFSDM)) + +#define IS_TIM_BREAKINPUTSOURCE_STATE(__STATE__) (((__STATE__) == TIM_BREAKINPUTSOURCE_DISABLE) || \ + ((__STATE__) == TIM_BREAKINPUTSOURCE_ENABLE)) + +#define IS_TIM_BREAKINPUTSOURCE_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_BREAKINPUTSOURCE_POLARITY_LOW) || \ + ((__POLARITY__) == TIM_BREAKINPUTSOURCE_POLARITY_HIGH)) +#endif /* TIM_BREAK_INPUT_SUPPORT */ + +/** + * @} + */ +/* End of private macro ------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup TIMEx_Exported_Functions TIM Extended Exported Functions + * @{ + */ + +/** @addtogroup TIMEx_Exported_Functions_Group1 Extended Timer Hall Sensor functions + * @brief Timer Hall Sensor functions + * @{ + */ +/* Timer Hall Sensor functions **********************************************/ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, TIM_HallSensor_InitTypeDef *sConfig); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_DeInit(TIM_HandleTypeDef *htim); + +void HAL_TIMEx_HallSensor_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIMEx_HallSensor_MspDeInit(TIM_HandleTypeDef *htim); + +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_IT(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_IT(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_DMA(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group2 Extended Timer Complementary Output Compare functions + * @brief Timer Complementary Output Compare functions + * @{ + */ +/* Timer Complementary Output Compare functions *****************************/ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); + +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); + +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group3 Extended Timer Complementary PWM functions + * @brief Timer Complementary PWM functions + * @{ + */ +/* Timer Complementary PWM functions ****************************************/ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); + +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef 
HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group4 Extended Timer Complementary One Pulse functions + * @brief Timer Complementary One Pulse functions + * @{ + */ +/* Timer Complementary One Pulse functions **********************************/ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel); + +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group5 Extended Peripheral Control functions + * @brief Peripheral Control functions + * @{ + */ +/* Extended Control functions ************************************************/ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource); +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_IT(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource); +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_DMA(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource); +HAL_StatusTypeDef HAL_TIMEx_MasterConfigSynchronization(TIM_HandleTypeDef *htim, + TIM_MasterConfigTypeDef *sMasterConfig); +HAL_StatusTypeDef HAL_TIMEx_ConfigBreakDeadTime(TIM_HandleTypeDef *htim, + TIM_BreakDeadTimeConfigTypeDef *sBreakDeadTimeConfig); +#if defined(TIM_BREAK_INPUT_SUPPORT) +HAL_StatusTypeDef HAL_TIMEx_ConfigBreakInput(TIM_HandleTypeDef *htim, uint32_t BreakInput, + TIMEx_BreakInputConfigTypeDef *sBreakInputConfig); +#endif /* TIM_BREAK_INPUT_SUPPORT */ +HAL_StatusTypeDef HAL_TIMEx_GroupChannel5(TIM_HandleTypeDef *htim, uint32_t Channels); +HAL_StatusTypeDef HAL_TIMEx_RemapConfig(TIM_HandleTypeDef *htim, uint32_t Remap); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group6 Extended Callbacks functions + * @brief Extended Callbacks functions + * @{ + */ +/* Extended Callback **********************************************************/ +void HAL_TIMEx_CommutCallback(TIM_HandleTypeDef *htim); +void HAL_TIMEx_CommutHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIMEx_BreakCallback(TIM_HandleTypeDef *htim); +void HAL_TIMEx_Break2Callback(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group7 Extended Peripheral State functions + * @brief Extended Peripheral State functions + * @{ + */ +/* Extended Peripheral State functions ***************************************/ +HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** + * @} + */ +/* End of exported functions -------------------------------------------------*/ + +/* Private functions----------------------------------------------------------*/ +/** @addtogroup TIMEx_Private_Functions TIMEx Private Functions + * @{ + */ +void TIMEx_DMACommutationCplt(DMA_HandleTypeDef *hdma); +void TIMEx_DMACommutationHalfCplt(DMA_HandleTypeDef *hdma); +/** + * @} + */ +/* End of private functions --------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* 
STM32F7xx_HAL_TIM_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart.h new file mode 100644 index 000000000..eb186d6f5 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart.h @@ -0,0 +1,1532 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_uart.h + * @author MCD Application Team + * @brief Header file of UART HAL module. + ****************************************************************************** + * @attention + * + *
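For the extended-timer API declared above, the following sketch shows a typical dead-time/break configuration followed by complementary PWM output. It is not part of the ST sources; the handle name htim1 and the example function are assumptions, and TIM1 is assumed to have been initialised for PWM elsewhere.

#include "stm32f7xx_hal.h"

extern TIM_HandleTypeDef htim1;      /* assumed: TIM1 handle initialised by generated init code */

void example_complementary_pwm(void)
{
  TIM_BreakDeadTimeConfigTypeDef sBreak = {0};

  sBreak.OffStateRunMode  = TIM_OSSR_DISABLE;
  sBreak.OffStateIDLEMode = TIM_OSSI_DISABLE;
  sBreak.LockLevel        = TIM_LOCKLEVEL_OFF;
  sBreak.DeadTime         = 100;                        /* inserted dead time, in timer ticks */
  sBreak.BreakState       = TIM_BREAK_ENABLE;
  sBreak.BreakPolarity    = TIM_BREAKPOLARITY_HIGH;
  sBreak.AutomaticOutput  = TIM_AUTOMATICOUTPUT_DISABLE;

  if (HAL_TIMEx_ConfigBreakDeadTime(&htim1, &sBreak) != HAL_OK)
  {
    /* handle the error */
  }

  HAL_TIM_PWM_Start(&htim1, TIM_CHANNEL_1);     /* main output (TIM1_CH1)          */
  HAL_TIMEx_PWMN_Start(&htim1, TIM_CHANNEL_1);  /* complementary output (TIM1_CH1N) */
}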
+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_UART_H +#define STM32F7xx_HAL_UART_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup UART + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup UART_Exported_Types UART Exported Types + * @{ + */ + +/** + * @brief UART Init Structure definition + */ +typedef struct +{ + uint32_t BaudRate; /*!< This member configures the UART communication baud rate. + The baud rate register is computed using the following formula: + - If oversampling is 16 or in LIN mode, + Baud Rate Register = ((uart_ker_ck) / ((huart->Init.BaudRate))) + - If oversampling is 8, + Baud Rate Register[15:4] = ((2 * uart_ker_ck) / ((huart->Init.BaudRate)))[15:4] + Baud Rate Register[3] = 0 + Baud Rate Register[2:0] = (((2 * uart_ker_ck) / ((huart->Init.BaudRate)))[3:0]) >> 1 + where uart_ker_ck is the UART input clock */ + + uint32_t WordLength; /*!< Specifies the number of data bits transmitted or received in a frame. + This parameter can be a value of @ref UARTEx_Word_Length. */ + + uint32_t StopBits; /*!< Specifies the number of stop bits transmitted. + This parameter can be a value of @ref UART_Stop_Bits. */ + + uint32_t Parity; /*!< Specifies the parity mode. + This parameter can be a value of @ref UART_Parity + @note When parity is enabled, the computed parity is inserted + at the MSB position of the transmitted data (9th bit when + the word length is set to 9 data bits; 8th bit when the + word length is set to 8 data bits). */ + + uint32_t Mode; /*!< Specifies whether the Receive or Transmit mode is enabled or disabled. + This parameter can be a value of @ref UART_Mode. */ + + uint32_t HwFlowCtl; /*!< Specifies whether the hardware flow control mode is enabled + or disabled. + This parameter can be a value of @ref UART_Hardware_Flow_Control. */ + + uint32_t OverSampling; /*!< Specifies whether the Over sampling 8 is enabled or disabled, to achieve higher speed (up to f_PCLK/8). + This parameter can be a value of @ref UART_Over_Sampling. */ + + uint32_t OneBitSampling; /*!< Specifies whether a single sample or three samples' majority vote is selected. + Selecting the single sample method increases the receiver tolerance to clock + deviations. This parameter can be a value of @ref UART_OneBit_Sampling. */ + + +} UART_InitTypeDef; + +/** + * @brief UART Advanced Features initialization structure definition + */ +typedef struct +{ + uint32_t AdvFeatureInit; /*!< Specifies which advanced UART features is initialized. Several + Advanced Features may be initialized at the same time . + This parameter can be a value of @ref UART_Advanced_Features_Initialization_Type. */ + + uint32_t TxPinLevelInvert; /*!< Specifies whether the TX pin active level is inverted. + This parameter can be a value of @ref UART_Tx_Inv. */ + + uint32_t RxPinLevelInvert; /*!< Specifies whether the RX pin active level is inverted. + This parameter can be a value of @ref UART_Rx_Inv. 
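As a quick illustration of how the init structure above is normally filled in (this sketch is not part of the ST header; the handle name huart1, the example function, and USART1 as the chosen instance are assumptions, with clock and MSP setup done elsewhere):

#include "stm32f7xx_hal.h"

UART_HandleTypeDef huart1;                 /* assumed handle for USART1 */

void example_uart_init(void)
{
  huart1.Instance            = USART1;
  huart1.Init.BaudRate       = 115200;
  huart1.Init.WordLength     = UART_WORDLENGTH_8B;   /* from the companion stm32f7xx_hal_uart_ex.h */
  huart1.Init.StopBits       = UART_STOPBITS_1;
  huart1.Init.Parity         = UART_PARITY_NONE;
  huart1.Init.Mode           = UART_MODE_TX_RX;
  huart1.Init.HwFlowCtl      = UART_HWCONTROL_NONE;
  huart1.Init.OverSampling   = UART_OVERSAMPLING_16;
  huart1.Init.OneBitSampling = UART_ONE_BIT_SAMPLE_DISABLE;
  huart1.AdvancedInit.AdvFeatureInit = UART_ADVFEATURE_NO_INIT;

  if (HAL_UART_Init(&huart1) != HAL_OK)
  {
    /* handle the error */
  }

  /* blocking transmit with a 100 ms timeout */
  HAL_UART_Transmit(&huart1, (uint8_t *)"hello\r\n", 7U, 100U);
}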
*/ + + uint32_t DataInvert; /*!< Specifies whether data are inverted (positive/direct logic + vs negative/inverted logic). + This parameter can be a value of @ref UART_Data_Inv. */ + + uint32_t Swap; /*!< Specifies whether TX and RX pins are swapped. + This parameter can be a value of @ref UART_Rx_Tx_Swap. */ + + uint32_t OverrunDisable; /*!< Specifies whether the reception overrun detection is disabled. + This parameter can be a value of @ref UART_Overrun_Disable. */ + + uint32_t DMADisableonRxError; /*!< Specifies whether the DMA is disabled in case of reception error. + This parameter can be a value of @ref UART_DMA_Disable_on_Rx_Error. */ + + uint32_t AutoBaudRateEnable; /*!< Specifies whether auto Baud rate detection is enabled. + This parameter can be a value of @ref UART_AutoBaudRate_Enable. */ + + uint32_t AutoBaudRateMode; /*!< If auto Baud rate detection is enabled, specifies how the rate + detection is carried out. + This parameter can be a value of @ref UART_AutoBaud_Rate_Mode. */ + + uint32_t MSBFirst; /*!< Specifies whether MSB is sent first on UART line. + This parameter can be a value of @ref UART_MSB_First. */ +} UART_AdvFeatureInitTypeDef; + +/** + * @brief HAL UART State definition + * @note HAL UART State value is a combination of 2 different substates: gState and RxState (see @ref UART_State_Definition). + * - gState contains UART state information related to global Handle management + * and also information related to Tx operations. + * gState value coding follow below described bitmap : + * b7-b6 Error information + * 00 : No Error + * 01 : (Not Used) + * 10 : Timeout + * 11 : Error + * b5 Peripheral initialization status + * 0 : Reset (Peripheral not initialized) + * 1 : Init done (Peripheral not initialized. HAL UART Init function already called) + * b4-b3 (not used) + * xx : Should be set to 00 + * b2 Intrinsic process state + * 0 : Ready + * 1 : Busy (Peripheral busy with some configuration or internal operations) + * b1 (not used) + * x : Should be set to 0 + * b0 Tx state + * 0 : Ready (no Tx operation ongoing) + * 1 : Busy (Tx operation ongoing) + * - RxState contains information related to Rx operations. + * RxState value coding follow below described bitmap : + * b7-b6 (not used) + * xx : Should be set to 00 + * b5 Peripheral initialization status + * 0 : Reset (Peripheral not initialized) + * 1 : Init done (Peripheral not initialized) + * b4-b2 (not used) + * xxx : Should be set to 000 + * b1 Rx state + * 0 : Ready (no Rx operation ongoing) + * 1 : Busy (Rx operation ongoing) + * b0 (not used) + * x : Should be set to 0. 
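To make the bit coding just described concrete: HAL_UART_STATE_BUSY_TX (0x21) is simply the "initialised" bit (b5, 0x20) OR'ed with the "Tx ongoing" bit (b0, 0x01). A small sketch, not part of the ST header and assuming a handle huart1 initialised elsewhere, then looks like this:

#include "stm32f7xx_hal.h"

extern UART_HandleTypeDef huart1;   /* assumed handle */

void example_wait_until_ready(void)
{
  /* HAL_UART_GetState() returns the combined gState/RxState value */
  while (HAL_UART_GetState(&huart1) != HAL_UART_STATE_READY)
  {
    /* transmission or reception still ongoing */
  }
}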
+ */ +typedef uint32_t HAL_UART_StateTypeDef; + +/** + * @brief UART clock sources definition + */ +typedef enum +{ + UART_CLOCKSOURCE_PCLK1 = 0x00U, /*!< PCLK1 clock source */ + UART_CLOCKSOURCE_PCLK2 = 0x01U, /*!< PCLK2 clock source */ + UART_CLOCKSOURCE_HSI = 0x02U, /*!< HSI clock source */ + UART_CLOCKSOURCE_SYSCLK = 0x04U, /*!< SYSCLK clock source */ + UART_CLOCKSOURCE_LSE = 0x08U, /*!< LSE clock source */ + UART_CLOCKSOURCE_UNDEFINED = 0x10U /*!< Undefined clock source */ +} UART_ClockSourceTypeDef; + +/** + * @brief UART handle Structure definition + */ +typedef struct __UART_HandleTypeDef +{ + USART_TypeDef *Instance; /*!< UART registers base address */ + + UART_InitTypeDef Init; /*!< UART communication parameters */ + + UART_AdvFeatureInitTypeDef AdvancedInit; /*!< UART Advanced Features initialization parameters */ + + uint8_t *pTxBuffPtr; /*!< Pointer to UART Tx transfer Buffer */ + + uint16_t TxXferSize; /*!< UART Tx Transfer size */ + + __IO uint16_t TxXferCount; /*!< UART Tx Transfer Counter */ + + uint8_t *pRxBuffPtr; /*!< Pointer to UART Rx transfer Buffer */ + + uint16_t RxXferSize; /*!< UART Rx Transfer size */ + + __IO uint16_t RxXferCount; /*!< UART Rx Transfer Counter */ + + uint16_t Mask; /*!< UART Rx RDR register mask */ + + void (*RxISR)(struct __UART_HandleTypeDef *huart); /*!< Function pointer on Rx IRQ handler */ + + void (*TxISR)(struct __UART_HandleTypeDef *huart); /*!< Function pointer on Tx IRQ handler */ + + DMA_HandleTypeDef *hdmatx; /*!< UART Tx DMA Handle parameters */ + + DMA_HandleTypeDef *hdmarx; /*!< UART Rx DMA Handle parameters */ + + HAL_LockTypeDef Lock; /*!< Locking object */ + + __IO HAL_UART_StateTypeDef gState; /*!< UART state information related to global Handle management + and also related to Tx operations. + This parameter can be a value of @ref HAL_UART_StateTypeDef */ + + __IO HAL_UART_StateTypeDef RxState; /*!< UART state information related to Rx operations. 
+ This parameter can be a value of @ref HAL_UART_StateTypeDef */ + + __IO uint32_t ErrorCode; /*!< UART Error code */ + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + void (* TxHalfCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Tx Half Complete Callback */ + void (* TxCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Tx Complete Callback */ + void (* RxHalfCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Rx Half Complete Callback */ + void (* RxCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Rx Complete Callback */ + void (* ErrorCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Error Callback */ + void (* AbortCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Complete Callback */ + void (* AbortTransmitCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Transmit Complete Callback */ + void (* AbortReceiveCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Receive Complete Callback */ + void (* WakeupCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Wakeup Callback */ + + void (* MspInitCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Msp Init callback */ + void (* MspDeInitCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Msp DeInit callback */ +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +} UART_HandleTypeDef; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +/** + * @brief HAL UART Callback ID enumeration definition + */ +typedef enum +{ + HAL_UART_TX_HALFCOMPLETE_CB_ID = 0x00U, /*!< UART Tx Half Complete Callback ID */ + HAL_UART_TX_COMPLETE_CB_ID = 0x01U, /*!< UART Tx Complete Callback ID */ + HAL_UART_RX_HALFCOMPLETE_CB_ID = 0x02U, /*!< UART Rx Half Complete Callback ID */ + HAL_UART_RX_COMPLETE_CB_ID = 0x03U, /*!< UART Rx Complete Callback ID */ + HAL_UART_ERROR_CB_ID = 0x04U, /*!< UART Error Callback ID */ + HAL_UART_ABORT_COMPLETE_CB_ID = 0x05U, /*!< UART Abort Complete Callback ID */ + HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID = 0x06U, /*!< UART Abort Transmit Complete Callback ID */ + HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID = 0x07U, /*!< UART Abort Receive Complete Callback ID */ + HAL_UART_WAKEUP_CB_ID = 0x08U, /*!< UART Wakeup Callback ID */ + + HAL_UART_MSPINIT_CB_ID = 0x0BU, /*!< UART MspInit callback ID */ + HAL_UART_MSPDEINIT_CB_ID = 0x0CU /*!< UART MspDeInit callback ID */ + +} HAL_UART_CallbackIDTypeDef; + +/** + * @brief HAL UART Callback pointer definition + */ +typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer to an UART callback function */ + +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup UART_Exported_Constants UART Exported Constants + * @{ + */ + +/** @defgroup UART_State_Definition UART State Code Definition + * @{ + */ +#define HAL_UART_STATE_RESET 0x00000000U /*!< Peripheral is not initialized + Value is allowed for gState and RxState */ +#define HAL_UART_STATE_READY 0x00000020U /*!< Peripheral Initialized and ready for use + Value is allowed for gState and RxState */ +#define HAL_UART_STATE_BUSY 0x00000024U /*!< an internal process is ongoing + Value is allowed for gState only */ +#define HAL_UART_STATE_BUSY_TX 0x00000021U /*!< Data Transmission process is ongoing + Value is allowed for gState only */ +#define HAL_UART_STATE_BUSY_RX 0x00000022U /*!< Data Reception process is ongoing + Value is allowed for RxState only */ +#define HAL_UART_STATE_BUSY_TX_RX 0x00000023U /*!< Data Transmission and 
Reception process is ongoing + Not to be used for neither gState nor RxState. + Value is result of combination (Or) between gState and RxState values */ +#define HAL_UART_STATE_TIMEOUT 0x000000A0U /*!< Timeout state + Value is allowed for gState only */ +#define HAL_UART_STATE_ERROR 0x000000E0U /*!< Error + Value is allowed for gState only */ +/** + * @} + */ + +/** @defgroup UART_Error_Definition UART Error Definition + * @{ + */ +#define HAL_UART_ERROR_NONE ((uint32_t)0x00000000U) /*!< No error */ +#define HAL_UART_ERROR_PE ((uint32_t)0x00000001U) /*!< Parity error */ +#define HAL_UART_ERROR_NE ((uint32_t)0x00000002U) /*!< Noise error */ +#define HAL_UART_ERROR_FE ((uint32_t)0x00000004U) /*!< Frame error */ +#define HAL_UART_ERROR_ORE ((uint32_t)0x00000008U) /*!< Overrun error */ +#define HAL_UART_ERROR_DMA ((uint32_t)0x00000010U) /*!< DMA transfer error */ +#define HAL_UART_ERROR_RTO ((uint32_t)0x00000020U) /*!< Receiver Timeout error */ + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +#define HAL_UART_ERROR_INVALID_CALLBACK ((uint32_t)0x00000040U) /*!< Invalid Callback error */ +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup UART_Stop_Bits UART Number of Stop Bits + * @{ + */ +#define UART_STOPBITS_0_5 USART_CR2_STOP_0 /*!< UART frame with 0.5 stop bit */ +#define UART_STOPBITS_1 0x00000000U /*!< UART frame with 1 stop bit */ +#define UART_STOPBITS_1_5 (USART_CR2_STOP_0 | USART_CR2_STOP_1) /*!< UART frame with 1.5 stop bits */ +#define UART_STOPBITS_2 USART_CR2_STOP_1 /*!< UART frame with 2 stop bits */ +/** + * @} + */ + +/** @defgroup UART_Parity UART Parity + * @{ + */ +#define UART_PARITY_NONE 0x00000000U /*!< No parity */ +#define UART_PARITY_EVEN USART_CR1_PCE /*!< Even parity */ +#define UART_PARITY_ODD (USART_CR1_PCE | USART_CR1_PS) /*!< Odd parity */ +/** + * @} + */ + +/** @defgroup UART_Hardware_Flow_Control UART Hardware Flow Control + * @{ + */ +#define UART_HWCONTROL_NONE 0x00000000U /*!< No hardware control */ +#define UART_HWCONTROL_RTS USART_CR3_RTSE /*!< Request To Send */ +#define UART_HWCONTROL_CTS USART_CR3_CTSE /*!< Clear To Send */ +#define UART_HWCONTROL_RTS_CTS (USART_CR3_RTSE | USART_CR3_CTSE) /*!< Request and Clear To Send */ +/** + * @} + */ + +/** @defgroup UART_Mode UART Transfer Mode + * @{ + */ +#define UART_MODE_RX USART_CR1_RE /*!< RX mode */ +#define UART_MODE_TX USART_CR1_TE /*!< TX mode */ +#define UART_MODE_TX_RX (USART_CR1_TE |USART_CR1_RE) /*!< RX and TX mode */ +/** + * @} + */ + +/** @defgroup UART_State UART State + * @{ + */ +#define UART_STATE_DISABLE 0x00000000U /*!< UART disabled */ +#define UART_STATE_ENABLE USART_CR1_UE /*!< UART enabled */ +/** + * @} + */ + +/** @defgroup UART_Over_Sampling UART Over Sampling + * @{ + */ +#define UART_OVERSAMPLING_16 0x00000000U /*!< Oversampling by 16 */ +#define UART_OVERSAMPLING_8 USART_CR1_OVER8 /*!< Oversampling by 8 */ +/** + * @} + */ + +/** @defgroup UART_OneBit_Sampling UART One Bit Sampling Method + * @{ + */ +#define UART_ONE_BIT_SAMPLE_DISABLE 0x00000000U /*!< One-bit sampling disable */ +#define UART_ONE_BIT_SAMPLE_ENABLE USART_CR3_ONEBIT /*!< One-bit sampling enable */ +/** + * @} + */ + +/** @defgroup UART_AutoBaud_Rate_Mode UART Advanced Feature AutoBaud Rate Mode + * @{ + */ +#define UART_ADVFEATURE_AUTOBAUDRATE_ONSTARTBIT 0x00000000U /*!< Auto Baud rate detection on start bit */ +#define UART_ADVFEATURE_AUTOBAUDRATE_ONFALLINGEDGE USART_CR2_ABRMODE_0 /*!< Auto Baud rate detection on falling edge */ +#define UART_ADVFEATURE_AUTOBAUDRATE_ON0X7FFRAME 
USART_CR2_ABRMODE_1 /*!< Auto Baud rate detection on 0x7F frame detection */ +#define UART_ADVFEATURE_AUTOBAUDRATE_ON0X55FRAME USART_CR2_ABRMODE /*!< Auto Baud rate detection on 0x55 frame detection */ +/** + * @} + */ + +/** @defgroup UART_Receiver_Timeout UART Receiver Timeout + * @{ + */ +#define UART_RECEIVER_TIMEOUT_DISABLE 0x00000000U /*!< UART Receiver Timeout disable */ +#define UART_RECEIVER_TIMEOUT_ENABLE USART_CR2_RTOEN /*!< UART Receiver Timeout enable */ +/** + * @} + */ + +/** @defgroup UART_LIN UART Local Interconnection Network mode + * @{ + */ +#define UART_LIN_DISABLE 0x00000000U /*!< Local Interconnect Network disable */ +#define UART_LIN_ENABLE USART_CR2_LINEN /*!< Local Interconnect Network enable */ +/** + * @} + */ + +/** @defgroup UART_LIN_Break_Detection UART LIN Break Detection + * @{ + */ +#define UART_LINBREAKDETECTLENGTH_10B 0x00000000U /*!< LIN 10-bit break detection length */ +#define UART_LINBREAKDETECTLENGTH_11B USART_CR2_LBDL /*!< LIN 11-bit break detection length */ +/** + * @} + */ + +/** @defgroup UART_DMA_Tx UART DMA Tx + * @{ + */ +#define UART_DMA_TX_DISABLE 0x00000000U /*!< UART DMA TX disabled */ +#define UART_DMA_TX_ENABLE USART_CR3_DMAT /*!< UART DMA TX enabled */ +/** + * @} + */ + +/** @defgroup UART_DMA_Rx UART DMA Rx + * @{ + */ +#define UART_DMA_RX_DISABLE 0x00000000U /*!< UART DMA RX disabled */ +#define UART_DMA_RX_ENABLE USART_CR3_DMAR /*!< UART DMA RX enabled */ +/** + * @} + */ + +/** @defgroup UART_Half_Duplex_Selection UART Half Duplex Selection + * @{ + */ +#define UART_HALF_DUPLEX_DISABLE 0x00000000U /*!< UART half-duplex disabled */ +#define UART_HALF_DUPLEX_ENABLE USART_CR3_HDSEL /*!< UART half-duplex enabled */ +/** + * @} + */ + +/** @defgroup UART_WakeUp_Methods UART WakeUp Methods + * @{ + */ +#define UART_WAKEUPMETHOD_IDLELINE 0x00000000U /*!< UART wake-up on idle line */ +#define UART_WAKEUPMETHOD_ADDRESSMARK USART_CR1_WAKE /*!< UART wake-up on address mark */ +/** + * @} + */ + +/** @defgroup UART_Request_Parameters UART Request Parameters + * @{ + */ +#define UART_AUTOBAUD_REQUEST USART_RQR_ABRRQ /*!< Auto-Baud Rate Request */ +#define UART_SENDBREAK_REQUEST USART_RQR_SBKRQ /*!< Send Break Request */ +#define UART_MUTE_MODE_REQUEST USART_RQR_MMRQ /*!< Mute Mode Request */ +#define UART_RXDATA_FLUSH_REQUEST USART_RQR_RXFRQ /*!< Receive Data flush Request */ +#define UART_TXDATA_FLUSH_REQUEST USART_RQR_TXFRQ /*!< Transmit data flush Request */ +/** + * @} + */ + +/** @defgroup UART_Advanced_Features_Initialization_Type UART Advanced Feature Initialization Type + * @{ + */ +#define UART_ADVFEATURE_NO_INIT 0x00000000U /*!< No advanced feature initialization */ +#define UART_ADVFEATURE_TXINVERT_INIT 0x00000001U /*!< TX pin active level inversion */ +#define UART_ADVFEATURE_RXINVERT_INIT 0x00000002U /*!< RX pin active level inversion */ +#define UART_ADVFEATURE_DATAINVERT_INIT 0x00000004U /*!< Binary data inversion */ +#define UART_ADVFEATURE_SWAP_INIT 0x00000008U /*!< TX/RX pins swap */ +#define UART_ADVFEATURE_RXOVERRUNDISABLE_INIT 0x00000010U /*!< RX overrun disable */ +#define UART_ADVFEATURE_DMADISABLEONERROR_INIT 0x00000020U /*!< DMA disable on Reception Error */ +#define UART_ADVFEATURE_AUTOBAUDRATE_INIT 0x00000040U /*!< Auto Baud rate detection initialization */ +#define UART_ADVFEATURE_MSBFIRST_INIT 0x00000080U /*!< Most significant bit sent/received first */ +/** + * @} + */ + +/** @defgroup UART_Tx_Inv UART Advanced Feature TX Pin Active Level Inversion + * @{ + */ +#define UART_ADVFEATURE_TXINV_DISABLE 0x00000000U 
/*!< TX pin active level inversion disable */ +#define UART_ADVFEATURE_TXINV_ENABLE USART_CR2_TXINV /*!< TX pin active level inversion enable */ +/** + * @} + */ + +/** @defgroup UART_Rx_Inv UART Advanced Feature RX Pin Active Level Inversion + * @{ + */ +#define UART_ADVFEATURE_RXINV_DISABLE 0x00000000U /*!< RX pin active level inversion disable */ +#define UART_ADVFEATURE_RXINV_ENABLE USART_CR2_RXINV /*!< RX pin active level inversion enable */ +/** + * @} + */ + +/** @defgroup UART_Data_Inv UART Advanced Feature Binary Data Inversion + * @{ + */ +#define UART_ADVFEATURE_DATAINV_DISABLE 0x00000000U /*!< Binary data inversion disable */ +#define UART_ADVFEATURE_DATAINV_ENABLE USART_CR2_DATAINV /*!< Binary data inversion enable */ +/** + * @} + */ + +/** @defgroup UART_Rx_Tx_Swap UART Advanced Feature RX TX Pins Swap + * @{ + */ +#define UART_ADVFEATURE_SWAP_DISABLE 0x00000000U /*!< TX/RX pins swap disable */ +#define UART_ADVFEATURE_SWAP_ENABLE USART_CR2_SWAP /*!< TX/RX pins swap enable */ +/** + * @} + */ + +/** @defgroup UART_Overrun_Disable UART Advanced Feature Overrun Disable + * @{ + */ +#define UART_ADVFEATURE_OVERRUN_ENABLE 0x00000000U /*!< RX overrun enable */ +#define UART_ADVFEATURE_OVERRUN_DISABLE USART_CR3_OVRDIS /*!< RX overrun disable */ +/** + * @} + */ + +/** @defgroup UART_AutoBaudRate_Enable UART Advanced Feature Auto BaudRate Enable + * @{ + */ +#define UART_ADVFEATURE_AUTOBAUDRATE_DISABLE 0x00000000U /*!< RX Auto Baud rate detection enable */ +#define UART_ADVFEATURE_AUTOBAUDRATE_ENABLE USART_CR2_ABREN /*!< RX Auto Baud rate detection disable */ +/** + * @} + */ + +/** @defgroup UART_DMA_Disable_on_Rx_Error UART Advanced Feature DMA Disable On Rx Error + * @{ + */ +#define UART_ADVFEATURE_DMA_ENABLEONRXERROR 0x00000000U /*!< DMA enable on Reception Error */ +#define UART_ADVFEATURE_DMA_DISABLEONRXERROR USART_CR3_DDRE /*!< DMA disable on Reception Error */ +/** + * @} + */ + +/** @defgroup UART_MSB_First UART Advanced Feature MSB First + * @{ + */ +#define UART_ADVFEATURE_MSBFIRST_DISABLE 0x00000000U /*!< Most significant bit sent/received first disable */ +#define UART_ADVFEATURE_MSBFIRST_ENABLE USART_CR2_MSBFIRST /*!< Most significant bit sent/received first enable */ +/** + * @} + */ +#if defined(USART_CR1_UESM) + +/** @defgroup UART_Stop_Mode_Enable UART Advanced Feature Stop Mode Enable + * @{ + */ +#define UART_ADVFEATURE_STOPMODE_DISABLE 0x00000000U /*!< UART stop mode disable */ +#define UART_ADVFEATURE_STOPMODE_ENABLE USART_CR1_UESM /*!< UART stop mode enable */ +/** + * @} + */ +#endif /* USART_CR1_UESM */ + +/** @defgroup UART_Mute_Mode UART Advanced Feature Mute Mode Enable + * @{ + */ +#define UART_ADVFEATURE_MUTEMODE_DISABLE 0x00000000U /*!< UART mute mode disable */ +#define UART_ADVFEATURE_MUTEMODE_ENABLE USART_CR1_MME /*!< UART mute mode enable */ +/** + * @} + */ + +/** @defgroup UART_CR2_ADDRESS_LSB_POS UART Address-matching LSB Position In CR2 Register + * @{ + */ +#define UART_CR2_ADDRESS_LSB_POS 24U /*!< UART address-matching LSB position in CR2 register */ +/** + * @} + */ +#if defined(USART_CR1_UESM) + +/** @defgroup UART_WakeUp_from_Stop_Selection UART WakeUp From Stop Selection + * @{ + */ +#define UART_WAKEUP_ON_ADDRESS 0x00000000U /*!< UART wake-up on address */ +#define UART_WAKEUP_ON_STARTBIT USART_CR3_WUS_1 /*!< UART wake-up on start bit */ +#define UART_WAKEUP_ON_READDATA_NONEMPTY USART_CR3_WUS /*!< UART wake-up on receive data register not empty or RXFIFO is not empty */ +/** + * @} + */ +#endif /* USART_CR1_UESM */ + +/** @defgroup 
UART_DriverEnable_Polarity UART DriverEnable Polarity + * @{ + */ +#define UART_DE_POLARITY_HIGH 0x00000000U /*!< Driver enable signal is active high */ +#define UART_DE_POLARITY_LOW USART_CR3_DEP /*!< Driver enable signal is active low */ +/** + * @} + */ + +/** @defgroup UART_CR1_DEAT_ADDRESS_LSB_POS UART Driver Enable Assertion Time LSB Position In CR1 Register + * @{ + */ +#define UART_CR1_DEAT_ADDRESS_LSB_POS 21U /*!< UART Driver Enable assertion time LSB position in CR1 register */ +/** + * @} + */ + +/** @defgroup UART_CR1_DEDT_ADDRESS_LSB_POS UART Driver Enable DeAssertion Time LSB Position In CR1 Register + * @{ + */ +#define UART_CR1_DEDT_ADDRESS_LSB_POS 16U /*!< UART Driver Enable de-assertion time LSB position in CR1 register */ +/** + * @} + */ + +/** @defgroup UART_Interruption_Mask UART Interruptions Flag Mask + * @{ + */ +#define UART_IT_MASK 0x001FU /*!< UART interruptions flags mask */ +/** + * @} + */ + +/** @defgroup UART_TimeOut_Value UART polling-based communications time-out value + * @{ + */ +#define HAL_UART_TIMEOUT_VALUE 0x1FFFFFFU /*!< UART polling-based communications time-out value */ +/** + * @} + */ + +/** @defgroup UART_Flags UART Status Flags + * Elements values convention: 0xXXXX + * - 0xXXXX : Flag mask in the ISR register + * @{ + */ +#if defined(USART_ISR_REACK) +#define UART_FLAG_REACK USART_ISR_REACK /*!< UART receive enable acknowledge flag */ +#endif /* USART_ISR_REACK */ +#define UART_FLAG_TEACK USART_ISR_TEACK /*!< UART transmit enable acknowledge flag */ +#if defined(USART_CR1_UESM) +#define UART_FLAG_WUF USART_ISR_WUF /*!< UART wake-up from stop mode flag */ +#endif /* USART_CR1_UESM */ +#define UART_FLAG_RWU USART_ISR_RWU /*!< UART receiver wake-up from mute mode flag */ +#define UART_FLAG_SBKF USART_ISR_SBKF /*!< UART send break flag */ +#define UART_FLAG_CMF USART_ISR_CMF /*!< UART character match flag */ +#define UART_FLAG_BUSY USART_ISR_BUSY /*!< UART busy flag */ +#define UART_FLAG_ABRF USART_ISR_ABRF /*!< UART auto Baud rate flag */ +#define UART_FLAG_ABRE USART_ISR_ABRE /*!< UART auto Baud rate error */ +#define UART_FLAG_RTOF USART_ISR_RTOF /*!< UART receiver timeout flag */ +#define UART_FLAG_CTS USART_ISR_CTS /*!< UART clear to send flag */ +#define UART_FLAG_CTSIF USART_ISR_CTSIF /*!< UART clear to send interrupt flag */ +#define UART_FLAG_LBDF USART_ISR_LBDF /*!< UART LIN break detection flag */ +#define UART_FLAG_TXE USART_ISR_TXE /*!< UART transmit data register empty */ +#define UART_FLAG_TC USART_ISR_TC /*!< UART transmission complete */ +#define UART_FLAG_RXNE USART_ISR_RXNE /*!< UART read data register not empty */ +#define UART_FLAG_IDLE USART_ISR_IDLE /*!< UART idle flag */ +#define UART_FLAG_ORE USART_ISR_ORE /*!< UART overrun error */ +#define UART_FLAG_NE USART_ISR_NE /*!< UART noise error */ +#define UART_FLAG_FE USART_ISR_FE /*!< UART frame error */ +#define UART_FLAG_PE USART_ISR_PE /*!< UART parity error */ +/** + * @} + */ + +/** @defgroup UART_Interrupt_definition UART Interrupts Definition + * Elements values convention: 000ZZZZZ0XXYYYYYb + * - YYYYY : Interrupt source position in the XX register (5bits) + * - XX : Interrupt source register (2bits) + * - 01: CR1 register + * - 10: CR2 register + * - 11: CR3 register + * - ZZZZZ : Flag position in the ISR register(5bits) + * Elements values convention: 000000000XXYYYYYb + * - YYYYY : Interrupt source position in the XX register (5bits) + * - XX : Interrupt source register (2bits) + * - 01: CR1 register + * - 10: CR2 register + * - 11: CR3 register + * Elements 
values convention: 0000ZZZZ00000000b + * - ZZZZ : Flag position in the ISR register(4bits) + * @{ + */ +#define UART_IT_PE 0x0028U /*!< UART parity error interruption */ +#define UART_IT_TXE 0x0727U /*!< UART transmit data register empty interruption */ +#define UART_IT_TC 0x0626U /*!< UART transmission complete interruption */ +#define UART_IT_RXNE 0x0525U /*!< UART read data register not empty interruption */ +#define UART_IT_IDLE 0x0424U /*!< UART idle interruption */ +#define UART_IT_LBD 0x0846U /*!< UART LIN break detection interruption */ +#define UART_IT_CTS 0x096AU /*!< UART CTS interruption */ +#define UART_IT_CM 0x112EU /*!< UART character match interruption */ +#if defined(USART_CR1_UESM) +#define UART_IT_WUF 0x1476U /*!< UART wake-up from stop mode interruption */ +#endif /* USART_CR1_UESM */ +#define UART_IT_RTO 0x0B3AU /*!< UART receiver timeout interruption */ + +#define UART_IT_ERR 0x0060U /*!< UART error interruption */ + +#define UART_IT_ORE 0x0300U /*!< UART overrun error interruption */ +#define UART_IT_NE 0x0200U /*!< UART noise error interruption */ +#define UART_IT_FE 0x0100U /*!< UART frame error interruption */ +/** + * @} + */ + +/** @defgroup UART_IT_CLEAR_Flags UART Interruption Clear Flags + * @{ + */ +#define UART_CLEAR_PEF USART_ICR_PECF /*!< Parity Error Clear Flag */ +#define UART_CLEAR_FEF USART_ICR_FECF /*!< Framing Error Clear Flag */ +#define UART_CLEAR_NEF USART_ICR_NCF /*!< Noise Error detected Clear Flag */ +#define UART_CLEAR_OREF USART_ICR_ORECF /*!< Overrun Error Clear Flag */ +#define UART_CLEAR_IDLEF USART_ICR_IDLECF /*!< IDLE line detected Clear Flag */ +#define UART_CLEAR_TCF USART_ICR_TCCF /*!< Transmission Complete Clear Flag */ +#define UART_CLEAR_LBDF USART_ICR_LBDCF /*!< LIN Break Detection Clear Flag */ +#define UART_CLEAR_CTSF USART_ICR_CTSCF /*!< CTS Interrupt Clear Flag */ +#define UART_CLEAR_CMF USART_ICR_CMCF /*!< Character Match Clear Flag */ +#if defined(USART_CR1_UESM) +#define UART_CLEAR_WUF USART_ICR_WUCF /*!< Wake Up from stop mode Clear Flag */ +#endif /* USART_CR1_UESM */ +#define UART_CLEAR_RTOF USART_ICR_RTOCF /*!< UART receiver timeout clear flag */ +/** + * @} + */ + + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup UART_Exported_Macros UART Exported Macros + * @{ + */ + +/** @brief Reset UART handle states. + * @param __HANDLE__ UART handle. + * @retval None + */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +#define __HAL_UART_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->gState = HAL_UART_STATE_RESET; \ + (__HANDLE__)->RxState = HAL_UART_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0U) +#else +#define __HAL_UART_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->gState = HAL_UART_STATE_RESET; \ + (__HANDLE__)->RxState = HAL_UART_STATE_RESET; \ + } while(0U) +#endif /*USE_HAL_UART_REGISTER_CALLBACKS */ + +/** @brief Flush the UART Data registers. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_FLUSH_DRREGISTER(__HANDLE__) \ + do{ \ + SET_BIT((__HANDLE__)->Instance->RQR, UART_RXDATA_FLUSH_REQUEST); \ + SET_BIT((__HANDLE__)->Instance->RQR, UART_TXDATA_FLUSH_REQUEST); \ + } while(0U) + +/** @brief Clear the specified UART pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @param __FLAG__ specifies the flag to check. 
+ * This parameter can be any combination of the following values: + * @arg @ref UART_CLEAR_PEF Parity Error Clear Flag + * @arg @ref UART_CLEAR_FEF Framing Error Clear Flag + * @arg @ref UART_CLEAR_NEF Noise detected Clear Flag + * @arg @ref UART_CLEAR_OREF Overrun Error Clear Flag + * @arg @ref UART_CLEAR_IDLEF IDLE line detected Clear Flag + * @arg @ref UART_CLEAR_TCF Transmission Complete Clear Flag + * @arg @ref UART_CLEAR_RTOF Receiver Timeout clear flag + * @arg @ref UART_CLEAR_LBDF LIN Break Detection Clear Flag + * @arg @ref UART_CLEAR_CTSF CTS Interrupt Clear Flag + * @arg @ref UART_CLEAR_CMF Character Match Clear Flag +#if defined(USART_CR1_UESM) + * @arg @ref UART_CLEAR_WUF Wake Up from stop mode Clear Flag +#endif + * @retval None + */ +#define __HAL_UART_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->ICR = (__FLAG__)) + +/** @brief Clear the UART PE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_CLEAR_PEFLAG(__HANDLE__) __HAL_UART_CLEAR_FLAG((__HANDLE__), UART_CLEAR_PEF) + +/** @brief Clear the UART FE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_CLEAR_FEFLAG(__HANDLE__) __HAL_UART_CLEAR_FLAG((__HANDLE__), UART_CLEAR_FEF) + +/** @brief Clear the UART NE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_CLEAR_NEFLAG(__HANDLE__) __HAL_UART_CLEAR_FLAG((__HANDLE__), UART_CLEAR_NEF) + +/** @brief Clear the UART ORE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_CLEAR_OREFLAG(__HANDLE__) __HAL_UART_CLEAR_FLAG((__HANDLE__), UART_CLEAR_OREF) + +/** @brief Clear the UART IDLE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_CLEAR_IDLEFLAG(__HANDLE__) __HAL_UART_CLEAR_FLAG((__HANDLE__), UART_CLEAR_IDLEF) + + +/** @brief Check whether the specified UART flag is set or not. + * @param __HANDLE__ specifies the UART Handle. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: +#if defined(USART_ISR_REACK) + * @arg @ref UART_FLAG_REACK Receive enable acknowledge flag +#endif + * @arg @ref UART_FLAG_TEACK Transmit enable acknowledge flag + #if defined(USART_CR1_UESM) + * @arg @ref UART_FLAG_WUF Wake up from stop mode flag + #endif + * @arg @ref UART_FLAG_RWU Receiver wake up flag (if the UART in mute mode) + * @arg @ref UART_FLAG_SBKF Send Break flag + * @arg @ref UART_FLAG_CMF Character match flag + * @arg @ref UART_FLAG_BUSY Busy flag + * @arg @ref UART_FLAG_ABRF Auto Baud rate detection flag + * @arg @ref UART_FLAG_ABRE Auto Baud rate detection error flag + * @arg @ref UART_FLAG_CTS CTS Change flag + * @arg @ref UART_FLAG_LBDF LIN Break detection flag + * @arg @ref UART_FLAG_TXE Transmit data register empty flag + * @arg @ref UART_FLAG_TC Transmission Complete flag + * @arg @ref UART_FLAG_RXNE Receive data register not empty flag + * @arg @ref UART_FLAG_RTOF Receiver Timeout flag + * @arg @ref UART_FLAG_IDLE Idle Line detection flag + * @arg @ref UART_FLAG_ORE Overrun Error flag + * @arg @ref UART_FLAG_NE Noise Error flag + * @arg @ref UART_FLAG_FE Framing Error flag + * @arg @ref UART_FLAG_PE Parity Error flag + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_UART_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->ISR & (__FLAG__)) == (__FLAG__)) + +/** @brief Enable the specified UART interrupt. 
+ * @param __HANDLE__ specifies the UART Handle. + * @param __INTERRUPT__ specifies the UART interrupt source to enable. + * This parameter can be one of the following values: +#if defined(USART_CR1_UESM) + * @arg @ref UART_IT_WUF Wakeup from stop mode interrupt +#endif + * @arg @ref UART_IT_CM Character match interrupt + * @arg @ref UART_IT_CTS CTS change interrupt + * @arg @ref UART_IT_LBD LIN Break detection interrupt + * @arg @ref UART_IT_TXE Transmit Data Register empty interrupt + * @arg @ref UART_IT_TC Transmission complete interrupt + * @arg @ref UART_IT_RXNE Receive Data register not empty interrupt + * @arg @ref UART_IT_RTO Receive Timeout interrupt + * @arg @ref UART_IT_IDLE Idle line detection interrupt + * @arg @ref UART_IT_PE Parity Error interrupt + * @arg @ref UART_IT_ERR Error interrupt (frame error, noise error, overrun error) + * @retval None + */ +#define __HAL_UART_ENABLE_IT(__HANDLE__, __INTERRUPT__) (((((uint8_t)(__INTERRUPT__)) >> 5U) == 1U)? ((__HANDLE__)->Instance->CR1 |= (1U << ((__INTERRUPT__) & UART_IT_MASK))): \ + ((((uint8_t)(__INTERRUPT__)) >> 5U) == 2U)? ((__HANDLE__)->Instance->CR2 |= (1U << ((__INTERRUPT__) & UART_IT_MASK))): \ + ((__HANDLE__)->Instance->CR3 |= (1U << ((__INTERRUPT__) & UART_IT_MASK)))) + + +/** @brief Disable the specified UART interrupt. + * @param __HANDLE__ specifies the UART Handle. + * @param __INTERRUPT__ specifies the UART interrupt source to disable. + * This parameter can be one of the following values: +#if defined(USART_CR1_UESM) + * @arg @ref UART_IT_WUF Wakeup from stop mode interrupt +#endif + * @arg @ref UART_IT_CM Character match interrupt + * @arg @ref UART_IT_CTS CTS change interrupt + * @arg @ref UART_IT_LBD LIN Break detection interrupt + * @arg @ref UART_IT_TXE Transmit Data Register empty interrupt + * @arg @ref UART_IT_TC Transmission complete interrupt + * @arg @ref UART_IT_RXNE Receive Data register not empty interrupt + * @arg @ref UART_IT_RTO Receive Timeout interrupt + * @arg @ref UART_IT_IDLE Idle line detection interrupt + * @arg @ref UART_IT_PE Parity Error interrupt + * @arg @ref UART_IT_ERR Error interrupt (Frame error, noise error, overrun error) + * @retval None + */ +#define __HAL_UART_DISABLE_IT(__HANDLE__, __INTERRUPT__) (((((uint8_t)(__INTERRUPT__)) >> 5U) == 1U)? ((__HANDLE__)->Instance->CR1 &= ~ (1U << ((__INTERRUPT__) & UART_IT_MASK))): \ + ((((uint8_t)(__INTERRUPT__)) >> 5U) == 2U)? ((__HANDLE__)->Instance->CR2 &= ~ (1U << ((__INTERRUPT__) & UART_IT_MASK))): \ + ((__HANDLE__)->Instance->CR3 &= ~ (1U << ((__INTERRUPT__) & UART_IT_MASK)))) + +/** @brief Check whether the specified UART interrupt has occurred or not. + * @param __HANDLE__ specifies the UART Handle. + * @param __INTERRUPT__ specifies the UART interrupt to check. 
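The interrupt encoding above is easier to follow with a concrete use of the macros. The sketch below is illustrative only (handle huart1 and the example functions are assumptions): it enables the RXNE and error interrupts, then shows how the flag macros defined in this file are typically used when servicing the peripheral.

#include "stm32f7xx_hal.h"

extern UART_HandleTypeDef huart1;   /* assumed handle */

void example_enable_rx_interrupt(void)
{
  __HAL_UART_ENABLE_IT(&huart1, UART_IT_RXNE);   /* UART_IT_RXNE encodes CR1, sets RXNEIE */
  __HAL_UART_ENABLE_IT(&huart1, UART_IT_ERR);    /* UART_IT_ERR encodes CR3, sets EIE     */
}

/* Typically this logic lives in USART1_IRQHandler() via HAL_UART_IRQHandler();
   it is written out here purely to illustrate the flag macros. */
void example_poll_and_clear(void)
{
  if (__HAL_UART_GET_FLAG(&huart1, UART_FLAG_RXNE))
  {
    uint8_t byte = (uint8_t)(huart1.Instance->RDR & 0xFFU);  /* reading RDR clears RXNE */
    (void)byte;
  }
  if (__HAL_UART_GET_FLAG(&huart1, UART_FLAG_ORE))
  {
    __HAL_UART_CLEAR_FLAG(&huart1, UART_CLEAR_OREF);         /* write ORECF to ICR */
  }
}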
+ * This parameter can be one of the following values: +#if defined(USART_CR1_UESM) + * @arg @ref UART_IT_WUF Wakeup from stop mode interrupt +#endif + * @arg @ref UART_IT_CM Character match interrupt + * @arg @ref UART_IT_CTS CTS change interrupt + * @arg @ref UART_IT_LBD LIN Break detection interrupt + * @arg @ref UART_IT_TXE Transmit Data Register empty interrupt + * @arg @ref UART_IT_TC Transmission complete interrupt + * @arg @ref UART_IT_RXNE Receive Data register not empty interrupt + * @arg @ref UART_IT_RTO Receive Timeout interrupt + * @arg @ref UART_IT_IDLE Idle line detection interrupt + * @arg @ref UART_IT_PE Parity Error interrupt + * @arg @ref UART_IT_ERR Error interrupt (Frame error, noise error, overrun error) + * @retval The new state of __INTERRUPT__ (SET or RESET). + */ +#define __HAL_UART_GET_IT(__HANDLE__, __INTERRUPT__) ((((__HANDLE__)->Instance->ISR\ + & (1U << ((__INTERRUPT__)>> 8U))) != RESET) ? SET : RESET) + +/** @brief Check whether the specified UART interrupt source is enabled or not. + * @param __HANDLE__ specifies the UART Handle. + * @param __INTERRUPT__ specifies the UART interrupt source to check. + * This parameter can be one of the following values: +#if defined(USART_CR1_UESM) + * @arg @ref UART_IT_WUF Wakeup from stop mode interrupt +#endif + * @arg @ref UART_IT_CM Character match interrupt + * @arg @ref UART_IT_CTS CTS change interrupt + * @arg @ref UART_IT_LBD LIN Break detection interrupt + * @arg @ref UART_IT_TXE Transmit Data Register empty interrupt + * @arg @ref UART_IT_TC Transmission complete interrupt + * @arg @ref UART_IT_RXNE Receive Data register not empty interrupt + * @arg @ref UART_IT_RTO Receive Timeout interrupt + * @arg @ref UART_IT_IDLE Idle line detection interrupt + * @arg @ref UART_IT_PE Parity Error interrupt + * @arg @ref UART_IT_ERR Error interrupt (Frame error, noise error, overrun error) + * @retval The new state of __INTERRUPT__ (SET or RESET). + */ +#define __HAL_UART_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((((((((uint8_t)(__INTERRUPT__)) >> 5U) == 1U) ? (__HANDLE__)->Instance->CR1 : \ + (((((uint8_t)(__INTERRUPT__)) >> 5U) == 2U) ? (__HANDLE__)->Instance->CR2 : \ + (__HANDLE__)->Instance->CR3)) & (1U << (((uint16_t)(__INTERRUPT__)) & UART_IT_MASK))) != RESET) ? SET : RESET) + +/** @brief Clear the specified UART ISR flag, in setting the proper ICR register flag. + * @param __HANDLE__ specifies the UART Handle. + * @param __IT_CLEAR__ specifies the interrupt clear register flag that needs to be set + * to clear the corresponding interrupt + * This parameter can be one of the following values: + * @arg @ref UART_CLEAR_PEF Parity Error Clear Flag + * @arg @ref UART_CLEAR_FEF Framing Error Clear Flag + * @arg @ref UART_CLEAR_NEF Noise detected Clear Flag + * @arg @ref UART_CLEAR_OREF Overrun Error Clear Flag + * @arg @ref UART_CLEAR_IDLEF IDLE line detected Clear Flag + * @arg @ref UART_CLEAR_RTOF Receiver timeout clear flag + * @arg @ref UART_CLEAR_TCF Transmission Complete Clear Flag + * @arg @ref UART_CLEAR_LBDF LIN Break Detection Clear Flag + * @arg @ref UART_CLEAR_CTSF CTS Interrupt Clear Flag + * @arg @ref UART_CLEAR_CMF Character Match Clear Flag + #if defined(USART_CR1_UESM) + * @arg @ref UART_CLEAR_WUF Wake Up from stop mode Clear Flag +#endif + * @retval None + */ +#define __HAL_UART_CLEAR_IT(__HANDLE__, __IT_CLEAR__) ((__HANDLE__)->Instance->ICR = (uint32_t)(__IT_CLEAR__)) + +/** @brief Set a specific UART request flag. + * @param __HANDLE__ specifies the UART Handle. 
+ * @param __REQ__ specifies the request flag to set + * This parameter can be one of the following values: + * @arg @ref UART_AUTOBAUD_REQUEST Auto-Baud Rate Request + * @arg @ref UART_SENDBREAK_REQUEST Send Break Request + * @arg @ref UART_MUTE_MODE_REQUEST Mute Mode Request + * @arg @ref UART_RXDATA_FLUSH_REQUEST Receive Data flush Request + * @arg @ref UART_TXDATA_FLUSH_REQUEST Transmit data flush Request + * @retval None + */ +#define __HAL_UART_SEND_REQ(__HANDLE__, __REQ__) ((__HANDLE__)->Instance->RQR |= (uint16_t)(__REQ__)) + +/** @brief Enable the UART one bit sample method. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_ONE_BIT_SAMPLE_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR3|= USART_CR3_ONEBIT) + +/** @brief Disable the UART one bit sample method. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_ONE_BIT_SAMPLE_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR3 &= ~USART_CR3_ONEBIT) + +/** @brief Enable UART. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1 |= USART_CR1_UE) + +/** @brief Disable UART. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1 &= ~USART_CR1_UE) + +/** @brief Enable CTS flow control. + * @note This macro allows to enable CTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. + * @note As macro is expected to be used for modifying CTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled (i.e. __HAL_UART_DISABLE(__HANDLE__)) + * and should be followed by an Enable macro (i.e. __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_HWCONTROL_CTS_ENABLE(__HANDLE__) \ + do{ \ + SET_BIT((__HANDLE__)->Instance->CR3, USART_CR3_CTSE); \ + (__HANDLE__)->Init.HwFlowCtl |= USART_CR3_CTSE; \ + } while(0U) + +/** @brief Disable CTS flow control. + * @note This macro allows to disable CTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. + * @note As macro is expected to be used for modifying CTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled (i.e. __HAL_UART_DISABLE(__HANDLE__)) + * and should be followed by an Enable macro (i.e. __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_HWCONTROL_CTS_DISABLE(__HANDLE__) \ + do{ \ + CLEAR_BIT((__HANDLE__)->Instance->CR3, USART_CR3_CTSE); \ + (__HANDLE__)->Init.HwFlowCtl &= ~(USART_CR3_CTSE); \ + } while(0U) + +/** @brief Enable RTS flow control. 
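The CTS flow-control macros above prescribe a specific disable/modify/re-enable sequence; a minimal sketch of that sequence (not part of the ST header, handle huart1 assumed to be already initialised) is:

#include "stm32f7xx_hal.h"

extern UART_HandleTypeDef huart1;   /* assumed handle */

void example_switch_on_cts(void)
{
  __HAL_UART_DISABLE(&huart1);               /* UART must be disabled first        */
  __HAL_UART_HWCONTROL_CTS_ENABLE(&huart1);  /* set CTSE and update Init.HwFlowCtl */
  __HAL_UART_ENABLE(&huart1);                /* re-enable the peripheral           */
}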
+ * @note This macro allows to enable RTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. + * @note As macro is expected to be used for modifying RTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled (i.e. __HAL_UART_DISABLE(__HANDLE__)) + * and should be followed by an Enable macro (i.e. __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_HWCONTROL_RTS_ENABLE(__HANDLE__) \ + do{ \ + SET_BIT((__HANDLE__)->Instance->CR3, USART_CR3_RTSE); \ + (__HANDLE__)->Init.HwFlowCtl |= USART_CR3_RTSE; \ + } while(0U) + +/** @brief Disable RTS flow control. + * @note This macro allows to disable RTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. + * @note As macro is expected to be used for modifying RTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled (i.e. __HAL_UART_DISABLE(__HANDLE__)) + * and should be followed by an Enable macro (i.e. __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_HWCONTROL_RTS_DISABLE(__HANDLE__) \ + do{ \ + CLEAR_BIT((__HANDLE__)->Instance->CR3, USART_CR3_RTSE);\ + (__HANDLE__)->Init.HwFlowCtl &= ~(USART_CR3_RTSE); \ + } while(0U) +/** + * @} + */ + +/* Private macros --------------------------------------------------------*/ +/** @defgroup UART_Private_Macros UART Private Macros + * @{ + */ + + +/** @brief BRR division operation to set BRR register in 8-bit oversampling mode. + * @param __PCLK__ UART clock. + * @param __BAUD__ Baud rate set by the user. + * @retval Division result + */ +#define UART_DIV_SAMPLING8(__PCLK__, __BAUD__) ((((__PCLK__)*2U) + ((__BAUD__)/2U)) / (__BAUD__)) + +/** @brief BRR division operation to set BRR register in 16-bit oversampling mode. + * @param __PCLK__ UART clock. + * @param __BAUD__ Baud rate set by the user. + * @retval Division result + */ +#define UART_DIV_SAMPLING16(__PCLK__, __BAUD__) (((__PCLK__) + ((__BAUD__)/2U)) / (__BAUD__)) + + +/** @brief Check UART Baud rate. + * @param __BAUDRATE__ Baudrate specified by the user. + * The maximum Baud Rate is derived from the maximum clock on F7 (i.e. 216 MHz) + * divided by the smallest oversampling used on the USART (i.e. 8) + * @retval SET (__BAUDRATE__ is valid) or RESET (__BAUDRATE__ is invalid) + */ +#define IS_UART_BAUDRATE(__BAUDRATE__) ((__BAUDRATE__) < 27000001U) + +/** @brief Check UART assertion time. + * @param __TIME__ 5-bit value assertion time. + * @retval Test result (TRUE or FALSE). + */ +#define IS_UART_ASSERTIONTIME(__TIME__) ((__TIME__) <= 0x1FU) + +/** @brief Check UART deassertion time. + * @param __TIME__ 5-bit value deassertion time. + * @retval Test result (TRUE or FALSE). 
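A worked example of the 16x oversampling divider defined above (illustrative, not part of the ST header; the 54 MHz kernel clock is an assumption): for 115200 baud, UART_DIV_SAMPLING16 gives (54000000 + 57600) / 115200 = 469 = 0x1D5, the value the HAL writes to BRR, which yields 54000000 / 469, approximately 115139 baud, an error of about 0.05 %.

#include "stm32f7xx_hal.h"

/* 54 MHz UART kernel clock (assumed), 115200 baud, oversampling by 16 */
uint32_t example_brr_value(void)
{
  return UART_DIV_SAMPLING16(54000000U, 115200U);   /* -> 469 (0x1D5) */
}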
+ */ +#define IS_UART_DEASSERTIONTIME(__TIME__) ((__TIME__) <= 0x1FU) + +/** + * @brief Ensure that UART frame number of stop bits is valid. + * @param __STOPBITS__ UART frame number of stop bits. + * @retval SET (__STOPBITS__ is valid) or RESET (__STOPBITS__ is invalid) + */ +#define IS_UART_STOPBITS(__STOPBITS__) (((__STOPBITS__) == UART_STOPBITS_0_5) || \ + ((__STOPBITS__) == UART_STOPBITS_1) || \ + ((__STOPBITS__) == UART_STOPBITS_1_5) || \ + ((__STOPBITS__) == UART_STOPBITS_2)) + + +/** + * @brief Ensure that UART frame parity is valid. + * @param __PARITY__ UART frame parity. + * @retval SET (__PARITY__ is valid) or RESET (__PARITY__ is invalid) + */ +#define IS_UART_PARITY(__PARITY__) (((__PARITY__) == UART_PARITY_NONE) || \ + ((__PARITY__) == UART_PARITY_EVEN) || \ + ((__PARITY__) == UART_PARITY_ODD)) + +/** + * @brief Ensure that UART hardware flow control is valid. + * @param __CONTROL__ UART hardware flow control. + * @retval SET (__CONTROL__ is valid) or RESET (__CONTROL__ is invalid) + */ +#define IS_UART_HARDWARE_FLOW_CONTROL(__CONTROL__)\ + (((__CONTROL__) == UART_HWCONTROL_NONE) || \ + ((__CONTROL__) == UART_HWCONTROL_RTS) || \ + ((__CONTROL__) == UART_HWCONTROL_CTS) || \ + ((__CONTROL__) == UART_HWCONTROL_RTS_CTS)) + +/** + * @brief Ensure that UART communication mode is valid. + * @param __MODE__ UART communication mode. + * @retval SET (__MODE__ is valid) or RESET (__MODE__ is invalid) + */ +#define IS_UART_MODE(__MODE__) ((((__MODE__) & (~((uint32_t)(UART_MODE_TX_RX)))) == 0x00U) && ((__MODE__) != 0x00U)) + +/** + * @brief Ensure that UART state is valid. + * @param __STATE__ UART state. + * @retval SET (__STATE__ is valid) or RESET (__STATE__ is invalid) + */ +#define IS_UART_STATE(__STATE__) (((__STATE__) == UART_STATE_DISABLE) || \ + ((__STATE__) == UART_STATE_ENABLE)) + +/** + * @brief Ensure that UART oversampling is valid. + * @param __SAMPLING__ UART oversampling. + * @retval SET (__SAMPLING__ is valid) or RESET (__SAMPLING__ is invalid) + */ +#define IS_UART_OVERSAMPLING(__SAMPLING__) (((__SAMPLING__) == UART_OVERSAMPLING_16) || \ + ((__SAMPLING__) == UART_OVERSAMPLING_8)) + +/** + * @brief Ensure that UART frame sampling is valid. + * @param __ONEBIT__ UART frame sampling. + * @retval SET (__ONEBIT__ is valid) or RESET (__ONEBIT__ is invalid) + */ +#define IS_UART_ONE_BIT_SAMPLE(__ONEBIT__) (((__ONEBIT__) == UART_ONE_BIT_SAMPLE_DISABLE) || \ + ((__ONEBIT__) == UART_ONE_BIT_SAMPLE_ENABLE)) + +/** + * @brief Ensure that UART auto Baud rate detection mode is valid. + * @param __MODE__ UART auto Baud rate detection mode. + * @retval SET (__MODE__ is valid) or RESET (__MODE__ is invalid) + */ +#define IS_UART_ADVFEATURE_AUTOBAUDRATEMODE(__MODE__) (((__MODE__) == UART_ADVFEATURE_AUTOBAUDRATE_ONSTARTBIT) || \ + ((__MODE__) == UART_ADVFEATURE_AUTOBAUDRATE_ONFALLINGEDGE) || \ + ((__MODE__) == UART_ADVFEATURE_AUTOBAUDRATE_ON0X7FFRAME) || \ + ((__MODE__) == UART_ADVFEATURE_AUTOBAUDRATE_ON0X55FRAME)) + +/** + * @brief Ensure that UART receiver timeout setting is valid. + * @param __TIMEOUT__ UART receiver timeout setting. + * @retval SET (__TIMEOUT__ is valid) or RESET (__TIMEOUT__ is invalid) + */ +#define IS_UART_RECEIVER_TIMEOUT(__TIMEOUT__) (((__TIMEOUT__) == UART_RECEIVER_TIMEOUT_DISABLE) || \ + ((__TIMEOUT__) == UART_RECEIVER_TIMEOUT_ENABLE)) + +/** @brief Check the receiver timeout value. + * @note The maximum UART receiver timeout value is 0xFFFFFF. + * @param __TIMEOUTVALUE__ receiver timeout value. 
+ * @retval Test result (TRUE or FALSE) + */ +#define IS_UART_RECEIVER_TIMEOUT_VALUE(__TIMEOUTVALUE__) ((__TIMEOUTVALUE__) <= 0xFFFFFFU) + +/** + * @brief Ensure that UART LIN state is valid. + * @param __LIN__ UART LIN state. + * @retval SET (__LIN__ is valid) or RESET (__LIN__ is invalid) + */ +#define IS_UART_LIN(__LIN__) (((__LIN__) == UART_LIN_DISABLE) || \ + ((__LIN__) == UART_LIN_ENABLE)) + +/** + * @brief Ensure that UART LIN break detection length is valid. + * @param __LENGTH__ UART LIN break detection length. + * @retval SET (__LENGTH__ is valid) or RESET (__LENGTH__ is invalid) + */ +#define IS_UART_LIN_BREAK_DETECT_LENGTH(__LENGTH__) (((__LENGTH__) == UART_LINBREAKDETECTLENGTH_10B) || \ + ((__LENGTH__) == UART_LINBREAKDETECTLENGTH_11B)) + +/** + * @brief Ensure that UART DMA TX state is valid. + * @param __DMATX__ UART DMA TX state. + * @retval SET (__DMATX__ is valid) or RESET (__DMATX__ is invalid) + */ +#define IS_UART_DMA_TX(__DMATX__) (((__DMATX__) == UART_DMA_TX_DISABLE) || \ + ((__DMATX__) == UART_DMA_TX_ENABLE)) + +/** + * @brief Ensure that UART DMA RX state is valid. + * @param __DMARX__ UART DMA RX state. + * @retval SET (__DMARX__ is valid) or RESET (__DMARX__ is invalid) + */ +#define IS_UART_DMA_RX(__DMARX__) (((__DMARX__) == UART_DMA_RX_DISABLE) || \ + ((__DMARX__) == UART_DMA_RX_ENABLE)) + +/** + * @brief Ensure that UART half-duplex state is valid. + * @param __HDSEL__ UART half-duplex state. + * @retval SET (__HDSEL__ is valid) or RESET (__HDSEL__ is invalid) + */ +#define IS_UART_HALF_DUPLEX(__HDSEL__) (((__HDSEL__) == UART_HALF_DUPLEX_DISABLE) || \ + ((__HDSEL__) == UART_HALF_DUPLEX_ENABLE)) + +/** + * @brief Ensure that UART wake-up method is valid. + * @param __WAKEUP__ UART wake-up method . + * @retval SET (__WAKEUP__ is valid) or RESET (__WAKEUP__ is invalid) + */ +#define IS_UART_WAKEUPMETHOD(__WAKEUP__) (((__WAKEUP__) == UART_WAKEUPMETHOD_IDLELINE) || \ + ((__WAKEUP__) == UART_WAKEUPMETHOD_ADDRESSMARK)) + +/** + * @brief Ensure that UART request parameter is valid. + * @param __PARAM__ UART request parameter. + * @retval SET (__PARAM__ is valid) or RESET (__PARAM__ is invalid) + */ +#define IS_UART_REQUEST_PARAMETER(__PARAM__) (((__PARAM__) == UART_AUTOBAUD_REQUEST) || \ + ((__PARAM__) == UART_SENDBREAK_REQUEST) || \ + ((__PARAM__) == UART_MUTE_MODE_REQUEST) || \ + ((__PARAM__) == UART_RXDATA_FLUSH_REQUEST) || \ + ((__PARAM__) == UART_TXDATA_FLUSH_REQUEST)) + +/** + * @brief Ensure that UART advanced features initialization is valid. + * @param __INIT__ UART advanced features initialization. + * @retval SET (__INIT__ is valid) or RESET (__INIT__ is invalid) + */ +#define IS_UART_ADVFEATURE_INIT(__INIT__) ((__INIT__) <= (UART_ADVFEATURE_NO_INIT | \ + UART_ADVFEATURE_TXINVERT_INIT | \ + UART_ADVFEATURE_RXINVERT_INIT | \ + UART_ADVFEATURE_DATAINVERT_INIT | \ + UART_ADVFEATURE_SWAP_INIT | \ + UART_ADVFEATURE_RXOVERRUNDISABLE_INIT | \ + UART_ADVFEATURE_DMADISABLEONERROR_INIT | \ + UART_ADVFEATURE_AUTOBAUDRATE_INIT | \ + UART_ADVFEATURE_MSBFIRST_INIT)) + +/** + * @brief Ensure that UART frame TX inversion setting is valid. + * @param __TXINV__ UART frame TX inversion setting. + * @retval SET (__TXINV__ is valid) or RESET (__TXINV__ is invalid) + */ +#define IS_UART_ADVFEATURE_TXINV(__TXINV__) (((__TXINV__) == UART_ADVFEATURE_TXINV_DISABLE) || \ + ((__TXINV__) == UART_ADVFEATURE_TXINV_ENABLE)) + +/** + * @brief Ensure that UART frame RX inversion setting is valid. + * @param __RXINV__ UART frame RX inversion setting. 
+ * @retval SET (__RXINV__ is valid) or RESET (__RXINV__ is invalid) + */ +#define IS_UART_ADVFEATURE_RXINV(__RXINV__) (((__RXINV__) == UART_ADVFEATURE_RXINV_DISABLE) || \ + ((__RXINV__) == UART_ADVFEATURE_RXINV_ENABLE)) + +/** + * @brief Ensure that UART frame data inversion setting is valid. + * @param __DATAINV__ UART frame data inversion setting. + * @retval SET (__DATAINV__ is valid) or RESET (__DATAINV__ is invalid) + */ +#define IS_UART_ADVFEATURE_DATAINV(__DATAINV__) (((__DATAINV__) == UART_ADVFEATURE_DATAINV_DISABLE) || \ + ((__DATAINV__) == UART_ADVFEATURE_DATAINV_ENABLE)) + +/** + * @brief Ensure that UART frame RX/TX pins swap setting is valid. + * @param __SWAP__ UART frame RX/TX pins swap setting. + * @retval SET (__SWAP__ is valid) or RESET (__SWAP__ is invalid) + */ +#define IS_UART_ADVFEATURE_SWAP(__SWAP__) (((__SWAP__) == UART_ADVFEATURE_SWAP_DISABLE) || \ + ((__SWAP__) == UART_ADVFEATURE_SWAP_ENABLE)) + +/** + * @brief Ensure that UART frame overrun setting is valid. + * @param __OVERRUN__ UART frame overrun setting. + * @retval SET (__OVERRUN__ is valid) or RESET (__OVERRUN__ is invalid) + */ +#define IS_UART_OVERRUN(__OVERRUN__) (((__OVERRUN__) == UART_ADVFEATURE_OVERRUN_ENABLE) || \ + ((__OVERRUN__) == UART_ADVFEATURE_OVERRUN_DISABLE)) + +/** + * @brief Ensure that UART auto Baud rate state is valid. + * @param __AUTOBAUDRATE__ UART auto Baud rate state. + * @retval SET (__AUTOBAUDRATE__ is valid) or RESET (__AUTOBAUDRATE__ is invalid) + */ +#define IS_UART_ADVFEATURE_AUTOBAUDRATE(__AUTOBAUDRATE__) (((__AUTOBAUDRATE__) == UART_ADVFEATURE_AUTOBAUDRATE_DISABLE) || \ + ((__AUTOBAUDRATE__) == UART_ADVFEATURE_AUTOBAUDRATE_ENABLE)) + +/** + * @brief Ensure that UART DMA enabling or disabling on error setting is valid. + * @param __DMA__ UART DMA enabling or disabling on error setting. + * @retval SET (__DMA__ is valid) or RESET (__DMA__ is invalid) + */ +#define IS_UART_ADVFEATURE_DMAONRXERROR(__DMA__) (((__DMA__) == UART_ADVFEATURE_DMA_ENABLEONRXERROR) || \ + ((__DMA__) == UART_ADVFEATURE_DMA_DISABLEONRXERROR)) + +/** + * @brief Ensure that UART frame MSB first setting is valid. + * @param __MSBFIRST__ UART frame MSB first setting. + * @retval SET (__MSBFIRST__ is valid) or RESET (__MSBFIRST__ is invalid) + */ +#define IS_UART_ADVFEATURE_MSBFIRST(__MSBFIRST__) (((__MSBFIRST__) == UART_ADVFEATURE_MSBFIRST_DISABLE) || \ + ((__MSBFIRST__) == UART_ADVFEATURE_MSBFIRST_ENABLE)) + +#if defined(USART_CR1_UESM) +/** + * @brief Ensure that UART stop mode state is valid. + * @param __STOPMODE__ UART stop mode state. + * @retval SET (__STOPMODE__ is valid) or RESET (__STOPMODE__ is invalid) + */ +#define IS_UART_ADVFEATURE_STOPMODE(__STOPMODE__) (((__STOPMODE__) == UART_ADVFEATURE_STOPMODE_DISABLE) || \ + ((__STOPMODE__) == UART_ADVFEATURE_STOPMODE_ENABLE)) + +#endif /* USART_CR1_UESM */ +/** + * @brief Ensure that UART mute mode state is valid. + * @param __MUTE__ UART mute mode state. + * @retval SET (__MUTE__ is valid) or RESET (__MUTE__ is invalid) + */ +#define IS_UART_MUTE_MODE(__MUTE__) (((__MUTE__) == UART_ADVFEATURE_MUTEMODE_DISABLE) || \ + ((__MUTE__) == UART_ADVFEATURE_MUTEMODE_ENABLE)) +#if defined(USART_CR1_UESM) + +/** + * @brief Ensure that UART wake-up selection is valid. + * @param __WAKE__ UART wake-up selection. 
+ * @retval SET (__WAKE__ is valid) or RESET (__WAKE__ is invalid) + */ +#define IS_UART_WAKEUP_SELECTION(__WAKE__) (((__WAKE__) == UART_WAKEUP_ON_ADDRESS) || \ + ((__WAKE__) == UART_WAKEUP_ON_STARTBIT) || \ + ((__WAKE__) == UART_WAKEUP_ON_READDATA_NONEMPTY)) +#endif /* USART_CR1_UESM */ + +/** + * @brief Ensure that UART driver enable polarity is valid. + * @param __POLARITY__ UART driver enable polarity. + * @retval SET (__POLARITY__ is valid) or RESET (__POLARITY__ is invalid) + */ +#define IS_UART_DE_POLARITY(__POLARITY__) (((__POLARITY__) == UART_DE_POLARITY_HIGH) || \ + ((__POLARITY__) == UART_DE_POLARITY_LOW)) + + +/** + * @} + */ + +/* Include UART HAL Extended module */ +#include "stm32f7xx_hal_uart_ex.h" + + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup UART_Exported_Functions UART Exported Functions + * @{ + */ + +/** @addtogroup UART_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ + +/* Initialization and de-initialization functions ****************************/ +HAL_StatusTypeDef HAL_UART_Init(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_HalfDuplex_Init(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_LIN_Init(UART_HandleTypeDef *huart, uint32_t BreakDetectLength); +HAL_StatusTypeDef HAL_MultiProcessor_Init(UART_HandleTypeDef *huart, uint8_t Address, uint32_t WakeUpMethod); +HAL_StatusTypeDef HAL_UART_DeInit(UART_HandleTypeDef *huart); +void HAL_UART_MspInit(UART_HandleTypeDef *huart); +void HAL_UART_MspDeInit(UART_HandleTypeDef *huart); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID, + pUART_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @addtogroup UART_Exported_Functions_Group2 IO operation functions + * @{ + */ + +/* IO operation functions *****************************************************/ +HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_DMAPause(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_DMAResume(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart); +/* Transfer Abort functions */ +HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortTransmit(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortReceive(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortTransmit_IT(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortReceive_IT(UART_HandleTypeDef *huart); + +void HAL_UART_IRQHandler(UART_HandleTypeDef *huart); +void 
HAL_UART_TxHalfCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_TxCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_RxHalfCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_ErrorCallback(UART_HandleTypeDef *huart); +void HAL_UART_AbortCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_AbortTransmitCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_AbortReceiveCpltCallback(UART_HandleTypeDef *huart); + +/** + * @} + */ + +/** @addtogroup UART_Exported_Functions_Group3 Peripheral Control functions + * @{ + */ + +/* Peripheral Control functions ************************************************/ +void HAL_UART_ReceiverTimeout_Config(UART_HandleTypeDef *huart, uint32_t TimeoutValue); +HAL_StatusTypeDef HAL_UART_EnableReceiverTimeout(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_DisableReceiverTimeout(UART_HandleTypeDef *huart); + +HAL_StatusTypeDef HAL_LIN_SendBreak(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_MultiProcessor_EnableMuteMode(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_MultiProcessor_DisableMuteMode(UART_HandleTypeDef *huart); +void HAL_MultiProcessor_EnterMuteMode(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_HalfDuplex_EnableTransmitter(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_HalfDuplex_EnableReceiver(UART_HandleTypeDef *huart); + +/** + * @} + */ + +/** @addtogroup UART_Exported_Functions_Group4 Peripheral State and Error functions + * @{ + */ + +/* Peripheral State and Errors functions **************************************************/ +HAL_UART_StateTypeDef HAL_UART_GetState(UART_HandleTypeDef *huart); +uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart); + +/** + * @} + */ + +/** + * @} + */ + +/* Private functions -----------------------------------------------------------*/ +/** @addtogroup UART_Private_Functions UART Private Functions + * @{ + */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +void UART_InitCallbacksToDefault(UART_HandleTypeDef *huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +HAL_StatusTypeDef UART_SetConfig(UART_HandleTypeDef *huart); +HAL_StatusTypeDef UART_CheckIdleState(UART_HandleTypeDef *huart); +HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, + uint32_t Tickstart, uint32_t Timeout); +void UART_AdvFeatureConfig(UART_HandleTypeDef *huart); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_UART_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart_ex.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart_ex.h new file mode 100644 index 000000000..e99d8d684 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart_ex.h @@ -0,0 +1,425 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_uart_ex.h + * @author MCD Application Team + * @brief Header file of UART HAL Extended module. + ****************************************************************************** + * @attention + * + *
+  * © Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_UART_EX_H +#define STM32F7xx_HAL_UART_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup UARTEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup UARTEx_Exported_Types UARTEx Exported Types + * @{ + */ + +#if defined(USART_CR1_UESM) +/** + * @brief UART wake up from stop mode parameters + */ +typedef struct +{ + uint32_t WakeUpEvent; /*!< Specifies which event will activate the Wakeup from Stop mode flag (WUF). + This parameter can be a value of @ref UART_WakeUp_from_Stop_Selection. + If set to UART_WAKEUP_ON_ADDRESS, the two other fields below must + be filled up. */ + + uint16_t AddressLength; /*!< Specifies whether the address is 4 or 7-bit long. + This parameter can be a value of @ref UARTEx_WakeUp_Address_Length. */ + + uint8_t Address; /*!< UART/USART node address (7-bit long max). */ +} UART_WakeUpTypeDef; + +#endif /* USART_CR1_UESM */ +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup UARTEx_Exported_Constants UARTEx Exported Constants + * @{ + */ + +/** @defgroup UARTEx_Word_Length UARTEx Word Length + * @{ + */ +#define UART_WORDLENGTH_7B USART_CR1_M1 /*!< 7-bit long UART frame */ +#define UART_WORDLENGTH_8B 0x00000000U /*!< 8-bit long UART frame */ +#define UART_WORDLENGTH_9B USART_CR1_M0 /*!< 9-bit long UART frame */ +/** + * @} + */ + +/** @defgroup UARTEx_WakeUp_Address_Length UARTEx WakeUp Address Length + * @{ + */ +#define UART_ADDRESS_DETECT_4B 0x00000000U /*!< 4-bit long wake-up address */ +#define UART_ADDRESS_DETECT_7B USART_CR2_ADDM7 /*!< 7-bit long wake-up address */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup UARTEx_Exported_Functions + * @{ + */ + +/** @addtogroup UARTEx_Exported_Functions_Group1 + * @{ + */ + +/* Initialization and de-initialization functions ****************************/ +HAL_StatusTypeDef HAL_RS485Ex_Init(UART_HandleTypeDef *huart, uint32_t Polarity, uint32_t AssertionTime, + uint32_t DeassertionTime); + +/** + * @} + */ + +/** @addtogroup UARTEx_Exported_Functions_Group2 + * @{ + */ + +#if defined(USART_CR1_UESM) +void HAL_UARTEx_WakeupCallback(UART_HandleTypeDef *huart); + +#endif /* USART_CR1_UESM */ + +/** + * @} + */ + +/** @addtogroup UARTEx_Exported_Functions_Group3 + * @{ + */ + +/* Peripheral Control functions **********************************************/ +#if defined(USART_CR1_UESM) +HAL_StatusTypeDef HAL_UARTEx_StopModeWakeUpSourceConfig(UART_HandleTypeDef *huart, UART_WakeUpTypeDef WakeUpSelection); +HAL_StatusTypeDef HAL_UARTEx_EnableStopMode(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UARTEx_DisableStopMode(UART_HandleTypeDef *huart); + +#endif/* USART_CR1_UESM */ +#if defined(USART_CR3_UCESM) 
+HAL_StatusTypeDef HAL_UARTEx_EnableClockStopMode(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UARTEx_DisableClockStopMode(UART_HandleTypeDef *huart); + +#endif /* USART_CR3_UCESM */ +HAL_StatusTypeDef HAL_MultiProcessorEx_AddressLength_Set(UART_HandleTypeDef *huart, uint32_t AddressLength); + + +/** + * @} + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup UARTEx_Private_Macros UARTEx Private Macros + * @{ + */ + +/** @brief Report the UART clock source. + * @param __HANDLE__ specifies the UART Handle. + * @param __CLOCKSOURCE__ output variable. + * @retval UART clocking source, written in __CLOCKSOURCE__. + */ +#define UART_GETCLOCKSOURCE(__HANDLE__,__CLOCKSOURCE__) \ + do { \ + if((__HANDLE__)->Instance == USART1) \ + { \ + switch(__HAL_RCC_GET_USART1_SOURCE()) \ + { \ + case RCC_USART1CLKSOURCE_PCLK2: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK2; \ + break; \ + case RCC_USART1CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_USART1CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_USART1CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if((__HANDLE__)->Instance == USART2) \ + { \ + switch(__HAL_RCC_GET_USART2_SOURCE()) \ + { \ + case RCC_USART2CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_USART2CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_USART2CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_USART2CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if((__HANDLE__)->Instance == USART3) \ + { \ + switch(__HAL_RCC_GET_USART3_SOURCE()) \ + { \ + case RCC_USART3CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_USART3CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_USART3CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_USART3CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if((__HANDLE__)->Instance == UART4) \ + { \ + switch(__HAL_RCC_GET_UART4_SOURCE()) \ + { \ + case RCC_UART4CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_UART4CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_UART4CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_UART4CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if ((__HANDLE__)->Instance == UART5) \ + { \ + switch(__HAL_RCC_GET_UART5_SOURCE()) \ + { \ + case RCC_UART5CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_UART5CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_UART5CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_UART5CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ 
+ } \ + else if((__HANDLE__)->Instance == USART6) \ + { \ + switch(__HAL_RCC_GET_USART6_SOURCE()) \ + { \ + case RCC_USART6CLKSOURCE_PCLK2: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK2; \ + break; \ + case RCC_USART6CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_USART6CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_USART6CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if ((__HANDLE__)->Instance == UART7) \ + { \ + switch(__HAL_RCC_GET_UART7_SOURCE()) \ + { \ + case RCC_UART7CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_UART7CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_UART7CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_UART7CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if ((__HANDLE__)->Instance == UART8) \ + { \ + switch(__HAL_RCC_GET_UART8_SOURCE()) \ + { \ + case RCC_UART8CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_UART8CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_UART8CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_UART8CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else \ + { \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + } \ + } while(0U) + +/** @brief Report the UART mask to apply to retrieve the received data + * according to the word length and to the parity bits activation. + * @note If PCE = 1, the parity bit is not included in the data extracted + * by the reception API(). + * This masking operation is not carried out in the case of + * DMA transfers. + * @param __HANDLE__ specifies the UART Handle. + * @retval None, the mask to apply to UART RDR register is stored in (__HANDLE__)->Mask field. + */ +#define UART_MASK_COMPUTATION(__HANDLE__) \ + do { \ + if ((__HANDLE__)->Init.WordLength == UART_WORDLENGTH_9B) \ + { \ + if ((__HANDLE__)->Init.Parity == UART_PARITY_NONE) \ + { \ + (__HANDLE__)->Mask = 0x01FFU ; \ + } \ + else \ + { \ + (__HANDLE__)->Mask = 0x00FFU ; \ + } \ + } \ + else if ((__HANDLE__)->Init.WordLength == UART_WORDLENGTH_8B) \ + { \ + if ((__HANDLE__)->Init.Parity == UART_PARITY_NONE) \ + { \ + (__HANDLE__)->Mask = 0x00FFU ; \ + } \ + else \ + { \ + (__HANDLE__)->Mask = 0x007FU ; \ + } \ + } \ + else if ((__HANDLE__)->Init.WordLength == UART_WORDLENGTH_7B) \ + { \ + if ((__HANDLE__)->Init.Parity == UART_PARITY_NONE) \ + { \ + (__HANDLE__)->Mask = 0x007FU ; \ + } \ + else \ + { \ + (__HANDLE__)->Mask = 0x003FU ; \ + } \ + } \ + else \ + { \ + (__HANDLE__)->Mask = 0x0000U; \ + } \ + } while(0U) + +/** + * @brief Ensure that UART frame length is valid. + * @param __LENGTH__ UART frame length. + * @retval SET (__LENGTH__ is valid) or RESET (__LENGTH__ is invalid) + */ +#define IS_UART_WORD_LENGTH(__LENGTH__) (((__LENGTH__) == UART_WORDLENGTH_7B) || \ + ((__LENGTH__) == UART_WORDLENGTH_8B) || \ + ((__LENGTH__) == UART_WORDLENGTH_9B)) + +/** + * @brief Ensure that UART wake-up address length is valid. + * @param __ADDRESS__ UART wake-up address length. 
+ * @retval SET (__ADDRESS__ is valid) or RESET (__ADDRESS__ is invalid) + */ +#define IS_UART_ADDRESSLENGTH_DETECT(__ADDRESS__) (((__ADDRESS__) == UART_ADDRESS_DETECT_4B) || \ + ((__ADDRESS__) == UART_ADDRESS_DETECT_7B)) + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_UART_EX_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_fmc.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_fmc.h new file mode 100644 index 000000000..11bf9b151 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_fmc.h @@ -0,0 +1,1321 @@ +/** + ****************************************************************************** + * @file stm32f7xx_ll_fmc.h + * @author MCD Application Team + * @brief Header file of FMC HAL module. + ****************************************************************************** + * @attention + * + *
+  * © Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_LL_FMC_H +#define __STM32F7xx_LL_FMC_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup FMC_LL + * @{ + */ + +/** @addtogroup FMC_LL_Private_Macros + * @{ + */ +#define IS_FMC_NORSRAM_BANK(BANK) (((BANK) == FMC_NORSRAM_BANK1) || \ + ((BANK) == FMC_NORSRAM_BANK2) || \ + ((BANK) == FMC_NORSRAM_BANK3) || \ + ((BANK) == FMC_NORSRAM_BANK4)) + +#define IS_FMC_MUX(__MUX__) (((__MUX__) == FMC_DATA_ADDRESS_MUX_DISABLE) || \ + ((__MUX__) == FMC_DATA_ADDRESS_MUX_ENABLE)) + +#define IS_FMC_MEMORY(__MEMORY__) (((__MEMORY__) == FMC_MEMORY_TYPE_SRAM) || \ + ((__MEMORY__) == FMC_MEMORY_TYPE_PSRAM)|| \ + ((__MEMORY__) == FMC_MEMORY_TYPE_NOR)) + +#define IS_FMC_NORSRAM_MEMORY_WIDTH(__WIDTH__) (((__WIDTH__) == FMC_NORSRAM_MEM_BUS_WIDTH_8) || \ + ((__WIDTH__) == FMC_NORSRAM_MEM_BUS_WIDTH_16) || \ + ((__WIDTH__) == FMC_NORSRAM_MEM_BUS_WIDTH_32)) + +#define IS_FMC_ACCESS_MODE(__MODE__) (((__MODE__) == FMC_ACCESS_MODE_A) || \ + ((__MODE__) == FMC_ACCESS_MODE_B) || \ + ((__MODE__) == FMC_ACCESS_MODE_C) || \ + ((__MODE__) == FMC_ACCESS_MODE_D)) + +#define IS_FMC_NAND_BANK(BANK) ((BANK) == FMC_NAND_BANK3) + +#define IS_FMC_WAIT_FEATURE(FEATURE) (((FEATURE) == FMC_NAND_WAIT_FEATURE_DISABLE) || \ + ((FEATURE) == FMC_NAND_WAIT_FEATURE_ENABLE)) + +#define IS_FMC_NAND_MEMORY_WIDTH(WIDTH) (((WIDTH) == FMC_NAND_MEM_BUS_WIDTH_8) || \ + ((WIDTH) == FMC_NAND_MEM_BUS_WIDTH_16)) + +#define IS_FMC_ECC_STATE(STATE) (((STATE) == FMC_NAND_ECC_DISABLE) || \ + ((STATE) == FMC_NAND_ECC_ENABLE)) + +#define IS_FMC_ECCPAGE_SIZE(SIZE) (((SIZE) == FMC_NAND_ECC_PAGE_SIZE_256BYTE) || \ + ((SIZE) == FMC_NAND_ECC_PAGE_SIZE_512BYTE) || \ + ((SIZE) == FMC_NAND_ECC_PAGE_SIZE_1024BYTE) || \ + ((SIZE) == FMC_NAND_ECC_PAGE_SIZE_2048BYTE) || \ + ((SIZE) == FMC_NAND_ECC_PAGE_SIZE_4096BYTE) || \ + ((SIZE) == FMC_NAND_ECC_PAGE_SIZE_8192BYTE)) + +#define IS_FMC_SDMEMORY_WIDTH(WIDTH) (((WIDTH) == FMC_SDRAM_MEM_BUS_WIDTH_8) || \ + ((WIDTH) == FMC_SDRAM_MEM_BUS_WIDTH_16) || \ + ((WIDTH) == FMC_SDRAM_MEM_BUS_WIDTH_32)) + +#define IS_FMC_WRITE_PROTECTION(__WRITE__) (((__WRITE__) == FMC_SDRAM_WRITE_PROTECTION_DISABLE) || \ + ((__WRITE__) == FMC_SDRAM_WRITE_PROTECTION_ENABLE)) + +#define IS_FMC_SDCLOCK_PERIOD(__PERIOD__) (((__PERIOD__) == FMC_SDRAM_CLOCK_DISABLE) || \ + ((__PERIOD__) == FMC_SDRAM_CLOCK_PERIOD_2) || \ + ((__PERIOD__) == FMC_SDRAM_CLOCK_PERIOD_3)) + +#define IS_FMC_READ_BURST(__RBURST__) (((__RBURST__) == FMC_SDRAM_RBURST_DISABLE) || \ + ((__RBURST__) == FMC_SDRAM_RBURST_ENABLE)) + +#define IS_FMC_READPIPE_DELAY(__DELAY__) (((__DELAY__) == FMC_SDRAM_RPIPE_DELAY_0) || \ + ((__DELAY__) == FMC_SDRAM_RPIPE_DELAY_1) || \ + ((__DELAY__) == FMC_SDRAM_RPIPE_DELAY_2)) + +#define IS_FMC_COMMAND_MODE(__COMMAND__) (((__COMMAND__) == FMC_SDRAM_CMD_NORMAL_MODE) || \ + ((__COMMAND__) == FMC_SDRAM_CMD_CLK_ENABLE) || \ + ((__COMMAND__) == FMC_SDRAM_CMD_PALL) || \ + ((__COMMAND__) == FMC_SDRAM_CMD_AUTOREFRESH_MODE) || \ + ((__COMMAND__) == 
FMC_SDRAM_CMD_LOAD_MODE) || \ + ((__COMMAND__) == FMC_SDRAM_CMD_SELFREFRESH_MODE) || \ + ((__COMMAND__) == FMC_SDRAM_CMD_POWERDOWN_MODE)) + +#define IS_FMC_COMMAND_TARGET(__TARGET__) (((__TARGET__) == FMC_SDRAM_CMD_TARGET_BANK1) || \ + ((__TARGET__) == FMC_SDRAM_CMD_TARGET_BANK2) || \ + ((__TARGET__) == FMC_SDRAM_CMD_TARGET_BANK1_2)) + +/** @defgroup FMC_TCLR_Setup_Time FMC TCLR Setup Time + * @{ + */ +#define IS_FMC_TCLR_TIME(__TIME__) ((__TIME__) <= 255) +/** + * @} + */ + +/** @defgroup FMC_TAR_Setup_Time FMC TAR Setup Time + * @{ + */ +#define IS_FMC_TAR_TIME(TIME) ((TIME) <= 255) +/** + * @} + */ + +/** @defgroup FMC_Setup_Time FMC Setup Time + * @{ + */ +#define IS_FMC_SETUP_TIME(TIME) ((TIME) <= 254) +/** + * @} + */ + +/** @defgroup FMC_Wait_Setup_Time FMC Wait Setup Time + * @{ + */ +#define IS_FMC_WAIT_TIME(TIME) ((TIME) <= 254) +/** + * @} + */ + +/** @defgroup FMC_Hold_Setup_Time FMC Hold Setup Time + * @{ + */ +#define IS_FMC_HOLD_TIME(TIME) ((TIME) <= 254) +/** + * @} + */ + +/** @defgroup FMC_HiZ_Setup_Time FMC HiZ Setup Time + * @{ + */ +#define IS_FMC_HIZ_TIME(TIME) ((TIME) <= 254) +/** + * @} + */ + +#define IS_FMC_BURSTMODE(__STATE__) (((__STATE__) == FMC_BURST_ACCESS_MODE_DISABLE) || \ + ((__STATE__) == FMC_BURST_ACCESS_MODE_ENABLE)) + +#define IS_FMC_WAIT_POLARITY(__POLARITY__) (((__POLARITY__) == FMC_WAIT_SIGNAL_POLARITY_LOW) || \ + ((__POLARITY__) == FMC_WAIT_SIGNAL_POLARITY_HIGH)) + +#define IS_FMC_WAIT_SIGNAL_ACTIVE(__ACTIVE__) (((__ACTIVE__) == FMC_WAIT_TIMING_BEFORE_WS) || \ + ((__ACTIVE__) == FMC_WAIT_TIMING_DURING_WS)) + +#define IS_FMC_WRITE_OPERATION(__OPERATION__) (((__OPERATION__) == FMC_WRITE_OPERATION_DISABLE) || \ + ((__OPERATION__) == FMC_WRITE_OPERATION_ENABLE)) + +#define IS_FMC_WAITE_SIGNAL(__SIGNAL__) (((__SIGNAL__) == FMC_WAIT_SIGNAL_DISABLE) || \ + ((__SIGNAL__) == FMC_WAIT_SIGNAL_ENABLE)) + +#define IS_FMC_EXTENDED_MODE(__MODE__) (((__MODE__) == FMC_EXTENDED_MODE_DISABLE) || \ + ((__MODE__) == FMC_EXTENDED_MODE_ENABLE)) + +#define IS_FMC_ASYNWAIT(__STATE__) (((__STATE__) == FMC_ASYNCHRONOUS_WAIT_DISABLE) || \ + ((__STATE__) == FMC_ASYNCHRONOUS_WAIT_ENABLE)) + +/** @defgroup FMC_Data_Latency FMC Data Latency + * @{ + */ +#define IS_FMC_DATA_LATENCY(__LATENCY__) (((__LATENCY__) > 1) && ((__LATENCY__) <= 17)) +/** + * @} + */ + +#define IS_FMC_WRITE_BURST(__BURST__) (((__BURST__) == FMC_WRITE_BURST_DISABLE) || \ + ((__BURST__) == FMC_WRITE_BURST_ENABLE)) + +#define IS_FMC_CONTINOUS_CLOCK(CCLOCK) (((CCLOCK) == FMC_CONTINUOUS_CLOCK_SYNC_ONLY) || \ + ((CCLOCK) == FMC_CONTINUOUS_CLOCK_SYNC_ASYNC)) + + +/** @defgroup FMC_Address_Setup_Time FMC Address Setup Time + * @{ + */ +#define IS_FMC_ADDRESS_SETUP_TIME(__TIME__) ((__TIME__) <= 15) +/** + * @} + */ + +/** @defgroup FMC_Address_Hold_Time FMC Address Hold Time + * @{ + */ +#define IS_FMC_ADDRESS_HOLD_TIME(__TIME__) (((__TIME__) > 0) && ((__TIME__) <= 15)) +/** + * @} + */ + +/** @defgroup FMC_Data_Setup_Time FMC Data Setup Time + * @{ + */ +#define IS_FMC_DATASETUP_TIME(__TIME__) (((__TIME__) > 0) && ((__TIME__) <= 255)) +/** + * @} + */ + +/** @defgroup FMC_Bus_Turn_around_Duration FMC Bus Turn around Duration + * @{ + */ +#define IS_FMC_TURNAROUND_TIME(__TIME__) ((__TIME__) <= 15) +/** + * @} + */ + +/** @defgroup FMC_CLK_Division FMC CLK Division + * @{ + */ +#define IS_FMC_CLK_DIV(DIV) (((DIV) > 1) && ((DIV) <= 16)) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_LoadToActive_Delay FMC SDRAM LoadToActive Delay + * @{ + */ +#define IS_FMC_LOADTOACTIVE_DELAY(__DELAY__) (((__DELAY__) > 0) && ((__DELAY__) 
<= 16)) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_ExitSelfRefresh_Delay FMC SDRAM ExitSelfRefresh Delay + * @{ + */ +#define IS_FMC_EXITSELFREFRESH_DELAY(__DELAY__) (((__DELAY__) > 0) && ((__DELAY__) <= 16)) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_SelfRefresh_Time FMC SDRAM SelfRefresh Time + * @{ + */ +#define IS_FMC_SELFREFRESH_TIME(__TIME__) (((__TIME__) > 0) && ((__TIME__) <= 16)) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_RowCycle_Delay FMC SDRAM RowCycle Delay + * @{ + */ +#define IS_FMC_ROWCYCLE_DELAY(__DELAY__) (((__DELAY__) > 0) && ((__DELAY__) <= 16)) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Write_Recovery_Time FMC SDRAM Write Recovery Time + * @{ + */ +#define IS_FMC_WRITE_RECOVERY_TIME(__TIME__) (((__TIME__) > 0) && ((__TIME__) <= 16)) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_RP_Delay FMC SDRAM RP Delay + * @{ + */ +#define IS_FMC_RP_DELAY(__DELAY__) (((__DELAY__) > 0) && ((__DELAY__) <= 16)) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_RCD_Delay FMC SDRAM RCD Delay + * @{ + */ +#define IS_FMC_RCD_DELAY(__DELAY__) (((__DELAY__) > 0) && ((__DELAY__) <= 16)) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_AutoRefresh_Number FMC SDRAM AutoRefresh Number + * @{ + */ +#define IS_FMC_AUTOREFRESH_NUMBER(__NUMBER__) (((__NUMBER__) > 0) && ((__NUMBER__) <= 16)) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_ModeRegister_Definition FMC SDRAM ModeRegister Definition + * @{ + */ +#define IS_FMC_MODE_REGISTER(__CONTENT__) ((__CONTENT__) <= 8191) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Refresh_rate FMC SDRAM Refresh rate + * @{ + */ +#define IS_FMC_REFRESH_RATE(__RATE__) ((__RATE__) <= 8191) +/** + * @} + */ + +/** @defgroup FMC_NORSRAM_Device_Instance FMC NORSRAM Device Instance + * @{ + */ +#define IS_FMC_NORSRAM_DEVICE(__INSTANCE__) ((__INSTANCE__) == FMC_NORSRAM_DEVICE) +/** + * @} + */ + +/** @defgroup FMC_NORSRAM_EXTENDED_Device_Instance FMC NORSRAM EXTENDED Device Instance + * @{ + */ +#define IS_FMC_NORSRAM_EXTENDED_DEVICE(__INSTANCE__) ((__INSTANCE__) == FMC_NORSRAM_EXTENDED_DEVICE) +/** + * @} + */ + +/** @defgroup FMC_NAND_Device_Instance FMC NAND Device Instance + * @{ + */ +#define IS_FMC_NAND_DEVICE(__INSTANCE__) ((__INSTANCE__) == FMC_NAND_DEVICE) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Device_Instance FMC SDRAM Device Instance + * @{ + */ +#define IS_FMC_SDRAM_DEVICE(__INSTANCE__) ((__INSTANCE__) == FMC_SDRAM_DEVICE) +/** + * @} + */ + +#define IS_FMC_SDRAM_BANK(BANK) (((BANK) == FMC_SDRAM_BANK1) || \ + ((BANK) == FMC_SDRAM_BANK2)) + +#define IS_FMC_COLUMNBITS_NUMBER(COLUMN) (((COLUMN) == FMC_SDRAM_COLUMN_BITS_NUM_8) || \ + ((COLUMN) == FMC_SDRAM_COLUMN_BITS_NUM_9) || \ + ((COLUMN) == FMC_SDRAM_COLUMN_BITS_NUM_10) || \ + ((COLUMN) == FMC_SDRAM_COLUMN_BITS_NUM_11)) + +#define IS_FMC_ROWBITS_NUMBER(ROW) (((ROW) == FMC_SDRAM_ROW_BITS_NUM_11) || \ + ((ROW) == FMC_SDRAM_ROW_BITS_NUM_12) || \ + ((ROW) == FMC_SDRAM_ROW_BITS_NUM_13)) + +#define IS_FMC_INTERNALBANK_NUMBER(NUMBER) (((NUMBER) == FMC_SDRAM_INTERN_BANKS_NUM_2) || \ + ((NUMBER) == FMC_SDRAM_INTERN_BANKS_NUM_4)) + + +#define IS_FMC_CAS_LATENCY(LATENCY) (((LATENCY) == FMC_SDRAM_CAS_LATENCY_1) || \ + ((LATENCY) == FMC_SDRAM_CAS_LATENCY_2) || \ + ((LATENCY) == FMC_SDRAM_CAS_LATENCY_3)) + +#define IS_FMC_PAGESIZE(__SIZE__) (((__SIZE__) == FMC_PAGE_SIZE_NONE) || \ + ((__SIZE__) == FMC_PAGE_SIZE_128) || \ + ((__SIZE__) == FMC_PAGE_SIZE_256) || \ + ((__SIZE__) == FMC_PAGE_SIZE_512) || \ + ((__SIZE__) == FMC_PAGE_SIZE_1024)) + +#define IS_FMC_WRITE_FIFO(__FIFO__) (((__FIFO__) == FMC_WRITE_FIFO_DISABLE) || \ + 
((__FIFO__) == FMC_WRITE_FIFO_ENABLE)) +/** + * @} + */ + +/* Exported typedef ----------------------------------------------------------*/ +/** @defgroup FMC_Exported_typedef FMC Low Layer Exported Types + * @{ + */ +#define FMC_NORSRAM_TypeDef FMC_Bank1_TypeDef +#define FMC_NORSRAM_EXTENDED_TypeDef FMC_Bank1E_TypeDef +#define FMC_NAND_TypeDef FMC_Bank3_TypeDef +#define FMC_SDRAM_TypeDef FMC_Bank5_6_TypeDef + +#define FMC_NORSRAM_DEVICE FMC_Bank1 +#define FMC_NORSRAM_EXTENDED_DEVICE FMC_Bank1E +#define FMC_NAND_DEVICE FMC_Bank3 +#define FMC_SDRAM_DEVICE FMC_Bank5_6 + +/** + * @brief FMC NORSRAM Configuration Structure definition + */ +typedef struct +{ + uint32_t NSBank; /*!< Specifies the NORSRAM memory device that will be used. + This parameter can be a value of @ref FMC_NORSRAM_Bank */ + + uint32_t DataAddressMux; /*!< Specifies whether the address and data values are + multiplexed on the data bus or not. + This parameter can be a value of @ref FMC_Data_Address_Bus_Multiplexing */ + + uint32_t MemoryType; /*!< Specifies the type of external memory attached to + the corresponding memory device. + This parameter can be a value of @ref FMC_Memory_Type */ + + uint32_t MemoryDataWidth; /*!< Specifies the external memory device width. + This parameter can be a value of @ref FMC_NORSRAM_Data_Width */ + + uint32_t BurstAccessMode; /*!< Enables or disables the burst access mode for Flash memory, + valid only with synchronous burst Flash memories. + This parameter can be a value of @ref FMC_Burst_Access_Mode */ + + uint32_t WaitSignalPolarity; /*!< Specifies the wait signal polarity, valid only when accessing + the Flash memory in burst mode. + This parameter can be a value of @ref FMC_Wait_Signal_Polarity */ + + uint32_t WaitSignalActive; /*!< Specifies if the wait signal is asserted by the memory one + clock cycle before the wait state or during the wait state, + valid only when accessing memories in burst mode. + This parameter can be a value of @ref FMC_Wait_Timing */ + + uint32_t WriteOperation; /*!< Enables or disables the write operation in the selected device by the FMC. + This parameter can be a value of @ref FMC_Write_Operation */ + + uint32_t WaitSignal; /*!< Enables or disables the wait state insertion via wait + signal, valid for Flash memory access in burst mode. + This parameter can be a value of @ref FMC_Wait_Signal */ + + uint32_t ExtendedMode; /*!< Enables or disables the extended mode. + This parameter can be a value of @ref FMC_Extended_Mode */ + + uint32_t AsynchronousWait; /*!< Enables or disables wait signal during asynchronous transfers, + valid only with asynchronous Flash memories. + This parameter can be a value of @ref FMC_AsynchronousWait */ + + uint32_t WriteBurst; /*!< Enables or disables the write burst operation. + This parameter can be a value of @ref FMC_Write_Burst */ + + uint32_t ContinuousClock; /*!< Enables or disables the FMC clock output to external memory devices. + This parameter is only enabled through the FMC_BCR1 register, and don't care + through FMC_BCR2..4 registers. + This parameter can be a value of @ref FMC_Continous_Clock */ + + uint32_t WriteFifo; /*!< Enables or disables the write FIFO used by the FMC controller. + This parameter is only enabled through the FMC_BCR1 register, and don't care + through FMC_BCR2..4 registers. + This parameter can be a value of @ref FMC_Write_FIFO */ + + uint32_t PageSize; /*!< Specifies the memory page size. 
+ This parameter can be a value of @ref FMC_Page_Size */ + +}FMC_NORSRAM_InitTypeDef; + +/** + * @brief FMC NORSRAM Timing parameters structure definition + */ +typedef struct +{ + uint32_t AddressSetupTime; /*!< Defines the number of HCLK cycles to configure + the duration of the address setup time. + This parameter can be a value between Min_Data = 0 and Max_Data = 15. + @note This parameter is not used with synchronous NOR Flash memories. */ + + uint32_t AddressHoldTime; /*!< Defines the number of HCLK cycles to configure + the duration of the address hold time. + This parameter can be a value between Min_Data = 1 and Max_Data = 15. + @note This parameter is not used with synchronous NOR Flash memories. */ + + uint32_t DataSetupTime; /*!< Defines the number of HCLK cycles to configure + the duration of the data setup time. + This parameter can be a value between Min_Data = 1 and Max_Data = 255. + @note This parameter is used for SRAMs, ROMs and asynchronous multiplexed + NOR Flash memories. */ + + uint32_t BusTurnAroundDuration; /*!< Defines the number of HCLK cycles to configure + the duration of the bus turnaround. + This parameter can be a value between Min_Data = 0 and Max_Data = 15. + @note This parameter is only used for multiplexed NOR Flash memories. */ + + uint32_t CLKDivision; /*!< Defines the period of CLK clock output signal, expressed in number of + HCLK cycles. This parameter can be a value between Min_Data = 2 and Max_Data = 16. + @note This parameter is not used for asynchronous NOR Flash, SRAM or ROM + accesses. */ + + uint32_t DataLatency; /*!< Defines the number of memory clock cycles to issue + to the memory before getting the first data. + The parameter value depends on the memory type as shown below: + - It must be set to 0 in case of a CRAM + - It is don't care in asynchronous NOR, SRAM or ROM accesses + - It may assume a value between Min_Data = 2 and Max_Data = 17 in NOR Flash memories + with synchronous burst mode enable */ + + uint32_t AccessMode; /*!< Specifies the asynchronous access mode. + This parameter can be a value of @ref FMC_Access_Mode */ +}FMC_NORSRAM_TimingTypeDef; + +/** + * @brief FMC NAND Configuration Structure definition + */ +typedef struct +{ + uint32_t NandBank; /*!< Specifies the NAND memory device that will be used. + This parameter can be a value of @ref FMC_NAND_Bank */ + + uint32_t Waitfeature; /*!< Enables or disables the Wait feature for the NAND Memory device. + This parameter can be any value of @ref FMC_Wait_feature */ + + uint32_t MemoryDataWidth; /*!< Specifies the external memory device width. + This parameter can be any value of @ref FMC_NAND_Data_Width */ + + uint32_t EccComputation; /*!< Enables or disables the ECC computation. + This parameter can be any value of @ref FMC_ECC */ + + uint32_t ECCPageSize; /*!< Defines the page size for the extended ECC. + This parameter can be any value of @ref FMC_ECC_Page_Size */ + + uint32_t TCLRSetupTime; /*!< Defines the number of HCLK cycles to configure the + delay between CLE low and RE low. + This parameter can be a value between Min_Data = 0 and Max_Data = 255 */ + + uint32_t TARSetupTime; /*!< Defines the number of HCLK cycles to configure the + delay between ALE low and RE low. 
+ This parameter can be a number between Min_Data = 0 and Max_Data = 255 */ +}FMC_NAND_InitTypeDef; + +/** + * @brief FMC NAND Timing parameters structure definition + */ +typedef struct +{ + uint32_t SetupTime; /*!< Defines the number of HCLK cycles to setup address before + the command assertion for NAND-Flash read or write access + to common/Attribute or I/O memory space (depending on + the memory space timing to be configured). + This parameter can be a value between Min_Data = 0 and Max_Data = 254 */ + + uint32_t WaitSetupTime; /*!< Defines the minimum number of HCLK cycles to assert the + command for NAND-Flash read or write access to + common/Attribute or I/O memory space (depending on the + memory space timing to be configured). + This parameter can be a number between Min_Data = 0 and Max_Data = 254 */ + + uint32_t HoldSetupTime; /*!< Defines the number of HCLK clock cycles to hold address + (and data for write access) after the command de-assertion + for NAND-Flash read or write access to common/Attribute + or I/O memory space (depending on the memory space timing + to be configured). + This parameter can be a number between Min_Data = 0 and Max_Data = 254 */ + + uint32_t HiZSetupTime; /*!< Defines the number of HCLK clock cycles during which the + data bus is kept in HiZ after the start of a NAND-Flash + write access to common/Attribute or I/O memory space (depending + on the memory space timing to be configured). + This parameter can be a number between Min_Data = 0 and Max_Data = 254 */ +}FMC_NAND_PCC_TimingTypeDef; + +/** + * @brief FMC SDRAM Configuration Structure definition + */ +typedef struct +{ + uint32_t SDBank; /*!< Specifies the SDRAM memory device that will be used. + This parameter can be a value of @ref FMC_SDRAM_Bank */ + + uint32_t ColumnBitsNumber; /*!< Defines the number of bits of column address. + This parameter can be a value of @ref FMC_SDRAM_Column_Bits_number. */ + + uint32_t RowBitsNumber; /*!< Defines the number of bits of column address. + This parameter can be a value of @ref FMC_SDRAM_Row_Bits_number. */ + + uint32_t MemoryDataWidth; /*!< Defines the memory device width. + This parameter can be a value of @ref FMC_SDRAM_Memory_Bus_Width. */ + + uint32_t InternalBankNumber; /*!< Defines the number of the device's internal banks. + This parameter can be of @ref FMC_SDRAM_Internal_Banks_Number. */ + + uint32_t CASLatency; /*!< Defines the SDRAM CAS latency in number of memory clock cycles. + This parameter can be a value of @ref FMC_SDRAM_CAS_Latency. */ + + uint32_t WriteProtection; /*!< Enables the SDRAM device to be accessed in write mode. + This parameter can be a value of @ref FMC_SDRAM_Write_Protection. */ + + uint32_t SDClockPeriod; /*!< Define the SDRAM Clock Period for both SDRAM devices and they allow + to disable the clock before changing frequency. + This parameter can be a value of @ref FMC_SDRAM_Clock_Period. */ + + uint32_t ReadBurst; /*!< This bit enable the SDRAM controller to anticipate the next read + commands during the CAS latency and stores data in the Read FIFO. + This parameter can be a value of @ref FMC_SDRAM_Read_Burst. */ + + uint32_t ReadPipeDelay; /*!< Define the delay in system clock cycles on read data path. + This parameter can be a value of @ref FMC_SDRAM_Read_Pipe_Delay. 
*/ +}FMC_SDRAM_InitTypeDef; + +/** + * @brief FMC SDRAM Timing parameters structure definition + */ +typedef struct +{ + uint32_t LoadToActiveDelay; /*!< Defines the delay between a Load Mode Register command and + an active or Refresh command in number of memory clock cycles. + This parameter can be a value between Min_Data = 1 and Max_Data = 16 */ + + uint32_t ExitSelfRefreshDelay; /*!< Defines the delay from releasing the self refresh command to + issuing the Activate command in number of memory clock cycles. + This parameter can be a value between Min_Data = 1 and Max_Data = 16 */ + + uint32_t SelfRefreshTime; /*!< Defines the minimum Self Refresh period in number of memory clock + cycles. + This parameter can be a value between Min_Data = 1 and Max_Data = 16 */ + + uint32_t RowCycleDelay; /*!< Defines the delay between the Refresh command and the Activate command + and the delay between two consecutive Refresh commands in number of + memory clock cycles. + This parameter can be a value between Min_Data = 1 and Max_Data = 16 */ + + uint32_t WriteRecoveryTime; /*!< Defines the Write recovery Time in number of memory clock cycles. + This parameter can be a value between Min_Data = 1 and Max_Data = 16 */ + + uint32_t RPDelay; /*!< Defines the delay between a Precharge Command and an other command + in number of memory clock cycles. + This parameter can be a value between Min_Data = 1 and Max_Data = 16 */ + + uint32_t RCDDelay; /*!< Defines the delay between the Activate Command and a Read/Write + command in number of memory clock cycles. + This parameter can be a value between Min_Data = 1 and Max_Data = 16 */ +}FMC_SDRAM_TimingTypeDef; + +/** + * @brief SDRAM command parameters structure definition + */ +typedef struct +{ + uint32_t CommandMode; /*!< Defines the command issued to the SDRAM device. + This parameter can be a value of @ref FMC_SDRAM_Command_Mode. */ + + uint32_t CommandTarget; /*!< Defines which device (1 or 2) the command will be issued to. + This parameter can be a value of @ref FMC_SDRAM_Command_Target. */ + + uint32_t AutoRefreshNumber; /*!< Defines the number of consecutive auto refresh command issued + in auto refresh mode. 
+ This parameter can be a value between Min_Data = 1 and Max_Data = 16 */ + uint32_t ModeRegisterDefinition; /*!< Defines the SDRAM Mode register content */ +}FMC_SDRAM_CommandTypeDef; +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @addtogroup FMC_LL_Exported_Constants FMC Low Layer Exported Constants + * @{ + */ + +/** @defgroup FMC_LL_NOR_SRAM_Controller FMC NOR/SRAM Controller + * @{ + */ + +/** @defgroup FMC_NORSRAM_Bank FMC NOR/SRAM Bank + * @{ + */ +#define FMC_NORSRAM_BANK1 ((uint32_t)0x00000000U) +#define FMC_NORSRAM_BANK2 ((uint32_t)0x00000002U) +#define FMC_NORSRAM_BANK3 ((uint32_t)0x00000004U) +#define FMC_NORSRAM_BANK4 ((uint32_t)0x00000006U) +/** + * @} + */ + +/** @defgroup FMC_Data_Address_Bus_Multiplexing FMC Data Address Bus Multiplexing + * @{ + */ +#define FMC_DATA_ADDRESS_MUX_DISABLE ((uint32_t)0x00000000U) +#define FMC_DATA_ADDRESS_MUX_ENABLE ((uint32_t)0x00000002U) +/** + * @} + */ + +/** @defgroup FMC_Memory_Type FMC Memory Type + * @{ + */ +#define FMC_MEMORY_TYPE_SRAM ((uint32_t)0x00000000U) +#define FMC_MEMORY_TYPE_PSRAM ((uint32_t)0x00000004U) +#define FMC_MEMORY_TYPE_NOR ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup FMC_NORSRAM_Data_Width FMC NORSRAM Data Width + * @{ + */ +#define FMC_NORSRAM_MEM_BUS_WIDTH_8 ((uint32_t)0x00000000U) +#define FMC_NORSRAM_MEM_BUS_WIDTH_16 ((uint32_t)0x00000010U) +#define FMC_NORSRAM_MEM_BUS_WIDTH_32 ((uint32_t)0x00000020U) +/** + * @} + */ + +/** @defgroup FMC_NORSRAM_Flash_Access FMC NOR/SRAM Flash Access + * @{ + */ +#define FMC_NORSRAM_FLASH_ACCESS_ENABLE ((uint32_t)0x00000040U) +#define FMC_NORSRAM_FLASH_ACCESS_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup FMC_Burst_Access_Mode FMC Burst Access Mode + * @{ + */ +#define FMC_BURST_ACCESS_MODE_DISABLE ((uint32_t)0x00000000U) +#define FMC_BURST_ACCESS_MODE_ENABLE ((uint32_t)0x00000100U) +/** + * @} + */ + +/** @defgroup FMC_Wait_Signal_Polarity FMC Wait Signal Polarity + * @{ + */ +#define FMC_WAIT_SIGNAL_POLARITY_LOW ((uint32_t)0x00000000U) +#define FMC_WAIT_SIGNAL_POLARITY_HIGH ((uint32_t)0x00000200U) +/** + * @} + */ + +/** @defgroup FMC_Wait_Timing FMC Wait Timing + * @{ + */ +#define FMC_WAIT_TIMING_BEFORE_WS ((uint32_t)0x00000000U) +#define FMC_WAIT_TIMING_DURING_WS ((uint32_t)0x00000800U) +/** + * @} + */ + +/** @defgroup FMC_Write_Operation FMC Write Operation + * @{ + */ +#define FMC_WRITE_OPERATION_DISABLE ((uint32_t)0x00000000U) +#define FMC_WRITE_OPERATION_ENABLE ((uint32_t)0x00001000U) +/** + * @} + */ + +/** @defgroup FMC_Wait_Signal FMC Wait Signal + * @{ + */ +#define FMC_WAIT_SIGNAL_DISABLE ((uint32_t)0x00000000U) +#define FMC_WAIT_SIGNAL_ENABLE ((uint32_t)0x00002000U) +/** + * @} + */ + +/** @defgroup FMC_Extended_Mode FMC Extended Mode + * @{ + */ +#define FMC_EXTENDED_MODE_DISABLE ((uint32_t)0x00000000U) +#define FMC_EXTENDED_MODE_ENABLE ((uint32_t)0x00004000U) +/** + * @} + */ + +/** @defgroup FMC_AsynchronousWait FMC Asynchronous Wait + * @{ + */ +#define FMC_ASYNCHRONOUS_WAIT_DISABLE ((uint32_t)0x00000000U) +#define FMC_ASYNCHRONOUS_WAIT_ENABLE ((uint32_t)0x00008000U) +/** + * @} + */ + +/** @defgroup FMC_Page_Size FMC Page Size + * @{ + */ +#define FMC_PAGE_SIZE_NONE ((uint32_t)0x00000000U) +#define FMC_PAGE_SIZE_128 ((uint32_t)FMC_BCR1_CPSIZE_0) +#define FMC_PAGE_SIZE_256 ((uint32_t)FMC_BCR1_CPSIZE_1) +#define FMC_PAGE_SIZE_512 ((uint32_t)(FMC_BCR1_CPSIZE_0 | FMC_BCR1_CPSIZE_1)) +#define FMC_PAGE_SIZE_1024 ((uint32_t)FMC_BCR1_CPSIZE_2) +/** + * @} + */ + +/** 
@defgroup FMC_Write_Burst FMC Write Burst + * @{ + */ +#define FMC_WRITE_BURST_DISABLE ((uint32_t)0x00000000U) +#define FMC_WRITE_BURST_ENABLE ((uint32_t)0x00080000U) +/** + * @} + */ + +/** @defgroup FMC_Continous_Clock FMC Continuous Clock + * @{ + */ +#define FMC_CONTINUOUS_CLOCK_SYNC_ONLY ((uint32_t)0x00000000U) +#define FMC_CONTINUOUS_CLOCK_SYNC_ASYNC ((uint32_t)0x00100000U) +/** + * @} + */ + +/** @defgroup FMC_Write_FIFO FMC Write FIFO + * @{ + */ +#define FMC_WRITE_FIFO_DISABLE ((uint32_t)FMC_BCR1_WFDIS) +#define FMC_WRITE_FIFO_ENABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup FMC_Access_Mode FMC Access Mode + * @{ + */ +#define FMC_ACCESS_MODE_A ((uint32_t)0x00000000U) +#define FMC_ACCESS_MODE_B ((uint32_t)0x10000000U) +#define FMC_ACCESS_MODE_C ((uint32_t)0x20000000U) +#define FMC_ACCESS_MODE_D ((uint32_t)0x30000000) +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup FMC_LL_NAND_Controller FMC NAND Controller + * @{ + */ +/** @defgroup FMC_NAND_Bank FMC NAND Bank + * @{ + */ +#define FMC_NAND_BANK3 ((uint32_t)0x00000100U) +/** + * @} + */ + +/** @defgroup FMC_Wait_feature FMC Wait feature + * @{ + */ +#define FMC_NAND_WAIT_FEATURE_DISABLE ((uint32_t)0x00000000U) +#define FMC_NAND_WAIT_FEATURE_ENABLE ((uint32_t)0x00000002U) +/** + * @} + */ + +/** @defgroup FMC_PCR_Memory_Type FMC PCR Memory Type + * @{ + */ +#define FMC_PCR_MEMORY_TYPE_NAND ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup FMC_NAND_Data_Width FMC NAND Data Width + * @{ + */ +#define FMC_NAND_MEM_BUS_WIDTH_8 ((uint32_t)0x00000000U) +#define FMC_NAND_MEM_BUS_WIDTH_16 ((uint32_t)0x00000010U) +/** + * @} + */ + +/** @defgroup FMC_ECC FMC ECC + * @{ + */ +#define FMC_NAND_ECC_DISABLE ((uint32_t)0x00000000U) +#define FMC_NAND_ECC_ENABLE ((uint32_t)0x00000040U) +/** + * @} + */ + +/** @defgroup FMC_ECC_Page_Size FMC ECC Page Size + * @{ + */ +#define FMC_NAND_ECC_PAGE_SIZE_256BYTE ((uint32_t)0x00000000U) +#define FMC_NAND_ECC_PAGE_SIZE_512BYTE ((uint32_t)0x00020000U) +#define FMC_NAND_ECC_PAGE_SIZE_1024BYTE ((uint32_t)0x00040000U) +#define FMC_NAND_ECC_PAGE_SIZE_2048BYTE ((uint32_t)0x00060000U) +#define FMC_NAND_ECC_PAGE_SIZE_4096BYTE ((uint32_t)0x00080000U) +#define FMC_NAND_ECC_PAGE_SIZE_8192BYTE ((uint32_t)0x000A0000U) +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup FMC_LL_SDRAM_Controller FMC SDRAM Controller + * @{ + */ +/** @defgroup FMC_SDRAM_Bank FMC SDRAM Bank + * @{ + */ +#define FMC_SDRAM_BANK1 ((uint32_t)0x00000000U) +#define FMC_SDRAM_BANK2 ((uint32_t)0x00000001U) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Column_Bits_number FMC SDRAM Column Bits number + * @{ + */ +#define FMC_SDRAM_COLUMN_BITS_NUM_8 ((uint32_t)0x00000000U) +#define FMC_SDRAM_COLUMN_BITS_NUM_9 ((uint32_t)0x00000001U) +#define FMC_SDRAM_COLUMN_BITS_NUM_10 ((uint32_t)0x00000002U) +#define FMC_SDRAM_COLUMN_BITS_NUM_11 ((uint32_t)0x00000003U) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Row_Bits_number FMC SDRAM Row Bits number + * @{ + */ +#define FMC_SDRAM_ROW_BITS_NUM_11 ((uint32_t)0x00000000U) +#define FMC_SDRAM_ROW_BITS_NUM_12 ((uint32_t)0x00000004U) +#define FMC_SDRAM_ROW_BITS_NUM_13 ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Memory_Bus_Width FMC SDRAM Memory Bus Width + * @{ + */ +#define FMC_SDRAM_MEM_BUS_WIDTH_8 ((uint32_t)0x00000000U) +#define FMC_SDRAM_MEM_BUS_WIDTH_16 ((uint32_t)0x00000010U) +#define FMC_SDRAM_MEM_BUS_WIDTH_32 ((uint32_t)0x00000020U) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Internal_Banks_Number FMC SDRAM Internal Banks Number + * @{ + */ +#define 
FMC_SDRAM_INTERN_BANKS_NUM_2 ((uint32_t)0x00000000U) +#define FMC_SDRAM_INTERN_BANKS_NUM_4 ((uint32_t)0x00000040U) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_CAS_Latency FMC SDRAM CAS Latency + * @{ + */ +#define FMC_SDRAM_CAS_LATENCY_1 ((uint32_t)0x00000080U) +#define FMC_SDRAM_CAS_LATENCY_2 ((uint32_t)0x00000100U) +#define FMC_SDRAM_CAS_LATENCY_3 ((uint32_t)0x00000180) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Write_Protection FMC SDRAM Write Protection + * @{ + */ +#define FMC_SDRAM_WRITE_PROTECTION_DISABLE ((uint32_t)0x00000000U) +#define FMC_SDRAM_WRITE_PROTECTION_ENABLE ((uint32_t)0x00000200U) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Clock_Period FMC SDRAM Clock Period + * @{ + */ +#define FMC_SDRAM_CLOCK_DISABLE ((uint32_t)0x00000000U) +#define FMC_SDRAM_CLOCK_PERIOD_2 ((uint32_t)0x00000800U) +#define FMC_SDRAM_CLOCK_PERIOD_3 ((uint32_t)0x00000C00) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Read_Burst FMC SDRAM Read Burst + * @{ + */ +#define FMC_SDRAM_RBURST_DISABLE ((uint32_t)0x00000000U) +#define FMC_SDRAM_RBURST_ENABLE ((uint32_t)0x00001000U) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Read_Pipe_Delay FMC SDRAM Read Pipe Delay + * @{ + */ +#define FMC_SDRAM_RPIPE_DELAY_0 ((uint32_t)0x00000000U) +#define FMC_SDRAM_RPIPE_DELAY_1 ((uint32_t)0x00002000U) +#define FMC_SDRAM_RPIPE_DELAY_2 ((uint32_t)0x00004000U) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Command_Mode FMC SDRAM Command Mode + * @{ + */ +#define FMC_SDRAM_CMD_NORMAL_MODE ((uint32_t)0x00000000U) +#define FMC_SDRAM_CMD_CLK_ENABLE ((uint32_t)0x00000001U) +#define FMC_SDRAM_CMD_PALL ((uint32_t)0x00000002U) +#define FMC_SDRAM_CMD_AUTOREFRESH_MODE ((uint32_t)0x00000003U) +#define FMC_SDRAM_CMD_LOAD_MODE ((uint32_t)0x00000004U) +#define FMC_SDRAM_CMD_SELFREFRESH_MODE ((uint32_t)0x00000005U) +#define FMC_SDRAM_CMD_POWERDOWN_MODE ((uint32_t)0x00000006U) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Command_Target FMC SDRAM Command Target + * @{ + */ +#define FMC_SDRAM_CMD_TARGET_BANK2 FMC_SDCMR_CTB2 +#define FMC_SDRAM_CMD_TARGET_BANK1 FMC_SDCMR_CTB1 +#define FMC_SDRAM_CMD_TARGET_BANK1_2 ((uint32_t)0x00000018U) +/** + * @} + */ + +/** @defgroup FMC_SDRAM_Mode_Status FMC SDRAM Mode Status + * @{ + */ +#define FMC_SDRAM_NORMAL_MODE ((uint32_t)0x00000000U) +#define FMC_SDRAM_SELF_REFRESH_MODE FMC_SDSR_MODES1_0 +#define FMC_SDRAM_POWER_DOWN_MODE FMC_SDSR_MODES1_1 +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup FMC_LL_Interrupt_definition FMC Low Layer Interrupt definition + * @{ + */ +#define FMC_IT_RISING_EDGE ((uint32_t)0x00000008U) +#define FMC_IT_LEVEL ((uint32_t)0x00000010U) +#define FMC_IT_FALLING_EDGE ((uint32_t)0x00000020U) +#define FMC_IT_REFRESH_ERROR ((uint32_t)0x00004000U) +/** + * @} + */ + +/** @defgroup FMC_LL_Flag_definition FMC Low Layer Flag definition + * @{ + */ +#define FMC_FLAG_RISING_EDGE ((uint32_t)0x00000001U) +#define FMC_FLAG_LEVEL ((uint32_t)0x00000002U) +#define FMC_FLAG_FALLING_EDGE ((uint32_t)0x00000004U) +#define FMC_FLAG_FEMPT ((uint32_t)0x00000040U) +#define FMC_SDRAM_FLAG_REFRESH_IT FMC_SDSR_RE +#define FMC_SDRAM_FLAG_BUSY FMC_SDSR_BUSY +#define FMC_SDRAM_FLAG_REFRESH_ERROR FMC_SDRTR_CRE +/** + * @} + */ +/** + * @} + */ + +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/** @defgroup FMC_LL_Private_Macros FMC_LL Private Macros + * @{ + */ + +/** @defgroup FMC_LL_NOR_Macros FMC NOR/SRAM Macros + * @brief macros to handle NOR device enable/disable and read/write operations + * @{ + */ + +/** + * @brief Enable the NORSRAM device access. 
+ * @param __INSTANCE__ FMC_NORSRAM Instance + * @param __BANK__ FMC_NORSRAM Bank + * @retval None + */ +#define __FMC_NORSRAM_ENABLE(__INSTANCE__, __BANK__) ((__INSTANCE__)->BTCR[(__BANK__)] |= FMC_BCR1_MBKEN) + +/** + * @brief Disable the NORSRAM device access. + * @param __INSTANCE__ FMC_NORSRAM Instance + * @param __BANK__ FMC_NORSRAM Bank + * @retval None + */ +#define __FMC_NORSRAM_DISABLE(__INSTANCE__, __BANK__) ((__INSTANCE__)->BTCR[(__BANK__)] &= ~FMC_BCR1_MBKEN) + +/** + * @} + */ + +/** @defgroup FMC_LL_NAND_Macros FMC NAND Macros + * @brief macros to handle NAND device enable/disable + * @{ + */ + +/** + * @brief Enable the NAND device access. + * @param __INSTANCE__ FMC_NAND Instance + * @retval None + */ +#define __FMC_NAND_ENABLE(__INSTANCE__) ((__INSTANCE__)->PCR |= FMC_PCR_PBKEN) + +/** + * @brief Disable the NAND device access. + * @param __INSTANCE__ FMC_NAND Instance + * @retval None + */ +#define __FMC_NAND_DISABLE(__INSTANCE__) ((__INSTANCE__)->PCR &= ~FMC_PCR_PBKEN) + +/** + * @} + */ + +/** @defgroup FMC_Interrupt FMC Interrupt + * @brief macros to handle FMC interrupts + * @{ + */ + +/** + * @brief Enable the NAND device interrupt. + * @param __INSTANCE__ FMC_NAND instance + * @param __INTERRUPT__ FMC_NAND interrupt + * This parameter can be any combination of the following values: + * @arg FMC_IT_RISING_EDGE: Interrupt rising edge. + * @arg FMC_IT_LEVEL: Interrupt level. + * @arg FMC_IT_FALLING_EDGE: Interrupt falling edge. + * @retval None + */ +#define __FMC_NAND_ENABLE_IT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->SR |= (__INTERRUPT__)) + +/** + * @brief Disable the NAND device interrupt. + * @param __INSTANCE__ FMC_NAND Instance + * @param __INTERRUPT__ FMC_NAND interrupt + * This parameter can be any combination of the following values: + * @arg FMC_IT_RISING_EDGE: Interrupt rising edge. + * @arg FMC_IT_LEVEL: Interrupt level. + * @arg FMC_IT_FALLING_EDGE: Interrupt falling edge. + * @retval None + */ +#define __FMC_NAND_DISABLE_IT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->SR &= ~(__INTERRUPT__)) + +/** + * @brief Get flag status of the NAND device. + * @param __INSTANCE__ FMC_NAND Instance + * @param __BANK__ FMC_NAND Bank + * @param __FLAG__ FMC_NAND flag + * This parameter can be any combination of the following values: + * @arg FMC_FLAG_RISING_EDGE: Interrupt rising edge flag. + * @arg FMC_FLAG_LEVEL: Interrupt level edge flag. + * @arg FMC_FLAG_FALLING_EDGE: Interrupt falling edge flag. + * @arg FMC_FLAG_FEMPT: FIFO empty flag. + * @retval The state of FLAG (SET or RESET). + */ +#define __FMC_NAND_GET_FLAG(__INSTANCE__, __BANK__, __FLAG__) (((__INSTANCE__)->SR &(__FLAG__)) == (__FLAG__)) + +/** + * @brief Clear flag status of the NAND device. + * @param __INSTANCE__ FMC_NAND Instance + * @param __FLAG__ FMC_NAND flag + * This parameter can be any combination of the following values: + * @arg FMC_FLAG_RISING_EDGE: Interrupt rising edge flag. + * @arg FMC_FLAG_LEVEL: Interrupt level edge flag. + * @arg FMC_FLAG_FALLING_EDGE: Interrupt falling edge flag. + * @arg FMC_FLAG_FEMPT: FIFO empty flag. + * @retval None + */ +#define __FMC_NAND_CLEAR_FLAG(__INSTANCE__, __FLAG__) ((__INSTANCE__)->SR &= ~(__FLAG__)) + +/** + * @brief Enable the SDRAM device interrupt. 
+ * @param __INSTANCE__ FMC_SDRAM instance + * @param __INTERRUPT__ FMC_SDRAM interrupt + * This parameter can be any combination of the following values: + * @arg FMC_IT_REFRESH_ERROR: Interrupt refresh error + * @retval None + */ +#define __FMC_SDRAM_ENABLE_IT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->SDRTR |= (__INTERRUPT__)) + +/** + * @brief Disable the SDRAM device interrupt. + * @param __INSTANCE__ FMC_SDRAM instance + * @param __INTERRUPT__ FMC_SDRAM interrupt + * This parameter can be any combination of the following values: + * @arg FMC_IT_REFRESH_ERROR: Interrupt refresh error + * @retval None + */ +#define __FMC_SDRAM_DISABLE_IT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->SDRTR &= ~(__INTERRUPT__)) + +/** + * @brief Get flag status of the SDRAM device. + * @param __INSTANCE__ FMC_SDRAM instance + * @param __FLAG__ FMC_SDRAM flag + * This parameter can be any combination of the following values: + * @arg FMC_SDRAM_FLAG_REFRESH_IT: Interrupt refresh error. + * @arg FMC_SDRAM_FLAG_BUSY: SDRAM busy flag. + * @arg FMC_SDRAM_FLAG_REFRESH_ERROR: Refresh error flag. + * @retval The state of FLAG (SET or RESET). + */ +#define __FMC_SDRAM_GET_FLAG(__INSTANCE__, __FLAG__) (((__INSTANCE__)->SDSR &(__FLAG__)) == (__FLAG__)) + +/** + * @brief Clear flag status of the SDRAM device. + * @param __INSTANCE__ FMC_SDRAM instance + * @param __FLAG__ FMC_SDRAM flag + * This parameter can be any combination of the following values: + * @arg FMC_SDRAM_FLAG_REFRESH_ERROR + * @retval None + */ +#define __FMC_SDRAM_CLEAR_FLAG(__INSTANCE__, __FLAG__) ((__INSTANCE__)->SDRTR |= (__FLAG__)) +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup FMC_LL_Private_Functions FMC LL Private Functions + * @{ + */ + +/** @defgroup FMC_LL_NORSRAM NOR SRAM + * @{ + */ +/** @defgroup FMC_LL_NORSRAM_Private_Functions_Group1 NOR SRAM Initialization/de-initialization functions + * @{ + */ +HAL_StatusTypeDef FMC_NORSRAM_Init(FMC_NORSRAM_TypeDef *Device, FMC_NORSRAM_InitTypeDef *Init); +HAL_StatusTypeDef FMC_NORSRAM_Timing_Init(FMC_NORSRAM_TypeDef *Device, FMC_NORSRAM_TimingTypeDef *Timing, uint32_t Bank); +HAL_StatusTypeDef FMC_NORSRAM_Extended_Timing_Init(FMC_NORSRAM_EXTENDED_TypeDef *Device, FMC_NORSRAM_TimingTypeDef *Timing, uint32_t Bank, uint32_t ExtendedMode); +HAL_StatusTypeDef FMC_NORSRAM_DeInit(FMC_NORSRAM_TypeDef *Device, FMC_NORSRAM_EXTENDED_TypeDef *ExDevice, uint32_t Bank); +/** + * @} + */ + +/** @defgroup FMC_LL_NORSRAM_Private_Functions_Group2 NOR SRAM Control functions + * @{ + */ +HAL_StatusTypeDef FMC_NORSRAM_WriteOperation_Enable(FMC_NORSRAM_TypeDef *Device, uint32_t Bank); +HAL_StatusTypeDef FMC_NORSRAM_WriteOperation_Disable(FMC_NORSRAM_TypeDef *Device, uint32_t Bank); +/** + * @} + */ +/** + * @} + */ + +/** @defgroup FMC_LL_NAND NAND + * @{ + */ +/** @defgroup FMC_LL_NAND_Private_Functions_Group1 NAND Initialization/de-initialization functions + * @{ + */ +HAL_StatusTypeDef FMC_NAND_Init(FMC_NAND_TypeDef *Device, FMC_NAND_InitTypeDef *Init); +HAL_StatusTypeDef FMC_NAND_CommonSpace_Timing_Init(FMC_NAND_TypeDef *Device, FMC_NAND_PCC_TimingTypeDef *Timing, uint32_t Bank); +HAL_StatusTypeDef FMC_NAND_AttributeSpace_Timing_Init(FMC_NAND_TypeDef *Device, FMC_NAND_PCC_TimingTypeDef *Timing, uint32_t Bank); +HAL_StatusTypeDef FMC_NAND_DeInit(FMC_NAND_TypeDef *Device, uint32_t Bank); +/** + * @} + */ + +/** @defgroup FMC_LL_NAND_Private_Functions_Group2 NAND Control functions + * @{ + */ +HAL_StatusTypeDef 
FMC_NAND_ECC_Enable(FMC_NAND_TypeDef *Device, uint32_t Bank); +HAL_StatusTypeDef FMC_NAND_ECC_Disable(FMC_NAND_TypeDef *Device, uint32_t Bank); +HAL_StatusTypeDef FMC_NAND_GetECC(FMC_NAND_TypeDef *Device, uint32_t *ECCval, uint32_t Bank, uint32_t Timeout); +/** + * @} + */ + +/** @defgroup FMC_LL_SDRAM SDRAM + * @{ + */ +/** @defgroup FMC_LL_SDRAM_Private_Functions_Group1 SDRAM Initialization/de-initialization functions + * @{ + */ +HAL_StatusTypeDef FMC_SDRAM_Init(FMC_SDRAM_TypeDef *Device, FMC_SDRAM_InitTypeDef *Init); +HAL_StatusTypeDef FMC_SDRAM_Timing_Init(FMC_SDRAM_TypeDef *Device, FMC_SDRAM_TimingTypeDef *Timing, uint32_t Bank); +HAL_StatusTypeDef FMC_SDRAM_DeInit(FMC_SDRAM_TypeDef *Device, uint32_t Bank); + +/** + * @} + */ + +/** @defgroup FMC_LL_SDRAM_Private_Functions_Group2 SDRAM Control functions + * @{ + */ +HAL_StatusTypeDef FMC_SDRAM_WriteProtection_Enable(FMC_SDRAM_TypeDef *Device, uint32_t Bank); +HAL_StatusTypeDef FMC_SDRAM_WriteProtection_Disable(FMC_SDRAM_TypeDef *Device, uint32_t Bank); +HAL_StatusTypeDef FMC_SDRAM_SendCommand(FMC_SDRAM_TypeDef *Device, FMC_SDRAM_CommandTypeDef *Command, uint32_t Timeout); +HAL_StatusTypeDef FMC_SDRAM_ProgramRefreshRate(FMC_SDRAM_TypeDef *Device, uint32_t RefreshRate); +HAL_StatusTypeDef FMC_SDRAM_SetAutoRefreshNumber(FMC_SDRAM_TypeDef *Device, uint32_t AutoRefreshNumber); +uint32_t FMC_SDRAM_GetModeStatus(FMC_SDRAM_TypeDef *Device, uint32_t Bank); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_LL_FMC_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_sdmmc.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_sdmmc.h new file mode 100644 index 000000000..3eaef86c9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_sdmmc.h @@ -0,0 +1,1017 @@ +/** + ****************************************************************************** + * @file stm32f7xx_ll_sdmmc.h + * @author MCD Application Team + * @brief Header file of SDMMC HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_LL_SDMMC_H +#define STM32F7xx_LL_SDMMC_H + +#ifdef __cplusplus + extern "C" { +#endif + +#if defined(SDMMC1) + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_Driver + * @{ + */ + +/** @addtogroup SDMMC_LL + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup SDMMC_LL_Exported_Types SDMMC_LL Exported Types + * @{ + */ + +/** + * @brief SDMMC Configuration Structure definition + */ +typedef struct +{ + uint32_t ClockEdge; /*!< Specifies the clock transition on which the bit capture is made. + This parameter can be a value of @ref SDMMC_LL_Clock_Edge */ + + uint32_t ClockBypass; /*!< Specifies whether the SDMMC Clock divider bypass is + enabled or disabled. + This parameter can be a value of @ref SDMMC_LL_Clock_Bypass */ + + uint32_t ClockPowerSave; /*!< Specifies whether SDMMC Clock output is enabled or + disabled when the bus is idle. + This parameter can be a value of @ref SDMMC_LL_Clock_Power_Save */ + + uint32_t BusWide; /*!< Specifies the SDMMC bus width. + This parameter can be a value of @ref SDMMC_LL_Bus_Wide */ + + uint32_t HardwareFlowControl; /*!< Specifies whether the SDMMC hardware flow control is enabled or disabled. + This parameter can be a value of @ref SDMMC_LL_Hardware_Flow_Control */ + + uint32_t ClockDiv; /*!< Specifies the clock frequency of the SDMMC controller. + This parameter can be a value between Min_Data = 0 and Max_Data = 255 */ + +}SDMMC_InitTypeDef; + + +/** + * @brief SDMMC Command Control structure + */ +typedef struct +{ + uint32_t Argument; /*!< Specifies the SDMMC command argument which is sent + to a card as part of a command message. If a command + contains an argument, it must be loaded into this register + before writing the command to the command register. */ + + uint32_t CmdIndex; /*!< Specifies the SDMMC command index. It must be Min_Data = 0 and + Max_Data = 64 */ + + uint32_t Response; /*!< Specifies the SDMMC response type. + This parameter can be a value of @ref SDMMC_LL_Response_Type */ + + uint32_t WaitForInterrupt; /*!< Specifies whether SDMMC wait for interrupt request is + enabled or disabled. + This parameter can be a value of @ref SDMMC_LL_Wait_Interrupt_State */ + + uint32_t CPSM; /*!< Specifies whether SDMMC Command path state machine (CPSM) + is enabled or disabled. + This parameter can be a value of @ref SDMMC_LL_CPSM_State */ +}SDMMC_CmdInitTypeDef; + + +/** + * @brief SDMMC Data Control structure + */ +typedef struct +{ + uint32_t DataTimeOut; /*!< Specifies the data timeout period in card bus clock periods. */ + + uint32_t DataLength; /*!< Specifies the number of data bytes to be transferred. */ + + uint32_t DataBlockSize; /*!< Specifies the data block size for block transfer. + This parameter can be a value of @ref SDMMC_LL_Data_Block_Size */ + + uint32_t TransferDir; /*!< Specifies the data transfer direction, whether the transfer + is a read or write. 
+ This parameter can be a value of @ref SDMMC_LL_Transfer_Direction */ + + uint32_t TransferMode; /*!< Specifies whether data transfer is in stream or block mode. + This parameter can be a value of @ref SDMMC_LL_Transfer_Type */ + + uint32_t DPSM; /*!< Specifies whether SDMMC Data path state machine (DPSM) + is enabled or disabled. + This parameter can be a value of @ref SDMMC_LL_DPSM_State */ +}SDMMC_DataInitTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup SDMMC_LL_Exported_Constants SDMMC_LL Exported Constants + * @{ + */ +#define SDMMC_ERROR_NONE 0x00000000U /*!< No error */ +#define SDMMC_ERROR_CMD_CRC_FAIL 0x00000001U /*!< Command response received (but CRC check failed) */ +#define SDMMC_ERROR_DATA_CRC_FAIL 0x00000002U /*!< Data block sent/received (CRC check failed) */ +#define SDMMC_ERROR_CMD_RSP_TIMEOUT 0x00000004U /*!< Command response timeout */ +#define SDMMC_ERROR_DATA_TIMEOUT 0x00000008U /*!< Data timeout */ +#define SDMMC_ERROR_TX_UNDERRUN 0x00000010U /*!< Transmit FIFO underrun */ +#define SDMMC_ERROR_RX_OVERRUN 0x00000020U /*!< Receive FIFO overrun */ +#define SDMMC_ERROR_ADDR_MISALIGNED 0x00000040U /*!< Misaligned address */ +#define SDMMC_ERROR_BLOCK_LEN_ERR 0x00000080U /*!< Transferred block length is not allowed for the card or the + number of transferred bytes does not match the block length */ +#define SDMMC_ERROR_ERASE_SEQ_ERR 0x00000100U /*!< An error in the sequence of erase command occurs */ +#define SDMMC_ERROR_BAD_ERASE_PARAM 0x00000200U /*!< An invalid selection for erase groups */ +#define SDMMC_ERROR_WRITE_PROT_VIOLATION 0x00000400U /*!< Attempt to program a write protect block */ +#define SDMMC_ERROR_LOCK_UNLOCK_FAILED 0x00000800U /*!< Sequence or password error has been detected in unlock + command or if there was an attempt to access a locked card */ +#define SDMMC_ERROR_COM_CRC_FAILED 0x00001000U /*!< CRC check of the previous command failed */ +#define SDMMC_ERROR_ILLEGAL_CMD 0x00002000U /*!< Command is not legal for the card state */ +#define SDMMC_ERROR_CARD_ECC_FAILED 0x00004000U /*!< Card internal ECC was applied but failed to correct the data */ +#define SDMMC_ERROR_CC_ERR 0x00008000U /*!< Internal card controller error */ +#define SDMMC_ERROR_GENERAL_UNKNOWN_ERR 0x00010000U /*!< General or unknown error */ +#define SDMMC_ERROR_STREAM_READ_UNDERRUN 0x00020000U /*!< The card could not sustain data reading in stream rmode */ +#define SDMMC_ERROR_STREAM_WRITE_OVERRUN 0x00040000U /*!< The card could not sustain data programming in stream mode */ +#define SDMMC_ERROR_CID_CSD_OVERWRITE 0x00080000U /*!< CID/CSD overwrite error */ +#define SDMMC_ERROR_WP_ERASE_SKIP 0x00100000U /*!< Only partial address space was erased */ +#define SDMMC_ERROR_CARD_ECC_DISABLED 0x00200000U /*!< Command has been executed without using internal ECC */ +#define SDMMC_ERROR_ERASE_RESET 0x00400000U /*!< Erase sequence was cleared before executing because an out + of erase sequence command was received */ +#define SDMMC_ERROR_AKE_SEQ_ERR 0x00800000U /*!< Error in sequence of authentication */ +#define SDMMC_ERROR_INVALID_VOLTRANGE 0x01000000U /*!< Error in case of invalid voltage range */ +#define SDMMC_ERROR_ADDR_OUT_OF_RANGE 0x02000000U /*!< Error when addressed block is out of range */ +#define SDMMC_ERROR_REQUEST_NOT_APPLICABLE 0x04000000U /*!< Error when command request is not applicable */ +#define SDMMC_ERROR_INVALID_PARAMETER 0x08000000U /*!< the used parameter is not valid */ +#define 
SDMMC_ERROR_UNSUPPORTED_FEATURE 0x10000000U /*!< Error when feature is not insupported */ +#define SDMMC_ERROR_BUSY 0x20000000U /*!< Error when transfer process is busy */ +#define SDMMC_ERROR_DMA 0x40000000U /*!< Error while DMA transfer */ +#define SDMMC_ERROR_TIMEOUT 0x80000000U /*!< Timeout error */ + +/** + * @brief SDMMC Commands Index + */ +#define SDMMC_CMD_GO_IDLE_STATE 0U /*!< Resets the SD memory card. */ +#define SDMMC_CMD_SEND_OP_COND 1U /*!< Sends host capacity support information and activates the card's initialization process. */ +#define SDMMC_CMD_ALL_SEND_CID 2U /*!< Asks any card connected to the host to send the CID numbers on the CMD line. */ +#define SDMMC_CMD_SET_REL_ADDR 3U /*!< Asks the card to publish a new relative address (RCA). */ +#define SDMMC_CMD_SET_DSR 4U /*!< Programs the DSR of all cards. */ +#define SDMMC_CMD_SDMMC_SEN_OP_COND 5U /*!< Sends host capacity support information (HCS) and asks the accessed card to send its + operating condition register (OCR) content in the response on the CMD line. */ +#define SDMMC_CMD_HS_SWITCH 6U /*!< Checks switchable function (mode 0) and switch card function (mode 1). */ +#define SDMMC_CMD_SEL_DESEL_CARD 7U /*!< Selects the card by its own relative address and gets deselected by any other address */ +#define SDMMC_CMD_HS_SEND_EXT_CSD 8U /*!< Sends SD Memory Card interface condition, which includes host supply voltage information + and asks the card whether card supports voltage. */ +#define SDMMC_CMD_SEND_CSD 9U /*!< Addressed card sends its card specific data (CSD) on the CMD line. */ +#define SDMMC_CMD_SEND_CID 10U /*!< Addressed card sends its card identification (CID) on the CMD line. */ +#define SDMMC_CMD_READ_DAT_UNTIL_STOP 11U /*!< SD card doesn't support it. */ +#define SDMMC_CMD_STOP_TRANSMISSION 12U /*!< Forces the card to stop transmission. */ +#define SDMMC_CMD_SEND_STATUS 13U /*!< Addressed card sends its status register. */ +#define SDMMC_CMD_HS_BUSTEST_READ 14U /*!< Reserved */ +#define SDMMC_CMD_GO_INACTIVE_STATE 15U /*!< Sends an addressed card into the inactive state. */ +#define SDMMC_CMD_SET_BLOCKLEN 16U /*!< Sets the block length (in bytes for SDSC) for all following block commands + (read, write, lock). Default block length is fixed to 512 Bytes. Not effective + for SDHS and SDXC. */ +#define SDMMC_CMD_READ_SINGLE_BLOCK 17U /*!< Reads single block of size selected by SET_BLOCKLEN in case of SDSC, and a block of + fixed 512 bytes in case of SDHC and SDXC. */ +#define SDMMC_CMD_READ_MULT_BLOCK 18U /*!< Continuously transfers data blocks from card to host until interrupted by + STOP_TRANSMISSION command. */ +#define SDMMC_CMD_HS_BUSTEST_WRITE 19U /*!< 64 bytes tuning pattern is sent for SDR50 and SDR104. */ +#define SDMMC_CMD_WRITE_DAT_UNTIL_STOP 20U /*!< Speed class control command. */ +#define SDMMC_CMD_SET_BLOCK_COUNT 23U /*!< Specify block count for CMD18 and CMD25. */ +#define SDMMC_CMD_WRITE_SINGLE_BLOCK 24U /*!< Writes single block of size selected by SET_BLOCKLEN in case of SDSC, and a block of + fixed 512 bytes in case of SDHC and SDXC. */ +#define SDMMC_CMD_WRITE_MULT_BLOCK 25U /*!< Continuously writes blocks of data until a STOP_TRANSMISSION follows. */ +#define SDMMC_CMD_PROG_CID 26U /*!< Reserved for manufacturers. */ +#define SDMMC_CMD_PROG_CSD 27U /*!< Programming of the programmable bits of the CSD. */ +#define SDMMC_CMD_SET_WRITE_PROT 28U /*!< Sets the write protection bit of the addressed group. 
*/ +#define SDMMC_CMD_CLR_WRITE_PROT 29U /*!< Clears the write protection bit of the addressed group. */ +#define SDMMC_CMD_SEND_WRITE_PROT 30U /*!< Asks the card to send the status of the write protection bits. */ +#define SDMMC_CMD_SD_ERASE_GRP_START 32U /*!< Sets the address of the first write block to be erased. (For SD card only). */ +#define SDMMC_CMD_SD_ERASE_GRP_END 33U /*!< Sets the address of the last write block of the continuous range to be erased. */ +#define SDMMC_CMD_ERASE_GRP_START 35U /*!< Sets the address of the first write block to be erased. Reserved for each command + system set by switch function command (CMD6). */ +#define SDMMC_CMD_ERASE_GRP_END 36U /*!< Sets the address of the last write block of the continuous range to be erased. + Reserved for each command system set by switch function command (CMD6). */ +#define SDMMC_CMD_ERASE 38U /*!< Reserved for SD security applications. */ +#define SDMMC_CMD_FAST_IO 39U /*!< SD card doesn't support it (Reserved). */ +#define SDMMC_CMD_GO_IRQ_STATE 40U /*!< SD card doesn't support it (Reserved). */ +#define SDMMC_CMD_LOCK_UNLOCK 42U /*!< Sets/resets the password or lock/unlock the card. The size of the data block is set by + the SET_BLOCK_LEN command. */ +#define SDMMC_CMD_APP_CMD 55U /*!< Indicates to the card that the next command is an application specific command rather + than a standard command. */ +#define SDMMC_CMD_GEN_CMD 56U /*!< Used either to transfer a data block to the card or to get a data block from the card + for general purpose/application specific commands. */ +#define SDMMC_CMD_NO_CMD 64U /*!< No command */ + +/** + * @brief Following commands are SD Card Specific commands. + * SDMMC_APP_CMD should be sent before sending these commands. + */ +#define SDMMC_CMD_APP_SD_SET_BUSWIDTH 6U /*!< (ACMD6) Defines the data bus width to be used for data transfer. The allowed data bus + widths are given in SCR register. */ +#define SDMMC_CMD_SD_APP_STATUS 13U /*!< (ACMD13) Sends the SD status. */ +#define SDMMC_CMD_SD_APP_SEND_NUM_WRITE_BLOCKS 22U /*!< (ACMD22) Sends the number of the written (without errors) write blocks. Responds with + 32bit+CRC data block. */ +#define SDMMC_CMD_SD_APP_OP_COND 41U /*!< (ACMD41) Sends host capacity support information (HCS) and asks the accessed card to + send its operating condition register (OCR) content in the response on the CMD line. */ +#define SDMMC_CMD_SD_APP_SET_CLR_CARD_DETECT 42U /*!< (ACMD42) Connect/Disconnect the 50 KOhm pull-up resistor on CD/DAT3 (pin 1) of the card */ +#define SDMMC_CMD_SD_APP_SEND_SCR 51U /*!< Reads the SD Configuration Register (SCR). */ +#define SDMMC_CMD_SDMMC_RW_DIRECT 52U /*!< For SD I/O card only, reserved for security specification. */ +#define SDMMC_CMD_SDMMC_RW_EXTENDED 53U /*!< For SD I/O card only, reserved for security specification. */ + +/** + * @brief Following commands are SD Card Specific security commands. + * SDMMC_CMD_APP_CMD should be sent before sending these commands. 
+ */ +#define SDMMC_CMD_SD_APP_GET_MKB 43U +#define SDMMC_CMD_SD_APP_GET_MID 44U +#define SDMMC_CMD_SD_APP_SET_CER_RN1 45U +#define SDMMC_CMD_SD_APP_GET_CER_RN2 46U +#define SDMMC_CMD_SD_APP_SET_CER_RES2 47U +#define SDMMC_CMD_SD_APP_GET_CER_RES1 48U +#define SDMMC_CMD_SD_APP_SECURE_READ_MULTIPLE_BLOCK 18U +#define SDMMC_CMD_SD_APP_SECURE_WRITE_MULTIPLE_BLOCK 25U +#define SDMMC_CMD_SD_APP_SECURE_ERASE 38U +#define SDMMC_CMD_SD_APP_CHANGE_SECURE_AREA 49U +#define SDMMC_CMD_SD_APP_SECURE_WRITE_MKB 48U + +/** + * @brief Masks for errors Card Status R1 (OCR Register) + */ +#define SDMMC_OCR_ADDR_OUT_OF_RANGE 0x80000000U +#define SDMMC_OCR_ADDR_MISALIGNED 0x40000000U +#define SDMMC_OCR_BLOCK_LEN_ERR 0x20000000U +#define SDMMC_OCR_ERASE_SEQ_ERR 0x10000000U +#define SDMMC_OCR_BAD_ERASE_PARAM 0x08000000U +#define SDMMC_OCR_WRITE_PROT_VIOLATION 0x04000000U +#define SDMMC_OCR_LOCK_UNLOCK_FAILED 0x01000000U +#define SDMMC_OCR_COM_CRC_FAILED 0x00800000U +#define SDMMC_OCR_ILLEGAL_CMD 0x00400000U +#define SDMMC_OCR_CARD_ECC_FAILED 0x00200000U +#define SDMMC_OCR_CC_ERROR 0x00100000U +#define SDMMC_OCR_GENERAL_UNKNOWN_ERROR 0x00080000U +#define SDMMC_OCR_STREAM_READ_UNDERRUN 0x00040000U +#define SDMMC_OCR_STREAM_WRITE_OVERRUN 0x00020000U +#define SDMMC_OCR_CID_CSD_OVERWRITE 0x00010000U +#define SDMMC_OCR_WP_ERASE_SKIP 0x00008000U +#define SDMMC_OCR_CARD_ECC_DISABLED 0x00004000U +#define SDMMC_OCR_ERASE_RESET 0x00002000U +#define SDMMC_OCR_AKE_SEQ_ERROR 0x00000008U +#define SDMMC_OCR_ERRORBITS 0xFDFFE008U + +/** + * @brief Masks for R6 Response + */ +#define SDMMC_R6_GENERAL_UNKNOWN_ERROR 0x00002000U +#define SDMMC_R6_ILLEGAL_CMD 0x00004000U +#define SDMMC_R6_COM_CRC_FAILED 0x00008000U + +#define SDMMC_VOLTAGE_WINDOW_SD 0x80100000U +#define SDMMC_HIGH_CAPACITY 0x40000000U +#define SDMMC_STD_CAPACITY 0x00000000U +#define SDMMC_CHECK_PATTERN 0x000001AAU +#define SD_SWITCH_1_8V_CAPACITY 0x01000000U + +#define SDMMC_MAX_VOLT_TRIAL 0x0000FFFFU + +#define SDMMC_MAX_TRIAL 0x0000FFFFU + +#define SDMMC_ALLZERO 0x00000000U + +#define SDMMC_WIDE_BUS_SUPPORT 0x00040000U +#define SDMMC_SINGLE_BUS_SUPPORT 0x00010000U +#define SDMMC_CARD_LOCKED 0x02000000U + +#define SDMMC_DATATIMEOUT 0xFFFFFFFFU + +#define SDMMC_0TO7BITS 0x000000FFU +#define SDMMC_8TO15BITS 0x0000FF00U +#define SDMMC_16TO23BITS 0x00FF0000U +#define SDMMC_24TO31BITS 0xFF000000U +#define SDMMC_MAX_DATA_LENGTH 0x01FFFFFFU + +#define SDMMC_HALFFIFO 0x00000008U +#define SDMMC_HALFFIFOBYTES 0x00000020U + +/** + * @brief Command Class supported + */ +#define SDMMC_CCCC_ERASE 0x00000020U + +#define SDMMC_CMDTIMEOUT 50U /* Command send and response timeout */ +#define SDMMC_MAXERASETIMEOUT 63000U /* Max erase Timeout 63 s */ +#define SDMMC_STOPTRANSFERTIMEOUT 100000000U /* Timeout for STOP TRANSMISSION command */ + +/** @defgroup SDMMC_LL_Clock_Edge Clock Edge + * @{ + */ +#define SDMMC_CLOCK_EDGE_RISING 0x00000000U +#define SDMMC_CLOCK_EDGE_FALLING SDMMC_CLKCR_NEGEDGE + +#define IS_SDMMC_CLOCK_EDGE(EDGE) (((EDGE) == SDMMC_CLOCK_EDGE_RISING) || \ + ((EDGE) == SDMMC_CLOCK_EDGE_FALLING)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Clock_Bypass Clock Bypass + * @{ + */ +#define SDMMC_CLOCK_BYPASS_DISABLE 0x00000000U +#define SDMMC_CLOCK_BYPASS_ENABLE SDMMC_CLKCR_BYPASS + +#define IS_SDMMC_CLOCK_BYPASS(BYPASS) (((BYPASS) == SDMMC_CLOCK_BYPASS_DISABLE) || \ + ((BYPASS) == SDMMC_CLOCK_BYPASS_ENABLE)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Clock_Power_Save Clock Power Saving + * @{ + */ +#define SDMMC_CLOCK_POWER_SAVE_DISABLE 0x00000000U +#define 
SDMMC_CLOCK_POWER_SAVE_ENABLE SDMMC_CLKCR_PWRSAV + +#define IS_SDMMC_CLOCK_POWER_SAVE(SAVE) (((SAVE) == SDMMC_CLOCK_POWER_SAVE_DISABLE) || \ + ((SAVE) == SDMMC_CLOCK_POWER_SAVE_ENABLE)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Bus_Wide Bus Width + * @{ + */ +#define SDMMC_BUS_WIDE_1B 0x00000000U +#define SDMMC_BUS_WIDE_4B SDMMC_CLKCR_WIDBUS_0 +#define SDMMC_BUS_WIDE_8B SDMMC_CLKCR_WIDBUS_1 + +#define IS_SDMMC_BUS_WIDE(WIDE) (((WIDE) == SDMMC_BUS_WIDE_1B) || \ + ((WIDE) == SDMMC_BUS_WIDE_4B) || \ + ((WIDE) == SDMMC_BUS_WIDE_8B)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Hardware_Flow_Control Hardware Flow Control + * @{ + */ +#define SDMMC_HARDWARE_FLOW_CONTROL_DISABLE 0x00000000U +#define SDMMC_HARDWARE_FLOW_CONTROL_ENABLE SDMMC_CLKCR_HWFC_EN + +#define IS_SDMMC_HARDWARE_FLOW_CONTROL(CONTROL) (((CONTROL) == SDMMC_HARDWARE_FLOW_CONTROL_DISABLE) || \ + ((CONTROL) == SDMMC_HARDWARE_FLOW_CONTROL_ENABLE)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Clock_Division Clock Division + * @{ + */ +#define IS_SDMMC_CLKDIV(DIV) ((DIV) <= 0xFFU) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Command_Index Command Index + * @{ + */ +#define IS_SDMMC_CMD_INDEX(INDEX) ((INDEX) < 0x40U) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Response_Type Response Type + * @{ + */ +#define SDMMC_RESPONSE_NO 0x00000000U +#define SDMMC_RESPONSE_SHORT SDMMC_CMD_WAITRESP_0 +#define SDMMC_RESPONSE_LONG SDMMC_CMD_WAITRESP + +#define IS_SDMMC_RESPONSE(RESPONSE) (((RESPONSE) == SDMMC_RESPONSE_NO) || \ + ((RESPONSE) == SDMMC_RESPONSE_SHORT) || \ + ((RESPONSE) == SDMMC_RESPONSE_LONG)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Wait_Interrupt_State Wait Interrupt + * @{ + */ +#define SDMMC_WAIT_NO 0x00000000U +#define SDMMC_WAIT_IT SDMMC_CMD_WAITINT +#define SDMMC_WAIT_PEND SDMMC_CMD_WAITPEND + +#define IS_SDMMC_WAIT(WAIT) (((WAIT) == SDMMC_WAIT_NO) || \ + ((WAIT) == SDMMC_WAIT_IT) || \ + ((WAIT) == SDMMC_WAIT_PEND)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_CPSM_State CPSM State + * @{ + */ +#define SDMMC_CPSM_DISABLE 0x00000000U +#define SDMMC_CPSM_ENABLE SDMMC_CMD_CPSMEN + +#define IS_SDMMC_CPSM(CPSM) (((CPSM) == SDMMC_CPSM_DISABLE) || \ + ((CPSM) == SDMMC_CPSM_ENABLE)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Response_Registers Response Register + * @{ + */ +#define SDMMC_RESP1 0x00000000U +#define SDMMC_RESP2 0x00000004U +#define SDMMC_RESP3 0x00000008U +#define SDMMC_RESP4 0x0000000CU + +#define IS_SDMMC_RESP(RESP) (((RESP) == SDMMC_RESP1) || \ + ((RESP) == SDMMC_RESP2) || \ + ((RESP) == SDMMC_RESP3) || \ + ((RESP) == SDMMC_RESP4)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Data_Length Data Lenght + * @{ + */ +#define IS_SDMMC_DATA_LENGTH(LENGTH) ((LENGTH) <= 0x01FFFFFFU) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Data_Block_Size Data Block Size + * @{ + */ +#define SDMMC_DATABLOCK_SIZE_1B 0x00000000U +#define SDMMC_DATABLOCK_SIZE_2B SDMMC_DCTRL_DBLOCKSIZE_0 +#define SDMMC_DATABLOCK_SIZE_4B SDMMC_DCTRL_DBLOCKSIZE_1 +#define SDMMC_DATABLOCK_SIZE_8B (SDMMC_DCTRL_DBLOCKSIZE_0|SDMMC_DCTRL_DBLOCKSIZE_1) +#define SDMMC_DATABLOCK_SIZE_16B SDMMC_DCTRL_DBLOCKSIZE_2 +#define SDMMC_DATABLOCK_SIZE_32B (SDMMC_DCTRL_DBLOCKSIZE_0|SDMMC_DCTRL_DBLOCKSIZE_2) +#define SDMMC_DATABLOCK_SIZE_64B (SDMMC_DCTRL_DBLOCKSIZE_1|SDMMC_DCTRL_DBLOCKSIZE_2) +#define SDMMC_DATABLOCK_SIZE_128B (SDMMC_DCTRL_DBLOCKSIZE_0|SDMMC_DCTRL_DBLOCKSIZE_1|SDMMC_DCTRL_DBLOCKSIZE_2) +#define SDMMC_DATABLOCK_SIZE_256B SDMMC_DCTRL_DBLOCKSIZE_3 +#define SDMMC_DATABLOCK_SIZE_512B (SDMMC_DCTRL_DBLOCKSIZE_0|SDMMC_DCTRL_DBLOCKSIZE_3) +#define SDMMC_DATABLOCK_SIZE_1024B 
(SDMMC_DCTRL_DBLOCKSIZE_1|SDMMC_DCTRL_DBLOCKSIZE_3) +#define SDMMC_DATABLOCK_SIZE_2048B (SDMMC_DCTRL_DBLOCKSIZE_0|SDMMC_DCTRL_DBLOCKSIZE_1|SDMMC_DCTRL_DBLOCKSIZE_3) +#define SDMMC_DATABLOCK_SIZE_4096B (SDMMC_DCTRL_DBLOCKSIZE_2|SDMMC_DCTRL_DBLOCKSIZE_3) +#define SDMMC_DATABLOCK_SIZE_8192B (SDMMC_DCTRL_DBLOCKSIZE_0|SDMMC_DCTRL_DBLOCKSIZE_2|SDMMC_DCTRL_DBLOCKSIZE_3) +#define SDMMC_DATABLOCK_SIZE_16384B (SDMMC_DCTRL_DBLOCKSIZE_1|SDMMC_DCTRL_DBLOCKSIZE_2|SDMMC_DCTRL_DBLOCKSIZE_3) + +#define IS_SDMMC_BLOCK_SIZE(SIZE) (((SIZE) == SDMMC_DATABLOCK_SIZE_1B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_2B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_4B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_8B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_16B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_32B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_64B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_128B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_256B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_512B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_1024B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_2048B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_4096B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_8192B) || \ + ((SIZE) == SDMMC_DATABLOCK_SIZE_16384B)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Transfer_Direction Transfer Direction + * @{ + */ +#define SDMMC_TRANSFER_DIR_TO_CARD 0x00000000U +#define SDMMC_TRANSFER_DIR_TO_SDMMC SDMMC_DCTRL_DTDIR + +#define IS_SDMMC_TRANSFER_DIR(DIR) (((DIR) == SDMMC_TRANSFER_DIR_TO_CARD) || \ + ((DIR) == SDMMC_TRANSFER_DIR_TO_SDMMC)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Transfer_Type Transfer Type + * @{ + */ +#define SDMMC_TRANSFER_MODE_BLOCK 0x00000000U +#define SDMMC_TRANSFER_MODE_STREAM SDMMC_DCTRL_DTMODE + +#define IS_SDMMC_TRANSFER_MODE(MODE) (((MODE) == SDMMC_TRANSFER_MODE_BLOCK) || \ + ((MODE) == SDMMC_TRANSFER_MODE_STREAM)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_DPSM_State DPSM State + * @{ + */ +#define SDMMC_DPSM_DISABLE 0x00000000U +#define SDMMC_DPSM_ENABLE SDMMC_DCTRL_DTEN + +#define IS_SDMMC_DPSM(DPSM) (((DPSM) == SDMMC_DPSM_DISABLE) ||\ + ((DPSM) == SDMMC_DPSM_ENABLE)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Read_Wait_Mode Read Wait Mode + * @{ + */ +#define SDMMC_READ_WAIT_MODE_DATA2 0x00000000U +#define SDMMC_READ_WAIT_MODE_CLK (SDMMC_DCTRL_RWMOD) + +#define IS_SDMMC_READWAIT_MODE(MODE) (((MODE) == SDMMC_READ_WAIT_MODE_CLK) || \ + ((MODE) == SDMMC_READ_WAIT_MODE_DATA2)) +/** + * @} + */ + +/** @defgroup SDMMC_LL_Interrupt_sources Interrupt Sources + * @{ + */ +#define SDMMC_IT_CCRCFAIL SDMMC_MASK_CCRCFAILIE +#define SDMMC_IT_DCRCFAIL SDMMC_MASK_DCRCFAILIE +#define SDMMC_IT_CTIMEOUT SDMMC_MASK_CTIMEOUTIE +#define SDMMC_IT_DTIMEOUT SDMMC_MASK_DTIMEOUTIE +#define SDMMC_IT_TXUNDERR SDMMC_MASK_TXUNDERRIE +#define SDMMC_IT_RXOVERR SDMMC_MASK_RXOVERRIE +#define SDMMC_IT_CMDREND SDMMC_MASK_CMDRENDIE +#define SDMMC_IT_CMDSENT SDMMC_MASK_CMDSENTIE +#define SDMMC_IT_DATAEND SDMMC_MASK_DATAENDIE +#define SDMMC_IT_DBCKEND SDMMC_MASK_DBCKENDIE +#define SDMMC_IT_CMDACT SDMMC_MASK_CMDACTIE +#define SDMMC_IT_TXACT SDMMC_MASK_TXACTIE +#define SDMMC_IT_RXACT SDMMC_MASK_RXACTIE +#define SDMMC_IT_TXFIFOHE SDMMC_MASK_TXFIFOHEIE +#define SDMMC_IT_RXFIFOHF SDMMC_MASK_RXFIFOHFIE +#define SDMMC_IT_TXFIFOF SDMMC_MASK_TXFIFOFIE +#define SDMMC_IT_RXFIFOF SDMMC_MASK_RXFIFOFIE +#define SDMMC_IT_TXFIFOE SDMMC_MASK_TXFIFOEIE +#define SDMMC_IT_RXFIFOE SDMMC_MASK_RXFIFOEIE +#define SDMMC_IT_TXDAVL SDMMC_MASK_TXDAVLIE +#define SDMMC_IT_RXDAVL SDMMC_MASK_RXDAVLIE +#define SDMMC_IT_SDIOIT SDMMC_MASK_SDIOITIE +/** + * @} + */ + +/** @defgroup 
SDMMC_LL_Flags Flags + * @{ + */ +#define SDMMC_FLAG_CCRCFAIL SDMMC_STA_CCRCFAIL +#define SDMMC_FLAG_DCRCFAIL SDMMC_STA_DCRCFAIL +#define SDMMC_FLAG_CTIMEOUT SDMMC_STA_CTIMEOUT +#define SDMMC_FLAG_DTIMEOUT SDMMC_STA_DTIMEOUT +#define SDMMC_FLAG_TXUNDERR SDMMC_STA_TXUNDERR +#define SDMMC_FLAG_RXOVERR SDMMC_STA_RXOVERR +#define SDMMC_FLAG_CMDREND SDMMC_STA_CMDREND +#define SDMMC_FLAG_CMDSENT SDMMC_STA_CMDSENT +#define SDMMC_FLAG_DATAEND SDMMC_STA_DATAEND +#define SDMMC_FLAG_DBCKEND SDMMC_STA_DBCKEND +#define SDMMC_FLAG_CMDACT SDMMC_STA_CMDACT +#define SDMMC_FLAG_TXACT SDMMC_STA_TXACT +#define SDMMC_FLAG_RXACT SDMMC_STA_RXACT +#define SDMMC_FLAG_TXFIFOHE SDMMC_STA_TXFIFOHE +#define SDMMC_FLAG_RXFIFOHF SDMMC_STA_RXFIFOHF +#define SDMMC_FLAG_TXFIFOF SDMMC_STA_TXFIFOF +#define SDMMC_FLAG_RXFIFOF SDMMC_STA_RXFIFOF +#define SDMMC_FLAG_TXFIFOE SDMMC_STA_TXFIFOE +#define SDMMC_FLAG_RXFIFOE SDMMC_STA_RXFIFOE +#define SDMMC_FLAG_TXDAVL SDMMC_STA_TXDAVL +#define SDMMC_FLAG_RXDAVL SDMMC_STA_RXDAVL +#define SDMMC_FLAG_SDIOIT SDMMC_STA_SDIOIT +#define SDMMC_STATIC_FLAGS ((uint32_t)(SDMMC_FLAG_CCRCFAIL | SDMMC_FLAG_DCRCFAIL | SDMMC_FLAG_CTIMEOUT |\ + SDMMC_FLAG_DTIMEOUT | SDMMC_FLAG_TXUNDERR | SDMMC_FLAG_RXOVERR |\ + SDMMC_FLAG_CMDREND | SDMMC_FLAG_CMDSENT | SDMMC_FLAG_DATAEND |\ + SDMMC_FLAG_DBCKEND | SDMMC_FLAG_SDIOIT)) + +#define SDMMC_STATIC_CMD_FLAGS ((uint32_t)(SDMMC_FLAG_CCRCFAIL | SDMMC_FLAG_CTIMEOUT | SDMMC_FLAG_CMDREND |\ + SDMMC_FLAG_CMDSENT)) + +#define SDMMC_STATIC_DATA_FLAGS ((uint32_t)(SDMMC_FLAG_DCRCFAIL | SDMMC_FLAG_DTIMEOUT | SDMMC_FLAG_TXUNDERR |\ + SDMMC_FLAG_RXOVERR | SDMMC_FLAG_DATAEND | SDMMC_FLAG_DBCKEND)) +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup SDMMC_LL_Exported_macros SDMMC_LL Exported Macros + * @{ + */ + +/** @defgroup SDMMC_LL_Register Bits And Addresses Definitions + * @brief SDMMC_LL registers bit address in the alias region + * @{ + */ +/* ---------------------- SDMMC registers bit mask --------------------------- */ +/* --- CLKCR Register ---*/ +/* CLKCR register clear mask */ +#define CLKCR_CLEAR_MASK ((uint32_t)(SDMMC_CLKCR_CLKDIV | SDMMC_CLKCR_PWRSAV |\ + SDMMC_CLKCR_BYPASS | SDMMC_CLKCR_WIDBUS |\ + SDMMC_CLKCR_NEGEDGE | SDMMC_CLKCR_HWFC_EN)) + +/* --- DCTRL Register ---*/ +/* SDMMC DCTRL Clear Mask */ +#define DCTRL_CLEAR_MASK ((uint32_t)(SDMMC_DCTRL_DTEN | SDMMC_DCTRL_DTDIR |\ + SDMMC_DCTRL_DTMODE | SDMMC_DCTRL_DBLOCKSIZE)) + +/* --- CMD Register ---*/ +/* CMD Register clear mask */ +#define CMD_CLEAR_MASK ((uint32_t)(SDMMC_CMD_CMDINDEX | SDMMC_CMD_WAITRESP |\ + SDMMC_CMD_WAITINT | SDMMC_CMD_WAITPEND |\ + SDMMC_CMD_CPSMEN | SDMMC_CMD_SDIOSUSPEND)) + +/* SDMMC Initialization Frequency (400KHz max) */ +#define SDMMC_INIT_CLK_DIV ((uint8_t)0x76) /* 48MHz / (SDMMC_INIT_CLK_DIV + 2) < 400KHz */ + +/* SDMMC Data Transfer Frequency (25MHz max) */ +#define SDMMC_TRANSFER_CLK_DIV ((uint8_t)0x0) /* 48MHz / (SDMMC_TRANSFER_CLK_DIV + 2) < 25MHz */ +/** + * @} + */ + +/** @defgroup SDMMC_LL_Interrupt_Clock Interrupt And Clock Configuration + * @brief macros to handle interrupts and specific clock configurations + * @{ + */ + +/** + * @brief Enable the SDMMC device. + * @param __INSTANCE__: SDMMC Instance + * @retval None + */ +#define __SDMMC_ENABLE(__INSTANCE__) ((__INSTANCE__)->CLKCR |= SDMMC_CLKCR_CLKEN) + +/** + * @brief Disable the SDMMC device. 
+ * @param __INSTANCE__: SDMMC Instance + * @retval None + */ +#define __SDMMC_DISABLE(__INSTANCE__) ((__INSTANCE__)->CLKCR &= ~SDMMC_CLKCR_CLKEN) + +/** + * @brief Enable the SDMMC DMA transfer. + * @param __INSTANCE__: SDMMC Instance + * @retval None + */ +#define __SDMMC_DMA_ENABLE(__INSTANCE__) ((__INSTANCE__)->DCTRL |= SDMMC_DCTRL_DMAEN) + +/** + * @brief Disable the SDMMC DMA transfer. + * @param __INSTANCE__: SDMMC Instance + * @retval None + */ +#define __SDMMC_DMA_DISABLE(__INSTANCE__) ((__INSTANCE__)->DCTRL &= ~SDMMC_DCTRL_DMAEN) + +/** + * @brief Enable the SDMMC device interrupt. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @param __INTERRUPT__ : specifies the SDMMC interrupt sources to be enabled. + * This parameter can be one or a combination of the following values: + * @arg SDMMC_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDMMC_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDMMC_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDMMC_IT_DTIMEOUT: Data timeout interrupt + * @arg SDMMC_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDMMC_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDMMC_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDMMC_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDMMC_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDACT: Command transfer in progress interrupt + * @arg SDMMC_IT_TXACT: Data transmit in progress interrupt + * @arg SDMMC_IT_RXACT: Data receive in progress interrupt + * @arg SDMMC_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt + * @arg SDMMC_IT_RXFIFOHF: Receive FIFO Half Full interrupt + * @arg SDMMC_IT_TXFIFOF: Transmit FIFO full interrupt + * @arg SDMMC_IT_RXFIFOF: Receive FIFO full interrupt + * @arg SDMMC_IT_TXFIFOE: Transmit FIFO empty interrupt + * @arg SDMMC_IT_RXFIFOE: Receive FIFO empty interrupt + * @arg SDMMC_IT_TXDAVL: Data available in transmit FIFO interrupt + * @arg SDMMC_IT_RXDAVL: Data available in receive FIFO interrupt + * @arg SDMMC_IT_SDIOIT: SDIO interrupt received interrupt + * @retval None + */ +#define __SDMMC_ENABLE_IT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->MASK |= (__INTERRUPT__)) + +/** + * @brief Disable the SDMMC device interrupt. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @param __INTERRUPT__ : specifies the SDMMC interrupt sources to be disabled. 
+ * This parameter can be one or a combination of the following values: + * @arg SDMMC_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDMMC_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDMMC_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDMMC_IT_DTIMEOUT: Data timeout interrupt + * @arg SDMMC_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDMMC_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDMMC_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDMMC_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDMMC_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDACT: Command transfer in progress interrupt + * @arg SDMMC_IT_TXACT: Data transmit in progress interrupt + * @arg SDMMC_IT_RXACT: Data receive in progress interrupt + * @arg SDMMC_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt + * @arg SDMMC_IT_RXFIFOHF: Receive FIFO Half Full interrupt + * @arg SDMMC_IT_TXFIFOF: Transmit FIFO full interrupt + * @arg SDMMC_IT_RXFIFOF: Receive FIFO full interrupt + * @arg SDMMC_IT_TXFIFOE: Transmit FIFO empty interrupt + * @arg SDMMC_IT_RXFIFOE: Receive FIFO empty interrupt + * @arg SDMMC_IT_TXDAVL: Data available in transmit FIFO interrupt + * @arg SDMMC_IT_RXDAVL: Data available in receive FIFO interrupt + * @arg SDMMC_IT_SDIOIT: SDIO interrupt received interrupt + * @retval None + */ +#define __SDMMC_DISABLE_IT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->MASK &= ~(__INTERRUPT__)) + +/** + * @brief Checks whether the specified SDMMC flag is set or not. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @param __FLAG__: specifies the flag to check. + * This parameter can be one of the following values: + * @arg SDMMC_FLAG_CCRCFAIL: Command response received (CRC check failed) + * @arg SDMMC_FLAG_DCRCFAIL: Data block sent/received (CRC check failed) + * @arg SDMMC_FLAG_CTIMEOUT: Command response timeout + * @arg SDMMC_FLAG_DTIMEOUT: Data timeout + * @arg SDMMC_FLAG_TXUNDERR: Transmit FIFO underrun error + * @arg SDMMC_FLAG_RXOVERR: Received FIFO overrun error + * @arg SDMMC_FLAG_CMDREND: Command response received (CRC check passed) + * @arg SDMMC_FLAG_CMDSENT: Command sent (no response required) + * @arg SDMMC_FLAG_DATAEND: Data end (data counter, DATACOUNT, is zero) + * @arg SDMMC_FLAG_DBCKEND: Data block sent/received (CRC check passed) + * @arg SDMMC_FLAG_CMDACT: Command transfer in progress + * @arg SDMMC_FLAG_TXACT: Data transmit in progress + * @arg SDMMC_FLAG_RXACT: Data receive in progress + * @arg SDMMC_FLAG_TXFIFOHE: Transmit FIFO Half Empty + * @arg SDMMC_FLAG_RXFIFOHF: Receive FIFO Half Full + * @arg SDMMC_FLAG_TXFIFOF: Transmit FIFO full + * @arg SDMMC_FLAG_RXFIFOF: Receive FIFO full + * @arg SDMMC_FLAG_TXFIFOE: Transmit FIFO empty + * @arg SDMMC_FLAG_RXFIFOE: Receive FIFO empty + * @arg SDMMC_FLAG_TXDAVL: Data available in transmit FIFO + * @arg SDMMC_FLAG_RXDAVL: Data available in receive FIFO + * @arg SDMMC_FLAG_SDIOIT: SDIO interrupt received + * @retval The new state of SDMMC_FLAG (SET or RESET). + */ +#define __SDMMC_GET_FLAG(__INSTANCE__, __FLAG__) (((__INSTANCE__)->STA &(__FLAG__)) != 0U) + + +/** + * @brief Clears the SDMMC pending flags. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @param __FLAG__: specifies the flag to clear. 
+ * This parameter can be one or a combination of the following values: + * @arg SDMMC_FLAG_CCRCFAIL: Command response received (CRC check failed) + * @arg SDMMC_FLAG_DCRCFAIL: Data block sent/received (CRC check failed) + * @arg SDMMC_FLAG_CTIMEOUT: Command response timeout + * @arg SDMMC_FLAG_DTIMEOUT: Data timeout + * @arg SDMMC_FLAG_TXUNDERR: Transmit FIFO underrun error + * @arg SDMMC_FLAG_RXOVERR: Received FIFO overrun error + * @arg SDMMC_FLAG_CMDREND: Command response received (CRC check passed) + * @arg SDMMC_FLAG_CMDSENT: Command sent (no response required) + * @arg SDMMC_FLAG_DATAEND: Data end (data counter, DATACOUNT, is zero) + * @arg SDMMC_FLAG_DBCKEND: Data block sent/received (CRC check passed) + * @arg SDMMC_FLAG_SDIOIT: SDIO interrupt received + * @retval None + */ +#define __SDMMC_CLEAR_FLAG(__INSTANCE__, __FLAG__) ((__INSTANCE__)->ICR = (__FLAG__)) + +/** + * @brief Checks whether the specified SDMMC interrupt has occurred or not. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @param __INTERRUPT__: specifies the SDMMC interrupt source to check. + * This parameter can be one of the following values: + * @arg SDMMC_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDMMC_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDMMC_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDMMC_IT_DTIMEOUT: Data timeout interrupt + * @arg SDMMC_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDMMC_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDMMC_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDMMC_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDMMC_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDACT: Command transfer in progress interrupt + * @arg SDMMC_IT_TXACT: Data transmit in progress interrupt + * @arg SDMMC_IT_RXACT: Data receive in progress interrupt + * @arg SDMMC_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt + * @arg SDMMC_IT_RXFIFOHF: Receive FIFO Half Full interrupt + * @arg SDMMC_IT_TXFIFOF: Transmit FIFO full interrupt + * @arg SDMMC_IT_RXFIFOF: Receive FIFO full interrupt + * @arg SDMMC_IT_TXFIFOE: Transmit FIFO empty interrupt + * @arg SDMMC_IT_RXFIFOE: Receive FIFO empty interrupt + * @arg SDMMC_IT_TXDAVL: Data available in transmit FIFO interrupt + * @arg SDMMC_IT_RXDAVL: Data available in receive FIFO interrupt + * @arg SDMMC_IT_SDIOIT: SDIO interrupt received interrupt + * @retval The new state of SDMMC_IT (SET or RESET). + */ +#define __SDMMC_GET_IT(__INSTANCE__, __INTERRUPT__) (((__INSTANCE__)->STA &(__INTERRUPT__)) == (__INTERRUPT__)) + +/** + * @brief Clears the SDMMC's interrupt pending bits. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @param __INTERRUPT__: specifies the interrupt pending bit to clear. 
+ * This parameter can be one or a combination of the following values: + * @arg SDMMC_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDMMC_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDMMC_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDMMC_IT_DTIMEOUT: Data timeout interrupt + * @arg SDMMC_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDMMC_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDMMC_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDMMC_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDMMC_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDMMC_IT_SDIOIT: SDIO interrupt received interrupt + * @retval None + */ +#define __SDMMC_CLEAR_IT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->ICR = (__INTERRUPT__)) + +/** + * @brief Enable Start the SD I/O Read Wait operation. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @retval None + */ +#define __SDMMC_START_READWAIT_ENABLE(__INSTANCE__) ((__INSTANCE__)->DCTRL |= SDMMC_DCTRL_RWSTART) + +/** + * @brief Disable Start the SD I/O Read Wait operations. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @retval None + */ +#define __SDMMC_START_READWAIT_DISABLE(__INSTANCE__) ((__INSTANCE__)->DCTRL &= ~SDMMC_DCTRL_RWSTART) + +/** + * @brief Enable Stop the SD I/O Read Wait operation. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @retval None + */ +#define __SDMMC_STOP_READWAIT_ENABLE(__INSTANCE__) ((__INSTANCE__)->DCTRL |= SDMMC_DCTRL_RWSTOP) + +/** + * @brief Disable Stop the SD I/O Read Wait operations. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @retval None + */ +#define __SDMMC_STOP_READWAIT_DISABLE(__INSTANCE__) ((__INSTANCE__)->DCTRL &= ~SDMMC_DCTRL_RWSTOP) + +/** + * @brief Enable the SD I/O Mode Operation. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @retval None + */ +#define __SDMMC_OPERATION_ENABLE(__INSTANCE__) ((__INSTANCE__)->DCTRL |= SDMMC_DCTRL_SDIOEN) + +/** + * @brief Disable the SD I/O Mode Operation. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @retval None + */ +#define __SDMMC_OPERATION_DISABLE(__INSTANCE__) ((__INSTANCE__)->DCTRL &= ~SDMMC_DCTRL_SDIOEN) + +/** + * @brief Enable the SD I/O Suspend command sending. + * @param __INSTANCE__ : Pointer to SDMMC register base + * @retval None + */ +#define __SDMMC_SUSPEND_CMD_ENABLE(__INSTANCE__) ((__INSTANCE__)->CMD |= SDMMC_CMD_SDIOSUSPEND) + +/** + * @brief Disable the SD I/O Suspend command sending. 
+ * @param __INSTANCE__ : Pointer to SDMMC register base + * @retval None + */ +#define __SDMMC_SUSPEND_CMD_DISABLE(__INSTANCE__) ((__INSTANCE__)->CMD &= ~SDMMC_CMD_SDIOSUSPEND) + +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup SDMMC_LL_Exported_Functions + * @{ + */ + +/* Initialization/de-initialization functions **********************************/ +/** @addtogroup HAL_SDMMC_LL_Group1 + * @{ + */ +HAL_StatusTypeDef SDMMC_Init(SDMMC_TypeDef *SDMMCx, SDMMC_InitTypeDef Init); +/** + * @} + */ + +/* I/O operation functions *****************************************************/ +/** @addtogroup HAL_SDMMC_LL_Group2 + * @{ + */ +uint32_t SDMMC_ReadFIFO(SDMMC_TypeDef *SDMMCx); +HAL_StatusTypeDef SDMMC_WriteFIFO(SDMMC_TypeDef *SDMMCx, uint32_t *pWriteData); +/** + * @} + */ + +/* Peripheral Control functions ************************************************/ +/** @addtogroup HAL_SDMMC_LL_Group3 + * @{ + */ +HAL_StatusTypeDef SDMMC_PowerState_ON(SDMMC_TypeDef *SDMMCx); +HAL_StatusTypeDef SDMMC_PowerState_OFF(SDMMC_TypeDef *SDMMCx); +uint32_t SDMMC_GetPowerState(SDMMC_TypeDef *SDMMCx); + +/* Command path state machine (CPSM) management functions */ +HAL_StatusTypeDef SDMMC_SendCommand(SDMMC_TypeDef *SDMMCx, SDMMC_CmdInitTypeDef *Command); +uint8_t SDMMC_GetCommandResponse(SDMMC_TypeDef *SDMMCx); +uint32_t SDMMC_GetResponse(SDMMC_TypeDef *SDMMCx, uint32_t Response); + +/* Data path state machine (DPSM) management functions */ +HAL_StatusTypeDef SDMMC_ConfigData(SDMMC_TypeDef *SDMMCx, SDMMC_DataInitTypeDef* Data); +uint32_t SDMMC_GetDataCounter(SDMMC_TypeDef *SDMMCx); +uint32_t SDMMC_GetFIFOCount(SDMMC_TypeDef *SDMMCx); + +/* SDMMC Cards mode management functions */ +HAL_StatusTypeDef SDMMC_SetSDMMCReadWaitMode(SDMMC_TypeDef *SDMMCx, uint32_t SDMMC_ReadWaitMode); + +/* SDMMC Commands management functions */ +uint32_t SDMMC_CmdBlockLength(SDMMC_TypeDef *SDMMCx, uint32_t BlockSize); +uint32_t SDMMC_CmdReadSingleBlock(SDMMC_TypeDef *SDMMCx, uint32_t ReadAdd); +uint32_t SDMMC_CmdReadMultiBlock(SDMMC_TypeDef *SDMMCx, uint32_t ReadAdd); +uint32_t SDMMC_CmdWriteSingleBlock(SDMMC_TypeDef *SDMMCx, uint32_t WriteAdd); +uint32_t SDMMC_CmdWriteMultiBlock(SDMMC_TypeDef *SDMMCx, uint32_t WriteAdd); +uint32_t SDMMC_CmdEraseStartAdd(SDMMC_TypeDef *SDMMCx, uint32_t StartAdd); +uint32_t SDMMC_CmdSDEraseStartAdd(SDMMC_TypeDef *SDMMCx, uint32_t StartAdd); +uint32_t SDMMC_CmdEraseEndAdd(SDMMC_TypeDef *SDMMCx, uint32_t EndAdd); +uint32_t SDMMC_CmdSDEraseEndAdd(SDMMC_TypeDef *SDMMCx, uint32_t EndAdd); +uint32_t SDMMC_CmdErase(SDMMC_TypeDef *SDMMCx); +uint32_t SDMMC_CmdStopTransfer(SDMMC_TypeDef *SDMMCx); +uint32_t SDMMC_CmdSelDesel(SDMMC_TypeDef *SDMMCx, uint64_t Addr); +uint32_t SDMMC_CmdGoIdleState(SDMMC_TypeDef *SDMMCx); +uint32_t SDMMC_CmdOperCond(SDMMC_TypeDef *SDMMCx); +uint32_t SDMMC_CmdAppCommand(SDMMC_TypeDef *SDMMCx, uint32_t Argument); +uint32_t SDMMC_CmdAppOperCommand(SDMMC_TypeDef *SDMMCx, uint32_t Argument); +uint32_t SDMMC_CmdBusWidth(SDMMC_TypeDef *SDMMCx, uint32_t BusWidth); +uint32_t SDMMC_CmdSendSCR(SDMMC_TypeDef *SDMMCx); +uint32_t SDMMC_CmdSendCID(SDMMC_TypeDef *SDMMCx); +uint32_t SDMMC_CmdSendCSD(SDMMC_TypeDef *SDMMCx, uint32_t Argument); +uint32_t SDMMC_CmdSetRelAdd(SDMMC_TypeDef *SDMMCx, uint16_t *pRCA); +uint32_t SDMMC_CmdSendStatus(SDMMC_TypeDef *SDMMCx, uint32_t Argument); +uint32_t SDMMC_CmdStatusRegister(SDMMC_TypeDef *SDMMCx); +uint32_t SDMMC_CmdOpCondition(SDMMC_TypeDef *SDMMCx, uint32_t Argument); 
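Editor's note: the command, data-path and FIFO helpers declared in this header are the building blocks that the higher-level HAL_SD/HAL_MMC drivers assemble into complete transfers. As a rough illustration only (not part of the ST sources), the sketch below shows one plausible polling read of a single 512-byte block using only the names declared in this file; the helper name sdmmc_ll_read_block_sketch is hypothetical, and card selection, wide-bus setup and wall-clock timeouts are deliberately omitted.

#include "stm32f7xx_ll_sdmmc.h"  /* this header; assumes the HAL/CMSIS headers are on the include path */

/* Hypothetical helper (illustration only): poll one 512-byte block out of the
 * controller FIFO. 'block_addr' must already be in the unit the card expects
 * (block number for SDHC/SDXC, byte address for standard-capacity cards) and
 * 'buf' must hold at least 128 32-bit words. Returns an SDMMC_ERROR_* code. */
static uint32_t sdmmc_ll_read_block_sketch(SDMMC_TypeDef *SDMMCx,
                                           uint32_t block_addr, uint32_t *buf)
{
  SDMMC_DataInitTypeDef config;
  uint32_t errorstate;
  uint32_t words_read = 0U;

  /* CMD16: set the block length (SDHC/SDXC cards ignore it and use 512). */
  errorstate = SDMMC_CmdBlockLength(SDMMCx, 512U);
  if (errorstate != SDMMC_ERROR_NONE)
  {
    return errorstate;
  }

  /* Arm the data path state machine for one card-to-controller block. */
  config.DataTimeOut   = SDMMC_DATATIMEOUT;
  config.DataLength    = 512U;
  config.DataBlockSize = SDMMC_DATABLOCK_SIZE_512B;
  config.TransferDir   = SDMMC_TRANSFER_DIR_TO_SDMMC;
  config.TransferMode  = SDMMC_TRANSFER_MODE_BLOCK;
  config.DPSM          = SDMMC_DPSM_ENABLE;
  (void)SDMMC_ConfigData(SDMMCx, &config);

  /* CMD17: start the single-block read. */
  errorstate = SDMMC_CmdReadSingleBlock(SDMMCx, block_addr);
  if (errorstate != SDMMC_ERROR_NONE)
  {
    return errorstate;
  }

  /* Drain the receive FIFO until the transfer ends or an error flag rises
     (no wall-clock timeout here; the real driver adds one). */
  while (!__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_RXOVERR | SDMMC_FLAG_DCRCFAIL |
                                   SDMMC_FLAG_DTIMEOUT | SDMMC_FLAG_DATAEND))
  {
    if (__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_RXDAVL) && (words_read < 128U))
    {
      buf[words_read++] = SDMMC_ReadFIFO(SDMMCx);
    }
  }

  if (__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_DTIMEOUT))
  {
    errorstate = SDMMC_ERROR_DATA_TIMEOUT;
  }
  else if (__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_DCRCFAIL))
  {
    errorstate = SDMMC_ERROR_DATA_CRC_FAIL;
  }
  else if (__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_RXOVERR))
  {
    errorstate = SDMMC_ERROR_RX_OVERRUN;
  }

  /* Clear the sticky data-path status bits before returning. */
  __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_STATIC_DATA_FLAGS);
  return errorstate;
}

The same skeleton, using SDMMC_CmdWriteSingleBlock, SDMMC_WriteFIFO and the TX flags, would cover the write direction; the interrupt and DMA variants in the HAL replace the polling loop with __SDMMC_ENABLE_IT and the DCTRL DMA enable macro.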
+uint32_t SDMMC_CmdSwitch(SDMMC_TypeDef *SDMMCx, uint32_t Argument); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* SDMMC1 */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_LL_SDMMC_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_usb.h b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_usb.h new file mode 100644 index 000000000..6d3769c88 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_usb.h @@ -0,0 +1,512 @@ +/** + ****************************************************************************** + * @file stm32f7xx_ll_usb.h + * @author MCD Application Team + * @brief Header file of USB Low Layer HAL module. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_LL_USB_H +#define STM32F7xx_LL_USB_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup USB_LL + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** + * @brief USB Mode definition + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) + +typedef enum +{ + USB_DEVICE_MODE = 0, + USB_HOST_MODE = 1, + USB_DRD_MODE = 2 +} USB_OTG_ModeTypeDef; + +/** + * @brief URB States definition + */ +typedef enum +{ + URB_IDLE = 0, + URB_DONE, + URB_NOTREADY, + URB_NYET, + URB_ERROR, + URB_STALL +} USB_OTG_URBStateTypeDef; + +/** + * @brief Host channel States definition + */ +typedef enum +{ + HC_IDLE = 0, + HC_XFRC, + HC_HALTED, + HC_NAK, + HC_NYET, + HC_STALL, + HC_XACTERR, + HC_BBLERR, + HC_DATATGLERR +} USB_OTG_HCStateTypeDef; + +/** + * @brief USB OTG Initialization Structure definition + */ +typedef struct +{ + uint32_t dev_endpoints; /*!< Device Endpoints number. + This parameter depends on the used USB core. + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint32_t Host_channels; /*!< Host Channels number. + This parameter Depends on the used USB core. + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint32_t speed; /*!< USB Core speed. + This parameter can be any value of @ref USB_Core_Speed_ */ + + uint32_t dma_enable; /*!< Enable or disable of the USB embedded DMA used only for OTG HS. */ + + uint32_t ep0_mps; /*!< Set the Endpoint 0 Max Packet size. */ + + uint32_t phy_itface; /*!< Select the used PHY interface. + This parameter can be any value of @ref USB_Core_PHY_ */ + + uint32_t Sof_enable; /*!< Enable or disable the output of the SOF signal. */ + + uint32_t low_power_enable; /*!< Enable or disable the low power mode. */ + + uint32_t lpm_enable; /*!< Enable or disable Link Power Management. */ + + uint32_t battery_charging_enable; /*!< Enable or disable Battery charging. */ + + uint32_t vbus_sensing_enable; /*!< Enable or disable the VBUS Sensing feature. */ + + uint32_t use_dedicated_ep1; /*!< Enable or disable the use of the dedicated EP1 interrupt. */ + + uint32_t use_external_vbus; /*!< Enable or disable the use of the external VBUS. 
*/ +} USB_OTG_CfgTypeDef; + +typedef struct +{ + uint8_t num; /*!< Endpoint number + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint8_t is_in; /*!< Endpoint direction + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t is_stall; /*!< Endpoint stall condition + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t type; /*!< Endpoint type + This parameter can be any value of @ref USB_EP_Type_ */ + + uint8_t data_pid_start; /*!< Initial data PID + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t even_odd_frame; /*!< IFrame parity + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint16_t tx_fifo_num; /*!< Transmission FIFO number + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint32_t maxpacket; /*!< Endpoint Max packet size + This parameter must be a number between Min_Data = 0 and Max_Data = 64KB */ + + uint8_t *xfer_buff; /*!< Pointer to transfer buffer */ + + uint32_t dma_addr; /*!< 32 bits aligned transfer buffer address */ + + uint32_t xfer_len; /*!< Current transfer length */ + + uint32_t xfer_count; /*!< Partial transfer length in case of multi packet transfer */ +} USB_OTG_EPTypeDef; + +typedef struct +{ + uint8_t dev_addr; /*!< USB device address. + This parameter must be a number between Min_Data = 1 and Max_Data = 255 */ + + uint8_t ch_num; /*!< Host channel number. + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint8_t ep_num; /*!< Endpoint number. + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint8_t ep_is_in; /*!< Endpoint direction + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t speed; /*!< USB Host speed. + This parameter can be any value of @ref USB_Core_Speed_ */ + + uint8_t do_ping; /*!< Enable or disable the use of the PING protocol for HS mode. */ + + uint8_t process_ping; /*!< Execute the PING protocol for HS mode. */ + + uint8_t ep_type; /*!< Endpoint Type. + This parameter can be any value of @ref USB_EP_Type_ */ + + uint16_t max_packet; /*!< Endpoint Max packet size. + This parameter must be a number between Min_Data = 0 and Max_Data = 64KB */ + + uint8_t data_pid; /*!< Initial data PID. + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t *xfer_buff; /*!< Pointer to transfer buffer. */ + + uint32_t xfer_len; /*!< Current transfer length. */ + + uint32_t xfer_count; /*!< Partial transfer length in case of multi packet transfer. */ + + uint8_t toggle_in; /*!< IN transfer current toggle flag. + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t toggle_out; /*!< OUT transfer current toggle flag + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint32_t dma_addr; /*!< 32 bits aligned transfer buffer address. */ + + uint32_t ErrCnt; /*!< Host channel error count.*/ + + USB_OTG_URBStateTypeDef urb_state; /*!< URB state. + This parameter can be any value of @ref USB_OTG_URBStateTypeDef */ + + USB_OTG_HCStateTypeDef state; /*!< Host Channel state. 
+ This parameter can be any value of @ref USB_OTG_HCStateTypeDef */ +} USB_OTG_HCTypeDef; +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup PCD_Exported_Constants PCD Exported Constants + * @{ + */ + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** @defgroup USB_OTG_CORE VERSION ID + * @{ + */ +#define USB_OTG_CORE_ID_300A 0x4F54300AU +#define USB_OTG_CORE_ID_310A 0x4F54310AU +/** + * @} + */ + +/** @defgroup USB_Core_Mode_ USB Core Mode + * @{ + */ +#define USB_OTG_MODE_DEVICE 0U +#define USB_OTG_MODE_HOST 1U +#define USB_OTG_MODE_DRD 2U +/** + * @} + */ + +/** @defgroup USB_LL Device Speed + * @{ + */ +#define USBD_HS_SPEED 0U +#define USBD_HSINFS_SPEED 1U +#define USBH_HS_SPEED 0U +#define USBD_FS_SPEED 2U +#define USBH_FSLS_SPEED 1U +/** + * @} + */ + +/** @defgroup USB_LL_Core_Speed USB Low Layer Core Speed + * @{ + */ +#define USB_OTG_SPEED_HIGH 0U +#define USB_OTG_SPEED_HIGH_IN_FULL 1U +#define USB_OTG_SPEED_FULL 3U +/** + * @} + */ + +/** @defgroup USB_LL_Core_PHY USB Low Layer Core PHY + * @{ + */ +#define USB_OTG_ULPI_PHY 1U +#define USB_OTG_EMBEDDED_PHY 2U +#define USB_OTG_HS_EMBEDDED_PHY 3U + +#if !defined (USB_HS_PHYC_TUNE_VALUE) +#define USB_HS_PHYC_TUNE_VALUE 0x00000F13U /*!< Value of USB HS PHY Tune */ +#endif /* USB_HS_PHYC_TUNE_VALUE */ +/** + * @} + */ + +/** @defgroup USB_LL_Turnaround_Timeout Turnaround Timeout Value + * @{ + */ +#ifndef USBD_HS_TRDT_VALUE +#define USBD_HS_TRDT_VALUE 9U +#endif /* USBD_HS_TRDT_VALUE */ +#ifndef USBD_FS_TRDT_VALUE +#define USBD_FS_TRDT_VALUE 5U +#define USBD_DEFAULT_TRDT_VALUE 9U +#endif /* USBD_HS_TRDT_VALUE */ +/** + * @} + */ + +/** @defgroup USB_LL_Core_MPS USB Low Layer Core MPS + * @{ + */ +#define USB_OTG_HS_MAX_PACKET_SIZE 512U +#define USB_OTG_FS_MAX_PACKET_SIZE 64U +#define USB_OTG_MAX_EP0_SIZE 64U +/** + * @} + */ + +/** @defgroup USB_LL_Core_PHY_Frequency USB Low Layer Core PHY Frequency + * @{ + */ +#define DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ (0U << 1) +#define DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ (1U << 1) +#define DSTS_ENUMSPD_FS_PHY_48MHZ (3U << 1) +/** + * @} + */ + +/** @defgroup USB_LL_CORE_Frame_Interval USB Low Layer Core Frame Interval + * @{ + */ +#define DCFG_FRAME_INTERVAL_80 0U +#define DCFG_FRAME_INTERVAL_85 1U +#define DCFG_FRAME_INTERVAL_90 2U +#define DCFG_FRAME_INTERVAL_95 3U +/** + * @} + */ + +/** @defgroup USB_LL_EP0_MPS USB Low Layer EP0 MPS + * @{ + */ +#define DEP0CTL_MPS_64 0U +#define DEP0CTL_MPS_32 1U +#define DEP0CTL_MPS_16 2U +#define DEP0CTL_MPS_8 3U +/** + * @} + */ + +/** @defgroup USB_LL_EP_Speed USB Low Layer EP Speed + * @{ + */ +#define EP_SPEED_LOW 0U +#define EP_SPEED_FULL 1U +#define EP_SPEED_HIGH 2U +/** + * @} + */ + +/** @defgroup USB_LL_EP_Type USB Low Layer EP Type + * @{ + */ +#define EP_TYPE_CTRL 0U +#define EP_TYPE_ISOC 1U +#define EP_TYPE_BULK 2U +#define EP_TYPE_INTR 3U +#define EP_TYPE_MSK 3U +/** + * @} + */ + +/** @defgroup USB_LL_STS_Defines USB Low Layer STS Defines + * @{ + */ +#define STS_GOUT_NAK 1U +#define STS_DATA_UPDT 2U +#define STS_XFER_COMP 3U +#define STS_SETUP_COMP 4U +#define STS_SETUP_UPDT 6U +/** + * @} + */ + +/** @defgroup USB_LL_HCFG_SPEED_Defines USB Low Layer HCFG Speed Defines + * @{ + */ +#define HCFG_30_60_MHZ 0U +#define HCFG_48_MHZ 1U +#define HCFG_6_MHZ 2U +/** + * @} + */ + +/** @defgroup USB_LL_HPRT0_PRTSPD_SPEED_Defines USB Low Layer HPRT0 PRTSPD Speed Defines + * @{ + */ +#define HPRT0_PRTSPD_HIGH_SPEED 0U +#define 
HPRT0_PRTSPD_FULL_SPEED 1U +#define HPRT0_PRTSPD_LOW_SPEED 2U +/** + * @} + */ + +#define HCCHAR_CTRL 0U +#define HCCHAR_ISOC 1U +#define HCCHAR_BULK 2U +#define HCCHAR_INTR 3U + +#define HC_PID_DATA0 0U +#define HC_PID_DATA2 1U +#define HC_PID_DATA1 2U +#define HC_PID_SETUP 3U + +#define GRXSTS_PKTSTS_IN 2U +#define GRXSTS_PKTSTS_IN_XFER_COMP 3U +#define GRXSTS_PKTSTS_DATA_TOGGLE_ERR 5U +#define GRXSTS_PKTSTS_CH_HALTED 7U + +#define USBx_PCGCCTL *(__IO uint32_t *)((uint32_t)USBx_BASE + USB_OTG_PCGCCTL_BASE) +#define USBx_HPRT0 *(__IO uint32_t *)((uint32_t)USBx_BASE + USB_OTG_HOST_PORT_BASE) + +#define USBx_DEVICE ((USB_OTG_DeviceTypeDef *)(USBx_BASE + USB_OTG_DEVICE_BASE)) +#define USBx_INEP(i) ((USB_OTG_INEndpointTypeDef *)(USBx_BASE + USB_OTG_IN_ENDPOINT_BASE + ((i) * USB_OTG_EP_REG_SIZE))) +#define USBx_OUTEP(i) ((USB_OTG_OUTEndpointTypeDef *)(USBx_BASE + USB_OTG_OUT_ENDPOINT_BASE + ((i) * USB_OTG_EP_REG_SIZE))) +#define USBx_DFIFO(i) *(__IO uint32_t *)(USBx_BASE + USB_OTG_FIFO_BASE + ((i) * USB_OTG_FIFO_SIZE)) + +#define USBx_HOST ((USB_OTG_HostTypeDef *)(USBx_BASE + USB_OTG_HOST_BASE)) +#define USBx_HC(i) ((USB_OTG_HostChannelTypeDef *)(USBx_BASE + USB_OTG_HOST_CHANNEL_BASE + ((i) * USB_OTG_HOST_CHANNEL_SIZE))) +#define USBPHYC ((USBPHYC_GlobalTypeDef *)((uint32_t )USB_PHY_HS_CONTROLLER_BASE)) +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +#define EP_ADDR_MSK 0xFU +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup USB_LL_Exported_Macros USB Low Layer Exported Macros + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +#define USB_MASK_INTERRUPT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->GINTMSK &= ~(__INTERRUPT__)) +#define USB_UNMASK_INTERRUPT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->GINTMSK |= (__INTERRUPT__)) + +#define CLEAR_IN_EP_INTR(__EPNUM__, __INTERRUPT__) (USBx_INEP(__EPNUM__)->DIEPINT = (__INTERRUPT__)) +#define CLEAR_OUT_EP_INTR(__EPNUM__, __INTERRUPT__) (USBx_OUTEP(__EPNUM__)->DOEPINT = (__INTERRUPT__)) +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup USB_LL_Exported_Functions USB Low Layer Exported Functions + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +HAL_StatusTypeDef USB_CoreInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg); +HAL_StatusTypeDef USB_DevInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg); +HAL_StatusTypeDef USB_EnableGlobalInt(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DisableGlobalInt(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_SetTurnaroundTime(USB_OTG_GlobalTypeDef *USBx, uint32_t hclk, uint8_t speed); +HAL_StatusTypeDef USB_SetCurrentMode(USB_OTG_GlobalTypeDef *USBx, USB_OTG_ModeTypeDef mode); +HAL_StatusTypeDef USB_SetDevSpeed(USB_OTG_GlobalTypeDef *USBx, uint8_t speed); +HAL_StatusTypeDef USB_FlushRxFifo(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_FlushTxFifo(USB_OTG_GlobalTypeDef *USBx, uint32_t num); +HAL_StatusTypeDef USB_ActivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_DeactivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_ActivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_DeactivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_EPStartXfer(USB_OTG_GlobalTypeDef *USBx, 
USB_OTG_EPTypeDef *ep, uint8_t dma); +HAL_StatusTypeDef USB_EP0StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep, uint8_t dma); +HAL_StatusTypeDef USB_WritePacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *src, uint8_t ch_ep_num, uint16_t len, uint8_t dma); +void *USB_ReadPacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *dest, uint16_t len); +HAL_StatusTypeDef USB_EPSetStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_EPClearStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_SetDevAddress(USB_OTG_GlobalTypeDef *USBx, uint8_t address); +HAL_StatusTypeDef USB_DevConnect(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DevDisconnect(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_StopDevice(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_ActivateSetup(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_EP0_OutStart(USB_OTG_GlobalTypeDef *USBx, uint8_t dma, uint8_t *psetup); +uint8_t USB_GetDevSpeed(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_GetMode(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadInterrupts(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadDevAllOutEpInterrupt(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadDevOutEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum); +uint32_t USB_ReadDevAllInEpInterrupt(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadDevInEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum); +void USB_ClearInterrupts(USB_OTG_GlobalTypeDef *USBx, uint32_t interrupt); + +HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg); +HAL_StatusTypeDef USB_InitFSLSPClkSel(USB_OTG_GlobalTypeDef *USBx, uint8_t freq); +HAL_StatusTypeDef USB_ResetPort(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DriveVbus(USB_OTG_GlobalTypeDef *USBx, uint8_t state); +uint32_t USB_GetHostSpeed(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_GetCurrentFrame(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num, + uint8_t epnum, uint8_t dev_address, uint8_t speed, + uint8_t ep_type, uint16_t mps); +HAL_StatusTypeDef USB_HC_StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_HCTypeDef *hc, uint8_t dma); +uint32_t USB_HC_ReadInterrupt(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_HC_Halt(USB_OTG_GlobalTypeDef *USBx, uint8_t hc_num); +HAL_StatusTypeDef USB_DoPing(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num); +HAL_StatusTypeDef USB_StopHost(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_ActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DeActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx); +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +#ifdef __cplusplus +} +#endif + + +#endif /* STM32F7xx_LL_USB_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c new file mode 100644 index 000000000..ce308e44a --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c @@ -0,0 +1,620 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal.c + * @author MCD Application Team + * @brief HAL module driver. 
+ * This is the common part of the HAL initialization + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The common HAL driver contains a set of generic and common APIs that can be + used by the PPP peripheral drivers and the user to start using the HAL. + [..] + The HAL contains two APIs' categories: + (+) Common HAL APIs + (+) Services HAL APIs + + @endverbatim + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup HAL HAL + * @brief HAL module driver. + * @{ + */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup HAL_Private_Constants + * @{ + */ +/** + * @brief STM32F7xx HAL Driver version number V1.2.8 + */ +#define __STM32F7xx_HAL_VERSION_MAIN (0x01) /*!< [31:24] main version */ +#define __STM32F7xx_HAL_VERSION_SUB1 (0x02) /*!< [23:16] sub1 version */ +#define __STM32F7xx_HAL_VERSION_SUB2 (0x08) /*!< [15:8] sub2 version */ +#define __STM32F7xx_HAL_VERSION_RC (0x00) /*!< [7:0] release candidate */ +#define __STM32F7xx_HAL_VERSION ((__STM32F7xx_HAL_VERSION_MAIN << 24)\ + |(__STM32F7xx_HAL_VERSION_SUB1 << 16)\ + |(__STM32F7xx_HAL_VERSION_SUB2 << 8 )\ + |(__STM32F7xx_HAL_VERSION_RC)) + +#define IDCODE_DEVID_MASK ((uint32_t)0x00000FFF) +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Exported variables ---------------------------------------------------------*/ +/** @addtogroup HAL_Exported_Variables + * @{ + */ +__IO uint32_t uwTick; +uint32_t uwTickPrio = (1UL << __NVIC_PRIO_BITS); /* Invalid PRIO */ +HAL_TickFreqTypeDef uwTickFreq = HAL_TICK_FREQ_DEFAULT; /* 1KHz */ +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup HAL_Exported_Functions HAL Exported Functions + * @{ + */ + +/** @defgroup HAL_Exported_Functions_Group1 Initialization and de-initialization Functions + * @brief Initialization and de-initialization functions + * +@verbatim + =============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initializes the Flash interface the NVIC allocation and initial clock + configuration. It initializes the systick also when timeout is needed + and the backup domain when enabled. + (+) De-Initializes common part of the HAL. + (+) Configure the time base source to have 1ms time base with a dedicated + Tick interrupt priority. + (++) SysTick timer is used by default as source of time base, but user + can eventually implement his proper time base source (a general purpose + timer for example or other time source), keeping in mind that Time base + duration should be kept 1ms since PPP_TIMEOUT_VALUEs are defined and + handled in milliseconds basis. + (++) Time base configuration function (HAL_InitTick ()) is called automatically + at the beginning of the program after reset by HAL_Init() or at any time + when clock is configured, by HAL_RCC_ClockConfig(). + (++) Source of time base is configured to generate interrupts at regular + time intervals. 
Care must be taken if HAL_Delay() is called from a + peripheral ISR process, the Tick interrupt line must have higher priority + (numerically lower) than the peripheral interrupt. Otherwise the caller + ISR process will be blocked. + (++) functions affecting time base configurations are declared as __weak + to make override possible in case of other implementations in user file. +@endverbatim + * @{ + */ + +/** + * @brief This function is used to initialize the HAL Library; it must be the first + * instruction to be executed in the main program (before to call any other + * HAL function), it performs the following: + * Configure the Flash prefetch, and instruction cache through ART accelerator. + * Configures the SysTick to generate an interrupt each 1 millisecond, + * which is clocked by the HSI (at this stage, the clock is not yet + * configured and thus the system is running from the internal HSI at 16 MHz). + * Set NVIC Group Priority to 4. + * Calls the HAL_MspInit() callback function defined in user file + * "stm32f7xx_hal_msp.c" to do the global low level hardware initialization + * + * @note SysTick is used as time base for the HAL_Delay() function, the application + * need to ensure that the SysTick time base is always set to 1 millisecond + * to have correct HAL operation. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_Init(void) +{ + /* Configure Instruction cache through ART accelerator */ +#if (ART_ACCLERATOR_ENABLE != 0) + __HAL_FLASH_ART_ENABLE(); +#endif /* ART_ACCLERATOR_ENABLE */ + + /* Configure Flash prefetch */ +#if (PREFETCH_ENABLE != 0U) + __HAL_FLASH_PREFETCH_BUFFER_ENABLE(); +#endif /* PREFETCH_ENABLE */ + + /* Set Interrupt Group Priority */ + HAL_NVIC_SetPriorityGrouping(NVIC_PRIORITYGROUP_4); + + /* Use systick as time base source and configure 1ms tick (default clock after Reset is HSI) */ + HAL_InitTick(TICK_INT_PRIORITY); + + /* Init the low level hardware */ + HAL_MspInit(); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief This function de-Initializes common part of the HAL and stops the systick. + * This function is optional. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DeInit(void) +{ + /* Reset of all peripherals */ + __HAL_RCC_APB1_FORCE_RESET(); + __HAL_RCC_APB1_RELEASE_RESET(); + + __HAL_RCC_APB2_FORCE_RESET(); + __HAL_RCC_APB2_RELEASE_RESET(); + + __HAL_RCC_AHB1_FORCE_RESET(); + __HAL_RCC_AHB1_RELEASE_RESET(); + + __HAL_RCC_AHB2_FORCE_RESET(); + __HAL_RCC_AHB2_RELEASE_RESET(); + + __HAL_RCC_AHB3_FORCE_RESET(); + __HAL_RCC_AHB3_RELEASE_RESET(); + + /* De-Init the low level hardware */ + HAL_MspDeInit(); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initialize the MSP. + * @retval None + */ +__weak void HAL_MspInit(void) +{ + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes the MSP. + * @retval None + */ +__weak void HAL_MspDeInit(void) +{ + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief This function configures the source of the time base. + * The time source is configured to have 1ms time base with a dedicated + * Tick interrupt priority. + * @note This function is called automatically at the beginning of program after + * reset by HAL_Init() or at any time when clock is reconfigured by HAL_RCC_ClockConfig(). 
+ * @note In the default implementation, SysTick timer is the source of time base. + * It is used to generate interrupts at regular time intervals. + * Care must be taken if HAL_Delay() is called from a peripheral ISR process, + * The SysTick interrupt must have higher priority (numerically lower) + * than the peripheral interrupt. Otherwise the caller ISR process will be blocked. + * The function is declared as __weak to be overwritten in case of other + * implementation in user file. + * @param TickPriority Tick interrupt priority. + * @retval HAL status + */ +__weak HAL_StatusTypeDef HAL_InitTick(uint32_t TickPriority) +{ + /* Configure the SysTick to have interrupt in 1ms time basis*/ + if (HAL_SYSTICK_Config(SystemCoreClock / (1000U / uwTickFreq)) > 0U) + { + return HAL_ERROR; + } + + /* Configure the SysTick IRQ priority */ + if (TickPriority < (1UL << __NVIC_PRIO_BITS)) + { + HAL_NVIC_SetPriority(SysTick_IRQn, TickPriority, 0U); + uwTickPrio = TickPriority; + } + else + { + return HAL_ERROR; + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup HAL_Exported_Functions_Group2 HAL Control functions + * @brief HAL Control functions + * +@verbatim + =============================================================================== + ##### HAL Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Provide a tick value in millisecond + (+) Provide a blocking delay in millisecond + (+) Suspend the time base source interrupt + (+) Resume the time base source interrupt + (+) Get the HAL API driver version + (+) Get the device identifier + (+) Get the device revision identifier + (+) Enable/Disable Debug module during SLEEP mode + (+) Enable/Disable Debug module during STOP mode + (+) Enable/Disable Debug module during STANDBY mode + +@endverbatim + * @{ + */ + +/** + * @brief This function is called to increment a global variable "uwTick" + * used as application time base. + * @note In the default implementation, this variable is incremented each 1ms + * in SysTick ISR. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval None + */ +__weak void HAL_IncTick(void) +{ + uwTick += uwTickFreq; +} + +/** + * @brief Provides a tick value in millisecond. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval tick value + */ +__weak uint32_t HAL_GetTick(void) +{ + return uwTick; +} + +/** + * @brief This function returns a tick priority. + * @retval tick priority + */ +uint32_t HAL_GetTickPrio(void) +{ + return uwTickPrio; +} + +/** + * @brief Set new tick Freq. + * @retval Status + */ +HAL_StatusTypeDef HAL_SetTickFreq(HAL_TickFreqTypeDef Freq) +{ + HAL_StatusTypeDef status = HAL_OK; + HAL_TickFreqTypeDef prevTickFreq; + + assert_param(IS_TICKFREQ(Freq)); + + if (uwTickFreq != Freq) + { + /* Back up uwTickFreq frequency */ + prevTickFreq = uwTickFreq; + + /* Update uwTickFreq global variable used by HAL_InitTick() */ + uwTickFreq = Freq; + + /* Apply the new tick Freq */ + status = HAL_InitTick(uwTickPrio); + + if (status != HAL_OK) + { + /* Restore previous tick frequency */ + uwTickFreq = prevTickFreq; + } + } + + return status; +} + +/** + * @brief Return tick frequency. 
+ * @retval tick period in Hz + */ +HAL_TickFreqTypeDef HAL_GetTickFreq(void) +{ + return uwTickFreq; +} + +/** + * @brief This function provides minimum delay (in milliseconds) based + * on variable incremented. + * @note In the default implementation , SysTick timer is the source of time base. + * It is used to generate interrupts at regular time intervals where uwTick + * is incremented. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @param Delay specifies the delay time length, in milliseconds. + * @retval None + */ +__weak void HAL_Delay(uint32_t Delay) +{ + uint32_t tickstart = HAL_GetTick(); + uint32_t wait = Delay; + + /* Add a freq to guarantee minimum wait */ + if (wait < HAL_MAX_DELAY) + { + wait += (uint32_t)(uwTickFreq); + } + + while ((HAL_GetTick() - tickstart) < wait) + { + } +} + +/** + * @brief Suspend Tick increment. + * @note In the default implementation , SysTick timer is the source of time base. It is + * used to generate interrupts at regular time intervals. Once HAL_SuspendTick() + * is called, the SysTick interrupt will be disabled and so Tick increment + * is suspended. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval None + */ +__weak void HAL_SuspendTick(void) +{ + /* Disable SysTick Interrupt */ + SysTick->CTRL &= ~SysTick_CTRL_TICKINT_Msk; +} + +/** + * @brief Resume Tick increment. + * @note In the default implementation , SysTick timer is the source of time base. It is + * used to generate interrupts at regular time intervals. Once HAL_ResumeTick() + * is called, the SysTick interrupt will be enabled and so Tick increment + * is resumed. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval None + */ +__weak void HAL_ResumeTick(void) +{ + /* Enable SysTick Interrupt */ + SysTick->CTRL |= SysTick_CTRL_TICKINT_Msk; +} + +/** + * @brief Returns the HAL revision + * @retval version : 0xXYZR (8bits for each decimal, R for RC) + */ +uint32_t HAL_GetHalVersion(void) +{ + return __STM32F7xx_HAL_VERSION; +} + +/** + * @brief Returns the device revision identifier. + * @retval Device revision identifier + */ +uint32_t HAL_GetREVID(void) +{ + return((DBGMCU->IDCODE) >> 16U); +} + +/** + * @brief Returns the device identifier. 
+ * @retval Device identifier + */ +uint32_t HAL_GetDEVID(void) +{ + return((DBGMCU->IDCODE) & IDCODE_DEVID_MASK); +} + +/** + * @brief Returns first word of the unique device identifier (UID based on 96 bits) + * @retval Device identifier + */ +uint32_t HAL_GetUIDw0(void) +{ + return(READ_REG(*((uint32_t *)UID_BASE))); +} + +/** + * @brief Returns second word of the unique device identifier (UID based on 96 bits) + * @retval Device identifier + */ +uint32_t HAL_GetUIDw1(void) +{ + return(READ_REG(*((uint32_t *)(UID_BASE + 4U)))); +} + +/** + * @brief Returns third word of the unique device identifier (UID based on 96 bits) + * @retval Device identifier + */ +uint32_t HAL_GetUIDw2(void) +{ + return(READ_REG(*((uint32_t *)(UID_BASE + 8U)))); +} + +/** + * @brief Enable the Debug Module during SLEEP mode + * @retval None + */ +void HAL_DBGMCU_EnableDBGSleepMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP); +} + +/** + * @brief Disable the Debug Module during SLEEP mode + * @retval None + */ +void HAL_DBGMCU_DisableDBGSleepMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP); +} + +/** + * @brief Enable the Debug Module during STOP mode + * @retval None + */ +void HAL_DBGMCU_EnableDBGStopMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP); +} + +/** + * @brief Disable the Debug Module during STOP mode + * @retval None + */ +void HAL_DBGMCU_DisableDBGStopMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP); +} + +/** + * @brief Enable the Debug Module during STANDBY mode + * @retval None + */ +void HAL_DBGMCU_EnableDBGStandbyMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY); +} + +/** + * @brief Disable the Debug Module during STANDBY mode + * @retval None + */ +void HAL_DBGMCU_DisableDBGStandbyMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY); +} + +/** + * @brief Enables the I/O Compensation Cell. + * @note The I/O compensation cell can be used only when the device supply + * voltage ranges from 2.4 to 3.6 V. + * @retval None + */ +void HAL_EnableCompensationCell(void) +{ + SYSCFG->CMPCR |= SYSCFG_CMPCR_CMP_PD; +} + +/** + * @brief Power-down the I/O Compensation Cell. + * @note The I/O compensation cell can be used only when the device supply + * voltage ranges from 2.4 to 3.6 V. + * @retval None + */ +void HAL_DisableCompensationCell(void) +{ + SYSCFG->CMPCR &= (uint32_t)~((uint32_t)SYSCFG_CMPCR_CMP_PD); +} + +/** + * @brief Enables the FMC Memory Mapping Swapping. + * + * @note SDRAM is accessible at 0x60000000 + * and NOR/RAM is accessible at 0xC0000000 + * + * @retval None + */ +void HAL_EnableFMCMemorySwapping(void) +{ + SYSCFG->MEMRMP |= SYSCFG_MEMRMP_SWP_FMC_0; +} + +/** + * @brief Disables the FMC Memory Mapping Swapping + * + * @note SDRAM is accessible at 0xC0000000 (default mapping) + * and NOR/RAM is accessible at 0x60000000 (default mapping) + * + * @retval None + */ +void HAL_DisableFMCMemorySwapping(void) +{ + + SYSCFG->MEMRMP &= (uint32_t)~((uint32_t)SYSCFG_MEMRMP_SWP_FMC); +} + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +/** +* @brief Enable the Internal FLASH Bank Swapping. +* +* @note This function can be used only for STM32F77xx/STM32F76xx devices. 
+* +* @note Flash Bank2 mapped at 0x08000000 (AXI) (aliased at 0x00200000 (TCM)) +* and Flash Bank1 mapped at 0x08100000 (AXI) (aliased at 0x00300000 (TCM)) +* +* @retval None +*/ +void HAL_EnableMemorySwappingBank(void) +{ + SET_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_SWP_FB); +} + +/** +* @brief Disable the Internal FLASH Bank Swapping. +* +* @note This function can be used only for STM32F77xx/STM32F76xx devices. +* +* @note The default state : Flash Bank1 mapped at 0x08000000 (AXI) (aliased at 0x00200000 (TCM)) +* and Flash Bank2 mapped at 0x08100000 (AXI)( aliased at 0x00300000 (TCM)) +* +* @retval None +*/ +void HAL_DisableMemorySwappingBank(void) +{ + CLEAR_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_SWP_FB); +} +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc.c new file mode 100644 index 000000000..f6b64e7e4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc.c @@ -0,0 +1,2063 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_adc.c + * @author MCD Application Team + * @brief This file provides firmware functions to manage the following + * functionalities of the Analog to Digital Convertor (ADC) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + State and errors functions + * + @verbatim + ============================================================================== + ##### ADC Peripheral features ##### + ============================================================================== + [..] + (#) 12-bit, 10-bit, 8-bit or 6-bit configurable resolution. + (#) Interrupt generation at the end of conversion, end of injected conversion, + and in case of analog watchdog or overrun events + (#) Single and continuous conversion modes. + (#) Scan mode for automatic conversion of channel 0 to channel x. + (#) Data alignment with in-built data coherency. + (#) Channel-wise programmable sampling time. + (#) External trigger option with configurable polarity for both regular and + injected conversion. + (#) Dual/Triple mode (on devices with 2 ADCs or more). + (#) Configurable DMA data storage in Dual/Triple ADC mode. + (#) Configurable delay between conversions in Dual/Triple interleaved mode. + (#) ADC conversion type (refer to the datasheets). + (#) ADC supply requirements: 2.4 V to 3.6 V at full speed and down to 1.8 V at + slower speed. + (#) ADC input range: VREF(minus) = VIN = VREF(plus). + (#) DMA request generation during regular channel conversion. + + + ##### How to use this driver ##### + ============================================================================== + [..] + (#)Initialize the ADC low level resources by implementing the HAL_ADC_MspInit(): + (##) Enable the ADC interface clock using __HAL_RCC_ADC_CLK_ENABLE() + (##) ADC pins configuration + (+++) Enable the clock for the ADC GPIOs using the following function: + __HAL_RCC_GPIOx_CLK_ENABLE() + (+++) Configure these ADC pins in analog mode using HAL_GPIO_Init() + (##) In case of using interrupts (e.g. 
HAL_ADC_Start_IT()) + (+++) Configure the ADC interrupt priority using HAL_NVIC_SetPriority() + (+++) Enable the ADC IRQ handler using HAL_NVIC_EnableIRQ() + (+++) In ADC IRQ handler, call HAL_ADC_IRQHandler() + (##) In case of using DMA to control data transfer (e.g. HAL_ADC_Start_DMA()) + (+++) Enable the DMAx interface clock using __HAL_RCC_DMAx_CLK_ENABLE() + (+++) Configure and enable two DMA streams stream for managing data + transfer from peripheral to memory (output stream) + (+++) Associate the initialized DMA handle to the CRYP DMA handle + using __HAL_LINKDMA() + (+++) Configure the priority and enable the NVIC for the transfer complete + interrupt on the two DMA Streams. The output stream should have higher + priority than the input stream. + + *** Configuration of ADC, groups regular/injected, channels parameters *** + ============================================================================== + [..] + (#) Configure the ADC parameters (resolution, data alignment, ...) + and regular group parameters (conversion trigger, sequencer, ...) + using function HAL_ADC_Init(). + + (#) Configure the channels for regular group parameters (channel number, + channel rank into sequencer, ..., into regular group) + using function HAL_ADC_ConfigChannel(). + + (#) Optionally, configure the injected group parameters (conversion trigger, + sequencer, ..., of injected group) + and the channels for injected group parameters (channel number, + channel rank into sequencer, ..., into injected group) + using function HAL_ADCEx_InjectedConfigChannel(). + + (#) Optionally, configure the analog watchdog parameters (channels + monitored, thresholds, ...) using function HAL_ADC_AnalogWDGConfig(). + + (#) Optionally, for devices with several ADC instances: configure the + multimode parameters using function HAL_ADCEx_MultiModeConfigChannel(). + + *** Execution of ADC conversions *** + ============================================================================== + [..] + (#) ADC driver can be used among three modes: polling, interruption, + transfer by DMA. + + *** Polling mode IO operation *** + ================================= + [..] + (+) Start the ADC peripheral using HAL_ADC_Start() + (+) Wait for end of conversion using HAL_ADC_PollForConversion(), at this stage + user can specify the value of timeout according to his end application + (+) To read the ADC converted values, use the HAL_ADC_GetValue() function. + (+) Stop the ADC peripheral using HAL_ADC_Stop() + + *** Interrupt mode IO operation *** + =================================== + [..] + (+) Start the ADC peripheral using HAL_ADC_Start_IT() + (+) Use HAL_ADC_IRQHandler() called under ADC_IRQHandler() Interrupt subroutine + (+) At ADC end of conversion HAL_ADC_ConvCpltCallback() function is executed and user can + add his own code by customization of function pointer HAL_ADC_ConvCpltCallback + (+) In case of ADC Error, HAL_ADC_ErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_ADC_ErrorCallback + (+) Stop the ADC peripheral using HAL_ADC_Stop_IT() + + *** DMA mode IO operation *** + ============================== + [..] 
+ (+) Start the ADC peripheral using HAL_ADC_Start_DMA(), at this stage the user specify the length + of data to be transferred at each end of conversion + (+) At The end of data transfer by HAL_ADC_ConvCpltCallback() function is executed and user can + add his own code by customization of function pointer HAL_ADC_ConvCpltCallback + (+) In case of transfer Error, HAL_ADC_ErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_ADC_ErrorCallback + (+) Stop the ADC peripheral using HAL_ADC_Stop_DMA() + + *** ADC HAL driver macros list *** + ============================================= + [..] + Below the list of most used macros in ADC HAL driver. + + (+) __HAL_ADC_ENABLE : Enable the ADC peripheral + (+) __HAL_ADC_DISABLE : Disable the ADC peripheral + (+) __HAL_ADC_ENABLE_IT: Enable the ADC end of conversion interrupt + (+) __HAL_ADC_DISABLE_IT: Disable the ADC end of conversion interrupt + (+) __HAL_ADC_GET_IT_SOURCE: Check if the specified ADC interrupt source is enabled or disabled + (+) __HAL_ADC_CLEAR_FLAG: Clear the ADC's pending flags + (+) __HAL_ADC_GET_FLAG: Get the selected ADC's flag status + (+) ADC_GET_RESOLUTION: Return resolution bits in CR1 register + + *** Callback functions *** + ============================== + [..] + (@) Callback functions must be implemented in user program: + (+@) HAL_ADC_ErrorCallback() + (+@) HAL_ADC_LevelOutOfWindowCallback() (callback of analog watchdog) + (+@) HAL_ADC_ConvCpltCallback() + (+@) HAL_ADC_ConvHalfCpltCallback + + (@) You can refer to the ADC HAL driver header file for more useful macros + + *** Deinitialization of ADC *** + ============================================================================== + [..] + (#) Disable the ADC interface + (++) ADC clock can be hard reset and disabled at RCC top level. + (++) Hard reset of ADC peripherals + using macro __HAL_RCC_ADC_FORCE_RESET(), __HAL_RCC_ADC_RELEASE_RESET(). + (++) ADC clock disable using the equivalent macro/functions as configuration step. + (+++) Example: + Into HAL_ADC_MspDeInit() (recommended code location) or with + other device clock parameters configuration: + (+++) HAL_RCC_GetOscConfig(&RCC_OscInitStructure); + (+++) RCC_OscInitStructure.OscillatorType = RCC_OSCILLATORTYPE_HSI; + (+++) RCC_OscInitStructure.HSIState = RCC_HSI_OFF; (if not used for system clock) + (+++) HAL_RCC_OscConfig(&RCC_OscInitStructure); + + (#) ADC pins configuration + (++) Disable the clock for the ADC GPIOs using macro __HAL_RCC_GPIOx_CLK_DISABLE() + + (#) Optionally, in case of usage of ADC with interruptions: + (++) Disable the NVIC for ADC using function HAL_NVIC_DisableIRQ(ADCx_IRQn) + + (#) Optionally, in case of usage of DMA: + (++) Deinitialize the DMA using function HAL_DMA_DeInit(). + (++) Disable the NVIC for DMA using function HAL_NVIC_DisableIRQ(DMAx_Channelx_IRQn) + + *** Callback registration *** + ============================================================================== + [..] + + The compilation flag USE_HAL_ADC_REGISTER_CALLBACKS, when set to 1, + allows the user to configure dynamically the driver callbacks. + Use Functions @ref HAL_ADC_RegisterCallback() + to register an interrupt callback. + [..] 
+ + Function @ref HAL_ADC_RegisterCallback() allows to register following callbacks: + (+) ConvCpltCallback : ADC conversion complete callback + (+) ConvHalfCpltCallback : ADC conversion DMA half-transfer callback + (+) LevelOutOfWindowCallback : ADC analog watchdog 1 callback + (+) ErrorCallback : ADC error callback + (+) InjectedConvCpltCallback : ADC group injected conversion complete callback + (+) InjectedQueueOverflowCallback : ADC group injected context queue overflow callback + (+) LevelOutOfWindow2Callback : ADC analog watchdog 2 callback + (+) LevelOutOfWindow3Callback : ADC analog watchdog 3 callback + (+) EndOfSamplingCallback : ADC end of sampling callback + (+) MspInitCallback : ADC Msp Init callback + (+) MspDeInitCallback : ADC Msp DeInit callback + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + [..] + + Use function @ref HAL_ADC_UnRegisterCallback to reset a callback to the default + weak function. + [..] + + @ref HAL_ADC_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) ConvCpltCallback : ADC conversion complete callback + (+) ConvHalfCpltCallback : ADC conversion DMA half-transfer callback + (+) LevelOutOfWindowCallback : ADC analog watchdog 1 callback + (+) ErrorCallback : ADC error callback + (+) InjectedConvCpltCallback : ADC group injected conversion complete callback + (+) InjectedQueueOverflowCallback : ADC group injected context queue overflow callback + (+) LevelOutOfWindow2Callback : ADC analog watchdog 2 callback + (+) LevelOutOfWindow3Callback : ADC analog watchdog 3 callback + (+) EndOfSamplingCallback : ADC end of sampling callback + (+) MspInitCallback : ADC Msp Init callback + (+) MspDeInitCallback : ADC Msp DeInit callback + [..] + + By default, after the @ref HAL_ADC_Init() and when the state is @ref HAL_ADC_STATE_RESET + all callbacks are set to the corresponding weak functions: + examples @ref HAL_ADC_ConvCpltCallback(), @ref HAL_ADC_ErrorCallback(). + Exception done for MspInit and MspDeInit functions that are + reset to the legacy weak functions in the @ref HAL_ADC_Init()/ @ref HAL_ADC_DeInit() only when + these callbacks are null (not registered beforehand). + [..] + + If MspInit or MspDeInit are not null, the @ref HAL_ADC_Init()/ @ref HAL_ADC_DeInit() + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) whatever the state. + [..] + + Callbacks can be registered/unregistered in @ref HAL_ADC_STATE_READY state only. + Exception done MspInit/MspDeInit functions that can be registered/unregistered + in @ref HAL_ADC_STATE_READY or @ref HAL_ADC_STATE_RESET state, + thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. + [..] + + Then, the user first registers the MspInit/MspDeInit user callbacks + using @ref HAL_ADC_RegisterCallback() before calling @ref HAL_ADC_DeInit() + or @ref HAL_ADC_Init() function. + [..] + + When the compilation flag USE_HAL_ADC_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. + + @endverbatim + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup ADC ADC + * @brief ADC driver modules + * @{ + */ + +#ifdef HAL_ADC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup ADC_Private_Functions + * @{ + */ +/* Private function prototypes -----------------------------------------------*/ +static void ADC_Init(ADC_HandleTypeDef* hadc); +static void ADC_DMAConvCplt(DMA_HandleTypeDef *hdma); +static void ADC_DMAError(DMA_HandleTypeDef *hdma); +static void ADC_DMAHalfConvCplt(DMA_HandleTypeDef *hdma); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup ADC_Exported_Functions ADC Exported Functions + * @{ + */ + +/** @defgroup ADC_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initialize and configure the ADC. + (+) De-initialize the ADC. + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the ADCx peripheral according to the specified parameters + * in the ADC_InitStruct and initializes the ADC MSP. + * + * @note This function is used to configure the global features of the ADC ( + * ClockPrescaler, Resolution, Data Alignment and number of conversion), however, + * the rest of the configuration parameters are specific to the regular + * channels group (scan mode activation, continuous mode activation, + * External trigger source and edge, DMA continuous request after the + * last transfer and End of conversion selection). + * + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef* hadc) +{ + HAL_StatusTypeDef tmp_hal_status = HAL_OK; + + /* Check ADC handle */ + if(hadc == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); + assert_param(IS_ADC_CLOCKPRESCALER(hadc->Init.ClockPrescaler)); + assert_param(IS_ADC_RESOLUTION(hadc->Init.Resolution)); + assert_param(IS_ADC_SCAN_MODE(hadc->Init.ScanConvMode)); + assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); + assert_param(IS_ADC_EXT_TRIG(hadc->Init.ExternalTrigConv)); + assert_param(IS_ADC_DATA_ALIGN(hadc->Init.DataAlign)); + assert_param(IS_ADC_REGULAR_LENGTH(hadc->Init.NbrOfConversion)); + assert_param(IS_FUNCTIONAL_STATE(hadc->Init.DMAContinuousRequests)); + assert_param(IS_ADC_EOCSelection(hadc->Init.EOCSelection)); + assert_param(IS_FUNCTIONAL_STATE(hadc->Init.DiscontinuousConvMode)); + + if(hadc->Init.ExternalTrigConv != ADC_SOFTWARE_START) + { + assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); + } + + if(hadc->State == HAL_ADC_STATE_RESET) + { +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + /* Init the ADC Callback settings */ + hadc->ConvCpltCallback = HAL_ADC_ConvCpltCallback; /* Legacy weak callback */ + hadc->ConvHalfCpltCallback = HAL_ADC_ConvHalfCpltCallback; /* Legacy weak callback */ + hadc->LevelOutOfWindowCallback = HAL_ADC_LevelOutOfWindowCallback; /* Legacy weak callback */ + hadc->ErrorCallback = HAL_ADC_ErrorCallback; /* Legacy weak callback */ + hadc->InjectedConvCpltCallback = HAL_ADCEx_InjectedConvCpltCallback; /* Legacy weak callback */ + if (hadc->MspInitCallback == NULL) + { + hadc->MspInitCallback = HAL_ADC_MspInit; /* Legacy weak MspInit */ + } + + /* Init the low level hardware */ + hadc->MspInitCallback(hadc); +#else + /* Init the low level hardware */ + HAL_ADC_MspInit(hadc); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ + + /* Initialize ADC error code */ + ADC_CLEAR_ERRORCODE(hadc); + + /* Allocate lock resource and initialize it */ + hadc->Lock = HAL_UNLOCKED; + } + + /* Configuration of ADC parameters if previous preliminary actions are */ + /* correctly completed. */ + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_ERROR_INTERNAL)) + { + /* Set ADC state */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, + HAL_ADC_STATE_BUSY_INTERNAL); + + /* Set ADC parameters */ + ADC_Init(hadc); + + /* Set ADC error code to none */ + ADC_CLEAR_ERRORCODE(hadc); + + /* Set the ADC state */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_BUSY_INTERNAL, + HAL_ADC_STATE_READY); + } + else + { + tmp_hal_status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return tmp_hal_status; +} + +/** + * @brief Deinitializes the ADCx peripheral registers to their default reset values. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_DeInit(ADC_HandleTypeDef* hadc) +{ + HAL_StatusTypeDef tmp_hal_status = HAL_OK; + + /* Check ADC handle */ + if(hadc == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); + + /* Set ADC state */ + SET_BIT(hadc->State, HAL_ADC_STATE_BUSY_INTERNAL); + + /* Stop potential conversion on going, on regular and injected groups */ + /* Disable ADC peripheral */ + __HAL_ADC_DISABLE(hadc); + + /* Configuration of ADC parameters if previous preliminary actions are */ + /* correctly completed. */ + if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + { +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + if (hadc->MspDeInitCallback == NULL) + { + hadc->MspDeInitCallback = HAL_ADC_MspDeInit; /* Legacy weak MspDeInit */ + } + + /* DeInit the low level hardware: RCC clock, NVIC */ + hadc->MspDeInitCallback(hadc); +#else + /* DeInit the low level hardware: RCC clock, NVIC */ + HAL_ADC_MspDeInit(hadc); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ + + /* Set ADC error code to none */ + ADC_CLEAR_ERRORCODE(hadc); + + /* Set ADC state */ + hadc->State = HAL_ADC_STATE_RESET; + } + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return tmp_hal_status; +} + +/** + * @brief Initializes the ADC MSP. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval None + */ +__weak void HAL_ADC_MspInit(ADC_HandleTypeDef* hadc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hadc); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ADC_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes the ADC MSP. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval None + */ +__weak void HAL_ADC_MspDeInit(ADC_HandleTypeDef* hadc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hadc); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ADC_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User ADC Callback + * To be used instead of the weak predefined callback + * @param hadc Pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
+ * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_ADC_CONVERSION_COMPLETE_CB_ID ADC conversion complete callback ID + * @arg @ref HAL_ADC_CONVERSION_HALF_CB_ID ADC conversion DMA half-transfer callback ID + * @arg @ref HAL_ADC_LEVEL_OUT_OF_WINDOW_1_CB_ID ADC analog watchdog 1 callback ID + * @arg @ref HAL_ADC_ERROR_CB_ID ADC error callback ID + * @arg @ref HAL_ADC_INJ_CONVERSION_COMPLETE_CB_ID ADC group injected conversion complete callback ID + * @arg @ref HAL_ADC_INJ_QUEUE_OVEFLOW_CB_ID ADC group injected context queue overflow callback ID + * @arg @ref HAL_ADC_MSPINIT_CB_ID ADC Msp Init callback ID + * @arg @ref HAL_ADC_MSPDEINIT_CB_ID ADC Msp DeInit callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_RegisterCallback(ADC_HandleTypeDef *hadc, HAL_ADC_CallbackIDTypeDef CallbackID, pADC_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hadc->ErrorCode |= HAL_ADC_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + if ((hadc->State & HAL_ADC_STATE_READY) != 0UL) + { + switch (CallbackID) + { + case HAL_ADC_CONVERSION_COMPLETE_CB_ID : + hadc->ConvCpltCallback = pCallback; + break; + + case HAL_ADC_CONVERSION_HALF_CB_ID : + hadc->ConvHalfCpltCallback = pCallback; + break; + + case HAL_ADC_LEVEL_OUT_OF_WINDOW_1_CB_ID : + hadc->LevelOutOfWindowCallback = pCallback; + break; + + case HAL_ADC_ERROR_CB_ID : + hadc->ErrorCallback = pCallback; + break; + + case HAL_ADC_INJ_CONVERSION_COMPLETE_CB_ID : + hadc->InjectedConvCpltCallback = pCallback; + break; + + case HAL_ADC_MSPINIT_CB_ID : + hadc->MspInitCallback = pCallback; + break; + + case HAL_ADC_MSPDEINIT_CB_ID : + hadc->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hadc->ErrorCode |= HAL_ADC_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_ADC_STATE_RESET == hadc->State) + { + switch (CallbackID) + { + case HAL_ADC_MSPINIT_CB_ID : + hadc->MspInitCallback = pCallback; + break; + + case HAL_ADC_MSPDEINIT_CB_ID : + hadc->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hadc->ErrorCode |= HAL_ADC_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hadc->ErrorCode |= HAL_ADC_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + return status; +} + +/** + * @brief Unregister a ADC Callback + * ADC callback is redirected to the weak predefined callback + * @param hadc Pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
+ * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_ADC_CONVERSION_COMPLETE_CB_ID ADC conversion complete callback ID + * @arg @ref HAL_ADC_CONVERSION_HALF_CB_ID ADC conversion DMA half-transfer callback ID + * @arg @ref HAL_ADC_LEVEL_OUT_OF_WINDOW_1_CB_ID ADC analog watchdog 1 callback ID + * @arg @ref HAL_ADC_ERROR_CB_ID ADC error callback ID + * @arg @ref HAL_ADC_INJ_CONVERSION_COMPLETE_CB_ID ADC group injected conversion complete callback ID + * @arg @ref HAL_ADC_INJ_QUEUE_OVEFLOW_CB_ID ADC group injected context queue overflow callback ID + * @arg @ref HAL_ADC_MSPINIT_CB_ID ADC Msp Init callback ID + * @arg @ref HAL_ADC_MSPDEINIT_CB_ID ADC Msp DeInit callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_UnRegisterCallback(ADC_HandleTypeDef *hadc, HAL_ADC_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + if ((hadc->State & HAL_ADC_STATE_READY) != 0UL) + { + switch (CallbackID) + { + case HAL_ADC_CONVERSION_COMPLETE_CB_ID : + hadc->ConvCpltCallback = HAL_ADC_ConvCpltCallback; + break; + + case HAL_ADC_CONVERSION_HALF_CB_ID : + hadc->ConvHalfCpltCallback = HAL_ADC_ConvHalfCpltCallback; + break; + + case HAL_ADC_LEVEL_OUT_OF_WINDOW_1_CB_ID : + hadc->LevelOutOfWindowCallback = HAL_ADC_LevelOutOfWindowCallback; + break; + + case HAL_ADC_ERROR_CB_ID : + hadc->ErrorCallback = HAL_ADC_ErrorCallback; + break; + + case HAL_ADC_INJ_CONVERSION_COMPLETE_CB_ID : + hadc->InjectedConvCpltCallback = HAL_ADCEx_InjectedConvCpltCallback; + break; + + case HAL_ADC_MSPINIT_CB_ID : + hadc->MspInitCallback = HAL_ADC_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_ADC_MSPDEINIT_CB_ID : + hadc->MspDeInitCallback = HAL_ADC_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + hadc->ErrorCode |= HAL_ADC_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_ADC_STATE_RESET == hadc->State) + { + switch (CallbackID) + { + case HAL_ADC_MSPINIT_CB_ID : + hadc->MspInitCallback = HAL_ADC_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_ADC_MSPDEINIT_CB_ID : + hadc->MspDeInitCallback = HAL_ADC_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + hadc->ErrorCode |= HAL_ADC_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hadc->ErrorCode |= HAL_ADC_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + return status; +} + +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ + + +/** + * @} + */ + +/** @defgroup ADC_Exported_Functions_Group2 IO operation functions + * @brief IO operation functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Start conversion of regular channel. + (+) Stop conversion of regular channel. + (+) Start conversion of regular channel and enable interrupt. + (+) Stop conversion of regular channel and disable interrupt. + (+) Start conversion of regular channel and enable DMA transfer. + (+) Stop conversion of regular channel and disable DMA transfer. + (+) Handle ADC interrupt request. + +@endverbatim + * @{ + */ + +/** + * @brief Enables ADC and starts conversion of the regular channels. 
+ * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_Start(ADC_HandleTypeDef* hadc) +{ + __IO uint32_t counter = 0; + + /* Check the parameters */ + assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); + assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Enable the ADC peripheral */ + /* Check if ADC peripheral is disabled in order to enable it and wait during + Tstab time the ADC's stabilization */ + if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { + /* Enable the Peripheral */ + __HAL_ADC_ENABLE(hadc); + + /* Delay for ADC stabilization time */ + /* Compute number of CPU cycles to wait for */ + counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000)); + while(counter != 0) + { + counter--; + } + } + + /* Start conversion if ADC is effectively enabled */ + if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Set ADC state */ + /* - Clear state bitfield related to regular group conversion results */ + /* - Set state bitfield related to regular group operation */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_READY | HAL_ADC_STATE_REG_EOC | HAL_ADC_STATE_REG_OVR, + HAL_ADC_STATE_REG_BUSY); + + /* If conversions on group regular are also triggering group injected, */ + /* update ADC state. */ + if (READ_BIT(hadc->Instance->CR1, ADC_CR1_JAUTO) != RESET) + { + ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); + } + + /* State machine update: Check if an injected conversion is ongoing */ + if (HAL_IS_BIT_SET(hadc->State, HAL_ADC_STATE_INJ_BUSY)) + { + /* Reset ADC error code fields related to conversions on group regular */ + CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); + } + else + { + /* Reset ADC all error code fields */ + ADC_CLEAR_ERRORCODE(hadc); + } + + /* Process unlocked */ + /* Unlock before starting ADC conversions: in case of potential */ + /* interruption, to let the process to ADC IRQ Handler. */ + __HAL_UNLOCK(hadc); + + /* Clear regular group conversion flag and overrun flag */ + /* (To ensure of no unknown state from potential previous ADC operations) */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_EOC | ADC_FLAG_OVR); + + /* Check if Multimode enabled */ + if(HAL_IS_BIT_CLR(ADC->CCR, ADC_CCR_MULTI)) + { + /* if no external trigger present enable software conversion of regular channels */ + if((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) + { + /* Enable the selected ADC software conversion for regular group */ + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + } + } + else + { + /* if instance of handle correspond to ADC1 and no external trigger present enable software conversion of regular channels */ + if((hadc->Instance == ADC1) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) + { + /* Enable the selected ADC software conversion for regular group */ + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + } + + /* if dual mode is selected, ADC3 works independently. 
*/ + /* check if the mode selected is not triple */ + if( HAL_IS_BIT_CLR(ADC->CCR, ADC_CCR_MULTI_4) ) + { + /* if instance of handle correspond to ADC3 and no external trigger present enable software conversion of regular channels */ + if((hadc->Instance == ADC3) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) + { + /* Enable the selected ADC software conversion for regular group */ + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + } + } + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Disables ADC and stop conversion of regular channels. + * + * @note Caution: This function will stop also injected channels. + * + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * + * @retval HAL status. + */ +HAL_StatusTypeDef HAL_ADC_Stop(ADC_HandleTypeDef* hadc) +{ + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Stop potential conversion on going, on regular and injected groups */ + /* Disable ADC peripheral */ + __HAL_ADC_DISABLE(hadc); + + /* Check if ADC is effectively disabled */ + if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Set ADC state */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, + HAL_ADC_STATE_READY); + } + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Poll for regular conversion complete + * @note ADC conversion flags EOS (end of sequence) and EOC (end of + * conversion) are cleared by this function. + * @note This function cannot be used in a particular setup: ADC configured + * in DMA mode and polling for end of each conversion (ADC init + * parameter "EOCSelection" set to ADC_EOC_SINGLE_CONV). + * In this case, DMA resets the flag EOC and polling cannot be + * performed on each conversion. Nevertheless, polling can still + * be performed on the complete sequence. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @param Timeout Timeout value in millisecond. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_PollForConversion(ADC_HandleTypeDef* hadc, uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Verification that ADC configuration is compliant with polling for */ + /* each conversion: */ + /* Particular case is ADC configured in DMA mode and ADC sequencer with */ + /* several ranks and polling for end of each conversion. */ + /* For code simplicity sake, this particular case is generalized to */ + /* ADC configured in DMA mode and polling for end of each conversion. 
*/ + if (HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_EOCS) && + HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_DMA) ) + { + /* Update ADC state machine to error */ + SET_BIT(hadc->State, HAL_ADC_STATE_ERROR_CONFIG); + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + return HAL_ERROR; + } + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check End of conversion flag */ + while(!(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_EOC))) + { + /* Check if timeout is disabled (set to infinite wait) */ + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0) || ((HAL_GetTick() - tickstart ) > Timeout)) + { + /* Update ADC state machine to timeout */ + SET_BIT(hadc->State, HAL_ADC_STATE_TIMEOUT); + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + return HAL_TIMEOUT; + } + } + } + + /* Clear regular group conversion flag */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_STRT | ADC_FLAG_EOC); + + /* Update ADC state machine */ + SET_BIT(hadc->State, HAL_ADC_STATE_REG_EOC); + + /* Determine whether any further conversion upcoming on group regular */ + /* by external trigger, continuous mode or scan sequence on going. */ + /* Note: On STM32F7, there is no independent flag of end of sequence. */ + /* The test of scan sequence on going is done either with scan */ + /* sequence disabled or with end of conversion flag set to */ + /* of end of sequence. */ + if(ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE) && + (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) ) + { + /* Set ADC state */ + CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); + + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_INJ_BUSY)) + { + SET_BIT(hadc->State, HAL_ADC_STATE_READY); + } + } + + /* Return ADC state */ + return HAL_OK; +} + +/** + * @brief Poll for conversion event + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @param EventType the ADC event type. + * This parameter can be one of the following values: + * @arg ADC_AWD_EVENT: ADC Analog watch Dog event. + * @arg ADC_OVR_EVENT: ADC Overrun event. + * @param Timeout Timeout value in millisecond. 
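+ * @note   A hedged sketch of waiting for an analog watchdog event (assumes the
+ *         watchdog was configured beforehand with HAL_ADC_AnalogWDGConfig() on
+ *         a handle "hadc1"):
+ * @code
+ *   if (HAL_ADC_PollForEvent(&hadc1, ADC_AWD_EVENT, 100) == HAL_OK)
+ *   {
+ *     // hadc1.State now has HAL_ADC_STATE_AWD1 set: level out of window
+ *   }
+ * @endcode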
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_PollForEvent(ADC_HandleTypeDef* hadc, uint32_t EventType, uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); + assert_param(IS_ADC_EVENT_TYPE(EventType)); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check selected event flag */ + while(!(__HAL_ADC_GET_FLAG(hadc,EventType))) + { + /* Check for the Timeout */ + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0) || ((HAL_GetTick() - tickstart ) > Timeout)) + { + /* Update ADC state machine to timeout */ + SET_BIT(hadc->State, HAL_ADC_STATE_TIMEOUT); + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + return HAL_TIMEOUT; + } + } + } + + /* Analog watchdog (level out of window) event */ + if(EventType == ADC_AWD_EVENT) + { + /* Set ADC state */ + SET_BIT(hadc->State, HAL_ADC_STATE_AWD1); + + /* Clear ADC analog watchdog flag */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_AWD); + } + /* Overrun event */ + else + { + /* Set ADC state */ + SET_BIT(hadc->State, HAL_ADC_STATE_REG_OVR); + /* Set ADC error code to overrun */ + SET_BIT(hadc->ErrorCode, HAL_ADC_ERROR_OVR); + + /* Clear ADC overrun flag */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_OVR); + } + + /* Return ADC state */ + return HAL_OK; +} + + +/** + * @brief Enables the interrupt and starts ADC conversion of regular channels. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval HAL status. + */ +HAL_StatusTypeDef HAL_ADC_Start_IT(ADC_HandleTypeDef* hadc) +{ + __IO uint32_t counter = 0; + + /* Check the parameters */ + assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); + assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Enable the ADC peripheral */ + /* Check if ADC peripheral is disabled in order to enable it and wait during + Tstab time the ADC's stabilization */ + if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { + /* Enable the Peripheral */ + __HAL_ADC_ENABLE(hadc); + + /* Delay for ADC stabilization time */ + /* Compute number of CPU cycles to wait for */ + counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000)); + while(counter != 0) + { + counter--; + } + } + + /* Start conversion if ADC is effectively enabled */ + if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Set ADC state */ + /* - Clear state bitfield related to regular group conversion results */ + /* - Set state bitfield related to regular group operation */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_READY | HAL_ADC_STATE_REG_EOC | HAL_ADC_STATE_REG_OVR, + HAL_ADC_STATE_REG_BUSY); + + /* If conversions on group regular are also triggering group injected, */ + /* update ADC state. */ + if (READ_BIT(hadc->Instance->CR1, ADC_CR1_JAUTO) != RESET) + { + ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); + } + + /* State machine update: Check if an injected conversion is ongoing */ + if (HAL_IS_BIT_SET(hadc->State, HAL_ADC_STATE_INJ_BUSY)) + { + /* Reset ADC error code fields related to conversions on group regular */ + CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); + } + else + { + /* Reset ADC all error code fields */ + ADC_CLEAR_ERRORCODE(hadc); + } + + /* Process unlocked */ + /* Unlock before starting ADC conversions: in case of potential */ + /* interruption, to let the process to ADC IRQ Handler. 
*/ + __HAL_UNLOCK(hadc); + + /* Clear regular group conversion flag and overrun flag */ + /* (To ensure of no unknown state from potential previous ADC operations) */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_EOC | ADC_FLAG_OVR); + + /* Enable end of conversion interrupt for regular group */ + __HAL_ADC_ENABLE_IT(hadc, (ADC_IT_EOC | ADC_IT_OVR)); + + /* Check if Multimode enabled */ + if(HAL_IS_BIT_CLR(ADC->CCR, ADC_CCR_MULTI)) + { + /* if no external trigger present enable software conversion of regular channels */ + if((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) + { + /* Enable the selected ADC software conversion for regular group */ + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + } + } + else + { + /* if instance of handle correspond to ADC1 and no external trigger present enable software conversion of regular channels */ + if((hadc->Instance == ADC1) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) + { + /* Enable the selected ADC software conversion for regular group */ + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + } + + /* if dual mode is selected, ADC3 works independently. */ + /* check if the mode selected is not triple */ + if( HAL_IS_BIT_CLR(ADC->CCR, ADC_CCR_MULTI_4) ) + { + /* if instance of handle correspond to ADC3 and no external trigger present enable software conversion of regular channels */ + if((hadc->Instance == ADC3) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) + { + /* Enable the selected ADC software conversion for regular group */ + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + } + } + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Disables the interrupt and stop ADC conversion of regular channels. + * + * @note Caution: This function will stop also injected channels. + * + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval HAL status. + */ +HAL_StatusTypeDef HAL_ADC_Stop_IT(ADC_HandleTypeDef* hadc) +{ + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Stop potential conversion on going, on regular and injected groups */ + /* Disable ADC peripheral */ + __HAL_ADC_DISABLE(hadc); + + /* Check if ADC is effectively disabled */ + if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Disable ADC end of conversion interrupt for regular group */ + __HAL_ADC_DISABLE_IT(hadc, (ADC_IT_EOC | ADC_IT_OVR)); + + /* Set ADC state */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, + HAL_ADC_STATE_READY); + } + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Handles ADC interrupt request + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
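+ * @note   In interrupt mode this handler is typically forwarded from the ADC
+ *         interrupt vector, and results are then delivered through
+ *         HAL_ADC_ConvCpltCallback(). A hedged sketch (assumes a single handle
+ *         "hadc1" declared elsewhere and HAL_ADC_Start_IT() called beforehand):
+ * @code
+ *   void ADC_IRQHandler(void)
+ *   {
+ *     HAL_ADC_IRQHandler(&hadc1);
+ *   }
+ *
+ *   void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef *hadc)
+ *   {
+ *     uint32_t value = HAL_ADC_GetValue(hadc);
+ *     (void)value;
+ *   }
+ * @endcode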
+ * @retval None + */ +void HAL_ADC_IRQHandler(ADC_HandleTypeDef* hadc) +{ + uint32_t tmp1 = 0, tmp2 = 0; + + /* Check the parameters */ + assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); + assert_param(IS_ADC_REGULAR_LENGTH(hadc->Init.NbrOfConversion)); + assert_param(IS_ADC_EOCSelection(hadc->Init.EOCSelection)); + + tmp1 = __HAL_ADC_GET_FLAG(hadc, ADC_FLAG_EOC); + tmp2 = __HAL_ADC_GET_IT_SOURCE(hadc, ADC_IT_EOC); + /* Check End of conversion flag for regular channels */ + if(tmp1 && tmp2) + { + /* Update state machine on conversion status if not in error state */ + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_ERROR_INTERNAL)) + { + /* Set ADC state */ + SET_BIT(hadc->State, HAL_ADC_STATE_REG_EOC); + } + + /* Determine whether any further conversion upcoming on group regular */ + /* by external trigger, continuous mode or scan sequence on going. */ + /* Note: On STM32F7, there is no independent flag of end of sequence. */ + /* The test of scan sequence on going is done either with scan */ + /* sequence disabled or with end of conversion flag set to */ + /* of end of sequence. */ + if(ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE) && + (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) ) + { + /* Disable ADC end of single conversion interrupt on group regular */ + /* Note: Overrun interrupt was enabled with EOC interrupt in */ + /* HAL_ADC_Start_IT(), but is not disabled here because can be used */ + /* by overrun IRQ process below. */ + __HAL_ADC_DISABLE_IT(hadc, ADC_IT_EOC); + + /* Set ADC state */ + CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); + + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_INJ_BUSY)) + { + SET_BIT(hadc->State, HAL_ADC_STATE_READY); + } + } + + /* Conversion complete callback */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + hadc->ConvCpltCallback(hadc); +#else + HAL_ADC_ConvCpltCallback(hadc); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ + + /* Clear regular group conversion flag */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_STRT | ADC_FLAG_EOC); + } + + tmp1 = __HAL_ADC_GET_FLAG(hadc, ADC_FLAG_JEOC); + tmp2 = __HAL_ADC_GET_IT_SOURCE(hadc, ADC_IT_JEOC); + /* Check End of conversion flag for injected channels */ + if(tmp1 && tmp2) + { + /* Update state machine on conversion status if not in error state */ + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_ERROR_INTERNAL)) + { + /* Set ADC state */ + SET_BIT(hadc->State, HAL_ADC_STATE_INJ_EOC); + } + + /* Determine whether any further conversion upcoming on group injected */ + /* by external trigger, scan sequence on going or by automatic injected */ + /* conversion from group regular (same conditions as group regular */ + /* interruption disabling above). 
*/ + if(ADC_IS_SOFTWARE_START_INJECTED(hadc) && + (HAL_IS_BIT_CLR(hadc->Instance->JSQR, ADC_JSQR_JL) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS)) && + (HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO) && + (ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE)))) + { + /* Disable ADC end of single conversion interrupt on group injected */ + __HAL_ADC_DISABLE_IT(hadc, ADC_IT_JEOC); + + /* Set ADC state */ + CLEAR_BIT(hadc->State, HAL_ADC_STATE_INJ_BUSY); + + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_REG_BUSY)) + { + SET_BIT(hadc->State, HAL_ADC_STATE_READY); + } + } + + /* Conversion complete callback */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + hadc->InjectedConvCpltCallback(hadc); +#else + HAL_ADCEx_InjectedConvCpltCallback(hadc); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ + + /* Clear injected group conversion flag */ + __HAL_ADC_CLEAR_FLAG(hadc, (ADC_FLAG_JSTRT | ADC_FLAG_JEOC)); + } + + tmp1 = __HAL_ADC_GET_FLAG(hadc, ADC_FLAG_AWD); + tmp2 = __HAL_ADC_GET_IT_SOURCE(hadc, ADC_IT_AWD); + /* Check Analog watchdog flag */ + if(tmp1 && tmp2) + { + if(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_AWD)) + { + /* Set ADC state */ + SET_BIT(hadc->State, HAL_ADC_STATE_AWD1); + + /* Level out of window callback */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + hadc->LevelOutOfWindowCallback(hadc); +#else + HAL_ADC_LevelOutOfWindowCallback(hadc); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ + + + /* Clear the ADC analog watchdog flag */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_AWD); + } + } + + tmp1 = __HAL_ADC_GET_FLAG(hadc, ADC_FLAG_OVR); + tmp2 = __HAL_ADC_GET_IT_SOURCE(hadc, ADC_IT_OVR); + /* Check Overrun flag */ + if(tmp1 && tmp2) + { + /* Note: On STM32F7, ADC overrun can be set through other parameters */ + /* refer to description of parameter "EOCSelection" for more */ + /* details. */ + + /* Set ADC error code to overrun */ + SET_BIT(hadc->ErrorCode, HAL_ADC_ERROR_OVR); + + /* Clear ADC overrun flag */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_OVR); + + /* Error callback */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + hadc->ErrorCallback(hadc); +#else + HAL_ADC_ErrorCallback(hadc); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ + + /* Clear the Overrun flag */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_OVR); + } +} + +/** + * @brief Enables ADC DMA request after last transfer (Single-ADC mode) and enables ADC peripheral + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @param pData The destination Buffer address. + * @param Length The length of data to be transferred from ADC peripheral to memory. 
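+ * @note   A hedged DMA-mode sketch (assumes the handle "hadc1" was linked to a
+ *         DMA stream in HAL_ADC_MspInit(), e.g. by CubeMX-generated code; the
+ *         buffer name and length are illustrative). When the buffer is full,
+ *         HAL_ADC_ConvCpltCallback() is invoked:
+ * @code
+ *   static uint16_t adc_buffer[16];
+ *   HAL_ADC_Start_DMA(&hadc1, (uint32_t *)adc_buffer, 16);
+ *   ...
+ *   HAL_ADC_Stop_DMA(&hadc1);
+ * @endcode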
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, uint32_t Length) +{ + __IO uint32_t counter = 0; + + /* Check the parameters */ + assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); + assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Enable the ADC peripheral */ + /* Check if ADC peripheral is disabled in order to enable it and wait during + Tstab time the ADC's stabilization */ + if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { + /* Enable the Peripheral */ + __HAL_ADC_ENABLE(hadc); + + /* Delay for ADC stabilization time */ + /* Compute number of CPU cycles to wait for */ + counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000)); + while(counter != 0) + { + counter--; + } + } + + /* Start conversion if ADC is effectively enabled */ + if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Set ADC state */ + /* - Clear state bitfield related to regular group conversion results */ + /* - Set state bitfield related to regular group operation */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_READY | HAL_ADC_STATE_REG_EOC | HAL_ADC_STATE_REG_OVR, + HAL_ADC_STATE_REG_BUSY); + + /* If conversions on group regular are also triggering group injected, */ + /* update ADC state. */ + if (READ_BIT(hadc->Instance->CR1, ADC_CR1_JAUTO) != RESET) + { + ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); + } + + /* State machine update: Check if an injected conversion is ongoing */ + if (HAL_IS_BIT_SET(hadc->State, HAL_ADC_STATE_INJ_BUSY)) + { + /* Reset ADC error code fields related to conversions on group regular */ + CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); + } + else + { + /* Reset ADC all error code fields */ + ADC_CLEAR_ERRORCODE(hadc); + } + + /* Process unlocked */ + /* Unlock before starting ADC conversions: in case of potential */ + /* interruption, to let the process to ADC IRQ Handler. 
*/ + __HAL_UNLOCK(hadc); + + /* Set the DMA transfer complete callback */ + hadc->DMA_Handle->XferCpltCallback = ADC_DMAConvCplt; + + /* Set the DMA half transfer complete callback */ + hadc->DMA_Handle->XferHalfCpltCallback = ADC_DMAHalfConvCplt; + + /* Set the DMA error callback */ + hadc->DMA_Handle->XferErrorCallback = ADC_DMAError; + + + /* Manage ADC and DMA start: ADC overrun interruption, DMA start, ADC */ + /* start (in case of SW start): */ + + /* Clear regular group conversion flag and overrun flag */ + /* (To ensure of no unknown state from potential previous ADC operations) */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_EOC | ADC_FLAG_OVR); + + /* Enable ADC overrun interrupt */ + __HAL_ADC_ENABLE_IT(hadc, ADC_IT_OVR); + + /* Enable ADC DMA mode */ + hadc->Instance->CR2 |= ADC_CR2_DMA; + + /* Start the DMA channel */ + HAL_DMA_Start_IT(hadc->DMA_Handle, (uint32_t)&hadc->Instance->DR, (uint32_t)pData, Length); + + /* Check if Multimode enabled */ + if(HAL_IS_BIT_CLR(ADC->CCR, ADC_CCR_MULTI)) + { + /* if no external trigger present enable software conversion of regular channels */ + if((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) + { + /* Enable the selected ADC software conversion for regular group */ + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + } + } + else + { + /* if instance of handle correspond to ADC1 and no external trigger present enable software conversion of regular channels */ + if((hadc->Instance == ADC1) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) + { + /* Enable the selected ADC software conversion for regular group */ + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + } + /* if dual mode is selected, ADC3 works independently. */ + /* check if the mode selected is not triple */ + if( HAL_IS_BIT_CLR(ADC->CCR, ADC_CCR_MULTI_4) ) + { + /* if instance of handle correspond to ADC3 and no external trigger present enable software conversion of regular channels */ + if((hadc->Instance == ADC3) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) + { + /* Enable the selected ADC software conversion for regular group */ + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + } + } + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Disables ADC DMA (Single-ADC mode) and disables ADC peripheral + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
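+ * @note   A hedged overrun-recovery sketch following the guidance given for
+ *         HAL_ADC_ErrorCallback() further below (buffer name and length are
+ *         illustrative and assumed to be defined elsewhere):
+ * @code
+ *   void HAL_ADC_ErrorCallback(ADC_HandleTypeDef *hadc)
+ *   {
+ *     if ((hadc->ErrorCode & HAL_ADC_ERROR_OVR) != 0UL)
+ *     {
+ *       HAL_ADC_Stop_DMA(hadc);
+ *       HAL_ADC_Start_DMA(hadc, (uint32_t *)adc_buffer, 16);
+ *     }
+ *   }
+ * @endcode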
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_Stop_DMA(ADC_HandleTypeDef* hadc) +{ + HAL_StatusTypeDef tmp_hal_status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Stop potential conversion on going, on regular and injected groups */ + /* Disable ADC peripheral */ + __HAL_ADC_DISABLE(hadc); + + /* Check if ADC is effectively disabled */ + if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Disable the selected ADC DMA mode */ + hadc->Instance->CR2 &= ~ADC_CR2_DMA; + + /* Disable the DMA channel (in case of DMA in circular mode or stop while */ + /* DMA transfer is on going) */ + tmp_hal_status = HAL_DMA_Abort(hadc->DMA_Handle); + + /* Disable ADC overrun interrupt */ + __HAL_ADC_DISABLE_IT(hadc, ADC_IT_OVR); + + /* Set ADC state */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, + HAL_ADC_STATE_READY); + } + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return tmp_hal_status; +} + +/** + * @brief Gets the converted value from data register of regular channel. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval Converted value + */ +uint32_t HAL_ADC_GetValue(ADC_HandleTypeDef* hadc) +{ + /* Return the selected ADC converted value */ + return hadc->Instance->DR; +} + +/** + * @brief Regular conversion complete callback in non blocking mode + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval None + */ +__weak void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef* hadc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hadc); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ADC_ConvCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Regular conversion half DMA transfer callback in non blocking mode + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval None + */ +__weak void HAL_ADC_ConvHalfCpltCallback(ADC_HandleTypeDef* hadc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hadc); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ADC_ConvHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Analog watchdog callback in non blocking mode + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval None + */ +__weak void HAL_ADC_LevelOutOfWindowCallback(ADC_HandleTypeDef* hadc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hadc); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ADC_LevelOoutOfWindowCallback could be implemented in the user file + */ +} + +/** + * @brief Error ADC callback. + * @note In case of error due to overrun when using ADC with DMA transfer + * (HAL ADC handle paramater "ErrorCode" to state "HAL_ADC_ERROR_OVR"): + * - Reinitialize the DMA using function "HAL_ADC_Stop_DMA()". 
+ * - If needed, restart a new ADC conversion using function + * "HAL_ADC_Start_DMA()" + * (this function is also clearing overrun flag) + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval None + */ +__weak void HAL_ADC_ErrorCallback(ADC_HandleTypeDef *hadc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hadc); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ADC_ErrorCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup ADC_Exported_Functions_Group3 Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure regular channels. + (+) Configure injected channels. + (+) Configure multimode. + (+) Configure the analog watch dog. + +@endverbatim + * @{ + */ + + /** + * @brief Configures for the selected ADC regular channel its corresponding + * rank in the sequencer and its sample time. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @param sConfig ADC configuration structure. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef* hadc, ADC_ChannelConfTypeDef* sConfig) +{ + __IO uint32_t counter = 0; + + /* Check the parameters */ + assert_param(IS_ADC_CHANNEL(sConfig->Channel)); + assert_param(IS_ADC_REGULAR_RANK(sConfig->Rank)); + assert_param(IS_ADC_SAMPLE_TIME(sConfig->SamplingTime)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* if ADC_Channel_10 ... 
ADC_Channel_18 is selected */ + if ((sConfig->Channel > ADC_CHANNEL_9) && (sConfig->Channel != ADC_INTERNAL_NONE)) + { + /* Clear the old sample time */ + hadc->Instance->SMPR1 &= ~ADC_SMPR1(ADC_SMPR1_SMP10, sConfig->Channel); + + if (sConfig->Channel == ADC_CHANNEL_TEMPSENSOR) + { + /* Set the new sample time */ + hadc->Instance->SMPR1 |= ADC_SMPR1(sConfig->SamplingTime, ADC_CHANNEL_18); + } + else + { + /* Set the new sample time */ + hadc->Instance->SMPR1 |= ADC_SMPR1(sConfig->SamplingTime, sConfig->Channel); + } + } + else /* ADC_Channel include in ADC_Channel_[0..9] */ + { + /* Clear the old sample time */ + hadc->Instance->SMPR2 &= ~ADC_SMPR2(ADC_SMPR2_SMP0, sConfig->Channel); + + /* Set the new sample time */ + hadc->Instance->SMPR2 |= ADC_SMPR2(sConfig->SamplingTime, sConfig->Channel); + } + + /* For Rank 1 to 6 */ + if (sConfig->Rank < 7) + { + /* Clear the old SQx bits for the selected rank */ + hadc->Instance->SQR3 &= ~ADC_SQR3_RK(ADC_SQR3_SQ1, sConfig->Rank); + + /* Set the SQx bits for the selected rank */ + hadc->Instance->SQR3 |= ADC_SQR3_RK(sConfig->Channel, sConfig->Rank); + } + /* For Rank 7 to 12 */ + else if (sConfig->Rank < 13) + { + /* Clear the old SQx bits for the selected rank */ + hadc->Instance->SQR2 &= ~ADC_SQR2_RK(ADC_SQR2_SQ7, sConfig->Rank); + + /* Set the SQx bits for the selected rank */ + hadc->Instance->SQR2 |= ADC_SQR2_RK(sConfig->Channel, sConfig->Rank); + } + /* For Rank 13 to 16 */ + else + { + /* Clear the old SQx bits for the selected rank */ + hadc->Instance->SQR1 &= ~ADC_SQR1_RK(ADC_SQR1_SQ13, sConfig->Rank); + + /* Set the SQx bits for the selected rank */ + hadc->Instance->SQR1 |= ADC_SQR1_RK(sConfig->Channel, sConfig->Rank); + } + + /* if no internal channel selected */ + if ((hadc->Instance == ADC1) && (sConfig->Channel == ADC_INTERNAL_NONE)) + { + /* Disable the VBAT & TSVREFE channel*/ + ADC->CCR &= ~(ADC_CCR_VBATE | ADC_CCR_TSVREFE); + } + + /* if ADC1 Channel_18 is selected enable VBAT Channel */ + if ((hadc->Instance == ADC1) && (sConfig->Channel == ADC_CHANNEL_VBAT)) + { + /* Enable the VBAT channel*/ + ADC->CCR |= ADC_CCR_VBATE; + } + + /* if ADC1 Channel_18 or Channel_17 is selected enable TSVREFE Channel(Temperature sensor and VREFINT) */ + if ((hadc->Instance == ADC1) && ((sConfig->Channel == ADC_CHANNEL_TEMPSENSOR) || (sConfig->Channel == ADC_CHANNEL_VREFINT))) + { + /* Enable the TSVREFE channel*/ + ADC->CCR |= ADC_CCR_TSVREFE; + + if(sConfig->Channel == ADC_CHANNEL_TEMPSENSOR) + { + /* Delay for temperature sensor stabilization time */ + /* Compute number of CPU cycles to wait for */ + counter = (ADC_TEMPSENSOR_DELAY_US * (SystemCoreClock / 1000000)); + while(counter != 0) + { + counter--; + } + } + } + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Configures the analog watchdog. + * @note Analog watchdog thresholds can be modified while ADC conversion + * is on going. + * In this case, some constraints must be taken into account: + * the programmed threshold values are effective from the next + * ADC EOC (end of unitary conversion). + * Considering that registers write delay may happen due to + * bus activity, this might cause an uncertainty on the + * effective timing of the new programmed threshold values. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
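+ * @note   A hedged configuration sketch (assumes 12-bit resolution and a
+ *         regular channel already configured on "hadc1"; the channel number and
+ *         raw-count thresholds are illustrative):
+ * @code
+ *   ADC_AnalogWDGConfTypeDef awd = {0};
+ *   awd.WatchdogMode  = ADC_ANALOGWATCHDOG_SINGLE_REG;
+ *   awd.Channel       = ADC_CHANNEL_4;
+ *   awd.ITMode        = ENABLE;
+ *   awd.HighThreshold = 3000;
+ *   awd.LowThreshold  = 1000;
+ *   HAL_ADC_AnalogWDGConfig(&hadc1, &awd);
+ * @endcode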
+ * @param AnalogWDGConfig pointer to an ADC_AnalogWDGConfTypeDef structure + * that contains the configuration information of ADC analog watchdog. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADC_AnalogWDGConfig(ADC_HandleTypeDef* hadc, ADC_AnalogWDGConfTypeDef* AnalogWDGConfig) +{ +#ifdef USE_FULL_ASSERT + uint32_t tmp = 0; +#endif /* USE_FULL_ASSERT */ + + /* Check the parameters */ + assert_param(IS_ADC_ANALOG_WATCHDOG(AnalogWDGConfig->WatchdogMode)); + assert_param(IS_ADC_CHANNEL(AnalogWDGConfig->Channel)); + assert_param(IS_FUNCTIONAL_STATE(AnalogWDGConfig->ITMode)); + +#ifdef USE_FULL_ASSERT + tmp = ADC_GET_RESOLUTION(hadc); + assert_param(IS_ADC_RANGE(tmp, AnalogWDGConfig->HighThreshold)); + assert_param(IS_ADC_RANGE(tmp, AnalogWDGConfig->LowThreshold)); +#endif /* USE_FULL_ASSERT */ + + /* Process locked */ + __HAL_LOCK(hadc); + + if(AnalogWDGConfig->ITMode == ENABLE) + { + /* Enable the ADC Analog watchdog interrupt */ + __HAL_ADC_ENABLE_IT(hadc, ADC_IT_AWD); + } + else + { + /* Disable the ADC Analog watchdog interrupt */ + __HAL_ADC_DISABLE_IT(hadc, ADC_IT_AWD); + } + + /* Clear AWDEN, JAWDEN and AWDSGL bits */ + hadc->Instance->CR1 &= ~(ADC_CR1_AWDSGL | ADC_CR1_JAWDEN | ADC_CR1_AWDEN); + + /* Set the analog watchdog enable mode */ + hadc->Instance->CR1 |= AnalogWDGConfig->WatchdogMode; + + /* Set the high threshold */ + hadc->Instance->HTR = AnalogWDGConfig->HighThreshold; + + /* Set the low threshold */ + hadc->Instance->LTR = AnalogWDGConfig->LowThreshold; + + /* Clear the Analog watchdog channel select bits */ + hadc->Instance->CR1 &= ~ADC_CR1_AWDCH; + + /* Set the Analog watchdog channel */ + hadc->Instance->CR1 |= (uint32_t)((uint16_t)(AnalogWDGConfig->Channel)); + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup ADC_Exported_Functions_Group4 ADC Peripheral State functions + * @brief ADC Peripheral State functions + * +@verbatim + =============================================================================== + ##### Peripheral State and errors functions ##### + =============================================================================== + [..] + This subsection provides functions allowing to + (+) Check the ADC state + (+) Check the ADC Error + +@endverbatim + * @{ + */ + +/** + * @brief return the ADC state + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval HAL state + */ +uint32_t HAL_ADC_GetState(ADC_HandleTypeDef* hadc) +{ + /* Return ADC state */ + return hadc->State; +} + +/** + * @brief Return the ADC error code + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval ADC Error Code + */ +uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc) +{ + return hadc->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup ADC_Private_Functions ADC Private Functions + * @{ + */ + +/** + * @brief Initializes the ADCx peripheral according to the specified parameters + * in the ADC_InitStruct without initializing the ADC MSP. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
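+ * @note   These fields are supplied by the application through HAL_ADC_Init();
+ *         a hedged configuration sketch with illustrative values:
+ * @code
+ *   hadc1.Instance                   = ADC1;
+ *   hadc1.Init.ClockPrescaler        = ADC_CLOCK_SYNC_PCLK_DIV4;
+ *   hadc1.Init.Resolution            = ADC_RESOLUTION_12B;
+ *   hadc1.Init.ScanConvMode          = DISABLE;
+ *   hadc1.Init.ContinuousConvMode    = DISABLE;
+ *   hadc1.Init.DiscontinuousConvMode = DISABLE;
+ *   hadc1.Init.ExternalTrigConv      = ADC_SOFTWARE_START;
+ *   hadc1.Init.ExternalTrigConvEdge  = ADC_EXTERNALTRIGCONVEDGE_NONE;
+ *   hadc1.Init.DataAlign             = ADC_DATAALIGN_RIGHT;
+ *   hadc1.Init.NbrOfConversion       = 1;
+ *   hadc1.Init.DMAContinuousRequests = DISABLE;
+ *   hadc1.Init.EOCSelection          = ADC_EOC_SINGLE_CONV;
+ *   HAL_ADC_Init(&hadc1);
+ * @endcode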
+ * @retval None + */ +static void ADC_Init(ADC_HandleTypeDef* hadc) +{ + /* Set ADC parameters */ + /* Set the ADC clock prescaler */ + ADC->CCR &= ~(ADC_CCR_ADCPRE); + ADC->CCR |= hadc->Init.ClockPrescaler; + + /* Set ADC scan mode */ + hadc->Instance->CR1 &= ~(ADC_CR1_SCAN); + hadc->Instance->CR1 |= ADC_CR1_SCANCONV(hadc->Init.ScanConvMode); + + /* Set ADC resolution */ + hadc->Instance->CR1 &= ~(ADC_CR1_RES); + hadc->Instance->CR1 |= hadc->Init.Resolution; + + /* Set ADC data alignment */ + hadc->Instance->CR2 &= ~(ADC_CR2_ALIGN); + hadc->Instance->CR2 |= hadc->Init.DataAlign; + + /* Enable external trigger if trigger selection is different of software */ + /* start. */ + /* Note: This configuration keeps the hardware feature of parameter */ + /* ExternalTrigConvEdge "trigger edge none" equivalent to */ + /* software start. */ + if(hadc->Init.ExternalTrigConv != ADC_SOFTWARE_START) + { + /* Select external trigger to start conversion */ + hadc->Instance->CR2 &= ~(ADC_CR2_EXTSEL); + hadc->Instance->CR2 |= hadc->Init.ExternalTrigConv; + + /* Select external trigger polarity */ + hadc->Instance->CR2 &= ~(ADC_CR2_EXTEN); + hadc->Instance->CR2 |= hadc->Init.ExternalTrigConvEdge; + } + else + { + /* Reset the external trigger */ + hadc->Instance->CR2 &= ~(ADC_CR2_EXTSEL); + hadc->Instance->CR2 &= ~(ADC_CR2_EXTEN); + } + + /* Enable or disable ADC continuous conversion mode */ + hadc->Instance->CR2 &= ~(ADC_CR2_CONT); + hadc->Instance->CR2 |= ADC_CR2_CONTINUOUS((uint32_t)hadc->Init.ContinuousConvMode); + + if(hadc->Init.DiscontinuousConvMode != DISABLE) + { + assert_param(IS_ADC_REGULAR_DISC_NUMBER(hadc->Init.NbrOfDiscConversion)); + + /* Enable the selected ADC regular discontinuous mode */ + hadc->Instance->CR1 |= (uint32_t)ADC_CR1_DISCEN; + + /* Set the number of channels to be converted in discontinuous mode */ + hadc->Instance->CR1 &= ~(ADC_CR1_DISCNUM); + hadc->Instance->CR1 |= ADC_CR1_DISCONTINUOUS(hadc->Init.NbrOfDiscConversion); + } + else + { + /* Disable the selected ADC regular discontinuous mode */ + hadc->Instance->CR1 &= ~(ADC_CR1_DISCEN); + } + + /* Set ADC number of conversion */ + hadc->Instance->SQR1 &= ~(ADC_SQR1_L); + hadc->Instance->SQR1 |= ADC_SQR1(hadc->Init.NbrOfConversion); + + /* Enable or disable ADC DMA continuous request */ + hadc->Instance->CR2 &= ~(ADC_CR2_DDS); + hadc->Instance->CR2 |= ADC_CR2_DMAContReq((uint32_t)hadc->Init.DMAContinuousRequests); + + /* Enable or disable ADC end of conversion selection */ + hadc->Instance->CR2 &= ~(ADC_CR2_EOCS); + hadc->Instance->CR2 |= ADC_CR2_EOCSelection(hadc->Init.EOCSelection); +} + +/** + * @brief DMA transfer complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void ADC_DMAConvCplt(DMA_HandleTypeDef *hdma) +{ + /* Retrieve ADC handle corresponding to current DMA handle */ + ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + + /* Update state machine on conversion status if not in error state */ + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_ERROR_INTERNAL | HAL_ADC_STATE_ERROR_DMA)) + { + /* Update ADC state machine */ + SET_BIT(hadc->State, HAL_ADC_STATE_REG_EOC); + + /* Determine whether any further conversion upcoming on group regular */ + /* by external trigger, continuous mode or scan sequence on going. */ + /* Note: On STM32F7, there is no independent flag of end of sequence. 
*/ + /* The test of scan sequence on going is done either with scan */ + /* sequence disabled or with end of conversion flag set to */ + /* of end of sequence. */ + if(ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE) && + (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) ) + { + /* Disable ADC end of single conversion interrupt on group regular */ + /* Note: Overrun interrupt was enabled with EOC interrupt in */ + /* HAL_ADC_Start_IT(), but is not disabled here because can be used */ + /* by overrun IRQ process below. */ + __HAL_ADC_DISABLE_IT(hadc, ADC_IT_EOC); + + /* Set ADC state */ + CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); + + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_INJ_BUSY)) + { + SET_BIT(hadc->State, HAL_ADC_STATE_READY); + } + } + + /* Conversion complete callback */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + hadc->ConvCpltCallback(hadc); +#else + HAL_ADC_ConvCpltCallback(hadc); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ + } + else /* DMA and-or internal error occurred */ + { + if ((hadc->State & HAL_ADC_STATE_ERROR_INTERNAL) != 0UL) + { + /* Call HAL ADC Error Callback function */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + hadc->ErrorCallback(hadc); +#else + HAL_ADC_ErrorCallback(hadc); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ + } + else + { + /* Call DMA error callback */ + hadc->DMA_Handle->XferErrorCallback(hdma); + } + } +} + +/** + * @brief DMA half transfer complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void ADC_DMAHalfConvCplt(DMA_HandleTypeDef *hdma) +{ + ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + /* Half conversion callback */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + hadc->ConvHalfCpltCallback(hadc); +#else + HAL_ADC_ConvHalfCpltCallback(hadc); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA error callback + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. 
+ * @retval None + */ +static void ADC_DMAError(DMA_HandleTypeDef *hdma) +{ + ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + hadc->State= HAL_ADC_STATE_ERROR_DMA; + /* Set ADC error code to DMA error */ + hadc->ErrorCode |= HAL_ADC_ERROR_DMA; + /* Error callback */ +#if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) + hadc->ErrorCallback(hadc); +#else + HAL_ADC_ErrorCallback(hadc); +#endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_ADC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc_ex.c new file mode 100644 index 000000000..40f058762 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_adc_ex.c @@ -0,0 +1,1039 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_adc_ex.c + * @author MCD Application Team + * @brief This file provides firmware functions to manage the following + * functionalities of the ADC extension peripheral: + * + Extended features functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#)Initialize the ADC low level resources by implementing the HAL_ADC_MspInit(): + (##) Enable the ADC interface clock using __HAL_RCC_ADC_CLK_ENABLE() + (##) ADC pins configuration + (+++) Enable the clock for the ADC GPIOs using the following function: + __HAL_RCC_GPIOx_CLK_ENABLE() + (+++) Configure these ADC pins in analog mode using HAL_GPIO_Init() + (##) In case of using interrupts (e.g. HAL_ADC_Start_IT()) + (+++) Configure the ADC interrupt priority using HAL_NVIC_SetPriority() + (+++) Enable the ADC IRQ handler using HAL_NVIC_EnableIRQ() + (+++) In ADC IRQ handler, call HAL_ADC_IRQHandler() + (##) In case of using DMA to control data transfer (e.g. HAL_ADC_Start_DMA()) + (+++) Enable the DMAx interface clock using __HAL_RCC_DMAx_CLK_ENABLE() + (+++) Configure and enable two DMA streams stream for managing data + transfer from peripheral to memory (output stream) + (+++) Associate the initialized DMA handle to the ADC DMA handle + using __HAL_LINKDMA() + (+++) Configure the priority and enable the NVIC for the transfer complete + interrupt on the two DMA Streams. The output stream should have higher + priority than the input stream. + (#) Configure the ADC Prescaler, conversion resolution and data alignment + using the HAL_ADC_Init() function. + + (#) Configure the ADC Injected channels group features, use HAL_ADC_Init() + and HAL_ADC_ConfigChannel() functions. + + (#) Three operation modes are available within this driver : + + *** Polling mode IO operation *** + ================================= + [..] + (+) Start the ADC peripheral using HAL_ADCEx_InjectedStart() + (+) Wait for end of conversion using HAL_ADCEx_InjectedPollForConversion(), at this stage + user can specify the value of timeout according to his end application + (+) To read the ADC converted values, use the HAL_ADCEx_InjectedGetValue() function. 
+ (+) Stop the ADC peripheral using HAL_ADCEx_InjectedStop() + + *** Interrupt mode IO operation *** + =================================== + [..] + (+) Start the ADC peripheral using HAL_ADCEx_InjectedStart_IT() + (+) Use HAL_ADC_IRQHandler() called under ADC_IRQHandler() Interrupt subroutine + (+) At ADC end of conversion HAL_ADCEx_InjectedConvCpltCallback() function is executed and user can + add his own code by customization of function pointer HAL_ADCEx_InjectedConvCpltCallback + (+) In case of ADC Error, HAL_ADCEx_InjectedErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_ADCEx_InjectedErrorCallback + (+) Stop the ADC peripheral using HAL_ADCEx_InjectedStop_IT() + + *** Multi mode ADCs Regular channels configuration *** + ====================================================== + [..] + (+) Select the Multi mode ADC regular channels features (dual or triple mode) + and configure the DMA mode using HAL_ADCEx_MultiModeConfigChannel() functions. + (+) Start the ADC peripheral using HAL_ADCEx_MultiModeStart_DMA(), at this stage the user specify the length + of data to be transferred at each end of conversion + (+) Read the ADCs converted values using the HAL_ADCEx_MultiModeGetValue() function. + + + @endverbatim + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics.
+ * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup ADCEx ADCEx + * @brief ADC Extended driver modules + * @{ + */ + +#ifdef HAL_ADC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup ADCEx_Private_Functions + * @{ + */ +/* Private function prototypes -----------------------------------------------*/ +static void ADC_MultiModeDMAConvCplt(DMA_HandleTypeDef *hdma); +static void ADC_MultiModeDMAError(DMA_HandleTypeDef *hdma); +static void ADC_MultiModeDMAHalfConvCplt(DMA_HandleTypeDef *hdma); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup ADCEx_Exported_Functions ADC Exported Functions + * @{ + */ + +/** @defgroup ADCEx_Exported_Functions_Group1 Extended features functions + * @brief Extended features functions + * +@verbatim + =============================================================================== + ##### Extended features functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Start conversion of injected channel. + (+) Stop conversion of injected channel. + (+) Start multimode and enable DMA transfer. + (+) Stop multimode and disable DMA transfer. + (+) Get result of injected channel conversion. + (+) Get result of multimode conversion. + (+) Configure injected channels. + (+) Configure multimode. + +@endverbatim + * @{ + */ + +/** + * @brief Enables the selected ADC software start conversion of the injected channels. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
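+ * @note   A hedged polling sketch for the injected group (assumes the injected
+ *         channel was configured beforehand, e.g. with
+ *         HAL_ADCEx_InjectedConfigChannel(), on a handle "hadc1"):
+ * @code
+ *   HAL_ADCEx_InjectedStart(&hadc1);
+ *   if (HAL_ADCEx_InjectedPollForConversion(&hadc1, 10) == HAL_OK)
+ *   {
+ *     uint32_t jval = HAL_ADCEx_InjectedGetValue(&hadc1, ADC_INJECTED_RANK_1);
+ *     (void)jval;
+ *   }
+ *   HAL_ADCEx_InjectedStop(&hadc1);
+ * @endcode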
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef* hadc) +{ + __IO uint32_t counter = 0; + uint32_t tmp1 = 0, tmp2 = 0; + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Enable the ADC peripheral */ + + /* Check if ADC peripheral is disabled in order to enable it and wait during + Tstab time the ADC's stabilization */ + if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { + /* Enable the Peripheral */ + __HAL_ADC_ENABLE(hadc); + + /* Delay for ADC stabilization time */ + /* Compute number of CPU cycles to wait for */ + counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000)); + while(counter != 0) + { + counter--; + } + } + + /* Start conversion if ADC is effectively enabled */ + if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Set ADC state */ + /* - Clear state bitfield related to injected group conversion results */ + /* - Set state bitfield related to injected operation */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_READY | HAL_ADC_STATE_INJ_EOC, + HAL_ADC_STATE_INJ_BUSY); + + /* Check if a regular conversion is ongoing */ + /* Note: On this device, there is no ADC error code fields related to */ + /* conversions on group injected only. In case of conversion on */ + /* going on group regular, no error code is reset. */ + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_REG_BUSY)) + { + /* Reset ADC all error code fields */ + ADC_CLEAR_ERRORCODE(hadc); + } + + /* Process unlocked */ + /* Unlock before starting ADC conversions: in case of potential */ + /* interruption, to let the process to ADC IRQ Handler. */ + __HAL_UNLOCK(hadc); + + /* Clear injected group conversion flag */ + /* (To ensure of no unknown state from potential previous ADC operations) */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_JEOC); + + /* Check if Multimode enabled */ + if(HAL_IS_BIT_CLR(ADC->CCR, ADC_CCR_MULTI)) + { + tmp1 = HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_JEXTEN); + tmp2 = HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO); + if(tmp1 && tmp2) + { + /* Enable the selected ADC software conversion for injected group */ + hadc->Instance->CR2 |= ADC_CR2_JSWSTART; + } + } + else + { + tmp1 = HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_JEXTEN); + tmp2 = HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO); + if((hadc->Instance == ADC1) && tmp1 && tmp2) + { + /* Enable the selected ADC software conversion for injected group */ + hadc->Instance->CR2 |= ADC_CR2_JSWSTART; + } + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Enables the interrupt and starts ADC conversion of injected channels. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * + * @retval HAL status. 
+ */ +HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc) +{ + __IO uint32_t counter = 0; + uint32_t tmp1 = 0, tmp2 = 0; + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Enable the ADC peripheral */ + + /* Check if ADC peripheral is disabled in order to enable it and wait during + Tstab time the ADC's stabilization */ + if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { + /* Enable the Peripheral */ + __HAL_ADC_ENABLE(hadc); + + /* Delay for ADC stabilization time */ + /* Compute number of CPU cycles to wait for */ + counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000)); + while(counter != 0) + { + counter--; + } + } + + /* Start conversion if ADC is effectively enabled */ + if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Set ADC state */ + /* - Clear state bitfield related to injected group conversion results */ + /* - Set state bitfield related to injected operation */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_READY | HAL_ADC_STATE_INJ_EOC, + HAL_ADC_STATE_INJ_BUSY); + + /* Check if a regular conversion is ongoing */ + /* Note: On this device, there is no ADC error code fields related to */ + /* conversions on group injected only. In case of conversion on */ + /* going on group regular, no error code is reset. */ + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_REG_BUSY)) + { + /* Reset ADC all error code fields */ + ADC_CLEAR_ERRORCODE(hadc); + } + + /* Process unlocked */ + /* Unlock before starting ADC conversions: in case of potential */ + /* interruption, to let the process to ADC IRQ Handler. */ + __HAL_UNLOCK(hadc); + + /* Clear injected group conversion flag */ + /* (To ensure of no unknown state from potential previous ADC operations) */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_JEOC); + + /* Enable end of conversion interrupt for injected channels */ + __HAL_ADC_ENABLE_IT(hadc, ADC_IT_JEOC); + + /* Check if Multimode enabled */ + if(HAL_IS_BIT_CLR(ADC->CCR, ADC_CCR_MULTI)) + { + tmp1 = HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_JEXTEN); + tmp2 = HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO); + if(tmp1 && tmp2) + { + /* Enable the selected ADC software conversion for injected group */ + hadc->Instance->CR2 |= ADC_CR2_JSWSTART; + } + } + else + { + tmp1 = HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_JEXTEN); + tmp2 = HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO); + if((hadc->Instance == ADC1) && tmp1 && tmp2) + { + /* Enable the selected ADC software conversion for injected group */ + hadc->Instance->CR2 |= ADC_CR2_JSWSTART; + } + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stop conversion of injected channels. Disable ADC peripheral if + * no regular conversion is on going. + * @note If ADC must be disabled and if conversion is on going on + * regular group, function HAL_ADC_Stop must be used to stop both + * injected and regular groups, and disable the ADC. + * @note If injected group mode auto-injection is enabled, + * function HAL_ADC_Stop must be used. + * @note In case of auto-injection mode, HAL_ADC_Stop must be used. 
+ * @param hadc ADC handle + * @retval None + */ +HAL_StatusTypeDef HAL_ADCEx_InjectedStop(ADC_HandleTypeDef* hadc) +{ + HAL_StatusTypeDef tmp_hal_status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Stop potential conversion and disable ADC peripheral */ + /* Conditioned to: */ + /* - No conversion on the other group (regular group) is intended to */ + /* continue (injected and regular groups stop conversion and ADC disable */ + /* are common) */ + /* - In case of auto-injection mode, HAL_ADC_Stop must be used. */ + if(((hadc->State & HAL_ADC_STATE_REG_BUSY) == RESET) && + HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO) ) + { + /* Stop potential conversion on going, on regular and injected groups */ + /* Disable ADC peripheral */ + __HAL_ADC_DISABLE(hadc); + + /* Check if ADC is effectively disabled */ + if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Set ADC state */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, + HAL_ADC_STATE_READY); + } + } + else + { + /* Update ADC state machine to error */ + SET_BIT(hadc->State, HAL_ADC_STATE_ERROR_CONFIG); + + tmp_hal_status = HAL_ERROR; + } + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return tmp_hal_status; +} + +/** + * @brief Poll for injected conversion complete + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @param Timeout Timeout value in millisecond. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADCEx_InjectedPollForConversion(ADC_HandleTypeDef* hadc, uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check End of conversion flag */ + while(!(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_JEOC))) + { + /* Check for the Timeout */ + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + hadc->State= HAL_ADC_STATE_TIMEOUT; + /* Process unlocked */ + __HAL_UNLOCK(hadc); + return HAL_TIMEOUT; + } + } + } + + /* Clear injected group conversion flag */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_JSTRT | ADC_FLAG_JEOC); + + /* Update ADC state machine */ + SET_BIT(hadc->State, HAL_ADC_STATE_INJ_EOC); + + /* Determine whether any further conversion upcoming on group injected */ + /* by external trigger, continuous mode or scan sequence on going. */ + /* Note: On STM32F7, there is no independent flag of end of sequence. */ + /* The test of scan sequence on going is done either with scan */ + /* sequence disabled or with end of conversion flag set to */ + /* of end of sequence. */ + if(ADC_IS_SOFTWARE_START_INJECTED(hadc) && + (HAL_IS_BIT_CLR(hadc->Instance->JSQR, ADC_JSQR_JL) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) && + (HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO) && + (ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE) ) ) ) + { + /* Set ADC state */ + CLEAR_BIT(hadc->State, HAL_ADC_STATE_INJ_BUSY); + + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_REG_BUSY)) + { + SET_BIT(hadc->State, HAL_ADC_STATE_READY); + } + } + + /* Return ADC state */ + return HAL_OK; +} + +/** + * @brief Stop conversion of injected channels, disable interruption of + * end-of-conversion. Disable ADC peripheral if no regular conversion + * is on going. 
+ * @note If ADC must be disabled and if conversion is on going on + * regular group, function HAL_ADC_Stop must be used to stop both + * injected and regular groups, and disable the ADC. + * @note If injected group mode auto-injection is enabled, + * function HAL_ADC_Stop must be used. + * @param hadc ADC handle + * @retval None + */ +HAL_StatusTypeDef HAL_ADCEx_InjectedStop_IT(ADC_HandleTypeDef* hadc) +{ + HAL_StatusTypeDef tmp_hal_status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Stop potential conversion and disable ADC peripheral */ + /* Conditioned to: */ + /* - No conversion on the other group (regular group) is intended to */ + /* continue (injected and regular groups stop conversion and ADC disable */ + /* are common) */ + /* - In case of auto-injection mode, HAL_ADC_Stop must be used. */ + if(((hadc->State & HAL_ADC_STATE_REG_BUSY) == RESET) && + HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO) ) + { + /* Stop potential conversion on going, on regular and injected groups */ + /* Disable ADC peripheral */ + __HAL_ADC_DISABLE(hadc); + + /* Check if ADC is effectively disabled */ + if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Disable ADC end of conversion interrupt for injected channels */ + __HAL_ADC_DISABLE_IT(hadc, ADC_IT_JEOC); + + /* Set ADC state */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, + HAL_ADC_STATE_READY); + } + } + else + { + /* Update ADC state machine to error */ + SET_BIT(hadc->State, HAL_ADC_STATE_ERROR_CONFIG); + + tmp_hal_status = HAL_ERROR; + } + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return tmp_hal_status; +} + +/** + * @brief Gets the converted value from data register of injected channel. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @param InjectedRank the ADC injected rank. + * This parameter can be one of the following values: + * @arg ADC_INJECTED_RANK_1: Injected Channel1 selected + * @arg ADC_INJECTED_RANK_2: Injected Channel2 selected + * @arg ADC_INJECTED_RANK_3: Injected Channel3 selected + * @arg ADC_INJECTED_RANK_4: Injected Channel4 selected + * @retval None + */ +uint32_t HAL_ADCEx_InjectedGetValue(ADC_HandleTypeDef* hadc, uint32_t InjectedRank) +{ + __IO uint32_t tmp = 0; + + /* Check the parameters */ + assert_param(IS_ADC_INJECTED_RANK(InjectedRank)); + + /* Clear injected group conversion flag to have similar behaviour as */ + /* regular group: reading data register also clears end of conversion flag. */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_JEOC); + + /* Return the selected ADC converted value */ + switch(InjectedRank) + { + case ADC_INJECTED_RANK_4: + { + tmp = hadc->Instance->JDR4; + } + break; + case ADC_INJECTED_RANK_3: + { + tmp = hadc->Instance->JDR3; + } + break; + case ADC_INJECTED_RANK_2: + { + tmp = hadc->Instance->JDR2; + } + break; + case ADC_INJECTED_RANK_1: + { + tmp = hadc->Instance->JDR1; + } + break; + default: + break; + } + return tmp; +} + +/** + * @brief Enables ADC DMA request after last transfer (Multi-ADC mode) and enables ADC peripheral + * + * @note Caution: This function must be used only with the ADC master. + * + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
+ * @param pData Pointer to buffer in which transferred from ADC peripheral to memory will be stored. + * @param Length The length of data to be transferred from ADC peripheral to memory. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADCEx_MultiModeStart_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, uint32_t Length) +{ + __IO uint32_t counter = 0; + + /* Check the parameters */ + assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); + assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); + assert_param(IS_FUNCTIONAL_STATE(hadc->Init.DMAContinuousRequests)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Check if ADC peripheral is disabled in order to enable it and wait during + Tstab time the ADC's stabilization */ + if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { + /* Enable the Peripheral */ + __HAL_ADC_ENABLE(hadc); + + /* Delay for temperature sensor stabilization time */ + /* Compute number of CPU cycles to wait for */ + counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000)); + while(counter != 0) + { + counter--; + } + } + + /* Start conversion if ADC is effectively enabled */ + if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Set ADC state */ + /* - Clear state bitfield related to regular group conversion results */ + /* - Set state bitfield related to regular group operation */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_READY | HAL_ADC_STATE_REG_EOC | HAL_ADC_STATE_REG_OVR, + HAL_ADC_STATE_REG_BUSY); + + /* If conversions on group regular are also triggering group injected, */ + /* update ADC state. */ + if (READ_BIT(hadc->Instance->CR1, ADC_CR1_JAUTO) != RESET) + { + ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); + } + + /* State machine update: Check if an injected conversion is ongoing */ + if (HAL_IS_BIT_SET(hadc->State, HAL_ADC_STATE_INJ_BUSY)) + { + /* Reset ADC error code fields related to conversions on group regular */ + CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); + } + else + { + /* Reset ADC all error code fields */ + ADC_CLEAR_ERRORCODE(hadc); + } + + /* Process unlocked */ + /* Unlock before starting ADC conversions: in case of potential */ + /* interruption, to let the process to ADC IRQ Handler. 
*/ + __HAL_UNLOCK(hadc); + + /* Set the DMA transfer complete callback */ + hadc->DMA_Handle->XferCpltCallback = ADC_MultiModeDMAConvCplt; + + /* Set the DMA half transfer complete callback */ + hadc->DMA_Handle->XferHalfCpltCallback = ADC_MultiModeDMAHalfConvCplt; + + /* Set the DMA error callback */ + hadc->DMA_Handle->XferErrorCallback = ADC_MultiModeDMAError ; + + /* Manage ADC and DMA start: ADC overrun interruption, DMA start, ADC */ + /* start (in case of SW start): */ + + /* Clear regular group conversion flag and overrun flag */ + /* (To ensure of no unknown state from potential previous ADC operations) */ + __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_EOC); + + /* Enable ADC overrun interrupt */ + __HAL_ADC_ENABLE_IT(hadc, ADC_IT_OVR); + + if (hadc->Init.DMAContinuousRequests != DISABLE) + { + /* Enable the selected ADC DMA request after last transfer */ + ADC->CCR |= ADC_CCR_DDS; + } + else + { + /* Disable the selected ADC EOC rising on each regular channel conversion */ + ADC->CCR &= ~ADC_CCR_DDS; + } + + /* Enable the DMA Stream */ + HAL_DMA_Start_IT(hadc->DMA_Handle, (uint32_t)&ADC->CDR, (uint32_t)pData, Length); + + /* if no external trigger present enable software conversion of regular channels */ + if((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) + { + /* Enable the selected ADC software conversion for regular group */ + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Disables ADC DMA (multi-ADC mode) and disables ADC peripheral + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADCEx_MultiModeStop_DMA(ADC_HandleTypeDef* hadc) +{ + HAL_StatusTypeDef tmp_hal_status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Stop potential conversion on going, on regular and injected groups */ + /* Disable ADC peripheral */ + __HAL_ADC_DISABLE(hadc); + + /* Check if ADC is effectively disabled */ + if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + { + /* Disable the selected ADC DMA mode for multimode */ + ADC->CCR &= ~ADC_CCR_DDS; + + /* Disable the DMA channel (in case of DMA in circular mode or stop while */ + /* DMA transfer is on going) */ + tmp_hal_status = HAL_DMA_Abort(hadc->DMA_Handle); + + /* Disable ADC overrun interrupt */ + __HAL_ADC_DISABLE_IT(hadc, ADC_IT_OVR); + + /* Set ADC state */ + ADC_STATE_CLR_SET(hadc->State, + HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, + HAL_ADC_STATE_READY); + } + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return tmp_hal_status; +} + +/** + * @brief Returns the last ADC1, ADC2 and ADC3 regular conversions results + * data in the selected multi mode. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @retval The converted data value. + */ +uint32_t HAL_ADCEx_MultiModeGetValue(ADC_HandleTypeDef* hadc) +{ + /* Return the multi mode conversion value */ + return ADC->CDR; +} + +/** + * @brief Injected conversion complete callback in non blocking mode + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. 
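  * @note   Typical usage (a sketch, not mandated by the driver): read the
  *         injected result from within this callback, assuming a single
  *         configured rank, e.g.
  *           uint32_t jdata = HAL_ADCEx_InjectedGetValue(hadc, ADC_INJECTED_RANK_1);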
+ * @retval None + */ +__weak void HAL_ADCEx_InjectedConvCpltCallback(ADC_HandleTypeDef* hadc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hadc); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ADC_InjectedConvCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Configures for the selected ADC injected channel its corresponding + * rank in the sequencer and its sample time. + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @param sConfigInjected ADC configuration structure for injected channel. + * @retval None + */ +HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc, ADC_InjectionConfTypeDef* sConfigInjected) +{ + +#ifdef USE_FULL_ASSERT + uint32_t tmp = 0; +#endif /* USE_FULL_ASSERT */ + + /* Check the parameters */ + assert_param(IS_ADC_CHANNEL(sConfigInjected->InjectedChannel)); + assert_param(IS_ADC_INJECTED_RANK(sConfigInjected->InjectedRank)); + assert_param(IS_ADC_SAMPLE_TIME(sConfigInjected->InjectedSamplingTime)); + assert_param(IS_ADC_EXT_INJEC_TRIG(sConfigInjected->ExternalTrigInjecConv)); + assert_param(IS_ADC_INJECTED_LENGTH(sConfigInjected->InjectedNbrOfConversion)); + assert_param(IS_FUNCTIONAL_STATE(sConfigInjected->AutoInjectedConv)); + assert_param(IS_FUNCTIONAL_STATE(sConfigInjected->InjectedDiscontinuousConvMode)); + +#ifdef USE_FULL_ASSERT + tmp = ADC_GET_RESOLUTION(hadc); + assert_param(IS_ADC_RANGE(tmp, sConfigInjected->InjectedOffset)); +#endif /* USE_FULL_ASSERT */ + + if(sConfigInjected->ExternalTrigInjecConv != ADC_INJECTED_SOFTWARE_START) + { + assert_param(IS_ADC_EXT_INJEC_TRIG_EDGE(sConfigInjected->ExternalTrigInjecConvEdge)); + } + + /* Process locked */ + __HAL_LOCK(hadc); + + /* if ADC_Channel_10 ... ADC_Channel_18 is selected */ + if (sConfigInjected->InjectedChannel > ADC_CHANNEL_9) + { + /* Clear the old sample time */ + hadc->Instance->SMPR1 &= ~ADC_SMPR1(ADC_SMPR1_SMP10, sConfigInjected->InjectedChannel); + + /* Set the new sample time */ + hadc->Instance->SMPR1 |= ADC_SMPR1(sConfigInjected->InjectedSamplingTime, sConfigInjected->InjectedChannel); + } + else /* ADC_Channel include in ADC_Channel_[0..9] */ + { + /* Clear the old sample time */ + hadc->Instance->SMPR2 &= ~ADC_SMPR2(ADC_SMPR2_SMP0, sConfigInjected->InjectedChannel); + + /* Set the new sample time */ + hadc->Instance->SMPR2 |= ADC_SMPR2(sConfigInjected->InjectedSamplingTime, sConfigInjected->InjectedChannel); + } + + /*---------------------------- ADCx JSQR Configuration -----------------*/ + hadc->Instance->JSQR &= ~(ADC_JSQR_JL); + hadc->Instance->JSQR |= ADC_SQR1(sConfigInjected->InjectedNbrOfConversion); + + /* Rank configuration */ + + /* Clear the old SQx bits for the selected rank */ + hadc->Instance->JSQR &= ~ADC_JSQR(ADC_JSQR_JSQ1, sConfigInjected->InjectedRank,sConfigInjected->InjectedNbrOfConversion); + + /* Set the SQx bits for the selected rank */ + hadc->Instance->JSQR |= ADC_JSQR(sConfigInjected->InjectedChannel, sConfigInjected->InjectedRank,sConfigInjected->InjectedNbrOfConversion); + + /* Enable external trigger if trigger selection is different of software */ + /* start. */ + /* Note: This configuration keeps the hardware feature of parameter */ + /* ExternalTrigConvEdge "trigger edge none" equivalent to */ + /* software start. 
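       (In other words, selecting an external trigger with edge "none" leaves
       JEXTEN cleared, so the injected group can still be started by software
       through the JSWSTART bit, just as with ADC_INJECTED_SOFTWARE_START.)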
*/ + if(sConfigInjected->ExternalTrigInjecConv != ADC_INJECTED_SOFTWARE_START) + { + /* Select external trigger to start conversion */ + hadc->Instance->CR2 &= ~(ADC_CR2_JEXTSEL); + hadc->Instance->CR2 |= sConfigInjected->ExternalTrigInjecConv; + + /* Select external trigger polarity */ + hadc->Instance->CR2 &= ~(ADC_CR2_JEXTEN); + hadc->Instance->CR2 |= sConfigInjected->ExternalTrigInjecConvEdge; + } + else + { + /* Reset the external trigger */ + hadc->Instance->CR2 &= ~(ADC_CR2_JEXTSEL); + hadc->Instance->CR2 &= ~(ADC_CR2_JEXTEN); + } + + if (sConfigInjected->AutoInjectedConv != DISABLE) + { + /* Enable the selected ADC automatic injected group conversion */ + hadc->Instance->CR1 |= ADC_CR1_JAUTO; + } + else + { + /* Disable the selected ADC automatic injected group conversion */ + hadc->Instance->CR1 &= ~(ADC_CR1_JAUTO); + } + + if (sConfigInjected->InjectedDiscontinuousConvMode != DISABLE) + { + /* Enable the selected ADC injected discontinuous mode */ + hadc->Instance->CR1 |= ADC_CR1_JDISCEN; + } + else + { + /* Disable the selected ADC injected discontinuous mode */ + hadc->Instance->CR1 &= ~(ADC_CR1_JDISCEN); + } + + switch(sConfigInjected->InjectedRank) + { + case 1: + /* Set injected channel 1 offset */ + hadc->Instance->JOFR1 &= ~(ADC_JOFR1_JOFFSET1); + hadc->Instance->JOFR1 |= sConfigInjected->InjectedOffset; + break; + case 2: + /* Set injected channel 2 offset */ + hadc->Instance->JOFR2 &= ~(ADC_JOFR2_JOFFSET2); + hadc->Instance->JOFR2 |= sConfigInjected->InjectedOffset; + break; + case 3: + /* Set injected channel 3 offset */ + hadc->Instance->JOFR3 &= ~(ADC_JOFR3_JOFFSET3); + hadc->Instance->JOFR3 |= sConfigInjected->InjectedOffset; + break; + default: + /* Set injected channel 4 offset */ + hadc->Instance->JOFR4 &= ~(ADC_JOFR4_JOFFSET4); + hadc->Instance->JOFR4 |= sConfigInjected->InjectedOffset; + break; + } + + /* if ADC1 Channel_18 is selected enable VBAT Channel */ + if ((hadc->Instance == ADC1) && (sConfigInjected->InjectedChannel == ADC_CHANNEL_VBAT)) + { + /* Enable the VBAT channel*/ + ADC->CCR |= ADC_CCR_VBATE; + } + + /* if ADC1 Channel_16 or Channel_17 is selected enable TSVREFE Channel(Temperature sensor and VREFINT) */ + if ((hadc->Instance == ADC1) && ((sConfigInjected->InjectedChannel == ADC_CHANNEL_TEMPSENSOR) || (sConfigInjected->InjectedChannel == ADC_CHANNEL_VREFINT))) + { + /* Enable the TSVREFE channel*/ + ADC->CCR |= ADC_CCR_TSVREFE; + } + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Configures the ADC multi-mode + * @param hadc pointer to a ADC_HandleTypeDef structure that contains + * the configuration information for the specified ADC. + * @param multimode pointer to an ADC_MultiModeTypeDef structure that contains + * the configuration information for multimode. 
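  * @note   Illustrative sketch only (hadc1 and multimode are user variables):
  *         a regular simultaneous dual mode with DMA access mode 2 could be
  *         configured as
  *           multimode.Mode             = ADC_DUALMODE_REGSIMULT;
  *           multimode.DMAAccessMode    = ADC_DMAACCESSMODE_2;
  *           multimode.TwoSamplingDelay = ADC_TWOSAMPLINGDELAY_5CYCLES;
  *           HAL_ADCEx_MultiModeConfigChannel(&hadc1, &multimode);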
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_MultiModeTypeDef* multimode) +{ + /* Check the parameters */ + assert_param(IS_ADC_MODE(multimode->Mode)); + assert_param(IS_ADC_DMA_ACCESS_MODE(multimode->DMAAccessMode)); + assert_param(IS_ADC_SAMPLING_DELAY(multimode->TwoSamplingDelay)); + + /* Process locked */ + __HAL_LOCK(hadc); + + /* Set ADC mode */ + ADC->CCR &= ~(ADC_CCR_MULTI); + ADC->CCR |= multimode->Mode; + + /* Set the ADC DMA access mode */ + ADC->CCR &= ~(ADC_CCR_DMA); + ADC->CCR |= multimode->DMAAccessMode; + + /* Set delay between two sampling phases */ + ADC->CCR &= ~(ADC_CCR_DELAY); + ADC->CCR |= multimode->TwoSamplingDelay; + + /* Process unlocked */ + __HAL_UNLOCK(hadc); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + + /** + * @brief DMA transfer complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void ADC_MultiModeDMAConvCplt(DMA_HandleTypeDef *hdma) +{ + /* Retrieve ADC handle corresponding to current DMA handle */ + ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + + /* Update state machine on conversion status if not in error state */ + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_ERROR_INTERNAL | HAL_ADC_STATE_ERROR_DMA)) + { + /* Update ADC state machine */ + SET_BIT(hadc->State, HAL_ADC_STATE_REG_EOC); + + /* Determine whether any further conversion upcoming on group regular */ + /* by external trigger, continuous mode or scan sequence on going. */ + /* Note: On STM32F7, there is no independent flag of end of sequence. */ + /* The test of scan sequence on going is done either with scan */ + /* sequence disabled or with end of conversion flag set to */ + /* of end of sequence. */ + if(ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE) && + (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) ) + { + /* Disable ADC end of single conversion interrupt on group regular */ + /* Note: Overrun interrupt was enabled with EOC interrupt in */ + /* HAL_ADC_Start_IT(), but is not disabled here because can be used */ + /* by overrun IRQ process below. */ + __HAL_ADC_DISABLE_IT(hadc, ADC_IT_EOC); + + /* Set ADC state */ + CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); + + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_INJ_BUSY)) + { + SET_BIT(hadc->State, HAL_ADC_STATE_READY); + } + } + + /* Conversion complete callback */ + HAL_ADC_ConvCpltCallback(hadc); + } + else + { + /* Call DMA error callback */ + hadc->DMA_Handle->XferErrorCallback(hdma); + } +} + +/** + * @brief DMA half transfer complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void ADC_MultiModeDMAHalfConvCplt(DMA_HandleTypeDef *hdma) +{ + ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + /* Conversion complete callback */ + HAL_ADC_ConvHalfCpltCallback(hadc); +} + +/** + * @brief DMA error callback + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. 
+ * @retval None + */ +static void ADC_MultiModeDMAError(DMA_HandleTypeDef *hdma) +{ + ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + hadc->State= HAL_ADC_STATE_ERROR_DMA; + /* Set ADC error code to DMA error */ + hadc->ErrorCode |= HAL_ADC_ERROR_DMA; + HAL_ADC_ErrorCallback(hadc); +} + +/** + * @} + */ + +#endif /* HAL_ADC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c new file mode 100644 index 000000000..a338d0710 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c @@ -0,0 +1,505 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_cortex.c + * @author MCD Application Team + * @brief CORTEX HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the CORTEX: + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + + [..] + *** How to configure Interrupts using CORTEX HAL driver *** + =========================================================== + [..] + This section provides functions allowing to configure the NVIC interrupts (IRQ). + The Cortex-M4 exceptions are managed by CMSIS functions. + + (#) Configure the NVIC Priority Grouping using HAL_NVIC_SetPriorityGrouping() + function according to the following table. + (#) Configure the priority of the selected IRQ Channels using HAL_NVIC_SetPriority(). + (#) Enable the selected IRQ Channels using HAL_NVIC_EnableIRQ(). + (#) please refer to programming manual for details in how to configure priority. + + -@- When the NVIC_PRIORITYGROUP_0 is selected, IRQ preemption is no more possible. + The pending IRQ priority will be managed only by the sub priority. + + -@- IRQ priority order (sorted by highest to lowest priority): + (+@) Lowest preemption priority + (+@) Lowest sub priority + (+@) Lowest hardware priority (IRQ number) + + [..] + *** How to configure Systick using CORTEX HAL driver *** + ======================================================== + [..] + Setup SysTick Timer for time base. + + (+) The HAL_SYSTICK_Config() function calls the SysTick_Config() function which + is a CMSIS function that: + (++) Configures the SysTick Reload register with value passed as function parameter. + (++) Configures the SysTick IRQ priority to the lowest value (0x0F). + (++) Resets the SysTick Counter register. + (++) Configures the SysTick Counter clock source to be Core Clock Source (HCLK). + (++) Enables the SysTick Interrupt. + (++) Starts the SysTick Counter. + + (+) You can change the SysTick Clock source to be HCLK_Div8 by calling the macro + __HAL_CORTEX_SYSTICKCLK_CONFIG(SYSTICK_CLKSOURCE_HCLK_DIV8) just after the + HAL_SYSTICK_Config() function call. The __HAL_CORTEX_SYSTICKCLK_CONFIG() macro is defined + inside the stm32f7xx_hal_cortex.h file. + + (+) You can change the SysTick IRQ priority by calling the + HAL_NVIC_SetPriority(SysTick_IRQn,...) function just after the HAL_SYSTICK_Config() function + call. 
The HAL_NVIC_SetPriority() calls the NVIC_SetPriority() function which is a CMSIS function. + + (+) To adjust the SysTick time base, use the following formula: + + Reload Value = SysTick Counter Clock (Hz) x Desired Time base (s) + (++) Reload Value is the parameter to be passed to the HAL_SYSTICK_Config() function + (++) Reload Value should not exceed 0xFFFFFF + + @endverbatim + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup CORTEX CORTEX + * @brief CORTEX HAL module driver + * @{ + */ + +#ifdef HAL_CORTEX_MODULE_ENABLED + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup CORTEX_Exported_Functions CORTEX Exported Functions + * @{ + */ + + +/** @defgroup CORTEX_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + ============================================================================== + ##### Initialization and de-initialization functions ##### + ============================================================================== + [..] + This section provides the CORTEX HAL driver functions allowing to configure Interrupts + Systick functionalities + +@endverbatim + * @{ + */ + + +/** + * @brief Sets the priority grouping field (preemption priority and subpriority) + * using the required unlock sequence. + * @param PriorityGroup The priority grouping bits length. + * This parameter can be one of the following values: + * @arg NVIC_PRIORITYGROUP_0: 0 bits for preemption priority + * 4 bits for subpriority + * @arg NVIC_PRIORITYGROUP_1: 1 bits for preemption priority + * 3 bits for subpriority + * @arg NVIC_PRIORITYGROUP_2: 2 bits for preemption priority + * 2 bits for subpriority + * @arg NVIC_PRIORITYGROUP_3: 3 bits for preemption priority + * 1 bits for subpriority + * @arg NVIC_PRIORITYGROUP_4: 4 bits for preemption priority + * 0 bits for subpriority + * @note When the NVIC_PriorityGroup_0 is selected, IRQ preemption is no more possible. + * The pending IRQ priority will be managed only by the subpriority. + * @retval None + */ +void HAL_NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + /* Check the parameters */ + assert_param(IS_NVIC_PRIORITY_GROUP(PriorityGroup)); + + /* Set the PRIGROUP[10:8] bits according to the PriorityGroup parameter value */ + NVIC_SetPriorityGrouping(PriorityGroup); +} + +/** + * @brief Sets the priority of an interrupt. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @param PreemptPriority The preemption priority for the IRQn channel. + * This parameter can be a value between 0 and 15 + * A lower priority value indicates a higher priority + * @param SubPriority the subpriority level for the IRQ channel. + * This parameter can be a value between 0 and 15 + * A lower priority value indicates a higher priority. 
+ * @retval None + */ +void HAL_NVIC_SetPriority(IRQn_Type IRQn, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t prioritygroup = 0x00; + + /* Check the parameters */ + assert_param(IS_NVIC_SUB_PRIORITY(SubPriority)); + assert_param(IS_NVIC_PREEMPTION_PRIORITY(PreemptPriority)); + + prioritygroup = NVIC_GetPriorityGrouping(); + + NVIC_SetPriority(IRQn, NVIC_EncodePriority(prioritygroup, PreemptPriority, SubPriority)); +} + +/** + * @brief Enables a device specific interrupt in the NVIC interrupt controller. + * @note To configure interrupts priority correctly, the NVIC_PriorityGroupConfig() + * function should be called before. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval None + */ +void HAL_NVIC_EnableIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Enable interrupt */ + NVIC_EnableIRQ(IRQn); +} + +/** + * @brief Disables a device specific interrupt in the NVIC interrupt controller. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval None + */ +void HAL_NVIC_DisableIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Disable interrupt */ + NVIC_DisableIRQ(IRQn); +} + +/** + * @brief Initiates a system reset request to reset the MCU. + * @retval None + */ +void HAL_NVIC_SystemReset(void) +{ + /* System Reset */ + NVIC_SystemReset(); +} + +/** + * @brief Initializes the System Timer and its interrupt, and starts the System Tick Timer. + * Counter is in free running mode to generate periodic interrupts. + * @param TicksNumb Specifies the ticks Number of ticks between two interrupts. + * @retval status: - 0 Function succeeded. + * - 1 Function failed. + */ +uint32_t HAL_SYSTICK_Config(uint32_t TicksNumb) +{ + return SysTick_Config(TicksNumb); +} +/** + * @} + */ + +/** @defgroup CORTEX_Exported_Functions_Group2 Peripheral Control functions + * @brief Cortex control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control the CORTEX + (NVIC, SYSTICK, MPU) functionalities. 
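    [..]
      A minimal usage sketch (an illustration only: it assumes a 1 ms time base
      is wanted, that SystemCoreClock already holds the configured HCLK value,
      and EXTI0_IRQn merely stands in for any device interrupt):
      (+) HAL_NVIC_SetPriorityGrouping(NVIC_PRIORITYGROUP_4);
      (+) HAL_SYSTICK_Config(SystemCoreClock / 1000U);
      (+) HAL_NVIC_SetPriority(SysTick_IRQn, 15U, 0U);
      (+) HAL_NVIC_SetPriority(EXTI0_IRQn, 5U, 0U);
      (+) HAL_NVIC_EnableIRQ(EXTI0_IRQn);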
+ + +@endverbatim + * @{ + */ + +#if (__MPU_PRESENT == 1) +/** + * @brief Disables the MPU + * @retval None + */ +void HAL_MPU_Disable(void) +{ + /* Make sure outstanding transfers are done */ + __DMB(); + + /* Disable fault exceptions */ + SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; + + /* Disable the MPU and clear the control register*/ + MPU->CTRL = 0; +} + +/** + * @brief Enables the MPU + * @param MPU_Control Specifies the control mode of the MPU during hard fault, + * NMI, FAULTMASK and privileged access to the default memory + * This parameter can be one of the following values: + * @arg MPU_HFNMI_PRIVDEF_NONE + * @arg MPU_HARDFAULT_NMI + * @arg MPU_PRIVILEGED_DEFAULT + * @arg MPU_HFNMI_PRIVDEF + * @retval None + */ +void HAL_MPU_Enable(uint32_t MPU_Control) +{ + /* Enable the MPU */ + MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; + + /* Enable fault exceptions */ + SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; + + /* Ensure MPU setting take effects */ + __DSB(); + __ISB(); +} + +/** + * @brief Initializes and configures the Region and the memory to be protected. + * @param MPU_Init Pointer to a MPU_Region_InitTypeDef structure that contains + * the initialization and configuration information. + * @retval None + */ +void HAL_MPU_ConfigRegion(MPU_Region_InitTypeDef *MPU_Init) +{ + /* Check the parameters */ + assert_param(IS_MPU_REGION_NUMBER(MPU_Init->Number)); + assert_param(IS_MPU_REGION_ENABLE(MPU_Init->Enable)); + + /* Set the Region number */ + MPU->RNR = MPU_Init->Number; + + if ((MPU_Init->Enable) != RESET) + { + /* Check the parameters */ + assert_param(IS_MPU_INSTRUCTION_ACCESS(MPU_Init->DisableExec)); + assert_param(IS_MPU_REGION_PERMISSION_ATTRIBUTE(MPU_Init->AccessPermission)); + assert_param(IS_MPU_TEX_LEVEL(MPU_Init->TypeExtField)); + assert_param(IS_MPU_ACCESS_SHAREABLE(MPU_Init->IsShareable)); + assert_param(IS_MPU_ACCESS_CACHEABLE(MPU_Init->IsCacheable)); + assert_param(IS_MPU_ACCESS_BUFFERABLE(MPU_Init->IsBufferable)); + assert_param(IS_MPU_SUB_REGION_DISABLE(MPU_Init->SubRegionDisable)); + assert_param(IS_MPU_REGION_SIZE(MPU_Init->Size)); + + MPU->RBAR = MPU_Init->BaseAddress; + MPU->RASR = ((uint32_t)MPU_Init->DisableExec << MPU_RASR_XN_Pos) | + ((uint32_t)MPU_Init->AccessPermission << MPU_RASR_AP_Pos) | + ((uint32_t)MPU_Init->TypeExtField << MPU_RASR_TEX_Pos) | + ((uint32_t)MPU_Init->IsShareable << MPU_RASR_S_Pos) | + ((uint32_t)MPU_Init->IsCacheable << MPU_RASR_C_Pos) | + ((uint32_t)MPU_Init->IsBufferable << MPU_RASR_B_Pos) | + ((uint32_t)MPU_Init->SubRegionDisable << MPU_RASR_SRD_Pos) | + ((uint32_t)MPU_Init->Size << MPU_RASR_SIZE_Pos) | + ((uint32_t)MPU_Init->Enable << MPU_RASR_ENABLE_Pos); + } + else + { + MPU->RBAR = 0x00; + MPU->RASR = 0x00; + } +} +#endif /* __MPU_PRESENT */ + +/** + * @brief Gets the priority grouping field from the NVIC Interrupt Controller. + * @retval Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field) + */ +uint32_t HAL_NVIC_GetPriorityGrouping(void) +{ + /* Get the PRIGROUP[10:8] field value */ + return NVIC_GetPriorityGrouping(); +} + +/** + * @brief Gets the priority of an interrupt. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @param PriorityGroup the priority grouping bits length. 
+ * This parameter can be one of the following values: + * @arg NVIC_PRIORITYGROUP_0: 0 bits for preemption priority + * 4 bits for subpriority + * @arg NVIC_PRIORITYGROUP_1: 1 bits for preemption priority + * 3 bits for subpriority + * @arg NVIC_PRIORITYGROUP_2: 2 bits for preemption priority + * 2 bits for subpriority + * @arg NVIC_PRIORITYGROUP_3: 3 bits for preemption priority + * 1 bits for subpriority + * @arg NVIC_PRIORITYGROUP_4: 4 bits for preemption priority + * 0 bits for subpriority + * @param pPreemptPriority Pointer on the Preemptive priority value (starting from 0). + * @param pSubPriority Pointer on the Subpriority value (starting from 0). + * @retval None + */ +void HAL_NVIC_GetPriority(IRQn_Type IRQn, uint32_t PriorityGroup, uint32_t *pPreemptPriority, uint32_t *pSubPriority) +{ + /* Check the parameters */ + assert_param(IS_NVIC_PRIORITY_GROUP(PriorityGroup)); + /* Get priority for Cortex-M system or device specific interrupts */ + NVIC_DecodePriority(NVIC_GetPriority(IRQn), PriorityGroup, pPreemptPriority, pSubPriority); +} + +/** + * @brief Sets Pending bit of an external interrupt. + * @param IRQn External interrupt number + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval None + */ +void HAL_NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Set interrupt pending */ + NVIC_SetPendingIRQ(IRQn); +} + +/** + * @brief Gets Pending Interrupt (reads the pending register in the NVIC + * and returns the pending bit for the specified interrupt). + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval status: - 0 Interrupt status is not pending. + * - 1 Interrupt status is pending. + */ +uint32_t HAL_NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Return 1 if pending else 0 */ + return NVIC_GetPendingIRQ(IRQn); +} + +/** + * @brief Clears the pending bit of an external interrupt. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval None + */ +void HAL_NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Clear pending interrupt */ + NVIC_ClearPendingIRQ(IRQn); +} + +/** + * @brief Gets active interrupt ( reads the active register in NVIC and returns the active bit). + * @param IRQn External interrupt number + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval status: - 0 Interrupt status is not pending. + * - 1 Interrupt status is pending. + */ +uint32_t HAL_NVIC_GetActive(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Return 1 if active else 0 */ + return NVIC_GetActive(IRQn); +} + +/** + * @brief Configures the SysTick clock source. + * @param CLKSource specifies the SysTick clock source. 
+ * This parameter can be one of the following values: + * @arg SYSTICK_CLKSOURCE_HCLK_DIV8: AHB clock divided by 8 selected as SysTick clock source. + * @arg SYSTICK_CLKSOURCE_HCLK: AHB clock selected as SysTick clock source. + * @retval None + */ +void HAL_SYSTICK_CLKSourceConfig(uint32_t CLKSource) +{ + /* Check the parameters */ + assert_param(IS_SYSTICK_CLK_SOURCE(CLKSource)); + if (CLKSource == SYSTICK_CLKSOURCE_HCLK) + { + SysTick->CTRL |= SYSTICK_CLKSOURCE_HCLK; + } + else + { + SysTick->CTRL &= ~SYSTICK_CLKSOURCE_HCLK; + } +} + +/** + * @brief This function handles SYSTICK interrupt request. + * @retval None + */ +void HAL_SYSTICK_IRQHandler(void) +{ + HAL_SYSTICK_Callback(); +} + +/** + * @brief SYSTICK callback. + * @retval None + */ +__weak void HAL_SYSTICK_Callback(void) +{ + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_SYSTICK_Callback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_CORTEX_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc.c new file mode 100644 index 000000000..1ba51ada5 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc.c @@ -0,0 +1,518 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_crc.c + * @author MCD Application Team + * @brief CRC HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Cyclic Redundancy Check (CRC) peripheral: + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + Peripheral State functions + * + @verbatim + =============================================================================== + ##### How to use this driver ##### + =============================================================================== + [..] + (+) Enable CRC AHB clock using __HAL_RCC_CRC_CLK_ENABLE(); + (+) Initialize CRC calculator + (++) specify generating polynomial (peripheral default or non-default one) + (++) specify initialization value (peripheral default or non-default one) + (++) specify input data format + (++) specify input or output data inversion mode if any + (+) Use HAL_CRC_Accumulate() function to compute the CRC value of the + input data buffer starting with the previously computed CRC as + initialization value + (+) Use HAL_CRC_Calculate() function to compute the CRC value of the + input data buffer starting with the defined initialization value + (default or non-default) to initiate CRC calculation + + @endverbatim + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup CRC CRC + * @brief CRC HAL module driver. + * @{ + */ + +#ifdef HAL_CRC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @defgroup CRC_Private_Functions CRC Private Functions + * @{ + */ +static uint32_t CRC_Handle_8(CRC_HandleTypeDef *hcrc, uint8_t pBuffer[], uint32_t BufferLength); +static uint32_t CRC_Handle_16(CRC_HandleTypeDef *hcrc, uint16_t pBuffer[], uint32_t BufferLength); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup CRC_Exported_Functions CRC Exported Functions + * @{ + */ + +/** @defgroup CRC_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions. + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initialize the CRC according to the specified parameters + in the CRC_InitTypeDef and create the associated handle + (+) DeInitialize the CRC peripheral + (+) Initialize the CRC MSP (MCU Specific Package) + (+) DeInitialize the CRC MSP + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the CRC according to the specified + * parameters in the CRC_InitTypeDef and create the associated handle. 
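  * @note   A minimal configuration sketch (illustrative only; it assumes the
  *         peripheral default CRC-32 polynomial and init value with a byte
  *         stream as input, and the handle name hcrc is arbitrary):
  *           hcrc.Instance                     = CRC;
  *           hcrc.Init.DefaultPolynomialUse    = DEFAULT_POLYNOMIAL_ENABLE;
  *           hcrc.Init.DefaultInitValueUse     = DEFAULT_INIT_VALUE_ENABLE;
  *           hcrc.Init.InputDataInversionMode  = CRC_INPUTDATA_INVERSION_NONE;
  *           hcrc.Init.OutputDataInversionMode = CRC_OUTPUTDATA_INVERSION_DISABLE;
  *           hcrc.InputDataFormat              = CRC_INPUTDATA_FORMAT_BYTES;
  *           HAL_CRC_Init(&hcrc);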
+ * @param hcrc CRC handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_CRC_Init(CRC_HandleTypeDef *hcrc) +{ + /* Check the CRC handle allocation */ + if (hcrc == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_CRC_ALL_INSTANCE(hcrc->Instance)); + + if (hcrc->State == HAL_CRC_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hcrc->Lock = HAL_UNLOCKED; + /* Init the low level hardware */ + HAL_CRC_MspInit(hcrc); + } + + hcrc->State = HAL_CRC_STATE_BUSY; + + /* check whether or not non-default generating polynomial has been + * picked up by user */ + assert_param(IS_DEFAULT_POLYNOMIAL(hcrc->Init.DefaultPolynomialUse)); + if (hcrc->Init.DefaultPolynomialUse == DEFAULT_POLYNOMIAL_ENABLE) + { + /* initialize peripheral with default generating polynomial */ + WRITE_REG(hcrc->Instance->POL, DEFAULT_CRC32_POLY); + MODIFY_REG(hcrc->Instance->CR, CRC_CR_POLYSIZE, CRC_POLYLENGTH_32B); + } + else + { + /* initialize CRC peripheral with generating polynomial defined by user */ + if (HAL_CRCEx_Polynomial_Set(hcrc, hcrc->Init.GeneratingPolynomial, hcrc->Init.CRCLength) != HAL_OK) + { + return HAL_ERROR; + } + } + + /* check whether or not non-default CRC initial value has been + * picked up by user */ + assert_param(IS_DEFAULT_INIT_VALUE(hcrc->Init.DefaultInitValueUse)); + if (hcrc->Init.DefaultInitValueUse == DEFAULT_INIT_VALUE_ENABLE) + { + WRITE_REG(hcrc->Instance->INIT, DEFAULT_CRC_INITVALUE); + } + else + { + WRITE_REG(hcrc->Instance->INIT, hcrc->Init.InitValue); + } + + + /* set input data inversion mode */ + assert_param(IS_CRC_INPUTDATA_INVERSION_MODE(hcrc->Init.InputDataInversionMode)); + MODIFY_REG(hcrc->Instance->CR, CRC_CR_REV_IN, hcrc->Init.InputDataInversionMode); + + /* set output data inversion mode */ + assert_param(IS_CRC_OUTPUTDATA_INVERSION_MODE(hcrc->Init.OutputDataInversionMode)); + MODIFY_REG(hcrc->Instance->CR, CRC_CR_REV_OUT, hcrc->Init.OutputDataInversionMode); + + /* makes sure the input data format (bytes, halfwords or words stream) + * is properly specified by user */ + assert_param(IS_CRC_INPUTDATA_FORMAT(hcrc->InputDataFormat)); + + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief DeInitialize the CRC peripheral. + * @param hcrc CRC handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_CRC_DeInit(CRC_HandleTypeDef *hcrc) +{ + /* Check the CRC handle allocation */ + if (hcrc == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_CRC_ALL_INSTANCE(hcrc->Instance)); + + /* Check the CRC peripheral state */ + if (hcrc->State == HAL_CRC_STATE_BUSY) + { + return HAL_BUSY; + } + + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_BUSY; + + /* Reset CRC calculation unit */ + __HAL_CRC_DR_RESET(hcrc); + + /* Reset IDR register content */ + CLEAR_BIT(hcrc->Instance->IDR, CRC_IDR_IDR); + + /* DeInit the low level hardware */ + HAL_CRC_MspDeInit(hcrc); + + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_RESET; + + /* Process unlocked */ + __HAL_UNLOCK(hcrc); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the CRC MSP. 
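  * @note   In a typical user implementation this only enables the peripheral
  *         clock, e.g. (a sketch): __HAL_RCC_CRC_CLK_ENABLE();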
+ * @param hcrc CRC handle + * @retval None + */ +__weak void HAL_CRC_MspInit(CRC_HandleTypeDef *hcrc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hcrc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_CRC_MspInit can be implemented in the user file + */ +} + +/** + * @brief DeInitialize the CRC MSP. + * @param hcrc CRC handle + * @retval None + */ +__weak void HAL_CRC_MspDeInit(CRC_HandleTypeDef *hcrc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hcrc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_CRC_MspDeInit can be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup CRC_Exported_Functions_Group2 Peripheral Control functions + * @brief management functions. + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) compute the 7, 8, 16 or 32-bit CRC value of an 8, 16 or 32-bit data buffer + using combination of the previous CRC value and the new one. + + [..] or + + (+) compute the 7, 8, 16 or 32-bit CRC value of an 8, 16 or 32-bit data buffer + independently of the previous CRC value. + +@endverbatim + * @{ + */ + +/** + * @brief Compute the 7, 8, 16 or 32-bit CRC value of an 8, 16 or 32-bit data buffer + * starting with the previously computed CRC as initialization value. + * @param hcrc CRC handle + * @param pBuffer pointer to the input data buffer, exact input data format is + * provided by hcrc->InputDataFormat. + * @param BufferLength input data buffer length (number of bytes if pBuffer + * type is * uint8_t, number of half-words if pBuffer type is * uint16_t, + * number of words if pBuffer type is * uint32_t). + * @note By default, the API expects a uint32_t pointer as input buffer parameter. + * Input buffer pointers with other types simply need to be cast in uint32_t + * and the API will internally adjust its input data processing based on the + * handle field hcrc->InputDataFormat. + * @retval uint32_t CRC (returned value LSBs for CRC shorter than 32 bits) + */ +uint32_t HAL_CRC_Accumulate(CRC_HandleTypeDef *hcrc, uint32_t pBuffer[], uint32_t BufferLength) +{ + uint32_t index; /* CRC input data buffer index */ + uint32_t temp = 0U; /* CRC output (read from hcrc->Instance->DR register) */ + + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_BUSY; + + switch (hcrc->InputDataFormat) + { + case CRC_INPUTDATA_FORMAT_WORDS: + /* Enter Data to the CRC calculator */ + for (index = 0U; index < BufferLength; index++) + { + hcrc->Instance->DR = pBuffer[index]; + } + temp = hcrc->Instance->DR; + break; + + case CRC_INPUTDATA_FORMAT_BYTES: + temp = CRC_Handle_8(hcrc, (uint8_t *)pBuffer, BufferLength); + break; + + case CRC_INPUTDATA_FORMAT_HALFWORDS: + temp = CRC_Handle_16(hcrc, (uint16_t *)(void *)pBuffer, BufferLength); /* Derogation MisraC2012 R.11.5 */ + break; + default: + break; + } + + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_READY; + + /* Return the CRC computed value */ + return temp; +} + +/** + * @brief Compute the 7, 8, 16 or 32-bit CRC value of an 8, 16 or 32-bit data buffer + * starting with hcrc->Instance->INIT as initialization value. 
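  * @note   Usage sketch (illustrative only; it assumes hcrc was initialized
  *         with InputDataFormat set to CRC_INPUTDATA_FORMAT_BYTES and the
  *         buffer name is arbitrary):
  *           uint8_t  message[4] = {0x31U, 0x32U, 0x33U, 0x34U};
  *           uint32_t crc = HAL_CRC_Calculate(&hcrc, (uint32_t *)message, 4U);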
+ * @param hcrc CRC handle + * @param pBuffer pointer to the input data buffer, exact input data format is + * provided by hcrc->InputDataFormat. + * @param BufferLength input data buffer length (number of bytes if pBuffer + * type is * uint8_t, number of half-words if pBuffer type is * uint16_t, + * number of words if pBuffer type is * uint32_t). + * @note By default, the API expects a uint32_t pointer as input buffer parameter. + * Input buffer pointers with other types simply need to be cast in uint32_t + * and the API will internally adjust its input data processing based on the + * handle field hcrc->InputDataFormat. + * @retval uint32_t CRC (returned value LSBs for CRC shorter than 32 bits) + */ +uint32_t HAL_CRC_Calculate(CRC_HandleTypeDef *hcrc, uint32_t pBuffer[], uint32_t BufferLength) +{ + uint32_t index; /* CRC input data buffer index */ + uint32_t temp = 0U; /* CRC output (read from hcrc->Instance->DR register) */ + + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_BUSY; + + /* Reset CRC Calculation Unit (hcrc->Instance->INIT is + * written in hcrc->Instance->DR) */ + __HAL_CRC_DR_RESET(hcrc); + + switch (hcrc->InputDataFormat) + { + case CRC_INPUTDATA_FORMAT_WORDS: + /* Enter 32-bit input data to the CRC calculator */ + for (index = 0U; index < BufferLength; index++) + { + hcrc->Instance->DR = pBuffer[index]; + } + temp = hcrc->Instance->DR; + break; + + case CRC_INPUTDATA_FORMAT_BYTES: + /* Specific 8-bit input data handling */ + temp = CRC_Handle_8(hcrc, (uint8_t *)pBuffer, BufferLength); + break; + + case CRC_INPUTDATA_FORMAT_HALFWORDS: + /* Specific 16-bit input data handling */ + temp = CRC_Handle_16(hcrc, (uint16_t *)(void *)pBuffer, BufferLength); /* Derogation MisraC2012 R.11.5 */ + break; + + default: + break; + } + + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_READY; + + /* Return the CRC computed value */ + return temp; +} + +/** + * @} + */ + +/** @defgroup CRC_Exported_Functions_Group3 Peripheral State functions + * @brief Peripheral State functions. + * +@verbatim + =============================================================================== + ##### Peripheral State functions ##### + =============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral. + +@endverbatim + * @{ + */ + +/** + * @brief Return the CRC handle state. + * @param hcrc CRC handle + * @retval HAL state + */ +HAL_CRC_StateTypeDef HAL_CRC_GetState(CRC_HandleTypeDef *hcrc) +{ + /* Return CRC handle state */ + return hcrc->State; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup CRC_Private_Functions + * @{ + */ + +/** + * @brief Enter 8-bit input data to the CRC calculator. + * Specific data handling to optimize processing time. 
+ * @param hcrc CRC handle + * @param pBuffer pointer to the input data buffer + * @param BufferLength input data buffer length + * @retval uint32_t CRC (returned value LSBs for CRC shorter than 32 bits) + */ +static uint32_t CRC_Handle_8(CRC_HandleTypeDef *hcrc, uint8_t pBuffer[], uint32_t BufferLength) +{ + uint32_t i; /* input data buffer index */ + uint16_t data; + __IO uint16_t *pReg; + + /* Processing time optimization: 4 bytes are entered in a row with a single word write, + * last bytes must be carefully fed to the CRC calculator to ensure a correct type + * handling by the peripheral */ + for (i = 0U; i < (BufferLength / 4U); i++) + { + hcrc->Instance->DR = ((uint32_t)pBuffer[4U * i] << 24U) | \ + ((uint32_t)pBuffer[(4U * i) + 1U] << 16U) | \ + ((uint32_t)pBuffer[(4U * i) + 2U] << 8U) | \ + (uint32_t)pBuffer[(4U * i) + 3U]; + } + /* last bytes specific handling */ + if ((BufferLength % 4U) != 0U) + { + if ((BufferLength % 4U) == 1U) + { + *(__IO uint8_t *)(__IO void *)(&hcrc->Instance->DR) = pBuffer[4U * i]; /* Derogation MisraC2012 R.11.5 */ + } + if ((BufferLength % 4U) == 2U) + { + data = ((uint16_t)(pBuffer[4U * i]) << 8U) | (uint16_t)pBuffer[(4U * i) + 1U]; + pReg = (__IO uint16_t *)(__IO void *)(&hcrc->Instance->DR); /* Derogation MisraC2012 R.11.5 */ + *pReg = data; + } + if ((BufferLength % 4U) == 3U) + { + data = ((uint16_t)(pBuffer[4U * i]) << 8U) | (uint16_t)pBuffer[(4U * i) + 1U]; + pReg = (__IO uint16_t *)(__IO void *)(&hcrc->Instance->DR); /* Derogation MisraC2012 R.11.5 */ + *pReg = data; + + *(__IO uint8_t *)(__IO void *)(&hcrc->Instance->DR) = pBuffer[(4U * i) + 2U]; /* Derogation MisraC2012 R.11.5 */ + } + } + + /* Return the CRC computed value */ + return hcrc->Instance->DR; +} + +/** + * @brief Enter 16-bit input data to the CRC calculator. + * Specific data handling to optimize processing time. 
+ * @param hcrc CRC handle + * @param pBuffer pointer to the input data buffer + * @param BufferLength input data buffer length + * @retval uint32_t CRC (returned value LSBs for CRC shorter than 32 bits) + */ +static uint32_t CRC_Handle_16(CRC_HandleTypeDef *hcrc, uint16_t pBuffer[], uint32_t BufferLength) +{ + uint32_t i; /* input data buffer index */ + __IO uint16_t *pReg; + + /* Processing time optimization: 2 HalfWords are entered in a row with a single word write, + * in case of odd length, last HalfWord must be carefully fed to the CRC calculator to ensure + * a correct type handling by the peripheral */ + for (i = 0U; i < (BufferLength / 2U); i++) + { + hcrc->Instance->DR = ((uint32_t)pBuffer[2U * i] << 16U) | (uint32_t)pBuffer[(2U * i) + 1U]; + } + if ((BufferLength % 2U) != 0U) + { + pReg = (__IO uint16_t *)(__IO void *)(&hcrc->Instance->DR); /* Derogation MisraC2012 R.11.5 */ + *pReg = pBuffer[2U * i]; + } + + /* Return the CRC computed value */ + return hcrc->Instance->DR; +} + +/** + * @} + */ + +#endif /* HAL_CRC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc_ex.c new file mode 100644 index 000000000..004cb3ae6 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_crc_ex.c @@ -0,0 +1,225 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_crc_ex.c + * @author MCD Application Team + * @brief Extended CRC HAL module driver. + * This file provides firmware functions to manage the extended + * functionalities of the CRC peripheral. + * + @verbatim +================================================================================ + ##### How to use this driver ##### +================================================================================ + [..] + (+) Set user-defined generating polynomial thru HAL_CRCEx_Polynomial_Set() + (+) Configure Input or Output data inversion + + @endverbatim + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup CRCEx CRCEx + * @brief CRC Extended HAL module driver + * @{ + */ + +#ifdef HAL_CRC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup CRCEx_Exported_Functions CRC Extended Exported Functions + * @{ + */ + +/** @defgroup CRCEx_Exported_Functions_Group1 Extended Initialization/de-initialization functions + * @brief Extended Initialization and Configuration functions. + * +@verbatim + =============================================================================== + ##### Extended configuration functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the generating polynomial + (+) Configure the input data inversion + (+) Configure the output data inversion + +@endverbatim + * @{ + */ + + +/** + * @brief Initialize the CRC polynomial if different from default one. + * @param hcrc CRC handle + * @param Pol CRC generating polynomial (7, 8, 16 or 32-bit long). + * This parameter is written in normal representation, e.g. + * @arg for a polynomial of degree 7, X^7 + X^6 + X^5 + X^2 + 1 is written 0x65 + * @arg for a polynomial of degree 16, X^16 + X^12 + X^5 + 1 is written 0x1021 + * @param PolyLength CRC polynomial length. + * This parameter can be one of the following values: + * @arg @ref CRC_POLYLENGTH_7B 7-bit long CRC (generating polynomial of degree 7) + * @arg @ref CRC_POLYLENGTH_8B 8-bit long CRC (generating polynomial of degree 8) + * @arg @ref CRC_POLYLENGTH_16B 16-bit long CRC (generating polynomial of degree 16) + * @arg @ref CRC_POLYLENGTH_32B 32-bit long CRC (generating polynomial of degree 32) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_CRCEx_Polynomial_Set(CRC_HandleTypeDef *hcrc, uint32_t Pol, uint32_t PolyLength) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t msb = 31U; /* polynomial degree is 32 at most, so msb is initialized to max value */ + + /* Check the parameters */ + assert_param(IS_CRC_POL_LENGTH(PolyLength)); + + /* check polynomial definition vs polynomial size: + * polynomial length must be aligned with polynomial + * definition. HAL_ERROR is reported if Pol degree is + * larger than that indicated by PolyLength. + * Look for MSB position: msb will contain the degree of + * the second to the largest polynomial member. E.g., for + * X^7 + X^6 + X^5 + X^2 + 1, msb = 6. 
*/ + while ((msb-- > 0U) && ((Pol & ((uint32_t)(0x1U) << (msb & 0x1FU))) == 0U)) + { + } + + switch (PolyLength) + { + case CRC_POLYLENGTH_7B: + if (msb >= HAL_CRC_LENGTH_7B) + { + status = HAL_ERROR; + } + break; + case CRC_POLYLENGTH_8B: + if (msb >= HAL_CRC_LENGTH_8B) + { + status = HAL_ERROR; + } + break; + case CRC_POLYLENGTH_16B: + if (msb >= HAL_CRC_LENGTH_16B) + { + status = HAL_ERROR; + } + break; + + case CRC_POLYLENGTH_32B: + /* no polynomial definition vs. polynomial length issue possible */ + break; + default: + status = HAL_ERROR; + break; + } + if (status == HAL_OK) + { + /* set generating polynomial */ + WRITE_REG(hcrc->Instance->POL, Pol); + + /* set generating polynomial size */ + MODIFY_REG(hcrc->Instance->CR, CRC_CR_POLYSIZE, PolyLength); + } + /* Return function status */ + return status; +} + +/** + * @brief Set the Reverse Input data mode. + * @param hcrc CRC handle + * @param InputReverseMode Input Data inversion mode. + * This parameter can be one of the following values: + * @arg @ref CRC_INPUTDATA_INVERSION_NONE no change in bit order (default value) + * @arg @ref CRC_INPUTDATA_INVERSION_BYTE Byte-wise bit reversal + * @arg @ref CRC_INPUTDATA_INVERSION_HALFWORD HalfWord-wise bit reversal + * @arg @ref CRC_INPUTDATA_INVERSION_WORD Word-wise bit reversal + * @retval HAL status + */ +HAL_StatusTypeDef HAL_CRCEx_Input_Data_Reverse(CRC_HandleTypeDef *hcrc, uint32_t InputReverseMode) +{ + /* Check the parameters */ + assert_param(IS_CRC_INPUTDATA_INVERSION_MODE(InputReverseMode)); + + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_BUSY; + + /* set input data inversion mode */ + MODIFY_REG(hcrc->Instance->CR, CRC_CR_REV_IN, InputReverseMode); + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Set the Reverse Output data mode. + * @param hcrc CRC handle + * @param OutputReverseMode Output Data inversion mode. + * This parameter can be one of the following values: + * @arg @ref CRC_OUTPUTDATA_INVERSION_DISABLE no CRC inversion (default value) + * @arg @ref CRC_OUTPUTDATA_INVERSION_ENABLE bit-level inversion (e.g. for a 8-bit CRC: 0xB5 becomes 0xAD) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_CRCEx_Output_Data_Reverse(CRC_HandleTypeDef *hcrc, uint32_t OutputReverseMode) +{ + /* Check the parameters */ + assert_param(IS_CRC_OUTPUTDATA_INVERSION_MODE(OutputReverseMode)); + + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_BUSY; + + /* set output data inversion mode */ + MODIFY_REG(hcrc->Instance->CR, CRC_CR_REV_OUT, OutputReverseMode); + + /* Change CRC peripheral state */ + hcrc->State = HAL_CRC_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + + + + +/** + * @} + */ + + +/** + * @} + */ + + +#endif /* HAL_CRC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac.c new file mode 100644 index 000000000..42b6d0834 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac.c @@ -0,0 +1,1299 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dac.c + * @author MCD Application Team + * @brief DAC HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the Digital to Analog Converter (DAC) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State and Errors functions + * + * + @verbatim + ============================================================================== + ##### DAC Peripheral features ##### + ============================================================================== + [..] + *** DAC Channels *** + ==================== + [..] + The device integrates two 12-bit Digital Analog Converters that can + be used independently or simultaneously (dual mode): + (#) DAC channel1 with DAC_OUT1 (PA4) as output + (#) DAC channel2 with DAC_OUT2 (PA5) as output + + *** DAC Triggers *** + ==================== + [..] + Digital to Analog conversion can be non-triggered using DAC_TRIGGER_NONE + and DAC_OUT1/DAC_OUT2 is available once writing to DHRx register. + [..] + Digital to Analog conversion can be triggered by: + (#) External event: EXTI Line 9 (any GPIOx_Pin9) using DAC_TRIGGER_EXT_IT9. + The used pin (GPIOx_Pin9) must be configured in input mode. + + (#) Timers TRGO: TIM2, TIM4, TIM5, TIM6, TIM7 and TIM8 + (DAC_TRIGGER_T2_TRGO, DAC_TRIGGER_T4_TRGO...) + + (#) Software using DAC_TRIGGER_SOFTWARE + + *** DAC Buffer mode feature *** + =============================== + [..] + Each DAC channel integrates an output buffer that can be used to + reduce the output impedance, and to drive external loads directly + without having to add an external operational amplifier. + To enable, the output buffer use + sConfig.DAC_OutputBuffer = DAC_OUTPUTBUFFER_ENABLE; + [..] + (@) Refer to the device datasheet for more details about output + impedance value with and without output buffer. + + *** DAC wave generation feature *** + =================================== + [..] + Both DAC channels can be used to generate + (#) Noise wave using HAL_DACEx_NoiseWaveGenerate() + (#) Triangle wave using HAL_DACEx_TriangleWaveGenerate() + + *** DAC data format *** + ======================= + [..] + The DAC data format can be: + (#) 8-bit right alignment using DAC_ALIGN_8B_R + (#) 12-bit left alignment using DAC_ALIGN_12B_L + (#) 12-bit right alignment using DAC_ALIGN_12B_R + + *** DAC data value to voltage correspondence *** + ================================================ + [..] + The analog output voltage on each DAC channel pin is determined + by the following equation: + DAC_OUTx = VREF+ * DOR / 4095 + with DOR is the Data Output Register + VEF+ is the input voltage reference (refer to the device datasheet) + e.g. To set DAC_OUT1 to 0.7V, use + Assuming that VREF+ = 3.3V, DAC_OUT1 = (3.3 * 868) / 4095 = 0.7V + + *** DMA requests *** + ===================== + [..] + A DMA1 request can be generated when an external trigger (but not + a software trigger) occurs if DMA1 requests are enabled using + HAL_DAC_Start_DMA() + [..] + DMA1 requests are mapped as following: + (#) DAC channel1 : mapped on DMA1 Stream5 channel7 which must be + already configured + (#) DAC channel2 : mapped on DMA1 Stream6 channel7 which must be + already configured + + -@- For Dual mode and specific signal (Triangle and noise) generation please + refer to Extension Features Driver description + + + ##### How to use this driver ##### + ============================================================================== + [..] 
+ (+) DAC APB clock must be enabled to get write access to DAC + registers using HAL_DAC_Init() + (+) Configure DAC_OUTx (DAC_OUT1: PA4, DAC_OUT2: PA5) in analog mode. + (+) Configure the DAC channel using HAL_DAC_ConfigChannel() function. + (+) Enable the DAC channel using HAL_DAC_Start() or HAL_DAC_Start_DMA functions + + *** Polling mode IO operation *** + ================================= + [..] + (+) Start the DAC peripheral using HAL_DAC_Start() + (+) To read the DAC last data output value, use the HAL_DAC_GetValue() function. + (+) Stop the DAC peripheral using HAL_DAC_Stop() + + + *** DMA mode IO operation *** + ============================== + [..] + (+) Start the DAC peripheral using HAL_DAC_Start_DMA(), at this stage the user specify the length + of data to be transferred at each end of conversion + (+) At The end of data transfer HAL_DAC_ConvCpltCallbackCh1()or HAL_DAC_ConvCpltCallbackCh2() + function is executed and user can add his own code by customization of function pointer + HAL_DAC_ConvCpltCallbackCh1 or HAL_DAC_ConvCpltCallbackCh2 + (+) In case of transfer Error, HAL_DAC_ErrorCallbackCh1() function is executed and user can + add his own code by customization of function pointer HAL_DAC_ErrorCallbackCh1 + (+) Stop the DAC peripheral using HAL_DAC_Stop_DMA() + + *** Callback registration *** + ============================================= + [..] + The compilation define USE_HAL_DAC_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + + Use Functions @ref HAL_DAC_RegisterCallback() to register a user callback, + it allows to register following callbacks: + (+) ConvCpltCallbackCh1 : callback when a half transfer is completed on Ch1. + (+) ConvHalfCpltCallbackCh1 : callback when a transfer is completed on Ch1. + (+) ErrorCallbackCh1 : callback when an error occurs on Ch1. + (+) DMAUnderrunCallbackCh1 : callback when an underrun error occurs on Ch1. + (+) ConvCpltCallbackCh2 : callback when a half transfer is completed on Ch2. + (+) ConvHalfCpltCallbackCh2 : callback when a transfer is completed on Ch2. + (+) ErrorCallbackCh2 : callback when an error occurs on Ch2. + (+) DMAUnderrunCallbackCh2 : callback when an underrun error occurs on Ch2. + (+) MspInitCallback : DAC MspInit. + (+) MspDeInitCallback : DAC MspdeInit. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + Use function @ref HAL_DAC_UnRegisterCallback() to reset a callback to the default + weak (surcharged) function. It allows to reset following callbacks: + (+) ConvCpltCallbackCh1 : callback when a half transfer is completed on Ch1. + (+) ConvHalfCpltCallbackCh1 : callback when a transfer is completed on Ch1. + (+) ErrorCallbackCh1 : callback when an error occurs on Ch1. + (+) DMAUnderrunCallbackCh1 : callback when an underrun error occurs on Ch1. + (+) ConvCpltCallbackCh2 : callback when a half transfer is completed on Ch2. + (+) ConvHalfCpltCallbackCh2 : callback when a transfer is completed on Ch2. + (+) ErrorCallbackCh2 : callback when an error occurs on Ch2. + (+) DMAUnderrunCallbackCh2 : callback when an underrun error occurs on Ch2. + (+) MspInitCallback : DAC MspInit. + (+) MspDeInitCallback : DAC MspdeInit. + (+) All Callbacks + This function) takes as parameters the HAL peripheral handle and the Callback ID. + + By default, after the @ref HAL_DAC_Init and if the state is HAL_DAC_STATE_RESET + all callbacks are reset to the corresponding legacy weak (surcharged) functions. 
+ Exception done for MspInit and MspDeInit callbacks that are respectively + reset to the legacy weak (surcharged) functions in the @ref HAL_DAC_Init + and @ref HAL_DAC_DeInit only when these callbacks are null (not registered beforehand). + If not, MspInit or MspDeInit are not null, the @ref HAL_DAC_Init and @ref HAL_DAC_DeInit + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) + + Callbacks can be registered/unregistered in READY state only. + Exception done for MspInit/MspDeInit callbacks that can be registered/unregistered + in READY or RESET state, thus registered (user) MspInit/DeInit callbacks can be used + during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using @ref HAL_DAC_RegisterCallback before calling @ref HAL_DAC_DeInit + or @ref HAL_DAC_Init function. + + When The compilation define USE_HAL_DAC_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registering feature is not available + and weak (surcharged) callbacks are used. + + *** DAC HAL driver macros list *** + ============================================= + [..] + Below the list of most used macros in DAC HAL driver. + + (+) __HAL_DAC_ENABLE : Enable the DAC peripheral + (+) __HAL_DAC_DISABLE : Disable the DAC peripheral + (+) __HAL_DAC_CLEAR_FLAG: Clear the DAC's pending flags + (+) __HAL_DAC_GET_FLAG: Get the selected DAC's flag status + + [..] + (@) You can refer to the DAC HAL driver header file for more useful macros + + @endverbatim + ****************************************************************************** + * @attention + * + *
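/* A minimal sketch of the polling-mode and DMA-mode flows described in the
 * notes above. The handle hdac1, the sample buffer wave_buf and the callback
 * name are illustrative assumptions; clock enabling, PA4 analog configuration
 * and the DMA1 Stream5 link to hdac1.DMA_Handle1 are assumed to be done in a
 * user-provided HAL_DAC_MspInit(). */
#include "stm32f7xx_hal.h"

extern DAC_HandleTypeDef hdac1;
extern uint32_t wave_buf[256];   /* 12-bit right-aligned samples for the DMA case */

void dac_polling_sketch(void)
{
  DAC_ChannelConfTypeDef sConfig = {0};

  HAL_DAC_Init(&hdac1);

  sConfig.DAC_Trigger      = DAC_TRIGGER_NONE;        /* a DHR write updates the output directly */
  sConfig.DAC_OutputBuffer = DAC_OUTPUTBUFFER_ENABLE; /* drive the load without an external op-amp */
  HAL_DAC_ConfigChannel(&hdac1, &sConfig, DAC_CHANNEL_1);

  HAL_DAC_Start(&hdac1, DAC_CHANNEL_1);

  /* DOR = Vout * 4095 / VREF+; with VREF+ = 3.3 V, 868 gives roughly 0.7 V */
  HAL_DAC_SetValue(&hdac1, DAC_CHANNEL_1, DAC_ALIGN_12B_R, 868U);

  (void)HAL_DAC_GetValue(&hdac1, DAC_CHANNEL_1);      /* reads back DOR1 */

  HAL_DAC_Stop(&hdac1, DAC_CHANNEL_1);
}

#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1)
static void wave_buf_done(DAC_HandleTypeDef *hdac)
{
  (void)hdac;  /* e.g. refill or swap wave_buf here */
}
#endif

void dac_dma_sketch(void)
{
  /* Assumes the channel was configured with a hardware trigger such as
     DAC_TRIGGER_T6_TRGO: a software trigger does not generate DMA requests. */
#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1)
  HAL_DAC_RegisterCallback(&hdac1, HAL_DAC_CH1_COMPLETE_CB_ID, wave_buf_done);
#endif
  HAL_DAC_Start_DMA(&hdac1, DAC_CHANNEL_1, wave_buf, 256U, DAC_ALIGN_12B_R);
}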

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup DAC DAC + * @brief DAC driver modules + * @{ + */ + +#ifdef HAL_DAC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup DAC_Private_Functions + * @{ + */ +/* Private function prototypes -----------------------------------------------*/ +static void DAC_DMAConvCpltCh1(DMA_HandleTypeDef *hdma); +static void DAC_DMAErrorCh1(DMA_HandleTypeDef *hdma); +static void DAC_DMAHalfConvCpltCh1(DMA_HandleTypeDef *hdma); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup DAC_Exported_Functions DAC Exported Functions + * @{ + */ + +/** @defgroup DAC_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + ============================================================================== + ##### Initialization and de-initialization functions ##### + ============================================================================== + [..] This section provides functions allowing to: + (+) Initialize and configure the DAC. + (+) De-initialize the DAC. + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the DAC peripheral according to the specified parameters + * in the DAC_InitStruct. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DAC_Init(DAC_HandleTypeDef* hdac) +{ + /* Check DAC handle */ + if(hdac == NULL) + { + return HAL_ERROR; + } + /* Check the parameters */ + assert_param(IS_DAC_ALL_INSTANCE(hdac->Instance)); + + if(hdac->State == HAL_DAC_STATE_RESET) + { +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + /* Init the DAC Callback settings */ + hdac->ConvCpltCallbackCh1 = HAL_DAC_ConvCpltCallbackCh1; + hdac->ConvHalfCpltCallbackCh1 = HAL_DAC_ConvHalfCpltCallbackCh1; + hdac->ErrorCallbackCh1 = HAL_DAC_ErrorCallbackCh1; + hdac->DMAUnderrunCallbackCh1 = HAL_DAC_DMAUnderrunCallbackCh1; + + hdac->ConvCpltCallbackCh2 = HAL_DACEx_ConvCpltCallbackCh2; + hdac->ConvHalfCpltCallbackCh2 = HAL_DACEx_ConvHalfCpltCallbackCh2; + hdac->ErrorCallbackCh2 = HAL_DACEx_ErrorCallbackCh2; + hdac->DMAUnderrunCallbackCh2 = HAL_DACEx_DMAUnderrunCallbackCh2; + + if(hdac->MspInitCallback == NULL) + { + hdac->MspInitCallback = HAL_DAC_MspInit; + } +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + /* Allocate lock resource and initialize it */ + hdac->Lock = HAL_UNLOCKED; +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + /* Init the low level hardware */ + hdac->MspInitCallback(hdac); +#else + /* Init the low level hardware */ + HAL_DAC_MspInit(hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + } + + /* Initialize the DAC state*/ + hdac->State = HAL_DAC_STATE_BUSY; + + /* Set DAC error code to none */ + hdac->ErrorCode = HAL_DAC_ERROR_NONE; + + /* Initialize the DAC state*/ + hdac->State = HAL_DAC_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Deinitializes the DAC peripheral registers to their default reset values. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DAC_DeInit(DAC_HandleTypeDef* hdac) +{ + /* Check DAC handle */ + if(hdac == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_DAC_ALL_INSTANCE(hdac->Instance)); + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_BUSY; + +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + if(hdac->MspDeInitCallback == NULL) + { + hdac->MspDeInitCallback = HAL_DAC_MspDeInit; + } + /* DeInit the low level hardware */ + hdac->MspDeInitCallback(hdac); +#else + /* DeInit the low level hardware */ + HAL_DAC_MspDeInit(hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + + /* Set DAC error code to none */ + hdac->ErrorCode = HAL_DAC_ERROR_NONE; + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hdac); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the DAC MSP. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval None + */ +__weak void HAL_DAC_MspInit(DAC_HandleTypeDef* hdac) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdac); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DAC_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes the DAC MSP. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. 
+ * @retval None + */ +__weak void HAL_DAC_MspDeInit(DAC_HandleTypeDef* hdac) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdac); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DAC_MspDeInit could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup DAC_Exported_Functions_Group2 IO operation functions + * @brief IO operation functions + * +@verbatim + ============================================================================== + ##### IO operation functions ##### + ============================================================================== + [..] This section provides functions allowing to: + (+) Start conversion. + (+) Stop conversion. + (+) Start conversion and enable DMA transfer. + (+) Stop conversion and disable DMA transfer. + (+) Get result of conversion. + +@endverbatim + * @{ + */ + +/** + * @brief Enables DAC and starts conversion of channel. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @param Channel: The selected DAC channel. + * This parameter can be one of the following values: + * @arg DAC_CHANNEL_1: DAC Channel1 selected + * @arg DAC_CHANNEL_2: DAC Channel2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DAC_Start(DAC_HandleTypeDef* hdac, uint32_t Channel) +{ + uint32_t tmp1 = 0, tmp2 = 0; + + /* Check the parameters */ + assert_param(IS_DAC_CHANNEL(Channel)); + + /* Process locked */ + __HAL_LOCK(hdac); + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_BUSY; + + /* Enable the Peripheral */ + __HAL_DAC_ENABLE(hdac, Channel); + + if(Channel == DAC_CHANNEL_1) + { + tmp1 = hdac->Instance->CR & DAC_CR_TEN1; + tmp2 = hdac->Instance->CR & DAC_CR_TSEL1; + /* Check if software trigger enabled */ + if((tmp1 == DAC_CR_TEN1) && (tmp2 == DAC_CR_TSEL1)) + { + /* Enable the selected DAC software conversion */ + hdac->Instance->SWTRIGR |= (uint32_t)DAC_SWTRIGR_SWTRIG1; + } + } + else + { + tmp1 = hdac->Instance->CR & DAC_CR_TEN2; + tmp2 = hdac->Instance->CR & DAC_CR_TSEL2; + /* Check if software trigger enabled */ + if((tmp1 == DAC_CR_TEN2) && (tmp2 == DAC_CR_TSEL2)) + { + /* Enable the selected DAC software conversion*/ + hdac->Instance->SWTRIGR |= (uint32_t)DAC_SWTRIGR_SWTRIG2; + } + } + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hdac); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Disables DAC and stop conversion of channel. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @param Channel: The selected DAC channel. + * This parameter can be one of the following values: + * @arg DAC_CHANNEL_1: DAC Channel1 selected + * @arg DAC_CHANNEL_2: DAC Channel2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DAC_Stop(DAC_HandleTypeDef* hdac, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_DAC_CHANNEL(Channel)); + + /* Disable the Peripheral */ + __HAL_DAC_DISABLE(hdac, Channel); + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Enables DAC and starts conversion of channel. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @param Channel: The selected DAC channel. 
+ * This parameter can be one of the following values: + * @arg DAC_CHANNEL_1: DAC Channel1 selected + * @arg DAC_CHANNEL_2: DAC Channel2 selected + * @param pData: The Source memory Buffer address. + * @param Length: The length of data to be transferred from memory to DAC peripheral + * @param Alignment: Specifies the data alignment for DAC channel. + * This parameter can be one of the following values: + * @arg DAC_ALIGN_8B_R: 8bit right data alignment selected + * @arg DAC_ALIGN_12B_L: 12bit left data alignment selected + * @arg DAC_ALIGN_12B_R: 12bit right data alignment selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DAC_Start_DMA(DAC_HandleTypeDef* hdac, uint32_t Channel, uint32_t* pData, uint32_t Length, uint32_t Alignment) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_DAC_CHANNEL(Channel)); + assert_param(IS_DAC_ALIGN(Alignment)); + + /* Process locked */ + __HAL_LOCK(hdac); + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_BUSY; + + if(Channel == DAC_CHANNEL_1) + { + /* Set the DMA transfer complete callback for channel1 */ + hdac->DMA_Handle1->XferCpltCallback = DAC_DMAConvCpltCh1; + + /* Set the DMA half transfer complete callback for channel1 */ + hdac->DMA_Handle1->XferHalfCpltCallback = DAC_DMAHalfConvCpltCh1; + + /* Set the DMA error callback for channel1 */ + hdac->DMA_Handle1->XferErrorCallback = DAC_DMAErrorCh1; + + /* Enable the selected DAC channel1 DMA request */ + hdac->Instance->CR |= DAC_CR_DMAEN1; + + /* Case of use of channel 1 */ + switch(Alignment) + { + case DAC_ALIGN_12B_R: + /* Get DHR12R1 address */ + tmpreg = (uint32_t)&hdac->Instance->DHR12R1; + break; + case DAC_ALIGN_12B_L: + /* Get DHR12L1 address */ + tmpreg = (uint32_t)&hdac->Instance->DHR12L1; + break; + case DAC_ALIGN_8B_R: + /* Get DHR8R1 address */ + tmpreg = (uint32_t)&hdac->Instance->DHR8R1; + break; + default: + break; + } + } + else + { + /* Set the DMA transfer complete callback for channel2 */ + hdac->DMA_Handle2->XferCpltCallback = DAC_DMAConvCpltCh2; + + /* Set the DMA half transfer complete callback for channel2 */ + hdac->DMA_Handle2->XferHalfCpltCallback = DAC_DMAHalfConvCpltCh2; + + /* Set the DMA error callback for channel2 */ + hdac->DMA_Handle2->XferErrorCallback = DAC_DMAErrorCh2; + + /* Enable the selected DAC channel2 DMA request */ + hdac->Instance->CR |= DAC_CR_DMAEN2; + + /* Case of use of channel 2 */ + switch(Alignment) + { + case DAC_ALIGN_12B_R: + /* Get DHR12R2 address */ + tmpreg = (uint32_t)&hdac->Instance->DHR12R2; + break; + case DAC_ALIGN_12B_L: + /* Get DHR12L2 address */ + tmpreg = (uint32_t)&hdac->Instance->DHR12L2; + break; + case DAC_ALIGN_8B_R: + /* Get DHR8R2 address */ + tmpreg = (uint32_t)&hdac->Instance->DHR8R2; + break; + default: + break; + } + } + + /* Enable the DMA Stream */ + if(Channel == DAC_CHANNEL_1) + { + /* Enable the DAC DMA underrun interrupt */ + __HAL_DAC_ENABLE_IT(hdac, DAC_IT_DMAUDR1); + + /* Enable the DMA Stream */ + HAL_DMA_Start_IT(hdac->DMA_Handle1, (uint32_t)pData, tmpreg, Length); + } + else + { + /* Enable the DAC DMA underrun interrupt */ + __HAL_DAC_ENABLE_IT(hdac, DAC_IT_DMAUDR2); + + /* Enable the DMA Stream */ + HAL_DMA_Start_IT(hdac->DMA_Handle2, (uint32_t)pData, tmpreg, Length); + } + + /* Enable the Peripheral */ + __HAL_DAC_ENABLE(hdac, Channel); + + /* Process Unlocked */ + __HAL_UNLOCK(hdac); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Disables DAC and stop conversion of channel. 
+ * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @param Channel: The selected DAC channel. + * This parameter can be one of the following values: + * @arg DAC_CHANNEL_1: DAC Channel1 selected + * @arg DAC_CHANNEL_2: DAC Channel2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DAC_Stop_DMA(DAC_HandleTypeDef* hdac, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_DAC_CHANNEL(Channel)); + + /* Disable the selected DAC channel DMA request */ + hdac->Instance->CR &= ~(DAC_CR_DMAEN1 << Channel); + + /* Disable the Peripheral */ + __HAL_DAC_DISABLE(hdac, Channel); + + /* Disable the DMA Channel */ + /* Channel1 is used */ + if(Channel == DAC_CHANNEL_1) + { + status = HAL_DMA_Abort(hdac->DMA_Handle1); + } + else /* Channel2 is used for */ + { + status = HAL_DMA_Abort(hdac->DMA_Handle2); + } + + /* Check if DMA Channel effectively disabled */ + if(status != HAL_OK) + { + /* Update DAC state machine to error */ + hdac->State = HAL_DAC_STATE_ERROR; + } + else + { + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_READY; + } + + /* Return function status */ + return status; +} + +/** + * @brief Returns the last data output value of the selected DAC channel. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @param Channel: The selected DAC channel. + * This parameter can be one of the following values: + * @arg DAC_CHANNEL_1: DAC Channel1 selected + * @arg DAC_CHANNEL_2: DAC Channel2 selected + * @retval The selected DAC channel data output value. + */ +uint32_t HAL_DAC_GetValue(DAC_HandleTypeDef* hdac, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_DAC_CHANNEL(Channel)); + + /* Returns the DAC channel data output register value */ + if(Channel == DAC_CHANNEL_1) + { + return hdac->Instance->DOR1; + } + else + { + return hdac->Instance->DOR2; + } +} + +/** + * @brief Handles DAC interrupt request + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. 
+ * @retval None + */ +void HAL_DAC_IRQHandler(DAC_HandleTypeDef* hdac) +{ + /* Check underrun channel 1 flag */ + if(__HAL_DAC_GET_FLAG(hdac, DAC_FLAG_DMAUDR1)) + { + /* Change DAC state to error state */ + hdac->State = HAL_DAC_STATE_ERROR; + + /* Set DAC error code to channel1 DMA underrun error */ + hdac->ErrorCode |= HAL_DAC_ERROR_DMAUNDERRUNCH1; + + /* Clear the underrun flag */ + __HAL_DAC_CLEAR_FLAG(hdac,DAC_FLAG_DMAUDR1); + + /* Disable the selected DAC channel1 DMA request */ + hdac->Instance->CR &= ~DAC_CR_DMAEN1; + + /* Error callback */ +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + hdac->DMAUnderrunCallbackCh1(hdac); +#else + HAL_DAC_DMAUnderrunCallbackCh1(hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + } + /* Check underrun channel 2 flag */ + if(__HAL_DAC_GET_FLAG(hdac, DAC_FLAG_DMAUDR2)) + { + /* Change DAC state to error state */ + hdac->State = HAL_DAC_STATE_ERROR; + + /* Set DAC error code to channel2 DMA underrun error */ + hdac->ErrorCode |= HAL_DAC_ERROR_DMAUNDERRUNCH2; + + /* Clear the underrun flag */ + __HAL_DAC_CLEAR_FLAG(hdac,DAC_FLAG_DMAUDR2); + + /* Disable the selected DAC channel1 DMA request */ + hdac->Instance->CR &= ~DAC_CR_DMAEN2; + + /* Error callback */ +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + hdac->DMAUnderrunCallbackCh2(hdac); +#else + HAL_DACEx_DMAUnderrunCallbackCh2(hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + } +} + +/** + * @brief Conversion complete callback in non blocking mode for Channel1 + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval None + */ +__weak void HAL_DAC_ConvCpltCallbackCh1(DAC_HandleTypeDef* hdac) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdac); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DAC_ConvCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Conversion half DMA transfer callback in non blocking mode for Channel1 + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval None + */ +__weak void HAL_DAC_ConvHalfCpltCallbackCh1(DAC_HandleTypeDef* hdac) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdac); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DAC_ConvHalfCpltCallbackCh1 could be implemented in the user file + */ +} + +/** + * @brief Error DAC callback for Channel1. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval None + */ +__weak void HAL_DAC_ErrorCallbackCh1(DAC_HandleTypeDef *hdac) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdac); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DAC_ErrorCallbackCh1 could be implemented in the user file + */ +} + +/** + * @brief DMA underrun DAC callback for channel1. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. 
+ * @retval None + */ +__weak void HAL_DAC_DMAUnderrunCallbackCh1(DAC_HandleTypeDef *hdac) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdac); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DAC_DMAUnderrunCallbackCh1 could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup DAC_Exported_Functions_Group3 Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] This section provides functions allowing to: + (+) Configure channels. + (+) Set the specified data holding register value for DAC channel. + +@endverbatim + * @{ + */ + +/** + * @brief Configures the selected DAC channel. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @param sConfig: DAC configuration structure. + * @param Channel: The selected DAC channel. + * This parameter can be one of the following values: + * @arg DAC_CHANNEL_1: DAC Channel1 selected + * @arg DAC_CHANNEL_2: DAC Channel2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DAC_ConfigChannel(DAC_HandleTypeDef* hdac, DAC_ChannelConfTypeDef* sConfig, uint32_t Channel) +{ + uint32_t tmpreg1 = 0, tmpreg2 = 0; + + /* Check the DAC parameters */ + assert_param(IS_DAC_TRIGGER(sConfig->DAC_Trigger)); + assert_param(IS_DAC_OUTPUT_BUFFER_STATE(sConfig->DAC_OutputBuffer)); + assert_param(IS_DAC_CHANNEL(Channel)); + + /* Process locked */ + __HAL_LOCK(hdac); + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_BUSY; + + /* Get the DAC CR value */ + tmpreg1 = hdac->Instance->CR; + /* Clear BOFFx, TENx, TSELx, WAVEx and MAMPx bits */ + tmpreg1 &= ~(((uint32_t)(DAC_CR_MAMP1 | DAC_CR_WAVE1 | DAC_CR_TSEL1 | DAC_CR_TEN1 | DAC_CR_BOFF1)) << Channel); + /* Configure for the selected DAC channel: buffer output, trigger */ + /* Set TSELx and TENx bits according to DAC_Trigger value */ + /* Set BOFFx bit according to DAC_OutputBuffer value */ + tmpreg2 = (sConfig->DAC_Trigger | sConfig->DAC_OutputBuffer); + /* Calculate CR register value depending on DAC_Channel */ + tmpreg1 |= tmpreg2 << Channel; + /* Write to DAC CR */ + hdac->Instance->CR = tmpreg1; + /* Disable wave generation */ + hdac->Instance->CR &= ~(DAC_CR_WAVE1 << Channel); + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hdac); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Set the specified data holding register value for DAC channel. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @param Channel: The selected DAC channel. + * This parameter can be one of the following values: + * @arg DAC_CHANNEL_1: DAC Channel1 selected + * @arg DAC_CHANNEL_2: DAC Channel2 selected + * @param Alignment: Specifies the data alignment. + * This parameter can be one of the following values: + * @arg DAC_ALIGN_8B_R: 8bit right data alignment selected + * @arg DAC_ALIGN_12B_L: 12bit left data alignment selected + * @arg DAC_ALIGN_12B_R: 12bit right data alignment selected + * @param Data: Data to be loaded in the selected data holding register. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DAC_SetValue(DAC_HandleTypeDef* hdac, uint32_t Channel, uint32_t Alignment, uint32_t Data) +{ + __IO uint32_t tmp = 0; + + /* Check the parameters */ + assert_param(IS_DAC_CHANNEL(Channel)); + assert_param(IS_DAC_ALIGN(Alignment)); + assert_param(IS_DAC_DATA(Data)); + + tmp = (uint32_t)hdac->Instance; + if(Channel == DAC_CHANNEL_1) + { + tmp += DAC_DHR12R1_ALIGNMENT(Alignment); + } + else + { + tmp += DAC_DHR12R2_ALIGNMENT(Alignment); + } + + /* Set the DAC channel1 selected data holding register */ + *(__IO uint32_t *) tmp = Data; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup DAC_Exported_Functions_Group4 Peripheral State and Errors functions + * @brief Peripheral State and Errors functions + * +@verbatim + ============================================================================== + ##### Peripheral State and Errors functions ##### + ============================================================================== + [..] + This subsection provides functions allowing to + (+) Check the DAC state. + (+) Check the DAC Errors. + +@endverbatim + * @{ + */ + +/** + * @brief return the DAC state + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval HAL state + */ +HAL_DAC_StateTypeDef HAL_DAC_GetState(DAC_HandleTypeDef* hdac) +{ + /* Return DAC state */ + return hdac->State; +} + + +/** + * @brief Return the DAC error code + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval DAC Error Code + */ +uint32_t HAL_DAC_GetError(DAC_HandleTypeDef *hdac) +{ + return hdac->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup DAC_Exported_Functions + * @{ + */ + +/** @addtogroup DAC_Exported_Functions_Group1 + * @{ + */ +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User DAC Callback + * To be used instead of the weak (surcharged) predefined callback + * @param hdac DAC handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_DAC_ERROR_INVALID_CALLBACK DAC Error Callback ID + * @arg @ref HAL_DAC_CH1_COMPLETE_CB_ID DAC CH1 Complete Callback ID + * @arg @ref HAL_DAC_CH1_HALF_COMPLETE_CB_ID DAC CH1 Half Complete Callback ID + * @arg @ref HAL_DAC_CH1_ERROR_ID DAC CH1 Error Callback ID + * @arg @ref HAL_DAC_CH1_UNDERRUN_CB_ID DAC CH1 UnderRun Callback ID + * @arg @ref HAL_DAC_CH2_COMPLETE_CB_ID DAC CH2 Complete Callback ID + * @arg @ref HAL_DAC_CH2_HALF_COMPLETE_CB_ID DAC CH2 Half Complete Callback ID + * @arg @ref HAL_DAC_CH2_ERROR_ID DAC CH2 Error Callback ID + * @arg @ref HAL_DAC_CH2_UNDERRUN_CB_ID DAC CH2 UnderRun Callback ID + * @arg @ref HAL_DAC_MSP_INIT_CB_ID DAC MSP Init Callback ID + * @arg @ref HAL_DAC_MSP_DEINIT_CB_ID DAC MSP DeInit Callback ID + * + * @param pCallback pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_DAC_RegisterCallback (DAC_HandleTypeDef *hdac, HAL_DAC_CallbackIDTypeDef CallbackID, pDAC_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if(pCallback == NULL) + { + /* Update the error code */ + hdac->ErrorCode |= HAL_DAC_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hdac); + + if(hdac->State == HAL_DAC_STATE_READY) + { + switch (CallbackID) + { + case HAL_DAC_CH1_COMPLETE_CB_ID : + hdac->ConvCpltCallbackCh1 = 
pCallback; + break; + case HAL_DAC_CH1_HALF_COMPLETE_CB_ID : + hdac->ConvHalfCpltCallbackCh1 = pCallback; + break; + case HAL_DAC_CH1_ERROR_ID : + hdac->ErrorCallbackCh1 = pCallback; + break; + case HAL_DAC_CH1_UNDERRUN_CB_ID : + hdac->DMAUnderrunCallbackCh1 = pCallback; + break; + case HAL_DAC_CH2_COMPLETE_CB_ID : + hdac->ConvCpltCallbackCh2 = pCallback; + break; + case HAL_DAC_CH2_HALF_COMPLETE_CB_ID : + hdac->ConvHalfCpltCallbackCh2 = pCallback; + break; + case HAL_DAC_CH2_ERROR_ID : + hdac->ErrorCallbackCh2 = pCallback; + break; + case HAL_DAC_CH2_UNDERRUN_CB_ID : + hdac->DMAUnderrunCallbackCh2 = pCallback; + break; + case HAL_DAC_MSP_INIT_CB_ID : + hdac->MspInitCallback = pCallback; + break; + case HAL_DAC_MSP_DEINIT_CB_ID : + hdac->MspDeInitCallback = pCallback; + break; + default : + /* Update the error code */ + hdac->ErrorCode |= HAL_DAC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else if (hdac->State == HAL_DAC_STATE_RESET) + { + switch (CallbackID) + { + case HAL_DAC_MSP_INIT_CB_ID : + hdac->MspInitCallback = pCallback; + break; + case HAL_DAC_MSP_DEINIT_CB_ID : + hdac->MspDeInitCallback = pCallback; + break; + default : + /* Update the error code */ + hdac->ErrorCode |= HAL_DAC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hdac->ErrorCode |= HAL_DAC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdac); + return status; +} + +/** + * @brief Unregister a User DAC Callback + * DAC Callback is redirected to the weak (surcharged) predefined callback + * @param hdac DAC handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_DAC_CH1_COMPLETE_CB_ID DAC CH1 tranfer Complete Callback ID + * @arg @ref HAL_DAC_CH1_HALF_COMPLETE_CB_ID DAC CH1 Half Complete Callback ID + * @arg @ref HAL_DAC_CH1_ERROR_ID DAC CH1 Error Callback ID + * @arg @ref HAL_DAC_CH1_UNDERRUN_CB_ID DAC CH1 UnderRun Callback ID + * @arg @ref HAL_DAC_CH2_COMPLETE_CB_ID DAC CH2 Complete Callback ID + * @arg @ref HAL_DAC_CH2_HALF_COMPLETE_CB_ID DAC CH2 Half Complete Callback ID + * @arg @ref HAL_DAC_CH2_ERROR_ID DAC CH2 Error Callback ID + * @arg @ref HAL_DAC_CH2_UNDERRUN_CB_ID DAC CH2 UnderRun Callback ID + * @arg @ref HAL_DAC_MSP_INIT_CB_ID DAC MSP Init Callback ID + * @arg @ref HAL_DAC_MSP_DEINIT_CB_ID DAC MSP DeInit Callback ID + * @arg @ref HAL_DAC_ALL_CB_ID DAC All callbacks + * @retval status + */ +HAL_StatusTypeDef HAL_DAC_UnRegisterCallback (DAC_HandleTypeDef *hdac, HAL_DAC_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hdac); + + if(hdac->State == HAL_DAC_STATE_READY) + { + switch (CallbackID) + { + case HAL_DAC_CH1_COMPLETE_CB_ID : + hdac->ConvCpltCallbackCh1 = HAL_DAC_ConvCpltCallbackCh1; + break; + case HAL_DAC_CH1_HALF_COMPLETE_CB_ID : + hdac->ConvHalfCpltCallbackCh1 = HAL_DAC_ConvHalfCpltCallbackCh1; + break; + case HAL_DAC_CH1_ERROR_ID : + hdac->ErrorCallbackCh1 = HAL_DAC_ErrorCallbackCh1; + break; + case HAL_DAC_CH1_UNDERRUN_CB_ID : + hdac->DMAUnderrunCallbackCh1 = HAL_DAC_DMAUnderrunCallbackCh1; + break; + case HAL_DAC_CH2_COMPLETE_CB_ID : + hdac->ConvCpltCallbackCh2 = HAL_DACEx_ConvCpltCallbackCh2; + break; + case HAL_DAC_CH2_HALF_COMPLETE_CB_ID : + hdac->ConvHalfCpltCallbackCh2 = HAL_DACEx_ConvHalfCpltCallbackCh2; + break; + case HAL_DAC_CH2_ERROR_ID : + 
hdac->ErrorCallbackCh2 = HAL_DACEx_ErrorCallbackCh2; + break; + case HAL_DAC_CH2_UNDERRUN_CB_ID : + hdac->DMAUnderrunCallbackCh2 = HAL_DACEx_DMAUnderrunCallbackCh2; + break; + case HAL_DAC_MSP_INIT_CB_ID : + hdac->MspInitCallback = HAL_DAC_MspInit; + break; + case HAL_DAC_MSP_DEINIT_CB_ID : + hdac->MspDeInitCallback = HAL_DAC_MspDeInit; + break; + case HAL_DAC_ALL_CB_ID : + hdac->ConvCpltCallbackCh1 = HAL_DAC_ConvCpltCallbackCh1; + hdac->ConvHalfCpltCallbackCh1 = HAL_DAC_ConvHalfCpltCallbackCh1; + hdac->ErrorCallbackCh1 = HAL_DAC_ErrorCallbackCh1; + hdac->DMAUnderrunCallbackCh1 = HAL_DAC_DMAUnderrunCallbackCh1; + hdac->ConvCpltCallbackCh2 = HAL_DACEx_ConvCpltCallbackCh2; + hdac->ConvHalfCpltCallbackCh2 = HAL_DACEx_ConvHalfCpltCallbackCh2; + hdac->ErrorCallbackCh2 = HAL_DACEx_ErrorCallbackCh2; + hdac->DMAUnderrunCallbackCh2 = HAL_DACEx_DMAUnderrunCallbackCh2; + hdac->MspInitCallback = HAL_DAC_MspInit; + hdac->MspDeInitCallback = HAL_DAC_MspDeInit; + break; + default : + /* Update the error code */ + hdac->ErrorCode |= HAL_DAC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else if (hdac->State == HAL_DAC_STATE_RESET) + { + switch (CallbackID) + { + case HAL_DAC_MSP_INIT_CB_ID : + hdac->MspInitCallback = HAL_DAC_MspInit; + break; + case HAL_DAC_MSP_DEINIT_CB_ID : + hdac->MspDeInitCallback = HAL_DAC_MspDeInit; + break; + default : + /* Update the error code */ + hdac->ErrorCode |= HAL_DAC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hdac->ErrorCode |= HAL_DAC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdac); + return status; +} +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup DAC_Private_Functions + * @{ + */ + +/** + * @brief DMA conversion complete callback. + * @param hdma: pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void DAC_DMAConvCpltCh1(DMA_HandleTypeDef *hdma) +{ + DAC_HandleTypeDef* hdac = ( DAC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + hdac->ConvCpltCallbackCh1(hdac); +#else + HAL_DAC_ConvCpltCallbackCh1(hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + + hdac->State= HAL_DAC_STATE_READY; +} + +/** + * @brief DMA half transfer complete callback. + * @param hdma: pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void DAC_DMAHalfConvCpltCh1(DMA_HandleTypeDef *hdma) +{ + DAC_HandleTypeDef* hdac = ( DAC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + /* Conversion complete callback */ +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + hdac->ConvHalfCpltCallbackCh1(hdac); +#else + HAL_DAC_ConvHalfCpltCallbackCh1(hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA error callback + * @param hdma: pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. 
+ * @retval None + */ +static void DAC_DMAErrorCh1(DMA_HandleTypeDef *hdma) +{ + DAC_HandleTypeDef* hdac = ( DAC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + + /* Set DAC error code to DMA error */ + hdac->ErrorCode |= HAL_DAC_ERROR_DMA; + +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + hdac->ErrorCallbackCh1(hdac); +#else + HAL_DAC_ErrorCallbackCh1(hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + + hdac->State= HAL_DAC_STATE_READY; +} + +/** + * @} + */ + +#endif /* HAL_DAC_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac_ex.c new file mode 100644 index 000000000..d61d9eb7e --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac_ex.c @@ -0,0 +1,382 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dac_ex.c + * @author MCD Application Team + * @brief Extended DAC HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of DAC extension peripheral: + * + Extended features functions + * + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (+) When Dual mode is enabled (i.e DAC Channel1 and Channel2 are used simultaneously) : + Use HAL_DACEx_DualGetValue() to get digital data to be converted and use + HAL_DACEx_DualSetValue() to set digital value to converted simultaneously in Channel 1 and Channel 2. + (+) Use HAL_DACEx_TriangleWaveGenerate() to generate Triangle signal. + (+) Use HAL_DACEx_NoiseWaveGenerate() to generate Noise signal. + + @endverbatim + ****************************************************************************** + * @attention + * + *
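/* A minimal sketch for the wave generation and dual-channel features listed
 * in the How-to section above. The handle name hdac1 is an illustrative
 * assumption; both channels are assumed to have been configured with
 * HAL_DAC_ConfigChannel() using a hardware trigger (for example
 * DAC_TRIGGER_T6_TRGO), since the wave generators advance on each trigger. */
#include "stm32f7xx_hal.h"

extern DAC_HandleTypeDef hdac1;

void dacex_sketch(void)
{
  /* Full-scale triangle superimposed on the channel 1 DHR value */
  HAL_DACEx_TriangleWaveGenerate(&hdac1, DAC_CHANNEL_1, DAC_TRIANGLEAMPLITUDE_1023);
  HAL_DAC_Start(&hdac1, DAC_CHANNEL_1);

  /* Pseudo-noise on channel 2 with the full 12-bit LFSR unmasked */
  HAL_DACEx_NoiseWaveGenerate(&hdac1, DAC_CHANNEL_2, DAC_LFSRUNMASK_BITS11_0);
  HAL_DAC_Start(&hdac1, DAC_CHANNEL_2);

  /* Dual mode: a single register write loads both holding registers; per the
     HAL_DACEx_DualSetValue() implementation in this file, Data1 fills the
     low half-word and Data2 the high half-word. */
  HAL_DACEx_DualSetValue(&hdac1, DAC_ALIGN_12B_R, 2048U, 1024U);

  /* DOR1 is returned in the low half-word, DOR2 in the high half-word */
  (void)HAL_DACEx_DualGetValue(&hdac1);
}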

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup DACEx DACEx + * @brief DAC driver modules + * @{ + */ + +#ifdef HAL_DAC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @defgroup DACEx_Exported_Functions DAC Exported Functions + * @{ + */ + +/** @defgroup DACEx_Exported_Functions_Group1 Extended features functions + * @brief Extended features functions + * +@verbatim + ============================================================================== + ##### Extended features functions ##### + ============================================================================== + [..] This section provides functions allowing to: + (+) Start conversion. + (+) Stop conversion. + (+) Start conversion and enable DMA transfer. + (+) Stop conversion and disable DMA transfer. + (+) Get result of conversion. + (+) Get result of dual mode conversion. + +@endverbatim + * @{ + */ + +/** + * @brief Returns the last data output value of the selected DAC channel. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval The selected DAC channel data output value. + */ +uint32_t HAL_DACEx_DualGetValue(DAC_HandleTypeDef* hdac) +{ + uint32_t tmp = 0; + + tmp |= hdac->Instance->DOR1; + + tmp |= hdac->Instance->DOR2 << 16; + + /* Returns the DAC channel data output register value */ + return tmp; +} + +/** + * @brief Enables or disables the selected DAC channel wave generation. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @param Channel: The selected DAC channel. + * This parameter can be one of the following values: + * @arg DAC_CHANNEL_1: DAC Channel1 selected + * @arg DAC_CHANNEL_2: DAC Channel2 selected + * @param Amplitude: Select max triangle amplitude. 
+ * This parameter can be one of the following values: + * @arg DAC_TRIANGLEAMPLITUDE_1: Select max triangle amplitude of 1 + * @arg DAC_TRIANGLEAMPLITUDE_3: Select max triangle amplitude of 3 + * @arg DAC_TRIANGLEAMPLITUDE_7: Select max triangle amplitude of 7 + * @arg DAC_TRIANGLEAMPLITUDE_15: Select max triangle amplitude of 15 + * @arg DAC_TRIANGLEAMPLITUDE_31: Select max triangle amplitude of 31 + * @arg DAC_TRIANGLEAMPLITUDE_63: Select max triangle amplitude of 63 + * @arg DAC_TRIANGLEAMPLITUDE_127: Select max triangle amplitude of 127 + * @arg DAC_TRIANGLEAMPLITUDE_255: Select max triangle amplitude of 255 + * @arg DAC_TRIANGLEAMPLITUDE_511: Select max triangle amplitude of 511 + * @arg DAC_TRIANGLEAMPLITUDE_1023: Select max triangle amplitude of 1023 + * @arg DAC_TRIANGLEAMPLITUDE_2047: Select max triangle amplitude of 2047 + * @arg DAC_TRIANGLEAMPLITUDE_4095: Select max triangle amplitude of 4095 + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DACEx_TriangleWaveGenerate(DAC_HandleTypeDef* hdac, uint32_t Channel, uint32_t Amplitude) +{ + /* Check the parameters */ + assert_param(IS_DAC_CHANNEL(Channel)); + assert_param(IS_DAC_LFSR_UNMASK_TRIANGLE_AMPLITUDE(Amplitude)); + + /* Process locked */ + __HAL_LOCK(hdac); + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_BUSY; + + /* Enable the selected wave generation for the selected DAC channel */ + MODIFY_REG(hdac->Instance->CR, (DAC_CR_WAVE1 | DAC_CR_MAMP1) << Channel, (DAC_CR_WAVE1_1 | Amplitude) << Channel); + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hdac); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Enables or disables the selected DAC channel wave generation. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @param Channel: The selected DAC channel. + * This parameter can be one of the following values: + * @arg DAC_CHANNEL_1: DAC Channel1 selected + * @arg DAC_CHANNEL_2: DAC Channel2 selected + * @param Amplitude: Unmask DAC channel LFSR for noise wave generation. 
+ * This parameter can be one of the following values: + * @arg DAC_LFSRUNMASK_BIT0: Unmask DAC channel LFSR bit0 for noise wave generation + * @arg DAC_LFSRUNMASK_BITS1_0: Unmask DAC channel LFSR bit[1:0] for noise wave generation + * @arg DAC_LFSRUNMASK_BITS2_0: Unmask DAC channel LFSR bit[2:0] for noise wave generation + * @arg DAC_LFSRUNMASK_BITS3_0: Unmask DAC channel LFSR bit[3:0] for noise wave generation + * @arg DAC_LFSRUNMASK_BITS4_0: Unmask DAC channel LFSR bit[4:0] for noise wave generation + * @arg DAC_LFSRUNMASK_BITS5_0: Unmask DAC channel LFSR bit[5:0] for noise wave generation + * @arg DAC_LFSRUNMASK_BITS6_0: Unmask DAC channel LFSR bit[6:0] for noise wave generation + * @arg DAC_LFSRUNMASK_BITS7_0: Unmask DAC channel LFSR bit[7:0] for noise wave generation + * @arg DAC_LFSRUNMASK_BITS8_0: Unmask DAC channel LFSR bit[8:0] for noise wave generation + * @arg DAC_LFSRUNMASK_BITS9_0: Unmask DAC channel LFSR bit[9:0] for noise wave generation + * @arg DAC_LFSRUNMASK_BITS10_0: Unmask DAC channel LFSR bit[10:0] for noise wave generation + * @arg DAC_LFSRUNMASK_BITS11_0: Unmask DAC channel LFSR bit[11:0] for noise wave generation + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DACEx_NoiseWaveGenerate(DAC_HandleTypeDef* hdac, uint32_t Channel, uint32_t Amplitude) +{ + /* Check the parameters */ + assert_param(IS_DAC_CHANNEL(Channel)); + assert_param(IS_DAC_LFSR_UNMASK_TRIANGLE_AMPLITUDE(Amplitude)); + + /* Process locked */ + __HAL_LOCK(hdac); + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_BUSY; + + /* Enable the selected wave generation for the selected DAC channel */ + MODIFY_REG(hdac->Instance->CR, (DAC_CR_WAVE1 | DAC_CR_MAMP1) << Channel, (DAC_CR_WAVE1_0 | Amplitude) << Channel); + + /* Change DAC state */ + hdac->State = HAL_DAC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hdac); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Set the specified data holding register value for dual DAC channel. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @param Alignment: Specifies the data alignment for dual channel DAC. + * This parameter can be one of the following values: + * DAC_ALIGN_8B_R: 8bit right data alignment selected + * DAC_ALIGN_12B_L: 12bit left data alignment selected + * DAC_ALIGN_12B_R: 12bit right data alignment selected + * @param Data1: Data for DAC Channel2 to be loaded in the selected data holding register. + * @param Data2: Data for DAC Channel1 to be loaded in the selected data holding register. + * @note In dual mode, a unique register access is required to write in both + * DAC channels at the same time. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DACEx_DualSetValue(DAC_HandleTypeDef* hdac, uint32_t Alignment, uint32_t Data1, uint32_t Data2) +{ + uint32_t data = 0, tmp = 0; + + /* Check the parameters */ + assert_param(IS_DAC_ALIGN(Alignment)); + assert_param(IS_DAC_DATA(Data1)); + assert_param(IS_DAC_DATA(Data2)); + + /* Calculate and set dual DAC data holding register value */ + if (Alignment == DAC_ALIGN_8B_R) + { + data = ((uint32_t)Data2 << 8) | Data1; + } + else + { + data = ((uint32_t)Data2 << 16) | Data1; + } + + tmp = (uint32_t)hdac->Instance; + tmp += DAC_DHR12RD_ALIGNMENT(Alignment); + + /* Set the dual DAC selected data holding register */ + *(__IO uint32_t *)tmp = data; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** + * @brief Conversion complete callback in non blocking mode for Channel2 + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval None + */ +__weak void HAL_DACEx_ConvCpltCallbackCh2(DAC_HandleTypeDef* hdac) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdac); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DAC_ConvCpltCallbackCh2 could be implemented in the user file + */ +} + +/** + * @brief Conversion half DMA transfer callback in non blocking mode for Channel2 + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval None + */ +__weak void HAL_DACEx_ConvHalfCpltCallbackCh2(DAC_HandleTypeDef* hdac) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdac); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DACEx_ConvHalfCpltCallbackCh2 could be implemented in the user file + */ +} + +/** + * @brief Error DAC callback for Channel2. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval None + */ +__weak void HAL_DACEx_ErrorCallbackCh2(DAC_HandleTypeDef *hdac) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdac); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DACEx_ErrorCallbackCh2 could be implemented in the user file + */ +} + +/** + * @brief DMA underrun DAC callback for channel2. + * @param hdac: pointer to a DAC_HandleTypeDef structure that contains + * the configuration information for the specified DAC. + * @retval None + */ +__weak void HAL_DACEx_DMAUnderrunCallbackCh2(DAC_HandleTypeDef *hdac) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdac); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DACEx_DMAUnderrunCallbackCh2 could be implemented in the user file + */ +} + +/** + * @brief DMA conversion complete callback. + * @param hdma: pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +void DAC_DMAConvCpltCh2(DMA_HandleTypeDef *hdma) +{ + DAC_HandleTypeDef* hdac = ( DAC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + hdac->ConvCpltCallbackCh2(hdac); +#else + HAL_DACEx_ConvCpltCallbackCh2(hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + + hdac->State= HAL_DAC_STATE_READY; +} + +/** + * @brief DMA half transfer complete callback. 
+ * @param hdma: pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +void DAC_DMAHalfConvCpltCh2(DMA_HandleTypeDef *hdma) +{ + DAC_HandleTypeDef* hdac = ( DAC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + /* Conversion complete callback */ +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + hdac->ConvHalfCpltCallbackCh2(hdac); +#else + HAL_DACEx_ConvHalfCpltCallbackCh2(hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA error callback + * @param hdma: pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +void DAC_DMAErrorCh2(DMA_HandleTypeDef *hdma) +{ + DAC_HandleTypeDef* hdac = ( DAC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; + + /* Set DAC error code to DMA error */ + hdac->ErrorCode |= HAL_DAC_ERROR_DMA; + +#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) + hdac->ErrorCallbackCh2(hdac); +#else + HAL_DACEx_ErrorCallbackCh2(hdac); +#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ + + hdac->State= HAL_DAC_STATE_READY; +} + +/** + * @} + */ + +#endif /* HAL_DAC_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c new file mode 100644 index 000000000..84d080cd0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c @@ -0,0 +1,1307 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dma.c + * @author MCD Application Team + * @brief DMA HAL module driver. + * + * This file provides firmware functions to manage the following + * functionalities of the Direct Memory Access (DMA) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral State and errors functions + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Enable and configure the peripheral to be connected to the DMA Stream + (except for internal SRAM/FLASH memories: no initialization is + necessary) please refer to Reference manual for connection between peripherals + and DMA requests. + + (#) For a given Stream, program the required configuration through the following parameters: + Transfer Direction, Source and Destination data formats, + Circular, Normal or peripheral flow control mode, Stream Priority level, + Source and Destination Increment mode, FIFO mode and its Threshold (if needed), + Burst mode for Source and/or Destination (if needed) using HAL_DMA_Init() function. + + -@- Prior to HAL_DMA_Init() the clock must be enabled for DMA through the following macros: + __HAL_RCC_DMA1_CLK_ENABLE() or __HAL_RCC_DMA2_CLK_ENABLE(). + + *** Polling mode IO operation *** + ================================= + [..] + (+) Use HAL_DMA_Start() to start DMA transfer after the configuration of Source + address and destination address and the Length of data to be transferred. + (+) Use HAL_DMA_PollForTransfer() to poll for the end of current transfer, in this + case a fixed Timeout can be configured by User depending from his application. 
+ (+) Use HAL_DMA_Abort() function to abort the current transfer. + + *** Interrupt mode IO operation *** + =================================== + [..] + (+) Configure the DMA interrupt priority using HAL_NVIC_SetPriority() + (+) Enable the DMA IRQ handler using HAL_NVIC_EnableIRQ() + (+) Select Callbacks functions using HAL_DMA_RegisterCallback() + (+) Use HAL_DMA_Start_IT() to start DMA transfer after the configuration of + Source address and destination address and the Length of data to be transferred. In this + case the DMA interrupt is configured + (+) Use HAL_DMA_IRQHandler() called under DMA_IRQHandler() Interrupt subroutine + (+) At the end of data transfer HAL_DMA_IRQHandler() function is executed and user can + add his own function by customization of function pointer XferCpltCallback and + XferErrorCallback (i.e a member of DMA handle structure). + [..] + (#) Use HAL_DMA_GetState() function to return the DMA state and HAL_DMA_GetError() in case of error + detection. + + (#) Use HAL_DMA_Abort_IT() function to abort the current transfer + + -@- In Memory-to-Memory transfer mode, Circular mode is not allowed. + + -@- The FIFO is used mainly to reduce bus usage and to allow data packing/unpacking: it is + possible to set different Data Sizes for the Peripheral and the Memory (ie. you can set + Half-Word data size for the peripheral to access its data register and set Word data size + for the Memory to gain in access time. Each two half words will be packed and written in + a single access to a Word in the Memory). + + -@- When FIFO is disabled, it is not allowed to configure different Data Sizes for Source + and Destination. In this case the Peripheral Data Size will be applied to both Source + and Destination. + + *** DMA HAL driver macros list *** + ============================================= + [..] + Below the list of most used macros in DMA HAL driver. + + (+) __HAL_DMA_ENABLE: Enable the specified DMA Stream. + (+) __HAL_DMA_DISABLE: Disable the specified DMA Stream. + (+) __HAL_DMA_GET_IT_SOURCE: Check whether the specified DMA Stream interrupt has occurred or not. + + [..] + (@) You can refer to the DMA HAL driver header file for more useful macros + + @endverbatim + ****************************************************************************** + * @attention + * + *
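To make the interrupt-mode sequence described above concrete, here is a minimal memory-to-memory sketch against this driver's public API; the handle, buffers, stream/channel choice, NVIC priority and callback name are illustrative assumptions, not taken from this file:

#include "stm32f7xx_hal.h"

static DMA_HandleTypeDef hdma_m2m;        /* illustrative handle */
static uint32_t src_buf[64];
static uint32_t dst_buf[64];

/* Transfer-complete callback registered below (name is arbitrary). */
static void m2m_cplt(DMA_HandleTypeDef *hdma)
{
  (void)hdma;                             /* signal the application here */
}

void start_m2m_copy(void)
{
  __HAL_RCC_DMA2_CLK_ENABLE();            /* clock must be enabled before HAL_DMA_Init() */

  hdma_m2m.Instance                 = DMA2_Stream0;   /* memory-to-memory is supported on DMA2 */
  hdma_m2m.Init.Channel             = DMA_CHANNEL_0;
  hdma_m2m.Init.Direction           = DMA_MEMORY_TO_MEMORY;
  hdma_m2m.Init.PeriphInc           = DMA_PINC_ENABLE;
  hdma_m2m.Init.MemInc              = DMA_MINC_ENABLE;
  hdma_m2m.Init.PeriphDataAlignment = DMA_PDATAALIGN_WORD;
  hdma_m2m.Init.MemDataAlignment    = DMA_MDATAALIGN_WORD;
  hdma_m2m.Init.Mode                = DMA_NORMAL;     /* circular mode is not allowed in M2M */
  hdma_m2m.Init.Priority            = DMA_PRIORITY_LOW;
  hdma_m2m.Init.FIFOMode            = DMA_FIFOMODE_ENABLE;
  hdma_m2m.Init.FIFOThreshold       = DMA_FIFO_THRESHOLD_FULL;
  hdma_m2m.Init.MemBurst            = DMA_MBURST_SINGLE;
  hdma_m2m.Init.PeriphBurst         = DMA_PBURST_SINGLE;

  if (HAL_DMA_Init(&hdma_m2m) != HAL_OK)
  {
    return;
  }

  /* Register the completion callback, enable the stream IRQ, then start. */
  HAL_DMA_RegisterCallback(&hdma_m2m, HAL_DMA_XFER_CPLT_CB_ID, m2m_cplt);

  HAL_NVIC_SetPriority(DMA2_Stream0_IRQn, 5U, 0U);
  HAL_NVIC_EnableIRQ(DMA2_Stream0_IRQn);

  HAL_DMA_Start_IT(&hdma_m2m, (uint32_t)src_buf, (uint32_t)dst_buf, 64U);
}

void DMA2_Stream0_IRQHandler(void)
{
  HAL_DMA_IRQHandler(&hdma_m2m);          /* dispatches to the registered callbacks */
}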

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup DMA DMA + * @brief DMA HAL module driver + * @{ + */ + +#ifdef HAL_DMA_MODULE_ENABLED + +/* Private types -------------------------------------------------------------*/ +typedef struct +{ + __IO uint32_t ISR; /*!< DMA interrupt status register */ + __IO uint32_t Reserved0; + __IO uint32_t IFCR; /*!< DMA interrupt flag clear register */ +} DMA_Base_Registers; + +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @addtogroup DMA_Private_Constants + * @{ + */ + #define HAL_TIMEOUT_DMA_ABORT ((uint32_t)5) /* 5 ms */ +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @addtogroup DMA_Private_Functions + * @{ + */ +static void DMA_SetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); +static uint32_t DMA_CalcBaseAndBitshift(DMA_HandleTypeDef *hdma); +static HAL_StatusTypeDef DMA_CheckFifoParam(DMA_HandleTypeDef *hdma); + +/** + * @} + */ + +/* Exported functions ---------------------------------------------------------*/ +/** @addtogroup DMA_Exported_Functions + * @{ + */ + +/** @addtogroup DMA_Exported_Functions_Group1 + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + This section provides functions allowing to initialize the DMA Stream source + and destination addresses, incrementation and data sizes, transfer direction, + circular/normal mode selection, memory-to-memory mode selection and Stream priority value. + [..] + The HAL_DMA_Init() function follows the DMA configuration procedures as described in + reference manual. + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the DMA according to the specified + * parameters in the DMA_InitTypeDef and create the associated handle. + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Init(DMA_HandleTypeDef *hdma) +{ + uint32_t tmp = 0U; + uint32_t tickstart = HAL_GetTick(); + DMA_Base_Registers *regs; + + /* Check the DMA peripheral state */ + if(hdma == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_DMA_STREAM_ALL_INSTANCE(hdma->Instance)); + assert_param(IS_DMA_CHANNEL(hdma->Init.Channel)); + assert_param(IS_DMA_DIRECTION(hdma->Init.Direction)); + assert_param(IS_DMA_PERIPHERAL_INC_STATE(hdma->Init.PeriphInc)); + assert_param(IS_DMA_MEMORY_INC_STATE(hdma->Init.MemInc)); + assert_param(IS_DMA_PERIPHERAL_DATA_SIZE(hdma->Init.PeriphDataAlignment)); + assert_param(IS_DMA_MEMORY_DATA_SIZE(hdma->Init.MemDataAlignment)); + assert_param(IS_DMA_MODE(hdma->Init.Mode)); + assert_param(IS_DMA_PRIORITY(hdma->Init.Priority)); + assert_param(IS_DMA_FIFO_MODE_STATE(hdma->Init.FIFOMode)); + /* Check the memory burst, peripheral burst and FIFO threshold parameters only + when FIFO mode is enabled */ + if(hdma->Init.FIFOMode != DMA_FIFOMODE_DISABLE) + { + assert_param(IS_DMA_FIFO_THRESHOLD(hdma->Init.FIFOThreshold)); + assert_param(IS_DMA_MEMORY_BURST(hdma->Init.MemBurst)); + assert_param(IS_DMA_PERIPHERAL_BURST(hdma->Init.PeriphBurst)); + } + + /* Allocate lock resource */ + __HAL_UNLOCK(hdma); + + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Disable the peripheral */ + __HAL_DMA_DISABLE(hdma); + + /* Check if the DMA Stream is effectively disabled */ + while((hdma->Instance->CR & DMA_SxCR_EN) != RESET) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > HAL_TIMEOUT_DMA_ABORT) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_TIMEOUT; + + return HAL_TIMEOUT; + } + } + + /* Get the CR register value */ + tmp = hdma->Instance->CR; + + /* Clear CHSEL, MBURST, PBURST, PL, MSIZE, PSIZE, MINC, PINC, CIRC, DIR, CT and DBM bits */ + tmp &= ((uint32_t)~(DMA_SxCR_CHSEL | DMA_SxCR_MBURST | DMA_SxCR_PBURST | \ + DMA_SxCR_PL | DMA_SxCR_MSIZE | DMA_SxCR_PSIZE | \ + DMA_SxCR_MINC | DMA_SxCR_PINC | DMA_SxCR_CIRC | \ + DMA_SxCR_DIR | DMA_SxCR_CT | DMA_SxCR_DBM)); + + /* Prepare the DMA Stream configuration */ + tmp |= hdma->Init.Channel | hdma->Init.Direction | + hdma->Init.PeriphInc | hdma->Init.MemInc | + hdma->Init.PeriphDataAlignment | hdma->Init.MemDataAlignment | + hdma->Init.Mode | hdma->Init.Priority; + + /* the Memory burst and peripheral burst are not used when the FIFO is disabled */ + if(hdma->Init.FIFOMode == DMA_FIFOMODE_ENABLE) + { + /* Get memory burst and peripheral burst */ + tmp |= hdma->Init.MemBurst | hdma->Init.PeriphBurst; + } + + /* Write to DMA Stream CR register */ + hdma->Instance->CR = tmp; + + /* Get the FCR register value */ + tmp = hdma->Instance->FCR; + + /* Clear Direct mode and FIFO threshold bits */ + tmp &= (uint32_t)~(DMA_SxFCR_DMDIS | DMA_SxFCR_FTH); + + /* Prepare the DMA Stream FIFO configuration */ + tmp |= hdma->Init.FIFOMode; + + /* The FIFO threshold is not used when the FIFO mode is disabled */ + if(hdma->Init.FIFOMode == DMA_FIFOMODE_ENABLE) + { + /* Get the FIFO threshold */ + tmp |= hdma->Init.FIFOThreshold; + + /* Check compatibility between FIFO threshold level and size of the memory burst */ + /* for INCR4, INCR8, INCR16 bursts */ + if (hdma->Init.MemBurst != DMA_MBURST_SINGLE) + { + if (DMA_CheckFifoParam(hdma) != HAL_OK) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_PARAM; + + /* Change the DMA state */ + 
hdma->State = HAL_DMA_STATE_READY; + + return HAL_ERROR; + } + } + } + + /* Write to DMA Stream FCR */ + hdma->Instance->FCR = tmp; + + /* Initialize StreamBaseAddress and StreamIndex parameters to be used to calculate + DMA steam Base Address needed by HAL_DMA_IRQHandler() and HAL_DMA_PollForTransfer() */ + regs = (DMA_Base_Registers *)DMA_CalcBaseAndBitshift(hdma); + + /* Clear all interrupt flags */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Initialize the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the DMA peripheral + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_DeInit(DMA_HandleTypeDef *hdma) +{ + DMA_Base_Registers *regs; + + /* Check the DMA peripheral state */ + if(hdma == NULL) + { + return HAL_ERROR; + } + + /* Check the DMA peripheral state */ + if(hdma->State == HAL_DMA_STATE_BUSY) + { + /* Return error status */ + return HAL_BUSY; + } + + /* Check the parameters */ + assert_param(IS_DMA_STREAM_ALL_INSTANCE(hdma->Instance)); + + /* Disable the selected DMA Streamx */ + __HAL_DMA_DISABLE(hdma); + + /* Reset DMA Streamx control register */ + hdma->Instance->CR = 0U; + + /* Reset DMA Streamx number of data to transfer register */ + hdma->Instance->NDTR = 0U; + + /* Reset DMA Streamx peripheral address register */ + hdma->Instance->PAR = 0U; + + /* Reset DMA Streamx memory 0 address register */ + hdma->Instance->M0AR = 0U; + + /* Reset DMA Streamx memory 1 address register */ + hdma->Instance->M1AR = 0U; + + /* Reset DMA Streamx FIFO control register */ + hdma->Instance->FCR = (uint32_t)0x00000021U; + + /* Get DMA steam Base Address */ + regs = (DMA_Base_Registers *)DMA_CalcBaseAndBitshift(hdma); + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Clean all callbacks */ + hdma->XferCpltCallback = NULL; + hdma->XferHalfCpltCallback = NULL; + hdma->XferM1CpltCallback = NULL; + hdma->XferM1HalfCpltCallback = NULL; + hdma->XferErrorCallback = NULL; + hdma->XferAbortCallback = NULL; + + /* Reset the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Reset the DMA state */ + hdma->State = HAL_DMA_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hdma); + + return HAL_OK; +} + +/** + * @} + */ + +/** @addtogroup DMA_Exported_Functions_Group2 + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the source, destination address and data length and Start DMA transfer + (+) Configure the source, destination address and data length and + Start DMA transfer with interrupt + (+) Abort DMA transfer + (+) Poll for transfer complete + (+) Handle DMA interrupt request + +@endverbatim + * @{ + */ + +/** + * @brief Starts the DMA Transfer. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Start(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Configure the source, destination address and the data length */ + DMA_SetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Enable the Peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdma); + + /* Return error status */ + status = HAL_BUSY; + } + return status; +} + +/** + * @brief Start the DMA Transfer with interrupt enabled. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Start_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Configure the source, destination address and the data length */ + DMA_SetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Enable Common interrupts*/ + hdma->Instance->CR |= DMA_IT_TC | DMA_IT_TE | DMA_IT_DME; + hdma->Instance->FCR |= DMA_IT_FE; + + if(hdma->XferHalfCpltCallback != NULL) + { + hdma->Instance->CR |= DMA_IT_HT; + } + + /* Enable the Peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdma); + + /* Return error status */ + status = HAL_BUSY; + } + + return status; +} + +/** + * @brief Aborts the DMA Transfer. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * + * @note After disabling a DMA Stream, a check for wait until the DMA Stream is + * effectively disabled is added. If a Stream is disabled + * while a data transfer is ongoing, the current data will be transferred + * and the Stream will be effectively disabled only after the transfer of + * this single data is finished. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma) +{ + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + + uint32_t tickstart = HAL_GetTick(); + + if(hdma->State != HAL_DMA_STATE_BUSY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NO_XFER; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + return HAL_ERROR; + } + else + { + /* Disable all the transfer interrupts */ + hdma->Instance->CR &= ~(DMA_IT_TC | DMA_IT_TE | DMA_IT_DME); + hdma->Instance->FCR &= ~(DMA_IT_FE); + + if((hdma->XferHalfCpltCallback != NULL) || (hdma->XferM1HalfCpltCallback != NULL)) + { + hdma->Instance->CR &= ~(DMA_IT_HT); + } + + /* Disable the stream */ + __HAL_DMA_DISABLE(hdma); + + /* Check if the DMA Stream is effectively disabled */ + while((hdma->Instance->CR & DMA_SxCR_EN) != RESET) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > HAL_TIMEOUT_DMA_ABORT) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_TIMEOUT; + + return HAL_TIMEOUT; + } + } + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + /* Change the DMA state*/ + hdma->State = HAL_DMA_STATE_READY; + } + return HAL_OK; +} + +/** + * @brief Aborts the DMA Transfer in Interrupt mode. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Abort_IT(DMA_HandleTypeDef *hdma) +{ + if(hdma->State != HAL_DMA_STATE_BUSY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NO_XFER; + return HAL_ERROR; + } + else + { + /* Set Abort State */ + hdma->State = HAL_DMA_STATE_ABORT; + + /* Disable the stream */ + __HAL_DMA_DISABLE(hdma); + } + + return HAL_OK; +} + +/** + * @brief Polling for transfer complete. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param CompleteLevel Specifies the DMA level complete. + * @note The polling mode is kept in this version for legacy. it is recommanded to use the IT model instead. + * This model could be used for debug purpose. + * @note The HAL_DMA_PollForTransfer API cannot be used in circular and double buffering mode (automatic circular mode). + * @param Timeout Timeout duration. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_PollForTransfer(DMA_HandleTypeDef *hdma, HAL_DMA_LevelCompleteTypeDef CompleteLevel, uint32_t Timeout) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t mask_cpltlevel; + uint32_t tickstart = HAL_GetTick(); + uint32_t tmpisr; + + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs; + + if(HAL_DMA_STATE_BUSY != hdma->State) + { + /* No transfer ongoing */ + hdma->ErrorCode = HAL_DMA_ERROR_NO_XFER; + __HAL_UNLOCK(hdma); + return HAL_ERROR; + } + + /* Polling mode not supported in circular mode and double buffering mode */ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) != RESET) + { + hdma->ErrorCode = HAL_DMA_ERROR_NOT_SUPPORTED; + return HAL_ERROR; + } + + /* Get the level transfer complete flag */ + if(CompleteLevel == HAL_DMA_FULL_TRANSFER) + { + /* Transfer Complete flag */ + mask_cpltlevel = DMA_FLAG_TCIF0_4 << hdma->StreamIndex; + } + else + { + /* Half Transfer Complete flag */ + mask_cpltlevel = DMA_FLAG_HTIF0_4 << hdma->StreamIndex; + } + + regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + tmpisr = regs->ISR; + + while(((tmpisr & mask_cpltlevel) == RESET) && ((hdma->ErrorCode & HAL_DMA_ERROR_TE) == RESET)) + { + /* Check for the Timeout (Not applicable in circular mode)*/ + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + return HAL_TIMEOUT; + } + } + + /* Get the ISR register value */ + tmpisr = regs->ISR; + + if((tmpisr & (DMA_FLAG_TEIF0_4 << hdma->StreamIndex)) != RESET) + { + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_TE; + + /* Clear the transfer error flag */ + regs->IFCR = DMA_FLAG_TEIF0_4 << hdma->StreamIndex; + } + + if((tmpisr & (DMA_FLAG_FEIF0_4 << hdma->StreamIndex)) != RESET) + { + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_FE; + + /* Clear the FIFO error flag */ + regs->IFCR = DMA_FLAG_FEIF0_4 << hdma->StreamIndex; + } + + if((tmpisr & (DMA_FLAG_DMEIF0_4 << hdma->StreamIndex)) != RESET) + { + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_DME; + + /* Clear the Direct Mode error flag */ + regs->IFCR = DMA_FLAG_DMEIF0_4 << hdma->StreamIndex; + } + } + + if(hdma->ErrorCode != HAL_DMA_ERROR_NONE) + { + if((hdma->ErrorCode & HAL_DMA_ERROR_TE) != RESET) + { + HAL_DMA_Abort(hdma); + + /* Clear the half transfer and transfer complete flags */ + regs->IFCR = (DMA_FLAG_HTIF0_4 | DMA_FLAG_TCIF0_4) << hdma->StreamIndex; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + /* Change the DMA state */ + hdma->State= HAL_DMA_STATE_READY; + + return HAL_ERROR; + } + } + + /* Get the level transfer complete flag */ + if(CompleteLevel == HAL_DMA_FULL_TRANSFER) + { + /* Clear the half transfer and transfer complete flags */ + regs->IFCR = (DMA_FLAG_HTIF0_4 | DMA_FLAG_TCIF0_4) << hdma->StreamIndex; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + hdma->State = HAL_DMA_STATE_READY; + } + else + { + /* Clear the half transfer flag */ + regs->IFCR = (DMA_FLAG_HTIF0_4) << hdma->StreamIndex; + } + + return status; +} + +/** + * @brief Handles DMA interrupt request. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @retval None + */ +void HAL_DMA_IRQHandler(DMA_HandleTypeDef *hdma) +{ + uint32_t tmpisr; + __IO uint32_t count = 0; + uint32_t timeout = SystemCoreClock / 9600; + + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + + tmpisr = regs->ISR; + + /* Transfer Error Interrupt management ***************************************/ + if ((tmpisr & (DMA_FLAG_TEIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_TE) != RESET) + { + /* Disable the transfer error interrupt */ + hdma->Instance->CR &= ~(DMA_IT_TE); + + /* Clear the transfer error flag */ + regs->IFCR = DMA_FLAG_TEIF0_4 << hdma->StreamIndex; + + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_TE; + } + } + /* FIFO Error Interrupt management ******************************************/ + if ((tmpisr & (DMA_FLAG_FEIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_FE) != RESET) + { + /* Clear the FIFO error flag */ + regs->IFCR = DMA_FLAG_FEIF0_4 << hdma->StreamIndex; + + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_FE; + } + } + /* Direct Mode Error Interrupt management ***********************************/ + if ((tmpisr & (DMA_FLAG_DMEIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_DME) != RESET) + { + /* Clear the direct mode error flag */ + regs->IFCR = DMA_FLAG_DMEIF0_4 << hdma->StreamIndex; + + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_DME; + } + } + /* Half Transfer Complete Interrupt management ******************************/ + if ((tmpisr & (DMA_FLAG_HTIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_HT) != RESET) + { + /* Clear the half transfer complete flag */ + regs->IFCR = DMA_FLAG_HTIF0_4 << hdma->StreamIndex; + + /* Multi_Buffering mode enabled */ + if(((hdma->Instance->CR) & (uint32_t)(DMA_SxCR_DBM)) != RESET) + { + /* Current memory buffer used is Memory 0 */ + if((hdma->Instance->CR & DMA_SxCR_CT) == RESET) + { + if(hdma->XferHalfCpltCallback != NULL) + { + /* Half transfer callback */ + hdma->XferHalfCpltCallback(hdma); + } + } + /* Current memory buffer used is Memory 1 */ + else + { + if(hdma->XferM1HalfCpltCallback != NULL) + { + /* Half transfer callback */ + hdma->XferM1HalfCpltCallback(hdma); + } + } + } + else + { + /* Disable the half transfer interrupt if the DMA mode is not CIRCULAR */ + if((hdma->Instance->CR & DMA_SxCR_CIRC) == RESET) + { + /* Disable the half transfer interrupt */ + hdma->Instance->CR &= ~(DMA_IT_HT); + } + + if(hdma->XferHalfCpltCallback != NULL) + { + /* Half transfer callback */ + hdma->XferHalfCpltCallback(hdma); + } + } + } + } + /* Transfer Complete Interrupt management ***********************************/ + if ((tmpisr & (DMA_FLAG_TCIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_TC) != RESET) + { + /* Clear the transfer complete flag */ + regs->IFCR = DMA_FLAG_TCIF0_4 << hdma->StreamIndex; + + if(HAL_DMA_STATE_ABORT == hdma->State) + { + /* Disable all the transfer interrupts */ + hdma->Instance->CR &= ~(DMA_IT_TC | DMA_IT_TE | DMA_IT_DME); + hdma->Instance->FCR &= ~(DMA_IT_FE); + + if((hdma->XferHalfCpltCallback != NULL) || (hdma->XferM1HalfCpltCallback != NULL)) + { + hdma->Instance->CR &= ~(DMA_IT_HT); + } + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + /* Change the DMA state 
*/ + hdma->State = HAL_DMA_STATE_READY; + + if(hdma->XferAbortCallback != NULL) + { + hdma->XferAbortCallback(hdma); + } + return; + } + + if(((hdma->Instance->CR) & (uint32_t)(DMA_SxCR_DBM)) != RESET) + { + /* Current memory buffer used is Memory 0 */ + if((hdma->Instance->CR & DMA_SxCR_CT) == RESET) + { + if(hdma->XferM1CpltCallback != NULL) + { + /* Transfer complete Callback for memory1 */ + hdma->XferM1CpltCallback(hdma); + } + } + /* Current memory buffer used is Memory 1 */ + else + { + if(hdma->XferCpltCallback != NULL) + { + /* Transfer complete Callback for memory0 */ + hdma->XferCpltCallback(hdma); + } + } + } + /* Disable the transfer complete interrupt if the DMA mode is not CIRCULAR */ + else + { + if((hdma->Instance->CR & DMA_SxCR_CIRC) == RESET) + { + /* Disable the transfer complete interrupt */ + hdma->Instance->CR &= ~(DMA_IT_TC); + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + } + + if(hdma->XferCpltCallback != NULL) + { + /* Transfer complete callback */ + hdma->XferCpltCallback(hdma); + } + } + } + } + + /* manage error case */ + if(hdma->ErrorCode != HAL_DMA_ERROR_NONE) + { + if((hdma->ErrorCode & HAL_DMA_ERROR_TE) != RESET) + { + hdma->State = HAL_DMA_STATE_ABORT; + + /* Disable the stream */ + __HAL_DMA_DISABLE(hdma); + + do + { + if (++count > timeout) + { + break; + } + } + while((hdma->Instance->CR & DMA_SxCR_EN) != RESET); + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + } + + if(hdma->XferErrorCallback != NULL) + { + /* Transfer error callback */ + hdma->XferErrorCallback(hdma); + } + } +} + +/** + * @brief Register callbacks + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param CallbackID User Callback identifer + * a DMA_HandleTypeDef structure as parameter. + * @param pCallback pointer to private callbacsk function which has pointer to + * a DMA_HandleTypeDef structure as parameter. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_RegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID, void (* pCallback)(DMA_HandleTypeDef *_hdma)) +{ + + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + switch (CallbackID) + { + case HAL_DMA_XFER_CPLT_CB_ID: + hdma->XferCpltCallback = pCallback; + break; + + case HAL_DMA_XFER_HALFCPLT_CB_ID: + hdma->XferHalfCpltCallback = pCallback; + break; + + case HAL_DMA_XFER_M1CPLT_CB_ID: + hdma->XferM1CpltCallback = pCallback; + break; + + case HAL_DMA_XFER_M1HALFCPLT_CB_ID: + hdma->XferM1HalfCpltCallback = pCallback; + break; + + case HAL_DMA_XFER_ERROR_CB_ID: + hdma->XferErrorCallback = pCallback; + break; + + case HAL_DMA_XFER_ABORT_CB_ID: + hdma->XferAbortCallback = pCallback; + break; + + default: + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdma); + + return status; +} + +/** + * @brief UnRegister callbacks + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param CallbackID User Callback identifer + * a HAL_DMA_CallbackIDTypeDef ENUM as parameter. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_UnRegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + switch (CallbackID) + { + case HAL_DMA_XFER_CPLT_CB_ID: + hdma->XferCpltCallback = NULL; + break; + + case HAL_DMA_XFER_HALFCPLT_CB_ID: + hdma->XferHalfCpltCallback = NULL; + break; + + case HAL_DMA_XFER_M1CPLT_CB_ID: + hdma->XferM1CpltCallback = NULL; + break; + + case HAL_DMA_XFER_M1HALFCPLT_CB_ID: + hdma->XferM1HalfCpltCallback = NULL; + break; + + case HAL_DMA_XFER_ERROR_CB_ID: + hdma->XferErrorCallback = NULL; + break; + + case HAL_DMA_XFER_ABORT_CB_ID: + hdma->XferAbortCallback = NULL; + break; + + case HAL_DMA_XFER_ALL_CB_ID: + hdma->XferCpltCallback = NULL; + hdma->XferHalfCpltCallback = NULL; + hdma->XferM1CpltCallback = NULL; + hdma->XferM1HalfCpltCallback = NULL; + hdma->XferErrorCallback = NULL; + hdma->XferAbortCallback = NULL; + break; + + default: + status = HAL_ERROR; + break; + } + } + else + { + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdma); + + return status; +} + +/** + * @} + */ + +/** @addtogroup DMA_Exported_Functions_Group3 + * +@verbatim + =============================================================================== + ##### State and Errors functions ##### + =============================================================================== + [..] + This subsection provides functions allowing to + (+) Check the DMA state + (+) Get error code + +@endverbatim + * @{ + */ + +/** + * @brief Returns the DMA state. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval HAL state + */ +HAL_DMA_StateTypeDef HAL_DMA_GetState(DMA_HandleTypeDef *hdma) +{ + return hdma->State; +} + +/** + * @brief Return the DMA error code + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval DMA Error Code + */ +uint32_t HAL_DMA_GetError(DMA_HandleTypeDef *hdma) +{ + return hdma->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup DMA_Private_Functions + * @{ + */ + +/** + * @brief Sets the DMA Transfer parameter. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +static void DMA_SetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + /* Clear DBM bit */ + hdma->Instance->CR &= (uint32_t)(~DMA_SxCR_DBM); + + /* Configure DMA Stream data length */ + hdma->Instance->NDTR = DataLength; + + /* Memory to Peripheral */ + if((hdma->Init.Direction) == DMA_MEMORY_TO_PERIPH) + { + /* Configure DMA Stream destination address */ + hdma->Instance->PAR = DstAddress; + + /* Configure DMA Stream source address */ + hdma->Instance->M0AR = SrcAddress; + } + /* Peripheral to Memory */ + else + { + /* Configure DMA Stream source address */ + hdma->Instance->PAR = SrcAddress; + + /* Configure DMA Stream destination address */ + hdma->Instance->M0AR = DstAddress; + } +} + +/** + * @brief Returns the DMA Stream base address depending on stream number + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval Stream base address + */ +static uint32_t DMA_CalcBaseAndBitshift(DMA_HandleTypeDef *hdma) +{ + uint32_t stream_number = (((uint32_t)hdma->Instance & 0xFFU) - 16U) / 24U; + + /* lookup table for necessary bitshift of flags within status registers */ + static const uint8_t flagBitshiftOffset[8U] = {0U, 6U, 16U, 22U, 0U, 6U, 16U, 22U}; + hdma->StreamIndex = flagBitshiftOffset[stream_number]; + + if (stream_number > 3U) + { + /* return pointer to HISR and HIFCR */ + hdma->StreamBaseAddress = (((uint32_t)hdma->Instance & (uint32_t)(~0x3FFU)) + 4U); + } + else + { + /* return pointer to LISR and LIFCR */ + hdma->StreamBaseAddress = ((uint32_t)hdma->Instance & (uint32_t)(~0x3FFU)); + } + + return hdma->StreamBaseAddress; +} + +/** + * @brief Check compatibility between FIFO threshold level and size of the memory burst + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @retval HAL status + */ +static HAL_StatusTypeDef DMA_CheckFifoParam(DMA_HandleTypeDef *hdma) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmp = hdma->Init.FIFOThreshold; + + /* Memory Data size equal to Byte */ + if(hdma->Init.MemDataAlignment == DMA_MDATAALIGN_BYTE) + { + switch (tmp) + { + case DMA_FIFO_THRESHOLD_1QUARTERFULL: + case DMA_FIFO_THRESHOLD_3QUARTERSFULL: + if ((hdma->Init.MemBurst & DMA_SxCR_MBURST_1) == DMA_SxCR_MBURST_1) + { + status = HAL_ERROR; + } + break; + case DMA_FIFO_THRESHOLD_HALFFULL: + if (hdma->Init.MemBurst == DMA_MBURST_INC16) + { + status = HAL_ERROR; + } + break; + case DMA_FIFO_THRESHOLD_FULL: + break; + default: + break; + } + } + + /* Memory Data size equal to Half-Word */ + else if (hdma->Init.MemDataAlignment == DMA_MDATAALIGN_HALFWORD) + { + switch (tmp) + { + case DMA_FIFO_THRESHOLD_1QUARTERFULL: + case DMA_FIFO_THRESHOLD_3QUARTERSFULL: + status = HAL_ERROR; + break; + case DMA_FIFO_THRESHOLD_HALFFULL: + if ((hdma->Init.MemBurst & DMA_SxCR_MBURST_1) == DMA_SxCR_MBURST_1) + { + status = HAL_ERROR; + } + break; + case DMA_FIFO_THRESHOLD_FULL: + if (hdma->Init.MemBurst == DMA_MBURST_INC16) + { + status = HAL_ERROR; + } + break; + default: + break; + } + } + + /* Memory Data size equal to Word */ + else + { + switch (tmp) + { + case DMA_FIFO_THRESHOLD_1QUARTERFULL: + case DMA_FIFO_THRESHOLD_HALFFULL: + case DMA_FIFO_THRESHOLD_3QUARTERSFULL: + status = HAL_ERROR; + break; + case DMA_FIFO_THRESHOLD_FULL: + if ((hdma->Init.MemBurst & DMA_SxCR_MBURST_1) == DMA_SxCR_MBURST_1) + { + status = HAL_ERROR; + } + break; + default: + break; + } + } + + return status; +} + +/** + * @} + */ + +#endif /* HAL_DMA_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma2d.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma2d.c new file mode 100644 index 000000000..c08a4b9a6 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma2d.c @@ -0,0 +1,2008 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dma2d.c + * @author MCD Application Team + * @brief DMA2D HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the DMA2D peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State and Errors functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Program the required configuration through the following parameters: + the transfer mode, the output color mode and the output offset using + HAL_DMA2D_Init() function. + + (#) Program the required configuration through the following parameters: + the input color mode, the input color, the input alpha value, the alpha mode, + the red/blue swap mode, the inverted alpha mode and the input offset using + HAL_DMA2D_ConfigLayer() function for foreground or/and background layer. + + *** Polling mode IO operation *** + ================================= + [..] + (#) Configure pdata parameter (explained hereafter), destination and data length + and enable the transfer using HAL_DMA2D_Start(). 
+ (#) Wait for end of transfer using HAL_DMA2D_PollForTransfer(), at this stage + user can specify the value of timeout according to his end application. + + *** Interrupt mode IO operation *** + =================================== + [..] + (#) Configure pdata parameter, destination and data length and enable + the transfer using HAL_DMA2D_Start_IT(). + (#) Use HAL_DMA2D_IRQHandler() called under DMA2D_IRQHandler() interrupt subroutine. + (#) At the end of data transfer HAL_DMA2D_IRQHandler() function is executed and user can + add his own function by customization of function pointer XferCpltCallback (member + of DMA2D handle structure). + (#) In case of error, the HAL_DMA2D_IRQHandler() function calls the callback + XferErrorCallback. + + -@- In Register-to-Memory transfer mode, pdata parameter is the register + color, in Memory-to-memory or Memory-to-Memory with pixel format + conversion pdata is the source address. + + -@- Configure the foreground source address, the background source address, + the destination and data length then Enable the transfer using + HAL_DMA2D_BlendingStart() in polling mode and HAL_DMA2D_BlendingStart_IT() + in interrupt mode. + + -@- HAL_DMA2D_BlendingStart() and HAL_DMA2D_BlendingStart_IT() functions + are used if the memory to memory with blending transfer mode is selected. + + (#) Optionally, configure and enable the CLUT using HAL_DMA2D_CLUTLoad() in polling + mode or HAL_DMA2D_CLUTLoad_IT() in interrupt mode. + + (#) Optionally, configure the line watermark in using the API HAL_DMA2D_ProgramLineEvent(). + + (#) Optionally, configure the dead time value in the AHB clock cycle inserted between two + consecutive accesses on the AHB master port in using the API HAL_DMA2D_ConfigDeadTime() + and enable/disable the functionality with the APIs HAL_DMA2D_EnableDeadTime() or + HAL_DMA2D_DisableDeadTime(). + + (#) The transfer can be suspended, resumed and aborted using the following + functions: HAL_DMA2D_Suspend(), HAL_DMA2D_Resume(), HAL_DMA2D_Abort(). + + (#) The CLUT loading can be suspended, resumed and aborted using the following + functions: HAL_DMA2D_CLUTLoading_Suspend(), HAL_DMA2D_CLUTLoading_Resume(), + HAL_DMA2D_CLUTLoading_Abort(). + + (#) To control the DMA2D state, use the following function: HAL_DMA2D_GetState(). + + (#) To read the DMA2D error code, use the following function: HAL_DMA2D_GetError(). + + *** DMA2D HAL driver macros list *** + ============================================= + [..] + Below the list of most used macros in DMA2D HAL driver : + + (+) __HAL_DMA2D_ENABLE: Enable the DMA2D peripheral. + (+) __HAL_DMA2D_GET_FLAG: Get the DMA2D pending flags. + (+) __HAL_DMA2D_CLEAR_FLAG: Clear the DMA2D pending flags. + (+) __HAL_DMA2D_ENABLE_IT: Enable the specified DMA2D interrupts. + (+) __HAL_DMA2D_DISABLE_IT: Disable the specified DMA2D interrupts. + (+) __HAL_DMA2D_GET_IT_SOURCE: Check whether the specified DMA2D interrupt is enabled or not. + + *** Callback registration *** + =================================== + [..] + (#) The compilation define USE_HAL_DMA2D_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + Use function @ref HAL_DMA2D_RegisterCallback() to register a user callback. + + (#) Function @ref HAL_DMA2D_RegisterCallback() allows to register following callbacks: + (+) XferCpltCallback : callback for transfer complete. + (+) XferErrorCallback : callback for transfer error. + (+) LineEventCallback : callback for line event. 
+ (+) CLUTLoadingCpltCallback : callback for CLUT loading completion. + (+) MspInitCallback : DMA2D MspInit. + (+) MspDeInitCallback : DMA2D MspDeInit. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + (#) Use function @ref HAL_DMA2D_UnRegisterCallback() to reset a callback to the default + weak (surcharged) function. + @ref HAL_DMA2D_UnRegisterCallback() takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) XferCpltCallback : callback for transfer complete. + (+) XferErrorCallback : callback for transfer error. + (+) LineEventCallback : callback for line event. + (+) CLUTLoadingCpltCallback : callback for CLUT loading completion. + (+) MspInitCallback : DMA2D MspInit. + (+) MspDeInitCallback : DMA2D MspDeInit. + + (#) By default, after the @ref HAL_DMA2D_Init and if the state is HAL_DMA2D_STATE_RESET + all callbacks are reset to the corresponding legacy weak (surcharged) functions: + examples @ref HAL_DMA2D_LineEventCallback(), @ref HAL_DMA2D_CLUTLoadingCpltCallback() + Exception done for MspInit and MspDeInit callbacks that are respectively + reset to the legacy weak (surcharged) functions in the @ref HAL_DMA2D_Init + and @ref HAL_DMA2D_DeInit only when these callbacks are null (not registered beforehand) + If not, MspInit or MspDeInit are not null, the @ref HAL_DMA2D_Init and @ref HAL_DMA2D_DeInit + keep and use the user MspInit/MspDeInit callbacks (registered beforehand). + + Exception as well for Transfer Completion and Transfer Error callbacks that are not defined + as weak (surcharged) functions. They must be defined by the user to be resorted to. + + Callbacks can be registered/unregistered in READY state only. + Exception done for MspInit/MspDeInit callbacks that can be registered/unregistered + in READY or RESET state, thus registered (user) MspInit/DeInit callbacks can be used + during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using @ref HAL_DMA2D_RegisterCallback before calling @ref HAL_DMA2D_DeInit + or @ref HAL_DMA2D_Init function. + + When The compilation define USE_HAL_DMA2D_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registering feature is not available + and weak (surcharged) callbacks are used. + + [..] + (@) You can refer to the DMA2D HAL driver header file for more useful macros + + @endverbatim + ****************************************************************************** + * @attention + * + *
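As a concrete illustration of the polling-mode sequence described above, a minimal register-to-memory fill sketch follows; the handle, output color mode, fill color and frame-buffer address are illustrative assumptions, not taken from this file:

#include "stm32f7xx_hal.h"

static DMA2D_HandleTypeDef hdma2d_fill;   /* illustrative handle */

/* Fill a width x height RGB565 rectangle at fb_address with a solid color. */
HAL_StatusTypeDef fill_rect_red(uint32_t fb_address, uint32_t width, uint32_t height)
{
  __HAL_RCC_DMA2D_CLK_ENABLE();           /* clock must be enabled before HAL_DMA2D_Init() */

  hdma2d_fill.Instance          = DMA2D;
  hdma2d_fill.Init.Mode         = DMA2D_R2M;            /* register-to-memory: pdata is the color */
  hdma2d_fill.Init.ColorMode    = DMA2D_OUTPUT_RGB565;
  hdma2d_fill.Init.OutputOffset = 0U;                   /* no gap between output lines */

  if (HAL_DMA2D_Init(&hdma2d_fill) != HAL_OK)
  {
    return HAL_ERROR;
  }

  /* Start the fill (0xF800 is red in RGB565), then poll for up to 100 ms. */
  if (HAL_DMA2D_Start(&hdma2d_fill, 0xF800U, fb_address, width, height) != HAL_OK)
  {
    return HAL_ERROR;
  }

  return HAL_DMA2D_PollForTransfer(&hdma2d_fill, 100U);
}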

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +#ifdef HAL_DMA2D_MODULE_ENABLED +#if defined (DMA2D) + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup DMA2D DMA2D + * @brief DMA2D HAL module driver + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @defgroup DMA2D_Private_Constants DMA2D Private Constants + * @{ + */ + +/** @defgroup DMA2D_TimeOut DMA2D Time Out + * @{ + */ +#define DMA2D_TIMEOUT_ABORT (1000U) /*!< 1s */ +#define DMA2D_TIMEOUT_SUSPEND (1000U) /*!< 1s */ +/** + * @} + */ + +/** + * @} + */ + +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup DMA2D_Private_Functions DMA2D Private Functions + * @{ + */ +static void DMA2D_SetConfig(DMA2D_HandleTypeDef *hdma2d, uint32_t pdata, uint32_t DstAddress, uint32_t Width, uint32_t Height); +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @defgroup DMA2D_Exported_Functions DMA2D Exported Functions + * @{ + */ + +/** @defgroup DMA2D_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initialize and configure the DMA2D + (+) De-initialize the DMA2D + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the DMA2D according to the specified + * parameters in the DMA2D_InitTypeDef and create the associated handle. + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_Init(DMA2D_HandleTypeDef *hdma2d) +{ + /* Check the DMA2D peripheral state */ + if(hdma2d == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_DMA2D_ALL_INSTANCE(hdma2d->Instance)); + assert_param(IS_DMA2D_MODE(hdma2d->Init.Mode)); + assert_param(IS_DMA2D_CMODE(hdma2d->Init.ColorMode)); + assert_param(IS_DMA2D_OFFSET(hdma2d->Init.OutputOffset)); +#if defined (DMA2D_ALPHA_INV_RB_SWAP_SUPPORT) + assert_param(IS_DMA2D_ALPHA_INVERTED(hdma2d->Init.AlphaInverted)); + assert_param(IS_DMA2D_RB_SWAP(hdma2d->Init.RedBlueSwap)); +#endif /* DMA2D_ALPHA_INV_RB_SWAP_SUPPORT */ + +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) + if (hdma2d->State == HAL_DMA2D_STATE_RESET) + { + /* Reset Callback pointers in HAL_DMA2D_STATE_RESET only */ + hdma2d->LineEventCallback = HAL_DMA2D_LineEventCallback; + hdma2d->CLUTLoadingCpltCallback = HAL_DMA2D_CLUTLoadingCpltCallback; + if(hdma2d->MspInitCallback == NULL) + { + hdma2d->MspInitCallback = HAL_DMA2D_MspInit; + } + + /* Init the low level hardware */ + hdma2d->MspInitCallback(hdma2d); + } +#else + if(hdma2d->State == HAL_DMA2D_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hdma2d->Lock = HAL_UNLOCKED; + /* Init the low level hardware */ + HAL_DMA2D_MspInit(hdma2d); + } +#endif /* (USE_HAL_DMA2D_REGISTER_CALLBACKS) */ + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* DMA2D CR register configuration -------------------------------------------*/ + MODIFY_REG(hdma2d->Instance->CR, DMA2D_CR_MODE, hdma2d->Init.Mode); + + /* DMA2D OPFCCR register configuration ---------------------------------------*/ + MODIFY_REG(hdma2d->Instance->OPFCCR, DMA2D_OPFCCR_CM, hdma2d->Init.ColorMode); + + /* DMA2D OOR register configuration ------------------------------------------*/ + MODIFY_REG(hdma2d->Instance->OOR, DMA2D_OOR_LO, hdma2d->Init.OutputOffset); +#if defined (DMA2D_ALPHA_INV_RB_SWAP_SUPPORT) + /* DMA2D OPFCCR AI and RBS fields setting (Output Alpha Inversion)*/ + MODIFY_REG(hdma2d->Instance->OPFCCR,(DMA2D_OPFCCR_AI|DMA2D_OPFCCR_RBS), ((hdma2d->Init.AlphaInverted << DMA2D_OPFCCR_AI_Pos) | (hdma2d->Init.RedBlueSwap << DMA2D_OPFCCR_RBS_Pos))); +#endif /* DMA2D_ALPHA_INV_RB_SWAP_SUPPORT */ + + + /* Update error code */ + hdma2d->ErrorCode = HAL_DMA2D_ERROR_NONE; + + /* Initialize the DMA2D state*/ + hdma2d->State = HAL_DMA2D_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Deinitializes the DMA2D peripheral registers to their default reset + * values. + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. 
+ * @retval None + */ + +HAL_StatusTypeDef HAL_DMA2D_DeInit(DMA2D_HandleTypeDef *hdma2d) +{ + + /* Check the DMA2D peripheral state */ + if(hdma2d == NULL) + { + return HAL_ERROR; + } + + /* Before aborting any DMA2D transfer or CLUT loading, check + first whether or not DMA2D clock is enabled */ + if (__HAL_RCC_DMA2D_IS_CLK_ENABLED()) + { + /* Abort DMA2D transfer if any */ + if ((hdma2d->Instance->CR & DMA2D_CR_START) == DMA2D_CR_START) + { + if (HAL_DMA2D_Abort(hdma2d) != HAL_OK) + { + /* Issue when aborting DMA2D transfer */ + return HAL_ERROR; + } + } + else + { + /* Abort background CLUT loading if any */ + if ((hdma2d->Instance->BGPFCCR & DMA2D_BGPFCCR_START) == DMA2D_BGPFCCR_START) + { + if (HAL_DMA2D_CLUTLoading_Abort(hdma2d, 0U) != HAL_OK) + { + /* Issue when aborting background CLUT loading */ + return HAL_ERROR; + } + } + else + { + /* Abort foreground CLUT loading if any */ + if ((hdma2d->Instance->FGPFCCR & DMA2D_FGPFCCR_START) == DMA2D_FGPFCCR_START) + { + if (HAL_DMA2D_CLUTLoading_Abort(hdma2d, 1U) != HAL_OK) + { + /* Issue when aborting foreground CLUT loading */ + return HAL_ERROR; + } + } + } + } + } + + /* Reset DMA2D control registers*/ + hdma2d->Instance->CR = 0U; + hdma2d->Instance->IFCR = 0x3FU; + hdma2d->Instance->FGOR = 0U; + hdma2d->Instance->BGOR = 0U; + hdma2d->Instance->FGPFCCR = 0U; + hdma2d->Instance->BGPFCCR = 0U; + hdma2d->Instance->OPFCCR = 0U; + +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) + + if(hdma2d->MspDeInitCallback == NULL) + { + hdma2d->MspDeInitCallback = HAL_DMA2D_MspDeInit; + } + + /* DeInit the low level hardware */ + hdma2d->MspDeInitCallback(hdma2d); + +#else + /* Carry on with de-initialization of low level hardware */ + HAL_DMA2D_MspDeInit(hdma2d); +#endif /* (USE_HAL_DMA2D_REGISTER_CALLBACKS) */ + + /* Update error code */ + hdma2d->ErrorCode = HAL_DMA2D_ERROR_NONE; + + /* Initialize the DMA2D state*/ + hdma2d->State = HAL_DMA2D_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hdma2d); + + return HAL_OK; +} + +/** + * @brief Initializes the DMA2D MSP. + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @retval None + */ +__weak void HAL_DMA2D_MspInit(DMA2D_HandleTypeDef* hdma2d) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdma2d); + + /* NOTE : This function should not be modified; when the callback is needed, + the HAL_DMA2D_MspInit can be implemented in the user file. + */ +} + +/** + * @brief DeInitializes the DMA2D MSP. + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @retval None + */ +__weak void HAL_DMA2D_MspDeInit(DMA2D_HandleTypeDef* hdma2d) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdma2d); + + /* NOTE : This function should not be modified; when the callback is needed, + the HAL_DMA2D_MspDeInit can be implemented in the user file. 
+ */ +} + +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User DMA2D Callback + * To be used instead of the weak (surcharged) predefined callback + * @param hdma2d DMA2D handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_DMA2D_TRANSFERCOMPLETE_CB_ID DMA2D transfer complete Callback ID + * @arg @ref HAL_DMA2D_TRANSFERERROR_CB_ID DMA2D transfer error Callback ID + * @arg @ref HAL_DMA2D_LINEEVENT_CB_ID DMA2D line event Callback ID + * @arg @ref HAL_DMA2D_CLUTLOADINGCPLT_CB_ID DMA2D CLUT loading completion Callback ID + * @arg @ref HAL_DMA2D_MSPINIT_CB_ID DMA2D MspInit callback ID + * @arg @ref HAL_DMA2D_MSPDEINIT_CB_ID DMA2D MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @note No weak predefined callbacks are defined for HAL_DMA2D_TRANSFERCOMPLETE_CB_ID or HAL_DMA2D_TRANSFERERROR_CB_ID + * @retval status + */ +HAL_StatusTypeDef HAL_DMA2D_RegisterCallback(DMA2D_HandleTypeDef *hdma2d, HAL_DMA2D_CallbackIDTypeDef CallbackID, pDMA2D_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if(pCallback == NULL) + { + /* Update the error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hdma2d); + + if(HAL_DMA2D_STATE_READY == hdma2d->State) + { + switch (CallbackID) + { + case HAL_DMA2D_TRANSFERCOMPLETE_CB_ID : + hdma2d->XferCpltCallback = pCallback; + break; + + case HAL_DMA2D_TRANSFERERROR_CB_ID : + hdma2d->XferErrorCallback = pCallback; + break; + + case HAL_DMA2D_LINEEVENT_CB_ID : + hdma2d->LineEventCallback = pCallback; + break; + + case HAL_DMA2D_CLUTLOADINGCPLT_CB_ID : + hdma2d->CLUTLoadingCpltCallback = pCallback; + break; + + case HAL_DMA2D_MSPINIT_CB_ID : + hdma2d->MspInitCallback = pCallback; + break; + + case HAL_DMA2D_MSPDEINIT_CB_ID : + hdma2d->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else if(HAL_DMA2D_STATE_RESET == hdma2d->State) + { + switch (CallbackID) + { + case HAL_DMA2D_MSPINIT_CB_ID : + hdma2d->MspInitCallback = pCallback; + break; + + case HAL_DMA2D_MSPDEINIT_CB_ID : + hdma2d->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdma2d); + return status; +} + +/** + * @brief Unregister a DMA2D Callback + * DMA2D Callback is redirected to the weak (surcharged) predefined callback + * @param hdma2d DMA2D handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_DMA2D_TRANSFERCOMPLETE_CB_ID DMA2D transfer complete Callback ID + * @arg @ref HAL_DMA2D_TRANSFERERROR_CB_ID DMA2D transfer error Callback ID + * @arg @ref HAL_DMA2D_LINEEVENT_CB_ID DMA2D line event Callback ID + * @arg @ref HAL_DMA2D_CLUTLOADINGCPLT_CB_ID DMA2D CLUT loading completion Callback ID + * @arg @ref HAL_DMA2D_MSPINIT_CB_ID DMA2D MspInit callback ID + * @arg @ref HAL_DMA2D_MSPDEINIT_CB_ID DMA2D MspDeInit callback ID + * @note No weak predefined callbacks are defined for 
HAL_DMA2D_TRANSFERCOMPLETE_CB_ID or HAL_DMA2D_TRANSFERERROR_CB_ID + * @retval status + */ +HAL_StatusTypeDef HAL_DMA2D_UnRegisterCallback(DMA2D_HandleTypeDef *hdma2d, HAL_DMA2D_CallbackIDTypeDef CallbackID) +{ +HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hdma2d); + + if(HAL_DMA2D_STATE_READY == hdma2d->State) + { + switch (CallbackID) + { + case HAL_DMA2D_TRANSFERCOMPLETE_CB_ID : + hdma2d->XferCpltCallback = NULL; + break; + + case HAL_DMA2D_TRANSFERERROR_CB_ID : + hdma2d->XferErrorCallback = NULL; + break; + + case HAL_DMA2D_LINEEVENT_CB_ID : + hdma2d->LineEventCallback = HAL_DMA2D_LineEventCallback; + break; + + case HAL_DMA2D_CLUTLOADINGCPLT_CB_ID : + hdma2d->CLUTLoadingCpltCallback = HAL_DMA2D_CLUTLoadingCpltCallback; + break; + + case HAL_DMA2D_MSPINIT_CB_ID : + hdma2d->MspInitCallback = HAL_DMA2D_MspInit; /* Legacy weak (surcharged) Msp Init */ + break; + + case HAL_DMA2D_MSPDEINIT_CB_ID : + hdma2d->MspDeInitCallback = HAL_DMA2D_MspDeInit; /* Legacy weak (surcharged) Msp DeInit */ + break; + + default : + /* Update the error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else if(HAL_DMA2D_STATE_RESET == hdma2d->State) + { + switch (CallbackID) + { + case HAL_DMA2D_MSPINIT_CB_ID : + hdma2d->MspInitCallback = HAL_DMA2D_MspInit; /* Legacy weak (surcharged) Msp Init */ + break; + + case HAL_DMA2D_MSPDEINIT_CB_ID : + hdma2d->MspDeInitCallback = HAL_DMA2D_MspDeInit; /* Legacy weak (surcharged) Msp DeInit */ + break; + + default : + /* Update the error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdma2d); + return status; +} +#endif /* USE_HAL_DMA2D_REGISTER_CALLBACKS */ + +/** + * @} + */ + + +/** @defgroup DMA2D_Exported_Functions_Group2 IO operation functions + * @brief IO operation functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the pdata, destination address and data size then + start the DMA2D transfer. + (+) Configure the source for foreground and background, destination address + and data size then start a MultiBuffer DMA2D transfer. + (+) Configure the pdata, destination address and data size then + start the DMA2D transfer with interrupt. + (+) Configure the source for foreground and background, destination address + and data size then start a MultiBuffer DMA2D transfer with interrupt. + (+) Abort DMA2D transfer. + (+) Suspend DMA2D transfer. + (+) Resume DMA2D transfer. + (+) Enable CLUT transfer. + (+) Configure CLUT loading then start transfer in polling mode. + (+) Configure CLUT loading then start transfer in interrupt mode. + (+) Abort DMA2D CLUT loading. + (+) Suspend DMA2D CLUT loading. + (+) Resume DMA2D CLUT loading. + (+) Poll for transfer complete. + (+) handle DMA2D interrupt request. + (+) Transfer watermark callback. + (+) CLUT Transfer Complete callback. + + +@endverbatim + * @{ + */ + +/** + * @brief Start the DMA2D Transfer. 
+ * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param pdata Configure the source memory Buffer address if + * Memory-to-Memory or Memory-to-Memory with pixel format + * conversion mode is selected, or configure + * the color value if Register-to-Memory mode is selected. + * @param DstAddress The destination memory Buffer address. + * @param Width The width of data to be transferred from source to destination (expressed in number of pixels per line). + * @param Height The height of data to be transferred from source to destination (expressed in number of lines). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_Start(DMA2D_HandleTypeDef *hdma2d, uint32_t pdata, uint32_t DstAddress, uint32_t Width, uint32_t Height) +{ + /* Check the parameters */ + assert_param(IS_DMA2D_LINE(Height)); + assert_param(IS_DMA2D_PIXEL(Width)); + + /* Process locked */ + __HAL_LOCK(hdma2d); + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Configure the source, destination address and the data size */ + DMA2D_SetConfig(hdma2d, pdata, DstAddress, Width, Height); + + /* Enable the Peripheral */ + __HAL_DMA2D_ENABLE(hdma2d); + + return HAL_OK; +} + +/** + * @brief Start the DMA2D Transfer with interrupt enabled. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param pdata Configure the source memory Buffer address if + * the Memory-to-Memory or Memory-to-Memory with pixel format + * conversion mode is selected, or configure + * the color value if Register-to-Memory mode is selected. + * @param DstAddress The destination memory Buffer address. + * @param Width The width of data to be transferred from source to destination (expressed in number of pixels per line). + * @param Height The height of data to be transferred from source to destination (expressed in number of lines). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_Start_IT(DMA2D_HandleTypeDef *hdma2d, uint32_t pdata, uint32_t DstAddress, uint32_t Width, uint32_t Height) +{ + /* Check the parameters */ + assert_param(IS_DMA2D_LINE(Height)); + assert_param(IS_DMA2D_PIXEL(Width)); + + /* Process locked */ + __HAL_LOCK(hdma2d); + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Configure the source, destination address and the data size */ + DMA2D_SetConfig(hdma2d, pdata, DstAddress, Width, Height); + + /* Enable the transfer complete, transfer error and configuration error interrupts */ + __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_TC|DMA2D_IT_TE|DMA2D_IT_CE); + + /* Enable the Peripheral */ + __HAL_DMA2D_ENABLE(hdma2d); + + return HAL_OK; +} + +/** + * @brief Start the multi-source DMA2D Transfer. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param SrcAddress1 The source memory Buffer address for the foreground layer. + * @param SrcAddress2 The source memory Buffer address for the background layer. + * @param DstAddress The destination memory Buffer address. + * @param Width The width of data to be transferred from source to destination (expressed in number of pixels per line). + * @param Height The height of data to be transferred from source to destination (expressed in number of lines). 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_BlendingStart(DMA2D_HandleTypeDef *hdma2d, uint32_t SrcAddress1, uint32_t SrcAddress2, uint32_t DstAddress, uint32_t Width, uint32_t Height) +{ + /* Check the parameters */ + assert_param(IS_DMA2D_LINE(Height)); + assert_param(IS_DMA2D_PIXEL(Width)); + + /* Process locked */ + __HAL_LOCK(hdma2d); + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Configure DMA2D Stream source2 address */ + WRITE_REG(hdma2d->Instance->BGMAR, SrcAddress2); + + /* Configure the source, destination address and the data size */ + DMA2D_SetConfig(hdma2d, SrcAddress1, DstAddress, Width, Height); + + /* Enable the Peripheral */ + __HAL_DMA2D_ENABLE(hdma2d); + + return HAL_OK; +} + +/** + * @brief Start the multi-source DMA2D Transfer with interrupt enabled. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param SrcAddress1 The source memory Buffer address for the foreground layer. + * @param SrcAddress2 The source memory Buffer address for the background layer. + * @param DstAddress The destination memory Buffer address. + * @param Width The width of data to be transferred from source to destination (expressed in number of pixels per line). + * @param Height The height of data to be transferred from source to destination (expressed in number of lines). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_BlendingStart_IT(DMA2D_HandleTypeDef *hdma2d, uint32_t SrcAddress1, uint32_t SrcAddress2, uint32_t DstAddress, uint32_t Width, uint32_t Height) +{ + /* Check the parameters */ + assert_param(IS_DMA2D_LINE(Height)); + assert_param(IS_DMA2D_PIXEL(Width)); + + /* Process locked */ + __HAL_LOCK(hdma2d); + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Configure DMA2D Stream source2 address */ + WRITE_REG(hdma2d->Instance->BGMAR, SrcAddress2); + + /* Configure the source, destination address and the data size */ + DMA2D_SetConfig(hdma2d, SrcAddress1, DstAddress, Width, Height); + + /* Enable the transfer complete, transfer error and configuration error interrupts */ + __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_TC|DMA2D_IT_TE|DMA2D_IT_CE); + + /* Enable the Peripheral */ + __HAL_DMA2D_ENABLE(hdma2d); + + return HAL_OK; +} + +/** + * @brief Abort the DMA2D Transfer. + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_Abort(DMA2D_HandleTypeDef *hdma2d) +{ + uint32_t tickstart; + + /* Abort the DMA2D transfer */ + /* START bit is reset to make sure not to set it again, in the event the HW clears it + between the register read and the register write by the CPU (writing 0 has no + effect on START bitvalue) */ + MODIFY_REG(hdma2d->Instance->CR, DMA2D_CR_ABORT|DMA2D_CR_START, DMA2D_CR_ABORT); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check if the DMA2D is effectively disabled */ + while((hdma2d->Instance->CR & DMA2D_CR_START) != 0U) + { + if((HAL_GetTick() - tickstart ) > DMA2D_TIMEOUT_ABORT) + { + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT; + + /* Change the DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_TIMEOUT; + } + } + + /* Disable the Transfer Complete, Transfer Error and Configuration Error interrupts */ + __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_TC|DMA2D_IT_TE|DMA2D_IT_CE); + + /* Change the DMA2D state*/ + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_OK; +} + +/** + * @brief Suspend the DMA2D Transfer. + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_Suspend(DMA2D_HandleTypeDef *hdma2d) +{ + uint32_t tickstart; + + /* Suspend the DMA2D transfer */ + /* START bit is reset to make sure not to set it again, in the event the HW clears it + between the register read and the register write by the CPU (writing 0 has no + effect on START bitvalue). */ + MODIFY_REG(hdma2d->Instance->CR, DMA2D_CR_SUSP|DMA2D_CR_START, DMA2D_CR_SUSP); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check if the DMA2D is effectively suspended */ + while ((hdma2d->Instance->CR & (DMA2D_CR_SUSP | DMA2D_CR_START)) == DMA2D_CR_START) + { + if((HAL_GetTick() - tickstart ) > DMA2D_TIMEOUT_SUSPEND) + { + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT; + + /* Change the DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_TIMEOUT; + + return HAL_TIMEOUT; + } + } + + /* Check whether or not a transfer is actually suspended and change the DMA2D state accordingly */ + if ((hdma2d->Instance->CR & DMA2D_CR_START) != 0U) + { + hdma2d->State = HAL_DMA2D_STATE_SUSPEND; + } + else + { + /* Make sure SUSP bit is cleared since it is meaningless + when no tranfer is on-going */ + CLEAR_BIT(hdma2d->Instance->CR, DMA2D_CR_SUSP); + } + + return HAL_OK; +} + +/** + * @brief Resume the DMA2D Transfer. + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_Resume(DMA2D_HandleTypeDef *hdma2d) +{ + /* Check the SUSP and START bits */ + if((hdma2d->Instance->CR & (DMA2D_CR_SUSP | DMA2D_CR_START)) == (DMA2D_CR_SUSP | DMA2D_CR_START)) + { + /* Ongoing transfer is suspended: change the DMA2D state before resuming */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + } + + /* Resume the DMA2D transfer */ + /* START bit is reset to make sure not to set it again, in the event the HW clears it + between the register read and the register write by the CPU (writing 0 has no + effect on START bitvalue). */ + CLEAR_BIT(hdma2d->Instance->CR, (DMA2D_CR_SUSP|DMA2D_CR_START)); + + return HAL_OK; +} + + +/** + * @brief Enable the DMA2D CLUT Transfer. 
+ * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param LayerIdx DMA2D Layer index. + * This parameter can be one of the following values: + * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_EnableCLUT(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_DMA2D_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hdma2d); + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + if(LayerIdx == DMA2D_BACKGROUND_LAYER) + { + /* Enable the background CLUT loading */ + SET_BIT(hdma2d->Instance->BGPFCCR, DMA2D_BGPFCCR_START); + } + else + { + /* Enable the foreground CLUT loading */ + SET_BIT(hdma2d->Instance->FGPFCCR, DMA2D_FGPFCCR_START); + } + + return HAL_OK; +} + + +/** + * @brief Start DMA2D CLUT Loading. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param CLUTCfg Pointer to a DMA2D_CLUTCfgTypeDef structure that contains + * the configuration information for the color look up table. + * @param LayerIdx DMA2D Layer index. + * This parameter can be one of the following values: + * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1) + * @note Invoking this API is similar to calling HAL_DMA2D_ConfigCLUT() then HAL_DMA2D_EnableCLUT(). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_CLUTLoad(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef CLUTCfg, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_DMA2D_LAYER(LayerIdx)); + assert_param(IS_DMA2D_CLUT_CM(CLUTCfg.CLUTColorMode)); + assert_param(IS_DMA2D_CLUT_SIZE(CLUTCfg.Size)); + + /* Process locked */ + __HAL_LOCK(hdma2d); + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Configure the CLUT of the background DMA2D layer */ + if(LayerIdx == DMA2D_BACKGROUND_LAYER) + { + /* Write background CLUT memory address */ + WRITE_REG(hdma2d->Instance->BGCMAR, (uint32_t)CLUTCfg.pCLUT); + + /* Write background CLUT size and CLUT color mode */ + MODIFY_REG(hdma2d->Instance->BGPFCCR, (DMA2D_BGPFCCR_CS | DMA2D_BGPFCCR_CCM), + ((CLUTCfg.Size << DMA2D_BGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_BGPFCCR_CCM_Pos))); + + /* Enable the CLUT loading for the background */ + SET_BIT(hdma2d->Instance->BGPFCCR, DMA2D_BGPFCCR_START); + } + /* Configure the CLUT of the foreground DMA2D layer */ + else + { + /* Write foreground CLUT memory address */ + WRITE_REG(hdma2d->Instance->FGCMAR, (uint32_t)CLUTCfg.pCLUT); + + /* Write foreground CLUT size and CLUT color mode */ + MODIFY_REG(hdma2d->Instance->FGPFCCR, (DMA2D_FGPFCCR_CS | DMA2D_FGPFCCR_CCM), + ((CLUTCfg.Size << DMA2D_FGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_FGPFCCR_CCM_Pos))); + + /* Enable the CLUT loading for the foreground */ + SET_BIT(hdma2d->Instance->FGPFCCR, DMA2D_FGPFCCR_START); + } + + return HAL_OK; +} + +/** + * @brief Start DMA2D CLUT Loading with interrupt enabled. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param CLUTCfg Pointer to a DMA2D_CLUTCfgTypeDef structure that contains + * the configuration information for the color look up table. + * @param LayerIdx DMA2D Layer index. 
+ * This parameter can be one of the following values: + * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_CLUTLoad_IT(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef CLUTCfg, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_DMA2D_LAYER(LayerIdx)); + assert_param(IS_DMA2D_CLUT_CM(CLUTCfg.CLUTColorMode)); + assert_param(IS_DMA2D_CLUT_SIZE(CLUTCfg.Size)); + + /* Process locked */ + __HAL_LOCK(hdma2d); + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Configure the CLUT of the background DMA2D layer */ + if(LayerIdx == DMA2D_BACKGROUND_LAYER) + { + /* Write background CLUT memory address */ + WRITE_REG(hdma2d->Instance->BGCMAR, (uint32_t)CLUTCfg.pCLUT); + + /* Write background CLUT size and CLUT color mode */ + MODIFY_REG(hdma2d->Instance->BGPFCCR, (DMA2D_BGPFCCR_CS | DMA2D_BGPFCCR_CCM), + ((CLUTCfg.Size << DMA2D_BGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_BGPFCCR_CCM_Pos))); + + /* Enable the CLUT Transfer Complete, transfer Error, configuration Error and CLUT Access Error interrupts */ + __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_CTC | DMA2D_IT_TE | DMA2D_IT_CE |DMA2D_IT_CAE); + + /* Enable the CLUT loading for the background */ + SET_BIT(hdma2d->Instance->BGPFCCR, DMA2D_BGPFCCR_START); + } + /* Configure the CLUT of the foreground DMA2D layer */ + else + { + /* Write foreground CLUT memory address */ + WRITE_REG(hdma2d->Instance->FGCMAR, (uint32_t)CLUTCfg.pCLUT); + + /* Write foreground CLUT size and CLUT color mode */ + MODIFY_REG(hdma2d->Instance->FGPFCCR, (DMA2D_FGPFCCR_CS | DMA2D_FGPFCCR_CCM), + ((CLUTCfg.Size << DMA2D_FGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_FGPFCCR_CCM_Pos))); + + /* Enable the CLUT Transfer Complete, transfer Error, configuration Error and CLUT Access Error interrupts */ + __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_CTC | DMA2D_IT_TE | DMA2D_IT_CE |DMA2D_IT_CAE); + + /* Enable the CLUT loading for the foreground */ + SET_BIT(hdma2d->Instance->FGPFCCR, DMA2D_FGPFCCR_START); + } + + return HAL_OK; +} + +/** + * @brief Abort the DMA2D CLUT loading. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param LayerIdx DMA2D Layer index. 
+ * This parameter can be one of the following values: + * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_CLUTLoading_Abort(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx) +{ + uint32_t tickstart; + const __IO uint32_t * reg = &(hdma2d->Instance->BGPFCCR); /* by default, point at background register */ + + /* Abort the CLUT loading */ + SET_BIT(hdma2d->Instance->CR, DMA2D_CR_ABORT); + + /* If foreground CLUT loading is considered, update local variables */ + if(LayerIdx == DMA2D_FOREGROUND_LAYER) + { + reg = &(hdma2d->Instance->FGPFCCR); + } + + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check if the CLUT loading is aborted */ + while((*reg & DMA2D_BGPFCCR_START) != 0U) + { + if((HAL_GetTick() - tickstart ) > DMA2D_TIMEOUT_ABORT) + { + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT; + + /* Change the DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_TIMEOUT; + } + } + + /* Disable the CLUT Transfer Complete, Transfer Error, Configuration Error and CLUT Access Error interrupts */ + __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_CTC | DMA2D_IT_TE | DMA2D_IT_CE |DMA2D_IT_CAE); + + /* Change the DMA2D state*/ + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_OK; +} + +/** + * @brief Suspend the DMA2D CLUT loading. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param LayerIdx DMA2D Layer index. + * This parameter can be one of the following values: + * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_CLUTLoading_Suspend(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx) +{ + uint32_t tickstart; + uint32_t loadsuspended; + const __IO uint32_t * reg = &(hdma2d->Instance->BGPFCCR); /* by default, point at background register */ + + /* Suspend the CLUT loading */ + SET_BIT(hdma2d->Instance->CR, DMA2D_CR_SUSP); + + /* If foreground CLUT loading is considered, update local variables */ + if(LayerIdx == DMA2D_FOREGROUND_LAYER) + { + reg = &(hdma2d->Instance->FGPFCCR); + } + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check if the CLUT loading is suspended */ + loadsuspended = ((hdma2d->Instance->CR & DMA2D_CR_SUSP) == DMA2D_CR_SUSP)? 1UL: 0UL; /*1st condition: Suspend Check*/ + loadsuspended |= ((*reg & DMA2D_BGPFCCR_START) != DMA2D_BGPFCCR_START)? 1UL: 0UL; /*2nd condition: Not Start Check */ + while (loadsuspended == 0UL) + { + if((HAL_GetTick() - tickstart ) > DMA2D_TIMEOUT_SUSPEND) + { + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT; + + /* Change the DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_TIMEOUT; + + return HAL_TIMEOUT; + } + loadsuspended = ((hdma2d->Instance->CR & DMA2D_CR_SUSP) == DMA2D_CR_SUSP)? 1UL: 0UL; /*1st condition: Suspend Check*/ + loadsuspended |= ((*reg & DMA2D_BGPFCCR_START) != DMA2D_BGPFCCR_START)? 1UL: 0UL; /*2nd condition: Not Start Check */ + } + + /* Check whether or not a transfer is actually suspended and change the DMA2D state accordingly */ + if ((*reg & DMA2D_BGPFCCR_START) != 0U) + { + hdma2d->State = HAL_DMA2D_STATE_SUSPEND; + } + else + { + /* Make sure SUSP bit is cleared since it is meaningless + when no tranfer is on-going */ + CLEAR_BIT(hdma2d->Instance->CR, DMA2D_CR_SUSP); + } + + return HAL_OK; +} + +/** + * @brief Resume the DMA2D CLUT loading. 
+ * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param LayerIdx DMA2D Layer index. + * This parameter can be one of the following values: + * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_CLUTLoading_Resume(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx) +{ + /* Check the SUSP and START bits for background or foreground CLUT loading */ + if(LayerIdx == DMA2D_BACKGROUND_LAYER) + { + /* Background CLUT loading suspension check */ + if ((hdma2d->Instance->CR & DMA2D_CR_SUSP) == DMA2D_CR_SUSP) + { + if((hdma2d->Instance->BGPFCCR & DMA2D_BGPFCCR_START) == DMA2D_BGPFCCR_START) + { + /* Ongoing CLUT loading is suspended: change the DMA2D state before resuming */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + } + } + } + else + { + /* Foreground CLUT loading suspension check */ + if ((hdma2d->Instance->CR & DMA2D_CR_SUSP) == DMA2D_CR_SUSP) + { + if ((hdma2d->Instance->FGPFCCR & DMA2D_FGPFCCR_START) == DMA2D_FGPFCCR_START) + { + /* Ongoing CLUT loading is suspended: change the DMA2D state before resuming */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + } + } + } + + /* Resume the CLUT loading */ + CLEAR_BIT(hdma2d->Instance->CR, DMA2D_CR_SUSP); + + return HAL_OK; +} + + +/** + + * @brief Polling for transfer complete or CLUT loading. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_PollForTransfer(DMA2D_HandleTypeDef *hdma2d, uint32_t Timeout) +{ + uint32_t tickstart; + uint32_t layer_start; + __IO uint32_t isrflags = 0x0U; + + /* Polling for DMA2D transfer */ + if((hdma2d->Instance->CR & DMA2D_CR_START) != 0U) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_DMA2D_GET_FLAG(hdma2d, DMA2D_FLAG_TC) == 0U) + { + isrflags = READ_REG(hdma2d->Instance->ISR); + if ((isrflags & (DMA2D_FLAG_CE|DMA2D_FLAG_TE)) != 0U) + { + if ((isrflags & DMA2D_FLAG_CE) != 0U) + { + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_CE; + } + if ((isrflags & DMA2D_FLAG_TE) != 0U) + { + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TE; + } + /* Clear the transfer and configuration error flags */ + __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_CE | DMA2D_FLAG_TE); + + /* Change DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_ERROR; + + /* Process unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_ERROR; + } + /* Check for the Timeout */ + if(Timeout != HAL_MAX_DELAY) + { + if(((HAL_GetTick() - tickstart ) > Timeout)||(Timeout == 0U)) + { + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT; + + /* Change the DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_TIMEOUT; + + /* Process unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_TIMEOUT; + } + } + } + } + /* Polling for CLUT loading (foreground or background) */ + layer_start = hdma2d->Instance->FGPFCCR & DMA2D_FGPFCCR_START; + layer_start |= hdma2d->Instance->BGPFCCR & DMA2D_BGPFCCR_START; + if (layer_start != 0U) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_DMA2D_GET_FLAG(hdma2d, DMA2D_FLAG_CTC) == 0U) + { + isrflags = READ_REG(hdma2d->Instance->ISR); + if ((isrflags & (DMA2D_FLAG_CAE|DMA2D_FLAG_CE|DMA2D_FLAG_TE)) != 0U) + { + if ((isrflags & DMA2D_FLAG_CAE) != 0U) + { + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_CAE; + } + if ((isrflags & DMA2D_FLAG_CE) != 0U) + { + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_CE; + } + if ((isrflags & DMA2D_FLAG_TE) != 0U) 
+ { + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TE; + } + /* Clear the CLUT Access Error, Configuration Error and Transfer Error flags */ + __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_CAE | DMA2D_FLAG_CE | DMA2D_FLAG_TE); + + /* Change DMA2D state */ + hdma2d->State= HAL_DMA2D_STATE_ERROR; + + /* Process unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_ERROR; + } + /* Check for the Timeout */ + if(Timeout != HAL_MAX_DELAY) + { + if(((HAL_GetTick() - tickstart ) > Timeout)||(Timeout == 0U)) + { + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT; + + /* Change the DMA2D state */ + hdma2d->State= HAL_DMA2D_STATE_TIMEOUT; + + /* Process unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_TIMEOUT; + } + } + } + } + + /* Clear the transfer complete and CLUT loading flags */ + __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_TC|DMA2D_FLAG_CTC); + + /* Change DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_OK; +} +/** + * @brief Handle DMA2D interrupt request. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @retval HAL status + */ +void HAL_DMA2D_IRQHandler(DMA2D_HandleTypeDef *hdma2d) +{ + uint32_t isrflags = READ_REG(hdma2d->Instance->ISR); + uint32_t crflags = READ_REG(hdma2d->Instance->CR); + + /* Transfer Error Interrupt management ***************************************/ + if ((isrflags & DMA2D_FLAG_TE) != 0U) + { + if ((crflags & DMA2D_IT_TE) != 0U) + { + /* Disable the transfer Error interrupt */ + __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_TE); + + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TE; + + /* Clear the transfer error flag */ + __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_TE); + + /* Change DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + if(hdma2d->XferErrorCallback != NULL) + { + /* Transfer error Callback */ + hdma2d->XferErrorCallback(hdma2d); + } + } + } + /* Configuration Error Interrupt management **********************************/ + if ((isrflags & DMA2D_FLAG_CE) != 0U) + { + if ((crflags & DMA2D_IT_CE) != 0U) + { + /* Disable the Configuration Error interrupt */ + __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_CE); + + /* Clear the Configuration error flag */ + __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_CE); + + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_CE; + + /* Change DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + if(hdma2d->XferErrorCallback != NULL) + { + /* Transfer error Callback */ + hdma2d->XferErrorCallback(hdma2d); + } + } + } + /* CLUT access Error Interrupt management ***********************************/ + if ((isrflags & DMA2D_FLAG_CAE) != 0U) + { + if ((crflags & DMA2D_IT_CAE) != 0U) + { + /* Disable the CLUT access error interrupt */ + __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_CAE); + + /* Clear the CLUT access error flag */ + __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_CAE); + + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_CAE; + + /* Change DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + if(hdma2d->XferErrorCallback != NULL) + { + /* Transfer error Callback */ + hdma2d->XferErrorCallback(hdma2d); + } + } + } + /* Transfer watermark Interrupt management **********************************/ + if ((isrflags & DMA2D_FLAG_TW) != 0U) + { + if ((crflags & DMA2D_IT_TW) != 0U) 
+ { + /* Disable the transfer watermark interrupt */ + __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_TW); + + /* Clear the transfer watermark flag */ + __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_TW); + + /* Transfer watermark Callback */ +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) + hdma2d->LineEventCallback(hdma2d); +#else + HAL_DMA2D_LineEventCallback(hdma2d); +#endif /* USE_HAL_DMA2D_REGISTER_CALLBACKS */ + + } + } + /* Transfer Complete Interrupt management ************************************/ + if ((isrflags & DMA2D_FLAG_TC) != 0U) + { + if ((crflags & DMA2D_IT_TC) != 0U) + { + /* Disable the transfer complete interrupt */ + __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_TC); + + /* Clear the transfer complete flag */ + __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_TC); + + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_NONE; + + /* Change DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + if(hdma2d->XferCpltCallback != NULL) + { + /* Transfer complete Callback */ + hdma2d->XferCpltCallback(hdma2d); + } + } + } + /* CLUT Transfer Complete Interrupt management ******************************/ + if ((isrflags & DMA2D_FLAG_CTC) != 0U) + { + if ((crflags & DMA2D_IT_CTC) != 0U) + { + /* Disable the CLUT transfer complete interrupt */ + __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_CTC); + + /* Clear the CLUT transfer complete flag */ + __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_CTC); + + /* Update error code */ + hdma2d->ErrorCode |= HAL_DMA2D_ERROR_NONE; + + /* Change DMA2D state */ + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + /* CLUT Transfer complete Callback */ +#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1) + hdma2d->CLUTLoadingCpltCallback(hdma2d); +#else + HAL_DMA2D_CLUTLoadingCpltCallback(hdma2d); +#endif /* USE_HAL_DMA2D_REGISTER_CALLBACKS */ + } + } + +} + +/** + * @brief Transfer watermark callback. + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @retval None + */ +__weak void HAL_DMA2D_LineEventCallback(DMA2D_HandleTypeDef *hdma2d) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdma2d); + + /* NOTE : This function should not be modified; when the callback is needed, + the HAL_DMA2D_LineEventCallback can be implemented in the user file. + */ +} + +/** + * @brief CLUT Transfer Complete callback. + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @retval None + */ +__weak void HAL_DMA2D_CLUTLoadingCpltCallback(DMA2D_HandleTypeDef *hdma2d) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdma2d); + + /* NOTE : This function should not be modified; when the callback is needed, + the HAL_DMA2D_CLUTLoadingCpltCallback can be implemented in the user file. + */ +} + +/** + * @} + */ + +/** @defgroup DMA2D_Exported_Functions_Group3 Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the DMA2D foreground or background layer parameters. + (+) Configure the DMA2D CLUT transfer. + (+) Configure the line watermark + (+) Configure the dead time value. 
+ (+) Enable or disable the dead time value functionality. + + +@endverbatim + * @{ + */ + +/** + * @brief Configure the DMA2D Layer according to the specified + * parameters in the DMA2D_HandleTypeDef. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param LayerIdx DMA2D Layer index. + * This parameter can be one of the following values: + * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_ConfigLayer(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx) +{ + DMA2D_LayerCfgTypeDef *pLayerCfg; + uint32_t regMask, regValue; + + /* Check the parameters */ + assert_param(IS_DMA2D_LAYER(LayerIdx)); + assert_param(IS_DMA2D_OFFSET(hdma2d->LayerCfg[LayerIdx].InputOffset)); + if(hdma2d->Init.Mode != DMA2D_R2M) + { + assert_param(IS_DMA2D_INPUT_COLOR_MODE(hdma2d->LayerCfg[LayerIdx].InputColorMode)); + if(hdma2d->Init.Mode != DMA2D_M2M) + { + assert_param(IS_DMA2D_ALPHA_MODE(hdma2d->LayerCfg[LayerIdx].AlphaMode)); + } + } +#if defined (DMA2D_ALPHA_INV_RB_SWAP_SUPPORT) + assert_param(IS_DMA2D_ALPHA_INVERTED(hdma2d->LayerCfg[LayerIdx].AlphaInverted)); + assert_param(IS_DMA2D_RB_SWAP(hdma2d->LayerCfg[LayerIdx].RedBlueSwap)); +#endif /* DMA2D_ALPHA_INV_RB_SWAP_SUPPORT */ + + /* Process locked */ + __HAL_LOCK(hdma2d); + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + pLayerCfg = &hdma2d->LayerCfg[LayerIdx]; + + /* Prepare the value to be written to the BGPFCCR or FGPFCCR register */ +#if defined (DMA2D_ALPHA_INV_RB_SWAP_SUPPORT) + regValue = pLayerCfg->InputColorMode | (pLayerCfg->AlphaMode << DMA2D_BGPFCCR_AM_Pos) |\ + (pLayerCfg->AlphaInverted << DMA2D_BGPFCCR_AI_Pos) | (pLayerCfg->RedBlueSwap << DMA2D_BGPFCCR_RBS_Pos); + regMask = (DMA2D_BGPFCCR_CM | DMA2D_BGPFCCR_AM | DMA2D_BGPFCCR_ALPHA | DMA2D_BGPFCCR_AI | DMA2D_BGPFCCR_RBS); +#else + regValue = pLayerCfg->InputColorMode | (pLayerCfg->AlphaMode << DMA2D_BGPFCCR_AM_Pos); + regMask = DMA2D_BGPFCCR_CM | DMA2D_BGPFCCR_AM | DMA2D_BGPFCCR_ALPHA; +#endif /* DMA2D_ALPHA_INV_RB_SWAP_SUPPORT */ + + + if ((pLayerCfg->InputColorMode == DMA2D_INPUT_A4) || (pLayerCfg->InputColorMode == DMA2D_INPUT_A8)) + { + regValue |= (pLayerCfg->InputAlpha & DMA2D_BGPFCCR_ALPHA); + } + else + { + regValue |= (pLayerCfg->InputAlpha << DMA2D_BGPFCCR_ALPHA_Pos); + } + + /* Configure the background DMA2D layer */ + if(LayerIdx == DMA2D_BACKGROUND_LAYER) + { + /* Write DMA2D BGPFCCR register */ + MODIFY_REG(hdma2d->Instance->BGPFCCR, regMask, regValue); + + /* DMA2D BGOR register configuration -------------------------------------*/ + WRITE_REG(hdma2d->Instance->BGOR, pLayerCfg->InputOffset); + + /* DMA2D BGCOLR register configuration -------------------------------------*/ + if ((pLayerCfg->InputColorMode == DMA2D_INPUT_A4) || (pLayerCfg->InputColorMode == DMA2D_INPUT_A8)) + { + WRITE_REG(hdma2d->Instance->BGCOLR, pLayerCfg->InputAlpha & (DMA2D_BGCOLR_BLUE|DMA2D_BGCOLR_GREEN|DMA2D_BGCOLR_RED)); + } + } + /* Configure the foreground DMA2D layer */ + else + { + + + /* Write DMA2D FGPFCCR register */ + MODIFY_REG(hdma2d->Instance->FGPFCCR, regMask, regValue); + + /* DMA2D FGOR register configuration -------------------------------------*/ + WRITE_REG(hdma2d->Instance->FGOR, pLayerCfg->InputOffset); + + /* DMA2D FGCOLR register configuration -------------------------------------*/ + if ((pLayerCfg->InputColorMode == DMA2D_INPUT_A4) || (pLayerCfg->InputColorMode == DMA2D_INPUT_A8)) + { + 
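+        /* A4 and A8 inputs use a fixed colour: bits 23:0 of InputAlpha hold the RGB value
+           written to FGCOLR below, while bits 31:24 supplied the fixed alpha already
+           written to FGPFCCR above */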
WRITE_REG(hdma2d->Instance->FGCOLR, pLayerCfg->InputAlpha & (DMA2D_FGCOLR_BLUE|DMA2D_FGCOLR_GREEN|DMA2D_FGCOLR_RED)); + } + } + /* Initialize the DMA2D state*/ + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_OK; +} + +/** + * @brief Configure the DMA2D CLUT Transfer. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param CLUTCfg Pointer to a DMA2D_CLUTCfgTypeDef structure that contains + * the configuration information for the color look up table. + * @param LayerIdx DMA2D Layer index. + * This parameter can be one of the following values: + * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_ConfigCLUT(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef CLUTCfg, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_DMA2D_LAYER(LayerIdx)); + assert_param(IS_DMA2D_CLUT_CM(CLUTCfg.CLUTColorMode)); + assert_param(IS_DMA2D_CLUT_SIZE(CLUTCfg.Size)); + + /* Process locked */ + __HAL_LOCK(hdma2d); + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Configure the CLUT of the background DMA2D layer */ + if(LayerIdx == DMA2D_BACKGROUND_LAYER) + { + /* Write background CLUT memory address */ + WRITE_REG(hdma2d->Instance->BGCMAR, (uint32_t)CLUTCfg.pCLUT); + + /* Write background CLUT size and CLUT color mode */ + MODIFY_REG(hdma2d->Instance->BGPFCCR, (DMA2D_BGPFCCR_CS | DMA2D_BGPFCCR_CCM), + ((CLUTCfg.Size << DMA2D_BGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_BGPFCCR_CCM_Pos))); + } + /* Configure the CLUT of the foreground DMA2D layer */ + else + { + /* Write foreground CLUT memory address */ + WRITE_REG(hdma2d->Instance->FGCMAR, (uint32_t)CLUTCfg.pCLUT); + + /* Write foreground CLUT size and CLUT color mode */ + MODIFY_REG(hdma2d->Instance->FGPFCCR, (DMA2D_FGPFCCR_CS | DMA2D_FGPFCCR_CCM), + ((CLUTCfg.Size << DMA2D_FGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_FGPFCCR_CCM_Pos))); + } + + /* Set the DMA2D state to Ready*/ + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_OK; +} + + +/** + * @brief Configure the line watermark. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @param Line Line Watermark configuration (maximum 16-bit long value expected). + * @note HAL_DMA2D_ProgramLineEvent() API enables the transfer watermark interrupt. + * @note The transfer watermark interrupt is disabled once it has occurred. + * @retval HAL status + */ + +HAL_StatusTypeDef HAL_DMA2D_ProgramLineEvent(DMA2D_HandleTypeDef *hdma2d, uint32_t Line) +{ + /* Check the parameters */ + assert_param(IS_DMA2D_LINEWATERMARK(Line)); + + if (Line > DMA2D_LWR_LW) + { + return HAL_ERROR; + } + else + { + /* Process locked */ + __HAL_LOCK(hdma2d); + + /* Change DMA2D peripheral state */ + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Sets the Line watermark configuration */ + WRITE_REG(hdma2d->Instance->LWR, Line); + + /* Enable the Line interrupt */ + __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_TW); + + /* Initialize the DMA2D state*/ + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_OK; + } +} + +/** + * @brief Enable DMA2D dead time feature. + * @param hdma2d DMA2D handle. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_EnableDeadTime(DMA2D_HandleTypeDef *hdma2d) +{ + /* Process Locked */ + __HAL_LOCK(hdma2d); + + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Set DMA2D_AMTCR EN bit */ + SET_BIT(hdma2d->Instance->AMTCR, DMA2D_AMTCR_EN); + + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_OK; +} + +/** + * @brief Disable DMA2D dead time feature. + * @param hdma2d DMA2D handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_DisableDeadTime(DMA2D_HandleTypeDef *hdma2d) +{ + /* Process Locked */ + __HAL_LOCK(hdma2d); + + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Clear DMA2D_AMTCR EN bit */ + CLEAR_BIT(hdma2d->Instance->AMTCR, DMA2D_AMTCR_EN); + + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_OK; +} + +/** + * @brief Configure dead time. + * @note The dead time value represents the guaranteed minimum number of cycles between + * two consecutive transactions on the AHB bus. + * @param hdma2d DMA2D handle. + * @param DeadTime dead time value. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA2D_ConfigDeadTime(DMA2D_HandleTypeDef *hdma2d, uint8_t DeadTime) +{ + /* Process Locked */ + __HAL_LOCK(hdma2d); + + hdma2d->State = HAL_DMA2D_STATE_BUSY; + + /* Set DMA2D_AMTCR DT field */ + MODIFY_REG(hdma2d->Instance->AMTCR, DMA2D_AMTCR_DT, (((uint32_t) DeadTime) << DMA2D_AMTCR_DT_Pos)); + + hdma2d->State = HAL_DMA2D_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma2d); + + return HAL_OK; +} + +/** + * @} + */ + + +/** @defgroup DMA2D_Exported_Functions_Group4 Peripheral State and Error functions + * @brief Peripheral State functions + * +@verbatim + =============================================================================== + ##### Peripheral State and Errors functions ##### + =============================================================================== + [..] + This subsection provides functions allowing to: + (+) Get the DMA2D state + (+) Get the DMA2D error code + +@endverbatim + * @{ + */ + +/** + * @brief Return the DMA2D state + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the DMA2D. + * @retval HAL state + */ +HAL_DMA2D_StateTypeDef HAL_DMA2D_GetState(DMA2D_HandleTypeDef *hdma2d) +{ + return hdma2d->State; +} + +/** + * @brief Return the DMA2D error code + * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for DMA2D. + * @retval DMA2D Error Code + */ +uint32_t HAL_DMA2D_GetError(DMA2D_HandleTypeDef *hdma2d) +{ + return hdma2d->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + + +/** @defgroup DMA2D_Private_Functions DMA2D Private Functions + * @{ + */ + +/** + * @brief Set the DMA2D transfer parameters. + * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains + * the configuration information for the specified DMA2D. + * @param pdata The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param Width The width of data to be transferred from source to destination. + * @param Height The height of data to be transferred from source to destination. 
+ * @retval HAL status + */ +static void DMA2D_SetConfig(DMA2D_HandleTypeDef *hdma2d, uint32_t pdata, uint32_t DstAddress, uint32_t Width, uint32_t Height) +{ + uint32_t tmp; + uint32_t tmp1; + uint32_t tmp2; + uint32_t tmp3; + uint32_t tmp4; + + /* Configure DMA2D data size */ + MODIFY_REG(hdma2d->Instance->NLR, (DMA2D_NLR_NL|DMA2D_NLR_PL), (Height| (Width << DMA2D_NLR_PL_Pos))); + + /* Configure DMA2D destination address */ + WRITE_REG(hdma2d->Instance->OMAR, DstAddress); + + /* Register to memory DMA2D mode selected */ + if (hdma2d->Init.Mode == DMA2D_R2M) + { + tmp1 = pdata & DMA2D_OCOLR_ALPHA_1; + tmp2 = pdata & DMA2D_OCOLR_RED_1; + tmp3 = pdata & DMA2D_OCOLR_GREEN_1; + tmp4 = pdata & DMA2D_OCOLR_BLUE_1; + + /* Prepare the value to be written to the OCOLR register according to the color mode */ + if (hdma2d->Init.ColorMode == DMA2D_OUTPUT_ARGB8888) + { + tmp = (tmp3 | tmp2 | tmp1| tmp4); + } + else if (hdma2d->Init.ColorMode == DMA2D_OUTPUT_RGB888) + { + tmp = (tmp3 | tmp2 | tmp4); + } + else if (hdma2d->Init.ColorMode == DMA2D_OUTPUT_RGB565) + { + tmp2 = (tmp2 >> 19U); + tmp3 = (tmp3 >> 10U); + tmp4 = (tmp4 >> 3U ); + tmp = ((tmp3 << 5U) | (tmp2 << 11U) | tmp4); + } + else if (hdma2d->Init.ColorMode == DMA2D_OUTPUT_ARGB1555) + { + tmp1 = (tmp1 >> 31U); + tmp2 = (tmp2 >> 19U); + tmp3 = (tmp3 >> 11U); + tmp4 = (tmp4 >> 3U ); + tmp = ((tmp3 << 5U) | (tmp2 << 10U) | (tmp1 << 15U) | tmp4); + } + else /* Dhdma2d->Init.ColorMode = DMA2D_OUTPUT_ARGB4444 */ + { + tmp1 = (tmp1 >> 28U); + tmp2 = (tmp2 >> 20U); + tmp3 = (tmp3 >> 12U); + tmp4 = (tmp4 >> 4U ); + tmp = ((tmp3 << 4U) | (tmp2 << 8U) | (tmp1 << 12U) | tmp4); + } + /* Write to DMA2D OCOLR register */ + WRITE_REG(hdma2d->Instance->OCOLR, tmp); + } + else /* M2M, M2M_PFC or M2M_Blending DMA2D Mode */ + { + /* Configure DMA2D source address */ + WRITE_REG(hdma2d->Instance->FGMAR, pdata); + } +} + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* DMA2D */ +#endif /* HAL_DMA2D_MODULE_ENABLED */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c new file mode 100644 index 000000000..cb96197b9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c @@ -0,0 +1,310 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dma_ex.c + * @author MCD Application Team + * @brief DMA Extension HAL module driver + * This file provides firmware functions to manage the following + * functionalities of the DMA Extension peripheral: + * + Extended features functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The DMA Extension HAL driver can be used as follows: + (+) Start a multi buffer transfer using the HAL_DMA_MultiBufferStart() function + for polling mode or HAL_DMA_MultiBufferStart_IT() for interrupt mode. + + -@- In Memory-to-Memory transfer mode, Multi (Double) Buffer mode is not allowed. + -@- When Multi (Double) Buffer mode is enabled, the transfer is circular by default. 
+ -@- In Multi (Double) buffer mode, it is possible to update the base address for + the AHB memory port on the fly (DMA_SxM0AR or DMA_SxM1AR) when the stream is enabled. + + @endverbatim + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup DMAEx DMAEx + * @brief DMA Extended HAL module driver + * @{ + */ + +#ifdef HAL_DMA_MODULE_ENABLED + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private Constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @addtogroup DMAEx_Private_Functions + * @{ + */ + +static void DMA_MultiBufferSetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); + +/** + * @} + */ + +/* Exported functions ---------------------------------------------------------*/ + +/** @addtogroup DMAEx_Exported_Functions + * @{ + */ + + +/** @addtogroup DMAEx_Exported_Functions_Group1 + * +@verbatim + =============================================================================== + ##### Extended features functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the source, destination address and data length and + Start MultiBuffer DMA transfer + (+) Configure the source, destination address and data length and + Start MultiBuffer DMA transfer with interrupt + (+) Change on the fly the memory0 or memory1 address. + +@endverbatim + * @{ + */ + + +/** + * @brief Starts the multi_buffer DMA Transfer. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param SecondMemAddress The second memory Buffer address in case of multi buffer Transfer + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Memory-to-memory transfer not supported in double buffering mode */ + if (hdma->Init.Direction == DMA_MEMORY_TO_MEMORY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NOT_SUPPORTED; + status = HAL_ERROR; + } + else + { + /* Process Locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Enable the double buffer mode */ + hdma->Instance->CR |= (uint32_t)DMA_SxCR_DBM; + + /* Configure DMA Stream destination address */ + hdma->Instance->M1AR = SecondMemAddress; + + /* Configure the source, destination address and the data length */ + DMA_MultiBufferSetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Enable the peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Return error status */ + status = HAL_BUSY; + } + } + return status; +} + +/** + * @brief Starts the multi_buffer DMA Transfer with interrupt enabled. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param SecondMemAddress The second memory Buffer address in case of multi buffer Transfer + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Memory-to-memory transfer not supported in double buffering mode */ + if (hdma->Init.Direction == DMA_MEMORY_TO_MEMORY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NOT_SUPPORTED; + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Enable the Double buffer mode */ + hdma->Instance->CR |= (uint32_t)DMA_SxCR_DBM; + + /* Configure DMA Stream destination address */ + hdma->Instance->M1AR = SecondMemAddress; + + /* Configure the source, destination address and the data length */ + DMA_MultiBufferSetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Clear all flags */ + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_TC_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_HT_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_TE_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_DME_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_FE_FLAG_INDEX(hdma)); + + /* Enable Common interrupts*/ + hdma->Instance->CR |= DMA_IT_TC | DMA_IT_TE | DMA_IT_DME; + hdma->Instance->FCR |= DMA_IT_FE; + + 
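+    /* Enable the Half Transfer interrupt only when a half transfer complete callback
+       has been registered for memory 0 or memory 1, so no unused interrupt is generated */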
if((hdma->XferHalfCpltCallback != NULL) || (hdma->XferM1HalfCpltCallback != NULL)) + { + hdma->Instance->CR |= DMA_IT_HT; + } + + /* Enable the peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdma); + + /* Return error status */ + status = HAL_BUSY; + } + return status; +} + +/** + * @brief Change the memory0 or memory1 address on the fly. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param Address The new address + * @param memory the memory to be changed, This parameter can be one of + * the following values: + * MEMORY0 / + * MEMORY1 + * @note The MEMORY0 address can be changed only when the current transfer use + * MEMORY1 and the MEMORY1 address can be changed only when the current + * transfer use MEMORY0. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMAEx_ChangeMemory(DMA_HandleTypeDef *hdma, uint32_t Address, HAL_DMA_MemoryTypeDef memory) +{ + if(memory == MEMORY0) + { + /* change the memory0 address */ + hdma->Instance->M0AR = Address; + } + else + { + /* change the memory1 address */ + hdma->Instance->M1AR = Address; + } + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup DMAEx_Private_Functions + * @{ + */ + +/** + * @brief Set the DMA Transfer parameter. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +static void DMA_MultiBufferSetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + /* Configure DMA Stream data length */ + hdma->Instance->NDTR = DataLength; + + /* Peripheral to Memory */ + if((hdma->Init.Direction) == DMA_MEMORY_TO_PERIPH) + { + /* Configure DMA Stream destination address */ + hdma->Instance->PAR = DstAddress; + + /* Configure DMA Stream source address */ + hdma->Instance->M0AR = SrcAddress; + } + /* Memory to Peripheral */ + else + { + /* Configure DMA Stream source address */ + hdma->Instance->PAR = SrcAddress; + + /* Configure DMA Stream destination address */ + hdma->Instance->M0AR = DstAddress; + } +} + +/** + * @} + */ + +#endif /* HAL_DMA_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dsi.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dsi.c new file mode 100644 index 000000000..a01a6defd --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dsi.c @@ -0,0 +1,2705 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dsi.c + * @author MCD Application Team + * @brief DSI HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the DSI peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State and Errors functions + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Use @ref HAL_DSI_Init() function to initialize the DSI Host IP and program the required + PLL parameters, number of lanes and TX Escape clock divider. + (#) Use @ref HAL_DSI_ConfigAdaptedCommandMode() function to configure the DSI host in adapted + command mode. + (#) When operating in video mode , use @ref HAL_DSI_ConfigVideoMode() to configure the DSI host. + (#) Function @ref HAL_DSI_ConfigCommand() is used to configure the DSI commands behavior in low power mode. + (#) To configure the DSI PHY timings parameters, use function @ref HAL_DSI_ConfigPhyTimer(). + (#) The DSI Host can be started/stopped using respectively functions @ref HAL_DSI_Start() and @ref HAL_DSI_Stop(). + Functions @ref HAL_DSI_ShortWrite(), @ref HAL_DSI_LongWrite() and @ref HAL_DSI_Read() allows respectively + to write DSI short packets, long packets and to read DSI packets. + + (#) The DSI Host Offers two Low power modes : + (+) Low Power Mode on data lanes only: Only DSI data lanes are shut down. + It is possible to enter/exit from this mode using respectively functions @ref HAL_DSI_EnterULPMData() + and @ref HAL_DSI_ExitULPMData() + + (+) Low Power Mode on data and clock lanes : All DSI lanes are shut down including data and clock lanes. + It is possible to enter/exit from this mode using respectively functions @ref HAL_DSI_EnterULPM() + and @ref HAL_DSI_ExitULPM() + + (#) User can select the DSI errors to be reported/monitored using function @ref HAL_DSI_ConfigErrorMonitor() + When an error occurs, the callback @ref HAL_DSI_ErrorCallback() is asserted and then user can retrieve + the error code by calling function @ref HAL_DSI_GetError() + + (#) To control DSI state you can use the following function: HAL_DSI_GetState() + + *** DSI HAL driver macros list *** + ============================================= + [..] + Below the list of most used macros in DSI HAL driver. + + (+) __HAL_DSI_ENABLE: Enable the DSI Host. + (+) __HAL_DSI_DISABLE: Disable the DSI Host. + (+) __HAL_DSI_WRAPPER_ENABLE: Enables the DSI wrapper. + (+) __HAL_DSI_WRAPPER_DISABLE: Disable the DSI wrapper. + (+) __HAL_DSI_PLL_ENABLE: Enables the DSI PLL. + (+) __HAL_DSI_PLL_DISABLE: Disables the DSI PLL. + (+) __HAL_DSI_REG_ENABLE: Enables the DSI regulator. + (+) __HAL_DSI_REG_DISABLE: Disables the DSI regulator. + (+) __HAL_DSI_GET_FLAG: Get the DSI pending flags. + (+) __HAL_DSI_CLEAR_FLAG: Clears the DSI pending flags. + (+) __HAL_DSI_ENABLE_IT: Enables the specified DSI interrupts. + (+) __HAL_DSI_DISABLE_IT: Disables the specified DSI interrupts. + (+) __HAL_DSI_GET_IT_SOURCE: Checks whether the specified DSI interrupt source is enabled or not. + + + + *** Callback registration *** + ============================================= + + The compilation define USE_HAL_DSI_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + Use Function @ref HAL_DSI_RegisterCallback() to register a callback. 
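+
+          As a minimal sketch (identifiers are illustrative only: it assumes
+          USE_HAL_DSI_REGISTER_CALLBACKS is set to 1 and MyDSI_ErrorCallback is a user
+          function matching the pDSI_CallbackTypeDef signature), registering the error
+          callback could look like:
+          HAL_DSI_RegisterCallback(&hdsi, HAL_DSI_ERROR_CB_ID, MyDSI_ErrorCallback);
+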
+ + Function @ref HAL_DSI_RegisterCallback() allows to register following callbacks: + (+) TearingEffectCallback : DSI Tearing Effect Callback. + (+) EndOfRefreshCallback : DSI End Of Refresh Callback. + (+) ErrorCallback : DSI Error Callback + (+) MspInitCallback : DSI MspInit. + (+) MspDeInitCallback : DSI MspDeInit. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + Use function @ref HAL_DSI_UnRegisterCallback() to reset a callback to the default + weak function. + @ref HAL_DSI_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) TearingEffectCallback : DSI Tearing Effect Callback. + (+) EndOfRefreshCallback : DSI End Of Refresh Callback. + (+) ErrorCallback : DSI Error Callback + (+) MspInitCallback : DSI MspInit. + (+) MspDeInitCallback : DSI MspDeInit. + + By default, after the HAL_DSI_Init and when the state is HAL_DSI_STATE_RESET + all callbacks are set to the corresponding weak functions: + examples @ref HAL_DSI_TearingEffectCallback(), @ref HAL_DSI_EndOfRefreshCallback(). + Exception done for MspInit and MspDeInit functions that are + reset to the legacy weak function in the HAL_DSI_Init/ @ref HAL_DSI_DeInit only when + these callbacks are null (not registered beforehand). + if not, MspInit or MspDeInit are not null, the @ref HAL_DSI_Init/ @ref HAL_DSI_DeInit + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) + + Callbacks can be registered/unregistered in HAL_DSI_STATE_READY state only. + Exception done MspInit/MspDeInit that can be registered/unregistered + in HAL_DSI_STATE_READY or HAL_DSI_STATE_RESET state, + thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using @ref HAL_DSI_RegisterCallback() before calling @ref HAL_DSI_DeInit + or HAL_DSI_Init function. + + When The compilation define USE_HAL_DSI_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. + + [..] + (@) You can refer to the DSI HAL driver header file for more useful macros + + @endverbatim + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +#ifdef HAL_DSI_MODULE_ENABLED + +#if defined(DSI) + +/** @addtogroup DSI + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private defines -----------------------------------------------------------*/ +/** @addtogroup DSI_Private_Constants + * @{ + */ +#define DSI_TIMEOUT_VALUE ((uint32_t)1000U) /* 1s */ + +#define DSI_ERROR_ACK_MASK (DSI_ISR0_AE0 | DSI_ISR0_AE1 | DSI_ISR0_AE2 | DSI_ISR0_AE3 | \ + DSI_ISR0_AE4 | DSI_ISR0_AE5 | DSI_ISR0_AE6 | DSI_ISR0_AE7 | \ + DSI_ISR0_AE8 | DSI_ISR0_AE9 | DSI_ISR0_AE10 | DSI_ISR0_AE11 | \ + DSI_ISR0_AE12 | DSI_ISR0_AE13 | DSI_ISR0_AE14 | DSI_ISR0_AE15) +#define DSI_ERROR_PHY_MASK (DSI_ISR0_PE0 | DSI_ISR0_PE1 | DSI_ISR0_PE2 | DSI_ISR0_PE3 | DSI_ISR0_PE4) +#define DSI_ERROR_TX_MASK DSI_ISR1_TOHSTX +#define DSI_ERROR_RX_MASK DSI_ISR1_TOLPRX +#define DSI_ERROR_ECC_MASK (DSI_ISR1_ECCSE | DSI_ISR1_ECCME) +#define DSI_ERROR_CRC_MASK DSI_ISR1_CRCE +#define DSI_ERROR_PSE_MASK DSI_ISR1_PSE +#define DSI_ERROR_EOT_MASK DSI_ISR1_EOTPE +#define DSI_ERROR_OVF_MASK DSI_ISR1_LPWRE +#define DSI_ERROR_GEN_MASK (DSI_ISR1_GCWRE | DSI_ISR1_GPWRE | DSI_ISR1_GPTXE | DSI_ISR1_GPRDE | DSI_ISR1_GPRXE) +/** + * @} + */ + +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +static void DSI_ConfigPacketHeader(DSI_TypeDef *DSIx, uint32_t ChannelID, uint32_t DataType, uint32_t Data0, + uint32_t Data1); + +static HAL_StatusTypeDef DSI_ShortWrite(DSI_HandleTypeDef *hdsi, + uint32_t ChannelID, + uint32_t Mode, + uint32_t Param1, + uint32_t Param2); + +/* Private functions ---------------------------------------------------------*/ +/** + * @brief Generic DSI packet header configuration + * @param DSIx Pointer to DSI register base + * @param ChannelID Virtual channel ID of the header packet + * @param DataType Packet data type of the header packet + * This parameter can be any value of : + * @ref DSI_SHORT_WRITE_PKT_Data_Type + * or @ref DSI_LONG_WRITE_PKT_Data_Type + * or @ref DSI_SHORT_READ_PKT_Data_Type + * or DSI_MAX_RETURN_PKT_SIZE + * @param Data0 Word count LSB + * @param Data1 Word count MSB + * @retval None + */ +static void DSI_ConfigPacketHeader(DSI_TypeDef *DSIx, + uint32_t ChannelID, + uint32_t DataType, + uint32_t Data0, + uint32_t Data1) +{ + /* Update the DSI packet header with new information */ + DSIx->GHCR = (DataType | (ChannelID << 6U) | (Data0 << 8U) | (Data1 << 16U)); +} + +/** + * @brief write short DCS or short Generic command + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param ChannelID Virtual channel ID. + * @param Mode DSI short packet data type. + * This parameter can be any value of @ref DSI_SHORT_WRITE_PKT_Data_Type. 
+ * @param Param1 DSC command or first generic parameter. + * This parameter can be any value of @ref DSI_DCS_Command or a + * generic command code. + * @param Param2 DSC parameter or second generic parameter. + * @retval HAL status + */ +static HAL_StatusTypeDef DSI_ShortWrite(DSI_HandleTypeDef *hdsi, + uint32_t ChannelID, + uint32_t Mode, + uint32_t Param1, + uint32_t Param2) +{ + uint32_t tickstart; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait for Command FIFO Empty */ + while((hdsi->Instance->GPSR & DSI_GPSR_CMDFE) == 0U) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > DSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Configure the packet to send a short DCS command with 0 or 1 parameter */ + /* Update the DSI packet header with new information */ + hdsi->Instance->GHCR = (Mode | (ChannelID << 6U) | (Param1 << 8U) | (Param2 << 16U)); + + return HAL_OK; +} + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup DSI_Exported_Functions + * @{ + */ + +/** @defgroup DSI_Group1 Initialization and Configuration functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initialize and configure the DSI + (+) De-initialize the DSI + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the DSI according to the specified + * parameters in the DSI_InitTypeDef and create the associated handle. + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param PLLInit pointer to a DSI_PLLInitTypeDef structure that contains + * the PLL Clock structure definition for the DSI. 
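+ * @note  Illustrative usage sketch, not part of the original driver: the handle,
+ *        PLL and lane values below are assumptions that depend on the board,
+ *        HSE frequency and panel.
+ * @code
+ *        DSI_HandleTypeDef  hdsi = {0};
+ *        DSI_PLLInitTypeDef pll  = {0};
+ *        hdsi.Instance                       = DSI;
+ *        hdsi.Init.NumberOfLanes             = DSI_TWO_DATA_LANES;
+ *        hdsi.Init.TXEscapeCkdiv             = 4U;
+ *        hdsi.Init.AutomaticClockLaneControl = DSI_AUTO_CLK_LANE_CTRL_DISABLE;
+ *        pll.PLLNDIV = 125U;              /* assumed PLL settings for this sketch */
+ *        pll.PLLIDF  = DSI_PLL_IN_DIV2;
+ *        pll.PLLODF  = DSI_PLL_OUT_DIV1;
+ *        HAL_DSI_Init(&hdsi, &pll);
+ * @endcode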
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_Init(DSI_HandleTypeDef *hdsi, DSI_PLLInitTypeDef *PLLInit) +{ + uint32_t tickstart; + uint32_t unitIntervalx4; + uint32_t tempIDF; + + /* Check the DSI handle allocation */ + if (hdsi == NULL) + { + return HAL_ERROR; + } + + /* Check function parameters */ + assert_param(IS_DSI_PLL_NDIV(PLLInit->PLLNDIV)); + assert_param(IS_DSI_PLL_IDF(PLLInit->PLLIDF)); + assert_param(IS_DSI_PLL_ODF(PLLInit->PLLODF)); + assert_param(IS_DSI_AUTO_CLKLANE_CONTROL(hdsi->Init.AutomaticClockLaneControl)); + assert_param(IS_DSI_NUMBER_OF_LANES(hdsi->Init.NumberOfLanes)); + +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) + if (hdsi->State == HAL_DSI_STATE_RESET) + { + /* Reset the DSI callback to the legacy weak callbacks */ + hdsi->TearingEffectCallback = HAL_DSI_TearingEffectCallback; /* Legacy weak TearingEffectCallback */ + hdsi->EndOfRefreshCallback = HAL_DSI_EndOfRefreshCallback; /* Legacy weak EndOfRefreshCallback */ + hdsi->ErrorCallback = HAL_DSI_ErrorCallback; /* Legacy weak ErrorCallback */ + + if (hdsi->MspInitCallback == NULL) + { + hdsi->MspInitCallback = HAL_DSI_MspInit; + } + /* Initialize the low level hardware */ + hdsi->MspInitCallback(hdsi); + } +#else + if (hdsi->State == HAL_DSI_STATE_RESET) + { + /* Initialize the low level hardware */ + HAL_DSI_MspInit(hdsi); + } +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ + + /* Change DSI peripheral state */ + hdsi->State = HAL_DSI_STATE_BUSY; + + /**************** Turn on the regulator and enable the DSI PLL ****************/ + + /* Enable the regulator */ + __HAL_DSI_REG_ENABLE(hdsi); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until the regulator is ready */ + while (__HAL_DSI_GET_FLAG(hdsi, DSI_FLAG_RRS) == 0U) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Set the PLL division factors */ + hdsi->Instance->WRPCR &= ~(DSI_WRPCR_PLL_NDIV | DSI_WRPCR_PLL_IDF | DSI_WRPCR_PLL_ODF); + hdsi->Instance->WRPCR |= (((PLLInit->PLLNDIV) << 2U) | ((PLLInit->PLLIDF) << 11U) | ((PLLInit->PLLODF) << 16U)); + + /* Enable the DSI PLL */ + __HAL_DSI_PLL_ENABLE(hdsi); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait for the lock of the PLL */ + while (__HAL_DSI_GET_FLAG(hdsi, DSI_FLAG_PLLLS) == 0U) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /*************************** Set the PHY parameters ***************************/ + + /* D-PHY clock and digital enable*/ + hdsi->Instance->PCTLR |= (DSI_PCTLR_CKE | DSI_PCTLR_DEN); + + /* Clock lane configuration */ + hdsi->Instance->CLCR &= ~(DSI_CLCR_DPCC | DSI_CLCR_ACR); + hdsi->Instance->CLCR |= (DSI_CLCR_DPCC | hdsi->Init.AutomaticClockLaneControl); + + /* Configure the number of active data lanes */ + hdsi->Instance->PCONFR &= ~DSI_PCONFR_NL; + hdsi->Instance->PCONFR |= hdsi->Init.NumberOfLanes; + + /************************ Set the DSI clock parameters ************************/ + + /* Set the TX escape clock division factor */ + hdsi->Instance->CCR &= ~DSI_CCR_TXECKDIV; + hdsi->Instance->CCR |= hdsi->Init.TXEscapeCkdiv; + + /* Calculate the bit period in high-speed mode in unit of 0.25 ns (UIX4) */ + /* The equation is : UIX4 = IntegerPart( (1000/F_PHY_Mhz) * 4 ) */ + /* Where : F_PHY_Mhz = (NDIV * HSE_Mhz) / (IDF * ODF) */ + tempIDF = (PLLInit->PLLIDF > 0U) ? 
PLLInit->PLLIDF : 1U; + unitIntervalx4 = (4000000U * tempIDF * ((1UL << (0x3U & PLLInit->PLLODF)))) / ((HSE_VALUE / 1000U) * PLLInit->PLLNDIV); + + /* Set the bit period in high-speed mode */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_UIX4; + hdsi->Instance->WPCR[0U] |= unitIntervalx4; + + /****************************** Error management *****************************/ + + /* Disable all error interrupts and reset the Error Mask */ + hdsi->Instance->IER[0U] = 0U; + hdsi->Instance->IER[1U] = 0U; + hdsi->ErrorMsk = 0U; + + /* Initialise the error code */ + hdsi->ErrorCode = HAL_DSI_ERROR_NONE; + + /* Initialize the DSI state*/ + hdsi->State = HAL_DSI_STATE_READY; + + return HAL_OK; +} + +/** + * @brief De-initializes the DSI peripheral registers to their default reset + * values. + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_DeInit(DSI_HandleTypeDef *hdsi) +{ + /* Check the DSI handle allocation */ + if (hdsi == NULL) + { + return HAL_ERROR; + } + + /* Change DSI peripheral state */ + hdsi->State = HAL_DSI_STATE_BUSY; + + /* Disable the DSI wrapper */ + __HAL_DSI_WRAPPER_DISABLE(hdsi); + + /* Disable the DSI host */ + __HAL_DSI_DISABLE(hdsi); + + /* D-PHY clock and digital disable */ + hdsi->Instance->PCTLR &= ~(DSI_PCTLR_CKE | DSI_PCTLR_DEN); + + /* Turn off the DSI PLL */ + __HAL_DSI_PLL_DISABLE(hdsi); + + /* Disable the regulator */ + __HAL_DSI_REG_DISABLE(hdsi); + +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) + if (hdsi->MspDeInitCallback == NULL) + { + hdsi->MspDeInitCallback = HAL_DSI_MspDeInit; + } + /* DeInit the low level hardware */ + hdsi->MspDeInitCallback(hdsi); +#else + /* DeInit the low level hardware */ + HAL_DSI_MspDeInit(hdsi); +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ + + /* Initialise the error code */ + hdsi->ErrorCode = HAL_DSI_ERROR_NONE; + + /* Initialize the DSI state*/ + hdsi->State = HAL_DSI_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Enable the error monitor flags + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param ActiveErrors indicates which error interrupts will be enabled. + * This parameter can be any combination of @ref DSI_Error_Data_Type. 
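+ * @note  Illustrative call, not part of the original driver: enable monitoring of
+ *        the acknowledge and PHY error groups only (both masks are defined above).
+ * @code
+ *        HAL_DSI_ConfigErrorMonitor(&hdsi, HAL_DSI_ERROR_ACK | HAL_DSI_ERROR_PHY);
+ * @endcode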
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ConfigErrorMonitor(DSI_HandleTypeDef *hdsi, uint32_t ActiveErrors) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + hdsi->Instance->IER[0U] = 0U; + hdsi->Instance->IER[1U] = 0U; + + /* Store active errors to the handle */ + hdsi->ErrorMsk = ActiveErrors; + + if ((ActiveErrors & HAL_DSI_ERROR_ACK) != 0U) + { + /* Enable the interrupt generation on selected errors */ + hdsi->Instance->IER[0U] |= DSI_ERROR_ACK_MASK; + } + + if ((ActiveErrors & HAL_DSI_ERROR_PHY) != 0U) + { + /* Enable the interrupt generation on selected errors */ + hdsi->Instance->IER[0U] |= DSI_ERROR_PHY_MASK; + } + + if ((ActiveErrors & HAL_DSI_ERROR_TX) != 0U) + { + /* Enable the interrupt generation on selected errors */ + hdsi->Instance->IER[1U] |= DSI_ERROR_TX_MASK; + } + + if ((ActiveErrors & HAL_DSI_ERROR_RX) != 0U) + { + /* Enable the interrupt generation on selected errors */ + hdsi->Instance->IER[1U] |= DSI_ERROR_RX_MASK; + } + + if ((ActiveErrors & HAL_DSI_ERROR_ECC) != 0U) + { + /* Enable the interrupt generation on selected errors */ + hdsi->Instance->IER[1U] |= DSI_ERROR_ECC_MASK; + } + + if ((ActiveErrors & HAL_DSI_ERROR_CRC) != 0U) + { + /* Enable the interrupt generation on selected errors */ + hdsi->Instance->IER[1U] |= DSI_ERROR_CRC_MASK; + } + + if ((ActiveErrors & HAL_DSI_ERROR_PSE) != 0U) + { + /* Enable the interrupt generation on selected errors */ + hdsi->Instance->IER[1U] |= DSI_ERROR_PSE_MASK; + } + + if ((ActiveErrors & HAL_DSI_ERROR_EOT) != 0U) + { + /* Enable the interrupt generation on selected errors */ + hdsi->Instance->IER[1U] |= DSI_ERROR_EOT_MASK; + } + + if ((ActiveErrors & HAL_DSI_ERROR_OVF) != 0U) + { + /* Enable the interrupt generation on selected errors */ + hdsi->Instance->IER[1U] |= DSI_ERROR_OVF_MASK; + } + + if ((ActiveErrors & HAL_DSI_ERROR_GEN) != 0U) + { + /* Enable the interrupt generation on selected errors */ + hdsi->Instance->IER[1U] |= DSI_ERROR_GEN_MASK; + } + + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Initializes the DSI MSP. + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval None + */ +__weak void HAL_DSI_MspInit(DSI_HandleTypeDef *hdsi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdsi); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DSI_MspInit could be implemented in the user file + */ +} + +/** + * @brief De-initializes the DSI MSP. + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. 
+ * @retval None + */ +__weak void HAL_DSI_MspDeInit(DSI_HandleTypeDef *hdsi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdsi); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DSI_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User DSI Callback + * To be used instead of the weak predefined callback + * @param hdsi dsi handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_DSI_TEARING_EFFECT_CB_ID Tearing Effect Callback ID + * @arg @ref HAL_DSI_ENDOF_REFRESH_CB_ID End Of Refresh Callback ID + * @arg @ref HAL_DSI_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_DSI_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_DSI_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_DSI_RegisterCallback(DSI_HandleTypeDef *hdsi, HAL_DSI_CallbackIDTypeDef CallbackID, + pDSI_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hdsi->ErrorCode |= HAL_DSI_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hdsi); + + if (hdsi->State == HAL_DSI_STATE_READY) + { + switch (CallbackID) + { + case HAL_DSI_TEARING_EFFECT_CB_ID : + hdsi->TearingEffectCallback = pCallback; + break; + + case HAL_DSI_ENDOF_REFRESH_CB_ID : + hdsi->EndOfRefreshCallback = pCallback; + break; + + case HAL_DSI_ERROR_CB_ID : + hdsi->ErrorCallback = pCallback; + break; + + case HAL_DSI_MSPINIT_CB_ID : + hdsi->MspInitCallback = pCallback; + break; + + case HAL_DSI_MSPDEINIT_CB_ID : + hdsi->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hdsi->ErrorCode |= HAL_DSI_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (hdsi->State == HAL_DSI_STATE_RESET) + { + switch (CallbackID) + { + case HAL_DSI_MSPINIT_CB_ID : + hdsi->MspInitCallback = pCallback; + break; + + case HAL_DSI_MSPDEINIT_CB_ID : + hdsi->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hdsi->ErrorCode |= HAL_DSI_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hdsi->ErrorCode |= HAL_DSI_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdsi); + + return status; +} + +/** + * @brief Unregister a DSI Callback + * DSI callabck is redirected to the weak predefined callback + * @param hdsi dsi handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_DSI_TEARING_EFFECT_CB_ID Tearing Effect Callback ID + * @arg @ref HAL_DSI_ENDOF_REFRESH_CB_ID End Of Refresh Callback ID + * @arg @ref HAL_DSI_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_DSI_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_DSI_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval status + */ +HAL_StatusTypeDef HAL_DSI_UnRegisterCallback(DSI_HandleTypeDef *hdsi, HAL_DSI_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hdsi); + + if (hdsi->State == HAL_DSI_STATE_READY) + { + switch (CallbackID) + { + case HAL_DSI_TEARING_EFFECT_CB_ID : + hdsi->TearingEffectCallback = HAL_DSI_TearingEffectCallback; 
/* Legacy weak TearingEffectCallback */ + break; + + case HAL_DSI_ENDOF_REFRESH_CB_ID : + hdsi->EndOfRefreshCallback = HAL_DSI_EndOfRefreshCallback; /* Legacy weak EndOfRefreshCallback */ + break; + + case HAL_DSI_ERROR_CB_ID : + hdsi->ErrorCallback = HAL_DSI_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_DSI_MSPINIT_CB_ID : + hdsi->MspInitCallback = HAL_DSI_MspInit; /* Legcay weak MspInit Callback */ + break; + + case HAL_DSI_MSPDEINIT_CB_ID : + hdsi->MspDeInitCallback = HAL_DSI_MspDeInit; /* Legcay weak MspDeInit Callback */ + break; + + default : + /* Update the error code */ + hdsi->ErrorCode |= HAL_DSI_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (hdsi->State == HAL_DSI_STATE_RESET) + { + switch (CallbackID) + { + case HAL_DSI_MSPINIT_CB_ID : + hdsi->MspInitCallback = HAL_DSI_MspInit; /* Legcay weak MspInit Callback */ + break; + + case HAL_DSI_MSPDEINIT_CB_ID : + hdsi->MspDeInitCallback = HAL_DSI_MspDeInit; /* Legcay weak MspDeInit Callback */ + break; + + default : + /* Update the error code */ + hdsi->ErrorCode |= HAL_DSI_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hdsi->ErrorCode |= HAL_DSI_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdsi); + + return status; +} +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup DSI_Group2 IO operation functions + * @brief IO operation functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] This section provides function allowing to: + (+) Handle DSI interrupt request + +@endverbatim + * @{ + */ +/** + * @brief Handles DSI interrupt request. + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. 
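+ * @note  Typical wiring, shown here as an assumption: this handler is usually
+ *        called from the device's DSI interrupt vector; the handle name and the
+ *        exact vector name depend on the project and device.
+ * @code
+ *        void DSI_IRQHandler(void)
+ *        {
+ *          HAL_DSI_IRQHandler(&hdsi);
+ *        }
+ * @endcode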
+ * @retval HAL status + */ +void HAL_DSI_IRQHandler(DSI_HandleTypeDef *hdsi) +{ + uint32_t ErrorStatus0, ErrorStatus1; + + /* Tearing Effect Interrupt management ***************************************/ + if (__HAL_DSI_GET_FLAG(hdsi, DSI_FLAG_TE) != 0U) + { + if (__HAL_DSI_GET_IT_SOURCE(hdsi, DSI_IT_TE) != 0U) + { + /* Clear the Tearing Effect Interrupt Flag */ + __HAL_DSI_CLEAR_FLAG(hdsi, DSI_FLAG_TE); + + /* Tearing Effect Callback */ +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) + /*Call registered Tearing Effect callback */ + hdsi->TearingEffectCallback(hdsi); +#else + /*Call legacy Tearing Effect callback*/ + HAL_DSI_TearingEffectCallback(hdsi); +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ + } + } + + /* End of Refresh Interrupt management ***************************************/ + if (__HAL_DSI_GET_FLAG(hdsi, DSI_FLAG_ER) != 0U) + { + if (__HAL_DSI_GET_IT_SOURCE(hdsi, DSI_IT_ER) != 0U) + { + /* Clear the End of Refresh Interrupt Flag */ + __HAL_DSI_CLEAR_FLAG(hdsi, DSI_FLAG_ER); + + /* End of Refresh Callback */ +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) + /*Call registered End of refresh callback */ + hdsi->EndOfRefreshCallback(hdsi); +#else + /*Call Legacy End of refresh callback */ + HAL_DSI_EndOfRefreshCallback(hdsi); +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ + } + } + + /* Error Interrupts management ***********************************************/ + if (hdsi->ErrorMsk != 0U) + { + ErrorStatus0 = hdsi->Instance->ISR[0U]; + ErrorStatus0 &= hdsi->Instance->IER[0U]; + ErrorStatus1 = hdsi->Instance->ISR[1U]; + ErrorStatus1 &= hdsi->Instance->IER[1U]; + + if ((ErrorStatus0 & DSI_ERROR_ACK_MASK) != 0U) + { + hdsi->ErrorCode |= HAL_DSI_ERROR_ACK; + } + + if ((ErrorStatus0 & DSI_ERROR_PHY_MASK) != 0U) + { + hdsi->ErrorCode |= HAL_DSI_ERROR_PHY; + } + + if ((ErrorStatus1 & DSI_ERROR_TX_MASK) != 0U) + { + hdsi->ErrorCode |= HAL_DSI_ERROR_TX; + } + + if ((ErrorStatus1 & DSI_ERROR_RX_MASK) != 0U) + { + hdsi->ErrorCode |= HAL_DSI_ERROR_RX; + } + + if ((ErrorStatus1 & DSI_ERROR_ECC_MASK) != 0U) + { + hdsi->ErrorCode |= HAL_DSI_ERROR_ECC; + } + + if ((ErrorStatus1 & DSI_ERROR_CRC_MASK) != 0U) + { + hdsi->ErrorCode |= HAL_DSI_ERROR_CRC; + } + + if ((ErrorStatus1 & DSI_ERROR_PSE_MASK) != 0U) + { + hdsi->ErrorCode |= HAL_DSI_ERROR_PSE; + } + + if ((ErrorStatus1 & DSI_ERROR_EOT_MASK) != 0U) + { + hdsi->ErrorCode |= HAL_DSI_ERROR_EOT; + } + + if ((ErrorStatus1 & DSI_ERROR_OVF_MASK) != 0U) + { + hdsi->ErrorCode |= HAL_DSI_ERROR_OVF; + } + + if ((ErrorStatus1 & DSI_ERROR_GEN_MASK) != 0U) + { + hdsi->ErrorCode |= HAL_DSI_ERROR_GEN; + } + + /* Check only selected errors */ + if (hdsi->ErrorCode != HAL_DSI_ERROR_NONE) + { + /* DSI error interrupt callback */ +#if (USE_HAL_DSI_REGISTER_CALLBACKS == 1) + /*Call registered Error callback */ + hdsi->ErrorCallback(hdsi); +#else + /*Call Legacy Error callback */ + HAL_DSI_ErrorCallback(hdsi); +#endif /* USE_HAL_DSI_REGISTER_CALLBACKS */ + } + } +} + +/** + * @brief Tearing Effect DSI callback. + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval None + */ +__weak void HAL_DSI_TearingEffectCallback(DSI_HandleTypeDef *hdsi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdsi); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DSI_TearingEffectCallback could be implemented in the user file + */ +} + +/** + * @brief End of Refresh DSI callback. 
+ * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval None + */ +__weak void HAL_DSI_EndOfRefreshCallback(DSI_HandleTypeDef *hdsi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdsi); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DSI_EndOfRefreshCallback could be implemented in the user file + */ +} + +/** + * @brief Operation Error DSI callback. + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval None + */ +__weak void HAL_DSI_ErrorCallback(DSI_HandleTypeDef *hdsi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdsi); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_DSI_ErrorCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup DSI_Group3 Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the Generic interface read-back Virtual Channel ID + (+) Select video mode and configure the corresponding parameters + (+) Configure command transmission mode: High-speed or Low-power + (+) Configure the flow control + (+) Configure the DSI PHY timer + (+) Configure the DSI HOST timeout + (+) Configure the DSI HOST timeout + (+) Start/Stop the DSI module + (+) Refresh the display in command mode + (+) Controls the display color mode in Video mode + (+) Control the display shutdown in Video mode + (+) write short DCS or short Generic command + (+) write long DCS or long Generic command + (+) Read command (DCS or generic) + (+) Enter/Exit the Ultra Low Power Mode on data only (D-PHY PLL running) + (+) Enter/Exit the Ultra Low Power Mode on data only and clock (D-PHY PLL turned off) + (+) Start/Stop test pattern generation + (+) Slew-Rate And Delay Tuning + (+) Low-Power Reception Filter Tuning + (+) Activate an additional current path on all lanes to meet the SDDTx parameter + (+) Custom lane pins configuration + (+) Set custom timing for the PHY + (+) Force the Clock/Data Lane in TX Stop Mode + (+) Force LP Receiver in Low-Power Mode + (+) Force Data Lanes in RX Mode after a BTA + (+) Enable a pull-down on the lanes to prevent from floating states when unused + (+) Switch off the contention detection on data lanes + +@endverbatim + * @{ + */ + +/** + * @brief Configure the Generic interface read-back Virtual Channel ID. + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param VirtualChannelID Virtual channel ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_SetGenericVCID(DSI_HandleTypeDef *hdsi, uint32_t VirtualChannelID) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Update the GVCID register */ + hdsi->Instance->GVCIDR &= ~DSI_GVCIDR_VCID; + hdsi->Instance->GVCIDR |= VirtualChannelID; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Select video mode and configure the corresponding parameters + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. 
+ * @param VidCfg pointer to a DSI_VidCfgTypeDef structure that contains + * the DSI video mode configuration parameters + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ConfigVideoMode(DSI_HandleTypeDef *hdsi, DSI_VidCfgTypeDef *VidCfg) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check the parameters */ + assert_param(IS_DSI_COLOR_CODING(VidCfg->ColorCoding)); + assert_param(IS_DSI_VIDEO_MODE_TYPE(VidCfg->Mode)); + assert_param(IS_DSI_LP_COMMAND(VidCfg->LPCommandEnable)); + assert_param(IS_DSI_LP_HFP(VidCfg->LPHorizontalFrontPorchEnable)); + assert_param(IS_DSI_LP_HBP(VidCfg->LPHorizontalBackPorchEnable)); + assert_param(IS_DSI_LP_VACTIVE(VidCfg->LPVerticalActiveEnable)); + assert_param(IS_DSI_LP_VFP(VidCfg->LPVerticalFrontPorchEnable)); + assert_param(IS_DSI_LP_VBP(VidCfg->LPVerticalBackPorchEnable)); + assert_param(IS_DSI_LP_VSYNC(VidCfg->LPVerticalSyncActiveEnable)); + assert_param(IS_DSI_FBTAA(VidCfg->FrameBTAAcknowledgeEnable)); + assert_param(IS_DSI_DE_POLARITY(VidCfg->DEPolarity)); + assert_param(IS_DSI_VSYNC_POLARITY(VidCfg->VSPolarity)); + assert_param(IS_DSI_HSYNC_POLARITY(VidCfg->HSPolarity)); + /* Check the LooselyPacked variant only in 18-bit mode */ + if (VidCfg->ColorCoding == DSI_RGB666) + { + assert_param(IS_DSI_LOOSELY_PACKED(VidCfg->LooselyPacked)); + } + + /* Select video mode by resetting CMDM and DSIM bits */ + hdsi->Instance->MCR &= ~DSI_MCR_CMDM; + hdsi->Instance->WCFGR &= ~DSI_WCFGR_DSIM; + + /* Configure the video mode transmission type */ + hdsi->Instance->VMCR &= ~DSI_VMCR_VMT; + hdsi->Instance->VMCR |= VidCfg->Mode; + + /* Configure the video packet size */ + hdsi->Instance->VPCR &= ~DSI_VPCR_VPSIZE; + hdsi->Instance->VPCR |= VidCfg->PacketSize; + + /* Set the chunks number to be transmitted through the DSI link */ + hdsi->Instance->VCCR &= ~DSI_VCCR_NUMC; + hdsi->Instance->VCCR |= VidCfg->NumberOfChunks; + + /* Set the size of the null packet */ + hdsi->Instance->VNPCR &= ~DSI_VNPCR_NPSIZE; + hdsi->Instance->VNPCR |= VidCfg->NullPacketSize; + + /* Select the virtual channel for the LTDC interface traffic */ + hdsi->Instance->LVCIDR &= ~DSI_LVCIDR_VCID; + hdsi->Instance->LVCIDR |= VidCfg->VirtualChannelID; + + /* Configure the polarity of control signals */ + hdsi->Instance->LPCR &= ~(DSI_LPCR_DEP | DSI_LPCR_VSP | DSI_LPCR_HSP); + hdsi->Instance->LPCR |= (VidCfg->DEPolarity | VidCfg->VSPolarity | VidCfg->HSPolarity); + + /* Select the color coding for the host */ + hdsi->Instance->LCOLCR &= ~DSI_LCOLCR_COLC; + hdsi->Instance->LCOLCR |= VidCfg->ColorCoding; + + /* Select the color coding for the wrapper */ + hdsi->Instance->WCFGR &= ~DSI_WCFGR_COLMUX; + hdsi->Instance->WCFGR |= ((VidCfg->ColorCoding) << 1U); + + /* Enable/disable the loosely packed variant to 18-bit configuration */ + if (VidCfg->ColorCoding == DSI_RGB666) + { + hdsi->Instance->LCOLCR &= ~DSI_LCOLCR_LPE; + hdsi->Instance->LCOLCR |= VidCfg->LooselyPacked; + } + + /* Set the Horizontal Synchronization Active (HSA) in lane byte clock cycles */ + hdsi->Instance->VHSACR &= ~DSI_VHSACR_HSA; + hdsi->Instance->VHSACR |= VidCfg->HorizontalSyncActive; + + /* Set the Horizontal Back Porch (HBP) in lane byte clock cycles */ + hdsi->Instance->VHBPCR &= ~DSI_VHBPCR_HBP; + hdsi->Instance->VHBPCR |= VidCfg->HorizontalBackPorch; + + /* Set the total line time (HLINE=HSA+HBP+HACT+HFP) in lane byte clock cycles */ + hdsi->Instance->VLCR &= ~DSI_VLCR_HLINE; + hdsi->Instance->VLCR |= VidCfg->HorizontalLine; + + /* Set the Vertical Synchronization Active (VSA) */ + hdsi->Instance->VVSACR &= 
~DSI_VVSACR_VSA; + hdsi->Instance->VVSACR |= VidCfg->VerticalSyncActive; + + /* Set the Vertical Back Porch (VBP)*/ + hdsi->Instance->VVBPCR &= ~DSI_VVBPCR_VBP; + hdsi->Instance->VVBPCR |= VidCfg->VerticalBackPorch; + + /* Set the Vertical Front Porch (VFP)*/ + hdsi->Instance->VVFPCR &= ~DSI_VVFPCR_VFP; + hdsi->Instance->VVFPCR |= VidCfg->VerticalFrontPorch; + + /* Set the Vertical Active period*/ + hdsi->Instance->VVACR &= ~DSI_VVACR_VA; + hdsi->Instance->VVACR |= VidCfg->VerticalActive; + + /* Configure the command transmission mode */ + hdsi->Instance->VMCR &= ~DSI_VMCR_LPCE; + hdsi->Instance->VMCR |= VidCfg->LPCommandEnable; + + /* Low power largest packet size */ + hdsi->Instance->LPMCR &= ~DSI_LPMCR_LPSIZE; + hdsi->Instance->LPMCR |= ((VidCfg->LPLargestPacketSize) << 16U); + + /* Low power VACT largest packet size */ + hdsi->Instance->LPMCR &= ~DSI_LPMCR_VLPSIZE; + hdsi->Instance->LPMCR |= VidCfg->LPVACTLargestPacketSize; + + /* Enable LP transition in HFP period */ + hdsi->Instance->VMCR &= ~DSI_VMCR_LPHFPE; + hdsi->Instance->VMCR |= VidCfg->LPHorizontalFrontPorchEnable; + + /* Enable LP transition in HBP period */ + hdsi->Instance->VMCR &= ~DSI_VMCR_LPHBPE; + hdsi->Instance->VMCR |= VidCfg->LPHorizontalBackPorchEnable; + + /* Enable LP transition in VACT period */ + hdsi->Instance->VMCR &= ~DSI_VMCR_LPVAE; + hdsi->Instance->VMCR |= VidCfg->LPVerticalActiveEnable; + + /* Enable LP transition in VFP period */ + hdsi->Instance->VMCR &= ~DSI_VMCR_LPVFPE; + hdsi->Instance->VMCR |= VidCfg->LPVerticalFrontPorchEnable; + + /* Enable LP transition in VBP period */ + hdsi->Instance->VMCR &= ~DSI_VMCR_LPVBPE; + hdsi->Instance->VMCR |= VidCfg->LPVerticalBackPorchEnable; + + /* Enable LP transition in vertical sync period */ + hdsi->Instance->VMCR &= ~DSI_VMCR_LPVSAE; + hdsi->Instance->VMCR |= VidCfg->LPVerticalSyncActiveEnable; + + /* Enable the request for an acknowledge response at the end of a frame */ + hdsi->Instance->VMCR &= ~DSI_VMCR_FBTAAE; + hdsi->Instance->VMCR |= VidCfg->FrameBTAAcknowledgeEnable; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Select adapted command mode and configure the corresponding parameters + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. 
+ * @param CmdCfg pointer to a DSI_CmdCfgTypeDef structure that contains + * the DSI command mode configuration parameters + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ConfigAdaptedCommandMode(DSI_HandleTypeDef *hdsi, DSI_CmdCfgTypeDef *CmdCfg) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check the parameters */ + assert_param(IS_DSI_COLOR_CODING(CmdCfg->ColorCoding)); + assert_param(IS_DSI_TE_SOURCE(CmdCfg->TearingEffectSource)); + assert_param(IS_DSI_TE_POLARITY(CmdCfg->TearingEffectPolarity)); + assert_param(IS_DSI_AUTOMATIC_REFRESH(CmdCfg->AutomaticRefresh)); + assert_param(IS_DSI_VS_POLARITY(CmdCfg->VSyncPol)); + assert_param(IS_DSI_TE_ACK_REQUEST(CmdCfg->TEAcknowledgeRequest)); + assert_param(IS_DSI_DE_POLARITY(CmdCfg->DEPolarity)); + assert_param(IS_DSI_VSYNC_POLARITY(CmdCfg->VSPolarity)); + assert_param(IS_DSI_HSYNC_POLARITY(CmdCfg->HSPolarity)); + + /* Select command mode by setting CMDM and DSIM bits */ + hdsi->Instance->MCR |= DSI_MCR_CMDM; + hdsi->Instance->WCFGR &= ~DSI_WCFGR_DSIM; + hdsi->Instance->WCFGR |= DSI_WCFGR_DSIM; + + /* Select the virtual channel for the LTDC interface traffic */ + hdsi->Instance->LVCIDR &= ~DSI_LVCIDR_VCID; + hdsi->Instance->LVCIDR |= CmdCfg->VirtualChannelID; + + /* Configure the polarity of control signals */ + hdsi->Instance->LPCR &= ~(DSI_LPCR_DEP | DSI_LPCR_VSP | DSI_LPCR_HSP); + hdsi->Instance->LPCR |= (CmdCfg->DEPolarity | CmdCfg->VSPolarity | CmdCfg->HSPolarity); + + /* Select the color coding for the host */ + hdsi->Instance->LCOLCR &= ~DSI_LCOLCR_COLC; + hdsi->Instance->LCOLCR |= CmdCfg->ColorCoding; + + /* Select the color coding for the wrapper */ + hdsi->Instance->WCFGR &= ~DSI_WCFGR_COLMUX; + hdsi->Instance->WCFGR |= ((CmdCfg->ColorCoding) << 1U); + + /* Configure the maximum allowed size for write memory command */ + hdsi->Instance->LCCR &= ~DSI_LCCR_CMDSIZE; + hdsi->Instance->LCCR |= CmdCfg->CommandSize; + + /* Configure the tearing effect source and polarity and select the refresh mode */ + hdsi->Instance->WCFGR &= ~(DSI_WCFGR_TESRC | DSI_WCFGR_TEPOL | DSI_WCFGR_AR | DSI_WCFGR_VSPOL); + hdsi->Instance->WCFGR |= (CmdCfg->TearingEffectSource | CmdCfg->TearingEffectPolarity | CmdCfg->AutomaticRefresh | + CmdCfg->VSyncPol); + + /* Configure the tearing effect acknowledge request */ + hdsi->Instance->CMCR &= ~DSI_CMCR_TEARE; + hdsi->Instance->CMCR |= CmdCfg->TEAcknowledgeRequest; + + /* Enable the Tearing Effect interrupt */ + __HAL_DSI_ENABLE_IT(hdsi, DSI_IT_TE); + + /* Enable the End of Refresh interrupt */ + __HAL_DSI_ENABLE_IT(hdsi, DSI_IT_ER); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Configure command transmission mode: High-speed or Low-power + * and enable/disable acknowledge request after packet transmission + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. 
+ * @param LPCmd pointer to a DSI_LPCmdTypeDef structure that contains + * the DSI command transmission mode configuration parameters + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ConfigCommand(DSI_HandleTypeDef *hdsi, DSI_LPCmdTypeDef *LPCmd) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + assert_param(IS_DSI_LP_GSW0P(LPCmd->LPGenShortWriteNoP)); + assert_param(IS_DSI_LP_GSW1P(LPCmd->LPGenShortWriteOneP)); + assert_param(IS_DSI_LP_GSW2P(LPCmd->LPGenShortWriteTwoP)); + assert_param(IS_DSI_LP_GSR0P(LPCmd->LPGenShortReadNoP)); + assert_param(IS_DSI_LP_GSR1P(LPCmd->LPGenShortReadOneP)); + assert_param(IS_DSI_LP_GSR2P(LPCmd->LPGenShortReadTwoP)); + assert_param(IS_DSI_LP_GLW(LPCmd->LPGenLongWrite)); + assert_param(IS_DSI_LP_DSW0P(LPCmd->LPDcsShortWriteNoP)); + assert_param(IS_DSI_LP_DSW1P(LPCmd->LPDcsShortWriteOneP)); + assert_param(IS_DSI_LP_DSR0P(LPCmd->LPDcsShortReadNoP)); + assert_param(IS_DSI_LP_DLW(LPCmd->LPDcsLongWrite)); + assert_param(IS_DSI_LP_MRDP(LPCmd->LPMaxReadPacket)); + assert_param(IS_DSI_ACK_REQUEST(LPCmd->AcknowledgeRequest)); + + /* Select High-speed or Low-power for command transmission */ + hdsi->Instance->CMCR &= ~(DSI_CMCR_GSW0TX | \ + DSI_CMCR_GSW1TX | \ + DSI_CMCR_GSW2TX | \ + DSI_CMCR_GSR0TX | \ + DSI_CMCR_GSR1TX | \ + DSI_CMCR_GSR2TX | \ + DSI_CMCR_GLWTX | \ + DSI_CMCR_DSW0TX | \ + DSI_CMCR_DSW1TX | \ + DSI_CMCR_DSR0TX | \ + DSI_CMCR_DLWTX | \ + DSI_CMCR_MRDPS); + hdsi->Instance->CMCR |= (LPCmd->LPGenShortWriteNoP | \ + LPCmd->LPGenShortWriteOneP | \ + LPCmd->LPGenShortWriteTwoP | \ + LPCmd->LPGenShortReadNoP | \ + LPCmd->LPGenShortReadOneP | \ + LPCmd->LPGenShortReadTwoP | \ + LPCmd->LPGenLongWrite | \ + LPCmd->LPDcsShortWriteNoP | \ + LPCmd->LPDcsShortWriteOneP | \ + LPCmd->LPDcsShortReadNoP | \ + LPCmd->LPDcsLongWrite | \ + LPCmd->LPMaxReadPacket); + + /* Configure the acknowledge request after each packet transmission */ + hdsi->Instance->CMCR &= ~DSI_CMCR_ARE; + hdsi->Instance->CMCR |= LPCmd->AcknowledgeRequest; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Configure the flow control parameters + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param FlowControl flow control feature(s) to be enabled. + * This parameter can be any combination of @ref DSI_FlowControl. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ConfigFlowControl(DSI_HandleTypeDef *hdsi, uint32_t FlowControl) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check the parameters */ + assert_param(IS_DSI_FLOW_CONTROL(FlowControl)); + + /* Set the DSI Host Protocol Configuration Register */ + hdsi->Instance->PCR &= ~DSI_FLOW_CONTROL_ALL; + hdsi->Instance->PCR |= FlowControl; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Configure the DSI PHY timer parameters + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param PhyTimers DSI_PHY_TimerTypeDef structure that contains + * the DSI PHY timing parameters + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ConfigPhyTimer(DSI_HandleTypeDef *hdsi, DSI_PHY_TimerTypeDef *PhyTimers) +{ + uint32_t maxTime; + /* Process locked */ + __HAL_LOCK(hdsi); + + maxTime = (PhyTimers->ClockLaneLP2HSTime > PhyTimers->ClockLaneHS2LPTime) ? 
PhyTimers->ClockLaneLP2HSTime : + PhyTimers->ClockLaneHS2LPTime; + + /* Clock lane timer configuration */ + + /* In Automatic Clock Lane control mode, the DSI Host can turn off the clock lane between two + High-Speed transmission. + To do so, the DSI Host calculates the time required for the clock lane to change from HighSpeed + to Low-Power and from Low-Power to High-Speed. + This timings are configured by the HS2LP_TIME and LP2HS_TIME in the DSI Host Clock Lane Timer Configuration Register (DSI_CLTCR). + But the DSI Host is not calculating LP2HS_TIME + HS2LP_TIME but 2 x HS2LP_TIME. + + Workaround : Configure HS2LP_TIME and LP2HS_TIME with the same value being the max of HS2LP_TIME or LP2HS_TIME. + */ + hdsi->Instance->CLTCR &= ~(DSI_CLTCR_LP2HS_TIME | DSI_CLTCR_HS2LP_TIME); + hdsi->Instance->CLTCR |= (maxTime | ((maxTime) << 16U)); + + /* Data lane timer configuration */ + hdsi->Instance->DLTCR &= ~(DSI_DLTCR_MRD_TIME | DSI_DLTCR_LP2HS_TIME | DSI_DLTCR_HS2LP_TIME); + hdsi->Instance->DLTCR |= (PhyTimers->DataLaneMaxReadTime | ((PhyTimers->DataLaneLP2HSTime) << 16U) | (( + PhyTimers->DataLaneHS2LPTime) << 24U)); + + /* Configure the wait period to request HS transmission after a stop state */ + hdsi->Instance->PCONFR &= ~DSI_PCONFR_SW_TIME; + hdsi->Instance->PCONFR |= ((PhyTimers->StopWaitTime) << 8U); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Configure the DSI HOST timeout parameters + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param HostTimeouts DSI_HOST_TimeoutTypeDef structure that contains + * the DSI host timeout parameters + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ConfigHostTimeouts(DSI_HandleTypeDef *hdsi, DSI_HOST_TimeoutTypeDef *HostTimeouts) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Set the timeout clock division factor */ + hdsi->Instance->CCR &= ~DSI_CCR_TOCKDIV; + hdsi->Instance->CCR |= ((HostTimeouts->TimeoutCkdiv) << 8U); + + /* High-speed transmission timeout */ + hdsi->Instance->TCCR[0U] &= ~DSI_TCCR0_HSTX_TOCNT; + hdsi->Instance->TCCR[0U] |= ((HostTimeouts->HighSpeedTransmissionTimeout) << 16U); + + /* Low-power reception timeout */ + hdsi->Instance->TCCR[0U] &= ~DSI_TCCR0_LPRX_TOCNT; + hdsi->Instance->TCCR[0U] |= HostTimeouts->LowPowerReceptionTimeout; + + /* High-speed read timeout */ + hdsi->Instance->TCCR[1U] &= ~DSI_TCCR1_HSRD_TOCNT; + hdsi->Instance->TCCR[1U] |= HostTimeouts->HighSpeedReadTimeout; + + /* Low-power read timeout */ + hdsi->Instance->TCCR[2U] &= ~DSI_TCCR2_LPRD_TOCNT; + hdsi->Instance->TCCR[2U] |= HostTimeouts->LowPowerReadTimeout; + + /* High-speed write timeout */ + hdsi->Instance->TCCR[3U] &= ~DSI_TCCR3_HSWR_TOCNT; + hdsi->Instance->TCCR[3U] |= HostTimeouts->HighSpeedWriteTimeout; + + /* High-speed write presp mode */ + hdsi->Instance->TCCR[3U] &= ~DSI_TCCR3_PM; + hdsi->Instance->TCCR[3U] |= HostTimeouts->HighSpeedWritePrespMode; + + /* Low-speed write timeout */ + hdsi->Instance->TCCR[4U] &= ~DSI_TCCR4_LPWR_TOCNT; + hdsi->Instance->TCCR[4U] |= HostTimeouts->LowPowerWriteTimeout; + + /* BTA timeout */ + hdsi->Instance->TCCR[5U] &= ~DSI_TCCR5_BTA_TOCNT; + hdsi->Instance->TCCR[5U] |= HostTimeouts->BTATimeout; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Start the DSI module + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. 
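+ * @note  Per the driver description above, the usual order is HAL_DSI_Init(),
+ *        then HAL_DSI_ConfigVideoMode() or HAL_DSI_ConfigAdaptedCommandMode(),
+ *        then HAL_DSI_Start().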
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_Start(DSI_HandleTypeDef *hdsi) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Enable the DSI host */ + __HAL_DSI_ENABLE(hdsi); + + /* Enable the DSI wrapper */ + __HAL_DSI_WRAPPER_ENABLE(hdsi); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Stop the DSI module + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_Stop(DSI_HandleTypeDef *hdsi) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Disable the DSI host */ + __HAL_DSI_DISABLE(hdsi); + + /* Disable the DSI wrapper */ + __HAL_DSI_WRAPPER_DISABLE(hdsi); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Refresh the display in command mode + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_Refresh(DSI_HandleTypeDef *hdsi) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Update the display */ + hdsi->Instance->WCR |= DSI_WCR_LTDCEN; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Controls the display color mode in Video mode + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param ColorMode Color mode (full or 8-colors). + * This parameter can be any value of @ref DSI_Color_Mode + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ColorMode(DSI_HandleTypeDef *hdsi, uint32_t ColorMode) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check the parameters */ + assert_param(IS_DSI_COLOR_MODE(ColorMode)); + + /* Update the display color mode */ + hdsi->Instance->WCR &= ~DSI_WCR_COLM; + hdsi->Instance->WCR |= ColorMode; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Control the display shutdown in Video mode + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param Shutdown Shut-down (Display-ON or Display-OFF). + * This parameter can be any value of @ref DSI_ShutDown + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_Shutdown(DSI_HandleTypeDef *hdsi, uint32_t Shutdown) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check the parameters */ + assert_param(IS_DSI_SHUT_DOWN(Shutdown)); + + /* Update the display Shutdown */ + hdsi->Instance->WCR &= ~DSI_WCR_SHTDN; + hdsi->Instance->WCR |= Shutdown; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief write short DCS or short Generic command + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param ChannelID Virtual channel ID. + * @param Mode DSI short packet data type. + * This parameter can be any value of @ref DSI_SHORT_WRITE_PKT_Data_Type. + * @param Param1 DSC command or first generic parameter. + * This parameter can be any value of @ref DSI_DCS_Command or a + * generic command code. + * @param Param2 DSC parameter or second generic parameter. 
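+ * @note  Illustrative call, not part of the original driver: send the DCS
+ *        "set display on" command (no parameter) on virtual channel 0, assuming
+ *        the attached panel uses that DCS command.
+ * @code
+ *        HAL_DSI_ShortWrite(&hdsi, 0U, DSI_DCS_SHORT_PKT_WRITE_P0, DSI_SET_DISPLAY_ON, 0x00U);
+ * @endcode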
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ShortWrite(DSI_HandleTypeDef *hdsi, + uint32_t ChannelID, + uint32_t Mode, + uint32_t Param1, + uint32_t Param2) +{ + HAL_StatusTypeDef status; + /* Check the parameters */ + assert_param(IS_DSI_SHORT_WRITE_PACKET_TYPE(Mode)); + + /* Process locked */ + __HAL_LOCK(hdsi); + + status = DSI_ShortWrite(hdsi, ChannelID, Mode, Param1, Param2); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return status; +} + +/** + * @brief write long DCS or long Generic command + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param ChannelID Virtual channel ID. + * @param Mode DSI long packet data type. + * This parameter can be any value of @ref DSI_LONG_WRITE_PKT_Data_Type. + * @param NbParams Number of parameters. + * @param Param1 DSC command or first generic parameter. + * This parameter can be any value of @ref DSI_DCS_Command or a + * generic command code + * @param ParametersTable Pointer to parameter values table. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_LongWrite(DSI_HandleTypeDef *hdsi, + uint32_t ChannelID, + uint32_t Mode, + uint32_t NbParams, + uint32_t Param1, + uint8_t *ParametersTable) +{ + uint32_t uicounter, nbBytes, count; + uint32_t tickstart; + uint32_t fifoword; + uint8_t *pparams = ParametersTable; + + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check the parameters */ + assert_param(IS_DSI_LONG_WRITE_PACKET_TYPE(Mode)); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait for Command FIFO Empty */ + while ((hdsi->Instance->GPSR & DSI_GPSR_CMDFE) == 0U) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + + /* Set the DCS code on payload byte 1, and the other parameters on the write FIFO command*/ + fifoword = Param1; + nbBytes = (NbParams < 3U) ? NbParams : 3U; + + for (count = 0U; count < nbBytes; count++) + { + fifoword |= (((uint32_t)(*(pparams + count))) << (8U + (8U * count))); + } + hdsi->Instance->GPDR = fifoword; + + uicounter = NbParams - nbBytes; + pparams += nbBytes; + /* Set the Next parameters on the write FIFO command*/ + while (uicounter != 0U) + { + nbBytes = (uicounter < 4U) ? uicounter : 4U; + fifoword = 0U; + for (count = 0U; count < nbBytes; count++) + { + fifoword |= (((uint32_t)(*(pparams + count))) << (8U * count)); + } + hdsi->Instance->GPDR = fifoword; + + uicounter -= nbBytes; + pparams += nbBytes; + } + + /* Configure the packet to send a long DCS command */ + DSI_ConfigPacketHeader(hdsi->Instance, + ChannelID, + Mode, + ((NbParams + 1U) & 0x00FFU), + (((NbParams + 1U) & 0xFF00U) >> 8U)); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Read command (DCS or generic) + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param ChannelNbr Virtual channel ID + * @param Array pointer to a buffer to store the payload of a read back operation. + * @param Size Data size to be read (in byte). + * @param Mode DSI read packet data type. + * This parameter can be any value of @ref DSI_SHORT_READ_PKT_Data_Type. + * @param DCSCmd DCS get/read command. + * @param ParametersTable Pointer to parameter values table. 
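+ * @note  Illustrative call, not part of the original driver: read one byte of a
+ *        common DCS register (0x0Ah, "get power mode") on virtual channel 0,
+ *        assuming the panel supports it; ParametersTable is only used for the
+ *        generic read packet types, so it may be NULL for a DCS read.
+ * @code
+ *        uint8_t power_mode;
+ *        HAL_DSI_Read(&hdsi, 0U, &power_mode, 1U, DSI_DCS_SHORT_PKT_READ, 0x0AU, NULL);
+ * @endcode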
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_Read(DSI_HandleTypeDef *hdsi, + uint32_t ChannelNbr, + uint8_t *Array, + uint32_t Size, + uint32_t Mode, + uint32_t DCSCmd, + uint8_t *ParametersTable) +{ + uint32_t tickstart; + uint8_t *pdata = Array; + uint32_t datasize = Size; + uint32_t fifoword; + uint32_t nbbytes; + uint32_t count; + + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check the parameters */ + assert_param(IS_DSI_READ_PACKET_TYPE(Mode)); + + if (datasize > 2U) + { + /* set max return packet size */ + if (DSI_ShortWrite(hdsi, ChannelNbr, DSI_MAX_RETURN_PKT_SIZE, ((datasize) & 0xFFU), + (((datasize) >> 8U) & 0xFFU)) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + } + + /* Configure the packet to read command */ + if (Mode == DSI_DCS_SHORT_PKT_READ) + { + DSI_ConfigPacketHeader(hdsi->Instance, ChannelNbr, Mode, DCSCmd, 0U); + } + else if (Mode == DSI_GEN_SHORT_PKT_READ_P0) + { + DSI_ConfigPacketHeader(hdsi->Instance, ChannelNbr, Mode, 0U, 0U); + } + else if (Mode == DSI_GEN_SHORT_PKT_READ_P1) + { + DSI_ConfigPacketHeader(hdsi->Instance, ChannelNbr, Mode, ParametersTable[0U], 0U); + } + else if (Mode == DSI_GEN_SHORT_PKT_READ_P2) + { + DSI_ConfigPacketHeader(hdsi->Instance, ChannelNbr, Mode, ParametersTable[0U], ParametersTable[1U]); + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* If DSI fifo is not empty, read requested bytes */ + while (((int32_t)(datasize)) > 0) + { + if ((hdsi->Instance->GPSR & DSI_GPSR_PRDFE) == 0U) + { + fifoword = hdsi->Instance->GPDR; + nbbytes = (datasize < 4U) ? datasize : 4U; + + for (count = 0U; count < nbbytes; count++) + { + *pdata = (uint8_t)(fifoword >> (8U * count)); + pdata++; + datasize--; + } + } + + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Enter the ULPM (Ultra Low Power Mode) with the D-PHY PLL running + * (only data lanes are in ULPM) + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. 
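+ * @note  Each call is normally paired with HAL_DSI_ExitULPMData(). In this mode the
+ *        D-PHY PLL keeps running, so leaving it does not require waiting for a PLL
+ *        relock, unlike the full ULPM entered with HAL_DSI_EnterULPM().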
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_EnterULPMData(DSI_HandleTypeDef *hdsi) +{ + uint32_t tickstart; + + /* Process locked */ + __HAL_LOCK(hdsi); + + /* ULPS Request on Data Lanes */ + hdsi->Instance->PUCR |= DSI_PUCR_URDL; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until the D-PHY active lanes enter into ULPM */ + if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_ONE_DATA_LANE) + { + while ((hdsi->Instance->PSR & DSI_PSR_UAN0) != 0U) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + } + else if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_TWO_DATA_LANES) + { + while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UAN1)) != 0U) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Exit the ULPM (Ultra Low Power Mode) with the D-PHY PLL running + * (only data lanes are in ULPM) + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ExitULPMData(DSI_HandleTypeDef *hdsi) +{ + uint32_t tickstart; + + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Exit ULPS on Data Lanes */ + hdsi->Instance->PUCR |= DSI_PUCR_UEDL; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until all active lanes exit ULPM */ + if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_ONE_DATA_LANE) + { + while ((hdsi->Instance->PSR & DSI_PSR_UAN0) != DSI_PSR_UAN0) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + } + else if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_TWO_DATA_LANES) + { + while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UAN1)) != (DSI_PSR_UAN0 | DSI_PSR_UAN1)) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + + /* wait for 1 ms*/ + HAL_Delay(1U); + + /* De-assert the ULPM requests and the ULPM exit bits */ + hdsi->Instance->PUCR = 0U; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Enter the ULPM (Ultra Low Power Mode) with the D-PHY PLL turned off + * (both data and clock lanes are in ULPM) + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_EnterULPM(DSI_HandleTypeDef *hdsi) +{ + uint32_t tickstart; + + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Clock lane configuration: no more HS request */ + hdsi->Instance->CLCR &= ~DSI_CLCR_DPCC; + + /* Use system PLL as byte lane clock source before stopping DSIPHY clock source */ + __HAL_RCC_DSI_CONFIG(RCC_DSICLKSOURCE_PLLR); + + /* ULPS Request on Clock and Data Lanes */ + hdsi->Instance->PUCR |= (DSI_PUCR_URCL | DSI_PUCR_URDL); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until all active lanes exit ULPM */ + if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_ONE_DATA_LANE) + { + while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UANC)) != 0U) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + } + else if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_TWO_DATA_LANES) + { + while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UAN1 | DSI_PSR_UANC)) != 0U) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + + /* Turn off the DSI PLL */ + __HAL_DSI_PLL_DISABLE(hdsi); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Exit the ULPM (Ultra Low Power Mode) with the D-PHY PLL turned off + * (both data and clock lanes are in ULPM) + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ExitULPM(DSI_HandleTypeDef *hdsi) +{ + uint32_t tickstart; + + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Turn on the DSI PLL */ + __HAL_DSI_PLL_ENABLE(hdsi); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait for the lock of the PLL */ + while (__HAL_DSI_GET_FLAG(hdsi, DSI_FLAG_PLLLS) == 0U) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + + /* Exit ULPS on Clock and Data Lanes */ + hdsi->Instance->PUCR |= (DSI_PUCR_UECL | DSI_PUCR_UEDL); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until all active lanes exit ULPM */ + if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_ONE_DATA_LANE) + { + while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UANC)) != (DSI_PSR_UAN0 | DSI_PSR_UANC)) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + } + else if ((hdsi->Instance->PCONFR & DSI_PCONFR_NL) == DSI_TWO_DATA_LANES) + { + while ((hdsi->Instance->PSR & (DSI_PSR_UAN0 | DSI_PSR_UAN1 | DSI_PSR_UANC)) != (DSI_PSR_UAN0 | DSI_PSR_UAN1 | + DSI_PSR_UANC)) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > DSI_TIMEOUT_VALUE) + { + /* Process Unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_TIMEOUT; + } + } + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + + /* wait for 1 ms */ + HAL_Delay(1U); + + /* De-assert the ULPM requests and the ULPM exit bits */ + hdsi->Instance->PUCR = 0U; + + /* Switch the lanbyteclock source in the RCC from system PLL to D-PHY */ + __HAL_RCC_DSI_CONFIG(RCC_DSICLKSOURCE_DSIPHY); + + /* Restore clock lane 
configuration to HS */ + hdsi->Instance->CLCR |= DSI_CLCR_DPCC; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Start test pattern generation + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param Mode Pattern generator mode + * This parameter can be one of the following values: + * 0 : Color bars (horizontal or vertical) + * 1 : BER pattern (vertical only) + * @param Orientation Pattern generator orientation + * This parameter can be one of the following values: + * 0 : Vertical color bars + * 1 : Horizontal color bars + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_PatternGeneratorStart(DSI_HandleTypeDef *hdsi, uint32_t Mode, uint32_t Orientation) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Configure pattern generator mode and orientation */ + hdsi->Instance->VMCR &= ~(DSI_VMCR_PGM | DSI_VMCR_PGO); + hdsi->Instance->VMCR |= ((Mode << 20U) | (Orientation << 24U)); + + /* Enable pattern generator by setting PGE bit */ + hdsi->Instance->VMCR |= DSI_VMCR_PGE; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Stop test pattern generation + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_PatternGeneratorStop(DSI_HandleTypeDef *hdsi) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Disable pattern generator by clearing PGE bit */ + hdsi->Instance->VMCR &= ~DSI_VMCR_PGE; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Set Slew-Rate And Delay Tuning + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param CommDelay Communication delay to be adjusted. + * This parameter can be any value of @ref DSI_Communication_Delay + * @param Lane select between clock or data lanes. 
+ * This parameter can be any value of @ref DSI_Lane_Group + * @param Value Custom value of the slew-rate or delay + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_SetSlewRateAndDelayTuning(DSI_HandleTypeDef *hdsi, uint32_t CommDelay, uint32_t Lane, + uint32_t Value) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check function parameters */ + assert_param(IS_DSI_COMMUNICATION_DELAY(CommDelay)); + assert_param(IS_DSI_LANE_GROUP(Lane)); + + switch (CommDelay) + { + case DSI_SLEW_RATE_HSTX: + if (Lane == DSI_CLOCK_LANE) + { + /* High-Speed Transmission Slew Rate Control on Clock Lane */ + hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_HSTXSRCCL; + hdsi->Instance->WPCR[1U] |= Value << 16U; + } + else if (Lane == DSI_DATA_LANES) + { + /* High-Speed Transmission Slew Rate Control on Data Lanes */ + hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_HSTXSRCDL; + hdsi->Instance->WPCR[1U] |= Value << 18U; + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + break; + case DSI_SLEW_RATE_LPTX: + if (Lane == DSI_CLOCK_LANE) + { + /* Low-Power transmission Slew Rate Compensation on Clock Lane */ + hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_LPSRCCL; + hdsi->Instance->WPCR[1U] |= Value << 6U; + } + else if (Lane == DSI_DATA_LANES) + { + /* Low-Power transmission Slew Rate Compensation on Data Lanes */ + hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_LPSRCDL; + hdsi->Instance->WPCR[1U] |= Value << 8U; + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + break; + case DSI_HS_DELAY: + if (Lane == DSI_CLOCK_LANE) + { + /* High-Speed Transmission Delay on Clock Lane */ + hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_HSTXDCL; + hdsi->Instance->WPCR[1U] |= Value; + } + else if (Lane == DSI_DATA_LANES) + { + /* High-Speed Transmission Delay on Data Lanes */ + hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_HSTXDDL; + hdsi->Instance->WPCR[1U] |= Value << 2U; + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + break; + default: + break; + } + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Low-Power Reception Filter Tuning + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param Frequency cutoff frequency of low-pass filter at the input of LPRX + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_SetLowPowerRXFilter(DSI_HandleTypeDef *hdsi, uint32_t Frequency) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Low-Power RX low-pass Filtering Tuning */ + hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_LPRXFT; + hdsi->Instance->WPCR[1U] |= Frequency << 25U; + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Activate an additional current path on all lanes to meet the SDDTx parameter + * defined in the MIPI D-PHY specification + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. 
+ * @param State ENABLE or DISABLE + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_SetSDD(DSI_HandleTypeDef *hdsi, FunctionalState State) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check function parameters */ + assert_param(IS_FUNCTIONAL_STATE(State)); + + /* Activate/Disactivate additional current path on all lanes */ + hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_SDDC; + hdsi->Instance->WPCR[1U] |= ((uint32_t)State << 12U); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Custom lane pins configuration + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param CustomLane Function to be applyed on selected lane. + * This parameter can be any value of @ref DSI_CustomLane + * @param Lane select between clock or data lane 0 or data lane 1. + * This parameter can be any value of @ref DSI_Lane_Select + * @param State ENABLE or DISABLE + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_SetLanePinsConfiguration(DSI_HandleTypeDef *hdsi, uint32_t CustomLane, uint32_t Lane, + FunctionalState State) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check function parameters */ + assert_param(IS_DSI_CUSTOM_LANE(CustomLane)); + assert_param(IS_DSI_LANE(Lane)); + assert_param(IS_FUNCTIONAL_STATE(State)); + + switch (CustomLane) + { + case DSI_SWAP_LANE_PINS: + if (Lane == DSI_CLK_LANE) + { + /* Swap pins on clock lane */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_SWCL; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 6U); + } + else if (Lane == DSI_DATA_LANE0) + { + /* Swap pins on data lane 0 */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_SWDL0; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 7U); + } + else if (Lane == DSI_DATA_LANE1) + { + /* Swap pins on data lane 1 */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_SWDL1; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 8U); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + break; + case DSI_INVERT_HS_SIGNAL: + if (Lane == DSI_CLK_LANE) + { + /* Invert HS signal on clock lane */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_HSICL; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 9U); + } + else if (Lane == DSI_DATA_LANE0) + { + /* Invert HS signal on data lane 0 */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_HSIDL0; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 10U); + } + else if (Lane == DSI_DATA_LANE1) + { + /* Invert HS signal on data lane 1 */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_HSIDL1; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 11U); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + break; + default: + break; + } + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Set custom timing for the PHY + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param Timing PHY timing to be adjusted. 
+ * This parameter can be any value of @ref DSI_PHY_Timing + * @param State ENABLE or DISABLE + * @param Value Custom value of the timing + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_SetPHYTimings(DSI_HandleTypeDef *hdsi, uint32_t Timing, FunctionalState State, uint32_t Value) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check function parameters */ + assert_param(IS_DSI_PHY_TIMING(Timing)); + assert_param(IS_FUNCTIONAL_STATE(State)); + + switch (Timing) + { + case DSI_TCLK_POST: + /* Enable/Disable custom timing setting */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TCLKPOSTEN; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 27U); + + if (State != DISABLE) + { + /* Set custom value */ + hdsi->Instance->WPCR[4U] &= ~DSI_WPCR4_TCLKPOST; + hdsi->Instance->WPCR[4U] |= Value & DSI_WPCR4_TCLKPOST; + } + + break; + case DSI_TLPX_CLK: + /* Enable/Disable custom timing setting */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TLPXCEN; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 26U); + + if (State != DISABLE) + { + /* Set custom value */ + hdsi->Instance->WPCR[3U] &= ~DSI_WPCR3_TLPXC; + hdsi->Instance->WPCR[3U] |= (Value << 24U) & DSI_WPCR3_TLPXC; + } + + break; + case DSI_THS_EXIT: + /* Enable/Disable custom timing setting */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_THSEXITEN; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 25U); + + if (State != DISABLE) + { + /* Set custom value */ + hdsi->Instance->WPCR[3U] &= ~DSI_WPCR3_THSEXIT; + hdsi->Instance->WPCR[3U] |= (Value << 16U) & DSI_WPCR3_THSEXIT; + } + + break; + case DSI_TLPX_DATA: + /* Enable/Disable custom timing setting */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TLPXDEN; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 24U); + + if (State != DISABLE) + { + /* Set custom value */ + hdsi->Instance->WPCR[3U] &= ~DSI_WPCR3_TLPXD; + hdsi->Instance->WPCR[3U] |= (Value << 8U) & DSI_WPCR3_TLPXD; + } + + break; + case DSI_THS_ZERO: + /* Enable/Disable custom timing setting */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_THSZEROEN; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 23U); + + if (State != DISABLE) + { + /* Set custom value */ + hdsi->Instance->WPCR[3U] &= ~DSI_WPCR3_THSZERO; + hdsi->Instance->WPCR[3U] |= Value & DSI_WPCR3_THSZERO; + } + + break; + case DSI_THS_TRAIL: + /* Enable/Disable custom timing setting */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_THSTRAILEN; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 22U); + + if (State != DISABLE) + { + /* Set custom value */ + hdsi->Instance->WPCR[2U] &= ~DSI_WPCR2_THSTRAIL; + hdsi->Instance->WPCR[2U] |= (Value << 24U) & DSI_WPCR2_THSTRAIL; + } + + break; + case DSI_THS_PREPARE: + /* Enable/Disable custom timing setting */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_THSPREPEN; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 21U); + + if (State != DISABLE) + { + /* Set custom value */ + hdsi->Instance->WPCR[2U] &= ~DSI_WPCR2_THSPREP; + hdsi->Instance->WPCR[2U] |= (Value << 16U) & DSI_WPCR2_THSPREP; + } + + break; + case DSI_TCLK_ZERO: + /* Enable/Disable custom timing setting */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TCLKZEROEN; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 20U); + + if (State != DISABLE) + { + /* Set custom value */ + hdsi->Instance->WPCR[2U] &= ~DSI_WPCR2_TCLKZERO; + hdsi->Instance->WPCR[2U] |= (Value << 8U) & DSI_WPCR2_TCLKZERO; + } + + break; + case DSI_TCLK_PREPARE: + /* Enable/Disable custom timing setting */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TCLKPREPEN; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 19U); + + if (State != DISABLE) + { 
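+        /* Illustrative note: for DSI_TCLK_PREPARE the custom Value is written, unshifted, into the
+           TCLKPREP field of WPCR2 below; a hypothetical call would be
+           HAL_DSI_SetPHYTimings(&hdsi, DSI_TCLK_PREPARE, ENABLE, 10U), where 10U is a placeholder. */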
+ /* Set custom value */ + hdsi->Instance->WPCR[2U] &= ~DSI_WPCR2_TCLKPREP; + hdsi->Instance->WPCR[2U] |= Value & DSI_WPCR2_TCLKPREP; + } + + break; + default: + break; + } + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Force the Clock/Data Lane in TX Stop Mode + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param Lane select between clock or data lanes. + * This parameter can be any value of @ref DSI_Lane_Group + * @param State ENABLE or DISABLE + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ForceTXStopMode(DSI_HandleTypeDef *hdsi, uint32_t Lane, FunctionalState State) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check function parameters */ + assert_param(IS_DSI_LANE_GROUP(Lane)); + assert_param(IS_FUNCTIONAL_STATE(State)); + + if (Lane == DSI_CLOCK_LANE) + { + /* Force/Unforce the Clock Lane in TX Stop Mode */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_FTXSMCL; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 12U); + } + else if (Lane == DSI_DATA_LANES) + { + /* Force/Unforce the Data Lanes in TX Stop Mode */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_FTXSMDL; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 13U); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_ERROR; + } + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Force LP Receiver in Low-Power Mode + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param State ENABLE or DISABLE + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ForceRXLowPower(DSI_HandleTypeDef *hdsi, FunctionalState State) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check function parameters */ + assert_param(IS_FUNCTIONAL_STATE(State)); + + /* Force/Unforce LP Receiver in Low-Power Mode */ + hdsi->Instance->WPCR[1U] &= ~DSI_WPCR1_FLPRXLPM; + hdsi->Instance->WPCR[1U] |= ((uint32_t)State << 22U); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Force Data Lanes in RX Mode after a BTA + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param State ENABLE or DISABLE + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_ForceDataLanesInRX(DSI_HandleTypeDef *hdsi, FunctionalState State) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check function parameters */ + assert_param(IS_FUNCTIONAL_STATE(State)); + + /* Force Data Lanes in RX Mode */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_TDDL; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 16U); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Enable a pull-down on the lanes to prevent from floating states when unused + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. 
+ * @param State ENABLE or DISABLE + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_SetPullDown(DSI_HandleTypeDef *hdsi, FunctionalState State) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check function parameters */ + assert_param(IS_FUNCTIONAL_STATE(State)); + + /* Enable/Disable pull-down on lanes */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_PDEN; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 18U); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @brief Switch off the contention detection on data lanes + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @param State ENABLE or DISABLE + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DSI_SetContentionDetectionOff(DSI_HandleTypeDef *hdsi, FunctionalState State) +{ + /* Process locked */ + __HAL_LOCK(hdsi); + + /* Check function parameters */ + assert_param(IS_FUNCTIONAL_STATE(State)); + + /* Contention Detection on Data Lanes OFF */ + hdsi->Instance->WPCR[0U] &= ~DSI_WPCR0_CDOFFDL; + hdsi->Instance->WPCR[0U] |= ((uint32_t)State << 14U); + + /* Process unlocked */ + __HAL_UNLOCK(hdsi); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup DSI_Group4 Peripheral State and Errors functions + * @brief Peripheral State and Errors functions + * +@verbatim + =============================================================================== + ##### Peripheral State and Errors functions ##### + =============================================================================== + [..] + This subsection provides functions allowing to + (+) Check the DSI state. + (+) Get error code. + +@endverbatim + * @{ + */ + +/** + * @brief Return the DSI state + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval HAL state + */ +HAL_DSI_StateTypeDef HAL_DSI_GetState(DSI_HandleTypeDef *hdsi) +{ + return hdsi->State; +} + +/** + * @brief Return the DSI error code + * @param hdsi pointer to a DSI_HandleTypeDef structure that contains + * the configuration information for the DSI. + * @retval DSI Error Code + */ +uint32_t HAL_DSI_GetError(DSI_HandleTypeDef *hdsi) +{ + /* Get the error code */ + return hdsi->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* DSI */ + +#endif /* HAL_DSI_MODULE_ENABLED */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c new file mode 100644 index 000000000..0dea1145c --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c @@ -0,0 +1,2291 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_eth.c + * @author MCD Application Team + * @brief ETH HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Ethernet (ETH) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State and Errors functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] 
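+   (The numbered steps below are the reference sequence from this driver; the condensed
+    sketch that follows only illustrates them for a polling-mode setup. The handle, buffer
+    and descriptor names, the RMII/PHY-address choices and the ETH_TXBUFNB count are
+    hypothetical placeholders to be adapted to the board and stm32f7xx_hal_conf.h.)
+
+      ETH_HandleTypeDef  heth;
+      ETH_DMADescTypeDef DMATxDscrTab[ETH_TXBUFNB], DMARxDscrTab[ETH_RXBUFNB];
+      uint8_t Tx_Buff[ETH_TXBUFNB][ETH_TX_BUF_SIZE], Rx_Buff[ETH_RXBUFNB][ETH_RX_BUF_SIZE];
+
+      heth.Instance             = ETH;
+      heth.Init.AutoNegotiation = ETH_AUTONEGOTIATION_ENABLE;
+      heth.Init.PhyAddress      = 0;                         (depends on the PHY strapping)
+      heth.Init.MediaInterface  = ETH_MEDIA_INTERFACE_RMII;
+      heth.Init.RxMode          = ETH_RXPOLLING_MODE;
+      heth.Init.ChecksumMode    = ETH_CHECKSUM_BY_HARDWARE;
+      HAL_ETH_Init(&heth);                                   (HAL_ETH_MspInit() sets up clocks/GPIO)
+
+      HAL_ETH_DMATxDescListInit(&heth, DMATxDscrTab, &Tx_Buff[0][0], ETH_TXBUFNB);
+      HAL_ETH_DMARxDescListInit(&heth, DMARxDscrTab, &Rx_Buff[0][0], ETH_RXBUFNB);
+      HAL_ETH_Start(&heth);
+
+      while (1)
+      {
+        if (HAL_ETH_GetReceivedFrame(&heth) == HAL_OK)
+        {
+          use heth.RxFrameInfos.buffer and heth.RxFrameInfos.length, release the
+          descriptors back to the DMA, and send replies with HAL_ETH_TransmitFrame()
+        }
+      }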
+ (#)Declare a ETH_HandleTypeDef handle structure, for example: + ETH_HandleTypeDef heth; + + (#)Fill parameters of Init structure in heth handle + + (#)Call HAL_ETH_Init() API to initialize the Ethernet peripheral (MAC, DMA, ...) + + (#)Initialize the ETH low level resources through the HAL_ETH_MspInit() API: + (##) Enable the Ethernet interface clock using + (+++) __HAL_RCC_ETHMAC_CLK_ENABLE(); + (+++) __HAL_RCC_ETHMACTX_CLK_ENABLE(); + (+++) __HAL_RCC_ETHMACRX_CLK_ENABLE(); + + (##) Initialize the related GPIO clocks + (##) Configure Ethernet pin-out + (##) Configure Ethernet NVIC interrupt (IT mode) + + (#)Initialize Ethernet DMA Descriptors in chain mode and point to allocated buffers: + (##) HAL_ETH_DMATxDescListInit(); for Transmission process + (##) HAL_ETH_DMARxDescListInit(); for Reception process + + (#)Enable MAC and DMA transmission and reception: + (##) HAL_ETH_Start(); + + (#)Prepare ETH DMA TX Descriptors and give the hand to ETH DMA to transfer + the frame to MAC TX FIFO: + (##) HAL_ETH_TransmitFrame(); + + (#)Poll for a received frame in ETH RX DMA Descriptors and get received + frame parameters + (##) HAL_ETH_GetReceivedFrame(); (should be called into an infinite loop) + + (#) Get a received frame when an ETH RX interrupt occurs: + (##) HAL_ETH_GetReceivedFrame_IT(); (called in IT mode only) + + (#) Communicate with external PHY device: + (##) Read a specific register from the PHY + HAL_ETH_ReadPHYRegister(); + (##) Write data to a specific RHY register: + HAL_ETH_WritePHYRegister(); + + (#) Configure the Ethernet MAC after ETH peripheral initialization + HAL_ETH_ConfigMAC(); all MAC parameters should be filled. + + (#) Configure the Ethernet DMA after ETH peripheral initialization + HAL_ETH_ConfigDMA(); all DMA parameters should be filled. + +*** Callback registration *** + ============================================= + + The compilation define USE_HAL_ETH_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + Use Function @ref HAL_ETH_RegisterCallback() to register an interrupt callback. + + Function @ref HAL_ETH_RegisterCallback() allows to register following callbacks: + (+) TxCpltCallback : Tx Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) DMAErrorCallback : DMA Error Callback. + (+) MspInitCallback : MspInit Callback. + (+) MspDeInitCallback: MspDeInit Callback. + + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + Use function @ref HAL_ETH_UnRegisterCallback() to reset a callback to the default + weak function. + @ref HAL_ETH_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) TxCpltCallback : Tx Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) DMAErrorCallback : DMA Error Callback. + (+) MspInitCallback : MspInit Callback. + (+) MspDeInitCallback: MspDeInit Callback. + + By default, after the HAL_ETH_Init and when the state is HAL_ETH_STATE_RESET + all callbacks are set to the corresponding weak functions: + examples @ref HAL_ETH_TxCpltCallback(), @ref HAL_ETH_RxCpltCallback(). + Exception done for MspInit and MspDeInit functions that are + reset to the legacy weak function in the HAL_ETH_Init/ @ref HAL_ETH_DeInit only when + these callbacks are null (not registered beforehand). 
+ if not, MspInit or MspDeInit are not null, the HAL_ETH_Init/ @ref HAL_ETH_DeInit + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) + + Callbacks can be registered/unregistered in HAL_ETH_STATE_READY state only. + Exception done MspInit/MspDeInit that can be registered/unregistered + in HAL_ETH_STATE_READY or HAL_ETH_STATE_RESET state, + thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using @ref HAL_ETH_RegisterCallback() before calling @ref HAL_ETH_DeInit + or HAL_ETH_Init function. + + When The compilation define USE_HAL_ETH_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. + + @endverbatim + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup ETH ETH + * @brief ETH HAL module driver + * @{ + */ + +#ifdef HAL_ETH_MODULE_ENABLED +#if defined (ETH) + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @defgroup ETH_Private_Constants ETH Private Constants + * @{ + */ +#define ETH_TIMEOUT_SWRESET ((uint32_t)500) +#define ETH_TIMEOUT_LINKED_STATE ((uint32_t)5000) +#define ETH_TIMEOUT_AUTONEGO_COMPLETED ((uint32_t)5000) + +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @defgroup ETH_Private_Functions ETH Private Functions + * @{ + */ +static void ETH_MACDMAConfig(ETH_HandleTypeDef *heth, uint32_t err); +static void ETH_MACAddressConfig(ETH_HandleTypeDef *heth, uint32_t MacAddr, uint8_t *Addr); +static void ETH_MACReceptionEnable(ETH_HandleTypeDef *heth); +static void ETH_MACReceptionDisable(ETH_HandleTypeDef *heth); +static void ETH_MACTransmissionEnable(ETH_HandleTypeDef *heth); +static void ETH_MACTransmissionDisable(ETH_HandleTypeDef *heth); +static void ETH_DMATransmissionEnable(ETH_HandleTypeDef *heth); +static void ETH_DMATransmissionDisable(ETH_HandleTypeDef *heth); +static void ETH_DMAReceptionEnable(ETH_HandleTypeDef *heth); +static void ETH_DMAReceptionDisable(ETH_HandleTypeDef *heth); +static void ETH_FlushTransmitFIFO(ETH_HandleTypeDef *heth); +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +static void ETH_InitCallbacksToDefault(ETH_HandleTypeDef *heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup ETH_Exported_Functions ETH Exported Functions + * @{ + */ + +/** @defgroup ETH_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * + @verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initialize and configure the Ethernet peripheral + (+) De-initialize the Ethernet peripheral + + @endverbatim + * @{ + */ + +/** + * @brief Initializes the Ethernet MAC and DMA according to default + * parameters. 
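+ * @note  The SMI (MDC) clock divider is derived from HAL_RCC_GetHCLKFreq() inside this function,
+ *        so the system clocks must already be configured. The PHY is reset through PHY_BCR and,
+ *        unless AutoNegotiation is disabled, the negotiated result overwrites heth->Init.Speed
+ *        and heth->Init.DuplexMode before the MAC and DMA are configured.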
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Init(ETH_HandleTypeDef *heth) +{ + uint32_t tempreg = 0, phyreg = 0; + uint32_t hclk = 60000000; + uint32_t tickstart = 0; + uint32_t err = ETH_SUCCESS; + + /* Check the ETH peripheral state */ + if(heth == NULL) + { + return HAL_ERROR; + } + + /* Check parameters */ + assert_param(IS_ETH_AUTONEGOTIATION(heth->Init.AutoNegotiation)); + assert_param(IS_ETH_RX_MODE(heth->Init.RxMode)); + assert_param(IS_ETH_CHECKSUM_MODE(heth->Init.ChecksumMode)); + assert_param(IS_ETH_MEDIA_INTERFACE(heth->Init.MediaInterface)); + + if(heth->State == HAL_ETH_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + heth->Lock = HAL_UNLOCKED; +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + ETH_InitCallbacksToDefault(heth); + + if(heth->MspInitCallback == NULL) + { + /* Init the low level hardware : GPIO, CLOCK, NVIC. */ + heth->MspInitCallback = HAL_ETH_MspInit; + } + heth->MspInitCallback(heth); + +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC. */ + HAL_ETH_MspInit(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + } + + /* Enable SYSCFG Clock */ + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + /* Select MII or RMII Mode*/ + SYSCFG->PMC &= ~(SYSCFG_PMC_MII_RMII_SEL); + SYSCFG->PMC |= (uint32_t)heth->Init.MediaInterface; + + /* Ethernet Software reset */ + /* Set the SWR bit: resets all MAC subsystem internal registers and logic */ + /* After reset all the registers holds their respective reset values */ + (heth->Instance)->DMABMR |= ETH_DMABMR_SR; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait for software reset */ + while (((heth->Instance)->DMABMR & ETH_DMABMR_SR) != (uint32_t)RESET) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > ETH_TIMEOUT_SWRESET) + { + heth->State= HAL_ETH_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Note: The SWR is not performed if the ETH_RX_CLK or the ETH_TX_CLK are + not available, please check your external PHY or the IO configuration */ + + return HAL_TIMEOUT; + } + } + + /*-------------------------------- MAC Initialization ----------------------*/ + /* Get the ETHERNET MACMIIAR value */ + tempreg = (heth->Instance)->MACMIIAR; + /* Clear CSR Clock Range CR[2:0] bits */ + tempreg &= ETH_MACMIIAR_CR_MASK; + + /* Get hclk frequency value */ + hclk = HAL_RCC_GetHCLKFreq(); + + /* Set CR bits depending on hclk value */ + if((hclk >= 20000000)&&(hclk < 35000000)) + { + /* CSR Clock Range between 20-35 MHz */ + tempreg |= (uint32_t)ETH_MACMIIAR_CR_Div16; + } + else if((hclk >= 35000000)&&(hclk < 60000000)) + { + /* CSR Clock Range between 35-60 MHz */ + tempreg |= (uint32_t)ETH_MACMIIAR_CR_Div26; + } + else if((hclk >= 60000000)&&(hclk < 100000000)) + { + /* CSR Clock Range between 60-100 MHz */ + tempreg |= (uint32_t)ETH_MACMIIAR_CR_Div42; + } + else if((hclk >= 100000000)&&(hclk < 150000000)) + { + /* CSR Clock Range between 100-150 MHz */ + tempreg |= (uint32_t)ETH_MACMIIAR_CR_Div62; + } + else /* ((hclk >= 150000000)&&(hclk <= 216000000)) */ + { + /* CSR Clock Range between 150-216 MHz */ + tempreg |= (uint32_t)ETH_MACMIIAR_CR_Div102; + } + + /* Write to ETHERNET MAC MIIAR: Configure the ETHERNET CSR Clock Range */ + (heth->Instance)->MACMIIAR = (uint32_t)tempreg; + + /*-------------------- PHY initialization and configuration ----------------*/ + /* Put the PHY in reset mode */ + if((HAL_ETH_WritePHYRegister(heth, PHY_BCR, 
PHY_RESET)) != HAL_OK) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + /* Set the ETH peripheral state to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return HAL_ERROR */ + return HAL_ERROR; + } + + /* Delay to assure PHY reset */ + HAL_Delay(PHY_RESET_DELAY); + + if((heth->Init).AutoNegotiation != ETH_AUTONEGOTIATION_DISABLE) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + /* We wait for linked status */ + do + { + HAL_ETH_ReadPHYRegister(heth, PHY_BSR, &phyreg); + + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > ETH_TIMEOUT_LINKED_STATE) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_TIMEOUT; + } + } while (((phyreg & PHY_LINKED_STATUS) != PHY_LINKED_STATUS)); + + + /* Enable Auto-Negotiation */ + if((HAL_ETH_WritePHYRegister(heth, PHY_BCR, PHY_AUTONEGOTIATION)) != HAL_OK) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + /* Set the ETH peripheral state to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return HAL_ERROR */ + return HAL_ERROR; + } + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until the auto-negotiation will be completed */ + do + { + HAL_ETH_ReadPHYRegister(heth, PHY_BSR, &phyreg); + + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > ETH_TIMEOUT_AUTONEGO_COMPLETED) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_TIMEOUT; + } + + } while (((phyreg & PHY_AUTONEGO_COMPLETE) != PHY_AUTONEGO_COMPLETE)); + + /* Read the result of the auto-negotiation */ + if((HAL_ETH_ReadPHYRegister(heth, PHY_SR, &phyreg)) != HAL_OK) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + /* Set the ETH peripheral state to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return HAL_ERROR */ + return HAL_ERROR; + } + + /* Configure the MAC with the Duplex Mode fixed by the auto-negotiation process */ + if((phyreg & PHY_DUPLEX_STATUS) != (uint32_t)RESET) + { + /* Set Ethernet duplex mode to Full-duplex following the auto-negotiation */ + (heth->Init).DuplexMode = ETH_MODE_FULLDUPLEX; + } + else + { + /* Set Ethernet duplex mode to Half-duplex following the auto-negotiation */ + (heth->Init).DuplexMode = ETH_MODE_HALFDUPLEX; + } + /* Configure the MAC with the speed fixed by the auto-negotiation process */ + if((phyreg & PHY_SPEED_STATUS) == PHY_SPEED_STATUS) + { + /* Set Ethernet speed to 10M following the auto-negotiation */ + (heth->Init).Speed = ETH_SPEED_10M; + } + else + { + /* Set Ethernet speed to 100M following the auto-negotiation */ + (heth->Init).Speed = ETH_SPEED_100M; + } + } + else /* AutoNegotiation Disable */ + { + /* Check parameters */ + assert_param(IS_ETH_SPEED(heth->Init.Speed)); + assert_param(IS_ETH_DUPLEX_MODE(heth->Init.DuplexMode)); + + /* Set MAC Speed and Duplex Mode */ + if(HAL_ETH_WritePHYRegister(heth, PHY_BCR, ((uint16_t)((heth->Init).DuplexMode >> 3) | + (uint16_t)((heth->Init).Speed >> 1))) != HAL_OK) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + /* Set the ETH peripheral state to READY 
*/ + heth->State = HAL_ETH_STATE_READY; + + /* Return HAL_ERROR */ + return HAL_ERROR; + } + + /* Delay to assure PHY configuration */ + HAL_Delay(PHY_CONFIG_DELAY); + } + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + /* Set ETH HAL State to Ready */ + heth->State= HAL_ETH_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief De-Initializes the ETH peripheral. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_DeInit(ETH_HandleTypeDef *heth) +{ + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + if(heth->MspDeInitCallback == NULL) + { + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + } + /* De-Init the low level hardware : GPIO, CLOCK, NVIC. */ + heth->MspDeInitCallback(heth); +#else + /* De-Init the low level hardware : GPIO, CLOCK, NVIC. */ + HAL_ETH_MspDeInit(heth); +#endif + + /* Set ETH HAL state to Disabled */ + heth->State= HAL_ETH_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the DMA Tx descriptors in chain mode. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param DMATxDescTab Pointer to the first Tx desc list + * @param TxBuff Pointer to the first TxBuffer list + * @param TxBuffCount Number of the used Tx desc in the list + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_DMATxDescListInit(ETH_HandleTypeDef *heth, ETH_DMADescTypeDef *DMATxDescTab, uint8_t *TxBuff, uint32_t TxBuffCount) +{ + uint32_t i = 0; + ETH_DMADescTypeDef *dmatxdesc; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Set the DMATxDescToSet pointer with the first one of the DMATxDescTab list */ + heth->TxDesc = DMATxDescTab; + + /* Fill each DMATxDesc descriptor with the right values */ + for(i=0; i < TxBuffCount; i++) + { + /* Get the pointer on the ith member of the Tx Desc list */ + dmatxdesc = DMATxDescTab + i; + + /* Set Second Address Chained bit */ + dmatxdesc->Status = ETH_DMATXDESC_TCH; + + /* Set Buffer1 address pointer */ + dmatxdesc->Buffer1Addr = (uint32_t)(&TxBuff[i*ETH_TX_BUF_SIZE]); + + if ((heth->Init).ChecksumMode == ETH_CHECKSUM_BY_HARDWARE) + { + /* Set the DMA Tx descriptors checksum insertion */ + dmatxdesc->Status |= ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL; + } + + /* Initialize the next descriptor with the Next Descriptor Polling Enable */ + if(i < (TxBuffCount-1)) + { + /* Set next descriptor address register with next descriptor base address */ + dmatxdesc->Buffer2NextDescAddr = (uint32_t)(DMATxDescTab+i+1); + } + else + { + /* For last descriptor, set next descriptor address register equal to the first descriptor base address */ + dmatxdesc->Buffer2NextDescAddr = (uint32_t) DMATxDescTab; + } + } + + /* Set Transmit Descriptor List Address Register */ + (heth->Instance)->DMATDLAR = (uint32_t) DMATxDescTab; + + /* Set ETH HAL State to Ready */ + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the DMA Rx descriptors in chain mode. 
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param DMARxDescTab Pointer to the first Rx desc list + * @param RxBuff Pointer to the first RxBuffer list + * @param RxBuffCount Number of the used Rx desc in the list + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_DMARxDescListInit(ETH_HandleTypeDef *heth, ETH_DMADescTypeDef *DMARxDescTab, uint8_t *RxBuff, uint32_t RxBuffCount) +{ + uint32_t i = 0; + ETH_DMADescTypeDef *DMARxDesc; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Set the Ethernet RxDesc pointer with the first one of the DMARxDescTab list */ + heth->RxDesc = DMARxDescTab; + + /* Fill each DMARxDesc descriptor with the right values */ + for(i=0; i < RxBuffCount; i++) + { + /* Get the pointer on the ith member of the Rx Desc list */ + DMARxDesc = DMARxDescTab+i; + + /* Set Own bit of the Rx descriptor Status */ + DMARxDesc->Status = ETH_DMARXDESC_OWN; + + /* Set Buffer1 size and Second Address Chained bit */ + DMARxDesc->ControlBufferSize = ETH_DMARXDESC_RCH | ETH_RX_BUF_SIZE; + + /* Set Buffer1 address pointer */ + DMARxDesc->Buffer1Addr = (uint32_t)(&RxBuff[i*ETH_RX_BUF_SIZE]); + + if((heth->Init).RxMode == ETH_RXINTERRUPT_MODE) + { + /* Enable Ethernet DMA Rx Descriptor interrupt */ + DMARxDesc->ControlBufferSize &= ~ETH_DMARXDESC_DIC; + } + + /* Initialize the next descriptor with the Next Descriptor Polling Enable */ + if(i < (RxBuffCount-1)) + { + /* Set next descriptor address register with next descriptor base address */ + DMARxDesc->Buffer2NextDescAddr = (uint32_t)(DMARxDescTab+i+1); + } + else + { + /* For last descriptor, set next descriptor address register equal to the first descriptor base address */ + DMARxDesc->Buffer2NextDescAddr = (uint32_t)(DMARxDescTab); + } + } + + /* Set Receive Descriptor List Address Register */ + (heth->Instance)->DMARDLAR = (uint32_t) DMARxDescTab; + + /* Set ETH HAL State to Ready */ + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the ETH MSP. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_MspInit(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes ETH MSP. 
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_MspDeInit(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User ETH Callback + * To be used instead of the weak predefined callback + * @param heth eth handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_ETH_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_ETH_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_ETH_DMA_ERROR_CB_ID DMA Error Callback ID + * @arg @ref HAL_ETH_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_ETH_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_ETH_RegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID, pETH_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if(pCallback == NULL) + { + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(heth); + + if(heth->State == HAL_ETH_STATE_READY) + { + switch (CallbackID) + { + case HAL_ETH_TX_COMPLETE_CB_ID : + heth->TxCpltCallback = pCallback; + break; + + case HAL_ETH_RX_COMPLETE_CB_ID : + heth->RxCpltCallback = pCallback; + break; + + case HAL_ETH_DMA_ERROR_CB_ID : + heth->DMAErrorCallback = pCallback; + break; + + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = pCallback; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if(heth->State == HAL_ETH_STATE_RESET) + { + switch (CallbackID) + { + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = pCallback; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(heth); + + return status; +} + +/** + * @brief Unregister an ETH Callback + * ETH callabck is redirected to the weak predefined callback + * @param heth eth handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_ETH_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_ETH_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_ETH_DMA_ERROR_CB_ID DMA Error Callback ID + * @arg @ref HAL_ETH_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_ETH_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(heth); + + if(heth->State == HAL_ETH_STATE_READY) + { + switch (CallbackID) + { + case HAL_ETH_TX_COMPLETE_CB_ID : + heth->TxCpltCallback = HAL_ETH_TxCpltCallback; + break; + + case HAL_ETH_RX_COMPLETE_CB_ID : + heth->RxCpltCallback = HAL_ETH_RxCpltCallback; + break; + + case HAL_ETH_DMA_ERROR_CB_ID : + heth->DMAErrorCallback = HAL_ETH_ErrorCallback; + break; + + case HAL_ETH_MSPINIT_CB_ID : + 
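+      /* Point the handle back to the legacy weak HAL_ETH_MspInit implementation */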
heth->MspInitCallback = HAL_ETH_MspInit; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if(heth->State == HAL_ETH_STATE_RESET) + { + switch (CallbackID) + { + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = HAL_ETH_MspInit; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(heth); + + return status; +} +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group2 IO operation functions + * @brief Data transfers functions + * + @verbatim + ============================================================================== + ##### IO operation functions ##### + ============================================================================== + [..] This section provides functions allowing to: + (+) Transmit a frame + HAL_ETH_TransmitFrame(); + (+) Receive a frame + HAL_ETH_GetReceivedFrame(); + HAL_ETH_GetReceivedFrame_IT(); + (+) Read from an External PHY register + HAL_ETH_ReadPHYRegister(); + (+) Write to an External PHY register + HAL_ETH_WritePHYRegister(); + + @endverbatim + + * @{ + */ + +/** + * @brief Sends an Ethernet frame. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param FrameLength Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_TransmitFrame(ETH_HandleTypeDef *heth, uint32_t FrameLength) +{ + uint32_t bufcount = 0, size = 0, i = 0; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + if (FrameLength == 0) + { + /* Set ETH HAL state to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_ERROR; + } + + /* Check if the descriptor is owned by the ETHERNET DMA (when set) or CPU (when reset) */ + if(((heth->TxDesc)->Status & ETH_DMATXDESC_OWN) != (uint32_t)RESET) + { + /* OWN bit set */ + heth->State = HAL_ETH_STATE_BUSY_TX; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_ERROR; + } + + /* Get the number of needed Tx buffers for the current frame */ + if (FrameLength > ETH_TX_BUF_SIZE) + { + bufcount = FrameLength/ETH_TX_BUF_SIZE; + if (FrameLength % ETH_TX_BUF_SIZE) + { + bufcount++; + } + } + else + { + bufcount = 1; + } + if (bufcount == 1) + { + /* Set LAST and FIRST segment */ + heth->TxDesc->Status |=ETH_DMATXDESC_FS|ETH_DMATXDESC_LS; + /* Set frame size */ + heth->TxDesc->ControlBufferSize = (FrameLength & ETH_DMATXDESC_TBS1); + /* Set Own bit of the Tx descriptor Status: gives the buffer back to ETHERNET DMA */ + heth->TxDesc->Status |= ETH_DMATXDESC_OWN; + /* Point to next descriptor */ + heth->TxDesc= (ETH_DMADescTypeDef *)(heth->TxDesc->Buffer2NextDescAddr); + } + else + { + for (i=0; i< bufcount; i++) + { + /* Clear FIRST and LAST segment bits */ + heth->TxDesc->Status &= ~(ETH_DMATXDESC_FS | ETH_DMATXDESC_LS); + + if (i == 0) + { + /* Setting the first segment bit */ + heth->TxDesc->Status |= ETH_DMATXDESC_FS; + } + + /* Program size */ + heth->TxDesc->ControlBufferSize = (ETH_TX_BUF_SIZE & ETH_DMATXDESC_TBS1); + + if (i == (bufcount-1)) + { + /* Setting the last segment bit */ + 
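+        /* The final descriptor carries only the remaining bytes of the frame:
+           FrameLength minus the (bufcount - 1) full ETH_TX_BUF_SIZE buffers already queued */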
heth->TxDesc->Status |= ETH_DMATXDESC_LS; + size = FrameLength - (bufcount-1)*ETH_TX_BUF_SIZE; + heth->TxDesc->ControlBufferSize = (size & ETH_DMATXDESC_TBS1); + } + + /* Set Own bit of the Tx descriptor Status: gives the buffer back to ETHERNET DMA */ + heth->TxDesc->Status |= ETH_DMATXDESC_OWN; + /* point to next descriptor */ + heth->TxDesc = (ETH_DMADescTypeDef *)(heth->TxDesc->Buffer2NextDescAddr); + } + } + + /* When Tx Buffer unavailable flag is set: clear it and resume transmission */ + if (((heth->Instance)->DMASR & ETH_DMASR_TBUS) != (uint32_t)RESET) + { + /* Clear TBUS ETHERNET DMA flag */ + (heth->Instance)->DMASR = ETH_DMASR_TBUS; + /* Resume DMA transmission*/ + (heth->Instance)->DMATPDR = 0; + } + + /* Set ETH HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Checks for received frames. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_GetReceivedFrame(ETH_HandleTypeDef *heth) +{ + uint32_t framelength = 0; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Check the ETH state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Check if segment is not owned by DMA */ + /* (((heth->RxDesc->Status & ETH_DMARXDESC_OWN) == (uint32_t)RESET) && ((heth->RxDesc->Status & ETH_DMARXDESC_LS) != (uint32_t)RESET)) */ + if(((heth->RxDesc->Status & ETH_DMARXDESC_OWN) == (uint32_t)RESET)) + { + /* Check if last segment */ + if(((heth->RxDesc->Status & ETH_DMARXDESC_LS) != (uint32_t)RESET)) + { + /* increment segment count */ + (heth->RxFrameInfos).SegCount++; + + /* Check if last segment is first segment: one segment contains the frame */ + if ((heth->RxFrameInfos).SegCount == 1) + { + (heth->RxFrameInfos).FSRxDesc =heth->RxDesc; + } + + heth->RxFrameInfos.LSRxDesc = heth->RxDesc; + + /* Get the Frame Length of the received packet: substruct 4 bytes of the CRC */ + framelength = (((heth->RxDesc)->Status & ETH_DMARXDESC_FL) >> ETH_DMARXDESC_FRAMELENGTHSHIFT) - 4; + heth->RxFrameInfos.length = framelength; + + /* Get the address of the buffer start address */ + heth->RxFrameInfos.buffer = ((heth->RxFrameInfos).FSRxDesc)->Buffer1Addr; + /* point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*) ((heth->RxDesc)->Buffer2NextDescAddr); + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; + } + /* Check if first segment */ + else if((heth->RxDesc->Status & ETH_DMARXDESC_FS) != (uint32_t)RESET) + { + (heth->RxFrameInfos).FSRxDesc = heth->RxDesc; + (heth->RxFrameInfos).LSRxDesc = NULL; + (heth->RxFrameInfos).SegCount = 1; + /* Point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*) (heth->RxDesc->Buffer2NextDescAddr); + } + /* Check if intermediate segment */ + else + { + (heth->RxFrameInfos).SegCount++; + /* Point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*) (heth->RxDesc->Buffer2NextDescAddr); + } + } + + /* Set ETH HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_ERROR; +} + +/** + * @brief Gets the Received frame in interrupt mode. 
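+ * @note  Intended for RxMode = ETH_RXINTERRUPT_MODE; a common (illustrative) pattern is to call
+ *        it from the loop or thread woken by HAL_ETH_RxCpltCallback(), e.g.
+ *        if (HAL_ETH_GetReceivedFrame_IT(&heth) == HAL_OK) { forward heth.RxFrameInfos to the stack }
+ *        where heth is the application's handle.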
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_GetReceivedFrame_IT(ETH_HandleTypeDef *heth) +{ + uint32_t descriptorscancounter = 0; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set ETH HAL State to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Scan descriptors owned by CPU */ + while (((heth->RxDesc->Status & ETH_DMARXDESC_OWN) == (uint32_t)RESET) && (descriptorscancounter < ETH_RXBUFNB)) + { + /* Just for security */ + descriptorscancounter++; + + /* Check if first segment in frame */ + /* ((heth->RxDesc->Status & ETH_DMARXDESC_FS) != (uint32_t)RESET) && ((heth->RxDesc->Status & ETH_DMARXDESC_LS) == (uint32_t)RESET)) */ + if((heth->RxDesc->Status & (ETH_DMARXDESC_FS | ETH_DMARXDESC_LS)) == (uint32_t)ETH_DMARXDESC_FS) + { + heth->RxFrameInfos.FSRxDesc = heth->RxDesc; + heth->RxFrameInfos.SegCount = 1; + /* Point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*) (heth->RxDesc->Buffer2NextDescAddr); + } + /* Check if intermediate segment */ + /* ((heth->RxDesc->Status & ETH_DMARXDESC_LS) == (uint32_t)RESET)&& ((heth->RxDesc->Status & ETH_DMARXDESC_FS) == (uint32_t)RESET)) */ + else if ((heth->RxDesc->Status & (ETH_DMARXDESC_LS | ETH_DMARXDESC_FS)) == (uint32_t)RESET) + { + /* Increment segment count */ + (heth->RxFrameInfos.SegCount)++; + /* Point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*)(heth->RxDesc->Buffer2NextDescAddr); + } + /* Should be last segment */ + else + { + /* Last segment */ + heth->RxFrameInfos.LSRxDesc = heth->RxDesc; + + /* Increment segment count */ + (heth->RxFrameInfos.SegCount)++; + + /* Check if last segment is first segment: one segment contains the frame */ + if ((heth->RxFrameInfos.SegCount) == 1) + { + heth->RxFrameInfos.FSRxDesc = heth->RxDesc; + } + + /* Get the Frame Length of the received packet: substruct 4 bytes of the CRC */ + heth->RxFrameInfos.length = (((heth->RxDesc)->Status & ETH_DMARXDESC_FL) >> ETH_DMARXDESC_FRAMELENGTHSHIFT) - 4; + + /* Get the address of the buffer start address */ + heth->RxFrameInfos.buffer =((heth->RxFrameInfos).FSRxDesc)->Buffer1Addr; + + /* Point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*) (heth->RxDesc->Buffer2NextDescAddr); + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; + } + } + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_ERROR; +} + +/** + * @brief This function handles ETH interrupt request. 
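+ * @note  Typical wiring (illustrative, names assumed): the vector-table handler simply forwards to
+ *        this function, e.g.  void ETH_IRQHandler(void) { HAL_ETH_IRQHandler(&heth); }
+ *        with heth the application's ETH handle; the Rx/Tx/Error callbacks below then run in
+ *        interrupt context.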
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +void HAL_ETH_IRQHandler(ETH_HandleTypeDef *heth) +{ + /* Frame received */ + if (__HAL_ETH_DMA_GET_FLAG(heth, ETH_DMA_FLAG_R)) + { +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered Receive complete callback*/ + heth->RxCpltCallback(heth); +#else + /* Receive complete callback */ + HAL_ETH_RxCpltCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + /* Clear the Eth DMA Rx IT pending bits */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMA_IT_R); + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + } + /* Frame transmitted */ + else if (__HAL_ETH_DMA_GET_FLAG(heth, ETH_DMA_FLAG_T)) + { +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /* Call resgistered Transfer complete callback*/ + heth->TxCpltCallback(heth); +#else + /* Transfer complete callback */ + HAL_ETH_TxCpltCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + /* Clear the Eth DMA Tx IT pending bits */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMA_IT_T); + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + } + + /* Clear the interrupt flags */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMA_IT_NIS); + + /* ETH DMA Error */ + if(__HAL_ETH_DMA_GET_FLAG(heth, ETH_DMA_FLAG_AIS)) + { +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + heth->DMAErrorCallback(heth); +#else + /* Ethernet Error callback */ + HAL_ETH_ErrorCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + /* Clear the interrupt flags */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMA_FLAG_AIS); + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + } +} + +/** + * @brief Tx Transfer completed callbacks. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_TxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Rx Transfer completed callbacks. 
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_RxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Ethernet transfer error callbacks + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_ErrorCallback could be implemented in the user file + */ +} + +/** + * @brief Reads a PHY register + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param PHYReg PHY register address, is the index of one of the 32 PHY register. + * This parameter can be one of the following values: + * PHY_BCR: Transceiver Basic Control Register, + * PHY_BSR: Transceiver Basic Status Register. + * More PHY register could be read depending on the used PHY + * @param RegValue PHY register value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ReadPHYRegister(ETH_HandleTypeDef *heth, uint16_t PHYReg, uint32_t *RegValue) +{ + uint32_t tmpreg = 0; + uint32_t tickstart = 0; + + /* Check parameters */ + assert_param(IS_ETH_PHY_ADDRESS(heth->Init.PhyAddress)); + + /* Check the ETH peripheral state */ + if(heth->State == HAL_ETH_STATE_BUSY_RD) + { + return HAL_BUSY; + } + /* Set ETH HAL State to BUSY_RD */ + heth->State = HAL_ETH_STATE_BUSY_RD; + + /* Get the ETHERNET MACMIIAR value */ + tmpreg = heth->Instance->MACMIIAR; + + /* Keep only the CSR Clock Range CR[2:0] bits value */ + tmpreg &= ~ETH_MACMIIAR_CR_MASK; + + /* Prepare the MII address register value */ + tmpreg |=(((uint32_t)heth->Init.PhyAddress << 11) & ETH_MACMIIAR_PA); /* Set the PHY device address */ + tmpreg |=(((uint32_t)PHYReg<<6) & ETH_MACMIIAR_MR); /* Set the PHY register address */ + tmpreg &= ~ETH_MACMIIAR_MW; /* Set the read mode */ + tmpreg |= ETH_MACMIIAR_MB; /* Set the MII Busy bit */ + + /* Write the result value into the MII Address register */ + heth->Instance->MACMIIAR = tmpreg; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check for the Busy flag */ + while((tmpreg & ETH_MACMIIAR_MB) == ETH_MACMIIAR_MB) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > PHY_READ_TO) + { + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_TIMEOUT; + } + + tmpreg = heth->Instance->MACMIIAR; + } + + /* Get MACMIIDR value */ + *RegValue = (uint16_t)(heth->Instance->MACMIIDR); + + /* Set ETH HAL State to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Writes to a PHY register. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param PHYReg PHY register address, is the index of one of the 32 PHY register. + * This parameter can be one of the following values: + * PHY_BCR: Transceiver Control Register. 
+ * More PHY register could be written depending on the used PHY + * @param RegValue the value to write + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_WritePHYRegister(ETH_HandleTypeDef *heth, uint16_t PHYReg, uint32_t RegValue) +{ + uint32_t tmpreg = 0; + uint32_t tickstart = 0; + + /* Check parameters */ + assert_param(IS_ETH_PHY_ADDRESS(heth->Init.PhyAddress)); + + /* Check the ETH peripheral state */ + if(heth->State == HAL_ETH_STATE_BUSY_WR) + { + return HAL_BUSY; + } + /* Set ETH HAL State to BUSY_WR */ + heth->State = HAL_ETH_STATE_BUSY_WR; + + /* Get the ETHERNET MACMIIAR value */ + tmpreg = heth->Instance->MACMIIAR; + + /* Keep only the CSR Clock Range CR[2:0] bits value */ + tmpreg &= ~ETH_MACMIIAR_CR_MASK; + + /* Prepare the MII register address value */ + tmpreg |=(((uint32_t)heth->Init.PhyAddress<<11) & ETH_MACMIIAR_PA); /* Set the PHY device address */ + tmpreg |=(((uint32_t)PHYReg<<6) & ETH_MACMIIAR_MR); /* Set the PHY register address */ + tmpreg |= ETH_MACMIIAR_MW; /* Set the write mode */ + tmpreg |= ETH_MACMIIAR_MB; /* Set the MII Busy bit */ + + /* Give the value to the MII data register */ + heth->Instance->MACMIIDR = (uint16_t)RegValue; + + /* Write the result value into the MII Address register */ + heth->Instance->MACMIIAR = tmpreg; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check for the Busy flag */ + while((tmpreg & ETH_MACMIIAR_MB) == ETH_MACMIIAR_MB) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > PHY_WRITE_TO) + { + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_TIMEOUT; + } + + tmpreg = heth->Instance->MACMIIAR; + } + + /* Set ETH HAL State to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group3 Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Enable MAC and DMA transmission and reception. + HAL_ETH_Start(); + (+) Disable MAC and DMA transmission and reception. 
+ HAL_ETH_Stop(); + (+) Set the MAC configuration in runtime mode + HAL_ETH_ConfigMAC(); + (+) Set the DMA configuration in runtime mode + HAL_ETH_ConfigDMA(); + +@endverbatim + * @{ + */ + + /** + * @brief Enables Ethernet MAC and DMA reception/transmission + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Start(ETH_HandleTypeDef *heth) +{ + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Enable transmit state machine of the MAC for transmission on the MII */ + ETH_MACTransmissionEnable(heth); + + /* Enable receive state machine of the MAC for reception from the MII */ + ETH_MACReceptionEnable(heth); + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + /* Start DMA transmission */ + ETH_DMATransmissionEnable(heth); + + /* Start DMA reception */ + ETH_DMAReceptionEnable(heth); + + /* Set the ETH state to READY*/ + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stop Ethernet MAC and DMA reception/transmission + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Stop(ETH_HandleTypeDef *heth) +{ + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Stop DMA transmission */ + ETH_DMATransmissionDisable(heth); + + /* Stop DMA reception */ + ETH_DMAReceptionDisable(heth); + + /* Disable receive state machine of the MAC for reception from the MII */ + ETH_MACReceptionDisable(heth); + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + /* Disable transmit state machine of the MAC for transmission on the MII */ + ETH_MACTransmissionDisable(heth); + + /* Set the ETH state*/ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Set ETH MAC Configuration. 
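The start/stop pair implemented above is normally driven by the application's link-state logic. A minimal sketch, assuming a handle `heth` created by the application's ETH init code (the function and handle names are illustrative):

    #include "stm32f7xx_hal.h"

    extern ETH_HandleTypeDef heth;        /* assumed: defined by the application's ETH init code */

    HAL_StatusTypeDef app_link_changed(int link_up)
    {
      if (link_up) {
        return HAL_ETH_Start(&heth);      /* enable MAC Tx/Rx, flush the Tx FIFO, start DMA */
      }
      return HAL_ETH_Stop(&heth);         /* reverse order: stop DMA first, then the MAC */
    }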
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param macconf MAC Configuration structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ConfigMAC(ETH_HandleTypeDef *heth, ETH_MACInitTypeDef *macconf) +{ + uint32_t tmpreg = 0; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State= HAL_ETH_STATE_BUSY; + + assert_param(IS_ETH_SPEED(heth->Init.Speed)); + assert_param(IS_ETH_DUPLEX_MODE(heth->Init.DuplexMode)); + + if (macconf != NULL) + { + /* Check the parameters */ + assert_param(IS_ETH_WATCHDOG(macconf->Watchdog)); + assert_param(IS_ETH_JABBER(macconf->Jabber)); + assert_param(IS_ETH_INTER_FRAME_GAP(macconf->InterFrameGap)); + assert_param(IS_ETH_CARRIER_SENSE(macconf->CarrierSense)); + assert_param(IS_ETH_RECEIVE_OWN(macconf->ReceiveOwn)); + assert_param(IS_ETH_LOOPBACK_MODE(macconf->LoopbackMode)); + assert_param(IS_ETH_CHECKSUM_OFFLOAD(macconf->ChecksumOffload)); + assert_param(IS_ETH_RETRY_TRANSMISSION(macconf->RetryTransmission)); + assert_param(IS_ETH_AUTOMATIC_PADCRC_STRIP(macconf->AutomaticPadCRCStrip)); + assert_param(IS_ETH_BACKOFF_LIMIT(macconf->BackOffLimit)); + assert_param(IS_ETH_DEFERRAL_CHECK(macconf->DeferralCheck)); + assert_param(IS_ETH_RECEIVE_ALL(macconf->ReceiveAll)); + assert_param(IS_ETH_SOURCE_ADDR_FILTER(macconf->SourceAddrFilter)); + assert_param(IS_ETH_CONTROL_FRAMES(macconf->PassControlFrames)); + assert_param(IS_ETH_BROADCAST_FRAMES_RECEPTION(macconf->BroadcastFramesReception)); + assert_param(IS_ETH_DESTINATION_ADDR_FILTER(macconf->DestinationAddrFilter)); + assert_param(IS_ETH_PROMISCUOUS_MODE(macconf->PromiscuousMode)); + assert_param(IS_ETH_MULTICAST_FRAMES_FILTER(macconf->MulticastFramesFilter)); + assert_param(IS_ETH_UNICAST_FRAMES_FILTER(macconf->UnicastFramesFilter)); + assert_param(IS_ETH_PAUSE_TIME(macconf->PauseTime)); + assert_param(IS_ETH_ZEROQUANTA_PAUSE(macconf->ZeroQuantaPause)); + assert_param(IS_ETH_PAUSE_LOW_THRESHOLD(macconf->PauseLowThreshold)); + assert_param(IS_ETH_UNICAST_PAUSE_FRAME_DETECT(macconf->UnicastPauseFrameDetect)); + assert_param(IS_ETH_RECEIVE_FLOWCONTROL(macconf->ReceiveFlowControl)); + assert_param(IS_ETH_TRANSMIT_FLOWCONTROL(macconf->TransmitFlowControl)); + assert_param(IS_ETH_VLAN_TAG_COMPARISON(macconf->VLANTagComparison)); + assert_param(IS_ETH_VLAN_TAG_IDENTIFIER(macconf->VLANTagIdentifier)); + + /*------------------------ ETHERNET MACCR Configuration --------------------*/ + /* Get the ETHERNET MACCR value */ + tmpreg = (heth->Instance)->MACCR; + /* Clear WD, PCE, PS, TE and RE bits */ + tmpreg &= ETH_MACCR_CLEAR_MASK; + + tmpreg |= (uint32_t)(macconf->Watchdog | + macconf->Jabber | + macconf->InterFrameGap | + macconf->CarrierSense | + (heth->Init).Speed | + macconf->ReceiveOwn | + macconf->LoopbackMode | + (heth->Init).DuplexMode | + macconf->ChecksumOffload | + macconf->RetryTransmission | + macconf->AutomaticPadCRCStrip | + macconf->BackOffLimit | + macconf->DeferralCheck); + + /* Write to ETHERNET MACCR */ + (heth->Instance)->MACCR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; + + /*----------------------- ETHERNET MACFFR Configuration --------------------*/ + /* Write to ETHERNET MACFFR */ + (heth->Instance)->MACFFR = (uint32_t)(macconf->ReceiveAll | + macconf->SourceAddrFilter | + 
macconf->PassControlFrames | + macconf->BroadcastFramesReception | + macconf->DestinationAddrFilter | + macconf->PromiscuousMode | + macconf->MulticastFramesFilter | + macconf->UnicastFramesFilter); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACFFR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFFR = tmpreg; + + /*--------------- ETHERNET MACHTHR and MACHTLR Configuration ---------------*/ + /* Write to ETHERNET MACHTHR */ + (heth->Instance)->MACHTHR = (uint32_t)macconf->HashTableHigh; + + /* Write to ETHERNET MACHTLR */ + (heth->Instance)->MACHTLR = (uint32_t)macconf->HashTableLow; + /*----------------------- ETHERNET MACFCR Configuration --------------------*/ + + /* Get the ETHERNET MACFCR value */ + tmpreg = (heth->Instance)->MACFCR; + /* Clear xx bits */ + tmpreg &= ETH_MACFCR_CLEAR_MASK; + + tmpreg |= (uint32_t)((macconf->PauseTime << 16) | + macconf->ZeroQuantaPause | + macconf->PauseLowThreshold | + macconf->UnicastPauseFrameDetect | + macconf->ReceiveFlowControl | + macconf->TransmitFlowControl); + + /* Write to ETHERNET MACFCR */ + (heth->Instance)->MACFCR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACFCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFCR = tmpreg; + + /*----------------------- ETHERNET MACVLANTR Configuration -----------------*/ + (heth->Instance)->MACVLANTR = (uint32_t)(macconf->VLANTagComparison | + macconf->VLANTagIdentifier); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACVLANTR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACVLANTR = tmpreg; + } + else /* macconf == NULL : here we just configure Speed and Duplex mode */ + { + /*------------------------ ETHERNET MACCR Configuration --------------------*/ + /* Get the ETHERNET MACCR value */ + tmpreg = (heth->Instance)->MACCR; + + /* Clear FES and DM bits */ + tmpreg &= ~((uint32_t)0x00004800); + + tmpreg |= (uint32_t)(heth->Init.Speed | heth->Init.DuplexMode); + + /* Write to ETHERNET MACCR */ + (heth->Instance)->MACCR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; + } + + /* Set the ETH state to Ready */ + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Sets ETH DMA Configuration. 
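Because the `macconf == NULL` branch above re-applies only `heth->Init.Speed` and `heth->Init.DuplexMode`, a runtime speed/duplex update after re-negotiation can be sketched as follows (handle name assumed as before):

    #include "stm32f7xx_hal.h"

    extern ETH_HandleTypeDef heth;            /* assumed, as in the earlier sketch */

    void app_apply_negotiated_link(void)
    {
      heth.Init.Speed      = ETH_SPEED_100M;
      heth.Init.DuplexMode = ETH_MODE_FULLDUPLEX;
      (void)HAL_ETH_ConfigMAC(&heth, NULL);   /* NULL: update only speed and duplex */
    }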
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param dmaconf DMA Configuration structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ConfigDMA(ETH_HandleTypeDef *heth, ETH_DMAInitTypeDef *dmaconf) +{ + uint32_t tmpreg = 0; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State= HAL_ETH_STATE_BUSY; + + /* Check parameters */ + assert_param(IS_ETH_DROP_TCPIP_CHECKSUM_FRAME(dmaconf->DropTCPIPChecksumErrorFrame)); + assert_param(IS_ETH_RECEIVE_STORE_FORWARD(dmaconf->ReceiveStoreForward)); + assert_param(IS_ETH_FLUSH_RECEIVE_FRAME(dmaconf->FlushReceivedFrame)); + assert_param(IS_ETH_TRANSMIT_STORE_FORWARD(dmaconf->TransmitStoreForward)); + assert_param(IS_ETH_TRANSMIT_THRESHOLD_CONTROL(dmaconf->TransmitThresholdControl)); + assert_param(IS_ETH_FORWARD_ERROR_FRAMES(dmaconf->ForwardErrorFrames)); + assert_param(IS_ETH_FORWARD_UNDERSIZED_GOOD_FRAMES(dmaconf->ForwardUndersizedGoodFrames)); + assert_param(IS_ETH_RECEIVE_THRESHOLD_CONTROL(dmaconf->ReceiveThresholdControl)); + assert_param(IS_ETH_SECOND_FRAME_OPERATE(dmaconf->SecondFrameOperate)); + assert_param(IS_ETH_ADDRESS_ALIGNED_BEATS(dmaconf->AddressAlignedBeats)); + assert_param(IS_ETH_FIXED_BURST(dmaconf->FixedBurst)); + assert_param(IS_ETH_RXDMA_BURST_LENGTH(dmaconf->RxDMABurstLength)); + assert_param(IS_ETH_TXDMA_BURST_LENGTH(dmaconf->TxDMABurstLength)); + assert_param(IS_ETH_ENHANCED_DESCRIPTOR_FORMAT(dmaconf->EnhancedDescriptorFormat)); + assert_param(IS_ETH_DMA_DESC_SKIP_LENGTH(dmaconf->DescriptorSkipLength)); + assert_param(IS_ETH_DMA_ARBITRATION_ROUNDROBIN_RXTX(dmaconf->DMAArbitration)); + + /*----------------------- ETHERNET DMAOMR Configuration --------------------*/ + /* Get the ETHERNET DMAOMR value */ + tmpreg = (heth->Instance)->DMAOMR; + /* Clear xx bits */ + tmpreg &= ETH_DMAOMR_CLEAR_MASK; + + tmpreg |= (uint32_t)(dmaconf->DropTCPIPChecksumErrorFrame | + dmaconf->ReceiveStoreForward | + dmaconf->FlushReceivedFrame | + dmaconf->TransmitStoreForward | + dmaconf->TransmitThresholdControl | + dmaconf->ForwardErrorFrames | + dmaconf->ForwardUndersizedGoodFrames | + dmaconf->ReceiveThresholdControl | + dmaconf->SecondFrameOperate); + + /* Write to ETHERNET DMAOMR */ + (heth->Instance)->DMAOMR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMAOMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMAOMR = tmpreg; + + /*----------------------- ETHERNET DMABMR Configuration --------------------*/ + (heth->Instance)->DMABMR = (uint32_t)(dmaconf->AddressAlignedBeats | + dmaconf->FixedBurst | + dmaconf->RxDMABurstLength | /* !! 
if 4xPBL is selected for Tx or Rx it is applied for the other */ + dmaconf->TxDMABurstLength | + dmaconf->EnhancedDescriptorFormat | + (dmaconf->DescriptorSkipLength << 2) | + dmaconf->DMAArbitration | + ETH_DMABMR_USP); /* Enable use of separate PBL for Rx and Tx */ + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMABMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMABMR = tmpreg; + + /* Set the ETH state to Ready */ + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group4 Peripheral State functions + * @brief Peripheral State functions + * + @verbatim + =============================================================================== + ##### Peripheral State functions ##### + =============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral + and the data flow. + (+) Get the ETH handle state: + HAL_ETH_GetState(); + + + @endverbatim + * @{ + */ + +/** + * @brief Return the ETH HAL state + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL state + */ +HAL_ETH_StateTypeDef HAL_ETH_GetState(ETH_HandleTypeDef *heth) +{ + /* Return ETH state */ + return heth->State; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup ETH_Private_Functions + * @{ + */ + +/** + * @brief Configures Ethernet MAC and DMA with default parameters. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param err Ethernet Init error + * @retval HAL status + */ +static void ETH_MACDMAConfig(ETH_HandleTypeDef *heth, uint32_t err) +{ + ETH_MACInitTypeDef macinit; + ETH_DMAInitTypeDef dmainit; + uint32_t tmpreg = 0; + + if (err != ETH_SUCCESS) /* Auto-negotiation failed */ + { + /* Set Ethernet duplex mode to Full-duplex */ + (heth->Init).DuplexMode = ETH_MODE_FULLDUPLEX; + + /* Set Ethernet speed to 100M */ + (heth->Init).Speed = ETH_SPEED_100M; + } + + /* Ethernet MAC default initialization **************************************/ + macinit.Watchdog = ETH_WATCHDOG_ENABLE; + macinit.Jabber = ETH_JABBER_ENABLE; + macinit.InterFrameGap = ETH_INTERFRAMEGAP_96BIT; + macinit.CarrierSense = ETH_CARRIERSENCE_ENABLE; + macinit.ReceiveOwn = ETH_RECEIVEOWN_ENABLE; + macinit.LoopbackMode = ETH_LOOPBACKMODE_DISABLE; + if(heth->Init.ChecksumMode == ETH_CHECKSUM_BY_HARDWARE) + { + macinit.ChecksumOffload = ETH_CHECKSUMOFFLAOD_ENABLE; + } + else + { + macinit.ChecksumOffload = ETH_CHECKSUMOFFLAOD_DISABLE; + } + macinit.RetryTransmission = ETH_RETRYTRANSMISSION_DISABLE; + macinit.AutomaticPadCRCStrip = ETH_AUTOMATICPADCRCSTRIP_DISABLE; + macinit.BackOffLimit = ETH_BACKOFFLIMIT_10; + macinit.DeferralCheck = ETH_DEFFERRALCHECK_DISABLE; + macinit.ReceiveAll = ETH_RECEIVEAll_DISABLE; + macinit.SourceAddrFilter = ETH_SOURCEADDRFILTER_DISABLE; + macinit.PassControlFrames = ETH_PASSCONTROLFRAMES_BLOCKALL; + macinit.BroadcastFramesReception = ETH_BROADCASTFRAMESRECEPTION_ENABLE; + macinit.DestinationAddrFilter = ETH_DESTINATIONADDRFILTER_NORMAL; + macinit.PromiscuousMode = ETH_PROMISCUOUS_MODE_DISABLE; + macinit.MulticastFramesFilter = ETH_MULTICASTFRAMESFILTER_PERFECT; + macinit.UnicastFramesFilter = ETH_UNICASTFRAMESFILTER_PERFECT; + 
macinit.HashTableHigh = 0x0; + macinit.HashTableLow = 0x0; + macinit.PauseTime = 0x0; + macinit.ZeroQuantaPause = ETH_ZEROQUANTAPAUSE_DISABLE; + macinit.PauseLowThreshold = ETH_PAUSELOWTHRESHOLD_MINUS4; + macinit.UnicastPauseFrameDetect = ETH_UNICASTPAUSEFRAMEDETECT_DISABLE; + macinit.ReceiveFlowControl = ETH_RECEIVEFLOWCONTROL_DISABLE; + macinit.TransmitFlowControl = ETH_TRANSMITFLOWCONTROL_DISABLE; + macinit.VLANTagComparison = ETH_VLANTAGCOMPARISON_16BIT; + macinit.VLANTagIdentifier = 0x0; + + /*------------------------ ETHERNET MACCR Configuration --------------------*/ + /* Get the ETHERNET MACCR value */ + tmpreg = (heth->Instance)->MACCR; + /* Clear WD, PCE, PS, TE and RE bits */ + tmpreg &= ETH_MACCR_CLEAR_MASK; + /* Set the WD bit according to ETH Watchdog value */ + /* Set the JD: bit according to ETH Jabber value */ + /* Set the IFG bit according to ETH InterFrameGap value */ + /* Set the DCRS bit according to ETH CarrierSense value */ + /* Set the FES bit according to ETH Speed value */ + /* Set the DO bit according to ETH ReceiveOwn value */ + /* Set the LM bit according to ETH LoopbackMode value */ + /* Set the DM bit according to ETH Mode value */ + /* Set the IPCO bit according to ETH ChecksumOffload value */ + /* Set the DR bit according to ETH RetryTransmission value */ + /* Set the ACS bit according to ETH AutomaticPadCRCStrip value */ + /* Set the BL bit according to ETH BackOffLimit value */ + /* Set the DC bit according to ETH DeferralCheck value */ + tmpreg |= (uint32_t)(macinit.Watchdog | + macinit.Jabber | + macinit.InterFrameGap | + macinit.CarrierSense | + (heth->Init).Speed | + macinit.ReceiveOwn | + macinit.LoopbackMode | + (heth->Init).DuplexMode | + macinit.ChecksumOffload | + macinit.RetryTransmission | + macinit.AutomaticPadCRCStrip | + macinit.BackOffLimit | + macinit.DeferralCheck); + + /* Write to ETHERNET MACCR */ + (heth->Instance)->MACCR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; + + /*----------------------- ETHERNET MACFFR Configuration --------------------*/ + /* Set the RA bit according to ETH ReceiveAll value */ + /* Set the SAF and SAIF bits according to ETH SourceAddrFilter value */ + /* Set the PCF bit according to ETH PassControlFrames value */ + /* Set the DBF bit according to ETH BroadcastFramesReception value */ + /* Set the DAIF bit according to ETH DestinationAddrFilter value */ + /* Set the PR bit according to ETH PromiscuousMode value */ + /* Set the PM, HMC and HPF bits according to ETH MulticastFramesFilter value */ + /* Set the HUC and HPF bits according to ETH UnicastFramesFilter value */ + /* Write to ETHERNET MACFFR */ + (heth->Instance)->MACFFR = (uint32_t)(macinit.ReceiveAll | + macinit.SourceAddrFilter | + macinit.PassControlFrames | + macinit.BroadcastFramesReception | + macinit.DestinationAddrFilter | + macinit.PromiscuousMode | + macinit.MulticastFramesFilter | + macinit.UnicastFramesFilter); + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACFFR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFFR = tmpreg; + + /*--------------- ETHERNET MACHTHR and MACHTLR Configuration --------------*/ + /* Write to ETHERNET MACHTHR */ + (heth->Instance)->MACHTHR = (uint32_t)macinit.HashTableHigh; + + /* Write to ETHERNET MACHTLR */ + 
(heth->Instance)->MACHTLR = (uint32_t)macinit.HashTableLow; + /*----------------------- ETHERNET MACFCR Configuration -------------------*/ + + /* Get the ETHERNET MACFCR value */ + tmpreg = (heth->Instance)->MACFCR; + /* Clear xx bits */ + tmpreg &= ETH_MACFCR_CLEAR_MASK; + + /* Set the PT bit according to ETH PauseTime value */ + /* Set the DZPQ bit according to ETH ZeroQuantaPause value */ + /* Set the PLT bit according to ETH PauseLowThreshold value */ + /* Set the UP bit according to ETH UnicastPauseFrameDetect value */ + /* Set the RFE bit according to ETH ReceiveFlowControl value */ + /* Set the TFE bit according to ETH TransmitFlowControl value */ + tmpreg |= (uint32_t)((macinit.PauseTime << 16) | + macinit.ZeroQuantaPause | + macinit.PauseLowThreshold | + macinit.UnicastPauseFrameDetect | + macinit.ReceiveFlowControl | + macinit.TransmitFlowControl); + + /* Write to ETHERNET MACFCR */ + (heth->Instance)->MACFCR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACFCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFCR = tmpreg; + + /*----------------------- ETHERNET MACVLANTR Configuration ----------------*/ + /* Set the ETV bit according to ETH VLANTagComparison value */ + /* Set the VL bit according to ETH VLANTagIdentifier value */ + (heth->Instance)->MACVLANTR = (uint32_t)(macinit.VLANTagComparison | + macinit.VLANTagIdentifier); + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACVLANTR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACVLANTR = tmpreg; + + /* Ethernet DMA default initialization ************************************/ + dmainit.DropTCPIPChecksumErrorFrame = ETH_DROPTCPIPCHECKSUMERRORFRAME_ENABLE; + dmainit.ReceiveStoreForward = ETH_RECEIVESTOREFORWARD_ENABLE; + dmainit.FlushReceivedFrame = ETH_FLUSHRECEIVEDFRAME_ENABLE; + dmainit.TransmitStoreForward = ETH_TRANSMITSTOREFORWARD_ENABLE; + dmainit.TransmitThresholdControl = ETH_TRANSMITTHRESHOLDCONTROL_64BYTES; + dmainit.ForwardErrorFrames = ETH_FORWARDERRORFRAMES_DISABLE; + dmainit.ForwardUndersizedGoodFrames = ETH_FORWARDUNDERSIZEDGOODFRAMES_DISABLE; + dmainit.ReceiveThresholdControl = ETH_RECEIVEDTHRESHOLDCONTROL_64BYTES; + dmainit.SecondFrameOperate = ETH_SECONDFRAMEOPERARTE_ENABLE; + dmainit.AddressAlignedBeats = ETH_ADDRESSALIGNEDBEATS_ENABLE; + dmainit.FixedBurst = ETH_FIXEDBURST_ENABLE; + dmainit.RxDMABurstLength = ETH_RXDMABURSTLENGTH_32BEAT; + dmainit.TxDMABurstLength = ETH_TXDMABURSTLENGTH_32BEAT; + dmainit.EnhancedDescriptorFormat = ETH_DMAENHANCEDDESCRIPTOR_ENABLE; + dmainit.DescriptorSkipLength = 0x0; + dmainit.DMAArbitration = ETH_DMAARBITRATION_ROUNDROBIN_RXTX_1_1; + + /* Get the ETHERNET DMAOMR value */ + tmpreg = (heth->Instance)->DMAOMR; + /* Clear xx bits */ + tmpreg &= ETH_DMAOMR_CLEAR_MASK; + + /* Set the DT bit according to ETH DropTCPIPChecksumErrorFrame value */ + /* Set the RSF bit according to ETH ReceiveStoreForward value */ + /* Set the DFF bit according to ETH FlushReceivedFrame value */ + /* Set the TSF bit according to ETH TransmitStoreForward value */ + /* Set the TTC bit according to ETH TransmitThresholdControl value */ + /* Set the FEF bit according to ETH ForwardErrorFrames value */ + /* Set the FUF bit according to ETH ForwardUndersizedGoodFrames value */ + /* Set the RTC bit according to ETH ReceiveThresholdControl value */ + /* Set the OSF bit according to 
ETH SecondFrameOperate value */ + tmpreg |= (uint32_t)(dmainit.DropTCPIPChecksumErrorFrame | + dmainit.ReceiveStoreForward | + dmainit.FlushReceivedFrame | + dmainit.TransmitStoreForward | + dmainit.TransmitThresholdControl | + dmainit.ForwardErrorFrames | + dmainit.ForwardUndersizedGoodFrames | + dmainit.ReceiveThresholdControl | + dmainit.SecondFrameOperate); + + /* Write to ETHERNET DMAOMR */ + (heth->Instance)->DMAOMR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMAOMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMAOMR = tmpreg; + + /*----------------------- ETHERNET DMABMR Configuration ------------------*/ + /* Set the AAL bit according to ETH AddressAlignedBeats value */ + /* Set the FB bit according to ETH FixedBurst value */ + /* Set the RPBL and 4*PBL bits according to ETH RxDMABurstLength value */ + /* Set the PBL and 4*PBL bits according to ETH TxDMABurstLength value */ + /* Set the Enhanced DMA descriptors bit according to ETH EnhancedDescriptorFormat value*/ + /* Set the DSL bit according to ETH DesciptorSkipLength value */ + /* Set the PR and DA bits according to ETH DMAArbitration value */ + (heth->Instance)->DMABMR = (uint32_t)(dmainit.AddressAlignedBeats | + dmainit.FixedBurst | + dmainit.RxDMABurstLength | /* !! if 4xPBL is selected for Tx or Rx it is applied for the other */ + dmainit.TxDMABurstLength | + dmainit.EnhancedDescriptorFormat | + (dmainit.DescriptorSkipLength << 2) | + dmainit.DMAArbitration | + ETH_DMABMR_USP); /* Enable use of separate PBL for Rx and Tx */ + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMABMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMABMR = tmpreg; + + if((heth->Init).RxMode == ETH_RXINTERRUPT_MODE) + { + /* Enable the Ethernet Rx Interrupt */ + __HAL_ETH_DMA_ENABLE_IT((heth), ETH_DMA_IT_NIS | ETH_DMA_IT_R); + } + + /* Initialize MAC address in ethernet MAC */ + ETH_MACAddressConfig(heth, ETH_MAC_ADDRESS0, heth->Init.MACAddr); +} + +/** + * @brief Configures the selected MAC address. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param MacAddr The MAC address to configure + * This parameter can be one of the following values: + * @arg ETH_MAC_Address0: MAC Address0 + * @arg ETH_MAC_Address1: MAC Address1 + * @arg ETH_MAC_Address2: MAC Address2 + * @arg ETH_MAC_Address3: MAC Address3 + * @param Addr Pointer to MAC address buffer data (6 bytes) + * @retval HAL status + */ +static void ETH_MACAddressConfig(ETH_HandleTypeDef *heth, uint32_t MacAddr, uint8_t *Addr) +{ + uint32_t tmpreg; + + /* Check the parameters */ + assert_param(IS_ETH_MAC_ADDRESS0123(MacAddr)); + + /* Calculate the selected MAC address high register */ + tmpreg = ((uint32_t)Addr[5] << 8) | (uint32_t)Addr[4]; + /* Load the selected MAC address high register */ + (*(__IO uint32_t *)((uint32_t)(ETH_MAC_ADDR_HBASE + MacAddr))) = tmpreg; + /* Calculate the selected MAC address low register */ + tmpreg = ((uint32_t)Addr[3] << 24) | ((uint32_t)Addr[2] << 16) | ((uint32_t)Addr[1] << 8) | Addr[0]; + + /* Load the selected MAC address low register */ + (*(__IO uint32_t *)((uint32_t)(ETH_MAC_ADDR_LBASE + MacAddr))) = tmpreg; +} + +/** + * @brief Enables the MAC transmission. 
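ETH_MACAddressConfig() above takes its six bytes from `heth.Init.MACAddr`, so the station address is normally supplied before the driver's init function runs. A sketch with illustrative address bytes:

    #include "stm32f7xx_hal.h"

    extern ETH_HandleTypeDef heth;                       /* assumed, as above */

    void app_set_station_address(void)
    {
      static uint8_t mac[6] = { 0x00, 0x80, 0xE1, 0x00, 0x00, 0x01 };  /* illustrative */

      heth.Init.MACAddr = mac;   /* later loaded into MAC address register 0 by ETH_MACAddressConfig() */
    }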
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_MACTransmissionEnable(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Enable the MAC transmission */ + (heth->Instance)->MACCR |= ETH_MACCR_TE; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; +} + +/** + * @brief Disables the MAC transmission. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_MACTransmissionDisable(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Disable the MAC transmission */ + (heth->Instance)->MACCR &= ~ETH_MACCR_TE; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; +} + +/** + * @brief Enables the MAC reception. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_MACReceptionEnable(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Enable the MAC reception */ + (heth->Instance)->MACCR |= ETH_MACCR_RE; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; +} + +/** + * @brief Disables the MAC reception. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_MACReceptionDisable(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Disable the MAC reception */ + (heth->Instance)->MACCR &= ~ETH_MACCR_RE; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; +} + +/** + * @brief Enables the DMA transmission. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMATransmissionEnable(ETH_HandleTypeDef *heth) +{ + /* Enable the DMA transmission */ + (heth->Instance)->DMAOMR |= ETH_DMAOMR_ST; +} + +/** + * @brief Disables the DMA transmission. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMATransmissionDisable(ETH_HandleTypeDef *heth) +{ + /* Disable the DMA transmission */ + (heth->Instance)->DMAOMR &= ~ETH_DMAOMR_ST; +} + +/** + * @brief Enables the DMA reception. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMAReceptionEnable(ETH_HandleTypeDef *heth) +{ + /* Enable the DMA reception */ + (heth->Instance)->DMAOMR |= ETH_DMAOMR_SR; +} + +/** + * @brief Disables the DMA reception. 
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMAReceptionDisable(ETH_HandleTypeDef *heth) +{ + /* Disable the DMA reception */ + (heth->Instance)->DMAOMR &= ~ETH_DMAOMR_SR; +} + +/** + * @brief Clears the ETHERNET transmit FIFO. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_FlushTransmitFIFO(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Set the Flush Transmit FIFO bit */ + (heth->Instance)->DMAOMR |= ETH_DMAOMR_FTF; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMAOMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMAOMR = tmpreg; +} + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +static void ETH_InitCallbacksToDefault(ETH_HandleTypeDef *heth) +{ + /* Init the ETH Callback settings */ + heth->TxCpltCallback = HAL_ETH_TxCpltCallback; /* Legacy weak TxCpltCallback */ + heth->RxCpltCallback = HAL_ETH_RxCpltCallback; /* Legacy weak RxCpltCallback */ + heth->DMAErrorCallback = HAL_ETH_ErrorCallback; /* Legacy weak DMAErrorCallback */ +} +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ + +#endif /* ETH */ +#endif /* HAL_ETH_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c new file mode 100644 index 000000000..d4fe0891a --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c @@ -0,0 +1,559 @@ +/** + ****************************************************************************** + * @file stm32F7xx_hal_exti.c + * @author MCD Application Team + * @brief EXTI HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Extended Interrupts and events controller (EXTI) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + @verbatim + ============================================================================== + ##### EXTI Peripheral features ##### + ============================================================================== + [..] + (+) Each Exti line can be configured within this driver. + + (+) Exti line can be configured in 3 different modes + (++) Interrupt + (++) Event + (++) Both of them + + (+) Configurable Exti lines can be configured with 3 different triggers + (++) Rising + (++) Falling + (++) Both of them + + (+) When set in interrupt mode, configurable Exti lines have two different + interrupts pending registers which allow to distinguish which transition + occurs: + (++) Rising edge pending interrupt + (++) Falling + + (+) Exti lines 0 to 15 are linked to gpio pin number 0 to 15. Gpio port can + be selected through multiplexer. + + ##### How to use this driver ##### + ============================================================================== + [..] + + (#) Configure the EXTI line using HAL_EXTI_SetConfigLine(). + (++) Choose the interrupt line number by setting "Line" member from + EXTI_ConfigTypeDef structure. 
+ (++) Configure the interrupt and/or event mode using "Mode" member from + EXTI_ConfigTypeDef structure. + (++) For configurable lines, configure rising and/or falling trigger + "Trigger" member from EXTI_ConfigTypeDef structure. + (++) For Exti lines linked to gpio, choose gpio port using "GPIOSel" + member from GPIO_InitTypeDef structure. + + (#) Get current Exti configuration of a dedicated line using + HAL_EXTI_GetConfigLine(). + (++) Provide exiting handle as parameter. + (++) Provide pointer on EXTI_ConfigTypeDef structure as second parameter. + + (#) Clear Exti configuration of a dedicated line using HAL_EXTI_GetConfigLine(). + (++) Provide exiting handle as parameter. + + (#) Register callback to treat Exti interrupts using HAL_EXTI_RegisterCallback(). + (++) Provide exiting handle as first parameter. + (++) Provide which callback will be registered using one value from + EXTI_CallbackIDTypeDef. + (++) Provide callback function pointer. + + (#) Get interrupt pending bit using HAL_EXTI_GetPending(). + + (#) Clear interrupt pending bit using HAL_EXTI_GetPending(). + + (#) Generate software interrupt using HAL_EXTI_GenerateSWI(). + + @endverbatim + ****************************************************************************** + * @attention + * + *

<h2><center>&copy; Copyright (c) 2018 STMicroelectronics. + * All rights reserved.</center></h2>
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup EXTI + * @{ + */ +/** MISRA C:2012 deviation rule has been granted for following rule: + * Rule-18.1_b - Medium: Array `EXTICR' 1st subscript interval [0,7] may be out + * of bounds [0,3] in following API : + * HAL_EXTI_SetConfigLine + * HAL_EXTI_GetConfigLine + * HAL_EXTI_ClearConfigLine + */ + +#ifdef HAL_EXTI_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private defines -----------------------------------------------------------*/ +/** @defgroup EXTI_Private_Constants EXTI Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup EXTI_Exported_Functions + * @{ + */ + +/** @addtogroup EXTI_Exported_Functions_Group1 + * @brief Configuration functions + * +@verbatim + =============================================================================== + ##### Configuration functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Set configuration of a dedicated Exti line. + * @param hexti Exti handle. + * @param pExtiConfig Pointer on EXTI configuration to be set. + * @retval HAL Status. 
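A minimal configuration sketch for the function documented here; `EXTI_LINE_13` and `EXTI_GPIOC` are identifiers from the driver header, and the concrete line/port pairing is illustrative:

    #include "stm32f7xx_hal.h"

    EXTI_HandleTypeDef hexti_user;                 /* illustrative handle */

    void app_config_user_exti(void)
    {
      EXTI_ConfigTypeDef cfg;

      cfg.Line    = EXTI_LINE_13;                  /* assumed: EXTI line 13 (a GPIO line) */
      cfg.Mode    = EXTI_MODE_INTERRUPT;
      cfg.Trigger = EXTI_TRIGGER_RISING;
      cfg.GPIOSel = EXTI_GPIOC;                    /* assumed: route port C pin 13 onto the line */
      (void)HAL_EXTI_SetConfigLine(&hexti_user, &cfg);
    }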
+ */ +HAL_StatusTypeDef HAL_EXTI_SetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check null pointer */ + if ((hexti == NULL) || (pExtiConfig == NULL)) + { + return HAL_ERROR; + } + + /* Check parameters */ + assert_param(IS_EXTI_LINE(pExtiConfig->Line)); + assert_param(IS_EXTI_MODE(pExtiConfig->Mode)); + + /* Assign line number to handle */ + hexti->Line = pExtiConfig->Line; + + /* Compute line mask */ + linepos = (pExtiConfig->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* Configure triggers for configurable lines */ + if ((pExtiConfig->Line & EXTI_CONFIG) != 0x00u) + { + assert_param(IS_EXTI_TRIGGER(pExtiConfig->Trigger)); + + /* Configure rising trigger */ + /* Mask or set line */ + if ((pExtiConfig->Trigger & EXTI_TRIGGER_RISING) != 0x00u) + { + EXTI->RTSR |= maskline; + } + else + { + EXTI->RTSR &= ~maskline; + } + + /* Configure falling trigger */ + /* Mask or set line */ + if ((pExtiConfig->Trigger & EXTI_TRIGGER_FALLING) != 0x00u) + { + EXTI->FTSR |= maskline; + } + else + { + EXTI->FTSR &= ~maskline; + } + + + /* Configure gpio port selection in case of gpio exti line */ + if ((pExtiConfig->Line & EXTI_GPIO) == EXTI_GPIO) + { + assert_param(IS_EXTI_GPIO_PORT(pExtiConfig->GPIOSel)); + assert_param(IS_EXTI_GPIO_PIN(linepos)); + + regval = SYSCFG->EXTICR[linepos >> 2u]; + regval &= ~(SYSCFG_EXTICR1_EXTI0 << (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))); + regval |= (pExtiConfig->GPIOSel << (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))); + SYSCFG->EXTICR[linepos >> 2u] = regval; + } + } + + /* Configure interrupt mode : read current mode */ + /* Mask or set line */ + if ((pExtiConfig->Mode & EXTI_MODE_INTERRUPT) != 0x00u) + { + EXTI->IMR |= maskline; + } + else + { + EXTI->IMR &= ~maskline; + } + + /* Configure event mode : read current mode */ + /* Mask or set line */ + if ((pExtiConfig->Mode & EXTI_MODE_EVENT) != 0x00u) + { + EXTI->EMR |= maskline; + } + else + { + EXTI->EMR &= ~maskline; + } + + return HAL_OK; +} + +/** + * @brief Get configuration of a dedicated Exti line. + * @param hexti Exti handle. + * @param pExtiConfig Pointer on structure to store Exti configuration. + * @retval HAL Status. 
+ */ +HAL_StatusTypeDef HAL_EXTI_GetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check null pointer */ + if ((hexti == NULL) || (pExtiConfig == NULL)) + { + return HAL_ERROR; + } + + /* Check the parameter */ + assert_param(IS_EXTI_LINE(hexti->Line)); + + /* Store handle line number to configuration structure */ + pExtiConfig->Line = hexti->Line; + + /* Compute line mask */ + linepos = (pExtiConfig->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* 1] Get core mode : interrupt */ + + /* Check if selected line is enable */ + if ((EXTI->IMR & maskline) != 0x00u) + { + pExtiConfig->Mode = EXTI_MODE_INTERRUPT; + } + else + { + pExtiConfig->Mode = EXTI_MODE_NONE; + } + + /* Get event mode */ + /* Check if selected line is enable */ + if ((EXTI->EMR & maskline) != 0x00u) + { + pExtiConfig->Mode |= EXTI_MODE_EVENT; + } + + /* 2] Get trigger for configurable lines : rising */ + if ((pExtiConfig->Line & EXTI_CONFIG) != 0x00u) + { + /* Check if configuration of selected line is enable */ + if ((EXTI->RTSR & maskline) != 0x00u) + { + pExtiConfig->Trigger = EXTI_TRIGGER_RISING; + } + else + { + pExtiConfig->Trigger = EXTI_TRIGGER_NONE; + } + + /* Get falling configuration */ + /* Check if configuration of selected line is enable */ + if ((EXTI->FTSR & maskline) != 0x00u) + { + pExtiConfig->Trigger |= EXTI_TRIGGER_FALLING; + } + + /* Get Gpio port selection for gpio lines */ + if ((pExtiConfig->Line & EXTI_GPIO) == EXTI_GPIO) + { + assert_param(IS_EXTI_GPIO_PIN(linepos)); + + regval = SYSCFG->EXTICR[linepos >> 2u]; + pExtiConfig->GPIOSel = ((regval << (SYSCFG_EXTICR1_EXTI1_Pos * (3uL - (linepos & 0x03u)))) >> 24); + } + else + { + pExtiConfig->GPIOSel = 0x00u; + } + } + else + { + /* No Trigger selected */ + pExtiConfig->Trigger = EXTI_TRIGGER_NONE; + pExtiConfig->GPIOSel = 0x00u; + } + + return HAL_OK; +} + +/** + * @brief Clear whole configuration of a dedicated Exti line. + * @param hexti Exti handle. + * @retval HAL Status. + */ +HAL_StatusTypeDef HAL_EXTI_ClearConfigLine(EXTI_HandleTypeDef *hexti) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check null pointer */ + if (hexti == NULL) + { + return HAL_ERROR; + } + + /* Check the parameter */ + assert_param(IS_EXTI_LINE(hexti->Line)); + + /* compute line mask */ + linepos = (hexti->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* 1] Clear interrupt mode */ + EXTI->IMR = (EXTI->IMR & ~maskline); + + /* 2] Clear event mode */ + EXTI->EMR = (EXTI->EMR & ~maskline); + + /* 3] Clear triggers in case of configurable lines */ + if ((hexti->Line & EXTI_CONFIG) != 0x00u) + { + EXTI->RTSR = (EXTI->RTSR & ~maskline); + EXTI->FTSR = (EXTI->FTSR & ~maskline); + + /* Get Gpio port selection for gpio lines */ + if ((hexti->Line & EXTI_GPIO) == EXTI_GPIO) + { + assert_param(IS_EXTI_GPIO_PIN(linepos)); + + regval = SYSCFG->EXTICR[linepos >> 2u]; + regval &= ~(SYSCFG_EXTICR1_EXTI0 << (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))); + SYSCFG->EXTICR[linepos >> 2u] = regval; + } + } + + return HAL_OK; +} + +/** + * @brief Register callback for a dedicated Exti line. + * @param hexti Exti handle. + * @param CallbackID User callback identifier. + * This parameter can be one of @arg @ref EXTI_CallbackIDTypeDef values. + * @param pPendingCbfn function pointer to be stored as callback. + * @retval HAL Status. 
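Wiring the registered callback to the NVIC vector can be sketched as follows; the handle from the previous sketch is reused and the vector name `EXTI15_10_IRQHandler` is assumed from the device startup file:

    #include "stm32f7xx_hal.h"

    extern EXTI_HandleTypeDef hexti_user;          /* configured in the earlier sketch */

    static void on_user_exti(void)
    {
      /* application reaction to the detected edge */
    }

    void app_register_exti_callback(void)
    {
      (void)HAL_EXTI_RegisterCallback(&hexti_user, HAL_EXTI_COMMON_CB_ID, on_user_exti);
    }

    void EXTI15_10_IRQHandler(void)                /* assumed vector name for lines 10..15 */
    {
      HAL_EXTI_IRQHandler(&hexti_user);            /* clears the pending bit, then calls on_user_exti */
    }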
+ */ +HAL_StatusTypeDef HAL_EXTI_RegisterCallback(EXTI_HandleTypeDef *hexti, EXTI_CallbackIDTypeDef CallbackID, void (*pPendingCbfn)(void)) +{ + HAL_StatusTypeDef status = HAL_OK; + + switch (CallbackID) + { + case HAL_EXTI_COMMON_CB_ID: + hexti->PendingCallback = pPendingCbfn; + break; + + default: + status = HAL_ERROR; + break; + } + + return status; +} + +/** + * @brief Store line number as handle private field. + * @param hexti Exti handle. + * @param ExtiLine Exti line number. + * This parameter can be from 0 to @ref EXTI_LINE_NB. + * @retval HAL Status. + */ +HAL_StatusTypeDef HAL_EXTI_GetHandle(EXTI_HandleTypeDef *hexti, uint32_t ExtiLine) +{ + /* Check the parameters */ + assert_param(IS_EXTI_LINE(ExtiLine)); + + /* Check null pointer */ + if (hexti == NULL) + { + return HAL_ERROR; + } + else + { + /* Store line number as handle private field */ + hexti->Line = ExtiLine; + + return HAL_OK; + } +} + +/** + * @} + */ + +/** @addtogroup EXTI_Exported_Functions_Group2 + * @brief EXTI IO functions. + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Handle EXTI interrupt request. + * @param hexti Exti handle. + * @retval none. + */ +void HAL_EXTI_IRQHandler(EXTI_HandleTypeDef *hexti) +{ + uint32_t regval; + uint32_t maskline; + + /* Compute line mask */ + maskline = (1uL << (hexti->Line & EXTI_PIN_MASK)); + + /* Get pending bit */ + regval = (EXTI->PR & maskline); + if (regval != 0x00u) + { + /* Clear pending bit */ + EXTI->PR = maskline; + + /* Call callback */ + if (hexti->PendingCallback != NULL) + { + hexti->PendingCallback(); + } + } +} + +/** + * @brief Get interrupt pending bit of a dedicated line. + * @param hexti Exti handle. + * @param Edge Specify which pending edge as to be checked. + * This parameter can be one of the following values: + * @arg @ref EXTI_TRIGGER_RISING_FALLING + * This parameter is kept for compatibility with other series. + * @retval 1 if interrupt is pending else 0. + */ +uint32_t HAL_EXTI_GetPending(EXTI_HandleTypeDef *hexti, uint32_t Edge) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check parameters */ + assert_param(IS_EXTI_LINE(hexti->Line)); + assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); + assert_param(IS_EXTI_PENDING_EDGE(Edge)); + + /* Compute line mask */ + linepos = (hexti->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* return 1 if bit is set else 0 */ + regval = ((EXTI->PR & maskline) >> linepos); + return regval; +} + +/** + * @brief Clear interrupt pending bit of a dedicated line. + * @param hexti Exti handle. + * @param Edge Specify which pending edge as to be clear. + * This parameter can be one of the following values: + * @arg @ref EXTI_TRIGGER_RISING_FALLING + * This parameter is kept for compatibility with other series. + * @retval None. + */ +void HAL_EXTI_ClearPending(EXTI_HandleTypeDef *hexti, uint32_t Edge) +{ + uint32_t maskline; + + /* Check parameters */ + assert_param(IS_EXTI_LINE(hexti->Line)); + assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); + assert_param(IS_EXTI_PENDING_EDGE(Edge)); + + /* Compute line mask */ + maskline = (1uL << (hexti->Line & EXTI_PIN_MASK)); + + /* Clear Pending bit */ + EXTI->PR = maskline; +} + +/** + * @brief Generate a software interrupt for a dedicated line. + * @param hexti Exti handle. + * @retval None. 
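The pending-bit helpers above and the software trigger documented here can be combined into a small self-test sketch; it is only meaningful while the line is configured in interrupt mode but its NVIC interrupt is not yet enabled, otherwise the IRQ handler clears the bit first:

    #include "stm32f7xx_hal.h"

    extern EXTI_HandleTypeDef hexti_user;          /* assumed, as in the earlier sketches */

    int app_exti_selftest(void)
    {
      HAL_EXTI_GenerateSWI(&hexti_user);           /* latch the line from software */

      if (HAL_EXTI_GetPending(&hexti_user, EXTI_TRIGGER_RISING_FALLING) != 0u)
      {
        HAL_EXTI_ClearPending(&hexti_user, EXTI_TRIGGER_RISING_FALLING);
        return 1;                                  /* pending bit was set as expected */
      }
      return 0;
    }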
+ */ +void HAL_EXTI_GenerateSWI(EXTI_HandleTypeDef *hexti) +{ + uint32_t maskline; + + /* Check parameters */ + assert_param(IS_EXTI_LINE(hexti->Line)); + assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); + + /* Compute line mask */ + maskline = (1uL << (hexti->Line & EXTI_PIN_MASK)); + + /* Generate Software interrupt */ + EXTI->SWIER = maskline; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_EXTI_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c new file mode 100644 index 000000000..9cb569e55 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c @@ -0,0 +1,817 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_flash.c + * @author MCD Application Team + * @brief FLASH HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the internal FLASH memory: + * + Program operations functions + * + Memory Control functions + * + Peripheral Errors functions + * + @verbatim + ============================================================================== + ##### FLASH peripheral features ##### + ============================================================================== + + [..] The Flash memory interface manages CPU AHB I-Code and D-Code accesses + to the Flash memory. It implements the erase and program Flash memory operations + and the read and write protection mechanisms. + + [..] The Flash memory interface accelerates code execution with a system of instruction + prefetch and cache lines. + + [..] The FLASH main features are: + (+) Flash memory read operations + (+) Flash memory program/erase operations + (+) Read / write protections + (+) Prefetch on I-Code + (+) 64 cache lines of 128 bits on I-Code + (+) 8 cache lines of 128 bits on D-Code + + ##### How to use this driver ##### + ============================================================================== + [..] + This driver provides functions and macros to configure and program the FLASH + memory of all STM32F7xx devices. + + (#) FLASH Memory IO Programming functions: + (++) Lock and Unlock the FLASH interface using HAL_FLASH_Unlock() and + HAL_FLASH_Lock() functions + (++) Program functions: byte, half word, word and double word + (++) There Two modes of programming : + (+++) Polling mode using HAL_FLASH_Program() function + (+++) Interrupt mode using HAL_FLASH_Program_IT() function + + (#) Interrupts and flags management functions : + (++) Handle FLASH interrupts by calling HAL_FLASH_IRQHandler() + (++) Wait for last FLASH operation according to its status + (++) Get error flag status by calling HAL_SetErrorCode() + [..] + In addition to these functions, this driver includes a set of macros allowing + to handle the following operations: + (+) Set the latency + (+) Enable/Disable the prefetch buffer + (+) Enable/Disable the Instruction cache and the Data cache + (+) Reset the Instruction cache and the Data cache + (+) Enable/Disable the FLASH interrupts + (+) Monitor the FLASH flags status + [..] + (@) For any Flash memory program operation (erase or program), the CPU clock frequency + (HCLK) must be at least 1MHz. 
+ (@) The contents of the Flash memory are not guaranteed if a device reset occurs during + a Flash memory operation. + (@) Any attempt to read the Flash memory while it is being written or erased, causes the + bus to stall. Read operations are processed correctly once the program operation has + completed. This means that code or data fetches cannot be performed while a write/erase + operation is ongoing. + + @endverbatim + ****************************************************************************** + * @attention + * + *

<h2><center>&copy; Copyright (c) 2017 STMicroelectronics. + * All rights reserved.</center></h2>
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup FLASH FLASH + * @brief FLASH HAL module driver + * @{ + */ + +#ifdef HAL_FLASH_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup FLASH_Private_Constants + * @{ + */ +#define SECTOR_MASK ((uint32_t)0xFFFFFF07U) +#define FLASH_TIMEOUT_VALUE ((uint32_t)50000U)/* 50 s */ +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup FLASH_Private_Variables + * @{ + */ +/* Variable used for Erase sectors under interruption */ +FLASH_ProcessTypeDef pFlash; +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup FLASH_Private_Functions + * @{ + */ +/* Program operations */ +static void FLASH_Program_DoubleWord(uint32_t Address, uint64_t Data); +static void FLASH_Program_Word(uint32_t Address, uint32_t Data); +static void FLASH_Program_HalfWord(uint32_t Address, uint16_t Data); +static void FLASH_Program_Byte(uint32_t Address, uint8_t Data); +static void FLASH_SetErrorCode(void); + +HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Functions FLASH Exported Functions + * @{ + */ + +/** @defgroup FLASH_Exported_Functions_Group1 Programming operation functions + * @brief Programming operation functions + * +@verbatim + =============================================================================== + ##### Programming operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the FLASH + program operations. + +@endverbatim + * @{ + */ + +/** + * @brief Program byte, halfword, word or double word at a specified address + * @param TypeProgram Indicate the way to program at a specified address. + * This parameter can be a value of @ref FLASH_Type_Program + * @param Address specifies the address to be programmed. 
+ * @param Data specifies the data to be programmed + * + * @retval HAL_StatusTypeDef HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Program(uint32_t TypeProgram, uint32_t Address, uint64_t Data) +{ + HAL_StatusTypeDef status = HAL_ERROR; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEPROGRAM(TypeProgram)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + switch(TypeProgram) + { + case FLASH_TYPEPROGRAM_BYTE : + { + /*Program byte (8-bit) at a specified address.*/ + FLASH_Program_Byte(Address, (uint8_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_HALFWORD : + { + /*Program halfword (16-bit) at a specified address.*/ + FLASH_Program_HalfWord(Address, (uint16_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_WORD : + { + /*Program word (32-bit) at a specified address.*/ + FLASH_Program_Word(Address, (uint32_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_DOUBLEWORD : + { + /*Program double word (64-bit) at a specified address.*/ + FLASH_Program_DoubleWord(Address, Data); + break; + } + default : + break; + } + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + /* If the program operation is completed, disable the PG Bit */ + FLASH->CR &= (~FLASH_CR_PG); + } + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + + return status; +} + +/** + * @brief Program byte, halfword, word or double word at a specified address with interrupt enabled. + * @param TypeProgram Indicate the way to program at a specified address. + * This parameter can be a value of @ref FLASH_Type_Program + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed + * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Program_IT(uint32_t TypeProgram, uint32_t Address, uint64_t Data) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEPROGRAM(TypeProgram)); + + /* Enable End of FLASH Operation interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_EOP); + + /* Enable Error source interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_ERR); + + /* Clear pending flags (if any) */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP | FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR |\ + FLASH_FLAG_PGAERR | FLASH_FLAG_PGPERR| FLASH_FLAG_ERSERR); + + pFlash.ProcedureOnGoing = FLASH_PROC_PROGRAM; + pFlash.Address = Address; + + switch(TypeProgram) + { + case FLASH_TYPEPROGRAM_BYTE : + { + /*Program byte (8-bit) at a specified address.*/ + FLASH_Program_Byte(Address, (uint8_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_HALFWORD : + { + /*Program halfword (16-bit) at a specified address.*/ + FLASH_Program_HalfWord(Address, (uint16_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_WORD : + { + /*Program word (32-bit) at a specified address.*/ + FLASH_Program_Word(Address, (uint32_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_DOUBLEWORD : + { + /*Program double word (64-bit) at a specified address.*/ + FLASH_Program_DoubleWord(Address, Data); + break; + } + default : + break; + } + return status; +} + +/** + * @brief This function handles FLASH interrupt request. 
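A typical polling-mode use of HAL_FLASH_Program() above, following the unlock/program/lock sequence from the driver description; the target address and value are illustrative and must lie in erased, writable flash:

    #include "stm32f7xx_hal.h"

    HAL_StatusTypeDef app_store_word(uint32_t value)
    {
      const uint32_t addr = 0x080C0000U;           /* illustrative address in a free, erased sector */
      HAL_StatusTypeDef status;

      HAL_FLASH_Unlock();                          /* open the control register for programming */
      status = HAL_FLASH_Program(FLASH_TYPEPROGRAM_WORD, addr, (uint64_t)value);
      HAL_FLASH_Lock();                            /* re-lock regardless of the outcome */

      return status;
    }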
+ * @retval None + */ +void HAL_FLASH_IRQHandler(void) +{ + uint32_t temp = 0; + + /* If the program operation is completed, disable the PG Bit */ + FLASH->CR &= (~FLASH_CR_PG); + + /* If the erase operation is completed, disable the SER Bit */ + FLASH->CR &= (~FLASH_CR_SER); + FLASH->CR &= SECTOR_MASK; + + /* if the erase operation is completed, disable the MER Bit */ + FLASH->CR &= (~FLASH_MER_BIT); + + /* Check FLASH End of Operation flag */ + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_EOP) != RESET) + { + /* Clear FLASH End of Operation pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP); + + switch (pFlash.ProcedureOnGoing) + { + case FLASH_PROC_SECTERASE : + { + /* Nb of sector to erased can be decreased */ + pFlash.NbSectorsToErase--; + + /* Check if there are still sectors to erase */ + if(pFlash.NbSectorsToErase != 0) + { + temp = pFlash.Sector; + /* Indicate user which sector has been erased */ + HAL_FLASH_EndOfOperationCallback(temp); + + /* Increment sector number */ + temp = ++pFlash.Sector; + FLASH_Erase_Sector(temp, pFlash.VoltageForErase); + } + else + { + /* No more sectors to Erase, user callback can be called.*/ + /* Reset Sector and stop Erase sectors procedure */ + pFlash.Sector = temp = 0xFFFFFFFFU; + /* FLASH EOP interrupt user callback */ + HAL_FLASH_EndOfOperationCallback(temp); + /* Sector Erase procedure is completed */ + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + } + break; + } + + case FLASH_PROC_MASSERASE : + { + /* MassErase ended. Return the selected bank : in this product we don't have Banks */ + /* FLASH EOP interrupt user callback */ + HAL_FLASH_EndOfOperationCallback(0); + /* MAss Erase procedure is completed */ + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + break; + } + + case FLASH_PROC_PROGRAM : + { + /*Program ended. 
Return the selected address*/ + /* FLASH EOP interrupt user callback */ + HAL_FLASH_EndOfOperationCallback(pFlash.Address); + /* Programming procedure is completed */ + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + break; + } + default : + break; + } + } + + /* Check FLASH operation error flags */ + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_ALL_ERRORS) != RESET) + { + switch (pFlash.ProcedureOnGoing) + { + case FLASH_PROC_SECTERASE : + { + /* return the faulty sector */ + temp = pFlash.Sector; + pFlash.Sector = 0xFFFFFFFFU; + break; + } + case FLASH_PROC_MASSERASE : + { + /* No return in case of Mass Erase */ + temp = 0; + break; + } + case FLASH_PROC_PROGRAM : + { + /*return the faulty address*/ + temp = pFlash.Address; + break; + } + default : + break; + } + /*Save the Error code*/ + FLASH_SetErrorCode(); + + /* FLASH error interrupt user callback */ + HAL_FLASH_OperationErrorCallback(temp); + + /*Stop the procedure ongoing */ + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + } + + if(pFlash.ProcedureOnGoing == FLASH_PROC_NONE) + { + /* Disable End of FLASH Operation interrupt */ + __HAL_FLASH_DISABLE_IT(FLASH_IT_EOP); + + /* Disable Error source interrupt */ + __HAL_FLASH_DISABLE_IT(FLASH_IT_ERR); + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + } + +} + +/** + * @brief FLASH end of operation interrupt callback + * @param ReturnValue The value saved in this parameter depends on the ongoing procedure + * - Sectors Erase: Sector which has been erased (if 0xFFFFFFFF, it means that + * all the selected sectors have been erased) + * - Program : Address which was selected for data program + * - Mass Erase : No return value expected + * @retval None + */ +__weak void HAL_FLASH_EndOfOperationCallback(uint32_t ReturnValue) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(ReturnValue); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_FLASH_EndOfOperationCallback could be implemented in the user file + */ +} + +/** + * @brief FLASH operation error interrupt callback + * @param ReturnValue The value saved in this parameter depends on the ongoing procedure + * - Sectors Erase: Sector which has been erased (if 0xFFFFFFFF, it means that + * all the selected sectors have been erased) + * - Program : Address which was selected for data program + * - Mass Erase : No return value expected + * @retval None + */ +__weak void HAL_FLASH_OperationErrorCallback(uint32_t ReturnValue) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(ReturnValue); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_FLASH_OperationErrorCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup FLASH_Exported_Functions_Group2 Peripheral Control functions + * @brief management functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the FLASH + memory operations. 
+ +@endverbatim + * @{ + */ + +/** + * @brief Unlock the FLASH control register access + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Unlock(void) +{ + HAL_StatusTypeDef status = HAL_OK; + + if(READ_BIT(FLASH->CR, FLASH_CR_LOCK) != RESET) + { + /* Authorize the FLASH Registers access */ + WRITE_REG(FLASH->KEYR, FLASH_KEY1); + WRITE_REG(FLASH->KEYR, FLASH_KEY2); + + /* Verify Flash is unlocked */ + if(READ_BIT(FLASH->CR, FLASH_CR_LOCK) != RESET) + { + status = HAL_ERROR; + } + } + + return status; +} + +/** + * @brief Locks the FLASH control register access + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Lock(void) +{ + /* Set the LOCK Bit to lock the FLASH Registers access */ + FLASH->CR |= FLASH_CR_LOCK; + + return HAL_OK; +} + +/** + * @brief Unlock the FLASH Option Control Registers access. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_OB_Unlock(void) +{ + if((FLASH->OPTCR & FLASH_OPTCR_OPTLOCK) != RESET) + { + /* Authorizes the Option Byte register programming */ + FLASH->OPTKEYR = FLASH_OPT_KEY1; + FLASH->OPTKEYR = FLASH_OPT_KEY2; + } + else + { + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief Lock the FLASH Option Control Registers access. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_OB_Lock(void) +{ + /* Set the OPTLOCK Bit to lock the FLASH Option Byte Registers access */ + FLASH->OPTCR |= FLASH_OPTCR_OPTLOCK; + + return HAL_OK; +} + +/** + * @brief Launch the option byte loading. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_OB_Launch(void) +{ + /* Set the OPTSTRT bit in OPTCR register */ + FLASH->OPTCR |= FLASH_OPTCR_OPTSTRT; + + /* Wait for last operation to be completed */ + return(FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE)); +} + +/** + * @} + */ + +/** @defgroup FLASH_Exported_Functions_Group3 Peripheral State and Errors functions + * @brief Peripheral Errors functions + * +@verbatim + =============================================================================== + ##### Peripheral Errors functions ##### + =============================================================================== + [..] + This subsection permits to get in run-time Errors of the FLASH peripheral. + +@endverbatim + * @{ + */ + +/** + * @brief Get the specific FLASH error flag. + * @retval FLASH_ErrorCode: The returned value can be: + * @arg FLASH_ERROR_ERS: FLASH Erasing Sequence error flag + * @arg FLASH_ERROR_PGP: FLASH Programming Parallelism error flag + * @arg FLASH_ERROR_PGA: FLASH Programming Alignment error flag + * @arg FLASH_ERROR_WRP: FLASH Write protected error flag + * @arg FLASH_ERROR_OPERATION: FLASH operation Error flag + */ +uint32_t HAL_FLASH_GetError(void) +{ + return pFlash.ErrorCode; +} + +/** + * @} + */ + +/** + * @brief Wait for a FLASH operation to complete. + * @param Timeout maximum flash operationtimeout + * @retval HAL Status + */ +HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Clear Error Code */ + pFlash.ErrorCode = HAL_FLASH_ERROR_NONE; + + /* Wait for the FLASH operation to complete by polling on BUSY flag to be reset. 
+ Even if the FLASH operation fails, the BUSY flag will be reset and an error + flag will be set */ + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_FLASH_GET_FLAG(FLASH_FLAG_BSY) != RESET) + { + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + return HAL_TIMEOUT; + } + } + } + + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_ALL_ERRORS) != RESET) + { + /*Save the error code*/ + FLASH_SetErrorCode(); + return HAL_ERROR; + } + + /* Check FLASH End of Operation flag */ + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_EOP) != RESET) + { + /* Clear FLASH End of Operation pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP); + } + + /* If there is an error flag set */ + return HAL_OK; + +} + +/** + * @brief Program a double word (64-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 2.7V to 3.6V and an External Vpp is present. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. + * @retval None + */ +static void FLASH_Program_DoubleWord(uint32_t Address, uint64_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= FLASH_PSIZE_DOUBLE_WORD; + FLASH->CR |= FLASH_CR_PG; + + /* Program the double-word */ + *(__IO uint32_t*)Address = (uint32_t)Data; + *(__IO uint32_t*)(Address+4) = (uint32_t)(Data >> 32); + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + + +/** + * @brief Program word (32-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 2.7V to 3.6V. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. + * @retval None + */ +static void FLASH_Program_Word(uint32_t Address, uint32_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= FLASH_PSIZE_WORD; + FLASH->CR |= FLASH_CR_PG; + + *(__IO uint32_t*)Address = Data; + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Program a half-word (16-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 2.7V to 3.6V. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. 
+ * @retval None + */ +static void FLASH_Program_HalfWord(uint32_t Address, uint16_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= FLASH_PSIZE_HALF_WORD; + FLASH->CR |= FLASH_CR_PG; + + *(__IO uint16_t*)Address = Data; + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); + +} + +/** + * @brief Program byte (8-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 2.7V to 3.6V. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. + * @retval None + */ +static void FLASH_Program_Byte(uint32_t Address, uint8_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= FLASH_PSIZE_BYTE; + FLASH->CR |= FLASH_CR_PG; + + *(__IO uint8_t*)Address = Data; + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Set the specific FLASH error flag. + * @retval None + */ +static void FLASH_SetErrorCode(void) +{ + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_OPERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_OPERATION; + } + + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_WRPERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_WRP; + } + + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGAERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_PGA; + } + + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGPERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_PGP; + } + + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_ERSERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_ERS; + } + +#if defined (FLASH_OPTCR2_PCROP) + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_RDERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_RD; + } +#endif /* FLASH_OPTCR2_PCROP */ + + /* Clear error programming flags */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_ALL_ERRORS); +} + +/** + * @} + */ + +#endif /* HAL_FLASH_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c new file mode 100644 index 000000000..835e275a5 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c @@ -0,0 +1,1122 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_flash_ex.c + * @author MCD Application Team + * @brief Extended FLASH HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the FLASH extension peripheral: + * + Extended programming operations functions + * + @verbatim + ============================================================================== + ##### Flash Extension features ##### + ============================================================================== + + [..] Comparing to other previous devices, the FLASH interface for STM32F76xx/STM32F77xx + devices contains the following additional features + + (+) Capacity up to 2 Mbyte with dual bank architecture supporting read-while-write + capability (RWW) + (+) Dual bank memory organization + (+) Dual boot mode + + ##### How to use this driver ##### + ============================================================================== + [..] This driver provides functions to configure and program the FLASH memory + of all STM32F7xx devices. It includes + (#) FLASH Memory Erase functions: + (++) Lock and Unlock the FLASH interface using HAL_FLASH_Unlock() and + HAL_FLASH_Lock() functions + (++) Erase function: Erase sector, erase all sectors + (++) There are two modes of erase : + (+++) Polling Mode using HAL_FLASHEx_Erase() + (+++) Interrupt Mode using HAL_FLASHEx_Erase_IT() + + (#) Option Bytes Programming functions: Use HAL_FLASHEx_OBProgram() to : + (++) Set/Reset the write protection + (++) Set the Read protection Level + (++) Set the BOR level + (++) Program the user Option Bytes + + @endverbatim + ****************************************************************************** + * @attention + * + *

+  * © Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup FLASHEx FLASHEx + * @brief FLASH HAL Extension module driver + * @{ + */ + +#ifdef HAL_FLASH_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup FLASHEx_Private_Constants + * @{ + */ +#define SECTOR_MASK 0xFFFFFF07U +#define FLASH_TIMEOUT_VALUE 50000U/* 50 s */ +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup FLASHEx_Private_Variables + * @{ + */ +extern FLASH_ProcessTypeDef pFlash; +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup FLASHEx_Private_Functions + * @{ + */ +/* Option bytes control */ +static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector); +static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector); +static HAL_StatusTypeDef FLASH_OB_RDP_LevelConfig(uint8_t Level); +static HAL_StatusTypeDef FLASH_OB_BOR_LevelConfig(uint8_t Level); +static HAL_StatusTypeDef FLASH_OB_BootAddressConfig(uint32_t BootOption, uint32_t Address); +static uint32_t FLASH_OB_GetUser(void); +static uint32_t FLASH_OB_GetWRP(void); +static uint8_t FLASH_OB_GetRDP(void); +static uint32_t FLASH_OB_GetBOR(void); +static uint32_t FLASH_OB_GetBootAddress(uint32_t BootOption); + +#if defined (FLASH_OPTCR_nDBANK) +static void FLASH_MassErase(uint8_t VoltageRange, uint32_t Banks); +static HAL_StatusTypeDef FLASH_OB_UserConfig(uint32_t Wwdg, uint32_t Iwdg, uint32_t Stop, uint32_t Stdby, uint32_t Iwdgstop, \ + uint32_t Iwdgstdby, uint32_t NDBank, uint32_t NDBoot); +#else +static void FLASH_MassErase(uint8_t VoltageRange); +static HAL_StatusTypeDef FLASH_OB_UserConfig(uint32_t Wwdg, uint32_t Iwdg, uint32_t Stop, uint32_t Stdby, uint32_t Iwdgstop, uint32_t Iwdgstdby); +#endif /* FLASH_OPTCR_nDBANK */ + +#if defined (FLASH_OPTCR2_PCROP) +static HAL_StatusTypeDef FLASH_OB_PCROP_Config(uint32_t PCROPSector); +static HAL_StatusTypeDef FLASH_OB_PCROP_RDP_Config(uint32_t Pcrop_Rdp); +static uint32_t FLASH_OB_GetPCROP(void); +static uint32_t FLASH_OB_GetPCROPRDP(void); +#endif /* FLASH_OPTCR2_PCROP */ + +extern HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup FLASHEx_Exported_Functions FLASHEx Exported Functions + * @{ + */ + +/** @defgroup FLASHEx_Exported_Functions_Group1 Extended IO operation functions + * @brief Extended IO operation functions + * +@verbatim + =============================================================================== + ##### Extended programming operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the Extension FLASH + programming operations Operations. 
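+    [..]
+    A minimal polling-mode sector-erase sketch; the sector number and voltage
+    range below are illustrative assumptions only:
+
+      FLASH_EraseInitTypeDef erase = {0};
+      uint32_t faulty_sector = 0xFFFFFFFFU;
+
+      erase.TypeErase    = FLASH_TYPEERASE_SECTORS;   /* erase by sector, not mass erase */
+      erase.Sector       = FLASH_SECTOR_7;            /* hypothetical sector             */
+      erase.NbSectors    = 1;
+      erase.VoltageRange = FLASH_VOLTAGE_RANGE_3;     /* 2.7V to 3.6V supply             */
+
+      HAL_FLASH_Unlock();
+      HAL_FLASHEx_Erase(&erase, &faulty_sector);
+      HAL_FLASH_Lock();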
+ +@endverbatim + * @{ + */ +/** + * @brief Perform a mass erase or erase the specified FLASH memory sectors + * @param[in] pEraseInit pointer to an FLASH_EraseInitTypeDef structure that + * contains the configuration information for the erasing. + * + * @param[out] SectorError pointer to variable that + * contains the configuration information on faulty sector in case of error + * (0xFFFFFFFF means that all the sectors have been correctly erased) + * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_Erase(FLASH_EraseInitTypeDef *pEraseInit, uint32_t *SectorError) +{ + HAL_StatusTypeDef status = HAL_ERROR; + uint32_t index = 0; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEERASE(pEraseInit->TypeErase)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + /*Initialization of SectorError variable*/ + *SectorError = 0xFFFFFFFFU; + + if(pEraseInit->TypeErase == FLASH_TYPEERASE_MASSERASE) + { + /*Mass erase to be done*/ +#if defined (FLASH_OPTCR_nDBANK) + FLASH_MassErase((uint8_t) pEraseInit->VoltageRange, pEraseInit->Banks); +#else + FLASH_MassErase((uint8_t) pEraseInit->VoltageRange); +#endif /* FLASH_OPTCR_nDBANK */ + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + /* if the erase operation is completed, disable the MER Bit */ + FLASH->CR &= (~FLASH_MER_BIT); + } + else + { + /* Check the parameters */ + assert_param(IS_FLASH_NBSECTORS(pEraseInit->NbSectors + pEraseInit->Sector)); + + /* Erase by sector by sector to be done*/ + for(index = pEraseInit->Sector; index < (pEraseInit->NbSectors + pEraseInit->Sector); index++) + { + FLASH_Erase_Sector(index, (uint8_t) pEraseInit->VoltageRange); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + /* If the erase operation is completed, disable the SER Bit and SNB Bits */ + CLEAR_BIT(FLASH->CR, (FLASH_CR_SER | FLASH_CR_SNB)); + + if(status != HAL_OK) + { + /* In case of error, stop erase procedure and return the faulty sector*/ + *SectorError = index; + break; + } + } + } + } + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + + return status; +} + +/** + * @brief Perform a mass erase or erase the specified FLASH memory sectors with interrupt enabled + * @param pEraseInit pointer to an FLASH_EraseInitTypeDef structure that + * contains the configuration information for the erasing. 
+ * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_Erase_IT(FLASH_EraseInitTypeDef *pEraseInit) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEERASE(pEraseInit->TypeErase)); + + /* Enable End of FLASH Operation interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_EOP); + + /* Enable Error source interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_ERR); + + /* Clear pending flags (if any) */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP | FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR |\ + FLASH_FLAG_PGAERR | FLASH_FLAG_PGPERR| FLASH_FLAG_ERSERR); + + if(pEraseInit->TypeErase == FLASH_TYPEERASE_MASSERASE) + { + /*Mass erase to be done*/ + pFlash.ProcedureOnGoing = FLASH_PROC_MASSERASE; +#if defined (FLASH_OPTCR_nDBANK) + FLASH_MassErase((uint8_t) pEraseInit->VoltageRange, pEraseInit->Banks); +#else + FLASH_MassErase((uint8_t) pEraseInit->VoltageRange); +#endif /* FLASH_OPTCR_nDBANK */ + } + else + { + /* Erase by sector to be done*/ + + /* Check the parameters */ + assert_param(IS_FLASH_NBSECTORS(pEraseInit->NbSectors + pEraseInit->Sector)); + + pFlash.ProcedureOnGoing = FLASH_PROC_SECTERASE; + pFlash.NbSectorsToErase = pEraseInit->NbSectors; + pFlash.Sector = pEraseInit->Sector; + pFlash.VoltageForErase = (uint8_t)pEraseInit->VoltageRange; + + /*Erase 1st sector and wait for IT*/ + FLASH_Erase_Sector(pEraseInit->Sector, pEraseInit->VoltageRange); + } + + return status; +} + +/** + * @brief Program option bytes + * @param pOBInit pointer to an FLASH_OBInitStruct structure that + * contains the configuration information for the programming. + * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_OBProgram(FLASH_OBProgramInitTypeDef *pOBInit) +{ + HAL_StatusTypeDef status = HAL_ERROR; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_OPTIONBYTE(pOBInit->OptionType)); + + /* Write protection configuration */ + if((pOBInit->OptionType & OPTIONBYTE_WRP) == OPTIONBYTE_WRP) + { + assert_param(IS_WRPSTATE(pOBInit->WRPState)); + if(pOBInit->WRPState == OB_WRPSTATE_ENABLE) + { + /*Enable of Write protection on the selected Sector*/ + status = FLASH_OB_EnableWRP(pOBInit->WRPSector); + } + else + { + /*Disable of Write protection on the selected Sector*/ + status = FLASH_OB_DisableWRP(pOBInit->WRPSector); + } + } + + /* Read protection configuration */ + if((pOBInit->OptionType & OPTIONBYTE_RDP) == OPTIONBYTE_RDP) + { + status = FLASH_OB_RDP_LevelConfig(pOBInit->RDPLevel); + } + + /* USER configuration */ + if((pOBInit->OptionType & OPTIONBYTE_USER) == OPTIONBYTE_USER) + { +#if defined (FLASH_OPTCR_nDBANK) + status = FLASH_OB_UserConfig(pOBInit->USERConfig & OB_WWDG_SW, + pOBInit->USERConfig & OB_IWDG_SW, + pOBInit->USERConfig & OB_STOP_NO_RST, + pOBInit->USERConfig & OB_STDBY_NO_RST, + pOBInit->USERConfig & OB_IWDG_STOP_ACTIVE, + pOBInit->USERConfig & OB_IWDG_STDBY_ACTIVE, + pOBInit->USERConfig & OB_NDBANK_SINGLE_BANK, + pOBInit->USERConfig & OB_DUAL_BOOT_DISABLE); +#else + status = FLASH_OB_UserConfig(pOBInit->USERConfig & OB_WWDG_SW, + pOBInit->USERConfig & OB_IWDG_SW, + pOBInit->USERConfig & OB_STOP_NO_RST, + pOBInit->USERConfig & OB_STDBY_NO_RST, + pOBInit->USERConfig & OB_IWDG_STOP_ACTIVE, + pOBInit->USERConfig & OB_IWDG_STDBY_ACTIVE); +#endif /* FLASH_OPTCR_nDBANK */ + } + + /* BOR Level configuration */ + if((pOBInit->OptionType & OPTIONBYTE_BOR) == OPTIONBYTE_BOR) + { + status = FLASH_OB_BOR_LevelConfig(pOBInit->BORLevel); + } + + /* Boot 0 Address 
configuration */ + if((pOBInit->OptionType & OPTIONBYTE_BOOTADDR_0) == OPTIONBYTE_BOOTADDR_0) + { + status = FLASH_OB_BootAddressConfig(OPTIONBYTE_BOOTADDR_0, pOBInit->BootAddr0); + } + + /* Boot 1 Address configuration */ + if((pOBInit->OptionType & OPTIONBYTE_BOOTADDR_1) == OPTIONBYTE_BOOTADDR_1) + { + status = FLASH_OB_BootAddressConfig(OPTIONBYTE_BOOTADDR_1, pOBInit->BootAddr1); + } + +#if defined (FLASH_OPTCR2_PCROP) + /* PCROP configuration */ + if((pOBInit->OptionType & OPTIONBYTE_PCROP) == OPTIONBYTE_PCROP) + { + status = FLASH_OB_PCROP_Config(pOBInit->PCROPSector); + } + + /* PCROP_RDP configuration */ + if((pOBInit->OptionType & OPTIONBYTE_PCROP_RDP) == OPTIONBYTE_PCROP_RDP) + { + status = FLASH_OB_PCROP_RDP_Config(pOBInit->PCROPRdp); + } +#endif /* FLASH_OPTCR2_PCROP */ + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + + return status; +} + +/** + * @brief Get the Option byte configuration + * @param pOBInit pointer to an FLASH_OBInitStruct structure that + * contains the configuration information for the programming. + * + * @retval None + */ +void HAL_FLASHEx_OBGetConfig(FLASH_OBProgramInitTypeDef *pOBInit) +{ + pOBInit->OptionType = OPTIONBYTE_WRP | OPTIONBYTE_RDP | OPTIONBYTE_USER |\ + OPTIONBYTE_BOR | OPTIONBYTE_BOOTADDR_0 | OPTIONBYTE_BOOTADDR_1; + + /*Get WRP*/ + pOBInit->WRPSector = FLASH_OB_GetWRP(); + + /*Get RDP Level*/ + pOBInit->RDPLevel = FLASH_OB_GetRDP(); + + /*Get USER*/ + pOBInit->USERConfig = FLASH_OB_GetUser(); + + /*Get BOR Level*/ + pOBInit->BORLevel = FLASH_OB_GetBOR(); + + /*Get Boot Address when Boot pin = 0 */ + pOBInit->BootAddr0 = FLASH_OB_GetBootAddress(OPTIONBYTE_BOOTADDR_0); + + /*Get Boot Address when Boot pin = 1 */ + pOBInit->BootAddr1 = FLASH_OB_GetBootAddress(OPTIONBYTE_BOOTADDR_1); + +#if defined (FLASH_OPTCR2_PCROP) + /*Get PCROP Sectors */ + pOBInit->PCROPSector = FLASH_OB_GetPCROP(); + + /*Get PCROP_RDP Value */ + pOBInit->PCROPRdp = FLASH_OB_GetPCROPRDP(); +#endif /* FLASH_OPTCR2_PCROP */ +} +/** + * @} + */ + +#if defined (FLASH_OPTCR_nDBANK) +/** + * @brief Full erase of FLASH memory sectors + * @param VoltageRange The device voltage range which defines the erase parallelism. 
+ * This parameter can be one of the following values: + * @arg VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * @param Banks Banks to be erased + * This parameter can be one of the following values: + * @arg FLASH_BANK_1: Bank1 to be erased + * @arg FLASH_BANK_2: Bank2 to be erased + * @arg FLASH_BANK_BOTH: Bank1 and Bank2 to be erased + * + * @retval HAL Status + */ +static void FLASH_MassErase(uint8_t VoltageRange, uint32_t Banks) +{ + /* Check the parameters */ + assert_param(IS_VOLTAGERANGE(VoltageRange)); + assert_param(IS_FLASH_BANK(Banks)); + + /* if the previous operation is completed, proceed to erase all sectors */ + FLASH->CR &= CR_PSIZE_MASK; + if(Banks == FLASH_BANK_BOTH) + { + /* bank1 & bank2 will be erased*/ + FLASH->CR |= FLASH_MER_BIT; + } + else if(Banks == FLASH_BANK_2) + { + /*Only bank2 will be erased*/ + FLASH->CR |= FLASH_CR_MER2; + } + else + { + /*Only bank1 will be erased*/ + FLASH->CR |= FLASH_CR_MER1; + } + FLASH->CR |= FLASH_CR_STRT | ((uint32_t)VoltageRange <<8); + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Erase the specified FLASH memory sector + * @param Sector FLASH sector to erase + * The value of this parameter depend on device used within the same series + * @param VoltageRange The device voltage range which defines the erase parallelism. 
+ * This parameter can be one of the following values: + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * + * @retval None + */ +void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) +{ + uint32_t tmp_psize = 0; + + /* Check the parameters */ + assert_param(IS_FLASH_SECTOR(Sector)); + assert_param(IS_VOLTAGERANGE(VoltageRange)); + + if(VoltageRange == FLASH_VOLTAGE_RANGE_1) + { + tmp_psize = FLASH_PSIZE_BYTE; + } + else if(VoltageRange == FLASH_VOLTAGE_RANGE_2) + { + tmp_psize = FLASH_PSIZE_HALF_WORD; + } + else if(VoltageRange == FLASH_VOLTAGE_RANGE_3) + { + tmp_psize = FLASH_PSIZE_WORD; + } + else + { + tmp_psize = FLASH_PSIZE_DOUBLE_WORD; + } + + /* Need to add offset of 4 when sector higher than FLASH_SECTOR_11 */ + if(Sector > FLASH_SECTOR_11) + { + Sector += 4; + } + + /* If the previous operation is completed, proceed to erase the sector */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= tmp_psize; + CLEAR_BIT(FLASH->CR, FLASH_CR_SNB); + FLASH->CR |= FLASH_CR_SER | (Sector << FLASH_CR_SNB_Pos); + FLASH->CR |= FLASH_CR_STRT; + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Return the FLASH Write Protection Option Bytes value. + * @retval uint32_t FLASH Write Protection Option Bytes value + */ +static uint32_t FLASH_OB_GetWRP(void) +{ + /* Return the FLASH write protection Register value */ + return ((uint32_t)(FLASH->OPTCR & 0x0FFF0000)); +} + +/** + * @brief Program the FLASH User Option Byte: IWDG_SW / RST_STOP / RST_STDBY. + * @param Wwdg Selects the IWDG mode + * This parameter can be one of the following values: + * @arg OB_WWDG_SW: Software WWDG selected + * @arg OB_WWDG_HW: Hardware WWDG selected + * @param Iwdg Selects the WWDG mode + * This parameter can be one of the following values: + * @arg OB_IWDG_SW: Software IWDG selected + * @arg OB_IWDG_HW: Hardware IWDG selected + * @param Stop Reset event when entering STOP mode. + * This parameter can be one of the following values: + * @arg OB_STOP_NO_RST: No reset generated when entering in STOP + * @arg OB_STOP_RST: Reset generated when entering in STOP + * @param Stdby Reset event when entering Standby mode. + * This parameter can be one of the following values: + * @arg OB_STDBY_NO_RST: No reset generated when entering in STANDBY + * @arg OB_STDBY_RST: Reset generated when entering in STANDBY + * @param Iwdgstop Independent watchdog counter freeze in Stop mode. + * This parameter can be one of the following values: + * @arg OB_IWDG_STOP_FREEZE: Freeze IWDG counter in STOP + * @arg OB_IWDG_STOP_ACTIVE: IWDG counter active in STOP + * @param Iwdgstdby Independent watchdog counter freeze in standby mode. + * This parameter can be one of the following values: + * @arg OB_IWDG_STDBY_FREEZE: Freeze IWDG counter in STANDBY + * @arg OB_IWDG_STDBY_ACTIVE: IWDG counter active in STANDBY + * @param NDBank Flash Single Bank mode enabled. 
+ * This parameter can be one of the following values: + * @arg OB_NDBANK_SINGLE_BANK: enable 256 bits mode (Flash is a single bank) + * @arg OB_NDBANK_DUAL_BANK: disable 256 bits mode (Flash is a dual bank in 128 bits mode) + * @param NDBoot Flash Dual boot mode disable. + * This parameter can be one of the following values: + * @arg OB_DUAL_BOOT_DISABLE: Disable Dual Boot + * @arg OB_DUAL_BOOT_ENABLE: Enable Dual Boot + + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_UserConfig(uint32_t Wwdg, uint32_t Iwdg, uint32_t Stop, uint32_t Stdby, uint32_t Iwdgstop, \ + uint32_t Iwdgstdby, uint32_t NDBank, uint32_t NDBoot) +{ + uint32_t useroptionmask = 0x00; + uint32_t useroptionvalue = 0x00; + + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WWDG_SOURCE(Wwdg)); + assert_param(IS_OB_IWDG_SOURCE(Iwdg)); + assert_param(IS_OB_STOP_SOURCE(Stop)); + assert_param(IS_OB_STDBY_SOURCE(Stdby)); + assert_param(IS_OB_IWDG_STOP_FREEZE(Iwdgstop)); + assert_param(IS_OB_IWDG_STDBY_FREEZE(Iwdgstdby)); + assert_param(IS_OB_NDBANK(NDBank)); + assert_param(IS_OB_NDBOOT(NDBoot)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + useroptionmask = (FLASH_OPTCR_WWDG_SW | FLASH_OPTCR_IWDG_SW | FLASH_OPTCR_nRST_STOP | \ + FLASH_OPTCR_nRST_STDBY | FLASH_OPTCR_IWDG_STOP | FLASH_OPTCR_IWDG_STDBY | \ + FLASH_OPTCR_nDBOOT | FLASH_OPTCR_nDBANK); + + useroptionvalue = (Iwdg | Wwdg | Stop | Stdby | Iwdgstop | Iwdgstdby | NDBoot | NDBank); + + /* Update User Option Byte */ + MODIFY_REG(FLASH->OPTCR, useroptionmask, useroptionvalue); + } + + return status; +} + +/** + * @brief Return the FLASH User Option Byte value. + * @retval uint32_t FLASH User Option Bytes values: WWDG_SW(Bit4), IWDG_SW(Bit5), nRST_STOP(Bit6), + * nRST_STDBY(Bit7), nDBOOT(Bit28), nDBANK(Bit29), IWDG_STDBY(Bit30) and IWDG_STOP(Bit31). + */ +static uint32_t FLASH_OB_GetUser(void) +{ + /* Return the User Option Byte */ + return ((uint32_t)(FLASH->OPTCR & 0xF00000F0U)); +} +#else + +/** + * @brief Full erase of FLASH memory sectors + * @param VoltageRange The device voltage range which defines the erase parallelism. 
+ * This parameter can be one of the following values: + * @arg VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * + * @retval HAL Status + */ +static void FLASH_MassErase(uint8_t VoltageRange) +{ + /* Check the parameters */ + assert_param(IS_VOLTAGERANGE(VoltageRange)); + + /* if the previous operation is completed, proceed to erase all sectors */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= FLASH_CR_MER; + FLASH->CR |= FLASH_CR_STRT | ((uint32_t)VoltageRange <<8); + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Erase the specified FLASH memory sector + * @param Sector FLASH sector to erase + * The value of this parameter depend on device used within the same series + * @param VoltageRange The device voltage range which defines the erase parallelism. + * This parameter can be one of the following values: + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * + * @retval None + */ +void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) +{ + uint32_t tmp_psize = 0; + + /* Check the parameters */ + assert_param(IS_FLASH_SECTOR(Sector)); + assert_param(IS_VOLTAGERANGE(VoltageRange)); + + if(VoltageRange == FLASH_VOLTAGE_RANGE_1) + { + tmp_psize = FLASH_PSIZE_BYTE; + } + else if(VoltageRange == FLASH_VOLTAGE_RANGE_2) + { + tmp_psize = FLASH_PSIZE_HALF_WORD; + } + else if(VoltageRange == FLASH_VOLTAGE_RANGE_3) + { + tmp_psize = FLASH_PSIZE_WORD; + } + else + { + tmp_psize = FLASH_PSIZE_DOUBLE_WORD; + } + + /* If the previous operation is completed, proceed to erase the sector */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= tmp_psize; + FLASH->CR &= SECTOR_MASK; + FLASH->CR |= FLASH_CR_SER | (Sector << FLASH_CR_SNB_Pos); + FLASH->CR |= FLASH_CR_STRT; + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Return the FLASH Write Protection Option Bytes value. + * @retval uint32_t FLASH Write Protection Option Bytes value + */ +static uint32_t FLASH_OB_GetWRP(void) +{ + /* Return the FLASH write protection Register value */ + return ((uint32_t)(FLASH->OPTCR & 0x00FF0000)); +} + +/** + * @brief Program the FLASH User Option Byte: IWDG_SW / RST_STOP / RST_STDBY. 
+ * @param Wwdg Selects the IWDG mode + * This parameter can be one of the following values: + * @arg OB_WWDG_SW: Software WWDG selected + * @arg OB_WWDG_HW: Hardware WWDG selected + * @param Iwdg Selects the WWDG mode + * This parameter can be one of the following values: + * @arg OB_IWDG_SW: Software IWDG selected + * @arg OB_IWDG_HW: Hardware IWDG selected + * @param Stop Reset event when entering STOP mode. + * This parameter can be one of the following values: + * @arg OB_STOP_NO_RST: No reset generated when entering in STOP + * @arg OB_STOP_RST: Reset generated when entering in STOP + * @param Stdby Reset event when entering Standby mode. + * This parameter can be one of the following values: + * @arg OB_STDBY_NO_RST: No reset generated when entering in STANDBY + * @arg OB_STDBY_RST: Reset generated when entering in STANDBY + * @param Iwdgstop Independent watchdog counter freeze in Stop mode. + * This parameter can be one of the following values: + * @arg OB_IWDG_STOP_FREEZE: Freeze IWDG counter in STOP + * @arg OB_IWDG_STOP_ACTIVE: IWDG counter active in STOP + * @param Iwdgstdby Independent watchdog counter freeze in standby mode. + * This parameter can be one of the following values: + * @arg OB_IWDG_STDBY_FREEZE: Freeze IWDG counter in STANDBY + * @arg OB_IWDG_STDBY_ACTIVE: IWDG counter active in STANDBY + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_UserConfig(uint32_t Wwdg, uint32_t Iwdg, uint32_t Stop, uint32_t Stdby, uint32_t Iwdgstop, uint32_t Iwdgstdby) +{ + uint32_t useroptionmask = 0x00; + uint32_t useroptionvalue = 0x00; + + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WWDG_SOURCE(Wwdg)); + assert_param(IS_OB_IWDG_SOURCE(Iwdg)); + assert_param(IS_OB_STOP_SOURCE(Stop)); + assert_param(IS_OB_STDBY_SOURCE(Stdby)); + assert_param(IS_OB_IWDG_STOP_FREEZE(Iwdgstop)); + assert_param(IS_OB_IWDG_STDBY_FREEZE(Iwdgstdby)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + useroptionmask = (FLASH_OPTCR_WWDG_SW | FLASH_OPTCR_IWDG_SW | FLASH_OPTCR_nRST_STOP | \ + FLASH_OPTCR_nRST_STDBY | FLASH_OPTCR_IWDG_STOP | FLASH_OPTCR_IWDG_STDBY); + + useroptionvalue = (Iwdg | Wwdg | Stop | Stdby | Iwdgstop | Iwdgstdby); + + /* Update User Option Byte */ + MODIFY_REG(FLASH->OPTCR, useroptionmask, useroptionvalue); + } + + return status; + +} + +/** + * @brief Return the FLASH User Option Byte value. + * @retval uint32_t FLASH User Option Bytes values: WWDG_SW(Bit4), IWDG_SW(Bit5), nRST_STOP(Bit6), + * nRST_STDBY(Bit7), IWDG_STDBY(Bit30) and IWDG_STOP(Bit31). + */ +static uint32_t FLASH_OB_GetUser(void) +{ + /* Return the User Option Byte */ + return ((uint32_t)(FLASH->OPTCR & 0xC00000F0U)); +} +#endif /* FLASH_OPTCR_nDBANK */ + +/** + * @brief Enable the write protection of the desired bank1 or bank2 sectors + * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM7 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * + * @param WRPSector specifies the sector(s) to be write protected. 
+ * This parameter can be one of the following values: + * @arg WRPSector: A value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_7 (for STM32F74xxx/STM32F75xxx devices) + * or a value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_11 (in Single Bank mode for STM32F76xxx/STM32F77xxx devices) + * or a value between OB_WRP_DB_SECTOR_0 and OB_WRP_DB_SECTOR_23 (in Dual Bank mode for STM32F76xxx/STM32F77xxx devices) + * @arg OB_WRP_SECTOR_All + * + * @retval HAL FLASH State + */ +static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WRP_SECTOR(WRPSector)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + /*Write protection enabled on sectors */ + FLASH->OPTCR &= (~WRPSector); + } + + return status; +} + +/** + * @brief Disable the write protection of the desired bank1 or bank 2 sectors + * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM4 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * + * @param WRPSector specifies the sector(s) to be write protected. + * This parameter can be one of the following values: + * @arg WRPSector: A value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_7 (for STM32F74xxx/STM32F75xxx devices) + * or a value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_11 (in Single Bank mode for STM32F76xxx/STM32F77xxx devices) + * or a value between OB_WRP_DB_SECTOR_0 and OB_WRP_DB_SECTOR_23 (in Dual Bank mode for STM32F76xxx/STM32F77xxx devices) + * @arg OB_WRP_Sector_All + * + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WRP_SECTOR(WRPSector)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + /* Write protection disabled on sectors */ + FLASH->OPTCR |= (WRPSector); + } + + return status; +} + +/** + * @brief Set the read protection level. + * @param Level specifies the read protection level. + * This parameter can be one of the following values: + * @arg OB_RDP_LEVEL_0: No protection + * @arg OB_RDP_LEVEL_1: Read protection of the memory + * @arg OB_RDP_LEVEL_2: Full chip protection + * + * @note WARNING: When enabling OB_RDP level 2 it's no more possible to go back to level 1 or 0 + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_RDP_LevelConfig(uint8_t Level) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_RDP_LEVEL(Level)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + *(__IO uint8_t*)OPTCR_BYTE1_ADDRESS = Level; + } + + return status; +} + +/** + * @brief Set the BOR Level. + * @param Level specifies the Option Bytes BOR Reset Level. 
+ * This parameter can be one of the following values: + * @arg OB_BOR_LEVEL3: Supply voltage ranges from 2.7 to 3.6 V + * @arg OB_BOR_LEVEL2: Supply voltage ranges from 2.4 to 2.7 V + * @arg OB_BOR_LEVEL1: Supply voltage ranges from 2.1 to 2.4 V + * @arg OB_BOR_OFF: Supply voltage ranges from 1.62 to 2.1 V + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_BOR_LevelConfig(uint8_t Level) +{ + /* Check the parameters */ + assert_param(IS_OB_BOR_LEVEL(Level)); + + /* Set the BOR Level */ + MODIFY_REG(FLASH->OPTCR, FLASH_OPTCR_BOR_LEV, Level); + + return HAL_OK; + +} + +/** + * @brief Configure Boot base address. + * + * @param BootOption specifies Boot base address depending from Boot pin = 0 or pin = 1 + * This parameter can be one of the following values: + * @arg OPTIONBYTE_BOOTADDR_0 : Boot address based when Boot pin = 0 + * @arg OPTIONBYTE_BOOTADDR_1 : Boot address based when Boot pin = 1 + * @param Address specifies Boot base address + * This parameter can be one of the following values: + * @arg OB_BOOTADDR_ITCM_RAM : Boot from ITCM RAM (0x00000000) + * @arg OB_BOOTADDR_SYSTEM : Boot from System memory bootloader (0x00100000) + * @arg OB_BOOTADDR_ITCM_FLASH : Boot from Flash on ITCM interface (0x00200000) + * @arg OB_BOOTADDR_AXIM_FLASH : Boot from Flash on AXIM interface (0x08000000) + * @arg OB_BOOTADDR_DTCM_RAM : Boot from DTCM RAM (0x20000000) + * @arg OB_BOOTADDR_SRAM1 : Boot from SRAM1 (0x20010000) + * @arg OB_BOOTADDR_SRAM2 : Boot from SRAM2 (0x2004C000) + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_BootAddressConfig(uint32_t BootOption, uint32_t Address) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_BOOT_ADDRESS(Address)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + if(BootOption == OPTIONBYTE_BOOTADDR_0) + { + MODIFY_REG(FLASH->OPTCR1, FLASH_OPTCR1_BOOT_ADD0, Address); + } + else + { + MODIFY_REG(FLASH->OPTCR1, FLASH_OPTCR1_BOOT_ADD1, (Address << 16)); + } + } + + return status; +} + +/** + * @brief Returns the FLASH Read Protection level. + * @retval FlagStatus FLASH ReadOut Protection Status: + * This parameter can be one of the following values: + * @arg OB_RDP_LEVEL_0: No protection + * @arg OB_RDP_LEVEL_1: Read protection of the memory + * @arg OB_RDP_LEVEL_2: Full chip protection + */ +static uint8_t FLASH_OB_GetRDP(void) +{ + uint8_t readstatus = OB_RDP_LEVEL_0; + + if ((*(__IO uint8_t*)(OPTCR_BYTE1_ADDRESS)) == OB_RDP_LEVEL_0) + { + readstatus = OB_RDP_LEVEL_0; + } + else if ((*(__IO uint8_t*)(OPTCR_BYTE1_ADDRESS)) == OB_RDP_LEVEL_2) + { + readstatus = OB_RDP_LEVEL_2; + } + else + { + readstatus = OB_RDP_LEVEL_1; + } + + return readstatus; +} + +/** + * @brief Returns the FLASH BOR level. + * @retval uint32_t The FLASH BOR level: + * - OB_BOR_LEVEL3: Supply voltage ranges from 2.7 to 3.6 V + * - OB_BOR_LEVEL2: Supply voltage ranges from 2.4 to 2.7 V + * - OB_BOR_LEVEL1: Supply voltage ranges from 2.1 to 2.4 V + * - OB_BOR_OFF : Supply voltage ranges from 1.62 to 2.1 V + */ +static uint32_t FLASH_OB_GetBOR(void) +{ + /* Return the FLASH BOR level */ + return ((uint32_t)(FLASH->OPTCR & 0x0C)); +} + +/** + * @brief Configure Boot base address. 
+ * + * @param BootOption specifies Boot base address depending from Boot pin = 0 or pin = 1 + * This parameter can be one of the following values: + * @arg OPTIONBYTE_BOOTADDR_0 : Boot address based when Boot pin = 0 + * @arg OPTIONBYTE_BOOTADDR_1 : Boot address based when Boot pin = 1 + * + * @retval uint32_t Boot Base Address: + * - OB_BOOTADDR_ITCM_RAM : Boot from ITCM RAM (0x00000000) + * - OB_BOOTADDR_SYSTEM : Boot from System memory bootloader (0x00100000) + * - OB_BOOTADDR_ITCM_FLASH : Boot from Flash on ITCM interface (0x00200000) + * - OB_BOOTADDR_AXIM_FLASH : Boot from Flash on AXIM interface (0x08000000) + * - OB_BOOTADDR_DTCM_RAM : Boot from DTCM RAM (0x20000000) + * - OB_BOOTADDR_SRAM1 : Boot from SRAM1 (0x20010000) + * - OB_BOOTADDR_SRAM2 : Boot from SRAM2 (0x2004C000) + */ +static uint32_t FLASH_OB_GetBootAddress(uint32_t BootOption) +{ + uint32_t Address = 0; + + /* Return the Boot base Address */ + if(BootOption == OPTIONBYTE_BOOTADDR_0) + { + Address = FLASH->OPTCR1 & FLASH_OPTCR1_BOOT_ADD0; + } + else + { + Address = ((FLASH->OPTCR1 & FLASH_OPTCR1_BOOT_ADD1) >> 16); + } + + return Address; +} + +#if defined (FLASH_OPTCR2_PCROP) +/** + * @brief Set the PCROP protection for sectors. + * @param PCROPSector specifies the sector(s) to be PCROP protected. + * This parameter can be one of the following values: + * @arg OB_PCROP_SECTOR_x: A value between OB_PCROP_SECTOR_0 and OB_PCROP_SECTOR_7 + * @arg OB_PCROP_SECTOR_ALL + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_PCROP_Config(uint32_t PCROPSector) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_PCROP_SECTOR(PCROPSector)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + MODIFY_REG(FLASH->OPTCR2, FLASH_OPTCR2_PCROP, PCROPSector); + } + + return status; +} + +/** + * @brief Set the PCROP_RDP value + * @param Pcrop_Rdp specifies the PCROP_RDP bit value. + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_PCROP_RDP_Config(uint32_t Pcrop_Rdp) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_PCROP_RDP_VALUE(Pcrop_Rdp)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + MODIFY_REG(FLASH->OPTCR2, FLASH_OPTCR2_PCROP_RDP, Pcrop_Rdp); + } + + return status; +} + +/** + * @brief Return the FLASH PCROP Protection Option Bytes value. + * @retval uint32_t FLASH PCROP Protection Option Bytes value + */ +static uint32_t FLASH_OB_GetPCROP(void) +{ + /* Return the FLASH write protection Register value */ + return ((uint32_t)(FLASH->OPTCR2 & FLASH_OPTCR2_PCROP)); +} + +/** + * @brief Return the FLASH PCROP_RDP option byte value. 
+ * @retval uint32_t FLASH PCROP_RDP option byte value + */ +static uint32_t FLASH_OB_GetPCROPRDP(void) +{ + /* Return the FLASH write protection Register value */ + return ((uint32_t)(FLASH->OPTCR2 & FLASH_OPTCR2_PCROP_RDP)); +} +#endif /* FLASH_OPTCR2_PCROP */ + +/** + * @} + */ + +#endif /* HAL_FLASH_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c new file mode 100644 index 000000000..75e4b5b1b --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c @@ -0,0 +1,532 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_gpio.c + * @author MCD Application Team + * @brief GPIO HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the General Purpose Input/Output (GPIO) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + @verbatim + ============================================================================== + ##### GPIO Peripheral features ##### + ============================================================================== + [..] + Subject to the specific hardware characteristics of each I/O port listed in the datasheet, each + port bit of the General Purpose IO (GPIO) Ports, can be individually configured by software + in several modes: + (+) Input mode + (+) Analog mode + (+) Output mode + (+) Alternate function mode + (+) External interrupt/event lines + + [..] + During and just after reset, the alternate functions and external interrupt + lines are not active and the I/O ports are configured in input floating mode. + + [..] + All GPIO pins have weak internal pull-up and pull-down resistors, which can be + activated or not. + + [..] + In Output or Alternate mode, each IO can be configured on open-drain or push-pull + type and the IO speed can be selected depending on the VDD value. + + [..] + All ports have external interrupt/event capability. To use external interrupt + lines, the port must be configured in input mode. All available GPIO pins are + connected to the 16 external interrupt/event lines from EXTI0 to EXTI15. + + [..] + The external interrupt/event controller consists of up to 23 edge detectors + (16 lines are connected to GPIO) for generating event/interrupt requests (each + input line can be independently configured to select the type (interrupt or event) + and the corresponding trigger event (rising or falling or both). Each line can + also be masked independently. + + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Enable the GPIO AHB clock using the following function: __HAL_RCC_GPIOx_CLK_ENABLE(). + + (#) Configure the GPIO pin(s) using HAL_GPIO_Init(). + (++) Configure the IO mode using "Mode" member from GPIO_InitTypeDef structure + (++) Activate Pull-up, Pull-down resistor using "Pull" member from GPIO_InitTypeDef + structure. + (++) In case of Output or alternate function mode selection: the speed is + configured through "Speed" member from GPIO_InitTypeDef structure. 
+ (++) In alternate mode is selection, the alternate function connected to the IO + is configured through "Alternate" member from GPIO_InitTypeDef structure. + (++) Analog mode is required when a pin is to be used as ADC channel + or DAC output. + (++) In case of external interrupt/event selection the "Mode" member from + GPIO_InitTypeDef structure select the type (interrupt or event) and + the corresponding trigger event (rising or falling or both). + + (#) In case of external interrupt/event mode selection, configure NVIC IRQ priority + mapped to the EXTI line using HAL_NVIC_SetPriority() and enable it using + HAL_NVIC_EnableIRQ(). + + (#) To get the level of a pin configured in input mode use HAL_GPIO_ReadPin(). + + (#) To set/reset the level of a pin configured in output mode use + HAL_GPIO_WritePin()/HAL_GPIO_TogglePin(). + + (#) To lock pin configuration until next reset use HAL_GPIO_LockPin(). + + + (#) During and just after reset, the alternate functions are not + active and the GPIO pins are configured in input floating mode (except JTAG + pins). + + (#) The LSE oscillator pins OSC32_IN and OSC32_OUT can be used as general purpose + (PC14 and PC15, respectively) when the LSE oscillator is off. The LSE has + priority over the GPIO function. + + (#) The HSE oscillator pins OSC_IN/OSC_OUT can be used as + general purpose PH0 and PH1, respectively, when the HSE oscillator is off. + The HSE has priority over the GPIO function. + + @endverbatim + ****************************************************************************** + * @attention + * + *

+  * © Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup GPIO GPIO + * @brief GPIO HAL module driver + * @{ + */ + +#ifdef HAL_GPIO_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup GPIO_Private_Constants GPIO Private Constants + * @{ + */ +#define GPIO_MODE ((uint32_t)0x00000003U) +#define EXTI_MODE ((uint32_t)0x10000000U) +#define GPIO_MODE_IT ((uint32_t)0x00010000U) +#define GPIO_MODE_EVT ((uint32_t)0x00020000U) +#define RISING_EDGE ((uint32_t)0x00100000U) +#define FALLING_EDGE ((uint32_t)0x00200000U) +#define GPIO_OUTPUT_TYPE ((uint32_t)0x00000010U) + +#define GPIO_NUMBER ((uint32_t)16U) +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @defgroup GPIO_Exported_Functions GPIO Exported Functions + * @{ + */ + +/** @defgroup GPIO_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + This section provides functions allowing to initialize and de-initialize the GPIOs + to be ready for use. + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the GPIOx peripheral according to the specified parameters in the GPIO_Init. + * @param GPIOx where x can be (A..K) to select the GPIO peripheral. + * @param GPIO_Init pointer to a GPIO_InitTypeDef structure that contains + * the configuration information for the specified GPIO peripheral. 
+ * @retval None + */ +void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init) +{ + uint32_t position = 0x00; + uint32_t ioposition = 0x00; + uint32_t iocurrent = 0x00; + uint32_t temp = 0x00; + + /* Check the parameters */ + assert_param(IS_GPIO_ALL_INSTANCE(GPIOx)); + assert_param(IS_GPIO_PIN(GPIO_Init->Pin)); + assert_param(IS_GPIO_MODE(GPIO_Init->Mode)); + assert_param(IS_GPIO_PULL(GPIO_Init->Pull)); + + /* Configure the port pins */ + for(position = 0; position < GPIO_NUMBER; position++) + { + /* Get the IO position */ + ioposition = ((uint32_t)0x01) << position; + /* Get the current IO position */ + iocurrent = (uint32_t)(GPIO_Init->Pin) & ioposition; + + if(iocurrent == ioposition) + { + /*--------------------- GPIO Mode Configuration ------------------------*/ + /* In case of Output or Alternate function mode selection */ + if((GPIO_Init->Mode == GPIO_MODE_OUTPUT_PP) || (GPIO_Init->Mode == GPIO_MODE_AF_PP) || + (GPIO_Init->Mode == GPIO_MODE_OUTPUT_OD) || (GPIO_Init->Mode == GPIO_MODE_AF_OD)) + { + /* Check the Speed parameter */ + assert_param(IS_GPIO_SPEED(GPIO_Init->Speed)); + /* Configure the IO Speed */ + temp = GPIOx->OSPEEDR; + temp &= ~(GPIO_OSPEEDER_OSPEEDR0 << (position * 2)); + temp |= (GPIO_Init->Speed << (position * 2)); + GPIOx->OSPEEDR = temp; + + /* Configure the IO Output Type */ + temp = GPIOx->OTYPER; + temp &= ~(GPIO_OTYPER_OT_0 << position) ; + temp |= (((GPIO_Init->Mode & GPIO_OUTPUT_TYPE) >> 4) << position); + GPIOx->OTYPER = temp; + } + + /* Activate the Pull-up or Pull down resistor for the current IO */ + temp = GPIOx->PUPDR; + temp &= ~(GPIO_PUPDR_PUPDR0 << (position * 2)); + temp |= ((GPIO_Init->Pull) << (position * 2)); + GPIOx->PUPDR = temp; + + /* In case of Alternate function mode selection */ + if((GPIO_Init->Mode == GPIO_MODE_AF_PP) || (GPIO_Init->Mode == GPIO_MODE_AF_OD)) + { + /* Check the Alternate function parameter */ + assert_param(IS_GPIO_AF(GPIO_Init->Alternate)); + + /* Configure Alternate function mapped with the current IO */ + temp = GPIOx->AFR[position >> 3]; + temp &= ~((uint32_t)0xF << ((uint32_t)(position & (uint32_t)0x07) * 4)) ; + temp |= ((uint32_t)(GPIO_Init->Alternate) << (((uint32_t)position & (uint32_t)0x07) * 4)); + GPIOx->AFR[position >> 3] = temp; + } + + /* Configure IO Direction mode (Input, Output, Alternate or Analog) */ + temp = GPIOx->MODER; + temp &= ~(GPIO_MODER_MODER0 << (position * 2)); + temp |= ((GPIO_Init->Mode & GPIO_MODE) << (position * 2)); + GPIOx->MODER = temp; + + /*--------------------- EXTI Mode Configuration ------------------------*/ + /* Configure the External Interrupt or event for the current IO */ + if((GPIO_Init->Mode & EXTI_MODE) == EXTI_MODE) + { + /* Enable SYSCFG Clock */ + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + temp = SYSCFG->EXTICR[position >> 2]; + temp &= ~(((uint32_t)0x0F) << (4 * (position & 0x03))); + temp |= ((uint32_t)(GPIO_GET_INDEX(GPIOx)) << (4 * (position & 0x03))); + SYSCFG->EXTICR[position >> 2] = temp; + + /* Clear EXTI line configuration */ + temp = EXTI->IMR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & GPIO_MODE_IT) == GPIO_MODE_IT) + { + temp |= iocurrent; + } + EXTI->IMR = temp; + + temp = EXTI->EMR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & GPIO_MODE_EVT) == GPIO_MODE_EVT) + { + temp |= iocurrent; + } + EXTI->EMR = temp; + + /* Clear Rising Falling edge configuration */ + temp = EXTI->RTSR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & RISING_EDGE) == RISING_EDGE) + { + temp |= iocurrent; + } + EXTI->RTSR = temp; + + temp 
= EXTI->FTSR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & FALLING_EDGE) == FALLING_EDGE) + { + temp |= iocurrent; + } + EXTI->FTSR = temp; + } + } + } +} + +/** + * @brief De-initializes the GPIOx peripheral registers to their default reset values. + * @param GPIOx where x can be (A..K) to select the GPIO peripheral. + * @param GPIO_Pin specifies the port bit to be written. + * This parameter can be one of GPIO_PIN_x where x can be (0..15). + * @retval None + */ +void HAL_GPIO_DeInit(GPIO_TypeDef *GPIOx, uint32_t GPIO_Pin) +{ + uint32_t position; + uint32_t ioposition = 0x00; + uint32_t iocurrent = 0x00; + uint32_t tmp = 0x00; + + /* Check the parameters */ + assert_param(IS_GPIO_ALL_INSTANCE(GPIOx)); + + /* Configure the port pins */ + for(position = 0; position < GPIO_NUMBER; position++) + { + /* Get the IO position */ + ioposition = ((uint32_t)0x01) << position; + /* Get the current IO position */ + iocurrent = (GPIO_Pin) & ioposition; + + if(iocurrent == ioposition) + { + /*------------------------- EXTI Mode Configuration --------------------*/ + tmp = SYSCFG->EXTICR[position >> 2]; + tmp &= (((uint32_t)0x0F) << (4 * (position & 0x03))); + if(tmp == ((uint32_t)(GPIO_GET_INDEX(GPIOx)) << (4 * (position & 0x03)))) + { + /* Clear EXTI line configuration */ + EXTI->IMR &= ~((uint32_t)iocurrent); + EXTI->EMR &= ~((uint32_t)iocurrent); + + /* Clear Rising Falling edge configuration */ + EXTI->RTSR &= ~((uint32_t)iocurrent); + EXTI->FTSR &= ~((uint32_t)iocurrent); + + /* Configure the External Interrupt or event for the current IO */ + tmp = ((uint32_t)0x0F) << (4 * (position & 0x03)); + SYSCFG->EXTICR[position >> 2] &= ~tmp; + } + /*------------------------- GPIO Mode Configuration --------------------*/ + /* Configure IO Direction in Input Floating Mode */ + GPIOx->MODER &= ~(GPIO_MODER_MODER0 << (position * 2)); + + /* Configure the default Alternate Function in current IO */ + GPIOx->AFR[position >> 3] &= ~((uint32_t)0xF << ((uint32_t)(position & (uint32_t)0x07) * 4)) ; + + /* Deactivate the Pull-up and Pull-down resistor for the current IO */ + GPIOx->PUPDR &= ~(GPIO_PUPDR_PUPDR0 << (position * 2)); + + /* Configure the default value IO Output Type */ + GPIOx->OTYPER &= ~(GPIO_OTYPER_OT_0 << position) ; + + /* Configure the default value for IO Speed */ + GPIOx->OSPEEDR &= ~(GPIO_OSPEEDER_OSPEEDR0 << (position * 2)); + } + } +} + +/** + * @} + */ + +/** @defgroup GPIO_Exported_Functions_Group2 IO operation functions + * @brief GPIO Read and Write + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Reads the specified input port pin. + * @param GPIOx where x can be (A..K) to select the GPIO peripheral. + * @param GPIO_Pin specifies the port bit to read. + * This parameter can be GPIO_PIN_x where x can be (0..15). + * @retval The input port pin value. + */ +GPIO_PinState HAL_GPIO_ReadPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) +{ + GPIO_PinState bitstatus; + + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + + if((GPIOx->IDR & GPIO_Pin) != (uint32_t)GPIO_PIN_RESET) + { + bitstatus = GPIO_PIN_SET; + } + else + { + bitstatus = GPIO_PIN_RESET; + } + return bitstatus; +} + +/** + * @brief Sets or clears the selected data port bit. + * + * @note This function uses GPIOx_BSRR register to allow atomic read/modify + * accesses. 
In this way, there is no risk of an IRQ occurring between + * the read and the modify access. + * + * @param GPIOx where x can be (A..K) to select the GPIO peripheral. + * @param GPIO_Pin specifies the port bit to be written. + * This parameter can be one of GPIO_PIN_x where x can be (0..15). + * @param PinState specifies the value to be written to the selected bit. + * This parameter can be one of the GPIO_PinState enum values: + * @arg GPIO_PIN_RESET: to clear the port pin + * @arg GPIO_PIN_SET: to set the port pin + * @retval None + */ +void HAL_GPIO_WritePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin, GPIO_PinState PinState) +{ + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + assert_param(IS_GPIO_PIN_ACTION(PinState)); + + if(PinState != GPIO_PIN_RESET) + { + GPIOx->BSRR = GPIO_Pin; + } + else + { + GPIOx->BSRR = (uint32_t)GPIO_Pin << 16; + } +} + +/** + * @brief Toggles the specified GPIO pins. + * @param GPIOx Where x can be (A..I) to select the GPIO peripheral. + * @param GPIO_Pin Specifies the pins to be toggled. + * @retval None + */ +void HAL_GPIO_TogglePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) +{ + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + + if ((GPIOx->ODR & GPIO_Pin) != 0X00u) + { + GPIOx->BSRR = (uint32_t)GPIO_Pin << GPIO_NUMBER; + } + else + { + GPIOx->BSRR = (uint32_t)GPIO_Pin; + } +} + +/** + * @brief Locks GPIO Pins configuration registers. + * @note The locked registers are GPIOx_MODER, GPIOx_OTYPER, GPIOx_OSPEEDR, + * GPIOx_PUPDR, GPIOx_AFRL and GPIOx_AFRH. + * @note The configuration of the locked GPIO pins can no longer be modified + * until the next reset. + * @param GPIOx where x can be (A..F) to select the GPIO peripheral for STM32F7 family + * @param GPIO_Pin specifies the port bit to be locked. + * This parameter can be any combination of GPIO_PIN_x where x can be (0..15). + * @retval None + */ +HAL_StatusTypeDef HAL_GPIO_LockPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) +{ + __IO uint32_t tmp = GPIO_LCKR_LCKK; + + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + + /* Apply lock key write sequence */ + tmp |= GPIO_Pin; + /* Set LCKx bit(s): LCKK='1' + LCK[15-0] */ + GPIOx->LCKR = tmp; + /* Reset LCKx bit(s): LCKK='0' + LCK[15-0] */ + GPIOx->LCKR = GPIO_Pin; + /* Set LCKx bit(s): LCKK='1' + LCK[15-0] */ + GPIOx->LCKR = tmp; + /* Read LCKR register. This read is mandatory to complete key lock sequence */ + tmp = GPIOx->LCKR; + + /* Read again in order to confirm lock is active */ + if((GPIOx->LCKR & GPIO_LCKR_LCKK) != RESET) + { + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief This function handles EXTI interrupt request. + * @param GPIO_Pin Specifies the pins connected EXTI line + * @retval None + */ +void HAL_GPIO_EXTI_IRQHandler(uint16_t GPIO_Pin) +{ + /* EXTI line interrupt detected */ + if(__HAL_GPIO_EXTI_GET_IT(GPIO_Pin) != RESET) + { + __HAL_GPIO_EXTI_CLEAR_IT(GPIO_Pin); + HAL_GPIO_EXTI_Callback(GPIO_Pin); + } +} + +/** + * @brief EXTI line detection callbacks. 
+ * @param GPIO_Pin Specifies the pins connected EXTI line + * @retval None + */ +__weak void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(GPIO_Pin); + + /* NOTE: This function Should not be modified, when the callback is needed, + the HAL_GPIO_EXTI_Callback could be implemented in the user file + */ +} + +/** + * @} + */ + + +/** + * @} + */ + +#endif /* HAL_GPIO_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c new file mode 100644 index 000000000..4b3cee4d7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c @@ -0,0 +1,6522 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_i2c.c + * @author MCD Application Team + * @brief I2C HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Inter Integrated Circuit (I2C) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral State and Errors functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The I2C HAL driver can be used as follows: + + (#) Declare a I2C_HandleTypeDef handle structure, for example: + I2C_HandleTypeDef hi2c; + + (#)Initialize the I2C low level resources by implementing the @ref HAL_I2C_MspInit() API: + (##) Enable the I2Cx interface clock + (##) I2C pins configuration + (+++) Enable the clock for the I2C GPIOs + (+++) Configure I2C pins as alternate function open-drain + (##) NVIC configuration if you need to use interrupt process + (+++) Configure the I2Cx interrupt priority + (+++) Enable the NVIC I2C IRQ Channel + (##) DMA Configuration if you need to use DMA process + (+++) Declare a DMA_HandleTypeDef handle structure for the transmit or receive stream + (+++) Enable the DMAx interface clock using + (+++) Configure the DMA handle parameters + (+++) Configure the DMA Tx or Rx stream + (+++) Associate the initialized DMA handle to the hi2c DMA Tx or Rx handle + (+++) Configure the priority and enable the NVIC for the transfer complete interrupt on + the DMA Tx or Rx stream + + (#) Configure the Communication Clock Timing, Own Address1, Master Addressing mode, Dual Addressing mode, + Own Address2, Own Address2 Mask, General call and Nostretch mode in the hi2c Init structure. + + (#) Initialize the I2C registers by calling the @ref HAL_I2C_Init(), configures also the low level Hardware + (GPIO, CLOCK, NVIC...etc) by calling the customized @ref HAL_I2C_MspInit(&hi2c) API. + + (#) To check if target device is ready for communication, use the function @ref HAL_I2C_IsDeviceReady() + + (#) For I2C IO and IO MEM operations, three operation modes are available within this driver : + + *** Polling mode IO operation *** + ================================= + [..] 
+ (+) Transmit in master mode an amount of data in blocking mode using @ref HAL_I2C_Master_Transmit() + (+) Receive in master mode an amount of data in blocking mode using @ref HAL_I2C_Master_Receive() + (+) Transmit in slave mode an amount of data in blocking mode using @ref HAL_I2C_Slave_Transmit() + (+) Receive in slave mode an amount of data in blocking mode using @ref HAL_I2C_Slave_Receive() + + *** Polling mode IO MEM operation *** + ===================================== + [..] + (+) Write an amount of data in blocking mode to a specific memory address using @ref HAL_I2C_Mem_Write() + (+) Read an amount of data in blocking mode from a specific memory address using @ref HAL_I2C_Mem_Read() + + + *** Interrupt mode IO operation *** + =================================== + [..] + (+) Transmit in master mode an amount of data in non-blocking mode using @ref HAL_I2C_Master_Transmit_IT() + (+) At transmission end of transfer, @ref HAL_I2C_MasterTxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_MasterTxCpltCallback() + (+) Receive in master mode an amount of data in non-blocking mode using @ref HAL_I2C_Master_Receive_IT() + (+) At reception end of transfer, @ref HAL_I2C_MasterRxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_MasterRxCpltCallback() + (+) Transmit in slave mode an amount of data in non-blocking mode using @ref HAL_I2C_Slave_Transmit_IT() + (+) At transmission end of transfer, @ref HAL_I2C_SlaveTxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_SlaveTxCpltCallback() + (+) Receive in slave mode an amount of data in non-blocking mode using @ref HAL_I2C_Slave_Receive_IT() + (+) At reception end of transfer, @ref HAL_I2C_SlaveRxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_SlaveRxCpltCallback() + (+) In case of transfer Error, @ref HAL_I2C_ErrorCallback() function is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_ErrorCallback() + (+) Abort a master I2C process communication with Interrupt using @ref HAL_I2C_Master_Abort_IT() + (+) End of abort process, @ref HAL_I2C_AbortCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_AbortCpltCallback() + (+) Discard a slave I2C process communication using @ref __HAL_I2C_GENERATE_NACK() macro. + This action will inform Master to generate a Stop condition to discard the communication. + + + *** Interrupt mode or DMA mode IO sequential operation *** + ========================================================== + [..] + (@) These interfaces allow to manage a sequential transfer with a repeated start condition + when a direction change during transfer + [..] 
+ (+) A specific option field manage the different steps of a sequential transfer + (+) Option field values are defined through @ref I2C_XFEROPTIONS and are listed below: + (++) I2C_FIRST_AND_LAST_FRAME: No sequential usage, functionnal is same as associated interfaces in no sequential mode + (++) I2C_FIRST_FRAME: Sequential usage, this option allow to manage a sequence with start condition, address + and data to transfer without a final stop condition + (++) I2C_FIRST_AND_NEXT_FRAME: Sequential usage (Master only), this option allow to manage a sequence with start condition, address + and data to transfer without a final stop condition, an then permit a call the same master sequential interface + several times (like @ref HAL_I2C_Master_Seq_Transmit_IT() then @ref HAL_I2C_Master_Seq_Transmit_IT() + or @ref HAL_I2C_Master_Seq_Transmit_DMA() then @ref HAL_I2C_Master_Seq_Transmit_DMA()) + (++) I2C_NEXT_FRAME: Sequential usage, this option allow to manage a sequence with a restart condition, address + and with new data to transfer if the direction change or manage only the new data to transfer + if no direction change and without a final stop condition in both cases + (++) I2C_LAST_FRAME: Sequential usage, this option allow to manage a sequance with a restart condition, address + and with new data to transfer if the direction change or manage only the new data to transfer + if no direction change and with a final stop condition in both cases + (++) I2C_LAST_FRAME_NO_STOP: Sequential usage (Master only), this option allow to manage a restart condition after several call of the same master sequential + interface several times (link with option I2C_FIRST_AND_NEXT_FRAME). + Usage can, transfer several bytes one by one using HAL_I2C_Master_Seq_Transmit_IT(option I2C_FIRST_AND_NEXT_FRAME then I2C_NEXT_FRAME) + or HAL_I2C_Master_Seq_Receive_IT(option I2C_FIRST_AND_NEXT_FRAME then I2C_NEXT_FRAME) + or HAL_I2C_Master_Seq_Transmit_DMA(option I2C_FIRST_AND_NEXT_FRAME then I2C_NEXT_FRAME) + or HAL_I2C_Master_Seq_Receive_DMA(option I2C_FIRST_AND_NEXT_FRAME then I2C_NEXT_FRAME). + Then usage of this option I2C_LAST_FRAME_NO_STOP at the last Transmit or Receive sequence permit to call the oposite interface Receive or Transmit + without stopping the communication and so generate a restart condition. + (++) I2C_OTHER_FRAME: Sequential usage (Master only), this option allow to manage a restart condition after each call of the same master sequential + interface. + Usage can, transfer several bytes one by one with a restart with slave address between each bytes using HAL_I2C_Master_Seq_Transmit_IT(option I2C_FIRST_FRAME then I2C_OTHER_FRAME) + or HAL_I2C_Master_Seq_Receive_IT(option I2C_FIRST_FRAME then I2C_OTHER_FRAME) + or HAL_I2C_Master_Seq_Transmit_DMA(option I2C_FIRST_FRAME then I2C_OTHER_FRAME) + or HAL_I2C_Master_Seq_Receive_DMA(option I2C_FIRST_FRAME then I2C_OTHER_FRAME). + Then usage of this option I2C_OTHER_AND_LAST_FRAME at the last frame to help automatic generation of STOP condition. 
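+
+      (@) For illustration only (this sketch is not part of the original ST notes; the
+          7-bit slave address 0x50, the buffer names and the transfer sizes are assumptions),
+          a register read built from these options could look like:
+          HAL_I2C_Master_Seq_Transmit_IT(&hi2c, (uint16_t)(0x50U << 1), &reg_addr, 1U, I2C_FIRST_FRAME);
+          then, once HAL_I2C_MasterTxCpltCallback() has been called, issue the repeated start:
+          HAL_I2C_Master_Seq_Receive_IT(&hi2c, (uint16_t)(0x50U << 1), rx_buf, 4U, I2C_LAST_FRAME);
+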
+ + (+) Differents sequential I2C interfaces are listed below: + (++) Sequential transmit in master I2C mode an amount of data in non-blocking mode using @ref HAL_I2C_Master_Seq_Transmit_IT() + or using @ref HAL_I2C_Master_Seq_Transmit_DMA() + (+++) At transmission end of current frame transfer, @ref HAL_I2C_MasterTxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_MasterTxCpltCallback() + (++) Sequential receive in master I2C mode an amount of data in non-blocking mode using @ref HAL_I2C_Master_Seq_Receive_IT() + or using @ref HAL_I2C_Master_Seq_Receive_DMA() + (+++) At reception end of current frame transfer, @ref HAL_I2C_MasterRxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_MasterRxCpltCallback() + (++) Abort a master IT or DMA I2C process communication with Interrupt using @ref HAL_I2C_Master_Abort_IT() + (+++) End of abort process, @ref HAL_I2C_AbortCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_AbortCpltCallback() + (++) Enable/disable the Address listen mode in slave I2C mode using @ref HAL_I2C_EnableListen_IT() @ref HAL_I2C_DisableListen_IT() + (+++) When address slave I2C match, @ref HAL_I2C_AddrCallback() is executed and user can + add his own code to check the Address Match Code and the transmission direction request by master (Write/Read). + (+++) At Listen mode end @ref HAL_I2C_ListenCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_ListenCpltCallback() + (++) Sequential transmit in slave I2C mode an amount of data in non-blocking mode using @ref HAL_I2C_Slave_Seq_Transmit_IT() + or using @ref HAL_I2C_Slave_Seq_Transmit_DMA() + (+++) At transmission end of current frame transfer, @ref HAL_I2C_SlaveTxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_SlaveTxCpltCallback() + (++) Sequential receive in slave I2C mode an amount of data in non-blocking mode using @ref HAL_I2C_Slave_Seq_Receive_IT() + or using @ref HAL_I2C_Slave_Seq_Receive_DMA() + (+++) At reception end of current frame transfer, @ref HAL_I2C_SlaveRxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_SlaveRxCpltCallback() + (++) In case of transfer Error, @ref HAL_I2C_ErrorCallback() function is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_ErrorCallback() + (++) Discard a slave I2C process communication using @ref __HAL_I2C_GENERATE_NACK() macro. + This action will inform Master to generate a Stop condition to discard the communication. + + *** Interrupt mode IO MEM operation *** + ======================================= + [..] 
+ (+) Write an amount of data in non-blocking mode with Interrupt to a specific memory address using + @ref HAL_I2C_Mem_Write_IT() + (+) At Memory end of write transfer, @ref HAL_I2C_MemTxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_MemTxCpltCallback() + (+) Read an amount of data in non-blocking mode with Interrupt from a specific memory address using + @ref HAL_I2C_Mem_Read_IT() + (+) At Memory end of read transfer, @ref HAL_I2C_MemRxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_MemRxCpltCallback() + (+) In case of transfer Error, @ref HAL_I2C_ErrorCallback() function is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_ErrorCallback() + + *** DMA mode IO operation *** + ============================== + [..] + (+) Transmit in master mode an amount of data in non-blocking mode (DMA) using + @ref HAL_I2C_Master_Transmit_DMA() + (+) At transmission end of transfer, @ref HAL_I2C_MasterTxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_MasterTxCpltCallback() + (+) Receive in master mode an amount of data in non-blocking mode (DMA) using + @ref HAL_I2C_Master_Receive_DMA() + (+) At reception end of transfer, @ref HAL_I2C_MasterRxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_MasterRxCpltCallback() + (+) Transmit in slave mode an amount of data in non-blocking mode (DMA) using + @ref HAL_I2C_Slave_Transmit_DMA() + (+) At transmission end of transfer, @ref HAL_I2C_SlaveTxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_SlaveTxCpltCallback() + (+) Receive in slave mode an amount of data in non-blocking mode (DMA) using + @ref HAL_I2C_Slave_Receive_DMA() + (+) At reception end of transfer, @ref HAL_I2C_SlaveRxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_SlaveRxCpltCallback() + (+) In case of transfer Error, @ref HAL_I2C_ErrorCallback() function is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_ErrorCallback() + (+) Abort a master I2C process communication with Interrupt using @ref HAL_I2C_Master_Abort_IT() + (+) End of abort process, @ref HAL_I2C_AbortCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_AbortCpltCallback() + (+) Discard a slave I2C process communication using @ref __HAL_I2C_GENERATE_NACK() macro. + This action will inform Master to generate a Stop condition to discard the communication. + + *** DMA mode IO MEM operation *** + ================================= + [..] 
+ (+) Write an amount of data in non-blocking mode with DMA to a specific memory address using + @ref HAL_I2C_Mem_Write_DMA() + (+) At Memory end of write transfer, @ref HAL_I2C_MemTxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_MemTxCpltCallback() + (+) Read an amount of data in non-blocking mode with DMA from a specific memory address using + @ref HAL_I2C_Mem_Read_DMA() + (+) At Memory end of read transfer, @ref HAL_I2C_MemRxCpltCallback() is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_MemRxCpltCallback() + (+) In case of transfer Error, @ref HAL_I2C_ErrorCallback() function is executed and user can + add his own code by customization of function pointer @ref HAL_I2C_ErrorCallback() + + + *** I2C HAL driver macros list *** + ================================== + [..] + Below the list of most used macros in I2C HAL driver. + + (+) @ref __HAL_I2C_ENABLE: Enable the I2C peripheral + (+) @ref __HAL_I2C_DISABLE: Disable the I2C peripheral + (+) @ref __HAL_I2C_GENERATE_NACK: Generate a Non-Acknowledge I2C peripheral in Slave mode + (+) @ref __HAL_I2C_GET_FLAG: Check whether the specified I2C flag is set or not + (+) @ref __HAL_I2C_CLEAR_FLAG: Clear the specified I2C pending flag + (+) @ref __HAL_I2C_ENABLE_IT: Enable the specified I2C interrupt + (+) @ref __HAL_I2C_DISABLE_IT: Disable the specified I2C interrupt + + *** Callback registration *** + ============================================= + [..] + The compilation flag USE_HAL_I2C_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + Use Functions @ref HAL_I2C_RegisterCallback() or @ref HAL_I2C_RegisterAddrCallback() + to register an interrupt callback. + [..] + Function @ref HAL_I2C_RegisterCallback() allows to register following callbacks: + (+) MasterTxCpltCallback : callback for Master transmission end of transfer. + (+) MasterRxCpltCallback : callback for Master reception end of transfer. + (+) SlaveTxCpltCallback : callback for Slave transmission end of transfer. + (+) SlaveRxCpltCallback : callback for Slave reception end of transfer. + (+) ListenCpltCallback : callback for end of listen mode. + (+) MemTxCpltCallback : callback for Memory transmission end of transfer. + (+) MemRxCpltCallback : callback for Memory reception end of transfer. + (+) ErrorCallback : callback for error detection. + (+) AbortCpltCallback : callback for abort completion process. + (+) MspInitCallback : callback for Msp Init. + (+) MspDeInitCallback : callback for Msp DeInit. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + [..] + For specific callback AddrCallback use dedicated register callbacks : @ref HAL_I2C_RegisterAddrCallback(). + [..] + Use function @ref HAL_I2C_UnRegisterCallback to reset a callback to the default + weak function. + @ref HAL_I2C_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) MasterTxCpltCallback : callback for Master transmission end of transfer. + (+) MasterRxCpltCallback : callback for Master reception end of transfer. + (+) SlaveTxCpltCallback : callback for Slave transmission end of transfer. + (+) SlaveRxCpltCallback : callback for Slave reception end of transfer. + (+) ListenCpltCallback : callback for end of listen mode. + (+) MemTxCpltCallback : callback for Memory transmission end of transfer. 
+ (+) MemRxCpltCallback : callback for Memory reception end of transfer. + (+) ErrorCallback : callback for error detection. + (+) AbortCpltCallback : callback for abort completion process. + (+) MspInitCallback : callback for Msp Init. + (+) MspDeInitCallback : callback for Msp DeInit. + [..] + For callback AddrCallback use dedicated register callbacks : @ref HAL_I2C_UnRegisterAddrCallback(). + [..] + By default, after the @ref HAL_I2C_Init() and when the state is @ref HAL_I2C_STATE_RESET + all callbacks are set to the corresponding weak functions: + examples @ref HAL_I2C_MasterTxCpltCallback(), @ref HAL_I2C_MasterRxCpltCallback(). + Exception done for MspInit and MspDeInit functions that are + reset to the legacy weak functions in the @ref HAL_I2C_Init()/ @ref HAL_I2C_DeInit() only when + these callbacks are null (not registered beforehand). + If MspInit or MspDeInit are not null, the @ref HAL_I2C_Init()/ @ref HAL_I2C_DeInit() + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) whatever the state. + [..] + Callbacks can be registered/unregistered in @ref HAL_I2C_STATE_READY state only. + Exception done MspInit/MspDeInit functions that can be registered/unregistered + in @ref HAL_I2C_STATE_READY or @ref HAL_I2C_STATE_RESET state, + thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. + Then, the user first registers the MspInit/MspDeInit user callbacks + using @ref HAL_I2C_RegisterCallback() before calling @ref HAL_I2C_DeInit() + or @ref HAL_I2C_Init() function. + [..] + When the compilation flag USE_HAL_I2C_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. + + [..] + (@) You can refer to the I2C HAL driver header file for more useful macros + + @endverbatim + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup I2C I2C + * @brief I2C HAL module driver + * @{ + */ + +#ifdef HAL_I2C_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ + +/** @defgroup I2C_Private_Define I2C Private Define + * @{ + */ +#define TIMING_CLEAR_MASK (0xF0FFFFFFU) /*!< I2C TIMING clear register Mask */ +#define I2C_TIMEOUT_ADDR (10000U) /*!< 10 s */ +#define I2C_TIMEOUT_BUSY (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_DIR (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_RXNE (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_STOPF (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_TC (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_TCR (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_TXIS (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_FLAG (25U) /*!< 25 ms */ + +#define MAX_NBYTE_SIZE 255U +#define SlaveAddr_SHIFT 7U +#define SlaveAddr_MSK 0x06U + +/* Private define for @ref PreviousState usage */ +#define I2C_STATE_MSK ((uint32_t)((uint32_t)((uint32_t)HAL_I2C_STATE_BUSY_TX | (uint32_t)HAL_I2C_STATE_BUSY_RX) & (uint32_t)(~((uint32_t)HAL_I2C_STATE_READY)))) /*!< Mask State define, keep only RX and TX bits */ +#define I2C_STATE_NONE ((uint32_t)(HAL_I2C_MODE_NONE)) /*!< Default Value */ +#define I2C_STATE_MASTER_BUSY_TX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_TX & I2C_STATE_MSK) | (uint32_t)HAL_I2C_MODE_MASTER)) /*!< Master Busy TX, combinaison of State LSB and Mode enum */ +#define I2C_STATE_MASTER_BUSY_RX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_RX & I2C_STATE_MSK) | (uint32_t)HAL_I2C_MODE_MASTER)) /*!< Master Busy RX, combinaison of State LSB and Mode enum */ +#define I2C_STATE_SLAVE_BUSY_TX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_TX & I2C_STATE_MSK) | (uint32_t)HAL_I2C_MODE_SLAVE)) /*!< Slave Busy TX, combinaison of State LSB and Mode enum */ +#define I2C_STATE_SLAVE_BUSY_RX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_RX & I2C_STATE_MSK) | (uint32_t)HAL_I2C_MODE_SLAVE)) /*!< Slave Busy RX, combinaison of State LSB and Mode enum */ +#define I2C_STATE_MEM_BUSY_TX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_TX & I2C_STATE_MSK) | (uint32_t)HAL_I2C_MODE_MEM)) /*!< Memory Busy TX, combinaison of State LSB and Mode enum */ +#define I2C_STATE_MEM_BUSY_RX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_RX & I2C_STATE_MSK) | (uint32_t)HAL_I2C_MODE_MEM)) /*!< Memory Busy RX, combinaison of State LSB and Mode enum */ + + +/* Private define to centralize the enable/disable of Interrupts */ +#define I2C_XFER_TX_IT (0x00000001U) +#define I2C_XFER_RX_IT (0x00000002U) +#define I2C_XFER_LISTEN_IT (0x00000004U) + +#define I2C_XFER_ERROR_IT (0x00000011U) +#define I2C_XFER_CPLT_IT (0x00000012U) +#define I2C_XFER_RELOAD_IT (0x00000012U) + +/* Private define Sequential Transfer Options default/reset value */ +#define I2C_NO_OPTION_FRAME (0xFFFF0000U) +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes 
-----------------------------------------------*/ + +/** @defgroup I2C_Private_Functions I2C Private Functions + * @{ + */ +/* Private functions to handle DMA transfer */ +static void I2C_DMAMasterTransmitCplt(DMA_HandleTypeDef *hdma); +static void I2C_DMAMasterReceiveCplt(DMA_HandleTypeDef *hdma); +static void I2C_DMASlaveTransmitCplt(DMA_HandleTypeDef *hdma); +static void I2C_DMASlaveReceiveCplt(DMA_HandleTypeDef *hdma); +static void I2C_DMAError(DMA_HandleTypeDef *hdma); +static void I2C_DMAAbort(DMA_HandleTypeDef *hdma); + +/* Private functions to handle IT transfer */ +static void I2C_ITAddrCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags); +static void I2C_ITMasterSeqCplt(I2C_HandleTypeDef *hi2c); +static void I2C_ITSlaveSeqCplt(I2C_HandleTypeDef *hi2c); +static void I2C_ITMasterCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags); +static void I2C_ITSlaveCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags); +static void I2C_ITListenCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags); +static void I2C_ITError(I2C_HandleTypeDef *hi2c, uint32_t ErrorCode); + +/* Private functions to handle IT transfer */ +static HAL_StatusTypeDef I2C_RequestMemoryWrite(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint32_t Timeout, uint32_t Tickstart); +static HAL_StatusTypeDef I2C_RequestMemoryRead(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint32_t Timeout, uint32_t Tickstart); + +/* Private functions for I2C transfer IRQ handler */ +static HAL_StatusTypeDef I2C_Master_ISR_IT(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, uint32_t ITSources); +static HAL_StatusTypeDef I2C_Slave_ISR_IT(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, uint32_t ITSources); +static HAL_StatusTypeDef I2C_Master_ISR_DMA(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, uint32_t ITSources); +static HAL_StatusTypeDef I2C_Slave_ISR_DMA(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, uint32_t ITSources); + +/* Private functions to handle flags during polling transfer */ +static HAL_StatusTypeDef I2C_WaitOnFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Flag, FlagStatus Status, uint32_t Timeout, uint32_t Tickstart); +static HAL_StatusTypeDef I2C_WaitOnTXISFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, uint32_t Tickstart); +static HAL_StatusTypeDef I2C_WaitOnRXNEFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, uint32_t Tickstart); +static HAL_StatusTypeDef I2C_WaitOnSTOPFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, uint32_t Tickstart); +static HAL_StatusTypeDef I2C_IsAcknowledgeFailed(I2C_HandleTypeDef *hi2c, uint32_t Timeout, uint32_t Tickstart); + +/* Private functions to centralize the enable/disable of Interrupts */ +static void I2C_Enable_IRQ(I2C_HandleTypeDef *hi2c, uint16_t InterruptRequest); +static void I2C_Disable_IRQ(I2C_HandleTypeDef *hi2c, uint16_t InterruptRequest); + +/* Private function to flush TXDR register */ +static void I2C_Flush_TXDR(I2C_HandleTypeDef *hi2c); + +/* Private function to handle start, restart or stop a transfer */ +static void I2C_TransferConfig(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t Size, uint32_t Mode, uint32_t Request); + +/* Private function to Convert Specific options */ +static void I2C_ConvertOtherXferOptions(I2C_HandleTypeDef *hi2c); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup I2C_Exported_Functions I2C Exported Functions + * @{ + */ + +/** @defgroup 
I2C_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This subsection provides a set of functions allowing to initialize and + deinitialize the I2Cx peripheral: + + (+) User must Implement HAL_I2C_MspInit() function in which he configures + all related peripherals resources (CLOCK, GPIO, DMA, IT and NVIC ). + + (+) Call the function HAL_I2C_Init() to configure the selected device with + the selected configuration: + (++) Clock Timing + (++) Own Address 1 + (++) Addressing mode (Master, Slave) + (++) Dual Addressing mode + (++) Own Address 2 + (++) Own Address 2 Mask + (++) General call mode + (++) Nostretch mode + + (+) Call the function HAL_I2C_DeInit() to restore the default configuration + of the selected I2Cx peripheral. + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the I2C according to the specified parameters + * in the I2C_InitTypeDef and initialize the associated handle. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Init(I2C_HandleTypeDef *hi2c) +{ + /* Check the I2C handle allocation */ + if (hi2c == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_I2C_ALL_INSTANCE(hi2c->Instance)); + assert_param(IS_I2C_OWN_ADDRESS1(hi2c->Init.OwnAddress1)); + assert_param(IS_I2C_ADDRESSING_MODE(hi2c->Init.AddressingMode)); + assert_param(IS_I2C_DUAL_ADDRESS(hi2c->Init.DualAddressMode)); + assert_param(IS_I2C_OWN_ADDRESS2(hi2c->Init.OwnAddress2)); + assert_param(IS_I2C_OWN_ADDRESS2_MASK(hi2c->Init.OwnAddress2Masks)); + assert_param(IS_I2C_GENERAL_CALL(hi2c->Init.GeneralCallMode)); + assert_param(IS_I2C_NO_STRETCH(hi2c->Init.NoStretchMode)); + + if (hi2c->State == HAL_I2C_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hi2c->Lock = HAL_UNLOCKED; + +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + /* Init the I2C Callback settings */ + hi2c->MasterTxCpltCallback = HAL_I2C_MasterTxCpltCallback; /* Legacy weak MasterTxCpltCallback */ + hi2c->MasterRxCpltCallback = HAL_I2C_MasterRxCpltCallback; /* Legacy weak MasterRxCpltCallback */ + hi2c->SlaveTxCpltCallback = HAL_I2C_SlaveTxCpltCallback; /* Legacy weak SlaveTxCpltCallback */ + hi2c->SlaveRxCpltCallback = HAL_I2C_SlaveRxCpltCallback; /* Legacy weak SlaveRxCpltCallback */ + hi2c->ListenCpltCallback = HAL_I2C_ListenCpltCallback; /* Legacy weak ListenCpltCallback */ + hi2c->MemTxCpltCallback = HAL_I2C_MemTxCpltCallback; /* Legacy weak MemTxCpltCallback */ + hi2c->MemRxCpltCallback = HAL_I2C_MemRxCpltCallback; /* Legacy weak MemRxCpltCallback */ + hi2c->ErrorCallback = HAL_I2C_ErrorCallback; /* Legacy weak ErrorCallback */ + hi2c->AbortCpltCallback = HAL_I2C_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + hi2c->AddrCallback = HAL_I2C_AddrCallback; /* Legacy weak AddrCallback */ + + if (hi2c->MspInitCallback == NULL) + { + hi2c->MspInitCallback = HAL_I2C_MspInit; /* Legacy weak MspInit */ + } + + /* Init the low level hardware : GPIO, CLOCK, CORTEX...etc */ + hi2c->MspInitCallback(hi2c); +#else + /* Init the low level hardware : GPIO, CLOCK, CORTEX...etc */ + HAL_I2C_MspInit(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + + 
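+  /* Note: the Init.Timing value is not validated here; it is expected to be a
+     pre-computed TIMINGR value (e.g. generated by STM32CubeMX) and is written
+     below with only the reserved bits masked out. */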
hi2c->State = HAL_I2C_STATE_BUSY; + + /* Disable the selected I2C peripheral */ + __HAL_I2C_DISABLE(hi2c); + + /*---------------------------- I2Cx TIMINGR Configuration ------------------*/ + /* Configure I2Cx: Frequency range */ + hi2c->Instance->TIMINGR = hi2c->Init.Timing & TIMING_CLEAR_MASK; + + /*---------------------------- I2Cx OAR1 Configuration ---------------------*/ + /* Disable Own Address1 before set the Own Address1 configuration */ + hi2c->Instance->OAR1 &= ~I2C_OAR1_OA1EN; + + /* Configure I2Cx: Own Address1 and ack own address1 mode */ + if (hi2c->Init.AddressingMode == I2C_ADDRESSINGMODE_7BIT) + { + hi2c->Instance->OAR1 = (I2C_OAR1_OA1EN | hi2c->Init.OwnAddress1); + } + else /* I2C_ADDRESSINGMODE_10BIT */ + { + hi2c->Instance->OAR1 = (I2C_OAR1_OA1EN | I2C_OAR1_OA1MODE | hi2c->Init.OwnAddress1); + } + + /*---------------------------- I2Cx CR2 Configuration ----------------------*/ + /* Configure I2Cx: Addressing Master mode */ + if (hi2c->Init.AddressingMode == I2C_ADDRESSINGMODE_10BIT) + { + hi2c->Instance->CR2 = (I2C_CR2_ADD10); + } + /* Enable the AUTOEND by default, and enable NACK (should be disable only during Slave process */ + hi2c->Instance->CR2 |= (I2C_CR2_AUTOEND | I2C_CR2_NACK); + + /*---------------------------- I2Cx OAR2 Configuration ---------------------*/ + /* Disable Own Address2 before set the Own Address2 configuration */ + hi2c->Instance->OAR2 &= ~I2C_DUALADDRESS_ENABLE; + + /* Configure I2Cx: Dual mode and Own Address2 */ + hi2c->Instance->OAR2 = (hi2c->Init.DualAddressMode | hi2c->Init.OwnAddress2 | (hi2c->Init.OwnAddress2Masks << 8)); + + /*---------------------------- I2Cx CR1 Configuration ----------------------*/ + /* Configure I2Cx: Generalcall and NoStretch mode */ + hi2c->Instance->CR1 = (hi2c->Init.GeneralCallMode | hi2c->Init.NoStretchMode); + + /* Enable the selected I2C peripheral */ + __HAL_I2C_ENABLE(hi2c); + + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->Mode = HAL_I2C_MODE_NONE; + + return HAL_OK; +} + +/** + * @brief DeInitialize the I2C peripheral. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_DeInit(I2C_HandleTypeDef *hi2c) +{ + /* Check the I2C handle allocation */ + if (hi2c == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_I2C_ALL_INSTANCE(hi2c->Instance)); + + hi2c->State = HAL_I2C_STATE_BUSY; + + /* Disable the I2C Peripheral Clock */ + __HAL_I2C_DISABLE(hi2c); + +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + if (hi2c->MspDeInitCallback == NULL) + { + hi2c->MspDeInitCallback = HAL_I2C_MspDeInit; /* Legacy weak MspDeInit */ + } + + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + hi2c->MspDeInitCallback(hi2c); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_I2C_MspDeInit(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + hi2c->State = HAL_I2C_STATE_RESET; + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Release Lock */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; +} + +/** + * @brief Initialize the I2C MSP. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @retval None + */ +__weak void HAL_I2C_MspInit(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitialize the I2C MSP. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MspDeInit(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User I2C Callback + * To be used instead of the weak predefined callback + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_I2C_MASTER_TX_COMPLETE_CB_ID Master Tx Transfer completed callback ID + * @arg @ref HAL_I2C_MASTER_RX_COMPLETE_CB_ID Master Rx Transfer completed callback ID + * @arg @ref HAL_I2C_SLAVE_TX_COMPLETE_CB_ID Slave Tx Transfer completed callback ID + * @arg @ref HAL_I2C_SLAVE_RX_COMPLETE_CB_ID Slave Rx Transfer completed callback ID + * @arg @ref HAL_I2C_LISTEN_COMPLETE_CB_ID Listen Complete callback ID + * @arg @ref HAL_I2C_MEM_TX_COMPLETE_CB_ID Memory Tx Transfer callback ID + * @arg @ref HAL_I2C_MEM_RX_COMPLETE_CB_ID Memory Rx Transfer completed callback ID + * @arg @ref HAL_I2C_ERROR_CB_ID Error callback ID + * @arg @ref HAL_I2C_ABORT_CB_ID Abort callback ID + * @arg @ref HAL_I2C_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_I2C_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_RegisterCallback(I2C_HandleTypeDef *hi2c, HAL_I2C_CallbackIDTypeDef CallbackID, pI2C_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hi2c); + + if (HAL_I2C_STATE_READY == hi2c->State) + { + switch (CallbackID) + { + case HAL_I2C_MASTER_TX_COMPLETE_CB_ID : + hi2c->MasterTxCpltCallback = pCallback; + break; + + case HAL_I2C_MASTER_RX_COMPLETE_CB_ID : + hi2c->MasterRxCpltCallback = pCallback; + break; + + case HAL_I2C_SLAVE_TX_COMPLETE_CB_ID : + hi2c->SlaveTxCpltCallback = pCallback; + break; + + case HAL_I2C_SLAVE_RX_COMPLETE_CB_ID : + hi2c->SlaveRxCpltCallback = pCallback; + break; + + case HAL_I2C_LISTEN_COMPLETE_CB_ID : + hi2c->ListenCpltCallback = pCallback; + break; + + case HAL_I2C_MEM_TX_COMPLETE_CB_ID : + hi2c->MemTxCpltCallback = pCallback; + break; + + case HAL_I2C_MEM_RX_COMPLETE_CB_ID : + hi2c->MemRxCpltCallback = pCallback; + break; + + case HAL_I2C_ERROR_CB_ID : + hi2c->ErrorCallback = pCallback; + break; + + case HAL_I2C_ABORT_CB_ID : + hi2c->AbortCpltCallback = pCallback; + break; + + case HAL_I2C_MSPINIT_CB_ID : + hi2c->MspInitCallback = pCallback; + break; + + case HAL_I2C_MSPDEINIT_CB_ID : + hi2c->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error 
status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_I2C_STATE_RESET == hi2c->State) + { + switch (CallbackID) + { + case HAL_I2C_MSPINIT_CB_ID : + hi2c->MspInitCallback = pCallback; + break; + + case HAL_I2C_MSPDEINIT_CB_ID : + hi2c->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hi2c); + return status; +} + +/** + * @brief Unregister an I2C Callback + * I2C callback is redirected to the weak predefined callback + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * This parameter can be one of the following values: + * @arg @ref HAL_I2C_MASTER_TX_COMPLETE_CB_ID Master Tx Transfer completed callback ID + * @arg @ref HAL_I2C_MASTER_RX_COMPLETE_CB_ID Master Rx Transfer completed callback ID + * @arg @ref HAL_I2C_SLAVE_TX_COMPLETE_CB_ID Slave Tx Transfer completed callback ID + * @arg @ref HAL_I2C_SLAVE_RX_COMPLETE_CB_ID Slave Rx Transfer completed callback ID + * @arg @ref HAL_I2C_LISTEN_COMPLETE_CB_ID Listen Complete callback ID + * @arg @ref HAL_I2C_MEM_TX_COMPLETE_CB_ID Memory Tx Transfer callback ID + * @arg @ref HAL_I2C_MEM_RX_COMPLETE_CB_ID Memory Rx Transfer completed callback ID + * @arg @ref HAL_I2C_ERROR_CB_ID Error callback ID + * @arg @ref HAL_I2C_ABORT_CB_ID Abort callback ID + * @arg @ref HAL_I2C_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_I2C_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_UnRegisterCallback(I2C_HandleTypeDef *hi2c, HAL_I2C_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hi2c); + + if (HAL_I2C_STATE_READY == hi2c->State) + { + switch (CallbackID) + { + case HAL_I2C_MASTER_TX_COMPLETE_CB_ID : + hi2c->MasterTxCpltCallback = HAL_I2C_MasterTxCpltCallback; /* Legacy weak MasterTxCpltCallback */ + break; + + case HAL_I2C_MASTER_RX_COMPLETE_CB_ID : + hi2c->MasterRxCpltCallback = HAL_I2C_MasterRxCpltCallback; /* Legacy weak MasterRxCpltCallback */ + break; + + case HAL_I2C_SLAVE_TX_COMPLETE_CB_ID : + hi2c->SlaveTxCpltCallback = HAL_I2C_SlaveTxCpltCallback; /* Legacy weak SlaveTxCpltCallback */ + break; + + case HAL_I2C_SLAVE_RX_COMPLETE_CB_ID : + hi2c->SlaveRxCpltCallback = HAL_I2C_SlaveRxCpltCallback; /* Legacy weak SlaveRxCpltCallback */ + break; + + case HAL_I2C_LISTEN_COMPLETE_CB_ID : + hi2c->ListenCpltCallback = HAL_I2C_ListenCpltCallback; /* Legacy weak ListenCpltCallback */ + break; + + case HAL_I2C_MEM_TX_COMPLETE_CB_ID : + hi2c->MemTxCpltCallback = HAL_I2C_MemTxCpltCallback; /* Legacy weak MemTxCpltCallback */ + break; + + case HAL_I2C_MEM_RX_COMPLETE_CB_ID : + hi2c->MemRxCpltCallback = HAL_I2C_MemRxCpltCallback; /* Legacy weak MemRxCpltCallback */ + break; + + case HAL_I2C_ERROR_CB_ID : + hi2c->ErrorCallback = HAL_I2C_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_I2C_ABORT_CB_ID : + hi2c->AbortCpltCallback = HAL_I2C_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + break; + + case HAL_I2C_MSPINIT_CB_ID : + hi2c->MspInitCallback = HAL_I2C_MspInit; /* Legacy weak MspInit */ + 
break; + + case HAL_I2C_MSPDEINIT_CB_ID : + hi2c->MspDeInitCallback = HAL_I2C_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_I2C_STATE_RESET == hi2c->State) + { + switch (CallbackID) + { + case HAL_I2C_MSPINIT_CB_ID : + hi2c->MspInitCallback = HAL_I2C_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_I2C_MSPDEINIT_CB_ID : + hi2c->MspDeInitCallback = HAL_I2C_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hi2c); + return status; +} + +/** + * @brief Register the Slave Address Match I2C Callback + * To be used instead of the weak HAL_I2C_AddrCallback() predefined callback + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param pCallback pointer to the Address Match Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_RegisterAddrCallback(I2C_HandleTypeDef *hi2c, pI2C_AddrCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hi2c); + + if (HAL_I2C_STATE_READY == hi2c->State) + { + hi2c->AddrCallback = pCallback; + } + else + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hi2c); + return status; +} + +/** + * @brief UnRegister the Slave Address Match I2C Callback + * Info Ready I2C Callback is redirected to the weak HAL_I2C_AddrCallback() predefined callback + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_UnRegisterAddrCallback(I2C_HandleTypeDef *hi2c) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hi2c); + + if (HAL_I2C_STATE_READY == hi2c->State) + { + hi2c->AddrCallback = HAL_I2C_AddrCallback; /* Legacy weak AddrCallback */ + } + else + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hi2c); + return status; +} + +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup I2C_Exported_Functions_Group2 Input and Output operation functions + * @brief Data transfers functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the I2C data + transfers. + + (#) There are two modes of transfer: + (++) Blocking mode : The communication is performed in the polling mode. + The status of all data processing is returned by the same function + after finishing transfer. 
+ (++) No-Blocking mode : The communication is performed using Interrupts + or DMA. These functions return the status of the transfer startup. + The end of the data processing will be indicated through the + dedicated I2C IRQ when using Interrupt mode or the DMA IRQ when + using DMA mode. + + (#) Blocking mode functions are : + (++) HAL_I2C_Master_Transmit() + (++) HAL_I2C_Master_Receive() + (++) HAL_I2C_Slave_Transmit() + (++) HAL_I2C_Slave_Receive() + (++) HAL_I2C_Mem_Write() + (++) HAL_I2C_Mem_Read() + (++) HAL_I2C_IsDeviceReady() + + (#) No-Blocking mode functions with Interrupt are : + (++) HAL_I2C_Master_Transmit_IT() + (++) HAL_I2C_Master_Receive_IT() + (++) HAL_I2C_Slave_Transmit_IT() + (++) HAL_I2C_Slave_Receive_IT() + (++) HAL_I2C_Mem_Write_IT() + (++) HAL_I2C_Mem_Read_IT() + (++) HAL_I2C_Master_Seq_Transmit_IT() + (++) HAL_I2C_Master_Seq_Receive_IT() + (++) HAL_I2C_Slave_Seq_Transmit_IT() + (++) HAL_I2C_Slave_Seq_Receive_IT() + (++) HAL_I2C_EnableListen_IT() + (++) HAL_I2C_DisableListen_IT() + (++) HAL_I2C_Master_Abort_IT() + + (#) No-Blocking mode functions with DMA are : + (++) HAL_I2C_Master_Transmit_DMA() + (++) HAL_I2C_Master_Receive_DMA() + (++) HAL_I2C_Slave_Transmit_DMA() + (++) HAL_I2C_Slave_Receive_DMA() + (++) HAL_I2C_Mem_Write_DMA() + (++) HAL_I2C_Mem_Read_DMA() + (++) HAL_I2C_Master_Seq_Transmit_DMA() + (++) HAL_I2C_Master_Seq_Receive_DMA() + (++) HAL_I2C_Slave_Seq_Transmit_DMA() + (++) HAL_I2C_Slave_Seq_Receive_DMA() + + (#) A set of Transfer Complete Callbacks are provided in non Blocking mode: + (++) HAL_I2C_MasterTxCpltCallback() + (++) HAL_I2C_MasterRxCpltCallback() + (++) HAL_I2C_SlaveTxCpltCallback() + (++) HAL_I2C_SlaveRxCpltCallback() + (++) HAL_I2C_MemTxCpltCallback() + (++) HAL_I2C_MemRxCpltCallback() + (++) HAL_I2C_AddrCallback() + (++) HAL_I2C_ListenCpltCallback() + (++) HAL_I2C_ErrorCallback() + (++) HAL_I2C_AbortCpltCallback() + +@endverbatim + * @{ + */ + +/** + * @brief Transmits in master mode an amount of data in blocking mode. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, I2C_TIMEOUT_BUSY, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, I2C_GENERATE_START_WRITE); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_GENERATE_START_WRITE); + } + + while (hi2c->XferCount > 0U) + { + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + /* Write data to TXDR */ + hi2c->Instance->TXDR = *hi2c->pBuffPtr; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + hi2c->XferSize--; + + if ((hi2c->XferCount != 0U) && (hi2c->XferSize == 0U)) + { + /* Wait until TCR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TCR, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_NO_STARTSTOP); + } + } + } + + /* No need to Check TC flag, with AUTOEND mode the stop is automatically generated */ + /* Wait until STOPF flag is set */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receives in master mode an amount of data in blocking mode. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Receive(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, I2C_TIMEOUT_BUSY, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, I2C_GENERATE_START_READ); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_GENERATE_START_READ); + } + + while (hi2c->XferCount > 0U) + { + /* Wait until RXNE flag is set */ + if (I2C_WaitOnRXNEFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferSize--; + hi2c->XferCount--; + + if ((hi2c->XferCount != 0U) && (hi2c->XferSize == 0U)) + { + /* Wait until TCR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TCR, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_NO_STARTSTOP); + } + } + } + + /* No need to Check TC flag, with AUTOEND mode the stop is automatically generated */ + /* Wait until STOPF flag is set */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Transmits in slave mode an amount of data in blocking mode. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
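 * @note  Blocking slave-mode sketch: the call waits until a master addresses the device
 *        for a read and then shifts out the response. The handle hi2c2, the reply buffer
 *        and Error_Handler() are illustrative assumptions:
 * @code
 *   uint8_t reply[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
 *
 *   // Blocks until addressed by a master requesting data, or until the timeout elapses
 *   if (HAL_I2C_Slave_Transmit(&hi2c2, reply, sizeof(reply), 1000U) != HAL_OK)
 *   {
 *     Error_Handler();
 *   }
 * @endcode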
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Transmit(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Wait until ADDR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_ADDR, RESET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Clear ADDR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + + /* If 10bit addressing mode is selected */ + if (hi2c->Init.AddressingMode == I2C_ADDRESSINGMODE_10BIT) + { + /* Wait until ADDR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_ADDR, RESET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Clear ADDR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + } + + /* Wait until DIR flag is set Transmitter mode */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_DIR, RESET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + while (hi2c->XferCount > 0U) + { + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Write data to TXDR */ + hi2c->Instance->TXDR = *hi2c->pBuffPtr; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + } + + /* Wait until STOP flag is set */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + if (hi2c->ErrorCode == HAL_I2C_ERROR_AF) + { + /* Normal use case for Transmitter mode */ + /* A NACK is generated to confirm the end of transfer */ + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + } + else + { + return HAL_ERROR; + } + } + + /* Clear STOP flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Wait until BUSY flag is reset */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive in slave mode an amount of data in blocking mode + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Receive(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Wait until ADDR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_ADDR, RESET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Clear ADDR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + + /* Wait until DIR flag is reset Receiver mode */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_DIR, SET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + while (hi2c->XferCount > 0U) + { + /* Wait until RXNE flag is set */ + if (I2C_WaitOnRXNEFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + /* Store Last receive data if any */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_RXNE) == SET) + { + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + } + + return HAL_ERROR; + } + + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + } + + /* Wait until STOP flag is set */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Clear STOP flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Wait until BUSY flag is reset */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Transmit in master mode an amount of data in non-blocking mode with Interrupt + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
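 * @note  Interrupt-mode sketch: the call only starts the transfer; completion is signalled
 *        through the weak HAL_I2C_MasterTxCpltCallback(), which the application may
 *        override. hi2c1, the address 0x48, payload, tx_done and Error_Handler() are
 *        assumptions for illustration:
 * @code
 *   volatile uint8_t tx_done = 0;    // polled by the application main loop
 *   uint8_t payload[8];
 *
 *   void start_transfer(void)
 *   {
 *     if (HAL_I2C_Master_Transmit_IT(&hi2c1, (uint16_t)(0x48U << 1), payload, sizeof(payload)) != HAL_OK)
 *     {
 *       Error_Handler();
 *     }
 *   }
 *
 *   // Overrides the weak callback; runs in HAL_I2C_EV_IRQHandler context
 *   void HAL_I2C_MasterTxCpltCallback(I2C_HandleTypeDef *hi2c)
 *   {
 *     if (hi2c->Instance == I2C1)
 *     {
 *       tx_done = 1;
 *     }
 *   }
 * @endcode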
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size) +{ + uint32_t xfermode; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_IT; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_WRITE); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI| I2C_IT_STOPI| I2C_IT_NACKI | I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive in master mode an amount of data in non-blocking mode with Interrupt + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size) +{ + uint32_t xfermode; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_IT; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_READ); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, RXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI| I2C_IT_STOPI| I2C_IT_NACKI | I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Transmit in slave mode an amount of data in non-blocking mode with Interrupt + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Transmit_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size) +{ + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Slave_ISR_IT; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI| I2C_IT_STOPI| I2C_IT_NACKI | I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT | I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive in slave mode an amount of data in non-blocking mode with Interrupt + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
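 * @note  Slave interrupt-mode sketch: arm the reception once, then re-arm it from the
 *        completion callback so the slave keeps accepting frames. hi2c2, rx_buf and the
 *        process_frame() application hook are illustrative assumptions:
 * @code
 *   uint8_t rx_buf[16];
 *
 *   void start_slave_rx(void)
 *   {
 *     if (HAL_I2C_Slave_Receive_IT(&hi2c2, rx_buf, sizeof(rx_buf)) != HAL_OK)
 *     {
 *       Error_Handler();
 *     }
 *   }
 *
 *   // Invoked once the master has written sizeof(rx_buf) bytes
 *   void HAL_I2C_SlaveRxCpltCallback(I2C_HandleTypeDef *hi2c)
 *   {
 *     process_frame(rx_buf);                                   // hypothetical application hook
 *     (void)HAL_I2C_Slave_Receive_IT(hi2c, rx_buf, sizeof(rx_buf));
 *   }
 * @endcode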
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Receive_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size) +{ + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Slave_ISR_IT; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, RXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI| I2C_IT_STOPI| I2C_IT_NACKI | I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT | I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Transmit in master mode an amount of data in non-blocking mode with DMA + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size) +{ + uint32_t xfermode; + HAL_StatusTypeDef dmaxferstatus; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_DMA; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + if (hi2c->XferSize > 0U) + { + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAMasterTransmitCplt; + + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)pData, (uint32_t)&hi2c->Instance->TXDR, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, 
DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_WRITE); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + else + { + /* Update Transfer ISR function pointer */ + hi2c->XferISR = I2C_Master_ISR_IT; + + /* Send Slave Address */ + /* Set NBYTES to write and generate START condition */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_GENERATE_START_WRITE); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI| I2C_IT_STOPI| I2C_IT_NACKI | I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive in master mode an amount of data in non-blocking mode with DMA + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
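 * @note  DMA-mode sketch: the transfer only starts if a DMA handle has been linked to
 *        hdmarx (typically done in HAL_I2C_MspInit(), e.g. with __HAL_LINKDMA());
 *        otherwise HAL_I2C_ERROR_DMA_PARAM is reported. hi2c1, the address 0x48 and the
 *        samples buffer are assumptions for illustration:
 * @code
 *   uint8_t samples[64];
 *
 *   // Completion is reported through HAL_I2C_MasterRxCpltCallback(); the buffer must
 *   // remain valid until that callback fires
 *   if (HAL_I2C_Master_Receive_DMA(&hi2c1, (uint16_t)(0x48U << 1), samples, sizeof(samples)) != HAL_OK)
 *   {
 *     Error_Handler();
 *   }
 * @endcode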
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size) +{ + uint32_t xfermode; + HAL_StatusTypeDef dmaxferstatus; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_DMA; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + if (hi2c->XferSize > 0U) + { + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAMasterReceiveCplt; + + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)pData, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Send Slave Address */ + /* Set NBYTES to read and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_READ); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + else + { + /* Update Transfer ISR function pointer */ + hi2c->XferISR = I2C_Master_ISR_IT; + + /* Send Slave Address */ + /* Set NBYTES to read and generate START condition */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_GENERATE_START_READ); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI| I2C_IT_STOPI| I2C_IT_NACKI | I2C_IT_ADDRI | 
I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Transmit in slave mode an amount of data in non-blocking mode with DMA + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef dmaxferstatus; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Slave_ISR_DMA; + + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMASlaveTransmitCplt; + + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)pData, (uint32_t)&hi2c->Instance->TXDR, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, STOP, NACK, ADDR interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive in slave mode an amount of data in non-blocking mode with DMA + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Receive_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef dmaxferstatus; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Slave_ISR_DMA; + + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMASlaveReceiveCplt; + + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)pData, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, STOP, NACK, ADDR interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} +/** + * @brief Write an amount of data in blocking mode to a specific memory address + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
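 * @note  Typical EEPROM-style sketch combining HAL_I2C_Mem_Write() with
 *        HAL_I2C_IsDeviceReady() to wait out the device's internal write cycle. hi2c1,
 *        the 7-bit address 0x50, the page buffer and the memory layout are assumptions:
 * @code
 *   uint8_t page[8] = { 0 };
 *
 *   // Write 8 bytes starting at internal address 0x0010, using 16-bit addressing
 *   if (HAL_I2C_Mem_Write(&hi2c1, (uint16_t)(0x50U << 1), 0x0010U,
 *                         I2C_MEMADD_SIZE_16BIT, page, sizeof(page), 100U) != HAL_OK)
 *   {
 *     Error_Handler();
 *   }
 *
 *   // Poll with ACK trials until the device finishes its internal write cycle
 *   while (HAL_I2C_IsDeviceReady(&hi2c1, (uint16_t)(0x50U << 1), 3U, 10U) != HAL_OK)
 *   {
 *     // device still busy; a real application would bound this loop
 *   }
 * @endcode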
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Write(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, I2C_TIMEOUT_BUSY, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryWrite(hi2c, DevAddress, MemAddress, MemAddSize, Timeout, tickstart) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_NO_STARTSTOP); + } + + do + { + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Write data to TXDR */ + hi2c->Instance->TXDR = *hi2c->pBuffPtr; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + hi2c->XferSize--; + + if ((hi2c->XferCount != 0U) && (hi2c->XferSize == 0U)) + { + /* Wait until TCR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TCR, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_NO_STARTSTOP); + } + } + + } + while (hi2c->XferCount > 0U); + + /* No need to Check TC flag, with AUTOEND mode the stop is automatically generated */ + /* Wait until STOPF flag is reset */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Read an amount of data in blocking mode from a specific memory address + * @param hi2c Pointer to a I2C_HandleTypeDef 
structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Read(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, I2C_TIMEOUT_BUSY, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryRead(hi2c, DevAddress, MemAddress, MemAddSize, Timeout, tickstart) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, I2C_GENERATE_START_READ); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_GENERATE_START_READ); + } + + do + { + /* Wait until RXNE flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_RXNE, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferSize--; + hi2c->XferCount--; + + if ((hi2c->XferCount != 0U) && (hi2c->XferSize == 0U)) + { + /* Wait until TCR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TCR, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t) hi2c->XferSize, I2C_RELOAD_MODE, I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_NO_STARTSTOP); + } + } + } + while (hi2c->XferCount > 0U); + + /* No need to Check TC flag, with AUTOEND mode the stop is automatically generated */ + /* Wait until STOPF flag is reset */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else 
+ { + return HAL_BUSY; + } +} +/** + * @brief Write an amount of data in non-blocking mode with Interrupt to a specific memory address + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Write_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size) +{ + uint32_t tickstart; + uint32_t xfermode; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_IT; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryWrite(hi2c, DevAddress, MemAddress, MemAddSize, I2C_TIMEOUT_FLAG, tickstart) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_NO_STARTSTOP); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI| I2C_IT_STOPI| I2C_IT_NACKI | I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Read an amount of data in non-blocking mode with Interrupt from a specific memory address + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
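 * @note  Non-blocking register read sketch: the call returns immediately and completion
 *        is signalled through HAL_I2C_MemRxCpltCallback(). hi2c1, the sensor address
 *        0x48, register 0x00 and temp_raw are assumptions for illustration:
 * @code
 *   uint8_t temp_raw[2];
 *
 *   // Read two bytes from register 0x00 using 8-bit internal addressing
 *   if (HAL_I2C_Mem_Read_IT(&hi2c1, (uint16_t)(0x48U << 1), 0x00U,
 *                           I2C_MEMADD_SIZE_8BIT, temp_raw, sizeof(temp_raw)) != HAL_OK)
 *   {
 *     Error_Handler();
 *   }
 * @endcode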
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Read_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size) +{ + uint32_t tickstart; + uint32_t xfermode; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_IT; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryRead(hi2c, DevAddress, MemAddress, MemAddSize, I2C_TIMEOUT_FLAG, tickstart) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_READ); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, RXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI| I2C_IT_STOPI| I2C_IT_NACKI | I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} +/** + * @brief Write an amount of data in non-blocking mode with DMA to a specific memory address + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Write_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size) +{ + uint32_t tickstart; + uint32_t xfermode; + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_DMA; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryWrite(hi2c, DevAddress, MemAddress, MemAddSize, I2C_TIMEOUT_FLAG, tickstart) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAMasterTransmitCplt; + + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)pData, (uint32_t)&hi2c->Instance->TXDR, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_NO_STARTSTOP); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + 
} +} + +/** + * @brief Reads an amount of data in non-blocking mode with DMA from a specific memory address. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be read + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Read_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size) +{ + uint32_t tickstart; + uint32_t xfermode; + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_DMA; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryRead(hi2c, DevAddress, MemAddress, MemAddSize, I2C_TIMEOUT_FLAG, tickstart) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAMasterReceiveCplt; + + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)pData, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_READ); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = 
HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Checks if target device is ready for communication. + * @note This function is used with Memory devices + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param Trials Number of trials + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_IsDeviceReady(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint32_t Trials, uint32_t Timeout) +{ + uint32_t tickstart; + + __IO uint32_t I2C_Trials = 0UL; + + FlagStatus tmp1; + FlagStatus tmp2; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + do + { + /* Generate Start */ + hi2c->Instance->CR2 = I2C_GENERATE_START(hi2c->Init.AddressingMode, DevAddress); + + /* No need to Check TC flag, with AUTOEND mode the stop is automatically generated */ + /* Wait until STOPF flag is set or a NACK flag is set*/ + tickstart = HAL_GetTick(); + + tmp1 = __HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF); + tmp2 = __HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_AF); + + while ((tmp1 == RESET) && (tmp2 == RESET)) + { + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0U)) + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + + tmp1 = __HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF); + tmp2 = __HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_AF); + } + + /* Check if the NACKF flag has not been set */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_AF) == RESET) + { + /* Wait until STOPF flag is reset */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_STOPF, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Device is ready */ + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + /* Wait until STOPF flag is reset */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_STOPF, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Clear STOP Flag, auto generated with autoend*/ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + } + + /* Check if the maximum allowed number of trials has been reached */ + if (I2C_Trials == Trials) + { + /* Generate Stop */ + hi2c->Instance->CR2 |= I2C_CR2_STOP; + + /* Wait until STOPF flag is reset */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_STOPF, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + } + + /* Increment Trials */ + I2C_Trials++; + } + while (I2C_Trials < Trials); + + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + + /* Process Unlocked */ + 
__HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sequential transmit in master I2C mode an amount of data in non-blocking mode with Interrupt. + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t XferOptions) +{ + uint32_t xfermode; + uint32_t xferrequest = I2C_GENERATE_START_WRITE; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Master_ISR_IT; + + /* If hi2c->XferCount > MAX_NBYTE_SIZE, use reload mode */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = hi2c->XferOptions; + } + + /* If transfer direction not change and there is no request to start another frame, do not generate Restart Condition */ + /* Mean Previous state is same as current state */ + if ((hi2c->PreviousState == I2C_STATE_MASTER_BUSY_TX) && (IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(XferOptions) == 0)) + { + xferrequest = I2C_NO_STARTSTOP; + } + else + { + /* Convert OTHER_xxx XferOptions if any */ + I2C_ConvertOtherXferOptions(hi2c); + + /* Update xfermode accordingly if no reload is necessary */ + if (hi2c->XferCount < MAX_NBYTE_SIZE) + { + xfermode = hi2c->XferOptions; + } + } + + /* Send Slave Address and set NBYTES to write */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, xferrequest); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sequential transmit in master I2C mode an amount of data in non-blocking mode with DMA. + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
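 * @note  Repeated-start sketch using the sequential API (shown here with the _IT
 *        variants; the same XferOptions apply to the DMA variants): the register address
 *        is written with I2C_FIRST_FRAME (no STOP), then the read is chained from the
 *        Tx-complete callback with I2C_LAST_FRAME, which issues the restart and the
 *        final STOP. hi2c1, address 0x48, register 0x02 and result[] are assumptions:
 * @code
 *   uint8_t reg_addr = 0x02;
 *   uint8_t result[2];
 *
 *   void start_register_read(void)
 *   {
 *     if (HAL_I2C_Master_Seq_Transmit_IT(&hi2c1, (uint16_t)(0x48U << 1),
 *                                        &reg_addr, 1U, I2C_FIRST_FRAME) != HAL_OK)
 *     {
 *       Error_Handler();
 *     }
 *   }
 *
 *   void HAL_I2C_MasterTxCpltCallback(I2C_HandleTypeDef *hi2c)
 *   {
 *     // Direction change: repeated START, read two bytes, then STOP
 *     (void)HAL_I2C_Master_Seq_Receive_IT(hi2c, (uint16_t)(0x48U << 1),
 *                                         result, 2U, I2C_LAST_FRAME);
 *   }
 * @endcode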
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t XferOptions) +{ + uint32_t xfermode; + uint32_t xferrequest = I2C_GENERATE_START_WRITE; + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Master_ISR_DMA; + + /* If hi2c->XferCount > MAX_NBYTE_SIZE, use reload mode */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = hi2c->XferOptions; + } + + /* If transfer direction not change and there is no request to start another frame, do not generate Restart Condition */ + /* Mean Previous state is same as current state */ + if ((hi2c->PreviousState == I2C_STATE_MASTER_BUSY_TX) && (IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(XferOptions) == 0)) + { + xferrequest = I2C_NO_STARTSTOP; + } + else + { + /* Convert OTHER_xxx XferOptions if any */ + I2C_ConvertOtherXferOptions(hi2c); + + /* Update xfermode accordingly if no reload is necessary */ + if (hi2c->XferCount < MAX_NBYTE_SIZE) + { + xfermode = hi2c->XferOptions; + } + } + + if (hi2c->XferSize > 0U) + { + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAMasterTransmitCplt; + + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)pData, (uint32_t)&hi2c->Instance->TXDR, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Send Slave Address and set NBYTES to write */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, xferrequest); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + 
__HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + else + { + /* Update Transfer ISR function pointer */ + hi2c->XferISR = I2C_Master_ISR_IT; + + /* Send Slave Address */ + /* Set NBYTES to write and generate START condition */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_GENERATE_START_WRITE); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI| I2C_IT_STOPI| I2C_IT_NACKI | I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sequential receive in master I2C mode an amount of data in non-blocking mode with Interrupt + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t XferOptions) +{ + uint32_t xfermode; + uint32_t xferrequest = I2C_GENERATE_START_READ; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Master_ISR_IT; + + /* If hi2c->XferCount > MAX_NBYTE_SIZE, use reload mode */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = hi2c->XferOptions; + } + + /* If transfer direction not change and there is no request to start another frame, do not generate Restart Condition */ + /* Mean Previous state is same as current state */ + if ((hi2c->PreviousState == I2C_STATE_MASTER_BUSY_RX) && (IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(XferOptions) == 0)) + { + xferrequest = I2C_NO_STARTSTOP; + } + else + { + /* Convert OTHER_xxx XferOptions if any */ + I2C_ConvertOtherXferOptions(hi2c); + + /* Update xfermode accordingly if no reload is necessary */ + if (hi2c->XferCount < MAX_NBYTE_SIZE) + { + xfermode = hi2c->XferOptions; + } + } + + /* Send Slave Address and set NBYTES to read */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, xferrequest); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sequential receive in 
master I2C mode an amount of data in non-blocking mode with DMA + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, uint16_t Size, uint32_t XferOptions) +{ + uint32_t xfermode; + uint32_t xferrequest = I2C_GENERATE_START_READ; + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Master_ISR_DMA; + + /* If hi2c->XferCount > MAX_NBYTE_SIZE, use reload mode */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = hi2c->XferOptions; + } + + /* If transfer direction not change and there is no request to start another frame, do not generate Restart Condition */ + /* Mean Previous state is same as current state */ + if ((hi2c->PreviousState == I2C_STATE_MASTER_BUSY_RX) && (IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(XferOptions) == 0)) + { + xferrequest = I2C_NO_STARTSTOP; + } + else + { + /* Convert OTHER_xxx XferOptions if any */ + I2C_ConvertOtherXferOptions(hi2c); + + /* Update xfermode accordingly if no reload is necessary */ + if (hi2c->XferCount < MAX_NBYTE_SIZE) + { + xfermode = hi2c->XferOptions; + } + } + + if (hi2c->XferSize > 0U) + { + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAMasterReceiveCplt; + + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)pData, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Send Slave Address and set NBYTES to read */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, xferrequest); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + 
/* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + else + { + /* Update Transfer ISR function pointer */ + hi2c->XferISR = I2C_Master_ISR_IT; + + /* Send Slave Address */ + /* Set NBYTES to read and generate START condition */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_GENERATE_START_READ); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI| I2C_IT_STOPI| I2C_IT_NACKI | I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sequential transmit in slave/device I2C mode an amount of data in non-blocking mode with Interrupt + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Transmit_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t XferOptions) +{ + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) == (uint32_t)HAL_I2C_STATE_LISTEN) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Disable Interrupts, to prevent preemption during treatment in case of multicall */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_TX_IT); + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* I2C cannot manage full duplex exchange so disable previous IT enabled if any */ + /* and then toggle the HAL slave RX state to TX state */ + if (hi2c->State == HAL_I2C_STATE_BUSY_RX_LISTEN) + { + /* Disable associated Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + + /* Abort DMA Xfer if any */ + if ((hi2c->Instance->CR1 & I2C_CR1_RXDMAEN) == I2C_CR1_RXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmarx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(hi2c->hdmarx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmarx->XferAbortCallback(hi2c->hdmarx); + } + } + } + } + + hi2c->State = HAL_I2C_STATE_BUSY_TX_LISTEN; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Slave_ISR_IT; + + if 
(I2C_GET_DIR(hi2c) == I2C_DIRECTION_RECEIVE) + { + /* Clear ADDR flag after prepare the transfer parameters */ + /* This action will generate an acknowledge to the Master */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* REnable ADDR interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT | I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Sequential transmit in slave/device I2C mode an amount of data in non-blocking mode with DMA + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t XferOptions) +{ + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) == (uint32_t)HAL_I2C_STATE_LISTEN) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Disable Interrupts, to prevent preemption during treatment in case of multicall */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_TX_IT); + + /* I2C cannot manage full duplex exchange so disable previous IT enabled if any */ + /* and then toggle the HAL slave RX state to TX state */ + if (hi2c->State == HAL_I2C_STATE_BUSY_RX_LISTEN) + { + /* Disable associated Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + + if ((hi2c->Instance->CR1 & I2C_CR1_RXDMAEN) == I2C_CR1_RXDMAEN) + { + /* Abort DMA Xfer if any */ + if (hi2c->hdmarx != NULL) + { + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmarx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(hi2c->hdmarx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmarx->XferAbortCallback(hi2c->hdmarx); + } + } + } + } + else if (hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) + { + if ((hi2c->Instance->CR1 & I2C_CR1_TXDMAEN) == I2C_CR1_TXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + /* Abort DMA Xfer if any */ + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmatx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(hi2c->hdmatx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmatx->XferAbortCallback(hi2c->hdmatx); + } + } + } + } + else + { + /* Nothing to do */ + } + + hi2c->State = HAL_I2C_STATE_BUSY_TX_LISTEN; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + 
hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Slave_ISR_DMA; + + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMASlaveTransmitCplt; + + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)pData, (uint32_t)&hi2c->Instance->TXDR, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Reset XferSize */ + hi2c->XferSize = 0; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (I2C_GET_DIR(hi2c) == I2C_DIRECTION_RECEIVE) + { + /* Clear ADDR flag after prepare the transfer parameters */ + /* This action will generate an acknowledge to the Master */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, STOP, NACK, ADDR interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Sequential receive in slave/device I2C mode an amount of data in non-blocking mode with Interrupt + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
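+ * @note Illustrative sketch (hypothetical buffer name rx_buf and length RX_LEN); this call is
+ *       typically issued from HAL_I2C_AddrCallback() after an address match in listen mode:
+ * @code
+ *   /* master writes, slave receives the first frame of the sequence */
+ *   HAL_I2C_Slave_Seq_Receive_IT(hi2c, rx_buf, RX_LEN, I2C_FIRST_FRAME);
+ * @endcode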
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Receive_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t XferOptions) +{ + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) == (uint32_t)HAL_I2C_STATE_LISTEN) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Disable Interrupts, to prevent preemption during treatment in case of multicall */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_RX_IT); + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* I2C cannot manage full duplex exchange so disable previous IT enabled if any */ + /* and then toggle the HAL slave TX state to RX state */ + if (hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) + { + /* Disable associated Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + + if ((hi2c->Instance->CR1 & I2C_CR1_TXDMAEN) == I2C_CR1_TXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + /* Abort DMA Xfer if any */ + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmatx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(hi2c->hdmatx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmatx->XferAbortCallback(hi2c->hdmatx); + } + } + } + } + + hi2c->State = HAL_I2C_STATE_BUSY_RX_LISTEN; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Slave_ISR_IT; + + if (I2C_GET_DIR(hi2c) == I2C_DIRECTION_TRANSMIT) + { + /* Clear ADDR flag after prepare the transfer parameters */ + /* This action will generate an acknowledge to the Master */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* REnable ADDR interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT | I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Sequential receive in slave/device I2C mode an amount of data in non-blocking mode with DMA + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
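+ * @note Illustrative sketch (hypothetical names): same calling pattern as the interrupt
+ *       variant, but the receive buffer must remain valid until the DMA transfer completes.
+ * @code
+ *   HAL_I2C_Slave_Seq_Receive_DMA(hi2c, rx_buf, RX_LEN, I2C_FIRST_FRAME);
+ * @endcode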
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, uint32_t XferOptions) +{ + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) == (uint32_t)HAL_I2C_STATE_LISTEN) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Disable Interrupts, to prevent preemption during treatment in case of multicall */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_RX_IT); + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* I2C cannot manage full duplex exchange so disable previous IT enabled if any */ + /* and then toggle the HAL slave TX state to RX state */ + if (hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) + { + /* Disable associated Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + + if ((hi2c->Instance->CR1 & I2C_CR1_TXDMAEN) == I2C_CR1_TXDMAEN) + { + /* Abort DMA Xfer if any */ + if (hi2c->hdmatx != NULL) + { + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmatx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(hi2c->hdmatx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmatx->XferAbortCallback(hi2c->hdmatx); + } + } + } + } + else if (hi2c->State == HAL_I2C_STATE_BUSY_RX_LISTEN) + { + if ((hi2c->Instance->CR1 & I2C_CR1_RXDMAEN) == I2C_CR1_RXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + /* Abort DMA Xfer if any */ + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmarx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(hi2c->hdmarx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmarx->XferAbortCallback(hi2c->hdmarx); + } + } + } + } + else + { + /* Nothing to do */ + } + + hi2c->State = HAL_I2C_STATE_BUSY_RX_LISTEN; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Slave_ISR_DMA; + + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMASlaveReceiveCplt; + + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)pData, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* 
Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Reset XferSize */ + hi2c->XferSize = 0; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (I2C_GET_DIR(hi2c) == I2C_DIRECTION_TRANSMIT) + { + /* Clear ADDR flag after prepare the transfer parameters */ + /* This action will generate an acknowledge to the Master */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* REnable ADDR interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT | I2C_XFER_LISTEN_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Enable the Address listen mode with Interrupt. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_EnableListen_IT(I2C_HandleTypeDef *hi2c) +{ + if (hi2c->State == HAL_I2C_STATE_READY) + { + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->XferISR = I2C_Slave_ISR_IT; + + /* Enable the Address Match interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Disable the Address listen mode with Interrupt. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_DisableListen_IT(I2C_HandleTypeDef *hi2c) +{ + /* Declaration of tmp to prevent undefined behavior of volatile usage */ + uint32_t tmp; + + /* Disable Address listen mode only if a transfer is not ongoing */ + if (hi2c->State == HAL_I2C_STATE_LISTEN) + { + tmp = (uint32_t)(hi2c->State) & I2C_STATE_MSK; + hi2c->PreviousState = tmp | (uint32_t)(hi2c->Mode); + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->XferISR = NULL; + + /* Disable the Address Match interrupt */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Abort a master I2C IT or DMA process communication with Interrupt. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
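+ * @note Illustrative sketch (hypothetical handle name and address): the abort is asynchronous
+ *       and its completion is reported through HAL_I2C_AbortCpltCallback().
+ * @code
+ *   if (HAL_I2C_Master_Abort_IT(&hi2c1, 0xA0) == HAL_OK)
+ *   {
+ *     /* wait for HAL_I2C_AbortCpltCallback() before starting a new transfer */
+ *   }
+ * @endcode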
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Abort_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress) +{ + if (hi2c->Mode == HAL_I2C_MODE_MASTER) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Disable Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + + /* Set State at HAL_I2C_STATE_ABORT */ + hi2c->State = HAL_I2C_STATE_ABORT; + + /* Set NBYTES to 1 to generate a dummy read on I2C peripheral */ + /* Set AUTOEND mode, this will generate a NACK then STOP condition to abort the current transfer */ + I2C_TransferConfig(hi2c, DevAddress, 1, I2C_AUTOEND_MODE, I2C_GENERATE_STOP); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + I2C_Enable_IRQ(hi2c, I2C_XFER_CPLT_IT); + + return HAL_OK; + } + else + { + /* Wrong usage of abort function */ + /* This function should be used only in case of abort monitored by master device */ + return HAL_ERROR; + } +} + +/** + * @} + */ + +/** @defgroup I2C_IRQ_Handler_and_Callbacks IRQ Handler and Callbacks + * @{ + */ + +/** + * @brief This function handles I2C event interrupt request. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +void HAL_I2C_EV_IRQHandler(I2C_HandleTypeDef *hi2c) +{ + /* Get current IT Flags and IT sources value */ + uint32_t itflags = READ_REG(hi2c->Instance->ISR); + uint32_t itsources = READ_REG(hi2c->Instance->CR1); + + /* I2C events treatment -------------------------------------*/ + if (hi2c->XferISR != NULL) + { + hi2c->XferISR(hi2c, itflags, itsources); + } +} + +/** + * @brief This function handles I2C error interrupt request. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
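+ * @note Illustrative sketch (assuming I2C1 and a handle named hi2c1): the event and error
+ *       handlers are expected to be called from the corresponding vector table ISRs,
+ *       e.g. in stm32f7xx_it.c.
+ * @code
+ *   void I2C1_EV_IRQHandler(void) { HAL_I2C_EV_IRQHandler(&hi2c1); }
+ *   void I2C1_ER_IRQHandler(void) { HAL_I2C_ER_IRQHandler(&hi2c1); }
+ * @endcode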
+ * @retval None + */ +void HAL_I2C_ER_IRQHandler(I2C_HandleTypeDef *hi2c) +{ + uint32_t itflags = READ_REG(hi2c->Instance->ISR); + uint32_t itsources = READ_REG(hi2c->Instance->CR1); + uint32_t tmperror; + + /* I2C Bus error interrupt occurred ------------------------------------*/ + if ((I2C_CHECK_FLAG(itflags, I2C_FLAG_BERR) != RESET) && (I2C_CHECK_IT_SOURCE(itsources, I2C_IT_ERRI) != RESET)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_BERR; + + /* Clear BERR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_BERR); + } + + /* I2C Over-Run/Under-Run interrupt occurred ----------------------------------------*/ + if ((I2C_CHECK_FLAG(itflags, I2C_FLAG_OVR) != RESET) && (I2C_CHECK_IT_SOURCE(itsources, I2C_IT_ERRI) != RESET)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_OVR; + + /* Clear OVR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_OVR); + } + + /* I2C Arbitration Loss error interrupt occurred -------------------------------------*/ + if ((I2C_CHECK_FLAG(itflags, I2C_FLAG_ARLO) != RESET) && (I2C_CHECK_IT_SOURCE(itsources, I2C_IT_ERRI) != RESET)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_ARLO; + + /* Clear ARLO flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ARLO); + } + + /* Store current volatile hi2c->ErrorCode, misra rule */ + tmperror = hi2c->ErrorCode; + + /* Call the Error Callback in case of Error detected */ + if ((tmperror & (HAL_I2C_ERROR_BERR | HAL_I2C_ERROR_OVR | HAL_I2C_ERROR_ARLO)) != HAL_I2C_ERROR_NONE) + { + I2C_ITError(hi2c, tmperror); + } +} + +/** + * @brief Master Tx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MasterTxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MasterTxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Master Rx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MasterRxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MasterRxCpltCallback could be implemented in the user file + */ +} + +/** @brief Slave Tx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_SlaveTxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_SlaveTxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Slave Rx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_SlaveRxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_SlaveRxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Slave Address Match callback. 
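+ * @note Illustrative override sketch (hypothetical buffers rx_buf/tx_buf), typically paired
+ *       with HAL_I2C_EnableListen_IT(): dispatch on the requested direction and start the
+ *       matching sequential slave transfer.
+ * @code
+ *   void HAL_I2C_AddrCallback(I2C_HandleTypeDef *hi2c, uint8_t TransferDirection, uint16_t AddrMatchCode)
+ *   {
+ *     UNUSED(AddrMatchCode);
+ *     if (TransferDirection == I2C_DIRECTION_TRANSMIT)   /* master writes, slave receives */
+ *     {
+ *       HAL_I2C_Slave_Seq_Receive_IT(hi2c, rx_buf, RX_LEN, I2C_FIRST_FRAME);
+ *     }
+ *     else                                               /* master reads, slave transmits */
+ *     {
+ *       HAL_I2C_Slave_Seq_Transmit_IT(hi2c, tx_buf, TX_LEN, I2C_FIRST_FRAME);
+ *     }
+ *   }
+ * @endcode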
+ * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param TransferDirection Master request Transfer Direction (Write/Read), value of @ref I2C_XFERDIRECTION + * @param AddrMatchCode Address Match Code + * @retval None + */ +__weak void HAL_I2C_AddrCallback(I2C_HandleTypeDef *hi2c, uint8_t TransferDirection, uint16_t AddrMatchCode) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + UNUSED(TransferDirection); + UNUSED(AddrMatchCode); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_AddrCallback() could be implemented in the user file + */ +} + +/** + * @brief Listen Complete callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_ListenCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_ListenCpltCallback() could be implemented in the user file + */ +} + +/** + * @brief Memory Tx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MemTxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MemTxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Memory Rx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MemRxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MemRxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief I2C error callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_ErrorCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_ErrorCallback could be implemented in the user file + */ +} + +/** + * @brief I2C abort callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
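+ * @note Illustrative override sketch: like the other __weak callbacks, a definition in user
+ *       code takes precedence over this empty default (abort_done is a hypothetical flag).
+ * @code
+ *   void HAL_I2C_AbortCpltCallback(I2C_HandleTypeDef *hi2c)
+ *   {
+ *     UNUSED(hi2c);
+ *     abort_done = 1U;   /* hypothetical application flag */
+ *   }
+ * @endcode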
+ * @retval None + */ +__weak void HAL_I2C_AbortCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_AbortCpltCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup I2C_Exported_Functions_Group3 Peripheral State, Mode and Error functions + * @brief Peripheral State, Mode and Error functions + * +@verbatim + =============================================================================== + ##### Peripheral State, Mode and Error functions ##### + =============================================================================== + [..] + This subsection permit to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Return the I2C handle state. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval HAL state + */ +HAL_I2C_StateTypeDef HAL_I2C_GetState(I2C_HandleTypeDef *hi2c) +{ + /* Return I2C handle state */ + return hi2c->State; +} + +/** + * @brief Returns the I2C Master, Slave, Memory or no mode. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for I2C module + * @retval HAL mode + */ +HAL_I2C_ModeTypeDef HAL_I2C_GetMode(I2C_HandleTypeDef *hi2c) +{ + return hi2c->Mode; +} + +/** +* @brief Return the I2C error code. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. +* @retval I2C Error Code +*/ +uint32_t HAL_I2C_GetError(I2C_HandleTypeDef *hi2c) +{ + return hi2c->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup I2C_Private_Functions + * @{ + */ + +/** + * @brief Interrupt Sub-Routine which handle the Interrupt Flags Master Mode with Interrupt. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param ITFlags Interrupt flags to handle. + * @param ITSources Interrupt sources enabled. 
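+ * @note Flag handling order, as implemented below: NACK (AF) first, then RXNE, TXIS,
+ *       TCR (reload of NBYTES when the transfer exceeds MAX_NBYTE_SIZE), TC, and finally
+ *       STOPF, which closes the transfer via I2C_ITMasterCplt().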
+ * @retval HAL status + */ +static HAL_StatusTypeDef I2C_Master_ISR_IT(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, uint32_t ITSources) +{ + uint16_t devaddress; + uint32_t tmpITFlags = ITFlags; + + /* Process Locked */ + __HAL_LOCK(hi2c); + + if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_AF) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_NACKI) != RESET)) + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Set corresponding Error Code */ + /* No need to generate STOP, it is automatically done */ + /* Error callback will be send during stop flag treatment */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_RXNE) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_RXI) != RESET)) + { + /* Remove RXNE flag on temporary variable as read done */ + tmpITFlags &= ~I2C_FLAG_RXNE; + + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferSize--; + hi2c->XferCount--; + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_TXIS) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TXI) != RESET)) + { + /* Write data to TXDR */ + hi2c->Instance->TXDR = *hi2c->pBuffPtr; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferSize--; + hi2c->XferCount--; + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_TCR) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TCI) != RESET)) + { + if ((hi2c->XferCount != 0U) && (hi2c->XferSize == 0U)) + { + devaddress = (uint16_t)(hi2c->Instance->CR2 & I2C_CR2_SADD); + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, devaddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + if (hi2c->XferOptions != I2C_NO_OPTION_FRAME) + { + I2C_TransferConfig(hi2c, devaddress, (uint8_t)hi2c->XferSize, hi2c->XferOptions, I2C_NO_STARTSTOP); + } + else + { + I2C_TransferConfig(hi2c, devaddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_NO_STARTSTOP); + } + } + } + else + { + /* Call TxCpltCallback() if no stop mode is set */ + if (I2C_GET_STOP_MODE(hi2c) != I2C_AUTOEND_MODE) + { + /* Call I2C Master Sequential complete process */ + I2C_ITMasterSeqCplt(hi2c); + } + else + { + /* Wrong size Status regarding TCR flag event */ + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_SIZE); + } + } + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_TC) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TCI) != RESET)) + { + if (hi2c->XferCount == 0U) + { + if (I2C_GET_STOP_MODE(hi2c) != I2C_AUTOEND_MODE) + { + /* Generate a stop condition in case of no transfer option */ + if (hi2c->XferOptions == I2C_NO_OPTION_FRAME) + { + /* Generate Stop */ + hi2c->Instance->CR2 |= I2C_CR2_STOP; + } + else + { + /* Call I2C Master Sequential complete process */ + I2C_ITMasterSeqCplt(hi2c); + } + } + } + else + { + /* Wrong size Status regarding TC flag event */ + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_SIZE); + } + } + else + { + /* Nothing to do */ + } + + if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_STOPF) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_STOPI) != RESET)) + { + /* Call I2C Master complete process */ + I2C_ITMasterCplt(hi2c, tmpITFlags); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return 
HAL_OK; +} + +/** + * @brief Interrupt Sub-Routine which handle the Interrupt Flags Slave Mode with Interrupt. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param ITFlags Interrupt flags to handle. + * @param ITSources Interrupt sources enabled. + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_Slave_ISR_IT(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, uint32_t ITSources) +{ + uint32_t tmpoptions = hi2c->XferOptions; + uint32_t tmpITFlags = ITFlags; + + /* Process locked */ + __HAL_LOCK(hi2c); + + /* Check if STOPF is set */ + if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_STOPF) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_STOPI) != RESET)) + { + /* Call I2C Slave complete process */ + I2C_ITSlaveCplt(hi2c, tmpITFlags); + } + + if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_AF) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_NACKI) != RESET)) + { + /* Check that I2C transfer finished */ + /* if yes, normal use case, a NACK is sent by the MASTER when Transfer is finished */ + /* Mean XferCount == 0*/ + /* So clear Flag NACKF only */ + if (hi2c->XferCount == 0U) + { + if ((hi2c->State == HAL_I2C_STATE_LISTEN) && (tmpoptions == I2C_FIRST_AND_LAST_FRAME)) /* Same action must be done for (tmpoptions == I2C_LAST_FRAME) which removed for Warning[Pa134]: left and right operands are identical */ + { + /* Call I2C Listen complete process */ + I2C_ITListenCplt(hi2c, tmpITFlags); + } + else if ((hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) && (tmpoptions != I2C_NO_OPTION_FRAME)) + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* Last Byte is Transmitted */ + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + else + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + } + } + else + { + /* if no, error use case, a Non-Acknowledge of last Data is generated by the MASTER*/ + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Set ErrorCode corresponding to a Non-Acknowledge */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + + if ((tmpoptions == I2C_FIRST_FRAME) || (tmpoptions == I2C_NEXT_FRAME)) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, hi2c->ErrorCode); + } + } + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_RXNE) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_RXI) != RESET)) + { + if (hi2c->XferCount > 0U) + { + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferSize--; + hi2c->XferCount--; + } + + if ((hi2c->XferCount == 0U) && \ + (tmpoptions != I2C_NO_OPTION_FRAME)) + { + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_ADDR) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_ADDRI) != RESET)) + { + I2C_ITAddrCplt(hi2c, tmpITFlags); + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_TXIS) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TXI) != RESET)) + { + /* Write data to TXDR only if XferCount not reach "0" */ + /* A TXIS flag can be set, during STOP treatment */ + /* Check if all Datas have already been sent */ + /* If it is the case, this last write in TXDR is not sent, correspond to a dummy TXIS event */ + if (hi2c->XferCount > 0U) + { + /* Write data to TXDR */ + hi2c->Instance->TXDR = *hi2c->pBuffPtr; 
+ + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + hi2c->XferSize--; + } + else + { + if ((tmpoptions == I2C_NEXT_FRAME) || (tmpoptions == I2C_FIRST_FRAME)) + { + /* Last Byte is Transmitted */ + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + } + } + else + { + /* Nothing to do */ + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; +} + +/** + * @brief Interrupt Sub-Routine which handle the Interrupt Flags Master Mode with DMA. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param ITFlags Interrupt flags to handle. + * @param ITSources Interrupt sources enabled. + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_Master_ISR_DMA(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, uint32_t ITSources) +{ + uint16_t devaddress; + uint32_t xfermode; + + /* Process Locked */ + __HAL_LOCK(hi2c); + + if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_AF) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_NACKI) != RESET)) + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Set corresponding Error Code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + + /* No need to generate STOP, it is automatically done */ + /* But enable STOP interrupt, to treat it */ + /* Error callback will be send during stop flag treatment */ + I2C_Enable_IRQ(hi2c, I2C_XFER_CPLT_IT); + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + } + else if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_TCR) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TCI) != RESET)) + { + /* Disable TC interrupt */ + __HAL_I2C_DISABLE_IT(hi2c, I2C_IT_TCI); + + if (hi2c->XferCount != 0U) + { + /* Recover Slave address */ + devaddress = (uint16_t)(hi2c->Instance->CR2 & I2C_CR2_SADD); + + /* Prepare the new XferSize to transfer */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + if (hi2c->XferOptions != I2C_NO_OPTION_FRAME) + { + xfermode = hi2c->XferOptions; + } + else + { + xfermode = I2C_AUTOEND_MODE; + } + } + + /* Set the new XferSize in Nbytes register */ + I2C_TransferConfig(hi2c, devaddress, (uint8_t)hi2c->XferSize, xfermode, I2C_NO_STARTSTOP); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Enable DMA Request */ + if (hi2c->State == HAL_I2C_STATE_BUSY_RX) + { + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + } + else + { + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + } + } + else + { + /* Call TxCpltCallback() if no stop mode is set */ + if (I2C_GET_STOP_MODE(hi2c) != I2C_AUTOEND_MODE) + { + /* Call I2C Master Sequential complete process */ + I2C_ITMasterSeqCplt(hi2c); + } + else + { + /* Wrong size Status regarding TCR flag event */ + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_SIZE); + } + } + } + else if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_TC) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TCI) != RESET)) + { + if (hi2c->XferCount == 0U) + { + if (I2C_GET_STOP_MODE(hi2c) != I2C_AUTOEND_MODE) + { + /* Generate a stop condition in case of no transfer option */ + if (hi2c->XferOptions == I2C_NO_OPTION_FRAME) + { + /* Generate Stop */ + hi2c->Instance->CR2 |= I2C_CR2_STOP; + } + else + { + /* Call I2C Master Sequential complete process */ + I2C_ITMasterSeqCplt(hi2c); + } + } + } + else + { + /* Wrong size Status regarding TC flag event */ + /* Call the 
corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_SIZE); + } + } + else if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_STOPF) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_STOPI) != RESET)) + { + /* Call I2C Master complete process */ + I2C_ITMasterCplt(hi2c, ITFlags); + } + else + { + /* Nothing to do */ + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; +} + +/** + * @brief Interrupt Sub-Routine which handle the Interrupt Flags Slave Mode with DMA. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param ITFlags Interrupt flags to handle. + * @param ITSources Interrupt sources enabled. + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_Slave_ISR_DMA(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, uint32_t ITSources) +{ + uint32_t tmpoptions = hi2c->XferOptions; + uint32_t treatdmanack = 0U; + + /* Process locked */ + __HAL_LOCK(hi2c); + + /* Check if STOPF is set */ + if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_STOPF) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_STOPI) != RESET)) + { + /* Call I2C Slave complete process */ + I2C_ITSlaveCplt(hi2c, ITFlags); + } + + if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_AF) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_NACKI) != RESET)) + { + /* Check that I2C transfer finished */ + /* if yes, normal use case, a NACK is sent by the MASTER when Transfer is finished */ + /* Mean XferCount == 0 */ + /* So clear Flag NACKF only */ + if ((I2C_CHECK_IT_SOURCE(ITSources, I2C_CR1_TXDMAEN) != RESET) || + (I2C_CHECK_IT_SOURCE(ITSources, I2C_CR1_RXDMAEN) != RESET)) + { + /* Split check of hdmarx, for MISRA compliance */ + if (hi2c->hdmarx != NULL) + { + if (I2C_CHECK_IT_SOURCE(ITSources, I2C_CR1_RXDMAEN) != RESET) + { + if (__HAL_DMA_GET_COUNTER(hi2c->hdmarx) == 0U) + { + treatdmanack = 1U; + } + } + } + + /* Split check of hdmatx, for MISRA compliance */ + if (hi2c->hdmatx != NULL) + { + if (I2C_CHECK_IT_SOURCE(ITSources, I2C_CR1_TXDMAEN) != RESET) + { + if (__HAL_DMA_GET_COUNTER(hi2c->hdmatx) == 0U) + { + treatdmanack = 1U; + } + } + } + + if (treatdmanack == 1U) + { + if ((hi2c->State == HAL_I2C_STATE_LISTEN) && (tmpoptions == I2C_FIRST_AND_LAST_FRAME)) /* Same action must be done for (tmpoptions == I2C_LAST_FRAME) which removed for Warning[Pa134]: left and right operands are identical */ + { + /* Call I2C Listen complete process */ + I2C_ITListenCplt(hi2c, ITFlags); + } + else if ((hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) && (tmpoptions != I2C_NO_OPTION_FRAME)) + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* Last Byte is Transmitted */ + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + else + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + } + } + else + { + /* if no, error use case, a Non-Acknowledge of last Data is generated by the MASTER*/ + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Set ErrorCode corresponding to a Non-Acknowledge */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + + if ((tmpoptions == I2C_FIRST_FRAME) || (tmpoptions == I2C_NEXT_FRAME)) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, hi2c->ErrorCode); + } + } + } + else + { + /* Only Clear NACK Flag, no DMA treatment is pending */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + } + } + else if 
((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_ADDR) != RESET) && (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_ADDRI) != RESET)) + { + I2C_ITAddrCplt(hi2c, ITFlags); + } + else + { + /* Nothing to do */ + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; +} + +/** + * @brief Master sends target device address followed by internal memory address for write request. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_RequestMemoryWrite(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint32_t Timeout, uint32_t Tickstart) +{ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)MemAddSize, I2C_RELOAD_MODE, I2C_GENERATE_START_WRITE); + + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* If Memory address size is 8Bit */ + if (MemAddSize == I2C_MEMADD_SIZE_8BIT) + { + /* Send Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_LSB(MemAddress); + } + /* If Memory address size is 16Bit */ + else + { + /* Send MSB of Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_MSB(MemAddress); + + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Send LSB of Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_LSB(MemAddress); + } + + /* Wait until TCR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TCR, RESET, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief Master sends target device address followed by internal memory address for read request. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
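+ * @note As implemented below, the memory address phase is sent with I2C_SOFTEND_MODE so that
+ *       no STOP is generated; the caller then issues a RESTART in read direction to fetch the
+ *       data (contrast with I2C_RequestMemoryWrite, which uses I2C_RELOAD_MODE and waits on TCR).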
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_RequestMemoryRead(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint32_t Timeout, uint32_t Tickstart) +{ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)MemAddSize, I2C_SOFTEND_MODE, I2C_GENERATE_START_WRITE); + + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* If Memory address size is 8Bit */ + if (MemAddSize == I2C_MEMADD_SIZE_8BIT) + { + /* Send Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_LSB(MemAddress); + } + /* If Memory address size is 16Bit */ + else + { + /* Send MSB of Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_MSB(MemAddress); + + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Send LSB of Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_LSB(MemAddress); + } + + /* Wait until TC flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TC, RESET, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief I2C Address complete process callback. + * @param hi2c I2C handle. + * @param ITFlags Interrupt flags to handle. + * @retval None + */ +static void I2C_ITAddrCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags) +{ + uint8_t transferdirection; + uint16_t slaveaddrcode; + uint16_t ownadd1code; + uint16_t ownadd2code; + + /* Prevent unused argument(s) compilation warning */ + UNUSED(ITFlags); + + /* In case of Listen state, need to inform upper layer of address match code event */ + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) == (uint32_t)HAL_I2C_STATE_LISTEN) + { + transferdirection = I2C_GET_DIR(hi2c); + slaveaddrcode = I2C_GET_ADDR_MATCH(hi2c); + ownadd1code = I2C_GET_OWN_ADDRESS1(hi2c); + ownadd2code = I2C_GET_OWN_ADDRESS2(hi2c); + + /* If 10bits addressing mode is selected */ + if (hi2c->Init.AddressingMode == I2C_ADDRESSINGMODE_10BIT) + { + if ((slaveaddrcode & SlaveAddr_MSK) == ((ownadd1code >> SlaveAddr_SHIFT) & SlaveAddr_MSK)) + { + slaveaddrcode = ownadd1code; + hi2c->AddrEventCount++; + if (hi2c->AddrEventCount == 2U) + { + /* Reset Address Event counter */ + hi2c->AddrEventCount = 0U; + + /* Clear ADDR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call Slave Addr callback */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->AddrCallback(hi2c, transferdirection, slaveaddrcode); +#else + HAL_I2C_AddrCallback(hi2c, transferdirection, slaveaddrcode); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + } + else + { + slaveaddrcode = ownadd2code; + + /* Disable ADDR Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call Slave Addr callback */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->AddrCallback(hi2c, transferdirection, slaveaddrcode); +#else + HAL_I2C_AddrCallback(hi2c, transferdirection, slaveaddrcode); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + } + /* else 7 bits addressing mode is selected */ + else + { + /* Disable ADDR Interrupts */ + 
I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call Slave Addr callback */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->AddrCallback(hi2c, transferdirection, slaveaddrcode); +#else + HAL_I2C_AddrCallback(hi2c, transferdirection, slaveaddrcode); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + } + /* Else clear address flag only */ + else + { + /* Clear ADDR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + } +} + +/** + * @brief I2C Master sequential complete process. + * @param hi2c I2C handle. + * @retval None + */ +static void I2C_ITMasterSeqCplt(I2C_HandleTypeDef *hi2c) +{ + /* Reset I2C handle mode */ + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* No Generate Stop, to permit restart mode */ + /* The stop will be done at the end of transfer, when I2C_AUTOEND_MODE enable */ + if (hi2c->State == HAL_I2C_STATE_BUSY_TX) + { + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_TX; + hi2c->XferISR = NULL; + + /* Disable Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MasterTxCpltCallback(hi2c); +#else + HAL_I2C_MasterTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + /* hi2c->State == HAL_I2C_STATE_BUSY_RX */ + else + { + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_RX; + hi2c->XferISR = NULL; + + /* Disable Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MasterRxCpltCallback(hi2c); +#else + HAL_I2C_MasterRxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } +} + +/** + * @brief I2C Slave sequential complete process. + * @param hi2c I2C handle. + * @retval None + */ +static void I2C_ITSlaveSeqCplt(I2C_HandleTypeDef *hi2c) +{ + /* Reset I2C handle mode */ + hi2c->Mode = HAL_I2C_MODE_NONE; + + if (hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) + { + /* Remove HAL_I2C_STATE_SLAVE_BUSY_TX, keep only HAL_I2C_STATE_LISTEN */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->PreviousState = I2C_STATE_SLAVE_BUSY_TX; + + /* Disable Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->SlaveTxCpltCallback(hi2c); +#else + HAL_I2C_SlaveTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + + else if (hi2c->State == HAL_I2C_STATE_BUSY_RX_LISTEN) + { + /* Remove HAL_I2C_STATE_SLAVE_BUSY_RX, keep only HAL_I2C_STATE_LISTEN */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->PreviousState = I2C_STATE_SLAVE_BUSY_RX; + + /* Disable Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->SlaveRxCpltCallback(hi2c); +#else + HAL_I2C_SlaveRxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + /* Nothing to do */ + } +} + +/** + * @brief I2C Master complete process. + * @param hi2c I2C handle. 
+ * @param ITFlags Interrupt flags to handle. + * @retval None + */ +static void I2C_ITMasterCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags) +{ + uint32_t tmperror; + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + /* Reset handle parameters */ + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->XferISR = NULL; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + + if (I2C_CHECK_FLAG(ITFlags, I2C_FLAG_AF) != RESET) + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Set acknowledge error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + } + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* Disable Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT | I2C_XFER_RX_IT); + + /* Store current volatile hi2c->ErrorCode, misra rule */ + tmperror = hi2c->ErrorCode; + + /* Call the corresponding callback to inform upper layer of End of Transfer */ + if ((hi2c->State == HAL_I2C_STATE_ABORT) || (tmperror != HAL_I2C_ERROR_NONE)) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, hi2c->ErrorCode); + } + /* hi2c->State == HAL_I2C_STATE_BUSY_TX */ + else if (hi2c->State == HAL_I2C_STATE_BUSY_TX) + { + hi2c->State = HAL_I2C_STATE_READY; + + if (hi2c->Mode == HAL_I2C_MODE_MEM) + { + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MemTxCpltCallback(hi2c); +#else + HAL_I2C_MemTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MasterTxCpltCallback(hi2c); +#else + HAL_I2C_MasterTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + } + /* hi2c->State == HAL_I2C_STATE_BUSY_RX */ + else if (hi2c->State == HAL_I2C_STATE_BUSY_RX) + { + hi2c->State = HAL_I2C_STATE_READY; + + if (hi2c->Mode == HAL_I2C_MODE_MEM) + { + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MemRxCpltCallback(hi2c); +#else + HAL_I2C_MemRxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MasterRxCpltCallback(hi2c); +#else + HAL_I2C_MasterRxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + } + else + { + /* Nothing to do */ + } +} + +/** + * @brief I2C Slave complete process. + * @param hi2c I2C handle. + * @param ITFlags Interrupt flags to handle. 
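I2C_ITMasterCplt() above finishes a transfer by invoking the master/memory completion callbacks (or I2C_ITError() on failure). With USE_HAL_I2C_REGISTER_CALLBACKS set to 0, the application receives these events by overriding the weak callbacks; a minimal sketch, where the i2c_tx_done flag is purely illustrative:

volatile uint8_t i2c_tx_done = 0;          /* hypothetical application flag */

void HAL_I2C_MasterTxCpltCallback(I2C_HandleTypeDef *hi2c)
{
  i2c_tx_done = 1;                         /* signal the main loop / task */
}

void HAL_I2C_ErrorCallback(I2C_HandleTypeDef *hi2c)
{
  uint32_t err = HAL_I2C_GetError(hi2c);   /* bit mask of HAL_I2C_ERROR_xxx codes */

  if ((err & HAL_I2C_ERROR_AF) != 0U)
  {
    /* the slave NACKed the address or a data byte */
  }
}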
+ * @retval None + */ +static void I2C_ITSlaveCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags) +{ + uint32_t tmpcr1value = READ_REG(hi2c->Instance->CR1); + uint32_t tmpITFlags = ITFlags; + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Disable all interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_TX_IT | I2C_XFER_RX_IT); + + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* If a DMA is ongoing, Update handle size context */ + if (I2C_CHECK_IT_SOURCE(tmpcr1value, I2C_CR1_TXDMAEN) != RESET) + { + if (hi2c->hdmatx != NULL) + { + hi2c->XferCount = (uint16_t)__HAL_DMA_GET_COUNTER(hi2c->hdmatx); + } + } + else if (I2C_CHECK_IT_SOURCE(tmpcr1value, I2C_CR1_RXDMAEN) != RESET) + { + if (hi2c->hdmarx != NULL) + { + hi2c->XferCount = (uint16_t)__HAL_DMA_GET_COUNTER(hi2c->hdmarx); + } + } + else + { + /* Do nothing */ + } + + /* Store Last receive data if any */ + if (I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_RXNE) != RESET) + { + /* Remove RXNE flag on temporary variable as read done */ + tmpITFlags &= ~I2C_FLAG_RXNE; + + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + if ((hi2c->XferSize > 0U)) + { + hi2c->XferSize--; + hi2c->XferCount--; + } + } + + /* All data are not transferred, so set error code accordingly */ + if (hi2c->XferCount != 0U) + { + /* Set ErrorCode corresponding to a Non-Acknowledge */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + } + + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->XferISR = NULL; + + if (hi2c->ErrorCode != HAL_I2C_ERROR_NONE) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, hi2c->ErrorCode); + + /* Call the Listen Complete callback, to inform upper layer of the end of Listen usecase */ + if (hi2c->State == HAL_I2C_STATE_LISTEN) + { + /* Call I2C Listen complete process */ + I2C_ITListenCplt(hi2c, tmpITFlags); + } + } + else if (hi2c->XferOptions != I2C_NO_OPTION_FRAME) + { + /* Call the Sequential Complete callback, to inform upper layer of the end of Tranfer */ + I2C_ITSlaveSeqCplt(hi2c); + + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the Listen Complete callback, to inform upper layer of the end of Listen usecase */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->ListenCpltCallback(hi2c); +#else + HAL_I2C_ListenCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + /* Call the corresponding callback to inform upper layer of End of Transfer */ + else if (hi2c->State == HAL_I2C_STATE_BUSY_RX) + { + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->SlaveRxCpltCallback(hi2c); +#else + HAL_I2C_SlaveRxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->SlaveTxCpltCallback(hi2c); +#else + HAL_I2C_SlaveTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } +} + +/** + * @brief I2C 
Listen complete process. + * @param hi2c I2C handle. + * @param ITFlags Interrupt flags to handle. + * @retval None + */ +static void I2C_ITListenCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags) +{ + /* Reset handle parameters */ + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->XferISR = NULL; + + /* Store Last receive data if any */ + if (I2C_CHECK_FLAG(ITFlags, I2C_FLAG_RXNE) != RESET) + { + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + if ((hi2c->XferSize > 0U)) + { + hi2c->XferSize--; + hi2c->XferCount--; + + /* Set ErrorCode corresponding to a Non-Acknowledge */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + } + } + + /* Disable all Interrupts*/ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_RX_IT | I2C_XFER_TX_IT); + + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the Listen Complete callback, to inform upper layer of the end of Listen usecase */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->ListenCpltCallback(hi2c); +#else + HAL_I2C_ListenCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +} + +/** + * @brief I2C interrupts error process. + * @param hi2c I2C handle. + * @param ErrorCode Error code to handle. + * @retval None + */ +static void I2C_ITError(I2C_HandleTypeDef *hi2c, uint32_t ErrorCode) +{ + HAL_I2C_StateTypeDef tmpstate = hi2c->State; + + /* Reset handle parameters */ + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferCount = 0U; + + /* Set new error code */ + hi2c->ErrorCode |= ErrorCode; + + /* Disable Interrupts */ + if ((tmpstate == HAL_I2C_STATE_LISTEN) || + (tmpstate == HAL_I2C_STATE_BUSY_TX_LISTEN) || + (tmpstate == HAL_I2C_STATE_BUSY_RX_LISTEN)) + { + /* Disable all interrupts, except interrupts related to LISTEN state */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT | I2C_XFER_TX_IT); + + /* keep HAL_I2C_STATE_LISTEN if set */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->XferISR = I2C_Slave_ISR_IT; + } + else + { + /* Disable all interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_RX_IT | I2C_XFER_TX_IT); + + /* If state is an abort treatment on goind, don't change state */ + /* This change will be do later */ + if (hi2c->State != HAL_I2C_STATE_ABORT) + { + /* Set HAL_I2C_STATE_READY */ + hi2c->State = HAL_I2C_STATE_READY; + } + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->XferISR = NULL; + } + + /* Abort DMA TX transfer if any */ + if ((hi2c->Instance->CR1 & I2C_CR1_TXDMAEN) == I2C_CR1_TXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmatx->XferAbortCallback = I2C_DMAAbort; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(hi2c->hdmatx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmatx->XferAbortCallback(hi2c->hdmatx); + } + } + } + /* Abort DMA RX transfer if any */ + else if ((hi2c->Instance->CR1 & I2C_CR1_RXDMAEN) == I2C_CR1_RXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure 
*/ + hi2c->hdmarx->XferAbortCallback = I2C_DMAAbort; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(hi2c->hdmarx) != HAL_OK) + { + /* Call Directly hi2c->hdmarx->XferAbortCallback function in case of error */ + hi2c->hdmarx->XferAbortCallback(hi2c->hdmarx); + } + } + } + else if (hi2c->State == HAL_I2C_STATE_ABORT) + { + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->AbortCpltCallback(hi2c); +#else + HAL_I2C_AbortCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->ErrorCallback(hi2c); +#else + HAL_I2C_ErrorCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } +} + +/** + * @brief I2C Tx data register flush process. + * @param hi2c I2C handle. + * @retval None + */ +static void I2C_Flush_TXDR(I2C_HandleTypeDef *hi2c) +{ + /* If a pending TXIS flag is set */ + /* Write a dummy data in TXDR to clear it */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_TXIS) != RESET) + { + hi2c->Instance->TXDR = 0x00U; + } + + /* Flush TX register if not empty */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_TXE) == RESET) + { + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_TXE); + } +} + +/** + * @brief DMA I2C master transmit process complete callback. + * @param hdma DMA handle + * @retval None + */ +static void I2C_DMAMasterTransmitCplt(DMA_HandleTypeDef *hdma) +{ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + /* If last transfer, enable STOP interrupt */ + if (hi2c->XferCount == 0U) + { + /* Enable STOP interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_CPLT_IT); + } + /* else prepare a new DMA transfer and enable TCReload interrupt */ + else + { + /* Update Buffer pointer */ + hi2c->pBuffPtr += hi2c->XferSize; + + /* Set the XferSize to transfer */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + } + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, (uint32_t)&hi2c->Instance->TXDR, hi2c->XferSize) != HAL_OK) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_DMA); + } + else + { + /* Enable TC interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RELOAD_IT); + } + } +} + +/** + * @brief DMA I2C slave transmit process complete callback. 
+ * @param hdma DMA handle + * @retval None + */ +static void I2C_DMASlaveTransmitCplt(DMA_HandleTypeDef *hdma) +{ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + uint32_t tmpoptions = hi2c->XferOptions; + + if ((tmpoptions == I2C_NEXT_FRAME) || (tmpoptions == I2C_FIRST_FRAME)) + { + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + /* Last Byte is Transmitted */ + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + else + { + /* No specific action, Master fully manage the generation of STOP condition */ + /* Mean that this generation can arrive at any time, at the end or during DMA process */ + /* So STOP condition should be manage through Interrupt treatment */ + } +} + +/** + * @brief DMA I2C master receive process complete callback. + * @param hdma DMA handle + * @retval None + */ +static void I2C_DMAMasterReceiveCplt(DMA_HandleTypeDef *hdma) +{ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + /* If last transfer, enable STOP interrupt */ + if (hi2c->XferCount == 0U) + { + /* Enable STOP interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_CPLT_IT); + } + /* else prepare a new DMA transfer and enable TCReload interrupt */ + else + { + /* Update Buffer pointer */ + hi2c->pBuffPtr += hi2c->XferSize; + + /* Set the XferSize to transfer */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + } + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize) != HAL_OK) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_DMA); + } + else + { + /* Enable TC interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RELOAD_IT); + } + } +} + +/** + * @brief DMA I2C slave receive process complete callback. + * @param hdma DMA handle + * @retval None + */ +static void I2C_DMASlaveReceiveCplt(DMA_HandleTypeDef *hdma) +{ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + uint32_t tmpoptions = hi2c->XferOptions; + + if ((__HAL_DMA_GET_COUNTER(hi2c->hdmarx) == 0U) && \ + (tmpoptions != I2C_NO_OPTION_FRAME)) + { + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + else + { + /* No specific action, Master fully manage the generation of STOP condition */ + /* Mean that this generation can arrive at any time, at the end or during DMA process */ + /* So STOP condition should be manage through Interrupt treatment */ + } +} + +/** + * @brief DMA I2C communication error callback. 
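I2C_DMAMasterReceiveCplt() above re-arms the DMA stream whenever XferCount is still non-zero, so requests larger than one NBYTES chunk (255 bytes) are split transparently using reload mode. A sketch of such a large DMA read, assuming an illustrative hi2c1 handle and a device at 7-bit address 0x68:

extern I2C_HandleTypeDef hi2c1;
static uint8_t rx_buf[512];                /* larger than one NBYTES chunk */

void start_large_read(void)
{
  if (HAL_I2C_Master_Receive_DMA(&hi2c1, (uint16_t)(0x68U << 1),
                                 rx_buf, (uint16_t)sizeof(rx_buf)) != HAL_OK)
  {
    /* transfer could not be started */
  }
}

void HAL_I2C_MasterRxCpltCallback(I2C_HandleTypeDef *hi2c)
{
  /* called once, after all 512 bytes have been received */
}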
+ * @param hdma DMA handle + * @retval None + */ +static void I2C_DMAError(DMA_HandleTypeDef *hdma) +{ + uint32_t treatdmaerror = 0U; + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + if (hi2c->hdmatx != NULL) + { + if (__HAL_DMA_GET_COUNTER(hi2c->hdmatx) == 0U) + { + treatdmaerror = 1U; + } + } + + if (hi2c->hdmarx != NULL) + { + if (__HAL_DMA_GET_COUNTER(hi2c->hdmarx) == 0U) + { + treatdmaerror = 1U; + } + } + + /* Check if a FIFO error is detected, if true normal use case, so no specific action to perform */ + if (!((HAL_DMA_GetError(hdma) == HAL_DMA_ERROR_FE)) && (treatdmaerror != 0U)) + { + /* Disable Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_DMA); + } +} + +/** + * @brief DMA I2C communication abort callback + * (To be called at end of DMA Abort procedure). + * @param hdma DMA handle. + * @retval None + */ +static void I2C_DMAAbort(DMA_HandleTypeDef *hdma) +{ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Reset AbortCpltCallback */ + hi2c->hdmatx->XferAbortCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Check if come from abort from user */ + if (hi2c->State == HAL_I2C_STATE_ABORT) + { + hi2c->State = HAL_I2C_STATE_READY; + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->AbortCpltCallback(hi2c); +#else + HAL_I2C_AbortCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->ErrorCallback(hi2c); +#else + HAL_I2C_ErrorCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } +} + +/** + * @brief This function handles I2C Communication Timeout. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param Flag Specifies the I2C flag to check. + * @param Status The new Flag status (SET or RESET). + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_WaitOnFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Flag, FlagStatus Status, uint32_t Timeout, uint32_t Tickstart) +{ + while (__HAL_I2C_GET_FLAG(hi2c, Flag) == Status) + { + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + } + } + return HAL_OK; +} + +/** + * @brief This function handles I2C Communication Timeout for specific usage of TXIS flag. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_WaitOnTXISFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, uint32_t Tickstart) +{ + while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_TXIS) == RESET) + { + /* Check if a NACK is detected */ + if (I2C_IsAcknowledgeFailed(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + } + return HAL_OK; +} + +/** + * @brief This function handles I2C Communication Timeout for specific usage of STOP flag. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_WaitOnSTOPFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, uint32_t Tickstart) +{ + while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF) == RESET) + { + /* Check if a NACK is detected */ + if (I2C_IsAcknowledgeFailed(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Check for the Timeout */ + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + return HAL_OK; +} + +/** + * @brief This function handles I2C Communication Timeout for specific usage of RXNE flag. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_WaitOnRXNEFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, uint32_t Tickstart) +{ + while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_RXNE) == RESET) + { + /* Check if a NACK is detected */ + if (I2C_IsAcknowledgeFailed(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Check if a STOPF is detected */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF) == SET) + { + /* Check if an RXNE is pending */ + /* Store Last receive data if any */ + if ((__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_RXNE) == SET) && (hi2c->XferSize > 0U)) + { + /* Return HAL_OK */ + /* The Reading of data from RXDR will be done in caller function */ + return HAL_OK; + } + else + { + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + + /* Check for the Timeout */ + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + return HAL_OK; +} + +/** + * @brief This function handles Acknowledge failed detection during an I2C Communication. 
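The wait helpers above all follow the same tick-based pattern: poll a flag and compare the elapsed HAL_GetTick() delta against the caller's timeout. The same pattern can be reused in application code, for example to wait for the handle to return to the READY state; a minimal sketch (the helper name and any timeout budget passed to it are illustrative):

static HAL_StatusTypeDef wait_i2c_ready(I2C_HandleTypeDef *hi2c, uint32_t timeout_ms)
{
  uint32_t tickstart = HAL_GetTick();

  while (HAL_I2C_GetState(hi2c) != HAL_I2C_STATE_READY)
  {
    /* unsigned subtraction stays correct across the 32-bit tick wrap-around */
    if ((HAL_GetTick() - tickstart) > timeout_ms)
    {
      return HAL_TIMEOUT;
    }
  }
  return HAL_OK;
}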
+ * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_IsAcknowledgeFailed(I2C_HandleTypeDef *hi2c, uint32_t Timeout, uint32_t Tickstart) +{ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_AF) == SET) + { + /* Wait until STOP Flag is reset */ + /* AutoEnd should be initiate after AF */ + while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF) == RESET) + { + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + } + + /* Clear NACKF Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + return HAL_OK; +} + +/** + * @brief Handles I2Cx communication when starting transfer or during transfer (TC or TCR flag are set). + * @param hi2c I2C handle. + * @param DevAddress Specifies the slave address to be programmed. + * @param Size Specifies the number of bytes to be programmed. + * This parameter must be a value between 0 and 255. + * @param Mode New state of the I2C START condition generation. + * This parameter can be one of the following values: + * @arg @ref I2C_RELOAD_MODE Enable Reload mode . + * @arg @ref I2C_AUTOEND_MODE Enable Automatic end mode. + * @arg @ref I2C_SOFTEND_MODE Enable Software end mode. + * @param Request New state of the I2C START condition generation. + * This parameter can be one of the following values: + * @arg @ref I2C_NO_STARTSTOP Don't Generate stop and start condition. + * @arg @ref I2C_GENERATE_STOP Generate stop condition (Size should be set to 0). + * @arg @ref I2C_GENERATE_START_READ Generate Restart for read request. + * @arg @ref I2C_GENERATE_START_WRITE Generate Restart for write request. + * @retval None + */ +static void I2C_TransferConfig(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t Size, uint32_t Mode, uint32_t Request) +{ + /* Check the parameters */ + assert_param(IS_I2C_ALL_INSTANCE(hi2c->Instance)); + assert_param(IS_TRANSFER_MODE(Mode)); + assert_param(IS_TRANSFER_REQUEST(Request)); + + /* update CR2 register */ + MODIFY_REG(hi2c->Instance->CR2, ((I2C_CR2_SADD | I2C_CR2_NBYTES | I2C_CR2_RELOAD | I2C_CR2_AUTOEND | (I2C_CR2_RD_WRN & (uint32_t)(Request >> (31U - I2C_CR2_RD_WRN_Pos))) | I2C_CR2_START | I2C_CR2_STOP)), \ + (uint32_t)(((uint32_t)DevAddress & I2C_CR2_SADD) | (((uint32_t)Size << I2C_CR2_NBYTES_Pos) & I2C_CR2_NBYTES) | (uint32_t)Mode | (uint32_t)Request)); +} + +/** + * @brief Manage the enabling of Interrupts. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param InterruptRequest Value of @ref I2C_Interrupt_configuration_definition. 
+ * @retval None + */ +static void I2C_Enable_IRQ(I2C_HandleTypeDef *hi2c, uint16_t InterruptRequest) +{ + uint32_t tmpisr = 0U; + + if ((hi2c->XferISR == I2C_Master_ISR_DMA) || \ + (hi2c->XferISR == I2C_Slave_ISR_DMA)) + { + if ((InterruptRequest & I2C_XFER_LISTEN_IT) == I2C_XFER_LISTEN_IT) + { + /* Enable ERR, STOP, NACK and ADDR interrupts */ + tmpisr |= I2C_IT_ADDRI | I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_ERRI; + } + + if ((InterruptRequest & I2C_XFER_ERROR_IT) == I2C_XFER_ERROR_IT) + { + /* Enable ERR and NACK interrupts */ + tmpisr |= I2C_IT_ERRI | I2C_IT_NACKI; + } + + if ((InterruptRequest & I2C_XFER_CPLT_IT) == I2C_XFER_CPLT_IT) + { + /* Enable STOP interrupts */ + tmpisr |= I2C_IT_STOPI; + } + + if ((InterruptRequest & I2C_XFER_RELOAD_IT) == I2C_XFER_RELOAD_IT) + { + /* Enable TC interrupts */ + tmpisr |= I2C_IT_TCI; + } + } + else + { + if ((InterruptRequest & I2C_XFER_LISTEN_IT) == I2C_XFER_LISTEN_IT) + { + /* Enable ERR, STOP, NACK, and ADDR interrupts */ + tmpisr |= I2C_IT_ADDRI | I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_ERRI; + } + + if ((InterruptRequest & I2C_XFER_TX_IT) == I2C_XFER_TX_IT) + { + /* Enable ERR, TC, STOP, NACK and RXI interrupts */ + tmpisr |= I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_TXI; + } + + if ((InterruptRequest & I2C_XFER_RX_IT) == I2C_XFER_RX_IT) + { + /* Enable ERR, TC, STOP, NACK and TXI interrupts */ + tmpisr |= I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_RXI; + } + + if ((InterruptRequest & I2C_XFER_CPLT_IT) == I2C_XFER_CPLT_IT) + { + /* Enable STOP interrupts */ + tmpisr |= I2C_IT_STOPI; + } + } + + /* Enable interrupts only at the end */ + /* to avoid the risk of I2C interrupt handle execution before */ + /* all interrupts requested done */ + __HAL_I2C_ENABLE_IT(hi2c, tmpisr); +} + +/** + * @brief Manage the disabling of Interrupts. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param InterruptRequest Value of @ref I2C_Interrupt_configuration_definition. 
+ * @retval None + */ +static void I2C_Disable_IRQ(I2C_HandleTypeDef *hi2c, uint16_t InterruptRequest) +{ + uint32_t tmpisr = 0U; + + if ((InterruptRequest & I2C_XFER_TX_IT) == I2C_XFER_TX_IT) + { + /* Disable TC and TXI interrupts */ + tmpisr |= I2C_IT_TCI | I2C_IT_TXI; + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) != (uint32_t)HAL_I2C_STATE_LISTEN) + { + /* Disable NACK and STOP interrupts */ + tmpisr |= I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_ERRI; + } + } + + if ((InterruptRequest & I2C_XFER_RX_IT) == I2C_XFER_RX_IT) + { + /* Disable TC and RXI interrupts */ + tmpisr |= I2C_IT_TCI | I2C_IT_RXI; + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) != (uint32_t)HAL_I2C_STATE_LISTEN) + { + /* Disable NACK and STOP interrupts */ + tmpisr |= I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_ERRI; + } + } + + if ((InterruptRequest & I2C_XFER_LISTEN_IT) == I2C_XFER_LISTEN_IT) + { + /* Disable ADDR, NACK and STOP interrupts */ + tmpisr |= I2C_IT_ADDRI | I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_ERRI; + } + + if ((InterruptRequest & I2C_XFER_ERROR_IT) == I2C_XFER_ERROR_IT) + { + /* Enable ERR and NACK interrupts */ + tmpisr |= I2C_IT_ERRI | I2C_IT_NACKI; + } + + if ((InterruptRequest & I2C_XFER_CPLT_IT) == I2C_XFER_CPLT_IT) + { + /* Enable STOP interrupts */ + tmpisr |= I2C_IT_STOPI; + } + + if ((InterruptRequest & I2C_XFER_RELOAD_IT) == I2C_XFER_RELOAD_IT) + { + /* Enable TC interrupts */ + tmpisr |= I2C_IT_TCI; + } + + /* Disable interrupts only at the end */ + /* to avoid a breaking situation like at "t" time */ + /* all disable interrupts request are not done */ + __HAL_I2C_DISABLE_IT(hi2c, tmpisr); +} + +/** + * @brief Convert I2Cx OTHER_xxx XferOptions to functionnal XferOptions. + * @param hi2c I2C handle. + * @retval None + */ +static void I2C_ConvertOtherXferOptions(I2C_HandleTypeDef *hi2c) +{ + /* if user set XferOptions to I2C_OTHER_FRAME */ + /* it request implicitly to generate a restart condition */ + /* set XferOptions to I2C_FIRST_FRAME */ + if (hi2c->XferOptions == I2C_OTHER_FRAME) + { + hi2c->XferOptions = I2C_FIRST_FRAME; + } + /* else if user set XferOptions to I2C_OTHER_AND_LAST_FRAME */ + /* it request implicitly to generate a restart condition */ + /* then generate a stop condition at the end of transfer */ + /* set XferOptions to I2C_FIRST_AND_LAST_FRAME */ + else if (hi2c->XferOptions == I2C_OTHER_AND_LAST_FRAME) + { + hi2c->XferOptions = I2C_FIRST_AND_LAST_FRAME; + } + else + { + /* Nothing to do */ + } +} + +/** + * @} + */ + +#endif /* HAL_I2C_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c new file mode 100644 index 000000000..1a14aa660 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c @@ -0,0 +1,258 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_i2c_ex.c + * @author MCD Application Team + * @brief I2C Extended HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of I2C Extended peripheral: + * + Extended features functions + * + @verbatim + ============================================================================== + ##### I2C peripheral Extended features ##### + ============================================================================== + + [..] Comparing to other previous devices, the I2C interface for STM32F7xx + devices contains the following additional features + + (+) Possibility to disable or enable Analog Noise Filter + (+) Use of a configured Digital Noise Filter + (+) Disable or enable Fast Mode Plus + + ##### How to use this driver ##### + ============================================================================== + [..] This driver provides functions to: + (#) Configure I2C Analog noise filter using the function HAL_I2CEx_ConfigAnalogFilter() + (#) Configure I2C Digital noise filter using the function HAL_I2CEx_ConfigDigitalFilter() + (#) Configure the enable or disable of fast mode plus driving capability using the functions : + (++) HAL_I2CEx_EnableFastModePlus() + (++) HAL_I2CEx_DisableFastModePlus() + @endverbatim + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup I2CEx I2CEx + * @brief I2C Extended HAL module driver + * @{ + */ + +#ifdef HAL_I2C_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup I2CEx_Exported_Functions I2C Extended Exported Functions + * @{ + */ + +/** @defgroup I2CEx_Exported_Functions_Group1 Extended features functions + * @brief Extended features functions + * +@verbatim + =============================================================================== + ##### Extended features functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure Noise Filters + (+) Configure Fast Mode Plus + +@endverbatim + * @{ + */ + +/** + * @brief Configure I2C Analog noise filter. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2Cx peripheral. + * @param AnalogFilter New state of the Analog filter. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2CEx_ConfigAnalogFilter(I2C_HandleTypeDef *hi2c, uint32_t AnalogFilter) +{ + /* Check the parameters */ + assert_param(IS_I2C_ALL_INSTANCE(hi2c->Instance)); + assert_param(IS_I2C_ANALOG_FILTER(AnalogFilter)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY; + + /* Disable the selected I2C peripheral */ + __HAL_I2C_DISABLE(hi2c); + + /* Reset I2Cx ANOFF bit */ + hi2c->Instance->CR1 &= ~(I2C_CR1_ANFOFF); + + /* Set analog filter bit*/ + hi2c->Instance->CR1 |= AnalogFilter; + + __HAL_I2C_ENABLE(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Configure I2C Digital noise filter. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2Cx peripheral. + * @param DigitalFilter Coefficient of digital noise filter between Min_Data=0x00 and Max_Data=0x0F. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2CEx_ConfigDigitalFilter(I2C_HandleTypeDef *hi2c, uint32_t DigitalFilter) +{ + uint32_t tmpreg; + + /* Check the parameters */ + assert_param(IS_I2C_ALL_INSTANCE(hi2c->Instance)); + assert_param(IS_I2C_DIGITAL_FILTER(DigitalFilter)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY; + + /* Disable the selected I2C peripheral */ + __HAL_I2C_DISABLE(hi2c); + + /* Get the old register value */ + tmpreg = hi2c->Instance->CR1; + + /* Reset I2Cx DNF bits [11:8] */ + tmpreg &= ~(I2C_CR1_DNF); + + /* Set I2Cx DNF coefficient */ + tmpreg |= DigitalFilter << 8U; + + /* Store the new register value */ + hi2c->Instance->CR1 = tmpreg; + + __HAL_I2C_ENABLE(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +#if (defined(SYSCFG_PMC_I2C_PB6_FMP) || defined(SYSCFG_PMC_I2C_PB7_FMP)) || (defined(SYSCFG_PMC_I2C_PB8_FMP) || defined(SYSCFG_PMC_I2C_PB9_FMP)) || (defined(SYSCFG_PMC_I2C1_FMP)) || (defined(SYSCFG_PMC_I2C2_FMP)) || defined(SYSCFG_PMC_I2C3_FMP) || defined(SYSCFG_PMC_I2C4_FMP) +/** + * @brief Enable the I2C fast mode plus driving capability. + * @param ConfigFastModePlus Selects the pin. + * This parameter can be one of the @ref I2CEx_FastModePlus values + * @note For I2C1, fast mode plus driving capability can be enabled on all selected + * I2C1 pins using I2C_FASTMODEPLUS_I2C1 parameter or independently + * on each one of the following pins PB6, PB7, PB8 and PB9. + * @note For remaining I2C1 pins (PA14, PA15...) fast mode plus driving capability + * can be enabled only by using I2C_FASTMODEPLUS_I2C1 parameter. + * @note For all I2C2 pins fast mode plus driving capability can be enabled + * only by using I2C_FASTMODEPLUS_I2C2 parameter. + * @note For all I2C3 pins fast mode plus driving capability can be enabled + * only by using I2C_FASTMODEPLUS_I2C3 parameter. + * @note For all I2C4 pins fast mode plus driving capability can be enabled + * only by using I2C_FASTMODEPLUS_I2C4 parameter. + * @retval None + */ +void HAL_I2CEx_EnableFastModePlus(uint32_t ConfigFastModePlus) +{ + /* Check the parameter */ + assert_param(IS_I2C_FASTMODEPLUS(ConfigFastModePlus)); + + /* Enable SYSCFG clock */ + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + /* Enable fast mode plus driving capability for selected pin */ + SET_BIT(SYSCFG->PMC, (uint32_t)ConfigFastModePlus); +} + +/** + * @brief Disable the I2C fast mode plus driving capability. + * @param ConfigFastModePlus Selects the pin. + * This parameter can be one of the @ref I2CEx_FastModePlus values + * @note For I2C1, fast mode plus driving capability can be disabled on all selected + * I2C1 pins using I2C_FASTMODEPLUS_I2C1 parameter or independently + * on each one of the following pins PB6, PB7, PB8 and PB9. + * @note For remaining I2C1 pins (PA14, PA15...) fast mode plus driving capability + * can be disabled only by using I2C_FASTMODEPLUS_I2C1 parameter. + * @note For all I2C2 pins fast mode plus driving capability can be disabled + * only by using I2C_FASTMODEPLUS_I2C2 parameter. + * @note For all I2C3 pins fast mode plus driving capability can be disabled + * only by using I2C_FASTMODEPLUS_I2C3 parameter. + * @note For all I2C4 pins fast mode plus driving capability can be disabled + * only by using I2C_FASTMODEPLUS_I2C4 parameter. 
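The filter functions above require the handle to be in the READY state (i.e. after HAL_I2C_Init()), while the Fast Mode Plus functions act on SYSCFG and are independent of the handle. A minimal configuration sketch, assuming an illustrative hi2c1 handle and using Error_Handler() as a stand-in for the application's error hook:

extern I2C_HandleTypeDef hi2c1;            /* illustrative handle */

void configure_i2c1_signal_path(void)
{
  /* keep the analog glitch filter enabled */
  if (HAL_I2CEx_ConfigAnalogFilter(&hi2c1, I2C_ANALOGFILTER_ENABLE) != HAL_OK)
  {
    Error_Handler();                       /* application error hook (assumed) */
  }

  /* add a digital filter of 2 I2CCLK periods (coefficient range 0x00..0x0F) */
  if (HAL_I2CEx_ConfigDigitalFilter(&hi2c1, 0x02U) != HAL_OK)
  {
    Error_Handler();
  }

  /* enable Fast Mode Plus drive on all I2C1 pins */
  HAL_I2CEx_EnableFastModePlus(I2C_FASTMODEPLUS_I2C1);
}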
+ * @retval None + */ +void HAL_I2CEx_DisableFastModePlus(uint32_t ConfigFastModePlus) +{ + /* Check the parameter */ + assert_param(IS_I2C_FASTMODEPLUS(ConfigFastModePlus)); + + /* Enable SYSCFG clock */ + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + /* Disable fast mode plus driving capability for selected pin */ + CLEAR_BIT(SYSCFG->PMC, (uint32_t)ConfigFastModePlus); +} + +#endif +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_I2C_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_iwdg.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_iwdg.c new file mode 100644 index 000000000..70f72687e --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_iwdg.c @@ -0,0 +1,264 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_iwdg.c + * @author MCD Application Team + * @brief IWDG HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Independent Watchdog (IWDG) peripheral: + * + Initialization and Start functions + * + IO operation functions + * + @verbatim + ============================================================================== + ##### IWDG Generic features ##### + ============================================================================== + [..] + (+) The IWDG can be started by either software or hardware (configurable + through option byte). + + (+) The IWDG is clocked by Low-Speed clock (LSI) and thus stays active even + if the main clock fails. + + (+) Once the IWDG is started, the LSI is forced ON and both can not be + disabled. The counter starts counting down from the reset value (0xFFF). + When it reaches the end of count value (0x000) a reset signal is + generated (IWDG reset). + + (+) Whenever the key value 0x0000 AAAA is written in the IWDG_KR register, + the IWDG_RLR value is reloaded in the counter and the watchdog reset is + prevented. + + (+) The IWDG is implemented in the VDD voltage domain that is still functional + in STOP and STANDBY mode (IWDG reset can wake-up from STANDBY). + IWDGRST flag in RCC_CSR register can be used to inform when an IWDG + reset occurs. + + (+) Debug mode : When the microcontroller enters debug mode (core halted), + the IWDG counter either continues to work normally or stops, depending + on DBG_IWDG_STOP configuration bit in DBG module, accessible through + __HAL_DBGMCU_FREEZE_IWDG() and __HAL_DBGMCU_UNFREEZE_IWDG() macros. + + [..] Min-max timeout value @32KHz (LSI): ~125us / ~32.7s + The IWDG timeout may vary due to LSI frequency dispersion. STM32F7xx + devices provide the capability to measure the LSI frequency (LSI clock + connected internally to TIM16 CH1 input capture). The measured value + can be used to have an IWDG timeout with an acceptable accuracy. + + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Use IWDG using HAL_IWDG_Init() function to : + (+) Enable instance by writing Start keyword in IWDG_KEY register. LSI + clock is forced ON and IWDG counter starts counting down. + (+) Enable write access to configuration registers: + IWDG_PR, IWDG_RLR and IWDG_WINR. + (+) Configure the IWDG prescaler and counter reload value. 
This reload + value will be loaded in the IWDG counter each time the watchdog is + reloaded, then the IWDG will start counting down from this value. + (+) Wait for status flags to be reset. + (+) Depending on window parameter: + (++) If Window Init parameter is same as Window register value, + nothing more is done but reload counter value in order to exit + function with exact time base. + (++) Else modify Window register. This will automatically reload + watchdog counter. + + (#) Then the application program must refresh the IWDG counter at regular + intervals during normal operation to prevent an MCU reset, using + HAL_IWDG_Refresh() function. + + *** IWDG HAL driver macros list *** + ==================================== + [..] + Below the list of most used macros in IWDG HAL driver: + (+) __HAL_IWDG_START: Enable the IWDG peripheral + (+) __HAL_IWDG_RELOAD_COUNTER: Reloads IWDG counter with value defined in + the reload register + + @endverbatim + ****************************************************************************** + * @attention + * + *
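A minimal start-and-refresh sketch of the sequence described above; the hiwdg handle name is illustrative, and with a prescaler of 64 and the maximum reload value the period works out to roughly 8 s at the nominal ~32 kHz LSI:

IWDG_HandleTypeDef hiwdg;

void watchdog_start(void)
{
  hiwdg.Instance       = IWDG;
  hiwdg.Init.Prescaler = IWDG_PRESCALER_64;
  hiwdg.Init.Reload    = 4095U;                /* maximum 12-bit reload value */
  hiwdg.Init.Window    = IWDG_WINDOW_DISABLE;  /* window feature not used */

  if (HAL_IWDG_Init(&hiwdg) != HAL_OK)
  {
    /* init failed, e.g. the status register did not clear in time */
  }
}

/* call periodically, well inside the timeout, to prevent an IWDG reset */
void watchdog_kick(void)
{
  (void)HAL_IWDG_Refresh(&hiwdg);
}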

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +#ifdef HAL_IWDG_MODULE_ENABLED +/** @addtogroup IWDG + * @brief IWDG HAL module driver. + * @{ + */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @defgroup IWDG_Private_Defines IWDG Private Defines + * @{ + */ +/* Status register need 5 RC LSI divided by prescaler clock to be updated. With + higher prescaler (256), and according to LSI variation, we need to wait at + least 6 cycles so 48 ms. */ +#define HAL_IWDG_DEFAULT_TIMEOUT 48u +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup IWDG_Exported_Functions + * @{ + */ + +/** @addtogroup IWDG_Exported_Functions_Group1 + * @brief Initialization and Start functions. + * +@verbatim + =============================================================================== + ##### Initialization and Start functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initialize the IWDG according to the specified parameters in the + IWDG_InitTypeDef of associated handle. + (+) Manage Window option. + (+) Once initialization is performed in HAL_IWDG_Init function, Watchdog + is reloaded in order to exit function with correct time base. + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the IWDG according to the specified parameters in the + * IWDG_InitTypeDef and start watchdog. Before exiting function, + * watchdog is refreshed in order to have correct time base. + * @param hiwdg pointer to a IWDG_HandleTypeDef structure that contains + * the configuration information for the specified IWDG module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_IWDG_Init(IWDG_HandleTypeDef *hiwdg) +{ + uint32_t tickstart; + + /* Check the IWDG handle allocation */ + if (hiwdg == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_IWDG_ALL_INSTANCE(hiwdg->Instance)); + assert_param(IS_IWDG_PRESCALER(hiwdg->Init.Prescaler)); + assert_param(IS_IWDG_RELOAD(hiwdg->Init.Reload)); + assert_param(IS_IWDG_WINDOW(hiwdg->Init.Window)); + + /* Enable IWDG. 
LSI is turned on automatically */ + __HAL_IWDG_START(hiwdg); + + /* Enable write access to IWDG_PR, IWDG_RLR and IWDG_WINR registers by writing + 0x5555 in KR */ + IWDG_ENABLE_WRITE_ACCESS(hiwdg); + + /* Write to IWDG registers the Prescaler & Reload values to work with */ + hiwdg->Instance->PR = hiwdg->Init.Prescaler; + hiwdg->Instance->RLR = hiwdg->Init.Reload; + + /* Check pending flag, if previous update not done, return timeout */ + tickstart = HAL_GetTick(); + + /* Wait for register to be updated */ + while (hiwdg->Instance->SR != 0x00u) + { + if ((HAL_GetTick() - tickstart) > HAL_IWDG_DEFAULT_TIMEOUT) + { + return HAL_TIMEOUT; + } + } + + /* If window parameter is different than current value, modify window + register */ + if (hiwdg->Instance->WINR != hiwdg->Init.Window) + { + /* Write to IWDG WINR the IWDG_Window value to compare with. In any case, + even if window feature is disabled, Watchdog will be reloaded by writing + windows register */ + hiwdg->Instance->WINR = hiwdg->Init.Window; + } + else + { + /* Reload IWDG counter with value defined in the reload register */ + __HAL_IWDG_RELOAD_COUNTER(hiwdg); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + + +/** @addtogroup IWDG_Exported_Functions_Group2 + * @brief IO operation functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Refresh the IWDG. + +@endverbatim + * @{ + */ + + +/** + * @brief Refresh the IWDG. + * @param hiwdg pointer to a IWDG_HandleTypeDef structure that contains + * the configuration information for the specified IWDG module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_IWDG_Refresh(IWDG_HandleTypeDef *hiwdg) +{ + /* Reload IWDG counter with value defined in the reload register */ + __HAL_IWDG_RELOAD_COUNTER(hiwdg); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_IWDG_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_jpeg.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_jpeg.c new file mode 100644 index 000000000..282d8f70d --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_jpeg.c @@ -0,0 +1,4140 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_jpeg.c + * @author MCD Application Team + * @brief JPEG HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the JPEG encoder/decoder peripheral: + * + Initialization and de-initialization functions + * + JPEG processing functions encoding and decoding + * + JPEG decoding Getting Info and encoding configuration setting + * + JPEG enable/disable header parsing functions (for decoding) + * + JPEG Input/Output Buffer configuration. 
+ * + JPEG callback functions + * + JPEG Abort/Pause/Resume functions + * + JPEG custom quantization tables setting functions + * + IRQ handler management + * + Peripheral State and Error functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Initialize the JPEG peripheral using HAL_JPEG_Init : No initialization parameters are required. + Only the call to HAL_JPEG_Init is necessary to initialize the JPEG peripheral. + + (#) If operation is JPEG encoding use function HAL_JPEG_ConfigEncoding to set + the encoding parameters (mandatory before calling the encoding function). + the application can change the encoding parameter ImageQuality from + 1 to 100 to obtain a more or less quality (visual quality vs the original row image), + and inversely more or less jpg file size. + + (#) Note that for decoding operation the JPEG peripheral output data are organized in + YCbCr blocks called MCU (Minimum Coded Unit) as defioned in the JPEG specification + ISO/IEC 10918-1 standard. + It is up to the application to transform these YCbCr blocks to RGB data that can be display. + + Respectively, for Encoding operation the JPEG peripheral input should be organized + in YCbCr MCU blocks. It is up to the application to perform the necessary RGB to YCbCr + MCU blocks transformation before feeding the JPEG peripheral with data. + + (#) Use functions HAL_JPEG_Encode and HAL_JPEG_Decode to start respectively + a JPEG encoding/decoding operation in polling method (blocking). + + (#) Use functions HAL_JPEG_Encode_IT and HAL_JPEG_Decode_IT to start respectively + a JPEG encoding/decoding operation with Interrupt method (not blocking). + + (#) Use functions HAL_JPEG_Encode_DMA and HAL_JPEG_Decode_DMA to start respectively + a JPEG encoding/decoding operation with DMA method (not blocking). + + (#) Callback HAL_JPEG_InfoReadyCallback is asserted if the current operation + is a JPEG decoding to provide the application with JPEG image parameters. + This callback is asserted when the JPEG peripheral successfully parse the + JPEG header. + + (#) Callback HAL_JPEG_GetDataCallback is asserted for both encoding and decoding + operations to inform the application that the input buffer has been + consumed by the peripheral and to ask for a new data chunk if the operation + (encoding/decoding) has not been complete yet. + + (++) This CallBack should be implemented in the application side. It should + call the function HAL_JPEG_ConfigInputBuffer if new input data are available, + or call HAL_JPEG_Pause with parameter XferSelection set to JPEG_PAUSE_RESUME_INPUT + to inform the JPEG HAL driver that the ongoing operation shall pause waiting for the + application to provide a new input data chunk. + Once the application succeed getting new data and if the input has been paused, + the application can call the function HAL_JPEG_ConfigInputBuffer to set the new + input buffer and size, then resume the JPEG HAL input by calling new function HAL_JPEG_Resume. + If the application has ended feeding the HAL JPEG with input data (no more input data), the application + Should call the function HAL_JPEG_ConfigInputBuffer (within the callback HAL_JPEG_GetDataCallback) + with the parameter InDataLength set to zero. 
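A minimal sketch of the input-feeding mechanism described above, assuming stm32f7xx_hal.h is included and using a hypothetical file_read_chunk() data source and jpeg_in[] staging buffer (both names are illustrative):

extern uint32_t file_read_chunk(uint8_t *buf, uint32_t maxlen);  /* hypothetical */

static uint8_t jpeg_in[4096];

void HAL_JPEG_GetDataCallback(JPEG_HandleTypeDef *hjpeg, uint32_t NbDecodedData)
{
  uint32_t n;

  (void)NbDecodedData;   /* bytes consumed from the previous buffer (unused here) */

  n = file_read_chunk(jpeg_in, sizeof(jpeg_in));
  if (n > 0U)
  {
    HAL_JPEG_ConfigInputBuffer(hjpeg, jpeg_in, n);   /* hand over the next chunk */
  }
  else
  {
    HAL_JPEG_ConfigInputBuffer(hjpeg, jpeg_in, 0U);  /* no more input data */
  }
}

If the data source were asynchronous, the callback would instead call HAL_JPEG_Pause() with JPEG_PAUSE_RESUME_INPUT and later, once a chunk is available, HAL_JPEG_ConfigInputBuffer() followed by HAL_JPEG_Resume(), as explained in the items above and below.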
+ + (++) The mechanism of HAL_JPEG_ConfigInputBuffer/HAL_JPEG_Pause/HAL_JPEG_Resume allows + to the application to provide the input data (for encoding or decoding) by chunks. + If the new input data chunk is not available (because data should be read from an input file + for example) the application can pause the JPEG input (using function HAL_JPEG_Pause) + Once the new input data chunk is available ( read from a file for example), the application + can call the function HAL_JPEG_ConfigInputBuffer to provide the HAL with the new chunk + then resume the JPEG HAL input by calling function HAL_JPEG_Resume. + + (++) The application can call functions HAL_JPEG_ConfigInputBuffer then HAL_JPEG_Resume. + any time (outside the HAL_JPEG_GetDataCallback) Once the new input chunk data available. + However, to keep data coherency, the function HAL_JPEG_Pause must be imperatively called + (if necessary) within the callback HAL_JPEG_GetDataCallback, i.e when the HAL JPEG has ended + Transferring the previous chunk buffer to the JPEG peripheral. + + (#) Callback HAL_JPEG_DataReadyCallback is asserted when the HAL JPEG driver + has filled the given output buffer with the given size. + + (++) This CallBack should be implemented in the application side. It should + call the function HAL_JPEG_ConfigOutputBuffer to provide the HAL JPEG driver + with the new output buffer location and size to be used to store next data chunk. + if the application is not ready to provide the output chunk location then it can + call the function HAL_JPEG_Pause with parameter XferSelection set to JPEG_PAUSE_RESUME_OUTPUT + to inform the JPEG HAL driver that it shall pause output data. Once the application + is ready to receive the new data chunk (output buffer location free or available) it should call + the function HAL_JPEG_ConfigOutputBuffer to provide the HAL JPEG driver + with the new output chunk buffer location and size, then call HAL_JPEG_Resume + to inform the HAL that it shall resume outputting data in the given output buffer. + + (++) The mechanism of HAL_JPEG_ConfigOutputBuffer/HAL_JPEG_Pause/HAL_JPEG_Resume allows + the application to receive data from the JPEG peripheral by chunks. when a chunk + is received, the application can pause the HAL JPEG output data to be able to process + these received data (YCbCr to RGB conversion in case of decoding or data storage in case + of encoding). + + (++) The application can call functions HAL_JPEG_ ConfigOutputBuffer then HAL_JPEG_Resume. + any time (outside the HAL_JPEG_DataReadyCallback) Once the output data buffer is free to use. + However, to keep data coherency, the function HAL_JPEG_Pause must be imperatively called + (if necessary) within the callback HAL_JPEG_ DataReadyCallback, i.e when the HAL JPEG has ended + Transferring the previous chunk buffer from the JPEG peripheral to the application. + + (#) Callback HAL_JPEG_EncodeCpltCallback is asserted when the HAL JPEG driver has + ended the current JPEG encoding operation, and all output data has been transmitted + to the application. + + (#) Callback HAL_JPEG_DecodeCpltCallback is asserted when the HAL JPEG driver has + ended the current JPEG decoding operation. and all output data has been transmitted + to the application. + + (#) Callback HAL_JPEG_ErrorCallback is asserted when an error occurred during + the current operation. the application can call the function HAL_JPEG_GetError() + to retrieve the error codes. 
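A matching sketch of the output side and completion callbacks, assuming a transfer started with, e.g., HAL_JPEG_Decode_DMA(); the display_push() consumer and jpeg_out[] buffer are hypothetical names:

extern void display_push(uint8_t *data, uint32_t len);   /* hypothetical consumer */

static uint8_t jpeg_out[1024];

void HAL_JPEG_DataReadyCallback(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataOut, uint32_t OutDataLength)
{
  display_push(pDataOut, OutDataLength);                  /* consume the YCbCr MCU data */

  /* give the driver the next output chunk location */
  HAL_JPEG_ConfigOutputBuffer(hjpeg, jpeg_out, (uint32_t)sizeof(jpeg_out));
}

void HAL_JPEG_DecodeCpltCallback(JPEG_HandleTypeDef *hjpeg)
{
  /* whole image decoded and all output data delivered */
}

void HAL_JPEG_ErrorCallback(JPEG_HandleTypeDef *hjpeg)
{
  uint32_t err = HAL_JPEG_GetError(hjpeg);                /* HAL_JPEG_ERROR_xxx mask */
  (void)err;
}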
+
+     (#) By default, for encoding, the HAL JPEG driver uses the default quantization tables
+         as provided in the JPEG specification (ISO/IEC 10918-1 standard).
+         The user can change these default tables if necessary using the function HAL_JPEG_SetUserQuantTables.
+         Note that for decoding the quantization tables are automatically extracted from
+         the JPEG header.
+
+     (#) To control the JPEG state you can use the following function: HAL_JPEG_GetState().
+
+    *** JPEG HAL driver macros list ***
+    =============================================
+    [..]
+       Below is the list of the most used macros in the JPEG HAL driver.
+
+      (+) __HAL_JPEG_RESET_HANDLE_STATE : Reset the JPEG handle state.
+      (+) __HAL_JPEG_ENABLE             : Enable the JPEG peripheral.
+      (+) __HAL_JPEG_DISABLE            : Disable the JPEG peripheral.
+      (+) __HAL_JPEG_GET_FLAG           : Check the specified JPEG status flag.
+      (+) __HAL_JPEG_CLEAR_FLAG         : Clear the specified JPEG status flag.
+      (+) __HAL_JPEG_ENABLE_IT          : Enable the specified JPEG Interrupt.
+      (+) __HAL_JPEG_DISABLE_IT         : Disable the specified JPEG Interrupt.
+      (+) __HAL_JPEG_GET_IT_SOURCE      : Return the state of the specified JPEG Interrupt (enabled or disabled).
+
+    *** Callback registration ***
+    =============================================
+
+    The compilation define USE_HAL_JPEG_REGISTER_CALLBACKS, when set to 1,
+    allows the user to configure the driver callbacks dynamically.
+    Use the functions HAL_JPEG_RegisterCallback() or HAL_JPEG_RegisterXXXCallback()
+    to register an interrupt callback.
+
+    The function HAL_JPEG_RegisterCallback() allows registering the following callbacks:
+      (+) EncodeCpltCallback : callback for end of encoding operation.
+      (+) DecodeCpltCallback : callback for end of decoding operation.
+      (+) ErrorCallback      : callback for error detection.
+      (+) MspInitCallback    : JPEG MspInit.
+      (+) MspDeInitCallback  : JPEG MspDeInit.
+    This function takes as parameters the HAL peripheral handle, the Callback ID
+    and a pointer to the user callback function, as shown in the sketch below.
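+
+    Illustrative registration sketch (assumptions : USE_HAL_JPEG_REGISTER_CALLBACKS is set
+    to 1 in the HAL configuration and Application_DecodeCpltCallback is a user-defined
+    function, not part of this driver) :
+
+      extern JPEG_HandleTypeDef hjpeg;                                 // application handle
+      void Application_DecodeCpltCallback(JPEG_HandleTypeDef *hjpeg);  // user callback
+
+      if (HAL_JPEG_RegisterCallback(&hjpeg, HAL_JPEG_DECODE_CPLT_CB_ID,
+                                    Application_DecodeCpltCallback) != HAL_OK)
+      {
+        // registration failed : handle not in the READY state or invalid callback
+      }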
+
+    For the specific callbacks InfoReadyCallback, GetDataCallback and DataReadyCallback use the dedicated
+    register functions : respectively HAL_JPEG_RegisterInfoReadyCallback(),
+    HAL_JPEG_RegisterGetDataCallback() and HAL_JPEG_RegisterDataReadyCallback().
+
+    Use the function HAL_JPEG_UnRegisterCallback() to reset a callback to the default
+    weak function.
+    HAL_JPEG_UnRegisterCallback() takes as parameters the HAL peripheral handle
+    and the Callback ID.
+    This function allows resetting the following callbacks:
+      (+) EncodeCpltCallback : callback for end of encoding operation.
+      (+) DecodeCpltCallback : callback for end of decoding operation.
+      (+) ErrorCallback      : callback for error detection.
+      (+) MspInitCallback    : JPEG MspInit.
+      (+) MspDeInitCallback  : JPEG MspDeInit.
+
+    For the callbacks InfoReadyCallback, GetDataCallback and DataReadyCallback use the dedicated
+    unregister functions : respectively HAL_JPEG_UnRegisterInfoReadyCallback(),
+    HAL_JPEG_UnRegisterGetDataCallback() and HAL_JPEG_UnRegisterDataReadyCallback().
+
+    By default, after HAL_JPEG_Init() and when the state is HAL_JPEG_STATE_RESET,
+    all callbacks are set to the corresponding weak functions :
+    for example HAL_JPEG_DecodeCpltCallback() and HAL_JPEG_GetDataCallback().
+    An exception is made for the MspInit and MspDeInit callbacks : they are reset to the
+    legacy weak functions in HAL_JPEG_Init()/HAL_JPEG_DeInit() only when these callbacks
+    are null (not registered beforehand). If they are not null, HAL_JPEG_Init()/HAL_JPEG_DeInit()
+    keep and use the user MspInit/MspDeInit functions (registered beforehand).
+
+    Callbacks can be registered/unregistered in the HAL_JPEG_STATE_READY state only.
+    An exception is made for the MspInit/MspDeInit callbacks, which can be registered/unregistered
+    in the HAL_JPEG_STATE_READY or HAL_JPEG_STATE_RESET state;
+    thus registered (user) MspInit/MspDeInit callbacks can be used during the Init/DeInit.
+    In that case, first register the MspInit/MspDeInit user callbacks
+    using HAL_JPEG_RegisterCallback() before calling HAL_JPEG_DeInit()
+    or HAL_JPEG_Init().
+
+    When the compilation define USE_HAL_JPEG_REGISTER_CALLBACKS is set to 0 or
+    not defined, the callback registration feature is not available and all callbacks
+    are set to the corresponding weak functions.
+
+  @endverbatim
+  ******************************************************************************
+  * @attention
+  *
+  *

+  * © Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup JPEG JPEG + * @brief JPEG HAL module driver. + * @{ + */ + +#ifdef HAL_JPEG_MODULE_ENABLED + +#if defined (JPEG) + +/* Private define ------------------------------------------------------------*/ +/** @addtogroup JPEG_Private_Constants + * @{ + */ +#define JPEG_TIMEOUT_VALUE ((uint32_t)1000) /* 1s */ +#define JPEG_AC_HUFF_TABLE_SIZE ((uint32_t)162) /* Huffman AC table size : 162 codes*/ +#define JPEG_DC_HUFF_TABLE_SIZE ((uint32_t)12) /* Huffman AC table size : 12 codes*/ + +#define JPEG_FIFO_SIZE ((uint32_t)16U) /* JPEG Input/Output HW FIFO size in words*/ + +#define JPEG_FIFO_TH_SIZE ((uint32_t)4U) /* JPEG Input/Output HW FIFO Threshold in words*/ + +#define JPEG_DMA_MASK ((uint32_t)0x00001800) /* JPEG DMA request Mask*/ +#define JPEG_DMA_IDMA ((uint32_t)JPEG_CR_IDMAEN) /* DMA request for the input FIFO */ +#define JPEG_DMA_ODMA ((uint32_t)JPEG_CR_ODMAEN) /* DMA request for the output FIFO */ + +#define JPEG_INTERRUPT_MASK ((uint32_t)0x0000007EU) /* JPEG Interrupt Mask*/ + +#define JPEG_CONTEXT_ENCODE ((uint32_t)0x00000001) /* JPEG context : operation is encoding*/ +#define JPEG_CONTEXT_DECODE ((uint32_t)0x00000002) /* JPEG context : operation is decoding*/ +#define JPEG_CONTEXT_OPERATION_MASK ((uint32_t)0x00000003) /* JPEG context : operation Mask */ + +#define JPEG_CONTEXT_POLLING ((uint32_t)0x00000004) /* JPEG context : Transfer use Polling */ +#define JPEG_CONTEXT_IT ((uint32_t)0x00000008) /* JPEG context : Transfer use Interrupt */ +#define JPEG_CONTEXT_DMA ((uint32_t)0x0000000C) /* JPEG context : Transfer use DMA */ +#define JPEG_CONTEXT_METHOD_MASK ((uint32_t)0x0000000C) /* JPEG context : Transfer Mask */ + + +#define JPEG_CONTEXT_CONF_ENCODING ((uint32_t)0x00000100) /* JPEG context : encoding config done */ + +#define JPEG_CONTEXT_PAUSE_INPUT ((uint32_t)0x00001000) /* JPEG context : Pause Input */ +#define JPEG_CONTEXT_PAUSE_OUTPUT ((uint32_t)0x00002000) /* JPEG context : Pause Output */ + +#define JPEG_CONTEXT_CUSTOM_TABLES ((uint32_t)0x00004000) /* JPEG context : Use custom quantization tables */ + +#define JPEG_CONTEXT_ENDING_DMA ((uint32_t)0x00008000) /* JPEG context : ending with DMA in progress */ + +#define JPEG_PROCESS_ONGOING ((uint32_t)0x00000000) /* Process is on going */ +#define JPEG_PROCESS_DONE ((uint32_t)0x00000001) /* Process is done (ends) */ +/** + * @} + */ + +/* Private typedef -----------------------------------------------------------*/ +/** @addtogroup JPEG_Private_Types + * @{ + */ + +/* + JPEG Huffman Table Structure definition : + This implementation of Huffman table structure is compliant with ISO/IEC 10918-1 standard , Annex C Huffman Table specification + */ +typedef struct +{ + /* These two fields directly represent the contents of a JPEG DHT marker */ + uint8_t Bits[16]; /*!< bits[k] = # of symbols with codes of length k bits, this parameter corresponds to BITS list in the Annex C */ + + uint8_t HuffVal[162]; /*!< The symbols, in order of incremented code length, this parameter corresponds to HUFFVAL list in the Annex C */ + + +} 
JPEG_ACHuffTableTypeDef; + +typedef struct +{ + /* These two fields directly represent the contents of a JPEG DHT marker */ + uint8_t Bits[16]; /*!< bits[k] = # of symbols with codes of length k bits, this parameter corresponds to BITS list in the Annex C */ + + uint8_t HuffVal[12]; /*!< The symbols, in order of incremented code length, this parameter corresponds to HUFFVAL list in the Annex C */ + + +} JPEG_DCHuffTableTypeDef; + +typedef struct +{ + uint8_t CodeLength[JPEG_AC_HUFF_TABLE_SIZE]; /*!< Code length */ + + uint32_t HuffmanCode[JPEG_AC_HUFF_TABLE_SIZE]; /*!< HuffmanCode */ + +} JPEG_AC_HuffCodeTableTypeDef; + +typedef struct +{ + uint8_t CodeLength[JPEG_DC_HUFF_TABLE_SIZE]; /*!< Code length */ + + uint32_t HuffmanCode[JPEG_DC_HUFF_TABLE_SIZE]; /*!< HuffmanCode */ + +} JPEG_DC_HuffCodeTableTypeDef; +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/** @addtogroup JPEG_Private_Macros + * @{ + */ +#define JPEG_ENABLE_DMA(__HANDLE__,__DMA__) ((__HANDLE__)->Instance->CR |= ((__DMA__) & JPEG_DMA_MASK)) +/*note : To disable a DMA request we must use MODIFY_REG macro to avoid writing "1" to the FIFO flush bits + located in the same DMA request enable register (CR register). */ +#define JPEG_DISABLE_DMA(__HANDLE__,__DMA__) MODIFY_REG((__HANDLE__)->Instance->CR, ((__DMA__) & JPEG_DMA_MASK), 0UL) +/** + * @} + */ + +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup JPEG_Private_Variables + * @{ + */ + +static const JPEG_DCHuffTableTypeDef JPEG_DCLUM_HuffTable = +{ + { 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 }, /*Bits*/ + + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb } /*HUFFVAL */ + +}; + +static const JPEG_DCHuffTableTypeDef JPEG_DCCHROM_HuffTable = +{ + { 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, /*Bits*/ + + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb } /*HUFFVAL */ +}; + +static const JPEG_ACHuffTableTypeDef JPEG_ACLUM_HuffTable = +{ + { 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d }, /*Bits*/ + + { + 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, /*HUFFVAL */ + 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, + 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08, + 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, + 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, + 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, + 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, + 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, + 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, + 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, + 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, + 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, + 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, + 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2, + 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, + 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, + 0xf9, 0xfa + } +}; + +static const JPEG_ACHuffTableTypeDef JPEG_ACCHROM_HuffTable = +{ + { 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 }, /*Bits*/ + + { + 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, /*HUFFVAL */ + 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, + 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, + 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, + 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34, + 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26, + 0x27, 0x28, 
0x29, 0x2a, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, + 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, + 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, + 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, + 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, + 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, + 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, + 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, + 0xf9, 0xfa + } +}; + +static const uint8_t JPEG_ZIGZAG_ORDER[JPEG_QUANT_TABLE_SIZE] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63 +}; +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup JPEG_Private_Functions_Prototypes + * @{ + */ + +static HAL_StatusTypeDef JPEG_Bits_To_SizeCodes(uint8_t *Bits, uint8_t *Huffsize, uint32_t *Huffcode, uint32_t *LastK); +static HAL_StatusTypeDef JPEG_DCHuff_BitsVals_To_SizeCodes(JPEG_DCHuffTableTypeDef *DC_BitsValsTable, + JPEG_DC_HuffCodeTableTypeDef *DC_SizeCodesTable); +static HAL_StatusTypeDef JPEG_ACHuff_BitsVals_To_SizeCodes(JPEG_ACHuffTableTypeDef *AC_BitsValsTable, + JPEG_AC_HuffCodeTableTypeDef *AC_SizeCodesTable); +static HAL_StatusTypeDef JPEG_Set_HuffDC_Mem(JPEG_HandleTypeDef *hjpeg, JPEG_DCHuffTableTypeDef *HuffTableDC, + const __IO uint32_t *DCTableAddress); +static HAL_StatusTypeDef JPEG_Set_HuffAC_Mem(JPEG_HandleTypeDef *hjpeg, JPEG_ACHuffTableTypeDef *HuffTableAC, + const __IO uint32_t *ACTableAddress); +static HAL_StatusTypeDef JPEG_Set_HuffEnc_Mem(JPEG_HandleTypeDef *hjpeg); +static void JPEG_Set_Huff_DHTMem(JPEG_HandleTypeDef *hjpeg); +static uint32_t JPEG_Set_Quantization_Mem(JPEG_HandleTypeDef *hjpeg, uint8_t *QTable, + __IO uint32_t *QTableAddress); +static void JPEG_SetColorYCBCR(JPEG_HandleTypeDef *hjpeg); +static void JPEG_SetColorGrayScale(JPEG_HandleTypeDef *hjpeg); +static void JPEG_SetColorCMYK(JPEG_HandleTypeDef *hjpeg); + +static void JPEG_Init_Process(JPEG_HandleTypeDef *hjpeg); +static uint32_t JPEG_Process(JPEG_HandleTypeDef *hjpeg); +static void JPEG_ReadInputData(JPEG_HandleTypeDef *hjpeg, uint32_t nbRequestWords); +static void JPEG_StoreOutputData(JPEG_HandleTypeDef *hjpeg, uint32_t nbOutputWords); +static uint32_t JPEG_GetQuality(JPEG_HandleTypeDef *hjpeg); + +static HAL_StatusTypeDef JPEG_DMA_StartProcess(JPEG_HandleTypeDef *hjpeg); +static void JPEG_DMA_ContinueProcess(JPEG_HandleTypeDef *hjpeg); +static void JPEG_DMA_EndProcess(JPEG_HandleTypeDef *hjpeg); +static void JPEG_DMA_PollResidualData(JPEG_HandleTypeDef *hjpeg); +static void JPEG_DMAOutCpltCallback(DMA_HandleTypeDef *hdma); +static void JPEG_DMAInCpltCallback(DMA_HandleTypeDef *hdma); +static void JPEG_DMAErrorCallback(DMA_HandleTypeDef *hdma); +static void JPEG_DMAOutAbortCallback(DMA_HandleTypeDef *hdma) ; + +/** + * @} + */ + +/** @defgroup JPEG_Exported_Functions JPEG Exported Functions + * @{ + */ + +/** @defgroup JPEG_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and de-initialization functions. 
+ * +@verbatim + ============================================================================== + ##### Initialization and de-initialization functions ##### + ============================================================================== + [..] This section provides functions allowing to: + (+) Initialize the JPEG peripheral and creates the associated handle + (+) DeInitialize the JPEG peripheral + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the JPEG according to the specified + * parameters in the JPEG_InitTypeDef and creates the associated handle. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_Init(JPEG_HandleTypeDef *hjpeg) +{ + /* These are the sample quantization tables given in JPEG spec ISO/IEC 10918-1 standard , section K.1. */ + static const uint8_t JPEG_LUM_QuantTable[JPEG_QUANT_TABLE_SIZE] = + { + 16, 11, 10, 16, 24, 40, 51, 61, + 12, 12, 14, 19, 26, 58, 60, 55, + 14, 13, 16, 24, 40, 57, 69, 56, + 14, 17, 22, 29, 51, 87, 80, 62, + 18, 22, 37, 56, 68, 109, 103, 77, + 24, 35, 55, 64, 81, 104, 113, 92, + 49, 64, 78, 87, 103, 121, 120, 101, + 72, 92, 95, 98, 112, 100, 103, 99 + }; + static const uint8_t JPEG_CHROM_QuantTable[JPEG_QUANT_TABLE_SIZE] = + { + 17, 18, 24, 47, 99, 99, 99, 99, + 18, 21, 26, 66, 99, 99, 99, 99, + 24, 26, 56, 99, 99, 99, 99, 99, + 47, 66, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99 + }; + + /* Check the JPEG handle allocation */ + if (hjpeg == NULL) + { + return HAL_ERROR; + } + +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + if (hjpeg->State == HAL_JPEG_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hjpeg->Lock = HAL_UNLOCKED; + + hjpeg->InfoReadyCallback = HAL_JPEG_InfoReadyCallback; /* Legacy weak InfoReadyCallback */ + hjpeg->EncodeCpltCallback = HAL_JPEG_EncodeCpltCallback; /* Legacy weak EncodeCpltCallback */ + hjpeg->DecodeCpltCallback = HAL_JPEG_DecodeCpltCallback; /* Legacy weak DecodeCpltCallback */ + hjpeg->ErrorCallback = HAL_JPEG_ErrorCallback; /* Legacy weak ErrorCallback */ + hjpeg->GetDataCallback = HAL_JPEG_GetDataCallback; /* Legacy weak GetDataCallback */ + hjpeg->DataReadyCallback = HAL_JPEG_DataReadyCallback; /* Legacy weak DataReadyCallback */ + + if (hjpeg->MspInitCallback == NULL) + { + hjpeg->MspInitCallback = HAL_JPEG_MspInit; /* Legacy weak MspInit */ + } + + /* Init the low level hardware */ + hjpeg->MspInitCallback(hjpeg); + } +#else + if (hjpeg->State == HAL_JPEG_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hjpeg->Lock = HAL_UNLOCKED; + + /* Init the low level hardware : GPIO, CLOCK */ + HAL_JPEG_MspInit(hjpeg); + } +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_BUSY; + + /* Start the JPEG Core*/ + __HAL_JPEG_ENABLE(hjpeg); + + /* Stop the JPEG encoding/decoding process*/ + hjpeg->Instance->CONFR0 &= ~JPEG_CONFR0_START; + + /* Disable All Interrupts */ + __HAL_JPEG_DISABLE_IT(hjpeg, JPEG_INTERRUPT_MASK); + + /* Disable All DMA requests */ + JPEG_DISABLE_DMA(hjpeg, JPEG_DMA_MASK); + + /* Flush input and output FIFOs*/ + hjpeg->Instance->CR |= JPEG_CR_IFF; + hjpeg->Instance->CR |= JPEG_CR_OFF; + + /* Clear all flags */ + __HAL_JPEG_CLEAR_FLAG(hjpeg, JPEG_FLAG_ALL); + + /* init default quantization tables*/ + hjpeg->QuantTable0 = (uint8_t *)((uint32_t)JPEG_LUM_QuantTable); + 
hjpeg->QuantTable1 = (uint8_t *)((uint32_t)JPEG_CHROM_QuantTable); + hjpeg->QuantTable2 = NULL; + hjpeg->QuantTable3 = NULL; + + /* init the default Huffman tables*/ + if (JPEG_Set_HuffEnc_Mem(hjpeg) != HAL_OK) + { + hjpeg->ErrorCode = HAL_JPEG_ERROR_HUFF_TABLE; + + return HAL_ERROR; + } + + /* Enable header processing*/ + hjpeg->Instance->CONFR1 |= JPEG_CONFR1_HDR; + + /* Reset JpegInCount and JpegOutCount */ + hjpeg->JpegInCount = 0; + hjpeg->JpegOutCount = 0; + + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_READY; + + /* Reset the JPEG ErrorCode */ + hjpeg->ErrorCode = HAL_JPEG_ERROR_NONE; + + /*Clear the context filelds*/ + hjpeg->Context = 0; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief DeInitializes the JPEG peripheral. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_DeInit(JPEG_HandleTypeDef *hjpeg) +{ + /* Check the JPEG handle allocation */ + if (hjpeg == NULL) + { + return HAL_ERROR; + } + +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + if (hjpeg->MspDeInitCallback == NULL) + { + hjpeg->MspDeInitCallback = HAL_JPEG_MspDeInit; /* Legacy weak MspDeInit */ + } + + /* DeInit the low level hardware */ + hjpeg->MspDeInitCallback(hjpeg); + +#else + /* DeInit the low level hardware: CLOCK, NVIC.*/ + HAL_JPEG_MspDeInit(hjpeg); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_BUSY; + + /* Reset the JPEG ErrorCode */ + hjpeg->ErrorCode = HAL_JPEG_ERROR_NONE; + + /* Reset JpegInCount and JpegOutCount */ + hjpeg->JpegInCount = 0; + hjpeg->JpegOutCount = 0; + + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_RESET; + + /*Clear the context fields*/ + hjpeg->Context = 0; + + /* Release Lock */ + __HAL_UNLOCK(hjpeg); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the JPEG MSP. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +__weak void HAL_JPEG_MspInit(JPEG_HandleTypeDef *hjpeg) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hjpeg); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_JPEG_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes JPEG MSP. 
+ * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +__weak void HAL_JPEG_MspDeInit(JPEG_HandleTypeDef *hjpeg) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hjpeg); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_JPEG_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User JPEG Callback + * To be used instead of the weak predefined callback + * @param hjpeg JPEG handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_JPEG_ENCODE_CPLT_CB_ID Encode Complete callback ID + * @arg @ref HAL_JPEG_DECODE_CPLT_CB_ID Decode Complete callback ID + * @arg @ref HAL_JPEG_ERROR_CB_ID Error callback ID + * @arg @ref HAL_JPEG_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_JPEG_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_RegisterCallback(JPEG_HandleTypeDef *hjpeg, HAL_JPEG_CallbackIDTypeDef CallbackID, + pJPEG_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (HAL_JPEG_STATE_READY == hjpeg->State) + { + switch (CallbackID) + { + case HAL_JPEG_ENCODE_CPLT_CB_ID : + hjpeg->EncodeCpltCallback = pCallback; + break; + + case HAL_JPEG_DECODE_CPLT_CB_ID : + hjpeg->DecodeCpltCallback = pCallback; + break; + + case HAL_JPEG_ERROR_CB_ID : + hjpeg->ErrorCallback = pCallback; + break; + + case HAL_JPEG_MSPINIT_CB_ID : + hjpeg->MspInitCallback = pCallback; + break; + + case HAL_JPEG_MSPDEINIT_CB_ID : + hjpeg->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_JPEG_STATE_RESET == hjpeg->State) + { + switch (CallbackID) + { + case HAL_JPEG_MSPINIT_CB_ID : + hjpeg->MspInitCallback = pCallback; + break; + + case HAL_JPEG_MSPDEINIT_CB_ID : + hjpeg->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hjpeg); + return status; +} + +/** + * @brief Unregister a JPEG Callback + * JPEG callabck is redirected to the weak predefined callback + * @param hjpeg JPEG handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * This parameter can be one of the following values: + * @arg @ref HAL_JPEG_ENCODE_CPLT_CB_ID Encode Complete callback ID + * @arg @ref HAL_JPEG_DECODE_CPLT_CB_ID Decode Complete callback ID + * @arg @ref HAL_JPEG_ERROR_CB_ID Error callback ID + * @arg @ref HAL_JPEG_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_JPEG_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_UnRegisterCallback(JPEG_HandleTypeDef *hjpeg, HAL_JPEG_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = 
HAL_OK; + + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (HAL_JPEG_STATE_READY == hjpeg->State) + { + switch (CallbackID) + { + case HAL_JPEG_ENCODE_CPLT_CB_ID : + hjpeg->EncodeCpltCallback = HAL_JPEG_EncodeCpltCallback; /* Legacy weak EncodeCpltCallback */ + break; + + case HAL_JPEG_DECODE_CPLT_CB_ID : + hjpeg->DecodeCpltCallback = HAL_JPEG_DecodeCpltCallback; /* Legacy weak DecodeCpltCallback */ + break; + + case HAL_JPEG_ERROR_CB_ID : + hjpeg->ErrorCallback = HAL_JPEG_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_JPEG_MSPINIT_CB_ID : + hjpeg->MspInitCallback = HAL_JPEG_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_JPEG_MSPDEINIT_CB_ID : + hjpeg->MspDeInitCallback = HAL_JPEG_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_JPEG_STATE_RESET == hjpeg->State) + { + switch (CallbackID) + { + case HAL_JPEG_MSPINIT_CB_ID : + hjpeg->MspInitCallback = HAL_JPEG_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_JPEG_MSPDEINIT_CB_ID : + hjpeg->MspDeInitCallback = HAL_JPEG_MspDeInit; /* Legacy weak MspInit */ + break; + + default : + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hjpeg); + return status; +} + +/** + * @brief Register Info Ready JPEG Callback + * To be used instead of the weak HAL_JPEG_InfoReadyCallback() predefined callback + * @param hjpeg JPEG handle + * @param pCallback pointer to the Info Ready Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_RegisterInfoReadyCallback(JPEG_HandleTypeDef *hjpeg, + pJPEG_InfoReadyCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (HAL_JPEG_STATE_READY == hjpeg->State) + { + hjpeg->InfoReadyCallback = pCallback; + } + else + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hjpeg); + return status; +} + +/** + * @brief UnRegister the Info Ready JPEG Callback + * Info Ready JPEG Callback is redirected to the weak HAL_JPEG_InfoReadyCallback() predefined callback + * @param hjpeg JPEG handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_UnRegisterInfoReadyCallback(JPEG_HandleTypeDef *hjpeg) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (HAL_JPEG_STATE_READY == hjpeg->State) + { + hjpeg->InfoReadyCallback = HAL_JPEG_InfoReadyCallback; /* Legacy weak InfoReadyCallback */ + } + else + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hjpeg); + return status; +} + +/** + * @brief Register Get Data JPEG Callback + * To be used instead of the weak HAL_JPEG_GetDataCallback() predefined callback + * @param hjpeg JPEG handle + * @param pCallback pointer to the Get Data Callback function + * @retval HAL status + */ 
+HAL_StatusTypeDef HAL_JPEG_RegisterGetDataCallback(JPEG_HandleTypeDef *hjpeg, pJPEG_GetDataCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (HAL_JPEG_STATE_READY == hjpeg->State) + { + hjpeg->GetDataCallback = pCallback; + } + else + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hjpeg); + return status; +} + +/** + * @brief UnRegister the Get Data JPEG Callback + * Get Data JPEG Callback is redirected to the weak HAL_JPEG_GetDataCallback() predefined callback + * @param hjpeg JPEG handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_UnRegisterGetDataCallback(JPEG_HandleTypeDef *hjpeg) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (HAL_JPEG_STATE_READY == hjpeg->State) + { + hjpeg->GetDataCallback = HAL_JPEG_GetDataCallback; /* Legacy weak GetDataCallback */ + } + else + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hjpeg); + return status; +} + +/** + * @brief Register Data Ready JPEG Callback + * To be used instead of the weak HAL_JPEG_DataReadyCallback() predefined callback + * @param hjpeg JPEG handle + * @param pCallback pointer to the Get Data Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_RegisterDataReadyCallback(JPEG_HandleTypeDef *hjpeg, + pJPEG_DataReadyCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (HAL_JPEG_STATE_READY == hjpeg->State) + { + hjpeg->DataReadyCallback = pCallback; + } + else + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hjpeg); + return status; +} + +/** + * @brief UnRegister the Data Ready JPEG Callback + * Get Data Ready Callback is redirected to the weak HAL_JPEG_DataReadyCallback() predefined callback + * @param hjpeg JPEG handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_UnRegisterDataReadyCallback(JPEG_HandleTypeDef *hjpeg) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (HAL_JPEG_STATE_READY == hjpeg->State) + { + hjpeg->DataReadyCallback = HAL_JPEG_DataReadyCallback; /* Legacy weak DataReadyCallback */ + } + else + { + /* Update the error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hjpeg); + return status; +} + +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup JPEG_Exported_Functions_Group2 Configuration functions + * @brief JPEG Configuration functions. + * +@verbatim + ============================================================================== + ##### Configuration functions ##### + ============================================================================== + [..] 
This section provides functions allowing to: + (+) HAL_JPEG_ConfigEncoding() : JPEG encoding configuration + (+) HAL_JPEG_GetInfo() : Extract the image configuration from the JPEG header during the decoding + (+) HAL_JPEG_EnableHeaderParsing() : Enable JPEG Header parsing for decoding + (+) HAL_JPEG_DisableHeaderParsing() : Disable JPEG Header parsing for decoding + (+) HAL_JPEG_SetUserQuantTables : Modify the default Quantization tables used for JPEG encoding. + +@endverbatim + * @{ + */ + +/** + * @brief Set the JPEG encoding configuration. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param pConf pointer to a JPEG_ConfTypeDef structure that contains + * the encoding configuration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_ConfigEncoding(JPEG_HandleTypeDef *hjpeg, JPEG_ConfTypeDef *pConf) +{ + uint32_t error; + uint32_t numberMCU, hfactor, vfactor, hMCU, vMCU; + + /* Check the JPEG handle allocation */ + if ((hjpeg == NULL) || (pConf == NULL)) + { + return HAL_ERROR; + } + else + { + /* Check the parameters */ + assert_param(IS_JPEG_COLORSPACE(pConf->ColorSpace)); + assert_param(IS_JPEG_CHROMASUBSAMPLING(pConf->ChromaSubsampling)); + assert_param(IS_JPEG_IMAGE_QUALITY(pConf->ImageQuality)); + + /* Process Locked */ + __HAL_LOCK(hjpeg); + + if (hjpeg->State == HAL_JPEG_STATE_READY) + { + hjpeg->State = HAL_JPEG_STATE_BUSY; + + hjpeg->Conf.ColorSpace = pConf->ColorSpace; + hjpeg->Conf.ChromaSubsampling = pConf->ChromaSubsampling; + hjpeg->Conf.ImageHeight = pConf->ImageHeight; + hjpeg->Conf.ImageWidth = pConf->ImageWidth; + hjpeg->Conf.ImageQuality = pConf->ImageQuality; + + /* Reset the Color Space : by default only one quantization table is used*/ + hjpeg->Instance->CONFR1 &= ~JPEG_CONFR1_COLORSPACE; + + /* Set Number of color components*/ + if (hjpeg->Conf.ColorSpace == JPEG_GRAYSCALE_COLORSPACE) + { + /*Gray Scale is only one component 8x8 blocks i.e 4:4:4*/ + hjpeg->Conf.ChromaSubsampling = JPEG_444_SUBSAMPLING; + + JPEG_SetColorGrayScale(hjpeg); + /* Set quantization table 0*/ + error = JPEG_Set_Quantization_Mem(hjpeg, hjpeg->QuantTable0, (hjpeg->Instance->QMEM0)); + } + else if (hjpeg->Conf.ColorSpace == JPEG_YCBCR_COLORSPACE) + { + /* + Set the Color Space for YCbCr : 2 quantization tables are used + one for Luminance(Y) and one for both Chrominances (Cb & Cr) + */ + hjpeg->Instance->CONFR1 |= JPEG_CONFR1_COLORSPACE_0; + + JPEG_SetColorYCBCR(hjpeg); + + /* Set quantization table 0*/ + error = JPEG_Set_Quantization_Mem(hjpeg, hjpeg->QuantTable0, (hjpeg->Instance->QMEM0)); + /*By default quantization table 0 for component 0 and quantization table 1 for both components 1 and 2*/ + error |= JPEG_Set_Quantization_Mem(hjpeg, hjpeg->QuantTable1, (hjpeg->Instance->QMEM1)); + + if ((hjpeg->Context & JPEG_CONTEXT_CUSTOM_TABLES) != 0UL) + { + /*Use user customized quantization tables , 1 table per component*/ + /* use 3 quantization tables , one for each component*/ + hjpeg->Instance->CONFR1 &= (~JPEG_CONFR1_COLORSPACE); + hjpeg->Instance->CONFR1 |= JPEG_CONFR1_COLORSPACE_1; + + error |= JPEG_Set_Quantization_Mem(hjpeg, hjpeg->QuantTable2, (hjpeg->Instance->QMEM2)); + + /*Use Quantization 1 table for component 1*/ + hjpeg->Instance->CONFR5 &= (~JPEG_CONFR5_QT); + hjpeg->Instance->CONFR5 |= JPEG_CONFR5_QT_0; + + /*Use Quantization 2 table for component 2*/ + hjpeg->Instance->CONFR6 &= (~JPEG_CONFR6_QT); + hjpeg->Instance->CONFR6 |= JPEG_CONFR6_QT_1; + } + } + else /* ColorSpace == 
JPEG_CMYK_COLORSPACE */ + { + JPEG_SetColorCMYK(hjpeg); + + /* Set quantization table 0*/ + error = JPEG_Set_Quantization_Mem(hjpeg, hjpeg->QuantTable0, (hjpeg->Instance->QMEM0)); + /*By default quantization table 0 for All components*/ + + if ((hjpeg->Context & JPEG_CONTEXT_CUSTOM_TABLES) != 0UL) + { + /*Use user customized quantization tables , 1 table per component*/ + /* use 4 quantization tables , one for each component*/ + hjpeg->Instance->CONFR1 |= JPEG_CONFR1_COLORSPACE; + + error |= JPEG_Set_Quantization_Mem(hjpeg, hjpeg->QuantTable1, (hjpeg->Instance->QMEM1)); + error |= JPEG_Set_Quantization_Mem(hjpeg, hjpeg->QuantTable2, (hjpeg->Instance->QMEM2)); + error |= JPEG_Set_Quantization_Mem(hjpeg, hjpeg->QuantTable3, (hjpeg->Instance->QMEM3)); + + /*Use Quantization 1 table for component 1*/ + hjpeg->Instance->CONFR5 |= JPEG_CONFR5_QT_0; + + /*Use Quantization 2 table for component 2*/ + hjpeg->Instance->CONFR6 |= JPEG_CONFR6_QT_1; + + /*Use Quantization 3 table for component 3*/ + hjpeg->Instance->CONFR7 |= JPEG_CONFR7_QT; + } + } + + if (error != 0UL) + { + hjpeg->ErrorCode = HAL_JPEG_ERROR_QUANT_TABLE; + + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /* Set the JPEG State to ready */ + hjpeg->State = HAL_JPEG_STATE_READY; + + return HAL_ERROR; + } + /* Set the image size*/ + /* set the number of lines*/ + MODIFY_REG(hjpeg->Instance->CONFR1, JPEG_CONFR1_YSIZE, ((hjpeg->Conf.ImageHeight & 0x0000FFFFUL) << 16)); + /* set the number of pixels per line*/ + MODIFY_REG(hjpeg->Instance->CONFR3, JPEG_CONFR3_XSIZE, ((hjpeg->Conf.ImageWidth & 0x0000FFFFUL) << 16)); + + + if (hjpeg->Conf.ChromaSubsampling == JPEG_420_SUBSAMPLING) /* 4:2:0*/ + { + hfactor = 16; + vfactor = 16; + } + else if (hjpeg->Conf.ChromaSubsampling == JPEG_422_SUBSAMPLING) /* 4:2:2*/ + { + hfactor = 16; + vfactor = 8; + } + else /* Default is 8x8 MCU, 4:4:4*/ + { + hfactor = 8; + vfactor = 8; + } + + hMCU = (hjpeg->Conf.ImageWidth / hfactor); + if ((hjpeg->Conf.ImageWidth % hfactor) != 0UL) + { + hMCU++; /*+1 for horizontal incomplete MCU */ + } + + vMCU = (hjpeg->Conf.ImageHeight / vfactor); + if ((hjpeg->Conf.ImageHeight % vfactor) != 0UL) + { + vMCU++; /*+1 for vertical incomplete MCU */ + } + + numberMCU = (hMCU * vMCU) - 1UL; /* Bit Field JPEG_CONFR2_NMCU shall be set to NB_MCU - 1*/ + /* Set the number of MCU*/ + hjpeg->Instance->CONFR2 = (numberMCU & JPEG_CONFR2_NMCU); + + hjpeg->Context |= JPEG_CONTEXT_CONF_ENCODING; + + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /* Set the JPEG State to ready */ + hjpeg->State = HAL_JPEG_STATE_READY; + + /* Return function status */ + return HAL_OK; + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /* Return function status */ + return HAL_BUSY; + } + } +} + +/** + * @brief Extract the image configuration from the JPEG header during the decoding + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param pInfo pointer to a JPEG_ConfTypeDef structure that contains + * The JPEG decoded header informations + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_GetInfo(JPEG_HandleTypeDef *hjpeg, JPEG_ConfTypeDef *pInfo) +{ + uint32_t yblockNb, cBblockNb, cRblockNb; + + /* Check the JPEG handle allocation */ + if ((hjpeg == NULL) || (pInfo == NULL)) + { + return HAL_ERROR; + } + + /*Read the conf parameters */ + if ((hjpeg->Instance->CONFR1 & JPEG_CONFR1_NF) == JPEG_CONFR1_NF_1) + { + pInfo->ColorSpace = JPEG_YCBCR_COLORSPACE; + } + else if ((hjpeg->Instance->CONFR1 & 
JPEG_CONFR1_NF) == 0UL) + { + pInfo->ColorSpace = JPEG_GRAYSCALE_COLORSPACE; + } + else if ((hjpeg->Instance->CONFR1 & JPEG_CONFR1_NF) == JPEG_CONFR1_NF) + { + pInfo->ColorSpace = JPEG_CMYK_COLORSPACE; + } + else + { + return HAL_ERROR; + } + + pInfo->ImageHeight = (hjpeg->Instance->CONFR1 & 0xFFFF0000UL) >> 16; + pInfo->ImageWidth = (hjpeg->Instance->CONFR3 & 0xFFFF0000UL) >> 16; + + if ((pInfo->ColorSpace == JPEG_YCBCR_COLORSPACE) || (pInfo->ColorSpace == JPEG_CMYK_COLORSPACE)) + { + yblockNb = (hjpeg->Instance->CONFR4 & JPEG_CONFR4_NB) >> 4; + cBblockNb = (hjpeg->Instance->CONFR5 & JPEG_CONFR5_NB) >> 4; + cRblockNb = (hjpeg->Instance->CONFR6 & JPEG_CONFR6_NB) >> 4; + + if ((yblockNb == 1UL) && (cBblockNb == 0UL) && (cRblockNb == 0UL)) + { + pInfo->ChromaSubsampling = JPEG_422_SUBSAMPLING; /*16x8 block*/ + } + else if ((yblockNb == 0UL) && (cBblockNb == 0UL) && (cRblockNb == 0UL)) + { + pInfo->ChromaSubsampling = JPEG_444_SUBSAMPLING; + } + else if ((yblockNb == 3UL) && (cBblockNb == 0UL) && (cRblockNb == 0UL)) + { + pInfo->ChromaSubsampling = JPEG_420_SUBSAMPLING; + } + else /*Default is 4:4:4*/ + { + pInfo->ChromaSubsampling = JPEG_444_SUBSAMPLING; + } + } + else + { + pInfo->ChromaSubsampling = JPEG_444_SUBSAMPLING; + } + + pInfo->ImageQuality = JPEG_GetQuality(hjpeg); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Enable JPEG Header parsing for decoding + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for the JPEG. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_EnableHeaderParsing(JPEG_HandleTypeDef *hjpeg) +{ + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (hjpeg->State == HAL_JPEG_STATE_READY) + { + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_BUSY; + + /* Enable header processing*/ + hjpeg->Instance->CONFR1 |= JPEG_CONFR1_HDR; + + /* Process unlocked */ + __HAL_UNLOCK(hjpeg); + + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_READY; + + return HAL_OK; + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_BUSY; + } +} + +/** + * @brief Disable JPEG Header parsing for decoding + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for the JPEG. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_DisableHeaderParsing(JPEG_HandleTypeDef *hjpeg) +{ + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (hjpeg->State == HAL_JPEG_STATE_READY) + { + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_BUSY; + + /* Disable header processing*/ + hjpeg->Instance->CONFR1 &= ~JPEG_CONFR1_HDR; + + /* Process unlocked */ + __HAL_UNLOCK(hjpeg); + + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_READY; + + return HAL_OK; + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_BUSY; + } +} + +/** + * @brief Modify the default Quantization tables used for JPEG encoding. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param QTable0 pointer to uint8_t , define the user quantification table for color component 1. + * If NULL assume no need to update the table and no error return + * @param QTable1 pointer to uint8_t , define the user quantification table for color component 2. + * If NULL assume no need to update the table and no error return. 
+ * @param QTable2 pointer to uint8_t , define the user quantification table for color component 3, + * If NULL assume no need to update the table and no error return. + * @param QTable3 pointer to uint8_t , define the user quantification table for color component 4. + * If NULL assume no need to update the table and no error return. + * + * @retval HAL status + */ + + +HAL_StatusTypeDef HAL_JPEG_SetUserQuantTables(JPEG_HandleTypeDef *hjpeg, uint8_t *QTable0, uint8_t *QTable1, + uint8_t *QTable2, uint8_t *QTable3) +{ + /* Process Locked */ + __HAL_LOCK(hjpeg); + + if (hjpeg->State == HAL_JPEG_STATE_READY) + { + /* Change the DMA state */ + hjpeg->State = HAL_JPEG_STATE_BUSY; + + hjpeg->Context |= JPEG_CONTEXT_CUSTOM_TABLES; + + hjpeg->QuantTable0 = QTable0; + hjpeg->QuantTable1 = QTable1; + hjpeg->QuantTable2 = QTable2; + hjpeg->QuantTable3 = QTable3; + + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /* Change the DMA state */ + hjpeg->State = HAL_JPEG_STATE_READY; + + /* Return function status */ + return HAL_OK; + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_BUSY; + } +} + +/** + * @} + */ + +/** @defgroup JPEG_Exported_Functions_Group3 encoding/decoding processing functions + * @brief processing functions. + * +@verbatim + ============================================================================== + ##### JPEG processing functions ##### + ============================================================================== + [..] This section provides functions allowing to: + (+) HAL_JPEG_Encode() : JPEG encoding with polling process + (+) HAL_JPEG_Decode() : JPEG decoding with polling process + (+) HAL_JPEG_Encode_IT() : JPEG encoding with interrupt process + (+) HAL_JPEG_Decode_IT() : JPEG decoding with interrupt process + (+) HAL_JPEG_Encode_DMA() : JPEG encoding with DMA process + (+) HAL_JPEG_Decode_DMA() : JPEG decoding with DMA process + (+) HAL_JPEG_Pause() : Pause the Input/Output processing + (+) HAL_JPEG_Resume() : Resume the JPEG Input/Output processing + (+) HAL_JPEG_ConfigInputBuffer() : Config Encoding/Decoding Input Buffer + (+) HAL_JPEG_ConfigOutputBuffer() : Config Encoding/Decoding Output Buffer + (+) HAL_JPEG_Abort() : Aborts the JPEG Encoding/Decoding + +@endverbatim + * @{ + */ + +/** + * @brief Starts JPEG encoding with polling processing + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param pDataInMCU Pointer to the Input buffer + * @param InDataLength size in bytes Input buffer + * @param pDataOut Pointer to the jpeg output data buffer + * @param OutDataLength size in bytes of the Output buffer + * @param Timeout Specify Timeout value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_Encode(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataInMCU, uint32_t InDataLength, + uint8_t *pDataOut, uint32_t OutDataLength, uint32_t Timeout) +{ + uint32_t tickstart; + + /* Check the parameters */ + assert_param((InDataLength >= 4UL)); + assert_param((OutDataLength >= 4UL)); + + /* Check In/out buffer allocation and size */ + if ((hjpeg == NULL) || (pDataInMCU == NULL) || (pDataOut == NULL)) + { + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hjpeg); + + if (hjpeg->State != HAL_JPEG_STATE_READY) + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_BUSY; + } + + if (hjpeg->State == HAL_JPEG_STATE_READY) + { + if ((hjpeg->Context & JPEG_CONTEXT_CONF_ENCODING) == JPEG_CONTEXT_CONF_ENCODING) + { + /*Change JPEG state*/ + hjpeg->State = 
HAL_JPEG_STATE_BUSY_ENCODING; + + /*Set the Context to Encode with Polling*/ + hjpeg->Context &= ~(JPEG_CONTEXT_OPERATION_MASK | JPEG_CONTEXT_METHOD_MASK); + hjpeg->Context |= (JPEG_CONTEXT_ENCODE | JPEG_CONTEXT_POLLING); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /*Store In/out buffers pointers and size*/ + hjpeg->pJpegInBuffPtr = pDataInMCU; + hjpeg->pJpegOutBuffPtr = pDataOut; + hjpeg->InDataLength = InDataLength - (InDataLength % 4UL); /* In Data length must be multiple of 4 Bytes (1 word)*/ + hjpeg->OutDataLength = OutDataLength - (OutDataLength % 4UL); /* Out Data length must be multiple of 4 Bytes (1 word)*/ + + /*Reset In/out data counter */ + hjpeg->JpegInCount = 0; + hjpeg->JpegOutCount = 0; + + /*Init decoding process*/ + JPEG_Init_Process(hjpeg); + + /*JPEG data processing : In/Out FIFO transfer*/ + while ((JPEG_Process(hjpeg) == JPEG_PROCESS_ONGOING)) + { + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0UL)) + { + + /* Update error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /*Change JPEG state*/ + hjpeg->State = HAL_JPEG_STATE_READY; + + return HAL_TIMEOUT; + } + } + } + + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /*Change JPEG state*/ + hjpeg->State = HAL_JPEG_STATE_READY; + + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_ERROR; + } + } + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts JPEG decoding with polling processing + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param pDataIn Pointer to the input data buffer + * @param InDataLength size in bytes Input buffer + * @param pDataOutMCU Pointer to the Output data buffer + * @param OutDataLength size in bytes of the Output buffer + * @param Timeout Specify Timeout value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_Decode(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataIn, uint32_t InDataLength, + uint8_t *pDataOutMCU, uint32_t OutDataLength, uint32_t Timeout) +{ + uint32_t tickstart; + + /* Check the parameters */ + assert_param((InDataLength >= 4UL)); + assert_param((OutDataLength >= 4UL)); + + /* Check In/out buffer allocation and size */ + if ((hjpeg == NULL) || (pDataIn == NULL) || (pDataOutMCU == NULL)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hjpeg); + + /* Get tick */ + tickstart = HAL_GetTick(); + + if (hjpeg->State == HAL_JPEG_STATE_READY) + { + /*Change JPEG state*/ + hjpeg->State = HAL_JPEG_STATE_BUSY_DECODING; + + /*Set the Context to Decode with Polling*/ + /*Set the Context to Encode with Polling*/ + hjpeg->Context &= ~(JPEG_CONTEXT_OPERATION_MASK | JPEG_CONTEXT_METHOD_MASK); + hjpeg->Context |= (JPEG_CONTEXT_DECODE | JPEG_CONTEXT_POLLING); + + /*Store In/out buffers pointers and size*/ + hjpeg->pJpegInBuffPtr = pDataIn; + hjpeg->pJpegOutBuffPtr = pDataOutMCU; + hjpeg->InDataLength = InDataLength - (InDataLength % 4UL); /*In Data length must be multiple of 4 Bytes (1 word)*/ + hjpeg->OutDataLength = OutDataLength - (OutDataLength % 4UL); /*Out Data length must be multiple of 4 Bytes (1 word)*/ + + /*Reset In/out data counter */ + hjpeg->JpegInCount = 0; + hjpeg->JpegOutCount = 0; + + /*Init decoding process*/ + JPEG_Init_Process(hjpeg); + + /*JPEG data processing : In/Out FIFO transfer*/ + while ((JPEG_Process(hjpeg) == JPEG_PROCESS_ONGOING)) + { + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tickstart) > 
Timeout) || (Timeout == 0UL)) + { + + /* Update error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /*Change JPEG state*/ + hjpeg->State = HAL_JPEG_STATE_READY; + + return HAL_TIMEOUT; + } + } + } + + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /*Change JPEG state*/ + hjpeg->State = HAL_JPEG_STATE_READY; + + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_BUSY; + } + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts JPEG encoding with interrupt processing + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param pDataInMCU Pointer to the Input buffer + * @param InDataLength size in bytes Input buffer + * @param pDataOut Pointer to the jpeg output data buffer + * @param OutDataLength size in bytes of the Output buffer + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_Encode_IT(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataInMCU, uint32_t InDataLength, + uint8_t *pDataOut, uint32_t OutDataLength) +{ + /* Check the parameters */ + assert_param((InDataLength >= 4UL)); + assert_param((OutDataLength >= 4UL)); + + /* Check In/out buffer allocation and size */ + if ((hjpeg == NULL) || (pDataInMCU == NULL) || (pDataOut == NULL)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hjpeg); + + if (hjpeg->State != HAL_JPEG_STATE_READY) + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_BUSY; + } + else + { + if ((hjpeg->Context & JPEG_CONTEXT_CONF_ENCODING) == JPEG_CONTEXT_CONF_ENCODING) + { + /*Change JPEG state*/ + hjpeg->State = HAL_JPEG_STATE_BUSY_ENCODING; + + /*Set the Context to Encode with IT*/ + hjpeg->Context &= ~(JPEG_CONTEXT_OPERATION_MASK | JPEG_CONTEXT_METHOD_MASK); + hjpeg->Context |= (JPEG_CONTEXT_ENCODE | JPEG_CONTEXT_IT); + + /*Store In/out buffers pointers and size*/ + hjpeg->pJpegInBuffPtr = pDataInMCU; + hjpeg->pJpegOutBuffPtr = pDataOut; + hjpeg->InDataLength = InDataLength - (InDataLength % 4UL); /*In Data length must be multiple of 4 Bytes (1 word)*/ + hjpeg->OutDataLength = OutDataLength - (OutDataLength % 4UL); /*Out Data length must be multiple of 4 Bytes (1 word)*/ + + /*Reset In/out data counter */ + hjpeg->JpegInCount = 0; + hjpeg->JpegOutCount = 0; + + /*Init decoding process*/ + JPEG_Init_Process(hjpeg); + + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_ERROR; + } + } + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts JPEG decoding with interrupt processing + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param pDataIn Pointer to the input data buffer + * @param InDataLength size in bytes Input buffer + * @param pDataOutMCU Pointer to the Output data buffer + * @param OutDataLength size in bytes of the Output buffer + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_Decode_IT(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataIn, uint32_t InDataLength, + uint8_t *pDataOutMCU, uint32_t OutDataLength) +{ + /* Check the parameters */ + assert_param((InDataLength >= 4UL)); + assert_param((OutDataLength >= 4UL)); + + /* Check In/out buffer allocation and size */ + if ((hjpeg == NULL) || (pDataIn == NULL) || (pDataOutMCU == NULL)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hjpeg); + + if (hjpeg->State == HAL_JPEG_STATE_READY) + { + /*Change JPEG state*/ + hjpeg->State = HAL_JPEG_STATE_BUSY_DECODING; + + 
/*Set the Context to Decode with IT*/ + hjpeg->Context &= ~(JPEG_CONTEXT_OPERATION_MASK | JPEG_CONTEXT_METHOD_MASK); + hjpeg->Context |= (JPEG_CONTEXT_DECODE | JPEG_CONTEXT_IT); + + /*Store In/out buffers pointers and size*/ + hjpeg->pJpegInBuffPtr = pDataIn; + hjpeg->pJpegOutBuffPtr = pDataOutMCU; + hjpeg->InDataLength = InDataLength - (InDataLength % 4UL); /*In Data length must be multiple of 4 Bytes (1 word)*/ + hjpeg->OutDataLength = OutDataLength - (OutDataLength % 4UL); /*Out Data length must be multiple of 4 Bytes (1 word)*/ + + /*Reset In/out data counter */ + hjpeg->JpegInCount = 0; + hjpeg->JpegOutCount = 0; + + /*Init decoding process*/ + JPEG_Init_Process(hjpeg); + + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_BUSY; + } + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts JPEG encoding with DMA processing + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param pDataInMCU Pointer to the Input buffer + * @param InDataLength size in bytes Input buffer + * @param pDataOut Pointer to the jpeg output data buffer + * @param OutDataLength size in bytes of the Output buffer + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_Encode_DMA(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataInMCU, uint32_t InDataLength, + uint8_t *pDataOut, uint32_t OutDataLength) +{ + /* Check the parameters */ + assert_param((InDataLength >= 4UL)); + assert_param((OutDataLength >= 4UL)); + + /* Check In/out buffer allocation and size */ + if ((hjpeg == NULL) || (pDataInMCU == NULL) || (pDataOut == NULL)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hjpeg); + + if (hjpeg->State != HAL_JPEG_STATE_READY) + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_BUSY; + } + else + { + if ((hjpeg->Context & JPEG_CONTEXT_CONF_ENCODING) == JPEG_CONTEXT_CONF_ENCODING) + { + /*Change JPEG state*/ + hjpeg->State = HAL_JPEG_STATE_BUSY_ENCODING; + + /*Set the Context to Encode with DMA*/ + hjpeg->Context &= ~(JPEG_CONTEXT_OPERATION_MASK | JPEG_CONTEXT_METHOD_MASK); + hjpeg->Context |= (JPEG_CONTEXT_ENCODE | JPEG_CONTEXT_DMA); + + /*Store In/out buffers pointers and size*/ + hjpeg->pJpegInBuffPtr = pDataInMCU; + hjpeg->pJpegOutBuffPtr = pDataOut; + hjpeg->InDataLength = InDataLength; + hjpeg->OutDataLength = OutDataLength; + + /*Reset In/out data counter */ + hjpeg->JpegInCount = 0; + hjpeg->JpegOutCount = 0; + + /*Init decoding process*/ + JPEG_Init_Process(hjpeg); + + /* JPEG encoding process using DMA */ + if (JPEG_DMA_StartProcess(hjpeg) != HAL_OK) + { + /* Update State */ + hjpeg->State = HAL_JPEG_STATE_ERROR; + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_ERROR; + } + + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_ERROR; + } + } + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts JPEG decoding with DMA processing + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param pDataIn Pointer to the input data buffer + * @param InDataLength size in bytes Input buffer + * @param pDataOutMCU Pointer to the Output data buffer + * @param OutDataLength size in bytes of the Output buffer + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_Decode_DMA(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataIn, uint32_t InDataLength, + uint8_t *pDataOutMCU, uint32_t OutDataLength) +{ + /* Check the parameters */ + assert_param((InDataLength 
>= 4UL)); + assert_param((OutDataLength >= 4UL)); + + /* Check In/out buffer allocation and size */ + if ((hjpeg == NULL) || (pDataIn == NULL) || (pDataOutMCU == NULL)) + { + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hjpeg); + + if (hjpeg->State == HAL_JPEG_STATE_READY) + { + /*Change JPEG state*/ + hjpeg->State = HAL_JPEG_STATE_BUSY_DECODING; + + /*Set the Context to Decode with DMA*/ + hjpeg->Context &= ~(JPEG_CONTEXT_OPERATION_MASK | JPEG_CONTEXT_METHOD_MASK); + hjpeg->Context |= (JPEG_CONTEXT_DECODE | JPEG_CONTEXT_DMA); + + /*Store In/out buffers pointers and size*/ + hjpeg->pJpegInBuffPtr = pDataIn; + hjpeg->pJpegOutBuffPtr = pDataOutMCU; + hjpeg->InDataLength = InDataLength; + hjpeg->OutDataLength = OutDataLength; + + /*Reset In/out data counter */ + hjpeg->JpegInCount = 0; + hjpeg->JpegOutCount = 0; + + /*Init decoding process*/ + JPEG_Init_Process(hjpeg); + + /* JPEG decoding process using DMA */ + if (JPEG_DMA_StartProcess(hjpeg) != HAL_OK) + { + /* Update State */ + hjpeg->State = HAL_JPEG_STATE_ERROR; + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_ERROR; + } + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + return HAL_BUSY; + } + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Pause the JPEG Input/Output processing + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param XferSelection This parameter can be one of the following values : + * JPEG_PAUSE_RESUME_INPUT : Pause Input processing + * JPEG_PAUSE_RESUME_OUTPUT: Pause Output processing + * JPEG_PAUSE_RESUME_INPUT_OUTPUT: Pause Input and Output processing + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_Pause(JPEG_HandleTypeDef *hjpeg, uint32_t XferSelection) +{ + uint32_t mask = 0; + + assert_param(IS_JPEG_PAUSE_RESUME_STATE(XferSelection)); + + if ((hjpeg->Context & JPEG_CONTEXT_METHOD_MASK) == JPEG_CONTEXT_DMA) + { + if ((XferSelection & JPEG_PAUSE_RESUME_INPUT) == JPEG_PAUSE_RESUME_INPUT) + { + hjpeg->Context |= JPEG_CONTEXT_PAUSE_INPUT; + mask |= JPEG_DMA_IDMA; + } + if ((XferSelection & JPEG_PAUSE_RESUME_OUTPUT) == JPEG_PAUSE_RESUME_OUTPUT) + { + hjpeg->Context |= JPEG_CONTEXT_PAUSE_OUTPUT; + mask |= JPEG_DMA_ODMA; + } + JPEG_DISABLE_DMA(hjpeg, mask); + + } + else if ((hjpeg->Context & JPEG_CONTEXT_METHOD_MASK) == JPEG_CONTEXT_IT) + { + + if ((XferSelection & JPEG_PAUSE_RESUME_INPUT) == JPEG_PAUSE_RESUME_INPUT) + { + hjpeg->Context |= JPEG_CONTEXT_PAUSE_INPUT; + mask |= (JPEG_IT_IFT | JPEG_IT_IFNF); + } + if ((XferSelection & JPEG_PAUSE_RESUME_OUTPUT) == JPEG_PAUSE_RESUME_OUTPUT) + { + hjpeg->Context |= JPEG_CONTEXT_PAUSE_OUTPUT; + mask |= (JPEG_IT_OFT | JPEG_IT_OFNE | JPEG_IT_EOC); + } + __HAL_JPEG_DISABLE_IT(hjpeg, mask); + + } + else + { + /* Nothing to do */ + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Resume the JPEG Input/Output processing + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param XferSelection This parameter can be one of the following values : + * JPEG_PAUSE_RESUME_INPUT : Resume Input processing + * JPEG_PAUSE_RESUME_OUTPUT: Resume Output processing + * JPEG_PAUSE_RESUME_INPUT_OUTPUT: Resume Input and Output processing + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_Resume(JPEG_HandleTypeDef *hjpeg, uint32_t XferSelection) +{ + uint32_t mask = 0; + + assert_param(IS_JPEG_PAUSE_RESUME_STATE(XferSelection)); + + if ((hjpeg->Context & 
(JPEG_CONTEXT_PAUSE_INPUT | JPEG_CONTEXT_PAUSE_OUTPUT)) == 0UL) + { + /* if nothing paused to resume return error*/ + return HAL_ERROR; + } + + if ((hjpeg->Context & JPEG_CONTEXT_METHOD_MASK) == JPEG_CONTEXT_DMA) + { + + if ((XferSelection & JPEG_PAUSE_RESUME_INPUT) == JPEG_PAUSE_RESUME_INPUT) + { + hjpeg->Context &= (~JPEG_CONTEXT_PAUSE_INPUT); + mask |= JPEG_DMA_IDMA; + + /*JPEG Input DMA transfer data number must be multiple of DMA buffer size + as the destination is a 32 bits register */ + hjpeg->InDataLength = hjpeg->InDataLength - (hjpeg->InDataLength % 4UL); + + if (hjpeg->InDataLength > 0UL) + { + /* Start DMA FIFO In transfer */ + if (HAL_DMA_Start_IT(hjpeg->hdmain, (uint32_t)hjpeg->pJpegInBuffPtr, (uint32_t)&hjpeg->Instance->DIR, + hjpeg->InDataLength >> 2) != HAL_OK) + { + hjpeg->ErrorCode |= HAL_JPEG_ERROR_DMA; + hjpeg->State = HAL_JPEG_STATE_ERROR; + return HAL_ERROR; + } + } + } + if ((XferSelection & JPEG_PAUSE_RESUME_OUTPUT) == JPEG_PAUSE_RESUME_OUTPUT) + { + hjpeg->Context &= (~JPEG_CONTEXT_PAUSE_OUTPUT); + + if ((hjpeg->Context & JPEG_CONTEXT_ENDING_DMA) != 0UL) + { + JPEG_DMA_PollResidualData(hjpeg); + } + else + { + mask |= JPEG_DMA_ODMA; + + /* Start DMA FIFO Out transfer */ + if (HAL_DMA_Start_IT(hjpeg->hdmaout, (uint32_t)&hjpeg->Instance->DOR, (uint32_t)hjpeg->pJpegOutBuffPtr, + hjpeg->OutDataLength >> 2) != HAL_OK) + { + hjpeg->ErrorCode |= HAL_JPEG_ERROR_DMA; + hjpeg->State = HAL_JPEG_STATE_ERROR; + return HAL_ERROR; + } + } + + } + JPEG_ENABLE_DMA(hjpeg, mask); + + } + else if ((hjpeg->Context & JPEG_CONTEXT_METHOD_MASK) == JPEG_CONTEXT_IT) + { + if ((XferSelection & JPEG_PAUSE_RESUME_INPUT) == JPEG_PAUSE_RESUME_INPUT) + { + hjpeg->Context &= (~JPEG_CONTEXT_PAUSE_INPUT); + mask |= (JPEG_IT_IFT | JPEG_IT_IFNF); + } + if ((XferSelection & JPEG_PAUSE_RESUME_OUTPUT) == JPEG_PAUSE_RESUME_OUTPUT) + { + hjpeg->Context &= (~JPEG_CONTEXT_PAUSE_OUTPUT); + mask |= (JPEG_IT_OFT | JPEG_IT_OFNE | JPEG_IT_EOC); + } + __HAL_JPEG_ENABLE_IT(hjpeg, mask); + + } + else + { + /* Nothing to do */ + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Config Encoding/Decoding Input Buffer. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module. + * @param pNewInputBuffer Pointer to the new input data buffer + * @param InDataLength Size in bytes of the new Input data buffer + * @retval HAL status + */ +void HAL_JPEG_ConfigInputBuffer(JPEG_HandleTypeDef *hjpeg, uint8_t *pNewInputBuffer, uint32_t InDataLength) +{ + hjpeg->pJpegInBuffPtr = pNewInputBuffer; + hjpeg->InDataLength = InDataLength; +} + +/** + * @brief Config Encoding/Decoding Output Buffer. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module. + * @param pNewOutputBuffer Pointer to the new output data buffer + * @param OutDataLength Size in bytes of the new Output data buffer + * @retval HAL status + */ +void HAL_JPEG_ConfigOutputBuffer(JPEG_HandleTypeDef *hjpeg, uint8_t *pNewOutputBuffer, uint32_t OutDataLength) +{ + hjpeg->pJpegOutBuffPtr = pNewOutputBuffer; + hjpeg->OutDataLength = OutDataLength; +} + +/** + * @brief Aborts the JPEG Encoding/Decoding. 
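+ * @note  A minimal usage sketch (assuming an initialized JPEG handle named hjpeg;
+ *        error handling is application specific):
+ * @code
+ *   if (HAL_JPEG_GetState(&hjpeg) != HAL_JPEG_STATE_READY)
+ *   {
+ *     (void) HAL_JPEG_Abort(&hjpeg);  // stops the codec, disables IT/DMA requests and flushes the FIFOs
+ *   }
+ * @endcode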
+ * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_JPEG_Abort(JPEG_HandleTypeDef *hjpeg) +{ + uint32_t tickstart, tmpContext; + tmpContext = hjpeg->Context; + + /*Reset the Context operation and method*/ + hjpeg->Context &= ~(JPEG_CONTEXT_OPERATION_MASK | JPEG_CONTEXT_METHOD_MASK | JPEG_CONTEXT_ENDING_DMA); + + if ((tmpContext & JPEG_CONTEXT_METHOD_MASK) == JPEG_CONTEXT_DMA) + { + /* Stop the DMA In/out Xfer*/ + if (HAL_DMA_Abort(hjpeg->hdmaout) != HAL_OK) + { + if (hjpeg->hdmaout->ErrorCode == HAL_DMA_ERROR_TIMEOUT) + { + hjpeg->ErrorCode |= HAL_JPEG_ERROR_DMA; + } + } + if (HAL_DMA_Abort(hjpeg->hdmain) != HAL_OK) + { + if (hjpeg->hdmain->ErrorCode == HAL_DMA_ERROR_TIMEOUT) + { + hjpeg->ErrorCode |= HAL_JPEG_ERROR_DMA; + } + } + + } + + /* Stop the JPEG encoding/decoding process*/ + hjpeg->Instance->CONFR0 &= ~JPEG_CONFR0_START; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check if the JPEG Codec is effectively disabled */ + while (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_COF) != 0UL) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > JPEG_TIMEOUT_VALUE) + { + /* Update error code */ + hjpeg->ErrorCode |= HAL_JPEG_ERROR_TIMEOUT; + + /* Change the DMA state */ + hjpeg->State = HAL_JPEG_STATE_ERROR; + break; + } + } + + /* Disable All Interrupts */ + __HAL_JPEG_DISABLE_IT(hjpeg, JPEG_INTERRUPT_MASK); + + /* Disable All DMA requests */ + JPEG_DISABLE_DMA(hjpeg, JPEG_DMA_MASK); + + /* Flush input and output FIFOs*/ + hjpeg->Instance->CR |= JPEG_CR_IFF; + hjpeg->Instance->CR |= JPEG_CR_OFF; + + /* Clear all flags */ + __HAL_JPEG_CLEAR_FLAG(hjpeg, JPEG_FLAG_ALL); + + /* Reset JpegInCount and JpegOutCount */ + hjpeg->JpegInCount = 0; + hjpeg->JpegOutCount = 0; + + /*Reset the Context Pause*/ + hjpeg->Context &= ~(JPEG_CONTEXT_PAUSE_INPUT | JPEG_CONTEXT_PAUSE_OUTPUT); + + /* Change the DMA state*/ + if (hjpeg->ErrorCode != HAL_JPEG_ERROR_NONE) + { + hjpeg->State = HAL_JPEG_STATE_ERROR; + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + /* Return function status */ + return HAL_ERROR; + } + else + { + hjpeg->State = HAL_JPEG_STATE_READY; + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + /* Return function status */ + return HAL_OK; + } + +} + + +/** + * @} + */ + +/** @defgroup JPEG_Exported_Functions_Group4 JPEG Decode/Encode callback functions + * @brief JPEG process callback functions. + * +@verbatim + ============================================================================== + ##### JPEG Decode and Encode callback functions ##### + ============================================================================== + [..] This section provides callback functions: + (+) HAL_JPEG_InfoReadyCallback() : Decoding JPEG Info ready callback + (+) HAL_JPEG_EncodeCpltCallback() : Encoding complete callback. + (+) HAL_JPEG_DecodeCpltCallback() : Decoding complete callback. + (+) HAL_JPEG_ErrorCallback() : JPEG error callback. + (+) HAL_JPEG_GetDataCallback() : Get New Data chunk callback. + (+) HAL_JPEG_DataReadyCallback() : Decoded/Encoded Data ready callback. + +@endverbatim + * @{ + */ + +/** + * @brief Decoding JPEG Info ready callback. 
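+ * @note  A minimal user-side sketch; the pInfo field names below (ImageWidth,
+ *        ImageHeight) are assumed from JPEG_ConfTypeDef:
+ * @code
+ *   void HAL_JPEG_InfoReadyCallback(JPEG_HandleTypeDef *hjpeg, JPEG_ConfTypeDef *pInfo)
+ *   {
+ *     // keep the parsed header parameters for later use by the application,
+ *     // e.g. pInfo->ImageWidth, pInfo->ImageHeight, pInfo->ChromaSubsampling
+ *   }
+ * @endcode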
+ * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param pInfo pointer to a JPEG_ConfTypeDef structure that contains + * The JPEG decoded header informations + * @retval None + */ +__weak void HAL_JPEG_InfoReadyCallback(JPEG_HandleTypeDef *hjpeg, JPEG_ConfTypeDef *pInfo) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hjpeg); + UNUSED(pInfo); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_JPEG_HeaderParsingCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Encoding complete callback. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +__weak void HAL_JPEG_EncodeCpltCallback(JPEG_HandleTypeDef *hjpeg) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hjpeg); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_JPEG_EncodeCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Decoding complete callback. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +__weak void HAL_JPEG_DecodeCpltCallback(JPEG_HandleTypeDef *hjpeg) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hjpeg); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_JPEG_EncodeCpltCallback could be implemented in the user file + */ +} + +/** + * @brief JPEG error callback. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +__weak void HAL_JPEG_ErrorCallback(JPEG_HandleTypeDef *hjpeg) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hjpeg); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_JPEG_ErrorCallback could be implemented in the user file + */ +} + +/** + * @brief Get New Data chunk callback. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param NbDecodedData Number of consummed data in the previous chunk in bytes + * @retval None + */ +__weak void HAL_JPEG_GetDataCallback(JPEG_HandleTypeDef *hjpeg, uint32_t NbDecodedData) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hjpeg); + UNUSED(NbDecodedData); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_JPEG_GetDataCallback could be implemented in the user file + */ +} + +/** + * @brief Decoded/Encoded Data ready callback. 
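+ * @note  A minimal user-side sketch (MyOutputBuffer is an illustrative
+ *        application buffer, not part of the driver):
+ * @code
+ *   void HAL_JPEG_DataReadyCallback(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataOut, uint32_t OutDataLength)
+ *   {
+ *     // consume OutDataLength bytes from pDataOut, then provide a buffer for the next chunk
+ *     HAL_JPEG_ConfigOutputBuffer(hjpeg, MyOutputBuffer, sizeof(MyOutputBuffer));
+ *   }
+ * @endcode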
+ * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param pDataOut pointer to the output data buffer + * @param OutDataLength number in bytes of data available in the specified output buffer + * @retval None + */ +__weak void HAL_JPEG_DataReadyCallback(JPEG_HandleTypeDef *hjpeg, uint8_t *pDataOut, uint32_t OutDataLength) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hjpeg); + UNUSED(pDataOut); + UNUSED(OutDataLength); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_JPEG_DataReadyCallback could be implemented in the user file + */ +} + +/** + * @} + */ + + +/** @defgroup JPEG_Exported_Functions_Group5 JPEG IRQ handler management + * @brief JPEG IRQ handler. + * +@verbatim + ============================================================================== + ##### JPEG IRQ handler management ##### + ============================================================================== + [..] This section provides JPEG IRQ handler function. + (+) HAL_JPEG_IRQHandler() : handles JPEG interrupt request + +@endverbatim + * @{ + */ + +/** + * @brief This function handles JPEG interrupt request. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +void HAL_JPEG_IRQHandler(JPEG_HandleTypeDef *hjpeg) +{ + switch (hjpeg->State) + { + case HAL_JPEG_STATE_BUSY_ENCODING: + case HAL_JPEG_STATE_BUSY_DECODING: + /* continue JPEG data encoding/Decoding*/ + /* JPEG data processing : In/Out FIFO transfer*/ + if ((hjpeg->Context & JPEG_CONTEXT_METHOD_MASK) == JPEG_CONTEXT_IT) + { + (void) JPEG_Process(hjpeg); + } + else if ((hjpeg->Context & JPEG_CONTEXT_METHOD_MASK) == JPEG_CONTEXT_DMA) + { + JPEG_DMA_ContinueProcess(hjpeg); + } + else + { + /* Nothing to do */ + } + break; + + default: + break; + } +} + +/** + * @} + */ + +/** @defgroup JPEG_Exported_Functions_Group6 Peripheral State functions + * @brief Peripheral State functions. + * +@verbatim + ============================================================================== + ##### Peripheral State and Error functions ##### + ============================================================================== + [..] This section provides JPEG State and Errors function. + (+) HAL_JPEG_GetState() : permits to get in run-time the JPEG state. + (+) HAL_JPEG_GetError() : Returns the JPEG error code if any. + +@endverbatim + * @{ + */ + +/** + * @brief Returns the JPEG state. + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval JPEG state + */ +HAL_JPEG_STATETypeDef HAL_JPEG_GetState(JPEG_HandleTypeDef *hjpeg) +{ + return hjpeg->State; +} + +/** + * @brief Return the JPEG error code + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for the specified JPEG. 
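+ * @note  A typical check after an encode/decode operation (a sketch; error
+ *        handling is application specific):
+ * @code
+ *   if (HAL_JPEG_GetError(&hjpeg) != HAL_JPEG_ERROR_NONE)
+ *   {
+ *     // one or more HAL_JPEG_ERROR_xxx flags are set, e.g. HAL_JPEG_ERROR_DMA
+ *   }
+ * @endcode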
+ * @retval JPEG Error Code + */ +uint32_t HAL_JPEG_GetError(JPEG_HandleTypeDef *hjpeg) +{ + return hjpeg->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + + +/** @addtogroup JPEG_Private_Functions + * @{ + */ + +/** + * @brief Generates Huffman sizes/Codes Table from Bits/vals Table + * @param Bits pointer to bits table + * @param Huffsize pointer to sizes table + * @param Huffcode pointer to codes table + * @param LastK pointer to last Coeff (table dimmension) + * @retval HAL status + */ +static HAL_StatusTypeDef JPEG_Bits_To_SizeCodes(uint8_t *Bits, uint8_t *Huffsize, uint32_t *Huffcode, uint32_t *LastK) +{ + uint32_t i, p, l, code, si; + + /* Figure C.1: Generation of table of Huffman code sizes */ + p = 0; + for (l = 0; l < 16UL; l++) + { + i = (uint32_t)Bits[l]; + if ((p + i) > 256UL) + { + /* check for table overflow */ + return HAL_ERROR; + } + while (i != 0UL) + { + Huffsize[p] = (uint8_t) l + 1U; + p++; + i--; + } + } + Huffsize[p] = 0; + *LastK = p; + + /* Figure C.2: Generation of table of Huffman codes */ + code = 0; + si = Huffsize[0]; + p = 0; + while (Huffsize[p] != 0U) + { + while (((uint32_t) Huffsize[p]) == si) + { + Huffcode[p] = code; + p++; + code++; + } + /* code must fit in "size" bits (si), no code is allowed to be all ones*/ + if(si > 31UL) + { + return HAL_ERROR; + } + if (((uint32_t) code) >= (((uint32_t) 1) << si)) + { + return HAL_ERROR; + } + code <<= 1; + si++; + } + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Transform a Bits/Vals AC Huffman table to sizes/Codes huffman Table + * that can programmed to the JPEG encoder registers + * @param AC_BitsValsTable pointer to AC huffman bits/vals table + * @param AC_SizeCodesTable pointer to AC huffman Sizes/Codes table + * @retval HAL status + */ +static HAL_StatusTypeDef JPEG_ACHuff_BitsVals_To_SizeCodes(JPEG_ACHuffTableTypeDef *AC_BitsValsTable, + JPEG_AC_HuffCodeTableTypeDef *AC_SizeCodesTable) +{ + HAL_StatusTypeDef error; + uint8_t huffsize[257]; + uint32_t huffcode[257]; + uint32_t k; + uint32_t l, lsb, msb; + uint32_t lastK; + + error = JPEG_Bits_To_SizeCodes(AC_BitsValsTable->Bits, huffsize, huffcode, &lastK); + if (error != HAL_OK) + { + return error; + } + + /* Figure C.3: Ordering procedure for encoding procedure code tables */ + k = 0; + + while (k < lastK) + { + l = AC_BitsValsTable->HuffVal[k]; + if (l == 0UL) + { + l = 160; /*l = 0x00 EOB code*/ + } + else if (l == 0xF0UL) /* l = 0xF0 ZRL code*/ + { + l = 161; + } + else + { + msb = (l & 0xF0UL) >> 4; + lsb = (l & 0x0FUL); + l = (msb * 10UL) + lsb - 1UL; + } + if (l >= JPEG_AC_HUFF_TABLE_SIZE) + { + return HAL_ERROR; /* Huffman Table overflow error*/ + } + else + { + AC_SizeCodesTable->HuffmanCode[l] = huffcode[k]; + AC_SizeCodesTable->CodeLength[l] = huffsize[k] - 1U; + k++; + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Transform a Bits/Vals DC Huffman table to sizes/Codes huffman Table + * that can programmed to the JPEG encoder registers + * @param DC_BitsValsTable pointer to DC huffman bits/vals table + * @param DC_SizeCodesTable pointer to DC huffman Sizes/Codes table + * @retval HAL status + */ +static HAL_StatusTypeDef JPEG_DCHuff_BitsVals_To_SizeCodes(JPEG_DCHuffTableTypeDef *DC_BitsValsTable, + JPEG_DC_HuffCodeTableTypeDef *DC_SizeCodesTable) +{ + HAL_StatusTypeDef error; + + uint32_t k; + uint32_t l; + uint32_t lastK; + uint8_t huffsize[257]; + uint32_t huffcode[257]; + error = JPEG_Bits_To_SizeCodes(DC_BitsValsTable->Bits, huffsize, huffcode, &lastK); + if (error != HAL_OK) + { + 
return error; + } + /* Figure C.3: ordering procedure for encoding procedure code tables */ + k = 0; + + while (k < lastK) + { + l = DC_BitsValsTable->HuffVal[k]; + if (l >= JPEG_DC_HUFF_TABLE_SIZE) + { + return HAL_ERROR; /* Huffman Table overflow error*/ + } + else + { + DC_SizeCodesTable->HuffmanCode[l] = huffcode[k]; + DC_SizeCodesTable->CodeLength[l] = huffsize[k] - 1U; + k++; + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Set the JPEG register with an DC huffman table at the given DC table address + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param HuffTableDC pointer to DC huffman table + * @param DCTableAddress Encoder DC huffman table address it could be HUFFENC_DC0 or HUFFENC_DC1. + * @retval HAL status + */ +static HAL_StatusTypeDef JPEG_Set_HuffDC_Mem(JPEG_HandleTypeDef *hjpeg, JPEG_DCHuffTableTypeDef *HuffTableDC, + const __IO uint32_t *DCTableAddress) +{ + HAL_StatusTypeDef error; + JPEG_DC_HuffCodeTableTypeDef dcSizeCodesTable; + uint32_t i, lsb, msb; + __IO uint32_t *address, *addressDef; + + if (DCTableAddress == (hjpeg->Instance->HUFFENC_DC0)) + { + address = (hjpeg->Instance->HUFFENC_DC0 + (JPEG_DC_HUFF_TABLE_SIZE / 2UL)); + } + else if (DCTableAddress == (hjpeg->Instance->HUFFENC_DC1)) + { + address = (hjpeg->Instance->HUFFENC_DC1 + (JPEG_DC_HUFF_TABLE_SIZE / 2UL)); + } + else + { + return HAL_ERROR; + } + + if (HuffTableDC != NULL) + { + error = JPEG_DCHuff_BitsVals_To_SizeCodes(HuffTableDC, &dcSizeCodesTable); + if (error != HAL_OK) + { + return error; + } + addressDef = address; + *addressDef = 0x0FFF0FFF; + addressDef++; + *addressDef = 0x0FFF0FFF; + + i = JPEG_DC_HUFF_TABLE_SIZE; + while (i > 1UL) + { + i--; + address --; + msb = ((uint32_t)(((uint32_t)dcSizeCodesTable.CodeLength[i] & 0xFU) << 8)) | ((uint32_t)dcSizeCodesTable.HuffmanCode[i] & + 0xFFUL); + i--; + lsb = ((uint32_t)(((uint32_t)dcSizeCodesTable.CodeLength[i] & 0xFU) << 8)) | ((uint32_t)dcSizeCodesTable.HuffmanCode[i] & + 0xFFUL); + + *address = lsb | (msb << 16); + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Set the JPEG register with an AC huffman table at the given AC table address + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param HuffTableAC pointer to AC huffman table + * @param ACTableAddress Encoder AC huffman table address it could be HUFFENC_AC0 or HUFFENC_AC1. 
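+ * @note  Each 32-bit word of the table holds two entries: the 4-bit code length
+ *        and 8-bit Huffman code of one symbol in the low half-word and of the
+ *        next symbol in the high half-word; locations 162 to 175 are used
+ *        internally by the core and are pre-loaded with fixed default values.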
+ * @retval HAL status + */ +static HAL_StatusTypeDef JPEG_Set_HuffAC_Mem(JPEG_HandleTypeDef *hjpeg, JPEG_ACHuffTableTypeDef *HuffTableAC, + const __IO uint32_t *ACTableAddress) +{ + HAL_StatusTypeDef error; + JPEG_AC_HuffCodeTableTypeDef acSizeCodesTable; + uint32_t i, lsb, msb; + __IO uint32_t *address, *addressDef; + + if (ACTableAddress == (hjpeg->Instance->HUFFENC_AC0)) + { + address = (hjpeg->Instance->HUFFENC_AC0 + (JPEG_AC_HUFF_TABLE_SIZE / 2UL)); + } + else if (ACTableAddress == (hjpeg->Instance->HUFFENC_AC1)) + { + address = (hjpeg->Instance->HUFFENC_AC1 + (JPEG_AC_HUFF_TABLE_SIZE / 2UL)); + } + else + { + return HAL_ERROR; + } + + if (HuffTableAC != NULL) + { + error = JPEG_ACHuff_BitsVals_To_SizeCodes(HuffTableAC, &acSizeCodesTable); + if (error != HAL_OK) + { + return error; + } + /* Default values settings: 162:167 FFFh , 168:175 FD0h_FD7h */ + /* Locations 162:175 of each AC table contain information used internally by the core */ + + addressDef = address; + for (i = 0; i < 3UL; i++) + { + *addressDef = 0x0FFF0FFF; + addressDef++; + } + *addressDef = 0x0FD10FD0; + addressDef++; + *addressDef = 0x0FD30FD2; + addressDef++; + *addressDef = 0x0FD50FD4; + addressDef++; + *addressDef = 0x0FD70FD6; + /* end of Locations 162:175 */ + + + i = JPEG_AC_HUFF_TABLE_SIZE; + while (i > 1UL) + { + i--; + address--; + msb = ((uint32_t)(((uint32_t)acSizeCodesTable.CodeLength[i] & 0xFU) << 8)) | ((uint32_t)acSizeCodesTable.HuffmanCode[i] & + 0xFFUL); + i--; + lsb = ((uint32_t)(((uint32_t)acSizeCodesTable.CodeLength[i] & 0xFU) << 8)) | ((uint32_t)acSizeCodesTable.HuffmanCode[i] & + 0xFFUL); + + *address = lsb | (msb << 16); + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Configure the JPEG encoder register huffman tables to used during + * the encdoing operation + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +static HAL_StatusTypeDef JPEG_Set_HuffEnc_Mem(JPEG_HandleTypeDef *hjpeg) +{ + HAL_StatusTypeDef error; + + JPEG_Set_Huff_DHTMem(hjpeg); + error = JPEG_Set_HuffAC_Mem(hjpeg, (JPEG_ACHuffTableTypeDef *)(uint32_t)&JPEG_ACLUM_HuffTable, + (hjpeg->Instance->HUFFENC_AC0)); + if (error != HAL_OK) + { + return error; + } + + error = JPEG_Set_HuffAC_Mem(hjpeg, (JPEG_ACHuffTableTypeDef *)(uint32_t)&JPEG_ACCHROM_HuffTable, + (hjpeg->Instance->HUFFENC_AC1)); + if (error != HAL_OK) + { + return error; + } + + error = JPEG_Set_HuffDC_Mem(hjpeg, (JPEG_DCHuffTableTypeDef *)(uint32_t)&JPEG_DCLUM_HuffTable, + hjpeg->Instance->HUFFENC_DC0); + if (error != HAL_OK) + { + return error; + } + + error = JPEG_Set_HuffDC_Mem(hjpeg, (JPEG_DCHuffTableTypeDef *)(uint32_t)&JPEG_DCCHROM_HuffTable, + hjpeg->Instance->HUFFENC_DC1); + if (error != HAL_OK) + { + return error; + } + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Configure the JPEG register huffman tables to be included in the JPEG + * file header (used for encoding only) + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +static void JPEG_Set_Huff_DHTMem(JPEG_HandleTypeDef *hjpeg) +{ + JPEG_ACHuffTableTypeDef *HuffTableAC0 = (JPEG_ACHuffTableTypeDef *)(uint32_t)&JPEG_ACLUM_HuffTable; + JPEG_ACHuffTableTypeDef *HuffTableAC1 = (JPEG_ACHuffTableTypeDef *)(uint32_t)&JPEG_ACCHROM_HuffTable; + JPEG_DCHuffTableTypeDef *HuffTableDC0 = (JPEG_DCHuffTableTypeDef *)(uint32_t)&JPEG_DCLUM_HuffTable; + JPEG_DCHuffTableTypeDef 
*HuffTableDC1 = (JPEG_DCHuffTableTypeDef *)(uint32_t)&JPEG_DCCHROM_HuffTable; + uint32_t value, index; + __IO uint32_t *address; + + /* DC0 Huffman Table : BITS*/ + /* DC0 BITS is a 16 Bytes table i.e 4x32bits words from DHTMEM base address to DHTMEM + 3*/ + address = (hjpeg->Instance->DHTMEM + 3); + index = 16; + while (index > 3UL) + { + + *address = (((uint32_t)HuffTableDC0->Bits[index - 1UL] & 0xFFUL) << 24) | + (((uint32_t)HuffTableDC0->Bits[index - 2UL] & 0xFFUL) << 16) | + (((uint32_t)HuffTableDC0->Bits[index - 3UL] & 0xFFUL) << 8) | + ((uint32_t)HuffTableDC0->Bits[index - 4UL] & 0xFFUL); + address--; + index -= 4UL; + + } + /* DC0 Huffman Table : Val*/ + /* DC0 VALS is a 12 Bytes table i.e 3x32bits words from DHTMEM base address +4 to DHTMEM + 6 */ + address = (hjpeg->Instance->DHTMEM + 6); + index = 12; + while (index > 3UL) + { + *address = (((uint32_t)HuffTableDC0->HuffVal[index - 1UL] & 0xFFUL) << 24) | + (((uint32_t)HuffTableDC0->HuffVal[index - 2UL] & 0xFFUL) << 16) | + (((uint32_t)HuffTableDC0->HuffVal[index - 3UL] & 0xFFUL) << 8) | + ((uint32_t)HuffTableDC0->HuffVal[index - 4UL] & 0xFFUL); + address--; + index -= 4UL; + } + + /* AC0 Huffman Table : BITS*/ + /* AC0 BITS is a 16 Bytes table i.e 4x32bits words from DHTMEM base address + 7 to DHTMEM + 10*/ + address = (hjpeg->Instance->DHTMEM + 10UL); + index = 16; + while (index > 3UL) + { + + *address = (((uint32_t)HuffTableAC0->Bits[index - 1UL] & 0xFFUL) << 24) | + (((uint32_t)HuffTableAC0->Bits[index - 2UL] & 0xFFUL) << 16) | + (((uint32_t)HuffTableAC0->Bits[index - 3UL] & 0xFFUL) << 8) | + ((uint32_t)HuffTableAC0->Bits[index - 4UL] & 0xFFUL); + address--; + index -= 4UL; + + } + /* AC0 Huffman Table : Val*/ + /* AC0 VALS is a 162 Bytes table i.e 41x32bits words from DHTMEM base address + 11 to DHTMEM + 51 */ + /* only Byte 0 and Byte 1 of the last word (@ DHTMEM + 51) belong to AC0 VALS table */ + address = (hjpeg->Instance->DHTMEM + 51); + value = *address & 0xFFFF0000U; + value = value | (((uint32_t)HuffTableAC0->HuffVal[161] & 0xFFUL) << 8) | ((uint32_t)HuffTableAC0->HuffVal[160] & 0xFFUL); + *address = value; + + /*continue setting 160 AC0 huffman values */ + address--; /* address = hjpeg->Instance->DHTMEM + 50*/ + index = 160; + while (index > 3UL) + { + *address = (((uint32_t)HuffTableAC0->HuffVal[index - 1UL] & 0xFFUL) << 24) | + (((uint32_t)HuffTableAC0->HuffVal[index - 2UL] & 0xFFUL) << 16) | + (((uint32_t)HuffTableAC0->HuffVal[index - 3UL] & 0xFFUL) << 8) | + ((uint32_t)HuffTableAC0->HuffVal[index - 4UL] & 0xFFUL); + address--; + index -= 4UL; + } + + /* DC1 Huffman Table : BITS*/ + /* DC1 BITS is a 16 Bytes table i.e 4x32bits words from DHTMEM + 51 base address to DHTMEM + 55*/ + /* only Byte 2 and Byte 3 of the first word (@ DHTMEM + 51) belong to DC1 Bits table */ + address = (hjpeg->Instance->DHTMEM + 51); + value = *address & 0x0000FFFFU; + value = value | (((uint32_t)HuffTableDC1->Bits[1] & 0xFFUL) << 24) | (((uint32_t)HuffTableDC1->Bits[0] & 0xFFUL) << 16); + *address = value; + + /* only Byte 0 and Byte 1 of the last word (@ DHTMEM + 55) belong to DC1 Bits table */ + address = (hjpeg->Instance->DHTMEM + 55); + value = *address & 0xFFFF0000U; + value = value | (((uint32_t)HuffTableDC1->Bits[15] & 0xFFUL) << 8) | ((uint32_t)HuffTableDC1->Bits[14] & 0xFFUL); + *address = value; + + /*continue setting 12 DC1 huffman Bits from DHTMEM + 54 down to DHTMEM + 52*/ + address--; + index = 12; + while (index > 3UL) + { + + *address = (((uint32_t)HuffTableDC1->Bits[index + 1UL] & 0xFFUL) << 24) | + 
(((uint32_t)HuffTableDC1->Bits[index] & 0xFFUL) << 16) | + (((uint32_t)HuffTableDC1->Bits[index - 1UL] & 0xFFUL) << 8) | + ((uint32_t)HuffTableDC1->Bits[index - 2UL] & 0xFFUL); + address--; + index -= 4UL; + + } + /* DC1 Huffman Table : Val*/ + /* DC1 VALS is a 12 Bytes table i.e 3x32bits words from DHTMEM base address +55 to DHTMEM + 58 */ + /* only Byte 2 and Byte 3 of the first word (@ DHTMEM + 55) belong to DC1 Val table */ + address = (hjpeg->Instance->DHTMEM + 55); + value = *address & 0x0000FFFFUL; + value = value | (((uint32_t)HuffTableDC1->HuffVal[1] & 0xFFUL) << 24) | (((uint32_t)HuffTableDC1->HuffVal[0] & 0xFFUL) << + 16); + *address = value; + + /* only Byte 0 and Byte 1 of the last word (@ DHTMEM + 58) belong to DC1 Val table */ + address = (hjpeg->Instance->DHTMEM + 58); + value = *address & 0xFFFF0000UL; + value = value | (((uint32_t)HuffTableDC1->HuffVal[11] & 0xFFUL) << 8) | ((uint32_t)HuffTableDC1->HuffVal[10] & 0xFFUL); + *address = value; + + /*continue setting 8 DC1 huffman val from DHTMEM + 57 down to DHTMEM + 56*/ + address--; + index = 8; + while (index > 3UL) + { + *address = (((uint32_t)HuffTableDC1->HuffVal[index + 1UL] & 0xFFUL) << 24) | + (((uint32_t)HuffTableDC1->HuffVal[index] & 0xFFUL) << 16) | + (((uint32_t)HuffTableDC1->HuffVal[index - 1UL] & 0xFFUL) << 8) | + ((uint32_t)HuffTableDC1->HuffVal[index - 2UL] & 0xFFUL); + address--; + index -= 4UL; + } + + /* AC1 Huffman Table : BITS*/ + /* AC1 BITS is a 16 Bytes table i.e 4x32bits words from DHTMEM base address + 58 to DHTMEM + 62*/ + /* only Byte 2 and Byte 3 of the first word (@ DHTMEM + 58) belong to AC1 Bits table */ + address = (hjpeg->Instance->DHTMEM + 58); + value = *address & 0x0000FFFFU; + value = value | (((uint32_t)HuffTableAC1->Bits[1] & 0xFFUL) << 24) | (((uint32_t)HuffTableAC1->Bits[0] & 0xFFUL) << 16); + *address = value; + + /* only Byte 0 and Byte 1 of the last word (@ DHTMEM + 62) belong to Bits Val table */ + address = (hjpeg->Instance->DHTMEM + 62); + value = *address & 0xFFFF0000U; + value = value | (((uint32_t)HuffTableAC1->Bits[15] & 0xFFUL) << 8) | ((uint32_t)HuffTableAC1->Bits[14] & 0xFFUL); + *address = value; + + /*continue setting 12 AC1 huffman Bits from DHTMEM + 61 down to DHTMEM + 59*/ + address--; + index = 12; + while (index > 3UL) + { + + *address = (((uint32_t)HuffTableAC1->Bits[index + 1UL] & 0xFFUL) << 24) | + (((uint32_t)HuffTableAC1->Bits[index] & 0xFFUL) << 16) | + (((uint32_t)HuffTableAC1->Bits[index - 1UL] & 0xFFUL) << 8) | + ((uint32_t)HuffTableAC1->Bits[index - 2UL] & 0xFFUL); + address--; + index -= 4UL; + + } + /* AC1 Huffman Table : Val*/ + /* AC1 VALS is a 162 Bytes table i.e 41x32bits words from DHTMEM base address + 62 to DHTMEM + 102 */ + /* only Byte 2 and Byte 3 of the first word (@ DHTMEM + 62) belong to AC1 VALS table */ + address = (hjpeg->Instance->DHTMEM + 62); + value = *address & 0x0000FFFFUL; + value = value | (((uint32_t)HuffTableAC1->HuffVal[1] & 0xFFUL) << 24) | (((uint32_t)HuffTableAC1->HuffVal[0] & 0xFFUL) << + 16); + *address = value; + + /*continue setting 160 AC1 huffman values from DHTMEM + 63 to DHTMEM+102 */ + address = (hjpeg->Instance->DHTMEM + 102); + index = 160; + while (index > 3UL) + { + *address = (((uint32_t)HuffTableAC1->HuffVal[index + 1UL] & 0xFFUL) << 24) | + (((uint32_t)HuffTableAC1->HuffVal[index] & 0xFFUL) << 16) | + (((uint32_t)HuffTableAC1->HuffVal[index - 1UL] & 0xFFUL) << 8) | + ((uint32_t)HuffTableAC1->HuffVal[index - 2UL] & 0xFFUL); + address--; + index -= 4UL; + } + +} + +/** + * @brief Configure the JPEG registers 
with a given quantization table + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param QTable pointer to an array of 64 bytes giving the quantization table + * @param QTableAddress destination quantization address in the JPEG peripheral + * it could be QMEM0, QMEM1, QMEM2 or QMEM3 + * @retval 0 if no error, 1 if error + */ +static uint32_t JPEG_Set_Quantization_Mem(JPEG_HandleTypeDef *hjpeg, uint8_t *QTable, + __IO uint32_t *QTableAddress) +{ + uint32_t i, j, quantRow, quantVal, ScaleFactor; + __IO uint32_t *tableAddress; + + tableAddress = QTableAddress; + + if ((hjpeg->Conf.ImageQuality >= 50UL) && (hjpeg->Conf.ImageQuality <= 100UL)) + { + ScaleFactor = 200UL - (hjpeg->Conf.ImageQuality * 2UL); + } + else if (hjpeg->Conf.ImageQuality > 0UL) + { + ScaleFactor = ((uint32_t) 5000) / ((uint32_t) hjpeg->Conf.ImageQuality); + } + else + { + return 1UL; + } + + /*Quantization_table = (Standard_quanization_table * ScaleFactor + 50) / 100*/ + i = 0; + while (i < (JPEG_QUANT_TABLE_SIZE - 3UL)) + { + quantRow = 0; + for (j = 0; j < 4UL; j++) + { + /* Note that the quantization coefficients must be specified in the table in zigzag order */ + quantVal = ((((uint32_t) QTable[JPEG_ZIGZAG_ORDER[i + j]]) * ScaleFactor) + 50UL) / 100UL; + + if (quantVal == 0UL) + { + quantVal = 1UL; + } + else if (quantVal > 255UL) + { + quantVal = 255UL; + } + else + { + /* Nothing to do, keep same value of quantVal */ + } + + quantRow |= ((quantVal & 0xFFUL) << (8UL * j)); + } + + i += 4UL; + *tableAddress = quantRow; + tableAddress ++; + } + + /* Return function status */ + return 0UL; +} + +/** + * @brief Configure the JPEG registers for YCbCr color space + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +static void JPEG_SetColorYCBCR(JPEG_HandleTypeDef *hjpeg) +{ + uint32_t ySamplingH; + uint32_t ySamplingV; + uint32_t yblockNb; + + /*Set Number of color components to 3*/ + hjpeg->Instance->CONFR1 &= ~JPEG_CONFR1_NF; + hjpeg->Instance->CONFR1 |= JPEG_CONFR1_NF_1; + + /* compute MCU block size and Y, Cb ,Cr sampling factors*/ + if (hjpeg->Conf.ChromaSubsampling == JPEG_420_SUBSAMPLING) + { + ySamplingH = JPEG_CONFR4_HSF_1; /* Hs = 2*/ + ySamplingV = JPEG_CONFR4_VSF_1; /* Vs = 2*/ + + yblockNb = 0x30; /* 4 blocks of 8x8*/ + } + else if (hjpeg->Conf.ChromaSubsampling == JPEG_422_SUBSAMPLING) + { + ySamplingH = JPEG_CONFR4_HSF_1; /* Hs = 2*/ + ySamplingV = JPEG_CONFR4_VSF_0; /* Vs = 1*/ + + yblockNb = 0x10; /* 2 blocks of 8x8*/ + } + else /*JPEG_444_SUBSAMPLING and default*/ + { + ySamplingH = JPEG_CONFR4_HSF_0; /* Hs = 1*/ + ySamplingV = JPEG_CONFR4_VSF_0; /* Vs = 1*/ + + yblockNb = 0; /* 1 block of 8x8*/ + } + + hjpeg->Instance->CONFR1 &= ~(JPEG_CONFR1_NF | JPEG_CONFR1_NS); + hjpeg->Instance->CONFR1 |= (JPEG_CONFR1_NF_1 | JPEG_CONFR1_NS_1); + + /*Reset CONFR4 register*/ + hjpeg->Instance->CONFR4 = 0; + /*Set Horizental and Vertical sampling factor , number of blocks , Quantization table and Huffman AC/DC tables for component 0*/ + hjpeg->Instance->CONFR4 |= (ySamplingH | ySamplingV | (yblockNb & JPEG_CONFR4_NB)); + + /*Reset CONFR5 register*/ + hjpeg->Instance->CONFR5 = 0; + /*Set Horizental and Vertical sampling factor , number of blocks , Quantization table and Huffman AC/DC tables for component 1*/ + hjpeg->Instance->CONFR5 |= (JPEG_CONFR5_HSF_0 | JPEG_CONFR5_VSF_0 | JPEG_CONFR5_QT_0 | JPEG_CONFR5_HA | JPEG_CONFR5_HD); + + /*Reset CONFR6 
register*/ + hjpeg->Instance->CONFR6 = 0; + /*Set Horizental and Vertical sampling factor and number of blocks for component 2*/ + /* In YCBCR , by default, both chrominance components (component 1 and component 2) use the same Quantization table (table 1) */ + /* In YCBCR , both chrominance components (component 1 and component 2) use the same Huffman tables (table 1) */ + hjpeg->Instance->CONFR6 |= (JPEG_CONFR6_HSF_0 | JPEG_CONFR6_VSF_0 | JPEG_CONFR6_QT_0 | JPEG_CONFR6_HA | JPEG_CONFR6_HD); + +} + +/** + * @brief Configure the JPEG registers for GrayScale color space + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +static void JPEG_SetColorGrayScale(JPEG_HandleTypeDef *hjpeg) +{ + /*Set Number of color components to 1*/ + hjpeg->Instance->CONFR1 &= ~(JPEG_CONFR1_NF | JPEG_CONFR1_NS); + + /*in GrayScale use 1 single Quantization table (Table 0)*/ + /*in GrayScale use only one couple of AC/DC huffman table (table 0)*/ + + /*Reset CONFR4 register*/ + hjpeg->Instance->CONFR4 = 0; + /*Set Horizental and Vertical sampling factor , number of blocks , Quantization table and Huffman AC/DC tables for component 0*/ + hjpeg->Instance->CONFR4 |= JPEG_CONFR4_HSF_0 | JPEG_CONFR4_VSF_0 ; +} + +/** + * @brief Configure the JPEG registers for CMYK color space + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +static void JPEG_SetColorCMYK(JPEG_HandleTypeDef *hjpeg) +{ + uint32_t ySamplingH; + uint32_t ySamplingV; + uint32_t yblockNb; + + /*Set Number of color components to 4*/ + hjpeg->Instance->CONFR1 |= (JPEG_CONFR1_NF | JPEG_CONFR1_NS); + + /* compute MCU block size and Y, Cb ,Cr sampling factors*/ + if (hjpeg->Conf.ChromaSubsampling == JPEG_420_SUBSAMPLING) + { + ySamplingH = JPEG_CONFR4_HSF_1; /* Hs = 2*/ + ySamplingV = JPEG_CONFR4_VSF_1; /* Vs = 2*/ + + yblockNb = 0x30; /* 4 blocks of 8x8*/ + } + else if (hjpeg->Conf.ChromaSubsampling == JPEG_422_SUBSAMPLING) + { + ySamplingH = JPEG_CONFR4_HSF_1; /* Hs = 2*/ + ySamplingV = JPEG_CONFR4_VSF_0; /* Vs = 1*/ + + yblockNb = 0x10; /* 2 blocks of 8x8*/ + } + else /*JPEG_444_SUBSAMPLING and default*/ + { + ySamplingH = JPEG_CONFR4_HSF_0; /* Hs = 1*/ + ySamplingV = JPEG_CONFR4_VSF_0; /* Vs = 1*/ + + yblockNb = 0; /* 1 block of 8x8*/ + } + + /*Reset CONFR4 register*/ + hjpeg->Instance->CONFR4 = 0; + /*Set Horizental and Vertical sampling factor , number of blocks , Quantization table and Huffman AC/DC tables for component 0*/ + hjpeg->Instance->CONFR4 |= (ySamplingH | ySamplingV | (yblockNb & JPEG_CONFR4_NB)); + + /*Reset CONFR5 register*/ + hjpeg->Instance->CONFR5 = 0; + /*Set Horizental and Vertical sampling factor , number of blocks , Quantization table and Huffman AC/DC tables for component 1*/ + hjpeg->Instance->CONFR5 |= (JPEG_CONFR5_HSF_0 | JPEG_CONFR5_VSF_0); + + /*Reset CONFR6 register*/ + hjpeg->Instance->CONFR6 = 0; + /*Set Horizental and Vertical sampling factor , number of blocks , Quantization table and Huffman AC/DC tables for component 2*/ + hjpeg->Instance->CONFR6 |= (JPEG_CONFR6_HSF_0 | JPEG_CONFR6_VSF_0); + + /*Reset CONFR7 register*/ + hjpeg->Instance->CONFR7 = 0; + /*Set Horizental and Vertical sampling factor , number of blocks , Quantization table and Huffman AC/DC tables for component 3*/ + hjpeg->Instance->CONFR7 |= (JPEG_CONFR7_HSF_0 | JPEG_CONFR7_VSF_0); +} + +/** + * @brief Init the JPEG encoding/decoding process in case of Polling or Interrupt 
and DMA + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None + */ +static void JPEG_Init_Process(JPEG_HandleTypeDef *hjpeg) +{ + /*Reset pause*/ + hjpeg->Context &= (~(JPEG_CONTEXT_PAUSE_INPUT | JPEG_CONTEXT_PAUSE_OUTPUT)); + + if ((hjpeg->Context & JPEG_CONTEXT_OPERATION_MASK) == JPEG_CONTEXT_DECODE) + { + /*Set JPEG Codec to Decoding mode */ + hjpeg->Instance->CONFR1 |= JPEG_CONFR1_DE; + } + else /* JPEG_CONTEXT_ENCODE */ + { + /*Set JPEG Codec to Encoding mode */ + hjpeg->Instance->CONFR1 &= ~JPEG_CONFR1_DE; + } + + /*Stop JPEG processing */ + hjpeg->Instance->CONFR0 &= ~JPEG_CONFR0_START; + + /* Disable All Interrupts */ + __HAL_JPEG_DISABLE_IT(hjpeg, JPEG_INTERRUPT_MASK); + + /* Disable All DMA requests */ + JPEG_DISABLE_DMA(hjpeg, JPEG_DMA_MASK); + /* Flush input and output FIFOs*/ + hjpeg->Instance->CR |= JPEG_CR_IFF; + hjpeg->Instance->CR |= JPEG_CR_OFF; + + /* Clear all flags */ + __HAL_JPEG_CLEAR_FLAG(hjpeg, JPEG_FLAG_ALL); + + /*Start Encoding/Decoding*/ + hjpeg->Instance->CONFR0 |= JPEG_CONFR0_START; + + if ((hjpeg->Context & JPEG_CONTEXT_METHOD_MASK) == JPEG_CONTEXT_IT) + { + /*Enable IN/OUT, end of Conversation, and end of header parsing interruptions*/ + __HAL_JPEG_ENABLE_IT(hjpeg, JPEG_IT_IFT | JPEG_IT_IFNF | JPEG_IT_OFT | JPEG_IT_OFNE | JPEG_IT_EOC | JPEG_IT_HPD); + } + else if ((hjpeg->Context & JPEG_CONTEXT_METHOD_MASK) == JPEG_CONTEXT_DMA) + { + /*Enable End Of Conversation, and End Of Header parsing interruptions*/ + __HAL_JPEG_ENABLE_IT(hjpeg, JPEG_IT_EOC | JPEG_IT_HPD); + + } + else + { + /* Nothing to do */ + } +} + +/** + * @brief JPEG encoding/decoding process in case of Polling or Interrupt + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval JPEG_PROCESS_DONE if the process has ends else JPEG_PROCESS_ONGOING + */ +static uint32_t JPEG_Process(JPEG_HandleTypeDef *hjpeg) +{ + uint32_t tmpContext; + + /*End of header processing flag rised*/ + if ((hjpeg->Context & JPEG_CONTEXT_OPERATION_MASK) == JPEG_CONTEXT_DECODE) + { + if (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_HPDF) != 0UL) + { + /*Call Header parsing complet callback */ + (void) HAL_JPEG_GetInfo(hjpeg, &hjpeg->Conf); + /* Reset the ImageQuality */ + hjpeg->Conf.ImageQuality = 0; + /* Note : the image quality is only available at the end of the decoding operation */ + /* at the current stage the calculated image quality is not correct so reset it */ + + /*Call Info Ready callback */ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->InfoReadyCallback(hjpeg, &hjpeg->Conf); +#else + HAL_JPEG_InfoReadyCallback(hjpeg, &hjpeg->Conf); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + __HAL_JPEG_DISABLE_IT(hjpeg, JPEG_IT_HPD); + + /* Clear header processing done flag */ + __HAL_JPEG_CLEAR_FLAG(hjpeg, JPEG_FLAG_HPDF); + } + } + + /*Input FIFO status handling*/ + if ((hjpeg->Context & JPEG_CONTEXT_PAUSE_INPUT) == 0UL) + { + if (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_IFTF) != 0UL) + { + /*Input FIFO threshold flag rised*/ + /*JPEG_FIFO_TH_SIZE words can be written in */ + JPEG_ReadInputData(hjpeg, JPEG_FIFO_TH_SIZE); + } + else if (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_IFNFF) != 0UL) + { + /*Input FIFO Not Full flag rised*/ + /*32-bit value can be written in */ + JPEG_ReadInputData(hjpeg, 1); + } + else + { + /* Nothing to do */ + } + } + + + /*Output FIFO flag handling*/ + if ((hjpeg->Context & JPEG_CONTEXT_PAUSE_OUTPUT) == 0UL) + { + if 
(__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_OFTF) != 0UL) + { + /*Output FIFO threshold flag rised*/ + /*JPEG_FIFO_TH_SIZE words can be read out */ + JPEG_StoreOutputData(hjpeg, JPEG_FIFO_TH_SIZE); + } + else if (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_OFNEF) != 0UL) + { + /*Output FIFO Not Empty flag rised*/ + /*32-bit value can be read out */ + JPEG_StoreOutputData(hjpeg, 1); + } + else + { + /* Nothing to do */ + } + } + + /*End of Conversion handling :i.e EOC flag is high and OFTF low and OFNEF low*/ + if (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_EOCF | JPEG_FLAG_OFTF | JPEG_FLAG_OFNEF) == JPEG_FLAG_EOCF) + { + /*Stop Encoding/Decoding*/ + hjpeg->Instance->CONFR0 &= ~JPEG_CONFR0_START; + + if ((hjpeg->Context & JPEG_CONTEXT_METHOD_MASK) == JPEG_CONTEXT_IT) + { + /* Disable All Interrupts */ + __HAL_JPEG_DISABLE_IT(hjpeg, JPEG_INTERRUPT_MASK); + } + + /* Clear all flags */ + __HAL_JPEG_CLEAR_FLAG(hjpeg, JPEG_FLAG_ALL); + + /*Call End of conversion callback */ + if (hjpeg->JpegOutCount > 0UL) + { + /*Output Buffer is not empty, call DecodedDataReadyCallback*/ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#else + HAL_JPEG_DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + hjpeg->JpegOutCount = 0; + } + + /*Reset Context Operation*/ + tmpContext = hjpeg->Context; + /*Clear all context fields execpt JPEG_CONTEXT_CONF_ENCODING and JPEG_CONTEXT_CUSTOM_TABLES*/ + hjpeg->Context &= (JPEG_CONTEXT_CONF_ENCODING | JPEG_CONTEXT_CUSTOM_TABLES); + + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_READY; + + /*Call End of Encoding/Decoding callback */ + if ((tmpContext & JPEG_CONTEXT_OPERATION_MASK) == JPEG_CONTEXT_DECODE) + { +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DecodeCpltCallback(hjpeg); +#else + HAL_JPEG_DecodeCpltCallback(hjpeg); +#endif /*USE_HAL_JPEG_REGISTER_CALLBACKS*/ + } + else /* JPEG_CONTEXT_ENCODE */ + { +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->EncodeCpltCallback(hjpeg); +#else + HAL_JPEG_EncodeCpltCallback(hjpeg); +#endif + } + + return JPEG_PROCESS_DONE; + } + + + return JPEG_PROCESS_ONGOING; +} + +/** + * @brief Store some output data from the JPEG peripheral to the output buffer. 
+ * This function is used when the JPEG peripheral has new data to output + * in case of Polling or Interrupt process + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param nbOutputWords Number of output words (of 32 bits) ready from the JPEG peripheral + * @retval None + */ +static void JPEG_StoreOutputData(JPEG_HandleTypeDef *hjpeg, uint32_t nbOutputWords) +{ + uint32_t index, nBwords, nbBytes, dataword; + + if (hjpeg->OutDataLength >= (hjpeg->JpegOutCount + (nbOutputWords * 4UL))) + { + for (index = 0; index < nbOutputWords; index++) + { + /*Transfer 32 bits from the JPEG output FIFO*/ + dataword = hjpeg->Instance->DOR; + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount] = (uint8_t)(dataword & 0x000000FFUL); + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount + 1UL] = (uint8_t)((dataword & 0x0000FF00UL) >> 8); + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount + 2UL] = (uint8_t)((dataword & 0x00FF0000UL) >> 16); + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount + 3UL] = (uint8_t)((dataword & 0xFF000000UL) >> 24); + hjpeg->JpegOutCount += 4UL; + } + if (hjpeg->OutDataLength == hjpeg->JpegOutCount) + { + /*Output Buffer is full, call DecodedDataReadyCallback*/ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#else + HAL_JPEG_DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#endif /*USE_HAL_JPEG_REGISTER_CALLBACKS*/ + hjpeg->JpegOutCount = 0; + } + } + else if (hjpeg->OutDataLength > hjpeg->JpegOutCount) + { + nBwords = (hjpeg->OutDataLength - hjpeg->JpegOutCount) / 4UL; + for (index = 0; index < nBwords; index++) + { + /*Transfer 32 bits from the JPEG output FIFO*/ + dataword = hjpeg->Instance->DOR; + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount] = (uint8_t)(dataword & 0x000000FFUL); + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount + 1UL] = (uint8_t)((dataword & 0x0000FF00UL) >> 8); + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount + 2UL] = (uint8_t)((dataword & 0x00FF0000UL) >> 16); + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount + 3UL] = (uint8_t)((dataword & 0xFF000000UL) >> 24); + hjpeg->JpegOutCount += 4UL; + } + if (hjpeg->OutDataLength == hjpeg->JpegOutCount) + { + /*Output Buffer is full, call DecodedDataReadyCallback*/ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#else + HAL_JPEG_DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + hjpeg->JpegOutCount = 0; + } + else + { + nbBytes = hjpeg->OutDataLength - hjpeg->JpegOutCount; + dataword = hjpeg->Instance->DOR; + for (index = 0; index < nbBytes; index++) + { + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount] = (uint8_t)((dataword >> (8UL * (index & 0x3UL))) & 0xFFUL); + hjpeg->JpegOutCount++; + } + /*Output Buffer is full, call DecodedDataReadyCallback*/ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#else + HAL_JPEG_DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + hjpeg->JpegOutCount = 0; + + nbBytes = 4UL - nbBytes; + for (index = nbBytes; index < 4UL; index++) + { + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount] = (uint8_t)((dataword >> (8UL * index)) & 0xFFUL); + hjpeg->JpegOutCount++; + } + } + } + else + { + /* Nothing to do */ + } +} + +/** + * @brief Read some input Data from the input buffer. 
+ * This function is used when the JPEG peripheral needs new data + * in case of Polling or Interrupt process + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @param nbRequestWords Number of input words (of 32 bits) that the JPE peripheral request + * @retval None + */ +static void JPEG_ReadInputData(JPEG_HandleTypeDef *hjpeg, uint32_t nbRequestWords) +{ + uint32_t nbBytes = 0, nBwords, index, Dataword, inputCount; + + if ((hjpeg->InDataLength == 0UL) || (nbRequestWords == 0UL)) + { + /* No more Input data : nothing to do*/ + (void) HAL_JPEG_Pause(hjpeg, JPEG_PAUSE_RESUME_INPUT); + } + else if (hjpeg->InDataLength > hjpeg->JpegInCount) + { + nbBytes = hjpeg->InDataLength - hjpeg->JpegInCount; + } + else if (hjpeg->InDataLength == hjpeg->JpegInCount) + { + /*Call HAL_JPEG_GetDataCallback to get new data */ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->GetDataCallback(hjpeg, hjpeg->JpegInCount); +#else + HAL_JPEG_GetDataCallback(hjpeg, hjpeg->JpegInCount); +#endif /*USE_HAL_JPEG_REGISTER_CALLBACKS*/ + + if (hjpeg->InDataLength > 4UL) + { + hjpeg->InDataLength = hjpeg->InDataLength - (hjpeg->InDataLength % 4UL); + } + hjpeg->JpegInCount = 0; + nbBytes = hjpeg->InDataLength; + } + else + { + /* Nothing to do */ + } + if (((hjpeg->Context & JPEG_CONTEXT_PAUSE_INPUT) == 0UL) && (nbBytes > 0UL)) + { + nBwords = nbBytes / 4UL; + if (nBwords >= nbRequestWords) + { + for (index = 0; index < nbRequestWords; index++) + { + inputCount = hjpeg->JpegInCount; + hjpeg->Instance->DIR = (((uint32_t)(hjpeg->pJpegInBuffPtr[inputCount])) | \ + (((uint32_t)(hjpeg->pJpegInBuffPtr[inputCount + 1UL])) << 8) | \ + (((uint32_t)(hjpeg->pJpegInBuffPtr[inputCount + 2UL])) << 16) | \ + (((uint32_t)(hjpeg->pJpegInBuffPtr[inputCount + 3UL])) << 24)); + + hjpeg->JpegInCount += 4UL; + } + } + else /*nBwords < nbRequestWords*/ + { + if (nBwords > 0UL) + { + for (index = 0; index < nBwords; index++) + { + inputCount = hjpeg->JpegInCount; + hjpeg->Instance->DIR = (((uint32_t)(hjpeg->pJpegInBuffPtr[inputCount])) | \ + (((uint32_t)(hjpeg->pJpegInBuffPtr[inputCount + 1UL])) << 8) | \ + (((uint32_t)(hjpeg->pJpegInBuffPtr[inputCount + 2UL])) << 16) | \ + (((uint32_t)(hjpeg->pJpegInBuffPtr[inputCount + 3UL])) << 24)); + + hjpeg->JpegInCount += 4UL; + } + } + else + { + /* end of file*/ + Dataword = 0; + for (index = 0; index < nbBytes; index++) + { + Dataword |= (uint32_t)hjpeg->pJpegInBuffPtr[hjpeg->JpegInCount] << (8UL * (index & 0x03UL)); + hjpeg->JpegInCount++; + } + hjpeg->Instance->DIR = Dataword; + } + } + } +} + +/** + * @brief Start the JPEG DMA process (encoding/decoding) + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval JPEG_PROCESS_DONE if process ends else JPEG_PROCESS_ONGOING + */ +static HAL_StatusTypeDef JPEG_DMA_StartProcess(JPEG_HandleTypeDef *hjpeg) +{ + if ((hjpeg->InDataLength < 4UL) || (hjpeg->OutDataLength < 4UL)) + { + return HAL_ERROR; + } + /* Reset Ending DMA internal context flag*/ + hjpeg->Context &= ~JPEG_CONTEXT_ENDING_DMA; + + /* Disable DMA In/Out Request*/ + JPEG_DISABLE_DMA(hjpeg, JPEG_DMA_ODMA | JPEG_DMA_IDMA); + + /* Set the JPEG DMA In transfer complete callback */ + hjpeg->hdmain->XferCpltCallback = JPEG_DMAInCpltCallback; + /* Set the DMA In error callback */ + hjpeg->hdmain->XferErrorCallback = JPEG_DMAErrorCallback; + + /* Set the JPEG DMA Out transfer complete callback */ + hjpeg->hdmaout->XferCpltCallback = 
JPEG_DMAOutCpltCallback; + /* Set the DMA Out error callback */ + hjpeg->hdmaout->XferErrorCallback = JPEG_DMAErrorCallback; + /* Set the DMA Out Abort callback */ + hjpeg->hdmaout->XferAbortCallback = JPEG_DMAOutAbortCallback; + + /*DMA transfer size must be a multiple of 4 bytes i.e mutliple of 32bits words*/ + hjpeg->InDataLength = hjpeg->InDataLength - (hjpeg->InDataLength % 4UL); + + /*DMA transfer size must be a multiple of 4 bytes i.e mutliple of 32bits words*/ + hjpeg->OutDataLength = hjpeg->OutDataLength - (hjpeg->OutDataLength % 4UL); + + /* Start DMA FIFO In transfer */ + if (HAL_DMA_Start_IT(hjpeg->hdmain, (uint32_t)hjpeg->pJpegInBuffPtr, (uint32_t)&hjpeg->Instance->DIR, + hjpeg->InDataLength >> 2) != HAL_OK) + { + hjpeg->ErrorCode |= HAL_JPEG_ERROR_DMA; + return HAL_ERROR; + } + + /* Start DMA FIFO Out transfer */ + if (HAL_DMA_Start_IT(hjpeg->hdmaout, (uint32_t)&hjpeg->Instance->DOR, (uint32_t)hjpeg->pJpegOutBuffPtr, + hjpeg->OutDataLength >> 2) != HAL_OK) + { + hjpeg->ErrorCode |= HAL_JPEG_ERROR_DMA; + return HAL_ERROR; + } + + /* Enable JPEG In/Out DMA requests*/ + JPEG_ENABLE_DMA(hjpeg, JPEG_DMA_IDMA | JPEG_DMA_ODMA); + + return HAL_OK; +} + +/** + * @brief Continue the current JPEG DMA process (encoding/decoding) + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval JPEG_PROCESS_DONE if process ends else JPEG_PROCESS_ONGOING + */ +static void JPEG_DMA_ContinueProcess(JPEG_HandleTypeDef *hjpeg) +{ + /*End of header processing flag rises*/ + if ((hjpeg->Context & JPEG_CONTEXT_OPERATION_MASK) == JPEG_CONTEXT_DECODE) + { + if (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_HPDF) != 0UL) + { + /*Call Header parsing complete callback */ + (void) HAL_JPEG_GetInfo(hjpeg, &hjpeg->Conf); + + /* Reset the ImageQuality */ + hjpeg->Conf.ImageQuality = 0; + /* Note : the image quality is only available at the end of the decoding operation */ + /* at the current stage the calculated image quality is not correct so reset it */ + + /*Call Info Ready callback */ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->InfoReadyCallback(hjpeg, &hjpeg->Conf); +#else + HAL_JPEG_InfoReadyCallback(hjpeg, &hjpeg->Conf); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + __HAL_JPEG_DISABLE_IT(hjpeg, JPEG_IT_HPD); + + /* Clear header processing done flag */ + __HAL_JPEG_CLEAR_FLAG(hjpeg, JPEG_FLAG_HPDF); + } + } + + /*End of Conversion handling*/ + if (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_EOCF) != 0UL) + { + /*Disabkle JPEG In/Out DMA Requests*/ + JPEG_DISABLE_DMA(hjpeg, JPEG_DMA_ODMA | JPEG_DMA_IDMA); + + hjpeg->Context |= JPEG_CONTEXT_ENDING_DMA; + + /*Stop Encoding/Decoding*/ + hjpeg->Instance->CONFR0 &= ~JPEG_CONFR0_START; + + __HAL_JPEG_DISABLE_IT(hjpeg, JPEG_INTERRUPT_MASK); + + /* Clear all flags */ + __HAL_JPEG_CLEAR_FLAG(hjpeg, JPEG_FLAG_ALL); + + if (hjpeg->hdmain->State == HAL_DMA_STATE_BUSY) + { + /* Stop the DMA In Xfer*/ + (void) HAL_DMA_Abort_IT(hjpeg->hdmain); + } + + if (hjpeg->hdmaout->State == HAL_DMA_STATE_BUSY) + { + /* Stop the DMA out Xfer*/ + (void) HAL_DMA_Abort_IT(hjpeg->hdmaout); + } + else + { + JPEG_DMA_EndProcess(hjpeg); + } + } + + +} + +/** + * @brief Finalize the current JPEG DMA process (encoding/decoding) + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval JPEG_PROCESS_DONE + */ +static void JPEG_DMA_EndProcess(JPEG_HandleTypeDef *hjpeg) +{ + uint32_t tmpContext; + hjpeg->JpegOutCount = hjpeg->OutDataLength - 
((hjpeg->hdmaout->Instance->NDTR & DMA_SxNDT) << 2); + + /*if Output Buffer is full, call HAL_JPEG_DataReadyCallback*/ + if (hjpeg->JpegOutCount == hjpeg->OutDataLength) + { +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#else + HAL_JPEG_DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + hjpeg->JpegOutCount = 0; + } + + /*Check if remaining data in the output FIFO*/ + if (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_OFNEF) == 0UL) + { + if (hjpeg->JpegOutCount > 0UL) + { + /*Output Buffer is not empty, call DecodedDataReadyCallback*/ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#else + HAL_JPEG_DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + hjpeg->JpegOutCount = 0; + } + + /*Stop Encoding/Decoding*/ + hjpeg->Instance->CONFR0 &= ~JPEG_CONFR0_START; + + tmpContext = hjpeg->Context; + /*Clear all context fileds execpt JPEG_CONTEXT_CONF_ENCODING and JPEG_CONTEXT_CUSTOM_TABLES*/ + hjpeg->Context &= (JPEG_CONTEXT_CONF_ENCODING | JPEG_CONTEXT_CUSTOM_TABLES); + + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_READY; + + /*Call End of Encoding/Decoding callback */ + if ((tmpContext & JPEG_CONTEXT_OPERATION_MASK) == JPEG_CONTEXT_DECODE) + { +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DecodeCpltCallback(hjpeg); +#else + HAL_JPEG_DecodeCpltCallback(hjpeg); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + } + else /* JPEG_CONTEXT_ENCODE */ + { +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->EncodeCpltCallback(hjpeg); +#else + HAL_JPEG_EncodeCpltCallback(hjpeg); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + } + } + else if ((hjpeg->Context & JPEG_CONTEXT_PAUSE_OUTPUT) == 0UL) + { + JPEG_DMA_PollResidualData(hjpeg); + } + else + { + /* Nothing to do */ + } + +} + +/** + * @brief Poll residual output data when DMA process (encoding/decoding) + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval None. 
+ */ +static void JPEG_DMA_PollResidualData(JPEG_HandleTypeDef *hjpeg) +{ + uint32_t tmpContext, count, dataOut; + + for (count = JPEG_FIFO_SIZE; count > 0UL; count--) + { + if ((hjpeg->Context & JPEG_CONTEXT_PAUSE_OUTPUT) == 0UL) + { + if (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_OFNEF) != 0UL) + { + dataOut = hjpeg->Instance->DOR; + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount] = (uint8_t)(dataOut & 0x000000FFUL); + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount + 1UL] = (uint8_t)((dataOut & 0x0000FF00UL) >> 8); + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount + 2UL] = (uint8_t)((dataOut & 0x00FF0000UL) >> 16); + hjpeg->pJpegOutBuffPtr[hjpeg->JpegOutCount + 3UL] = (uint8_t)((dataOut & 0xFF000000UL) >> 24); + hjpeg->JpegOutCount += 4UL; + + if (hjpeg->JpegOutCount == hjpeg->OutDataLength) + { + /*Output Buffer is full, call HAL_JPEG_DataReadyCallback*/ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#else + HAL_JPEG_DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + hjpeg->JpegOutCount = 0; + } + + } + } + } + + tmpContext = hjpeg->Context; + + if ((__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_OFNEF) == 0UL) || ((tmpContext & JPEG_CONTEXT_PAUSE_OUTPUT) == 0UL)) + { + /*Stop Encoding/Decoding*/ + hjpeg->Instance->CONFR0 &= ~JPEG_CONFR0_START; + + if (hjpeg->JpegOutCount > 0UL) + { + /*Output Buffer is not empty, call DecodedDataReadyCallback*/ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#else + HAL_JPEG_DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + hjpeg->JpegOutCount = 0; + } + + tmpContext = hjpeg->Context; + /*Clear all context fileds execpt JPEG_CONTEXT_CONF_ENCODING and JPEG_CONTEXT_CUSTOM_TABLES*/ + hjpeg->Context &= (JPEG_CONTEXT_CONF_ENCODING | JPEG_CONTEXT_CUSTOM_TABLES); + + /* Process Unlocked */ + __HAL_UNLOCK(hjpeg); + + /* Change the JPEG state */ + hjpeg->State = HAL_JPEG_STATE_READY; + + /*Call End of Encoding/Decoding callback */ + if ((tmpContext & JPEG_CONTEXT_OPERATION_MASK) == JPEG_CONTEXT_DECODE) + { +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DecodeCpltCallback(hjpeg); +#else + HAL_JPEG_DecodeCpltCallback(hjpeg); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + } + else /* JPEG_CONTEXT_ENCODE */ + { +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->EncodeCpltCallback(hjpeg); +#else + HAL_JPEG_EncodeCpltCallback(hjpeg); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + } + } +} + +/** + * @brief DMA input transfer complete callback + * @param hdma pointer to a DMA_HandleTypeDef structure. 
+ * @retval None + */ +static void JPEG_DMAInCpltCallback(DMA_HandleTypeDef *hdma) +{ + JPEG_HandleTypeDef *hjpeg = (JPEG_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* Disable The JPEG IT so the DMA Input Callback can not be interrupted by the JPEG EOC IT or JPEG HPD IT */ + __HAL_JPEG_DISABLE_IT(hjpeg, JPEG_INTERRUPT_MASK); + + if ((hjpeg->Context & (JPEG_CONTEXT_METHOD_MASK | JPEG_CONTEXT_ENDING_DMA)) == + JPEG_CONTEXT_DMA) /* Check if context method is DMA and we are not in ending DMA stage */ + { + JPEG_DISABLE_DMA(hjpeg, JPEG_DMA_IDMA); + + hjpeg->JpegInCount = hjpeg->InDataLength - ((hdma->Instance->NDTR & DMA_SxNDT) << 2); + + /*Call HAL_JPEG_GetDataCallback to get new data */ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->GetDataCallback(hjpeg, hjpeg->JpegInCount); +#else + HAL_JPEG_GetDataCallback(hjpeg, hjpeg->JpegInCount); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + if (hjpeg->InDataLength >= 4UL) + { + /*JPEG Input DMA transfer data number must be multiple of 32 bits word + as the destination is a 32 bits (4 bytes) register */ + hjpeg->InDataLength = hjpeg->InDataLength - (hjpeg->InDataLength % 4UL); + } + else if (hjpeg->InDataLength > 0UL) + { + /*Transfer last data word (i.e last 4 bytes)*/ + hjpeg->InDataLength = 4; + } + + if (((hjpeg->Context & JPEG_CONTEXT_PAUSE_INPUT) == 0UL) && (hjpeg->InDataLength > 0UL)) + { + /* Start DMA FIFO In transfer */ + if (HAL_DMA_Start_IT(hjpeg->hdmain, (uint32_t)hjpeg->pJpegInBuffPtr, (uint32_t)&hjpeg->Instance->DIR, + hjpeg->InDataLength >> 2) != HAL_OK) + { + hjpeg->ErrorCode |= HAL_JPEG_ERROR_DMA; + hjpeg->State = HAL_JPEG_STATE_ERROR; +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->ErrorCallback(hjpeg); +#else + HAL_JPEG_ErrorCallback(hjpeg); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + return; + } + JPEG_ENABLE_DMA(hjpeg, JPEG_DMA_IDMA); + } + + /* JPEG Conversion still on going : Enable the JPEG IT */ + __HAL_JPEG_ENABLE_IT(hjpeg, JPEG_IT_EOC | JPEG_IT_HPD); + } +} + +/** + * @brief DMA output transfer complete callback + * @param hdma pointer to a DMA_HandleTypeDef structure. 
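+ * @note   If the application cannot accept more output data immediately, it may pause the output
+ *         stage from its data-ready callback and resume it later. A hedged sketch of that flow
+ *         (application code, not part of this driver; MyJpegOutBuffer is hypothetical):
+ * @code
+ * // Inside the application's HAL_JPEG_DataReadyCallback():
+ * HAL_JPEG_Pause(hjpeg, JPEG_PAUSE_RESUME_OUTPUT);
+ * // ... later, once a new output buffer is available:
+ * HAL_JPEG_ConfigOutputBuffer(hjpeg, MyJpegOutBuffer, sizeof(MyJpegOutBuffer));
+ * HAL_JPEG_Resume(hjpeg, JPEG_PAUSE_RESUME_OUTPUT);
+ * @endcode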
+ * @retval None + */ +static void JPEG_DMAOutCpltCallback(DMA_HandleTypeDef *hdma) +{ + JPEG_HandleTypeDef *hjpeg = (JPEG_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* Disable The JPEG IT so the DMA Output Callback can not be interrupted by the JPEG EOC IT or JPEG HPD IT */ + __HAL_JPEG_DISABLE_IT(hjpeg, JPEG_INTERRUPT_MASK); + + if ((hjpeg->Context & (JPEG_CONTEXT_METHOD_MASK | JPEG_CONTEXT_ENDING_DMA)) == + JPEG_CONTEXT_DMA) /* Check if context method is DMA and we are not in ending DMA stage */ + { + if (__HAL_JPEG_GET_FLAG(hjpeg, JPEG_FLAG_EOCF) == 0UL) + { + JPEG_DISABLE_DMA(hjpeg, JPEG_DMA_ODMA); + hjpeg->JpegOutCount = hjpeg->OutDataLength - ((hdma->Instance->NDTR & DMA_SxNDT) << 2); + + /*Output Buffer is full, call HAL_JPEG_DataReadyCallback*/ +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#else + HAL_JPEG_DataReadyCallback(hjpeg, hjpeg->pJpegOutBuffPtr, hjpeg->JpegOutCount); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + + if ((hjpeg->Context & JPEG_CONTEXT_PAUSE_OUTPUT) == 0UL) + { + /* Start DMA FIFO Out transfer */ + if (HAL_DMA_Start_IT(hjpeg->hdmaout, (uint32_t)&hjpeg->Instance->DOR, (uint32_t)hjpeg->pJpegOutBuffPtr, + hjpeg->OutDataLength >> 2) != HAL_OK) + { + hjpeg->ErrorCode |= HAL_JPEG_ERROR_DMA; + hjpeg->State = HAL_JPEG_STATE_ERROR; +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->ErrorCallback(hjpeg); +#else + HAL_JPEG_ErrorCallback(hjpeg); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + return; + } + JPEG_ENABLE_DMA(hjpeg, JPEG_DMA_ODMA); + } + } + + /* JPEG Conversion still on going : Enable the JPEG IT */ + __HAL_JPEG_ENABLE_IT(hjpeg, JPEG_IT_EOC | JPEG_IT_HPD); + } +} + + +/** + * @brief DMA Transfer error callback + * @param hdma pointer to a DMA_HandleTypeDef structure. + * @retval None + */ +static void JPEG_DMAErrorCallback(DMA_HandleTypeDef *hdma) +{ + JPEG_HandleTypeDef *hjpeg = (JPEG_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* if DMA error is FIFO error ignore it */ + if (HAL_DMA_GetError(hdma) != HAL_DMA_ERROR_FE) + { + /*Stop Encoding/Decoding*/ + hjpeg->Instance->CONFR0 &= ~JPEG_CONFR0_START; + + /* Disable All Interrupts */ + __HAL_JPEG_DISABLE_IT(hjpeg, JPEG_INTERRUPT_MASK); + + /* Disable All DMA requests */ + JPEG_DISABLE_DMA(hjpeg, JPEG_DMA_MASK); + + hjpeg->State = HAL_JPEG_STATE_READY; + hjpeg->ErrorCode |= HAL_JPEG_ERROR_DMA; +#if (USE_HAL_JPEG_REGISTER_CALLBACKS == 1) + hjpeg->ErrorCallback(hjpeg); +#else + HAL_JPEG_ErrorCallback(hjpeg); +#endif /* USE_HAL_JPEG_REGISTER_CALLBACKS */ + } +} + +/** + * @brief DMA output Abort callback + * @param hdma pointer to a DMA_HandleTypeDef structure. + * @retval None + */ +static void JPEG_DMAOutAbortCallback(DMA_HandleTypeDef *hdma) +{ + JPEG_HandleTypeDef *hjpeg = (JPEG_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if ((hjpeg->Context & JPEG_CONTEXT_ENDING_DMA) != 0UL) + { + JPEG_DMA_EndProcess(hjpeg); + } +} + + +/** + * @brief Calculate the decoded image quality (from 1 to 100) + * @param hjpeg pointer to a JPEG_HandleTypeDef structure that contains + * the configuration information for JPEG module + * @retval JPEG image quality from 1 to 100. 
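+ * @note   The estimate below inverts the usual libjpeg quality-to-scaling relation for each of the
+ *         64 quantization coefficients and averages the results. For example, if a stored
+ *         coefficient is 4 and the reference table value at the same zigzag position is 16, the
+ *         scale is 4 * 100 / 16 = 25 and the per-coefficient quality is (200 - 25) / 2 = 87.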
+ */ +static uint32_t JPEG_GetQuality(JPEG_HandleTypeDef *hjpeg) +{ + uint32_t quality = 0; + uint32_t quantRow, quantVal, scale, i, j; + __IO uint32_t *tableAddress = hjpeg->Instance->QMEM0; + + i = 0; + while (i < (JPEG_QUANT_TABLE_SIZE - 3UL)) + { + quantRow = *tableAddress; + for (j = 0; j < 4UL; j++) + { + quantVal = (quantRow >> (8UL * j)) & 0xFFUL; + if (quantVal == 1UL) + { + /* if Quantization value = 1 then quality is 100%*/ + quality += 100UL; + } + else + { + /* Note that the quantization coefficients must be specified in the table in zigzag order */ + scale = (quantVal * 100UL) / ((uint32_t) hjpeg->QuantTable0[JPEG_ZIGZAG_ORDER[i + j]]); + + if (scale <= 100UL) + { + quality += (200UL - scale) / 2UL; + } + else + { + quality += 5000UL / scale; + } + } + } + + i += 4UL; + tableAddress ++; + } + + return (quality / 64UL); +} +/** + * @} + */ + +#endif /* JPEG */ +#endif /* HAL_JPEG_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc.c new file mode 100644 index 000000000..50671bfa3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc.c @@ -0,0 +1,2142 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_ltdc.c + * @author MCD Application Team + * @brief LTDC HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the LTDC peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State and Errors functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Program the required configuration through the following parameters: + the LTDC timing, the horizontal and vertical polarity, + the pixel clock polarity, Data Enable polarity and the LTDC background color value + using HAL_LTDC_Init() function + + (#) Program the required configuration through the following parameters: + the pixel format, the blending factors, input alpha value, the window size + and the image size using HAL_LTDC_ConfigLayer() function for foreground + or/and background layer. + + (#) Optionally, configure and enable the CLUT using HAL_LTDC_ConfigCLUT() and + HAL_LTDC_EnableCLUT functions. + + (#) Optionally, enable the Dither using HAL_LTDC_EnableDither(). + + (#) Optionally, configure and enable the Color keying using HAL_LTDC_ConfigColorKeying() + and HAL_LTDC_EnableColorKeying functions. + + (#) Optionally, configure LineInterrupt using HAL_LTDC_ProgramLineEvent() + function + + (#) If needed, reconfigure and change the pixel format value, the alpha value + value, the window size, the window position and the layer start address + for foreground or/and background layer using respectively the following + functions: HAL_LTDC_SetPixelFormat(), HAL_LTDC_SetAlpha(), HAL_LTDC_SetWindowSize(), + HAL_LTDC_SetWindowPosition() and HAL_LTDC_SetAddress(). + + (#) Variant functions with _NoReload suffix allows to set the LTDC configuration/settings without immediate reload. 
        This is useful when the program needs to modify several LTDC settings (on one or both layers)
+        and then apply (reload) these settings in one shot by calling the function HAL_LTDC_Reload().
+
+        After calling the _NoReload functions to set different color/format/layer settings,
+        the program shall call the function HAL_LTDC_Reload() to apply (reload) these settings.
+        Function HAL_LTDC_Reload() can be called with the parameter ReloadType set to LTDC_RELOAD_IMMEDIATE if
+        an immediate reload is required.
+        Function HAL_LTDC_Reload() can be called with the parameter ReloadType set to LTDC_RELOAD_VERTICAL_BLANKING if
+        the reload should be done in the next vertical blanking period;
+        this option avoids display flicker by applying the new settings during the vertical blanking period.
+
+
+    (#) To control the LTDC state you can use the following function: HAL_LTDC_GetState()
+
+     *** LTDC HAL driver macros list ***
+     =============================================
+     [..]
+       Below is the list of the most used macros in the LTDC HAL driver.
+
+      (+) __HAL_LTDC_ENABLE: Enable the LTDC.
+      (+) __HAL_LTDC_DISABLE: Disable the LTDC.
+      (+) __HAL_LTDC_LAYER_ENABLE: Enable an LTDC Layer.
+      (+) __HAL_LTDC_LAYER_DISABLE: Disable an LTDC Layer.
+      (+) __HAL_LTDC_RELOAD_IMMEDIATE_CONFIG: Reload Layer Configuration.
+      (+) __HAL_LTDC_GET_FLAG: Get the LTDC pending flags.
+      (+) __HAL_LTDC_CLEAR_FLAG: Clear the LTDC pending flags.
+      (+) __HAL_LTDC_ENABLE_IT: Enable the specified LTDC interrupts.
+      (+) __HAL_LTDC_DISABLE_IT: Disable the specified LTDC interrupts.
+      (+) __HAL_LTDC_GET_IT_SOURCE: Check whether the specified LTDC interrupt has occurred or not.
+
+
+     *** Callback registration ***
+     =============================================
+
+     The compilation define USE_HAL_LTDC_REGISTER_CALLBACKS, when set to 1,
+     allows the user to configure the driver callbacks dynamically.
+     Use function @ref HAL_LTDC_RegisterCallback() to register a callback.
+
+     Function @ref HAL_LTDC_RegisterCallback() allows registering the following callbacks:
+       (+) LineEventCallback   : LTDC Line Event Callback.
+       (+) ReloadEventCallback : LTDC Reload Event Callback.
+       (+) ErrorCallback       : LTDC Error Callback.
+       (+) MspInitCallback     : LTDC MspInit.
+       (+) MspDeInitCallback   : LTDC MspDeInit.
+     This function takes as parameters the HAL peripheral handle, the Callback ID
+     and a pointer to the user callback function.
+
+     Use function @ref HAL_LTDC_UnRegisterCallback() to reset a callback to the default
+     weak function.
+     @ref HAL_LTDC_UnRegisterCallback takes as parameters the HAL peripheral handle
+     and the Callback ID.
+     This function allows resetting the following callbacks:
+       (+) LineEventCallback   : LTDC Line Event Callback.
+       (+) ReloadEventCallback : LTDC Reload Event Callback.
+       (+) ErrorCallback       : LTDC Error Callback.
+       (+) MspInitCallback     : LTDC MspInit.
+       (+) MspDeInitCallback   : LTDC MspDeInit.
+
+     By default, after HAL_LTDC_Init and when the state is HAL_LTDC_STATE_RESET,
+     all callbacks are set to the corresponding weak functions:
+     for example @ref HAL_LTDC_LineEventCallback() and @ref HAL_LTDC_ErrorCallback().
+     An exception is made for the MspInit and MspDeInit functions, which are
+     reset to the legacy weak functions in HAL_LTDC_Init/ @ref HAL_LTDC_DeInit only when
+     these callbacks are null (not registered beforehand).
     If MspInit or MspDeInit are not null, @ref HAL_LTDC_Init/ @ref HAL_LTDC_DeInit
+     keep and use the user MspInit/MspDeInit callbacks (registered beforehand).
+
+     Callbacks can be registered/unregistered in HAL_LTDC_STATE_READY state only.
+     An exception is made for MspInit/MspDeInit, which can be registered/unregistered
+     in HAL_LTDC_STATE_READY or HAL_LTDC_STATE_RESET state;
+     thus registered (user) MspInit/MspDeInit callbacks can be used during the Init/DeInit.
+     In that case, first register the MspInit/MspDeInit user callbacks
+     using @ref HAL_LTDC_RegisterCallback() before calling @ref HAL_LTDC_DeInit
+     or the HAL_LTDC_Init function.
+
+     When the compilation define USE_HAL_LTDC_REGISTER_CALLBACKS is set to 0 or
+     not defined, the callback registration feature is not available and all callbacks
+     are set to the corresponding weak functions.
+
+  [..]
+    (@) You can refer to the LTDC HAL driver header file for more useful macros
+
+  @endverbatim
+  ******************************************************************************
+  * @attention
+  *

+  * © Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +#ifdef HAL_LTDC_MODULE_ENABLED +#if defined (LTDC) +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup LTDC LTDC + * @brief LTDC HAL module driver + * @{ + */ + + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +static void LTDC_SetConfig(LTDC_HandleTypeDef *hltdc, LTDC_LayerCfgTypeDef *pLayerCfg, uint32_t LayerIdx); +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup LTDC_Exported_Functions LTDC Exported Functions + * @{ + */ + +/** @defgroup LTDC_Exported_Functions_Group1 Initialization and Configuration functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initialize and configure the LTDC + (+) De-initialize the LTDC + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the LTDC according to the specified parameters in the LTDC_InitTypeDef. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. 
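+ * @note   A minimal initialization sketch is given below. The timing values are hypothetical
+ *         (they correspond to a generic 480x272 panel and must be taken from the actual panel
+ *         datasheet); the accumulated values follow the usual LTDC rule AccumulatedHBP = HSYNC + HBP - 1,
+ *         AccumulatedActiveW = AccumulatedHBP + ActiveWidth, TotalWidth = AccumulatedActiveW + HFP,
+ *         and similarly for the vertical timings.
+ * @code
+ * LTDC_HandleTypeDef hltdc;
+ * hltdc.Instance = LTDC;
+ * hltdc.Init.HSPolarity         = LTDC_HSPOLARITY_AL;
+ * hltdc.Init.VSPolarity         = LTDC_VSPOLARITY_AL;
+ * hltdc.Init.DEPolarity         = LTDC_DEPOLARITY_AL;
+ * hltdc.Init.PCPolarity         = LTDC_PCPOLARITY_IPC;
+ * hltdc.Init.HorizontalSync     = 40;   // HSYNC width - 1
+ * hltdc.Init.VerticalSync       = 9;    // VSYNC width - 1
+ * hltdc.Init.AccumulatedHBP     = 53;
+ * hltdc.Init.AccumulatedVBP     = 11;
+ * hltdc.Init.AccumulatedActiveW = 533;  // 53 + 480
+ * hltdc.Init.AccumulatedActiveH = 283;  // 11 + 272
+ * hltdc.Init.TotalWidth         = 565;
+ * hltdc.Init.TotalHeigh         = 285;
+ * hltdc.Init.Backcolor.Red   = 0;
+ * hltdc.Init.Backcolor.Green = 0;
+ * hltdc.Init.Backcolor.Blue  = 0;
+ * if (HAL_LTDC_Init(&hltdc) != HAL_OK)
+ * {
+ *   // initialization error
+ * }
+ * @endcode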
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_Init(LTDC_HandleTypeDef *hltdc) +{ + uint32_t tmp, tmp1; + + /* Check the LTDC peripheral state */ + if (hltdc == NULL) + { + return HAL_ERROR; + } + + /* Check function parameters */ + assert_param(IS_LTDC_ALL_INSTANCE(hltdc->Instance)); + assert_param(IS_LTDC_HSYNC(hltdc->Init.HorizontalSync)); + assert_param(IS_LTDC_VSYNC(hltdc->Init.VerticalSync)); + assert_param(IS_LTDC_AHBP(hltdc->Init.AccumulatedHBP)); + assert_param(IS_LTDC_AVBP(hltdc->Init.AccumulatedVBP)); + assert_param(IS_LTDC_AAH(hltdc->Init.AccumulatedActiveH)); + assert_param(IS_LTDC_AAW(hltdc->Init.AccumulatedActiveW)); + assert_param(IS_LTDC_TOTALH(hltdc->Init.TotalHeigh)); + assert_param(IS_LTDC_TOTALW(hltdc->Init.TotalWidth)); + assert_param(IS_LTDC_HSPOL(hltdc->Init.HSPolarity)); + assert_param(IS_LTDC_VSPOL(hltdc->Init.VSPolarity)); + assert_param(IS_LTDC_DEPOL(hltdc->Init.DEPolarity)); + assert_param(IS_LTDC_PCPOL(hltdc->Init.PCPolarity)); + +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) + if (hltdc->State == HAL_LTDC_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hltdc->Lock = HAL_UNLOCKED; + + /* Reset the LTDC callback to the legacy weak callbacks */ + hltdc->LineEventCallback = HAL_LTDC_LineEventCallback; /* Legacy weak LineEventCallback */ + hltdc->ReloadEventCallback = HAL_LTDC_ReloadEventCallback; /* Legacy weak ReloadEventCallback */ + hltdc->ErrorCallback = HAL_LTDC_ErrorCallback; /* Legacy weak ErrorCallback */ + + if (hltdc->MspInitCallback == NULL) + { + hltdc->MspInitCallback = HAL_LTDC_MspInit; + } + /* Init the low level hardware */ + hltdc->MspInitCallback(hltdc); + } +#else + if (hltdc->State == HAL_LTDC_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hltdc->Lock = HAL_UNLOCKED; + /* Init the low level hardware */ + HAL_LTDC_MspInit(hltdc); + } +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Configure the HS, VS, DE and PC polarity */ + hltdc->Instance->GCR &= ~(LTDC_GCR_HSPOL | LTDC_GCR_VSPOL | LTDC_GCR_DEPOL | LTDC_GCR_PCPOL); + hltdc->Instance->GCR |= (uint32_t)(hltdc->Init.HSPolarity | hltdc->Init.VSPolarity | \ + hltdc->Init.DEPolarity | hltdc->Init.PCPolarity); + + /* Set Synchronization size */ + hltdc->Instance->SSCR &= ~(LTDC_SSCR_VSH | LTDC_SSCR_HSW); + tmp = (hltdc->Init.HorizontalSync << 16U); + hltdc->Instance->SSCR |= (tmp | hltdc->Init.VerticalSync); + + /* Set Accumulated Back porch */ + hltdc->Instance->BPCR &= ~(LTDC_BPCR_AVBP | LTDC_BPCR_AHBP); + tmp = (hltdc->Init.AccumulatedHBP << 16U); + hltdc->Instance->BPCR |= (tmp | hltdc->Init.AccumulatedVBP); + + /* Set Accumulated Active Width */ + hltdc->Instance->AWCR &= ~(LTDC_AWCR_AAH | LTDC_AWCR_AAW); + tmp = (hltdc->Init.AccumulatedActiveW << 16U); + hltdc->Instance->AWCR |= (tmp | hltdc->Init.AccumulatedActiveH); + + /* Set Total Width */ + hltdc->Instance->TWCR &= ~(LTDC_TWCR_TOTALH | LTDC_TWCR_TOTALW); + tmp = (hltdc->Init.TotalWidth << 16U); + hltdc->Instance->TWCR |= (tmp | hltdc->Init.TotalHeigh); + + /* Set the background color value */ + tmp = ((uint32_t)(hltdc->Init.Backcolor.Green) << 8U); + tmp1 = ((uint32_t)(hltdc->Init.Backcolor.Red) << 16U); + hltdc->Instance->BCCR &= ~(LTDC_BCCR_BCBLUE | LTDC_BCCR_BCGREEN | LTDC_BCCR_BCRED); + hltdc->Instance->BCCR |= (tmp1 | tmp | hltdc->Init.Backcolor.Blue); + + /* Enable the Transfer Error and FIFO underrun interrupts */ + __HAL_LTDC_ENABLE_IT(hltdc, LTDC_IT_TE | LTDC_IT_FU); + + /* Enable LTDC by setting 
LTDCEN bit */ + __HAL_LTDC_ENABLE(hltdc); + + /* Initialize the error code */ + hltdc->ErrorCode = HAL_LTDC_ERROR_NONE; + + /* Initialize the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + return HAL_OK; +} + +/** + * @brief De-initialize the LTDC peripheral. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval None + */ + +HAL_StatusTypeDef HAL_LTDC_DeInit(LTDC_HandleTypeDef *hltdc) +{ +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) + if (hltdc->MspDeInitCallback == NULL) + { + hltdc->MspDeInitCallback = HAL_LTDC_MspDeInit; + } + /* DeInit the low level hardware */ + hltdc->MspDeInitCallback(hltdc); +#else + /* DeInit the low level hardware */ + HAL_LTDC_MspDeInit(hltdc); +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ + + /* Initialize the error code */ + hltdc->ErrorCode = HAL_LTDC_ERROR_NONE; + + /* Initialize the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Initialize the LTDC MSP. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval None + */ +__weak void HAL_LTDC_MspInit(LTDC_HandleTypeDef *hltdc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hltdc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_LTDC_MspInit could be implemented in the user file + */ +} + +/** + * @brief De-initialize the LTDC MSP. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval None + */ +__weak void HAL_LTDC_MspDeInit(LTDC_HandleTypeDef *hltdc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hltdc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_LTDC_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User LTDC Callback + * To be used instead of the weak predefined callback + * @param hltdc ltdc handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_LTDC_LINE_EVENT_CB_ID Line Event Callback ID + * @arg @ref HAL_LTDC_RELOAD_EVENT_CB_ID Reload Event Callback ID + * @arg @ref HAL_LTDC_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_LTDC_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_LTDC_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_LTDC_RegisterCallback(LTDC_HandleTypeDef *hltdc, HAL_LTDC_CallbackIDTypeDef CallbackID, pLTDC_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hltdc->ErrorCode |= HAL_LTDC_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hltdc); + + if (hltdc->State == HAL_LTDC_STATE_READY) + { + switch (CallbackID) + { + case HAL_LTDC_LINE_EVENT_CB_ID : + hltdc->LineEventCallback = pCallback; + break; + + case HAL_LTDC_RELOAD_EVENT_CB_ID : + hltdc->ReloadEventCallback = pCallback; + break; + + case HAL_LTDC_ERROR_CB_ID : + hltdc->ErrorCallback = pCallback; + break; + + case HAL_LTDC_MSPINIT_CB_ID : + hltdc->MspInitCallback = pCallback; + break; + + case HAL_LTDC_MSPDEINIT_CB_ID : + hltdc->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + 
hltdc->ErrorCode |= HAL_LTDC_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (hltdc->State == HAL_LTDC_STATE_RESET) + { + switch (CallbackID) + { + case HAL_LTDC_MSPINIT_CB_ID : + hltdc->MspInitCallback = pCallback; + break; + + case HAL_LTDC_MSPDEINIT_CB_ID : + hltdc->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hltdc->ErrorCode |= HAL_LTDC_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hltdc->ErrorCode |= HAL_LTDC_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hltdc); + + return status; +} + +/** + * @brief Unregister an LTDC Callback + * LTDC callabck is redirected to the weak predefined callback + * @param hltdc ltdc handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_LTDC_LINE_EVENT_CB_ID Line Event Callback ID + * @arg @ref HAL_LTDC_RELOAD_EVENT_CB_ID Reload Event Callback ID + * @arg @ref HAL_LTDC_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_LTDC_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_LTDC_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval status + */ +HAL_StatusTypeDef HAL_LTDC_UnRegisterCallback(LTDC_HandleTypeDef *hltdc, HAL_LTDC_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hltdc); + + if (hltdc->State == HAL_LTDC_STATE_READY) + { + switch (CallbackID) + { + case HAL_LTDC_LINE_EVENT_CB_ID : + hltdc->LineEventCallback = HAL_LTDC_LineEventCallback; /* Legacy weak LineEventCallback */ + break; + + case HAL_LTDC_RELOAD_EVENT_CB_ID : + hltdc->ReloadEventCallback = HAL_LTDC_ReloadEventCallback; /* Legacy weak ReloadEventCallback */ + break; + + case HAL_LTDC_ERROR_CB_ID : + hltdc->ErrorCallback = HAL_LTDC_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_LTDC_MSPINIT_CB_ID : + hltdc->MspInitCallback = HAL_LTDC_MspInit; /* Legcay weak MspInit Callback */ + break; + + case HAL_LTDC_MSPDEINIT_CB_ID : + hltdc->MspDeInitCallback = HAL_LTDC_MspDeInit; /* Legcay weak MspDeInit Callback */ + break; + + default : + /* Update the error code */ + hltdc->ErrorCode |= HAL_LTDC_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (hltdc->State == HAL_LTDC_STATE_RESET) + { + switch (CallbackID) + { + case HAL_LTDC_MSPINIT_CB_ID : + hltdc->MspInitCallback = HAL_LTDC_MspInit; /* Legcay weak MspInit Callback */ + break; + + case HAL_LTDC_MSPDEINIT_CB_ID : + hltdc->MspDeInitCallback = HAL_LTDC_MspDeInit; /* Legcay weak MspDeInit Callback */ + break; + + default : + /* Update the error code */ + hltdc->ErrorCode |= HAL_LTDC_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hltdc->ErrorCode |= HAL_LTDC_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hltdc); + + return status; +} +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup LTDC_Exported_Functions_Group2 IO operation functions + * @brief IO operation functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] 
This section provides function allowing to: + (+) Handle LTDC interrupt request + +@endverbatim + * @{ + */ +/** + * @brief Handle LTDC interrupt request. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval HAL status + */ +void HAL_LTDC_IRQHandler(LTDC_HandleTypeDef *hltdc) +{ + uint32_t isrflags = READ_REG(hltdc->Instance->ISR); + uint32_t itsources = READ_REG(hltdc->Instance->IER); + + /* Transfer Error Interrupt management ***************************************/ + if (((isrflags & LTDC_ISR_TERRIF) != 0U) && ((itsources & LTDC_IER_TERRIE) != 0U)) + { + /* Disable the transfer Error interrupt */ + __HAL_LTDC_DISABLE_IT(hltdc, LTDC_IT_TE); + + /* Clear the transfer error flag */ + __HAL_LTDC_CLEAR_FLAG(hltdc, LTDC_FLAG_TE); + + /* Update error code */ + hltdc->ErrorCode |= HAL_LTDC_ERROR_TE; + + /* Change LTDC state */ + hltdc->State = HAL_LTDC_STATE_ERROR; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + /* Transfer error Callback */ +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + hltdc->ErrorCallback(hltdc); +#else + /* Call legacy error callback*/ + HAL_LTDC_ErrorCallback(hltdc); +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ + } + + /* FIFO underrun Interrupt management ***************************************/ + if (((isrflags & LTDC_ISR_FUIF) != 0U) && ((itsources & LTDC_IER_FUIE) != 0U)) + { + /* Disable the FIFO underrun interrupt */ + __HAL_LTDC_DISABLE_IT(hltdc, LTDC_IT_FU); + + /* Clear the FIFO underrun flag */ + __HAL_LTDC_CLEAR_FLAG(hltdc, LTDC_FLAG_FU); + + /* Update error code */ + hltdc->ErrorCode |= HAL_LTDC_ERROR_FU; + + /* Change LTDC state */ + hltdc->State = HAL_LTDC_STATE_ERROR; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + /* Transfer error Callback */ +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + hltdc->ErrorCallback(hltdc); +#else + /* Call legacy error callback*/ + HAL_LTDC_ErrorCallback(hltdc); +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ + } + + /* Line Interrupt management ************************************************/ + if (((isrflags & LTDC_ISR_LIF) != 0U) && ((itsources & LTDC_IER_LIE) != 0U)) + { + /* Disable the Line interrupt */ + __HAL_LTDC_DISABLE_IT(hltdc, LTDC_IT_LI); + + /* Clear the Line interrupt flag */ + __HAL_LTDC_CLEAR_FLAG(hltdc, LTDC_FLAG_LI); + + /* Change LTDC state */ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + /* Line interrupt Callback */ +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) + /*Call registered Line Event callback */ + hltdc->LineEventCallback(hltdc); +#else + /*Call Legacy Line Event callback */ + HAL_LTDC_LineEventCallback(hltdc); +#endif /* USE_HAL_LTDC_REGISTER_CALLBACKS */ + } + + /* Register reload Interrupt management ***************************************/ + if (((isrflags & LTDC_ISR_RRIF) != 0U) && ((itsources & LTDC_IER_RRIE) != 0U)) + { + /* Disable the register reload interrupt */ + __HAL_LTDC_DISABLE_IT(hltdc, LTDC_IT_RR); + + /* Clear the register reload flag */ + __HAL_LTDC_CLEAR_FLAG(hltdc, LTDC_FLAG_RR); + + /* Change LTDC state */ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + /* Reload interrupt Callback */ +#if (USE_HAL_LTDC_REGISTER_CALLBACKS == 1) + /*Call registered reload Event callback */ + hltdc->ReloadEventCallback(hltdc); +#else + /*Call Legacy Reload Event callback */ + HAL_LTDC_ReloadEventCallback(hltdc); +#endif /* 
USE_HAL_LTDC_REGISTER_CALLBACKS */ + } +} + +/** + * @brief Error LTDC callback. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval None + */ +__weak void HAL_LTDC_ErrorCallback(LTDC_HandleTypeDef *hltdc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hltdc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_LTDC_ErrorCallback could be implemented in the user file + */ +} + +/** + * @brief Line Event callback. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval None + */ +__weak void HAL_LTDC_LineEventCallback(LTDC_HandleTypeDef *hltdc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hltdc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_LTDC_LineEventCallback could be implemented in the user file + */ +} + +/** + * @brief Reload Event callback. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval None + */ +__weak void HAL_LTDC_ReloadEventCallback(LTDC_HandleTypeDef *hltdc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hltdc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_LTDC_ReloadEvenCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup LTDC_Exported_Functions_Group3 Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the LTDC foreground or/and background parameters. + (+) Set the active layer. + (+) Configure the color keying. + (+) Configure the C-LUT. + (+) Enable / Disable the color keying. + (+) Enable / Disable the C-LUT. + (+) Update the layer position. + (+) Update the layer size. + (+) Update pixel format on the fly. + (+) Update transparency on the fly. + (+) Update address on the fly. + +@endverbatim + * @{ + */ + +/** + * @brief Configure the LTDC Layer according to the specified + * parameters in the LTDC_InitTypeDef and create the associated handle. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param pLayerCfg pointer to a LTDC_LayerCfgTypeDef structure that contains + * the configuration information for the Layer. + * @param LayerIdx LTDC Layer index. 
+ * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_ConfigLayer(LTDC_HandleTypeDef *hltdc, LTDC_LayerCfgTypeDef *pLayerCfg, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + assert_param(IS_LTDC_HCONFIGST(pLayerCfg->WindowX0)); + assert_param(IS_LTDC_HCONFIGSP(pLayerCfg->WindowX1)); + assert_param(IS_LTDC_VCONFIGST(pLayerCfg->WindowY0)); + assert_param(IS_LTDC_VCONFIGSP(pLayerCfg->WindowY1)); + assert_param(IS_LTDC_PIXEL_FORMAT(pLayerCfg->PixelFormat)); + assert_param(IS_LTDC_ALPHA(pLayerCfg->Alpha)); + assert_param(IS_LTDC_ALPHA(pLayerCfg->Alpha0)); + assert_param(IS_LTDC_BLENDING_FACTOR1(pLayerCfg->BlendingFactor1)); + assert_param(IS_LTDC_BLENDING_FACTOR2(pLayerCfg->BlendingFactor2)); + assert_param(IS_LTDC_CFBLL(pLayerCfg->ImageWidth)); + assert_param(IS_LTDC_CFBLNBR(pLayerCfg->ImageHeight)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Copy new layer configuration into handle structure */ + hltdc->LayerCfg[LayerIdx] = *pLayerCfg; + + /* Configure the LTDC Layer */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Initialize the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Configure the color keying. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param RGBValue the color key value + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_ConfigColorKeying(LTDC_HandleTypeDef *hltdc, uint32_t RGBValue, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Configure the default color values */ + LTDC_LAYER(hltdc, LayerIdx)->CKCR &= ~(LTDC_LxCKCR_CKBLUE | LTDC_LxCKCR_CKGREEN | LTDC_LxCKCR_CKRED); + LTDC_LAYER(hltdc, LayerIdx)->CKCR = RGBValue; + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Load the color lookup table. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param pCLUT pointer to the color lookup table address. + * @param CLUTSize the color lookup table size. + * @param LayerIdx LTDC Layer index. 
+ * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_ConfigCLUT(LTDC_HandleTypeDef *hltdc, uint32_t *pCLUT, uint32_t CLUTSize, uint32_t LayerIdx) +{ + uint32_t tmp; + uint32_t counter; + uint32_t *pcolorlut = pCLUT; + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + for (counter = 0U; (counter < CLUTSize); counter++) + { + if (hltdc->LayerCfg[LayerIdx].PixelFormat == LTDC_PIXEL_FORMAT_AL44) + { + tmp = (((counter + (16U*counter)) << 24U) | ((uint32_t)(*pcolorlut) & 0xFFU) | ((uint32_t)(*pcolorlut) & 0xFF00U) | ((uint32_t)(*pcolorlut) & 0xFF0000U)); + } + else + { + tmp = ((counter << 24U) | ((uint32_t)(*pcolorlut) & 0xFFU) | ((uint32_t)(*pcolorlut) & 0xFF00U) | ((uint32_t)(*pcolorlut) & 0xFF0000U)); + } + + pcolorlut++; + + /* Specifies the C-LUT address and RGB value */ + LTDC_LAYER(hltdc, LayerIdx)->CLUTWR = tmp; + } + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Enable the color keying. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_EnableColorKeying(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Enable LTDC color keying by setting COLKEN bit */ + LTDC_LAYER(hltdc, LayerIdx)->CR |= (uint32_t)LTDC_LxCR_COLKEN; + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Disable the color keying. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_DisableColorKeying(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Disable LTDC color keying by setting COLKEN bit */ + LTDC_LAYER(hltdc, LayerIdx)->CR &= ~(uint32_t)LTDC_LxCR_COLKEN; + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Enable the color lookup table. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param LayerIdx LTDC Layer index. 
+ * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_EnableCLUT(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Enable LTDC color lookup table by setting CLUTEN bit */ + LTDC_LAYER(hltdc, LayerIdx)->CR |= (uint32_t)LTDC_LxCR_CLUTEN; + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Disable the color lookup table. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_DisableCLUT(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Disable LTDC color lookup table by setting CLUTEN bit */ + LTDC_LAYER(hltdc, LayerIdx)->CR &= ~(uint32_t)LTDC_LxCR_CLUTEN; + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Enable Dither. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval HAL status + */ + +HAL_StatusTypeDef HAL_LTDC_EnableDither(LTDC_HandleTypeDef *hltdc) +{ + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Enable Dither by setting DTEN bit */ + LTDC->GCR |= (uint32_t)LTDC_GCR_DEN; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Disable Dither. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval HAL status + */ + +HAL_StatusTypeDef HAL_LTDC_DisableDither(LTDC_HandleTypeDef *hltdc) +{ + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Disable Dither by setting DTEN bit */ + LTDC->GCR &= ~(uint32_t)LTDC_GCR_DEN; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Set the LTDC window size. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param XSize LTDC Pixel per line + * @param YSize LTDC Line number + * @param LayerIdx LTDC Layer index. 
+ * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetWindowSize(LTDC_HandleTypeDef *hltdc, uint32_t XSize, uint32_t YSize, uint32_t LayerIdx) +{ + LTDC_LayerCfgTypeDef *pLayerCfg; + + /* Check the parameters (Layers parameters)*/ + assert_param(IS_LTDC_LAYER(LayerIdx)); + assert_param(IS_LTDC_CFBLL(XSize)); + assert_param(IS_LTDC_CFBLNBR(YSize)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Get layer configuration from handle structure */ + pLayerCfg = &hltdc->LayerCfg[LayerIdx]; + + /* update horizontal stop */ + pLayerCfg->WindowX1 = XSize + pLayerCfg->WindowX0; + + /* update vertical stop */ + pLayerCfg->WindowY1 = YSize + pLayerCfg->WindowY0; + + /* Reconfigures the color frame buffer pitch in byte */ + pLayerCfg->ImageWidth = XSize; + + /* Reconfigures the frame buffer line number */ + pLayerCfg->ImageHeight = YSize; + + /* Set LTDC parameters */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Set the LTDC window position. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param X0 LTDC window X offset + * @param Y0 LTDC window Y offset + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetWindowPosition(LTDC_HandleTypeDef *hltdc, uint32_t X0, uint32_t Y0, uint32_t LayerIdx) +{ + LTDC_LayerCfgTypeDef *pLayerCfg; + + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + assert_param(IS_LTDC_CFBLL(X0)); + assert_param(IS_LTDC_CFBLNBR(Y0)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Get layer configuration from handle structure */ + pLayerCfg = &hltdc->LayerCfg[LayerIdx]; + + /* update horizontal start/stop */ + pLayerCfg->WindowX0 = X0; + pLayerCfg->WindowX1 = X0 + pLayerCfg->ImageWidth; + + /* update vertical start/stop */ + pLayerCfg->WindowY0 = Y0; + pLayerCfg->WindowY1 = Y0 + pLayerCfg->ImageHeight; + + /* Set LTDC parameters */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Reconfigure the pixel format. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param Pixelformat new pixel format value. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1). 
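+ * @note   For example (a hypothetical call, assuming layer 0 was configured beforehand with
+ *         HAL_LTDC_ConfigLayer()):
+ * @code
+ * HAL_LTDC_SetPixelFormat(&hltdc, LTDC_PIXEL_FORMAT_RGB565, LTDC_LAYER_1);
+ * @endcode
+ *         When the new format uses a different number of bytes per pixel, the frame buffer
+ *         contents and, if needed, the line pitch (see HAL_LTDC_SetPitch()) should be updated as well.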
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetPixelFormat(LTDC_HandleTypeDef *hltdc, uint32_t Pixelformat, uint32_t LayerIdx) +{ + LTDC_LayerCfgTypeDef *pLayerCfg; + + /* Check the parameters */ + assert_param(IS_LTDC_PIXEL_FORMAT(Pixelformat)); + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Get layer configuration from handle structure */ + pLayerCfg = &hltdc->LayerCfg[LayerIdx]; + + /* Reconfigure the pixel format */ + pLayerCfg->PixelFormat = Pixelformat; + + /* Set LTDC parameters */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Reconfigure the layer alpha value. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param Alpha new alpha value. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetAlpha(LTDC_HandleTypeDef *hltdc, uint32_t Alpha, uint32_t LayerIdx) +{ + LTDC_LayerCfgTypeDef *pLayerCfg; + + /* Check the parameters */ + assert_param(IS_LTDC_ALPHA(Alpha)); + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Get layer configuration from handle structure */ + pLayerCfg = &hltdc->LayerCfg[LayerIdx]; + + /* Reconfigure the Alpha value */ + pLayerCfg->Alpha = Alpha; + + /* Set LTDC parameters */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} +/** + * @brief Reconfigure the frame buffer Address. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param Address new address value. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetAddress(LTDC_HandleTypeDef *hltdc, uint32_t Address, uint32_t LayerIdx) +{ + LTDC_LayerCfgTypeDef *pLayerCfg; + + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Get layer configuration from handle structure */ + pLayerCfg = &hltdc->LayerCfg[LayerIdx]; + + /* Reconfigure the Address */ + pLayerCfg->FBStartAdress = Address; + + /* Set LTDC parameters */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Set the Immediate Reload type */ + hltdc->Instance->SRCR = LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Function used to reconfigure the pitch for specific cases where the attached LayerIdx buffer have a width that is + * larger than the one intended to be displayed on screen. 
Example of a buffer 800x480 attached to layer for which we + * want to read and display on screen only a portion 320x240 taken in the center of the buffer. The pitch in pixels + * will be in that case 800 pixels and not 320 pixels as initially configured by previous call to HAL_LTDC_ConfigLayer(). + * @note This function should be called only after a previous call to HAL_LTDC_ConfigLayer() to modify the default pitch + * configured by HAL_LTDC_ConfigLayer() when required (refer to example described just above). + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param LinePitchInPixels New line pitch in pixels to configure for LTDC layer 'LayerIdx'. + * @param LayerIdx LTDC layer index concerned by the modification of line pitch. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetPitch(LTDC_HandleTypeDef *hltdc, uint32_t LinePitchInPixels, uint32_t LayerIdx) +{ + uint32_t tmp; + uint32_t pitchUpdate; + uint32_t pixelFormat; + + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* get LayerIdx used pixel format */ + pixelFormat = hltdc->LayerCfg[LayerIdx].PixelFormat; + + if (pixelFormat == LTDC_PIXEL_FORMAT_ARGB8888) + { + tmp = 4U; + } + else if (pixelFormat == LTDC_PIXEL_FORMAT_RGB888) + { + tmp = 3U; + } + else if ((pixelFormat == LTDC_PIXEL_FORMAT_ARGB4444) || \ + (pixelFormat == LTDC_PIXEL_FORMAT_RGB565) || \ + (pixelFormat == LTDC_PIXEL_FORMAT_ARGB1555) || \ + (pixelFormat == LTDC_PIXEL_FORMAT_AL88)) + { + tmp = 2U; + } + else + { + tmp = 1U; + } + + pitchUpdate = ((LinePitchInPixels * tmp) << 16U); + + /* Clear previously set standard pitch */ + LTDC_LAYER(hltdc, LayerIdx)->CFBLR &= ~LTDC_LxCFBLR_CFBP; + + /* Set the Reload type as immediate update of LTDC pitch configured above */ + LTDC->SRCR |= LTDC_SRCR_IMR; + + /* Set new line pitch value */ + LTDC_LAYER(hltdc, LayerIdx)->CFBLR |= pitchUpdate; + + /* Set the Reload type as immediate update of LTDC pitch configured above */ + LTDC->SRCR |= LTDC_SRCR_IMR; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Define the position of the line interrupt. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param Line Line Interrupt Position. + * @note User application may resort to HAL_LTDC_LineEventCallback() at line interrupt generation. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_ProgramLineEvent(LTDC_HandleTypeDef *hltdc, uint32_t Line) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LIPOS(Line)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Disable the Line interrupt */ + __HAL_LTDC_DISABLE_IT(hltdc, LTDC_IT_LI); + + /* Set the Line Interrupt position */ + LTDC->LIPCR = (uint32_t)Line; + + /* Enable the Line interrupt */ + __HAL_LTDC_ENABLE_IT(hltdc, LTDC_IT_LI); + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Reload LTDC Layers configuration. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. 
+ * @param ReloadType This parameter can be one of the following values : + * LTDC_RELOAD_IMMEDIATE : Immediate Reload + * LTDC_RELOAD_VERTICAL_BLANKING : Reload in the next Vertical Blanking + * @note User application may resort to HAL_LTDC_ReloadEventCallback() at reload interrupt generation. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_Reload(LTDC_HandleTypeDef *hltdc, uint32_t ReloadType) +{ + /* Check the parameters */ + assert_param(IS_LTDC_RELOAD(ReloadType)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Enable the Reload interrupt */ + __HAL_LTDC_ENABLE_IT(hltdc, LTDC_IT_RR); + + /* Apply Reload type */ + hltdc->Instance->SRCR = ReloadType; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Configure the LTDC Layer according to the specified without reloading + * parameters in the LTDC_InitTypeDef and create the associated handle. + * Variant of the function HAL_LTDC_ConfigLayer without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param pLayerCfg pointer to a LTDC_LayerCfgTypeDef structure that contains + * the configuration information for the Layer. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_ConfigLayer_NoReload(LTDC_HandleTypeDef *hltdc, LTDC_LayerCfgTypeDef *pLayerCfg, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + assert_param(IS_LTDC_HCONFIGST(pLayerCfg->WindowX0)); + assert_param(IS_LTDC_HCONFIGSP(pLayerCfg->WindowX1)); + assert_param(IS_LTDC_VCONFIGST(pLayerCfg->WindowY0)); + assert_param(IS_LTDC_VCONFIGSP(pLayerCfg->WindowY1)); + assert_param(IS_LTDC_PIXEL_FORMAT(pLayerCfg->PixelFormat)); + assert_param(IS_LTDC_ALPHA(pLayerCfg->Alpha)); + assert_param(IS_LTDC_ALPHA(pLayerCfg->Alpha0)); + assert_param(IS_LTDC_BLENDING_FACTOR1(pLayerCfg->BlendingFactor1)); + assert_param(IS_LTDC_BLENDING_FACTOR2(pLayerCfg->BlendingFactor2)); + assert_param(IS_LTDC_CFBLL(pLayerCfg->ImageWidth)); + assert_param(IS_LTDC_CFBLNBR(pLayerCfg->ImageHeight)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Copy new layer configuration into handle structure */ + hltdc->LayerCfg[LayerIdx] = *pLayerCfg; + + /* Configure the LTDC Layer */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Initialize the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Set the LTDC window size without reloading. + * Variant of the function HAL_LTDC_SetWindowSize without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param XSize LTDC Pixel per line + * @param YSize LTDC Line number + * @param LayerIdx LTDC Layer index. 
+ * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetWindowSize_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t XSize, uint32_t YSize, uint32_t LayerIdx) +{ + LTDC_LayerCfgTypeDef *pLayerCfg; + + /* Check the parameters (Layers parameters)*/ + assert_param(IS_LTDC_LAYER(LayerIdx)); + assert_param(IS_LTDC_CFBLL(XSize)); + assert_param(IS_LTDC_CFBLNBR(YSize)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Get layer configuration from handle structure */ + pLayerCfg = &hltdc->LayerCfg[LayerIdx]; + + /* update horizontal stop */ + pLayerCfg->WindowX1 = XSize + pLayerCfg->WindowX0; + + /* update vertical stop */ + pLayerCfg->WindowY1 = YSize + pLayerCfg->WindowY0; + + /* Reconfigures the color frame buffer pitch in byte */ + pLayerCfg->ImageWidth = XSize; + + /* Reconfigures the frame buffer line number */ + pLayerCfg->ImageHeight = YSize; + + /* Set LTDC parameters */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Set the LTDC window position without reloading. + * Variant of the function HAL_LTDC_SetWindowPosition without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param X0 LTDC window X offset + * @param Y0 LTDC window Y offset + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetWindowPosition_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t X0, uint32_t Y0, uint32_t LayerIdx) +{ + LTDC_LayerCfgTypeDef *pLayerCfg; + + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + assert_param(IS_LTDC_CFBLL(X0)); + assert_param(IS_LTDC_CFBLNBR(Y0)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Get layer configuration from handle structure */ + pLayerCfg = &hltdc->LayerCfg[LayerIdx]; + + /* update horizontal start/stop */ + pLayerCfg->WindowX0 = X0; + pLayerCfg->WindowX1 = X0 + pLayerCfg->ImageWidth; + + /* update vertical start/stop */ + pLayerCfg->WindowY0 = Y0; + pLayerCfg->WindowY1 = Y0 + pLayerCfg->ImageHeight; + + /* Set LTDC parameters */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Reconfigure the pixel format without reloading. + * Variant of the function HAL_LTDC_SetPixelFormat without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDfef structure that contains + * the configuration information for the LTDC. + * @param Pixelformat new pixel format value. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1). 
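+ * @note   The _NoReload variants are typically batched and then committed with a single reload,
+ *         for example (hypothetical values; MyFrameBuffer is an application buffer and layer 0 is
+ *         assumed to be already configured):
+ * @code
+ * HAL_LTDC_SetPixelFormat_NoReload(&hltdc, LTDC_PIXEL_FORMAT_RGB565, LTDC_LAYER_1);
+ * HAL_LTDC_SetAlpha_NoReload(&hltdc, 200, LTDC_LAYER_1);
+ * HAL_LTDC_SetAddress_NoReload(&hltdc, (uint32_t)MyFrameBuffer, LTDC_LAYER_1);
+ * // Apply all of the above during the next vertical blanking period to avoid flicker.
+ * HAL_LTDC_Reload(&hltdc, LTDC_RELOAD_VERTICAL_BLANKING);
+ * @endcode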
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetPixelFormat_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t Pixelformat, uint32_t LayerIdx) +{ + LTDC_LayerCfgTypeDef *pLayerCfg; + + /* Check the parameters */ + assert_param(IS_LTDC_PIXEL_FORMAT(Pixelformat)); + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Get layer configuration from handle structure */ + pLayerCfg = &hltdc->LayerCfg[LayerIdx]; + + /* Reconfigure the pixel format */ + pLayerCfg->PixelFormat = Pixelformat; + + /* Set LTDC parameters */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Reconfigure the layer alpha value without reloading. + * Variant of the function HAL_LTDC_SetAlpha without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param Alpha new alpha value. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetAlpha_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t Alpha, uint32_t LayerIdx) +{ + LTDC_LayerCfgTypeDef *pLayerCfg; + + /* Check the parameters */ + assert_param(IS_LTDC_ALPHA(Alpha)); + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Get layer configuration from handle structure */ + pLayerCfg = &hltdc->LayerCfg[LayerIdx]; + + /* Reconfigure the Alpha value */ + pLayerCfg->Alpha = Alpha; + + /* Set LTDC parameters */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Reconfigure the frame buffer Address without reloading. + * Variant of the function HAL_LTDC_SetAddress without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param Address new address value. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetAddress_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t Address, uint32_t LayerIdx) +{ + LTDC_LayerCfgTypeDef *pLayerCfg; + + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Get layer configuration from handle structure */ + pLayerCfg = &hltdc->LayerCfg[LayerIdx]; + + /* Reconfigure the Address */ + pLayerCfg->FBStartAdress = Address; + + /* Set LTDC parameters */ + LTDC_SetConfig(hltdc, pLayerCfg, LayerIdx); + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Function used to reconfigure the pitch for specific cases where the attached LayerIdx buffer have a width that is + * larger than the one intended to be displayed on screen. 
Example of a buffer 800x480 attached to layer for which we + * want to read and display on screen only a portion 320x240 taken in the center of the buffer. The pitch in pixels + * will be in that case 800 pixels and not 320 pixels as initially configured by previous call to HAL_LTDC_ConfigLayer(). + * @note This function should be called only after a previous call to HAL_LTDC_ConfigLayer() to modify the default pitch + * configured by HAL_LTDC_ConfigLayer() when required (refer to example described just above). + * Variant of the function HAL_LTDC_SetPitch without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param LinePitchInPixels New line pitch in pixels to configure for LTDC layer 'LayerIdx'. + * @param LayerIdx LTDC layer index concerned by the modification of line pitch. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_SetPitch_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t LinePitchInPixels, uint32_t LayerIdx) +{ + uint32_t tmp; + uint32_t pitchUpdate; + uint32_t pixelFormat; + + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* get LayerIdx used pixel format */ + pixelFormat = hltdc->LayerCfg[LayerIdx].PixelFormat; + + if (pixelFormat == LTDC_PIXEL_FORMAT_ARGB8888) + { + tmp = 4U; + } + else if (pixelFormat == LTDC_PIXEL_FORMAT_RGB888) + { + tmp = 3U; + } + else if ((pixelFormat == LTDC_PIXEL_FORMAT_ARGB4444) || \ + (pixelFormat == LTDC_PIXEL_FORMAT_RGB565) || \ + (pixelFormat == LTDC_PIXEL_FORMAT_ARGB1555) || \ + (pixelFormat == LTDC_PIXEL_FORMAT_AL88)) + { + tmp = 2U; + } + else + { + tmp = 1U; + } + + pitchUpdate = ((LinePitchInPixels * tmp) << 16U); + + /* Clear previously set standard pitch */ + LTDC_LAYER(hltdc, LayerIdx)->CFBLR &= ~LTDC_LxCFBLR_CFBP; + + /* Set new line pitch value */ + LTDC_LAYER(hltdc, LayerIdx)->CFBLR |= pitchUpdate; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + + +/** + * @brief Configure the color keying without reloading. + * Variant of the function HAL_LTDC_ConfigColorKeying without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param RGBValue the color key value + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_ConfigColorKeying_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t RGBValue, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Configure the default color values */ + LTDC_LAYER(hltdc, LayerIdx)->CKCR &= ~(LTDC_LxCKCR_CKBLUE | LTDC_LxCKCR_CKGREEN | LTDC_LxCKCR_CKRED); + LTDC_LAYER(hltdc, LayerIdx)->CKCR = RGBValue; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Enable the color keying without reloading. + * Variant of the function HAL_LTDC_EnableColorKeying without immediate reload. 
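+ * @note   Typically paired with HAL_LTDC_ConfigColorKeying_NoReload() and followed by a single reload, e.g. (handle name and key value are illustrative): + * @code + *   HAL_LTDC_ConfigColorKeying_NoReload(&hltdc, 0x000000U, LTDC_LAYER_1); + *   HAL_LTDC_EnableColorKeying_NoReload(&hltdc, LTDC_LAYER_1); + *   HAL_LTDC_Reload(&hltdc, LTDC_RELOAD_VERTICAL_BLANKING); + * @endcode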
+ * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_EnableColorKeying_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Enable LTDC color keying by setting COLKEN bit */ + LTDC_LAYER(hltdc, LayerIdx)->CR |= (uint32_t)LTDC_LxCR_COLKEN; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Disable the color keying without reloading. + * Variant of the function HAL_LTDC_DisableColorKeying without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_DisableColorKeying_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Disable LTDC color keying by setting COLKEN bit */ + LTDC_LAYER(hltdc, LayerIdx)->CR &= ~(uint32_t)LTDC_LxCR_COLKEN; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Enable the color lookup table without reloading. + * Variant of the function HAL_LTDC_EnableCLUT without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param LayerIdx LTDC Layer index. + * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_EnableCLUT_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Disable LTDC color lookup table by setting CLUTEN bit */ + LTDC_LAYER(hltdc, LayerIdx)->CR |= (uint32_t)LTDC_LxCR_CLUTEN; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @brief Disable the color lookup table without reloading. + * Variant of the function HAL_LTDC_DisableCLUT without immediate reload. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param LayerIdx LTDC Layer index. 
+ * This parameter can be one of the following values: + * LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDC_DisableCLUT_NoReload(LTDC_HandleTypeDef *hltdc, uint32_t LayerIdx) +{ + /* Check the parameters */ + assert_param(IS_LTDC_LAYER(LayerIdx)); + + /* Process locked */ + __HAL_LOCK(hltdc); + + /* Change LTDC peripheral state */ + hltdc->State = HAL_LTDC_STATE_BUSY; + + /* Disable LTDC color lookup table by setting CLUTEN bit */ + LTDC_LAYER(hltdc, LayerIdx)->CR &= ~(uint32_t)LTDC_LxCR_CLUTEN; + + /* Change the LTDC state*/ + hltdc->State = HAL_LTDC_STATE_READY; + + /* Process unlocked */ + __HAL_UNLOCK(hltdc); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup LTDC_Exported_Functions_Group4 Peripheral State and Errors functions + * @brief Peripheral State and Errors functions + * +@verbatim + =============================================================================== + ##### Peripheral State and Errors functions ##### + =============================================================================== + [..] + This subsection provides functions allowing to + (+) Check the LTDC handle state. + (+) Get the LTDC handle error code. + +@endverbatim + * @{ + */ + +/** + * @brief Return the LTDC handle state. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval HAL state + */ +HAL_LTDC_StateTypeDef HAL_LTDC_GetState(LTDC_HandleTypeDef *hltdc) +{ + return hltdc->State; +} + +/** + * @brief Return the LTDC handle error code. + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @retval LTDC Error Code + */ +uint32_t HAL_LTDC_GetError(LTDC_HandleTypeDef *hltdc) +{ + return hltdc->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup LTDC_Private_Functions LTDC Private Functions + * @{ + */ + +/** + * @brief Configure the LTDC peripheral + * @param hltdc Pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param pLayerCfg Pointer LTDC Layer Configuration structure + * @param LayerIdx LTDC Layer index. 
+ * This parameter can be one of the following values: LTDC_LAYER_1 (0) or LTDC_LAYER_2 (1) + * @retval None + */ +static void LTDC_SetConfig(LTDC_HandleTypeDef *hltdc, LTDC_LayerCfgTypeDef *pLayerCfg, uint32_t LayerIdx) +{ + uint32_t tmp; + uint32_t tmp1; + uint32_t tmp2; + + /* Configure the horizontal start and stop position */ + tmp = ((pLayerCfg->WindowX1 + ((hltdc->Instance->BPCR & LTDC_BPCR_AHBP) >> 16U)) << 16U); + LTDC_LAYER(hltdc, LayerIdx)->WHPCR &= ~(LTDC_LxWHPCR_WHSTPOS | LTDC_LxWHPCR_WHSPPOS); + LTDC_LAYER(hltdc, LayerIdx)->WHPCR = ((pLayerCfg->WindowX0 + ((hltdc->Instance->BPCR & LTDC_BPCR_AHBP) >> 16U) + 1U) | tmp); + + /* Configure the vertical start and stop position */ + tmp = ((pLayerCfg->WindowY1 + (hltdc->Instance->BPCR & LTDC_BPCR_AVBP)) << 16U); + LTDC_LAYER(hltdc, LayerIdx)->WVPCR &= ~(LTDC_LxWVPCR_WVSTPOS | LTDC_LxWVPCR_WVSPPOS); + LTDC_LAYER(hltdc, LayerIdx)->WVPCR = ((pLayerCfg->WindowY0 + (hltdc->Instance->BPCR & LTDC_BPCR_AVBP) + 1U) | tmp); + + /* Specifies the pixel format */ + LTDC_LAYER(hltdc, LayerIdx)->PFCR &= ~(LTDC_LxPFCR_PF); + LTDC_LAYER(hltdc, LayerIdx)->PFCR = (pLayerCfg->PixelFormat); + + /* Configure the default color values */ + tmp = ((uint32_t)(pLayerCfg->Backcolor.Green) << 8U); + tmp1 = ((uint32_t)(pLayerCfg->Backcolor.Red) << 16U); + tmp2 = (pLayerCfg->Alpha0 << 24U); + LTDC_LAYER(hltdc, LayerIdx)->DCCR &= ~(LTDC_LxDCCR_DCBLUE | LTDC_LxDCCR_DCGREEN | LTDC_LxDCCR_DCRED | LTDC_LxDCCR_DCALPHA); + LTDC_LAYER(hltdc, LayerIdx)->DCCR = (pLayerCfg->Backcolor.Blue | tmp | tmp1 | tmp2); + + /* Specifies the constant alpha value */ + LTDC_LAYER(hltdc, LayerIdx)->CACR &= ~(LTDC_LxCACR_CONSTA); + LTDC_LAYER(hltdc, LayerIdx)->CACR = (pLayerCfg->Alpha); + + /* Specifies the blending factors */ + LTDC_LAYER(hltdc, LayerIdx)->BFCR &= ~(LTDC_LxBFCR_BF2 | LTDC_LxBFCR_BF1); + LTDC_LAYER(hltdc, LayerIdx)->BFCR = (pLayerCfg->BlendingFactor1 | pLayerCfg->BlendingFactor2); + + /* Configure the color frame buffer start address */ + LTDC_LAYER(hltdc, LayerIdx)->CFBAR &= ~(LTDC_LxCFBAR_CFBADD); + LTDC_LAYER(hltdc, LayerIdx)->CFBAR = (pLayerCfg->FBStartAdress); + + if (pLayerCfg->PixelFormat == LTDC_PIXEL_FORMAT_ARGB8888) + { + tmp = 4U; + } + else if (pLayerCfg->PixelFormat == LTDC_PIXEL_FORMAT_RGB888) + { + tmp = 3U; + } + else if ((pLayerCfg->PixelFormat == LTDC_PIXEL_FORMAT_ARGB4444) || \ + (pLayerCfg->PixelFormat == LTDC_PIXEL_FORMAT_RGB565) || \ + (pLayerCfg->PixelFormat == LTDC_PIXEL_FORMAT_ARGB1555) || \ + (pLayerCfg->PixelFormat == LTDC_PIXEL_FORMAT_AL88)) + { + tmp = 2U; + } + else + { + tmp = 1U; + } + + /* Configure the color frame buffer pitch in byte */ + LTDC_LAYER(hltdc, LayerIdx)->CFBLR &= ~(LTDC_LxCFBLR_CFBLL | LTDC_LxCFBLR_CFBP); + LTDC_LAYER(hltdc, LayerIdx)->CFBLR = (((pLayerCfg->ImageWidth * tmp) << 16U) | (((pLayerCfg->WindowX1 - pLayerCfg->WindowX0) * tmp) + 3U)); + /* Configure the frame buffer line number */ + LTDC_LAYER(hltdc, LayerIdx)->CFBLNR &= ~(LTDC_LxCFBLNR_CFBLNBR); + LTDC_LAYER(hltdc, LayerIdx)->CFBLNR = (pLayerCfg->ImageHeight); + + /* Enable LTDC_Layer by setting LEN bit */ + LTDC_LAYER(hltdc, LayerIdx)->CR |= (uint32_t)LTDC_LxCR_LEN; +} + +/** + * @} + */ + + +/** + * @} + */ + + +/** + * @} + */ + +#endif /* LTDC */ +#endif /* HAL_LTDC_MODULE_ENABLED */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc_ex.c new file 
mode 100644 index 000000000..79c6a24f7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_ltdc_ex.c @@ -0,0 +1,149 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_ltdc_ex.c + * @author MCD Application Team + * @brief LTDC Extension HAL module driver. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +#if defined (LTDC) && defined (DSI) + +/** @defgroup LTDCEx LTDCEx + * @brief LTDC HAL module driver + * @{ + */ + +#if defined(HAL_LTDC_MODULE_ENABLED) && defined(HAL_DSI_MODULE_ENABLED) + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup LTDCEx_Exported_Functions LTDC Extended Exported Functions + * @{ + */ + +/** @defgroup LTDCEx_Exported_Functions_Group1 Initialization and Configuration functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initialize and configure the LTDC + +@endverbatim + * @{ + */ + +/** + * @brief Retrieve common parameters from DSI Video mode configuration structure + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param VidCfg pointer to a DSI_VidCfgTypeDef structure that contains + * the DSI video mode configuration parameters + * @note The implementation of this function is taking into account the LTDC + * polarities inversion as described in the current LTDC specification + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDCEx_StructInitFromVideoConfig(LTDC_HandleTypeDef *hltdc, DSI_VidCfgTypeDef *VidCfg) +{ + /* Retrieve signal polarities from DSI */ + + /* The following polarity is inverted: + LTDC_DEPOLARITY_AL <-> LTDC_DEPOLARITY_AH */ + + /* Note 1 : Code in line w/ Current LTDC specification */ + hltdc->Init.DEPolarity = (VidCfg->DEPolarity == DSI_DATA_ENABLE_ACTIVE_HIGH) ? LTDC_DEPOLARITY_AL : LTDC_DEPOLARITY_AH; + hltdc->Init.VSPolarity = (VidCfg->VSPolarity == DSI_VSYNC_ACTIVE_HIGH) ? LTDC_VSPOLARITY_AH : LTDC_VSPOLARITY_AL; + hltdc->Init.HSPolarity = (VidCfg->HSPolarity == DSI_HSYNC_ACTIVE_HIGH) ? 
LTDC_HSPOLARITY_AH : LTDC_HSPOLARITY_AL; + + /* Note 2: Code to be used in case LTDC polarities inversion updated in the specification */ + /* hltdc->Init.DEPolarity = VidCfg->DEPolarity << 29; + hltdc->Init.VSPolarity = VidCfg->VSPolarity << 29; + hltdc->Init.HSPolarity = VidCfg->HSPolarity << 29; */ + + /* Retrieve vertical timing parameters from DSI */ + hltdc->Init.VerticalSync = VidCfg->VerticalSyncActive - 1U; + hltdc->Init.AccumulatedVBP = VidCfg->VerticalSyncActive + VidCfg->VerticalBackPorch - 1U; + hltdc->Init.AccumulatedActiveH = VidCfg->VerticalSyncActive + VidCfg->VerticalBackPorch + VidCfg->VerticalActive - 1U; + hltdc->Init.TotalHeigh = VidCfg->VerticalSyncActive + VidCfg->VerticalBackPorch + VidCfg->VerticalActive + VidCfg->VerticalFrontPorch - 1U; + + return HAL_OK; +} + +/** + * @brief Retrieve common parameters from DSI Adapted command mode configuration structure + * @param hltdc pointer to a LTDC_HandleTypeDef structure that contains + * the configuration information for the LTDC. + * @param CmdCfg pointer to a DSI_CmdCfgTypeDef structure that contains + * the DSI command mode configuration parameters + * @note The implementation of this function is taking into account the LTDC + * polarities inversion as described in the current LTDC specification + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LTDCEx_StructInitFromAdaptedCommandConfig(LTDC_HandleTypeDef *hltdc, DSI_CmdCfgTypeDef *CmdCfg) +{ + /* Retrieve signal polarities from DSI */ + + /* The following polarities are inverted: + LTDC_DEPOLARITY_AL <-> LTDC_DEPOLARITY_AH + LTDC_VSPOLARITY_AL <-> LTDC_VSPOLARITY_AH + LTDC_HSPOLARITY_AL <-> LTDC_HSPOLARITY_AH)*/ + + /* Note 1 : Code in line w/ Current LTDC specification */ + hltdc->Init.DEPolarity = (CmdCfg->DEPolarity == DSI_DATA_ENABLE_ACTIVE_HIGH) ? LTDC_DEPOLARITY_AL : LTDC_DEPOLARITY_AH; + hltdc->Init.VSPolarity = (CmdCfg->VSPolarity == DSI_VSYNC_ACTIVE_HIGH) ? LTDC_VSPOLARITY_AL : LTDC_VSPOLARITY_AH; + hltdc->Init.HSPolarity = (CmdCfg->HSPolarity == DSI_HSYNC_ACTIVE_HIGH) ? LTDC_HSPOLARITY_AL : LTDC_HSPOLARITY_AH; + + /* Note 2: Code to be used in case LTDC polarities inversion updated in the specification */ + /* hltdc->Init.DEPolarity = CmdCfg->DEPolarity << 29; + hltdc->Init.VSPolarity = CmdCfg->VSPolarity << 29; + hltdc->Init.HSPolarity = CmdCfg->HSPolarity << 29; */ + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_LTCD_MODULE_ENABLED && HAL_DSI_MODULE_ENABLED */ + +/** + * @} + */ + +#endif /* LTDC && DSI */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c new file mode 100644 index 000000000..6ce4edda1 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c @@ -0,0 +1,2102 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pcd.c + * @author MCD Application Team + * @brief PCD HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the USB Peripheral Controller: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The PCD HAL driver can be used as follows: + + (#) Declare a PCD_HandleTypeDef handle structure, for example: + PCD_HandleTypeDef hpcd; + + (#) Fill parameters of Init structure in PCD handle + + (#) Call HAL_PCD_Init() API to initialize the PCD peripheral (Core, Device core, ...) + + (#) Initialize the PCD low level resources through the HAL_PCD_MspInit() API: + (##) Enable the PCD/USB Low Level interface clock using + (+++) __HAL_RCC_USB_OTG_FS_CLK_ENABLE(); + (+++) __HAL_RCC_USB_OTG_HS_CLK_ENABLE(); (For High Speed Mode) + + (##) Initialize the related GPIO clocks + (##) Configure PCD pin-out + (##) Configure PCD NVIC interrupt + + (#) Associate the Upper USB device stack to the HAL PCD Driver: + (##) hpcd.pData = pdev; + + (#) Enable PCD transmission and reception: + (##) HAL_PCD_Start(); + + @endverbatim + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup PCD PCD + * @brief PCD HAL module driver + * @{ + */ + +#ifdef HAL_PCD_MODULE_ENABLED + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup PCD_Private_Macros PCD Private Macros + * @{ + */ +#define PCD_MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define PCD_MAX(a, b) (((a) > (b)) ? (a) : (b)) +/** + * @} + */ + +/* Private functions prototypes ----------------------------------------------*/ +/** @defgroup PCD_Private_Functions PCD Private Functions + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +static HAL_StatusTypeDef PCD_WriteEmptyTxFifo(PCD_HandleTypeDef *hpcd, uint32_t epnum); +static HAL_StatusTypeDef PCD_EP_OutXfrComplete_int(PCD_HandleTypeDef *hpcd, uint32_t epnum); +static HAL_StatusTypeDef PCD_EP_OutSetupPacket_int(PCD_HandleTypeDef *hpcd, uint32_t epnum); +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup PCD_Exported_Functions PCD Exported Functions + * @{ + */ + +/** @defgroup PCD_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This section provides functions allowing to: + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the PCD according to the specified + * parameters in the PCD_InitTypeDef and initialize the associated handle. 
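+ * @note   A minimal bring-up sketch for the OTG FS instance (field values and the handle name are illustrative and must be adapted by the application): + * @code + *   PCD_HandleTypeDef hpcd = {0}; + *   hpcd.Instance = USB_OTG_FS; + *   hpcd.Init.dev_endpoints = 4U; + *   hpcd.Init.speed = PCD_SPEED_FULL; + *   hpcd.Init.phy_itface = PCD_PHY_EMBEDDED; + *   (void)HAL_PCD_Init(&hpcd); + *   (void)HAL_PCD_Start(&hpcd); + * @endcode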
+ * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_Init(PCD_HandleTypeDef *hpcd) +{ + USB_OTG_GlobalTypeDef *USBx; + uint8_t i; + + /* Check the PCD handle allocation */ + if (hpcd == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_PCD_ALL_INSTANCE(hpcd->Instance)); + + USBx = hpcd->Instance; + + if (hpcd->State == HAL_PCD_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hpcd->Lock = HAL_UNLOCKED; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->SOFCallback = HAL_PCD_SOFCallback; + hpcd->SetupStageCallback = HAL_PCD_SetupStageCallback; + hpcd->ResetCallback = HAL_PCD_ResetCallback; + hpcd->SuspendCallback = HAL_PCD_SuspendCallback; + hpcd->ResumeCallback = HAL_PCD_ResumeCallback; + hpcd->ConnectCallback = HAL_PCD_ConnectCallback; + hpcd->DisconnectCallback = HAL_PCD_DisconnectCallback; + hpcd->DataOutStageCallback = HAL_PCD_DataOutStageCallback; + hpcd->DataInStageCallback = HAL_PCD_DataInStageCallback; + hpcd->ISOOUTIncompleteCallback = HAL_PCD_ISOOUTIncompleteCallback; + hpcd->ISOINIncompleteCallback = HAL_PCD_ISOINIncompleteCallback; + + if (hpcd->MspInitCallback == NULL) + { + hpcd->MspInitCallback = HAL_PCD_MspInit; + } + + /* Init the low level hardware */ + hpcd->MspInitCallback(hpcd); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC... */ + HAL_PCD_MspInit(hpcd); +#endif /* (USE_HAL_PCD_REGISTER_CALLBACKS) */ + } + + hpcd->State = HAL_PCD_STATE_BUSY; + + /* Disable DMA mode for FS instance */ + if ((USBx->CID & (0x1U << 8)) == 0U) + { + hpcd->Init.dma_enable = 0U; + } + + /* Disable the Interrupts */ + __HAL_PCD_DISABLE(hpcd); + + /*Init the Core (common init.) */ + if (USB_CoreInit(hpcd->Instance, hpcd->Init) != HAL_OK) + { + hpcd->State = HAL_PCD_STATE_ERROR; + return HAL_ERROR; + } + + /* Force Device Mode*/ + (void)USB_SetCurrentMode(hpcd->Instance, USB_DEVICE_MODE); + + /* Init endpoints structures */ + for (i = 0U; i < hpcd->Init.dev_endpoints; i++) + { + /* Init ep structure */ + hpcd->IN_ep[i].is_in = 1U; + hpcd->IN_ep[i].num = i; + hpcd->IN_ep[i].tx_fifo_num = i; + /* Control until ep is activated */ + hpcd->IN_ep[i].type = EP_TYPE_CTRL; + hpcd->IN_ep[i].maxpacket = 0U; + hpcd->IN_ep[i].xfer_buff = 0U; + hpcd->IN_ep[i].xfer_len = 0U; + } + + for (i = 0U; i < hpcd->Init.dev_endpoints; i++) + { + hpcd->OUT_ep[i].is_in = 0U; + hpcd->OUT_ep[i].num = i; + /* Control until ep is activated */ + hpcd->OUT_ep[i].type = EP_TYPE_CTRL; + hpcd->OUT_ep[i].maxpacket = 0U; + hpcd->OUT_ep[i].xfer_buff = 0U; + hpcd->OUT_ep[i].xfer_len = 0U; + } + + /* Init Device */ + if (USB_DevInit(hpcd->Instance, hpcd->Init) != HAL_OK) + { + hpcd->State = HAL_PCD_STATE_ERROR; + return HAL_ERROR; + } + + hpcd->USB_Address = 0U; + hpcd->State = HAL_PCD_STATE_READY; + + /* Activate LPM */ + if (hpcd->Init.lpm_enable == 1U) + { + (void)HAL_PCDEx_ActivateLPM(hpcd); + } + + (void)USB_DevDisconnect(hpcd->Instance); + + return HAL_OK; +} + +/** + * @brief DeInitializes the PCD peripheral. 
+ * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_DeInit(PCD_HandleTypeDef *hpcd) +{ + /* Check the PCD handle allocation */ + if (hpcd == NULL) + { + return HAL_ERROR; + } + + hpcd->State = HAL_PCD_STATE_BUSY; + + /* Stop Device */ + (void)HAL_PCD_Stop(hpcd); + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + if (hpcd->MspDeInitCallback == NULL) + { + hpcd->MspDeInitCallback = HAL_PCD_MspDeInit; /* Legacy weak MspDeInit */ + } + + /* DeInit the low level hardware */ + hpcd->MspDeInitCallback(hpcd); +#else + /* DeInit the low level hardware: CLOCK, NVIC.*/ + HAL_PCD_MspDeInit(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + hpcd->State = HAL_PCD_STATE_RESET; + + return HAL_OK; +} + +/** + * @brief Initializes the PCD MSP. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_MspInit(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes PCD MSP. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_MspDeInit(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +/** + * @brief Register a User USB PCD Callback + * To be used instead of the weak predefined callback + * @param hpcd USB PCD handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_PCD_SOF_CB_ID USB PCD SOF callback ID + * @arg @ref HAL_PCD_SETUPSTAGE_CB_ID USB PCD Setup callback ID + * @arg @ref HAL_PCD_RESET_CB_ID USB PCD Reset callback ID + * @arg @ref HAL_PCD_SUSPEND_CB_ID USB PCD Suspend callback ID + * @arg @ref HAL_PCD_RESUME_CB_ID USB PCD Resume callback ID + * @arg @ref HAL_PCD_CONNECT_CB_ID USB PCD Connect callback ID + * @arg @ref HAL_PCD_DISCONNECT_CB_ID OTG PCD Disconnect callback ID + * @arg @ref HAL_PCD_MSPINIT_CB_ID MspDeInit callback ID + * @arg @ref HAL_PCD_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterCallback(PCD_HandleTypeDef *hpcd, HAL_PCD_CallbackIDTypeDef CallbackID, pPCD_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + switch (CallbackID) + { + case HAL_PCD_SOF_CB_ID : + hpcd->SOFCallback = pCallback; + break; + + case HAL_PCD_SETUPSTAGE_CB_ID : + hpcd->SetupStageCallback = pCallback; + break; + + case HAL_PCD_RESET_CB_ID : + hpcd->ResetCallback = pCallback; + break; + + case HAL_PCD_SUSPEND_CB_ID : + hpcd->SuspendCallback = pCallback; + break; + + case HAL_PCD_RESUME_CB_ID : + hpcd->ResumeCallback = pCallback; + break; + + case HAL_PCD_CONNECT_CB_ID : + hpcd->ConnectCallback = pCallback; + break; + + case HAL_PCD_DISCONNECT_CB_ID : + hpcd->DisconnectCallback = pCallback; + break; + + case HAL_PCD_MSPINIT_CB_ID : + hpcd->MspInitCallback = pCallback; + break; + + case HAL_PCD_MSPDEINIT_CB_ID : + hpcd->MspDeInitCallback = pCallback; + break; + + 
default : + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (hpcd->State == HAL_PCD_STATE_RESET) + { + switch (CallbackID) + { + case HAL_PCD_MSPINIT_CB_ID : + hpcd->MspInitCallback = pCallback; + break; + + case HAL_PCD_MSPDEINIT_CB_ID : + hpcd->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + return status; +} + +/** + * @brief Unregister an USB PCD Callback + * USB PCD callabck is redirected to the weak predefined callback + * @param hpcd USB PCD handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_PCD_SOF_CB_ID USB PCD SOF callback ID + * @arg @ref HAL_PCD_SETUPSTAGE_CB_ID USB PCD Setup callback ID + * @arg @ref HAL_PCD_RESET_CB_ID USB PCD Reset callback ID + * @arg @ref HAL_PCD_SUSPEND_CB_ID USB PCD Suspend callback ID + * @arg @ref HAL_PCD_RESUME_CB_ID USB PCD Resume callback ID + * @arg @ref HAL_PCD_CONNECT_CB_ID USB PCD Connect callback ID + * @arg @ref HAL_PCD_DISCONNECT_CB_ID OTG PCD Disconnect callback ID + * @arg @ref HAL_PCD_MSPINIT_CB_ID MspDeInit callback ID + * @arg @ref HAL_PCD_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterCallback(PCD_HandleTypeDef *hpcd, HAL_PCD_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + /* Setup Legacy weak Callbacks */ + if (hpcd->State == HAL_PCD_STATE_READY) + { + switch (CallbackID) + { + case HAL_PCD_SOF_CB_ID : + hpcd->SOFCallback = HAL_PCD_SOFCallback; + break; + + case HAL_PCD_SETUPSTAGE_CB_ID : + hpcd->SetupStageCallback = HAL_PCD_SetupStageCallback; + break; + + case HAL_PCD_RESET_CB_ID : + hpcd->ResetCallback = HAL_PCD_ResetCallback; + break; + + case HAL_PCD_SUSPEND_CB_ID : + hpcd->SuspendCallback = HAL_PCD_SuspendCallback; + break; + + case HAL_PCD_RESUME_CB_ID : + hpcd->ResumeCallback = HAL_PCD_ResumeCallback; + break; + + case HAL_PCD_CONNECT_CB_ID : + hpcd->ConnectCallback = HAL_PCD_ConnectCallback; + break; + + case HAL_PCD_DISCONNECT_CB_ID : + hpcd->DisconnectCallback = HAL_PCD_DisconnectCallback; + break; + + case HAL_PCD_MSPINIT_CB_ID : + hpcd->MspInitCallback = HAL_PCD_MspInit; + break; + + case HAL_PCD_MSPDEINIT_CB_ID : + hpcd->MspDeInitCallback = HAL_PCD_MspDeInit; + break; + + default : + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (hpcd->State == HAL_PCD_STATE_RESET) + { + switch (CallbackID) + { + case HAL_PCD_MSPINIT_CB_ID : + hpcd->MspInitCallback = HAL_PCD_MspInit; + break; + + case HAL_PCD_MSPDEINIT_CB_ID : + hpcd->MspDeInitCallback = HAL_PCD_MspDeInit; + break; + + default : + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + return status; +} + +/** 
+ * @brief Register USB PCD Data OUT Stage Callback + * To be used instead of the weak HAL_PCD_DataOutStageCallback() predefined callback + * @param hpcd PCD handle + * @param pCallback pointer to the USB PCD Data OUT Stage Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterDataOutStageCallback(PCD_HandleTypeDef *hpcd, pPCD_DataOutStageCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->DataOutStageCallback = pCallback; + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief UnRegister the USB PCD Data OUT Stage Callback + * USB PCD Data OUT Stage Callback is redirected to the weak HAL_PCD_DataOutStageCallback() predefined callback + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterDataOutStageCallback(PCD_HandleTypeDef *hpcd) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->DataOutStageCallback = HAL_PCD_DataOutStageCallback; /* Legacy weak DataOutStageCallback */ + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Register USB PCD Data IN Stage Callback + * To be used instead of the weak HAL_PCD_DataInStageCallback() predefined callback + * @param hpcd PCD handle + * @param pCallback pointer to the USB PCD Data IN Stage Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterDataInStageCallback(PCD_HandleTypeDef *hpcd, pPCD_DataInStageCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->DataInStageCallback = pCallback; + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief UnRegister the USB PCD Data IN Stage Callback + * USB PCD Data OUT Stage Callback is redirected to the weak HAL_PCD_DataInStageCallback() predefined callback + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterDataInStageCallback(PCD_HandleTypeDef *hpcd) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->DataInStageCallback = HAL_PCD_DataInStageCallback; /* Legacy weak DataInStageCallback */ + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Register USB PCD Iso OUT incomplete Callback + * To be used instead of the weak HAL_PCD_ISOOUTIncompleteCallback() predefined callback + * 
@param hpcd PCD handle + * @param pCallback pointer to the USB PCD Iso OUT incomplete Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterIsoOutIncpltCallback(PCD_HandleTypeDef *hpcd, pPCD_IsoOutIncpltCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->ISOOUTIncompleteCallback = pCallback; + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief UnRegister the USB PCD Iso OUT incomplete Callback + * USB PCD Iso OUT incomplete Callback is redirected to the weak HAL_PCD_ISOOUTIncompleteCallback() predefined callback + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterIsoOutIncpltCallback(PCD_HandleTypeDef *hpcd) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->ISOOUTIncompleteCallback = HAL_PCD_ISOOUTIncompleteCallback; /* Legacy weak ISOOUTIncompleteCallback */ + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Register USB PCD Iso IN incomplete Callback + * To be used instead of the weak HAL_PCD_ISOINIncompleteCallback() predefined callback + * @param hpcd PCD handle + * @param pCallback pointer to the USB PCD Iso IN incomplete Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd, pPCD_IsoInIncpltCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->ISOINIncompleteCallback = pCallback; + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief UnRegister the USB PCD Iso IN incomplete Callback + * USB PCD Iso IN incomplete Callback is redirected to the weak HAL_PCD_ISOINIncompleteCallback() predefined callback + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->ISOINIncompleteCallback = HAL_PCD_ISOINIncompleteCallback; /* Legacy weak ISOINIncompleteCallback */ + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Register USB PCD LPM Callback + * To be used instead of the weak HAL_PCDEx_LPM_Callback() predefined callback + * @param hpcd PCD handle + * @param pCallback pointer to the USB PCD LPM Callback function + * 
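@note   Available only when USE_HAL_PCD_REGISTER_CALLBACKS is set to 1U. A minimal registration sketch (the callback name is illustrative): + * @code + *   HAL_PCD_RegisterLpmCallback(&hpcd, MyLpmCallback); + * @endcode + * 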
@retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterLpmCallback(PCD_HandleTypeDef *hpcd, pPCD_LpmCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->LPMCallback = pCallback; + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief UnRegister the USB PCD LPM Callback + * USB LPM Callback is redirected to the weak HAL_PCDEx_LPM_Callback() predefined callback + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterLpmCallback(PCD_HandleTypeDef *hpcd) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->LPMCallback = HAL_PCDEx_LPM_Callback; /* Legacy weak HAL_PCDEx_LPM_Callback */ + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup PCD_Exported_Functions_Group2 Input and Output operation functions + * @brief Data transfers functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the PCD data + transfers. + +@endverbatim + * @{ + */ + +/** + * @brief Start the USB device + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_Start(PCD_HandleTypeDef *hpcd) +{ + __HAL_LOCK(hpcd); + (void)USB_DevConnect(hpcd->Instance); + __HAL_PCD_ENABLE(hpcd); + __HAL_UNLOCK(hpcd); + return HAL_OK; +} + +/** + * @brief Stop the USB device. + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_Stop(PCD_HandleTypeDef *hpcd) +{ + __HAL_LOCK(hpcd); + __HAL_PCD_DISABLE(hpcd); + + if (USB_StopDevice(hpcd->Instance) != HAL_OK) + { + __HAL_UNLOCK(hpcd); + return HAL_ERROR; + } + + (void)USB_DevDisconnect(hpcd->Instance); + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** + * @brief Handles PCD interrupt request. 
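+ * @note   Typically invoked from the USB OTG interrupt vector, e.g. for the FS instance (the handle name is illustrative, defined by the application): + * @code + *   void OTG_FS_IRQHandler(void) + *   { + *     HAL_PCD_IRQHandler(&hpcd_USB_OTG_FS); + *   } + * @endcode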
+ * @param hpcd PCD handle + * @retval HAL status + */ +void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t i, ep_intr, epint, epnum; + uint32_t fifoemptymsk, temp; + USB_OTG_EPTypeDef *ep; + + /* ensure that we are in device mode */ + if (USB_GetMode(hpcd->Instance) == USB_OTG_MODE_DEVICE) + { + /* avoid spurious interrupt */ + if (__HAL_PCD_IS_INVALID_INTERRUPT(hpcd)) + { + return; + } + + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_MMIS)) + { + /* incorrect mode, acknowledge the interrupt */ + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_MMIS); + } + + /* Handle RxQLevel Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_RXFLVL)) + { + USB_MASK_INTERRUPT(hpcd->Instance, USB_OTG_GINTSTS_RXFLVL); + + temp = USBx->GRXSTSP; + + ep = &hpcd->OUT_ep[temp & USB_OTG_GRXSTSP_EPNUM]; + + if (((temp & USB_OTG_GRXSTSP_PKTSTS) >> 17) == STS_DATA_UPDT) + { + if ((temp & USB_OTG_GRXSTSP_BCNT) != 0U) + { + (void)USB_ReadPacket(USBx, ep->xfer_buff, + (uint16_t)((temp & USB_OTG_GRXSTSP_BCNT) >> 4)); + + ep->xfer_buff += (temp & USB_OTG_GRXSTSP_BCNT) >> 4; + ep->xfer_count += (temp & USB_OTG_GRXSTSP_BCNT) >> 4; + } + } + else if (((temp & USB_OTG_GRXSTSP_PKTSTS) >> 17) == STS_SETUP_UPDT) + { + (void)USB_ReadPacket(USBx, (uint8_t *)hpcd->Setup, 8U); + ep->xfer_count += (temp & USB_OTG_GRXSTSP_BCNT) >> 4; + } + else + { + /* ... */ + } + USB_UNMASK_INTERRUPT(hpcd->Instance, USB_OTG_GINTSTS_RXFLVL); + } + + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_OEPINT)) + { + epnum = 0U; + + /* Read in the device interrupt bits */ + ep_intr = USB_ReadDevAllOutEpInterrupt(hpcd->Instance); + + while (ep_intr != 0U) + { + if ((ep_intr & 0x1U) != 0U) + { + epint = USB_ReadDevOutEPInterrupt(hpcd->Instance, (uint8_t)epnum); + + if ((epint & USB_OTG_DOEPINT_XFRC) == USB_OTG_DOEPINT_XFRC) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_XFRC); + (void)PCD_EP_OutXfrComplete_int(hpcd, epnum); + } + + if ((epint & USB_OTG_DOEPINT_STUP) == USB_OTG_DOEPINT_STUP) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_STUP); + /* Class B setup phase done for previous decoded setup */ + (void)PCD_EP_OutSetupPacket_int(hpcd, epnum); + } + + if ((epint & USB_OTG_DOEPINT_OTEPDIS) == USB_OTG_DOEPINT_OTEPDIS) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_OTEPDIS); + } + + /* Clear Status Phase Received interrupt */ + if ((epint & USB_OTG_DOEPINT_OTEPSPR) == USB_OTG_DOEPINT_OTEPSPR) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_OTEPSPR); + } + + /* Clear OUT NAK interrupt */ + if ((epint & USB_OTG_DOEPINT_NAK) == USB_OTG_DOEPINT_NAK) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_NAK); + } + } + epnum++; + ep_intr >>= 1U; + } + } + + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_IEPINT)) + { + /* Read in the device interrupt bits */ + ep_intr = USB_ReadDevAllInEpInterrupt(hpcd->Instance); + + epnum = 0U; + + while (ep_intr != 0U) + { + if ((ep_intr & 0x1U) != 0U) /* In ITR */ + { + epint = USB_ReadDevInEPInterrupt(hpcd->Instance, (uint8_t)epnum); + + if ((epint & USB_OTG_DIEPINT_XFRC) == USB_OTG_DIEPINT_XFRC) + { + fifoemptymsk = (uint32_t)(0x1UL << (epnum & EP_ADDR_MSK)); + USBx_DEVICE->DIEPEMPMSK &= ~fifoemptymsk; + + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_XFRC); + + if (hpcd->Init.dma_enable == 1U) + { + hpcd->IN_ep[epnum].xfer_buff += hpcd->IN_ep[epnum].maxpacket; + + /* this is ZLP, so prepare EP0 for next setup */ + if ((epnum == 0U) && (hpcd->IN_ep[epnum].xfer_len == 0U)) + { + /* prepare to rx more setup packets */ + 
(void)USB_EP0_OutStart(hpcd->Instance, 1U, (uint8_t *)hpcd->Setup); + } + } + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->DataInStageCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_DataInStageCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + if ((epint & USB_OTG_DIEPINT_TOC) == USB_OTG_DIEPINT_TOC) + { + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_TOC); + } + if ((epint & USB_OTG_DIEPINT_ITTXFE) == USB_OTG_DIEPINT_ITTXFE) + { + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_ITTXFE); + } + if ((epint & USB_OTG_DIEPINT_INEPNE) == USB_OTG_DIEPINT_INEPNE) + { + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_INEPNE); + } + if ((epint & USB_OTG_DIEPINT_EPDISD) == USB_OTG_DIEPINT_EPDISD) + { + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_EPDISD); + } + if ((epint & USB_OTG_DIEPINT_TXFE) == USB_OTG_DIEPINT_TXFE) + { + (void)PCD_WriteEmptyTxFifo(hpcd, epnum); + } + } + epnum++; + ep_intr >>= 1U; + } + } + + /* Handle Resume Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_WKUINT)) + { + /* Clear the Remote Wake-up Signaling */ + USBx_DEVICE->DCTL &= ~USB_OTG_DCTL_RWUSIG; + + if (hpcd->LPM_State == LPM_L1) + { + hpcd->LPM_State = LPM_L0; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->LPMCallback(hpcd, PCD_LPM_L0_ACTIVE); +#else + HAL_PCDEx_LPM_Callback(hpcd, PCD_LPM_L0_ACTIVE); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + else + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ResumeCallback(hpcd); +#else + HAL_PCD_ResumeCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_WKUINT); + } + + /* Handle Suspend Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_USBSUSP)) + { + if ((USBx_DEVICE->DSTS & USB_OTG_DSTS_SUSPSTS) == USB_OTG_DSTS_SUSPSTS) + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->SuspendCallback(hpcd); +#else + HAL_PCD_SuspendCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_USBSUSP); + } + + /* Handle LPM Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_LPMINT)) + { + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_LPMINT); + + if (hpcd->LPM_State == LPM_L0) + { + hpcd->LPM_State = LPM_L1; + hpcd->BESL = (hpcd->Instance->GLPMCFG & USB_OTG_GLPMCFG_BESL) >> 2U; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->LPMCallback(hpcd, PCD_LPM_L1_ACTIVE); +#else + HAL_PCDEx_LPM_Callback(hpcd, PCD_LPM_L1_ACTIVE); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + else + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->SuspendCallback(hpcd); +#else + HAL_PCD_SuspendCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + } + + /* Handle Reset Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_USBRST)) + { + USBx_DEVICE->DCTL &= ~USB_OTG_DCTL_RWUSIG; + (void)USB_FlushTxFifo(hpcd->Instance, 0x10U); + + for (i = 0U; i < hpcd->Init.dev_endpoints; i++) + { + USBx_INEP(i)->DIEPINT = 0xFB7FU; + USBx_INEP(i)->DIEPCTL &= ~USB_OTG_DIEPCTL_STALL; + USBx_INEP(i)->DIEPCTL |= USB_OTG_DIEPCTL_SNAK; + USBx_OUTEP(i)->DOEPINT = 0xFB7FU; + USBx_OUTEP(i)->DOEPCTL &= ~USB_OTG_DOEPCTL_STALL; + USBx_OUTEP(i)->DOEPCTL |= USB_OTG_DOEPCTL_SNAK; + } + USBx_DEVICE->DAINTMSK |= 0x10001U; + + if (hpcd->Init.use_dedicated_ep1 != 0U) + { + USBx_DEVICE->DOUTEP1MSK |= USB_OTG_DOEPMSK_STUPM | + USB_OTG_DOEPMSK_XFRCM | + USB_OTG_DOEPMSK_EPDM; + + USBx_DEVICE->DINEP1MSK |= USB_OTG_DIEPMSK_TOM | + USB_OTG_DIEPMSK_XFRCM | + USB_OTG_DIEPMSK_EPDM; + } + else + { + USBx_DEVICE->DOEPMSK |= USB_OTG_DOEPMSK_STUPM | + 
USB_OTG_DOEPMSK_XFRCM | + USB_OTG_DOEPMSK_EPDM | + USB_OTG_DOEPMSK_OTEPSPRM | + USB_OTG_DOEPMSK_NAKM; + + USBx_DEVICE->DIEPMSK |= USB_OTG_DIEPMSK_TOM | + USB_OTG_DIEPMSK_XFRCM | + USB_OTG_DIEPMSK_EPDM; + } + + /* Set Default Address to 0 */ + USBx_DEVICE->DCFG &= ~USB_OTG_DCFG_DAD; + + /* setup EP0 to receive SETUP packets */ + (void)USB_EP0_OutStart(hpcd->Instance, (uint8_t)hpcd->Init.dma_enable, + (uint8_t *)hpcd->Setup); + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_USBRST); + } + + /* Handle Enumeration done Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_ENUMDNE)) + { + (void)USB_ActivateSetup(hpcd->Instance); + hpcd->Init.speed = USB_GetDevSpeed(hpcd->Instance); + + /* Set USB Turnaround time */ + (void)USB_SetTurnaroundTime(hpcd->Instance, + HAL_RCC_GetHCLKFreq(), + (uint8_t)hpcd->Init.speed); + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ResetCallback(hpcd); +#else + HAL_PCD_ResetCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_ENUMDNE); + } + + /* Handle SOF Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_SOF)) + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->SOFCallback(hpcd); +#else + HAL_PCD_SOFCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_SOF); + } + + /* Handle Incomplete ISO IN Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_IISOIXFR)) + { + /* Keep application checking the corresponding Iso IN endpoint + causing the incomplete Interrupt */ + epnum = 0U; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ISOINIncompleteCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_ISOINIncompleteCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_IISOIXFR); + } + + /* Handle Incomplete ISO OUT Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_PXFR_INCOMPISOOUT)) + { + /* Keep application checking the corresponding Iso OUT endpoint + causing the incomplete Interrupt */ + epnum = 0U; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ISOOUTIncompleteCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_ISOOUTIncompleteCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_PXFR_INCOMPISOOUT); + } + + /* Handle Connection event Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_SRQINT)) + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ConnectCallback(hpcd); +#else + HAL_PCD_ConnectCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_SRQINT); + } + + /* Handle Disconnection event Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_OTGINT)) + { + temp = hpcd->Instance->GOTGINT; + + if ((temp & USB_OTG_GOTGINT_SEDET) == USB_OTG_GOTGINT_SEDET) + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->DisconnectCallback(hpcd); +#else + HAL_PCD_DisconnectCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + hpcd->Instance->GOTGINT |= temp; + } + } +} +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/** + * @brief Data OUT stage callback. 
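+ * @note   Declared __weak; the application overrides it to consume the data received on the OUT endpoint, e.g. (sketch only): + * @code + *   void HAL_PCD_DataOutStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) + *   { + *     uint32_t len = HAL_PCD_EP_GetRxCount(hpcd, epnum); + *     // pass hpcd->OUT_ep[epnum].xfer_buff and len to the upper USB device stack + *   } + * @endcode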
+ * @param hpcd PCD handle + * @param epnum endpoint number + * @retval None + */ +__weak void HAL_PCD_DataOutStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(epnum); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_DataOutStageCallback could be implemented in the user file + */ +} + +/** + * @brief Data IN stage callback + * @param hpcd PCD handle + * @param epnum endpoint number + * @retval None + */ +__weak void HAL_PCD_DataInStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(epnum); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_DataInStageCallback could be implemented in the user file + */ +} +/** + * @brief Setup stage callback + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_SetupStageCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_SetupStageCallback could be implemented in the user file + */ +} + +/** + * @brief USB Start Of Frame callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_SOFCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_SOFCallback could be implemented in the user file + */ +} + +/** + * @brief USB Reset callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_ResetCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_ResetCallback could be implemented in the user file + */ +} + +/** + * @brief Suspend event callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_SuspendCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_SuspendCallback could be implemented in the user file + */ +} + +/** + * @brief Resume event callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_ResumeCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_ResumeCallback could be implemented in the user file + */ +} + +/** + * @brief Incomplete ISO OUT callback. + * @param hpcd PCD handle + * @param epnum endpoint number + * @retval None + */ +__weak void HAL_PCD_ISOOUTIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(epnum); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_ISOOUTIncompleteCallback could be implemented in the user file + */ +} + +/** + * @brief Incomplete ISO IN callback. 
+ * @param hpcd PCD handle + * @param epnum endpoint number + * @retval None + */ +__weak void HAL_PCD_ISOINIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(epnum); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_ISOINIncompleteCallback could be implemented in the user file + */ +} + +/** + * @brief Connection event callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_ConnectCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_ConnectCallback could be implemented in the user file + */ +} + +/** + * @brief Disconnection event callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_DisconnectCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_DisconnectCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup PCD_Exported_Functions_Group3 Peripheral Control functions + * @brief management functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the PCD data + transfers. + +@endverbatim + * @{ + */ + +/** + * @brief Connect the USB device + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_DevConnect(PCD_HandleTypeDef *hpcd) +{ + __HAL_LOCK(hpcd); + (void)USB_DevConnect(hpcd->Instance); + __HAL_UNLOCK(hpcd); + return HAL_OK; +} + +/** + * @brief Disconnect the USB device. + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_DevDisconnect(PCD_HandleTypeDef *hpcd) +{ + __HAL_LOCK(hpcd); + (void)USB_DevDisconnect(hpcd->Instance); + __HAL_UNLOCK(hpcd); + return HAL_OK; +} + +/** + * @brief Set the USB Device address. + * @param hpcd PCD handle + * @param address new device address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_SetAddress(PCD_HandleTypeDef *hpcd, uint8_t address) +{ + __HAL_LOCK(hpcd); + hpcd->USB_Address = address; + (void)USB_SetDevAddress(hpcd->Instance, address); + __HAL_UNLOCK(hpcd); + return HAL_OK; +} +/** + * @brief Open and configure an endpoint. + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @param ep_mps endpoint max packet size + * @param ep_type endpoint type + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Open(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint16_t ep_mps, uint8_t ep_type) +{ + HAL_StatusTypeDef ret = HAL_OK; + PCD_EPTypeDef *ep; + + if ((ep_addr & 0x80U) == 0x80U) + { + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 1U; + } + else + { + ep = &hpcd->OUT_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 0U; + } + + ep->num = ep_addr & EP_ADDR_MSK; + ep->maxpacket = ep_mps; + ep->type = ep_type; + + if (ep->is_in != 0U) + { + /* Assign a Tx FIFO */ + ep->tx_fifo_num = ep->num; + } + /* Set initial data PID. 
*/ + if (ep_type == EP_TYPE_BULK) + { + ep->data_pid_start = 0U; + } + + __HAL_LOCK(hpcd); + (void)USB_ActivateEndpoint(hpcd->Instance, ep); + __HAL_UNLOCK(hpcd); + + return ret; +} + +/** + * @brief Deactivate an endpoint. + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Close(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + PCD_EPTypeDef *ep; + + if ((ep_addr & 0x80U) == 0x80U) + { + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 1U; + } + else + { + ep = &hpcd->OUT_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 0U; + } + ep->num = ep_addr & EP_ADDR_MSK; + + __HAL_LOCK(hpcd); + (void)USB_DeactivateEndpoint(hpcd->Instance, ep); + __HAL_UNLOCK(hpcd); + return HAL_OK; +} + + +/** + * @brief Receive an amount of data. + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @param pBuf pointer to the reception buffer + * @param len amount of data to be received + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Receive(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint8_t *pBuf, uint32_t len) +{ + PCD_EPTypeDef *ep; + + ep = &hpcd->OUT_ep[ep_addr & EP_ADDR_MSK]; + + /*setup and start the Xfer */ + ep->xfer_buff = pBuf; + ep->xfer_len = len; + ep->xfer_count = 0U; + ep->is_in = 0U; + ep->num = ep_addr & EP_ADDR_MSK; + + if (hpcd->Init.dma_enable == 1U) + { + ep->dma_addr = (uint32_t)pBuf; + } + + if ((ep_addr & EP_ADDR_MSK) == 0U) + { + (void)USB_EP0StartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); + } + else + { + (void)USB_EPStartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); + } + + return HAL_OK; +} + +/** + * @brief Get Received Data Size + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval Data Size + */ +uint32_t HAL_PCD_EP_GetRxCount(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + return hpcd->OUT_ep[ep_addr & EP_ADDR_MSK].xfer_count; +} +/** + * @brief Send an amount of data + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @param pBuf pointer to the transmission buffer + * @param len amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Transmit(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint8_t *pBuf, uint32_t len) +{ + PCD_EPTypeDef *ep; + + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + + /*setup and start the Xfer */ + ep->xfer_buff = pBuf; + ep->xfer_len = len; + ep->xfer_count = 0U; + ep->is_in = 1U; + ep->num = ep_addr & EP_ADDR_MSK; + + if (hpcd->Init.dma_enable == 1U) + { + ep->dma_addr = (uint32_t)pBuf; + } + + if ((ep_addr & EP_ADDR_MSK) == 0U) + { + (void)USB_EP0StartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); + } + else + { + (void)USB_EPStartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); + } + + return HAL_OK; +} + +/** + * @brief Set a STALL condition over an endpoint + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_SetStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + PCD_EPTypeDef *ep; + + if (((uint32_t)ep_addr & EP_ADDR_MSK) > hpcd->Init.dev_endpoints) + { + return HAL_ERROR; + } + + if ((0x80U & ep_addr) == 0x80U) + { + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 1U; + } + else + { + ep = &hpcd->OUT_ep[ep_addr]; + ep->is_in = 0U; + } + + ep->is_stall = 1U; + ep->num = ep_addr & EP_ADDR_MSK; + + __HAL_LOCK(hpcd); + + (void)USB_EPSetStall(hpcd->Instance, ep); + if ((ep_addr & EP_ADDR_MSK) == 0U) + { + (void)USB_EP0_OutStart(hpcd->Instance, (uint8_t)hpcd->Init.dma_enable, (uint8_t 
*)hpcd->Setup); + } + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} + +/** + * @brief Clear a STALL condition over in an endpoint + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_ClrStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + PCD_EPTypeDef *ep; + + if (((uint32_t)ep_addr & 0x0FU) > hpcd->Init.dev_endpoints) + { + return HAL_ERROR; + } + + if ((0x80U & ep_addr) == 0x80U) + { + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 1U; + } + else + { + ep = &hpcd->OUT_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 0U; + } + + ep->is_stall = 0U; + ep->num = ep_addr & EP_ADDR_MSK; + + __HAL_LOCK(hpcd); + (void)USB_EPClearStall(hpcd->Instance, ep); + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} + +/** + * @brief Flush an endpoint + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Flush(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + __HAL_LOCK(hpcd); + + if ((ep_addr & 0x80U) == 0x80U) + { + (void)USB_FlushTxFifo(hpcd->Instance, (uint32_t)ep_addr & EP_ADDR_MSK); + } + else + { + (void)USB_FlushRxFifo(hpcd->Instance); + } + + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} + +/** + * @brief Activate remote wakeup signalling + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_ActivateRemoteWakeup(PCD_HandleTypeDef *hpcd) +{ + return (USB_ActivateRemoteWakeup(hpcd->Instance)); +} + +/** + * @brief De-activate remote wakeup signalling. + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_DeActivateRemoteWakeup(PCD_HandleTypeDef *hpcd) +{ + return (USB_DeActivateRemoteWakeup(hpcd->Instance)); +} + +/** + * @} + */ + +/** @defgroup PCD_Exported_Functions_Group4 Peripheral State functions + * @brief Peripheral State functions + * +@verbatim + =============================================================================== + ##### Peripheral State functions ##### + =============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Return the PCD handle state. + * @param hpcd PCD handle + * @retval HAL state + */ +PCD_StateTypeDef HAL_PCD_GetState(PCD_HandleTypeDef *hpcd) +{ + return hpcd->State; +} + +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @addtogroup PCD_Private_Functions + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** + * @brief Check FIFO for the next packet to be loaded. 
+ * @param hpcd PCD handle + * @param epnum endpoint number + * @retval HAL status + */ +static HAL_StatusTypeDef PCD_WriteEmptyTxFifo(PCD_HandleTypeDef *hpcd, uint32_t epnum) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + uint32_t USBx_BASE = (uint32_t)USBx; + USB_OTG_EPTypeDef *ep; + uint32_t len; + uint32_t len32b; + uint32_t fifoemptymsk; + + ep = &hpcd->IN_ep[epnum]; + + if (ep->xfer_count > ep->xfer_len) + { + return HAL_ERROR; + } + + len = ep->xfer_len - ep->xfer_count; + + if (len > ep->maxpacket) + { + len = ep->maxpacket; + } + + len32b = (len + 3U) / 4U; + + while (((USBx_INEP(epnum)->DTXFSTS & USB_OTG_DTXFSTS_INEPTFSAV) >= len32b) && + (ep->xfer_count < ep->xfer_len) && (ep->xfer_len != 0U)) + { + /* Write the FIFO */ + len = ep->xfer_len - ep->xfer_count; + + if (len > ep->maxpacket) + { + len = ep->maxpacket; + } + len32b = (len + 3U) / 4U; + + (void)USB_WritePacket(USBx, ep->xfer_buff, (uint8_t)epnum, (uint16_t)len, + (uint8_t)hpcd->Init.dma_enable); + + ep->xfer_buff += len; + ep->xfer_count += len; + } + + if (ep->xfer_len <= ep->xfer_count) + { + fifoemptymsk = (uint32_t)(0x1UL << (epnum & EP_ADDR_MSK)); + USBx_DEVICE->DIEPEMPMSK &= ~fifoemptymsk; + } + + return HAL_OK; +} + + +/** + * @brief process EP OUT transfer complete interrupt. + * @param hpcd PCD handle + * @param epnum endpoint number + * @retval HAL status + */ +static HAL_StatusTypeDef PCD_EP_OutXfrComplete_int(PCD_HandleTypeDef *hpcd, uint32_t epnum) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t gSNPSiD = *(__IO uint32_t *)(&USBx->CID + 0x1U); + uint32_t DoepintReg = USBx_OUTEP(epnum)->DOEPINT; + + if (hpcd->Init.dma_enable == 1U) + { + if ((DoepintReg & USB_OTG_DOEPINT_STUP) == USB_OTG_DOEPINT_STUP) /* Class C */ + { + /* StupPktRcvd = 1 this is a setup packet */ + if ((gSNPSiD > USB_OTG_CORE_ID_300A) && + ((DoepintReg & USB_OTG_DOEPINT_STPKTRX) == USB_OTG_DOEPINT_STPKTRX)) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_STPKTRX); + } + } + else if ((DoepintReg & USB_OTG_DOEPINT_OTEPSPR) == USB_OTG_DOEPINT_OTEPSPR) /* Class E */ + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_OTEPSPR); + } + else if ((DoepintReg & (USB_OTG_DOEPINT_STUP | USB_OTG_DOEPINT_OTEPSPR)) == 0U) + { + /* StupPktRcvd = 1 this is a setup packet */ + if ((gSNPSiD > USB_OTG_CORE_ID_300A) && + ((DoepintReg & USB_OTG_DOEPINT_STPKTRX) == USB_OTG_DOEPINT_STPKTRX)) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_STPKTRX); + } + else + { + /* out data packet received over EP0 */ + hpcd->OUT_ep[epnum].xfer_count = + hpcd->OUT_ep[epnum].maxpacket - + (USBx_OUTEP(epnum)->DOEPTSIZ & USB_OTG_DOEPTSIZ_XFRSIZ); + + hpcd->OUT_ep[epnum].xfer_buff += hpcd->OUT_ep[epnum].maxpacket; + + if ((epnum == 0U) && (hpcd->OUT_ep[epnum].xfer_len == 0U)) + { + /* this is ZLP, so prepare EP0 for next setup */ + (void)USB_EP0_OutStart(hpcd->Instance, 1U, (uint8_t *)hpcd->Setup); + } +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->DataOutStageCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_DataOutStageCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + } + else + { + /* ... 
*/ + } + } + else + { + if (gSNPSiD == USB_OTG_CORE_ID_310A) + { + /* StupPktRcvd = 1 this is a setup packet */ + if ((DoepintReg & USB_OTG_DOEPINT_STPKTRX) == USB_OTG_DOEPINT_STPKTRX) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_STPKTRX); + } + else + { + if ((DoepintReg & USB_OTG_DOEPINT_OTEPSPR) == USB_OTG_DOEPINT_OTEPSPR) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_OTEPSPR); + } + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->DataOutStageCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_DataOutStageCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + } + else + { + if ((epnum == 0U) && (hpcd->OUT_ep[epnum].xfer_len == 0U)) + { + /* this is ZLP, so prepare EP0 for next setup */ + (void)USB_EP0_OutStart(hpcd->Instance, 0U, (uint8_t *)hpcd->Setup); + } + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->DataOutStageCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_DataOutStageCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + } + + return HAL_OK; +} + + +/** + * @brief process EP OUT setup packet received interrupt. + * @param hpcd PCD handle + * @param epnum endpoint number + * @retval HAL status + */ +static HAL_StatusTypeDef PCD_EP_OutSetupPacket_int(PCD_HandleTypeDef *hpcd, uint32_t epnum) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t gSNPSiD = *(__IO uint32_t *)(&USBx->CID + 0x1U); + uint32_t DoepintReg = USBx_OUTEP(epnum)->DOEPINT; + + if ((gSNPSiD > USB_OTG_CORE_ID_300A) && + ((DoepintReg & USB_OTG_DOEPINT_STPKTRX) == USB_OTG_DOEPINT_STPKTRX)) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_STPKTRX); + } + + /* Inform the upper layer that a setup packet is available */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->SetupStageCallback(hpcd); +#else + HAL_PCD_SetupStageCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + if ((gSNPSiD > USB_OTG_CORE_ID_300A) && (hpcd->Init.dma_enable == 1U)) + { + (void)USB_EP0_OutStart(hpcd->Instance, 1U, (uint8_t *)hpcd->Setup); + } + + return HAL_OK; +} +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +#endif /* HAL_PCD_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c new file mode 100644 index 000000000..168d96395 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c @@ -0,0 +1,207 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pcd_ex.c + * @author MCD Application Team + * @brief PCD Extended HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the USB Peripheral Controller: + * + Extended features functions + * + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup PCDEx PCDEx + * @brief PCD Extended HAL module driver + * @{ + */ + +#ifdef HAL_PCD_MODULE_ENABLED + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup PCDEx_Exported_Functions PCDEx Exported Functions + * @{ + */ + +/** @defgroup PCDEx_Exported_Functions_Group1 Peripheral Control functions + * @brief PCDEx control functions + * +@verbatim + =============================================================================== + ##### Extended features functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Update FIFO configuration + +@endverbatim + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** + * @brief Set Tx FIFO + * @param hpcd PCD handle + * @param fifo The number of Tx fifo + * @param size Fifo size + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCDEx_SetTxFiFo(PCD_HandleTypeDef *hpcd, uint8_t fifo, uint16_t size) +{ + uint8_t i; + uint32_t Tx_Offset; + + /* TXn min size = 16 words. (n : Transmit FIFO index) + When a TxFIFO is not used, the Configuration should be as follows: + case 1 : n > m and Txn is not used (n,m : Transmit FIFO indexes) + --> Txm can use the space allocated for Txn. + case2 : n < m and Txn is not used (n,m : Transmit FIFO indexes) + --> Txn should be configured with the minimum space of 16 words + The FIFO is used optimally when used TxFIFOs are allocated in the top + of the FIFO.Ex: use EP1 and EP2 as IN instead of EP1 and EP3 as IN ones. + When DMA is used 3n * FIFO locations should be reserved for internal DMA registers */ + + Tx_Offset = hpcd->Instance->GRXFSIZ; + + if (fifo == 0U) + { + hpcd->Instance->DIEPTXF0_HNPTXFSIZ = ((uint32_t)size << 16) | Tx_Offset; + } + else + { + Tx_Offset += (hpcd->Instance->DIEPTXF0_HNPTXFSIZ) >> 16; + for (i = 0U; i < (fifo - 1U); i++) + { + Tx_Offset += (hpcd->Instance->DIEPTXF[i] >> 16); + } + + /* Multiply Tx_Size by 2 to get higher performance */ + hpcd->Instance->DIEPTXF[fifo - 1U] = ((uint32_t)size << 16) | Tx_Offset; + } + + return HAL_OK; +} + +/** + * @brief Set Rx FIFO + * @param hpcd PCD handle + * @param size Size of Rx fifo + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCDEx_SetRxFiFo(PCD_HandleTypeDef *hpcd, uint16_t size) +{ + hpcd->Instance->GRXFSIZ = size; + + return HAL_OK; +} + +/** + * @brief Activate LPM feature. 
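The sizes passed to HAL_PCDEx_SetRxFiFo()/HAL_PCDEx_SetTxFiFo() are counted in 32-bit words, and each Tx FIFO is stacked on top of the Rx FIFO as the allocation comment above describes. A minimal configuration sketch, assuming a hypothetical hpcd_USB_OTG_FS handle already initialised with HAL_PCD_Init() and sizes chosen to fit the core's dedicated FIFO RAM:

#include "stm32f7xx_hal.h"

extern PCD_HandleTypeDef hpcd_USB_OTG_FS;   /* hypothetical application handle */

static void USB_FIFO_Config(void)
{
  (void)HAL_PCDEx_SetRxFiFo(&hpcd_USB_OTG_FS, 0x80);     /* shared Rx FIFO (words) */
  (void)HAL_PCDEx_SetTxFiFo(&hpcd_USB_OTG_FS, 0, 0x40);  /* EP0 IN Tx FIFO         */
  (void)HAL_PCDEx_SetTxFiFo(&hpcd_USB_OTG_FS, 1, 0x80);  /* EP1 IN Tx FIFO         */
}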
+ * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCDEx_ActivateLPM(PCD_HandleTypeDef *hpcd) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + + hpcd->lpm_active = 1U; + hpcd->LPM_State = LPM_L0; + USBx->GINTMSK |= USB_OTG_GINTMSK_LPMINTM; + USBx->GLPMCFG |= (USB_OTG_GLPMCFG_LPMEN | USB_OTG_GLPMCFG_LPMACK | USB_OTG_GLPMCFG_ENBESL); + + return HAL_OK; +} + +/** + * @brief Deactivate LPM feature. + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCDEx_DeActivateLPM(PCD_HandleTypeDef *hpcd) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + + hpcd->lpm_active = 0U; + USBx->GINTMSK &= ~USB_OTG_GINTMSK_LPMINTM; + USBx->GLPMCFG &= ~(USB_OTG_GLPMCFG_LPMEN | USB_OTG_GLPMCFG_LPMACK | USB_OTG_GLPMCFG_ENBESL); + + return HAL_OK; +} + +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +/** + * @brief Send LPM message to user layer callback. + * @param hpcd PCD handle + * @param msg LPM message + * @retval HAL status + */ +__weak void HAL_PCDEx_LPM_Callback(PCD_HandleTypeDef *hpcd, PCD_LPM_MsgTypeDef msg) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(msg); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCDEx_LPM_Callback could be implemented in the user file + */ +} + +/** + * @brief Send BatteryCharging message to user layer callback. + * @param hpcd PCD handle + * @param msg LPM message + * @retval HAL status + */ +__weak void HAL_PCDEx_BCD_Callback(PCD_HandleTypeDef *hpcd, PCD_BCD_MsgTypeDef msg) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(msg); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCDEx_BCD_Callback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +#endif /* HAL_PCD_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c new file mode 100644 index 000000000..4ed81b516 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c @@ -0,0 +1,599 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pwr.c + * @author MCD Application Team + * @brief PWR HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Power Controller (PWR) peripheral: + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup PWR PWR + * @brief PWR HAL module driver + * @{ + */ + +#ifdef HAL_PWR_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup PWR_Private_Constants + * @{ + */ + +/** @defgroup PWR_PVD_Mode_Mask PWR PVD Mode Mask + * @{ + */ +#define PVD_MODE_IT ((uint32_t)0x00010000U) +#define PVD_MODE_EVT ((uint32_t)0x00020000U) +#define PVD_RISING_EDGE ((uint32_t)0x00000001U) +#define PVD_FALLING_EDGE ((uint32_t)0x00000002U) +/** + * @} + */ + +/** @defgroup PWR_ENABLE_WUP_Mask PWR Enable WUP Mask + * @{ + */ +#define PWR_EWUP_MASK ((uint32_t)0x00003F00) +/** + * @} + */ + +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup PWR_Exported_Functions PWR Exported Functions + * @{ + */ + +/** @defgroup PWR_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and de-initialization functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + After reset, the backup domain (RTC registers, RTC backup data + registers and backup SRAM) is protected against possible unwanted + write accesses. + To enable access to the RTC Domain and RTC registers, proceed as follows: + (+) Enable the Power Controller (PWR) APB1 interface clock using the + __HAL_RCC_PWR_CLK_ENABLE() macro. + (+) Enable access to RTC domain using the HAL_PWR_EnableBkUpAccess() function. + +@endverbatim + * @{ + */ + +/** + * @brief Deinitializes the HAL PWR peripheral registers to their default reset values. + * @retval None + */ +void HAL_PWR_DeInit(void) +{ + __HAL_RCC_PWR_FORCE_RESET(); + __HAL_RCC_PWR_RELEASE_RESET(); +} + +/** + * @brief Enables access to the backup domain (RTC registers, RTC + * backup data registers and backup SRAM). + * @note If the HSE divided by 2, 3, ..31 is used as the RTC clock, the + * Backup Domain Access should be kept enabled. + * @retval None + */ +void HAL_PWR_EnableBkUpAccess(void) +{ + /* Enable access to RTC and backup registers */ + SET_BIT(PWR->CR1, PWR_CR1_DBP); +} + +/** + * @brief Disables access to the backup domain (RTC registers, RTC + * backup data registers and backup SRAM). + * @note If the HSE divided by 2, 3, ..31 is used as the RTC clock, the + * Backup Domain Access should be kept enabled. 
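The backup-domain access sequence spelled out above is just two calls; a minimal sketch:

/* Unlock the backup domain before touching RTC or backup registers */
__HAL_RCC_PWR_CLK_ENABLE();    /* PWR APB1 interface clock */
HAL_PWR_EnableBkUpAccess();    /* sets the DBP bit, removing write protection */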
+ * @retval None + */ +void HAL_PWR_DisableBkUpAccess(void) +{ + /* Disable access to RTC and backup registers */ + CLEAR_BIT(PWR->CR1, PWR_CR1_DBP); +} + +/** + * @} + */ + +/** @defgroup PWR_Exported_Functions_Group2 Peripheral Control functions + * @brief Low Power modes configuration functions + * +@verbatim + + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + + *** PVD configuration *** + ========================= + [..] + (+) The PVD is used to monitor the VDD power supply by comparing it to a + threshold selected by the PVD Level (PLS[2:0] bits in the PWR_CR). + (+) A PVDO flag is available to indicate if VDD/VDDA is higher or lower + than the PVD threshold. This event is internally connected to the EXTI + line16 and can generate an interrupt if enabled. This is done through + __HAL_PWR_PVD_EXTI_ENABLE_IT() macro. + (+) The PVD is stopped in Standby mode. + + *** Wake-up pin configuration *** + ================================ + [..] + (+) Wake-up pin is used to wake up the system from Standby mode. This pin is + forced in input pull-down configuration and is active on rising edges. + (+) There are up to 6 Wake-up pin in the STM32F7 devices family + + *** Low Power modes configuration *** + ===================================== + [..] + The devices feature 3 low-power modes: + (+) Sleep mode: Cortex-M7 core stopped, peripherals kept running. + (+) Stop mode: all clocks are stopped, regulator running, regulator + in low power mode + (+) Standby mode: 1.2V domain powered off. + + *** Sleep mode *** + ================== + [..] + (+) Entry: + The Sleep mode is entered by using the HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI) + functions with + (++) PWR_SLEEPENTRY_WFI: enter SLEEP mode with WFI instruction + (++) PWR_SLEEPENTRY_WFE: enter SLEEP mode with WFE instruction + + -@@- The Regulator parameter is not used for the STM32F7 family + and is kept as parameter just to maintain compatibility with the + lower power families (STM32L). + (+) Exit: + Any peripheral interrupt acknowledged by the nested vectored interrupt + controller (NVIC) can wake up the device from Sleep mode. + + *** Stop mode *** + ================= + [..] + In Stop mode, all clocks in the 1.2V domain are stopped, the PLL, the HSI, + and the HSE RC oscillators are disabled. Internal SRAM and register contents + are preserved. + The voltage regulator can be configured either in normal or low-power mode. + To minimize the consumption In Stop mode, FLASH can be powered off before + entering the Stop mode using the HAL_PWREx_EnableFlashPowerDown() function. + It can be switched on again by software after exiting the Stop mode using + the HAL_PWREx_DisableFlashPowerDown() function. + + (+) Entry: + The Stop mode is entered using the HAL_PWR_EnterSTOPMode(PWR_MAINREGULATOR_ON) + function with: + (++) Main regulator ON. + (++) Low Power regulator ON. + (+) Exit: + Any EXTI Line (Internal or External) configured in Interrupt/Event mode. + + *** Standby mode *** + ==================== + [..] + (+) + The Standby mode allows to achieve the lowest power consumption. It is based + on the Cortex-M7 deep sleep mode, with the voltage regulator disabled. + The 1.2V domain is consequently powered off. The PLL, the HSI oscillator and + the HSE oscillator are also switched off. 
SRAM and register contents are lost + except for the RTC registers, RTC backup registers, backup SRAM and Standby + circuitry. + + The voltage regulator is OFF. + + (++) Entry: + (+++) The Standby mode is entered using the HAL_PWR_EnterSTANDBYMode() function. + (++) Exit: + (+++) WKUP pin rising or falling edge, RTC alarm (Alarm A and Alarm B), RTC + wakeup, tamper event, time stamp event, external reset in NRST pin, IWDG reset. + + *** Auto-wakeup (AWU) from low-power mode *** + ============================================= + [..] + + (+) The MCU can be woken up from low-power mode by an RTC Alarm event, an RTC + Wakeup event, a tamper event or a time-stamp event, without depending on + an external interrupt (Auto-wakeup mode). + + (+) RTC auto-wakeup (AWU) from the Stop and Standby modes + + (++) To wake up from the Stop mode with an RTC alarm event, it is necessary to + configure the RTC to generate the RTC alarm using the HAL_RTC_SetAlarm_IT() function. + + (++) To wake up from the Stop mode with an RTC Tamper or time stamp event, it + is necessary to configure the RTC to detect the tamper or time stamp event using the + HAL_RTCEx_SetTimeStamp_IT() or HAL_RTCEx_SetTamper_IT() functions. + + (++) To wake up from the Stop mode with an RTC WakeUp event, it is necessary to + configure the RTC to generate the RTC WakeUp event using the HAL_RTCEx_SetWakeUpTimer_IT() function. + +@endverbatim + * @{ + */ + +/** + * @brief Configures the voltage threshold detected by the Power Voltage Detector(PVD). + * @param sConfigPVD pointer to an PWR_PVDTypeDef structure that contains the configuration + * information for the PVD. + * @note Refer to the electrical characteristics of your device datasheet for + * more details about the voltage threshold corresponding to each + * detection level. + * @retval None + */ +void HAL_PWR_ConfigPVD(PWR_PVDTypeDef *sConfigPVD) +{ + /* Check the parameters */ + assert_param(IS_PWR_PVD_LEVEL(sConfigPVD->PVDLevel)); + assert_param(IS_PWR_PVD_MODE(sConfigPVD->Mode)); + + /* Set PLS[7:5] bits according to PVDLevel value */ + MODIFY_REG(PWR->CR1, PWR_CR1_PLS, sConfigPVD->PVDLevel); + + /* Clear any previous config. Keep it clear if no event or IT mode is selected */ + __HAL_PWR_PVD_EXTI_DISABLE_EVENT(); + __HAL_PWR_PVD_EXTI_DISABLE_IT(); + __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE(); + __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE(); + + /* Configure interrupt mode */ + if((sConfigPVD->Mode & PVD_MODE_IT) == PVD_MODE_IT) + { + __HAL_PWR_PVD_EXTI_ENABLE_IT(); + } + + /* Configure event mode */ + if((sConfigPVD->Mode & PVD_MODE_EVT) == PVD_MODE_EVT) + { + __HAL_PWR_PVD_EXTI_ENABLE_EVENT(); + } + + /* Configure the edge */ + if((sConfigPVD->Mode & PVD_RISING_EDGE) == PVD_RISING_EDGE) + { + __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE(); + } + + if((sConfigPVD->Mode & PVD_FALLING_EDGE) == PVD_FALLING_EDGE) + { + __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE(); + } +} + +/** + * @brief Enables the Power Voltage Detector(PVD). + * @retval None + */ +void HAL_PWR_EnablePVD(void) +{ + /* Enable the power voltage detector */ + SET_BIT(PWR->CR1, PWR_CR1_PVDE); +} + +/** + * @brief Disables the Power Voltage Detector(PVD). + * @retval None + */ +void HAL_PWR_DisablePVD(void) +{ + /* Disable the power voltage detector */ + CLEAR_BIT(PWR->CR1, PWR_CR1_PVDE); +} + +/** + * @brief Enable the WakeUp PINx functionality. + * @param WakeUpPinPolarity Specifies which Wake-Up pin to enable. 
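HAL_PWR_ConfigPVD() above only programs the threshold and the EXTI line; the detector itself is switched on separately with HAL_PWR_EnablePVD(). A minimal sketch, where the PWR_PVDLEVEL_x and PWR_PVD_MODE_IT_RISING symbols are assumed to come from stm32f7xx_hal_pwr.h:

PWR_PVDTypeDef sConfigPVD;

sConfigPVD.PVDLevel = PWR_PVDLEVEL_6;          /* threshold via PLS[2:0], see the datasheet */
sConfigPVD.Mode     = PWR_PVD_MODE_IT_RISING;  /* EXTI line 16 interrupt on rising edge     */
HAL_PWR_ConfigPVD(&sConfigPVD);
HAL_PWR_EnablePVD();
/* PVD_IRQHandler() should then call HAL_PWR_PVD_IRQHandler(), which clears the
   EXTI pending bit and invokes HAL_PWR_PVDCallback(). */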
+ * This parameter can be one of the following legacy values, which sets the default polarity: + * detection on high level (rising edge): + * @arg PWR_WAKEUP_PIN1, PWR_WAKEUP_PIN2, PWR_WAKEUP_PIN3, PWR_WAKEUP_PIN4, PWR_WAKEUP_PIN5, PWR_WAKEUP_PIN6 + * or one of the following value where the user can explicitly states the enabled pin and + * the chosen polarity + * @arg PWR_WAKEUP_PIN1_HIGH or PWR_WAKEUP_PIN1_LOW + * @arg PWR_WAKEUP_PIN2_HIGH or PWR_WAKEUP_PIN2_LOW + * @arg PWR_WAKEUP_PIN3_HIGH or PWR_WAKEUP_PIN3_LOW + * @arg PWR_WAKEUP_PIN4_HIGH or PWR_WAKEUP_PIN4_LOW + * @arg PWR_WAKEUP_PIN5_HIGH or PWR_WAKEUP_PIN5_LOW + * @arg PWR_WAKEUP_PIN6_HIGH or PWR_WAKEUP_PIN6_LOW + * @note PWR_WAKEUP_PINx and PWR_WAKEUP_PINx_HIGH are equivalent. + * @retval None + */ +void HAL_PWR_EnableWakeUpPin(uint32_t WakeUpPinPolarity) +{ + assert_param(IS_PWR_WAKEUP_PIN(WakeUpPinPolarity)); + + /* Enable wake-up pin */ + SET_BIT(PWR->CSR2, (PWR_EWUP_MASK & WakeUpPinPolarity)); + + /* Specifies the Wake-Up pin polarity for the event detection + (rising or falling edge) */ + MODIFY_REG(PWR->CR2, (PWR_EWUP_MASK & WakeUpPinPolarity), (WakeUpPinPolarity >> 0x06)); +} + +/** + * @brief Disables the WakeUp PINx functionality. + * @param WakeUpPinx Specifies the Power Wake-Up pin to disable. + * This parameter can be one of the following values: + * @arg PWR_WAKEUP_PIN1 + * @arg PWR_WAKEUP_PIN2 + * @arg PWR_WAKEUP_PIN3 + * @arg PWR_WAKEUP_PIN4 + * @arg PWR_WAKEUP_PIN5 + * @arg PWR_WAKEUP_PIN6 + * @retval None + */ +void HAL_PWR_DisableWakeUpPin(uint32_t WakeUpPinx) +{ + assert_param(IS_PWR_WAKEUP_PIN(WakeUpPinx)); + + CLEAR_BIT(PWR->CSR2, WakeUpPinx); +} + +/** + * @brief Enters Sleep mode. + * + * @note In Sleep mode, all I/O pins keep the same state as in Run mode. + * + * @note In Sleep mode, the systick is stopped to avoid exit from this mode with + * systick interrupt when used as time base for Timeout + * + * @param Regulator Specifies the regulator state in SLEEP mode. + * This parameter can be one of the following values: + * @arg PWR_MAINREGULATOR_ON: SLEEP mode with regulator ON + * @arg PWR_LOWPOWERREGULATOR_ON: SLEEP mode with low power regulator ON + * @note This parameter is not used for the STM32F7 family and is kept as parameter + * just to maintain compatibility with the lower power families. + * @param SLEEPEntry Specifies if SLEEP mode in entered with WFI or WFE instruction. + * This parameter can be one of the following values: + * @arg PWR_SLEEPENTRY_WFI: enter SLEEP mode with WFI instruction + * @arg PWR_SLEEPENTRY_WFE: enter SLEEP mode with WFE instruction + * @retval None + */ +void HAL_PWR_EnterSLEEPMode(uint32_t Regulator, uint8_t SLEEPEntry) +{ + /* Check the parameters */ + assert_param(IS_PWR_REGULATOR(Regulator)); + assert_param(IS_PWR_SLEEP_ENTRY(SLEEPEntry)); + + /* Clear SLEEPDEEP bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPDEEP_Msk)); + + /* Ensure that all instructions done before entering SLEEP mode */ + __DSB(); + __ISB(); + + /* Select SLEEP mode entry -------------------------------------------------*/ + if(SLEEPEntry == PWR_SLEEPENTRY_WFI) + { + /* Request Wait For Interrupt */ + __WFI(); + } + else + { + /* Request Wait For Event */ + __SEV(); + __WFE(); + __WFE(); + } +} + +/** + * @brief Enters Stop mode. + * @note In Stop mode, all I/O pins keep the same state as in Run mode. + * @note When exiting Stop mode by issuing an interrupt or a wakeup event, + * the HSI RC oscillator is selected as system clock. 
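As the notes above state, the Regulator argument of HAL_PWR_EnterSLEEPMode() is ignored on STM32F7 parts and only the WFI/WFE entry choice matters. A minimal sketch, assuming the SysTick time base is suspended around the call (HAL_SuspendTick()/HAL_ResumeTick() are part of the core HAL) so its interrupt does not wake the core immediately:

HAL_SuspendTick();
HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI);
/* any enabled interrupt wakes the core; execution resumes here */
HAL_ResumeTick();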
+ * @note When the voltage regulator operates in low power mode, an additional + * startup delay is incurred when waking up from Stop mode. + * By keeping the internal regulator ON during Stop mode, the consumption + * is higher although the startup time is reduced. + * @param Regulator Specifies the regulator state in Stop mode. + * This parameter can be one of the following values: + * @arg PWR_MAINREGULATOR_ON: Stop mode with regulator ON + * @arg PWR_LOWPOWERREGULATOR_ON: Stop mode with low power regulator ON + * @param STOPEntry Specifies if Stop mode in entered with WFI or WFE instruction. + * This parameter can be one of the following values: + * @arg PWR_STOPENTRY_WFI: Enter Stop mode with WFI instruction + * @arg PWR_STOPENTRY_WFE: Enter Stop mode with WFE instruction + * @retval None + */ +void HAL_PWR_EnterSTOPMode(uint32_t Regulator, uint8_t STOPEntry) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_PWR_REGULATOR(Regulator)); + assert_param(IS_PWR_STOP_ENTRY(STOPEntry)); + + /* Select the regulator state in Stop mode ---------------------------------*/ + tmpreg = PWR->CR1; + /* Clear PDDS and LPDS bits */ + tmpreg &= (uint32_t)~(PWR_CR1_PDDS | PWR_CR1_LPDS); + + /* Set LPDS, MRLVDS and LPLVDS bits according to Regulator value */ + tmpreg |= Regulator; + + /* Store the new value */ + PWR->CR1 = tmpreg; + + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR |= SCB_SCR_SLEEPDEEP_Msk; + + /* Ensure that all instructions done before entering STOP mode */ + __DSB(); + __ISB(); + + /* Select Stop mode entry --------------------------------------------------*/ + if(STOPEntry == PWR_STOPENTRY_WFI) + { + /* Request Wait For Interrupt */ + __WFI(); + } + else + { + /* Request Wait For Event */ + __SEV(); + __WFE(); + __WFE(); + } + /* Reset SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR &= (uint32_t)~((uint32_t)SCB_SCR_SLEEPDEEP_Msk); +} + +/** + * @brief Enters Standby mode. + * @note In Standby mode, all I/O pins are high impedance except for: + * - Reset pad (still available) + * - RTC_AF1 pin (PC13) if configured for tamper, time-stamp, RTC + * Alarm out, or RTC clock calibration out. + * - RTC_AF2 pin (PI8) if configured for tamper or time-stamp. + * - WKUP pins if enabled. + * @retval None + */ +void HAL_PWR_EnterSTANDBYMode(void) +{ + /* Select Standby mode */ + PWR->CR1 |= PWR_CR1_PDDS; + + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR |= SCB_SCR_SLEEPDEEP_Msk; + + /* This option is used to ensure that store operations are completed */ +#if defined ( __CC_ARM) + __force_stores(); +#endif + /* Request Wait For Interrupt */ + __WFI(); +} + +/** + * @brief This function handles the PWR PVD interrupt request. + * @note This API should be called under the PVD_IRQHandler(). + * @retval None + */ +void HAL_PWR_PVD_IRQHandler(void) +{ + /* Check PWR Exti flag */ + if(__HAL_PWR_PVD_EXTI_GET_FLAG() != RESET) + { + /* PWR PVD interrupt user callback */ + HAL_PWR_PVDCallback(); + + /* Clear PWR Exti pending bit */ + __HAL_PWR_PVD_EXTI_CLEAR_FLAG(); + } +} + +/** + * @brief PWR PVD interrupt callback + * @retval None + */ +__weak void HAL_PWR_PVDCallback(void) +{ + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_PWR_PVDCallback could be implemented in the user file + */ +} + +/** + * @brief Indicates Sleep-On-Exit when returning from Handler mode to Thread mode. + * @note Set SLEEPONEXIT bit of SCR register. 
When this bit is set, the processor + * re-enters SLEEP mode when an interruption handling is over. + * Setting this bit is useful when the processor is expected to run only on + * interruptions handling. + * @retval None + */ +void HAL_PWR_EnableSleepOnExit(void) +{ + /* Set SLEEPONEXIT bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPONEXIT_Msk)); +} + +/** + * @brief Disables Sleep-On-Exit feature when returning from Handler mode to Thread mode. + * @note Clears SLEEPONEXIT bit of SCR register. When this bit is set, the processor + * re-enters SLEEP mode when an interruption handling is over. + * @retval None + */ +void HAL_PWR_DisableSleepOnExit(void) +{ + /* Clear SLEEPONEXIT bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPONEXIT_Msk)); +} + +/** + * @brief Enables CORTEX M4 SEVONPEND bit. + * @note Sets SEVONPEND bit of SCR register. When this bit is set, this causes + * WFE to wake up when an interrupt moves from inactive to pended. + * @retval None + */ +void HAL_PWR_EnableSEVOnPend(void) +{ + /* Set SEVONPEND bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SEVONPEND_Msk)); +} + +/** + * @brief Disables CORTEX M4 SEVONPEND bit. + * @note Clears SEVONPEND bit of SCR register. When this bit is set, this causes + * WFE to wake up when an interrupt moves from inactive to pended. + * @retval None + */ +void HAL_PWR_DisableSEVOnPend(void) +{ + /* Clear SEVONPEND bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SEVONPEND_Msk)); +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_PWR_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c new file mode 100644 index 000000000..af8f870f3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c @@ -0,0 +1,554 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pwr_ex.c + * @author MCD Application Team + * @brief Extended PWR HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of PWR extension peripheral: + * + Peripheral Extended features functions + * + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup PWREx PWREx + * @brief PWR HAL module driver + * @{ + */ + +#ifdef HAL_PWR_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup PWREx_Private_Constants + * @{ + */ +#define PWR_OVERDRIVE_TIMEOUT_VALUE 1000 +#define PWR_UDERDRIVE_TIMEOUT_VALUE 1000 +#define PWR_BKPREG_TIMEOUT_VALUE 1000 +#define PWR_VOSRDY_TIMEOUT_VALUE 1000 +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @defgroup PWREx_Exported_Functions PWREx Exported Functions + * @{ + */ + +/** @defgroup PWREx_Exported_Functions_Group1 Peripheral Extended features functions + * @brief Peripheral Extended features functions + * +@verbatim + + =============================================================================== + ##### Peripheral extended features functions ##### + =============================================================================== + + *** Main and Backup Regulators configuration *** + ================================================ + [..] + (+) The backup domain includes 4 Kbytes of backup SRAM accessible only from + the CPU, and address in 32-bit, 16-bit or 8-bit mode. Its content is + retained even in Standby or VBAT mode when the low power backup regulator + is enabled. It can be considered as an internal EEPROM when VBAT is + always present. You can use the HAL_PWREx_EnableBkUpReg() function to + enable the low power backup regulator. + + (+) When the backup domain is supplied by VDD (analog switch connected to VDD) + the backup SRAM is powered from VDD which replaces the VBAT power supply to + save battery life. + + (+) The backup SRAM is not mass erased by a tamper event. It is read + protected to prevent confidential data, such as cryptographic private + key, from being accessed. The backup SRAM can be erased only through + the Flash interface when a protection level change from level 1 to + level 0 is requested. + -@- Refer to the description of Read protection (RDP) in the Flash + programming manual. + + (+) The main internal regulator can be configured to have a tradeoff between + performance and power consumption when the device does not operate at + the maximum frequency. This is done through __HAL_PWR_MAINREGULATORMODE_CONFIG() + macro which configure VOS bit in PWR_CR register + + Refer to the product datasheets for more details. + + *** FLASH Power Down configuration **** + ======================================= + [..] + (+) By setting the FPDS bit in the PWR_CR register by using the + HAL_PWREx_EnableFlashPowerDown() function, the Flash memory also enters power + down mode when the device enters Stop mode. 
When the Flash memory + is in power down mode, an additional startup delay is incurred when + waking up from Stop mode. + + *** Over-Drive and Under-Drive configuration **** + ================================================= + [..] + (+) In Run mode: the main regulator has 2 operating modes available: + (++) Normal mode: The CPU and core logic operate at maximum frequency at a given + voltage scaling (scale 1, scale 2 or scale 3) + (++) Over-drive mode: This mode allows the CPU and the core logic to operate at a + higher frequency than the normal mode for a given voltage scaling (scale 1, + scale 2 or scale 3). This mode is enabled through HAL_PWREx_EnableOverDrive() function and + disabled by HAL_PWREx_DisableOverDrive() function, to enter or exit from Over-drive mode please follow + the sequence described in Reference manual. + + (+) In Stop mode: the main regulator or low power regulator supplies a low power + voltage to the 1.2V domain, thus preserving the content of registers + and internal SRAM. 2 operating modes are available: + (++) Normal mode: the 1.2V domain is preserved in nominal leakage mode. This mode is only + available when the main regulator or the low power regulator is used in Scale 3 or + low voltage mode. + (++) Under-drive mode: the 1.2V domain is preserved in reduced leakage mode. This mode is only + available when the main regulator or the low power regulator is in low voltage mode. + +@endverbatim + * @{ + */ + +/** + * @brief Enables the Backup Regulator. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_EnableBkUpReg(void) +{ + uint32_t tickstart = 0; + + /* Enable Backup regulator */ + PWR->CSR1 |= PWR_CSR1_BRE; + + /* Workaround for the following hardware bug: */ + /* Id 19: PWR : No STANDBY wake-up when Back-up RAM enabled (ref. Errata Sheet p23) */ + PWR->CSR1 |= PWR_CSR1_EIWUP; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till Backup regulator ready flag is set */ + while(__HAL_PWR_GET_FLAG(PWR_FLAG_BRR) == RESET) + { + if((HAL_GetTick() - tickstart ) > PWR_BKPREG_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Disables the Backup Regulator. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_DisableBkUpReg(void) +{ + uint32_t tickstart = 0; + + /* Disable Backup regulator */ + PWR->CSR1 &= (uint32_t)~((uint32_t)PWR_CSR1_BRE); + + /* Workaround for the following hardware bug: */ + /* Id 19: PWR : No STANDBY wake-up when Back-up RAM enabled (ref. Errata Sheet p23) */ + PWR->CSR1 |= PWR_CSR1_EIWUP; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till Backup regulator ready flag is set */ + while(__HAL_PWR_GET_FLAG(PWR_FLAG_BRR) != RESET) + { + if((HAL_GetTick() - tickstart ) > PWR_BKPREG_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Enables the Flash Power Down in Stop mode. + * @retval None + */ +void HAL_PWREx_EnableFlashPowerDown(void) +{ + /* Enable the Flash Power Down */ + PWR->CR1 |= PWR_CR1_FPDS; +} + +/** + * @brief Disables the Flash Power Down in Stop mode. + * @retval None + */ +void HAL_PWREx_DisableFlashPowerDown(void) +{ + /* Disable the Flash Power Down */ + PWR->CR1 &= (uint32_t)~((uint32_t)PWR_CR1_FPDS); +} + +/** + * @brief Enables Main Regulator low voltage mode. + * @retval None + */ +void HAL_PWREx_EnableMainRegulatorLowVoltage(void) +{ + /* Enable Main regulator low voltage */ + PWR->CR1 |= PWR_CR1_MRUDS; +} + +/** + * @brief Disables Main Regulator low voltage mode. 
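HAL_PWREx_EnableBkUpReg() above polls the BRR flag and can time out, so its return value is worth checking, and the backup domain has to be unlocked first. A minimal sketch for retaining the 4-Kbyte backup SRAM through Standby/VBAT:

__HAL_RCC_PWR_CLK_ENABLE();
HAL_PWR_EnableBkUpAccess();
if (HAL_PWREx_EnableBkUpReg() != HAL_OK)
{
  /* BRR never set within PWR_BKPREG_TIMEOUT_VALUE -> report or retry here */
}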
+ * @retval None + */ +void HAL_PWREx_DisableMainRegulatorLowVoltage(void) +{ + /* Disable Main regulator low voltage */ + PWR->CR1 &= (uint32_t)~((uint32_t)PWR_CR1_MRUDS); +} + +/** + * @brief Enables Low Power Regulator low voltage mode. + * @retval None + */ +void HAL_PWREx_EnableLowRegulatorLowVoltage(void) +{ + /* Enable low power regulator */ + PWR->CR1 |= PWR_CR1_LPUDS; +} + +/** + * @brief Disables Low Power Regulator low voltage mode. + * @retval None + */ +void HAL_PWREx_DisableLowRegulatorLowVoltage(void) +{ + /* Disable low power regulator */ + PWR->CR1 &= (uint32_t)~((uint32_t)PWR_CR1_LPUDS); +} + +/** + * @brief Activates the Over-Drive mode. + * @note This mode allows the CPU and the core logic to operate at a higher frequency + * than the normal mode for a given voltage scaling (scale 1, scale 2 or scale 3). + * @note It is recommended to enter or exit Over-drive mode when the application is not running + * critical tasks and when the system clock source is either HSI or HSE. + * During the Over-drive switch activation, no peripheral clocks should be enabled. + * The peripheral clocks must be enabled once the Over-drive mode is activated. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_EnableOverDrive(void) +{ + uint32_t tickstart = 0; + + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable the Over-drive to extend the clock frequency to 216 MHz */ + __HAL_PWR_OVERDRIVE_ENABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(!__HAL_PWR_GET_FLAG(PWR_FLAG_ODRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Enable the Over-drive switch */ + __HAL_PWR_OVERDRIVESWITCHING_ENABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(!__HAL_PWR_GET_FLAG(PWR_FLAG_ODSWRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Deactivates the Over-Drive mode. + * @note This mode allows the CPU and the core logic to operate at a higher frequency + * than the normal mode for a given voltage scaling (scale 1, scale 2 or scale 3). + * @note It is recommended to enter or exit Over-drive mode when the application is not running + * critical tasks and when the system clock source is either HSI or HSE. + * During the Over-drive switch activation, no peripheral clocks should be enabled. + * The peripheral clocks must be enabled once the Over-drive mode is activated. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_DisableOverDrive(void) +{ + uint32_t tickstart = 0; + + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Disable the Over-drive switch */ + __HAL_PWR_OVERDRIVESWITCHING_DISABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_PWR_GET_FLAG(PWR_FLAG_ODSWRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Disable the Over-drive */ + __HAL_PWR_OVERDRIVE_DISABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_PWR_GET_FLAG(PWR_FLAG_ODRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Enters in Under-Drive STOP mode. + * + * @note This mode can be selected only when the Under-Drive is already active + * + * @note This mode is enabled only with STOP low power mode. + * In this mode, the 1.2V domain is preserved in reduced leakage mode. 
This + * mode is only available when the main regulator or the low power regulator + * is in low voltage mode + * + * @note If the Under-drive mode was enabled, it is automatically disabled after + * exiting Stop mode. + * When the voltage regulator operates in Under-drive mode, an additional + * startup delay is induced when waking up from Stop mode. + * + * @note In Stop mode, all I/O pins keep the same state as in Run mode. + * + * @note When exiting Stop mode by issuing an interrupt or a wakeup event, + * the HSI RC oscillator is selected as system clock. + * + * @note When the voltage regulator operates in low power mode, an additional + * startup delay is incurred when waking up from Stop mode. + * By keeping the internal regulator ON during Stop mode, the consumption + * is higher although the startup time is reduced. + * + * @param Regulator specifies the regulator state in STOP mode. + * This parameter can be one of the following values: + * @arg PWR_MAINREGULATOR_UNDERDRIVE_ON: Main Regulator in under-drive mode + * and Flash memory in power-down when the device is in Stop under-drive mode + * @arg PWR_LOWPOWERREGULATOR_UNDERDRIVE_ON: Low Power Regulator in under-drive mode + * and Flash memory in power-down when the device is in Stop under-drive mode + * @param STOPEntry specifies if STOP mode in entered with WFI or WFE instruction. + * This parameter can be one of the following values: + * @arg PWR_SLEEPENTRY_WFI: enter STOP mode with WFI instruction + * @arg PWR_SLEEPENTRY_WFE: enter STOP mode with WFE instruction + * @retval None + */ +HAL_StatusTypeDef HAL_PWREx_EnterUnderDriveSTOPMode(uint32_t Regulator, uint8_t STOPEntry) +{ + uint32_t tempreg = 0; + uint32_t tickstart = 0; + + /* Check the parameters */ + assert_param(IS_PWR_REGULATOR_UNDERDRIVE(Regulator)); + assert_param(IS_PWR_STOP_ENTRY(STOPEntry)); + + /* Enable Power ctrl clock */ + __HAL_RCC_PWR_CLK_ENABLE(); + /* Enable the Under-drive Mode ---------------------------------------------*/ + /* Clear Under-drive flag */ + __HAL_PWR_CLEAR_ODRUDR_FLAG(); + + /* Enable the Under-drive */ + __HAL_PWR_UNDERDRIVE_ENABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait for UnderDrive mode is ready */ + while(__HAL_PWR_GET_FLAG(PWR_FLAG_UDRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_UDERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Select the regulator state in STOP mode ---------------------------------*/ + tempreg = PWR->CR1; + /* Clear PDDS, LPDS, MRLUDS and LPLUDS bits */ + tempreg &= (uint32_t)~(PWR_CR1_PDDS | PWR_CR1_LPDS | PWR_CR1_LPUDS | PWR_CR1_MRUDS); + + /* Set LPDS, MRLUDS and LPLUDS bits according to PWR_Regulator value */ + tempreg |= Regulator; + + /* Store the new value */ + PWR->CR1 = tempreg; + + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR |= SCB_SCR_SLEEPDEEP_Msk; + + /* Select STOP mode entry --------------------------------------------------*/ + if(STOPEntry == PWR_SLEEPENTRY_WFI) + { + /* Request Wait For Interrupt */ + __WFI(); + } + else + { + /* Request Wait For Event */ + __WFE(); + } + /* Reset SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR &= (uint32_t)~((uint32_t)SCB_SCR_SLEEPDEEP_Msk); + + return HAL_OK; +} + +/** + * @brief Returns Voltage Scaling Range. 
+ * @retval VOS bit field (PWR_REGULATOR_VOLTAGE_SCALE1, PWR_REGULATOR_VOLTAGE_SCALE2 or + * PWR_REGULATOR_VOLTAGE_SCALE3)PWR_REGULATOR_VOLTAGE_SCALE1 + */ +uint32_t HAL_PWREx_GetVoltageRange(void) +{ + return (PWR->CR1 & PWR_CR1_VOS); +} + +/** + * @brief Configures the main internal regulator output voltage. + * @param VoltageScaling specifies the regulator output voltage to achieve + * a tradeoff between performance and power consumption. + * This parameter can be one of the following values: + * @arg PWR_REGULATOR_VOLTAGE_SCALE1: Regulator voltage output range 1 mode, + * typical output voltage at 1.4 V, + * system frequency up to 216 MHz. + * @arg PWR_REGULATOR_VOLTAGE_SCALE2: Regulator voltage output range 2 mode, + * typical output voltage at 1.2 V, + * system frequency up to 180 MHz. + * @arg PWR_REGULATOR_VOLTAGE_SCALE3: Regulator voltage output range 2 mode, + * typical output voltage at 1.00 V, + * system frequency up to 151 MHz. + * @note To update the system clock frequency(SYSCLK): + * - Set the HSI or HSE as system clock frequency using the HAL_RCC_ClockConfig(). + * - Call the HAL_RCC_OscConfig() to configure the PLL. + * - Call HAL_PWREx_ConfigVoltageScaling() API to adjust the voltage scale. + * - Set the new system clock frequency using the HAL_RCC_ClockConfig(). + * @note The scale can be modified only when the HSI or HSE clock source is selected + * as system clock source, otherwise the API returns HAL_ERROR. + * @note When the PLL is OFF, the voltage scale 3 is automatically selected and the VOS bits + * value in the PWR_CR1 register are not taken in account. + * @note This API forces the PLL state ON to allow the possibility to configure the voltage scale 1 or 2. + * @note The new voltage scale is active only when the PLL is ON. 
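Following the sequence in the notes above, the voltage scale is changed while the system still runs from HSI or HSE, and Over-drive is activated before switching to the PLL-derived 216 MHz clock. One plausible ordering, sketched under those assumptions:

__HAL_RCC_PWR_CLK_ENABLE();
if (HAL_PWREx_ControlVoltageScaling(PWR_REGULATOR_VOLTAGE_SCALE1) != HAL_OK)
{
  /* rejected, e.g. because the PLL is currently the system clock source */
}
if (HAL_PWREx_EnableOverDrive() != HAL_OK)
{
  /* ODRDY/ODSWRDY not set within PWR_OVERDRIVE_TIMEOUT_VALUE */
}
/* ...then select the PLL as system clock with HAL_RCC_ClockConfig() */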
+ * @retval HAL Status + */ +HAL_StatusTypeDef HAL_PWREx_ControlVoltageScaling(uint32_t VoltageScaling) +{ + uint32_t tickstart = 0; + + assert_param(IS_PWR_REGULATOR_VOLTAGE(VoltageScaling)); + + /* Enable Power ctrl clock */ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Check if the PLL is used as system clock or not */ + if(__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL) + { + /* Disable the main PLL */ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + /* Wait till PLL is disabled */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Set Range */ + __HAL_PWR_VOLTAGESCALING_CONFIG(VoltageScaling); + + /* Enable the main PLL */ + __HAL_RCC_PLL_ENABLE(); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + /* Wait till PLL is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + { + if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + while((__HAL_PWR_GET_FLAG(PWR_FLAG_VOSRDY) == RESET)) + { + if((HAL_GetTick() - tickstart ) > PWR_VOSRDY_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + return HAL_ERROR; + } + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_PWR_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c new file mode 100644 index 000000000..d7132c045 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c @@ -0,0 +1,1242 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rcc.c + * @author MCD Application Team + * @brief RCC HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Reset and Clock Control (RCC) peripheral: + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + @verbatim + ============================================================================== + ##### RCC specific features ##### + ============================================================================== + [..] + After reset the device is running from Internal High Speed oscillator + (HSI 16MHz) with Flash 0 wait state, Flash prefetch buffer, D-Cache + and I-Cache are disabled, and all peripherals are off except internal + SRAM, Flash and JTAG. + (+) There is no prescaler on High speed (AHB) and Low speed (APB) busses; + all peripherals mapped on these busses are running at HSI speed. + (+) The clock for all peripherals is switched off, except the SRAM and FLASH. + (+) All GPIOs are in input floating state, except the JTAG pins which + are assigned to be used for debug purpose. + + [..] 
+ Once the device has started from reset, the user application has to: + (+) Configure the clock source to be used to drive the System clock + (if the application needs higher frequency/performance) + (+) Configure the System clock frequency and Flash settings + (+) Configure the AHB and APB busses prescalers + (+) Enable the clock for the peripheral(s) to be used + (+) Configure the clock source(s) for peripherals whose clocks are not + derived from the System clock (I2S, RTC, ADC, USB OTG FS/SDIO/RNG) +
+ ##### RCC Limitations ##### + ============================================================================== + [..] + A delay between an RCC peripheral clock enable and the effective peripheral + enabling should be taken into account in order to manage the peripheral read/write + from/to registers. + (+) This delay depends on the peripheral mapping. + (+) If the peripheral is mapped on AHB: the delay is 2 AHB clock cycles + after the clock enable bit is set in the hardware register + (+) If the peripheral is mapped on APB: the delay is 2 APB clock cycles + after the clock enable bit is set in the hardware register +
+ [..] + Implemented Workaround: + (+) For AHB & APB peripherals, a dummy read to the peripheral register has been + inserted in each __HAL_RCC_PPP_CLK_ENABLE() macro. + + @endverbatim + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup RCC RCC + * @brief RCC HAL module driver + * @{ + */ + +#ifdef HAL_RCC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/** @defgroup RCC_Private_Macros RCC Private Macros + * @{ + */ + +#define MCO1_CLK_ENABLE() __HAL_RCC_GPIOA_CLK_ENABLE() +#define MCO1_GPIO_PORT GPIOA +#define MCO1_PIN GPIO_PIN_8 + +#define MCO2_CLK_ENABLE() __HAL_RCC_GPIOC_CLK_ENABLE() +#define MCO2_GPIO_PORT GPIOC +#define MCO2_PIN GPIO_PIN_9 + +/** + * @} + */ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup RCC_Private_Variables RCC Private Variables + * @{ + */ + +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions ---------------------------------------------------------*/ + +/** @defgroup RCC_Exported_Functions RCC Exported Functions + * @{ + */ + +/** @defgroup RCC_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * + @verbatim + =============================================================================== +##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + This section provides functions allowing to configure the internal/external oscillators + (HSE, HSI, LSE, LSI, PLL, CSS and MCO) and the System buses clocks (SYSCLK, AHB, APB1 + and APB2). + + [..] Internal/external clock and PLL configuration + (#) HSI (high-speed internal), 16 MHz factory-trimmed RC used directly or through + the PLL as System clock source. + + (#) LSI (low-speed internal), 32 KHz low consumption RC used as IWDG and/or RTC + clock source. + + (#) HSE (high-speed external), 4 to 26 MHz crystal oscillator used directly or + through the PLL as System clock source. Can be used also as RTC clock source. + + (#) LSE (low-speed external), 32 KHz oscillator used as RTC clock source. + + (#) PLL (clocked by HSI or HSE), featuring two different output clocks: + (++) The first output is used to generate the high speed system clock (up to 216 MHz) + (++) The second output is used to generate the clock for the USB OTG FS (48 MHz), + the random analog generator (<=48 MHz) and the SDIO (<= 48 MHz). + + (#) CSS (Clock security system), once enable using the function HAL_RCC_EnableCSS() + and if a HSE clock failure occurs(HSE used directly or through PLL as System + clock source), the System clock is automatically switched to HSI and an interrupt + is generated if enabled. The interrupt is linked to the Cortex-M7 NMI + (Non-Maskable Interrupt) exception vector. + + (#) MCO1 (microcontroller clock output), used to output HSI, LSE, HSE or PLL + clock (through a configurable prescaler) on PA8 pin. 
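A one-line usage sketch for the MCO1 item above, before continuing with MCO2 below (the source and divider are arbitrary example choices; HAL_RCC_MCOConfig() performs the PA8 alternate-function setup itself):

/* Bring the HSE clock out on PA8 (MCO1), divided by 4, e.g. to verify the crystal. */
HAL_RCC_MCOConfig(RCC_MCO1, RCC_MCO1SOURCE_HSE, RCC_MCODIV_4);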
+ + (#) MCO2 (microcontroller clock output), used to output HSE, PLL, SYSCLK or PLLI2S + clock (through a configurable prescaler) on PC9 pin. + + [..] System, AHB and APB busses clocks configuration + (#) Several clock sources can be used to drive the System clock (SYSCLK): HSI, + HSE and PLL. + The AHB clock (HCLK) is derived from the System clock through a configurable + prescaler and used to clock the CPU, memory and peripherals mapped + on the AHB bus (DMA, GPIO...). APB1 (PCLK1) and APB2 (PCLK2) clocks are derived + from the AHB clock through configurable prescalers and used to clock + the peripherals mapped on these busses. You can use the + "HAL_RCC_GetSysClockFreq()" function to retrieve the frequencies of these clocks. +
+ -@- All the peripheral clocks are derived from the System clock (SYSCLK) except: + (+@) I2S: the I2S clock can be derived either from a specific PLL (PLLI2S) or + from an external clock mapped on the I2S_CKIN pin. + You have to use the __HAL_RCC_PLLI2S_CONFIG() macro to configure this clock. + (+@) SAI: the SAI clock can be derived either from a specific PLL (PLLI2S or PLLSAI) or + from an external clock mapped on the I2S_CKIN pin. + You have to use the __HAL_RCC_PLLI2S_CONFIG() macro to configure this clock. + (+@) RTC: the RTC clock can be derived either from the LSI, LSE or HSE clock + divided by 2 to 31. You have to use the __HAL_RCC_RTC_CONFIG() and __HAL_RCC_RTC_ENABLE() + macros to configure this clock. + (+@) USB OTG FS, SDIO and RNG: the USB OTG FS requires a frequency equal to 48 MHz + to work correctly, while the SDIO requires a frequency equal to or lower than + 48 MHz. This clock is derived from the main PLL through the PLLQ divider. + (+@) IWDG clock, which is always the LSI clock. +@endverbatim + * @{ + */ +
+/** + * @brief Resets the RCC clock configuration to the default reset state. + * @note The default reset state of the clock configuration is given below: + * - HSI ON and used as system clock source + * - HSE, PLL, PLLI2S and PLLSAI OFF + * - AHB, APB1 and APB2 prescalers set to 1.
+ * - CSS, MCO1 and MCO2 OFF + * - All interrupts disabled + * @note This function doesn't modify the configuration of the + * - Peripheral clocks + * - LSI, LSE and RTC clocks + * @retval None + */ +HAL_StatusTypeDef HAL_RCC_DeInit(void) +{ + uint32_t tickstart; + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Set HSION bit to the reset value */ + SET_BIT(RCC->CR, RCC_CR_HSION); + + /* Wait till HSI is ready */ + while (READ_BIT(RCC->CR, RCC_CR_HSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Set HSITRIM[4:0] bits to the reset value */ + SET_BIT(RCC->CR, RCC_CR_HSITRIM_4); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Reset CFGR register */ + CLEAR_REG(RCC->CFGR); + + /* Wait till clock switch is ready */ + while (READ_BIT(RCC->CFGR, RCC_CFGR_SWS) != RESET) + { + if ((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Clear HSEON, HSEBYP and CSSON bits */ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON | RCC_CR_HSEBYP | RCC_CR_CSSON); + + /* Wait till HSE is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_HSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Clear PLLON bit */ + CLEAR_BIT(RCC->CR, RCC_CR_PLLON); + + /* Wait till PLL is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Reset PLLI2SON bit */ + CLEAR_BIT(RCC->CR, RCC_CR_PLLI2SON); + + /* Wait till PLLI2S is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_PLLI2SRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Reset PLLSAI bit */ + CLEAR_BIT(RCC->CR, RCC_CR_PLLSAION); + + /* Wait till PLLSAI is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_PLLSAIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Once PLL, PLLI2S and PLLSAI are OFF, reset PLLCFGR register to default value */ + RCC->PLLCFGR = RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLN_6 | RCC_PLLCFGR_PLLN_7 | RCC_PLLCFGR_PLLQ_2 | 0x20000000U; + + /* Reset PLLI2SCFGR register to default value */ + RCC->PLLI2SCFGR = RCC_PLLI2SCFGR_PLLI2SN_6 | RCC_PLLI2SCFGR_PLLI2SN_7 | RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SR_1; + + /* Reset PLLSAICFGR register to default value */ + RCC->PLLSAICFGR = RCC_PLLSAICFGR_PLLSAIN_6 | RCC_PLLSAICFGR_PLLSAIN_7 | RCC_PLLSAICFGR_PLLSAIQ_2 | 0x20000000U; + + /* Disable all interrupts */ + CLEAR_BIT(RCC->CIR, RCC_CIR_LSIRDYIE | RCC_CIR_LSERDYIE | RCC_CIR_HSIRDYIE | RCC_CIR_HSERDYIE | RCC_CIR_PLLRDYIE | RCC_CIR_PLLI2SRDYIE | RCC_CIR_PLLSAIRDYIE); + + /* Clear all interrupt flags */ + SET_BIT(RCC->CIR, RCC_CIR_LSIRDYC | RCC_CIR_LSERDYC | RCC_CIR_HSIRDYC | RCC_CIR_HSERDYC | RCC_CIR_PLLRDYC | RCC_CIR_PLLI2SRDYC | RCC_CIR_PLLSAIRDYC | RCC_CIR_CSSC); + + /* Clear LSION bit */ + CLEAR_BIT(RCC->CSR, RCC_CSR_LSION); + + /* Reset all CSR flags */ + SET_BIT(RCC->CSR, RCC_CSR_RMVF); + + /* Update the SystemCoreClock global variable */ + SystemCoreClock = HSI_VALUE; + + /* Adapt Systick interrupt period */ + if (HAL_InitTick(uwTickPrio) != HAL_OK) + { + return HAL_ERROR; + } + else + { + return HAL_OK; + } +} + +/** + * @brief 
Initializes the RCC Oscillators according to the specified parameters in the + * RCC_OscInitTypeDef. + * @param RCC_OscInitStruct pointer to an RCC_OscInitTypeDef structure that + * contains the configuration information for the RCC Oscillators. + * @note The PLL is not disabled when used as system clock. + * @note Transitions LSE Bypass to LSE On and LSE On to LSE Bypass are not + * supported by this function. User should request a transition to LSE Off + * first and then LSE On or LSE Bypass. + * @note Transition HSE Bypass to HSE On and HSE On to HSE Bypass are not + * supported by this function. User should request a transition to HSE Off + * first and then HSE On or HSE Bypass. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) +{ + uint32_t tickstart; + uint32_t pll_config; + FlagStatus pwrclkchanged = RESET; + + /* Check Null pointer */ + if (RCC_OscInitStruct == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_RCC_OSCILLATORTYPE(RCC_OscInitStruct->OscillatorType)); + + /*------------------------------- HSE Configuration ------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSE) == RCC_OSCILLATORTYPE_HSE) + { + /* Check the parameters */ + assert_param(IS_RCC_HSE(RCC_OscInitStruct->HSEState)); + /* When the HSE is used as system clock or clock source for PLL, It can not be disabled */ + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_SYSCLKSOURCE_STATUS_HSE) + || ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_SYSCLKSOURCE_STATUS_PLLCLK) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) + { + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) && (RCC_OscInitStruct->HSEState == RCC_HSE_OFF)) + { + return HAL_ERROR; + } + } + else + { + /* Set the new HSE configuration ---------------------------------------*/ + __HAL_RCC_HSE_CONFIG(RCC_OscInitStruct->HSEState); + + /* Check the HSE State */ + if (RCC_OscInitStruct->HSEState != RCC_HSE_OFF) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSE is bypassed or disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + } + /*----------------------------- HSI Configuration --------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSI) == RCC_OSCILLATORTYPE_HSI) + { + /* Check the parameters */ + assert_param(IS_RCC_HSI(RCC_OscInitStruct->HSIState)); + assert_param(IS_RCC_CALIBRATION_VALUE(RCC_OscInitStruct->HSICalibrationValue)); + + /* Check if HSI is used as system clock or as PLL source when PLL is selected as system clock */ + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_SYSCLKSOURCE_STATUS_HSI) + || ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_SYSCLKSOURCE_STATUS_PLLCLK) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI))) + { + /* When HSI is used as system clock it will not disabled */ + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) && (RCC_OscInitStruct->HSIState != RCC_HSI_ON)) + { + return HAL_ERROR; + } + /* Otherwise, just the calibration is allowed */ + else + { + /* Adjusts the Internal High Speed oscillator (HSI) calibration value.*/ + 
__HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->HSICalibrationValue); + } + } + else + { + /* Check the HSI State */ + if ((RCC_OscInitStruct->HSIState) != RCC_HSI_OFF) + { + /* Enable the Internal High Speed oscillator (HSI). */ + __HAL_RCC_HSI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Adjusts the Internal High Speed oscillator (HSI) calibration value.*/ + __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->HSICalibrationValue); + } + else + { + /* Disable the Internal High Speed oscillator (HSI). */ + __HAL_RCC_HSI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + } + /*------------------------------ LSI Configuration -------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSI) == RCC_OSCILLATORTYPE_LSI) + { + /* Check the parameters */ + assert_param(IS_RCC_LSI(RCC_OscInitStruct->LSIState)); + + /* Check the LSI State */ + if ((RCC_OscInitStruct->LSIState) != RCC_LSI_OFF) + { + /* Enable the Internal Low Speed oscillator (LSI). */ + __HAL_RCC_LSI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Disable the Internal Low Speed oscillator (LSI). */ + __HAL_RCC_LSI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + /*------------------------------ LSE Configuration -------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSE) == RCC_OSCILLATORTYPE_LSE) + { + /* Check the parameters */ + assert_param(IS_RCC_LSE(RCC_OscInitStruct->LSEState)); + + /* Update LSE configuration in Backup Domain control register */ + /* Requires to enable write access to Backup Domain of necessary */ + if (__HAL_RCC_PWR_IS_CLK_DISABLED()) + { + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + pwrclkchanged = SET; + } + + if (HAL_IS_BIT_CLR(PWR->CR1, PWR_CR1_DBP)) + { + /* Enable write access to Backup domain */ + PWR->CR1 |= PWR_CR1_DBP; + + /* Wait for Backup domain Write protection disable */ + tickstart = HAL_GetTick(); + + while (HAL_IS_BIT_CLR(PWR->CR1, PWR_CR1_DBP)) + { + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Set the new LSE configuration -----------------------------------------*/ + __HAL_RCC_LSE_CONFIG(RCC_OscInitStruct->LSEState); + /* Check the LSE State */ + if ((RCC_OscInitStruct->LSEState) != RCC_LSE_OFF) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > 
RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Restore clock configuration if changed */ + if (pwrclkchanged == SET) + { + __HAL_RCC_PWR_CLK_DISABLE(); + } + } + /*-------------------------------- PLL Configuration -----------------------*/ + /* Check the parameters */ + assert_param(IS_RCC_PLL(RCC_OscInitStruct->PLL.PLLState)); + if ((RCC_OscInitStruct->PLL.PLLState) != RCC_PLL_NONE) + { + /* Check if the PLL is used as system clock or not */ + if (__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_SYSCLKSOURCE_STATUS_PLLCLK) + { + if ((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_ON) + { + /* Check the parameters */ + assert_param(IS_RCC_PLLSOURCE(RCC_OscInitStruct->PLL.PLLSource)); + assert_param(IS_RCC_PLLM_VALUE(RCC_OscInitStruct->PLL.PLLM)); + assert_param(IS_RCC_PLLN_VALUE(RCC_OscInitStruct->PLL.PLLN)); + assert_param(IS_RCC_PLLP_VALUE(RCC_OscInitStruct->PLL.PLLP)); + assert_param(IS_RCC_PLLQ_VALUE(RCC_OscInitStruct->PLL.PLLQ)); +#if defined (RCC_PLLCFGR_PLLR) + assert_param(IS_RCC_PLLR_VALUE(RCC_OscInitStruct->PLL.PLLR)); +#endif + + /* Disable the main PLL. */ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLL is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Configure the main PLL clock source, multiplication and division factors. */ +#if defined (RCC_PLLCFGR_PLLR) + __HAL_RCC_PLL_CONFIG(RCC_OscInitStruct->PLL.PLLSource, + RCC_OscInitStruct->PLL.PLLM, + RCC_OscInitStruct->PLL.PLLN, + RCC_OscInitStruct->PLL.PLLP, + RCC_OscInitStruct->PLL.PLLQ, + RCC_OscInitStruct->PLL.PLLR); +#else + __HAL_RCC_PLL_CONFIG(RCC_OscInitStruct->PLL.PLLSource, + RCC_OscInitStruct->PLL.PLLM, + RCC_OscInitStruct->PLL.PLLN, + RCC_OscInitStruct->PLL.PLLP, + RCC_OscInitStruct->PLL.PLLQ); +#endif + + /* Enable the main PLL. */ + __HAL_RCC_PLL_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLL is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Disable the main PLL. 
*/ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLL is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + else + { + /* Do not return HAL_ERROR if request repeats the current configuration */ + pll_config = RCC->PLLCFGR; +#if defined (RCC_PLLCFGR_PLLR) + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != RCC_OscInitStruct->PLL.PLLM) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN << RCC_PLLCFGR_PLLN_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != ((((RCC_OscInitStruct->PLL.PLLP) >> 1U) - 1U) << RCC_PLLCFGR_PLLP_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLR) != (RCC_OscInitStruct->PLL.PLLR << RCC_PLLCFGR_PLLR_Pos))) +#else + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != RCC_OscInitStruct->PLL.PLLM) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN << RCC_PLLCFGR_PLLN_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != ((((RCC_OscInitStruct->PLL.PLLP) >> 1U) - 1U) << RCC_PLLCFGR_PLLP_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos))) +#endif + { + return HAL_ERROR; + } + } + } + return HAL_OK; +} + +/** + * @brief Initializes the CPU, AHB and APB busses clocks according to the specified + * parameters in the RCC_ClkInitStruct. + * @param RCC_ClkInitStruct pointer to an RCC_OscInitTypeDef structure that + * contains the configuration information for the RCC peripheral. + * @param FLatency FLASH Latency, this parameter depend on device selected + * + * @note The SystemCoreClock CMSIS variable is used to store System Clock Frequency + * and updated by HAL_RCC_GetHCLKFreq() function called within this function + * + * @note The HSI is used (enabled by hardware) as system clock source after + * startup from Reset, wake-up from STOP and STANDBY mode, or in case + * of failure of the HSE used directly or indirectly as system clock + * (if the Clock Security System CSS is enabled). + * + * @note A switch from one clock source to another occurs only if the target + * clock source is ready (clock stable after startup delay or PLL locked). + * If a clock source which is not yet ready is selected, the switch will + * occur when the clock source will be ready. + * You can use HAL_RCC_GetClockConfig() function to know which clock is + * currently used as system clock source. 
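A small sketch of the calls the notes above refer to, here simply moving SYSCLK to an already-running HSE and reading the result back; the latency value is illustrative and assumes a low HSE frequency:

RCC_ClkInitTypeDef clk = {0};
uint32_t latency;

clk.ClockType    = RCC_CLOCKTYPE_SYSCLK;
clk.SYSCLKSource = RCC_SYSCLKSOURCE_HSE;   /* HSE must already be ready, e.g. via HAL_RCC_OscConfig() */
if (HAL_RCC_ClockConfig(&clk, FLASH_LATENCY_0) == HAL_OK)
{
  /* SystemCoreClock and the SysTick period were updated inside the call above. */
  HAL_RCC_GetClockConfig(&clk, &latency);  /* clk.SYSCLKSource now reads back RCC_SYSCLKSOURCE_HSE */
}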
+ * @note Depending on the device voltage range, the software has to set correctly + * HPRE[3:0] bits to ensure that HCLK not exceed the maximum allowed frequency + * (for more details refer to section above "Initialization/de-initialization functions") + * @retval None + */ +HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t FLatency) +{ + uint32_t tickstart = 0; + + /* Check Null pointer */ + if (RCC_ClkInitStruct == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_RCC_CLOCKTYPE(RCC_ClkInitStruct->ClockType)); + assert_param(IS_FLASH_LATENCY(FLatency)); + + /* To correctly read data from FLASH memory, the number of wait states (LATENCY) + must be correctly programmed according to the frequency of the CPU clock + (HCLK) and the supply voltage of the device. */ + + /* Increasing the CPU frequency */ + if (FLatency > __HAL_FLASH_GET_LATENCY()) + { + /* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */ + __HAL_FLASH_SET_LATENCY(FLatency); + + /* Check that the new number of wait states is taken into account to access the Flash + memory by reading the FLASH_ACR register */ + if (__HAL_FLASH_GET_LATENCY() != FLatency) + { + return HAL_ERROR; + } + } + + /*-------------------------- HCLK Configuration --------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_HCLK) == RCC_CLOCKTYPE_HCLK) + { + /* Set the highest APBx dividers in order to ensure that we do not go through + a non-spec phase whatever we decrease or increase HCLK. */ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1) + { + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, RCC_HCLK_DIV16); + } + + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2) + { + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, (RCC_HCLK_DIV16 << 3)); + } + + /* Set the new HCLK clock divider */ + assert_param(IS_RCC_HCLK(RCC_ClkInitStruct->AHBCLKDivider)); + MODIFY_REG(RCC->CFGR, RCC_CFGR_HPRE, RCC_ClkInitStruct->AHBCLKDivider); + } + + /*------------------------- SYSCLK Configuration ---------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_SYSCLK) == RCC_CLOCKTYPE_SYSCLK) + { + assert_param(IS_RCC_SYSCLKSOURCE(RCC_ClkInitStruct->SYSCLKSource)); + + /* HSE is selected as System Clock Source */ + if (RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_HSE) + { + /* Check the HSE ready flag */ + if (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) + { + return HAL_ERROR; + } + } + /* PLL is selected as System Clock Source */ + else if (RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_PLLCLK) + { + /* Check the PLL ready flag */ + if (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + { + return HAL_ERROR; + } + } + /* HSI is selected as System Clock Source */ + else + { + /* Check the HSI ready flag */ + if (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) + { + return HAL_ERROR; + } + } + + __HAL_RCC_SYSCLK_CONFIG(RCC_ClkInitStruct->SYSCLKSource); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + while (__HAL_RCC_GET_SYSCLK_SOURCE() != (RCC_ClkInitStruct->SYSCLKSource << RCC_CFGR_SWS_Pos)) + { + if ((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Decreasing the number of wait states because of lower CPU frequency */ + if (FLatency < __HAL_FLASH_GET_LATENCY()) + { + /* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */ + __HAL_FLASH_SET_LATENCY(FLatency); + + /* Check that 
the new number of wait states is taken into account to access the Flash + memory by reading the FLASH_ACR register */ + if (__HAL_FLASH_GET_LATENCY() != FLatency) + { + return HAL_ERROR; + } + } + + /*-------------------------- PCLK1 Configuration ---------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1) + { + assert_param(IS_RCC_PCLK(RCC_ClkInitStruct->APB1CLKDivider)); + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, RCC_ClkInitStruct->APB1CLKDivider); + } + + /*-------------------------- PCLK2 Configuration ---------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2) + { + assert_param(IS_RCC_PCLK(RCC_ClkInitStruct->APB2CLKDivider)); + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, ((RCC_ClkInitStruct->APB2CLKDivider) << 3)); + } + + /* Update the SystemCoreClock global variable */ + SystemCoreClock = HAL_RCC_GetSysClockFreq() >> AHBPrescTable[(RCC->CFGR & RCC_CFGR_HPRE) >> RCC_CFGR_HPRE_Pos]; + + /* Configure the source of time base considering new system clocks settings*/ + HAL_InitTick(uwTickPrio); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup RCC_Exported_Functions_Group2 Peripheral Control functions + * @brief RCC clocks control functions + * + @verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the RCC Clocks + frequencies. + +@endverbatim + * @{ + */ + +/** + * @brief Selects the clock source to output on MCO1 pin(PA8) or on MCO2 pin(PC9). + * @note PA8/PC9 should be configured in alternate function mode. + * @param RCC_MCOx specifies the output direction for the clock source. + * This parameter can be one of the following values: + * @arg RCC_MCO1: Clock source to output on MCO1 pin(PA8). + * @arg RCC_MCO2: Clock source to output on MCO2 pin(PC9). + * @param RCC_MCOSource specifies the clock source to output. + * This parameter can be one of the following values: + * @arg RCC_MCO1SOURCE_HSI: HSI clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_LSE: LSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_HSE: HSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_PLLCLK: main PLL clock selected as MCO1 source + * @arg RCC_MCO2SOURCE_SYSCLK: System clock (SYSCLK) selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLI2SCLK: PLLI2S clock selected as MCO2 source + * @arg RCC_MCO2SOURCE_HSE: HSE clock selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLCLK: main PLL clock selected as MCO2 source + * @param RCC_MCODiv specifies the MCOx prescaler. 
+ * This parameter can be one of the following values: + * @arg RCC_MCODIV_1: no division applied to MCOx clock + * @arg RCC_MCODIV_2: division by 2 applied to MCOx clock + * @arg RCC_MCODIV_3: division by 3 applied to MCOx clock + * @arg RCC_MCODIV_4: division by 4 applied to MCOx clock + * @arg RCC_MCODIV_5: division by 5 applied to MCOx clock + * @retval None + */ +void HAL_RCC_MCOConfig(uint32_t RCC_MCOx, uint32_t RCC_MCOSource, uint32_t RCC_MCODiv) +{ + GPIO_InitTypeDef GPIO_InitStruct; + /* Check the parameters */ + assert_param(IS_RCC_MCO(RCC_MCOx)); + assert_param(IS_RCC_MCODIV(RCC_MCODiv)); + /* RCC_MCO1 */ + if (RCC_MCOx == RCC_MCO1) + { + assert_param(IS_RCC_MCO1SOURCE(RCC_MCOSource)); + + /* MCO1 Clock Enable */ + MCO1_CLK_ENABLE(); + + /* Configure the MCO1 pin in alternate function mode */ + GPIO_InitStruct.Pin = MCO1_PIN; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Speed = GPIO_SPEED_HIGH; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Alternate = GPIO_AF0_MCO; + HAL_GPIO_Init(MCO1_GPIO_PORT, &GPIO_InitStruct); + + /* Mask MCO1 and MCO1PRE[2:0] bits then Select MCO1 clock source and prescaler */ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO1 | RCC_CFGR_MCO1PRE), (RCC_MCOSource | RCC_MCODiv)); + } + else + { + assert_param(IS_RCC_MCO2SOURCE(RCC_MCOSource)); + + /* MCO2 Clock Enable */ + MCO2_CLK_ENABLE(); + + /* Configure the MCO2 pin in alternate function mode */ + GPIO_InitStruct.Pin = MCO2_PIN; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Speed = GPIO_SPEED_HIGH; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Alternate = GPIO_AF0_MCO; + HAL_GPIO_Init(MCO2_GPIO_PORT, &GPIO_InitStruct); + + /* Mask MCO2 and MCO2PRE[2:0] bits then Select MCO2 clock source and prescaler */ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO2 | RCC_CFGR_MCO2PRE), (RCC_MCOSource | (RCC_MCODiv << 3))); + } +} + +/** + * @brief Enables the Clock Security System. + * @note If a failure is detected on the HSE oscillator clock, this oscillator + * is automatically disabled and an interrupt is generated to inform the + * software about the failure (Clock Security System Interrupt, CSSI), + * allowing the MCU to perform rescue operations. The CSSI is linked to + * the Cortex-M7 NMI (Non-Maskable Interrupt) exception vector. + * @retval None + */ +void HAL_RCC_EnableCSS(void) +{ + SET_BIT(RCC->CR, RCC_CR_CSSON); +} + +/** + * @brief Disables the Clock Security System. + * @retval None + */ +void HAL_RCC_DisableCSS(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_CSSON); +} + +/** + * @brief Returns the SYSCLK frequency + * + * @note The system frequency computed by this function is not the real + * frequency in the chip. It is calculated based on the predefined + * constant and the selected clock source: + * @note If SYSCLK source is HSI, function returns values based on HSI_VALUE(*) + * @note If SYSCLK source is HSE, function returns values based on HSE_VALUE(**) + * @note If SYSCLK source is PLL, function returns values based on HSE_VALUE(**) + * or HSI_VALUE(*) multiplied/divided by the PLL factors. + * @note (*) HSI_VALUE is a constant defined in stm32f7xx_hal_conf.h file (default value + * 16 MHz) but the real value may vary depending on the variations + * in voltage and temperature. + * @note (**) HSE_VALUE is a constant defined in stm32f7xx_hal_conf.h file (default value + * 25 MHz), user has to ensure that HSE_VALUE is same as the real + * frequency of the crystal used. Otherwise, this function may + * have wrong result. 
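A worked example of the computation this function performs, assuming the default 25 MHz HSE_VALUE and a PLL configured with PLLM = 25, PLLN = 432, PLLP = 2 (illustrative values only):

/* PLL_VCO = (HSE_VALUE / PLLM) * PLLN = (25 MHz / 25) * 432 = 432 MHz
   SYSCLK  =  PLL_VCO / PLLP           =  432 MHz / 2        = 216 MHz */
uint32_t sysclk = HAL_RCC_GetSysClockFreq();  /* 216000000 with the settings above,
                                                 provided HSE_VALUE matches the real crystal */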
+ * + * @note The result of this function could be not correct when using fractional + * value for HSE crystal. + * + * @note This function can be used by the user application to compute the + * baudrate for the communication peripherals or configure other parameters. + * + * @note Each time SYSCLK changes, this function must be called to update the + * right SYSCLK value. Otherwise, any configuration based on this function will be incorrect. + * + * + * @retval SYSCLK frequency + */ +uint32_t HAL_RCC_GetSysClockFreq(void) +{ + uint32_t pllm = 0, pllvco = 0, pllp = 0; + uint32_t sysclockfreq = 0; + + /* Get SYSCLK source -------------------------------------------------------*/ + switch (RCC->CFGR & RCC_CFGR_SWS) + { + case RCC_SYSCLKSOURCE_STATUS_HSI: /* HSI used as system clock source */ + { + sysclockfreq = HSI_VALUE; + break; + } + case RCC_SYSCLKSOURCE_STATUS_HSE: /* HSE used as system clock source */ + { + sysclockfreq = HSE_VALUE; + break; + } + case RCC_SYSCLKSOURCE_STATUS_PLLCLK: /* PLL used as system clock source */ + { + /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLLM) * PLLN + SYSCLK = PLL_VCO / PLLP */ + pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; + if (__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLCFGR_PLLSRC_HSI) + { + /* HSE used as PLL clock source */ + pllvco = (uint32_t)((((uint64_t) HSE_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + } + else + { + /* HSI used as PLL clock source */ + pllvco = (uint32_t)((((uint64_t) HSI_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + } + pllp = ((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >> RCC_PLLCFGR_PLLP_Pos) + 1) * 2); + + sysclockfreq = pllvco / pllp; + break; + } + default: + { + sysclockfreq = HSI_VALUE; + break; + } + } + return sysclockfreq; +} + +/** + * @brief Returns the HCLK frequency + * @note Each time HCLK changes, this function must be called to update the + * right HCLK value. Otherwise, any configuration based on this function will be incorrect. + * @note The SystemCoreClock CMSIS variable is used to store System Clock Frequency. + * @retval HCLK frequency + */ +uint32_t HAL_RCC_GetHCLKFreq(void) +{ + return SystemCoreClock; +} + +/** + * @brief Returns the PCLK1 frequency + * @note Each time PCLK1 changes, this function must be called to update the + * right PCLK1 value. Otherwise, any configuration based on this function will be incorrect. + * @retval PCLK1 frequency + */ +uint32_t HAL_RCC_GetPCLK1Freq(void) +{ + /* Get HCLK source and Compute PCLK1 frequency ---------------------------*/ + return (HAL_RCC_GetHCLKFreq() >> APBPrescTable[(RCC->CFGR & RCC_CFGR_PPRE1) >> RCC_CFGR_PPRE1_Pos]); +} + +/** + * @brief Returns the PCLK2 frequency + * @note Each time PCLK2 changes, this function must be called to update the + * right PCLK2 value. Otherwise, any configuration based on this function will be incorrect. + * @retval PCLK2 frequency + */ +uint32_t HAL_RCC_GetPCLK2Freq(void) +{ + /* Get HCLK source and Compute PCLK2 frequency ---------------------------*/ + return (HAL_RCC_GetHCLKFreq() >> APBPrescTable[(RCC->CFGR & RCC_CFGR_PPRE2) >> RCC_CFGR_PPRE2_Pos]); +} + +/** + * @brief Configures the RCC_OscInitStruct according to the internal + * RCC configuration registers. + * @param RCC_OscInitStruct pointer to an RCC_OscInitTypeDef structure that + * will be configured. 
+ * @retval None + */ +void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) +{ + /* Set all possible values for the Oscillator type parameter ---------------*/ + RCC_OscInitStruct->OscillatorType = RCC_OSCILLATORTYPE_HSE | RCC_OSCILLATORTYPE_HSI | RCC_OSCILLATORTYPE_LSE | RCC_OSCILLATORTYPE_LSI; + + /* Get the HSE configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_HSEBYP) == RCC_CR_HSEBYP) + { + RCC_OscInitStruct->HSEState = RCC_HSE_BYPASS; + } + else if ((RCC->CR & RCC_CR_HSEON) == RCC_CR_HSEON) + { + RCC_OscInitStruct->HSEState = RCC_HSE_ON; + } + else + { + RCC_OscInitStruct->HSEState = RCC_HSE_OFF; + } + + /* Get the HSI configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_HSION) == RCC_CR_HSION) + { + RCC_OscInitStruct->HSIState = RCC_HSI_ON; + } + else + { + RCC_OscInitStruct->HSIState = RCC_HSI_OFF; + } + + RCC_OscInitStruct->HSICalibrationValue = (uint32_t)((RCC->CR & RCC_CR_HSITRIM) >> RCC_CR_HSITRIM_Pos); + + /* Get the LSE configuration -----------------------------------------------*/ + if ((RCC->BDCR & RCC_BDCR_LSEBYP) == RCC_BDCR_LSEBYP) + { + RCC_OscInitStruct->LSEState = RCC_LSE_BYPASS; + } + else if ((RCC->BDCR & RCC_BDCR_LSEON) == RCC_BDCR_LSEON) + { + RCC_OscInitStruct->LSEState = RCC_LSE_ON; + } + else + { + RCC_OscInitStruct->LSEState = RCC_LSE_OFF; + } + + /* Get the LSI configuration -----------------------------------------------*/ + if ((RCC->CSR & RCC_CSR_LSION) == RCC_CSR_LSION) + { + RCC_OscInitStruct->LSIState = RCC_LSI_ON; + } + else + { + RCC_OscInitStruct->LSIState = RCC_LSI_OFF; + } + + /* Get the PLL configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_PLLON) == RCC_CR_PLLON) + { + RCC_OscInitStruct->PLL.PLLState = RCC_PLL_ON; + } + else + { + RCC_OscInitStruct->PLL.PLLState = RCC_PLL_OFF; + } + RCC_OscInitStruct->PLL.PLLSource = (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC); + RCC_OscInitStruct->PLL.PLLM = (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM); + RCC_OscInitStruct->PLL.PLLN = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos); + RCC_OscInitStruct->PLL.PLLP = (uint32_t)((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) + RCC_PLLCFGR_PLLP_0) << 1) >> RCC_PLLCFGR_PLLP_Pos); + RCC_OscInitStruct->PLL.PLLQ = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLQ) >> RCC_PLLCFGR_PLLQ_Pos); +#if defined (RCC_PLLCFGR_PLLR) + RCC_OscInitStruct->PLL.PLLR = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> POSITION_VAL(RCC_PLLCFGR_PLLR)); +#endif +} + +/** + * @brief Configures the RCC_ClkInitStruct according to the internal + * RCC configuration registers. + * @param RCC_ClkInitStruct pointer to an RCC_ClkInitTypeDef structure that + * will be configured. + * @param pFLatency Pointer on the Flash Latency. 
+ * @retval None + */ +void HAL_RCC_GetClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t *pFLatency) +{ + /* Set all possible values for the Clock type parameter --------------------*/ + RCC_ClkInitStruct->ClockType = RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2; + + /* Get the SYSCLK configuration --------------------------------------------*/ + RCC_ClkInitStruct->SYSCLKSource = (uint32_t)(RCC->CFGR & RCC_CFGR_SW); + + /* Get the HCLK configuration ----------------------------------------------*/ + RCC_ClkInitStruct->AHBCLKDivider = (uint32_t)(RCC->CFGR & RCC_CFGR_HPRE); + + /* Get the APB1 configuration ----------------------------------------------*/ + RCC_ClkInitStruct->APB1CLKDivider = (uint32_t)(RCC->CFGR & RCC_CFGR_PPRE1); + + /* Get the APB2 configuration ----------------------------------------------*/ + RCC_ClkInitStruct->APB2CLKDivider = (uint32_t)((RCC->CFGR & RCC_CFGR_PPRE2) >> 3); + + /* Get the Flash Wait State (Latency) configuration ------------------------*/ + *pFLatency = (uint32_t)(FLASH->ACR & FLASH_ACR_LATENCY); +} + +/** + * @brief This function handles the RCC CSS interrupt request. + * @note This API should be called under the NMI_Handler(). + * @retval None + */ +void HAL_RCC_NMI_IRQHandler(void) +{ + /* Check RCC CSSF flag */ + if (__HAL_RCC_GET_IT(RCC_IT_CSS)) + { + /* RCC Clock Security System interrupt user callback */ + HAL_RCC_CSSCallback(); + + /* Clear RCC CSS pending bit */ + __HAL_RCC_CLEAR_IT(RCC_IT_CSS); + } +} + +/** + * @brief RCC Clock Security System interrupt callback + * @retval None + */ +__weak void HAL_RCC_CSSCallback(void) +{ + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RCC_CSSCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_RCC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c new file mode 100644 index 000000000..ee0900391 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c @@ -0,0 +1,1776 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rcc_ex.c + * @author MCD Application Team + * @brief Extension RCC HAL module driver. + * This file provides firmware functions to manage the following + * functionalities RCC extension peripheral: + * + Extended Peripheral Control functions + * + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup RCCEx RCCEx + * @brief RCCEx HAL module driver + * @{ + */ + +#ifdef HAL_RCC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @defgroup RCCEx_Private_Defines RCCEx Private Defines + * @{ + */ +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/** @defgroup RCCEx_Private_Macros RCCEx Private Macros + * @{ + */ +/** + * @} + */ + +/** @defgroup RCCEx_Private_Macros RCCEx Private Macros + * @{ + */ + +/** + * @} + */ + + +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup RCCEx_Exported_Functions RCCEx Exported Functions + * @{ + */ + +/** @defgroup RCCEx_Exported_Functions_Group1 Extended Peripheral Control functions + * @brief Extended Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Extended Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the RCC Clocks + frequencies. + [..] + (@) Important note: Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to + select the RTC clock source; in this case the Backup domain will be reset in + order to modify the RTC Clock source, as consequence RTC registers (including + the backup registers) and RCC_BDCR register will be set to their reset values. + +@endverbatim + * @{ + */ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || \ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || \ + defined (STM32F750xx) +/** + * @brief Initializes the RCC extended peripherals clocks according to the specified + * parameters in the RCC_PeriphCLKInitTypeDef. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * contains the configuration information for the Extended Peripherals + * clocks(I2S, SAI, LTDC, RTC, TIM, UARTs, USARTs, LTPIM, SDMMC...). + * + * @note Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to select + * the RTC clock source; in this case the Backup domain will be reset in + * order to modify the RTC Clock source, as consequence RTC registers (including + * the backup registers) are set to their reset values. 
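A minimal usage sketch for the RTC case called out in the note above; the symbols are the standard F7 HAL ones, and the LSE is assumed to have been started beforehand with HAL_RCC_OscConfig():

RCC_PeriphCLKInitTypeDef periphclk = {0};

periphclk.PeriphClockSelection = RCC_PERIPHCLK_RTC;
periphclk.RTCClockSelection    = RCC_RTCCLKSOURCE_LSE;
if (HAL_RCCEx_PeriphCLKConfig(&periphclk) != HAL_OK)
{
  /* LSE not ready or backup-domain write access timed out; note the backup-domain
     reset described above if RTCSEL already held a different, non-zero selection. */
}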
+ * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tickstart = 0; + uint32_t tmpreg0 = 0; + uint32_t tmpreg1 = 0; + uint32_t plli2sused = 0; + uint32_t pllsaiused = 0; + + /* Check the parameters */ + assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); + + /*----------------------------------- I2S configuration ----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == (RCC_PERIPHCLK_I2S)) + { + /* Check the parameters */ + assert_param(IS_RCC_I2SCLKSOURCE(PeriphClkInit->I2sClockSelection)); + + /* Configure I2S Clock source */ + __HAL_RCC_I2S_CONFIG(PeriphClkInit->I2sClockSelection); + + /* Enable the PLLI2S when it's used as clock source for I2S */ + if(PeriphClkInit->I2sClockSelection == RCC_I2SCLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + } + + /*------------------------------------ SAI1 configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == (RCC_PERIPHCLK_SAI1)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAI1CLKSOURCE(PeriphClkInit->Sai1ClockSelection)); + + /* Configure SAI1 Clock source */ + __HAL_RCC_SAI1_CONFIG(PeriphClkInit->Sai1ClockSelection); + /* Enable the PLLI2S when it's used as clock source for SAI */ + if(PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if(PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI) + { + pllsaiused = 1; + } + } + + /*------------------------------------ SAI2 configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == (RCC_PERIPHCLK_SAI2)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAI2CLKSOURCE(PeriphClkInit->Sai2ClockSelection)); + + /* Configure SAI2 Clock source */ + __HAL_RCC_SAI2_CONFIG(PeriphClkInit->Sai2ClockSelection); + + /* Enable the PLLI2S when it's used as clock source for SAI */ + if(PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if(PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI) + { + pllsaiused = 1; + } + } + + /*-------------------------------------- SPDIF-RX Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) + { + plli2sused = 1; + } + + /*------------------------------------ RTC configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + { + /* Check for RTC Parameters used to output RTCCLK */ + assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); + + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable write access to Backup domain */ + PWR->CR1 |= PWR_CR1_DBP; + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait for Backup domain Write protection disable */ + while((PWR->CR1 & PWR_CR1_DBP) == RESET) + { + if((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Reset the Backup domain only if the RTC Clock source selection is modified */ + tmpreg0 = (RCC->BDCR & RCC_BDCR_RTCSEL); + + if((tmpreg0 != 0x00000000U) && (tmpreg0 != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + { + /* Store the content of BDCR register before 
the reset of Backup Domain */ + tmpreg0 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); + + /* RTC Clock selection can be changed only if the Backup Domain is reset */ + __HAL_RCC_BACKUPRESET_FORCE(); + __HAL_RCC_BACKUPRESET_RELEASE(); + + /* Restore the Content of BDCR register */ + RCC->BDCR = tmpreg0; + + /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + __HAL_RCC_RTC_CONFIG(PeriphClkInit->RTCClockSelection); + } + + /*------------------------------------ TIM configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + { + /* Check the parameters */ + assert_param(IS_RCC_TIMPRES(PeriphClkInit->TIMPresSelection)); + + /* Configure Timer Prescaler */ + __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); + } + + /*-------------------------------------- I2C1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C1CLKSOURCE(PeriphClkInit->I2c1ClockSelection)); + + /* Configure the I2C1 clock source */ + __HAL_RCC_I2C1_CONFIG(PeriphClkInit->I2c1ClockSelection); + } + + /*-------------------------------------- I2C2 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C2CLKSOURCE(PeriphClkInit->I2c2ClockSelection)); + + /* Configure the I2C2 clock source */ + __HAL_RCC_I2C2_CONFIG(PeriphClkInit->I2c2ClockSelection); + } + + /*-------------------------------------- I2C3 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C3CLKSOURCE(PeriphClkInit->I2c3ClockSelection)); + + /* Configure the I2C3 clock source */ + __HAL_RCC_I2C3_CONFIG(PeriphClkInit->I2c3ClockSelection); + } + + /*-------------------------------------- I2C4 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C4) == RCC_PERIPHCLK_I2C4) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C4CLKSOURCE(PeriphClkInit->I2c4ClockSelection)); + + /* Configure the I2C4 clock source */ + __HAL_RCC_I2C4_CONFIG(PeriphClkInit->I2c4ClockSelection); + } + + /*-------------------------------------- USART1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) + { + /* Check the parameters */ + assert_param(IS_RCC_USART1CLKSOURCE(PeriphClkInit->Usart1ClockSelection)); + + /* Configure the USART1 clock source */ + __HAL_RCC_USART1_CONFIG(PeriphClkInit->Usart1ClockSelection); + } + + /*-------------------------------------- USART2 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) + { + /* Check the parameters */ + assert_param(IS_RCC_USART2CLKSOURCE(PeriphClkInit->Usart2ClockSelection)); + + /* Configure the USART2 clock source */ + 
__HAL_RCC_USART2_CONFIG(PeriphClkInit->Usart2ClockSelection); + } + + /*-------------------------------------- USART3 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) + { + /* Check the parameters */ + assert_param(IS_RCC_USART3CLKSOURCE(PeriphClkInit->Usart3ClockSelection)); + + /* Configure the USART3 clock source */ + __HAL_RCC_USART3_CONFIG(PeriphClkInit->Usart3ClockSelection); + } + + /*-------------------------------------- UART4 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) + { + /* Check the parameters */ + assert_param(IS_RCC_UART4CLKSOURCE(PeriphClkInit->Uart4ClockSelection)); + + /* Configure the UART4 clock source */ + __HAL_RCC_UART4_CONFIG(PeriphClkInit->Uart4ClockSelection); + } + + /*-------------------------------------- UART5 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) + { + /* Check the parameters */ + assert_param(IS_RCC_UART5CLKSOURCE(PeriphClkInit->Uart5ClockSelection)); + + /* Configure the UART5 clock source */ + __HAL_RCC_UART5_CONFIG(PeriphClkInit->Uart5ClockSelection); + } + + /*-------------------------------------- USART6 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) + { + /* Check the parameters */ + assert_param(IS_RCC_USART6CLKSOURCE(PeriphClkInit->Usart6ClockSelection)); + + /* Configure the USART6 clock source */ + __HAL_RCC_USART6_CONFIG(PeriphClkInit->Usart6ClockSelection); + } + + /*-------------------------------------- UART7 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) + { + /* Check the parameters */ + assert_param(IS_RCC_UART7CLKSOURCE(PeriphClkInit->Uart7ClockSelection)); + + /* Configure the UART7 clock source */ + __HAL_RCC_UART7_CONFIG(PeriphClkInit->Uart7ClockSelection); + } + + /*-------------------------------------- UART8 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) + { + /* Check the parameters */ + assert_param(IS_RCC_UART8CLKSOURCE(PeriphClkInit->Uart8ClockSelection)); + + /* Configure the UART8 clock source */ + __HAL_RCC_UART8_CONFIG(PeriphClkInit->Uart8ClockSelection); + } + + /*--------------------------------------- CEC Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) + { + /* Check the parameters */ + assert_param(IS_RCC_CECCLKSOURCE(PeriphClkInit->CecClockSelection)); + + /* Configure the CEC clock source */ + __HAL_RCC_CEC_CONFIG(PeriphClkInit->CecClockSelection); + } + + /*-------------------------------------- CK48 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + { + /* Check the parameters */ + assert_param(IS_RCC_CLK48SOURCE(PeriphClkInit->Clk48ClockSelection)); + + /* Configure the CLK48 source */ + __HAL_RCC_CLK48_CONFIG(PeriphClkInit->Clk48ClockSelection); + + /* Enable the PLLSAI when it's used as clock source for CK48 */ + if(PeriphClkInit->Clk48ClockSelection == RCC_CLK48SOURCE_PLLSAIP) + { + pllsaiused = 1; + } + } + + /*-------------------------------------- 
LTDC Configuration -----------------------------------*/ +#if defined(STM32F746xx) || defined(STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC) + { + pllsaiused = 1; + } +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + + /*-------------------------------------- LPTIM1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) + { + /* Check the parameters */ + assert_param(IS_RCC_LPTIM1CLK(PeriphClkInit->Lptim1ClockSelection)); + + /* Configure the LTPIM1 clock source */ + __HAL_RCC_LPTIM1_CONFIG(PeriphClkInit->Lptim1ClockSelection); + } + + /*------------------------------------- SDMMC1 Configuration ------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) + { + /* Check the parameters */ + assert_param(IS_RCC_SDMMC1CLKSOURCE(PeriphClkInit->Sdmmc1ClockSelection)); + + /* Configure the SDMMC1 clock source */ + __HAL_RCC_SDMMC1_CONFIG(PeriphClkInit->Sdmmc1ClockSelection); + } + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + /*------------------------------------- SDMMC2 Configuration ------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDMMC2) == RCC_PERIPHCLK_SDMMC2) + { + /* Check the parameters */ + assert_param(IS_RCC_SDMMC2CLKSOURCE(PeriphClkInit->Sdmmc2ClockSelection)); + + /* Configure the SDMMC2 clock source */ + __HAL_RCC_SDMMC2_CONFIG(PeriphClkInit->Sdmmc2ClockSelection); + } + + /*------------------------------------- DFSDM1 Configuration -------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM1) == RCC_PERIPHCLK_DFSDM1) + { + /* Check the parameters */ + assert_param(IS_RCC_DFSDM1CLKSOURCE(PeriphClkInit->Dfsdm1ClockSelection)); + + /* Configure the DFSDM1 interface clock source */ + __HAL_RCC_DFSDM1_CONFIG(PeriphClkInit->Dfsdm1ClockSelection); + } + + /*------------------------------------- DFSDM AUDIO Configuration -------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM1_AUDIO) == RCC_PERIPHCLK_DFSDM1_AUDIO) + { + /* Check the parameters */ + assert_param(IS_RCC_DFSDM1AUDIOCLKSOURCE(PeriphClkInit->Dfsdm1AudioClockSelection)); + + /* Configure the DFSDM interface clock source */ + __HAL_RCC_DFSDM1AUDIO_CONFIG(PeriphClkInit->Dfsdm1AudioClockSelection); + } +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + + /*-------------------------------------- PLLI2S Configuration ---------------------------------*/ + /* PLLI2S is configured when a peripheral will use it as source clock : SAI1, SAI2, I2S or SPDIF-RX */ + if((plli2sused == 1) || (PeriphClkInit->PeriphClockSelection == RCC_PERIPHCLK_PLLI2S)) + { + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLI2S is disabled */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* check for common PLLI2S Parameters */ + 
assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); + + /*----------------- In Case of PLLI2S is selected as source clock for I2S -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) && (PeriphClkInit->I2sClockSelection == RCC_I2SCLKSOURCE_PLLI2S))) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + + /* Read PLLI2SP and PLLI2SQ value from PLLI2SCFGR register (this value is not needed for I2S configuration) */ + tmpreg0 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SP) >> RCC_PLLI2SCFGR_PLLI2SP_Pos); + tmpreg1 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLM) */ + /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , tmpreg0, tmpreg1, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /*----------------- In Case of PLLI2S is selected as source clock for SAI -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S))) + { + /* Check for PLLI2S Parameters */ + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + /* Check for PLLI2S/DIVQ parameters */ + assert_param(IS_RCC_PLLI2S_DIVQ_VALUE(PeriphClkInit->PLLI2SDivQ)); + + /* Read PLLI2SP and PLLI2SR values from PLLI2SCFGR register (this value is not needed for SAI configuration) */ + tmpreg0 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SP) >> RCC_PLLI2SCFGR_PLLI2SP_Pos); + tmpreg1 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, tmpreg0, PeriphClkInit->PLLI2S.PLLI2SQ, tmpreg1); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLI2SDivQ); + } + + /*----------------- In Case of PLLI2S is selected as source clock for SPDIF-RX -------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SP_VALUE(PeriphClkInit->PLLI2S.PLLI2SP)); + + /* Read PLLI2SR value from PLLI2SCFGR register (this value is not needed for SPDIF-RX configuration) */ + tmpreg0 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + tmpreg1 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLM) */ + /* SPDIFCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SP */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SP, tmpreg0, tmpreg1); + } + + /*----------------- In Case of PLLI2S is just selected -----------------*/ + if((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + { + /* Check for Parameters */ + assert_param(IS_RCC_PLLI2SP_VALUE(PeriphClkInit->PLLI2S.PLLI2SP)); + 
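+    /* Illustrative arithmetic (editorial note, not part of the generated ST
+       sources; all values below are assumptions chosen only to make the
+       division-factor formulas above concrete): with HSE = 25 MHz and PLLM = 25
+       the PLLI2S VCO input is 1 MHz, so PLLI2SN = 344 gives a 344 MHz VCO and
+       PLLI2SQ = 7 yields a SAI_CLK(first level) of about 49.14 MHz, close to
+       the 49.152 MHz usually targeted for the 48 kHz audio sample-rate family;
+       PLLI2SDivQ = 1 then leaves that value unchanged. */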
assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLI2SM) */ + /* SPDIFRXCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SP */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SP, PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLI2S is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + + /*-------------------------------------- PLLSAI Configuration ---------------------------------*/ + /* PLLSAI is configured when a peripheral will use it as source clock : SAI1, SAI2, LTDC or CK48 */ + if(pllsaiused == 1) + { + /* Disable PLLSAI Clock */ + __HAL_RCC_PLLSAI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLSAI is disabled */ + while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Check the PLLSAI division factors */ + assert_param(IS_RCC_PLLSAIN_VALUE(PeriphClkInit->PLLSAI.PLLSAIN)); + + /*----------------- In Case of PLLSAI is selected as source clock for SAI -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI)) ||\ + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI))) + { + /* check for PLLSAIQ Parameter */ + assert_param(IS_RCC_PLLSAIQ_VALUE(PeriphClkInit->PLLSAI.PLLSAIQ)); + /* check for PLLSAI/DIVQ Parameter */ + assert_param(IS_RCC_PLLSAI_DIVQ_VALUE(PeriphClkInit->PLLSAIDivQ)); + + /* Read PLLSAIP value from PLLSAICFGR register (this value is not needed for SAI configuration) */ + tmpreg0 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos); + tmpreg1 = ((RCC->PLLSAICFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , tmpreg0, PeriphClkInit->PLLSAI.PLLSAIQ, tmpreg1); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLSAIDivQ); + } + + /*----------------- In Case of PLLSAI is selected as source clock for CLK48 -------------------*/ + /* In Case of PLLI2S is selected as source clock for CK48 */ + if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48SOURCE_PLLSAIP)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLSAIP_VALUE(PeriphClkInit->PLLSAI.PLLSAIP)); + /* Read PLLSAIQ and PLLSAIR value from PLLSAICFGR register (this value is not needed for CK48 configuration) */ + tmpreg0 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + tmpreg1 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + + /* Configure the 
PLLSAI division factors */ + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) x (PLLI2SN/PLLM) */ + /* 48CLK = f(PLLSAI clock output) = f(VCO clock) / PLLSAIP */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , PeriphClkInit->PLLSAI.PLLSAIP, tmpreg0, tmpreg1); + } + +#if defined(STM32F746xx) || defined(STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) + /*---------------------------- LTDC configuration -------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == (RCC_PERIPHCLK_LTDC)) + { + assert_param(IS_RCC_PLLSAIR_VALUE(PeriphClkInit->PLLSAI.PLLSAIR)); + assert_param(IS_RCC_PLLSAI_DIVR_VALUE(PeriphClkInit->PLLSAIDivR)); + + /* Read PLLSAIP and PLLSAIQ value from PLLSAICFGR register (these value are not needed for LTDC configuration) */ + tmpreg0 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + tmpreg1 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos); + + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* LTDC_CLK(first level) = PLLSAI_VCO Output/PLLSAIR */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , tmpreg1, tmpreg0, PeriphClkInit->PLLSAI.PLLSAIR); + + /* LTDC_CLK = LTDC_CLK(first level)/PLLSAIDIVR */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVR_CONFIG(PeriphClkInit->PLLSAIDivR); + } +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + + /* Enable PLLSAI Clock */ + __HAL_RCC_PLLSAI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLSAI is ready */ + while(__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + { + if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + return HAL_OK; +} + +/** + * @brief Get the RCC_PeriphCLKInitTypeDef according to the internal + * RCC configuration registers. 
+ * @param PeriphClkInit pointer to the configured RCC_PeriphCLKInitTypeDef structure + * @retval None + */ +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tempreg = 0; + + /* Set all possible values for the extended clock type parameter------------*/ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_LPTIM1 |\ + RCC_PERIPHCLK_SAI1 | RCC_PERIPHCLK_SAI2 |\ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC |\ + RCC_PERIPHCLK_CEC | RCC_PERIPHCLK_I2C4 |\ + RCC_PERIPHCLK_I2C1 | RCC_PERIPHCLK_I2C2 |\ + RCC_PERIPHCLK_I2C3 | RCC_PERIPHCLK_USART1 |\ + RCC_PERIPHCLK_USART2 | RCC_PERIPHCLK_USART3 |\ + RCC_PERIPHCLK_UART4 | RCC_PERIPHCLK_UART5 |\ + RCC_PERIPHCLK_USART6 | RCC_PERIPHCLK_UART7 |\ + RCC_PERIPHCLK_UART8 | RCC_PERIPHCLK_SDMMC1 |\ + RCC_PERIPHCLK_CLK48 | RCC_PERIPHCLK_SDMMC2 |\ + RCC_PERIPHCLK_DFSDM1 | RCC_PERIPHCLK_DFSDM1_AUDIO; +#else + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_LPTIM1 |\ + RCC_PERIPHCLK_SAI1 | RCC_PERIPHCLK_SAI2 |\ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC |\ + RCC_PERIPHCLK_CEC | RCC_PERIPHCLK_I2C4 |\ + RCC_PERIPHCLK_I2C1 | RCC_PERIPHCLK_I2C2 |\ + RCC_PERIPHCLK_I2C3 | RCC_PERIPHCLK_USART1 |\ + RCC_PERIPHCLK_USART2 | RCC_PERIPHCLK_USART3 |\ + RCC_PERIPHCLK_UART4 | RCC_PERIPHCLK_UART5 |\ + RCC_PERIPHCLK_USART6 | RCC_PERIPHCLK_UART7 |\ + RCC_PERIPHCLK_UART8 | RCC_PERIPHCLK_SDMMC1 |\ + RCC_PERIPHCLK_CLK48; +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + + /* Get the PLLI2S Clock configuration -----------------------------------------------*/ + PeriphClkInit->PLLI2S.PLLI2SN = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); + PeriphClkInit->PLLI2S.PLLI2SP = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SP) >> RCC_PLLI2SCFGR_PLLI2SP_Pos); + PeriphClkInit->PLLI2S.PLLI2SQ = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + PeriphClkInit->PLLI2S.PLLI2SR = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + + /* Get the PLLSAI Clock configuration -----------------------------------------------*/ + PeriphClkInit->PLLSAI.PLLSAIN = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> RCC_PLLSAICFGR_PLLSAIN_Pos); + PeriphClkInit->PLLSAI.PLLSAIP = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos); + PeriphClkInit->PLLSAI.PLLSAIQ = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + PeriphClkInit->PLLSAI.PLLSAIR = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + + /* Get the PLLSAI/PLLI2S division factors -------------------------------------------*/ + PeriphClkInit->PLLI2SDivQ = (uint32_t)((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLI2SDIVQ) >> RCC_DCKCFGR1_PLLI2SDIVQ_Pos); + PeriphClkInit->PLLSAIDivQ = (uint32_t)((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLSAIDIVQ) >> RCC_DCKCFGR1_PLLSAIDIVQ_Pos); + PeriphClkInit->PLLSAIDivR = (uint32_t)((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLSAIDIVR) >> RCC_DCKCFGR1_PLLSAIDIVR_Pos); + + /* Get the SAI1 clock configuration ----------------------------------------------*/ + PeriphClkInit->Sai1ClockSelection = __HAL_RCC_GET_SAI1_SOURCE(); + + /* Get the SAI2 clock configuration ----------------------------------------------*/ + PeriphClkInit->Sai2ClockSelection = __HAL_RCC_GET_SAI2_SOURCE(); + + /* Get the I2S clock 
configuration ------------------------------------------*/ + PeriphClkInit->I2sClockSelection = __HAL_RCC_GET_I2SCLKSOURCE(); + + /* Get the I2C1 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c1ClockSelection = __HAL_RCC_GET_I2C1_SOURCE(); + + /* Get the I2C2 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c2ClockSelection = __HAL_RCC_GET_I2C2_SOURCE(); + + /* Get the I2C3 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c3ClockSelection = __HAL_RCC_GET_I2C3_SOURCE(); + + /* Get the I2C4 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c4ClockSelection = __HAL_RCC_GET_I2C4_SOURCE(); + + /* Get the USART1 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart1ClockSelection = __HAL_RCC_GET_USART1_SOURCE(); + + /* Get the USART2 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart2ClockSelection = __HAL_RCC_GET_USART2_SOURCE(); + + /* Get the USART3 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart3ClockSelection = __HAL_RCC_GET_USART3_SOURCE(); + + /* Get the UART4 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart4ClockSelection = __HAL_RCC_GET_UART4_SOURCE(); + + /* Get the UART5 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart5ClockSelection = __HAL_RCC_GET_UART5_SOURCE(); + + /* Get the USART6 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart6ClockSelection = __HAL_RCC_GET_USART6_SOURCE(); + + /* Get the UART7 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart7ClockSelection = __HAL_RCC_GET_UART7_SOURCE(); + + /* Get the UART8 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart8ClockSelection = __HAL_RCC_GET_UART8_SOURCE(); + + /* Get the LPTIM1 clock configuration ------------------------------------------*/ + PeriphClkInit->Lptim1ClockSelection = __HAL_RCC_GET_LPTIM1_SOURCE(); + + /* Get the CEC clock configuration -----------------------------------------------*/ + PeriphClkInit->CecClockSelection = __HAL_RCC_GET_CEC_SOURCE(); + + /* Get the CK48 clock configuration -----------------------------------------------*/ + PeriphClkInit->Clk48ClockSelection = __HAL_RCC_GET_CLK48_SOURCE(); + + /* Get the SDMMC1 clock configuration -----------------------------------------------*/ + PeriphClkInit->Sdmmc1ClockSelection = __HAL_RCC_GET_SDMMC1_SOURCE(); + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + /* Get the SDMMC2 clock configuration -----------------------------------------------*/ + PeriphClkInit->Sdmmc2ClockSelection = __HAL_RCC_GET_SDMMC2_SOURCE(); + + /* Get the DFSDM clock configuration -----------------------------------------------*/ + PeriphClkInit->Dfsdm1ClockSelection = __HAL_RCC_GET_DFSDM1_SOURCE(); + + /* Get the DFSDM AUDIO clock configuration -----------------------------------------------*/ + PeriphClkInit->Dfsdm1AudioClockSelection = __HAL_RCC_GET_DFSDM1AUDIO_SOURCE(); +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + + /* Get the RTC Clock configuration -----------------------------------------------*/ + tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); + PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & RCC_BDCR_RTCSEL)); + + /* 
Get the TIM Prescaler configuration --------------------------------------------*/ + if ((RCC->DCKCFGR1 & RCC_DCKCFGR1_TIMPRE) == RESET) + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_DESACTIVATED; + } + else + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_ACTIVATED; + } +} +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +/** + * @brief Initializes the RCC extended peripherals clocks according to the specified + * parameters in the RCC_PeriphCLKInitTypeDef. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * contains the configuration information for the Extended Peripherals + * clocks(I2S, SAI, RTC, TIM, UARTs, USARTs, LTPIM, SDMMC...). + * + * @note Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to select + * the RTC clock source; in this case the Backup domain will be reset in + * order to modify the RTC Clock source, as consequence RTC registers (including + * the backup registers) are set to their reset values. + * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tickstart = 0; + uint32_t tmpreg0 = 0; + uint32_t plli2sused = 0; + uint32_t pllsaiused = 0; + + /* Check the parameters */ + assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); + + /*----------------------------------- I2S configuration ----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == (RCC_PERIPHCLK_I2S)) + { + /* Check the parameters */ + assert_param(IS_RCC_I2SCLKSOURCE(PeriphClkInit->I2sClockSelection)); + + /* Configure I2S Clock source */ + __HAL_RCC_I2S_CONFIG(PeriphClkInit->I2sClockSelection); + + /* Enable the PLLI2S when it's used as clock source for I2S */ + if(PeriphClkInit->I2sClockSelection == RCC_I2SCLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + } + + /*------------------------------------ SAI1 configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == (RCC_PERIPHCLK_SAI1)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAI1CLKSOURCE(PeriphClkInit->Sai1ClockSelection)); + + /* Configure SAI1 Clock source */ + __HAL_RCC_SAI1_CONFIG(PeriphClkInit->Sai1ClockSelection); + /* Enable the PLLI2S when it's used as clock source for SAI */ + if(PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if(PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI) + { + pllsaiused = 1; + } + } + + /*------------------------------------ SAI2 configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == (RCC_PERIPHCLK_SAI2)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAI2CLKSOURCE(PeriphClkInit->Sai2ClockSelection)); + + /* Configure SAI2 Clock source */ + __HAL_RCC_SAI2_CONFIG(PeriphClkInit->Sai2ClockSelection); + + /* Enable the PLLI2S when it's used as clock source for SAI */ + if(PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if(PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI) + { + pllsaiused = 1; + } + } + + 
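+  /* Caller-side sketch (editorial illustration, not part of the generated ST
+     sources; values are assumptions): selecting the RTC kernel clock through
+     this function, assuming LSE was already started via HAL_RCC_OscConfig(),
+     might look like
+
+       RCC_PeriphCLKInitTypeDef rtcClk = {0};
+       rtcClk.PeriphClockSelection = RCC_PERIPHCLK_RTC;
+       rtcClk.RTCClockSelection    = RCC_RTCCLKSOURCE_LSE;
+       if (HAL_RCCEx_PeriphCLKConfig(&rtcClk) != HAL_OK)
+       {
+         while (1) {}
+       }
+
+     Keep in mind the Backup domain reset described in the function header
+     whenever the RTC clock source is actually being changed. */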
/*------------------------------------ RTC configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + { + /* Check for RTC Parameters used to output RTCCLK */ + assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); + + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable write access to Backup domain */ + PWR->CR1 |= PWR_CR1_DBP; + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait for Backup domain Write protection disable */ + while((PWR->CR1 & PWR_CR1_DBP) == RESET) + { + if((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Reset the Backup domain only if the RTC Clock source selection is modified */ + tmpreg0 = (RCC->BDCR & RCC_BDCR_RTCSEL); + + if((tmpreg0 != 0x00000000U) && (tmpreg0 != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + { + /* Store the content of BDCR register before the reset of Backup Domain */ + tmpreg0 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); + + /* RTC Clock selection can be changed only if the Backup Domain is reset */ + __HAL_RCC_BACKUPRESET_FORCE(); + __HAL_RCC_BACKUPRESET_RELEASE(); + + /* Restore the Content of BDCR register */ + RCC->BDCR = tmpreg0; + + /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + __HAL_RCC_RTC_CONFIG(PeriphClkInit->RTCClockSelection); + } + + /*------------------------------------ TIM configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + { + /* Check the parameters */ + assert_param(IS_RCC_TIMPRES(PeriphClkInit->TIMPresSelection)); + + /* Configure Timer Prescaler */ + __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); + } + + /*-------------------------------------- I2C1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C1CLKSOURCE(PeriphClkInit->I2c1ClockSelection)); + + /* Configure the I2C1 clock source */ + __HAL_RCC_I2C1_CONFIG(PeriphClkInit->I2c1ClockSelection); + } + + /*-------------------------------------- I2C2 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C2CLKSOURCE(PeriphClkInit->I2c2ClockSelection)); + + /* Configure the I2C2 clock source */ + __HAL_RCC_I2C2_CONFIG(PeriphClkInit->I2c2ClockSelection); + } + + /*-------------------------------------- I2C3 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C3CLKSOURCE(PeriphClkInit->I2c3ClockSelection)); + + /* Configure the I2C3 clock source */ + __HAL_RCC_I2C3_CONFIG(PeriphClkInit->I2c3ClockSelection); + } + + /*-------------------------------------- USART1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) + { + /* Check the parameters */ + 
assert_param(IS_RCC_USART1CLKSOURCE(PeriphClkInit->Usart1ClockSelection)); + + /* Configure the USART1 clock source */ + __HAL_RCC_USART1_CONFIG(PeriphClkInit->Usart1ClockSelection); + } + + /*-------------------------------------- USART2 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) + { + /* Check the parameters */ + assert_param(IS_RCC_USART2CLKSOURCE(PeriphClkInit->Usart2ClockSelection)); + + /* Configure the USART2 clock source */ + __HAL_RCC_USART2_CONFIG(PeriphClkInit->Usart2ClockSelection); + } + + /*-------------------------------------- USART3 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) + { + /* Check the parameters */ + assert_param(IS_RCC_USART3CLKSOURCE(PeriphClkInit->Usart3ClockSelection)); + + /* Configure the USART3 clock source */ + __HAL_RCC_USART3_CONFIG(PeriphClkInit->Usart3ClockSelection); + } + + /*-------------------------------------- UART4 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) + { + /* Check the parameters */ + assert_param(IS_RCC_UART4CLKSOURCE(PeriphClkInit->Uart4ClockSelection)); + + /* Configure the UART4 clock source */ + __HAL_RCC_UART4_CONFIG(PeriphClkInit->Uart4ClockSelection); + } + + /*-------------------------------------- UART5 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) + { + /* Check the parameters */ + assert_param(IS_RCC_UART5CLKSOURCE(PeriphClkInit->Uart5ClockSelection)); + + /* Configure the UART5 clock source */ + __HAL_RCC_UART5_CONFIG(PeriphClkInit->Uart5ClockSelection); + } + + /*-------------------------------------- USART6 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) + { + /* Check the parameters */ + assert_param(IS_RCC_USART6CLKSOURCE(PeriphClkInit->Usart6ClockSelection)); + + /* Configure the USART6 clock source */ + __HAL_RCC_USART6_CONFIG(PeriphClkInit->Usart6ClockSelection); + } + + /*-------------------------------------- UART7 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) + { + /* Check the parameters */ + assert_param(IS_RCC_UART7CLKSOURCE(PeriphClkInit->Uart7ClockSelection)); + + /* Configure the UART7 clock source */ + __HAL_RCC_UART7_CONFIG(PeriphClkInit->Uart7ClockSelection); + } + + /*-------------------------------------- UART8 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) + { + /* Check the parameters */ + assert_param(IS_RCC_UART8CLKSOURCE(PeriphClkInit->Uart8ClockSelection)); + + /* Configure the UART8 clock source */ + __HAL_RCC_UART8_CONFIG(PeriphClkInit->Uart8ClockSelection); + } + + /*-------------------------------------- CK48 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + { + /* Check the parameters */ + assert_param(IS_RCC_CLK48SOURCE(PeriphClkInit->Clk48ClockSelection)); + + /* Configure the CLK48 source */ + __HAL_RCC_CLK48_CONFIG(PeriphClkInit->Clk48ClockSelection); + + /* Enable the PLLSAI when it's used as clock source for CK48 */ + 
if(PeriphClkInit->Clk48ClockSelection == RCC_CLK48SOURCE_PLLSAIP) + { + pllsaiused = 1; + } + } + + /*-------------------------------------- LPTIM1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) + { + /* Check the parameters */ + assert_param(IS_RCC_LPTIM1CLK(PeriphClkInit->Lptim1ClockSelection)); + + /* Configure the LTPIM1 clock source */ + __HAL_RCC_LPTIM1_CONFIG(PeriphClkInit->Lptim1ClockSelection); + } + + /*------------------------------------- SDMMC1 Configuration ------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) + { + /* Check the parameters */ + assert_param(IS_RCC_SDMMC1CLKSOURCE(PeriphClkInit->Sdmmc1ClockSelection)); + + /* Configure the SDMMC1 clock source */ + __HAL_RCC_SDMMC1_CONFIG(PeriphClkInit->Sdmmc1ClockSelection); + } + + /*------------------------------------- SDMMC2 Configuration ------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDMMC2) == RCC_PERIPHCLK_SDMMC2) + { + /* Check the parameters */ + assert_param(IS_RCC_SDMMC2CLKSOURCE(PeriphClkInit->Sdmmc2ClockSelection)); + + /* Configure the SDMMC2 clock source */ + __HAL_RCC_SDMMC2_CONFIG(PeriphClkInit->Sdmmc2ClockSelection); + } + + /*-------------------------------------- PLLI2S Configuration ---------------------------------*/ + /* PLLI2S is configured when a peripheral will use it as source clock : SAI1, SAI2 or I2S */ + if((plli2sused == 1) || (PeriphClkInit->PeriphClockSelection == RCC_PERIPHCLK_PLLI2S)) + { + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLI2S is disabled */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* check for common PLLI2S Parameters */ + assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); + + /*----------------- In Case of PLLI2S is selected as source clock for I2S -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) && (PeriphClkInit->I2sClockSelection == RCC_I2SCLKSOURCE_PLLI2S))) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + + /* Read PLLI2SQ value from PLLI2SCFGR register (this value is not needed for I2S configuration) */ + tmpreg0 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLM) */ + /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , tmpreg0, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /*----------------- In Case of PLLI2S is selected as source clock for SAI -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S))) + { + /* Check for PLLI2S Parameters */ + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + /* Check for PLLI2S/DIVQ parameters */ + 
assert_param(IS_RCC_PLLI2S_DIVQ_VALUE(PeriphClkInit->PLLI2SDivQ)); + + /* Read PLLI2SP and PLLI2SR values from PLLI2SCFGR register (this value is not needed for SAI configuration) */ + tmpreg0 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, tmpreg0); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLI2SDivQ); + } + + /*----------------- In Case of PLLI2S is just selected -----------------*/ + if((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + { + /* Check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLI2SM) */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLI2S is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + + /*-------------------------------------- PLLSAI Configuration ---------------------------------*/ + /* PLLSAI is configured when a peripheral will use it as source clock : SAI1, SAI2, LTDC or CK48 */ + if(pllsaiused == 1) + { + /* Disable PLLSAI Clock */ + __HAL_RCC_PLLSAI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLSAI is disabled */ + while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Check the PLLSAI division factors */ + assert_param(IS_RCC_PLLSAIN_VALUE(PeriphClkInit->PLLSAI.PLLSAIN)); + + /*----------------- In Case of PLLSAI is selected as source clock for SAI -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI)) ||\ + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI))) + { + /* check for PLLSAIQ Parameter */ + assert_param(IS_RCC_PLLSAIQ_VALUE(PeriphClkInit->PLLSAI.PLLSAIQ)); + /* check for PLLSAI/DIVQ Parameter */ + assert_param(IS_RCC_PLLSAI_DIVQ_VALUE(PeriphClkInit->PLLSAIDivQ)); + + /* Read PLLSAIP value from PLLSAICFGR register (this value is not needed for SAI configuration) */ + tmpreg0 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos); + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , tmpreg0, PeriphClkInit->PLLSAI.PLLSAIQ); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLSAIDivQ); + } + + /*----------------- In Case of 
PLLSAI is selected as source clock for CLK48 -------------------*/ + /* In Case of PLLI2S is selected as source clock for CK48 */ + if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48SOURCE_PLLSAIP)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLSAIP_VALUE(PeriphClkInit->PLLSAI.PLLSAIP)); + /* Read PLLSAIQ and PLLSAIR value from PLLSAICFGR register (this value is not needed for CK48 configuration) */ + tmpreg0 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + + /* Configure the PLLSAI division factors */ + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) x (PLLI2SN/PLLM) */ + /* 48CLK = f(PLLSAI clock output) = f(VCO clock) / PLLSAIP */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , PeriphClkInit->PLLSAI.PLLSAIP, tmpreg0); + } + + /* Enable PLLSAI Clock */ + __HAL_RCC_PLLSAI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLSAI is ready */ + while(__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + { + if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + return HAL_OK; +} + +/** + * @brief Get the RCC_PeriphCLKInitTypeDef according to the internal + * RCC configuration registers. + * @param PeriphClkInit pointer to the configured RCC_PeriphCLKInitTypeDef structure + * @retval None + */ +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tempreg = 0; + + /* Set all possible values for the extended clock type parameter------------*/ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_LPTIM1 |\ + RCC_PERIPHCLK_SAI1 | RCC_PERIPHCLK_SAI2 |\ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC |\ + RCC_PERIPHCLK_I2C1 | RCC_PERIPHCLK_I2C2 |\ + RCC_PERIPHCLK_I2C3 | RCC_PERIPHCLK_USART1 |\ + RCC_PERIPHCLK_USART2 | RCC_PERIPHCLK_USART3 |\ + RCC_PERIPHCLK_UART4 | RCC_PERIPHCLK_UART5 |\ + RCC_PERIPHCLK_USART6 | RCC_PERIPHCLK_UART7 |\ + RCC_PERIPHCLK_UART8 | RCC_PERIPHCLK_SDMMC1 |\ + RCC_PERIPHCLK_CLK48 | RCC_PERIPHCLK_SDMMC2; + + /* Get the PLLI2S Clock configuration -----------------------------------------------*/ + PeriphClkInit->PLLI2S.PLLI2SN = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); + PeriphClkInit->PLLI2S.PLLI2SQ = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + PeriphClkInit->PLLI2S.PLLI2SR = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + + /* Get the PLLSAI Clock configuration -----------------------------------------------*/ + PeriphClkInit->PLLSAI.PLLSAIN = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> RCC_PLLSAICFGR_PLLSAIN_Pos); + PeriphClkInit->PLLSAI.PLLSAIP = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos); + PeriphClkInit->PLLSAI.PLLSAIQ = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + + /* Get the PLLSAI/PLLI2S division factors -------------------------------------------*/ + PeriphClkInit->PLLI2SDivQ = (uint32_t)((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLI2SDIVQ) >> RCC_DCKCFGR1_PLLI2SDIVQ_Pos); + PeriphClkInit->PLLSAIDivQ = (uint32_t)((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLSAIDIVQ) >> RCC_DCKCFGR1_PLLSAIDIVQ_Pos); + + /* Get the SAI1 clock configuration ----------------------------------------------*/ + PeriphClkInit->Sai1ClockSelection = __HAL_RCC_GET_SAI1_SOURCE(); + + /* Get the SAI2 
clock configuration ----------------------------------------------*/ + PeriphClkInit->Sai2ClockSelection = __HAL_RCC_GET_SAI2_SOURCE(); + + /* Get the I2S clock configuration ------------------------------------------*/ + PeriphClkInit->I2sClockSelection = __HAL_RCC_GET_I2SCLKSOURCE(); + + /* Get the I2C1 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c1ClockSelection = __HAL_RCC_GET_I2C1_SOURCE(); + + /* Get the I2C2 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c2ClockSelection = __HAL_RCC_GET_I2C2_SOURCE(); + + /* Get the I2C3 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c3ClockSelection = __HAL_RCC_GET_I2C3_SOURCE(); + + /* Get the USART1 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart1ClockSelection = __HAL_RCC_GET_USART1_SOURCE(); + + /* Get the USART2 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart2ClockSelection = __HAL_RCC_GET_USART2_SOURCE(); + + /* Get the USART3 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart3ClockSelection = __HAL_RCC_GET_USART3_SOURCE(); + + /* Get the UART4 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart4ClockSelection = __HAL_RCC_GET_UART4_SOURCE(); + + /* Get the UART5 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart5ClockSelection = __HAL_RCC_GET_UART5_SOURCE(); + + /* Get the USART6 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart6ClockSelection = __HAL_RCC_GET_USART6_SOURCE(); + + /* Get the UART7 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart7ClockSelection = __HAL_RCC_GET_UART7_SOURCE(); + + /* Get the UART8 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart8ClockSelection = __HAL_RCC_GET_UART8_SOURCE(); + + /* Get the LPTIM1 clock configuration ------------------------------------------*/ + PeriphClkInit->Lptim1ClockSelection = __HAL_RCC_GET_LPTIM1_SOURCE(); + + /* Get the CK48 clock configuration -----------------------------------------------*/ + PeriphClkInit->Clk48ClockSelection = __HAL_RCC_GET_CLK48_SOURCE(); + + /* Get the SDMMC1 clock configuration -----------------------------------------------*/ + PeriphClkInit->Sdmmc1ClockSelection = __HAL_RCC_GET_SDMMC1_SOURCE(); + + /* Get the SDMMC2 clock configuration -----------------------------------------------*/ + PeriphClkInit->Sdmmc2ClockSelection = __HAL_RCC_GET_SDMMC2_SOURCE(); + + /* Get the RTC Clock configuration -----------------------------------------------*/ + tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); + PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & RCC_BDCR_RTCSEL)); + + /* Get the TIM Prescaler configuration --------------------------------------------*/ + if ((RCC->DCKCFGR1 & RCC_DCKCFGR1_TIMPRE) == RESET) + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_DESACTIVATED; + } + else + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_ACTIVATED; + } +} +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ + +/** + * @brief Return the peripheral clock frequency for a given peripheral(SAI..) 
+ * @note Return 0 if peripheral clock identifier not managed by this API + * @param PeriphClk Peripheral clock identifier + * This parameter can be one of the following values: + * @arg RCC_PERIPHCLK_SAI1: SAI1 peripheral clock + * @arg RCC_PERIPHCLK_SAI2: SAI2 peripheral clock + * @retval Frequency in KHz + */ +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) +{ + uint32_t tmpreg = 0; + /* This variable is used to store the SAI clock frequency (value in Hz) */ + uint32_t frequency = 0; + /* This variable is used to store the VCO Input (value in Hz) */ + uint32_t vcoinput = 0; + /* This variable is used to store the SAI clock source */ + uint32_t saiclocksource = 0; + + if (PeriphClk == RCC_PERIPHCLK_SAI1) + { + saiclocksource = RCC->DCKCFGR1; + saiclocksource &= RCC_DCKCFGR1_SAI1SEL; + switch (saiclocksource) + { + case 0: /* PLLSAI is the clock source for SAI1 */ + { + /* Configure the PLLSAI division factor */ + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM))); + } + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + tmpreg = (RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> 24; + frequency = (vcoinput * ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> 6))/(tmpreg); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + tmpreg = (((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLSAIDIVQ) >> 8) + 1); + frequency = frequency/(tmpreg); + break; + } + case RCC_DCKCFGR1_SAI1SEL_0: /* PLLI2S is the clock source for SAI1 */ + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM))); + } + + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + tmpreg = (RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> 24; + frequency = (vcoinput * ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6))/(tmpreg); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + tmpreg = ((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLI2SDIVQ) + 1); + frequency = frequency/(tmpreg); + break; + } + case RCC_DCKCFGR1_SAI1SEL_1: /* External clock is the clock source for SAI1 */ + { + frequency = EXTERNAL_CLOCK_VALUE; + break; + } +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + case RCC_DCKCFGR1_SAI1SEL: /* HSI or HSE is the clock source for SAI*/ + { + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the main PLL Source is HSI */ + frequency = HSI_VALUE; + } + else + { + /* In Case the main PLL Source is HSE */ + frequency = HSE_VALUE; + } + break; + } +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + default : + { + break; + } + } + } + + if (PeriphClk == RCC_PERIPHCLK_SAI2) + { + saiclocksource = RCC->DCKCFGR1; + saiclocksource &= RCC_DCKCFGR1_SAI2SEL; + switch (saiclocksource) + { + case 0: /* PLLSAI is the clock source for 
SAI*/ + { + /* Configure the PLLSAI division factor */ + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM))); + } + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + tmpreg = (RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> 24; + frequency = (vcoinput * ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> 6))/(tmpreg); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + tmpreg = (((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLSAIDIVQ) >> 8) + 1); + frequency = frequency/(tmpreg); + break; + } + case RCC_DCKCFGR1_SAI2SEL_0: /* PLLI2S is the clock source for SAI2 */ + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM))); + } + + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + tmpreg = (RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> 24; + frequency = (vcoinput * ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6))/(tmpreg); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + tmpreg = ((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLI2SDIVQ) + 1); + frequency = frequency/(tmpreg); + break; + } + case RCC_DCKCFGR1_SAI2SEL_1: /* External clock is the clock source for SAI2 */ + { + frequency = EXTERNAL_CLOCK_VALUE; + break; + } +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + case RCC_DCKCFGR1_SAI2SEL: /* HSI or HSE is the clock source for SAI2 */ + { + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the main PLL Source is HSI */ + frequency = HSI_VALUE; + } + else + { + /* In Case the main PLL Source is HSE */ + frequency = HSE_VALUE; + } + break; + } +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + default : + { + break; + } + } + } + + return frequency; +} + +/** + * @} + */ + +/** @defgroup RCCEx_Exported_Functions_Group2 Extended Clock management functions + * @brief Extended Clock management functions + * +@verbatim + =============================================================================== + ##### Extended clock management functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the + activation or deactivation of PLLI2S, PLLSAI. +@endverbatim + * @{ + */ + +/** + * @brief Enable PLLI2S. 
+ * @param PLLI2SInit pointer to an RCC_PLLI2SInitTypeDef structure that + * contains the configuration information for the PLLI2S + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_EnablePLLI2S(RCC_PLLI2SInitTypeDef *PLLI2SInit) +{ + uint32_t tickstart; + + /* Check for parameters */ + assert_param(IS_RCC_PLLI2SN_VALUE(PLLI2SInit->PLLI2SN)); + assert_param(IS_RCC_PLLI2SR_VALUE(PLLI2SInit->PLLI2SR)); + assert_param(IS_RCC_PLLI2SQ_VALUE(PLLI2SInit->PLLI2SQ)); +#if defined(RCC_PLLI2SCFGR_PLLI2SP) + assert_param(IS_RCC_PLLI2SP_VALUE(PLLI2SInit->PLLI2SP)); +#endif /* RCC_PLLI2SCFGR_PLLI2SP */ + + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + + /* Wait till PLLI2S is disabled */ + tickstart = HAL_GetTick(); + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Configure the PLLI2S division factors */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * PLLI2SN */ + /* I2SQCLK = PLLI2S_VCO / PLLI2SQ */ + /* I2SRCLK = PLLI2S_VCO / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PLLI2SInit->PLLI2SN, PLLI2SInit->PLLI2SQ, PLLI2SInit->PLLI2SR); +#else + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * PLLI2SN */ + /* I2SPCLK = PLLI2S_VCO / PLLI2SP */ + /* I2SQCLK = PLLI2S_VCO / PLLI2SQ */ + /* I2SRCLK = PLLI2S_VCO / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PLLI2SInit->PLLI2SN, PLLI2SInit->PLLI2SP, PLLI2SInit->PLLI2SQ, PLLI2SInit->PLLI2SR); +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + + /* Wait till PLLI2S is ready */ + tickstart = HAL_GetTick(); + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Disable PLLI2S. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_DisablePLLI2S(void) +{ + uint32_t tickstart; + + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + + /* Wait till PLLI2S is disabled */ + tickstart = HAL_GetTick(); + while(READ_BIT(RCC->CR, RCC_CR_PLLI2SRDY) != RESET) + { + if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Enable PLLSAI. 
+ * @param PLLSAIInit pointer to an RCC_PLLSAIInitTypeDef structure that + * contains the configuration information for the PLLSAI + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_EnablePLLSAI(RCC_PLLSAIInitTypeDef *PLLSAIInit) +{ + uint32_t tickstart; + + /* Check for parameters */ + assert_param(IS_RCC_PLLSAIN_VALUE(PLLSAIInit->PLLSAIN)); + assert_param(IS_RCC_PLLSAIQ_VALUE(PLLSAIInit->PLLSAIQ)); + assert_param(IS_RCC_PLLSAIP_VALUE(PLLSAIInit->PLLSAIP)); +#if defined(RCC_PLLSAICFGR_PLLSAIR) + assert_param(IS_RCC_PLLSAIR_VALUE(PLLSAIInit->PLLSAIR)); +#endif /* RCC_PLLSAICFGR_PLLSAIR */ + + /* Disable the PLLSAI */ + __HAL_RCC_PLLSAI_DISABLE(); + + /* Wait till PLLSAI is disabled */ + tickstart = HAL_GetTick(); + while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Configure the PLLSAI division factors */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) * PLLSAIN */ + /* SAIPCLK = PLLSAI_VCO / PLLSAIP */ + /* SAIQCLK = PLLSAI_VCO / PLLSAIQ */ + __HAL_RCC_PLLSAI_CONFIG(PLLSAIInit->PLLSAIN, PLLSAIInit->PLLSAIP, PLLSAIInit->PLLSAIQ); +#else + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) * PLLSAIN */ + /* SAIPCLK = PLLSAI_VCO / PLLSAIP */ + /* SAIQCLK = PLLSAI_VCO / PLLSAIQ */ + /* SAIRCLK = PLLSAI_VCO / PLLSAIR */ + __HAL_RCC_PLLSAI_CONFIG(PLLSAIInit->PLLSAIN, PLLSAIInit->PLLSAIP, \ + PLLSAIInit->PLLSAIQ, PLLSAIInit->PLLSAIR); +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ + + /* Enable the PLLSAI */ + __HAL_RCC_PLLSAI_ENABLE(); + + /* Wait till PLLSAI is ready */ + tickstart = HAL_GetTick(); + while(__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + { + if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Disable PLLSAI. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void) +{ + uint32_t tickstart; + + /* Disable the PLLSAI */ + __HAL_RCC_PLLSAI_DISABLE(); + + /* Wait till PLLSAI is disabled */ + tickstart = HAL_GetTick(); + while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_RCC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rng.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rng.c new file mode 100644 index 000000000..709408862 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rng.c @@ -0,0 +1,874 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rng.c + * @author MCD Application Team + * @brief RNG HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the Random Number Generator (RNG) peripheral: + * + Initialization and configuration functions + * + Peripheral Control functions + * + Peripheral State functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The RNG HAL driver can be used as follows: + + (#) Enable the RNG controller clock using __HAL_RCC_RNG_CLK_ENABLE() macro + in HAL_RNG_MspInit(). + (#) Activate the RNG peripheral using HAL_RNG_Init() function. + (#) Wait until the 32 bit Random Number Generator contains a valid + random data using (polling/interrupt) mode. + (#) Get the 32 bit random number using HAL_RNG_GenerateRandomNumber() function. + + ##### Callback registration ##### + ================================== + + [..] + The compilation define USE_HAL_RNG_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + + [..] + Use Function @ref HAL_RNG_RegisterCallback() to register a user callback. + Function @ref HAL_RNG_RegisterCallback() allows to register following callbacks: + (+) ErrorCallback : RNG Error Callback. + (+) MspInitCallback : RNG MspInit. + (+) MspDeInitCallback : RNG MspDeInit. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + [..] + Use function @ref HAL_RNG_UnRegisterCallback() to reset a callback to the default + weak (surcharged) function. + @ref HAL_RNG_UnRegisterCallback() takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) ErrorCallback : RNG Error Callback. + (+) MspInitCallback : RNG MspInit. + (+) MspDeInitCallback : RNG MspDeInit. + + [..] + For specific callback ReadyDataCallback, use dedicated register callbacks: + respectively @ref HAL_RNG_RegisterReadyDataCallback() , @ref HAL_RNG_UnRegisterReadyDataCallback(). + + [..] + By default, after the @ref HAL_RNG_Init() and when the state is HAL_RNG_STATE_RESET + all callbacks are set to the corresponding weak (surcharged) functions: + example @ref HAL_RNG_ErrorCallback(). + Exception done for MspInit and MspDeInit functions that are respectively + reset to the legacy weak (surcharged) functions in the @ref HAL_RNG_Init() + and @ref HAL_RNG_DeInit() only when these callbacks are null (not registered beforehand). + If not, MspInit or MspDeInit are not null, the @ref HAL_RNG_Init() and @ref HAL_RNG_DeInit() + keep and use the user MspInit/MspDeInit callbacks (registered beforehand). + + [..] + Callbacks can be registered/unregistered in HAL_RNG_STATE_READY state only. + Exception done MspInit/MspDeInit that can be registered/unregistered + in HAL_RNG_STATE_READY or HAL_RNG_STATE_RESET state, thus registered (user) + MspInit/DeInit callbacks can be used during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using @ref HAL_RNG_RegisterCallback() before calling @ref HAL_RNG_DeInit() + or @ref HAL_RNG_Init() function. + + [..] + When The compilation define USE_HAL_RNG_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available + and weak (surcharged) callbacks are used. + + @endverbatim + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +#if defined (RNG) + +/** @addtogroup RNG + * @brief RNG HAL module driver. + * @{ + */ + +#ifdef HAL_RNG_MODULE_ENABLED + +/* Private types -------------------------------------------------------------*/ +/* Private defines -----------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup RNG_Private_Constants RNG Private Constants + * @{ + */ +#define RNG_TIMEOUT_VALUE 2U +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/* Private functions prototypes ----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup RNG_Exported_Functions + * @{ + */ + +/** @addtogroup RNG_Exported_Functions_Group1 + * @brief Initialization and configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and configuration functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initialize the RNG according to the specified parameters + in the RNG_InitTypeDef and create the associated handle + (+) DeInitialize the RNG peripheral + (+) Initialize the RNG MSP + (+) DeInitialize RNG MSP + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the RNG peripheral and creates the associated handle. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. 
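[Editorial note, not part of the ST sources added by this diff] The driver header above prescribes enabling the RNG clock from HAL_RNG_MspInit() and then calling HAL_RNG_Init(). A minimal sketch of that sequence, assuming a user file that includes stm32f7xx_hal.h, an application-defined handle named hrng1, and the default build with USE_HAL_RNG_REGISTER_CALLBACKS set to 0:

RNG_HandleTypeDef hrng1;

/* Overrides the weak HAL_RNG_MspInit() defined later in this file:
   gate the RNG peripheral clock on before the driver touches the registers. */
void HAL_RNG_MspInit(RNG_HandleTypeDef *hrng)
{
  (void)hrng;
  __HAL_RCC_RNG_CLK_ENABLE();
}

/* Bind the handle to the RNG instance and enable the peripheral. */
HAL_StatusTypeDef app_rng_start(void)
{
  hrng1.Instance = RNG;
  return HAL_RNG_Init(&hrng1);   /* calls HAL_RNG_MspInit(), then sets RNGEN */
}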
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RNG_Init(RNG_HandleTypeDef *hrng) +{ + /* Check the RNG handle allocation */ + if (hrng == NULL) + { + return HAL_ERROR; + } + /* Check the parameters */ + assert_param(IS_RNG_ALL_INSTANCE(hrng->Instance)); + +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) + if (hrng->State == HAL_RNG_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hrng->Lock = HAL_UNLOCKED; + + hrng->ReadyDataCallback = HAL_RNG_ReadyDataCallback; /* Legacy weak ReadyDataCallback */ + hrng->ErrorCallback = HAL_RNG_ErrorCallback; /* Legacy weak ErrorCallback */ + + if (hrng->MspInitCallback == NULL) + { + hrng->MspInitCallback = HAL_RNG_MspInit; /* Legacy weak MspInit */ + } + + /* Init the low level hardware */ + hrng->MspInitCallback(hrng); + } +#else + if (hrng->State == HAL_RNG_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hrng->Lock = HAL_UNLOCKED; + + /* Init the low level hardware */ + HAL_RNG_MspInit(hrng); + } +#endif /* USE_HAL_RNG_REGISTER_CALLBACKS */ + + /* Change RNG peripheral state */ + hrng->State = HAL_RNG_STATE_BUSY; + + + /* Enable the RNG Peripheral */ + __HAL_RNG_ENABLE(hrng); + + /* Initialize the RNG state */ + hrng->State = HAL_RNG_STATE_READY; + + /* Initialise the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_NONE; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief DeInitializes the RNG peripheral. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RNG_DeInit(RNG_HandleTypeDef *hrng) +{ + /* Check the RNG handle allocation */ + if (hrng == NULL) + { + return HAL_ERROR; + } + + /* Disable the RNG Peripheral */ + CLEAR_BIT(hrng->Instance->CR, RNG_CR_IE | RNG_CR_RNGEN); + + /* Clear RNG interrupt status flags */ + CLEAR_BIT(hrng->Instance->SR, RNG_SR_CEIS | RNG_SR_SEIS); + +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) + if (hrng->MspDeInitCallback == NULL) + { + hrng->MspDeInitCallback = HAL_RNG_MspDeInit; /* Legacy weak MspDeInit */ + } + + /* DeInit the low level hardware */ + hrng->MspDeInitCallback(hrng); +#else + /* DeInit the low level hardware */ + HAL_RNG_MspDeInit(hrng); +#endif /* USE_HAL_RNG_REGISTER_CALLBACKS */ + + /* Update the RNG state */ + hrng->State = HAL_RNG_STATE_RESET; + + /* Initialise the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_NONE; + + /* Release Lock */ + __HAL_UNLOCK(hrng); + + /* Return the function status */ + return HAL_OK; +} + +/** + * @brief Initializes the RNG MSP. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @retval None + */ +__weak void HAL_RNG_MspInit(RNG_HandleTypeDef *hrng) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrng); + /* NOTE : This function should not be modified. When the callback is needed, + function HAL_RNG_MspInit must be implemented in the user file. + */ +} + +/** + * @brief DeInitializes the RNG MSP. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @retval None + */ +__weak void HAL_RNG_MspDeInit(RNG_HandleTypeDef *hrng) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrng); + /* NOTE : This function should not be modified. When the callback is needed, + function HAL_RNG_MspDeInit must be implemented in the user file. 
+ */ +} + +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User RNG Callback + * To be used instead of the weak predefined callback + * @param hrng RNG handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_RNG_ERROR_CB_ID Error callback ID + * @arg @ref HAL_RNG_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_RNG_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RNG_RegisterCallback(RNG_HandleTypeDef *hrng, HAL_RNG_CallbackIDTypeDef CallbackID, pRNG_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hrng); + + if (HAL_RNG_STATE_READY == hrng->State) + { + switch (CallbackID) + { + case HAL_RNG_ERROR_CB_ID : + hrng->ErrorCallback = pCallback; + break; + + case HAL_RNG_MSPINIT_CB_ID : + hrng->MspInitCallback = pCallback; + break; + + case HAL_RNG_MSPDEINIT_CB_ID : + hrng->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_RNG_STATE_RESET == hrng->State) + { + switch (CallbackID) + { + case HAL_RNG_MSPINIT_CB_ID : + hrng->MspInitCallback = pCallback; + break; + + case HAL_RNG_MSPDEINIT_CB_ID : + hrng->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hrng); + return status; +} + +/** + * @brief Unregister an RNG Callback + * RNG callabck is redirected to the weak predefined callback + * @param hrng RNG handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_RNG_ERROR_CB_ID Error callback ID + * @arg @ref HAL_RNG_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_RNG_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RNG_UnRegisterCallback(RNG_HandleTypeDef *hrng, HAL_RNG_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hrng); + + if (HAL_RNG_STATE_READY == hrng->State) + { + switch (CallbackID) + { + case HAL_RNG_ERROR_CB_ID : + hrng->ErrorCallback = HAL_RNG_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_RNG_MSPINIT_CB_ID : + hrng->MspInitCallback = HAL_RNG_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_RNG_MSPDEINIT_CB_ID : + hrng->MspDeInitCallback = HAL_RNG_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_RNG_STATE_RESET == hrng->State) + { + switch (CallbackID) + { + case HAL_RNG_MSPINIT_CB_ID : + hrng->MspInitCallback = HAL_RNG_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_RNG_MSPDEINIT_CB_ID : + hrng->MspDeInitCallback = HAL_RNG_MspDeInit; /* Legacy weak MspInit */ + break; + + default : + /* Update the error code 
*/ + hrng->ErrorCode = HAL_RNG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hrng); + return status; +} + +/** + * @brief Register Data Ready RNG Callback + * To be used instead of the weak HAL_RNG_ReadyDataCallback() predefined callback + * @param hrng RNG handle + * @param pCallback pointer to the Data Ready Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RNG_RegisterReadyDataCallback(RNG_HandleTypeDef *hrng, pRNG_ReadyDataCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hrng); + + if (HAL_RNG_STATE_READY == hrng->State) + { + hrng->ReadyDataCallback = pCallback; + } + else + { + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hrng); + return status; +} + +/** + * @brief UnRegister the Data Ready RNG Callback + * Data Ready RNG Callback is redirected to the weak HAL_RNG_ReadyDataCallback() predefined callback + * @param hrng RNG handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RNG_UnRegisterReadyDataCallback(RNG_HandleTypeDef *hrng) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hrng); + + if (HAL_RNG_STATE_READY == hrng->State) + { + hrng->ReadyDataCallback = HAL_RNG_ReadyDataCallback; /* Legacy weak ReadyDataCallback */ + } + else + { + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hrng); + return status; +} + +#endif /* USE_HAL_RNG_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @addtogroup RNG_Exported_Functions_Group2 + * @brief Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Get the 32 bit Random number + (+) Get the 32 bit Random number with interrupt enabled + (+) Handle RNG interrupt request + +@endverbatim + * @{ + */ + +/** + * @brief Generates a 32-bit random number. + * @note This function checks value of RNG_FLAG_DRDY flag to know if valid + * random number is available in the DR register (RNG_FLAG_DRDY flag set + * whenever a random number is available through the RNG_DR register). + * After transitioning from 0 to 1 (random number available), + * RNG_FLAG_DRDY flag remains high until output buffer becomes empty after reading + * four words from the RNG_DR register, i.e. further function calls + * will immediately return a new u32 random number (additional words are + * available and can be read by the application, till RNG_FLAG_DRDY flag remains high). + * @note When no more random number data is available in DR register, RNG_FLAG_DRDY + * flag is automatically cleared. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @param random32bit pointer to generated random number variable if successful. 
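[Editorial note, not part of the ST sources] Polling-mode use of the function documented above, reusing the hypothetical hrng1 handle from the earlier sketch:

/* Fetch one 32-bit random word; 0 is returned here only as a placeholder on error. */
uint32_t app_rng_read_word(void)
{
  uint32_t value = 0U;

  if (HAL_RNG_GenerateRandomNumber(&hrng1, &value) != HAL_OK)
  {
    /* Busy handle or DRDY timeout; the cause is reported by HAL_RNG_GetError(&hrng1). */
  }
  return value;
}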
+ * @retval HAL status + */ + +HAL_StatusTypeDef HAL_RNG_GenerateRandomNumber(RNG_HandleTypeDef *hrng, uint32_t *random32bit) +{ + uint32_t tickstart; + HAL_StatusTypeDef status = HAL_OK; + + /* Process Locked */ + __HAL_LOCK(hrng); + + /* Check RNG peripheral state */ + if (hrng->State == HAL_RNG_STATE_READY) + { + /* Change RNG peripheral state */ + hrng->State = HAL_RNG_STATE_BUSY; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check if data register contains valid random data */ + while (__HAL_RNG_GET_FLAG(hrng, RNG_FLAG_DRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RNG_TIMEOUT_VALUE) + { + hrng->State = HAL_RNG_STATE_READY; + hrng->ErrorCode = HAL_RNG_ERROR_TIMEOUT; + /* Process Unlocked */ + __HAL_UNLOCK(hrng); + return HAL_ERROR; + } + } + + /* Get a 32bit Random number */ + hrng->RandomNumber = hrng->Instance->DR; + *random32bit = hrng->RandomNumber; + + hrng->State = HAL_RNG_STATE_READY; + } + else + { + hrng->ErrorCode = HAL_RNG_ERROR_BUSY; + status = HAL_ERROR; + } + + /* Process Unlocked */ + __HAL_UNLOCK(hrng); + + return status; +} + +/** + * @brief Generates a 32-bit random number in interrupt mode. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RNG_GenerateRandomNumber_IT(RNG_HandleTypeDef *hrng) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process Locked */ + __HAL_LOCK(hrng); + + /* Check RNG peripheral state */ + if (hrng->State == HAL_RNG_STATE_READY) + { + /* Change RNG peripheral state */ + hrng->State = HAL_RNG_STATE_BUSY; + + /* Enable the RNG Interrupts: Data Ready, Clock error, Seed error */ + __HAL_RNG_ENABLE_IT(hrng); + } + else + { + /* Process Unlocked */ + __HAL_UNLOCK(hrng); + + hrng->ErrorCode = HAL_RNG_ERROR_BUSY; + status = HAL_ERROR; + } + + return status; +} + +/** + * @brief Returns generated random number in polling mode (Obsolete) + * Use HAL_RNG_GenerateRandomNumber() API instead. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @retval Random value + */ +uint32_t HAL_RNG_GetRandomNumber(RNG_HandleTypeDef *hrng) +{ + if(HAL_RNG_GenerateRandomNumber(hrng, &(hrng->RandomNumber)) == HAL_OK) + { + return hrng->RandomNumber; + } + else + { + return 0U; + } +} + +/** + * @brief Returns a 32-bit random number with interrupt enabled (Obsolete), + * Use HAL_RNG_GenerateRandomNumber_IT() API instead. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @retval 32-bit random number + */ +uint32_t HAL_RNG_GetRandomNumber_IT(RNG_HandleTypeDef *hrng) +{ + uint32_t random32bit = 0U; + + /* Process locked */ + __HAL_LOCK(hrng); + + /* Change RNG peripheral state */ + hrng->State = HAL_RNG_STATE_BUSY; + + /* Get a 32bit Random number */ + random32bit = hrng->Instance->DR; + + /* Enable the RNG Interrupts: Data Ready, Clock error, Seed error */ + __HAL_RNG_ENABLE_IT(hrng); + + /* Return the 32 bit random number */ + return random32bit; +} + +/** + * @brief Handles RNG interrupt request. + * @note In the case of a clock error, the RNG is no more able to generate + * random numbers because the PLL48CLK clock is not correct. User has + * to check that the clock controller is correctly configured to provide + * the RNG clock and clear the CEIS bit using __HAL_RNG_CLEAR_IT(). + * The clock error has no impact on the previously generated + * random numbers, and the RNG_DR register contents can be used. 
+ * @note In the case of a seed error, the generation of random numbers is + * interrupted as long as the SECS bit is '1'. If a number is + * available in the RNG_DR register, it must not be used because it may + * not have enough entropy. In this case, it is recommended to clear the + * SEIS bit using __HAL_RNG_CLEAR_IT(), then disable and enable + * the RNG peripheral to reinitialize and restart the RNG. + * @note User-written HAL_RNG_ErrorCallback() API is called once whether SEIS + * or CEIS are set. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @retval None + + */ +void HAL_RNG_IRQHandler(RNG_HandleTypeDef *hrng) +{ + uint32_t rngclockerror = 0U; + + /* RNG clock error interrupt occurred */ + if (__HAL_RNG_GET_IT(hrng, RNG_IT_CEI) != RESET) + { + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_CLOCK; + rngclockerror = 1U; + } + else if (__HAL_RNG_GET_IT(hrng, RNG_IT_SEI) != RESET) + { + /* Update the error code */ + hrng->ErrorCode = HAL_RNG_ERROR_SEED; + rngclockerror = 1U; + } + else + { + /* Nothing to do */ + } + + if (rngclockerror == 1U) + { + /* Change RNG peripheral state */ + hrng->State = HAL_RNG_STATE_ERROR; + +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) + /* Call registered Error callback */ + hrng->ErrorCallback(hrng); +#else + /* Call legacy weak Error callback */ + HAL_RNG_ErrorCallback(hrng); +#endif /* USE_HAL_RNG_REGISTER_CALLBACKS */ + + /* Clear the clock error flag */ + __HAL_RNG_CLEAR_IT(hrng, RNG_IT_CEI | RNG_IT_SEI); + } + + /* Check RNG data ready interrupt occurred */ + if (__HAL_RNG_GET_IT(hrng, RNG_IT_DRDY) != RESET) + { + /* Generate random number once, so disable the IT */ + __HAL_RNG_DISABLE_IT(hrng); + + /* Get the 32bit Random number (DRDY flag automatically cleared) */ + hrng->RandomNumber = hrng->Instance->DR; + + if (hrng->State != HAL_RNG_STATE_ERROR) + { + /* Change RNG peripheral state */ + hrng->State = HAL_RNG_STATE_READY; + /* Process Unlocked */ + __HAL_UNLOCK(hrng); + +#if (USE_HAL_RNG_REGISTER_CALLBACKS == 1) + /* Call registered Data Ready callback */ + hrng->ReadyDataCallback(hrng, hrng->RandomNumber); +#else + /* Call legacy weak Data Ready callback */ + HAL_RNG_ReadyDataCallback(hrng, hrng->RandomNumber); +#endif /* USE_HAL_RNG_REGISTER_CALLBACKS */ + } + } +} + +/** + * @brief Read latest generated random number. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @retval random value + */ +uint32_t HAL_RNG_ReadLastRandomNumber(RNG_HandleTypeDef *hrng) +{ + return (hrng->RandomNumber); +} + +/** + * @brief Data Ready callback in non-blocking mode. + * @note When RNG_FLAG_DRDY flag value is set, first random number has been read + * from DR register in IRQ Handler and is provided as callback parameter. + * Depending on valid data available in the conditioning output buffer, + * additional words can be read by the application from DR register till + * DRDY bit remains high. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @param random32bit generated random number. + * @retval None + */ +__weak void HAL_RNG_ReadyDataCallback(RNG_HandleTypeDef *hrng, uint32_t random32bit) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrng); + UNUSED(random32bit); + /* NOTE : This function should not be modified. When the callback is needed, + function HAL_RNG_ReadyDataCallback must be implemented in the user file. 
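[Editorial note, not part of the ST sources] Interrupt-mode flow, sketched under these assumptions: default build (USE_HAL_RNG_REGISTER_CALLBACKS == 0) so the weak HAL_RNG_ReadyDataCallback() is overridden in user code, the RNG interrupt enabled in the NVIC, and the device's RNG interrupt service routine calling HAL_RNG_IRQHandler(&hrng1):

static volatile uint32_t g_last_random;   /* hypothetical application variable */

/* Called from HAL_RNG_IRQHandler() once DRDY is raised and DR has been read. */
void HAL_RNG_ReadyDataCallback(RNG_HandleTypeDef *hrng, uint32_t random32bit)
{
  (void)hrng;
  g_last_random = random32bit;
}

/* Application side: request one number; the callback above delivers it. */
/* HAL_RNG_GenerateRandomNumber_IT(&hrng1); */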
+ */ +} + +/** + * @brief RNG error callbacks. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @retval None + */ +__weak void HAL_RNG_ErrorCallback(RNG_HandleTypeDef *hrng) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrng); + /* NOTE : This function should not be modified. When the callback is needed, + function HAL_RNG_ErrorCallback must be implemented in the user file. + */ +} +/** + * @} + */ + + +/** @addtogroup RNG_Exported_Functions_Group3 + * @brief Peripheral State functions + * +@verbatim + =============================================================================== + ##### Peripheral State functions ##### + =============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Returns the RNG state. + * @param hrng pointer to a RNG_HandleTypeDef structure that contains + * the configuration information for RNG. + * @retval HAL state + */ +HAL_RNG_StateTypeDef HAL_RNG_GetState(RNG_HandleTypeDef *hrng) +{ + return hrng->State; +} + +/** + * @brief Return the RNG handle error code. + * @param hrng: pointer to a RNG_HandleTypeDef structure. + * @retval RNG Error Code +*/ +uint32_t HAL_RNG_GetError(RNG_HandleTypeDef *hrng) +{ + /* Return RNG Error Code */ + return hrng->ErrorCode; +} +/** + * @} + */ + +/** + * @} + */ + + +#endif /* HAL_RNG_MODULE_ENABLED */ +/** + * @} + */ + +#endif /* RNG */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc.c new file mode 100644 index 000000000..dad8655fd --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc.c @@ -0,0 +1,1850 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rtc.c + * @author MCD Application Team + * @brief RTC HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Real Time Clock (RTC) peripheral: + * + Initialization and de-initialization functions + * + RTC Time and Date functions + * + RTC Alarm functions + * + Peripheral Control functions + * + Peripheral State functions + * + @verbatim + ============================================================================== + ##### Backup Domain Operating Condition ##### + ============================================================================== + [..] The real-time clock (RTC), the RTC backup registers, and the backup + SRAM (BKP SRAM) can be powered from the VBAT voltage when the main + VDD supply is powered off. + To retain the content of the RTC backup registers, backup SRAM, and supply + the RTC when VDD is turned off, VBAT pin can be connected to an optional + standby voltage supplied by a battery or by another source. + + [..] To allow the RTC operating even when the main digital supply (VDD) is turned + off, the VBAT pin powers the following blocks: + (#) The RTC + (#) The LSE oscillator + (#) The backup SRAM when the low power backup regulator is enabled + (#) PC13 to PC15 I/Os, plus PI8 I/O (when available) + + [..] 
When the backup domain is supplied by VDD (analog switch connected to VDD), + the following pins are available: + (#) PC14 and PC15 can be used as either GPIO or LSE pins + (#) PC13 can be used as a GPIO or as the RTC_AF1 pin + (#) PI8 can be used as a GPIO or as the RTC_AF2 pin + + [..] When the backup domain is supplied by VBAT (analog switch connected to VBAT + because VDD is not present), the following pins are available: + (#) PC14 and PC15 can be used as LSE pins only + (#) PC13 can be used as the RTC_AF1 pin + (#) PI8 can be used as the RTC_AF2 pin + (#) PC1 can be used as the RTC_AF3 pin + + ##### Backup Domain Reset ##### + ================================================================== + [..] The backup domain reset sets all RTC registers and the RCC_BDCR register + to their reset values. The BKPSRAM is not affected by this reset. The only + way to reset the BKPSRAM is through the Flash interface by requesting + a protection level change from 1 to 0. + [..] A backup domain reset is generated when one of the following events occurs: + (#) Software reset, triggered by setting the BDRST bit in the + RCC Backup domain control register (RCC_BDCR). + (#) VDD or VBAT power on, if both supplies have previously been powered off. + + ##### Backup Domain Access ##### + ================================================================== + [..] After reset, the backup domain (RTC registers, RTC backup data + registers and backup SRAM) is protected against possible unwanted write + accesses. + [..] To enable access to the RTC Domain and RTC registers, proceed as follows: + (+) Enable the Power Controller (PWR) APB1 interface clock using the + __HAL_RCC_PWR_CLK_ENABLE() function. + (+) Enable access to RTC domain using the HAL_PWR_EnableBkUpAccess() function. + (+) Select the RTC clock source using the __HAL_RCC_RTC_CONFIG() function. + (+) Enable RTC Clock using the __HAL_RCC_RTC_ENABLE() function. + + + ##### How to use this driver ##### + ================================================================== + [..] + (+) Enable the RTC domain access (see description in the section above). + (+) Configure the RTC Prescaler (Asynchronous and Synchronous) and RTC hour + format using the HAL_RTC_Init() function. + + *** Time and Date configuration *** + =================================== + [..] + (+) To configure the RTC Calendar (Time and Date) use the HAL_RTC_SetTime() + and HAL_RTC_SetDate() functions. + (+) To read the RTC Calendar, use the HAL_RTC_GetTime() and HAL_RTC_GetDate() functions. + + *** Alarm configuration *** + =========================== + [..] + (+) To configure the RTC Alarm use the HAL_RTC_SetAlarm() function. + You can also configure the RTC Alarm with interrupt mode using the HAL_RTC_SetAlarm_IT() function. + (+) To read the RTC Alarm, use the HAL_RTC_GetAlarm() function. + + ##### RTC and low power modes ##### + ================================================================== + [..] The MCU can be woken up from a low power mode by an RTC alternate + function. + [..] The RTC alternate functions are the RTC alarms (Alarm A and Alarm B), + RTC wake-up, RTC tamper event detection and RTC time stamp event detection. + These RTC alternate functions can wake up the system from the Stop and + Standby low power modes. + [..] The system can also wake up from low power modes without depending + on an external interrupt (Auto-wake-up mode), by using the RTC alarm + or the RTC wake-up events. + [..] 
The RTC provides a programmable time base for waking up from the + Stop or Standby mode at regular intervals. + Wake-up from STOP and STANDBY modes is possible only when the RTC clock source + is LSE or LSI. + + *** Callback registration *** + ============================================= + + The compilation define USE_HAL_RTC_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + Use Function @ref HAL_RTC_RegisterCallback() to register an interrupt callback. + + Function @ref HAL_RTC_RegisterCallback() allows to register following callbacks: + (+) AlarmAEventCallback : RTC Alarm A Event callback. + (+) AlarmBEventCallback : RTC Alarm B Event callback. + (+) TimeStampEventCallback : RTC TimeStamp Event callback. + (+) WakeUpTimerEventCallback : RTC WakeUpTimer Event callback. + (+) Tamper1EventCallback : RTC Tamper 1 Event callback. + (+) Tamper2EventCallback : RTC Tamper 2 Event callback. + (+) Tamper3EventCallback : RTC Tamper 3 Event callback. + (+) MspInitCallback : RTC MspInit callback. + (+) MspDeInitCallback : RTC MspDeInit callback. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + Use function @ref HAL_RTC_UnRegisterCallback() to reset a callback to the default + weak function. + @ref HAL_RTC_UnRegisterCallback() takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) AlarmAEventCallback : RTC Alarm A Event callback. + (+) AlarmBEventCallback : RTC Alarm B Event callback. + (+) TimeStampEventCallback : RTC TimeStamp Event callback. + (+) WakeUpTimerEventCallback : RTC WakeUpTimer Event callback. + (+) Tamper1EventCallback : RTC Tamper 1 Event callback. + (+) Tamper2EventCallback : RTC Tamper 2 Event callback. + (+) Tamper3EventCallback : RTC Tamper 3 Event callback. + (+) MspInitCallback : RTC MspInit callback. + (+) MspDeInitCallback : RTC MspDeInit callback. + + By default, after the @ref HAL_RTC_Init() and when the state is HAL_RTC_STATE_RESET, + all callbacks are set to the corresponding weak functions : + examples @ref AlarmAEventCallback(), @ref WakeUpTimerEventCallback(). + Exception done for MspInit and MspDeInit callbacks that are reset to the legacy weak function + in the @ref HAL_RTC_Init()/@ref HAL_RTC_DeInit() only when these callbacks are null + (not registered beforehand). + If not, MspInit or MspDeInit are not null, @ref HAL_RTC_Init()/@ref HAL_RTC_DeInit() + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) + + Callbacks can be registered/unregistered in HAL_RTC_STATE_READY state only. + Exception done MspInit/MspDeInit that can be registered/unregistered + in HAL_RTC_STATE_READY or HAL_RTC_STATE_RESET state, + thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using @ref HAL_RTC_RegisterCallback() before calling @ref HAL_RTC_DeInit() + or @ref HAL_RTC_Init() function. + + When The compilation define USE_HAL_RTC_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. + @endverbatim + + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup RTC RTC + * @brief RTC HAL module driver + * @{ + */ + +#ifdef HAL_RTC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup RTC_Exported_Functions RTC Exported Functions + * @{ + */ + +/** @defgroup RTC_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This section provides functions allowing to initialize and configure the + RTC Prescaler (Synchronous and Asynchronous), RTC Hour format, disable + RTC registers Write protection, enter and exit the RTC initialization mode, + RTC registers synchronization check and reference clock detection enable. + (#) The RTC Prescaler is programmed to generate the RTC 1Hz time base. + It is split into 2 programmable prescalers to minimize power consumption. + (++) A 7-bit asynchronous prescaler and a 13-bit synchronous prescaler. + (++) When both prescalers are used, it is recommended to configure the + asynchronous prescaler to a high value to minimize power consumption. + (#) All RTC registers are Write protected. Writing to the RTC registers + is enabled by writing a key into the Write Protection register, RTC_WPR. + (#) To configure the RTC Calendar, user application should enter + initialization mode. In this mode, the calendar counter is stopped + and its value can be updated. When the initialization sequence is + complete, the calendar restarts counting after 4 RTCCLK cycles. + (#) To read the calendar through the shadow registers after Calendar + initialization, calendar update or after wake-up from low power modes + the software must first clear the RSF flag. The software must then + wait until it is set again before reading the calendar, which means + that the calendar registers have been correctly copied into the + RTC_TR and RTC_DR shadow registers.The HAL_RTC_WaitForSynchro() function + implements the above software sequence (RSF clear and RSF check). + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the RTC peripheral + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_Init(RTC_HandleTypeDef *hrtc) +{ + /* Check the RTC peripheral state */ + if(hrtc == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_RTC_ALL_INSTANCE(hrtc->Instance)); + assert_param(IS_RTC_HOUR_FORMAT(hrtc->Init.HourFormat)); + assert_param(IS_RTC_ASYNCH_PREDIV(hrtc->Init.AsynchPrediv)); + assert_param(IS_RTC_SYNCH_PREDIV(hrtc->Init.SynchPrediv)); + assert_param (IS_RTC_OUTPUT(hrtc->Init.OutPut)); + assert_param (IS_RTC_OUTPUT_POL(hrtc->Init.OutPutPolarity)); + assert_param(IS_RTC_OUTPUT_TYPE(hrtc->Init.OutPutType)); + +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) + if(hrtc->State == HAL_RTC_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hrtc->Lock = HAL_UNLOCKED; + + hrtc->AlarmAEventCallback = HAL_RTC_AlarmAEventCallback; /* Legacy weak AlarmAEventCallback */ + hrtc->AlarmBEventCallback = HAL_RTCEx_AlarmBEventCallback; /* Legacy weak AlarmBEventCallback */ + hrtc->TimeStampEventCallback = HAL_RTCEx_TimeStampEventCallback; /* Legacy weak TimeStampEventCallback */ + hrtc->WakeUpTimerEventCallback = HAL_RTCEx_WakeUpTimerEventCallback; /* Legacy weak WakeUpTimerEventCallback */ + hrtc->Tamper1EventCallback = HAL_RTCEx_Tamper1EventCallback; /* Legacy weak Tamper1EventCallback */ + hrtc->Tamper2EventCallback = HAL_RTCEx_Tamper2EventCallback; /* Legacy weak Tamper2EventCallback */ + hrtc->Tamper3EventCallback = HAL_RTCEx_Tamper3EventCallback; /* Legacy weak Tamper3EventCallback */ + + if(hrtc->MspInitCallback == NULL) + { + hrtc->MspInitCallback = HAL_RTC_MspInit; + } + /* Init the low level hardware */ + hrtc->MspInitCallback(hrtc); + + if(hrtc->MspDeInitCallback == NULL) + { + hrtc->MspDeInitCallback = HAL_RTC_MspDeInit; + } + } +#else + if(hrtc->State == HAL_RTC_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hrtc->Lock = HAL_UNLOCKED; + + /* Initialize RTC MSP */ + HAL_RTC_MspInit(hrtc); + } +#endif /* (USE_HAL_RTC_REGISTER_CALLBACKS) */ + + /* Set RTC state */ + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Set Initialization mode */ + if(RTC_EnterInitMode(hrtc) != HAL_OK) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Set RTC state */ + hrtc->State = HAL_RTC_STATE_ERROR; + + return HAL_ERROR; + } + else + { + /* Clear RTC_CR FMT, OSEL and POL Bits */ + hrtc->Instance->CR &= ((uint32_t)~(RTC_CR_FMT | RTC_CR_OSEL | RTC_CR_POL)); + /* Set RTC_CR register */ + hrtc->Instance->CR |= (uint32_t)(hrtc->Init.HourFormat | hrtc->Init.OutPut | hrtc->Init.OutPutPolarity); + + /* Configure the RTC PRER */ + hrtc->Instance->PRER = (uint32_t)(hrtc->Init.SynchPrediv); + hrtc->Instance->PRER |= (uint32_t)(hrtc->Init.AsynchPrediv << 16); + + /* Exit Initialization mode */ + hrtc->Instance->ISR &= (uint32_t)~RTC_ISR_INIT; + + hrtc->Instance->OR &= (uint32_t)~RTC_OR_ALARMTYPE; + hrtc->Instance->OR |= (uint32_t)(hrtc->Init.OutPutType); + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Set RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + return HAL_OK; + } +} + +/** + * @brief DeInitializes the RTC peripheral + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @note This function doesn't reset the RTC Backup Data registers. 
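[Editorial note, not part of the ST sources] A minimal calendar bring-up sketch for the HAL_RTC_Init() shown above, assuming a 32.768 kHz LSE already selected as the RTC clock source in RCC (e.g. via __HAL_RCC_RTC_CONFIG()), an application-defined handle named hrtc1, and the default (non-registered) callback build:

RTC_HandleTypeDef hrtc1;

/* Overrides the weak HAL_RTC_MspInit() defined later in this file: open the
   backup domain and switch the RTC clock on, as the header above describes. */
void HAL_RTC_MspInit(RTC_HandleTypeDef *hrtc)
{
  (void)hrtc;
  __HAL_RCC_PWR_CLK_ENABLE();     /* PWR APB1 interface clock */
  HAL_PWR_EnableBkUpAccess();     /* unlock backup-domain write access */
  __HAL_RCC_RTC_ENABLE();         /* RTC clock (source selected beforehand) */
}

HAL_StatusTypeDef app_rtc_start(void)
{
  hrtc1.Instance            = RTC;
  hrtc1.Init.HourFormat     = RTC_HOURFORMAT_24;
  hrtc1.Init.AsynchPrediv   = 127U;   /* 32768 Hz / (127 + 1) = 256 Hz */
  hrtc1.Init.SynchPrediv    = 255U;   /* 256 Hz / (255 + 1) = 1 Hz calendar tick */
  hrtc1.Init.OutPut         = RTC_OUTPUT_DISABLE;
  hrtc1.Init.OutPutPolarity = RTC_OUTPUT_POLARITY_HIGH;
  hrtc1.Init.OutPutType     = RTC_OUTPUT_TYPE_OPENDRAIN;
  return HAL_RTC_Init(&hrtc1);
}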
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_DeInit(RTC_HandleTypeDef *hrtc) +{ + uint32_t tickstart = 0; + + /* Check the parameters */ + assert_param(IS_RTC_ALL_INSTANCE(hrtc->Instance)); + + /* Set RTC state */ + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Set Initialization mode */ + if(RTC_EnterInitMode(hrtc) != HAL_OK) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Set RTC state */ + hrtc->State = HAL_RTC_STATE_ERROR; + + return HAL_ERROR; + } + else + { + /* Reset TR, DR and CR registers */ + hrtc->Instance->TR = (uint32_t)0x00000000; + hrtc->Instance->DR = (uint32_t)0x00002101; + /* Reset All CR bits except CR[2:0] */ + hrtc->Instance->CR &= (uint32_t)0x00000007; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till WUTWF flag is set and if Time out is reached exit */ + while(((hrtc->Instance->ISR) & RTC_ISR_WUTWF) == (uint32_t)RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Set RTC state */ + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + return HAL_TIMEOUT; + } + } + + /* Reset all RTC CR register bits */ + hrtc->Instance->CR &= (uint32_t)0x00000000; + hrtc->Instance->WUTR = (uint32_t)0x0000FFFF; + hrtc->Instance->PRER = (uint32_t)0x007F00FF; + hrtc->Instance->ALRMAR = (uint32_t)0x00000000; + hrtc->Instance->ALRMBR = (uint32_t)0x00000000; + hrtc->Instance->SHIFTR = (uint32_t)0x00000000; + hrtc->Instance->CALR = (uint32_t)0x00000000; + hrtc->Instance->ALRMASSR = (uint32_t)0x00000000; + hrtc->Instance->ALRMBSSR = (uint32_t)0x00000000; + + /* Reset ISR register and exit initialization mode */ + hrtc->Instance->ISR = (uint32_t)0x00000000; + + /* Reset Tamper and alternate functions configuration register */ + hrtc->Instance->TAMPCR = 0x00000000; + + /* Reset Option register */ + hrtc->Instance->OR = 0x00000000; + + /* If RTC_CR_BYPSHAD bit = 0, wait for synchro else this check is not needed */ + if((hrtc->Instance->CR & RTC_CR_BYPSHAD) == RESET) + { + if(HAL_RTC_WaitForSynchro(hrtc) != HAL_OK) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_ERROR; + + return HAL_ERROR; + } + } + } + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) + if(hrtc->MspDeInitCallback == NULL) + { + hrtc->MspDeInitCallback = HAL_RTC_MspDeInit; + } + + /* DeInit the low level hardware: CLOCK, NVIC.*/ + hrtc->MspDeInitCallback(hrtc); + +#else + /* De-Initialize RTC MSP */ + HAL_RTC_MspDeInit(hrtc); +#endif /* (USE_HAL_RTC_REGISTER_CALLBACKS) */ + + hrtc->State = HAL_RTC_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User RTC Callback + * To be used instead of the weak predefined callback + * @param hrtc RTC handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_RTC_ALARM_A_EVENT_CB_ID Alarm A Event Callback ID + * @arg @ref HAL_RTC_ALARM_B_EVENT_CB_ID Alarm B Event Callback ID + * @arg @ref HAL_RTC_TIMESTAMP_EVENT_CB_ID TimeStamp Event Callback ID + * @arg @ref HAL_RTC_WAKEUPTIMER_EVENT_CB_ID Wake-Up Timer Event Callback ID + * @arg @ref 
HAL_RTC_TAMPER1_EVENT_CB_ID Tamper 1 Callback ID + * @arg @ref HAL_RTC_TAMPER2_EVENT_CB_ID Tamper 2 Callback ID + * @arg @ref HAL_RTC_TAMPER3_EVENT_CB_ID Tamper 3 Callback ID + * @arg @ref HAL_RTC_MSPINIT_CB_ID Msp Init callback ID + * @arg @ref HAL_RTC_MSPDEINIT_CB_ID Msp DeInit callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_RegisterCallback(RTC_HandleTypeDef *hrtc, HAL_RTC_CallbackIDTypeDef CallbackID, pRTC_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if(pCallback == NULL) + { + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hrtc); + + if(HAL_RTC_STATE_READY == hrtc->State) + { + switch (CallbackID) + { + case HAL_RTC_ALARM_A_EVENT_CB_ID : + hrtc->AlarmAEventCallback = pCallback; + break; + + case HAL_RTC_ALARM_B_EVENT_CB_ID : + hrtc->AlarmBEventCallback = pCallback; + break; + + case HAL_RTC_TIMESTAMP_EVENT_CB_ID : + hrtc->TimeStampEventCallback = pCallback; + break; + + case HAL_RTC_WAKEUPTIMER_EVENT_CB_ID : + hrtc->WakeUpTimerEventCallback = pCallback; + break; + + case HAL_RTC_TAMPER1_EVENT_CB_ID : + hrtc->Tamper1EventCallback = pCallback; + break; + + case HAL_RTC_TAMPER2_EVENT_CB_ID : + hrtc->Tamper2EventCallback = pCallback; + break; + + case HAL_RTC_TAMPER3_EVENT_CB_ID : + hrtc->Tamper3EventCallback = pCallback; + break; + + case HAL_RTC_MSPINIT_CB_ID : + hrtc->MspInitCallback = pCallback; + break; + + case HAL_RTC_MSPDEINIT_CB_ID : + hrtc->MspDeInitCallback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if(HAL_RTC_STATE_RESET == hrtc->State) + { + switch (CallbackID) + { + case HAL_RTC_MSPINIT_CB_ID : + hrtc->MspInitCallback = pCallback; + break; + + case HAL_RTC_MSPDEINIT_CB_ID : + hrtc->MspDeInitCallback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hrtc); + + return status; +} + +/** + * @brief Unregister an RTC Callback + * RTC callabck is redirected to the weak predefined callback + * @param hrtc RTC handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_RTC_ALARM_A_EVENT_CB_ID Alarm A Event Callback ID + * @arg @ref HAL_RTC_ALARM_B_EVENT_CB_ID Alarm B Event Callback ID + * @arg @ref HAL_RTC_TIMESTAMP_EVENT_CB_ID TimeStamp Event Callback ID + * @arg @ref HAL_RTC_WAKEUPTIMER_EVENT_CB_ID Wake-Up Timer Event Callback ID + * @arg @ref HAL_RTC_TAMPER1_EVENT_CB_ID Tamper 1 Callback ID + * @arg @ref HAL_RTC_TAMPER2_EVENT_CB_ID Tamper 2 Callback ID + * @arg @ref HAL_RTC_TAMPER3_EVENT_CB_ID Tamper 3 Callback ID + * @arg @ref HAL_RTC_MSPINIT_CB_ID Msp Init callback ID + * @arg @ref HAL_RTC_MSPDEINIT_CB_ID Msp DeInit callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_UnRegisterCallback(RTC_HandleTypeDef *hrtc, HAL_RTC_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hrtc); + + if(HAL_RTC_STATE_READY == hrtc->State) + { + switch (CallbackID) + { + case HAL_RTC_ALARM_A_EVENT_CB_ID : + hrtc->AlarmAEventCallback = HAL_RTC_AlarmAEventCallback; /* Legacy weak AlarmAEventCallback */ + break; + + case HAL_RTC_ALARM_B_EVENT_CB_ID : + hrtc->AlarmBEventCallback = HAL_RTCEx_AlarmBEventCallback; /* Legacy weak AlarmBEventCallback */ + break; + + case HAL_RTC_TIMESTAMP_EVENT_CB_ID : + 
hrtc->TimeStampEventCallback = HAL_RTCEx_TimeStampEventCallback; /* Legacy weak TimeStampEventCallback */ + break; + + case HAL_RTC_WAKEUPTIMER_EVENT_CB_ID : + hrtc->WakeUpTimerEventCallback = HAL_RTCEx_WakeUpTimerEventCallback; /* Legacy weak WakeUpTimerEventCallback */ + break; + + case HAL_RTC_TAMPER1_EVENT_CB_ID : + hrtc->Tamper1EventCallback = HAL_RTCEx_Tamper1EventCallback; /* Legacy weak Tamper1EventCallback */ + break; + + case HAL_RTC_TAMPER2_EVENT_CB_ID : + hrtc->Tamper2EventCallback = HAL_RTCEx_Tamper2EventCallback; /* Legacy weak Tamper2EventCallback */ + break; + + case HAL_RTC_TAMPER3_EVENT_CB_ID : + hrtc->Tamper3EventCallback = HAL_RTCEx_Tamper3EventCallback; /* Legacy weak Tamper3EventCallback */ + break; + + case HAL_RTC_MSPINIT_CB_ID : + hrtc->MspInitCallback = HAL_RTC_MspInit; + break; + + case HAL_RTC_MSPDEINIT_CB_ID : + hrtc->MspDeInitCallback = HAL_RTC_MspDeInit; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if(HAL_RTC_STATE_RESET == hrtc->State) + { + switch (CallbackID) + { + case HAL_RTC_MSPINIT_CB_ID : + hrtc->MspInitCallback = HAL_RTC_MspInit; + break; + + case HAL_RTC_MSPDEINIT_CB_ID : + hrtc->MspDeInitCallback = HAL_RTC_MspDeInit; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hrtc); + + return status; +} +#endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ + +/** + * @brief Initializes the RTC MSP. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +__weak void HAL_RTC_MspInit(RTC_HandleTypeDef* hrtc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrtc); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RTC_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes the RTC MSP. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +__weak void HAL_RTC_MspDeInit(RTC_HandleTypeDef* hrtc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrtc); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RTC_MspDeInit could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup RTC_Group2 RTC Time and Date functions + * @brief RTC Time and Date functions + * +@verbatim + =============================================================================== + ##### RTC Time and Date functions ##### + =============================================================================== + + [..] This section provides functions allowing to configure Time and Date features + +@endverbatim + * @{ + */ + +/** + * @brief Sets RTC current time. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param sTime Pointer to Time structure + * @param Format Specifies the format of the entered parameters. 
+ * This parameter can be one of the following values: + * @arg FORMAT_BIN: Binary data format + * @arg FORMAT_BCD: BCD data format + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_SetTime(RTC_HandleTypeDef *hrtc, RTC_TimeTypeDef *sTime, uint32_t Format) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_RTC_FORMAT(Format)); + assert_param(IS_RTC_DAYLIGHT_SAVING(sTime->DayLightSaving)); + assert_param(IS_RTC_STORE_OPERATION(sTime->StoreOperation)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + if(Format == RTC_FORMAT_BIN) + { + if((hrtc->Instance->CR & RTC_CR_FMT) != (uint32_t)RESET) + { + assert_param(IS_RTC_HOUR12(sTime->Hours)); + assert_param(IS_RTC_HOURFORMAT12(sTime->TimeFormat)); + } + else + { + sTime->TimeFormat = 0x00; + assert_param(IS_RTC_HOUR24(sTime->Hours)); + } + assert_param(IS_RTC_MINUTES(sTime->Minutes)); + assert_param(IS_RTC_SECONDS(sTime->Seconds)); + + tmpreg = (uint32_t)(((uint32_t)RTC_ByteToBcd2(sTime->Hours) << 16) | \ + ((uint32_t)RTC_ByteToBcd2(sTime->Minutes) << 8) | \ + ((uint32_t)RTC_ByteToBcd2(sTime->Seconds)) | \ + (((uint32_t)sTime->TimeFormat) << 16)); + } + else + { + if((hrtc->Instance->CR & RTC_CR_FMT) != (uint32_t)RESET) + { + assert_param(IS_RTC_HOUR12(RTC_Bcd2ToByte(sTime->Hours))); + assert_param(IS_RTC_HOURFORMAT12(sTime->TimeFormat)); + } + else + { + sTime->TimeFormat = 0x00; + assert_param(IS_RTC_HOUR24(RTC_Bcd2ToByte(sTime->Hours))); + } + assert_param(IS_RTC_MINUTES(RTC_Bcd2ToByte(sTime->Minutes))); + assert_param(IS_RTC_SECONDS(RTC_Bcd2ToByte(sTime->Seconds))); + tmpreg = (((uint32_t)(sTime->Hours) << 16) | \ + ((uint32_t)(sTime->Minutes) << 8) | \ + ((uint32_t)sTime->Seconds) | \ + ((uint32_t)(sTime->TimeFormat) << 16)); + } + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Set Initialization mode */ + if(RTC_EnterInitMode(hrtc) != HAL_OK) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Set RTC state */ + hrtc->State = HAL_RTC_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_ERROR; + } + else + { + /* Set the RTC_TR register */ + hrtc->Instance->TR = (uint32_t)(tmpreg & RTC_TR_RESERVED_MASK); + + /* Clear the bits to be configured */ + hrtc->Instance->CR &= (uint32_t)~RTC_CR_BKP; + + /* Configure the RTC_CR register */ + hrtc->Instance->CR |= (uint32_t)(sTime->DayLightSaving | sTime->StoreOperation); + + /* Exit Initialization mode */ + hrtc->Instance->ISR &= (uint32_t)~RTC_ISR_INIT; + + /* If CR_BYPSHAD bit = 0, wait for synchro else this check is not needed */ + if((hrtc->Instance->CR & RTC_CR_BYPSHAD) == RESET) + { + if(HAL_RTC_WaitForSynchro(hrtc) != HAL_OK) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_ERROR; + } + } + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_READY; + + __HAL_UNLOCK(hrtc); + + return HAL_OK; + } +} + +/** + * @brief Gets RTC current time. + * @param hrtc RTC handle + * @param sTime Pointer to Time structure with Hours, Minutes and Seconds fields returned + * with input format (BIN or BCD), also SubSeconds field returning the + * RTC_SSR register content and SecondFraction field the Synchronous pre-scaler + * factor to be used for second fraction ratio computation. 
+ * @param Format Specifies the format of the entered parameters. + * This parameter can be one of the following values: + * @arg RTC_FORMAT_BIN: Binary data format + * @arg RTC_FORMAT_BCD: BCD data format + * @note You can use SubSeconds and SecondFraction (sTime structure fields returned) to convert SubSeconds + * value in second fraction ratio with time unit following generic formula: + * Second fraction ratio * time_unit= [(SecondFraction-SubSeconds)/(SecondFraction+1)] * time_unit + * This conversion can be performed only if no shift operation is pending (ie. SHFP=0) when PREDIV_S >= SS + * @note You must call HAL_RTC_GetDate() after HAL_RTC_GetTime() to unlock the values + * in the higher-order calendar shadow registers to ensure consistency between the time and date values. + * Reading RTC current time locks the values in calendar shadow registers until Current date is read + * to ensure consistency between the time and date values. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_GetTime(RTC_HandleTypeDef *hrtc, RTC_TimeTypeDef *sTime, uint32_t Format) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_RTC_FORMAT(Format)); + + /* Get subseconds values from the correspondent registers*/ + sTime->SubSeconds = (uint32_t)(hrtc->Instance->SSR); + + /* Get SecondFraction structure field from the corresponding register field*/ + sTime->SecondFraction = (uint32_t)(hrtc->Instance->PRER & RTC_PRER_PREDIV_S); + + /* Get the TR register */ + tmpreg = (uint32_t)(hrtc->Instance->TR & RTC_TR_RESERVED_MASK); + + /* Fill the structure fields with the read parameters */ + sTime->Hours = (uint8_t)((tmpreg & (RTC_TR_HT | RTC_TR_HU)) >> 16); + sTime->Minutes = (uint8_t)((tmpreg & (RTC_TR_MNT | RTC_TR_MNU)) >>8); + sTime->Seconds = (uint8_t)(tmpreg & (RTC_TR_ST | RTC_TR_SU)); + sTime->TimeFormat = (uint8_t)((tmpreg & (RTC_TR_PM)) >> 16); + + /* Check the input parameters format */ + if(Format == RTC_FORMAT_BIN) + { + /* Convert the time structure parameters to Binary format */ + sTime->Hours = (uint8_t)RTC_Bcd2ToByte(sTime->Hours); + sTime->Minutes = (uint8_t)RTC_Bcd2ToByte(sTime->Minutes); + sTime->Seconds = (uint8_t)RTC_Bcd2ToByte(sTime->Seconds); + } + + return HAL_OK; +} + +/** + * @brief Sets RTC current date. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param sDate Pointer to date structure + * @param Format specifies the format of the entered parameters. 
+ * This parameter can be one of the following values: + * @arg RTC_FORMAT_BIN: Binary data format + * @arg RTC_FORMAT_BCD: BCD data format + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_SetDate(RTC_HandleTypeDef *hrtc, RTC_DateTypeDef *sDate, uint32_t Format) +{ + uint32_t datetmpreg = 0; + + /* Check the parameters */ + assert_param(IS_RTC_FORMAT(Format)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + if((Format == RTC_FORMAT_BIN) && ((sDate->Month & 0x10U) == 0x10U)) + { + sDate->Month = (uint8_t)((sDate->Month & (uint8_t)~(0x10U)) + (uint8_t)0x0AU); + } + + assert_param(IS_RTC_WEEKDAY(sDate->WeekDay)); + + if(Format == RTC_FORMAT_BIN) + { + assert_param(IS_RTC_YEAR(sDate->Year)); + assert_param(IS_RTC_MONTH(sDate->Month)); + assert_param(IS_RTC_DATE(sDate->Date)); + + datetmpreg = (((uint32_t)RTC_ByteToBcd2(sDate->Year) << 16) | \ + ((uint32_t)RTC_ByteToBcd2(sDate->Month) << 8) | \ + ((uint32_t)RTC_ByteToBcd2(sDate->Date)) | \ + ((uint32_t)sDate->WeekDay << 13)); + } + else + { + assert_param(IS_RTC_YEAR(RTC_Bcd2ToByte(sDate->Year))); + assert_param(IS_RTC_MONTH(RTC_Bcd2ToByte(sDate->Month))); + assert_param(IS_RTC_DATE(RTC_Bcd2ToByte(sDate->Date))); + + datetmpreg = ((((uint32_t)sDate->Year) << 16) | \ + (((uint32_t)sDate->Month) << 8) | \ + ((uint32_t)sDate->Date) | \ + (((uint32_t)sDate->WeekDay) << 13)); + } + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Set Initialization mode */ + if(RTC_EnterInitMode(hrtc) != HAL_OK) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Set RTC state*/ + hrtc->State = HAL_RTC_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_ERROR; + } + else + { + /* Set the RTC_DR register */ + hrtc->Instance->DR = (uint32_t)(datetmpreg & RTC_DR_RESERVED_MASK); + + /* Exit Initialization mode */ + hrtc->Instance->ISR &= (uint32_t)~RTC_ISR_INIT; + + /* If CR_BYPSHAD bit = 0, wait for synchro else this check is not needed */ + if((hrtc->Instance->CR & RTC_CR_BYPSHAD) == RESET) + { + if(HAL_RTC_WaitForSynchro(hrtc) != HAL_OK) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_ERROR; + } + } + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_READY ; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; + } +} + +/** + * @brief Gets RTC current date. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param sDate Pointer to Date structure + * @param Format Specifies the format of the entered parameters. + * This parameter can be one of the following values: + * @arg RTC_FORMAT_BIN: Binary data format + * @arg RTC_FORMAT_BCD: BCD data format + * @note You must call HAL_RTC_GetDate() after HAL_RTC_GetTime() to unlock the values + * in the higher-order calendar shadow registers to ensure consistency between the time and date values. + * Reading RTC current time locks the values in calendar shadow registers until Current date is read. 
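[Editorial note, not part of the ST sources] The read order required by the note above, reusing the hypothetical hrtc1 handle: reading the time locks the calendar shadow registers, and the subsequent date read releases them, so the two values stay consistent.

/* Read a coherent time/date pair in binary format. */
void app_rtc_read_calendar(RTC_TimeTypeDef *t, RTC_DateTypeDef *d)
{
  HAL_RTC_GetTime(&hrtc1, t, RTC_FORMAT_BIN);   /* always first */
  HAL_RTC_GetDate(&hrtc1, d, RTC_FORMAT_BIN);   /* must follow to unlock the shadow registers */
}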
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_GetDate(RTC_HandleTypeDef *hrtc, RTC_DateTypeDef *sDate, uint32_t Format) +{ + uint32_t datetmpreg = 0; + + /* Check the parameters */ + assert_param(IS_RTC_FORMAT(Format)); + + /* Get the DR register */ + datetmpreg = (uint32_t)(hrtc->Instance->DR & RTC_DR_RESERVED_MASK); + + /* Fill the structure fields with the read parameters */ + sDate->Year = (uint8_t)((datetmpreg & (RTC_DR_YT | RTC_DR_YU)) >> 16); + sDate->Month = (uint8_t)((datetmpreg & (RTC_DR_MT | RTC_DR_MU)) >> 8); + sDate->Date = (uint8_t)(datetmpreg & (RTC_DR_DT | RTC_DR_DU)); + sDate->WeekDay = (uint8_t)((datetmpreg & (RTC_DR_WDU)) >> 13); + + /* Check the input parameters format */ + if(Format == RTC_FORMAT_BIN) + { + /* Convert the date structure parameters to Binary format */ + sDate->Year = (uint8_t)RTC_Bcd2ToByte(sDate->Year); + sDate->Month = (uint8_t)RTC_Bcd2ToByte(sDate->Month); + sDate->Date = (uint8_t)RTC_Bcd2ToByte(sDate->Date); + } + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup RTC_Group3 RTC Alarm functions + * @brief RTC Alarm functions + * +@verbatim + =============================================================================== + ##### RTC Alarm functions ##### + =============================================================================== + + [..] This section provides functions allowing to configure Alarm feature + +@endverbatim + * @{ + */ +/** + * @brief Sets the specified RTC Alarm. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param sAlarm Pointer to Alarm structure + * @param Format Specifies the format of the entered parameters. + * This parameter can be one of the following values: + * @arg FORMAT_BIN: Binary data format + * @arg FORMAT_BCD: BCD data format + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_SetAlarm(RTC_HandleTypeDef *hrtc, RTC_AlarmTypeDef *sAlarm, uint32_t Format) +{ + uint32_t tickstart = 0; + uint32_t tmpreg = 0, subsecondtmpreg = 0; + + /* Check the parameters */ + assert_param(IS_RTC_FORMAT(Format)); + assert_param(IS_RTC_ALARM(sAlarm->Alarm)); + assert_param(IS_RTC_ALARM_MASK(sAlarm->AlarmMask)); + assert_param(IS_RTC_ALARM_DATE_WEEKDAY_SEL(sAlarm->AlarmDateWeekDaySel)); + assert_param(IS_RTC_ALARM_SUB_SECOND_VALUE(sAlarm->AlarmTime.SubSeconds)); + assert_param(IS_RTC_ALARM_SUB_SECOND_MASK(sAlarm->AlarmSubSecondMask)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + if(Format == RTC_FORMAT_BIN) + { + if((hrtc->Instance->CR & RTC_CR_FMT) != (uint32_t)RESET) + { + assert_param(IS_RTC_HOUR12(sAlarm->AlarmTime.Hours)); + assert_param(IS_RTC_HOURFORMAT12(sAlarm->AlarmTime.TimeFormat)); + } + else + { + sAlarm->AlarmTime.TimeFormat = 0x00; + assert_param(IS_RTC_HOUR24(sAlarm->AlarmTime.Hours)); + } + assert_param(IS_RTC_MINUTES(sAlarm->AlarmTime.Minutes)); + assert_param(IS_RTC_SECONDS(sAlarm->AlarmTime.Seconds)); + + if(sAlarm->AlarmDateWeekDaySel == RTC_ALARMDATEWEEKDAYSEL_DATE) + { + assert_param(IS_RTC_ALARM_DATE_WEEKDAY_DATE(sAlarm->AlarmDateWeekDay)); + } + else + { + assert_param(IS_RTC_ALARM_DATE_WEEKDAY_WEEKDAY(sAlarm->AlarmDateWeekDay)); + } + + tmpreg = (((uint32_t)RTC_ByteToBcd2(sAlarm->AlarmTime.Hours) << 16) | \ + ((uint32_t)RTC_ByteToBcd2(sAlarm->AlarmTime.Minutes) << 8) | \ + ((uint32_t)RTC_ByteToBcd2(sAlarm->AlarmTime.Seconds)) | \ + ((uint32_t)(sAlarm->AlarmTime.TimeFormat) << 16) | \ + ((uint32_t)RTC_ByteToBcd2(sAlarm->AlarmDateWeekDay) << 24) | \ + 
((uint32_t)sAlarm->AlarmDateWeekDaySel) | \ + ((uint32_t)sAlarm->AlarmMask)); + } + else + { + if((hrtc->Instance->CR & RTC_CR_FMT) != (uint32_t)RESET) + { + assert_param(IS_RTC_HOUR12(RTC_Bcd2ToByte(sAlarm->AlarmTime.Hours))); + assert_param(IS_RTC_HOURFORMAT12(sAlarm->AlarmTime.TimeFormat)); + } + else + { + sAlarm->AlarmTime.TimeFormat = 0x00; + assert_param(IS_RTC_HOUR24(RTC_Bcd2ToByte(sAlarm->AlarmTime.Hours))); + } + + assert_param(IS_RTC_MINUTES(RTC_Bcd2ToByte(sAlarm->AlarmTime.Minutes))); + assert_param(IS_RTC_SECONDS(RTC_Bcd2ToByte(sAlarm->AlarmTime.Seconds))); + + if(sAlarm->AlarmDateWeekDaySel == RTC_ALARMDATEWEEKDAYSEL_DATE) + { + assert_param(IS_RTC_ALARM_DATE_WEEKDAY_DATE(RTC_Bcd2ToByte(sAlarm->AlarmDateWeekDay))); + } + else + { + assert_param(IS_RTC_ALARM_DATE_WEEKDAY_WEEKDAY(RTC_Bcd2ToByte(sAlarm->AlarmDateWeekDay))); + } + + tmpreg = (((uint32_t)(sAlarm->AlarmTime.Hours) << 16) | \ + ((uint32_t)(sAlarm->AlarmTime.Minutes) << 8) | \ + ((uint32_t) sAlarm->AlarmTime.Seconds) | \ + ((uint32_t)(sAlarm->AlarmTime.TimeFormat) << 16) | \ + ((uint32_t)(sAlarm->AlarmDateWeekDay) << 24) | \ + ((uint32_t)sAlarm->AlarmDateWeekDaySel) | \ + ((uint32_t)sAlarm->AlarmMask)); + } + + /* Configure the Alarm A or Alarm B Sub Second registers */ + subsecondtmpreg = (uint32_t)((uint32_t)(sAlarm->AlarmTime.SubSeconds) | (uint32_t)(sAlarm->AlarmSubSecondMask)); + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Configure the Alarm register */ + if(sAlarm->Alarm == RTC_ALARM_A) + { + /* Disable the Alarm A interrupt */ + __HAL_RTC_ALARMA_DISABLE(hrtc); + + /* In case of interrupt mode is used, the interrupt source must disabled */ + __HAL_RTC_ALARM_DISABLE_IT(hrtc, RTC_IT_ALRA); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till RTC ALRAWF flag is set and if Time out is reached exit */ + while(__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRAWF) == RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + + hrtc->Instance->ALRMAR = (uint32_t)tmpreg; + /* Configure the Alarm A Sub Second register */ + hrtc->Instance->ALRMASSR = subsecondtmpreg; + /* Configure the Alarm state: Enable Alarm */ + __HAL_RTC_ALARMA_ENABLE(hrtc); + } + else + { + /* Disable the Alarm B interrupt */ + __HAL_RTC_ALARMB_DISABLE(hrtc); + + /* In case of interrupt mode is used, the interrupt source must disabled */ + __HAL_RTC_ALARM_DISABLE_IT(hrtc, RTC_IT_ALRB); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till RTC ALRBWF flag is set and if Time out is reached exit */ + while(__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRBWF) == RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + + hrtc->Instance->ALRMBR = (uint32_t)tmpreg; + /* Configure the Alarm B Sub Second register */ + hrtc->Instance->ALRMBSSR = subsecondtmpreg; + /* Configure the Alarm state: Enable Alarm */ + __HAL_RTC_ALARMB_ENABLE(hrtc); + } + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ 
+ __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Sets the specified RTC Alarm with Interrupt + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param sAlarm Pointer to Alarm structure + * @param Format Specifies the format of the entered parameters. + * This parameter can be one of the following values: + * @arg FORMAT_BIN: Binary data format + * @arg FORMAT_BCD: BCD data format + * @note The Alarm register can only be written when the corresponding Alarm + * is disabled (Use the HAL_RTC_DeactivateAlarm()). + * @note The HAL_RTC_SetTime() must be called before enabling the Alarm feature. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_SetAlarm_IT(RTC_HandleTypeDef *hrtc, RTC_AlarmTypeDef *sAlarm, uint32_t Format) +{ + uint32_t tmpreg = 0U, subsecondtmpreg = 0U; + __IO uint32_t count = RTC_TIMEOUT_VALUE * (SystemCoreClock / 32U / 1000U) ; + + /* Check the parameters */ + assert_param(IS_RTC_FORMAT(Format)); + assert_param(IS_RTC_ALARM(sAlarm->Alarm)); + assert_param(IS_RTC_ALARM_MASK(sAlarm->AlarmMask)); + assert_param(IS_RTC_ALARM_DATE_WEEKDAY_SEL(sAlarm->AlarmDateWeekDaySel)); + assert_param(IS_RTC_ALARM_SUB_SECOND_VALUE(sAlarm->AlarmTime.SubSeconds)); + assert_param(IS_RTC_ALARM_SUB_SECOND_MASK(sAlarm->AlarmSubSecondMask)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + if(Format == RTC_FORMAT_BIN) + { + if((hrtc->Instance->CR & RTC_CR_FMT) != (uint32_t)RESET) + { + assert_param(IS_RTC_HOUR12(sAlarm->AlarmTime.Hours)); + assert_param(IS_RTC_HOURFORMAT12(sAlarm->AlarmTime.TimeFormat)); + } + else + { + sAlarm->AlarmTime.TimeFormat = 0x00U; + assert_param(IS_RTC_HOUR24(sAlarm->AlarmTime.Hours)); + } + assert_param(IS_RTC_MINUTES(sAlarm->AlarmTime.Minutes)); + assert_param(IS_RTC_SECONDS(sAlarm->AlarmTime.Seconds)); + + if(sAlarm->AlarmDateWeekDaySel == RTC_ALARMDATEWEEKDAYSEL_DATE) + { + assert_param(IS_RTC_ALARM_DATE_WEEKDAY_DATE(sAlarm->AlarmDateWeekDay)); + } + else + { + assert_param(IS_RTC_ALARM_DATE_WEEKDAY_WEEKDAY(sAlarm->AlarmDateWeekDay)); + } + tmpreg = (((uint32_t)RTC_ByteToBcd2(sAlarm->AlarmTime.Hours) << 16U) | \ + ((uint32_t)RTC_ByteToBcd2(sAlarm->AlarmTime.Minutes) << 8U) | \ + ((uint32_t)RTC_ByteToBcd2(sAlarm->AlarmTime.Seconds)) | \ + ((uint32_t)(sAlarm->AlarmTime.TimeFormat) << 16U) | \ + ((uint32_t)RTC_ByteToBcd2(sAlarm->AlarmDateWeekDay) << 24U) | \ + ((uint32_t)sAlarm->AlarmDateWeekDaySel) | \ + ((uint32_t)sAlarm->AlarmMask)); + } + else + { + if((hrtc->Instance->CR & RTC_CR_FMT) != (uint32_t)RESET) + { + assert_param(IS_RTC_HOUR12(RTC_Bcd2ToByte(sAlarm->AlarmTime.Hours))); + assert_param(IS_RTC_HOURFORMAT12(sAlarm->AlarmTime.TimeFormat)); + } + else + { + sAlarm->AlarmTime.TimeFormat = 0x00U; + assert_param(IS_RTC_HOUR24(RTC_Bcd2ToByte(sAlarm->AlarmTime.Hours))); + } + + assert_param(IS_RTC_MINUTES(RTC_Bcd2ToByte(sAlarm->AlarmTime.Minutes))); + assert_param(IS_RTC_SECONDS(RTC_Bcd2ToByte(sAlarm->AlarmTime.Seconds))); + + if(sAlarm->AlarmDateWeekDaySel == RTC_ALARMDATEWEEKDAYSEL_DATE) + { + assert_param(IS_RTC_ALARM_DATE_WEEKDAY_DATE(RTC_Bcd2ToByte(sAlarm->AlarmDateWeekDay))); + } + else + { + assert_param(IS_RTC_ALARM_DATE_WEEKDAY_WEEKDAY(RTC_Bcd2ToByte(sAlarm->AlarmDateWeekDay))); + } + tmpreg = (((uint32_t)(sAlarm->AlarmTime.Hours) << 16U) | \ + ((uint32_t)(sAlarm->AlarmTime.Minutes) << 8U) | \ + ((uint32_t) sAlarm->AlarmTime.Seconds) | \ + ((uint32_t)(sAlarm->AlarmTime.TimeFormat) << 16U) | \ + ((uint32_t)(sAlarm->AlarmDateWeekDay) << 24U) 
| \ + ((uint32_t)sAlarm->AlarmDateWeekDaySel) | \ + ((uint32_t)sAlarm->AlarmMask)); + } + /* Configure the Alarm A or Alarm B Sub Second registers */ + subsecondtmpreg = (uint32_t)((uint32_t)(sAlarm->AlarmTime.SubSeconds) | (uint32_t)(sAlarm->AlarmSubSecondMask)); + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Configure the Alarm register */ + if(sAlarm->Alarm == RTC_ALARM_A) + { + /* Disable the Alarm A interrupt */ + __HAL_RTC_ALARMA_DISABLE(hrtc); + + /* Clear flag alarm A */ + __HAL_RTC_ALARM_CLEAR_FLAG(hrtc, RTC_FLAG_ALRAF); + + /* Wait till RTC ALRAWF flag is set and if Time out is reached exit */ + do + { + if (count-- == 0U) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + while (__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRAWF) == RESET); + + hrtc->Instance->ALRMAR = (uint32_t)tmpreg; + /* Configure the Alarm A Sub Second register */ + hrtc->Instance->ALRMASSR = subsecondtmpreg; + /* Configure the Alarm state: Enable Alarm */ + __HAL_RTC_ALARMA_ENABLE(hrtc); + /* Configure the Alarm interrupt */ + __HAL_RTC_ALARM_ENABLE_IT(hrtc,RTC_IT_ALRA); + } + else + { + /* Disable the Alarm B interrupt */ + __HAL_RTC_ALARMB_DISABLE(hrtc); + + /* Clear flag alarm B */ + __HAL_RTC_ALARM_CLEAR_FLAG(hrtc, RTC_FLAG_ALRBF); + + /* Wait till RTC ALRBWF flag is set and if Time out is reached exit */ + do + { + if (count-- == 0U) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + while (__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRBWF) == RESET); + + hrtc->Instance->ALRMBR = (uint32_t)tmpreg; + /* Configure the Alarm B Sub Second register */ + hrtc->Instance->ALRMBSSR = subsecondtmpreg; + /* Configure the Alarm state: Enable Alarm */ + __HAL_RTC_ALARMB_ENABLE(hrtc); + /* Configure the Alarm interrupt */ + __HAL_RTC_ALARM_ENABLE_IT(hrtc, RTC_IT_ALRB); + } + + /* RTC Alarm Interrupt Configuration: EXTI configuration */ + __HAL_RTC_ALARM_EXTI_ENABLE_IT(); + + EXTI->RTSR |= RTC_EXTI_LINE_ALARM_EVENT; + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Deactivate the specified RTC Alarm + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param Alarm Specifies the Alarm. 
+ * This parameter can be one of the following values: + * @arg RTC_ALARM_A: AlarmA + * @arg RTC_ALARM_B: AlarmB + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_DeactivateAlarm(RTC_HandleTypeDef *hrtc, uint32_t Alarm) +{ + uint32_t tickstart = 0; + + /* Check the parameters */ + assert_param(IS_RTC_ALARM(Alarm)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + if(Alarm == RTC_ALARM_A) + { + /* AlarmA */ + __HAL_RTC_ALARMA_DISABLE(hrtc); + + /* In case of interrupt mode is used, the interrupt source must disabled */ + __HAL_RTC_ALARM_DISABLE_IT(hrtc, RTC_IT_ALRA); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till RTC ALRxWF flag is set and if Time out is reached exit */ + while(__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRAWF) == RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + } + else + { + /* AlarmB */ + __HAL_RTC_ALARMB_DISABLE(hrtc); + + /* In case of interrupt mode is used, the interrupt source must disabled */ + __HAL_RTC_ALARM_DISABLE_IT(hrtc,RTC_IT_ALRB); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till RTC ALRxWF flag is set and if Time out is reached exit */ + while(__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRBWF) == RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + } + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Gets the RTC Alarm value and masks. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param sAlarm Pointer to Date structure + * @param Alarm Specifies the Alarm. + * This parameter can be one of the following values: + * @arg RTC_ALARM_A: AlarmA + * @arg RTC_ALARM_B: AlarmB + * @param Format Specifies the format of the entered parameters. 
+ * This parameter can be one of the following values: + * @arg RTC_FORMAT_BIN: Binary data format + * @arg RTC_FORMAT_BCD: BCD data format + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_GetAlarm(RTC_HandleTypeDef *hrtc, RTC_AlarmTypeDef *sAlarm, uint32_t Alarm, uint32_t Format) +{ + uint32_t tmpreg = 0, subsecondtmpreg = 0; + + /* Check the parameters */ + assert_param(IS_RTC_FORMAT(Format)); + assert_param(IS_RTC_ALARM(Alarm)); + + if(Alarm == RTC_ALARM_A) + { + /* AlarmA */ + sAlarm->Alarm = RTC_ALARM_A; + + tmpreg = (uint32_t)(hrtc->Instance->ALRMAR); + subsecondtmpreg = (uint32_t)((hrtc->Instance->ALRMASSR ) & RTC_ALRMASSR_SS); + } + else + { + sAlarm->Alarm = RTC_ALARM_B; + + tmpreg = (uint32_t)(hrtc->Instance->ALRMBR); + subsecondtmpreg = (uint32_t)((hrtc->Instance->ALRMBSSR) & RTC_ALRMBSSR_SS); + } + + /* Fill the structure with the read parameters */ + sAlarm->AlarmTime.Hours = (uint32_t)((tmpreg & (RTC_ALRMAR_HT | RTC_ALRMAR_HU)) >> 16); + sAlarm->AlarmTime.Minutes = (uint32_t)((tmpreg & (RTC_ALRMAR_MNT | RTC_ALRMAR_MNU)) >> 8); + sAlarm->AlarmTime.Seconds = (uint32_t)(tmpreg & (RTC_ALRMAR_ST | RTC_ALRMAR_SU)); + sAlarm->AlarmTime.TimeFormat = (uint32_t)((tmpreg & RTC_ALRMAR_PM) >> 16); + sAlarm->AlarmTime.SubSeconds = (uint32_t) subsecondtmpreg; + sAlarm->AlarmDateWeekDay = (uint32_t)((tmpreg & (RTC_ALRMAR_DT | RTC_ALRMAR_DU)) >> 24); + sAlarm->AlarmDateWeekDaySel = (uint32_t)(tmpreg & RTC_ALRMAR_WDSEL); + sAlarm->AlarmMask = (uint32_t)(tmpreg & RTC_ALARMMASK_ALL); + + if(Format == RTC_FORMAT_BIN) + { + sAlarm->AlarmTime.Hours = RTC_Bcd2ToByte(sAlarm->AlarmTime.Hours); + sAlarm->AlarmTime.Minutes = RTC_Bcd2ToByte(sAlarm->AlarmTime.Minutes); + sAlarm->AlarmTime.Seconds = RTC_Bcd2ToByte(sAlarm->AlarmTime.Seconds); + sAlarm->AlarmDateWeekDay = RTC_Bcd2ToByte(sAlarm->AlarmDateWeekDay); + } + + return HAL_OK; +} + +/** + * @brief This function handles Alarm interrupt request. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +void HAL_RTC_AlarmIRQHandler(RTC_HandleTypeDef* hrtc) +{ + /* Get the AlarmA interrupt source enable status */ + if(__HAL_RTC_ALARM_GET_IT_SOURCE(hrtc, RTC_IT_ALRA) != (uint32_t)RESET) + { + /* Get the pending status of the AlarmA Interrupt */ + if(__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRAF) != (uint32_t)RESET) + { + /* AlarmA callback */ + #if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) + hrtc->AlarmAEventCallback(hrtc); + #else + HAL_RTC_AlarmAEventCallback(hrtc); + #endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ + + /* Clear the AlarmA interrupt pending bit */ + __HAL_RTC_ALARM_CLEAR_FLAG(hrtc,RTC_FLAG_ALRAF); + } + } + + /* Get the AlarmB interrupt source enable status */ + if(__HAL_RTC_ALARM_GET_IT_SOURCE(hrtc, RTC_IT_ALRB) != (uint32_t)RESET) + { + /* Get the pending status of the AlarmB Interrupt */ + if(__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRBF) != (uint32_t)RESET) + { + /* AlarmB callback */ + #if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) + hrtc->AlarmBEventCallback(hrtc); + #else + HAL_RTCEx_AlarmBEventCallback(hrtc); + #endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ + + /* Clear the AlarmB interrupt pending bit */ + __HAL_RTC_ALARM_CLEAR_FLAG(hrtc,RTC_FLAG_ALRBF); + } + } + + /* Clear the EXTI's line Flag for RTC Alarm */ + __HAL_RTC_ALARM_EXTI_CLEAR_FLAG(); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; +} + +/** + * @brief Alarm A callback. 
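+ * @note  Illustrative end-to-end Alarm A sketch (the handle name hrtc, the alarm values
+ *        and the ISR wiring are assumptions, not part of this driver). Time and date must
+ *        already have been set with HAL_RTC_SetTime()/HAL_RTC_SetDate():
+ * @code
+ *   RTC_AlarmTypeDef sAlarm = {0};
+ *   sAlarm.AlarmTime.Hours     = 6;
+ *   sAlarm.AlarmTime.Minutes   = 30;
+ *   sAlarm.AlarmMask           = RTC_ALARMMASK_DATEWEEKDAY;   // match the time only, any date
+ *   sAlarm.AlarmSubSecondMask  = RTC_ALARMSUBSECONDMASK_ALL;
+ *   sAlarm.AlarmDateWeekDaySel = RTC_ALARMDATEWEEKDAYSEL_DATE;
+ *   sAlarm.AlarmDateWeekDay    = 1;
+ *   sAlarm.Alarm               = RTC_ALARM_A;
+ *   HAL_RTC_SetAlarm_IT(&hrtc, &sAlarm, RTC_FORMAT_BIN);
+ *
+ *   void RTC_Alarm_IRQHandler(void) { HAL_RTC_AlarmIRQHandler(&hrtc); }  // typical NVIC ISR name
+ *   // HAL_RTC_AlarmAEventCallback() below can then be overridden in user code.
+ * @endcode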
+ * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +__weak void HAL_RTC_AlarmAEventCallback(RTC_HandleTypeDef *hrtc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrtc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_RTC_AlarmAEventCallback could be implemented in the user file + */ +} + +/** + * @brief This function handles AlarmA Polling request. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_PollForAlarmAEvent(RTC_HandleTypeDef *hrtc, uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRAF) == RESET) + { + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + hrtc->State = HAL_RTC_STATE_TIMEOUT; + return HAL_TIMEOUT; + } + } + } + + /* Clear the Alarm interrupt pending bit */ + __HAL_RTC_ALARM_CLEAR_FLAG(hrtc, RTC_FLAG_ALRAF); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup RTC_Group4 Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides functions allowing to + (+) Wait for RTC Time and Date Synchronization + +@endverbatim + * @{ + */ + +/** + * @brief Waits until the RTC Time and Date registers (RTC_TR and RTC_DR) are + * synchronized with RTC APB clock. + * @note The RTC Resynchronization mode is write protected, use the + * __HAL_RTC_WRITEPROTECTION_DISABLE() before calling this function. + * @note To read the calendar through the shadow registers after Calendar + * initialization, calendar update or after wake-up from low power modes + * the software must first clear the RSF flag. + * The software must then wait until it is set again before reading + * the calendar, which means that the calendar registers have been + * correctly copied into the RTC_TR and RTC_DR shadow registers. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTC_WaitForSynchro(RTC_HandleTypeDef* hrtc) +{ + uint32_t tickstart = 0; + + /* Clear RSF flag */ + hrtc->Instance->ISR &= (uint32_t)RTC_RSF_MASK; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait the registers to be synchronised */ + while((hrtc->Instance->ISR & RTC_ISR_RSF) == (uint32_t)RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup RTC_Group5 Peripheral State functions + * @brief Peripheral State functions + * +@verbatim + =============================================================================== + ##### Peripheral State functions ##### + =============================================================================== + [..] + This subsection provides functions allowing to + (+) Get RTC state + +@endverbatim + * @{ + */ +/** + * @brief Returns the RTC state. 
+ * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval HAL state + */ +HAL_RTCStateTypeDef HAL_RTC_GetState(RTC_HandleTypeDef* hrtc) +{ + return hrtc->State; +} + +/** + * @} + */ + +/** + * @brief Enters the RTC Initialization mode. + * @note The RTC Initialization mode is write protected, use the + * __HAL_RTC_WRITEPROTECTION_DISABLE() before calling this function. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval HAL status + */ +HAL_StatusTypeDef RTC_EnterInitMode(RTC_HandleTypeDef* hrtc) +{ + uint32_t tickstart = 0; + + /* Check if the Initialization mode is set */ + if((hrtc->Instance->ISR & RTC_ISR_INITF) == (uint32_t)RESET) + { + /* Set the Initialization mode */ + hrtc->Instance->ISR = (uint32_t)RTC_INIT_MASK; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till RTC is in INIT state and if Time out is reached exit */ + while((hrtc->Instance->ISR & RTC_ISR_INITF) == (uint32_t)RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + return HAL_OK; +} + + +/** + * @brief Converts a 2 digit decimal to BCD format. + * @param Value Byte to be converted + * @retval Converted byte + */ +uint8_t RTC_ByteToBcd2(uint8_t Value) +{ + uint32_t bcdhigh = 0; + + while(Value >= 10) + { + bcdhigh++; + Value -= 10; + } + + return ((uint8_t)(bcdhigh << 4) | Value); +} + +/** + * @brief Converts from 2 digit BCD to Binary. + * @param Value BCD value to be converted + * @retval Converted word + */ +uint8_t RTC_Bcd2ToByte(uint8_t Value) +{ + uint32_t tmp = 0; + tmp = ((uint8_t)(Value & (uint8_t)0xF0) >> (uint8_t)0x4) * 10; + return (tmp + (Value & (uint8_t)0x0F)); +} + +/** + * @} + */ + +#endif /* HAL_RTC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc_ex.c new file mode 100644 index 000000000..93592802c --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rtc_ex.c @@ -0,0 +1,1875 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rtc_ex.c + * @author MCD Application Team + * @brief RTC HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Real Time Clock (RTC) Extension peripheral: + * + RTC Time Stamp functions + * + RTC Tamper functions + * + RTC Wake-up functions + * + Extension Control functions + * + Extension RTC features functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (+) Enable the RTC domain access. + (+) Configure the RTC Prescaler (Asynchronous and Synchronous) and RTC hour + format using the HAL_RTC_Init() function. + + *** RTC Wakeup configuration *** + ================================ + [..] + (+) To configure the RTC Wakeup Clock source and Counter use the HAL_RTC_SetWakeUpTimer() + function. You can also configure the RTC Wakeup timer in interrupt mode + using the HAL_RTC_SetWakeUpTimer_IT() function. 
+ (+) To read the RTC WakeUp Counter register, use the HAL_RTC_GetWakeUpTimer() + function. + + *** TimeStamp configuration *** + =============================== + [..] + (+) Enables the RTC TimeStamp using the HAL_RTC_SetTimeStamp() function. + You can also configure the RTC TimeStamp with interrupt mode using the + HAL_RTC_SetTimeStamp_IT() function. + (+) To read the RTC TimeStamp Time and Date register, use the HAL_RTC_GetTimeStamp() + function. + + *** Internal TimeStamp configuration *** + =============================== + [..] + (+) Enables the RTC internal TimeStamp using the HAL_RTC_SetInternalTimeStamp() function. + (+) To read the RTC TimeStamp Time and Date register, use the HAL_RTC_GetTimeStamp() + function. + + *** Tamper configuration *** + ============================ + [..] + (+) Enable the RTC Tamper and Configure the Tamper filter count, trigger Edge + or Level according to the Tamper filter (if equal to 0 Edge else Level) + value, sampling frequency, NoErase, MaskFlag, precharge or discharge and + Pull-UP using the HAL_RTC_SetTamper() function. You can configure RTC Tamper + with interrupt mode using HAL_RTC_SetTamper_IT() function. + (+) The default configuration of the Tamper erases the backup registers. To avoid + erase, enable the NoErase field on the RTC_TAMPCR register. + + *** Backup Data Registers configuration *** + =========================================== + [..] + (+) To write to the RTC Backup Data registers, use the HAL_RTC_BKUPWrite() + function. + (+) To read the RTC Backup Data registers, use the HAL_RTC_BKUPRead() + function. + + @endverbatim + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics.
+ * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup RTCEx RTCEx + * @brief RTC Extended HAL module driver + * @{ + */ + +#ifdef HAL_RTC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup RTCEx_Exported_Functions RTCEx Exported Functions + * @{ + */ + + +/** @defgroup RTCEx_Group1 RTC TimeStamp and Tamper functions + * @brief RTC TimeStamp and Tamper functions + * +@verbatim + =============================================================================== + ##### RTC TimeStamp and Tamper functions ##### + =============================================================================== + + [..] This section provides functions allowing to configure TimeStamp feature + +@endverbatim + * @{ + */ + +/** + * @brief Sets TimeStamp. + * @note This API must be called before enabling the TimeStamp feature. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param TimeStampEdge Specifies the pin edge on which the TimeStamp is + * activated. + * This parameter can be one of the following values: + * @arg RTC_TIMESTAMPEDGE_RISING: the Time stamp event occurs on the + * rising edge of the related pin. + * @arg RTC_TIMESTAMPEDGE_FALLING: the Time stamp event occurs on the + * falling edge of the related pin. + * @param RTC_TimeStampPin specifies the RTC TimeStamp Pin. + * This parameter can be one of the following values: + * @arg RTC_TIMESTAMPPIN_PC13: PC13 is selected as RTC TimeStamp Pin. + * @arg RTC_TIMESTAMPPIN_PI8: PI8 is selected as RTC TimeStamp Pin. + * @arg RTC_TIMESTAMPPIN_PC1: PC1 is selected as RTC TimeStamp Pin. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetTimeStamp(RTC_HandleTypeDef *hrtc, uint32_t TimeStampEdge, uint32_t RTC_TimeStampPin) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_TIMESTAMP_EDGE(TimeStampEdge)); + assert_param(IS_RTC_TIMESTAMP_PIN(RTC_TimeStampPin)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Get the RTC_CR register and clear the bits to be configured */ + tmpreg = (uint32_t)(hrtc->Instance->CR & (uint32_t)~(RTC_CR_TSEDGE | RTC_CR_TSE)); + + tmpreg|= TimeStampEdge; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + hrtc->Instance->OR &= (uint32_t)~RTC_OR_TSINSEL; + hrtc->Instance->OR |= (uint32_t)(RTC_TimeStampPin); + + /* Configure the Time Stamp TSEDGE and Enable bits */ + hrtc->Instance->CR = (uint32_t)tmpreg; + + __HAL_RTC_TIMESTAMP_ENABLE(hrtc); + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Sets TimeStamp with Interrupt. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @note This API must be called before enabling the TimeStamp feature. + * @param TimeStampEdge Specifies the pin edge on which the TimeStamp is + * activated. + * This parameter can be one of the following values: + * @arg RTC_TIMESTAMPEDGE_RISING: the Time stamp event occurs on the + * rising edge of the related pin. + * @arg RTC_TIMESTAMPEDGE_FALLING: the Time stamp event occurs on the + * falling edge of the related pin. + * @param RTC_TimeStampPin Specifies the RTC TimeStamp Pin. + * This parameter can be one of the following values: + * @arg RTC_TIMESTAMPPIN_PC13: PC13 is selected as RTC TimeStamp Pin. + * @arg RTC_TIMESTAMPPIN_PI8: PI8 is selected as RTC TimeStamp Pin. + * @arg RTC_TIMESTAMPPIN_PC1: PC1 is selected as RTC TimeStamp Pin. 
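+ * @note  Illustrative sketch (the handle name hrtc and the chosen pin/edge are assumptions):
+ * @code
+ *   HAL_RTCEx_SetTimeStamp_IT(&hrtc, RTC_TIMESTAMPEDGE_RISING, RTC_TIMESTAMPPIN_PC13);
+ *
+ *   void HAL_RTCEx_TimeStampEventCallback(RTC_HandleTypeDef *hrtc)  // overrides the __weak default
+ *   {
+ *     RTC_TimeTypeDef sTsTime;
+ *     RTC_DateTypeDef sTsDate;
+ *     HAL_RTCEx_GetTimeStamp(hrtc, &sTsTime, &sTsDate, RTC_FORMAT_BIN);
+ *   }
+ * @endcode
+ *        The TAMP_STAMP interrupt must also be enabled in the NVIC, and its handler
+ *        (typically TAMP_STAMP_IRQHandler) should call HAL_RTCEx_TamperTimeStampIRQHandler().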
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetTimeStamp_IT(RTC_HandleTypeDef *hrtc, uint32_t TimeStampEdge, uint32_t RTC_TimeStampPin) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_TIMESTAMP_EDGE(TimeStampEdge)); + assert_param(IS_RTC_TIMESTAMP_PIN(RTC_TimeStampPin)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Get the RTC_CR register and clear the bits to be configured */ + tmpreg = (uint32_t)(hrtc->Instance->CR & (uint32_t)~(RTC_CR_TSEDGE | RTC_CR_TSE)); + + tmpreg |= TimeStampEdge; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Configure the Time Stamp TSEDGE and Enable bits */ + hrtc->Instance->CR = (uint32_t)tmpreg; + + hrtc->Instance->OR &= (uint32_t)~RTC_OR_TSINSEL; + hrtc->Instance->OR |= (uint32_t)(RTC_TimeStampPin); + + /* Clear RTC Timestamp flag */ + __HAL_RTC_TIMESTAMP_CLEAR_FLAG(hrtc, RTC_FLAG_TSF); + + __HAL_RTC_TIMESTAMP_ENABLE(hrtc); + + /* Enable IT timestamp */ + __HAL_RTC_TIMESTAMP_ENABLE_IT(hrtc,RTC_IT_TS); + + /* RTC timestamp Interrupt Configuration: EXTI configuration */ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_IT(); + + EXTI->RTSR |= RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT; + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Deactivates TimeStamp. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_DeactivateTimeStamp(RTC_HandleTypeDef *hrtc) +{ + uint32_t tmpreg = 0U; + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* In case of interrupt mode is used, the interrupt source must disabled */ + __HAL_RTC_TIMESTAMP_DISABLE_IT(hrtc, RTC_IT_TS); + + /* Get the RTC_CR register and clear the bits to be configured */ + tmpreg = (uint32_t)(hrtc->Instance->CR & (uint32_t)~(RTC_CR_TSEDGE | RTC_CR_TSE)); + + /* Configure the Time Stamp TSEDGE and Enable bits */ + hrtc->Instance->CR = (uint32_t)tmpreg; + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Sets Internal TimeStamp. + * @note This API must be called before enabling the internal TimeStamp feature. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetInternalTimeStamp(RTC_HandleTypeDef *hrtc) +{ + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Configure the internal Time Stamp Enable bits */ + __HAL_RTC_INTERNAL_TIMESTAMP_ENABLE(hrtc); + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Deactivates internal TimeStamp. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_DeactivateInternalTimeStamp(RTC_HandleTypeDef *hrtc) +{ + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Configure the internal Time Stamp Enable bits */ + __HAL_RTC_INTERNAL_TIMESTAMP_DISABLE(hrtc); + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Gets the RTC TimeStamp value. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param sTimeStamp Pointer to Time structure + * @param sTimeStampDate Pointer to Date structure + * @param Format specifies the format of the entered parameters. + * This parameter can be one of the following values: + * FORMAT_BIN: Binary data format + * FORMAT_BCD: BCD data format + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_GetTimeStamp(RTC_HandleTypeDef *hrtc, RTC_TimeTypeDef* sTimeStamp, RTC_DateTypeDef* sTimeStampDate, uint32_t Format) +{ + uint32_t tmptime = 0U, tmpdate = 0U; + + /* Check the parameters */ + assert_param(IS_RTC_FORMAT(Format)); + + /* Get the TimeStamp time and date registers values */ + tmptime = (uint32_t)(hrtc->Instance->TSTR & RTC_TR_RESERVED_MASK); + tmpdate = (uint32_t)(hrtc->Instance->TSDR & RTC_DR_RESERVED_MASK); + + /* Fill the Time structure fields with the read parameters */ + sTimeStamp->Hours = (uint8_t)((tmptime & (RTC_TR_HT | RTC_TR_HU)) >> 16U); + sTimeStamp->Minutes = (uint8_t)((tmptime & (RTC_TR_MNT | RTC_TR_MNU)) >> 8U); + sTimeStamp->Seconds = (uint8_t)(tmptime & (RTC_TR_ST | RTC_TR_SU)); + sTimeStamp->TimeFormat = (uint8_t)((tmptime & (RTC_TR_PM)) >> 16U); + sTimeStamp->SubSeconds = (uint32_t) hrtc->Instance->TSSSR; + + /* Fill the Date structure fields with the read parameters */ + sTimeStampDate->Year = 0U; + sTimeStampDate->Month = (uint8_t)((tmpdate & (RTC_DR_MT | RTC_DR_MU)) >> 8U); + sTimeStampDate->Date = (uint8_t)(tmpdate & (RTC_DR_DT | RTC_DR_DU)); + sTimeStampDate->WeekDay = (uint8_t)((tmpdate & (RTC_DR_WDU)) >> 13U); + + /* Check the input parameters format */ + if(Format == RTC_FORMAT_BIN) + { + /* Convert the TimeStamp structure parameters to Binary format */ + sTimeStamp->Hours = (uint8_t)RTC_Bcd2ToByte(sTimeStamp->Hours); + sTimeStamp->Minutes = (uint8_t)RTC_Bcd2ToByte(sTimeStamp->Minutes); + sTimeStamp->Seconds = (uint8_t)RTC_Bcd2ToByte(sTimeStamp->Seconds); + + /* Convert the DateTimeStamp structure parameters to Binary format */ + sTimeStampDate->Month = (uint8_t)RTC_Bcd2ToByte(sTimeStampDate->Month); + sTimeStampDate->Date = (uint8_t)RTC_Bcd2ToByte(sTimeStampDate->Date); + sTimeStampDate->WeekDay = (uint8_t)RTC_Bcd2ToByte(sTimeStampDate->WeekDay); + } + + /* Clear the TIMESTAMP Flag */ + __HAL_RTC_TIMESTAMP_CLEAR_FLAG(hrtc, RTC_FLAG_TSF); + + return HAL_OK; +} + +/** + * @brief Sets Tamper + * @note By calling this API we disable the tamper interrupt for all tampers. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param sTamper Pointer to Tamper Structure. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetTamper(RTC_HandleTypeDef *hrtc, RTC_TamperTypeDef* sTamper) +{ + uint32_t tmpreg = 0U; + + /* Check the parameters */ + assert_param(IS_RTC_TAMPER(sTamper->Tamper)); + assert_param(IS_RTC_TAMPER_TRIGGER(sTamper->Trigger)); + assert_param(IS_RTC_TAMPER_ERASE_MODE(sTamper->NoErase)); + assert_param(IS_RTC_TAMPER_MASKFLAG_STATE(sTamper->MaskFlag)); + assert_param(IS_RTC_TAMPER_FILTER(sTamper->Filter)); + assert_param(IS_RTC_TAMPER_SAMPLING_FREQ(sTamper->SamplingFrequency)); + assert_param(IS_RTC_TAMPER_PRECHARGE_DURATION(sTamper->PrechargeDuration)); + assert_param(IS_RTC_TAMPER_PULLUP_STATE(sTamper->TamperPullUp)); + assert_param(IS_RTC_TAMPER_TIMESTAMPONTAMPER_DETECTION(sTamper->TimeStampOnTamperDetection)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + if(sTamper->Trigger != RTC_TAMPERTRIGGER_RISINGEDGE) + { + sTamper->Trigger = (uint32_t)(sTamper->Tamper << 1U); + } + + if(sTamper->NoErase != RTC_TAMPER_ERASE_BACKUP_ENABLE) + { + sTamper->NoErase = 0; + if((sTamper->Tamper & RTC_TAMPER_1) != 0U) + { + sTamper->NoErase |= RTC_TAMPCR_TAMP1NOERASE; + } + if((sTamper->Tamper & RTC_TAMPER_2) != 0U) + { + sTamper->NoErase |= RTC_TAMPCR_TAMP2NOERASE; + } + if((sTamper->Tamper & RTC_TAMPER_3) != 0U) + { + sTamper->NoErase |= RTC_TAMPCR_TAMP3NOERASE; + } + } + + if(sTamper->MaskFlag != RTC_TAMPERMASK_FLAG_DISABLE) + { + sTamper->MaskFlag = 0; + if((sTamper->Tamper & RTC_TAMPER_1) != 0U) + { + sTamper->MaskFlag |= RTC_TAMPCR_TAMP1MF; + } + if((sTamper->Tamper & RTC_TAMPER_2) != 0U) + { + sTamper->MaskFlag |= RTC_TAMPCR_TAMP2MF; + } + if((sTamper->Tamper & RTC_TAMPER_3) != 0U) + { + sTamper->MaskFlag |= RTC_TAMPCR_TAMP3MF; + } + } + + tmpreg = ((uint32_t)sTamper->Tamper | (uint32_t)sTamper->Trigger | (uint32_t)sTamper->NoErase |\ + (uint32_t)sTamper->MaskFlag | (uint32_t)sTamper->Filter | (uint32_t)sTamper->SamplingFrequency |\ + (uint32_t)sTamper->PrechargeDuration | (uint32_t)sTamper->TamperPullUp | sTamper->TimeStampOnTamperDetection); + + hrtc->Instance->TAMPCR &= (uint32_t)~((uint32_t)sTamper->Tamper | (uint32_t)(sTamper->Tamper << 1) | (uint32_t)RTC_TAMPCR_TAMPTS |\ + (uint32_t)RTC_TAMPCR_TAMPFREQ | (uint32_t)RTC_TAMPCR_TAMPFLT | (uint32_t)RTC_TAMPCR_TAMPPRCH |\ + (uint32_t)RTC_TAMPCR_TAMPPUDIS | (uint32_t)RTC_TAMPCR_TAMPIE | (uint32_t)RTC_TAMPCR_TAMP1IE |\ + (uint32_t)RTC_TAMPCR_TAMP2IE | (uint32_t)RTC_TAMPCR_TAMP3IE | (uint32_t)RTC_TAMPCR_TAMP1NOERASE |\ + (uint32_t)RTC_TAMPCR_TAMP2NOERASE | (uint32_t)RTC_TAMPCR_TAMP3NOERASE | (uint32_t)RTC_TAMPCR_TAMP1MF |\ + (uint32_t)RTC_TAMPCR_TAMP2MF | (uint32_t)RTC_TAMPCR_TAMP3MF); + + hrtc->Instance->TAMPCR |= tmpreg; + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Sets Tamper with interrupt. + * @note By calling this API we force the tamper interrupt for all tampers. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param sTamper Pointer to RTC Tamper. 
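+ * @note  Illustrative configuration sketch (the handle name hrtc and the chosen field
+ *        values are assumptions; adjust trigger, filter and sampling to the board):
+ * @code
+ *   RTC_TamperTypeDef sTamper = {0};
+ *   sTamper.Tamper                     = RTC_TAMPER_1;
+ *   sTamper.Interrupt                  = RTC_TAMPER1_INTERRUPT;
+ *   sTamper.Trigger                    = RTC_TAMPERTRIGGER_FALLINGEDGE;
+ *   sTamper.NoErase                    = RTC_TAMPER_ERASE_BACKUP_ENABLE;
+ *   sTamper.MaskFlag                   = RTC_TAMPERMASK_FLAG_DISABLE;
+ *   sTamper.Filter                     = RTC_TAMPERFILTER_DISABLE;
+ *   sTamper.SamplingFrequency          = RTC_TAMPERSAMPLINGFREQ_RTCCLK_DIV32768;
+ *   sTamper.PrechargeDuration          = RTC_TAMPER_PRECHARGE_DURATION_1RTCCLK;
+ *   sTamper.TamperPullUp               = RTC_TAMPER_PULLUP_ENABLE;
+ *   sTamper.TimeStampOnTamperDetection = RTC_TIMESTAMPONTAMPERDETECTION_ENABLE;
+ *   HAL_RTCEx_SetTamper_IT(&hrtc, &sTamper);  // HAL_RTCEx_Tamper1EventCallback() fires on detection
+ * @endcode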
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetTamper_IT(RTC_HandleTypeDef *hrtc, RTC_TamperTypeDef* sTamper) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_RTC_TAMPER(sTamper->Tamper)); + assert_param(IS_RTC_TAMPER_INTERRUPT(sTamper->Interrupt)); + assert_param(IS_RTC_TAMPER_TRIGGER(sTamper->Trigger)); + assert_param(IS_RTC_TAMPER_ERASE_MODE(sTamper->NoErase)); + assert_param(IS_RTC_TAMPER_MASKFLAG_STATE(sTamper->MaskFlag)); + assert_param(IS_RTC_TAMPER_FILTER(sTamper->Filter)); + assert_param(IS_RTC_TAMPER_SAMPLING_FREQ(sTamper->SamplingFrequency)); + assert_param(IS_RTC_TAMPER_PRECHARGE_DURATION(sTamper->PrechargeDuration)); + assert_param(IS_RTC_TAMPER_PULLUP_STATE(sTamper->TamperPullUp)); + assert_param(IS_RTC_TAMPER_TIMESTAMPONTAMPER_DETECTION(sTamper->TimeStampOnTamperDetection)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Configure the tamper trigger */ + if(sTamper->Trigger != RTC_TAMPERTRIGGER_RISINGEDGE) + { + sTamper->Trigger = (uint32_t)(sTamper->Tamper << 1); + } + + if(sTamper->NoErase != RTC_TAMPER_ERASE_BACKUP_ENABLE) + { + sTamper->NoErase = 0; + if((sTamper->Tamper & RTC_TAMPER_1) != 0) + { + sTamper->NoErase |= RTC_TAMPCR_TAMP1NOERASE; + } + if((sTamper->Tamper & RTC_TAMPER_2) != 0) + { + sTamper->NoErase |= RTC_TAMPCR_TAMP2NOERASE; + } + if((sTamper->Tamper & RTC_TAMPER_3) != 0) + { + sTamper->NoErase |= RTC_TAMPCR_TAMP3NOERASE; + } + } + + if(sTamper->MaskFlag != RTC_TAMPERMASK_FLAG_DISABLE) + { + sTamper->MaskFlag = 0; + if((sTamper->Tamper & RTC_TAMPER_1) != 0) + { + sTamper->MaskFlag |= RTC_TAMPCR_TAMP1MF; + } + if((sTamper->Tamper & RTC_TAMPER_2) != 0) + { + sTamper->MaskFlag |= RTC_TAMPCR_TAMP2MF; + } + if((sTamper->Tamper & RTC_TAMPER_3) != 0) + { + sTamper->MaskFlag |= RTC_TAMPCR_TAMP3MF; + } + } + + tmpreg = ((uint32_t)sTamper->Tamper | (uint32_t)sTamper->Interrupt | (uint32_t)sTamper->Trigger | (uint32_t)sTamper->NoErase |\ + (uint32_t)sTamper->MaskFlag | (uint32_t)sTamper->Filter | (uint32_t)sTamper->SamplingFrequency |\ + (uint32_t)sTamper->PrechargeDuration | (uint32_t)sTamper->TamperPullUp | sTamper->TimeStampOnTamperDetection); + + hrtc->Instance->TAMPCR &= (uint32_t)~((uint32_t)sTamper->Tamper | (uint32_t)(sTamper->Tamper << 1) | (uint32_t)RTC_TAMPCR_TAMPTS |\ + (uint32_t)RTC_TAMPCR_TAMPFREQ | (uint32_t)RTC_TAMPCR_TAMPFLT | (uint32_t)RTC_TAMPCR_TAMPPRCH |\ + (uint32_t)RTC_TAMPCR_TAMPPUDIS | (uint32_t)RTC_TAMPCR_TAMPIE | (uint32_t)RTC_TAMPCR_TAMP1IE |\ + (uint32_t)RTC_TAMPCR_TAMP2IE | (uint32_t)RTC_TAMPCR_TAMP3IE | (uint32_t)RTC_TAMPCR_TAMP1NOERASE |\ + (uint32_t)RTC_TAMPCR_TAMP2NOERASE | (uint32_t)RTC_TAMPCR_TAMP3NOERASE | (uint32_t)RTC_TAMPCR_TAMP1MF |\ + (uint32_t)RTC_TAMPCR_TAMP2MF | (uint32_t)RTC_TAMPCR_TAMP3MF); + + hrtc->Instance->TAMPCR |= tmpreg; + + if(sTamper->Tamper == RTC_TAMPER_1) + { + /* Clear RTC Tamper 1 flag */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc, RTC_FLAG_TAMP1F); + } + else if(sTamper->Tamper == RTC_TAMPER_2) + { + /* Clear RTC Tamper 2 flag */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc, RTC_FLAG_TAMP2F); + } + else + { + /* Clear RTC Tamper 3 flag */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc, RTC_FLAG_TAMP3F); + } + + /* RTC Tamper Interrupt Configuration: EXTI configuration */ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_IT(); + + EXTI->RTSR |= RTC_EXTI_LINE_TAMPER_TIMESTAMP_EVENT; + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Deactivates Tamper. 
+ * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param Tamper Selected tamper pin. + * This parameter can be RTC_Tamper_1 and/or RTC_TAMPER_2. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_DeactivateTamper(RTC_HandleTypeDef *hrtc, uint32_t Tamper) +{ + assert_param(IS_RTC_TAMPER(Tamper)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + +/* Disable the selected Tamper pin */ + hrtc->Instance->TAMPCR &= (uint32_t)~Tamper; + + if ((Tamper & RTC_TAMPER_1) != 0) + { + /* Disable the Tamper1 interrupt */ + hrtc->Instance->TAMPCR &= (uint32_t)~(RTC_IT_TAMP | RTC_IT_TAMP1); + } + if ((Tamper & RTC_TAMPER_2) != 0) + { + /* Disable the Tamper2 interrupt */ + hrtc->Instance->TAMPCR &= (uint32_t)~(RTC_IT_TAMP | RTC_IT_TAMP2); + } + if ((Tamper & RTC_TAMPER_3) != 0) + { + /* Disable the Tamper2 interrupt */ + hrtc->Instance->TAMPCR &= (uint32_t)~(RTC_IT_TAMP | RTC_IT_TAMP3); + } + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief This function handles TimeStamp interrupt request. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +void HAL_RTCEx_TamperTimeStampIRQHandler(RTC_HandleTypeDef *hrtc) +{ + /* Get the TimeStamp interrupt source enable status */ + if(__HAL_RTC_TIMESTAMP_GET_IT_SOURCE(hrtc, RTC_IT_TS) != (uint32_t)RESET) + { + /* Get the pending status of the TIMESTAMP Interrupt */ + if(__HAL_RTC_TIMESTAMP_GET_FLAG(hrtc, RTC_FLAG_TSF) != (uint32_t)RESET) + { + /* TIMESTAMP callback */ +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) + hrtc->TimeStampEventCallback(hrtc); +#else + HAL_RTCEx_TimeStampEventCallback(hrtc); +#endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ + + /* Clear the TIMESTAMP interrupt pending bit */ + __HAL_RTC_TIMESTAMP_CLEAR_FLAG(hrtc,RTC_FLAG_TSF); + } + } + + /* Get the Tamper1 interrupt source enable status */ + if(__HAL_RTC_TAMPER_GET_IT_SOURCE(hrtc, RTC_IT_TAMP | RTC_IT_TAMP1) != (uint32_t)RESET) + { + /* Get the pending status of the Tamper1 Interrupt */ + if(__HAL_RTC_TAMPER_GET_FLAG(hrtc, RTC_FLAG_TAMP1F) != (uint32_t)RESET) + { + /* Tamper callback */ +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) + hrtc->Tamper1EventCallback(hrtc); +#else + HAL_RTCEx_Tamper1EventCallback(hrtc); +#endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ + + /* Clear the Tamper interrupt pending bit */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc,RTC_FLAG_TAMP1F); + } + } + + /* Get the Tamper2 interrupt source enable status */ + if(__HAL_RTC_TAMPER_GET_IT_SOURCE(hrtc, RTC_IT_TAMP | RTC_IT_TAMP2) != (uint32_t)RESET) + { + /* Get the pending status of the Tamper2 Interrupt */ + if(__HAL_RTC_TAMPER_GET_FLAG(hrtc, RTC_FLAG_TAMP2F) != (uint32_t)RESET) + { + /* Tamper callback */ +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) + hrtc->Tamper2EventCallback(hrtc); +#else + HAL_RTCEx_Tamper2EventCallback(hrtc); +#endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ + + /* Clear the Tamper interrupt pending bit */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc, RTC_FLAG_TAMP2F); + } + } + + /* Get the Tamper3 interrupt source enable status */ + if(__HAL_RTC_TAMPER_GET_IT_SOURCE(hrtc, RTC_IT_TAMP | RTC_IT_TAMP3) != (uint32_t)RESET) + { + /* Get the pending status of the Tamper3 Interrupt */ + if(__HAL_RTC_TAMPER_GET_FLAG(hrtc, RTC_FLAG_TAMP3F) != (uint32_t)RESET) + { + /* Tamper callback */ +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) + hrtc->Tamper3EventCallback(hrtc); +#else + 
HAL_RTCEx_Tamper3EventCallback(hrtc); +#endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ + + /* Clear the Tamper interrupt pending bit */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc, RTC_FLAG_TAMP3F); + } + } + + /* Clear the EXTI's Flag for RTC TimeStamp and Tamper */ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_CLEAR_FLAG(); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; +} + +/** + * @brief TimeStamp callback. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +__weak void HAL_RTCEx_TimeStampEventCallback(RTC_HandleTypeDef *hrtc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrtc); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RTC_TimeStampEventCallback could be implemented in the user file + */ +} + +/** + * @brief Tamper 1 callback. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +__weak void HAL_RTCEx_Tamper1EventCallback(RTC_HandleTypeDef *hrtc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrtc); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RTC_Tamper1EventCallback could be implemented in the user file + */ +} + +/** + * @brief Tamper 2 callback. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +__weak void HAL_RTCEx_Tamper2EventCallback(RTC_HandleTypeDef *hrtc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrtc); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RTC_Tamper2EventCallback could be implemented in the user file + */ +} + +/** + * @brief Tamper 3 callback. + * @param hrtc RTC handle + * @retval None + */ +__weak void HAL_RTCEx_Tamper3EventCallback(RTC_HandleTypeDef *hrtc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrtc); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RTCEx_Tamper3EventCallback could be implemented in the user file + */ +} + +/** + * @brief This function handles TimeStamp polling request. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_PollForTimeStampEvent(RTC_HandleTypeDef *hrtc, uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_RTC_TIMESTAMP_GET_FLAG(hrtc, RTC_FLAG_TSF) == RESET) + { + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + hrtc->State = HAL_RTC_STATE_TIMEOUT; + return HAL_TIMEOUT; + } + } + } + + if(__HAL_RTC_TIMESTAMP_GET_FLAG(hrtc, RTC_FLAG_TSOVF) != RESET) + { + /* Clear the TIMESTAMP OverRun Flag */ + __HAL_RTC_TIMESTAMP_CLEAR_FLAG(hrtc, RTC_FLAG_TSOVF); + + /* Change TIMESTAMP state */ + hrtc->State = HAL_RTC_STATE_ERROR; + + return HAL_ERROR; + } + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + return HAL_OK; +} + +/** + * @brief This function handles Tamper1 Polling. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. 
+ * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_PollForTamper1Event(RTC_HandleTypeDef *hrtc, uint32_t Timeout) +{ + uint32_t tickstart = 0U; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Get the status of the Interrupt */ + while(__HAL_RTC_TAMPER_GET_FLAG(hrtc, RTC_FLAG_TAMP1F)== RESET) + { + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0U)||((HAL_GetTick() - tickstart ) > Timeout)) + { + hrtc->State = HAL_RTC_STATE_TIMEOUT; + return HAL_TIMEOUT; + } + } + } + + /* Clear the Tamper Flag */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc,RTC_FLAG_TAMP1F); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + return HAL_OK; +} + +/** + * @brief This function handles Tamper2 Polling. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_PollForTamper2Event(RTC_HandleTypeDef *hrtc, uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Get the status of the Interrupt */ + while(__HAL_RTC_TAMPER_GET_FLAG(hrtc, RTC_FLAG_TAMP2F) == RESET) + { + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + hrtc->State = HAL_RTC_STATE_TIMEOUT; + return HAL_TIMEOUT; + } + } + } + + /* Clear the Tamper Flag */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc,RTC_FLAG_TAMP2F); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + return HAL_OK; +} + +/** + * @brief This function handles Tamper3 Polling. + * @param hrtc RTC handle + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_PollForTamper3Event(RTC_HandleTypeDef *hrtc, uint32_t Timeout) +{ + uint32_t tickstart = HAL_GetTick(); + + /* Get the status of the Interrupt */ + while(__HAL_RTC_TAMPER_GET_FLAG(hrtc, RTC_FLAG_TAMP3F) == RESET) + { + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + hrtc->State = HAL_RTC_STATE_TIMEOUT; + return HAL_TIMEOUT; + } + } + } + + /* Clear the Tamper Flag */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc,RTC_FLAG_TAMP3F); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup RTCEx_Group2 RTC Wake-up functions + * @brief RTC Wake-up functions + * +@verbatim + =============================================================================== + ##### RTC Wake-up functions ##### + =============================================================================== + + [..] This section provides functions allowing to configure Wake-up feature + +@endverbatim + * @{ + */ + +/** + * @brief Sets wake up timer. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. 
+ * @param WakeUpCounter Wake up counter + * @param WakeUpClock Wake up clock + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetWakeUpTimer(RTC_HandleTypeDef *hrtc, uint32_t WakeUpCounter, uint32_t WakeUpClock) +{ + uint32_t tickstart = 0; + + /* Check the parameters */ + assert_param(IS_RTC_WAKEUP_CLOCK(WakeUpClock)); + assert_param(IS_RTC_WAKEUP_COUNTER(WakeUpCounter)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + __HAL_RTC_WAKEUPTIMER_DISABLE(hrtc); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /*Check RTC WUTWF flag is reset only when wake up timer enabled*/ + if((hrtc->Instance->CR & RTC_CR_WUTE) != RESET) + { + /* Wait till RTC WUTWF flag is set and if Time out is reached exit */ + while(__HAL_RTC_WAKEUPTIMER_GET_FLAG(hrtc, RTC_FLAG_WUTWF) == RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + } + + /* Clear the Wakeup Timer clock source bits in CR register */ + hrtc->Instance->CR &= (uint32_t)~RTC_CR_WUCKSEL; + + /* Configure the clock source */ + hrtc->Instance->CR |= (uint32_t)WakeUpClock; + + /* Configure the Wakeup Timer counter */ + hrtc->Instance->WUTR = (uint32_t)WakeUpCounter; + + /* Enable the Wakeup Timer */ + __HAL_RTC_WAKEUPTIMER_ENABLE(hrtc); + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Sets wake up timer with interrupt + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. 
+ * @param WakeUpCounter Wake up counter + * @param WakeUpClock Wake up clock + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetWakeUpTimer_IT(RTC_HandleTypeDef *hrtc, uint32_t WakeUpCounter, uint32_t WakeUpClock) +{ + __IO uint32_t count; + + /* Check the parameters */ + assert_param(IS_RTC_WAKEUP_CLOCK(WakeUpClock)); + assert_param(IS_RTC_WAKEUP_COUNTER(WakeUpCounter)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + __HAL_RTC_WAKEUPTIMER_DISABLE(hrtc); + + /* Check RTC WUTWF flag is reset only when wake up timer enabled */ + if((hrtc->Instance->CR & RTC_CR_WUTE) != RESET) + { + /* Wait till RTC WUTWF flag is reset and if Time out is reached exit */ + count = RTC_TIMEOUT_VALUE * (SystemCoreClock / 32U / 1000U); + do + { + if(count-- == 0U) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + while(__HAL_RTC_WAKEUPTIMER_GET_FLAG(hrtc, RTC_FLAG_WUTWF) == SET); + } + + __HAL_RTC_WAKEUPTIMER_DISABLE(hrtc); + + /* Wait till RTC WUTWF flag is set and if Time out is reached exit */ + count = RTC_TIMEOUT_VALUE * (SystemCoreClock / 32U / 1000U); + do + { + if(count-- == 0U) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + while(__HAL_RTC_WAKEUPTIMER_GET_FLAG(hrtc, RTC_FLAG_WUTWF) == RESET); + + /* Configure the Wake-up Timer counter */ + hrtc->Instance->WUTR = (uint32_t)WakeUpCounter; + + /* Clear the Wakeup Timer clock source bits in CR register */ + hrtc->Instance->CR &= (uint32_t)~RTC_CR_WUCKSEL; + + /* Configure the clock source */ + hrtc->Instance->CR |= (uint32_t)WakeUpClock; + + /* RTC WakeUpTimer Interrupt Configuration: EXTI configuration */ + __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_IT(); + + EXTI->RTSR |= RTC_EXTI_LINE_WAKEUPTIMER_EVENT; + + /* Clear RTC Wake Up timer Flag */ + __HAL_RTC_WAKEUPTIMER_CLEAR_FLAG(hrtc, RTC_FLAG_WUTF); + + /* Configure the Interrupt in the RTC_CR register */ + __HAL_RTC_WAKEUPTIMER_ENABLE_IT(hrtc,RTC_IT_WUT); + + /* Enable the Wakeup Timer */ + __HAL_RTC_WAKEUPTIMER_ENABLE(hrtc); + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Deactivates wake up timer counter. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. 
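+ * @note  Illustrative periodic wake-up sketch (the handle name hrtc and the roughly 10 s
+ *        period are assumptions):
+ * @code
+ *   // With the 1 Hz ck_spre clock the period is roughly WakeUpCounter + 1 seconds.
+ *   HAL_RTCEx_SetWakeUpTimer_IT(&hrtc, 9U, RTC_WAKEUPCLOCK_CK_SPRE_16BITS);
+ *
+ *   void RTC_WKUP_IRQHandler(void) { HAL_RTCEx_WakeUpTimerIRQHandler(&hrtc); }  // typical ISR name
+ *   // HAL_RTCEx_WakeUpTimerEventCallback() is then invoked every period;
+ *   // HAL_RTCEx_DeactivateWakeUpTimer(&hrtc) stops it.
+ * @endcode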
+ * @retval HAL status + */ +uint32_t HAL_RTCEx_DeactivateWakeUpTimer(RTC_HandleTypeDef *hrtc) +{ + uint32_t tickstart = 0U; + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Disable the Wakeup Timer */ + __HAL_RTC_WAKEUPTIMER_DISABLE(hrtc); + + /* In case of interrupt mode is used, the interrupt source must disabled */ + __HAL_RTC_WAKEUPTIMER_DISABLE_IT(hrtc,RTC_IT_WUT); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till RTC WUTWF flag is set and if Time out is reached exit */ + while(__HAL_RTC_WAKEUPTIMER_GET_FLAG(hrtc, RTC_FLAG_WUTWF) == RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Gets wake up timer counter. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval Counter value + */ +uint32_t HAL_RTCEx_GetWakeUpTimer(RTC_HandleTypeDef *hrtc) +{ + /* Get the counter value */ + return ((uint32_t)(hrtc->Instance->WUTR & RTC_WUTR_WUT)); +} + +/** + * @brief This function handles Wake Up Timer interrupt request. + * @note Unlike alarm interrupt line (shared by AlarmA and AlarmB) and tamper + * interrupt line (shared by timestamp and tampers) wakeup timer + * interrupt line is exclusive to the wakeup timer. + * There is no need in this case to check on the interrupt enable + * status via __HAL_RTC_WAKEUPTIMER_GET_IT_SOURCE(). + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +void HAL_RTCEx_WakeUpTimerIRQHandler(RTC_HandleTypeDef *hrtc) +{ + /* Get the pending status of the WAKEUPTIMER Interrupt */ + if(__HAL_RTC_WAKEUPTIMER_GET_FLAG(hrtc, RTC_FLAG_WUTF) != (uint32_t)RESET) + { + /* WAKEUPTIMER callback */ +#if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) + hrtc->WakeUpTimerEventCallback(hrtc); +#else + HAL_RTCEx_WakeUpTimerEventCallback(hrtc); +#endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ + + /* Clear the WAKEUPTIMER interrupt pending bit */ + __HAL_RTC_WAKEUPTIMER_CLEAR_FLAG(hrtc, RTC_FLAG_WUTF); + } + + /* Clear the EXTI's line Flag for RTC WakeUpTimer */ + __HAL_RTC_WAKEUPTIMER_EXTI_CLEAR_FLAG(); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; +} + +/** + * @brief Wake Up Timer callback. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +__weak void HAL_RTCEx_WakeUpTimerEventCallback(RTC_HandleTypeDef *hrtc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrtc); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RTC_WakeUpTimerEventCallback could be implemented in the user file + */ +} + +/** + * @brief This function handles Wake Up Timer Polling. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. 
+ * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_PollForWakeUpTimerEvent(RTC_HandleTypeDef *hrtc, uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_RTC_WAKEUPTIMER_GET_FLAG(hrtc, RTC_FLAG_WUTF) == RESET) + { + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + return HAL_TIMEOUT; + } + } + } + + /* Clear the WAKEUPTIMER Flag */ + __HAL_RTC_WAKEUPTIMER_CLEAR_FLAG(hrtc, RTC_FLAG_WUTF); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + return HAL_OK; +} + +/** + * @} + */ + + +/** @defgroup RTCEx_Group3 Extension Peripheral Control functions + * @brief Extension Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Extension Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides functions allowing to + (+) Write a data in a specified RTC Backup data register + (+) Read a data in a specified RTC Backup data register + (+) Set the Coarse calibration parameters. + (+) Deactivate the Coarse calibration parameters + (+) Set the Smooth calibration parameters. + (+) Configure the Synchronization Shift Control Settings. + (+) Configure the Calibration Pinout (RTC_CALIB) Selection (1Hz or 512Hz). + (+) Deactivate the Calibration Pinout (RTC_CALIB) Selection (1Hz or 512Hz). + (+) Enable the RTC reference clock detection. + (+) Disable the RTC reference clock detection. + (+) Enable the Bypass Shadow feature. + (+) Disable the Bypass Shadow feature. + +@endverbatim + * @{ + */ + +/** + * @brief Writes a data in a specified RTC Backup data register. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param BackupRegister RTC Backup data Register number. + * This parameter can be: RTC_BKP_DRx where x can be from 0 to 19 to + * specify the register. + * @param Data Data to be written in the specified RTC Backup data register. + * @retval None + */ +void HAL_RTCEx_BKUPWrite(RTC_HandleTypeDef *hrtc, uint32_t BackupRegister, uint32_t Data) +{ + uint32_t tmp = 0; + + /* Check the parameters */ + assert_param(IS_RTC_BKP(BackupRegister)); + + tmp = (uint32_t)&(hrtc->Instance->BKP0R); + tmp += (BackupRegister * 4); + + /* Write the specified register */ + *(__IO uint32_t *)tmp = (uint32_t)Data; +} + +/** + * @brief Reads data from the specified RTC Backup data Register. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param BackupRegister RTC Backup data Register number. + * This parameter can be: RTC_BKP_DRx where x can be from 0 to 19 to + * specify the register. + * @retval Read value + */ +uint32_t HAL_RTCEx_BKUPRead(RTC_HandleTypeDef *hrtc, uint32_t BackupRegister) +{ + uint32_t tmp = 0; + + /* Check the parameters */ + assert_param(IS_RTC_BKP(BackupRegister)); + + tmp = (uint32_t)&(hrtc->Instance->BKP0R); + tmp += (BackupRegister * 4); + + /* Read the specified register */ + return (*(__IO uint32_t *)tmp); +} + +/** + * @brief Sets the Smooth calibration parameters. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param SmoothCalibPeriod Select the Smooth Calibration Period. 
+ * This parameter can be can be one of the following values : + * @arg RTC_SMOOTHCALIB_PERIOD_32SEC: The smooth calibration period is 32s. + * @arg RTC_SMOOTHCALIB_PERIOD_16SEC: The smooth calibration period is 16s. + * @arg RTC_SMOOTHCALIB_PERIOD_8SEC: The smooth calibration period is 8s. + * @param SmoothCalibPlusPulses Select to Set or reset the CALP bit. + * This parameter can be one of the following values: + * @arg RTC_SMOOTHCALIB_PLUSPULSES_SET: Add one RTCCLK pulses every 2*11 pulses. + * @arg RTC_SMOOTHCALIB_PLUSPULSES_RESET: No RTCCLK pulses are added. + * @param SmouthCalibMinusPulsesValue Select the value of CALM[80] bits. + * This parameter can be one any value from 0 to 0x000001FF. + * @note To deactivate the smooth calibration, the field SmoothCalibPlusPulses + * must be equal to SMOOTHCALIB_PLUSPULSES_RESET and the field + * SmouthCalibMinusPulsesValue must be equal to 0. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetSmoothCalib(RTC_HandleTypeDef* hrtc, uint32_t SmoothCalibPeriod, uint32_t SmoothCalibPlusPulses, uint32_t SmouthCalibMinusPulsesValue) +{ + uint32_t tickstart = 0; + + /* Check the parameters */ + assert_param(IS_RTC_SMOOTH_CALIB_PERIOD(SmoothCalibPeriod)); + assert_param(IS_RTC_SMOOTH_CALIB_PLUS(SmoothCalibPlusPulses)); + assert_param(IS_RTC_SMOOTH_CALIB_MINUS(SmouthCalibMinusPulsesValue)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* check if a calibration is pending*/ + if((hrtc->Instance->ISR & RTC_ISR_RECALPF) != RESET) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + /* check if a calibration is pending*/ + while((hrtc->Instance->ISR & RTC_ISR_RECALPF) != RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + } + + /* Configure the Smooth calibration settings */ + hrtc->Instance->CALR = (uint32_t)((uint32_t)SmoothCalibPeriod | (uint32_t)SmoothCalibPlusPulses | (uint32_t)SmouthCalibMinusPulsesValue); + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Configures the Synchronization Shift Control Settings. + * @note When REFCKON is set, firmware must not write to Shift control register. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param ShiftAdd1S Select to add or not 1 second to the time calendar. + * This parameter can be one of the following values : + * @arg RTC_SHIFTADD1S_SET: Add one second to the clock calendar. + * @arg RTC_SHIFTADD1S_RESET: No effect. + * @param ShiftSubFS Select the number of Second Fractions to substitute. + * This parameter can be one any value from 0 to 0x7FFF. 
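+  * @note  Illustrative sketch only (it assumes the default synchronous prescaler
+  *        PREDIV_S = 255 and a handle named hrtc, neither defined in this file):
+  *        advancing the calendar by roughly half a second could be done with
+  * @code
+  *        // shift = +1 s - 128/256 s = +0.5 s
+  *        HAL_RTCEx_SetSynchroShift(&hrtc, RTC_SHIFTADD1S_SET, 128U);
+  * @endcode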
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetSynchroShift(RTC_HandleTypeDef* hrtc, uint32_t ShiftAdd1S, uint32_t ShiftSubFS) +{ + uint32_t tickstart = 0; + + /* Check the parameters */ + assert_param(IS_RTC_SHIFT_ADD1S(ShiftAdd1S)); + assert_param(IS_RTC_SHIFT_SUBFS(ShiftSubFS)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until the shift is completed*/ + while((hrtc->Instance->ISR & RTC_ISR_SHPF) != RESET) + { + if((HAL_GetTick() - tickstart ) > RTC_TIMEOUT_VALUE) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_TIMEOUT; + } + } + + /* Check if the reference clock detection is disabled */ + if((hrtc->Instance->CR & RTC_CR_REFCKON) == RESET) + { + /* Configure the Shift settings */ + hrtc->Instance->SHIFTR = (uint32_t)(uint32_t)(ShiftSubFS) | (uint32_t)(ShiftAdd1S); + + /* If RTC_CR_BYPSHAD bit = 0, wait for synchro else this check is not needed */ + if((hrtc->Instance->CR & RTC_CR_BYPSHAD) == RESET) + { + if(HAL_RTC_WaitForSynchro(hrtc) != HAL_OK) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + hrtc->State = HAL_RTC_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_ERROR; + } + } + } + else + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_ERROR; + } + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Configures the Calibration Pinout (RTC_CALIB) Selection (1Hz or 512Hz). + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @param CalibOutput Select the Calibration output Selection . + * This parameter can be one of the following values: + * @arg RTC_CALIBOUTPUT_512HZ: A signal has a regular waveform at 512Hz. + * @arg RTC_CALIBOUTPUT_1HZ: A signal has a regular waveform at 1Hz. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetCalibrationOutPut(RTC_HandleTypeDef* hrtc, uint32_t CalibOutput) +{ + /* Check the parameters */ + assert_param(IS_RTC_CALIB_OUTPUT(CalibOutput)); + + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Clear flags before config */ + hrtc->Instance->CR &= (uint32_t)~RTC_CR_COSEL; + + /* Configure the RTC_CR register */ + hrtc->Instance->CR |= (uint32_t)CalibOutput; + + __HAL_RTC_CALIBRATION_OUTPUT_ENABLE(hrtc); + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Deactivates the Calibration Pinout (RTC_CALIB) Selection (1Hz or 512Hz). + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_DeactivateCalibrationOutPut(RTC_HandleTypeDef* hrtc) +{ + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + __HAL_RTC_CALIBRATION_OUTPUT_DISABLE(hrtc); + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Enables the RTC reference clock detection. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_SetRefClock(RTC_HandleTypeDef* hrtc) +{ + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Set Initialization mode */ + if(RTC_EnterInitMode(hrtc) != HAL_OK) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Set RTC state*/ + hrtc->State = HAL_RTC_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_ERROR; + } + else + { + __HAL_RTC_CLOCKREF_DETECTION_ENABLE(hrtc); + + /* Exit Initialization mode */ + hrtc->Instance->ISR &= (uint32_t)~RTC_ISR_INIT; + } + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Disable the RTC reference clock detection. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_DeactivateRefClock(RTC_HandleTypeDef* hrtc) +{ + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Set Initialization mode */ + if(RTC_EnterInitMode(hrtc) != HAL_OK) + { + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Set RTC state*/ + hrtc->State = HAL_RTC_STATE_ERROR; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_ERROR; + } + else + { + __HAL_RTC_CLOCKREF_DETECTION_DISABLE(hrtc); + + /* Exit Initialization mode */ + hrtc->Instance->ISR &= (uint32_t)~RTC_ISR_INIT; + } + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Enables the Bypass Shadow feature. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @note When the Bypass Shadow is enabled the calendar value are taken + * directly from the Calendar counter. 
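+  * @note  Because a bypassed read is no longer guaranteed to be coherent, a common
+  *        precaution (illustrative sketch, assuming a handle named hrtc) is to read
+  *        the calendar twice and compare:
+  * @code
+  *        RTC_TimeTypeDef t1, t2;
+  *        RTC_DateTypeDef d;
+  *        do {
+  *          HAL_RTC_GetTime(&hrtc, &t1, RTC_FORMAT_BIN);
+  *          HAL_RTC_GetDate(&hrtc, &d, RTC_FORMAT_BIN);
+  *          HAL_RTC_GetTime(&hrtc, &t2, RTC_FORMAT_BIN);
+  *        } while (t1.Seconds != t2.Seconds);
+  * @endcode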
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_EnableBypassShadow(RTC_HandleTypeDef* hrtc) +{ + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Set the BYPSHAD bit */ + hrtc->Instance->CR |= (uint8_t)RTC_CR_BYPSHAD; + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @brief Disables the Bypass Shadow feature. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @note When the Bypass Shadow is enabled the calendar value are taken + * directly from the Calendar counter. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_DisableBypassShadow(RTC_HandleTypeDef* hrtc) +{ + /* Process Locked */ + __HAL_LOCK(hrtc); + + hrtc->State = HAL_RTC_STATE_BUSY; + + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + + /* Reset the BYPSHAD bit */ + hrtc->Instance->CR &= (uint8_t)~RTC_CR_BYPSHAD; + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hrtc); + + return HAL_OK; +} + +/** + * @} + */ + + /** @defgroup RTCEx_Group4 Extended features functions + * @brief Extended features functions + * +@verbatim + =============================================================================== + ##### Extended features functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) RTC Alram B callback + (+) RTC Poll for Alarm B request + +@endverbatim + * @{ + */ + +/** + * @brief Alarm B callback. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. + * @retval None + */ +__weak void HAL_RTCEx_AlarmBEventCallback(RTC_HandleTypeDef *hrtc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hrtc); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RTC_AlarmBEventCallback could be implemented in the user file + */ +} + +/** + * @brief This function handles AlarmB Polling request. + * @param hrtc pointer to a RTC_HandleTypeDef structure that contains + * the configuration information for RTC. 
+ * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RTCEx_PollForAlarmBEvent(RTC_HandleTypeDef *hrtc, uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRBF) == RESET) + { + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + hrtc->State = HAL_RTC_STATE_TIMEOUT; + return HAL_TIMEOUT; + } + } + } + + /* Clear the Alarm Flag */ + __HAL_RTC_ALARM_CLEAR_FLAG(hrtc, RTC_FLAG_ALRBF); + + /* Change RTC state */ + hrtc->State = HAL_RTC_STATE_READY; + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_RTC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sd.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sd.c new file mode 100644 index 000000000..33a086184 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sd.c @@ -0,0 +1,3243 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_sd.c + * @author MCD Application Team + * @brief SD card HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Secure Digital (SD) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + This driver implements a high level communication layer for read and write from/to + this memory. The needed STM32 hardware resources (SDMMC and GPIO) are performed by + the user in HAL_SD_MspInit() function (MSP layer). + Basically, the MSP layer configuration should be the same as we provide in the + examples. + You can easily tailor this configuration according to hardware resources. + + [..] + This driver is a generic layered driver for SDMMC memories which uses the HAL + SDMMC driver functions to interface with SD and uSD cards devices. + It is used as follows: + + (#)Initialize the SDMMC low level resources by implementing the HAL_SD_MspInit() API: + (##) Enable the SDMMC interface clock using __HAL_RCC_SDMMC_CLK_ENABLE(); + (##) SDMMC pins configuration for SD card + (+++) Enable the clock for the SDMMC GPIOs using the functions __HAL_RCC_GPIOx_CLK_ENABLE(); + (+++) Configure these SDMMC pins as alternate function pull-up using HAL_GPIO_Init() + and according to your pin assignment; + (##) DMA configuration if you need to use DMA process (HAL_SD_ReadBlocks_DMA() + and HAL_SD_WriteBlocks_DMA() APIs). + (+++) Enable the DMAx interface clock using __HAL_RCC_DMAx_CLK_ENABLE(); + (+++) Configure the DMA using the function HAL_DMA_Init() with predeclared and filled. + (##) NVIC configuration if you need to use interrupt process when using DMA transfer. 
+             (+++) Configure the SDMMC and DMA interrupt priorities using the function
+                   HAL_NVIC_SetPriority(); the DMA interrupt priority should be higher
+                   than (i.e. preempt) the SDMMC interrupt priority.
+             (+++) Enable the NVIC DMA and SDMMC IRQs using the function HAL_NVIC_EnableIRQ()
+             (+++) SDMMC interrupts are managed using the macros __HAL_SD_ENABLE_IT()
+                   and __HAL_SD_DISABLE_IT() inside the communication process.
+             (+++) SDMMC interrupt pending bits are managed using the macros __HAL_SD_GET_IT()
+                   and __HAL_SD_CLEAR_IT()
+        (##) NVIC configuration if you need to use the interrupt process (HAL_SD_ReadBlocks_IT()
+             and HAL_SD_WriteBlocks_IT() APIs).
+             (+++) Configure the SDMMC interrupt priorities using the function HAL_NVIC_SetPriority();
+             (+++) Enable the NVIC SDMMC IRQs using the function HAL_NVIC_EnableIRQ()
+             (+++) SDMMC interrupts are managed using the macros __HAL_SD_ENABLE_IT()
+                   and __HAL_SD_DISABLE_IT() inside the communication process.
+             (+++) SDMMC interrupt pending bits are managed using the macros __HAL_SD_GET_IT()
+                   and __HAL_SD_CLEAR_IT()
+    (#) At this stage, you can perform SD read/write/erase operations after SD card initialization.
+
+
+  *** SD Card Initialization and configuration ***
+  ================================================
+  [..]
+    To initialize the SD Card, use the HAL_SD_Init() function. It initializes the
+    SDMMC peripheral (STM32 side) and the SD Card, and puts it into Standby State
+    (ready for data transfer). This function provides the following operations:
+
+    (#) Apply the SD Card initialization process at 400 kHz and check the SD Card
+        type (Standard Capacity or High Capacity). You can change or adapt this
+        frequency by adjusting the "ClockDiv" field.
+        The SD Card frequency (SDMMC_CK) is computed as follows:
+
+           SDMMC_CK = SDMMCCLK / (ClockDiv + 2)
+
+        In initialization mode and according to the SD Card standard,
+        make sure that the SDMMC_CK frequency does not exceed 400 kHz.
+
+        This phase of initialization is done through the SDMMC_Init() and
+        SDMMC_PowerState_ON() SDMMC low level APIs.
+
+    (#) Initialize the SD card. The API used is HAL_SD_InitCard().
+        This phase performs the card initialization and identification, and checks
+        the SD Card type (Standard Capacity or High Capacity); the initialization
+        flow is compatible with the SD standard.
+
+        This API (HAL_SD_InitCard()) can also be used to reinitialize the card after
+        it has been unplugged and plugged back in.
+
+    (#) Configure the SD Card Data transfer frequency. You can change or adapt this
+        frequency by adjusting the "ClockDiv" field.
+        In transfer mode and according to the SD Card standard, make sure that the
+        SDMMC_CK frequency does not exceed 25 MHz (or 50 MHz after switching to
+        High-Speed mode).
+        To be able to use a frequency higher than 24 MHz, you should use the SDMMC
+        peripheral in bypass mode. Refer to the corresponding reference manual
+        for more details.
+
+    (#) Select the corresponding SD Card according to the address read in step 2.
+
+    (#) Configure the SD Card in wide bus mode: 4-bit data.
+
+  *** SD Card Read operation ***
+  ==============================
+  [..]
+    (+) You can read from the SD card in polling mode by using the function HAL_SD_ReadBlocks().
+        This function supports only a 512-byte block length (the block size should be
+        chosen as 512 bytes).
+        You can choose either a single-block or a multiple-block read operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_SD_GetCardState() function for the SD card state.
+
+    (+) You can read from the SD card in DMA mode by using the function HAL_SD_ReadBlocks_DMA().
+        This function supports only a 512-byte block length (the block size should be
+        chosen as 512 bytes).
+        You can choose either a single-block or a multiple-block read operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_SD_GetCardState() function for the SD card state.
+        You can also follow the DMA transfer process through the SD Rx interrupt event.
+
+    (+) You can read from the SD card in Interrupt mode by using the function HAL_SD_ReadBlocks_IT().
+        This function supports only a 512-byte block length (the block size should be
+        chosen as 512 bytes).
+        You can choose either a single-block or a multiple-block read operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_SD_GetCardState() function for the SD card state.
+        You can also follow the IT transfer process through the SD Rx interrupt event.
+
+  *** SD Card Write operation ***
+  ===============================
+  [..]
+    (+) You can write to the SD card in polling mode by using the function HAL_SD_WriteBlocks().
+        This function supports only a 512-byte block length (the block size should be
+        chosen as 512 bytes).
+        You can choose either a single-block or a multiple-block write operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_SD_GetCardState() function for the SD card state.
+
+    (+) You can write to the SD card in DMA mode by using the function HAL_SD_WriteBlocks_DMA().
+        This function supports only a 512-byte block length (the block size should be
+        chosen as 512 bytes).
+        You can choose either a single-block or a multiple-block write operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_SD_GetCardState() function for the SD card state.
+        You can also follow the DMA transfer process through the SD Tx interrupt event.
+
+    (+) You can write to the SD card in Interrupt mode by using the function HAL_SD_WriteBlocks_IT().
+        This function supports only a 512-byte block length (the block size should be
+        chosen as 512 bytes).
+        You can choose either a single-block or a multiple-block write operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_SD_GetCardState() function for the SD card state.
+        You can also follow the IT transfer process through the SD Tx interrupt event.
+
+  *** SD card status ***
+  ======================
+  [..]
+    (+) The SD Status contains status bits that are related to the SD Memory
+        Card proprietary features. To get the SD card status, use the HAL_SD_GetCardStatus() function.
+
+  *** SD card information ***
+  ===========================
+  [..]
+    (+) To get SD card information, you can use the function HAL_SD_GetCardInfo().
+        It returns useful information about the SD card such as block size, card type,
+        block number ...
+
+  *** SD card CSD register ***
+  ============================
+    (+) The HAL_SD_GetCardCSD() API allows you to get the parameters of the CSD register.
+        Some of the CSD parameters are useful for card initialization and identification.
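+
+    (@) As an illustrative sketch (the handle name hsd1 is an assumption, it is not
+        defined by this driver), the information above can be retrieved as follows:
+
+          HAL_SD_CardInfoTypeDef info;
+          HAL_SD_CardCSDTypeDef  csd;
+          if ((HAL_SD_GetCardInfo(&hsd1, &info) == HAL_OK) &&
+              (HAL_SD_GetCardCSD(&hsd1, &csd) == HAL_OK))
+          {
+            // info.LogBlockNbr and info.LogBlockSize describe the usable capacity
+          }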
+
+  *** SD card CID register ***
+  ============================
+    (+) The HAL_SD_GetCardCID() API allows you to get the parameters of the CID register.
+        Some of the CID parameters are useful for card initialization and identification.
+
+  *** SD HAL driver macros list ***
+  ==================================
+  [..]
+    Below is the list of the most used macros in the SD HAL driver.
+
+    (+) __HAL_SD_ENABLE : Enable the SD device
+    (+) __HAL_SD_DISABLE : Disable the SD device
+    (+) __HAL_SD_DMA_ENABLE: Enable the SDMMC DMA transfer
+    (+) __HAL_SD_DMA_DISABLE: Disable the SDMMC DMA transfer
+    (+) __HAL_SD_ENABLE_IT: Enable the SD device interrupt
+    (+) __HAL_SD_DISABLE_IT: Disable the SD device interrupt
+    (+) __HAL_SD_GET_FLAG: Check whether the specified SD flag is set or not
+    (+) __HAL_SD_CLEAR_FLAG: Clear the SD's pending flags
+
+    (@) You can refer to the SD HAL driver header file for more useful macros
+
+  *** Callback registration ***
+  =============================================
+  [..]
+    The compilation define USE_HAL_SD_REGISTER_CALLBACKS, when set to 1,
+    allows the user to configure the driver callbacks dynamically.
+
+    Use the function @ref HAL_SD_RegisterCallback() to register a user callback.
+    It allows registering the following callbacks:
+      (+) TxCpltCallback : callback when a transmission transfer is completed.
+      (+) RxCpltCallback : callback when a reception transfer is completed.
+      (+) ErrorCallback : callback when an error occurs.
+      (+) AbortCpltCallback : callback when an abort is completed.
+      (+) MspInitCallback : SD MspInit.
+      (+) MspDeInitCallback : SD MspDeInit.
+    This function takes as parameters the HAL peripheral handle, the Callback ID
+    and a pointer to the user callback function.
+
+    Use the function @ref HAL_SD_UnRegisterCallback() to reset a callback to the default
+    weak (surcharged) function. It allows resetting the following callbacks:
+      (+) TxCpltCallback : callback when a transmission transfer is completed.
+      (+) RxCpltCallback : callback when a reception transfer is completed.
+      (+) ErrorCallback : callback when an error occurs.
+      (+) AbortCpltCallback : callback when an abort is completed.
+      (+) MspInitCallback : SD MspInit.
+      (+) MspDeInitCallback : SD MspDeInit.
+    This function takes as parameters the HAL peripheral handle and the Callback ID.
+
+    By default, after @ref HAL_SD_Init and when the state is HAL_SD_STATE_RESET,
+    all callbacks are reset to the corresponding legacy weak (surcharged) functions.
+    An exception is made for the MspInit and MspDeInit callbacks, which are reset to the
+    legacy weak (surcharged) functions in @ref HAL_SD_Init and @ref HAL_SD_DeInit
+    only when these callbacks are null (not registered beforehand).
+    If MspInit or MspDeInit is not null, @ref HAL_SD_Init and @ref HAL_SD_DeInit
+    keep and use the user MspInit/MspDeInit callbacks (registered beforehand).
+
+    Callbacks can be registered/unregistered in the READY state only.
+    An exception is made for the MspInit/MspDeInit callbacks, which can be registered/unregistered
+    in the READY or RESET state; thus registered (user) MspInit/DeInit callbacks can be used
+    during the Init/DeInit.
+    In that case, first register the MspInit/MspDeInit user callbacks
+    using @ref HAL_SD_RegisterCallback before calling @ref HAL_SD_DeInit
+    or @ref HAL_SD_Init.
+
+    When the compilation define USE_HAL_SD_REGISTER_CALLBACKS is set to 0 or
+    not defined, the callback registering feature is not available
+    and weak (surcharged) callbacks are used.
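+
+    (@) A minimal registration sketch (the handle hsd1 and the callback
+        MyTxCpltCallback are assumed names, not defined by this driver;
+        HAL_SD_TX_CPLT_CB_ID is the transmit-complete ID from the SD HAL header):
+
+          void MyTxCpltCallback(SD_HandleTypeDef *hsd) { }   // notify the application here
+          ...
+          HAL_SD_RegisterCallback(&hsd1, HAL_SD_TX_CPLT_CB_ID, MyTxCpltCallback);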
+ + @endverbatim + ****************************************************************************** + * @attention + * + *

+  * © Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +#if defined(SDMMC1) + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup SD + * @{ + */ + +#ifdef HAL_SD_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup SD_Private_Defines + * @{ + */ + +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @defgroup SD_Private_Functions SD Private Functions + * @{ + */ +static uint32_t SD_InitCard(SD_HandleTypeDef *hsd); +static uint32_t SD_PowerON(SD_HandleTypeDef *hsd); +static uint32_t SD_SendSDStatus(SD_HandleTypeDef *hsd, uint32_t *pSDstatus); +static uint32_t SD_SendStatus(SD_HandleTypeDef *hsd, uint32_t *pCardStatus); +static uint32_t SD_WideBus_Enable(SD_HandleTypeDef *hsd); +static uint32_t SD_WideBus_Disable(SD_HandleTypeDef *hsd); +static uint32_t SD_FindSCR(SD_HandleTypeDef *hsd, uint32_t *pSCR); +static void SD_PowerOFF(SD_HandleTypeDef *hsd); +static void SD_Write_IT(SD_HandleTypeDef *hsd); +static void SD_Read_IT(SD_HandleTypeDef *hsd); +static void SD_DMATransmitCplt(DMA_HandleTypeDef *hdma); +static void SD_DMAReceiveCplt(DMA_HandleTypeDef *hdma); +static void SD_DMAError(DMA_HandleTypeDef *hdma); +static void SD_DMATxAbort(DMA_HandleTypeDef *hdma); +static void SD_DMARxAbort(DMA_HandleTypeDef *hdma); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup SD_Exported_Functions + * @{ + */ + +/** @addtogroup SD_Exported_Functions_Group1 + * @brief Initialization and de-initialization functions + * +@verbatim + ============================================================================== + ##### Initialization and de-initialization functions ##### + ============================================================================== + [..] + This section provides functions allowing to initialize/de-initialize the SD + card device to be ready for use. + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the SD according to the specified parameters in the + SD_HandleTypeDef and create the associated handle. 
+ * @param hsd: Pointer to the SD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_Init(SD_HandleTypeDef *hsd) +{ + /* Check the SD handle allocation */ + if(hsd == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_SDMMC_ALL_INSTANCE(hsd->Instance)); + assert_param(IS_SDMMC_CLOCK_EDGE(hsd->Init.ClockEdge)); + assert_param(IS_SDMMC_CLOCK_BYPASS(hsd->Init.ClockBypass)); + assert_param(IS_SDMMC_CLOCK_POWER_SAVE(hsd->Init.ClockPowerSave)); + assert_param(IS_SDMMC_BUS_WIDE(hsd->Init.BusWide)); + assert_param(IS_SDMMC_HARDWARE_FLOW_CONTROL(hsd->Init.HardwareFlowControl)); + assert_param(IS_SDMMC_CLKDIV(hsd->Init.ClockDiv)); + + if(hsd->State == HAL_SD_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hsd->Lock = HAL_UNLOCKED; +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + /* Reset Callback pointers in HAL_SD_STATE_RESET only */ + hsd->TxCpltCallback = HAL_SD_TxCpltCallback; + hsd->RxCpltCallback = HAL_SD_RxCpltCallback; + hsd->ErrorCallback = HAL_SD_ErrorCallback; + hsd->AbortCpltCallback = HAL_SD_AbortCallback; + + if(hsd->MspInitCallback == NULL) + { + hsd->MspInitCallback = HAL_SD_MspInit; + } + + /* Init the low level hardware */ + hsd->MspInitCallback(hsd); +#else + /* Init the low level hardware : GPIO, CLOCK, CORTEX...etc */ + HAL_SD_MspInit(hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + } + + hsd->State = HAL_SD_STATE_BUSY; + + /* Initialize the Card parameters */ + if (HAL_SD_InitCard(hsd) != HAL_OK) + { + return HAL_ERROR; + } + + /* Initialize the error code */ + hsd->ErrorCode = HAL_SD_ERROR_NONE; + + /* Initialize the SD operation */ + hsd->Context = SD_CONTEXT_NONE; + + /* Initialize the SD state */ + hsd->State = HAL_SD_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Initializes the SD Card. + * @param hsd: Pointer to SD handle + * @note This function initializes the SD card. It could be used when a card + re-initialization is needed. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_InitCard(SD_HandleTypeDef *hsd) +{ + uint32_t errorstate; + HAL_StatusTypeDef status; + SD_InitTypeDef Init; + + /* Default SDMMC peripheral configuration for SD card initialization */ + Init.ClockEdge = SDMMC_CLOCK_EDGE_RISING; + Init.ClockBypass = SDMMC_CLOCK_BYPASS_DISABLE; + Init.ClockPowerSave = SDMMC_CLOCK_POWER_SAVE_DISABLE; + Init.BusWide = SDMMC_BUS_WIDE_1B; + Init.HardwareFlowControl = SDMMC_HARDWARE_FLOW_CONTROL_DISABLE; + Init.ClockDiv = SDMMC_INIT_CLK_DIV; + + /* Initialize SDMMC peripheral interface with default configuration */ + status = SDMMC_Init(hsd->Instance, Init); + if(status != HAL_OK) + { + return HAL_ERROR; + } + + /* Disable SDMMC Clock */ + __HAL_SD_DISABLE(hsd); + + /* Set Power State to ON */ + (void)SDMMC_PowerState_ON(hsd->Instance); + + /* Enable SDMMC Clock */ + __HAL_SD_ENABLE(hsd); + + /* Identify card operating voltage */ + errorstate = SD_PowerON(hsd); + if(errorstate != HAL_SD_ERROR_NONE) + { + hsd->State = HAL_SD_STATE_READY; + hsd->ErrorCode |= errorstate; + return HAL_ERROR; + } + + /* Card initialization */ + errorstate = SD_InitCard(hsd); + if(errorstate != HAL_SD_ERROR_NONE) + { + hsd->State = HAL_SD_STATE_READY; + hsd->ErrorCode |= errorstate; + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief De-Initializes the SD card. 
+ * @param hsd: Pointer to SD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_DeInit(SD_HandleTypeDef *hsd) +{ + /* Check the SD handle allocation */ + if(hsd == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_SDMMC_ALL_INSTANCE(hsd->Instance)); + + hsd->State = HAL_SD_STATE_BUSY; + + /* Set SD power state to off */ + SD_PowerOFF(hsd); + +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + if(hsd->MspDeInitCallback == NULL) + { + hsd->MspDeInitCallback = HAL_SD_MspDeInit; + } + + /* DeInit the low level hardware */ + hsd->MspDeInitCallback(hsd); +#else + /* De-Initialize the MSP layer */ + HAL_SD_MspDeInit(hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + + hsd->ErrorCode = HAL_SD_ERROR_NONE; + hsd->State = HAL_SD_STATE_RESET; + + return HAL_OK; +} + + +/** + * @brief Initializes the SD MSP. + * @param hsd: Pointer to SD handle + * @retval None + */ +__weak void HAL_SD_MspInit(SD_HandleTypeDef *hsd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hsd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SD_MspInit could be implemented in the user file + */ +} + +/** + * @brief De-Initialize SD MSP. + * @param hsd: Pointer to SD handle + * @retval None + */ +__weak void HAL_SD_MspDeInit(SD_HandleTypeDef *hsd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hsd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SD_MspDeInit could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @addtogroup SD_Exported_Functions_Group2 + * @brief Data transfer functions + * +@verbatim + ============================================================================== + ##### IO operation functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to manage the data + transfer from/to SD card. + +@endverbatim + * @{ + */ + +/** + * @brief Reads block(s) from a specified address in a card. The Data transfer + * is managed by polling mode. + * @note This API should be followed by a check on the card state through + * HAL_SD_GetCardState(). 
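+  * @note   Illustrative sketch (the handle hsd1 is an assumed name, not defined in
+  *         this file): read one 512-byte block in polling mode, then wait until the
+  *         card is back in the transfer state:
+  * @code
+  *         uint8_t buf[BLOCKSIZE];
+  *         if (HAL_SD_ReadBlocks(&hsd1, buf, 0U, 1U, 1000U) == HAL_OK)
+  *         {
+  *           while (HAL_SD_GetCardState(&hsd1) != HAL_SD_CARD_TRANSFER) { }
+  *         }
+  * @endcode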
+ * @param hsd: Pointer to SD handle + * @param pData: pointer to the buffer that will contain the received data + * @param BlockAdd: Block Address from where data is to be read + * @param NumberOfBlocks: Number of SD blocks to read + * @param Timeout: Specify timeout value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_ReadBlocks(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks, uint32_t Timeout) +{ + SDMMC_DataInitTypeDef config; + uint32_t errorstate; + uint32_t tickstart = HAL_GetTick(); + uint32_t count, data, dataremaining; + uint32_t add = BlockAdd; + uint8_t *tempbuff = pData; + + if(NULL == pData) + { + hsd->ErrorCode |= HAL_SD_ERROR_PARAM; + return HAL_ERROR; + } + + if(hsd->State == HAL_SD_STATE_READY) + { + hsd->ErrorCode = HAL_SD_ERROR_NONE; + + if((add + NumberOfBlocks) > (hsd->SdCard.LogBlockNbr)) + { + hsd->ErrorCode |= HAL_SD_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hsd->State = HAL_SD_STATE_BUSY; + + /* Initialize data control register */ + hsd->Instance->DCTRL = 0U; + + if(hsd->SdCard.CardType != CARD_SDHC_SDXC) + { + add *= 512U; + } + + /* Set Block Size for Card */ + errorstate = SDMMC_CmdBlockLength(hsd->Instance, BLOCKSIZE); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + /* Configure the SD DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = NumberOfBlocks * BLOCKSIZE; + config.DataBlockSize = SDMMC_DATABLOCK_SIZE_512B; + config.TransferDir = SDMMC_TRANSFER_DIR_TO_SDMMC; + config.TransferMode = SDMMC_TRANSFER_MODE_BLOCK; + config.DPSM = SDMMC_DPSM_ENABLE; + (void)SDMMC_ConfigData(hsd->Instance, &config); + + /* Read block(s) in polling mode */ + if(NumberOfBlocks > 1U) + { + hsd->Context = SD_CONTEXT_READ_MULTIPLE_BLOCK; + + /* Read Multi Block command */ + errorstate = SDMMC_CmdReadMultiBlock(hsd->Instance, add); + } + else + { + hsd->Context = SD_CONTEXT_READ_SINGLE_BLOCK; + + /* Read Single Block command */ + errorstate = SDMMC_CmdReadSingleBlock(hsd->Instance, add); + } + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + + /* Poll on SDMMC flags */ + dataremaining = config.DataLength; + while(!__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXOVERR | SDMMC_FLAG_DCRCFAIL | SDMMC_FLAG_DTIMEOUT | SDMMC_FLAG_DATAEND)) + { + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXFIFOHF) && (dataremaining > 0U)) + { + /* Read data from SDMMC Rx FIFO */ + for(count = 0U; count < 8U; count++) + { + data = SDMMC_ReadFIFO(hsd->Instance); + *tempbuff = (uint8_t)(data & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 8U) & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 16U) & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 24U) & 0xFFU); + tempbuff++; + dataremaining--; + } + } + + if(((HAL_GetTick()-tickstart) >= Timeout) || (Timeout == 0U)) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_TIMEOUT; + hsd->State= HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_TIMEOUT; + } + } + + /* Send stop transmission command in case of multiblock read */ + if(__HAL_SD_GET_FLAG(hsd, 
SDMMC_FLAG_DATAEND) && (NumberOfBlocks > 1U)) + { + if(hsd->SdCard.CardType != CARD_SECURED) + { + /* Send stop transmission command */ + errorstate = SDMMC_CmdStopTransfer(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + } + } + + /* Get error state */ + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DTIMEOUT)) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_DATA_TIMEOUT; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + else if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DCRCFAIL)) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_DATA_CRC_FAIL; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + else if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXOVERR)) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_RX_OVERRUN; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + else + { + /* Nothing to do */ + } + + /* Empty FIFO if there is still any data */ + while ((__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXDAVL)) && (dataremaining > 0U)) + { + data = SDMMC_ReadFIFO(hsd->Instance); + *tempbuff = (uint8_t)(data & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 8U) & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 16U) & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 24U) & 0xFFU); + tempbuff++; + dataremaining--; + + if(((HAL_GetTick()-tickstart) >= Timeout) || (Timeout == 0U)) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_TIMEOUT; + hsd->State= HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + } + + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + hsd->State = HAL_SD_STATE_READY; + + return HAL_OK; + } + else + { + hsd->ErrorCode |= HAL_SD_ERROR_BUSY; + return HAL_ERROR; + } +} + +/** + * @brief Allows to write block(s) to a specified address in a card. The Data + * transfer is managed by polling mode. + * @note This API should be followed by a check on the card state through + * HAL_SD_GetCardState(). 
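+  * @note   Illustrative sketch (hsd1 is an assumed handle name): write four blocks
+  *         in polling mode and wait for the card to finish programming:
+  * @code
+  *         uint8_t  data[4U * BLOCKSIZE];
+  *         uint32_t firstBlock = 0U;
+  *         if (HAL_SD_WriteBlocks(&hsd1, data, firstBlock, 4U, 2000U) == HAL_OK)
+  *         {
+  *           while (HAL_SD_GetCardState(&hsd1) != HAL_SD_CARD_TRANSFER) { }
+  *         }
+  * @endcode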
+ * @param hsd: Pointer to SD handle + * @param pData: pointer to the buffer that will contain the data to transmit + * @param BlockAdd: Block Address where data will be written + * @param NumberOfBlocks: Number of SD blocks to write + * @param Timeout: Specify timeout value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_WriteBlocks(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks, uint32_t Timeout) +{ + SDMMC_DataInitTypeDef config; + uint32_t errorstate; + uint32_t tickstart = HAL_GetTick(); + uint32_t count, data, dataremaining; + uint32_t add = BlockAdd; + uint8_t *tempbuff = pData; + + if(NULL == pData) + { + hsd->ErrorCode |= HAL_SD_ERROR_PARAM; + return HAL_ERROR; + } + + if(hsd->State == HAL_SD_STATE_READY) + { + hsd->ErrorCode = HAL_SD_ERROR_NONE; + + if((add + NumberOfBlocks) > (hsd->SdCard.LogBlockNbr)) + { + hsd->ErrorCode |= HAL_SD_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hsd->State = HAL_SD_STATE_BUSY; + + /* Initialize data control register */ + hsd->Instance->DCTRL = 0U; + + if(hsd->SdCard.CardType != CARD_SDHC_SDXC) + { + add *= 512U; + } + + /* Set Block Size for Card */ + errorstate = SDMMC_CmdBlockLength(hsd->Instance, BLOCKSIZE); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + /* Configure the SD DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = NumberOfBlocks * BLOCKSIZE; + config.DataBlockSize = SDMMC_DATABLOCK_SIZE_512B; + config.TransferDir = SDMMC_TRANSFER_DIR_TO_CARD; + config.TransferMode = SDMMC_TRANSFER_MODE_BLOCK; + config.DPSM = SDMMC_DPSM_ENABLE; + (void)SDMMC_ConfigData(hsd->Instance, &config); + + /* Write Blocks in Polling mode */ + if(NumberOfBlocks > 1U) + { + hsd->Context = SD_CONTEXT_WRITE_MULTIPLE_BLOCK; + + /* Write Multi Block command */ + errorstate = SDMMC_CmdWriteMultiBlock(hsd->Instance, add); + } + else + { + hsd->Context = SD_CONTEXT_WRITE_SINGLE_BLOCK; + + /* Write Single Block command */ + errorstate = SDMMC_CmdWriteSingleBlock(hsd->Instance, add); + } + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + + /* Write block(s) in polling mode */ + dataremaining = config.DataLength; + while(!__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_TXUNDERR | SDMMC_FLAG_DCRCFAIL | SDMMC_FLAG_DTIMEOUT | SDMMC_FLAG_DATAEND)) + { + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_TXFIFOHE) && (dataremaining > 0U)) + { + /* Write data to SDMMC Tx FIFO */ + for(count = 0U; count < 8U; count++) + { + data = (uint32_t)(*tempbuff); + tempbuff++; + dataremaining--; + data |= ((uint32_t)(*tempbuff) << 8U); + tempbuff++; + dataremaining--; + data |= ((uint32_t)(*tempbuff) << 16U); + tempbuff++; + dataremaining--; + data |= ((uint32_t)(*tempbuff) << 24U); + tempbuff++; + dataremaining--; + (void)SDMMC_WriteFIFO(hsd->Instance, &data); + } + } + + if(((HAL_GetTick()-tickstart) >= Timeout) || (Timeout == 0U)) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_TIMEOUT; + } + } + + /* Send stop transmission command in case of multiblock write */ + if(__HAL_SD_GET_FLAG(hsd, 
SDMMC_FLAG_DATAEND) && (NumberOfBlocks > 1U)) + { + if(hsd->SdCard.CardType != CARD_SECURED) + { + /* Send stop transmission command */ + errorstate = SDMMC_CmdStopTransfer(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + } + } + + /* Get error state */ + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DTIMEOUT)) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_DATA_TIMEOUT; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + else if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DCRCFAIL)) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_DATA_CRC_FAIL; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + else if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_TXUNDERR)) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_TX_UNDERRUN; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + else + { + /* Nothing to do */ + } + + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + hsd->State = HAL_SD_STATE_READY; + + return HAL_OK; + } + else + { + hsd->ErrorCode |= HAL_SD_ERROR_BUSY; + return HAL_ERROR; + } +} + +/** + * @brief Reads block(s) from a specified address in a card. The Data transfer + * is managed in interrupt mode. + * @note This API should be followed by a check on the card state through + * HAL_SD_GetCardState(). + * @note You could also check the IT transfer process through the SD Rx + * interrupt event. + * @param hsd: Pointer to SD handle + * @param pData: Pointer to the buffer that will contain the received data + * @param BlockAdd: Block Address from where data is to be read + * @param NumberOfBlocks: Number of blocks to read. 
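+  * @note   Completion is reported through HAL_SD_RxCpltCallback() (or a registered
+  *         RxCpltCallback), typically invoked from HAL_SD_IRQHandler() in the SDMMC
+  *         interrupt handler. A minimal pattern (illustrative; rxDone is an assumed
+  *         application flag):
+  * @code
+  *         volatile uint8_t rxDone = 0;
+  *         void HAL_SD_RxCpltCallback(SD_HandleTypeDef *hsd) { rxDone = 1; }
+  * @endcode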
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_ReadBlocks_IT(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks) +{ + SDMMC_DataInitTypeDef config; + uint32_t errorstate; + uint32_t add = BlockAdd; + + if(NULL == pData) + { + hsd->ErrorCode |= HAL_SD_ERROR_PARAM; + return HAL_ERROR; + } + + if(hsd->State == HAL_SD_STATE_READY) + { + hsd->ErrorCode = HAL_SD_ERROR_NONE; + + if((add + NumberOfBlocks) > (hsd->SdCard.LogBlockNbr)) + { + hsd->ErrorCode |= HAL_SD_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hsd->State = HAL_SD_STATE_BUSY; + + /* Initialize data control register */ + hsd->Instance->DCTRL = 0U; + + hsd->pRxBuffPtr = pData; + hsd->RxXferSize = BLOCKSIZE * NumberOfBlocks; + + __HAL_SD_ENABLE_IT(hsd, (SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT | SDMMC_IT_RXOVERR | SDMMC_IT_DATAEND | SDMMC_FLAG_RXFIFOHF)); + + if(hsd->SdCard.CardType != CARD_SDHC_SDXC) + { + add *= 512U; + } + + /* Set Block Size for Card */ + errorstate = SDMMC_CmdBlockLength(hsd->Instance, BLOCKSIZE); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + /* Configure the SD DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = BLOCKSIZE * NumberOfBlocks; + config.DataBlockSize = SDMMC_DATABLOCK_SIZE_512B; + config.TransferDir = SDMMC_TRANSFER_DIR_TO_SDMMC; + config.TransferMode = SDMMC_TRANSFER_MODE_BLOCK; + config.DPSM = SDMMC_DPSM_ENABLE; + (void)SDMMC_ConfigData(hsd->Instance, &config); + + /* Read Blocks in IT mode */ + if(NumberOfBlocks > 1U) + { + hsd->Context = (SD_CONTEXT_READ_MULTIPLE_BLOCK | SD_CONTEXT_IT); + + /* Read Multi Block command */ + errorstate = SDMMC_CmdReadMultiBlock(hsd->Instance, add); + } + else + { + hsd->Context = (SD_CONTEXT_READ_SINGLE_BLOCK | SD_CONTEXT_IT); + + /* Read Single Block command */ + errorstate = SDMMC_CmdReadSingleBlock(hsd->Instance, add); + } + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Writes block(s) to a specified address in a card. The Data transfer + * is managed in interrupt mode. + * @note This API should be followed by a check on the card state through + * HAL_SD_GetCardState(). + * @note You could also check the IT transfer process through the SD Tx + * interrupt event. 
+ * @param hsd: Pointer to SD handle + * @param pData: Pointer to the buffer that will contain the data to transmit + * @param BlockAdd: Block Address where data will be written + * @param NumberOfBlocks: Number of blocks to write + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_WriteBlocks_IT(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks) +{ + SDMMC_DataInitTypeDef config; + uint32_t errorstate; + uint32_t add = BlockAdd; + + if(NULL == pData) + { + hsd->ErrorCode |= HAL_SD_ERROR_PARAM; + return HAL_ERROR; + } + + if(hsd->State == HAL_SD_STATE_READY) + { + hsd->ErrorCode = HAL_SD_ERROR_NONE; + + if((add + NumberOfBlocks) > (hsd->SdCard.LogBlockNbr)) + { + hsd->ErrorCode |= HAL_SD_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hsd->State = HAL_SD_STATE_BUSY; + + /* Initialize data control register */ + hsd->Instance->DCTRL = 0U; + + hsd->pTxBuffPtr = pData; + hsd->TxXferSize = BLOCKSIZE * NumberOfBlocks; + + /* Enable transfer interrupts */ + __HAL_SD_ENABLE_IT(hsd, (SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT | SDMMC_IT_TXUNDERR | SDMMC_IT_DATAEND | SDMMC_FLAG_TXFIFOHE)); + + if(hsd->SdCard.CardType != CARD_SDHC_SDXC) + { + add *= 512U; + } + + /* Set Block Size for Card */ + errorstate = SDMMC_CmdBlockLength(hsd->Instance, BLOCKSIZE); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + /* Write Blocks in Polling mode */ + if(NumberOfBlocks > 1U) + { + hsd->Context = (SD_CONTEXT_WRITE_MULTIPLE_BLOCK| SD_CONTEXT_IT); + + /* Write Multi Block command */ + errorstate = SDMMC_CmdWriteMultiBlock(hsd->Instance, add); + } + else + { + hsd->Context = (SD_CONTEXT_WRITE_SINGLE_BLOCK | SD_CONTEXT_IT); + + /* Write Single Block command */ + errorstate = SDMMC_CmdWriteSingleBlock(hsd->Instance, add); + } + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + + /* Configure the SD DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = BLOCKSIZE * NumberOfBlocks; + config.DataBlockSize = SDMMC_DATABLOCK_SIZE_512B; + config.TransferDir = SDMMC_TRANSFER_DIR_TO_CARD; + config.TransferMode = SDMMC_TRANSFER_MODE_BLOCK; + config.DPSM = SDMMC_DPSM_ENABLE; + (void)SDMMC_ConfigData(hsd->Instance, &config); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Reads block(s) from a specified address in a card. The Data transfer + * is managed by DMA mode. + * @note This API should be followed by a check on the card state through + * HAL_SD_GetCardState(). + * @note You could also check the DMA transfer process through the SD Rx + * interrupt event. + * @param hsd: Pointer SD handle + * @param pData: Pointer to the buffer that will contain the received data + * @param BlockAdd: Block Address from where data is to be read + * @param NumberOfBlocks: Number of blocks to read. 
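+  * @note   The DMA path uses hsd->hdmarx, so a DMA stream handle must be configured
+  *         and linked to the SD handle beforehand (normally in HAL_SD_MspInit()), for
+  *         example with the standard linking macro (sketch; hdma_sdmmc1_rx is an
+  *         assumed handle name):
+  * @code
+  *         __HAL_LINKDMA(hsd, hdmarx, hdma_sdmmc1_rx);
+  * @endcode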
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_ReadBlocks_DMA(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks) +{ + SDMMC_DataInitTypeDef config; + uint32_t errorstate; + uint32_t add = BlockAdd; + + if(NULL == pData) + { + hsd->ErrorCode |= HAL_SD_ERROR_PARAM; + return HAL_ERROR; + } + + if(hsd->State == HAL_SD_STATE_READY) + { + hsd->ErrorCode = HAL_SD_ERROR_NONE; + + if((add + NumberOfBlocks) > (hsd->SdCard.LogBlockNbr)) + { + hsd->ErrorCode |= HAL_SD_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hsd->State = HAL_SD_STATE_BUSY; + + /* Initialize data control register */ + hsd->Instance->DCTRL = 0U; + + __HAL_SD_ENABLE_IT(hsd, (SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT | SDMMC_IT_RXOVERR | SDMMC_IT_DATAEND)); + + /* Set the DMA transfer complete callback */ + hsd->hdmarx->XferCpltCallback = SD_DMAReceiveCplt; + + /* Set the DMA error callback */ + hsd->hdmarx->XferErrorCallback = SD_DMAError; + + /* Set the DMA Abort callback */ + hsd->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA Channel */ + if(HAL_DMA_Start_IT(hsd->hdmarx, (uint32_t)&hsd->Instance->FIFO, (uint32_t)pData, (uint32_t)(BLOCKSIZE * NumberOfBlocks)/4U) != HAL_OK) + { + __HAL_SD_DISABLE_IT(hsd, (SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT | SDMMC_IT_RXOVERR | SDMMC_IT_DATAEND)); + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_DMA; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + else + { + /* Enable SD DMA transfer */ + __HAL_SD_DMA_ENABLE(hsd); + + if(hsd->SdCard.CardType != CARD_SDHC_SDXC) + { + add *= 512U; + } + + /* Set Block Size for Card */ + errorstate = SDMMC_CmdBlockLength(hsd->Instance, BLOCKSIZE); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + /* Configure the SD DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = BLOCKSIZE * NumberOfBlocks; + config.DataBlockSize = SDMMC_DATABLOCK_SIZE_512B; + config.TransferDir = SDMMC_TRANSFER_DIR_TO_SDMMC; + config.TransferMode = SDMMC_TRANSFER_MODE_BLOCK; + config.DPSM = SDMMC_DPSM_ENABLE; + (void)SDMMC_ConfigData(hsd->Instance, &config); + + /* Read Blocks in DMA mode */ + if(NumberOfBlocks > 1U) + { + hsd->Context = (SD_CONTEXT_READ_MULTIPLE_BLOCK | SD_CONTEXT_DMA); + + /* Read Multi Block command */ + errorstate = SDMMC_CmdReadMultiBlock(hsd->Instance, add); + } + else + { + hsd->Context = (SD_CONTEXT_READ_SINGLE_BLOCK | SD_CONTEXT_DMA); + + /* Read Single Block command */ + errorstate = SDMMC_CmdReadSingleBlock(hsd->Instance, add); + } + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + + return HAL_OK; + } + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Writes block(s) to a specified address in a card. The Data transfer + * is managed by DMA mode. + * @note This API should be followed by a check on the card state through + * HAL_SD_GetCardState(). + * @note You could also check the DMA transfer process through the SD Tx + * interrupt event. 
+ * @param hsd: Pointer to SD handle + * @param pData: Pointer to the buffer that will contain the data to transmit + * @param BlockAdd: Block Address where data will be written + * @param NumberOfBlocks: Number of blocks to write + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_WriteBlocks_DMA(SD_HandleTypeDef *hsd, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks) +{ + SDMMC_DataInitTypeDef config; + uint32_t errorstate; + uint32_t add = BlockAdd; + + if(NULL == pData) + { + hsd->ErrorCode |= HAL_SD_ERROR_PARAM; + return HAL_ERROR; + } + + if(hsd->State == HAL_SD_STATE_READY) + { + hsd->ErrorCode = HAL_SD_ERROR_NONE; + + if((add + NumberOfBlocks) > (hsd->SdCard.LogBlockNbr)) + { + hsd->ErrorCode |= HAL_SD_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hsd->State = HAL_SD_STATE_BUSY; + + /* Initialize data control register */ + hsd->Instance->DCTRL = 0U; + + /* Enable SD Error interrupts */ + __HAL_SD_ENABLE_IT(hsd, (SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT | SDMMC_IT_TXUNDERR)); + + /* Set the DMA transfer complete callback */ + hsd->hdmatx->XferCpltCallback = SD_DMATransmitCplt; + + /* Set the DMA error callback */ + hsd->hdmatx->XferErrorCallback = SD_DMAError; + + /* Set the DMA Abort callback */ + hsd->hdmatx->XferAbortCallback = NULL; + + if(hsd->SdCard.CardType != CARD_SDHC_SDXC) + { + add *= 512U; + } + + /* Set Block Size for Card */ + errorstate = SDMMC_CmdBlockLength(hsd->Instance, BLOCKSIZE); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + /* Write Blocks in Polling mode */ + if(NumberOfBlocks > 1U) + { + hsd->Context = (SD_CONTEXT_WRITE_MULTIPLE_BLOCK | SD_CONTEXT_DMA); + + /* Write Multi Block command */ + errorstate = SDMMC_CmdWriteMultiBlock(hsd->Instance, add); + } + else + { + hsd->Context = (SD_CONTEXT_WRITE_SINGLE_BLOCK | SD_CONTEXT_DMA); + + /* Write Single Block command */ + errorstate = SDMMC_CmdWriteSingleBlock(hsd->Instance, add); + } + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + + /* Enable SDMMC DMA transfer */ + __HAL_SD_DMA_ENABLE(hsd); + + /* Enable the DMA Channel */ + if(HAL_DMA_Start_IT(hsd->hdmatx, (uint32_t)pData, (uint32_t)&hsd->Instance->FIFO, (uint32_t)(BLOCKSIZE * NumberOfBlocks)/4U) != HAL_OK) + { + __HAL_SD_DISABLE_IT(hsd, (SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT | SDMMC_IT_TXUNDERR)); + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_DMA; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + return HAL_ERROR; + } + else + { + /* Configure the SD DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = BLOCKSIZE * NumberOfBlocks; + config.DataBlockSize = SDMMC_DATABLOCK_SIZE_512B; + config.TransferDir = SDMMC_TRANSFER_DIR_TO_CARD; + config.TransferMode = SDMMC_TRANSFER_MODE_BLOCK; + config.DPSM = SDMMC_DPSM_ENABLE; + (void)SDMMC_ConfigData(hsd->Instance, &config); + + return HAL_OK; + } + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Erases the specified memory area of the given SD card. + * @note This API should be followed by a check on the card state through + * HAL_SD_GetCardState(). 
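+ * @note One possible calling pattern, shown only as a sketch; hsd1 and the
+ *       block range are application-side assumptions, and erasing is destructive:
+ * @code
+ *   if (HAL_SD_Erase(&hsd1, FIRST_BLOCK, LAST_BLOCK) == HAL_OK)
+ *   {
+ *     while (HAL_SD_GetCardState(&hsd1) != HAL_SD_CARD_TRANSFER) { }
+ *   }
+ * @endcode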
+ * @param hsd: Pointer to SD handle + * @param BlockStartAdd: Start Block address + * @param BlockEndAdd: End Block address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_Erase(SD_HandleTypeDef *hsd, uint32_t BlockStartAdd, uint32_t BlockEndAdd) +{ + uint32_t errorstate; + uint32_t start_add = BlockStartAdd; + uint32_t end_add = BlockEndAdd; + + if(hsd->State == HAL_SD_STATE_READY) + { + hsd->ErrorCode = HAL_SD_ERROR_NONE; + + if(end_add < start_add) + { + hsd->ErrorCode |= HAL_SD_ERROR_PARAM; + return HAL_ERROR; + } + + if(end_add > (hsd->SdCard.LogBlockNbr)) + { + hsd->ErrorCode |= HAL_SD_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hsd->State = HAL_SD_STATE_BUSY; + + /* Check if the card command class supports erase command */ + if(((hsd->SdCard.Class) & SDMMC_CCCC_ERASE) == 0U) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_REQUEST_NOT_APPLICABLE; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + if((SDMMC_GetResponse(hsd->Instance, SDMMC_RESP1) & SDMMC_CARD_LOCKED) == SDMMC_CARD_LOCKED) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= HAL_SD_ERROR_LOCK_UNLOCK_FAILED; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + /* Get start and end block for high capacity cards */ + if(hsd->SdCard.CardType != CARD_SDHC_SDXC) + { + start_add *= 512U; + end_add *= 512U; + } + + /* According to sd-card spec 1.0 ERASE_GROUP_START (CMD32) and erase_group_end(CMD33) */ + if(hsd->SdCard.CardType != CARD_SECURED) + { + /* Send CMD32 SD_ERASE_GRP_START with argument as addr */ + errorstate = SDMMC_CmdSDEraseStartAdd(hsd->Instance, start_add); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + /* Send CMD33 SD_ERASE_GRP_END with argument as addr */ + errorstate = SDMMC_CmdSDEraseEndAdd(hsd->Instance, end_add); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + } + + /* Send CMD38 ERASE */ + errorstate = SDMMC_CmdErase(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + hsd->State = HAL_SD_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief This function handles SD card interrupt request. 
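+ * @note This handler is expected to be called from the SDMMC global interrupt
+ *       service routine (typically generated in stm32f7xx_it.c); a minimal
+ *       sketch, assuming the application handle is named hsd1:
+ * @code
+ *   void SDMMC1_IRQHandler(void)
+ *   {
+ *     HAL_SD_IRQHandler(&hsd1);
+ *   }
+ * @endcode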
+ * @param hsd: Pointer to SD handle + * @retval None + */ +void HAL_SD_IRQHandler(SD_HandleTypeDef *hsd) +{ + uint32_t errorstate; + uint32_t context = hsd->Context; + + /* Check for SDMMC interrupt flags */ + if((__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXFIFOHF) != RESET) && ((context & SD_CONTEXT_IT) != 0U)) + { + SD_Read_IT(hsd); + } + + else if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DATAEND) != RESET) + { + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_FLAG_DATAEND); + + __HAL_SD_DISABLE_IT(hsd, SDMMC_IT_DATAEND | SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT|\ + SDMMC_IT_TXUNDERR | SDMMC_IT_RXOVERR | SDMMC_IT_TXFIFOHE |\ + SDMMC_IT_RXFIFOHF); + + hsd->Instance->DCTRL &= ~(SDMMC_DCTRL_DTEN); + + if((context & SD_CONTEXT_IT) != 0U) + { + if(((context & SD_CONTEXT_READ_MULTIPLE_BLOCK) != 0U) || ((context & SD_CONTEXT_WRITE_MULTIPLE_BLOCK) != 0U)) + { + errorstate = SDMMC_CmdStopTransfer(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + hsd->ErrorCode |= errorstate; +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + hsd->ErrorCallback(hsd); +#else + HAL_SD_ErrorCallback(hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + } + } + + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + if(((context & SD_CONTEXT_READ_SINGLE_BLOCK) != 0U) || ((context & SD_CONTEXT_READ_MULTIPLE_BLOCK) != 0U)) + { +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + hsd->RxCpltCallback(hsd); +#else + HAL_SD_RxCpltCallback(hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + } + else + { +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + hsd->TxCpltCallback(hsd); +#else + HAL_SD_TxCpltCallback(hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + } + } + else if((context & SD_CONTEXT_DMA) != 0U) + { + if((context & SD_CONTEXT_WRITE_MULTIPLE_BLOCK) != 0U) + { + errorstate = SDMMC_CmdStopTransfer(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + hsd->ErrorCode |= errorstate; +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + hsd->ErrorCallback(hsd); +#else + HAL_SD_ErrorCallback(hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + } + } + if(((context & SD_CONTEXT_READ_SINGLE_BLOCK) == 0U) && ((context & SD_CONTEXT_READ_MULTIPLE_BLOCK) == 0U)) + { + /* Disable the DMA transfer for transmit request by setting the DMAEN bit + in the SD DCTRL register */ + hsd->Instance->DCTRL &= (uint32_t)~((uint32_t)SDMMC_DCTRL_DMAEN); + + hsd->State = HAL_SD_STATE_READY; + +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + hsd->TxCpltCallback(hsd); +#else + HAL_SD_TxCpltCallback(hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + } + } + else + { + /* Nothing to do */ + } + } + + else if((__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_TXFIFOHE) != RESET) && ((context & SD_CONTEXT_IT) != 0U)) + { + SD_Write_IT(hsd); + } + + else if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DCRCFAIL | SDMMC_FLAG_DTIMEOUT | SDMMC_FLAG_RXOVERR | SDMMC_FLAG_TXUNDERR) != RESET) + { + /* Set Error code */ + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DCRCFAIL) != RESET) + { + hsd->ErrorCode |= HAL_SD_ERROR_DATA_CRC_FAIL; + } + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DTIMEOUT) != RESET) + { + hsd->ErrorCode |= HAL_SD_ERROR_DATA_TIMEOUT; + } + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXOVERR) != RESET) + { + hsd->ErrorCode |= HAL_SD_ERROR_RX_OVERRUN; + } + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_TXUNDERR) != RESET) + { + 
hsd->ErrorCode |= HAL_SD_ERROR_TX_UNDERRUN; + } + + /* Clear All flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + /* Disable all interrupts */ + __HAL_SD_DISABLE_IT(hsd, SDMMC_IT_DATAEND | SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT|\ + SDMMC_IT_TXUNDERR| SDMMC_IT_RXOVERR); + + hsd->ErrorCode |= SDMMC_CmdStopTransfer(hsd->Instance); + + if((context & SD_CONTEXT_IT) != 0U) + { + /* Set the SD state to ready to be able to start again the process */ + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + hsd->ErrorCallback(hsd); +#else + HAL_SD_ErrorCallback(hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + } + else if((context & SD_CONTEXT_DMA) != 0U) + { + /* Abort the SD DMA channel */ + if(((context & SD_CONTEXT_WRITE_SINGLE_BLOCK) != 0U) || ((context & SD_CONTEXT_WRITE_MULTIPLE_BLOCK) != 0U)) + { + /* Set the DMA Tx abort callback */ + hsd->hdmatx->XferAbortCallback = SD_DMATxAbort; + /* Abort DMA in IT mode */ + if(HAL_DMA_Abort_IT(hsd->hdmatx) != HAL_OK) + { + SD_DMATxAbort(hsd->hdmatx); + } + } + else if(((context & SD_CONTEXT_READ_SINGLE_BLOCK) != 0U) || ((context & SD_CONTEXT_READ_MULTIPLE_BLOCK) != 0U)) + { + /* Set the DMA Rx abort callback */ + hsd->hdmarx->XferAbortCallback = SD_DMARxAbort; + /* Abort DMA in IT mode */ + if(HAL_DMA_Abort_IT(hsd->hdmarx) != HAL_OK) + { + SD_DMARxAbort(hsd->hdmarx); + } + } + else + { + hsd->ErrorCode = HAL_SD_ERROR_NONE; + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + hsd->AbortCpltCallback(hsd); +#else + HAL_SD_AbortCallback(hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + } + } + else + { + /* Nothing to do */ + } + } + else + { + /* Nothing to do */ + } +} + +/** + * @brief return the SD state + * @param hsd: Pointer to sd handle + * @retval HAL state + */ +HAL_SD_StateTypeDef HAL_SD_GetState(SD_HandleTypeDef *hsd) +{ + return hsd->State; +} + +/** +* @brief Return the SD error code +* @param hsd : Pointer to a SD_HandleTypeDef structure that contains + * the configuration information. 
+* @retval SD Error Code +*/ +uint32_t HAL_SD_GetError(SD_HandleTypeDef *hsd) +{ + return hsd->ErrorCode; +} + +/** + * @brief Tx Transfer completed callbacks + * @param hsd: Pointer to SD handle + * @retval None + */ +__weak void HAL_SD_TxCpltCallback(SD_HandleTypeDef *hsd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hsd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SD_TxCpltCallback can be implemented in the user file + */ +} + +/** + * @brief Rx Transfer completed callbacks + * @param hsd: Pointer SD handle + * @retval None + */ +__weak void HAL_SD_RxCpltCallback(SD_HandleTypeDef *hsd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hsd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SD_RxCpltCallback can be implemented in the user file + */ +} + +/** + * @brief SD error callbacks + * @param hsd: Pointer SD handle + * @retval None + */ +__weak void HAL_SD_ErrorCallback(SD_HandleTypeDef *hsd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hsd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SD_ErrorCallback can be implemented in the user file + */ +} + +/** + * @brief SD Abort callbacks + * @param hsd: Pointer SD handle + * @retval None + */ +__weak void HAL_SD_AbortCallback(SD_HandleTypeDef *hsd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hsd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SD_AbortCallback can be implemented in the user file + */ +} + +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) +/** + * @brief Register a User SD Callback + * To be used instead of the weak (surcharged) predefined callback + * @param hsd : SD handle + * @param CallbackID : ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_SD_TX_CPLT_CB_ID SD Tx Complete Callback ID + * @arg @ref HAL_SD_RX_CPLT_CB_ID SD Rx Complete Callback ID + * @arg @ref HAL_SD_ERROR_CB_ID SD Error Callback ID + * @arg @ref HAL_SD_ABORT_CB_ID SD Abort Callback ID + * @arg @ref HAL_SD_MSP_INIT_CB_ID SD MspInit Callback ID + * @arg @ref HAL_SD_MSP_DEINIT_CB_ID SD MspDeInit Callback ID + * @param pCallback : pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_SD_RegisterCallback(SD_HandleTypeDef *hsd, HAL_SD_CallbackIDTypeDef CallbackID, pSD_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if(pCallback == NULL) + { + /* Update the error code */ + hsd->ErrorCode |= HAL_SD_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hsd); + + if(hsd->State == HAL_SD_STATE_READY) + { + switch (CallbackID) + { + case HAL_SD_TX_CPLT_CB_ID : + hsd->TxCpltCallback = pCallback; + break; + case HAL_SD_RX_CPLT_CB_ID : + hsd->RxCpltCallback = pCallback; + break; + case HAL_SD_ERROR_CB_ID : + hsd->ErrorCallback = pCallback; + break; + case HAL_SD_ABORT_CB_ID : + hsd->AbortCpltCallback = pCallback; + break; + case HAL_SD_MSP_INIT_CB_ID : + hsd->MspInitCallback = pCallback; + break; + case HAL_SD_MSP_DEINIT_CB_ID : + hsd->MspDeInitCallback = pCallback; + break; + default : + /* Update the error code */ + hsd->ErrorCode |= HAL_SD_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else if (hsd->State == HAL_SD_STATE_RESET) + { + switch (CallbackID) + { + case 
HAL_SD_MSP_INIT_CB_ID : + hsd->MspInitCallback = pCallback; + break; + case HAL_SD_MSP_DEINIT_CB_ID : + hsd->MspDeInitCallback = pCallback; + break; + default : + /* Update the error code */ + hsd->ErrorCode |= HAL_SD_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hsd->ErrorCode |= HAL_SD_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hsd); + return status; +} + +/** + * @brief Unregister a User SD Callback + * SD Callback is redirected to the weak (surcharged) predefined callback + * @param hsd : SD handle + * @param CallbackID : ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_SD_TX_CPLT_CB_ID SD Tx Complete Callback ID + * @arg @ref HAL_SD_RX_CPLT_CB_ID SD Rx Complete Callback ID + * @arg @ref HAL_SD_ERROR_CB_ID SD Error Callback ID + * @arg @ref HAL_SD_ABORT_CB_ID SD Abort Callback ID + * @arg @ref HAL_SD_MSP_INIT_CB_ID SD MspInit Callback ID + * @arg @ref HAL_SD_MSP_DEINIT_CB_ID SD MspDeInit Callback ID + * @retval status + */ +HAL_StatusTypeDef HAL_SD_UnRegisterCallback(SD_HandleTypeDef *hsd, HAL_SD_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hsd); + + if(hsd->State == HAL_SD_STATE_READY) + { + switch (CallbackID) + { + case HAL_SD_TX_CPLT_CB_ID : + hsd->TxCpltCallback = HAL_SD_TxCpltCallback; + break; + case HAL_SD_RX_CPLT_CB_ID : + hsd->RxCpltCallback = HAL_SD_RxCpltCallback; + break; + case HAL_SD_ERROR_CB_ID : + hsd->ErrorCallback = HAL_SD_ErrorCallback; + break; + case HAL_SD_ABORT_CB_ID : + hsd->AbortCpltCallback = HAL_SD_AbortCallback; + break; + case HAL_SD_MSP_INIT_CB_ID : + hsd->MspInitCallback = HAL_SD_MspInit; + break; + case HAL_SD_MSP_DEINIT_CB_ID : + hsd->MspDeInitCallback = HAL_SD_MspDeInit; + break; + default : + /* Update the error code */ + hsd->ErrorCode |= HAL_SD_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else if (hsd->State == HAL_SD_STATE_RESET) + { + switch (CallbackID) + { + case HAL_SD_MSP_INIT_CB_ID : + hsd->MspInitCallback = HAL_SD_MspInit; + break; + case HAL_SD_MSP_DEINIT_CB_ID : + hsd->MspDeInitCallback = HAL_SD_MspDeInit; + break; + default : + /* Update the error code */ + hsd->ErrorCode |= HAL_SD_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hsd->ErrorCode |= HAL_SD_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hsd); + return status; +} +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @addtogroup SD_Exported_Functions_Group3 + * @brief management functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control the SD card + operations and get the related information + +@endverbatim + * @{ + */ + +/** + * @brief Returns information the information of the card which are stored on + * the CID register. 
+ * @param hsd: Pointer to SD handle + * @param pCID: Pointer to a HAL_SD_CardCIDTypeDef structure that + * contains all CID register parameters + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_GetCardCID(SD_HandleTypeDef *hsd, HAL_SD_CardCIDTypeDef *pCID) +{ + pCID->ManufacturerID = (uint8_t)((hsd->CID[0] & 0xFF000000U) >> 24U); + + pCID->OEM_AppliID = (uint16_t)((hsd->CID[0] & 0x00FFFF00U) >> 8U); + + pCID->ProdName1 = (((hsd->CID[0] & 0x000000FFU) << 24U) | ((hsd->CID[1] & 0xFFFFFF00U) >> 8U)); + + pCID->ProdName2 = (uint8_t)(hsd->CID[1] & 0x000000FFU); + + pCID->ProdRev = (uint8_t)((hsd->CID[2] & 0xFF000000U) >> 24U); + + pCID->ProdSN = (((hsd->CID[2] & 0x00FFFFFFU) << 8U) | ((hsd->CID[3] & 0xFF000000U) >> 24U)); + + pCID->Reserved1 = (uint8_t)((hsd->CID[3] & 0x00F00000U) >> 20U); + + pCID->ManufactDate = (uint16_t)((hsd->CID[3] & 0x000FFF00U) >> 8U); + + pCID->CID_CRC = (uint8_t)((hsd->CID[3] & 0x000000FEU) >> 1U); + + pCID->Reserved2 = 1U; + + return HAL_OK; +} + +/** + * @brief Returns information the information of the card which are stored on + * the CSD register. + * @param hsd: Pointer to SD handle + * @param pCSD: Pointer to a HAL_SD_CardCSDTypeDef structure that + * contains all CSD register parameters + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_GetCardCSD(SD_HandleTypeDef *hsd, HAL_SD_CardCSDTypeDef *pCSD) +{ + pCSD->CSDStruct = (uint8_t)((hsd->CSD[0] & 0xC0000000U) >> 30U); + + pCSD->SysSpecVersion = (uint8_t)((hsd->CSD[0] & 0x3C000000U) >> 26U); + + pCSD->Reserved1 = (uint8_t)((hsd->CSD[0] & 0x03000000U) >> 24U); + + pCSD->TAAC = (uint8_t)((hsd->CSD[0] & 0x00FF0000U) >> 16U); + + pCSD->NSAC = (uint8_t)((hsd->CSD[0] & 0x0000FF00U) >> 8U); + + pCSD->MaxBusClkFrec = (uint8_t)(hsd->CSD[0] & 0x000000FFU); + + pCSD->CardComdClasses = (uint16_t)((hsd->CSD[1] & 0xFFF00000U) >> 20U); + + pCSD->RdBlockLen = (uint8_t)((hsd->CSD[1] & 0x000F0000U) >> 16U); + + pCSD->PartBlockRead = (uint8_t)((hsd->CSD[1] & 0x00008000U) >> 15U); + + pCSD->WrBlockMisalign = (uint8_t)((hsd->CSD[1] & 0x00004000U) >> 14U); + + pCSD->RdBlockMisalign = (uint8_t)((hsd->CSD[1] & 0x00002000U) >> 13U); + + pCSD->DSRImpl = (uint8_t)((hsd->CSD[1] & 0x00001000U) >> 12U); + + pCSD->Reserved2 = 0U; /*!< Reserved */ + + if(hsd->SdCard.CardType == CARD_SDSC) + { + pCSD->DeviceSize = (((hsd->CSD[1] & 0x000003FFU) << 2U) | ((hsd->CSD[2] & 0xC0000000U) >> 30U)); + + pCSD->MaxRdCurrentVDDMin = (uint8_t)((hsd->CSD[2] & 0x38000000U) >> 27U); + + pCSD->MaxRdCurrentVDDMax = (uint8_t)((hsd->CSD[2] & 0x07000000U) >> 24U); + + pCSD->MaxWrCurrentVDDMin = (uint8_t)((hsd->CSD[2] & 0x00E00000U) >> 21U); + + pCSD->MaxWrCurrentVDDMax = (uint8_t)((hsd->CSD[2] & 0x001C0000U) >> 18U); + + pCSD->DeviceSizeMul = (uint8_t)((hsd->CSD[2] & 0x00038000U) >> 15U); + + hsd->SdCard.BlockNbr = (pCSD->DeviceSize + 1U) ; + hsd->SdCard.BlockNbr *= (1UL << ((pCSD->DeviceSizeMul & 0x07U) + 2U)); + hsd->SdCard.BlockSize = (1UL << (pCSD->RdBlockLen & 0x0FU)); + + hsd->SdCard.LogBlockNbr = (hsd->SdCard.BlockNbr) * ((hsd->SdCard.BlockSize) / 512U); + hsd->SdCard.LogBlockSize = 512U; + } + else if(hsd->SdCard.CardType == CARD_SDHC_SDXC) + { + /* Byte 7 */ + pCSD->DeviceSize = (((hsd->CSD[1] & 0x0000003FU) << 16U) | ((hsd->CSD[2] & 0xFFFF0000U) >> 16U)); + + hsd->SdCard.BlockNbr = ((pCSD->DeviceSize + 1U) * 1024U); + hsd->SdCard.LogBlockNbr = hsd->SdCard.BlockNbr; + hsd->SdCard.BlockSize = 512U; + hsd->SdCard.LogBlockSize = hsd->SdCard.BlockSize; + } + else + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + 
hsd->ErrorCode |= HAL_SD_ERROR_UNSUPPORTED_FEATURE; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + + pCSD->EraseGrSize = (uint8_t)((hsd->CSD[2] & 0x00004000U) >> 14U); + + pCSD->EraseGrMul = (uint8_t)((hsd->CSD[2] & 0x00003F80U) >> 7U); + + pCSD->WrProtectGrSize = (uint8_t)(hsd->CSD[2] & 0x0000007FU); + + pCSD->WrProtectGrEnable = (uint8_t)((hsd->CSD[3] & 0x80000000U) >> 31U); + + pCSD->ManDeflECC = (uint8_t)((hsd->CSD[3] & 0x60000000U) >> 29U); + + pCSD->WrSpeedFact = (uint8_t)((hsd->CSD[3] & 0x1C000000U) >> 26U); + + pCSD->MaxWrBlockLen= (uint8_t)((hsd->CSD[3] & 0x03C00000U) >> 22U); + + pCSD->WriteBlockPaPartial = (uint8_t)((hsd->CSD[3] & 0x00200000U) >> 21U); + + pCSD->Reserved3 = 0; + + pCSD->ContentProtectAppli = (uint8_t)((hsd->CSD[3] & 0x00010000U) >> 16U); + + pCSD->FileFormatGroup = (uint8_t)((hsd->CSD[3] & 0x00008000U) >> 15U); + + pCSD->CopyFlag = (uint8_t)((hsd->CSD[3] & 0x00004000U) >> 14U); + + pCSD->PermWrProtect = (uint8_t)((hsd->CSD[3] & 0x00002000U) >> 13U); + + pCSD->TempWrProtect = (uint8_t)((hsd->CSD[3] & 0x00001000U) >> 12U); + + pCSD->FileFormat = (uint8_t)((hsd->CSD[3] & 0x00000C00U) >> 10U); + + pCSD->ECC= (uint8_t)((hsd->CSD[3] & 0x00000300U) >> 8U); + + pCSD->CSD_CRC = (uint8_t)((hsd->CSD[3] & 0x000000FEU) >> 1U); + + pCSD->Reserved4 = 1; + + return HAL_OK; +} + +/** + * @brief Gets the SD status info. + * @param hsd: Pointer to SD handle + * @param pStatus: Pointer to the HAL_SD_CardStatusTypeDef structure that + * will contain the SD card status information + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_GetCardStatus(SD_HandleTypeDef *hsd, HAL_SD_CardStatusTypeDef *pStatus) +{ + uint32_t sd_status[16]; + uint32_t errorstate; + + errorstate = SD_SendSDStatus(hsd, sd_status); + if(errorstate != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->ErrorCode |= errorstate; + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + else + { + pStatus->DataBusWidth = (uint8_t)((sd_status[0] & 0xC0U) >> 6U); + + pStatus->SecuredMode = (uint8_t)((sd_status[0] & 0x20U) >> 5U); + + pStatus->CardType = (uint16_t)(((sd_status[0] & 0x00FF0000U) >> 8U) | ((sd_status[0] & 0xFF000000U) >> 24U)); + + pStatus->ProtectedAreaSize = (((sd_status[1] & 0xFFU) << 24U) | ((sd_status[1] & 0xFF00U) << 8U) | + ((sd_status[1] & 0xFF0000U) >> 8U) | ((sd_status[1] & 0xFF000000U) >> 24U)); + + pStatus->SpeedClass = (uint8_t)(sd_status[2] & 0xFFU); + + pStatus->PerformanceMove = (uint8_t)((sd_status[2] & 0xFF00U) >> 8U); + + pStatus->AllocationUnitSize = (uint8_t)((sd_status[2] & 0xF00000U) >> 20U); + + pStatus->EraseSize = (uint16_t)(((sd_status[2] & 0xFF000000U) >> 16U) | (sd_status[3] & 0xFFU)); + + pStatus->EraseTimeout = (uint8_t)((sd_status[3] & 0xFC00U) >> 10U); + + pStatus->EraseOffset = (uint8_t)((sd_status[3] & 0x0300U) >> 8U); + } + + return HAL_OK; +} + +/** + * @brief Gets the SD card info. 
+ * @param hsd: Pointer to SD handle + * @param pCardInfo: Pointer to the HAL_SD_CardInfoTypeDef structure that + * will contain the SD card status information + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_GetCardInfo(SD_HandleTypeDef *hsd, HAL_SD_CardInfoTypeDef *pCardInfo) +{ + pCardInfo->CardType = (uint32_t)(hsd->SdCard.CardType); + pCardInfo->CardVersion = (uint32_t)(hsd->SdCard.CardVersion); + pCardInfo->Class = (uint32_t)(hsd->SdCard.Class); + pCardInfo->RelCardAdd = (uint32_t)(hsd->SdCard.RelCardAdd); + pCardInfo->BlockNbr = (uint32_t)(hsd->SdCard.BlockNbr); + pCardInfo->BlockSize = (uint32_t)(hsd->SdCard.BlockSize); + pCardInfo->LogBlockNbr = (uint32_t)(hsd->SdCard.LogBlockNbr); + pCardInfo->LogBlockSize = (uint32_t)(hsd->SdCard.LogBlockSize); + + return HAL_OK; +} + +/** + * @brief Enables wide bus operation for the requested card if supported by + * card. + * @param hsd: Pointer to SD handle + * @param WideMode: Specifies the SD card wide bus mode + * This parameter can be one of the following values: + * @arg SDMMC_BUS_WIDE_8B: 8-bit data transfer + * @arg SDMMC_BUS_WIDE_4B: 4-bit data transfer + * @arg SDMMC_BUS_WIDE_1B: 1-bit data transfer + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_ConfigWideBusOperation(SD_HandleTypeDef *hsd, uint32_t WideMode) +{ + SDMMC_InitTypeDef Init; + uint32_t errorstate; + + /* Check the parameters */ + assert_param(IS_SDMMC_BUS_WIDE(WideMode)); + + /* Change State */ + hsd->State = HAL_SD_STATE_BUSY; + + if(hsd->SdCard.CardType != CARD_SECURED) + { + if(WideMode == SDMMC_BUS_WIDE_8B) + { + hsd->ErrorCode |= HAL_SD_ERROR_UNSUPPORTED_FEATURE; + } + else if(WideMode == SDMMC_BUS_WIDE_4B) + { + errorstate = SD_WideBus_Enable(hsd); + + hsd->ErrorCode |= errorstate; + } + else if(WideMode == SDMMC_BUS_WIDE_1B) + { + errorstate = SD_WideBus_Disable(hsd); + + hsd->ErrorCode |= errorstate; + } + else + { + /* WideMode is not a valid argument*/ + hsd->ErrorCode |= HAL_SD_ERROR_PARAM; + } + } + else + { + /* MMC Card does not support this feature */ + hsd->ErrorCode |= HAL_SD_ERROR_UNSUPPORTED_FEATURE; + } + + if(hsd->ErrorCode != HAL_SD_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + hsd->State = HAL_SD_STATE_READY; + return HAL_ERROR; + } + else + { + /* Configure the SDMMC peripheral */ + Init.ClockEdge = hsd->Init.ClockEdge; + Init.ClockBypass = hsd->Init.ClockBypass; + Init.ClockPowerSave = hsd->Init.ClockPowerSave; + Init.BusWide = WideMode; + Init.HardwareFlowControl = hsd->Init.HardwareFlowControl; + Init.ClockDiv = hsd->Init.ClockDiv; + (void)SDMMC_Init(hsd->Instance, Init); + } + + /* Change State */ + hsd->State = HAL_SD_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Gets the current sd card data state. + * @param hsd: pointer to SD handle + * @retval Card state + */ +HAL_SD_CardStateTypeDef HAL_SD_GetCardState(SD_HandleTypeDef *hsd) +{ + uint32_t cardstate; + uint32_t errorstate; + uint32_t resp1 = 0; + + errorstate = SD_SendStatus(hsd, &resp1); + if(errorstate != HAL_SD_ERROR_NONE) + { + hsd->ErrorCode |= errorstate; + } + + cardstate = ((resp1 >> 9U) & 0x0FU); + + return (HAL_SD_CardStateTypeDef)cardstate; +} + +/** + * @brief Abort the current transfer and disable the SD. + * @param hsd: pointer to a SD_HandleTypeDef structure that contains + * the configuration information for SD module. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_Abort(SD_HandleTypeDef *hsd) +{ + HAL_SD_CardStateTypeDef CardState; + uint32_t context = hsd->Context; + + /* DIsable All interrupts */ + __HAL_SD_DISABLE_IT(hsd, SDMMC_IT_DATAEND | SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT|\ + SDMMC_IT_TXUNDERR| SDMMC_IT_RXOVERR); + + /* Clear All flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + CLEAR_BIT(hsd->Instance->DCTRL, SDMMC_DCTRL_DTEN); + + if ((context & SD_CONTEXT_DMA) != 0U) + { + /* Disable the SD DMA request */ + hsd->Instance->DCTRL &= (uint32_t)~((uint32_t)SDMMC_DCTRL_DMAEN); + + /* Abort the SD DMA Tx channel */ + if (((context & SD_CONTEXT_WRITE_SINGLE_BLOCK) != 0U) || ((context & SD_CONTEXT_WRITE_MULTIPLE_BLOCK) != 0U)) + { + if(HAL_DMA_Abort(hsd->hdmatx) != HAL_OK) + { + hsd->ErrorCode |= HAL_SD_ERROR_DMA; + } + } + /* Abort the SD DMA Rx channel */ + else if (((context & SD_CONTEXT_READ_SINGLE_BLOCK) != 0U) || ((context & SD_CONTEXT_READ_MULTIPLE_BLOCK) != 0U)) + { + if(HAL_DMA_Abort(hsd->hdmarx) != HAL_OK) + { + hsd->ErrorCode |= HAL_SD_ERROR_DMA; + } + } + else + { + /* Nothing to do */ + } + } + + hsd->State = HAL_SD_STATE_READY; + + /* Initialize the SD operation */ + hsd->Context = SD_CONTEXT_NONE; + + CardState = HAL_SD_GetCardState(hsd); + if((CardState == HAL_SD_CARD_RECEIVING) || (CardState == HAL_SD_CARD_SENDING)) + { + hsd->ErrorCode = SDMMC_CmdStopTransfer(hsd->Instance); + } + if(hsd->ErrorCode != HAL_SD_ERROR_NONE) + { + return HAL_ERROR; + } + return HAL_OK; +} + +/** + * @brief Abort the current transfer and disable the SD (IT mode). + * @param hsd: pointer to a SD_HandleTypeDef structure that contains + * the configuration information for SD module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SD_Abort_IT(SD_HandleTypeDef *hsd) +{ + HAL_SD_CardStateTypeDef CardState; + uint32_t context = hsd->Context; + + /* Disable All interrupts */ + __HAL_SD_DISABLE_IT(hsd, SDMMC_IT_DATAEND | SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT|\ + SDMMC_IT_TXUNDERR| SDMMC_IT_RXOVERR); + + CLEAR_BIT(hsd->Instance->DCTRL, SDMMC_DCTRL_DTEN); + + if ((context & SD_CONTEXT_DMA) != 0U) + { + /* Disable the SD DMA request */ + hsd->Instance->DCTRL &= (uint32_t)~((uint32_t)SDMMC_DCTRL_DMAEN); + + /* Abort the SD DMA Tx channel */ + if (((context & SD_CONTEXT_WRITE_SINGLE_BLOCK) != 0U) || ((context & SD_CONTEXT_WRITE_MULTIPLE_BLOCK) != 0U)) + { + hsd->hdmatx->XferAbortCallback = SD_DMATxAbort; + if(HAL_DMA_Abort_IT(hsd->hdmatx) != HAL_OK) + { + hsd->hdmatx = NULL; + } + } + /* Abort the SD DMA Rx channel */ + else if (((context & SD_CONTEXT_READ_SINGLE_BLOCK) != 0U) || ((context & SD_CONTEXT_READ_MULTIPLE_BLOCK) != 0U)) + { + hsd->hdmarx->XferAbortCallback = SD_DMARxAbort; + if(HAL_DMA_Abort_IT(hsd->hdmarx) != HAL_OK) + { + hsd->hdmarx = NULL; + } + } + else + { + /* Nothing to do */ + } + } + /* No transfer ongoing on both DMA channels*/ + else + { + /* Clear All flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + CardState = HAL_SD_GetCardState(hsd); + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + if((CardState == HAL_SD_CARD_RECEIVING) || (CardState == HAL_SD_CARD_SENDING)) + { + hsd->ErrorCode = SDMMC_CmdStopTransfer(hsd->Instance); + } + if(hsd->ErrorCode != HAL_SD_ERROR_NONE) + { + return HAL_ERROR; + } + else + { +#if defined (USE_HAL_SD_REGISTER_CALLBACKS) && (USE_HAL_SD_REGISTER_CALLBACKS == 1U) + hsd->AbortCpltCallback(hsd); +#else + HAL_SD_AbortCallback(hsd); +#endif /* USE_HAL_SD_REGISTER_CALLBACKS */ + } + } + + return HAL_OK; +} + 
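+/* Usage sketch (not part of the ST driver API above, only an illustration of it):
+   a minimal DMA read round-trip. hsd1, aRxBuffer and rxDone are application-side
+   assumptions; the handle is expected to be initialized elsewhere (for example by
+   the CubeMX-generated code in sdmmc.c) with its DMA streams linked in the MSP
+   init. D-Cache/MPU maintenance for aRxBuffer is deliberately left out.
+
+   @code
+     #include "stm32f7xx_hal.h"
+
+     extern SD_HandleTypeDef hsd1;            // assumed, initialized elsewhere
+
+     static uint8_t aRxBuffer[4U * 512U];     // room for 4 logical blocks
+     static volatile uint32_t rxDone;         // 0 = pending, 1 = done, 2 = error
+
+     void HAL_SD_RxCpltCallback(SD_HandleTypeDef *hsd)
+     {
+       rxDone = 1U;                           // DATAEND reached, DMA finished
+     }
+
+     void HAL_SD_ErrorCallback(SD_HandleTypeDef *hsd)
+     {
+       rxDone = 2U;                           // details via HAL_SD_GetError(hsd)
+     }
+
+     HAL_StatusTypeDef ReadFourBlocks(uint32_t firstBlock)
+     {
+       rxDone = 0U;
+       if (HAL_SD_ReadBlocks_DMA(&hsd1, aRxBuffer, firstBlock, 4U) != HAL_OK)
+       {
+         return HAL_ERROR;
+       }
+       while (rxDone == 0U) { }               // or pend on an RTOS semaphore
+       while (HAL_SD_GetCardState(&hsd1) != HAL_SD_CARD_TRANSFER) { }
+       return (rxDone == 1U) ? HAL_OK : HAL_ERROR;
+     }
+   @endcode
+*/
+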
+/** + * @} + */ + +/** + * @} + */ + +/* Private function ----------------------------------------------------------*/ +/** @addtogroup SD_Private_Functions + * @{ + */ + +/** + * @brief DMA SD transmit process complete callback + * @param hdma: DMA handle + * @retval None + */ +static void SD_DMATransmitCplt(DMA_HandleTypeDef *hdma) +{ + SD_HandleTypeDef* hsd = (SD_HandleTypeDef* )(hdma->Parent); + + /* Enable DATAEND Interrupt */ + __HAL_SD_ENABLE_IT(hsd, (SDMMC_IT_DATAEND)); +} + +/** + * @brief DMA SD receive process complete callback + * @param hdma: DMA handle + * @retval None + */ +static void SD_DMAReceiveCplt(DMA_HandleTypeDef *hdma) +{ + SD_HandleTypeDef* hsd = (SD_HandleTypeDef* )(hdma->Parent); + uint32_t errorstate; + + /* Send stop command in multiblock write */ + if(hsd->Context == (SD_CONTEXT_READ_MULTIPLE_BLOCK | SD_CONTEXT_DMA)) + { + errorstate = SDMMC_CmdStopTransfer(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + hsd->ErrorCode |= errorstate; +#if (USE_HAL_SD_REGISTER_CALLBACKS == 1) + hsd->ErrorCallback(hsd); +#else + HAL_SD_ErrorCallback(hsd); +#endif + } + } + + /* Disable the DMA transfer for transmit request by setting the DMAEN bit + in the SD DCTRL register */ + hsd->Instance->DCTRL &= (uint32_t)~((uint32_t)SDMMC_DCTRL_DMAEN); + + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + +#if (USE_HAL_SD_REGISTER_CALLBACKS == 1) + hsd->RxCpltCallback(hsd); +#else + HAL_SD_RxCpltCallback(hsd); +#endif +} + +/** + * @brief DMA SD communication error callback + * @param hdma: DMA handle + * @retval None + */ +static void SD_DMAError(DMA_HandleTypeDef *hdma) +{ + SD_HandleTypeDef* hsd = (SD_HandleTypeDef* )(hdma->Parent); + HAL_SD_CardStateTypeDef CardState; + uint32_t RxErrorCode, TxErrorCode; + + /* if DMA error is FIFO error ignore it */ + if(HAL_DMA_GetError(hdma) != HAL_DMA_ERROR_FE) + { + RxErrorCode = hsd->hdmarx->ErrorCode; + TxErrorCode = hsd->hdmatx->ErrorCode; + if((RxErrorCode == HAL_DMA_ERROR_TE) || (TxErrorCode == HAL_DMA_ERROR_TE)) + { + /* Clear All flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_FLAGS); + + /* Disable All interrupts */ + __HAL_SD_DISABLE_IT(hsd, SDMMC_IT_DATAEND | SDMMC_IT_DCRCFAIL | SDMMC_IT_DTIMEOUT|\ + SDMMC_IT_TXUNDERR| SDMMC_IT_RXOVERR); + + hsd->ErrorCode |= HAL_SD_ERROR_DMA; + CardState = HAL_SD_GetCardState(hsd); + if((CardState == HAL_SD_CARD_RECEIVING) || (CardState == HAL_SD_CARD_SENDING)) + { + hsd->ErrorCode |= SDMMC_CmdStopTransfer(hsd->Instance); + } + + hsd->State= HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + } + +#if (USE_HAL_SD_REGISTER_CALLBACKS == 1) + hsd->ErrorCallback(hsd); +#else + HAL_SD_ErrorCallback(hsd); +#endif + } +} + +/** + * @brief DMA SD Tx Abort callback + * @param hdma: DMA handle + * @retval None + */ +static void SD_DMATxAbort(DMA_HandleTypeDef *hdma) +{ + SD_HandleTypeDef* hsd = (SD_HandleTypeDef* )(hdma->Parent); + HAL_SD_CardStateTypeDef CardState; + + /* Clear All flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + CardState = HAL_SD_GetCardState(hsd); + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + if((CardState == HAL_SD_CARD_RECEIVING) || (CardState == HAL_SD_CARD_SENDING)) + { + hsd->ErrorCode |= SDMMC_CmdStopTransfer(hsd->Instance); + } + + if(hsd->ErrorCode == HAL_SD_ERROR_NONE) + { +#if (USE_HAL_SD_REGISTER_CALLBACKS == 1) + hsd->AbortCpltCallback(hsd); +#else + HAL_SD_AbortCallback(hsd); +#endif + } + else + { +#if 
(USE_HAL_SD_REGISTER_CALLBACKS == 1) + hsd->ErrorCallback(hsd); +#else + HAL_SD_ErrorCallback(hsd); +#endif + } +} + +/** + * @brief DMA SD Rx Abort callback + * @param hdma: DMA handle + * @retval None + */ +static void SD_DMARxAbort(DMA_HandleTypeDef *hdma) +{ + SD_HandleTypeDef* hsd = (SD_HandleTypeDef* )(hdma->Parent); + HAL_SD_CardStateTypeDef CardState; + + /* Clear All flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + CardState = HAL_SD_GetCardState(hsd); + hsd->State = HAL_SD_STATE_READY; + hsd->Context = SD_CONTEXT_NONE; + if((CardState == HAL_SD_CARD_RECEIVING) || (CardState == HAL_SD_CARD_SENDING)) + { + hsd->ErrorCode |= SDMMC_CmdStopTransfer(hsd->Instance); + } + + if(hsd->ErrorCode == HAL_SD_ERROR_NONE) + { +#if (USE_HAL_SD_REGISTER_CALLBACKS == 1) + hsd->AbortCpltCallback(hsd); +#else + HAL_SD_AbortCallback(hsd); +#endif + } + else + { +#if (USE_HAL_SD_REGISTER_CALLBACKS == 1) + hsd->ErrorCallback(hsd); +#else + HAL_SD_ErrorCallback(hsd); +#endif + } +} + +/** + * @brief Initializes the sd card. + * @param hsd: Pointer to SD handle + * @retval SD Card error state + */ +static uint32_t SD_InitCard(SD_HandleTypeDef *hsd) +{ + HAL_SD_CardCSDTypeDef CSD; + uint32_t errorstate; + uint16_t sd_rca = 1U; + + /* Check the power State */ + if(SDMMC_GetPowerState(hsd->Instance) == 0U) + { + /* Power off */ + return HAL_SD_ERROR_REQUEST_NOT_APPLICABLE; + } + + if(hsd->SdCard.CardType != CARD_SECURED) + { + /* Send CMD2 ALL_SEND_CID */ + errorstate = SDMMC_CmdSendCID(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + else + { + /* Get Card identification number data */ + hsd->CID[0U] = SDMMC_GetResponse(hsd->Instance, SDMMC_RESP1); + hsd->CID[1U] = SDMMC_GetResponse(hsd->Instance, SDMMC_RESP2); + hsd->CID[2U] = SDMMC_GetResponse(hsd->Instance, SDMMC_RESP3); + hsd->CID[3U] = SDMMC_GetResponse(hsd->Instance, SDMMC_RESP4); + } + } + + if(hsd->SdCard.CardType != CARD_SECURED) + { + /* Send CMD3 SET_REL_ADDR with argument 0 */ + /* SD Card publishes its RCA. 
*/ + errorstate = SDMMC_CmdSetRelAdd(hsd->Instance, &sd_rca); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + } + if(hsd->SdCard.CardType != CARD_SECURED) + { + /* Get the SD card RCA */ + hsd->SdCard.RelCardAdd = sd_rca; + + /* Send CMD9 SEND_CSD with argument as card's RCA */ + errorstate = SDMMC_CmdSendCSD(hsd->Instance, (uint32_t)(hsd->SdCard.RelCardAdd << 16U)); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + else + { + /* Get Card Specific Data */ + hsd->CSD[0U] = SDMMC_GetResponse(hsd->Instance, SDMMC_RESP1); + hsd->CSD[1U] = SDMMC_GetResponse(hsd->Instance, SDMMC_RESP2); + hsd->CSD[2U] = SDMMC_GetResponse(hsd->Instance, SDMMC_RESP3); + hsd->CSD[3U] = SDMMC_GetResponse(hsd->Instance, SDMMC_RESP4); + } + } + + /* Get the Card Class */ + hsd->SdCard.Class = (SDMMC_GetResponse(hsd->Instance, SDMMC_RESP2) >> 20U); + + /* Get CSD parameters */ + if (HAL_SD_GetCardCSD(hsd, &CSD) != HAL_OK) + { + return HAL_SD_ERROR_UNSUPPORTED_FEATURE; + } + + /* Select the Card */ + errorstate = SDMMC_CmdSelDesel(hsd->Instance, (uint32_t)(((uint32_t)hsd->SdCard.RelCardAdd) << 16U)); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + /* Configure SDMMC peripheral interface */ + (void)SDMMC_Init(hsd->Instance, hsd->Init); + + /* All cards are initialized */ + return HAL_SD_ERROR_NONE; +} + +/** + * @brief Enquires cards about their operating voltage and configures clock + * controls and stores SD information that will be needed in future + * in the SD handle. + * @param hsd: Pointer to SD handle + * @retval error state + */ +static uint32_t SD_PowerON(SD_HandleTypeDef *hsd) +{ + __IO uint32_t count = 0U; + uint32_t response = 0U, validvoltage = 0U; + uint32_t errorstate; + + /* CMD0: GO_IDLE_STATE */ + errorstate = SDMMC_CmdGoIdleState(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + /* CMD8: SEND_IF_COND: Command available only on V2.0 cards */ + errorstate = SDMMC_CmdOperCond(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + hsd->SdCard.CardVersion = CARD_V1_X; + /* CMD0: GO_IDLE_STATE */ + errorstate = SDMMC_CmdGoIdleState(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + } + else + { + hsd->SdCard.CardVersion = CARD_V2_X; + } + + if( hsd->SdCard.CardVersion == CARD_V2_X) + { + /* SEND CMD55 APP_CMD with RCA as 0 */ + errorstate = SDMMC_CmdAppCommand(hsd->Instance, 0); + if(errorstate != HAL_SD_ERROR_NONE) + { + return HAL_SD_ERROR_UNSUPPORTED_FEATURE; + } + } + /* SD CARD */ + /* Send ACMD41 SD_APP_OP_COND with Argument 0x80100000 */ + while((count < SDMMC_MAX_VOLT_TRIAL) && (validvoltage == 0U)) + { + /* SEND CMD55 APP_CMD with RCA as 0 */ + errorstate = SDMMC_CmdAppCommand(hsd->Instance, 0); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + /* Send CMD41 */ + errorstate = SDMMC_CmdAppOperCommand(hsd->Instance, SDMMC_VOLTAGE_WINDOW_SD | SDMMC_HIGH_CAPACITY | SD_SWITCH_1_8V_CAPACITY); + if(errorstate != HAL_SD_ERROR_NONE) + { + return HAL_SD_ERROR_UNSUPPORTED_FEATURE; + } + + /* Get command response */ + response = SDMMC_GetResponse(hsd->Instance, SDMMC_RESP1); + + /* Get operating voltage*/ + validvoltage = (((response >> 31U) == 1U) ? 
1U : 0U); + + count++; + } + + if(count >= SDMMC_MAX_VOLT_TRIAL) + { + return HAL_SD_ERROR_INVALID_VOLTRANGE; + } + + if((response & SDMMC_HIGH_CAPACITY) == SDMMC_HIGH_CAPACITY) /* (response &= SD_HIGH_CAPACITY) */ + { + hsd->SdCard.CardType = CARD_SDHC_SDXC; + } + else + { + hsd->SdCard.CardType = CARD_SDSC; + } + + + return HAL_SD_ERROR_NONE; +} + +/** + * @brief Turns the SDMMC output signals off. + * @param hsd: Pointer to SD handle + * @retval None + */ +static void SD_PowerOFF(SD_HandleTypeDef *hsd) +{ + /* Set Power State to OFF */ + (void)SDMMC_PowerState_OFF(hsd->Instance); +} + +/** + * @brief Send Status info command. + * @param hsd: pointer to SD handle + * @param pSDstatus: Pointer to the buffer that will contain the SD card status + * SD Status register) + * @retval error state + */ +static uint32_t SD_SendSDStatus(SD_HandleTypeDef *hsd, uint32_t *pSDstatus) +{ + SDMMC_DataInitTypeDef config; + uint32_t errorstate; + uint32_t tickstart = HAL_GetTick(); + uint32_t count; + uint32_t *pData = pSDstatus; + + /* Check SD response */ + if((SDMMC_GetResponse(hsd->Instance, SDMMC_RESP1) & SDMMC_CARD_LOCKED) == SDMMC_CARD_LOCKED) + { + return HAL_SD_ERROR_LOCK_UNLOCK_FAILED; + } + + /* Set block size for card if it is not equal to current block size for card */ + errorstate = SDMMC_CmdBlockLength(hsd->Instance, 64U); + if(errorstate != HAL_SD_ERROR_NONE) + { + hsd->ErrorCode |= HAL_SD_ERROR_NONE; + return errorstate; + } + + /* Send CMD55 */ + errorstate = SDMMC_CmdAppCommand(hsd->Instance, (uint32_t)(hsd->SdCard.RelCardAdd << 16U)); + if(errorstate != HAL_SD_ERROR_NONE) + { + hsd->ErrorCode |= HAL_SD_ERROR_NONE; + return errorstate; + } + + /* Configure the SD DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = 64U; + config.DataBlockSize = SDMMC_DATABLOCK_SIZE_64B; + config.TransferDir = SDMMC_TRANSFER_DIR_TO_SDMMC; + config.TransferMode = SDMMC_TRANSFER_MODE_BLOCK; + config.DPSM = SDMMC_DPSM_ENABLE; + (void)SDMMC_ConfigData(hsd->Instance, &config); + + /* Send ACMD13 (SD_APP_STAUS) with argument as card's RCA */ + errorstate = SDMMC_CmdStatusRegister(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + hsd->ErrorCode |= HAL_SD_ERROR_NONE; + return errorstate; + } + + /* Get status data */ + while(!__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXOVERR | SDMMC_FLAG_DCRCFAIL | SDMMC_FLAG_DTIMEOUT | SDMMC_FLAG_DBCKEND)) + { + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXFIFOHF)) + { + for(count = 0U; count < 8U; count++) + { + *pData = SDMMC_ReadFIFO(hsd->Instance); + pData++; + } + } + + if((HAL_GetTick() - tickstart) >= SDMMC_DATATIMEOUT) + { + return HAL_SD_ERROR_TIMEOUT; + } + } + + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DTIMEOUT)) + { + return HAL_SD_ERROR_DATA_TIMEOUT; + } + else if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DCRCFAIL)) + { + return HAL_SD_ERROR_DATA_CRC_FAIL; + } + else if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXOVERR)) + { + return HAL_SD_ERROR_RX_OVERRUN; + } + else + { + /* Nothing to do */ + } + + while ((__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXDAVL))) + { + *pData = SDMMC_ReadFIFO(hsd->Instance); + pData++; + + if((HAL_GetTick() - tickstart) >= SDMMC_DATATIMEOUT) + { + return HAL_SD_ERROR_TIMEOUT; + } + } + + /* Clear all the static status flags*/ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + return HAL_SD_ERROR_NONE; +} + +/** + * @brief Returns the current card's status. 
+ * @param hsd: Pointer to SD handle + * @param pCardStatus: pointer to the buffer that will contain the SD card + * status (Card Status register) + * @retval error state + */ +static uint32_t SD_SendStatus(SD_HandleTypeDef *hsd, uint32_t *pCardStatus) +{ + uint32_t errorstate; + + if(pCardStatus == NULL) + { + return HAL_SD_ERROR_PARAM; + } + + /* Send Status command */ + errorstate = SDMMC_CmdSendStatus(hsd->Instance, (uint32_t)(hsd->SdCard.RelCardAdd << 16U)); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + /* Get SD card status */ + *pCardStatus = SDMMC_GetResponse(hsd->Instance, SDMMC_RESP1); + + return HAL_SD_ERROR_NONE; +} + +/** + * @brief Enables the SDMMC wide bus mode. + * @param hsd: pointer to SD handle + * @retval error state + */ +static uint32_t SD_WideBus_Enable(SD_HandleTypeDef *hsd) +{ + uint32_t scr[2U] = {0U, 0U}; + uint32_t errorstate; + + if((SDMMC_GetResponse(hsd->Instance, SDMMC_RESP1) & SDMMC_CARD_LOCKED) == SDMMC_CARD_LOCKED) + { + return HAL_SD_ERROR_LOCK_UNLOCK_FAILED; + } + + /* Get SCR Register */ + errorstate = SD_FindSCR(hsd, scr); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + /* If requested card supports wide bus operation */ + if((scr[1U] & SDMMC_WIDE_BUS_SUPPORT) != SDMMC_ALLZERO) + { + /* Send CMD55 APP_CMD with argument as card's RCA.*/ + errorstate = SDMMC_CmdAppCommand(hsd->Instance, (uint32_t)(hsd->SdCard.RelCardAdd << 16U)); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + /* Send ACMD6 APP_CMD with argument as 2 for wide bus mode */ + errorstate = SDMMC_CmdBusWidth(hsd->Instance, 2U); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + return HAL_SD_ERROR_NONE; + } + else + { + return HAL_SD_ERROR_REQUEST_NOT_APPLICABLE; + } +} + +/** + * @brief Disables the SDMMC wide bus mode. + * @param hsd: Pointer to SD handle + * @retval error state + */ +static uint32_t SD_WideBus_Disable(SD_HandleTypeDef *hsd) +{ + uint32_t scr[2U] = {0U, 0U}; + uint32_t errorstate; + + if((SDMMC_GetResponse(hsd->Instance, SDMMC_RESP1) & SDMMC_CARD_LOCKED) == SDMMC_CARD_LOCKED) + { + return HAL_SD_ERROR_LOCK_UNLOCK_FAILED; + } + + /* Get SCR Register */ + errorstate = SD_FindSCR(hsd, scr); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + /* If requested card supports 1 bit mode operation */ + if((scr[1U] & SDMMC_SINGLE_BUS_SUPPORT) != SDMMC_ALLZERO) + { + /* Send CMD55 APP_CMD with argument as card's RCA */ + errorstate = SDMMC_CmdAppCommand(hsd->Instance, (uint32_t)(hsd->SdCard.RelCardAdd << 16U)); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + /* Send ACMD6 APP_CMD with argument as 0 for single bus mode */ + errorstate = SDMMC_CmdBusWidth(hsd->Instance, 0U); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + return HAL_SD_ERROR_NONE; + } + else + { + return HAL_SD_ERROR_REQUEST_NOT_APPLICABLE; + } +} + + +/** + * @brief Finds the SD card SCR register value. 
+ * @param hsd: Pointer to SD handle + * @param pSCR: pointer to the buffer that will contain the SCR value + * @retval error state + */ +static uint32_t SD_FindSCR(SD_HandleTypeDef *hsd, uint32_t *pSCR) +{ + SDMMC_DataInitTypeDef config; + uint32_t errorstate; + uint32_t tickstart = HAL_GetTick(); + uint32_t index = 0U; + uint32_t tempscr[2U] = {0U, 0U}; + uint32_t *scr = pSCR; + + /* Set Block Size To 8 Bytes */ + errorstate = SDMMC_CmdBlockLength(hsd->Instance, 8U); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + /* Send CMD55 APP_CMD with argument as card's RCA */ + errorstate = SDMMC_CmdAppCommand(hsd->Instance, (uint32_t)((hsd->SdCard.RelCardAdd) << 16U)); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = 8U; + config.DataBlockSize = SDMMC_DATABLOCK_SIZE_8B; + config.TransferDir = SDMMC_TRANSFER_DIR_TO_SDMMC; + config.TransferMode = SDMMC_TRANSFER_MODE_BLOCK; + config.DPSM = SDMMC_DPSM_ENABLE; + (void)SDMMC_ConfigData(hsd->Instance, &config); + + /* Send ACMD51 SD_APP_SEND_SCR with argument as 0 */ + errorstate = SDMMC_CmdSendSCR(hsd->Instance); + if(errorstate != HAL_SD_ERROR_NONE) + { + return errorstate; + } + + while(!__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXOVERR | SDMMC_FLAG_DCRCFAIL | SDMMC_FLAG_DTIMEOUT | SDMMC_FLAG_DBCKEND)) + { + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXDAVL)) + { + *(tempscr + index) = SDMMC_ReadFIFO(hsd->Instance); + index++; + } + + if((HAL_GetTick() - tickstart) >= SDMMC_DATATIMEOUT) + { + return HAL_SD_ERROR_TIMEOUT; + } + } + + if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DTIMEOUT)) + { + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_FLAG_DTIMEOUT); + + return HAL_SD_ERROR_DATA_TIMEOUT; + } + else if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_DCRCFAIL)) + { + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_FLAG_DCRCFAIL); + + return HAL_SD_ERROR_DATA_CRC_FAIL; + } + else if(__HAL_SD_GET_FLAG(hsd, SDMMC_FLAG_RXOVERR)) + { + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_FLAG_RXOVERR); + + return HAL_SD_ERROR_RX_OVERRUN; + } + else + { + /* No error flag set */ + /* Clear all the static flags */ + __HAL_SD_CLEAR_FLAG(hsd, SDMMC_STATIC_DATA_FLAGS); + + *scr = (((tempscr[1] & SDMMC_0TO7BITS) << 24) | ((tempscr[1] & SDMMC_8TO15BITS) << 8) |\ + ((tempscr[1] & SDMMC_16TO23BITS) >> 8) | ((tempscr[1] & SDMMC_24TO31BITS) >> 24)); + scr++; + *scr = (((tempscr[0] & SDMMC_0TO7BITS) << 24) | ((tempscr[0] & SDMMC_8TO15BITS) << 8) |\ + ((tempscr[0] & SDMMC_16TO23BITS) >> 8) | ((tempscr[0] & SDMMC_24TO31BITS) >> 24)); + + } + + return HAL_SD_ERROR_NONE; +} + +/** + * @brief Wrap up reading in non-blocking mode. + * @param hsd: pointer to a SD_HandleTypeDef structure that contains + * the configuration information. + * @retval None + */ +static void SD_Read_IT(SD_HandleTypeDef *hsd) +{ + uint32_t count, data, dataremaining; + uint8_t* tmp; + + tmp = hsd->pRxBuffPtr; + dataremaining = hsd->RxXferSize; + + if (dataremaining > 0U) + { + /* Read data from SDMMC Rx FIFO */ + for(count = 0U; count < 8U; count++) + { + data = SDMMC_ReadFIFO(hsd->Instance); + *tmp = (uint8_t)(data & 0xFFU); + tmp++; + dataremaining--; + *tmp = (uint8_t)((data >> 8U) & 0xFFU); + tmp++; + dataremaining--; + *tmp = (uint8_t)((data >> 16U) & 0xFFU); + tmp++; + dataremaining--; + *tmp = (uint8_t)((data >> 24U) & 0xFFU); + tmp++; + dataremaining--; + } + + hsd->pRxBuffPtr = tmp; + hsd->RxXferSize = dataremaining; + } +} + +/** + * @brief Wrap up writing in non-blocking mode. 
+ * @param hsd: pointer to a SD_HandleTypeDef structure that contains + * the configuration information. + * @retval None + */ +static void SD_Write_IT(SD_HandleTypeDef *hsd) +{ + uint32_t count, data, dataremaining; + uint8_t* tmp; + + tmp = hsd->pTxBuffPtr; + dataremaining = hsd->TxXferSize; + + if (dataremaining > 0U) + { + /* Write data to SDMMC Tx FIFO */ + for(count = 0U; count < 8U; count++) + { + data = (uint32_t)(*tmp); + tmp++; + dataremaining--; + data |= ((uint32_t)(*tmp) << 8U); + tmp++; + dataremaining--; + data |= ((uint32_t)(*tmp) << 16U); + tmp++; + dataremaining--; + data |= ((uint32_t)(*tmp) << 24U); + tmp++; + dataremaining--; + (void)SDMMC_WriteFIFO(hsd->Instance, &data); + } + + hsd->pTxBuffPtr = tmp; + hsd->TxXferSize = dataremaining; + } +} + +/** + * @} + */ + +#endif /* HAL_SD_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* SDMMC1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sdram.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sdram.c new file mode 100644 index 000000000..d2a8408f3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_sdram.c @@ -0,0 +1,1105 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_sdram.c + * @author MCD Application Team + * @brief SDRAM HAL module driver. + * This file provides a generic firmware to drive SDRAM memories mounted + * as external device. + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + This driver is a generic layered driver which contains a set of APIs used to + control SDRAM memories. It uses the FMC layer functions to interface + with SDRAM devices. + The following sequence should be followed to configure the FMC to interface + with SDRAM memories: + + (#) Declare a SDRAM_HandleTypeDef handle structure, for example: + SDRAM_HandleTypeDef hdsram + + (++) Fill the SDRAM_HandleTypeDef handle "Init" field with the allowed + values of the structure member. + + (++) Fill the SDRAM_HandleTypeDef handle "Instance" field with a predefined + base register instance for NOR or SDRAM device + + (#) Declare a FMC_SDRAM_TimingTypeDef structure; for example: + FMC_SDRAM_TimingTypeDef Timing; + and fill its fields with the allowed values of the structure member. + + (#) Initialize the SDRAM Controller by calling the function HAL_SDRAM_Init(). This function + performs the following sequence: + + (##) MSP hardware layer configuration using the function HAL_SDRAM_MspInit() + (##) Control register configuration using the FMC SDRAM interface function + FMC_SDRAM_Init() + (##) Timing register configuration using the FMC SDRAM interface function + FMC_SDRAM_Timing_Init() + (##) Program the SDRAM external device by applying its initialization sequence + according to the device plugged in your hardware. This step is mandatory + for accessing the SDRAM device. + + (#) At this stage you can perform read/write accesses from/to the memory connected + to the SDRAM Bank. 
You can perform either polling or DMA transfer using the + following APIs: + (++) HAL_SDRAM_Read()/HAL_SDRAM_Write() for polling read/write access + (++) HAL_SDRAM_Read_DMA()/HAL_SDRAM_Write_DMA() for DMA read/write transfer + + (#) You can also control the SDRAM device by calling the control APIs HAL_SDRAM_WriteOperation_Enable()/ + HAL_SDRAM_WriteOperation_Disable() to respectively enable/disable the SDRAM write operation or + the function HAL_SDRAM_SendCommand() to send a specified command to the SDRAM + device. The command to be sent must be configured with the FMC_SDRAM_CommandTypeDef + structure. + + (#) You can continuously monitor the SDRAM device HAL state by calling the function + HAL_SDRAM_GetState() + + *** Callback registration *** + ============================================= + [..] + The compilation define USE_HAL_SDRAM_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + + Use Functions @ref HAL_SDRAM_RegisterCallback() to register a user callback, + it allows to register following callbacks: + (+) MspInitCallback : SDRAM MspInit. + (+) MspDeInitCallback : SDRAM MspDeInit. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + Use function @ref HAL_SDRAM_UnRegisterCallback() to reset a callback to the default + weak (surcharged) function. It allows to reset following callbacks: + (+) MspInitCallback : SDRAM MspInit. + (+) MspDeInitCallback : SDRAM MspDeInit. + This function) takes as parameters the HAL peripheral handle and the Callback ID. + + By default, after the @ref HAL_SDRAM_Init and if the state is HAL_SDRAM_STATE_RESET + all callbacks are reset to the corresponding legacy weak (surcharged) functions. + Exception done for MspInit and MspDeInit callbacks that are respectively + reset to the legacy weak (surcharged) functions in the @ref HAL_SDRAM_Init + and @ref HAL_SDRAM_DeInit only when these callbacks are null (not registered beforehand). + If not, MspInit or MspDeInit are not null, the @ref HAL_SDRAM_Init and @ref HAL_SDRAM_DeInit + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) + + Callbacks can be registered/unregistered in READY state only. + Exception done for MspInit/MspDeInit callbacks that can be registered/unregistered + in READY or RESET state, thus registered (user) MspInit/DeInit callbacks can be used + during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using @ref HAL_SDRAM_RegisterCallback before calling @ref HAL_SDRAM_DeInit + or @ref HAL_SDRAM_Init function. + + When The compilation define USE_HAL_SDRAM_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registering feature is not available + and weak (surcharged) callbacks are used. + + @endverbatim + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics.
+ * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup SDRAM SDRAM + * @brief SDRAM driver modules + * @{ + */ +#ifdef HAL_SDRAM_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @defgroup SDRAM_Exported_Functions SDRAM Exported Functions + * @{ + */ + +/** @defgroup SDRAM_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * + @verbatim + ============================================================================== + ##### SDRAM Initialization and de_initialization functions ##### + ============================================================================== + [..] + This section provides functions allowing to initialize/de-initialize + the SDRAM memory + +@endverbatim + * @{ + */ + +/** + * @brief Performs the SDRAM device initialization sequence. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @param Timing Pointer to SDRAM control timing structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_Init(SDRAM_HandleTypeDef *hsdram, FMC_SDRAM_TimingTypeDef *Timing) +{ + /* Check the SDRAM handle parameter */ + if(hsdram == NULL) + { + return HAL_ERROR; + } + + if(hsdram->State == HAL_SDRAM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hsdram->Lock = HAL_UNLOCKED; +#if (USE_HAL_SDRAM_REGISTER_CALLBACKS == 1) + if(hsdram->MspInitCallback == NULL) + { + hsdram->MspInitCallback = HAL_SDRAM_MspInit; + } + hsdram->RefreshErrorCallback = HAL_SDRAM_RefreshErrorCallback; + hsdram->DmaXferCpltCallback = HAL_SDRAM_DMA_XferCpltCallback; + hsdram->DmaXferErrorCallback = HAL_SDRAM_DMA_XferErrorCallback; + + /* Init the low level hardware */ + hsdram->MspInitCallback(hsdram); +#else + /* Initialize the low level hardware (MSP) */ + HAL_SDRAM_MspInit(hsdram); +#endif + } + + /* Initialize the SDRAM controller state */ + hsdram->State = HAL_SDRAM_STATE_BUSY; + + /* Initialize SDRAM control Interface */ + FMC_SDRAM_Init(hsdram->Instance, &(hsdram->Init)); + + /* Initialize SDRAM timing Interface */ + FMC_SDRAM_Timing_Init(hsdram->Instance, Timing, hsdram->Init.SDBank); + + /* Update the SDRAM controller state */ + hsdram->State = HAL_SDRAM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Perform the SDRAM device initialization sequence. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_DeInit(SDRAM_HandleTypeDef *hsdram) +{ +#if (USE_HAL_SDRAM_REGISTER_CALLBACKS == 1) + if(hsdram->MspDeInitCallback == NULL) + { + hsdram->MspDeInitCallback = HAL_SDRAM_MspDeInit; + } + + /* DeInit the low level hardware */ + hsdram->MspDeInitCallback(hsdram); +#else + /* Initialize the low level hardware (MSP) */ + HAL_SDRAM_MspDeInit(hsdram); +#endif + + /* Configure the SDRAM registers with their reset values */ + FMC_SDRAM_DeInit(hsdram->Instance, hsdram->Init.SDBank); + + /* Reset the SDRAM controller state */ + hsdram->State = HAL_SDRAM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hsdram); + + return HAL_OK; +} + +/** + * @brief SDRAM MSP Init. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @retval None + */ +__weak void HAL_SDRAM_MspInit(SDRAM_HandleTypeDef *hsdram) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hsdram); + + /* NOTE: This function Should not be modified, when the callback is needed, + the HAL_SDRAM_MspInit could be implemented in the user file + */ +} + +/** + * @brief SDRAM MSP DeInit. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @retval None + */ +__weak void HAL_SDRAM_MspDeInit(SDRAM_HandleTypeDef *hsdram) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hsdram); + + /* NOTE: This function Should not be modified, when the callback is needed, + the HAL_SDRAM_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief This function handles SDRAM refresh error interrupt request. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @retval HAL status +*/ +void HAL_SDRAM_IRQHandler(SDRAM_HandleTypeDef *hsdram) +{ + /* Check SDRAM interrupt Rising edge flag */ + if(__FMC_SDRAM_GET_FLAG(hsdram->Instance, FMC_SDRAM_FLAG_REFRESH_IT)) + { + /* SDRAM refresh error interrupt callback */ +#if (USE_HAL_SDRAM_REGISTER_CALLBACKS == 1) + hsdram->RefreshErrorCallback(hsdram); +#else + HAL_SDRAM_RefreshErrorCallback(hsdram); +#endif + + /* Clear SDRAM refresh error interrupt pending bit */ + __FMC_SDRAM_CLEAR_FLAG(hsdram->Instance, FMC_SDRAM_FLAG_REFRESH_ERROR); + } +} + +/** + * @brief SDRAM Refresh error callback. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @retval None + */ +__weak void HAL_SDRAM_RefreshErrorCallback(SDRAM_HandleTypeDef *hsdram) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hsdram); + + /* NOTE: This function Should not be modified, when the callback is needed, + the HAL_SDRAM_RefreshErrorCallback could be implemented in the user file + */ +} + +/** + * @brief DMA transfer complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +__weak void HAL_SDRAM_DMA_XferCpltCallback(DMA_HandleTypeDef *hdma) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdma); + + /* NOTE: This function Should not be modified, when the callback is needed, + the HAL_SDRAM_DMA_XferCpltCallback could be implemented in the user file + */ +} + +/** + * @brief DMA transfer complete error callback. 
+ * @param hdma DMA handle + * @retval None + */ +__weak void HAL_SDRAM_DMA_XferErrorCallback(DMA_HandleTypeDef *hdma) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hdma); + + /* NOTE: This function Should not be modified, when the callback is needed, + the HAL_SDRAM_DMA_XferErrorCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup SDRAM_Exported_Functions_Group2 Input and Output functions + * @brief Input Output and memory control functions + * + @verbatim + ============================================================================== + ##### SDRAM Input and Output functions ##### + ============================================================================== + [..] + This section provides functions allowing to use and control the SDRAM memory + +@endverbatim + * @{ + */ + +/** + * @brief Reads 8-bit data buffer from the SDRAM memory. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @param pAddress Pointer to read start address + * @param pDstBuffer Pointer to destination buffer + * @param BufferSize Size of the buffer to read from memory + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_Read_8b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint8_t *pDstBuffer, uint32_t BufferSize) +{ + __IO uint8_t *pSdramAddress = (uint8_t *)pAddress; + + /* Process Locked */ + __HAL_LOCK(hsdram); + + /* Check the SDRAM controller state */ + if(hsdram->State == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + else if(hsdram->State == HAL_SDRAM_STATE_PRECHARGED) + { + return HAL_ERROR; + } + + /* Read data from source */ + for(; BufferSize != 0; BufferSize--) + { + *pDstBuffer = *(__IO uint8_t *)pSdramAddress; + pDstBuffer++; + pSdramAddress++; + } + + /* Process Unlocked */ + __HAL_UNLOCK(hsdram); + + return HAL_OK; +} + + +/** + * @brief Writes 8-bit data buffer to SDRAM memory. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @param pAddress Pointer to write start address + * @param pSrcBuffer Pointer to source buffer to write + * @param BufferSize Size of the buffer to write to memory + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_Write_8b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint8_t *pSrcBuffer, uint32_t BufferSize) +{ + __IO uint8_t *pSdramAddress = (uint8_t *)pAddress; + uint32_t tmp = 0; + + /* Process Locked */ + __HAL_LOCK(hsdram); + + /* Check the SDRAM controller state */ + tmp = hsdram->State; + + if(tmp == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + else if((tmp == HAL_SDRAM_STATE_PRECHARGED) || (tmp == HAL_SDRAM_STATE_WRITE_PROTECTED)) + { + return HAL_ERROR; + } + + /* Write data to memory */ + for(; BufferSize != 0; BufferSize--) + { + *(__IO uint8_t *)pSdramAddress = *pSrcBuffer; + pSrcBuffer++; + pSdramAddress++; + } + + /* Process Unlocked */ + __HAL_UNLOCK(hsdram); + + return HAL_OK; +} + + +/** + * @brief Reads 16-bit data buffer from the SDRAM memory. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. 
+ * @param pAddress Pointer to read start address + * @param pDstBuffer Pointer to destination buffer + * @param BufferSize Size of the buffer to read from memory + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_Read_16b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint16_t *pDstBuffer, uint32_t BufferSize) +{ + __IO uint16_t *pSdramAddress = (uint16_t *)pAddress; + + /* Process Locked */ + __HAL_LOCK(hsdram); + + /* Check the SDRAM controller state */ + if(hsdram->State == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + else if(hsdram->State == HAL_SDRAM_STATE_PRECHARGED) + { + return HAL_ERROR; + } + + /* Read data from source */ + for(; BufferSize != 0; BufferSize--) + { + *pDstBuffer = *(__IO uint16_t *)pSdramAddress; + pDstBuffer++; + pSdramAddress++; + } + + /* Process Unlocked */ + __HAL_UNLOCK(hsdram); + + return HAL_OK; +} + +/** + * @brief Writes 16-bit data buffer to SDRAM memory. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @param pAddress Pointer to write start address + * @param pSrcBuffer Pointer to source buffer to write + * @param BufferSize Size of the buffer to write to memory + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_Write_16b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint16_t *pSrcBuffer, uint32_t BufferSize) +{ + __IO uint16_t *pSdramAddress = (uint16_t *)pAddress; + uint32_t tmp = 0; + + /* Process Locked */ + __HAL_LOCK(hsdram); + + /* Check the SDRAM controller state */ + tmp = hsdram->State; + + if(tmp == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + else if((tmp == HAL_SDRAM_STATE_PRECHARGED) || (tmp == HAL_SDRAM_STATE_WRITE_PROTECTED)) + { + return HAL_ERROR; + } + + /* Write data to memory */ + for(; BufferSize != 0; BufferSize--) + { + *(__IO uint16_t *)pSdramAddress = *pSrcBuffer; + pSrcBuffer++; + pSdramAddress++; + } + + /* Process Unlocked */ + __HAL_UNLOCK(hsdram); + + return HAL_OK; +} + +/** + * @brief Reads 32-bit data buffer from the SDRAM memory. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @param pAddress Pointer to read start address + * @param pDstBuffer Pointer to destination buffer + * @param BufferSize Size of the buffer to read from memory + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_Read_32b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint32_t *pDstBuffer, uint32_t BufferSize) +{ + __IO uint32_t *pSdramAddress = (uint32_t *)pAddress; + + /* Process Locked */ + __HAL_LOCK(hsdram); + + /* Check the SDRAM controller state */ + if(hsdram->State == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + else if(hsdram->State == HAL_SDRAM_STATE_PRECHARGED) + { + return HAL_ERROR; + } + + /* Read data from source */ + for(; BufferSize != 0; BufferSize--) + { + *pDstBuffer = *(__IO uint32_t *)pSdramAddress; + pDstBuffer++; + pSdramAddress++; + } + + /* Process Unlocked */ + __HAL_UNLOCK(hsdram); + + return HAL_OK; +} + +/** + * @brief Writes 32-bit data buffer to SDRAM memory. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. 
+ * @param pAddress Pointer to write start address + * @param pSrcBuffer Pointer to source buffer to write + * @param BufferSize Size of the buffer to write to memory + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_Write_32b(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint32_t *pSrcBuffer, uint32_t BufferSize) +{ + __IO uint32_t *pSdramAddress = (uint32_t *)pAddress; + uint32_t tmp = 0; + + /* Process Locked */ + __HAL_LOCK(hsdram); + + /* Check the SDRAM controller state */ + tmp = hsdram->State; + + if(tmp == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + else if((tmp == HAL_SDRAM_STATE_PRECHARGED) || (tmp == HAL_SDRAM_STATE_WRITE_PROTECTED)) + { + return HAL_ERROR; + } + + /* Write data to memory */ + for(; BufferSize != 0; BufferSize--) + { + *(__IO uint32_t *)pSdramAddress = *pSrcBuffer; + pSrcBuffer++; + pSdramAddress++; + } + + /* Process Unlocked */ + __HAL_UNLOCK(hsdram); + + return HAL_OK; +} + +/** + * @brief Reads a Words data from the SDRAM memory using DMA transfer. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @param pAddress Pointer to read start address + * @param pDstBuffer Pointer to destination buffer + * @param BufferSize Size of the buffer to read from memory + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_Read_DMA(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint32_t *pDstBuffer, uint32_t BufferSize) +{ + uint32_t tmp = 0; + + /* Process Locked */ + __HAL_LOCK(hsdram); + + /* Check the SDRAM controller state */ + tmp = hsdram->State; + + if(tmp == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + else if(tmp == HAL_SDRAM_STATE_PRECHARGED) + { + return HAL_ERROR; + } + + /* Configure DMA user callbacks */ + hsdram->hdma->XferCpltCallback = HAL_SDRAM_DMA_XferCpltCallback; + hsdram->hdma->XferErrorCallback = HAL_SDRAM_DMA_XferErrorCallback; + + /* Enable the DMA Stream */ + HAL_DMA_Start_IT(hsdram->hdma, (uint32_t)pAddress, (uint32_t)pDstBuffer, (uint32_t)BufferSize); + + /* Process Unlocked */ + __HAL_UNLOCK(hsdram); + + return HAL_OK; +} + +/** + * @brief Writes a Words data buffer to SDRAM memory using DMA transfer. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. 
+ * @param pAddress Pointer to write start address + * @param pSrcBuffer Pointer to source buffer to write + * @param BufferSize Size of the buffer to write to memory + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_Write_DMA(SDRAM_HandleTypeDef *hsdram, uint32_t *pAddress, uint32_t *pSrcBuffer, uint32_t BufferSize) +{ + uint32_t tmp = 0; + + /* Process Locked */ + __HAL_LOCK(hsdram); + + /* Check the SDRAM controller state */ + tmp = hsdram->State; + + if(tmp == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + else if((tmp == HAL_SDRAM_STATE_PRECHARGED) || (tmp == HAL_SDRAM_STATE_WRITE_PROTECTED)) + { + return HAL_ERROR; + } + + /* Configure DMA user callbacks */ + hsdram->hdma->XferCpltCallback = HAL_SDRAM_DMA_XferCpltCallback; + hsdram->hdma->XferErrorCallback = HAL_SDRAM_DMA_XferErrorCallback; + + /* Enable the DMA Stream */ + HAL_DMA_Start_IT(hsdram->hdma, (uint32_t)pSrcBuffer, (uint32_t)pAddress, (uint32_t)BufferSize); + + /* Process Unlocked */ + __HAL_UNLOCK(hsdram); + + return HAL_OK; +} +#if (USE_HAL_SDRAM_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User SDRAM Callback + * To be used instead of the weak (surcharged) predefined callback + * @param hsdram : SDRAM handle + * @param CallbackId : ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_SDRAM_MSP_INIT_CB_ID SDRAM MspInit callback ID + * @arg @ref HAL_SDRAM_MSP_DEINIT_CB_ID SDRAM MspDeInit callback ID + * @arg @ref HAL_SDRAM_REFRESH_ERR_CB_ID SDRAM Refresh Error callback ID + * @param pCallback : pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_SDRAM_RegisterCallback (SDRAM_HandleTypeDef *hsdram, HAL_SDRAM_CallbackIDTypeDef CallbackId, pSDRAM_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + HAL_SDRAM_StateTypeDef state; + + if(pCallback == NULL) + { + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hsdram); + + state = hsdram->State; + if((state == HAL_SDRAM_STATE_READY) || (state == HAL_SDRAM_STATE_WRITE_PROTECTED)) + { + switch (CallbackId) + { + case HAL_SDRAM_MSP_INIT_CB_ID : + hsdram->MspInitCallback = pCallback; + break; + case HAL_SDRAM_MSP_DEINIT_CB_ID : + hsdram->MspDeInitCallback = pCallback; + break; + case HAL_SDRAM_REFRESH_ERR_CB_ID : + hsdram->RefreshErrorCallback = pCallback; + break; + default : + /* update return status */ + status = HAL_ERROR; + break; + } + } + else if(hsdram->State == HAL_SDRAM_STATE_RESET) + { + switch (CallbackId) + { + case HAL_SDRAM_MSP_INIT_CB_ID : + hsdram->MspInitCallback = pCallback; + break; + case HAL_SDRAM_MSP_DEINIT_CB_ID : + hsdram->MspDeInitCallback = pCallback; + break; + default : + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hsdram); + return status; +} +/** + * @brief Unregister a User SDRAM Callback + * SDRAM Callback is redirected to the weak (surcharged) predefined callback + * @param hsdram : SDRAM handle + * @param CallbackId : ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_SDRAM_MSP_INIT_CB_ID SDRAM MspInit callback ID + * @arg @ref HAL_SDRAM_MSP_DEINIT_CB_ID SDRAM MspDeInit callback ID + * @arg @ref HAL_SDRAM_REFRESH_ERR_CB_ID SDRAM Refresh Error callback ID + * @arg @ref HAL_SDRAM_DMA_XFER_CPLT_CB_ID SDRAM DMA Xfer Complete callback ID + * @arg @ref HAL_SDRAM_DMA_XFER_ERR_CB_ID SDRAM DMA Xfer Error callback ID + * @retval status + */ 
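+/* Editor's note: an illustrative sketch (not part of the original ST sources) of
+   registering a user callback when USE_HAL_SDRAM_REGISTER_CALLBACKS is set to 1.
+   The handle name hdsram and the function MyRefreshErrorCallback are assumptions;
+   the callback body would typically log or recover from the refresh error.
+
+     void MyRefreshErrorCallback(SDRAM_HandleTypeDef *hsdram)
+     {
+       UNUSED(hsdram);
+     }
+
+     HAL_SDRAM_RegisterCallback(&hdsram, HAL_SDRAM_REFRESH_ERR_CB_ID,
+                                MyRefreshErrorCallback);
+*/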
+HAL_StatusTypeDef HAL_SDRAM_UnRegisterCallback (SDRAM_HandleTypeDef *hsdram, HAL_SDRAM_CallbackIDTypeDef CallbackId) +{ + HAL_StatusTypeDef status = HAL_OK; + HAL_SDRAM_StateTypeDef state; + + /* Process locked */ + __HAL_LOCK(hsdram); + + state = hsdram->State; + if((state == HAL_SDRAM_STATE_READY) || (state == HAL_SDRAM_STATE_WRITE_PROTECTED)) + { + switch (CallbackId) + { + case HAL_SDRAM_MSP_INIT_CB_ID : + hsdram->MspInitCallback = HAL_SDRAM_MspInit; + break; + case HAL_SDRAM_MSP_DEINIT_CB_ID : + hsdram->MspDeInitCallback = HAL_SDRAM_MspDeInit; + break; + case HAL_SDRAM_REFRESH_ERR_CB_ID : + hsdram->RefreshErrorCallback = HAL_SDRAM_RefreshErrorCallback; + break; + case HAL_SDRAM_DMA_XFER_CPLT_CB_ID : + hsdram->DmaXferCpltCallback = HAL_SDRAM_DMA_XferCpltCallback; + break; + case HAL_SDRAM_DMA_XFER_ERR_CB_ID : + hsdram->DmaXferErrorCallback = HAL_SDRAM_DMA_XferErrorCallback; + break; + default : + /* update return status */ + status = HAL_ERROR; + break; + } + } + else if(hsdram->State == HAL_SDRAM_STATE_RESET) + { + switch (CallbackId) + { + case HAL_SDRAM_MSP_INIT_CB_ID : + hsdram->MspInitCallback = HAL_SDRAM_MspInit; + break; + case HAL_SDRAM_MSP_DEINIT_CB_ID : + hsdram->MspDeInitCallback = HAL_SDRAM_MspDeInit; + break; + default : + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hsdram); + return status; +} + +/** + * @brief Register a User SDRAM Callback for DMA transfers + * To be used instead of the weak (surcharged) predefined callback + * @param hsdram : SDRAM handle + * @param CallbackId : ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_SDRAM_DMA_XFER_CPLT_CB_ID SDRAM DMA Xfer Complete callback ID + * @arg @ref HAL_SDRAM_DMA_XFER_ERR_CB_ID SDRAM DMA Xfer Error callback ID + * @param pCallback : pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_SDRAM_RegisterDmaCallback(SDRAM_HandleTypeDef *hsdram, HAL_SDRAM_CallbackIDTypeDef CallbackId, pSDRAM_DmaCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + HAL_SDRAM_StateTypeDef state; + + if(pCallback == NULL) + { + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hsdram); + + state = hsdram->State; + if((state == HAL_SDRAM_STATE_READY) || (state == HAL_SDRAM_STATE_WRITE_PROTECTED)) + { + switch (CallbackId) + { + case HAL_SDRAM_DMA_XFER_CPLT_CB_ID : + hsdram->DmaXferCpltCallback = pCallback; + break; + case HAL_SDRAM_DMA_XFER_ERR_CB_ID : + hsdram->DmaXferErrorCallback = pCallback; + break; + default : + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hsdram); + return status; +} +#endif + +/** + * @} + */ + +/** @defgroup SDRAM_Exported_Functions_Group3 Control functions + * @brief management functions + * +@verbatim + ============================================================================== + ##### SDRAM Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control dynamically + the SDRAM interface. + +@endverbatim + * @{ + */ + +/** + * @brief Enables dynamically SDRAM write protection. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_WriteProtection_Enable(SDRAM_HandleTypeDef *hsdram) +{ + /* Check the SDRAM controller state */ + if(hsdram->State == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + + /* Update the SDRAM state */ + hsdram->State = HAL_SDRAM_STATE_BUSY; + + /* Enable write protection */ + FMC_SDRAM_WriteProtection_Enable(hsdram->Instance, hsdram->Init.SDBank); + + /* Update the SDRAM state */ + hsdram->State = HAL_SDRAM_STATE_WRITE_PROTECTED; + + return HAL_OK; +} + +/** + * @brief Disables dynamically SDRAM write protection. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_WriteProtection_Disable(SDRAM_HandleTypeDef *hsdram) +{ + /* Check the SDRAM controller state */ + if(hsdram->State == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + + /* Update the SDRAM state */ + hsdram->State = HAL_SDRAM_STATE_BUSY; + + /* Disable write protection */ + FMC_SDRAM_WriteProtection_Disable(hsdram->Instance, hsdram->Init.SDBank); + + /* Update the SDRAM state */ + hsdram->State = HAL_SDRAM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Sends Command to the SDRAM bank. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @param Command SDRAM command structure + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_SendCommand(SDRAM_HandleTypeDef *hsdram, FMC_SDRAM_CommandTypeDef *Command, uint32_t Timeout) +{ + /* Check the SDRAM controller state */ + if(hsdram->State == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + + /* Update the SDRAM state */ + hsdram->State = HAL_SDRAM_STATE_BUSY; + + /* Send SDRAM command */ + FMC_SDRAM_SendCommand(hsdram->Instance, Command, Timeout); + + /* Update the SDRAM controller state state */ + if(Command->CommandMode == FMC_SDRAM_CMD_PALL) + { + hsdram->State = HAL_SDRAM_STATE_PRECHARGED; + } + else + { + hsdram->State = HAL_SDRAM_STATE_READY; + } + + return HAL_OK; +} + +/** + * @brief Programs the SDRAM Memory Refresh rate. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @param RefreshRate The SDRAM refresh rate value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_ProgramRefreshRate(SDRAM_HandleTypeDef *hsdram, uint32_t RefreshRate) +{ + /* Check the SDRAM controller state */ + if(hsdram->State == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + + /* Update the SDRAM state */ + hsdram->State = HAL_SDRAM_STATE_BUSY; + + /* Program the refresh rate */ + FMC_SDRAM_ProgramRefreshRate(hsdram->Instance ,RefreshRate); + + /* Update the SDRAM state */ + hsdram->State = HAL_SDRAM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Sets the Number of consecutive SDRAM Memory auto Refresh commands. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. 
+ * @param AutoRefreshNumber The SDRAM auto Refresh number + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SDRAM_SetAutoRefreshNumber(SDRAM_HandleTypeDef *hsdram, uint32_t AutoRefreshNumber) +{ + /* Check the SDRAM controller state */ + if(hsdram->State == HAL_SDRAM_STATE_BUSY) + { + return HAL_BUSY; + } + + /* Update the SDRAM state */ + hsdram->State = HAL_SDRAM_STATE_BUSY; + + /* Set the Auto-Refresh number */ + FMC_SDRAM_SetAutoRefreshNumber(hsdram->Instance ,AutoRefreshNumber); + + /* Update the SDRAM state */ + hsdram->State = HAL_SDRAM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Returns the SDRAM memory current mode. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @retval The SDRAM memory mode. + */ +uint32_t HAL_SDRAM_GetModeStatus(SDRAM_HandleTypeDef *hsdram) +{ + /* Return the SDRAM memory current mode */ + return(FMC_SDRAM_GetModeStatus(hsdram->Instance, hsdram->Init.SDBank)); +} + +/** + * @} + */ + +/** @defgroup SDRAM_Exported_Functions_Group4 State functions + * @brief Peripheral State functions + * +@verbatim + ============================================================================== + ##### SDRAM State functions ##### + ============================================================================== + [..] + This subsection permits to get in run-time the status of the SDRAM controller + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Returns the SDRAM state. + * @param hsdram pointer to a SDRAM_HandleTypeDef structure that contains + * the configuration information for SDRAM module. + * @retval HAL state + */ +HAL_SDRAM_StateTypeDef HAL_SDRAM_GetState(SDRAM_HandleTypeDef *hsdram) +{ + return hsdram->State; +} + +/** + * @} + */ + +/** + * @} + */ +#endif /* HAL_SDRAM_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi.c new file mode 100644 index 000000000..2e14cea07 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi.c @@ -0,0 +1,4273 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_spi.c + * @author MCD Application Team + * @brief SPI HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Serial Peripheral Interface (SPI) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] 
+ The SPI HAL driver can be used as follows: + + (#) Declare a SPI_HandleTypeDef handle structure, for example: + SPI_HandleTypeDef hspi; + + (#)Initialize the SPI low level resources by implementing the HAL_SPI_MspInit() API: + (##) Enable the SPIx interface clock + (##) SPI pins configuration + (+++) Enable the clock for the SPI GPIOs + (+++) Configure these SPI pins as alternate function push-pull + (##) NVIC configuration if you need to use interrupt process + (+++) Configure the SPIx interrupt priority + (+++) Enable the NVIC SPI IRQ handle + (##) DMA Configuration if you need to use DMA process + (+++) Declare a DMA_HandleTypeDef handle structure for the transmit or receive Stream/Channel + (+++) Enable the DMAx clock + (+++) Configure the DMA handle parameters + (+++) Configure the DMA Tx or Rx Stream/Channel + (+++) Associate the initialized hdma_tx(or _rx) handle to the hspi DMA Tx or Rx handle + (+++) Configure the priority and enable the NVIC for the transfer complete interrupt on the DMA Tx or Rx Stream/Channel + + (#) Program the Mode, BidirectionalMode , Data size, Baudrate Prescaler, NSS + management, Clock polarity and phase, FirstBit and CRC configuration in the hspi Init structure. + + (#) Initialize the SPI registers by calling the HAL_SPI_Init() API: + (++) This API configures also the low level Hardware GPIO, CLOCK, CORTEX...etc) + by calling the customized HAL_SPI_MspInit() API. + [..] + Circular mode restriction: + (#) The DMA circular mode cannot be used when the SPI is configured in these modes: + (##) Master 2Lines RxOnly + (##) Master 1Line Rx + (#) The CRC feature is not managed when the DMA circular mode is enabled + (#) When the SPI DMA Pause/Stop features are used, we must use the following APIs + the HAL_SPI_DMAPause()/ HAL_SPI_DMAStop() only under the SPI callbacks + [..] + Master Receive mode restriction: + (#) In Master unidirectional receive-only mode (MSTR =1, BIDIMODE=0, RXONLY=1) or + bidirectional receive mode (MSTR=1, BIDIMODE=1, BIDIOE=0), to ensure that the SPI + does not initiate a new transfer the following procedure has to be respected: + (##) HAL_SPI_DeInit() + (##) HAL_SPI_Init() + [..] + Callback registration: + + (#) The compilation flag USE_HAL_SPI_REGISTER_CALLBACKS when set to 1U + allows the user to configure dynamically the driver callbacks. + Use Functions HAL_SPI_RegisterCallback() to register an interrupt callback. + + Function HAL_SPI_RegisterCallback() allows to register following callbacks: + (+) TxCpltCallback : SPI Tx Completed callback + (+) RxCpltCallback : SPI Rx Completed callback + (+) TxRxCpltCallback : SPI TxRx Completed callback + (+) TxHalfCpltCallback : SPI Tx Half Completed callback + (+) RxHalfCpltCallback : SPI Rx Half Completed callback + (+) TxRxHalfCpltCallback : SPI TxRx Half Completed callback + (+) ErrorCallback : SPI Error callback + (+) AbortCpltCallback : SPI Abort callback + (+) MspInitCallback : SPI Msp Init callback + (+) MspDeInitCallback : SPI Msp DeInit callback + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + + (#) Use function HAL_SPI_UnRegisterCallback to reset a callback to the default + weak function. + HAL_SPI_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. 
+ This function allows to reset following callbacks: + (+) TxCpltCallback : SPI Tx Completed callback + (+) RxCpltCallback : SPI Rx Completed callback + (+) TxRxCpltCallback : SPI TxRx Completed callback + (+) TxHalfCpltCallback : SPI Tx Half Completed callback + (+) RxHalfCpltCallback : SPI Rx Half Completed callback + (+) TxRxHalfCpltCallback : SPI TxRx Half Completed callback + (+) ErrorCallback : SPI Error callback + (+) AbortCpltCallback : SPI Abort callback + (+) MspInitCallback : SPI Msp Init callback + (+) MspDeInitCallback : SPI Msp DeInit callback + + By default, after the HAL_SPI_Init() and when the state is HAL_SPI_STATE_RESET + all callbacks are set to the corresponding weak functions: + examples HAL_SPI_MasterTxCpltCallback(), HAL_SPI_MasterRxCpltCallback(). + Exception done for MspInit and MspDeInit functions that are + reset to the legacy weak functions in the HAL_SPI_Init()/ HAL_SPI_DeInit() only when + these callbacks are null (not registered beforehand). + If MspInit or MspDeInit are not null, the HAL_SPI_Init()/ HAL_SPI_DeInit() + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) whatever the state. + + Callbacks can be registered/unregistered in HAL_SPI_STATE_READY state only. + Exception done MspInit/MspDeInit functions that can be registered/unregistered + in HAL_SPI_STATE_READY or HAL_SPI_STATE_RESET state, + thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. + Then, the user first registers the MspInit/MspDeInit user callbacks + using HAL_SPI_RegisterCallback() before calling HAL_SPI_DeInit() + or HAL_SPI_Init() function. + + When The compilation define USE_HAL_PPP_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registering feature is not available + and weak (surcharged) callbacks are used. + + [..] + Using the HAL it is not possible to reach all supported SPI frequency with the different SPI Modes, + the following table resume the max SPI frequency reached with data size 8bits/16bits, + according to frequency of the APBx Peripheral Clock (fPCLK) used by the SPI instance. 
+ + @endverbatim + + Additional table : + + DataSize = SPI_DATASIZE_8BIT: + +----------------------------------------------------------------------------------------------+ + | | | 2Lines Fullduplex | 2Lines RxOnly | 1Line | + | Process | Tranfert mode |---------------------|----------------------|----------------------| + | | | Master | Slave | Master | Slave | Master | Slave | + |==============================================================================================| + | T | Polling | Fpclk/4 | Fpclk/8 | NA | NA | NA | NA | + | X |----------------|----------|----------|-----------|----------|-----------|----------| + | / | Interrupt | Fpclk/4 | Fpclk/16 | NA | NA | NA | NA | + | R |----------------|----------|----------|-----------|----------|-----------|----------| + | X | DMA | Fpclk/2 | Fpclk/2 | NA | NA | NA | NA | + |=========|================|==========|==========|===========|==========|===========|==========| + | | Polling | Fpclk/4 | Fpclk/8 | Fpclk/16 | Fpclk/8 | Fpclk/8 | Fpclk/8 | + | |----------------|----------|----------|-----------|----------|-----------|----------| + | R | Interrupt | Fpclk/8 | Fpclk/16 | Fpclk/8 | Fpclk/8 | Fpclk/8 | Fpclk/4 | + | X |----------------|----------|----------|-----------|----------|-----------|----------| + | | DMA | Fpclk/4 | Fpclk/2 | Fpclk/2 | Fpclk/16 | Fpclk/2 | Fpclk/16 | + |=========|================|==========|==========|===========|==========|===========|==========| + | | Polling | Fpclk/8 | Fpclk/2 | NA | NA | Fpclk/8 | Fpclk/8 | + | |----------------|----------|----------|-----------|----------|-----------|----------| + | T | Interrupt | Fpclk/2 | Fpclk/4 | NA | NA | Fpclk/16 | Fpclk/8 | + | X |----------------|----------|----------|-----------|----------|-----------|----------| + | | DMA | Fpclk/2 | Fpclk/2 | NA | NA | Fpclk/8 | Fpclk/16 | + +----------------------------------------------------------------------------------------------+ + + DataSize = SPI_DATASIZE_16BIT: + +----------------------------------------------------------------------------------------------+ + | | | 2Lines Fullduplex | 2Lines RxOnly | 1Line | + | Process | Tranfert mode |---------------------|----------------------|----------------------| + | | | Master | Slave | Master | Slave | Master | Slave | + |==============================================================================================| + | T | Polling | Fpclk/4 | Fpclk/8 | NA | NA | NA | NA | + | X |----------------|----------|----------|-----------|----------|-----------|----------| + | / | Interrupt | Fpclk/4 | Fpclk/16 | NA | NA | NA | NA | + | R |----------------|----------|----------|-----------|----------|-----------|----------| + | X | DMA | Fpclk/2 | Fpclk/2 | NA | NA | NA | NA | + |=========|================|==========|==========|===========|==========|===========|==========| + | | Polling | Fpclk/4 | Fpclk/8 | Fpclk/16 | Fpclk/8 | Fpclk/8 | Fpclk/8 | + | |----------------|----------|----------|-----------|----------|-----------|----------| + | R | Interrupt | Fpclk/8 | Fpclk/16 | Fpclk/8 | Fpclk/8 | Fpclk/8 | Fpclk/4 | + | X |----------------|----------|----------|-----------|----------|-----------|----------| + | | DMA | Fpclk/4 | Fpclk/2 | Fpclk/2 | Fpclk/16 | Fpclk/2 | Fpclk/16 | + |=========|================|==========|==========|===========|==========|===========|==========| + | | Polling | Fpclk/8 | Fpclk/2 | NA | NA | Fpclk/8 | Fpclk/8 | + | |----------------|----------|----------|-----------|----------|-----------|----------| + | T | Interrupt | Fpclk/2 | Fpclk/4 | NA | 
NA | Fpclk/16 | Fpclk/8 | + | X |----------------|----------|----------|-----------|----------|-----------|----------| + | | DMA | Fpclk/2 | Fpclk/2 | NA | NA | Fpclk/8 | Fpclk/16 | + +----------------------------------------------------------------------------------------------+ + @note The max SPI frequency depend on SPI data size (4bits, 5bits,..., 8bits,...15bits, 16bits), + SPI mode(2 Lines fullduplex, 2 lines RxOnly, 1 line TX/RX) and Process mode (Polling, IT, DMA). + @note + (#) TX/RX processes are HAL_SPI_TransmitReceive(), HAL_SPI_TransmitReceive_IT() and HAL_SPI_TransmitReceive_DMA() + (#) RX processes are HAL_SPI_Receive(), HAL_SPI_Receive_IT() and HAL_SPI_Receive_DMA() + (#) TX processes are HAL_SPI_Transmit(), HAL_SPI_Transmit_IT() and HAL_SPI_Transmit_DMA() + + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics.
+ * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup SPI SPI + * @brief SPI HAL module driver + * @{ + */ +#ifdef HAL_SPI_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private defines -----------------------------------------------------------*/ +/** @defgroup SPI_Private_Constants SPI Private Constants + * @{ + */ +#define SPI_DEFAULT_TIMEOUT 100U +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @defgroup SPI_Private_Functions SPI Private Functions + * @{ + */ +static void SPI_DMATransmitCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMAReceiveCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMATransmitReceiveCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMAHalfTransmitCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMAHalfReceiveCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMAHalfTransmitReceiveCplt(DMA_HandleTypeDef *hdma); +static void SPI_DMAError(DMA_HandleTypeDef *hdma); +static void SPI_DMAAbortOnError(DMA_HandleTypeDef *hdma); +static void SPI_DMATxAbortCallback(DMA_HandleTypeDef *hdma); +static void SPI_DMARxAbortCallback(DMA_HandleTypeDef *hdma); +static HAL_StatusTypeDef SPI_WaitFlagStateUntilTimeout(SPI_HandleTypeDef *hspi, uint32_t Flag, FlagStatus State, + uint32_t Timeout, uint32_t Tickstart); +static HAL_StatusTypeDef SPI_WaitFifoStateUntilTimeout(SPI_HandleTypeDef *hspi, uint32_t Fifo, uint32_t State, + uint32_t Timeout, uint32_t Tickstart); +static void SPI_TxISR_8BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_TxISR_16BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_RxISR_8BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_RxISR_16BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesRxISR_8BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesTxISR_8BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesTxISR_16BIT(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesRxISR_16BIT(struct __SPI_HandleTypeDef *hspi); +#if (USE_SPI_CRC != 0U) +static void SPI_RxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi); +static void SPI_RxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesRxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi); +static void SPI_2linesRxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi); +#endif /* USE_SPI_CRC */ +static void SPI_AbortRx_ISR(SPI_HandleTypeDef *hspi); +static void SPI_AbortTx_ISR(SPI_HandleTypeDef *hspi); +static void SPI_CloseRxTx_ISR(SPI_HandleTypeDef *hspi); +static void SPI_CloseRx_ISR(SPI_HandleTypeDef *hspi); +static void SPI_CloseTx_ISR(SPI_HandleTypeDef *hspi); +static HAL_StatusTypeDef SPI_EndRxTransaction(SPI_HandleTypeDef *hspi, uint32_t Timeout, uint32_t Tickstart); +static HAL_StatusTypeDef SPI_EndRxTxTransaction(SPI_HandleTypeDef *hspi, uint32_t Timeout, uint32_t Tickstart); +/** + * @} + */ + +/* Exported functions 
--------------------------------------------------------*/ +/** @defgroup SPI_Exported_Functions SPI Exported Functions + * @{ + */ + +/** @defgroup SPI_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This subsection provides a set of functions allowing to initialize and + de-initialize the SPIx peripheral: + + (+) User must implement HAL_SPI_MspInit() function in which he configures + all related peripherals resources (CLOCK, GPIO, DMA, IT and NVIC ). + + (+) Call the function HAL_SPI_Init() to configure the selected device with + the selected configuration: + (++) Mode + (++) Direction + (++) Data Size + (++) Clock Polarity and Phase + (++) NSS Management + (++) BaudRate Prescaler + (++) FirstBit + (++) TIMode + (++) CRC Calculation + (++) CRC Polynomial if CRC enabled + (++) CRC Length, used only with Data8 and Data16 + (++) FIFO reception threshold + + (+) Call the function HAL_SPI_DeInit() to restore the default configuration + of the selected SPIx peripheral. + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the SPI according to the specified parameters + * in the SPI_InitTypeDef and initialize the associated handle. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Init(SPI_HandleTypeDef *hspi) +{ + uint32_t frxth; + + /* Check the SPI handle allocation */ + if (hspi == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_SPI_ALL_INSTANCE(hspi->Instance)); + assert_param(IS_SPI_MODE(hspi->Init.Mode)); + assert_param(IS_SPI_DIRECTION(hspi->Init.Direction)); + assert_param(IS_SPI_DATASIZE(hspi->Init.DataSize)); + assert_param(IS_SPI_NSS(hspi->Init.NSS)); + assert_param(IS_SPI_NSSP(hspi->Init.NSSPMode)); + assert_param(IS_SPI_BAUDRATE_PRESCALER(hspi->Init.BaudRatePrescaler)); + assert_param(IS_SPI_FIRST_BIT(hspi->Init.FirstBit)); + assert_param(IS_SPI_TIMODE(hspi->Init.TIMode)); + if (hspi->Init.TIMode == SPI_TIMODE_DISABLE) + { + assert_param(IS_SPI_CPOL(hspi->Init.CLKPolarity)); + assert_param(IS_SPI_CPHA(hspi->Init.CLKPhase)); + } +#if (USE_SPI_CRC != 0U) + assert_param(IS_SPI_CRC_CALCULATION(hspi->Init.CRCCalculation)); + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + assert_param(IS_SPI_CRC_POLYNOMIAL(hspi->Init.CRCPolynomial)); + assert_param(IS_SPI_CRC_LENGTH(hspi->Init.CRCLength)); + } +#else + hspi->Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE; +#endif /* USE_SPI_CRC */ + + if (hspi->State == HAL_SPI_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hspi->Lock = HAL_UNLOCKED; + +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + /* Init the SPI Callback settings */ + hspi->TxCpltCallback = HAL_SPI_TxCpltCallback; /* Legacy weak TxCpltCallback */ + hspi->RxCpltCallback = HAL_SPI_RxCpltCallback; /* Legacy weak RxCpltCallback */ + hspi->TxRxCpltCallback = HAL_SPI_TxRxCpltCallback; /* Legacy weak TxRxCpltCallback */ + hspi->TxHalfCpltCallback = HAL_SPI_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ + hspi->RxHalfCpltCallback = HAL_SPI_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ + hspi->TxRxHalfCpltCallback = HAL_SPI_TxRxHalfCpltCallback; /* 
Legacy weak TxRxHalfCpltCallback */ + hspi->ErrorCallback = HAL_SPI_ErrorCallback; /* Legacy weak ErrorCallback */ + hspi->AbortCpltCallback = HAL_SPI_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + + if (hspi->MspInitCallback == NULL) + { + hspi->MspInitCallback = HAL_SPI_MspInit; /* Legacy weak MspInit */ + } + + /* Init the low level hardware : GPIO, CLOCK, NVIC... */ + hspi->MspInitCallback(hspi); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC... */ + HAL_SPI_MspInit(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + + hspi->State = HAL_SPI_STATE_BUSY; + + /* Disable the selected SPI peripheral */ + __HAL_SPI_DISABLE(hspi); + + /* Align by default the rs fifo threshold on the data size */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + frxth = SPI_RXFIFO_THRESHOLD_HF; + } + else + { + frxth = SPI_RXFIFO_THRESHOLD_QF; + } + + /* CRC calculation is valid only for 16Bit and 8 Bit */ + if ((hspi->Init.DataSize != SPI_DATASIZE_16BIT) && (hspi->Init.DataSize != SPI_DATASIZE_8BIT)) + { + /* CRC must be disabled */ + hspi->Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE; + } + + /* Align the CRC Length on the data size */ + if (hspi->Init.CRCLength == SPI_CRC_LENGTH_DATASIZE) + { + /* CRC Length aligned on the data size : value set by default */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + hspi->Init.CRCLength = SPI_CRC_LENGTH_16BIT; + } + else + { + hspi->Init.CRCLength = SPI_CRC_LENGTH_8BIT; + } + } + + /*----------------------- SPIx CR1 & CR2 Configuration ---------------------*/ + /* Configure : SPI Mode, Communication Mode, Clock polarity and phase, NSS management, + Communication speed, First bit and CRC calculation state */ + WRITE_REG(hspi->Instance->CR1, (hspi->Init.Mode | hspi->Init.Direction | + hspi->Init.CLKPolarity | hspi->Init.CLKPhase | (hspi->Init.NSS & SPI_CR1_SSM) | + hspi->Init.BaudRatePrescaler | hspi->Init.FirstBit | hspi->Init.CRCCalculation)); +#if (USE_SPI_CRC != 0U) + /* Configure : CRC Length */ + if (hspi->Init.CRCLength == SPI_CRC_LENGTH_16BIT) + { + hspi->Instance->CR1 |= SPI_CR1_CRCL; + } +#endif /* USE_SPI_CRC */ + + /* Configure : NSS management, TI Mode, NSS Pulse, Data size and Rx Fifo threshold */ + WRITE_REG(hspi->Instance->CR2, (((hspi->Init.NSS >> 16U) & SPI_CR2_SSOE) | hspi->Init.TIMode | + hspi->Init.NSSPMode | hspi->Init.DataSize) | frxth); + +#if (USE_SPI_CRC != 0U) + /*---------------------------- SPIx CRCPOLY Configuration ------------------*/ + /* Configure : CRC Polynomial */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + WRITE_REG(hspi->Instance->CRCPR, hspi->Init.CRCPolynomial); + } +#endif /* USE_SPI_CRC */ + +#if defined(SPI_I2SCFGR_I2SMOD) + /* Activate the SPI mode (Make sure that I2SMOD bit in I2SCFGR register is reset) */ + CLEAR_BIT(hspi->Instance->I2SCFGR, SPI_I2SCFGR_I2SMOD); +#endif /* SPI_I2SCFGR_I2SMOD */ + + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->State = HAL_SPI_STATE_READY; + + return HAL_OK; +} + +/** + * @brief De-Initialize the SPI peripheral. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_DeInit(SPI_HandleTypeDef *hspi) +{ + /* Check the SPI handle allocation */ + if (hspi == NULL) + { + return HAL_ERROR; + } + + /* Check SPI Instance parameter */ + assert_param(IS_SPI_ALL_INSTANCE(hspi->Instance)); + + hspi->State = HAL_SPI_STATE_BUSY; + + /* Disable the SPI Peripheral Clock */ + __HAL_SPI_DISABLE(hspi); + +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + if (hspi->MspDeInitCallback == NULL) + { + hspi->MspDeInitCallback = HAL_SPI_MspDeInit; /* Legacy weak MspDeInit */ + } + + /* DeInit the low level hardware: GPIO, CLOCK, NVIC... */ + hspi->MspDeInitCallback(hspi); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC... */ + HAL_SPI_MspDeInit(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->State = HAL_SPI_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hspi); + + return HAL_OK; +} + +/** + * @brief Initialize the SPI MSP. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_MspInit(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_MspInit should be implemented in the user file + */ +} + +/** + * @brief De-Initialize the SPI MSP. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_MspDeInit(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_MspDeInit should be implemented in the user file + */ +} + +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) +/** + * @brief Register a User SPI Callback + * To be used instead of the weak predefined callback + * @param hspi Pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI. 
+ * @param CallbackID ID of the callback to be registered + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_RegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID, pSPI_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hspi->ErrorCode |= HAL_SPI_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hspi); + + if (HAL_SPI_STATE_READY == hspi->State) + { + switch (CallbackID) + { + case HAL_SPI_TX_COMPLETE_CB_ID : + hspi->TxCpltCallback = pCallback; + break; + + case HAL_SPI_RX_COMPLETE_CB_ID : + hspi->RxCpltCallback = pCallback; + break; + + case HAL_SPI_TX_RX_COMPLETE_CB_ID : + hspi->TxRxCpltCallback = pCallback; + break; + + case HAL_SPI_TX_HALF_COMPLETE_CB_ID : + hspi->TxHalfCpltCallback = pCallback; + break; + + case HAL_SPI_RX_HALF_COMPLETE_CB_ID : + hspi->RxHalfCpltCallback = pCallback; + break; + + case HAL_SPI_TX_RX_HALF_COMPLETE_CB_ID : + hspi->TxRxHalfCpltCallback = pCallback; + break; + + case HAL_SPI_ERROR_CB_ID : + hspi->ErrorCallback = pCallback; + break; + + case HAL_SPI_ABORT_CB_ID : + hspi->AbortCpltCallback = pCallback; + break; + + case HAL_SPI_MSPINIT_CB_ID : + hspi->MspInitCallback = pCallback; + break; + + case HAL_SPI_MSPDEINIT_CB_ID : + hspi->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_SPI_STATE_RESET == hspi->State) + { + switch (CallbackID) + { + case HAL_SPI_MSPINIT_CB_ID : + hspi->MspInitCallback = pCallback; + break; + + case HAL_SPI_MSPDEINIT_CB_ID : + hspi->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hspi); + return status; +} + +/** + * @brief Unregister an SPI Callback + * SPI callback is redirected to the weak predefined callback + * @param hspi Pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI. 
+ * @param CallbackID ID of the callback to be unregistered + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_UnRegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hspi); + + if (HAL_SPI_STATE_READY == hspi->State) + { + switch (CallbackID) + { + case HAL_SPI_TX_COMPLETE_CB_ID : + hspi->TxCpltCallback = HAL_SPI_TxCpltCallback; /* Legacy weak TxCpltCallback */ + break; + + case HAL_SPI_RX_COMPLETE_CB_ID : + hspi->RxCpltCallback = HAL_SPI_RxCpltCallback; /* Legacy weak RxCpltCallback */ + break; + + case HAL_SPI_TX_RX_COMPLETE_CB_ID : + hspi->TxRxCpltCallback = HAL_SPI_TxRxCpltCallback; /* Legacy weak TxRxCpltCallback */ + break; + + case HAL_SPI_TX_HALF_COMPLETE_CB_ID : + hspi->TxHalfCpltCallback = HAL_SPI_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ + break; + + case HAL_SPI_RX_HALF_COMPLETE_CB_ID : + hspi->RxHalfCpltCallback = HAL_SPI_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ + break; + + case HAL_SPI_TX_RX_HALF_COMPLETE_CB_ID : + hspi->TxRxHalfCpltCallback = HAL_SPI_TxRxHalfCpltCallback; /* Legacy weak TxRxHalfCpltCallback */ + break; + + case HAL_SPI_ERROR_CB_ID : + hspi->ErrorCallback = HAL_SPI_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_SPI_ABORT_CB_ID : + hspi->AbortCpltCallback = HAL_SPI_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + break; + + case HAL_SPI_MSPINIT_CB_ID : + hspi->MspInitCallback = HAL_SPI_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_SPI_MSPDEINIT_CB_ID : + hspi->MspDeInitCallback = HAL_SPI_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_SPI_STATE_RESET == hspi->State) + { + switch (CallbackID) + { + case HAL_SPI_MSPINIT_CB_ID : + hspi->MspInitCallback = HAL_SPI_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_SPI_MSPDEINIT_CB_ID : + hspi->MspDeInitCallback = HAL_SPI_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_INVALID_CALLBACK); + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hspi); + return status; +} +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup SPI_Exported_Functions_Group2 IO operation functions + * @brief Data transfers functions + * +@verbatim + ============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the SPI + data transfers. + + [..] The SPI supports master and slave mode : + + (#) There are two modes of transfer: + (++) Blocking mode: The communication is performed in polling mode. + The HAL status of all data processing is returned by the same function + after finishing transfer. + (++) No-Blocking mode: The communication is performed using Interrupts + or DMA, These APIs return the HAL status. + The end of the data processing will be indicated through the + dedicated SPI IRQ when using Interrupt mode or the DMA IRQ when + using DMA mode. 
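+              For example (editor's note: an illustrative sketch only, not part of
+              the original ST sources; the handle name hspi1 and the buffer are
+              assumptions), an interrupt-mode transmit is started with:
+
+                uint8_t tx_buf[4] = {0x01U, 0x02U, 0x03U, 0x04U};
+                HAL_SPI_Transmit_IT(&hspi1, tx_buf, 4U);
+
+              and its completion is then reported through the callback described below.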
+ The HAL_SPI_TxCpltCallback(), HAL_SPI_RxCpltCallback() and HAL_SPI_TxRxCpltCallback() user callbacks + will be executed respectively at the end of the transmit or Receive process + The HAL_SPI_ErrorCallback()user callback will be executed when a communication error is detected + + (#) APIs provided for these 2 transfer modes (Blocking mode or Non blocking mode using either Interrupt or DMA) + exist for 1Line (simplex) and 2Lines (full duplex) modes. + +@endverbatim + * @{ + */ + +/** + * @brief Transmit an amount of data in blocking mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param pData pointer to data buffer + * @param Size amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + HAL_StatusTypeDef errorcode = HAL_OK; + uint16_t initial_TxXferCount; + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES_OR_1LINE(hspi->Init.Direction)); + + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + initial_TxXferCount = Size; + + if (hspi->State != HAL_SPI_STATE_READY) + { + errorcode = HAL_BUSY; + goto error; + } + + if ((pData == NULL) || (Size == 0U)) + { + errorcode = HAL_ERROR; + goto error; + } + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_TX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pTxBuffPtr = (uint8_t *)pData; + hspi->TxXferSize = Size; + hspi->TxXferCount = Size; + + /*Init field not used in handle to zero */ + hspi->pRxBuffPtr = (uint8_t *)NULL; + hspi->RxXferSize = 0U; + hspi->RxXferCount = 0U; + hspi->TxISR = NULL; + hspi->RxISR = NULL; + + /* Configure communication direction : 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + SPI_1LINE_TX(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Transmit data in 16 Bit mode */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) + { + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + } + /* Transmit data in 16 Bit mode */ + while (hspi->TxXferCount > 0U) + { + /* Wait until TXE flag is set to send data */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) + { + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + } + else + { + /* Timeout management */ + if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) + { + errorcode = HAL_TIMEOUT; + goto error; + } + } + } + } + /* Transmit data in 8 Bit mode */ + else + { + if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) + { + if (hspi->TxXferCount > 1U) + { + /* write on the data register in packing mode */ + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount -= 2U; + } + else + { + *((__IO uint8_t *)&hspi->Instance->DR) = (*hspi->pTxBuffPtr); + hspi->pTxBuffPtr ++; 
+ hspi->TxXferCount--; + } + } + while (hspi->TxXferCount > 0U) + { + /* Wait until TXE flag is set to send data */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) + { + if (hspi->TxXferCount > 1U) + { + /* write on the data register in packing mode */ + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount -= 2U; + } + else + { + *((__IO uint8_t *)&hspi->Instance->DR) = (*hspi->pTxBuffPtr); + hspi->pTxBuffPtr++; + hspi->TxXferCount--; + } + } + else + { + /* Timeout management */ + if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) + { + errorcode = HAL_TIMEOUT; + goto error; + } + } + } + } +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, Timeout, tickstart) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_FLAG; + } + + /* Clear overrun flag in 2 Lines communication mode because received is not read */ + if (hspi->Init.Direction == SPI_DIRECTION_2LINES) + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + errorcode = HAL_ERROR; + } + +error: + hspi->State = HAL_SPI_STATE_READY; + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return errorcode; +} + +/** + * @brief Receive an amount of data in blocking mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param pData pointer to data buffer + * @param Size amount of data to be received + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + HAL_StatusTypeDef errorcode = HAL_OK; + + if ((hspi->Init.Mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES)) + { + hspi->State = HAL_SPI_STATE_BUSY_RX; + /* Call transmit-receive function to send Dummy data on Tx line and generate clock on CLK line */ + return HAL_SPI_TransmitReceive(hspi, pData, pData, Size, Timeout); + } + + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + if (hspi->State != HAL_SPI_STATE_READY) + { + errorcode = HAL_BUSY; + goto error; + } + + if ((pData == NULL) || (Size == 0U)) + { + errorcode = HAL_ERROR; + goto error; + } + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_RX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pRxBuffPtr = (uint8_t *)pData; + hspi->RxXferSize = Size; + hspi->RxXferCount = Size; + + /*Init field not used in handle to zero */ + hspi->pTxBuffPtr = (uint8_t *)NULL; + hspi->TxXferSize = 0U; + hspi->TxXferCount = 0U; + hspi->RxISR = NULL; + hspi->TxISR = NULL; + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + /* this is done to handle the CRCNEXT before the latest data */ + hspi->RxXferCount--; + } +#endif /* USE_SPI_CRC */ + + /* Set the Rx Fifo threshold */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + /* Set RX Fifo threshold according the reception data length: 16bit */ + CLEAR_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + } + else + { + /* Set RX Fifo threshold according the reception data length: 8bit */ + SET_BIT(hspi->Instance->CR2, 
SPI_RXFIFO_THRESHOLD); + } + + /* Configure communication direction: 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + SPI_1LINE_RX(hspi); + } + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Receive data in 8 Bit mode */ + if (hspi->Init.DataSize <= SPI_DATASIZE_8BIT) + { + /* Transfer loop */ + while (hspi->RxXferCount > 0U) + { + /* Check the RXNE flag */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_RXNE)) + { + /* read the received data */ + (* (uint8_t *)hspi->pRxBuffPtr) = *(__IO uint8_t *)&hspi->Instance->DR; + hspi->pRxBuffPtr += sizeof(uint8_t); + hspi->RxXferCount--; + } + else + { + /* Timeout management */ + if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) + { + errorcode = HAL_TIMEOUT; + goto error; + } + } + } + } + else + { + /* Transfer loop */ + while (hspi->RxXferCount > 0U) + { + /* Check the RXNE flag */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_RXNE)) + { + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)hspi->Instance->DR; + hspi->pRxBuffPtr += sizeof(uint16_t); + hspi->RxXferCount--; + } + else + { + /* Timeout management */ + if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) + { + errorcode = HAL_TIMEOUT; + goto error; + } + } + } + } + +#if (USE_SPI_CRC != 0U) + /* Handle the CRC Transmission */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* freeze the CRC before the latest data */ + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + + /* Read the latest data */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, Timeout, tickstart) != HAL_OK) + { + /* the latest data has not been received */ + errorcode = HAL_TIMEOUT; + goto error; + } + + /* Receive last data in 16 Bit mode */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)hspi->Instance->DR; + } + /* Receive last data in 8 Bit mode */ + else + { + (*(uint8_t *)hspi->pRxBuffPtr) = *(__IO uint8_t *)&hspi->Instance->DR; + } + + /* Wait the CRC data */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, Timeout, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + errorcode = HAL_TIMEOUT; + goto error; + } + + /* Read CRC to Flush DR and RXNE flag */ + if (hspi->Init.DataSize == SPI_DATASIZE_16BIT) + { + /* Read 16bit CRC */ + READ_REG(hspi->Instance->DR); + } + else + { + /* Read 8bit CRC */ + READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + + if ((hspi->Init.DataSize == SPI_DATASIZE_8BIT) && (hspi->Init.CRCLength == SPI_CRC_LENGTH_16BIT)) + { + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, Timeout, tickstart) != HAL_OK) + { + /* Error on the CRC reception */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + errorcode = HAL_TIMEOUT; + goto error; + } + /* Read 8bit CRC again in case of 16bit CRC in 8bit Data mode */ + READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + } + } + } +#endif /* USE_SPI_CRC */ + + /* Check the end of the transaction */ + if (SPI_EndRxTransaction(hspi, Timeout, tickstart) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_FLAG; + } + +#if (USE_SPI_CRC != 0U) + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + } +#endif /* USE_SPI_CRC */ + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + errorcode = HAL_ERROR; + } + +error : 
+ hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return errorcode; +} + +/** + * @brief Transmit and Receive an amount of data in blocking mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param pTxData pointer to transmission data buffer + * @param pRxData pointer to reception data buffer + * @param Size amount of data to be sent and received + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, uint16_t Size, + uint32_t Timeout) +{ + uint16_t initial_TxXferCount; + uint16_t initial_RxXferCount; + uint32_t tmp_mode; + HAL_SPI_StateTypeDef tmp_state; + uint32_t tickstart; +#if (USE_SPI_CRC != 0U) + uint32_t spi_cr1; + uint32_t spi_cr2; +#endif /* USE_SPI_CRC */ + + /* Variable used to alternate Rx and Tx during transfer */ + uint32_t txallowed = 1U; + HAL_StatusTypeDef errorcode = HAL_OK; + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES(hspi->Init.Direction)); + + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + /* Init temporary variables */ + tmp_state = hspi->State; + tmp_mode = hspi->Init.Mode; + initial_TxXferCount = Size; + initial_RxXferCount = Size; +#if (USE_SPI_CRC != 0U) + spi_cr1 = READ_REG(hspi->Instance->CR1); + spi_cr2 = READ_REG(hspi->Instance->CR2); +#endif /* USE_SPI_CRC */ + + if (!((tmp_state == HAL_SPI_STATE_READY) || \ + ((tmp_mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES) && (tmp_state == HAL_SPI_STATE_BUSY_RX)))) + { + errorcode = HAL_BUSY; + goto error; + } + + if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) + { + errorcode = HAL_ERROR; + goto error; + } + + /* Don't overwrite in case of HAL_SPI_STATE_BUSY_RX */ + if (hspi->State != HAL_SPI_STATE_BUSY_RX) + { + hspi->State = HAL_SPI_STATE_BUSY_TX_RX; + } + + /* Set the transaction information */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pRxBuffPtr = (uint8_t *)pRxData; + hspi->RxXferCount = Size; + hspi->RxXferSize = Size; + hspi->pTxBuffPtr = (uint8_t *)pTxData; + hspi->TxXferCount = Size; + hspi->TxXferSize = Size; + + /*Init field not used in handle to zero */ + hspi->RxISR = NULL; + hspi->TxISR = NULL; + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Set the Rx Fifo threshold */ + if ((hspi->Init.DataSize > SPI_DATASIZE_8BIT) || (initial_RxXferCount > 1U)) + { + /* Set fiforxthreshold according the reception data length: 16bit */ + CLEAR_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + } + else + { + /* Set fiforxthreshold according the reception data length: 8bit */ + SET_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + } + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Transmit and Receive data in 16 Bit mode */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) + { + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + } + while ((hspi->TxXferCount > 0U) || (hspi->RxXferCount > 0U)) + { + /* Check TXE flag */ + if ((__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) && 
(hspi->TxXferCount > 0U) && (txallowed == 1U)) + { + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + /* Next Data is a reception (Rx). Tx not allowed */ + txallowed = 0U; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->TxXferCount == 0U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + /* Set NSS Soft to received correctly the CRC on slave mode with NSS pulse activated */ + if ((READ_BIT(spi_cr1, SPI_CR1_MSTR) == 0U) && (READ_BIT(spi_cr2, SPI_CR2_NSSP) == SPI_CR2_NSSP)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_SSM); + } + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + } + + /* Check RXNE flag */ + if ((__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_RXNE)) && (hspi->RxXferCount > 0U)) + { + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)hspi->Instance->DR; + hspi->pRxBuffPtr += sizeof(uint16_t); + hspi->RxXferCount--; + /* Next Data is a Transmission (Tx). Tx is allowed */ + txallowed = 1U; + } + if (((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) + { + errorcode = HAL_TIMEOUT; + goto error; + } + } + } + /* Transmit and Receive data in 8 Bit mode */ + else + { + if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) + { + if (hspi->TxXferCount > 1U) + { + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount -= 2U; + } + else + { + *(__IO uint8_t *)&hspi->Instance->DR = (*hspi->pTxBuffPtr); + hspi->pTxBuffPtr++; + hspi->TxXferCount--; + } + } + while ((hspi->TxXferCount > 0U) || (hspi->RxXferCount > 0U)) + { + /* Check TXE flag */ + if ((__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) && (hspi->TxXferCount > 0U) && (txallowed == 1U)) + { + if (hspi->TxXferCount > 1U) + { + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount -= 2U; + } + else + { + *(__IO uint8_t *)&hspi->Instance->DR = (*hspi->pTxBuffPtr); + hspi->pTxBuffPtr++; + hspi->TxXferCount--; + } + /* Next Data is a reception (Rx). Tx not allowed */ + txallowed = 0U; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->TxXferCount == 0U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + /* Set NSS Soft to received correctly the CRC on slave mode with NSS pulse activated */ + if ((READ_BIT(spi_cr1, SPI_CR1_MSTR) == 0U) && (READ_BIT(spi_cr2, SPI_CR2_NSSP) == SPI_CR2_NSSP)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_SSM); + } + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + } + + /* Wait until RXNE flag is reset */ + if ((__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_RXNE)) && (hspi->RxXferCount > 0U)) + { + if (hspi->RxXferCount > 1U) + { + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)hspi->Instance->DR; + hspi->pRxBuffPtr += sizeof(uint16_t); + hspi->RxXferCount -= 2U; + if (hspi->RxXferCount <= 1U) + { + /* Set RX Fifo threshold before to switch on 8 bit data size */ + SET_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + } + } + else + { + (*(uint8_t *)hspi->pRxBuffPtr) = *(__IO uint8_t *)&hspi->Instance->DR; + hspi->pRxBuffPtr++; + hspi->RxXferCount--; + } + /* Next Data is a Transmission (Tx). 
Tx is allowed */ + txallowed = 1U; + } + if ((((HAL_GetTick() - tickstart) >= Timeout) && ((Timeout != HAL_MAX_DELAY))) || (Timeout == 0U)) + { + errorcode = HAL_TIMEOUT; + goto error; + } + } + } + +#if (USE_SPI_CRC != 0U) + /* Read CRC from DR to close CRC calculation process */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Wait until TXE flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, Timeout, tickstart) != HAL_OK) + { + /* Error on the CRC reception */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + errorcode = HAL_TIMEOUT; + goto error; + } + /* Read CRC */ + if (hspi->Init.DataSize == SPI_DATASIZE_16BIT) + { + /* Read 16bit CRC */ + READ_REG(hspi->Instance->DR); + } + else + { + /* Read 8bit CRC */ + READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + + if (hspi->Init.CRCLength == SPI_CRC_LENGTH_16BIT) + { + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, Timeout, tickstart) != HAL_OK) + { + /* Error on the CRC reception */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + errorcode = HAL_TIMEOUT; + goto error; + } + /* Read 8bit CRC again in case of 16bit CRC in 8bit Data mode */ + READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + } + } + } + + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + /* Clear CRC Flag */ + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + + errorcode = HAL_ERROR; + } +#endif /* USE_SPI_CRC */ + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, Timeout, tickstart) != HAL_OK) + { + errorcode = HAL_ERROR; + hspi->ErrorCode = HAL_SPI_ERROR_FLAG; + } + +error : + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return errorcode; +} + +/** + * @brief Transmit an amount of data in non-blocking mode with Interrupt. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param pData pointer to data buffer + * @param Size amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef errorcode = HAL_OK; + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES_OR_1LINE(hspi->Init.Direction)); + + /* Process Locked */ + __HAL_LOCK(hspi); + + if ((pData == NULL) || (Size == 0U)) + { + errorcode = HAL_ERROR; + goto error; + } + + if (hspi->State != HAL_SPI_STATE_READY) + { + errorcode = HAL_BUSY; + goto error; + } + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_TX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pTxBuffPtr = (uint8_t *)pData; + hspi->TxXferSize = Size; + hspi->TxXferCount = Size; + + /* Init field not used in handle to zero */ + hspi->pRxBuffPtr = (uint8_t *)NULL; + hspi->RxXferSize = 0U; + hspi->RxXferCount = 0U; + hspi->RxISR = NULL; + + /* Set the function for IT treatment */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + hspi->TxISR = SPI_TxISR_16BIT; + } + else + { + hspi->TxISR = SPI_TxISR_8BIT; + } + + /* Configure communication direction : 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + SPI_1LINE_TX(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Enable TXE and ERR interrupt */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_ERR)); + + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + +error : + __HAL_UNLOCK(hspi); + return errorcode; +} + +/** + * @brief Receive an amount of data in non-blocking mode with Interrupt. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param pData pointer to data buffer + * @param Size amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Receive_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef errorcode = HAL_OK; + + if ((hspi->Init.Direction == SPI_DIRECTION_2LINES) && (hspi->Init.Mode == SPI_MODE_MASTER)) + { + hspi->State = HAL_SPI_STATE_BUSY_RX; + /* Call transmit-receive function to send Dummy data on Tx line and generate clock on CLK line */ + return HAL_SPI_TransmitReceive_IT(hspi, pData, pData, Size); + } + + /* Process Locked */ + __HAL_LOCK(hspi); + + if (hspi->State != HAL_SPI_STATE_READY) + { + errorcode = HAL_BUSY; + goto error; + } + + if ((pData == NULL) || (Size == 0U)) + { + errorcode = HAL_ERROR; + goto error; + } + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_RX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pRxBuffPtr = (uint8_t *)pData; + hspi->RxXferSize = Size; + hspi->RxXferCount = Size; + + /* Init field not used in handle to zero */ + hspi->pTxBuffPtr = (uint8_t *)NULL; + hspi->TxXferSize = 0U; + hspi->TxXferCount = 0U; + hspi->TxISR = NULL; + + /* Check the data size to adapt Rx threshold and the set the function for IT treatment */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + /* Set RX Fifo threshold according the reception data length: 16 bit */ + CLEAR_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + hspi->RxISR = SPI_RxISR_16BIT; + } + else + { + /* Set RX Fifo threshold according the reception data length: 8 bit */ + SET_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + hspi->RxISR = SPI_RxISR_8BIT; + } + + /* Configure communication direction : 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + SPI_1LINE_RX(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + hspi->CRCSize = 1U; + if ((hspi->Init.DataSize <= SPI_DATASIZE_8BIT) && (hspi->Init.CRCLength == SPI_CRC_LENGTH_16BIT)) + { + hspi->CRCSize = 2U; + } + SPI_RESET_CRC(hspi); + } + else + { + hspi->CRCSize = 0U; + } +#endif /* USE_SPI_CRC */ + + /* Enable TXE and ERR interrupt */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + /* Note : The SPI must be enabled after unlocking current process + to avoid the risk of SPI interrupt handle execution before current + process unlock */ + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + +error : + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return errorcode; +} + +/** + * @brief Transmit and Receive an amount of data in non-blocking mode with Interrupt. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param pTxData pointer to transmission data buffer + * @param pRxData pointer to reception data buffer + * @param Size amount of data to be sent and received + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, uint16_t Size) +{ + uint32_t tmp_mode; + HAL_SPI_StateTypeDef tmp_state; + HAL_StatusTypeDef errorcode = HAL_OK; + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES(hspi->Init.Direction)); + + /* Process locked */ + __HAL_LOCK(hspi); + + /* Init temporary variables */ + tmp_state = hspi->State; + tmp_mode = hspi->Init.Mode; + + if (!((tmp_state == HAL_SPI_STATE_READY) || \ + ((tmp_mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES) && (tmp_state == HAL_SPI_STATE_BUSY_RX)))) + { + errorcode = HAL_BUSY; + goto error; + } + + if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) + { + errorcode = HAL_ERROR; + goto error; + } + + /* Don't overwrite in case of HAL_SPI_STATE_BUSY_RX */ + if (hspi->State != HAL_SPI_STATE_BUSY_RX) + { + hspi->State = HAL_SPI_STATE_BUSY_TX_RX; + } + + /* Set the transaction information */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pTxBuffPtr = (uint8_t *)pTxData; + hspi->TxXferSize = Size; + hspi->TxXferCount = Size; + hspi->pRxBuffPtr = (uint8_t *)pRxData; + hspi->RxXferSize = Size; + hspi->RxXferCount = Size; + + /* Set the function for IT treatment */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + hspi->RxISR = SPI_2linesRxISR_16BIT; + hspi->TxISR = SPI_2linesTxISR_16BIT; + } + else + { + hspi->RxISR = SPI_2linesRxISR_8BIT; + hspi->TxISR = SPI_2linesTxISR_8BIT; + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + hspi->CRCSize = 1U; + if ((hspi->Init.DataSize <= SPI_DATASIZE_8BIT) && (hspi->Init.CRCLength == SPI_CRC_LENGTH_16BIT)) + { + hspi->CRCSize = 2U; + } + SPI_RESET_CRC(hspi); + } + else + { + hspi->CRCSize = 0U; + } +#endif /* USE_SPI_CRC */ + + /* Check if packing mode is enabled and if there is more than 2 data to receive */ + if ((hspi->Init.DataSize > SPI_DATASIZE_8BIT) || (Size >= 2U)) + { + /* Set RX Fifo threshold according the reception data length: 16 bit */ + CLEAR_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + } + else + { + /* Set RX Fifo threshold according the reception data length: 8 bit */ + SET_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + } + + /* Enable TXE, RXNE and ERR interrupt */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_RXNE | SPI_IT_ERR)); + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + +error : + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return errorcode; +} + +/** + * @brief Transmit an amount of data in non-blocking mode with DMA. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param pData pointer to data buffer + * @param Size amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef errorcode = HAL_OK; + + /* Check tx dma handle */ + assert_param(IS_SPI_DMA_HANDLE(hspi->hdmatx)); + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES_OR_1LINE(hspi->Init.Direction)); + + /* Process Locked */ + __HAL_LOCK(hspi); + + if (hspi->State != HAL_SPI_STATE_READY) + { + errorcode = HAL_BUSY; + goto error; + } + + if ((pData == NULL) || (Size == 0U)) + { + errorcode = HAL_ERROR; + goto error; + } + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_TX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pTxBuffPtr = (uint8_t *)pData; + hspi->TxXferSize = Size; + hspi->TxXferCount = Size; + + /* Init field not used in handle to zero */ + hspi->pRxBuffPtr = (uint8_t *)NULL; + hspi->TxISR = NULL; + hspi->RxISR = NULL; + hspi->RxXferSize = 0U; + hspi->RxXferCount = 0U; + + /* Configure communication direction : 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + SPI_1LINE_TX(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Set the SPI TxDMA Half transfer complete callback */ + hspi->hdmatx->XferHalfCpltCallback = SPI_DMAHalfTransmitCplt; + + /* Set the SPI TxDMA transfer complete callback */ + hspi->hdmatx->XferCpltCallback = SPI_DMATransmitCplt; + + /* Set the DMA error callback */ + hspi->hdmatx->XferErrorCallback = SPI_DMAError; + + /* Set the DMA AbortCpltCallback */ + hspi->hdmatx->XferAbortCallback = NULL; + + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_LDMATX); + /* Packing mode is enabled only if the DMA setting is HALWORD */ + if ((hspi->Init.DataSize <= SPI_DATASIZE_8BIT) && (hspi->hdmatx->Init.MemDataAlignment == DMA_MDATAALIGN_HALFWORD)) + { + /* Check the even/odd of the data size + crc if enabled */ + if ((hspi->TxXferCount & 0x1U) == 0U) + { + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_LDMATX); + hspi->TxXferCount = (hspi->TxXferCount >> 1U); + } + else + { + SET_BIT(hspi->Instance->CR2, SPI_CR2_LDMATX); + hspi->TxXferCount = (hspi->TxXferCount >> 1U) + 1U; + } + } + + /* Enable the Tx DMA Stream/Channel */ + if (HAL_OK != HAL_DMA_Start_IT(hspi->hdmatx, (uint32_t)hspi->pTxBuffPtr, (uint32_t)&hspi->Instance->DR, hspi->TxXferCount)) + { + /* Update SPI error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + errorcode = HAL_ERROR; + + hspi->State = HAL_SPI_STATE_READY; + goto error; + } + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Enable the SPI Error Interrupt Bit */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_ERR)); + + /* Enable Tx DMA Request */ + SET_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN); + +error : + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return errorcode; +} + +/** + * @brief Receive an amount of data in non-blocking mode with DMA. + * @note In case of MASTER mode and SPI_DIRECTION_2LINES direction, hdmatx shall be defined. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param pData pointer to data buffer + * @note When the CRC feature is enabled the pData Length must be Size + 1. 
+ * @param Size amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef errorcode = HAL_OK; + + /* Check rx dma handle */ + assert_param(IS_SPI_DMA_HANDLE(hspi->hdmarx)); + + if ((hspi->Init.Direction == SPI_DIRECTION_2LINES) && (hspi->Init.Mode == SPI_MODE_MASTER)) + { + hspi->State = HAL_SPI_STATE_BUSY_RX; + + /* Check tx dma handle */ + assert_param(IS_SPI_DMA_HANDLE(hspi->hdmatx)); + + /* Call transmit-receive function to send Dummy data on Tx line and generate clock on CLK line */ + return HAL_SPI_TransmitReceive_DMA(hspi, pData, pData, Size); + } + + /* Process Locked */ + __HAL_LOCK(hspi); + + if (hspi->State != HAL_SPI_STATE_READY) + { + errorcode = HAL_BUSY; + goto error; + } + + if ((pData == NULL) || (Size == 0U)) + { + errorcode = HAL_ERROR; + goto error; + } + + /* Set the transaction information */ + hspi->State = HAL_SPI_STATE_BUSY_RX; + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pRxBuffPtr = (uint8_t *)pData; + hspi->RxXferSize = Size; + hspi->RxXferCount = Size; + + /*Init field not used in handle to zero */ + hspi->RxISR = NULL; + hspi->TxISR = NULL; + hspi->TxXferSize = 0U; + hspi->TxXferCount = 0U; + + /* Configure communication direction : 1Line */ + if (hspi->Init.Direction == SPI_DIRECTION_1LINE) + { + SPI_1LINE_RX(hspi); + } + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_LDMARX); + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + /* Set RX Fifo threshold according the reception data length: 16bit */ + CLEAR_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + } + else + { + /* Set RX Fifo threshold according the reception data length: 8bit */ + SET_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + + if (hspi->hdmarx->Init.MemDataAlignment == DMA_MDATAALIGN_HALFWORD) + { + /* Set RX Fifo threshold according the reception data length: 16bit */ + CLEAR_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + + if ((hspi->RxXferCount & 0x1U) == 0x0U) + { + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_LDMARX); + hspi->RxXferCount = hspi->RxXferCount >> 1U; + } + else + { + SET_BIT(hspi->Instance->CR2, SPI_CR2_LDMARX); + hspi->RxXferCount = (hspi->RxXferCount >> 1U) + 1U; + } + } + } + + /* Set the SPI RxDMA Half transfer complete callback */ + hspi->hdmarx->XferHalfCpltCallback = SPI_DMAHalfReceiveCplt; + + /* Set the SPI Rx DMA transfer complete callback */ + hspi->hdmarx->XferCpltCallback = SPI_DMAReceiveCplt; + + /* Set the DMA error callback */ + hspi->hdmarx->XferErrorCallback = SPI_DMAError; + + /* Set the DMA AbortCpltCallback */ + hspi->hdmarx->XferAbortCallback = NULL; + + /* Enable the Rx DMA Stream/Channel */ + if (HAL_OK != HAL_DMA_Start_IT(hspi->hdmarx, (uint32_t)&hspi->Instance->DR, (uint32_t)hspi->pRxBuffPtr, hspi->RxXferCount)) + { + /* Update SPI error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + errorcode = HAL_ERROR; + + hspi->State = HAL_SPI_STATE_READY; + goto error; + } + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + + /* Enable the SPI Error Interrupt Bit */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_ERR)); + + /* Enable Rx DMA Request */ + SET_BIT(hspi->Instance->CR2, SPI_CR2_RXDMAEN); + +error: + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + 
return errorcode; +} + +/** + * @brief Transmit and Receive an amount of data in non-blocking mode with DMA. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param pTxData pointer to transmission data buffer + * @param pRxData pointer to reception data buffer + * @note When the CRC feature is enabled the pRxData Length must be Size + 1 + * @param Size amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size) +{ + uint32_t tmp_mode; + HAL_SPI_StateTypeDef tmp_state; + HAL_StatusTypeDef errorcode = HAL_OK; + + /* Check rx & tx dma handles */ + assert_param(IS_SPI_DMA_HANDLE(hspi->hdmarx)); + assert_param(IS_SPI_DMA_HANDLE(hspi->hdmatx)); + + /* Check Direction parameter */ + assert_param(IS_SPI_DIRECTION_2LINES(hspi->Init.Direction)); + + /* Process locked */ + __HAL_LOCK(hspi); + + /* Init temporary variables */ + tmp_state = hspi->State; + tmp_mode = hspi->Init.Mode; + + if (!((tmp_state == HAL_SPI_STATE_READY) || + ((tmp_mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES) && (tmp_state == HAL_SPI_STATE_BUSY_RX)))) + { + errorcode = HAL_BUSY; + goto error; + } + + if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) + { + errorcode = HAL_ERROR; + goto error; + } + + /* Don't overwrite in case of HAL_SPI_STATE_BUSY_RX */ + if (hspi->State != HAL_SPI_STATE_BUSY_RX) + { + hspi->State = HAL_SPI_STATE_BUSY_TX_RX; + } + + /* Set the transaction information */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + hspi->pTxBuffPtr = (uint8_t *)pTxData; + hspi->TxXferSize = Size; + hspi->TxXferCount = Size; + hspi->pRxBuffPtr = (uint8_t *)pRxData; + hspi->RxXferSize = Size; + hspi->RxXferCount = Size; + + /* Init field not used in handle to zero */ + hspi->RxISR = NULL; + hspi->TxISR = NULL; + +#if (USE_SPI_CRC != 0U) + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } +#endif /* USE_SPI_CRC */ + + /* Reset the threshold bit */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_LDMATX | SPI_CR2_LDMARX); + + /* The packing mode management is enabled by the DMA settings according the spi data size */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + /* Set fiforxthreshold according the reception data length: 16bit */ + CLEAR_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + } + else + { + /* Set RX Fifo threshold according the reception data length: 8bit */ + SET_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + + if (hspi->hdmatx->Init.MemDataAlignment == DMA_MDATAALIGN_HALFWORD) + { + if ((hspi->TxXferSize & 0x1U) == 0x0U) + { + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_LDMATX); + hspi->TxXferCount = hspi->TxXferCount >> 1U; + } + else + { + SET_BIT(hspi->Instance->CR2, SPI_CR2_LDMATX); + hspi->TxXferCount = (hspi->TxXferCount >> 1U) + 1U; + } + } + + if (hspi->hdmarx->Init.MemDataAlignment == DMA_MDATAALIGN_HALFWORD) + { + /* Set RX Fifo threshold according the reception data length: 16bit */ + CLEAR_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + + if ((hspi->RxXferCount & 0x1U) == 0x0U) + { + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_LDMARX); + hspi->RxXferCount = hspi->RxXferCount >> 1U; + } + else + { + SET_BIT(hspi->Instance->CR2, SPI_CR2_LDMARX); + hspi->RxXferCount = (hspi->RxXferCount >> 1U) + 1U; + } + } + } + + /* Check if we are in Rx only or in Rx/Tx Mode and configure the DMA transfer complete callback */ + if 
(hspi->State == HAL_SPI_STATE_BUSY_RX) + { + /* Set the SPI Rx DMA Half transfer complete callback */ + hspi->hdmarx->XferHalfCpltCallback = SPI_DMAHalfReceiveCplt; + hspi->hdmarx->XferCpltCallback = SPI_DMAReceiveCplt; + } + else + { + /* Set the SPI Tx/Rx DMA Half transfer complete callback */ + hspi->hdmarx->XferHalfCpltCallback = SPI_DMAHalfTransmitReceiveCplt; + hspi->hdmarx->XferCpltCallback = SPI_DMATransmitReceiveCplt; + } + + /* Set the DMA error callback */ + hspi->hdmarx->XferErrorCallback = SPI_DMAError; + + /* Set the DMA AbortCpltCallback */ + hspi->hdmarx->XferAbortCallback = NULL; + + /* Enable the Rx DMA Stream/Channel */ + if (HAL_OK != HAL_DMA_Start_IT(hspi->hdmarx, (uint32_t)&hspi->Instance->DR, (uint32_t)hspi->pRxBuffPtr, hspi->RxXferCount)) + { + /* Update SPI error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + errorcode = HAL_ERROR; + + hspi->State = HAL_SPI_STATE_READY; + goto error; + } + + /* Enable Rx DMA Request */ + SET_BIT(hspi->Instance->CR2, SPI_CR2_RXDMAEN); + + /* Set the SPI Tx DMA transfer complete callback as NULL because the communication closing + is performed in DMA reception complete callback */ + hspi->hdmatx->XferHalfCpltCallback = NULL; + hspi->hdmatx->XferCpltCallback = NULL; + hspi->hdmatx->XferErrorCallback = NULL; + hspi->hdmatx->XferAbortCallback = NULL; + + /* Enable the Tx DMA Stream/Channel */ + if (HAL_OK != HAL_DMA_Start_IT(hspi->hdmatx, (uint32_t)hspi->pTxBuffPtr, (uint32_t)&hspi->Instance->DR, hspi->TxXferCount)) + { + /* Update SPI error code */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + errorcode = HAL_ERROR; + + hspi->State = HAL_SPI_STATE_READY; + goto error; + } + + /* Check if the SPI is already enabled */ + if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) + { + /* Enable SPI peripheral */ + __HAL_SPI_ENABLE(hspi); + } + /* Enable the SPI Error Interrupt Bit */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_ERR)); + + /* Enable Tx DMA Request */ + SET_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN); + +error : + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return errorcode; +} + +/** + * @brief Abort ongoing transfer (blocking mode). + * @param hspi SPI handle. + * @note This procedure could be used for aborting any ongoing transfer (Tx and Rx), + * started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable SPI Interrupts (depending of transfer direction) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. 
+ * @retval HAL status +*/ +HAL_StatusTypeDef HAL_SPI_Abort(SPI_HandleTypeDef *hspi) +{ + HAL_StatusTypeDef errorcode; + __IO uint32_t count, resetcount; + + /* Initialized local variable */ + errorcode = HAL_OK; + resetcount = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); + count = resetcount; + + /* Clear ERRIE interrupt to avoid error interrupts generation during Abort procedure */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_ERRIE); + + /* Disable TXEIE, RXNEIE and ERRIE(mode fault event, overrun error, TI frame error) interrupts */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXEIE)) + { + hspi->TxISR = SPI_AbortTx_ISR; + /* Wait HAL_SPI_STATE_ABORT state */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } + while (hspi->State != HAL_SPI_STATE_ABORT); + /* Reset Timeout Counter */ + count = resetcount; + } + + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXNEIE)) + { + hspi->RxISR = SPI_AbortRx_ISR; + /* Wait HAL_SPI_STATE_ABORT state */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } + while (hspi->State != HAL_SPI_STATE_ABORT); + /* Reset Timeout Counter */ + count = resetcount; + } + + /* Disable the SPI DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXDMAEN)) + { + /* Abort the SPI DMA Tx Stream/Channel : use blocking DMA Abort API (no callback) */ + if (hspi->hdmatx != NULL) + { + /* Set the SPI DMA Abort callback : + will lead to call HAL_SPI_AbortCpltCallback() at end of DMA abort procedure */ + hspi->hdmatx->XferAbortCallback = NULL; + + /* Abort DMA Tx Handle linked to SPI Peripheral */ + if (HAL_DMA_Abort(hspi->hdmatx) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Disable Tx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_TXDMAEN)); + + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Disable SPI Peripheral */ + __HAL_SPI_DISABLE(hspi); + + /* Empty the FRLVL fifo */ + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_EMPTY, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + } + } + + /* Disable the SPI DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXDMAEN)) + { + /* Abort the SPI DMA Rx Stream/Channel : use blocking DMA Abort API (no callback) */ + if (hspi->hdmarx != NULL) + { + /* Set the SPI DMA Abort callback : + will lead to call HAL_SPI_AbortCpltCallback() at end of DMA abort procedure */ + hspi->hdmarx->XferAbortCallback = NULL; + + /* Abort DMA Rx Handle linked to SPI Peripheral */ + if (HAL_DMA_Abort(hspi->hdmarx) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Disable peripheral */ + __HAL_SPI_DISABLE(hspi); + + /* Control the BSY flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_BSY, RESET, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Empty the FRLVL fifo */ + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_EMPTY, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Disable Rx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_RXDMAEN)); + } + } + /* Reset Tx and Rx transfer counters */ + hspi->RxXferCount = 0U; + hspi->TxXferCount = 0U; + + /* Check error during Abort procedure */ + if (hspi->ErrorCode == HAL_SPI_ERROR_ABORT) + { + 
/* return HAL_Error in case of error during Abort procedure */ + errorcode = HAL_ERROR; + } + else + { + /* Reset errorCode */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + } + + /* Clear the Error flags in the SR register */ + __HAL_SPI_CLEAR_OVRFLAG(hspi); + __HAL_SPI_CLEAR_FREFLAG(hspi); + + /* Restore hspi->state to ready */ + hspi->State = HAL_SPI_STATE_READY; + + return errorcode; +} + +/** + * @brief Abort ongoing transfer (Interrupt mode). + * @param hspi SPI handle. + * @note This procedure could be used for aborting any ongoing transfer (Tx and Rx), + * started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable SPI Interrupts (depending of transfer direction) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) + * - Set handle State to READY + * - At abort completion, call user abort complete callback + * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be + * considered as completed only when user abort complete callback is executed (not when exiting function). + * @retval HAL status +*/ +HAL_StatusTypeDef HAL_SPI_Abort_IT(SPI_HandleTypeDef *hspi) +{ + HAL_StatusTypeDef errorcode; + uint32_t abortcplt ; + __IO uint32_t count, resetcount; + + /* Initialized local variable */ + errorcode = HAL_OK; + abortcplt = 1U; + resetcount = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); + count = resetcount; + + /* Clear ERRIE interrupt to avoid error interrupts generation during Abort procedure */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_ERRIE); + + /* Change Rx and Tx Irq Handler to Disable TXEIE, RXNEIE and ERRIE interrupts */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXEIE)) + { + hspi->TxISR = SPI_AbortTx_ISR; + /* Wait HAL_SPI_STATE_ABORT state */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } + while (hspi->State != HAL_SPI_STATE_ABORT); + /* Reset Timeout Counter */ + count = resetcount; + } + + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXNEIE)) + { + hspi->RxISR = SPI_AbortRx_ISR; + /* Wait HAL_SPI_STATE_ABORT state */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } + while (hspi->State != HAL_SPI_STATE_ABORT); + /* Reset Timeout Counter */ + count = resetcount; + } + + /* If DMA Tx and/or DMA Rx Handles are associated to SPI Handle, DMA Abort complete callbacks should be initialised + before any call to DMA Abort functions */ + /* DMA Tx Handle is valid */ + if (hspi->hdmatx != NULL) + { + /* Set DMA Abort Complete callback if UART DMA Tx request if enabled. + Otherwise, set it to NULL */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXDMAEN)) + { + hspi->hdmatx->XferAbortCallback = SPI_DMATxAbortCallback; + } + else + { + hspi->hdmatx->XferAbortCallback = NULL; + } + } + /* DMA Rx Handle is valid */ + if (hspi->hdmarx != NULL) + { + /* Set DMA Abort Complete callback if UART DMA Rx request if enabled. 
+ Otherwise, set it to NULL */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXDMAEN)) + { + hspi->hdmarx->XferAbortCallback = SPI_DMARxAbortCallback; + } + else + { + hspi->hdmarx->XferAbortCallback = NULL; + } + } + + /* Disable the SPI DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXDMAEN)) + { + /* Abort the SPI DMA Tx Stream/Channel */ + if (hspi->hdmatx != NULL) + { + /* Abort DMA Tx Handle linked to SPI Peripheral */ + if (HAL_DMA_Abort_IT(hspi->hdmatx) != HAL_OK) + { + hspi->hdmatx->XferAbortCallback = NULL; + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + else + { + abortcplt = 0U; + } + } + } + /* Disable the SPI DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXDMAEN)) + { + /* Abort the SPI DMA Rx Stream/Channel */ + if (hspi->hdmarx != NULL) + { + /* Abort DMA Rx Handle linked to SPI Peripheral */ + if (HAL_DMA_Abort_IT(hspi->hdmarx) != HAL_OK) + { + hspi->hdmarx->XferAbortCallback = NULL; + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + else + { + abortcplt = 0U; + } + } + } + + if (abortcplt == 1U) + { + /* Reset Tx and Rx transfer counters */ + hspi->RxXferCount = 0U; + hspi->TxXferCount = 0U; + + /* Check error during Abort procedure */ + if (hspi->ErrorCode == HAL_SPI_ERROR_ABORT) + { + /* return HAL_Error in case of error during Abort procedure */ + errorcode = HAL_ERROR; + } + else + { + /* Reset errorCode */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + } + + /* Clear the Error flags in the SR register */ + __HAL_SPI_CLEAR_OVRFLAG(hspi); + __HAL_SPI_CLEAR_FREFLAG(hspi); + + /* Restore hspi->State to Ready */ + hspi->State = HAL_SPI_STATE_READY; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->AbortCpltCallback(hspi); +#else + HAL_SPI_AbortCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + + return errorcode; +} + +/** + * @brief Pause the DMA Transfer. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_DMAPause(SPI_HandleTypeDef *hspi) +{ + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Disable the SPI DMA Tx & Rx requests */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + return HAL_OK; +} + +/** + * @brief Resume the DMA Transfer. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_DMAResume(SPI_HandleTypeDef *hspi) +{ + /* Process Locked */ + __HAL_LOCK(hspi); + + /* Enable the SPI DMA Tx & Rx requests */ + SET_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + return HAL_OK; +} + +/** + * @brief Stop the DMA Transfer. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI module. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPI_DMAStop(SPI_HandleTypeDef *hspi) +{ + HAL_StatusTypeDef errorcode = HAL_OK; + /* The Lock is not implemented on this API to allow the user application + to call the HAL SPI API under callbacks HAL_SPI_TxCpltCallback() or HAL_SPI_RxCpltCallback() or HAL_SPI_TxRxCpltCallback(): + when calling HAL_DMA_Abort() API the DMA TX/RX Transfer complete interrupt is generated + and the correspond call back is executed HAL_SPI_TxCpltCallback() or HAL_SPI_RxCpltCallback() or HAL_SPI_TxRxCpltCallback() + */ + + /* Abort the SPI DMA tx Stream/Channel */ + if (hspi->hdmatx != NULL) + { + if (HAL_OK != HAL_DMA_Abort(hspi->hdmatx)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + errorcode = HAL_ERROR; + } + } + /* Abort the SPI DMA rx Stream/Channel */ + if (hspi->hdmarx != NULL) + { + if (HAL_OK != HAL_DMA_Abort(hspi->hdmarx)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + errorcode = HAL_ERROR; + } + } + + /* Disable the SPI DMA Tx & Rx requests */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + hspi->State = HAL_SPI_STATE_READY; + return errorcode; +} + +/** + * @brief Handle SPI interrupt request. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI module. + * @retval None + */ +void HAL_SPI_IRQHandler(SPI_HandleTypeDef *hspi) +{ + uint32_t itsource = hspi->Instance->CR2; + uint32_t itflag = hspi->Instance->SR; + + /* SPI in mode Receiver ----------------------------------------------------*/ + if ((SPI_CHECK_FLAG(itflag, SPI_FLAG_OVR) == RESET) && + (SPI_CHECK_FLAG(itflag, SPI_FLAG_RXNE) != RESET) && (SPI_CHECK_IT_SOURCE(itsource, SPI_IT_RXNE) != RESET)) + { + hspi->RxISR(hspi); + return; + } + + /* SPI in mode Transmitter -------------------------------------------------*/ + if ((SPI_CHECK_FLAG(itflag, SPI_FLAG_TXE) != RESET) && (SPI_CHECK_IT_SOURCE(itsource, SPI_IT_TXE) != RESET)) + { + hspi->TxISR(hspi); + return; + } + + /* SPI in Error Treatment --------------------------------------------------*/ + if (((SPI_CHECK_FLAG(itflag, SPI_FLAG_MODF) != RESET) || (SPI_CHECK_FLAG(itflag, SPI_FLAG_OVR) != RESET) || (SPI_CHECK_FLAG(itflag, SPI_FLAG_FRE) != RESET)) && (SPI_CHECK_IT_SOURCE(itsource, SPI_IT_ERR) != RESET)) + { + /* SPI Overrun error interrupt occurred ----------------------------------*/ + if (SPI_CHECK_FLAG(itflag, SPI_FLAG_OVR) != RESET) + { + if (hspi->State != HAL_SPI_STATE_BUSY_TX) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_OVR); + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + else + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + return; + } + } + + /* SPI Mode Fault error interrupt occurred -------------------------------*/ + if (SPI_CHECK_FLAG(itflag, SPI_FLAG_MODF) != RESET) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_MODF); + __HAL_SPI_CLEAR_MODFFLAG(hspi); + } + + /* SPI Frame error interrupt occurred ------------------------------------*/ + if (SPI_CHECK_FLAG(itflag, SPI_FLAG_FRE) != RESET) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FRE); + __HAL_SPI_CLEAR_FREFLAG(hspi); + } + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + /* Disable all interrupts */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_RXNE | SPI_IT_TXE | SPI_IT_ERR); + + hspi->State = HAL_SPI_STATE_READY; + /* Disable the SPI DMA requests if enabled */ + if ((HAL_IS_BIT_SET(itsource, SPI_CR2_TXDMAEN)) || (HAL_IS_BIT_SET(itsource, SPI_CR2_RXDMAEN))) + { + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN)); + + /* Abort the SPI DMA Rx channel */ + if (hspi->hdmarx != 
NULL) + { + /* Set the SPI DMA Abort callback : + will lead to call HAL_SPI_ErrorCallback() at end of DMA abort procedure */ + hspi->hdmarx->XferAbortCallback = SPI_DMAAbortOnError; + if (HAL_OK != HAL_DMA_Abort_IT(hspi->hdmarx)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + } + } + /* Abort the SPI DMA Tx channel */ + if (hspi->hdmatx != NULL) + { + /* Set the SPI DMA Abort callback : + will lead to call HAL_SPI_ErrorCallback() at end of DMA abort procedure */ + hspi->hdmatx->XferAbortCallback = SPI_DMAAbortOnError; + if (HAL_OK != HAL_DMA_Abort_IT(hspi->hdmatx)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + } + } + } + else + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + } + return; + } +} + +/** + * @brief Tx Transfer completed callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_TxCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_TxCpltCallback should be implemented in the user file + */ +} + +/** + * @brief Rx Transfer completed callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_RxCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_RxCpltCallback should be implemented in the user file + */ +} + +/** + * @brief Tx and Rx Transfer completed callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_TxRxCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_TxRxCpltCallback should be implemented in the user file + */ +} + +/** + * @brief Tx Half Transfer completed callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_TxHalfCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_TxHalfCpltCallback should be implemented in the user file + */ +} + +/** + * @brief Rx Half Transfer completed callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_RxHalfCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_RxHalfCpltCallback() should be implemented in the user file + */ +} + +/** + * @brief Tx and Rx Half Transfer callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval None + */ +__weak void HAL_SPI_TxRxHalfCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_TxRxHalfCpltCallback() should be implemented in the user file + */ +} + +/** + * @brief SPI error callback. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +__weak void HAL_SPI_ErrorCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_ErrorCallback should be implemented in the user file + */ + /* NOTE : The ErrorCode parameter in the hspi handle is updated by the SPI processes + and user can use HAL_SPI_GetError() API to check the latest error occurred + */ +} + +/** + * @brief SPI Abort Complete callback. + * @param hspi SPI handle. + * @retval None + */ +__weak void HAL_SPI_AbortCpltCallback(SPI_HandleTypeDef *hspi) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hspi); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_SPI_AbortCpltCallback can be implemented in the user file. + */ +} + +/** + * @} + */ + +/** @defgroup SPI_Exported_Functions_Group3 Peripheral State and Errors functions + * @brief SPI control functions + * +@verbatim + =============================================================================== + ##### Peripheral State and Errors functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the SPI. + (+) HAL_SPI_GetState() API can be helpful to check in run-time the state of the SPI peripheral + (+) HAL_SPI_GetError() check in run-time Errors occurring during communication +@endverbatim + * @{ + */ + +/** + * @brief Return the SPI handle state. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval SPI state + */ +HAL_SPI_StateTypeDef HAL_SPI_GetState(SPI_HandleTypeDef *hspi) +{ + /* Return SPI handle state */ + return hspi->State; +} + +/** + * @brief Return the SPI error code. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval SPI error code in bitmap format + */ +uint32_t HAL_SPI_GetError(SPI_HandleTypeDef *hspi) +{ + /* Return SPI ErrorCode */ + return hspi->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup SPI_Private_Functions + * @brief Private functions + * @{ + */ + +/** + * @brief DMA SPI transmit process complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. 
+ * @retval None + */ +static void SPI_DMATransmitCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + uint32_t tickstart; + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + /* DMA Normal Mode */ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) != DMA_SxCR_CIRC) + { + /* Disable ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_ERR); + + /* Disable Tx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN); + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + } + + /* Clear overrun flag in 2 Lines communication mode because received data is not read */ + if (hspi->Init.Direction == SPI_DIRECTION_2LINES) + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + + hspi->TxXferCount = 0U; + hspi->State = HAL_SPI_STATE_READY; + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + return; + } + } + /* Call user Tx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxCpltCallback(hspi); +#else + HAL_SPI_TxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI receive process complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void SPI_DMAReceiveCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + uint32_t tickstart; + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + /* DMA Normal Mode */ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) != DMA_SxCR_CIRC) + { + /* Disable ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_ERR); + +#if (USE_SPI_CRC != 0U) + /* CRC handling */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Wait until RXNE flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + /* Error on the CRC reception */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + } + /* Read CRC */ + if (hspi->Init.DataSize > SPI_DATASIZE_8BIT) + { + /* Read 16bit CRC */ + READ_REG(hspi->Instance->DR); + } + else + { + /* Read 8bit CRC */ + READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + + if (hspi->Init.CRCLength == SPI_CRC_LENGTH_16BIT) + { + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + /* Error on the CRC reception */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + } + /* Read 8bit CRC again in case of 16bit CRC in 8bit Data mode */ + READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + } + } + } +#endif /* USE_SPI_CRC */ + + /* Disable Rx/Tx DMA Request (done by default to handle the case master rx direction 2 lines) */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + + /* Check the end of the transaction */ + if (SPI_EndRxTransaction(hspi, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_FLAG; + } + + hspi->RxXferCount = 0U; + hspi->State = HAL_SPI_STATE_READY; + +#if (USE_SPI_CRC != 0U) + /* Check if CRC error occurred */ + if 
(__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + } +#endif /* USE_SPI_CRC */ + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + return; + } + } + /* Call user Rx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->RxCpltCallback(hspi); +#else + HAL_SPI_RxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI transmit receive process complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void SPI_DMATransmitReceiveCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + uint32_t tickstart; + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + /* DMA Normal Mode */ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) != DMA_SxCR_CIRC) + { + /* Disable ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_ERR); + +#if (USE_SPI_CRC != 0U) + /* CRC handling */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + if ((hspi->Init.DataSize == SPI_DATASIZE_8BIT) && (hspi->Init.CRCLength == SPI_CRC_LENGTH_8BIT)) + { + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_QUARTER_FULL, SPI_DEFAULT_TIMEOUT, + tickstart) != HAL_OK) + { + /* Error on the CRC reception */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + } + /* Read CRC to Flush DR and RXNE flag */ + READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + } + else + { + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_HALF_FULL, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + /* Error on the CRC reception */ + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + } + /* Read CRC to Flush DR and RXNE flag */ + READ_REG(hspi->Instance->DR); + } + } +#endif /* USE_SPI_CRC */ + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + } + + /* Disable Rx/Tx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + + hspi->TxXferCount = 0U; + hspi->RxXferCount = 0U; + hspi->State = HAL_SPI_STATE_READY; + +#if (USE_SPI_CRC != 0U) + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR)) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + } +#endif /* USE_SPI_CRC */ + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + return; + } + } + /* Call user TxRx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxRxCpltCallback(hspi); +#else + HAL_SPI_TxRxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI half transmit process complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. 
+ * @retval None + */ +static void SPI_DMAHalfTransmitCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Call user Tx half complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxHalfCpltCallback(hspi); +#else + HAL_SPI_TxHalfCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI half receive process complete callback + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void SPI_DMAHalfReceiveCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Call user Rx half complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->RxHalfCpltCallback(hspi); +#else + HAL_SPI_RxHalfCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI half transmit receive process complete callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void SPI_DMAHalfTransmitReceiveCplt(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Call user TxRx half complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxRxHalfCpltCallback(hspi); +#else + HAL_SPI_TxRxHalfCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI communication error callback. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA module. + * @retval None + */ +static void SPI_DMAError(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Stop the disable DMA transfer on SPI side */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN); + + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); + hspi->State = HAL_SPI_STATE_READY; + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI communication abort callback, when initiated by HAL services on Error + * (To be called at end of DMA Abort procedure following error occurrence). + * @param hdma DMA handle. + * @retval None + */ +static void SPI_DMAAbortOnError(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + hspi->RxXferCount = 0U; + hspi->TxXferCount = 0U; + + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI Tx communication abort callback, when initiated by user + * (To be called at end of DMA Tx Abort procedure following user abort request). + * @note When this callback is executed, User Abort complete call back is called only if no + * Abort still ongoing for Rx DMA Handle. + * @param hdma DMA handle. 
+ * @retval None + */ +static void SPI_DMATxAbortCallback(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + hspi->hdmatx->XferAbortCallback = NULL; + + /* Disable Tx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN); + + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Disable SPI Peripheral */ + __HAL_SPI_DISABLE(hspi); + + /* Empty the FRLVL fifo */ + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_EMPTY, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Check if an Abort process is still ongoing */ + if (hspi->hdmarx != NULL) + { + if (hspi->hdmarx->XferAbortCallback != NULL) + { + return; + } + } + + /* No Abort process still ongoing : All DMA Stream/Channel are aborted, call user Abort Complete callback */ + hspi->RxXferCount = 0U; + hspi->TxXferCount = 0U; + + /* Check no error during Abort procedure */ + if (hspi->ErrorCode != HAL_SPI_ERROR_ABORT) + { + /* Reset errorCode */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + } + + /* Clear the Error flags in the SR register */ + __HAL_SPI_CLEAR_OVRFLAG(hspi); + __HAL_SPI_CLEAR_FREFLAG(hspi); + + /* Restore hspi->State to Ready */ + hspi->State = HAL_SPI_STATE_READY; + + /* Call user Abort complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->AbortCpltCallback(hspi); +#else + HAL_SPI_AbortCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA SPI Rx communication abort callback, when initiated by user + * (To be called at end of DMA Rx Abort procedure following user abort request). + * @note When this callback is executed, User Abort complete call back is called only if no + * Abort still ongoing for Tx DMA Handle. + * @param hdma DMA handle. 
+ * @retval None + */ +static void SPI_DMARxAbortCallback(DMA_HandleTypeDef *hdma) +{ + SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ + + /* Disable SPI Peripheral */ + __HAL_SPI_DISABLE(hspi); + + hspi->hdmarx->XferAbortCallback = NULL; + + /* Disable Rx DMA Request */ + CLEAR_BIT(hspi->Instance->CR2, SPI_CR2_RXDMAEN); + + /* Control the BSY flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_BSY, RESET, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Empty the FRLVL fifo */ + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_EMPTY, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Check if an Abort process is still ongoing */ + if (hspi->hdmatx != NULL) + { + if (hspi->hdmatx->XferAbortCallback != NULL) + { + return; + } + } + + /* No Abort process still ongoing : All DMA Stream/Channel are aborted, call user Abort Complete callback */ + hspi->RxXferCount = 0U; + hspi->TxXferCount = 0U; + + /* Check no error during Abort procedure */ + if (hspi->ErrorCode != HAL_SPI_ERROR_ABORT) + { + /* Reset errorCode */ + hspi->ErrorCode = HAL_SPI_ERROR_NONE; + } + + /* Clear the Error flags in the SR register */ + __HAL_SPI_CLEAR_OVRFLAG(hspi); + __HAL_SPI_CLEAR_FREFLAG(hspi); + + /* Restore hspi->State to Ready */ + hspi->State = HAL_SPI_STATE_READY; + + /* Call user Abort complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->AbortCpltCallback(hspi); +#else + HAL_SPI_AbortCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ +} + +/** + * @brief Rx 8-bit handler for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_2linesRxISR_8BIT(struct __SPI_HandleTypeDef *hspi) +{ + /* Receive data in packing mode */ + if (hspi->RxXferCount > 1U) + { + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)(hspi->Instance->DR); + hspi->pRxBuffPtr += sizeof(uint16_t); + hspi->RxXferCount -= 2U; + if (hspi->RxXferCount == 1U) + { + /* Set RX Fifo threshold according the reception data length: 8bit */ + SET_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + } + } + /* Receive data in 8 Bit mode */ + else + { + *hspi->pRxBuffPtr = *((__IO uint8_t *)&hspi->Instance->DR); + hspi->pRxBuffPtr++; + hspi->RxXferCount--; + } + + /* Check end of the reception */ + if (hspi->RxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SET_BIT(hspi->Instance->CR2, SPI_RXFIFO_THRESHOLD); + hspi->RxISR = SPI_2linesRxISR_8BITCRC; + return; + } +#endif /* USE_SPI_CRC */ + + /* Disable RXNE and ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + if (hspi->TxXferCount == 0U) + { + SPI_CloseRxTx_ISR(hspi); + } + } +} + +#if (USE_SPI_CRC != 0U) +/** + * @brief Rx 8-bit handler for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval None + */ +static void SPI_2linesRxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi) +{ + /* Read 8bit CRC to flush Data Regsiter */ + READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + + hspi->CRCSize--; + + /* Check end of the reception */ + if (hspi->CRCSize == 0U) + { + /* Disable RXNE and ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + if (hspi->TxXferCount == 0U) + { + SPI_CloseRxTx_ISR(hspi); + } + } +} +#endif /* USE_SPI_CRC */ + +/** + * @brief Tx 8-bit handler for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_2linesTxISR_8BIT(struct __SPI_HandleTypeDef *hspi) +{ + /* Transmit data in packing Bit mode */ + if (hspi->TxXferCount >= 2U) + { + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount -= 2U; + } + /* Transmit data in 8 Bit mode */ + else + { + *(__IO uint8_t *)&hspi->Instance->DR = (*hspi->pTxBuffPtr); + hspi->pTxBuffPtr++; + hspi->TxXferCount--; + } + + /* Check the end of the transmission */ + if (hspi->TxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Set CRC Next Bit to send CRC */ + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + /* Disable TXE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_TXE); + return; + } +#endif /* USE_SPI_CRC */ + + /* Disable TXE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_TXE); + + if (hspi->RxXferCount == 0U) + { + SPI_CloseRxTx_ISR(hspi); + } + } +} + +/** + * @brief Rx 16-bit handler for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_2linesRxISR_16BIT(struct __SPI_HandleTypeDef *hspi) +{ + /* Receive data in 16 Bit mode */ + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)(hspi->Instance->DR); + hspi->pRxBuffPtr += sizeof(uint16_t); + hspi->RxXferCount--; + + if (hspi->RxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + hspi->RxISR = SPI_2linesRxISR_16BITCRC; + return; + } +#endif /* USE_SPI_CRC */ + + /* Disable RXNE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_RXNE); + + if (hspi->TxXferCount == 0U) + { + SPI_CloseRxTx_ISR(hspi); + } + } +} + +#if (USE_SPI_CRC != 0U) +/** + * @brief Manage the CRC 16-bit receive for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_2linesRxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi) +{ + /* Read 16bit CRC to flush Data Regsiter */ + READ_REG(hspi->Instance->DR); + + /* Disable RXNE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_RXNE); + + SPI_CloseRxTx_ISR(hspi); +} +#endif /* USE_SPI_CRC */ + +/** + * @brief Tx 16-bit handler for Transmit and Receive in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval None + */ +static void SPI_2linesTxISR_16BIT(struct __SPI_HandleTypeDef *hspi) +{ + /* Transmit data in 16 Bit mode */ + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + + /* Enable CRC Transmission */ + if (hspi->TxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Set CRC Next Bit to send CRC */ + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + /* Disable TXE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_TXE); + return; + } +#endif /* USE_SPI_CRC */ + + /* Disable TXE interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_TXE); + + if (hspi->RxXferCount == 0U) + { + SPI_CloseRxTx_ISR(hspi); + } + } +} + +#if (USE_SPI_CRC != 0U) +/** + * @brief Manage the CRC 8-bit receive in Interrupt context. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_RxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi) +{ + /* Read 8bit CRC to flush Data Register */ + READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + + hspi->CRCSize--; + + if (hspi->CRCSize == 0U) + { + SPI_CloseRx_ISR(hspi); + } +} +#endif /* USE_SPI_CRC */ + +/** + * @brief Manage the receive 8-bit in Interrupt context. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_RxISR_8BIT(struct __SPI_HandleTypeDef *hspi) +{ + *hspi->pRxBuffPtr = (*(__IO uint8_t *)&hspi->Instance->DR); + hspi->pRxBuffPtr++; + hspi->RxXferCount--; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->RxXferCount == 1U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + + if (hspi->RxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + hspi->RxISR = SPI_RxISR_8BITCRC; + return; + } +#endif /* USE_SPI_CRC */ + SPI_CloseRx_ISR(hspi); + } +} + +#if (USE_SPI_CRC != 0U) +/** + * @brief Manage the CRC 16-bit receive in Interrupt context. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_RxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi) +{ + /* Read 16bit CRC to flush Data Register */ + READ_REG(hspi->Instance->DR); + + /* Disable RXNE and ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + SPI_CloseRx_ISR(hspi); +} +#endif /* USE_SPI_CRC */ + +/** + * @brief Manage the 16-bit receive in Interrupt context. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval None + */ +static void SPI_RxISR_16BIT(struct __SPI_HandleTypeDef *hspi) +{ + *((uint16_t *)hspi->pRxBuffPtr) = (uint16_t)(hspi->Instance->DR); + hspi->pRxBuffPtr += sizeof(uint16_t); + hspi->RxXferCount--; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->RxXferCount == 1U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + + if (hspi->RxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + hspi->RxISR = SPI_RxISR_16BITCRC; + return; + } +#endif /* USE_SPI_CRC */ + SPI_CloseRx_ISR(hspi); + } +} + +/** + * @brief Handle the data 8-bit transmit in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_TxISR_8BIT(struct __SPI_HandleTypeDef *hspi) +{ + *(__IO uint8_t *)&hspi->Instance->DR = (*hspi->pTxBuffPtr); + hspi->pTxBuffPtr++; + hspi->TxXferCount--; + + if (hspi->TxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Enable CRC Transmission */ + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + SPI_CloseTx_ISR(hspi); + } +} + +/** + * @brief Handle the data 16-bit transmit in Interrupt mode. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_TxISR_16BIT(struct __SPI_HandleTypeDef *hspi) +{ + /* Transmit data in 16 Bit mode */ + hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->pTxBuffPtr += sizeof(uint16_t); + hspi->TxXferCount--; + + if (hspi->TxXferCount == 0U) + { +#if (USE_SPI_CRC != 0U) + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + /* Enable CRC Transmission */ + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + SPI_CloseTx_ISR(hspi); + } +} + +/** + * @brief Handle SPI Communication Timeout. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param Flag SPI flag to check + * @param State flag state to check + * @param Timeout Timeout duration + * @param Tickstart tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef SPI_WaitFlagStateUntilTimeout(SPI_HandleTypeDef *hspi, uint32_t Flag, FlagStatus State, + uint32_t Timeout, uint32_t Tickstart) +{ + while ((__HAL_SPI_GET_FLAG(hspi, Flag) ? 
SET : RESET) != State) + { + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - Tickstart) >= Timeout) || (Timeout == 0U)) + { + /* Disable the SPI and reset the CRC: the CRC value should be cleared + on both master and slave sides in order to resynchronize the master + and slave for their respective CRC calculation */ + + /* Disable TXE, RXNE and ERR interrupts for the interrupt process */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_RXNE | SPI_IT_ERR)); + + if ((hspi->Init.Mode == SPI_MODE_MASTER) && ((hspi->Init.Direction == SPI_DIRECTION_1LINE) + || (hspi->Init.Direction == SPI_DIRECTION_2LINES_RXONLY))) + { + /* Disable SPI peripheral */ + __HAL_SPI_DISABLE(hspi); + } + + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } + + hspi->State = HAL_SPI_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + return HAL_TIMEOUT; + } + } + } + + return HAL_OK; +} + +/** + * @brief Handle SPI FIFO Communication Timeout. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @param Fifo Fifo to check + * @param State Fifo state to check + * @param Timeout Timeout duration + * @param Tickstart tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef SPI_WaitFifoStateUntilTimeout(SPI_HandleTypeDef *hspi, uint32_t Fifo, uint32_t State, + uint32_t Timeout, uint32_t Tickstart) +{ + while ((hspi->Instance->SR & Fifo) != State) + { + if ((Fifo == SPI_SR_FRLVL) && (State == SPI_FRLVL_EMPTY)) + { + /* Read 8bit CRC to flush Data Register */ + READ_REG(*((__IO uint8_t *)&hspi->Instance->DR)); + } + + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - Tickstart) >= Timeout) || (Timeout == 0U)) + { + /* Disable the SPI and reset the CRC: the CRC value should be cleared + on both master and slave sides in order to resynchronize the master + and slave for their respective CRC calculation */ + + /* Disable TXE, RXNE and ERR interrupts for the interrupt process */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_RXNE | SPI_IT_ERR)); + + if ((hspi->Init.Mode == SPI_MODE_MASTER) && ((hspi->Init.Direction == SPI_DIRECTION_1LINE) + || (hspi->Init.Direction == SPI_DIRECTION_2LINES_RXONLY))) + { + /* Disable SPI peripheral */ + __HAL_SPI_DISABLE(hspi); + } + + /* Reset CRC Calculation */ + if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) + { + SPI_RESET_CRC(hspi); + } + + hspi->State = HAL_SPI_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + + return HAL_TIMEOUT; + } + } + } + + return HAL_OK; +} + +/** + * @brief Handle the check of the RX transaction complete. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @param Timeout Timeout duration + * @param Tickstart tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef SPI_EndRxTransaction(SPI_HandleTypeDef *hspi, uint32_t Timeout, uint32_t Tickstart) +{ + if ((hspi->Init.Mode == SPI_MODE_MASTER) && ((hspi->Init.Direction == SPI_DIRECTION_1LINE) + || (hspi->Init.Direction == SPI_DIRECTION_2LINES_RXONLY))) + { + /* Disable SPI peripheral */ + __HAL_SPI_DISABLE(hspi); + } + + /* Control the BSY flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_BSY, RESET, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + + if ((hspi->Init.Mode == SPI_MODE_MASTER) && ((hspi->Init.Direction == SPI_DIRECTION_1LINE) + || (hspi->Init.Direction == SPI_DIRECTION_2LINES_RXONLY))) + { + /* Empty the FRLVL fifo */ + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_EMPTY, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Handle the check of the RXTX or TX transaction complete. + * @param hspi SPI handle + * @param Timeout Timeout duration + * @param Tickstart tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef SPI_EndRxTxTransaction(SPI_HandleTypeDef *hspi, uint32_t Timeout, uint32_t Tickstart) +{ + /* Control if the TX fifo is empty */ + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FTLVL, SPI_FTLVL_EMPTY, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + + /* Control the BSY flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_BSY, RESET, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + + /* Control if the RX fifo is empty */ + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_EMPTY, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + + return HAL_OK; +} + +/** + * @brief Handle the end of the RXTX transaction. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval None + */ +static void SPI_CloseRxTx_ISR(SPI_HandleTypeDef *hspi) +{ + uint32_t tickstart; + + /* Init tickstart for timeout managment*/ + tickstart = HAL_GetTick(); + + /* Disable ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, SPI_IT_ERR); + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + } + +#if (USE_SPI_CRC != 0U) + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR) != RESET) + { + hspi->State = HAL_SPI_STATE_READY; + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + else + { +#endif /* USE_SPI_CRC */ + if (hspi->ErrorCode == HAL_SPI_ERROR_NONE) + { + if (hspi->State == HAL_SPI_STATE_BUSY_RX) + { + hspi->State = HAL_SPI_STATE_READY; + /* Call user Rx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->RxCpltCallback(hspi); +#else + HAL_SPI_RxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + else + { + hspi->State = HAL_SPI_STATE_READY; + /* Call user TxRx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxRxCpltCallback(hspi); +#else + HAL_SPI_TxRxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + } + else + { + hspi->State = HAL_SPI_STATE_READY; + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } +#if (USE_SPI_CRC != 0U) + } +#endif /* USE_SPI_CRC */ +} + +/** + * @brief Handle the end of the RX transaction. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_CloseRx_ISR(SPI_HandleTypeDef *hspi) +{ + /* Disable RXNE and ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + /* Check the end of the transaction */ + if (SPI_EndRxTransaction(hspi, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + } + hspi->State = HAL_SPI_STATE_READY; + +#if (USE_SPI_CRC != 0U) + /* Check if CRC error occurred */ + if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_CRCERR) != RESET) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); + __HAL_SPI_CLEAR_CRCERRFLAG(hspi); + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + else + { +#endif /* USE_SPI_CRC */ + if (hspi->ErrorCode == HAL_SPI_ERROR_NONE) + { + /* Call user Rx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->RxCpltCallback(hspi); +#else + HAL_SPI_RxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + else + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } +#if (USE_SPI_CRC != 0U) + } +#endif /* USE_SPI_CRC */ +} + +/** + * @brief Handle the end of the TX transaction. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval None + */ +static void SPI_CloseTx_ISR(SPI_HandleTypeDef *hspi) +{ + uint32_t tickstart; + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + /* Disable TXE and ERR interrupt */ + __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_ERR)); + + /* Check the end of the transaction */ + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + } + + /* Clear overrun flag in 2 Lines communication mode because received is not read */ + if (hspi->Init.Direction == SPI_DIRECTION_2LINES) + { + __HAL_SPI_CLEAR_OVRFLAG(hspi); + } + + hspi->State = HAL_SPI_STATE_READY; + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + /* Call user error callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->ErrorCallback(hspi); +#else + HAL_SPI_ErrorCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } + else + { + /* Call user Rx complete callback */ +#if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) + hspi->TxCpltCallback(hspi); +#else + HAL_SPI_TxCpltCallback(hspi); +#endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ + } +} + +/** + * @brief Handle abort a Rx transaction. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. + * @retval None + */ +static void SPI_AbortRx_ISR(SPI_HandleTypeDef *hspi) +{ + __IO uint32_t count; + + /* Disable SPI Peripheral */ + __HAL_SPI_DISABLE(hspi); + + count = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); + + /* Disable RXNEIE interrupt */ + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_RXNEIE)); + + /* Check RXNEIE is disabled */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } + while (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXNEIE)); + + /* Control the BSY flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_BSY, RESET, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Empty the FRLVL fifo */ + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_EMPTY, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + hspi->State = HAL_SPI_STATE_ABORT; +} + +/** + * @brief Handle abort a Tx or Rx/Tx transaction. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for SPI module. 
+ * @retval None + */ +static void SPI_AbortTx_ISR(SPI_HandleTypeDef *hspi) +{ + __IO uint32_t count; + + count = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); + + /* Disable TXEIE interrupt */ + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_TXEIE)); + + /* Check TXEIE is disabled */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } + while (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_TXEIE)); + + if (SPI_EndRxTxTransaction(hspi, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Disable SPI Peripheral */ + __HAL_SPI_DISABLE(hspi); + + /* Empty the FRLVL fifo */ + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_EMPTY, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Check case of Full-Duplex Mode and disable directly RXNEIE interrupt */ + if (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXNEIE)) + { + /* Disable RXNEIE interrupt */ + CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_RXNEIE)); + + /* Check RXNEIE is disabled */ + do + { + if (count == 0U) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_ABORT); + break; + } + count--; + } + while (HAL_IS_BIT_SET(hspi->Instance->CR2, SPI_CR2_RXNEIE)); + + /* Control the BSY flag */ + if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_BSY, RESET, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + + /* Empty the FRLVL fifo */ + if (SPI_WaitFifoStateUntilTimeout(hspi, SPI_FLAG_FRLVL, SPI_FRLVL_EMPTY, SPI_DEFAULT_TIMEOUT, HAL_GetTick()) != HAL_OK) + { + hspi->ErrorCode = HAL_SPI_ERROR_ABORT; + } + } + hspi->State = HAL_SPI_STATE_ABORT; +} + +/** + * @} + */ + +#endif /* HAL_SPI_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi_ex.c new file mode 100644 index 000000000..d34cb45e9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_spi_ex.c @@ -0,0 +1,115 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_spi_ex.c + * @author MCD Application Team + * @brief Extended SPI HAL module driver. + * This file provides firmware functions to manage the following + * SPI peripheral extended functionalities : + * + IO operation functions + * + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup SPIEx SPIEx + * @brief SPI Extended HAL module driver + * @{ + */ +#ifdef HAL_SPI_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private defines -----------------------------------------------------------*/ +/** @defgroup SPIEx_Private_Constants SPIEx Private Constants + * @{ + */ +#define SPI_FIFO_SIZE 4UL +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup SPIEx_Exported_Functions SPIEx Exported Functions + * @{ + */ + +/** @defgroup SPIEx_Exported_Functions_Group1 IO operation functions + * @brief Data transfers functions + * +@verbatim + ============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] + This subsection provides a set of extended functions to manage the SPI + data transfers. + + (#) Rx data flush function: + (++) HAL_SPIEx_FlushRxFifo() + +@endverbatim + * @{ + */ + +/** + * @brief Flush the RX fifo. + * @param hspi pointer to a SPI_HandleTypeDef structure that contains + * the configuration information for the specified SPI module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_SPIEx_FlushRxFifo(SPI_HandleTypeDef *hspi) +{ + __IO uint32_t tmpreg; + uint8_t count = 0U; + while ((hspi->Instance->SR & SPI_FLAG_FRLVL) != SPI_FRLVL_EMPTY) + { + count++; + tmpreg = hspi->Instance->DR; + UNUSED(tmpreg); /* To avoid GCC warning */ + if (count == SPI_FIFO_SIZE) + { + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_SPI_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c new file mode 100644 index 000000000..b3365d6fb --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c @@ -0,0 +1,6901 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_tim.c + * @author MCD Application Team + * @brief TIM HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the Timer (TIM) peripheral: + * + TIM Time Base Initialization + * + TIM Time Base Start + * + TIM Time Base Start Interruption + * + TIM Time Base Start DMA + * + TIM Output Compare/PWM Initialization + * + TIM Output Compare/PWM Channel Configuration + * + TIM Output Compare/PWM Start + * + TIM Output Compare/PWM Start Interruption + * + TIM Output Compare/PWM Start DMA + * + TIM Input Capture Initialization + * + TIM Input Capture Channel Configuration + * + TIM Input Capture Start + * + TIM Input Capture Start Interruption + * + TIM Input Capture Start DMA + * + TIM One Pulse Initialization + * + TIM One Pulse Channel Configuration + * + TIM One Pulse Start + * + TIM Encoder Interface Initialization + * + TIM Encoder Interface Start + * + TIM Encoder Interface Start Interruption + * + TIM Encoder Interface Start DMA + * + Commutation Event configuration with Interruption and DMA + * + TIM OCRef clear configuration + * + TIM External Clock configuration + @verbatim + ============================================================================== + ##### TIMER Generic features ##### + ============================================================================== + [..] The Timer features include: + (#) 16-bit up, down, up/down auto-reload counter. + (#) 16-bit programmable prescaler allowing dividing (also on the fly) the + counter clock frequency either by any factor between 1 and 65536. + (#) Up to 4 independent channels for: + (++) Input Capture + (++) Output Compare + (++) PWM generation (Edge and Center-aligned Mode) + (++) One-pulse mode output + (#) Synchronization circuit to control the timer with external signals and to interconnect + several timers together. + (#) Supports incremental encoder for positioning purposes + + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Initialize the TIM low level resources by implementing the following functions + depending on the selected feature: + (++) Time Base : HAL_TIM_Base_MspInit() + (++) Input Capture : HAL_TIM_IC_MspInit() + (++) Output Compare : HAL_TIM_OC_MspInit() + (++) PWM generation : HAL_TIM_PWM_MspInit() + (++) One-pulse mode output : HAL_TIM_OnePulse_MspInit() + (++) Encoder mode output : HAL_TIM_Encoder_MspInit() + + (#) Initialize the TIM low level resources : + (##) Enable the TIM interface clock using __HAL_RCC_TIMx_CLK_ENABLE(); + (##) TIM pins configuration + (+++) Enable the clock for the TIM GPIOs using the following function: + __HAL_RCC_GPIOx_CLK_ENABLE(); + (+++) Configure these TIM pins in Alternate function mode using HAL_GPIO_Init(); + + (#) The external Clock can be configured, if needed (the default clock is the + internal clock from the APBx), using the following function: + HAL_TIM_ConfigClockSource, the clock configuration should be done before + any start function. + + (#) Configure the TIM in the desired functioning mode using one of the + Initialization function of this driver: + (++) HAL_TIM_Base_Init: to use the Timer to generate a simple time base + (++) HAL_TIM_OC_Init and HAL_TIM_OC_ConfigChannel: to use the Timer to generate an + Output Compare signal. + (++) HAL_TIM_PWM_Init and HAL_TIM_PWM_ConfigChannel: to use the Timer to generate a + PWM signal. + (++) HAL_TIM_IC_Init and HAL_TIM_IC_ConfigChannel: to use the Timer to measure an + external signal. 
+ (++) HAL_TIM_OnePulse_Init and HAL_TIM_OnePulse_ConfigChannel: to use the Timer + in One Pulse Mode. + (++) HAL_TIM_Encoder_Init: to use the Timer Encoder Interface. + + (#) Activate the TIM peripheral using one of the start functions depending from the feature used: + (++) Time Base : HAL_TIM_Base_Start(), HAL_TIM_Base_Start_DMA(), HAL_TIM_Base_Start_IT() + (++) Input Capture : HAL_TIM_IC_Start(), HAL_TIM_IC_Start_DMA(), HAL_TIM_IC_Start_IT() + (++) Output Compare : HAL_TIM_OC_Start(), HAL_TIM_OC_Start_DMA(), HAL_TIM_OC_Start_IT() + (++) PWM generation : HAL_TIM_PWM_Start(), HAL_TIM_PWM_Start_DMA(), HAL_TIM_PWM_Start_IT() + (++) One-pulse mode output : HAL_TIM_OnePulse_Start(), HAL_TIM_OnePulse_Start_IT() + (++) Encoder mode output : HAL_TIM_Encoder_Start(), HAL_TIM_Encoder_Start_DMA(), HAL_TIM_Encoder_Start_IT(). + + (#) The DMA Burst is managed with the two following functions: + HAL_TIM_DMABurst_WriteStart() + HAL_TIM_DMABurst_ReadStart() + + *** Callback registration *** + ============================================= + + [..] + The compilation define USE_HAL_TIM_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + + [..] + Use Function @ref HAL_TIM_RegisterCallback() to register a callback. + @ref HAL_TIM_RegisterCallback() takes as parameters the HAL peripheral handle, + the Callback ID and a pointer to the user callback function. + + [..] + Use function @ref HAL_TIM_UnRegisterCallback() to reset a callback to the default + weak function. + @ref HAL_TIM_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. + + [..] + These functions allow to register/unregister following callbacks: + (+) Base_MspInitCallback : TIM Base Msp Init Callback. + (+) Base_MspDeInitCallback : TIM Base Msp DeInit Callback. + (+) IC_MspInitCallback : TIM IC Msp Init Callback. + (+) IC_MspDeInitCallback : TIM IC Msp DeInit Callback. + (+) OC_MspInitCallback : TIM OC Msp Init Callback. + (+) OC_MspDeInitCallback : TIM OC Msp DeInit Callback. + (+) PWM_MspInitCallback : TIM PWM Msp Init Callback. + (+) PWM_MspDeInitCallback : TIM PWM Msp DeInit Callback. + (+) OnePulse_MspInitCallback : TIM One Pulse Msp Init Callback. + (+) OnePulse_MspDeInitCallback : TIM One Pulse Msp DeInit Callback. + (+) Encoder_MspInitCallback : TIM Encoder Msp Init Callback. + (+) Encoder_MspDeInitCallback : TIM Encoder Msp DeInit Callback. + (+) HallSensor_MspInitCallback : TIM Hall Sensor Msp Init Callback. + (+) HallSensor_MspDeInitCallback : TIM Hall Sensor Msp DeInit Callback. + (+) PeriodElapsedCallback : TIM Period Elapsed Callback. + (+) PeriodElapsedHalfCpltCallback : TIM Period Elapsed half complete Callback. + (+) TriggerCallback : TIM Trigger Callback. + (+) TriggerHalfCpltCallback : TIM Trigger half complete Callback. + (+) IC_CaptureCallback : TIM Input Capture Callback. + (+) IC_CaptureHalfCpltCallback : TIM Input Capture half complete Callback. + (+) OC_DelayElapsedCallback : TIM Output Compare Delay Elapsed Callback. + (+) PWM_PulseFinishedCallback : TIM PWM Pulse Finished Callback. + (+) PWM_PulseFinishedHalfCpltCallback : TIM PWM Pulse Finished half complete Callback. + (+) ErrorCallback : TIM Error Callback. + (+) CommutationCallback : TIM Commutation Callback. + (+) CommutationHalfCpltCallback : TIM Commutation half complete Callback. + (+) BreakCallback : TIM Break Callback. + (+) Break2Callback : TIM Break2 Callback. + + [..] 
+By default, after the Init and when the state is HAL_TIM_STATE_RESET, +all interrupt callbacks are set to the corresponding weak functions: + examples @ref HAL_TIM_TriggerCallback(), @ref HAL_TIM_ErrorCallback(). + + [..] + An exception is made for the MspInit and MspDeInit functions: they are reset to the legacy weak + functions in the Init / DeInit only when these callbacks are null + (not registered beforehand). If MspInit or MspDeInit are not null, the Init / DeInit + keeps and uses the user MspInit / MspDeInit callbacks (registered beforehand). + + [..] + Callbacks can be registered / unregistered in HAL_TIM_STATE_READY state only. + An exception is made for MspInit / MspDeInit, which can be registered / unregistered + in HAL_TIM_STATE_READY or HAL_TIM_STATE_RESET state; + thus registered (user) MspInit / MspDeInit callbacks can be used during the Init / DeInit. + In that case, first register the MspInit / MspDeInit user callbacks + using @ref HAL_TIM_RegisterCallback() before calling DeInit or Init function. + + [..] + When the compilation define USE_HAL_TIM_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. + + @endverbatim + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup TIM TIM + * @brief TIM HAL module driver + * @{ + */ + +#ifdef HAL_TIM_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup TIM_Private_Functions + * @{ + */ +static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC5_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC6_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_TI1_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter); +static void TIM_TI2_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter); +static void TIM_TI2_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter); +static void TIM_TI3_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter); +static void TIM_TI4_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter); +static void TIM_ITRx_SetConfig(TIM_TypeDef *TIMx, uint32_t InputTriggerSource); +static void TIM_DMAPeriodElapsedCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMAPeriodElapsedHalfCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMATriggerCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMATriggerHalfCplt(DMA_HandleTypeDef *hdma); +static HAL_StatusTypeDef TIM_SlaveTimer_SetConfig(TIM_HandleTypeDef *htim, + TIM_SlaveConfigTypeDef *sSlaveConfig); +/** + * @} + */ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup TIM_Exported_Functions TIM Exported Functions + * @{ + */ + +/** @defgroup TIM_Exported_Functions_Group1 TIM Time Base functions + * @brief Time Base functions + * +@verbatim + ============================================================================== + ##### Time Base functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM base. + (+) De-initialize the TIM base. + (+) Start the Time Base. + (+) Stop the Time Base. + (+) Start the Time Base and enable interrupt. + (+) Stop the Time Base and disable interrupt. + (+) Start the Time Base and enable DMA transfer. + (+) Stop the Time Base and disable DMA transfer. 
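+ + [..] + A minimal time-base sketch (illustrative only: the handle name htim2, the prescaler/period values and + Error_Handler() are placeholders; clock, GPIO and NVIC setup are assumed to be done in HAL_TIM_Base_MspInit(), + and the TIMx interrupt is assumed to be routed to HAL_TIM_IRQHandler()): + + TIM_HandleTypeDef htim2 = {0}; + htim2.Instance = TIM2; + htim2.Init.Prescaler = 7999U; + htim2.Init.CounterMode = TIM_COUNTERMODE_UP; + htim2.Init.Period = 999U; + htim2.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1; + htim2.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_DISABLE; + if (HAL_TIM_Base_Init(&htim2) != HAL_OK) + { + Error_Handler(); + } + if (HAL_TIM_Base_Start_IT(&htim2) != HAL_OK) + { + Error_Handler(); + } + HAL_TIM_PeriodElapsedCallback() is then invoked on every update event.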
+ +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Time base Unit according to the specified + * parameters in the TIM_HandleTypeDef and initialize the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_Base_DeInit() before HAL_TIM_Base_Init() + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->Base_MspInitCallback == NULL) + { + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->Base_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + HAL_TIM_Base_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Set the Time Base configuration */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM Base peripheral + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->Base_MspDeInitCallback == NULL) + { + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; + } + /* DeInit the low level hardware */ + htim->Base_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIM_Base_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Base MSP. + * @param htim TIM Base handle + * @retval None + */ +__weak void HAL_TIM_Base_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Base_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Base MSP. + * @param htim TIM Base handle + * @retval None + */ +__weak void HAL_TIM_Base_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Base_MspDeInit could be implemented in the user file + */ +} + + +/** + * @brief Starts the TIM Base generation. 
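+ * @note   Usage sketch (illustrative; htim2 and Error_Handler() are hypothetical user-code names, and the handle + *         must already be configured with HAL_TIM_Base_Init()): + *           if (HAL_TIM_Base_Start(&htim2) != HAL_OK) + *           { + *             Error_Handler(); + *           } + *         The counter then runs until HAL_TIM_Base_Stop() is called; its current value can be read with + *         __HAL_TIM_GET_COUNTER(&htim2).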
+ * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Start(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Change the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Base generation. + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Stop(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Change the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Base generation in interrupt mode. + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Start_IT(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Enable the TIM Update interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_UPDATE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Base generation in interrupt mode. + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Stop_IT(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + /* Disable the TIM Update interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_UPDATE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Base generation in DMA mode. + * @param htim TIM Base handle + * @param pData The source Buffer address. + * @param Length The length of data to be transferred from memory to peripheral. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_DMA_INSTANCE(htim->Instance)); + + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + + /* Set the DMA Period elapsed callbacks */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferCpltCallback = TIM_DMAPeriodElapsedCplt; + htim->hdma[TIM_DMA_ID_UPDATE]->XferHalfCpltCallback = TIM_DMAPeriodElapsedHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)pData, (uint32_t)&htim->Instance->ARR, Length) != HAL_OK) + { + return HAL_ERROR; + } + + /* Enable the TIM Update DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_UPDATE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Base generation in DMA mode. + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Stop_DMA(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_DMA_INSTANCE(htim->Instance)); + + /* Disable the TIM Update DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_UPDATE); + + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group2 TIM Output Compare functions + * @brief TIM Output Compare functions + * +@verbatim + ============================================================================== + ##### TIM Output Compare functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM Output Compare. + (+) De-initialize the TIM Output Compare. + (+) Start the TIM Output Compare. + (+) Stop the TIM Output Compare. + (+) Start the TIM Output Compare and enable interrupt. + (+) Stop the TIM Output Compare and disable interrupt. + (+) Start the TIM Output Compare and enable DMA transfer. + (+) Stop the TIM Output Compare and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Output Compare according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. 
+ * Ex: call @ref HAL_TIM_OC_DeInit() before HAL_TIM_OC_Init() + * @param htim TIM Output Compare handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->OC_MspInitCallback == NULL) + { + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->OC_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_OC_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Init the base time for the Output Compare */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM peripheral + * @param htim TIM Output Compare handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->OC_MspDeInitCallback == NULL) + { + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; + } + /* DeInit the low level hardware */ + htim->OC_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_OC_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Output Compare MSP. + * @param htim TIM Output Compare handle + * @retval None + */ +__weak void HAL_TIM_OC_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OC_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Output Compare MSP. + * @param htim TIM Output Compare handle + * @retval None + */ +__weak void HAL_TIM_OC_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OC_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Output Compare signal generation. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Enable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Output Compare signal generation. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Output Compare signal generation in interrupt mode. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Enable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + break; + } + + /* Enable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Output Compare signal generation in interrupt mode. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + break; + } + + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Output Compare signal generation in DMA mode. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param pData The source Buffer address. + * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, Length) != HAL_OK) + { + return HAL_ERROR; + } + + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, Length) != HAL_OK) + { + return HAL_ERROR; + } + + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)pData, (uint32_t)&htim->Instance->CCR4, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC4); + break; + } + + default: + break; + } + + /* Enable the Output compare channel */ + 
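/* Illustrative usage sketch (not part of the ST driver): a minimal caller-side
   example of the HAL_TIM_OC_Start_DMA()/HAL_TIM_OC_Stop_DMA() pair. The timer
   instance (TIM3), the handle, buffer and clock numbers below are assumptions
   made only for this sketch; the DMA stream bound to hdma[TIM_DMA_ID_CC1] is
   expected to be configured and linked beforehand, typically in
   HAL_TIM_OC_MspInit().

       static TIM_HandleTypeDef htim3;                       // assumed handle on TIM3
       static uint32_t aCompareValues[4] = { 1000, 2000, 3000, 4000 };

       void oc_dma_example(void)
       {
         TIM_OC_InitTypeDef sConfig = {0};

         htim3.Instance               = TIM3;
         htim3.Init.Prescaler         = 108 - 1;              // assumed 108 MHz timer clock -> 1 MHz counting
         htim3.Init.CounterMode       = TIM_COUNTERMODE_UP;
         htim3.Init.Period            = 0xFFFF;
         htim3.Init.ClockDivision     = TIM_CLOCKDIVISION_DIV1;
         htim3.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_DISABLE;
         if (HAL_TIM_OC_Init(&htim3) != HAL_OK)
         {
           return;
         }

         sConfig.OCMode     = TIM_OCMODE_TOGGLE;              // toggle the output on every compare match
         sConfig.Pulse      = aCompareValues[0];
         sConfig.OCPolarity = TIM_OCPOLARITY_HIGH;
         sConfig.OCFastMode = TIM_OCFAST_DISABLE;
         if (HAL_TIM_OC_ConfigChannel(&htim3, &sConfig, TIM_CHANNEL_1) != HAL_OK)
         {
           return;
         }

         // DMA reloads CCR1 from aCompareValues on each capture/compare 1 event
         if (HAL_TIM_OC_Start_DMA(&htim3, TIM_CHANNEL_1, aCompareValues, 4) != HAL_OK)
         {
           return;
         }

         // ... later ...
         (void)HAL_TIM_OC_Stop_DMA(&htim3, TIM_CHANNEL_1);
       }
*/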
TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Output Compare signal generation in DMA mode. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC4); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + + default: + break; + } + + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group3 TIM PWM functions + * @brief TIM PWM functions + * +@verbatim + ============================================================================== + ##### TIM PWM functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM PWM. + (+) De-initialize the TIM PWM. + (+) Start the TIM PWM. + (+) Stop the TIM PWM. + (+) Start the TIM PWM and enable interrupt. + (+) Stop the TIM PWM and disable interrupt. + (+) Start the TIM PWM and enable DMA transfer. + (+) Stop the TIM PWM and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM PWM Time Base according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. 
+ * Ex: call @ref HAL_TIM_PWM_DeInit() before HAL_TIM_PWM_Init() + * @param htim TIM PWM handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->PWM_MspInitCallback == NULL) + { + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->PWM_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_PWM_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Init the base time for the PWM */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM peripheral + * @param htim TIM PWM handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->PWM_MspDeInitCallback == NULL) + { + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; + } + /* DeInit the low level hardware */ + htim->PWM_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_PWM_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM PWM MSP. + * @param htim TIM PWM handle + * @retval None + */ +__weak void HAL_TIM_PWM_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM PWM MSP. + * @param htim TIM PWM handle + * @retval None + */ +__weak void HAL_TIM_PWM_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the PWM signal generation. 
+ * @param htim TIM handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the PWM signal generation. + * @param htim TIM PWM handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the PWM signal generation in interrupt mode. 
+ * @param htim TIM PWM handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Enable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + break; + } + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the PWM signal generation in interrupt mode. + * @param htim TIM PWM handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + break; + } + + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM PWM signal generation in DMA mode. 
+ * @param htim TIM PWM handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param pData The source Buffer address. + * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, Length) != HAL_OK) + { + return HAL_ERROR; + } + + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Output Capture/Compare 3 request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)pData, (uint32_t)&htim->Instance->CCR4, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC4); + break; + } + + default: + break; + } + + /* Enable the Capture compare channel */ + 
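/* Illustrative usage sketch (not part of the ST driver): a minimal caller-side
   example of HAL_TIM_PWM_Start_DMA(), streaming a new duty value into CCR1 on
   each capture/compare 1 event. The timer instance (TIM4), the handle and the
   buffer name are assumptions made only for this sketch; the DMA stream bound
   to hdma[TIM_DMA_ID_CC1] is expected to be linked beforehand, typically in
   HAL_TIM_PWM_MspInit().

       static TIM_HandleTypeDef htim4;                        // assumed handle on TIM4
       static uint32_t aDuty[4] = { 100, 300, 600, 900 };     // with Period = 999 this is 10..90 % duty

       void pwm_dma_example(void)
       {
         TIM_OC_InitTypeDef sConfig = {0};

         htim4.Instance               = TIM4;
         htim4.Init.Prescaler         = 0;
         htim4.Init.CounterMode       = TIM_COUNTERMODE_UP;
         htim4.Init.Period            = 999;
         htim4.Init.ClockDivision     = TIM_CLOCKDIVISION_DIV1;
         htim4.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_ENABLE;
         if (HAL_TIM_PWM_Init(&htim4) != HAL_OK)
         {
           return;
         }

         sConfig.OCMode     = TIM_OCMODE_PWM1;
         sConfig.Pulse      = aDuty[0];
         sConfig.OCPolarity = TIM_OCPOLARITY_HIGH;
         sConfig.OCFastMode = TIM_OCFAST_DISABLE;
         if (HAL_TIM_PWM_ConfigChannel(&htim4, &sConfig, TIM_CHANNEL_1) != HAL_OK)
         {
           return;
         }

         // DMA copies the next duty value from aDuty into CCR1 on each CC1 event
         if (HAL_TIM_PWM_Start_DMA(&htim4, TIM_CHANNEL_1, aDuty, 4) != HAL_OK)
         {
           return;
         }
       }
*/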
TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM PWM signal generation in DMA mode. + * @param htim TIM PWM handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC4); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + + default: + break; + } + + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group4 TIM Input Capture functions + * @brief TIM Input Capture functions + * +@verbatim + ============================================================================== + ##### TIM Input Capture functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM Input Capture. + (+) De-initialize the TIM Input Capture. + (+) Start the TIM Input Capture. + (+) Stop the TIM Input Capture. + (+) Start the TIM Input Capture and enable interrupt. + (+) Stop the TIM Input Capture and disable interrupt. + (+) Start the TIM Input Capture and enable DMA transfer. + (+) Stop the TIM Input Capture and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Input Capture Time base according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. 
+ * Ex: call @ref HAL_TIM_IC_DeInit() before HAL_TIM_IC_Init() + * @param htim TIM Input Capture handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->IC_MspInitCallback == NULL) + { + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->IC_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_IC_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Init the base time for the input capture */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM peripheral + * @param htim TIM Input Capture handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->IC_MspDeInitCallback == NULL) + { + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; + } + /* DeInit the low level hardware */ + htim->IC_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_IC_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Input Capture MSP. + * @param htim TIM Input Capture handle + * @retval None + */ +__weak void HAL_TIM_IC_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Input Capture MSP. + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_IC_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Input Capture measurement. 
+ * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Input Capture measurement. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Input Capture measurement in interrupt mode. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Enable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + break; + } + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Input Capture measurement in interrupt mode. 
+ * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + break; + } + + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Input Capture measurement in DMA mode. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param pData The destination Buffer address. + * @param Length The length of data to be transferred from TIM peripheral to memory. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + assert_param(IS_TIM_DMA_CC_INSTANCE(htim->Instance)); + + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)&htim->Instance->CCR3, (uint32_t)pData, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)&htim->Instance->CCR4, (uint32_t)pData, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC4); + break; + } + + default: + break; + } + + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Input Capture measurement in DMA mode. 
+ * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + assert_param(IS_TIM_DMA_CC_INSTANCE(htim->Instance)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC4); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + + default: + break; + } + + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group5 TIM One Pulse functions + * @brief TIM One Pulse functions + * +@verbatim + ============================================================================== + ##### TIM One Pulse functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM One Pulse. + (+) De-initialize the TIM One Pulse. + (+) Start the TIM One Pulse. + (+) Stop the TIM One Pulse. + (+) Start the TIM One Pulse and enable interrupt. + (+) Stop the TIM One Pulse and disable interrupt. + (+) Start the TIM One Pulse and enable DMA transfer. + (+) Stop the TIM One Pulse and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM One Pulse Time Base according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_OnePulse_DeInit() before HAL_TIM_OnePulse_Init() + * @param htim TIM One Pulse handle + * @param OnePulseMode Select the One pulse mode. + * This parameter can be one of the following values: + * @arg TIM_OPMODE_SINGLE: Only one pulse will be generated. + * @arg TIM_OPMODE_REPETITIVE: Repetitive pulses will be generated. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Init(TIM_HandleTypeDef *htim, uint32_t OnePulseMode) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_OPM_MODE(OnePulseMode)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->OnePulse_MspInitCallback == NULL) + { + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->OnePulse_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_OnePulse_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Configure the Time base in the One Pulse Mode */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Reset the OPM Bit */ + htim->Instance->CR1 &= ~TIM_CR1_OPM; + + /* Configure the OPM Mode */ + htim->Instance->CR1 |= OnePulseMode; + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM One Pulse + * @param htim TIM One Pulse handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->OnePulse_MspDeInitCallback == NULL) + { + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; + } + /* DeInit the low level hardware */ + htim->OnePulse_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIM_OnePulse_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM One Pulse MSP. + * @param htim TIM One Pulse handle + * @retval None + */ +__weak void HAL_TIM_OnePulse_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OnePulse_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM One Pulse MSP. + * @param htim TIM One Pulse handle + * @retval None + */ +__weak void HAL_TIM_OnePulse_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OnePulse_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM One Pulse signal generation. 
+ * @param htim TIM One Pulse handle + * @param OutputChannel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Enable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + in all combinations, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be enabled together + + No need to enable the counter, it's enabled automatically by hardware + (the counter starts in response to a stimulus and generate a pulse */ + + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation. + * @param htim TIM One Pulse handle + * @param OutputChannel TIM Channels to be disable + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Disable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + in all combinations, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be disabled together */ + + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM One Pulse signal generation in interrupt mode. 
+ * @param htim TIM One Pulse handle + * @param OutputChannel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Enable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + in all combinations, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be enabled together + + No need to enable the counter, it's enabled automatically by hardware + (the counter starts in response to a stimulus and generate a pulse */ + + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation in interrupt mode. + * @param htim TIM One Pulse handle + * @param OutputChannel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + + /* Disable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + in all combinations, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be disabled together */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group6 TIM Encoder functions + * @brief TIM Encoder functions + * +@verbatim + ============================================================================== + ##### TIM Encoder functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM Encoder. + (+) De-initialize the TIM Encoder. + (+) Start the TIM Encoder. + (+) Stop the TIM Encoder. 
+ (+) Start the TIM Encoder and enable interrupt. + (+) Stop the TIM Encoder and disable interrupt. + (+) Start the TIM Encoder and enable DMA transfer. + (+) Stop the TIM Encoder and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Encoder Interface and initialize the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_Encoder_DeInit() before HAL_TIM_Encoder_Init() + * @note Encoder mode and External clock mode 2 are not compatible and must not be selected together + * Ex: A call for @ref HAL_TIM_Encoder_Init will erase the settings of @ref HAL_TIM_ConfigClockSource + * using TIM_CLOCKSOURCE_ETRMODE2 and vice versa + * @param htim TIM Encoder Interface handle + * @param sConfig TIM Encoder Interface configuration structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, TIM_Encoder_InitTypeDef *sConfig) +{ + uint32_t tmpsmcr; + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + assert_param(IS_TIM_ENCODER_MODE(sConfig->EncoderMode)); + assert_param(IS_TIM_IC_SELECTION(sConfig->IC1Selection)); + assert_param(IS_TIM_IC_SELECTION(sConfig->IC2Selection)); + assert_param(IS_TIM_ENCODERINPUT_POLARITY(sConfig->IC1Polarity)); + assert_param(IS_TIM_ENCODERINPUT_POLARITY(sConfig->IC2Polarity)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->IC1Prescaler)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->IC2Prescaler)); + assert_param(IS_TIM_IC_FILTER(sConfig->IC1Filter)); + assert_param(IS_TIM_IC_FILTER(sConfig->IC2Filter)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->Encoder_MspInitCallback == NULL) + { + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->Encoder_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_Encoder_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Reset the SMS and ECE bits */ + htim->Instance->SMCR &= ~(TIM_SMCR_SMS | TIM_SMCR_ECE); + + /* Configure the Time base in the Encoder Mode */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Get the TIMx SMCR register value */ + tmpsmcr = htim->Instance->SMCR; + + /* Get the TIMx CCMR1 register value */ + tmpccmr1 = htim->Instance->CCMR1; + + /* Get the TIMx CCER register value */ + tmpccer = htim->Instance->CCER; + + /* Set the encoder Mode */ + tmpsmcr |= sConfig->EncoderMode; + + /* Select the Capture Compare 1 and the Capture Compare 2 as input */ + tmpccmr1 &= ~(TIM_CCMR1_CC1S | TIM_CCMR1_CC2S); + tmpccmr1 |= (sConfig->IC1Selection | (sConfig->IC2Selection << 8U)); + + /* Set the Capture Compare 1 and the Capture Compare 2 prescalers and filters */ + 
tmpccmr1 &= ~(TIM_CCMR1_IC1PSC | TIM_CCMR1_IC2PSC); + tmpccmr1 &= ~(TIM_CCMR1_IC1F | TIM_CCMR1_IC2F); + tmpccmr1 |= sConfig->IC1Prescaler | (sConfig->IC2Prescaler << 8U); + tmpccmr1 |= (sConfig->IC1Filter << 4U) | (sConfig->IC2Filter << 12U); + + /* Set the TI1 and the TI2 Polarities */ + tmpccer &= ~(TIM_CCER_CC1P | TIM_CCER_CC2P); + tmpccer &= ~(TIM_CCER_CC1NP | TIM_CCER_CC2NP); + tmpccer |= sConfig->IC1Polarity | (sConfig->IC2Polarity << 4U); + + /* Write to TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + + /* Write to TIMx CCMR1 */ + htim->Instance->CCMR1 = tmpccmr1; + + /* Write to TIMx CCER */ + htim->Instance->CCER = tmpccer; + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + + +/** + * @brief DeInitializes the TIM Encoder interface + * @param htim TIM Encoder Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->Encoder_MspDeInitCallback == NULL) + { + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; + } + /* DeInit the low level hardware */ + htim->Encoder_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIM_Encoder_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Encoder Interface MSP. + * @param htim TIM Encoder Interface handle + * @retval None + */ +__weak void HAL_TIM_Encoder_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Encoder_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Encoder Interface MSP. + * @param htim TIM Encoder Interface handle + * @retval None + */ +__weak void HAL_TIM_Encoder_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Encoder_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Encoder Interface. 
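+  * @note   Illustrative usage sketch (htim and sEncoderConfig are placeholders,
+  *         not defined in this file); the current position is read back from the
+  *         counter register:
+  * @code
+  *   HAL_TIM_Encoder_Init(&htim, &sEncoderConfig);
+  *   HAL_TIM_Encoder_Start(&htim, TIM_CHANNEL_ALL);
+  *   uint32_t position = __HAL_TIM_GET_COUNTER(&htim);
+  * @endcode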
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Enable the encoder interface channels */ + switch (Channel) + { + case TIM_CHANNEL_1: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + break; + } + + case TIM_CHANNEL_2: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + break; + } + + default : + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + break; + } + } + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Encoder Interface. + * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1 and 2 + (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ + switch (Channel) + { + case TIM_CHANNEL_1: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + break; + } + + case TIM_CHANNEL_2: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + break; + } + + default : + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + break; + } + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Encoder Interface in interrupt mode. 
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Enable the encoder interface channels */ + /* Enable the capture compare Interrupts 1 and/or 2 */ + switch (Channel) + { + case TIM_CHANNEL_1: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + default : + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + } + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Encoder Interface in interrupt mode. + * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1 and 2 + (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ + if (Channel == TIM_CHANNEL_1) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts 1 */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + } + else if (Channel == TIM_CHANNEL_2) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts 2 */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + } + else + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts 1 and 2 */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Encoder Interface in DMA mode. + * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @param pData1 The destination Buffer address for IC1. + * @param pData2 The destination Buffer address for IC2. + * @param Length The length of data to be transferred from TIM peripheral to memory. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData1, + uint32_t *pData2, uint16_t Length) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if ((((pData1 == NULL) || (pData2 == NULL))) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData1, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError; + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData2, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + break; + } + + case TIM_CHANNEL_ALL: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData1, Length) != HAL_OK) + { + return HAL_ERROR; + } + + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData2, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + default: + 
break; + } + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Encoder Interface in DMA mode. + * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1 and 2 + (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ + if (Channel == TIM_CHANNEL_1) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the capture compare DMA Request 1 */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + } + else if (Channel == TIM_CHANNEL_2) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare DMA Request 2 */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + } + else + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare DMA Request 1 and 2 */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ +/** @defgroup TIM_Exported_Functions_Group7 TIM IRQ handler management + * @brief TIM IRQ handler management + * +@verbatim + ============================================================================== + ##### IRQ handler management ##### + ============================================================================== + [..] + This section provides Timer IRQ handler function. + +@endverbatim + * @{ + */ +/** + * @brief This function handles TIM interrupts requests. 
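+  * @note   This handler is typically called from the device interrupt service
+  *         routine; a minimal sketch (TIM3/htim3 are placeholders):
+  * @code
+  *   void TIM3_IRQHandler(void)
+  *   {
+  *     HAL_TIM_IRQHandler(&htim3);
+  *   }
+  * @endcode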
+ * @param htim TIM handle + * @retval None + */ +void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) +{ + /* Capture compare 1 event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC1) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC1) != RESET) + { + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC1); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + /* Input capture event */ + if ((htim->Instance->CCMR1 & TIM_CCMR1_CC1S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + } + /* Capture compare 2 event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC2) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC2) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC2); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + /* Input capture event */ + if ((htim->Instance->CCMR1 & TIM_CCMR1_CC2S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + /* Capture compare 3 event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC3) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC3) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC3); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + /* Input capture event */ + if ((htim->Instance->CCMR2 & TIM_CCMR2_CC3S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + /* Capture compare 4 event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC4) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC4) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC4); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + /* Input capture event */ + if ((htim->Instance->CCMR2 & TIM_CCMR2_CC4S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + /* TIM Update 
event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_UPDATE) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_UPDATE) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_UPDATE); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PeriodElapsedCallback(htim); +#else + HAL_TIM_PeriodElapsedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM Break input event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_BREAK) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_BREAK) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_BREAK); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->BreakCallback(htim); +#else + HAL_TIMEx_BreakCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM Break2 input event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_BREAK2) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_BREAK) != RESET) + { + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_BREAK2); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->Break2Callback(htim); +#else + HAL_TIMEx_Break2Callback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM Trigger detection event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_TRIGGER) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_TRIGGER) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_TRIGGER); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->TriggerCallback(htim); +#else + HAL_TIM_TriggerCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM commutation event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_COM) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_COM) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_FLAG_COM); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->CommutationCallback(htim); +#else + HAL_TIMEx_CommutCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group8 TIM Peripheral Control functions + * @brief TIM Peripheral Control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Configure The Input Output channels for OC, PWM, IC or One Pulse mode. + (+) Configure External Clock source. + (+) Configure Complementary channels, break features and dead time. + (+) Configure Master and the Slave synchronization. + (+) Configure the DMA Burst Mode. + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the TIM Output Compare Channels according to the specified + * parameters in the TIM_OC_InitTypeDef. 
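+  * @note   Illustrative configuration sketch (htim and the values shown are
+  *         placeholders):
+  * @code
+  *   TIM_OC_InitTypeDef sConfigOC = {0};
+  *   sConfigOC.OCMode     = TIM_OCMODE_TOGGLE;
+  *   sConfigOC.Pulse      = 1000U;
+  *   sConfigOC.OCPolarity = TIM_OCPOLARITY_HIGH;
+  *   HAL_TIM_OC_ConfigChannel(&htim, &sConfigOC, TIM_CHANNEL_1);
+  * @endcode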
+ * @param htim TIM Output Compare handle + * @param sConfig TIM Output Compare configuration structure + * @param Channel TIM Channels to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, + TIM_OC_InitTypeDef *sConfig, + uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CHANNELS(Channel)); + assert_param(IS_TIM_OC_MODE(sConfig->OCMode)); + assert_param(IS_TIM_OC_POLARITY(sConfig->OCPolarity)); + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 1 in Output Compare */ + TIM_OC1_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 2 in Output Compare */ + TIM_OC2_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_3: + { + /* Check the parameters */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 3 in Output Compare */ + TIM_OC3_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_4: + { + /* Check the parameters */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 4 in Output Compare */ + TIM_OC4_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_5: + { + /* Check the parameters */ + assert_param(IS_TIM_CC5_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 5 in Output Compare */ + TIM_OC5_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_6: + { + /* Check the parameters */ + assert_param(IS_TIM_CC6_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 6 in Output Compare */ + TIM_OC6_SetConfig(htim->Instance, sConfig); + break; + } + + default: + break; + } + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Input Capture Channels according to the specified + * parameters in the TIM_IC_InitTypeDef. 
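+  * @note   Illustrative configuration sketch (htim and the values shown are
+  *         placeholders):
+  * @code
+  *   TIM_IC_InitTypeDef sConfigIC = {0};
+  *   sConfigIC.ICPolarity  = TIM_ICPOLARITY_RISING;
+  *   sConfigIC.ICSelection = TIM_ICSELECTION_DIRECTTI;
+  *   sConfigIC.ICPrescaler = TIM_ICPSC_DIV1;
+  *   sConfigIC.ICFilter    = 0U;
+  *   HAL_TIM_IC_ConfigChannel(&htim, &sConfigIC, TIM_CHANNEL_1);
+  * @endcode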
+ * @param htim TIM IC handle + * @param sConfig TIM Input Capture configuration structure + * @param Channel TIM Channel to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_IC_InitTypeDef *sConfig, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_IC_POLARITY(sConfig->ICPolarity)); + assert_param(IS_TIM_IC_SELECTION(sConfig->ICSelection)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->ICPrescaler)); + assert_param(IS_TIM_IC_FILTER(sConfig->ICFilter)); + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + if (Channel == TIM_CHANNEL_1) + { + /* TI1 Configuration */ + TIM_TI1_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC1PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; + + /* Set the IC1PSC value */ + htim->Instance->CCMR1 |= sConfig->ICPrescaler; + } + else if (Channel == TIM_CHANNEL_2) + { + /* TI2 Configuration */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + TIM_TI2_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC2PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC; + + /* Set the IC2PSC value */ + htim->Instance->CCMR1 |= (sConfig->ICPrescaler << 8U); + } + else if (Channel == TIM_CHANNEL_3) + { + /* TI3 Configuration */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + TIM_TI3_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC3PSC Bits */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_IC3PSC; + + /* Set the IC3PSC value */ + htim->Instance->CCMR2 |= sConfig->ICPrescaler; + } + else + { + /* TI4 Configuration */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + TIM_TI4_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC4PSC Bits */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_IC4PSC; + + /* Set the IC4PSC value */ + htim->Instance->CCMR2 |= (sConfig->ICPrescaler << 8U); + } + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM PWM channels according to the specified + * parameters in the TIM_OC_InitTypeDef. 
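+  * @note   Illustrative configuration sketch (htim and the values shown are
+  *         placeholders; Pulse sets the duty cycle relative to the timer period):
+  * @code
+  *   TIM_OC_InitTypeDef sConfigOC = {0};
+  *   sConfigOC.OCMode     = TIM_OCMODE_PWM1;
+  *   sConfigOC.Pulse      = 500U;
+  *   sConfigOC.OCPolarity = TIM_OCPOLARITY_HIGH;
+  *   sConfigOC.OCFastMode = TIM_OCFAST_DISABLE;
+  *   HAL_TIM_PWM_ConfigChannel(&htim, &sConfigOC, TIM_CHANNEL_1);
+  *   HAL_TIM_PWM_Start(&htim, TIM_CHANNEL_1);
+  * @endcode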
+ * @param htim TIM PWM handle + * @param sConfig TIM PWM configuration structure + * @param Channel TIM Channels to be configured + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, + TIM_OC_InitTypeDef *sConfig, + uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CHANNELS(Channel)); + assert_param(IS_TIM_PWM_MODE(sConfig->OCMode)); + assert_param(IS_TIM_OC_POLARITY(sConfig->OCPolarity)); + assert_param(IS_TIM_FAST_STATE(sConfig->OCFastMode)); + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + /* Configure the Channel 1 in PWM mode */ + TIM_OC1_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel1 */ + htim->Instance->CCMR1 |= TIM_CCMR1_OC1PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_OC1FE; + htim->Instance->CCMR1 |= sConfig->OCFastMode; + break; + } + + case TIM_CHANNEL_2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + /* Configure the Channel 2 in PWM mode */ + TIM_OC2_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel2 */ + htim->Instance->CCMR1 |= TIM_CCMR1_OC2PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_OC2FE; + htim->Instance->CCMR1 |= sConfig->OCFastMode << 8U; + break; + } + + case TIM_CHANNEL_3: + { + /* Check the parameters */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + /* Configure the Channel 3 in PWM mode */ + TIM_OC3_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel3 */ + htim->Instance->CCMR2 |= TIM_CCMR2_OC3PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_OC3FE; + htim->Instance->CCMR2 |= sConfig->OCFastMode; + break; + } + + case TIM_CHANNEL_4: + { + /* Check the parameters */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + /* Configure the Channel 4 in PWM mode */ + TIM_OC4_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel4 */ + htim->Instance->CCMR2 |= TIM_CCMR2_OC4PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_OC4FE; + htim->Instance->CCMR2 |= sConfig->OCFastMode << 8U; + break; + } + + case TIM_CHANNEL_5: + { + /* Check the parameters */ + assert_param(IS_TIM_CC5_INSTANCE(htim->Instance)); + + /* Configure the Channel 5 in PWM mode */ + TIM_OC5_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel5*/ + htim->Instance->CCMR3 |= TIM_CCMR3_OC5PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR3 &= ~TIM_CCMR3_OC5FE; + htim->Instance->CCMR3 |= sConfig->OCFastMode; + break; + } + + case TIM_CHANNEL_6: + { + /* Check the parameters */ + assert_param(IS_TIM_CC6_INSTANCE(htim->Instance)); + + /* Configure the Channel 6 in PWM mode */ + TIM_OC6_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel6 */ + htim->Instance->CCMR3 |= TIM_CCMR3_OC6PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR3 &= 
~TIM_CCMR3_OC6FE; + htim->Instance->CCMR3 |= sConfig->OCFastMode << 8U; + break; + } + + default: + break; + } + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM One Pulse Channels according to the specified + * parameters in the TIM_OnePulse_InitTypeDef. + * @param htim TIM One Pulse handle + * @param sConfig TIM One Pulse configuration structure + * @param OutputChannel TIM output channel to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @param InputChannel TIM input Channel to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @note To output a waveform with a minimum delay user can enable the fast + * mode by calling the @ref __HAL_TIM_ENABLE_OCxFAST macro. Then CCx + * output is forced in response to the edge detection on TIx input, + * without taking in account the comparison. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OnePulse_InitTypeDef *sConfig, + uint32_t OutputChannel, uint32_t InputChannel) +{ + TIM_OC_InitTypeDef temp1; + + /* Check the parameters */ + assert_param(IS_TIM_OPM_CHANNELS(OutputChannel)); + assert_param(IS_TIM_OPM_CHANNELS(InputChannel)); + + if (OutputChannel != InputChannel) + { + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Extract the Output compare configuration from sConfig structure */ + temp1.OCMode = sConfig->OCMode; + temp1.Pulse = sConfig->Pulse; + temp1.OCPolarity = sConfig->OCPolarity; + temp1.OCNPolarity = sConfig->OCNPolarity; + temp1.OCIdleState = sConfig->OCIdleState; + temp1.OCNIdleState = sConfig->OCNIdleState; + + switch (OutputChannel) + { + case TIM_CHANNEL_1: + { + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + TIM_OC1_SetConfig(htim->Instance, &temp1); + break; + } + case TIM_CHANNEL_2: + { + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + TIM_OC2_SetConfig(htim->Instance, &temp1); + break; + } + default: + break; + } + + switch (InputChannel) + { + case TIM_CHANNEL_1: + { + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + TIM_TI1_SetConfig(htim->Instance, sConfig->ICPolarity, + sConfig->ICSelection, sConfig->ICFilter); + + /* Reset the IC1PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; + + /* Select the Trigger source */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI1FP1; + + /* Select the Slave Mode */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_TRIGGER; + break; + } + case TIM_CHANNEL_2: + { + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + TIM_TI2_SetConfig(htim->Instance, sConfig->ICPolarity, + sConfig->ICSelection, sConfig->ICFilter); + + /* Reset the IC2PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC; + + /* Select the Trigger source */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI2FP2; + + /* Select the Slave Mode */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_TRIGGER; + break; + } + + default: + break; + } + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Configure the DMA Burst to transfer Data from the memory to the TIM peripheral + * @param htim TIM handle + * 
@param BurstBaseAddress TIM Base address from where the DMA will start the Data write + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @arg TIM_DMABASE_OR + * @arg TIM_DMABASE_CCMR3 + * @arg TIM_DMABASE_CCR5 + * @arg TIM_DMABASE_CCR6 + * @arg TIM_DMABASE_AF1 (*) + * @arg TIM_DMABASE_AF2 (*) + * (*) value not defined in all devices + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @note This function should be used only when BurstLength is equal to DMA data transfer length. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, uint32_t BurstRequestSrc, + uint32_t *BurstBuffer, uint32_t BurstLength) +{ + /* Check the parameters */ + assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); + assert_param(IS_TIM_DMA_BASE(BurstBaseAddress)); + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + assert_param(IS_TIM_DMA_LENGTH(BurstLength)); + + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if ((BurstBuffer == NULL) && (BurstLength > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + /* Set the DMA Period elapsed callbacks */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferCpltCallback = TIM_DMAPeriodElapsedCplt; + htim->hdma[TIM_DMA_ID_UPDATE]->XferHalfCpltCallback = TIM_DMAPeriodElapsedHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)BurstBuffer, (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = 
TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC4: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_COM: + { + /* Set the DMA commutation callbacks */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferCpltCallback = TIMEx_DMACommutationCplt; + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferHalfCpltCallback = TIMEx_DMACommutationHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_COMMUTATION], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_TRIGGER: + { + /* Set the DMA trigger callbacks */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferCpltCallback = TIM_DMATriggerCplt; + htim->hdma[TIM_DMA_ID_TRIGGER]->XferHalfCpltCallback = TIM_DMATriggerHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_TRIGGER], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + default: + break; + } + /* configure the DMA Burst Mode */ + htim->Instance->DCR = (BurstBaseAddress | BurstLength); + + /* Enable the TIM DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, BurstRequestSrc); + + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM DMA Burst mode + * @param htim TIM handle + * @param BurstRequestSrc TIM DMA Request sources to disable + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc) +{ + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + + /* Abort the DMA transfer (at least disable the DMA stream) */ + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + break; + } + case TIM_DMA_CC1: + { + status = 
HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + case TIM_DMA_CC2: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + case TIM_DMA_CC3: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + case TIM_DMA_CC4: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + case TIM_DMA_COM: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_COMMUTATION]); + break; + } + case TIM_DMA_TRIGGER: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_TRIGGER]); + break; + } + default: + break; + } + + if (HAL_OK == status) + { + /* Disable the TIM Update DMA request */ + __HAL_TIM_DISABLE_DMA(htim, BurstRequestSrc); + } + + /* Return function status */ + return status; +} + +/** + * @brief Configure the DMA Burst to transfer Data from the TIM peripheral to the memory + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data read + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @arg TIM_DMABASE_OR + * @arg TIM_DMABASE_CCMR3 + * @arg TIM_DMABASE_CCR5 + * @arg TIM_DMABASE_CCR6 + * @arg TIM_DMABASE_AF1 (*) + * @arg TIM_DMABASE_AF2 (*) + * (*) value not defined in all devices + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @note This function should be used only when BurstLength is equal to DMA data transfer length. 
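+  * @note   Illustrative usage sketch (htim and the destination variable are
+  *         placeholders): read CCR1 into memory on each capture/compare 1 request:
+  * @code
+  *   uint32_t capture;
+  *   HAL_TIM_DMABurst_ReadStart(&htim, TIM_DMABASE_CCR1, TIM_DMA_CC1,
+  *                              &capture, TIM_DMABURSTLENGTH_1TRANSFER);
+  * @endcode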
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength) +{ + /* Check the parameters */ + assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); + assert_param(IS_TIM_DMA_BASE(BurstBaseAddress)); + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + assert_param(IS_TIM_DMA_LENGTH(BurstLength)); + + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if ((BurstBuffer == NULL) && (BurstLength > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + /* Set the DMA Period elapsed callbacks */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferCpltCallback = TIM_DMAPeriodElapsedCplt; + htim->hdma[TIM_DMA_ID_UPDATE]->XferHalfCpltCallback = TIM_DMAPeriodElapsedHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC1: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC2: + { + /* Set the DMA capture/compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC3: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC4: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_COM: + { + /* Set the DMA commutation callbacks */ + 
htim->hdma[TIM_DMA_ID_COMMUTATION]->XferCpltCallback = TIMEx_DMACommutationCplt; + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferHalfCpltCallback = TIMEx_DMACommutationHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_COMMUTATION], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + case TIM_DMA_TRIGGER: + { + /* Set the DMA trigger callbacks */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferCpltCallback = TIM_DMATriggerCplt; + htim->hdma[TIM_DMA_ID_TRIGGER]->XferHalfCpltCallback = TIM_DMATriggerHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_TRIGGER], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + { + return HAL_ERROR; + } + break; + } + default: + break; + } + + /* configure the DMA Burst Mode */ + htim->Instance->DCR = (BurstBaseAddress | BurstLength); + + /* Enable the TIM DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, BurstRequestSrc); + + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stop the DMA burst reading + * @param htim TIM handle + * @param BurstRequestSrc TIM DMA Request sources to disable. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc) +{ + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + + /* Abort the DMA transfer (at least disable the DMA stream) */ + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + break; + } + case TIM_DMA_CC1: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + case TIM_DMA_CC2: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + case TIM_DMA_CC3: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + case TIM_DMA_CC4: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + case TIM_DMA_COM: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_COMMUTATION]); + break; + } + case TIM_DMA_TRIGGER: + { + status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_TRIGGER]); + break; + } + default: + break; + } + + if (HAL_OK == status) + { + /* Disable the TIM Update DMA request */ + __HAL_TIM_DISABLE_DMA(htim, BurstRequestSrc); + } + + /* Return function status */ + return status; +} + +/** + * @brief Generate a software event + * @param htim TIM handle + * @param EventSource specifies the event source. + * This parameter can be one of the following values: + * @arg TIM_EVENTSOURCE_UPDATE: Timer update Event source + * @arg TIM_EVENTSOURCE_CC1: Timer Capture Compare 1 Event source + * @arg TIM_EVENTSOURCE_CC2: Timer Capture Compare 2 Event source + * @arg TIM_EVENTSOURCE_CC3: Timer Capture Compare 3 Event source + * @arg TIM_EVENTSOURCE_CC4: Timer Capture Compare 4 Event source + * @arg TIM_EVENTSOURCE_COM: Timer COM event source + * @arg TIM_EVENTSOURCE_TRIGGER: Timer Trigger Event source + * @arg TIM_EVENTSOURCE_BREAK: Timer Break event source + * @arg TIM_EVENTSOURCE_BREAK2: Timer Break2 event source + * @note Basic timers can only generate an update event. 
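+  * @note   Illustrative usage sketch (htim is a placeholder): force an update
+  *         event so that newly written prescaler/auto-reload values take effect
+  *         immediately:
+  * @code
+  *   HAL_TIM_GenerateEvent(&htim, TIM_EVENTSOURCE_UPDATE);
+  * @endcode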
+ * @note TIM_EVENTSOURCE_COM is relevant only with advanced timer instances. + * @note TIM_EVENTSOURCE_BREAK and TIM_EVENTSOURCE_BREAK2 are relevant + * only for timer instances supporting break input(s). + * @retval HAL status + */ + +HAL_StatusTypeDef HAL_TIM_GenerateEvent(TIM_HandleTypeDef *htim, uint32_t EventSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_EVENT_SOURCE(EventSource)); + + /* Process Locked */ + __HAL_LOCK(htim); + + /* Change the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Set the event sources */ + htim->Instance->EGR = EventSource; + + /* Change the TIM state */ + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Configures the OCRef clear feature + * @param htim TIM handle + * @param sClearInputConfig pointer to a TIM_ClearInputConfigTypeDef structure that + * contains the OCREF clear feature and parameters for the TIM peripheral. + * @param Channel specifies the TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @arg TIM_CHANNEL_4: TIM Channel 4 + * @arg TIM_CHANNEL_5: TIM Channel 5 + * @arg TIM_CHANNEL_6: TIM Channel 6 + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, + TIM_ClearInputConfigTypeDef *sClearInputConfig, + uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_OCXREF_CLEAR_INSTANCE(htim->Instance)); + assert_param(IS_TIM_CLEARINPUT_SOURCE(sClearInputConfig->ClearInputSource)); + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + switch (sClearInputConfig->ClearInputSource) + { + case TIM_CLEARINPUTSOURCE_NONE: + { + /* Clear the OCREF clear selection bit and the the ETR Bits */ + CLEAR_BIT(htim->Instance->SMCR, (TIM_SMCR_ETF | TIM_SMCR_ETPS | TIM_SMCR_ECE | TIM_SMCR_ETP)); + break; + } + + case TIM_CLEARINPUTSOURCE_ETR: + { + /* Check the parameters */ + assert_param(IS_TIM_CLEARINPUT_POLARITY(sClearInputConfig->ClearInputPolarity)); + assert_param(IS_TIM_CLEARINPUT_PRESCALER(sClearInputConfig->ClearInputPrescaler)); + assert_param(IS_TIM_CLEARINPUT_FILTER(sClearInputConfig->ClearInputFilter)); + + /* When OCRef clear feature is used with ETR source, ETR prescaler must be off */ + if (sClearInputConfig->ClearInputPrescaler != TIM_CLEARINPUTPRESCALER_DIV1) + { + htim->State = HAL_TIM_STATE_READY; + __HAL_UNLOCK(htim); + return HAL_ERROR; + } + + TIM_ETR_SetConfig(htim->Instance, + sClearInputConfig->ClearInputPrescaler, + sClearInputConfig->ClearInputPolarity, + sClearInputConfig->ClearInputFilter); + break; + } + + default: + break; + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 1 */ + SET_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC1CE); + } + else + { + /* Disable the OCREF clear feature for Channel 1 */ + CLEAR_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC1CE); + } + break; + } + case TIM_CHANNEL_2: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 2 */ + SET_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC2CE); + } + else + { + /* Disable the OCREF clear feature for Channel 2 */ + CLEAR_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC2CE); + } + break; + } + case TIM_CHANNEL_3: + { + if 
(sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 3 */ + SET_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC3CE); + } + else + { + /* Disable the OCREF clear feature for Channel 3 */ + CLEAR_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC3CE); + } + break; + } + case TIM_CHANNEL_4: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 4 */ + SET_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC4CE); + } + else + { + /* Disable the OCREF clear feature for Channel 4 */ + CLEAR_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC4CE); + } + break; + } + case TIM_CHANNEL_5: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 5 */ + SET_BIT(htim->Instance->CCMR3, TIM_CCMR3_OC5CE); + } + else + { + /* Disable the OCREF clear feature for Channel 5 */ + CLEAR_BIT(htim->Instance->CCMR3, TIM_CCMR3_OC5CE); + } + break; + } + case TIM_CHANNEL_6: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 6 */ + SET_BIT(htim->Instance->CCMR3, TIM_CCMR3_OC6CE); + } + else + { + /* Disable the OCREF clear feature for Channel 6 */ + CLEAR_BIT(htim->Instance->CCMR3, TIM_CCMR3_OC6CE); + } + break; + } + default: + break; + } + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the clock source to be used + * @param htim TIM handle + * @param sClockSourceConfig pointer to a TIM_ClockConfigTypeDef structure that + * contains the clock source information for the TIM peripheral. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, TIM_ClockConfigTypeDef *sClockSourceConfig) +{ + uint32_t tmpsmcr; + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Check the parameters */ + assert_param(IS_TIM_CLOCKSOURCE(sClockSourceConfig->ClockSource)); + + /* Reset the SMS, TS, ECE, ETPS and ETRF bits */ + tmpsmcr = htim->Instance->SMCR; + tmpsmcr &= ~(TIM_SMCR_SMS | TIM_SMCR_TS); + tmpsmcr &= ~(TIM_SMCR_ETF | TIM_SMCR_ETPS | TIM_SMCR_ECE | TIM_SMCR_ETP); + htim->Instance->SMCR = tmpsmcr; + + switch (sClockSourceConfig->ClockSource) + { + case TIM_CLOCKSOURCE_INTERNAL: + { + assert_param(IS_TIM_INSTANCE(htim->Instance)); + break; + } + + case TIM_CLOCKSOURCE_ETRMODE1: + { + /* Check whether or not the timer instance supports external trigger input mode 1 (ETRF)*/ + assert_param(IS_TIM_CLOCKSOURCE_ETRMODE1_INSTANCE(htim->Instance)); + + /* Check ETR input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPRESCALER(sClockSourceConfig->ClockPrescaler)); + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + /* Configure the ETR Clock source */ + TIM_ETR_SetConfig(htim->Instance, + sClockSourceConfig->ClockPrescaler, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + + /* Select the External clock mode1 and the ETRF trigger */ + tmpsmcr = htim->Instance->SMCR; + tmpsmcr |= (TIM_SLAVEMODE_EXTERNAL1 | TIM_CLOCKSOURCE_ETRMODE1); + /* Write to TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + break; + } + + case TIM_CLOCKSOURCE_ETRMODE2: + { + /* Check whether or not the timer instance supports external trigger input mode 2 (ETRF)*/ + assert_param(IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(htim->Instance)); + + /* Check ETR input conditioning related parameters */ + 
assert_param(IS_TIM_CLOCKPRESCALER(sClockSourceConfig->ClockPrescaler)); + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + /* Configure the ETR Clock source */ + TIM_ETR_SetConfig(htim->Instance, + sClockSourceConfig->ClockPrescaler, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + /* Enable the External clock mode2 */ + htim->Instance->SMCR |= TIM_SMCR_ECE; + break; + } + + case TIM_CLOCKSOURCE_TI1: + { + /* Check whether or not the timer instance supports external clock mode 1 */ + assert_param(IS_TIM_CLOCKSOURCE_TIX_INSTANCE(htim->Instance)); + + /* Check TI1 input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + TIM_TI1_ConfigInputStage(htim->Instance, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + TIM_ITRx_SetConfig(htim->Instance, TIM_CLOCKSOURCE_TI1); + break; + } + + case TIM_CLOCKSOURCE_TI2: + { + /* Check whether or not the timer instance supports external clock mode 1 (ETRF)*/ + assert_param(IS_TIM_CLOCKSOURCE_TIX_INSTANCE(htim->Instance)); + + /* Check TI2 input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + TIM_TI2_ConfigInputStage(htim->Instance, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + TIM_ITRx_SetConfig(htim->Instance, TIM_CLOCKSOURCE_TI2); + break; + } + + case TIM_CLOCKSOURCE_TI1ED: + { + /* Check whether or not the timer instance supports external clock mode 1 */ + assert_param(IS_TIM_CLOCKSOURCE_TIX_INSTANCE(htim->Instance)); + + /* Check TI1 input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + TIM_TI1_ConfigInputStage(htim->Instance, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + TIM_ITRx_SetConfig(htim->Instance, TIM_CLOCKSOURCE_TI1ED); + break; + } + + case TIM_CLOCKSOURCE_ITR0: + case TIM_CLOCKSOURCE_ITR1: + case TIM_CLOCKSOURCE_ITR2: + case TIM_CLOCKSOURCE_ITR3: + { + /* Check whether or not the timer instance supports internal trigger input */ + assert_param(IS_TIM_CLOCKSOURCE_ITRX_INSTANCE(htim->Instance)); + + TIM_ITRx_SetConfig(htim->Instance, sClockSourceConfig->ClockSource); + break; + } + + default: + break; + } + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Selects the signal connected to the TI1 input: direct from CH1_input + * or a XOR combination between CH1_input, CH2_input & CH3_input + * @param htim TIM handle. + * @param TI1_Selection Indicate whether or not channel 1 is connected to the + * output of a XOR gate. 
+ * This parameter can be one of the following values: + * @arg TIM_TI1SELECTION_CH1: The TIMx_CH1 pin is connected to TI1 input + * @arg TIM_TI1SELECTION_XORCOMBINATION: The TIMx_CH1, CH2 and CH3 + * pins are connected to the TI1 input (XOR combination) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_ConfigTI1Input(TIM_HandleTypeDef *htim, uint32_t TI1_Selection) +{ + uint32_t tmpcr2; + + /* Check the parameters */ + assert_param(IS_TIM_XOR_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TI1SELECTION(TI1_Selection)); + + /* Get the TIMx CR2 register value */ + tmpcr2 = htim->Instance->CR2; + + /* Reset the TI1 selection */ + tmpcr2 &= ~TIM_CR2_TI1S; + + /* Set the TI1 selection */ + tmpcr2 |= TI1_Selection; + + /* Write to TIMxCR2 */ + htim->Instance->CR2 = tmpcr2; + + return HAL_OK; +} + +/** + * @brief Configures the TIM in Slave mode + * @param htim TIM handle. + * @param sSlaveConfig pointer to a TIM_SlaveConfigTypeDef structure that + * contains the selected trigger (internal trigger input, filtered + * timer input or external trigger input) and the Slave mode + * (Disable, Reset, Gated, Trigger, External clock mode 1). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, TIM_SlaveConfigTypeDef *sSlaveConfig) +{ + /* Check the parameters */ + assert_param(IS_TIM_SLAVE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_SLAVE_MODE(sSlaveConfig->SlaveMode)); + assert_param(IS_TIM_TRIGGER_SELECTION(sSlaveConfig->InputTrigger)); + + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + if (TIM_SlaveTimer_SetConfig(htim, sSlaveConfig) != HAL_OK) + { + htim->State = HAL_TIM_STATE_READY; + __HAL_UNLOCK(htim); + return HAL_ERROR; + } + + /* Disable Trigger Interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_TRIGGER); + + /* Disable Trigger DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_TRIGGER); + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the TIM in Slave mode in interrupt mode + * @param htim TIM handle. + * @param sSlaveConfig pointer to a TIM_SlaveConfigTypeDef structure that + * contains the selected trigger (internal trigger input, filtered + * timer input or external trigger input) and the Slave mode + * (Disable, Reset, Gated, Trigger, External clock mode 1). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro_IT(TIM_HandleTypeDef *htim, + TIM_SlaveConfigTypeDef *sSlaveConfig) +{ + /* Check the parameters */ + assert_param(IS_TIM_SLAVE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_SLAVE_MODE(sSlaveConfig->SlaveMode)); + assert_param(IS_TIM_TRIGGER_SELECTION(sSlaveConfig->InputTrigger)); + + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + if (TIM_SlaveTimer_SetConfig(htim, sSlaveConfig) != HAL_OK) + { + htim->State = HAL_TIM_STATE_READY; + __HAL_UNLOCK(htim); + return HAL_ERROR; + } + + /* Enable Trigger Interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_TRIGGER); + + /* Disable Trigger DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_TRIGGER); + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Read the captured value from Capture Compare unit + * @param htim TIM handle. 
+ * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval Captured value + */ +uint32_t HAL_TIM_ReadCapturedValue(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpreg = 0U; + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + /* Return the capture 1 value */ + tmpreg = htim->Instance->CCR1; + + break; + } + case TIM_CHANNEL_2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + /* Return the capture 2 value */ + tmpreg = htim->Instance->CCR2; + + break; + } + + case TIM_CHANNEL_3: + { + /* Check the parameters */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + /* Return the capture 3 value */ + tmpreg = htim->Instance->CCR3; + + break; + } + + case TIM_CHANNEL_4: + { + /* Check the parameters */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + /* Return the capture 4 value */ + tmpreg = htim->Instance->CCR4; + + break; + } + + default: + break; + } + + return tmpreg; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group9 TIM Callbacks functions + * @brief TIM Callbacks functions + * +@verbatim + ============================================================================== + ##### TIM Callbacks functions ##### + ============================================================================== + [..] + This section provides TIM callback functions: + (+) TIM Period elapsed callback + (+) TIM Output Compare callback + (+) TIM Input capture callback + (+) TIM Trigger callback + (+) TIM Error callback + +@endverbatim + * @{ + */ + +/** + * @brief Period elapsed callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PeriodElapsedCallback could be implemented in the user file + */ +} + +/** + * @brief Period elapsed half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PeriodElapsedHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PeriodElapsedHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Output Compare callback in non-blocking mode + * @param htim TIM OC handle + * @retval None + */ +__weak void HAL_TIM_OC_DelayElapsedCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OC_DelayElapsedCallback could be implemented in the user file + */ +} + +/** + * @brief Input Capture callback in non-blocking mode + * @param htim TIM IC handle + * @retval None + */ +__weak void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_CaptureCallback could be implemented in the user file + */ +} + 
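Editor's note (not part of the ST sources): the weak callbacks above are meant to be overridden in application code. As a minimal sketch of how HAL_TIM_ReadCapturedValue and HAL_TIM_IC_CaptureCallback are typically combined, assume a CubeMX-generated handle named htim2 configured for input capture on channel 1; the handle name, the channel choice, and the stm32f7xx_hal.h include are illustrative assumptions, not something this diff defines.

/* Illustrative user-side sketch only -- not part of the vendor file above. */
#include "stm32f7xx_hal.h"

extern TIM_HandleTypeDef htim2;          /* hypothetical handle, assumed to be defined by generated MX code */
static volatile uint32_t last_capture;   /* last value latched by the capture/compare unit */

void capture_start_example(void)
{
  /* Start input capture in interrupt mode so the callback below is invoked on each capture event */
  (void)HAL_TIM_IC_Start_IT(&htim2, TIM_CHANNEL_1);
}

/* Overrides the __weak HAL_TIM_IC_CaptureCallback declared in the vendor code above */
void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim)
{
  if (htim->Channel == HAL_TIM_ACTIVE_CHANNEL_1)
  {
    /* Read CCR1 through the accessor documented above instead of touching the register directly */
    last_capture = HAL_TIM_ReadCapturedValue(htim, TIM_CHANNEL_1);
  }
}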
+/** + * @brief Input Capture half complete callback in non-blocking mode + * @param htim TIM IC handle + * @retval None + */ +__weak void HAL_TIM_IC_CaptureHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_CaptureHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief PWM Pulse finished callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PWM_PulseFinishedCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_PulseFinishedCallback could be implemented in the user file + */ +} + +/** + * @brief PWM Pulse finished half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PWM_PulseFinishedHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_PulseFinishedHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Hall Trigger detection callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_TriggerCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_TriggerCallback could be implemented in the user file + */ +} + +/** + * @brief Hall Trigger detection half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_TriggerHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_TriggerHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Timer error callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_ErrorCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_ErrorCallback could be implemented in the user file + */ +} + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User TIM callback to be used instead of the weak predefined callback + * @param htim tim handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_TIM_BASE_MSPINIT_CB_ID Base MspInit Callback ID + * @arg @ref HAL_TIM_BASE_MSPDEINIT_CB_ID Base MspDeInit Callback ID + * @arg @ref HAL_TIM_IC_MSPINIT_CB_ID IC MspInit Callback ID + * @arg @ref HAL_TIM_IC_MSPDEINIT_CB_ID IC MspDeInit Callback ID + * @arg @ref HAL_TIM_OC_MSPINIT_CB_ID OC MspInit Callback ID + * @arg @ref HAL_TIM_OC_MSPDEINIT_CB_ID OC MspDeInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPINIT_CB_ID PWM MspInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPDEINIT_CB_ID PWM MspDeInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPINIT_CB_ID One Pulse MspInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID One Pulse MspDeInit Callback ID + * @arg 
@ref HAL_TIM_ENCODER_MSPINIT_CB_ID Encoder MspInit Callback ID + * @arg @ref HAL_TIM_ENCODER_MSPDEINIT_CB_ID Encoder MspDeInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID Hall Sensor MspInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID Hall Sensor MspDeInit Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_CB_ID Period Elapsed Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID Period Elapsed half complete Callback ID + * @arg @ref HAL_TIM_TRIGGER_CB_ID Trigger Callback ID + * @arg @ref HAL_TIM_TRIGGER_HALF_CB_ID Trigger half complete Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_CB_ID Input Capture Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_HALF_CB_ID Input Capture half complete Callback ID + * @arg @ref HAL_TIM_OC_DELAY_ELAPSED_CB_ID Output Compare Delay Elapsed Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_CB_ID PWM Pulse Finished Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID PWM Pulse Finished half complete Callback ID + * @arg @ref HAL_TIM_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_TIM_COMMUTATION_CB_ID Commutation Callback ID + * @arg @ref HAL_TIM_COMMUTATION_HALF_CB_ID Commutation half complete Callback ID + * @arg @ref HAL_TIM_BREAK_CB_ID Break Callback ID + * @arg @ref HAL_TIM_BREAK2_CB_ID Break2 Callback ID + * @param pCallback pointer to the callback function + * @retval status + */ +HAL_StatusTypeDef HAL_TIM_RegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID, + pTIM_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(htim); + + if (htim->State == HAL_TIM_STATE_READY) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + htim->Base_MspInitCallback = pCallback; + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + htim->Base_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + htim->IC_MspInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + htim->IC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + htim->OC_MspInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + htim->OC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + htim->PWM_MspInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + htim->PWM_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + htim->OnePulse_MspInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + htim->OnePulse_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + htim->Encoder_MspInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + htim->Encoder_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + htim->HallSensor_MspInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + htim->HallSensor_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_PERIOD_ELAPSED_CB_ID : + htim->PeriodElapsedCallback = pCallback; + break; + + case HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID : + htim->PeriodElapsedHalfCpltCallback = pCallback; + break; + + case HAL_TIM_TRIGGER_CB_ID : + htim->TriggerCallback = pCallback; + break; + + case HAL_TIM_TRIGGER_HALF_CB_ID : + htim->TriggerHalfCpltCallback = pCallback; + break; + + case HAL_TIM_IC_CAPTURE_CB_ID : + htim->IC_CaptureCallback = pCallback; + break; + + case HAL_TIM_IC_CAPTURE_HALF_CB_ID : + 
htim->IC_CaptureHalfCpltCallback = pCallback; + break; + + case HAL_TIM_OC_DELAY_ELAPSED_CB_ID : + htim->OC_DelayElapsedCallback = pCallback; + break; + + case HAL_TIM_PWM_PULSE_FINISHED_CB_ID : + htim->PWM_PulseFinishedCallback = pCallback; + break; + + case HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID : + htim->PWM_PulseFinishedHalfCpltCallback = pCallback; + break; + + case HAL_TIM_ERROR_CB_ID : + htim->ErrorCallback = pCallback; + break; + + case HAL_TIM_COMMUTATION_CB_ID : + htim->CommutationCallback = pCallback; + break; + + case HAL_TIM_COMMUTATION_HALF_CB_ID : + htim->CommutationHalfCpltCallback = pCallback; + break; + + case HAL_TIM_BREAK_CB_ID : + htim->BreakCallback = pCallback; + break; + + case HAL_TIM_BREAK2_CB_ID : + htim->Break2Callback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (htim->State == HAL_TIM_STATE_RESET) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + htim->Base_MspInitCallback = pCallback; + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + htim->Base_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + htim->IC_MspInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + htim->IC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + htim->OC_MspInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + htim->OC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + htim->PWM_MspInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + htim->PWM_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + htim->OnePulse_MspInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + htim->OnePulse_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + htim->Encoder_MspInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + htim->Encoder_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + htim->HallSensor_MspInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + htim->HallSensor_MspDeInitCallback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Unregister a TIM callback + * TIM callback is redirected to the weak predefined callback + * @param htim tim handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_TIM_BASE_MSPINIT_CB_ID Base MspInit Callback ID + * @arg @ref HAL_TIM_BASE_MSPDEINIT_CB_ID Base MspDeInit Callback ID + * @arg @ref HAL_TIM_IC_MSPINIT_CB_ID IC MspInit Callback ID + * @arg @ref HAL_TIM_IC_MSPDEINIT_CB_ID IC MspDeInit Callback ID + * @arg @ref HAL_TIM_OC_MSPINIT_CB_ID OC MspInit Callback ID + * @arg @ref HAL_TIM_OC_MSPDEINIT_CB_ID OC MspDeInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPINIT_CB_ID PWM MspInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPDEINIT_CB_ID PWM MspDeInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPINIT_CB_ID One Pulse MspInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID One Pulse MspDeInit Callback ID + * @arg @ref HAL_TIM_ENCODER_MSPINIT_CB_ID Encoder MspInit Callback ID + * @arg @ref HAL_TIM_ENCODER_MSPDEINIT_CB_ID Encoder MspDeInit Callback ID + * @arg @ref 
HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID Hall Sensor MspInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID Hall Sensor MspDeInit Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_CB_ID Period Elapsed Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID Period Elapsed half complete Callback ID + * @arg @ref HAL_TIM_TRIGGER_CB_ID Trigger Callback ID + * @arg @ref HAL_TIM_TRIGGER_HALF_CB_ID Trigger half complete Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_CB_ID Input Capture Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_HALF_CB_ID Input Capture half complete Callback ID + * @arg @ref HAL_TIM_OC_DELAY_ELAPSED_CB_ID Output Compare Delay Elapsed Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_CB_ID PWM Pulse Finished Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID PWM Pulse Finished half complete Callback ID + * @arg @ref HAL_TIM_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_TIM_COMMUTATION_CB_ID Commutation Callback ID + * @arg @ref HAL_TIM_COMMUTATION_HALF_CB_ID Commutation half complete Callback ID + * @arg @ref HAL_TIM_BREAK_CB_ID Break Callback ID + * @arg @ref HAL_TIM_BREAK2_CB_ID Break2 Callback ID + * @retval status + */ +HAL_StatusTypeDef HAL_TIM_UnRegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(htim); + + if (htim->State == HAL_TIM_STATE_READY) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; /* Legacy weak Base MspInit Callback */ + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; /* Legacy weak Base Msp DeInit Callback */ + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; /* Legacy weak IC Msp Init Callback */ + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; /* Legacy weak IC Msp DeInit Callback */ + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; /* Legacy weak OC Msp Init Callback */ + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; /* Legacy weak OC Msp DeInit Callback */ + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; /* Legacy weak PWM Msp Init Callback */ + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; /* Legacy weak PWM Msp DeInit Callback */ + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; /* Legacy weak One Pulse Msp Init Callback */ + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; /* Legacy weak One Pulse Msp DeInit Callback */ + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; /* Legacy weak Encoder Msp Init Callback */ + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; /* Legacy weak Encoder Msp DeInit Callback */ + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; /* Legacy weak Hall Sensor Msp Init Callback */ + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; /* Legacy weak Hall Sensor Msp DeInit Callback */ + break; + + 
case HAL_TIM_PERIOD_ELAPSED_CB_ID : + htim->PeriodElapsedCallback = HAL_TIM_PeriodElapsedCallback; /* Legacy weak Period Elapsed Callback */ + break; + + case HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID : + htim->PeriodElapsedHalfCpltCallback = HAL_TIM_PeriodElapsedHalfCpltCallback; /* Legacy weak Period Elapsed half complete Callback */ + break; + + case HAL_TIM_TRIGGER_CB_ID : + htim->TriggerCallback = HAL_TIM_TriggerCallback; /* Legacy weak Trigger Callback */ + break; + + case HAL_TIM_TRIGGER_HALF_CB_ID : + htim->TriggerHalfCpltCallback = HAL_TIM_TriggerHalfCpltCallback; /* Legacy weak Trigger half complete Callback */ + break; + + case HAL_TIM_IC_CAPTURE_CB_ID : + htim->IC_CaptureCallback = HAL_TIM_IC_CaptureCallback; /* Legacy weak IC Capture Callback */ + break; + + case HAL_TIM_IC_CAPTURE_HALF_CB_ID : + htim->IC_CaptureHalfCpltCallback = HAL_TIM_IC_CaptureHalfCpltCallback; /* Legacy weak IC Capture half complete Callback */ + break; + + case HAL_TIM_OC_DELAY_ELAPSED_CB_ID : + htim->OC_DelayElapsedCallback = HAL_TIM_OC_DelayElapsedCallback; /* Legacy weak OC Delay Elapsed Callback */ + break; + + case HAL_TIM_PWM_PULSE_FINISHED_CB_ID : + htim->PWM_PulseFinishedCallback = HAL_TIM_PWM_PulseFinishedCallback; /* Legacy weak PWM Pulse Finished Callback */ + break; + + case HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID : + htim->PWM_PulseFinishedHalfCpltCallback = HAL_TIM_PWM_PulseFinishedHalfCpltCallback; /* Legacy weak PWM Pulse Finished half complete Callback */ + break; + + case HAL_TIM_ERROR_CB_ID : + htim->ErrorCallback = HAL_TIM_ErrorCallback; /* Legacy weak Error Callback */ + break; + + case HAL_TIM_COMMUTATION_CB_ID : + htim->CommutationCallback = HAL_TIMEx_CommutCallback; /* Legacy weak Commutation Callback */ + break; + + case HAL_TIM_COMMUTATION_HALF_CB_ID : + htim->CommutationHalfCpltCallback = HAL_TIMEx_CommutHalfCpltCallback; /* Legacy weak Commutation half complete Callback */ + break; + + case HAL_TIM_BREAK_CB_ID : + htim->BreakCallback = HAL_TIMEx_BreakCallback; /* Legacy weak Break Callback */ + break; + + case HAL_TIM_BREAK2_CB_ID : + htim->Break2Callback = HAL_TIMEx_Break2Callback; /* Legacy weak Break2 Callback */ + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (htim->State == HAL_TIM_STATE_RESET) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; /* Legacy weak Base MspInit Callback */ + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; /* Legacy weak Base Msp DeInit Callback */ + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; /* Legacy weak IC Msp Init Callback */ + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; /* Legacy weak IC Msp DeInit Callback */ + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; /* Legacy weak OC Msp Init Callback */ + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; /* Legacy weak OC Msp DeInit Callback */ + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; /* Legacy weak PWM Msp Init Callback */ + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; /* Legacy weak PWM Msp DeInit Callback */ + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; /* 
Legacy weak One Pulse Msp Init Callback */ + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; /* Legacy weak One Pulse Msp DeInit Callback */ + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; /* Legacy weak Encoder Msp Init Callback */ + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; /* Legacy weak Encoder Msp DeInit Callback */ + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; /* Legacy weak Hall Sensor Msp Init Callback */ + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; /* Legacy weak Hall Sensor Msp DeInit Callback */ + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return status; +} +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group10 TIM Peripheral State functions + * @brief TIM Peripheral State functions + * +@verbatim + ============================================================================== + ##### Peripheral State functions ##### + ============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Return the TIM Base handle state. + * @param htim TIM Base handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM OC handle state. + * @param htim TIM Output Compare handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_OC_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM PWM handle state. + * @param htim TIM handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_PWM_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM Input Capture handle state. + * @param htim TIM IC handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM One Pulse Mode handle state. + * @param htim TIM OPM handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM Encoder Mode handle state. + * @param htim TIM Encoder Interface handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup TIM_Private_Functions TIM Private Functions + * @{ + */ + +/** + * @brief TIM DMA error callback + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIM_DMAError(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->ErrorCallback(htim); +#else + HAL_TIM_ErrorCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Delay Pulse complete callback. + * @param hdma pointer to DMA handle. 
+ * @retval None + */ +void TIM_DMADelayPulseCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + htim->State = HAL_TIM_STATE_READY; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Delay Pulse half complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIM_DMADelayPulseHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + htim->State = HAL_TIM_STATE_READY; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PWM_PulseFinishedHalfCpltCallback(htim); +#else + HAL_TIM_PWM_PulseFinishedHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Capture complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIM_DMACaptureCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + htim->State = HAL_TIM_STATE_READY; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Capture half complete callback. + * @param hdma pointer to DMA handle. 
+ * @retval None + */ +void TIM_DMACaptureHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + htim->State = HAL_TIM_STATE_READY; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureHalfCpltCallback(htim); +#else + HAL_TIM_IC_CaptureHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Period Elapse complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMAPeriodElapsedCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PeriodElapsedCallback(htim); +#else + HAL_TIM_PeriodElapsedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Period Elapse half complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMAPeriodElapsedHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PeriodElapsedHalfCpltCallback(htim); +#else + HAL_TIM_PeriodElapsedHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Trigger callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMATriggerCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->TriggerCallback(htim); +#else + HAL_TIM_TriggerCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Trigger half complete callback. + * @param hdma pointer to DMA handle. 
+ * @retval None + */ +static void TIM_DMATriggerHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->TriggerHalfCpltCallback(htim); +#else + HAL_TIM_TriggerHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief Time Base configuration + * @param TIMx TIM peripheral + * @param Structure TIM Base configuration structure + * @retval None + */ +void TIM_Base_SetConfig(TIM_TypeDef *TIMx, TIM_Base_InitTypeDef *Structure) +{ + uint32_t tmpcr1; + tmpcr1 = TIMx->CR1; + + /* Set TIM Time Base Unit parameters ---------------------------------------*/ + if (IS_TIM_COUNTER_MODE_SELECT_INSTANCE(TIMx)) + { + /* Select the Counter Mode */ + tmpcr1 &= ~(TIM_CR1_DIR | TIM_CR1_CMS); + tmpcr1 |= Structure->CounterMode; + } + + if (IS_TIM_CLOCK_DIVISION_INSTANCE(TIMx)) + { + /* Set the clock division */ + tmpcr1 &= ~TIM_CR1_CKD; + tmpcr1 |= (uint32_t)Structure->ClockDivision; + } + + /* Set the auto-reload preload */ + MODIFY_REG(tmpcr1, TIM_CR1_ARPE, Structure->AutoReloadPreload); + + TIMx->CR1 = tmpcr1; + + /* Set the Autoreload value */ + TIMx->ARR = (uint32_t)Structure->Period ; + + /* Set the Prescaler value */ + TIMx->PSC = Structure->Prescaler; + + if (IS_TIM_REPETITION_COUNTER_INSTANCE(TIMx)) + { + /* Set the Repetition Counter value */ + TIMx->RCR = Structure->RepetitionCounter; + } + + /* Generate an update event to reload the Prescaler + and the repetition counter (only for advanced timer) value immediately */ + TIMx->EGR = TIM_EGR_UG; +} + +/** + * @brief Timer Output Compare 1 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The ouput configuration structure + * @retval None + */ +static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the Channel 1: Reset the CC1E Bit */ + TIMx->CCER &= ~TIM_CCER_CC1E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR1 register value */ + tmpccmrx = TIMx->CCMR1; + + /* Reset the Output Compare Mode Bits */ + tmpccmrx &= ~TIM_CCMR1_OC1M; + tmpccmrx &= ~TIM_CCMR1_CC1S; + /* Select the Output Compare Mode */ + tmpccmrx |= OC_Config->OCMode; + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC1P; + /* Set the Output Compare Polarity */ + tmpccer |= OC_Config->OCPolarity; + + if (IS_TIM_CCXN_INSTANCE(TIMx, TIM_CHANNEL_1)) + { + /* Check parameters */ + assert_param(IS_TIM_OCN_POLARITY(OC_Config->OCNPolarity)); + + /* Reset the Output N Polarity level */ + tmpccer &= ~TIM_CCER_CC1NP; + /* Set the Output N Polarity */ + tmpccer |= OC_Config->OCNPolarity; + /* Reset the Output N State */ + tmpccer &= ~TIM_CCER_CC1NE; + } + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCNIDLE_STATE(OC_Config->OCNIdleState)); + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare and Output Compare N IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS1; + tmpcr2 &= ~TIM_CR2_OIS1N; + /* Set the Output Idle state */ + tmpcr2 |= OC_Config->OCIdleState; + /* Set the Output N Idle state */ + tmpcr2 |= OC_Config->OCNIdleState; + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR1 */ + TIMx->CCMR1 = tmpccmrx; + + /* Set the Capture Compare Register value */ + 
TIMx->CCR1 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 2 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The ouput configuration structure + * @retval None + */ +void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the Channel 2: Reset the CC2E Bit */ + TIMx->CCER &= ~TIM_CCER_CC2E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR1 register value */ + tmpccmrx = TIMx->CCMR1; + + /* Reset the Output Compare mode and Capture/Compare selection Bits */ + tmpccmrx &= ~TIM_CCMR1_OC2M; + tmpccmrx &= ~TIM_CCMR1_CC2S; + + /* Select the Output Compare Mode */ + tmpccmrx |= (OC_Config->OCMode << 8U); + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC2P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 4U); + + if (IS_TIM_CCXN_INSTANCE(TIMx, TIM_CHANNEL_2)) + { + assert_param(IS_TIM_OCN_POLARITY(OC_Config->OCNPolarity)); + + /* Reset the Output N Polarity level */ + tmpccer &= ~TIM_CCER_CC2NP; + /* Set the Output N Polarity */ + tmpccer |= (OC_Config->OCNPolarity << 4U); + /* Reset the Output N State */ + tmpccer &= ~TIM_CCER_CC2NE; + + } + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCNIDLE_STATE(OC_Config->OCNIdleState)); + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare and Output Compare N IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS2; + tmpcr2 &= ~TIM_CR2_OIS2N; + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 2U); + /* Set the Output N Idle state */ + tmpcr2 |= (OC_Config->OCNIdleState << 2U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR1 */ + TIMx->CCMR1 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR2 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 3 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The ouput configuration structure + * @retval None + */ +static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the Channel 3: Reset the CC2E Bit */ + TIMx->CCER &= ~TIM_CCER_CC3E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR2 register value */ + tmpccmrx = TIMx->CCMR2; + + /* Reset the Output Compare mode and Capture/Compare selection Bits */ + tmpccmrx &= ~TIM_CCMR2_OC3M; + tmpccmrx &= ~TIM_CCMR2_CC3S; + /* Select the Output Compare Mode */ + tmpccmrx |= OC_Config->OCMode; + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC3P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 8U); + + if (IS_TIM_CCXN_INSTANCE(TIMx, TIM_CHANNEL_3)) + { + assert_param(IS_TIM_OCN_POLARITY(OC_Config->OCNPolarity)); + + /* Reset the Output N Polarity level */ + tmpccer &= ~TIM_CCER_CC3NP; + /* Set the Output N Polarity */ + tmpccer |= (OC_Config->OCNPolarity << 8U); + /* Reset the Output N State */ + tmpccer &= ~TIM_CCER_CC3NE; + } + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCNIDLE_STATE(OC_Config->OCNIdleState)); + 
assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare and Output Compare N IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS3; + tmpcr2 &= ~TIM_CR2_OIS3N; + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 4U); + /* Set the Output N Idle state */ + tmpcr2 |= (OC_Config->OCNIdleState << 4U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR2 */ + TIMx->CCMR2 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR3 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 4 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The ouput configuration structure + * @retval None + */ +static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the Channel 4: Reset the CC4E Bit */ + TIMx->CCER &= ~TIM_CCER_CC4E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR2 register value */ + tmpccmrx = TIMx->CCMR2; + + /* Reset the Output Compare mode and Capture/Compare selection Bits */ + tmpccmrx &= ~TIM_CCMR2_OC4M; + tmpccmrx &= ~TIM_CCMR2_CC4S; + + /* Select the Output Compare Mode */ + tmpccmrx |= (OC_Config->OCMode << 8U); + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC4P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 12U); + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS4; + + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 6U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR2 */ + TIMx->CCMR2 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR4 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 5 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The ouput configuration structure + * @retval None + */ +static void TIM_OC5_SetConfig(TIM_TypeDef *TIMx, + TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the output: Reset the CCxE Bit */ + TIMx->CCER &= ~TIM_CCER_CC5E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + /* Get the TIMx CCMR1 register value */ + tmpccmrx = TIMx->CCMR3; + + /* Reset the Output Compare Mode Bits */ + tmpccmrx &= ~(TIM_CCMR3_OC5M); + /* Select the Output Compare Mode */ + tmpccmrx |= OC_Config->OCMode; + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC5P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 16U); + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Reset the Output Compare IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS5; + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 8U); + } + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR3 */ + TIMx->CCMR3 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR5 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 6 configuration + * @param TIMx to select the TIM peripheral + * @param 
OC_Config The ouput configuration structure + * @retval None + */ +static void TIM_OC6_SetConfig(TIM_TypeDef *TIMx, + TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the output: Reset the CCxE Bit */ + TIMx->CCER &= ~TIM_CCER_CC6E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + /* Get the TIMx CCMR1 register value */ + tmpccmrx = TIMx->CCMR3; + + /* Reset the Output Compare Mode Bits */ + tmpccmrx &= ~(TIM_CCMR3_OC6M); + /* Select the Output Compare Mode */ + tmpccmrx |= (OC_Config->OCMode << 8U); + + /* Reset the Output Polarity level */ + tmpccer &= (uint32_t)~TIM_CCER_CC6P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 20U); + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Reset the Output Compare IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS6; + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 10U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR3 */ + TIMx->CCMR3 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR6 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Slave Timer configuration function + * @param htim TIM handle + * @param sSlaveConfig Slave timer configuration + * @retval None + */ +static HAL_StatusTypeDef TIM_SlaveTimer_SetConfig(TIM_HandleTypeDef *htim, + TIM_SlaveConfigTypeDef *sSlaveConfig) +{ + uint32_t tmpsmcr; + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Get the TIMx SMCR register value */ + tmpsmcr = htim->Instance->SMCR; + + /* Reset the Trigger Selection Bits */ + tmpsmcr &= ~TIM_SMCR_TS; + /* Set the Input Trigger source */ + tmpsmcr |= sSlaveConfig->InputTrigger; + + /* Reset the slave mode Bits */ + tmpsmcr &= ~TIM_SMCR_SMS; + /* Set the slave mode */ + tmpsmcr |= sSlaveConfig->SlaveMode; + + /* Write to TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + + /* Configure the trigger prescaler, filter, and polarity */ + switch (sSlaveConfig->InputTrigger) + { + case TIM_TS_ETRF: + { + /* Check the parameters */ + assert_param(IS_TIM_CLOCKSOURCE_ETRMODE1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERPRESCALER(sSlaveConfig->TriggerPrescaler)); + assert_param(IS_TIM_TRIGGERPOLARITY(sSlaveConfig->TriggerPolarity)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + /* Configure the ETR Trigger source */ + TIM_ETR_SetConfig(htim->Instance, + sSlaveConfig->TriggerPrescaler, + sSlaveConfig->TriggerPolarity, + sSlaveConfig->TriggerFilter); + break; + } + + case TIM_TS_TI1F_ED: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + + if(sSlaveConfig->SlaveMode == TIM_SLAVEMODE_GATED) + { + return HAL_ERROR; + } + + /* Disable the Channel 1: Reset the CC1E Bit */ + tmpccer = htim->Instance->CCER; + htim->Instance->CCER &= ~TIM_CCER_CC1E; + tmpccmr1 = htim->Instance->CCMR1; + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC1F; + tmpccmr1 |= ((sSlaveConfig->TriggerFilter) << 4U); + + /* Write to TIMx CCMR1 and CCER registers */ + htim->Instance->CCMR1 = tmpccmr1; + htim->Instance->CCER = tmpccer; + break; + } + + case TIM_TS_TI1FP1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERPOLARITY(sSlaveConfig->TriggerPolarity)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + + /* 
Configure TI1 Filter and Polarity */ + TIM_TI1_ConfigInputStage(htim->Instance, + sSlaveConfig->TriggerPolarity, + sSlaveConfig->TriggerFilter); + break; + } + + case TIM_TS_TI2FP2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERPOLARITY(sSlaveConfig->TriggerPolarity)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + + /* Configure TI2 Filter and Polarity */ + TIM_TI2_ConfigInputStage(htim->Instance, + sSlaveConfig->TriggerPolarity, + sSlaveConfig->TriggerFilter); + break; + } + + case TIM_TS_ITR0: + case TIM_TS_ITR1: + case TIM_TS_ITR2: + case TIM_TS_ITR3: + { + /* Check the parameter */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + break; + } + + default: + break; + } + return HAL_OK; +} + +/** + * @brief Configure the TI1 as Input. + * @param TIMx to select the TIM peripheral. + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 1 is selected to be connected to IC1. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 1 is selected to be connected to IC2. + * @arg TIM_ICSELECTION_TRC: TIM Input 1 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI2FP1 + * (on channel2 path) is used as the input signal. Therefore CCMR1 must be + * protected against un-initialized filter and polarity values. + */ +void TIM_TI1_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 1: Reset the CC1E Bit */ + TIMx->CCER &= ~TIM_CCER_CC1E; + tmpccmr1 = TIMx->CCMR1; + tmpccer = TIMx->CCER; + + /* Select the Input */ + if (IS_TIM_CC2_INSTANCE(TIMx) != RESET) + { + tmpccmr1 &= ~TIM_CCMR1_CC1S; + tmpccmr1 |= TIM_ICSelection; + } + else + { + tmpccmr1 |= TIM_CCMR1_CC1S_0; + } + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC1F; + tmpccmr1 |= ((TIM_ICFilter << 4U) & TIM_CCMR1_IC1F); + + /* Select the Polarity and set the CC1E Bit */ + tmpccer &= ~(TIM_CCER_CC1P | TIM_CCER_CC1NP); + tmpccer |= (TIM_ICPolarity & (TIM_CCER_CC1P | TIM_CCER_CC1NP)); + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the Polarity and Filter for TI1. + * @param TIMx to select the TIM peripheral. + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. 
+ * @retval None + */ +static void TIM_TI1_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 1: Reset the CC1E Bit */ + tmpccer = TIMx->CCER; + TIMx->CCER &= ~TIM_CCER_CC1E; + tmpccmr1 = TIMx->CCMR1; + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC1F; + tmpccmr1 |= (TIM_ICFilter << 4U); + + /* Select the Polarity and set the CC1E Bit */ + tmpccer &= ~(TIM_CCER_CC1P | TIM_CCER_CC1NP); + tmpccer |= TIM_ICPolarity; + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the TI2 as Input. + * @param TIMx to select the TIM peripheral + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 2 is selected to be connected to IC2. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 2 is selected to be connected to IC1. + * @arg TIM_ICSELECTION_TRC: TIM Input 2 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI1FP2 + * (on channel1 path) is used as the input signal. Therefore CCMR1 must be + * protected against un-initialized filter and polarity values. + */ +static void TIM_TI2_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 2: Reset the CC2E Bit */ + TIMx->CCER &= ~TIM_CCER_CC2E; + tmpccmr1 = TIMx->CCMR1; + tmpccer = TIMx->CCER; + + /* Select the Input */ + tmpccmr1 &= ~TIM_CCMR1_CC2S; + tmpccmr1 |= (TIM_ICSelection << 8U); + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC2F; + tmpccmr1 |= ((TIM_ICFilter << 12U) & TIM_CCMR1_IC2F); + + /* Select the Polarity and set the CC2E Bit */ + tmpccer &= ~(TIM_CCER_CC2P | TIM_CCER_CC2NP); + tmpccer |= ((TIM_ICPolarity << 4U) & (TIM_CCER_CC2P | TIM_CCER_CC2NP)); + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1 ; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the Polarity and Filter for TI2. + * @param TIMx to select the TIM peripheral. + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + */ +static void TIM_TI2_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 2: Reset the CC2E Bit */ + TIMx->CCER &= ~TIM_CCER_CC2E; + tmpccmr1 = TIMx->CCMR1; + tmpccer = TIMx->CCER; + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC2F; + tmpccmr1 |= (TIM_ICFilter << 12U); + + /* Select the Polarity and set the CC2E Bit */ + tmpccer &= ~(TIM_CCER_CC2P | TIM_CCER_CC2NP); + tmpccer |= (TIM_ICPolarity << 4U); + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1 ; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the TI3 as Input. 
+ * @param TIMx to select the TIM peripheral + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 3 is selected to be connected to IC3. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 3 is selected to be connected to IC4. + * @arg TIM_ICSELECTION_TRC: TIM Input 3 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI3FP4 + * (on channel1 path) is used as the input signal. Therefore CCMR2 must be + * protected against un-initialized filter and polarity values. + */ +static void TIM_TI3_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr2; + uint32_t tmpccer; + + /* Disable the Channel 3: Reset the CC3E Bit */ + TIMx->CCER &= ~TIM_CCER_CC3E; + tmpccmr2 = TIMx->CCMR2; + tmpccer = TIMx->CCER; + + /* Select the Input */ + tmpccmr2 &= ~TIM_CCMR2_CC3S; + tmpccmr2 |= TIM_ICSelection; + + /* Set the filter */ + tmpccmr2 &= ~TIM_CCMR2_IC3F; + tmpccmr2 |= ((TIM_ICFilter << 4U) & TIM_CCMR2_IC3F); + + /* Select the Polarity and set the CC3E Bit */ + tmpccer &= ~(TIM_CCER_CC3P | TIM_CCER_CC3NP); + tmpccer |= ((TIM_ICPolarity << 8U) & (TIM_CCER_CC3P | TIM_CCER_CC3NP)); + + /* Write to TIMx CCMR2 and CCER registers */ + TIMx->CCMR2 = tmpccmr2; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the TI4 as Input. + * @param TIMx to select the TIM peripheral + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 4 is selected to be connected to IC4. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 4 is selected to be connected to IC3. + * @arg TIM_ICSELECTION_TRC: TIM Input 4 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI4FP3 + * (on channel1 path) is used as the input signal. Therefore CCMR2 must be + * protected against un-initialized filter and polarity values. 
+ * @retval None + */ +static void TIM_TI4_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr2; + uint32_t tmpccer; + + /* Disable the Channel 4: Reset the CC4E Bit */ + TIMx->CCER &= ~TIM_CCER_CC4E; + tmpccmr2 = TIMx->CCMR2; + tmpccer = TIMx->CCER; + + /* Select the Input */ + tmpccmr2 &= ~TIM_CCMR2_CC4S; + tmpccmr2 |= (TIM_ICSelection << 8U); + + /* Set the filter */ + tmpccmr2 &= ~TIM_CCMR2_IC4F; + tmpccmr2 |= ((TIM_ICFilter << 12U) & TIM_CCMR2_IC4F); + + /* Select the Polarity and set the CC4E Bit */ + tmpccer &= ~(TIM_CCER_CC4P | TIM_CCER_CC4NP); + tmpccer |= ((TIM_ICPolarity << 12U) & (TIM_CCER_CC4P | TIM_CCER_CC4NP)); + + /* Write to TIMx CCMR2 and CCER registers */ + TIMx->CCMR2 = tmpccmr2; + TIMx->CCER = tmpccer ; +} + +/** + * @brief Selects the Input Trigger source + * @param TIMx to select the TIM peripheral + * @param InputTriggerSource The Input Trigger source. + * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal Trigger 0 + * @arg TIM_TS_ITR1: Internal Trigger 1 + * @arg TIM_TS_ITR2: Internal Trigger 2 + * @arg TIM_TS_ITR3: Internal Trigger 3 + * @arg TIM_TS_TI1F_ED: TI1 Edge Detector + * @arg TIM_TS_TI1FP1: Filtered Timer Input 1 + * @arg TIM_TS_TI2FP2: Filtered Timer Input 2 + * @arg TIM_TS_ETRF: External Trigger input + * @retval None + */ +static void TIM_ITRx_SetConfig(TIM_TypeDef *TIMx, uint32_t InputTriggerSource) +{ + uint32_t tmpsmcr; + + /* Get the TIMx SMCR register value */ + tmpsmcr = TIMx->SMCR; + /* Reset the TS Bits */ + tmpsmcr &= ~TIM_SMCR_TS; + /* Set the Input Trigger source and the slave mode*/ + tmpsmcr |= (InputTriggerSource | TIM_SLAVEMODE_EXTERNAL1); + /* Write to TIMx SMCR */ + TIMx->SMCR = tmpsmcr; +} +/** + * @brief Configures the TIMx External Trigger (ETR). + * @param TIMx to select the TIM peripheral + * @param TIM_ExtTRGPrescaler The external Trigger Prescaler. + * This parameter can be one of the following values: + * @arg TIM_ETRPRESCALER_DIV1: ETRP Prescaler OFF. + * @arg TIM_ETRPRESCALER_DIV2: ETRP frequency divided by 2. + * @arg TIM_ETRPRESCALER_DIV4: ETRP frequency divided by 4. + * @arg TIM_ETRPRESCALER_DIV8: ETRP frequency divided by 8. + * @param TIM_ExtTRGPolarity The external Trigger Polarity. + * This parameter can be one of the following values: + * @arg TIM_ETRPOLARITY_INVERTED: active low or falling edge active. + * @arg TIM_ETRPOLARITY_NONINVERTED: active high or rising edge active. + * @param ExtTRGFilter External Trigger Filter. + * This parameter must be a value between 0x00 and 0x0F + * @retval None + */ +void TIM_ETR_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ExtTRGPrescaler, + uint32_t TIM_ExtTRGPolarity, uint32_t ExtTRGFilter) +{ + uint32_t tmpsmcr; + + tmpsmcr = TIMx->SMCR; + + /* Reset the ETR Bits */ + tmpsmcr &= ~(TIM_SMCR_ETF | TIM_SMCR_ETPS | TIM_SMCR_ECE | TIM_SMCR_ETP); + + /* Set the Prescaler, the Filter value and the Polarity */ + tmpsmcr |= (uint32_t)(TIM_ExtTRGPrescaler | (TIM_ExtTRGPolarity | (ExtTRGFilter << 8U))); + + /* Write to TIMx SMCR */ + TIMx->SMCR = tmpsmcr; +} + +/** + * @brief Enables or disables the TIM Capture Compare Channel x. 
+ * @param TIMx to select the TIM peripheral + * @param Channel specifies the TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @arg TIM_CHANNEL_4: TIM Channel 4 + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @param ChannelState specifies the TIM Channel CCxE bit new state. + * This parameter can be: TIM_CCx_ENABLE or TIM_CCx_DISABLE. + * @retval None + */ +void TIM_CCxChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelState) +{ + uint32_t tmp; + + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(TIMx)); + assert_param(IS_TIM_CHANNELS(Channel)); + + tmp = TIM_CCER_CC1E << (Channel & 0x1FU); /* 0x1FU = 31 bits max shift */ + + /* Reset the CCxE Bit */ + TIMx->CCER &= ~tmp; + + /* Set or reset the CCxE Bit */ + TIMx->CCER |= (uint32_t)(ChannelState << (Channel & 0x1FU)); /* 0x1FU = 31 bits max shift */ +} + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +/** + * @brief Reset interrupt callbacks to the legacy weak callbacks. + * @param htim pointer to a TIM_HandleTypeDef structure that contains + * the configuration information for TIM module. + * @retval None + */ +void TIM_ResetCallback(TIM_HandleTypeDef *htim) +{ + /* Reset the TIM callback to the legacy weak callbacks */ + htim->PeriodElapsedCallback = HAL_TIM_PeriodElapsedCallback; /* Legacy weak PeriodElapsedCallback */ + htim->PeriodElapsedHalfCpltCallback = HAL_TIM_PeriodElapsedHalfCpltCallback; /* Legacy weak PeriodElapsedHalfCpltCallback */ + htim->TriggerCallback = HAL_TIM_TriggerCallback; /* Legacy weak TriggerCallback */ + htim->TriggerHalfCpltCallback = HAL_TIM_TriggerHalfCpltCallback; /* Legacy weak TriggerHalfCpltCallback */ + htim->IC_CaptureCallback = HAL_TIM_IC_CaptureCallback; /* Legacy weak IC_CaptureCallback */ + htim->IC_CaptureHalfCpltCallback = HAL_TIM_IC_CaptureHalfCpltCallback; /* Legacy weak IC_CaptureHalfCpltCallback */ + htim->OC_DelayElapsedCallback = HAL_TIM_OC_DelayElapsedCallback; /* Legacy weak OC_DelayElapsedCallback */ + htim->PWM_PulseFinishedCallback = HAL_TIM_PWM_PulseFinishedCallback; /* Legacy weak PWM_PulseFinishedCallback */ + htim->PWM_PulseFinishedHalfCpltCallback = HAL_TIM_PWM_PulseFinishedHalfCpltCallback; /* Legacy weak PWM_PulseFinishedHalfCpltCallback */ + htim->ErrorCallback = HAL_TIM_ErrorCallback; /* Legacy weak ErrorCallback */ + htim->CommutationCallback = HAL_TIMEx_CommutCallback; /* Legacy weak CommutationCallback */ + htim->CommutationHalfCpltCallback = HAL_TIMEx_CommutHalfCpltCallback; /* Legacy weak CommutationHalfCpltCallback */ + htim->BreakCallback = HAL_TIMEx_BreakCallback; /* Legacy weak BreakCallback */ + htim->Break2Callback = HAL_TIMEx_Break2Callback; /* Legacy weak Break2Callback */ +} +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ + +#endif /* HAL_TIM_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c new file mode 100644 index 000000000..7dd91df44 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c @@ -0,0 +1,2154 @@ +/** + ****************************************************************************** + * @file 
stm32f7xx_hal_tim_ex.c + * @author MCD Application Team + * @brief TIM HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Timer Extended peripheral: + * + Time Hall Sensor Interface Initialization + * + Time Hall Sensor Interface Start + * + Time Complementary signal break and dead time configuration + * + Time Master and Slave synchronization configuration + * + Time Output Compare/PWM Channel Configuration (for channels 5 and 6) + * + Timer remapping capabilities configuration + @verbatim + ============================================================================== + ##### TIMER Extended features ##### + ============================================================================== + [..] + The Timer Extended features include: + (#) Complementary outputs with programmable dead-time for : + (++) Output Compare + (++) PWM generation (Edge and Center-aligned Mode) + (++) One-pulse mode output + (#) Synchronization circuit to control the timer with external signals and to + interconnect several timers together. + (#) Break input to put the timer output signals in reset state or in a known state. + (#) Supports incremental (quadrature) encoder and hall-sensor circuitry for + positioning purposes + + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Initialize the TIM low level resources by implementing the following functions + depending on the selected feature: + (++) Hall Sensor output : HAL_TIMEx_HallSensor_MspInit() + + (#) Initialize the TIM low level resources : + (##) Enable the TIM interface clock using __HAL_RCC_TIMx_CLK_ENABLE(); + (##) TIM pins configuration + (+++) Enable the clock for the TIM GPIOs using the following function: + __HAL_RCC_GPIOx_CLK_ENABLE(); + (+++) Configure these TIM pins in Alternate function mode using HAL_GPIO_Init(); + + (#) The external Clock can be configured, if needed (the default clock is the + internal clock from the APBx), using the following function: + HAL_TIM_ConfigClockSource, the clock configuration should be done before + any start function. + + (#) Configure the TIM in the desired functioning mode using one of the + initialization function of this driver: + (++) HAL_TIMEx_HallSensor_Init() and HAL_TIMEx_ConfigCommutEvent(): to use the + Timer Hall Sensor Interface and the commutation event with the corresponding + Interrupt and DMA request if needed (Note that One Timer is used to interface + with the Hall sensor Interface and another Timer should be used to use + the commutation event). + + (#) Activate the TIM peripheral using one of the start functions: + (++) Complementary Output Compare : HAL_TIMEx_OCN_Start(), HAL_TIMEx_OCN_Start_DMA(), HAL_TIMEx_OC_Start_IT() + (++) Complementary PWM generation : HAL_TIMEx_PWMN_Start(), HAL_TIMEx_PWMN_Start_DMA(), HAL_TIMEx_PWMN_Start_IT() + (++) Complementary One-pulse mode output : HAL_TIMEx_OnePulseN_Start(), HAL_TIMEx_OnePulseN_Start_IT() + (++) Hall Sensor output : HAL_TIMEx_HallSensor_Start(), HAL_TIMEx_HallSensor_Start_DMA(), HAL_TIMEx_HallSensor_Start_IT(). + + @endverbatim + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup TIMEx TIMEx + * @brief TIM Extended HAL module driver + * @{ + */ + +#ifdef HAL_TIM_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +static void TIM_CCxNChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelNState); + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Functions TIM Extended Exported Functions + * @{ + */ + +/** @defgroup TIMEx_Exported_Functions_Group1 Extended Timer Hall Sensor functions + * @brief Timer Hall Sensor functions + * +@verbatim + ============================================================================== + ##### Timer Hall Sensor functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure TIM HAL Sensor. + (+) De-initialize TIM HAL Sensor. + (+) Start the Hall Sensor Interface. + (+) Stop the Hall Sensor Interface. + (+) Start the Hall Sensor Interface and enable interrupts. + (+) Stop the Hall Sensor Interface and disable interrupts. + (+) Start the Hall Sensor Interface and enable DMA transfers. + (+) Stop the Hall Sensor Interface and disable DMA transfers. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Hall Sensor Interface and initialize the associated handle. 
+ * @param htim TIM Hall Sensor Interface handle + * @param sConfig TIM Hall Sensor configuration structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, TIM_HallSensor_InitTypeDef *sConfig) +{ + TIM_OC_InitTypeDef OC_Config; + + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + assert_param(IS_TIM_IC_POLARITY(sConfig->IC1Polarity)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->IC1Prescaler)); + assert_param(IS_TIM_IC_FILTER(sConfig->IC1Filter)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->HallSensor_MspInitCallback == NULL) + { + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->HallSensor_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIMEx_HallSensor_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Configure the Time base in the Encoder Mode */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Configure the Channel 1 as Input Channel to interface with the three Outputs of the Hall sensor */ + TIM_TI1_SetConfig(htim->Instance, sConfig->IC1Polarity, TIM_ICSELECTION_TRC, sConfig->IC1Filter); + + /* Reset the IC1PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; + /* Set the IC1PSC value */ + htim->Instance->CCMR1 |= sConfig->IC1Prescaler; + + /* Enable the Hall sensor interface (XOR function of the three inputs) */ + htim->Instance->CR2 |= TIM_CR2_TI1S; + + /* Select the TIM_TS_TI1F_ED signal as Input trigger for the TIM */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI1F_ED; + + /* Use the TIM_TS_TI1F_ED signal to reset the TIM counter each edge detection */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_RESET; + + /* Program channel 2 in PWM 2 mode with the desired Commutation_Delay*/ + OC_Config.OCFastMode = TIM_OCFAST_DISABLE; + OC_Config.OCIdleState = TIM_OCIDLESTATE_RESET; + OC_Config.OCMode = TIM_OCMODE_PWM2; + OC_Config.OCNIdleState = TIM_OCNIDLESTATE_RESET; + OC_Config.OCNPolarity = TIM_OCNPOLARITY_HIGH; + OC_Config.OCPolarity = TIM_OCPOLARITY_HIGH; + OC_Config.Pulse = sConfig->Commutation_Delay; + + TIM_OC2_SetConfig(htim->Instance, &OC_Config); + + /* Select OC2REF as trigger output on TRGO: write the MMS bits in the TIMx_CR2 + register to 101 */ + htim->Instance->CR2 &= ~TIM_CR2_MMS; + htim->Instance->CR2 |= TIM_TRGO_OC2REF; + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM Hall Sensor interface + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral 
Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->HallSensor_MspDeInitCallback == NULL) + { + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; + } + /* DeInit the low level hardware */ + htim->HallSensor_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIMEx_HallSensor_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Hall Sensor MSP. + * @param htim TIM Hall Sensor Interface handle + * @retval None + */ +__weak void HAL_TIMEx_HallSensor_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_HallSensor_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Hall Sensor MSP. + * @param htim TIM Hall Sensor Interface handle + * @retval None + */ +__weak void HAL_TIMEx_HallSensor_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_HallSensor_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Hall Sensor Interface. + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Enable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Hall sensor Interface. + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1, 2 and 3 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Hall Sensor Interface in interrupt mode. 
+ * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_IT(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Enable the capture compare Interrupts 1 event */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + + /* Enable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Hall Sensor Interface in interrupt mode. + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_IT(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts event */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Hall Sensor Interface in DMA mode. + * @param htim TIM Hall Sensor Interface handle + * @param pData The destination Buffer address. + * @param Length The length of data to be transferred from TIM peripheral to memory. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if (((uint32_t)pData == 0U) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + /* Enable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Set the DMA Input Capture 1 Callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream for Capture 1*/ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the capture compare 1 Interrupt */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Hall Sensor Interface in DMA mode. + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_DMA(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + + /* Disable the capture compare Interrupts 1 event */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group2 Extended Timer Complementary Output Compare functions + * @brief Timer Complementary Output Compare functions + * +@verbatim + ============================================================================== + ##### Timer Complementary Output Compare functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Start the Complementary Output Compare/PWM. + (+) Stop the Complementary Output Compare/PWM. + (+) Start the Complementary Output Compare/PWM and enable interrupts. + (+) Stop the Complementary Output Compare/PWM and disable interrupts. + (+) Start the Complementary Output Compare/PWM and enable DMA transfers. + (+) Stop the Complementary Output Compare/PWM and disable DMA transfers. + +@endverbatim + * @{ + */ + +/** + * @brief Starts the TIM Output Compare signal generation on the complementary + * output. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Output Compare signal generation on the complementary + * output. + * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Output Compare signal generation in interrupt mode + * on the complementary output. 
+ * @param htim TIM OC handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Output Compare interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Output Compare interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Output Compare interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + + default: + break; + } + + /* Enable the TIM Break interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_BREAK); + + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Output Compare signal generation in interrupt mode + * on the complementary output. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpccer; + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Output Compare interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Output Compare interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Output Compare interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + default: + break; + } + + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the TIM Break interrupt (only if no more channel is active) */ + tmpccer = htim->Instance->CCER; + if ((tmpccer & (TIM_CCER_CC1NE | TIM_CCER_CC2NE | TIM_CCER_CC3NE)) == (uint32_t)RESET) + { + __HAL_TIM_DISABLE_IT(htim, TIM_IT_BREAK); + } + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Output Compare signal generation in DMA mode + * on the complementary output. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @param pData The source Buffer address. 
+ * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if (((uint32_t)pData == 0U) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Output Compare DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Output Compare DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Output Compare DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + default: + break; + } + + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Output Compare signal generation in DMA mode + * on the complementary output. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Output Compare DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Output Compare DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Output Compare DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + default: + break; + } + + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group3 Extended Timer Complementary PWM functions + * @brief Timer Complementary PWM functions + * +@verbatim + ============================================================================== + ##### Timer Complementary PWM functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Start the Complementary PWM. + (+) Stop the Complementary PWM. + (+) Start the Complementary PWM and enable interrupts. + (+) Stop the Complementary PWM and disable interrupts. + (+) Start the Complementary PWM and enable DMA transfers. + (+) Stop the Complementary PWM and disable DMA transfers. + (+) Start the Complementary Input Capture measurement. + (+) Stop the Complementary Input Capture. + (+) Start the Complementary Input Capture and enable interrupts. + (+) Stop the Complementary Input Capture and disable interrupts. + (+) Start the Complementary Input Capture and enable DMA transfers. + (+) Stop the Complementary Input Capture and disable DMA transfers. + (+) Start the Complementary One Pulse generation. + (+) Stop the Complementary One Pulse. + (+) Start the Complementary One Pulse and enable interrupts. + (+) Stop the Complementary One Pulse and disable interrupts. + +@endverbatim + * @{ + */ + +/** + * @brief Starts the PWM signal generation on the complementary output. 
+ * @param htim TIM handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the PWM signal generation on the complementary output. + * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the PWM signal generation in interrupt mode on the + * complementary output. + * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + default: + break; + } + + /* Enable the TIM Break interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_BREAK); + + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the PWM signal generation in interrupt mode on the + * complementary output. 
+ * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpccer; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + default: + break; + } + + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the TIM Break interrupt (only if no more channel is active) */ + tmpccer = htim->Instance->CCER; + if ((tmpccer & (TIM_CCER_CC1NE | TIM_CCER_CC2NE | TIM_CCER_CC3NE)) == (uint32_t)RESET) + { + __HAL_TIM_DISABLE_IT(htim, TIM_IT_BREAK); + } + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM PWM signal generation in DMA mode on the + * complementary output + * @param htim TIM handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @param pData The source Buffer address. 
+ * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if (((uint32_t)pData == 0U) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, Length) != HAL_OK) + { + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + default: + break; + } + + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM PWM signal generation in DMA mode on the complementary + * output + * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + 
+ switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + default: + break; + } + + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group4 Extended Timer Complementary One Pulse functions + * @brief Timer Complementary One Pulse functions + * +@verbatim + ============================================================================== + ##### Timer Complementary One Pulse functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Start the Complementary One Pulse generation. + (+) Stop the Complementary One Pulse. + (+) Start the Complementary One Pulse and enable interrupts. + (+) Stop the Complementary One Pulse and disable interrupts. + +@endverbatim + * @{ + */ + +/** + * @brief Starts the TIM One Pulse signal generation on the complementary + * output. + * @param htim TIM One Pulse handle + * @param OutputChannel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Enable the complementary One Pulse output */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation on the complementary + * output. + * @param htim TIM One Pulse handle + * @param OutputChannel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Disable the complementary One Pulse output */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM One Pulse signal generation in interrupt mode on the + * complementary channel. 
+ * @param htim TIM One Pulse handle + * @param OutputChannel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + + /* Enable the complementary One Pulse output */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation in interrupt mode on the + * complementary channel. + * @param htim TIM One Pulse handle + * @param OutputChannel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + + /* Disable the complementary One Pulse output */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group5 Extended Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Configure the commutation event in case of use of the Hall sensor interface. + (+) Configure Output channels for OC and PWM mode. + + (+) Configure Complementary channels, break features and dead time. + (+) Configure Master synchronization. + (+) Configure timer remapping capabilities. + (+) Enable or disable channel grouping. + +@endverbatim + * @{ + */ + +/** + * @brief Configure the TIM commutation event sequence. + * @note This function is mandatory to use the commutation event in order to + * update the configuration at each commutation detection on the TRGI input of the Timer, + * the typical use of this feature is with the use of another Timer(interface Timer) + * configured in Hall sensor interface, this interface Timer will generate the + * commutation at its TRGO output (connected to Timer used in this function) each time + * the TI1 of the Interface Timer detect a commutation at its input TI1. 
+ * @param htim TIM handle + * @param InputTrigger the Internal trigger corresponding to the Timer Interfacing with the Hall sensor + * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal trigger 0 selected + * @arg TIM_TS_ITR1: Internal trigger 1 selected + * @arg TIM_TS_ITR2: Internal trigger 2 selected + * @arg TIM_TS_ITR3: Internal trigger 3 selected + * @arg TIM_TS_NONE: No trigger is needed + * @param CommutationSource the Commutation Event source + * This parameter can be one of the following values: + * @arg TIM_COMMUTATION_TRGI: Commutation source is the TRGI of the Interface Timer + * @arg TIM_COMMUTATION_SOFTWARE: Commutation source is set by software using the COMG bit + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_COMMUTATION_EVENT_INSTANCE(htim->Instance)); + assert_param(IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(InputTrigger)); + + __HAL_LOCK(htim); + + if ((InputTrigger == TIM_TS_ITR0) || (InputTrigger == TIM_TS_ITR1) || + (InputTrigger == TIM_TS_ITR2) || (InputTrigger == TIM_TS_ITR3)) + { + /* Select the Input trigger */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= InputTrigger; + } + + /* Select the Capture Compare preload feature */ + htim->Instance->CR2 |= TIM_CR2_CCPC; + /* Select the Commutation event source */ + htim->Instance->CR2 &= ~TIM_CR2_CCUS; + htim->Instance->CR2 |= CommutationSource; + + /* Disable Commutation Interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_COM); + + /* Disable Commutation DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_COM); + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configure the TIM commutation event sequence with interrupt. + * @note This function is mandatory to use the commutation event in order to + * update the configuration at each commutation detection on the TRGI input of the Timer, + * the typical use of this feature is with the use of another Timer(interface Timer) + * configured in Hall sensor interface, this interface Timer will generate the + * commutation at its TRGO output (connected to Timer used in this function) each time + * the TI1 of the Interface Timer detect a commutation at its input TI1. 
+ * @param htim TIM handle + * @param InputTrigger the Internal trigger corresponding to the Timer Interfacing with the Hall sensor + * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal trigger 0 selected + * @arg TIM_TS_ITR1: Internal trigger 1 selected + * @arg TIM_TS_ITR2: Internal trigger 2 selected + * @arg TIM_TS_ITR3: Internal trigger 3 selected + * @arg TIM_TS_NONE: No trigger is needed + * @param CommutationSource the Commutation Event source + * This parameter can be one of the following values: + * @arg TIM_COMMUTATION_TRGI: Commutation source is the TRGI of the Interface Timer + * @arg TIM_COMMUTATION_SOFTWARE: Commutation source is set by software using the COMG bit + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_IT(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_COMMUTATION_EVENT_INSTANCE(htim->Instance)); + assert_param(IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(InputTrigger)); + + __HAL_LOCK(htim); + + if ((InputTrigger == TIM_TS_ITR0) || (InputTrigger == TIM_TS_ITR1) || + (InputTrigger == TIM_TS_ITR2) || (InputTrigger == TIM_TS_ITR3)) + { + /* Select the Input trigger */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= InputTrigger; + } + + /* Select the Capture Compare preload feature */ + htim->Instance->CR2 |= TIM_CR2_CCPC; + /* Select the Commutation event source */ + htim->Instance->CR2 &= ~TIM_CR2_CCUS; + htim->Instance->CR2 |= CommutationSource; + + /* Disable Commutation DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_COM); + + /* Enable the Commutation Interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_COM); + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configure the TIM commutation event sequence with DMA. + * @note This function is mandatory to use the commutation event in order to + * update the configuration at each commutation detection on the TRGI input of the Timer, + * the typical use of this feature is with the use of another Timer(interface Timer) + * configured in Hall sensor interface, this interface Timer will generate the + * commutation at its TRGO output (connected to Timer used in this function) each time + * the TI1 of the Interface Timer detect a commutation at its input TI1. 
+ * @note The user should configure the DMA in his own software, in This function only the COMDE bit is set + * @param htim TIM handle + * @param InputTrigger the Internal trigger corresponding to the Timer Interfacing with the Hall sensor + * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal trigger 0 selected + * @arg TIM_TS_ITR1: Internal trigger 1 selected + * @arg TIM_TS_ITR2: Internal trigger 2 selected + * @arg TIM_TS_ITR3: Internal trigger 3 selected + * @arg TIM_TS_NONE: No trigger is needed + * @param CommutationSource the Commutation Event source + * This parameter can be one of the following values: + * @arg TIM_COMMUTATION_TRGI: Commutation source is the TRGI of the Interface Timer + * @arg TIM_COMMUTATION_SOFTWARE: Commutation source is set by software using the COMG bit + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_DMA(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_COMMUTATION_EVENT_INSTANCE(htim->Instance)); + assert_param(IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(InputTrigger)); + + __HAL_LOCK(htim); + + if ((InputTrigger == TIM_TS_ITR0) || (InputTrigger == TIM_TS_ITR1) || + (InputTrigger == TIM_TS_ITR2) || (InputTrigger == TIM_TS_ITR3)) + { + /* Select the Input trigger */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= InputTrigger; + } + + /* Select the Capture Compare preload feature */ + htim->Instance->CR2 |= TIM_CR2_CCPC; + /* Select the Commutation event source */ + htim->Instance->CR2 &= ~TIM_CR2_CCUS; + htim->Instance->CR2 |= CommutationSource; + + /* Enable the Commutation DMA Request */ + /* Set the DMA Commutation Callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferCpltCallback = TIMEx_DMACommutationCplt; + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferHalfCpltCallback = TIMEx_DMACommutationHalfCplt; + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferErrorCallback = TIM_DMAError; + + /* Disable Commutation Interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_COM); + + /* Enable the Commutation DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_COM); + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the TIM in master mode. + * @param htim TIM handle. + * @param sMasterConfig pointer to a TIM_MasterConfigTypeDef structure that + * contains the selected trigger output (TRGO) and the Master/Slave + * mode. 
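+ * @note   A minimal usage sketch (htim1 is an assumed handle name). The update
+ *         event is selected as TRGO/TRGO2 so that a slave timer or the ADC can be
+ *         triggered by this timer:
+ * @code
+ *   extern TIM_HandleTypeDef htim1;
+ *   TIM_MasterConfigTypeDef sMasterConfig = {0};
+ *
+ *   sMasterConfig.MasterOutputTrigger  = TIM_TRGO_UPDATE;
+ *   sMasterConfig.MasterOutputTrigger2 = TIM_TRGO2_UPDATE;
+ *   sMasterConfig.MasterSlaveMode      = TIM_MASTERSLAVEMODE_DISABLE;
+ *   HAL_TIMEx_MasterConfigSynchronization(&htim1, &sMasterConfig);
+ * @endcode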
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_MasterConfigSynchronization(TIM_HandleTypeDef *htim, + TIM_MasterConfigTypeDef *sMasterConfig) +{ + uint32_t tmpcr2; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_MASTER_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRGO_SOURCE(sMasterConfig->MasterOutputTrigger)); + assert_param(IS_TIM_MSM_STATE(sMasterConfig->MasterSlaveMode)); + + /* Check input state */ + __HAL_LOCK(htim); + + /* Change the handler state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Get the TIMx CR2 register value */ + tmpcr2 = htim->Instance->CR2; + + /* Get the TIMx SMCR register value */ + tmpsmcr = htim->Instance->SMCR; + + /* If the timer supports ADC synchronization through TRGO2, set the master mode selection 2 */ + if (IS_TIM_TRGO2_INSTANCE(htim->Instance)) + { + /* Check the parameters */ + assert_param(IS_TIM_TRGO2_SOURCE(sMasterConfig->MasterOutputTrigger2)); + + /* Clear the MMS2 bits */ + tmpcr2 &= ~TIM_CR2_MMS2; + /* Select the TRGO2 source*/ + tmpcr2 |= sMasterConfig->MasterOutputTrigger2; + } + + /* Reset the MMS Bits */ + tmpcr2 &= ~TIM_CR2_MMS; + /* Select the TRGO source */ + tmpcr2 |= sMasterConfig->MasterOutputTrigger; + + /* Update TIMx CR2 */ + htim->Instance->CR2 = tmpcr2; + + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + /* Reset the MSM Bit */ + tmpsmcr &= ~TIM_SMCR_MSM; + /* Set master mode */ + tmpsmcr |= sMasterConfig->MasterSlaveMode; + + /* Update TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + } + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the Break feature, dead time, Lock level, OSSI/OSSR State + * and the AOE(automatic output enable). + * @param htim TIM handle + * @param sBreakDeadTimeConfig pointer to a TIM_ConfigBreakDeadConfigTypeDef structure that + * contains the BDTR Register configuration information for the TIM peripheral. + * @note Interrupts can be generated when an active level is detected on the + * break input, the break 2 input or the system break input. Break + * interrupt can be enabled by calling the @ref __HAL_TIM_ENABLE_IT macro. 
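+ * @note   A minimal usage sketch (htim1 and the dead-time value are assumptions for
+ *         this example; the DTG value must be computed for the application from the
+ *         timer kernel clock):
+ * @code
+ *   extern TIM_HandleTypeDef htim1;
+ *   TIM_BreakDeadTimeConfigTypeDef sBreakDeadTimeConfig = {0};
+ *
+ *   sBreakDeadTimeConfig.OffStateRunMode  = TIM_OSSR_DISABLE;
+ *   sBreakDeadTimeConfig.OffStateIDLEMode = TIM_OSSI_DISABLE;
+ *   sBreakDeadTimeConfig.LockLevel        = TIM_LOCKLEVEL_OFF;
+ *   sBreakDeadTimeConfig.DeadTime         = 0x20U;
+ *   sBreakDeadTimeConfig.BreakState       = TIM_BREAK_ENABLE;
+ *   sBreakDeadTimeConfig.BreakPolarity    = TIM_BREAKPOLARITY_HIGH;
+ *   sBreakDeadTimeConfig.BreakFilter      = 0U;
+ *   sBreakDeadTimeConfig.AutomaticOutput  = TIM_AUTOMATICOUTPUT_DISABLE;
+ *   HAL_TIMEx_ConfigBreakDeadTime(&htim1, &sBreakDeadTimeConfig);
+ * @endcode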
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigBreakDeadTime(TIM_HandleTypeDef *htim, + TIM_BreakDeadTimeConfigTypeDef *sBreakDeadTimeConfig) +{ + /* Keep this variable initialized to 0 as it is used to configure BDTR register */ + uint32_t tmpbdtr = 0U; + + /* Check the parameters */ + assert_param(IS_TIM_BREAK_INSTANCE(htim->Instance)); + assert_param(IS_TIM_OSSR_STATE(sBreakDeadTimeConfig->OffStateRunMode)); + assert_param(IS_TIM_OSSI_STATE(sBreakDeadTimeConfig->OffStateIDLEMode)); + assert_param(IS_TIM_LOCK_LEVEL(sBreakDeadTimeConfig->LockLevel)); + assert_param(IS_TIM_DEADTIME(sBreakDeadTimeConfig->DeadTime)); + assert_param(IS_TIM_BREAK_STATE(sBreakDeadTimeConfig->BreakState)); + assert_param(IS_TIM_BREAK_POLARITY(sBreakDeadTimeConfig->BreakPolarity)); + assert_param(IS_TIM_BREAK_FILTER(sBreakDeadTimeConfig->BreakFilter)); + assert_param(IS_TIM_AUTOMATIC_OUTPUT_STATE(sBreakDeadTimeConfig->AutomaticOutput)); + + /* Check input state */ + __HAL_LOCK(htim); + + /* Set the Lock level, the Break enable Bit and the Polarity, the OSSR State, + the OSSI State, the dead time value and the Automatic Output Enable Bit */ + + /* Set the BDTR bits */ + MODIFY_REG(tmpbdtr, TIM_BDTR_DTG, sBreakDeadTimeConfig->DeadTime); + MODIFY_REG(tmpbdtr, TIM_BDTR_LOCK, sBreakDeadTimeConfig->LockLevel); + MODIFY_REG(tmpbdtr, TIM_BDTR_OSSI, sBreakDeadTimeConfig->OffStateIDLEMode); + MODIFY_REG(tmpbdtr, TIM_BDTR_OSSR, sBreakDeadTimeConfig->OffStateRunMode); + MODIFY_REG(tmpbdtr, TIM_BDTR_BKE, sBreakDeadTimeConfig->BreakState); + MODIFY_REG(tmpbdtr, TIM_BDTR_BKP, sBreakDeadTimeConfig->BreakPolarity); + MODIFY_REG(tmpbdtr, TIM_BDTR_AOE, sBreakDeadTimeConfig->AutomaticOutput); + MODIFY_REG(tmpbdtr, TIM_BDTR_BKF, (sBreakDeadTimeConfig->BreakFilter << TIM_BDTR_BKF_Pos)); + + if (IS_TIM_BKIN2_INSTANCE(htim->Instance)) + { + /* Check the parameters */ + assert_param(IS_TIM_BREAK2_STATE(sBreakDeadTimeConfig->Break2State)); + assert_param(IS_TIM_BREAK2_POLARITY(sBreakDeadTimeConfig->Break2Polarity)); + assert_param(IS_TIM_BREAK_FILTER(sBreakDeadTimeConfig->Break2Filter)); + + /* Set the BREAK2 input related BDTR bits */ + MODIFY_REG(tmpbdtr, TIM_BDTR_BK2F, (sBreakDeadTimeConfig->Break2Filter << TIM_BDTR_BK2F_Pos)); + MODIFY_REG(tmpbdtr, TIM_BDTR_BK2E, sBreakDeadTimeConfig->Break2State); + MODIFY_REG(tmpbdtr, TIM_BDTR_BK2P, sBreakDeadTimeConfig->Break2Polarity); + } + + /* Set TIMx_BDTR */ + htim->Instance->BDTR = tmpbdtr; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} +#if defined(TIM_BREAK_INPUT_SUPPORT) + +/** + * @brief Configures the break input source. + * @param htim TIM handle. 
+ * @param BreakInput Break input to configure + * This parameter can be one of the following values: + * @arg TIM_BREAKINPUT_BRK: Timer break input + * @arg TIM_BREAKINPUT_BRK2: Timer break 2 input + * @param sBreakInputConfig Break input source configuration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigBreakInput(TIM_HandleTypeDef *htim, + uint32_t BreakInput, + TIMEx_BreakInputConfigTypeDef *sBreakInputConfig) + +{ + uint32_t tmporx; + uint32_t bkin_enable_mask; + uint32_t bkin_polarity_mask; + uint32_t bkin_enable_bitpos; + uint32_t bkin_polarity_bitpos; + + /* Check the parameters */ + assert_param(IS_TIM_BREAK_INSTANCE(htim->Instance)); + assert_param(IS_TIM_BREAKINPUT(BreakInput)); + assert_param(IS_TIM_BREAKINPUTSOURCE(sBreakInputConfig->Source)); + assert_param(IS_TIM_BREAKINPUTSOURCE_STATE(sBreakInputConfig->Enable)); +#if defined(DFSDM1_Channel0) + if (sBreakInputConfig->Source != TIM_BREAKINPUTSOURCE_DFSDM1) + { + assert_param(IS_TIM_BREAKINPUTSOURCE_POLARITY(sBreakInputConfig->Polarity)); + } +#else + assert_param(IS_TIM_BREAKINPUTSOURCE_POLARITY(sBreakInputConfig->Polarity)); +#endif /* DFSDM1_Channel0 */ + + /* Check input state */ + __HAL_LOCK(htim); + + switch (sBreakInputConfig->Source) + { + case TIM_BREAKINPUTSOURCE_BKIN: + { + bkin_enable_mask = TIM1_AF1_BKINE; + bkin_enable_bitpos = 0; + bkin_polarity_mask = TIM1_AF1_BKINP; + bkin_polarity_bitpos = 9; + break; + } + + case TIM_BREAKINPUTSOURCE_DFSDM1: + { + bkin_enable_mask = TIM1_AF1_BKDF1BKE; + bkin_enable_bitpos = 8; + bkin_polarity_mask = 0U; + bkin_polarity_bitpos = 0U; + break; + } + + default: + { + bkin_enable_mask = 0U; + bkin_polarity_mask = 0U; + bkin_enable_bitpos = 0U; + bkin_polarity_bitpos = 0U; + break; + } + } + + switch (BreakInput) + { + case TIM_BREAKINPUT_BRK: + { + /* Get the TIMx_AF1 register value */ + tmporx = htim->Instance->AF1; + + /* Enable the break input */ + tmporx &= ~bkin_enable_mask; + tmporx |= (sBreakInputConfig->Enable << bkin_enable_bitpos) & bkin_enable_mask; + + /* Set the break input polarity */ +#if defined(DFSDM1_Channel0) + if (sBreakInputConfig->Source != TIM_BREAKINPUTSOURCE_DFSDM1) +#endif /* DFSDM1_Channel0 */ + { + tmporx &= ~bkin_polarity_mask; + tmporx |= (sBreakInputConfig->Polarity << bkin_polarity_bitpos) & bkin_polarity_mask; + } + + /* Set TIMx_AF1 */ + htim->Instance->AF1 = tmporx; + break; + } + case TIM_BREAKINPUT_BRK2: + { + /* Get the TIMx_AF2 register value */ + tmporx = htim->Instance->AF2; + + /* Enable the break input */ + tmporx &= ~bkin_enable_mask; + tmporx |= (sBreakInputConfig->Enable << bkin_enable_bitpos) & bkin_enable_mask; + + /* Set the break input polarity */ +#if defined(DFSDM1_Channel0) + if (sBreakInputConfig->Source != TIM_BREAKINPUTSOURCE_DFSDM1) +#endif /* DFSDM1_Channel0 */ + { + tmporx &= ~bkin_polarity_mask; + tmporx |= (sBreakInputConfig->Polarity << bkin_polarity_bitpos) & bkin_polarity_mask; + } + + /* Set TIMx_AF2 */ + htim->Instance->AF2 = tmporx; + break; + } + default: + break; + } + + __HAL_UNLOCK(htim); + + return HAL_OK; +} +#endif /*TIM_BREAK_INPUT_SUPPORT */ + +/** + * @brief Configures the TIMx Remapping input capabilities. + * @param htim TIM handle. + * @param Remap specifies the TIM remapping source. + * This parameter can be one of the following values: + * @arg TIM_TIM2_TIM8_TRGO: TIM2 ITR1 input is connected to TIM8 Trigger output(default) + * @arg TIM_TIM2_ETH_PTP: TIM2 ITR1 input is connected to ETH PTP trigger output. + * @arg TIM_TIM2_USBFS_SOF: TIM2 ITR1 input is connected to USB FS SOF. 
+ * @arg TIM_TIM2_USBHS_SOF: TIM2 ITR1 input is connected to USB HS SOF. + * @arg TIM_TIM5_GPIO: TIM5 CH4 input is connected to dedicated Timer pin(default) + * @arg TIM_TIM5_LSI: TIM5 CH4 input is connected to LSI clock. + * @arg TIM_TIM5_LSE: TIM5 CH4 input is connected to LSE clock. + * @arg TIM_TIM5_RTC: TIM5 CH4 input is connected to RTC Output event. + * @arg TIM_TIM11_GPIO: TIM11 CH4 input is connected to dedicated Timer pin(default) + * @arg TIM_TIM11_SPDIF: SPDIF Frame synchronous + * @arg TIM_TIM11_HSE: TIM11 CH4 input is connected to HSE_RTC clock + * (HSE divided by a programmable prescaler) + * @arg TIM_TIM11_MCO1: TIM11 CH1 input is connected to MCO1 + * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_RemapConfig(TIM_HandleTypeDef *htim, uint32_t Remap) +{ + __HAL_LOCK(htim); + + /* Check parameters */ + assert_param(IS_TIM_REMAP_INSTANCE(htim->Instance)); + assert_param(IS_TIM_REMAP(Remap)); + + /* Set the Timer remapping configuration */ + htim->Instance->OR = Remap; + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Group channel 5 and channel 1, 2 or 3 + * @param htim TIM handle. + * @param Channels specifies the reference signal(s) the OC5REF is combined with. + * This parameter can be any combination of the following values: + * TIM_GROUPCH5_NONE: No effect of OC5REF on OC1REFC, OC2REFC and OC3REFC + * TIM_GROUPCH5_OC1REFC: OC1REFC is the logical AND of OC1REFC and OC5REF + * TIM_GROUPCH5_OC2REFC: OC2REFC is the logical AND of OC2REFC and OC5REF + * TIM_GROUPCH5_OC3REFC: OC3REFC is the logical AND of OC3REFC and OC5REF + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_GroupChannel5(TIM_HandleTypeDef *htim, uint32_t Channels) +{ + /* Check parameters */ + assert_param(IS_TIM_COMBINED3PHASEPWM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_GROUPCH5(Channels)); + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Clear GC5Cx bit fields */ + htim->Instance->CCR5 &= ~(TIM_CCR5_GC5C3 | TIM_CCR5_GC5C2 | TIM_CCR5_GC5C1); + + /* Set GC5Cx bit fields */ + htim->Instance->CCR5 |= Channels; + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group6 Extended Callbacks functions + * @brief Extended Callbacks functions + * +@verbatim + ============================================================================== + ##### Extended Callbacks functions ##### + ============================================================================== + [..] 
+ This section provides Extended TIM callback functions: + (+) Timer Commutation callback + (+) Timer Break callback + +@endverbatim + * @{ + */ + +/** + * @brief Hall commutation changed callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIMEx_CommutCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_CommutCallback could be implemented in the user file + */ +} +/** + * @brief Hall commutation changed half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIMEx_CommutHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_CommutHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Hall Break detection callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIMEx_BreakCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_BreakCallback could be implemented in the user file + */ +} + +/** + * @brief Hall Break2 detection callback in non blocking mode + * @param htim: TIM handle + * @retval None + */ +__weak void HAL_TIMEx_Break2Callback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_TIMEx_Break2Callback could be implemented in the user file + */ +} +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group7 Extended Peripheral State functions + * @brief Extended Peripheral State functions + * +@verbatim + ============================================================================== + ##### Extended Peripheral State functions ##### + ============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Return the TIM Hall Sensor interface handle state. + * @param htim TIM Hall Sensor handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup TIMEx_Private_Functions TIMEx Private Functions + * @{ + */ + +/** + * @brief TIM DMA Commutation callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIMEx_DMACommutationCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->CommutationCallback(htim); +#else + HAL_TIMEx_CommutCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Commutation half complete callback. + * @param hdma pointer to DMA handle. 
+ * @retval None + */ +void TIMEx_DMACommutationHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->CommutationHalfCpltCallback(htim); +#else + HAL_TIMEx_CommutHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + + +/** + * @brief Enables or disables the TIM Capture Compare Channel xN. + * @param TIMx to select the TIM peripheral + * @param Channel specifies the TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @param ChannelNState specifies the TIM Channel CCxNE bit new state. + * This parameter can be: TIM_CCxN_ENABLE or TIM_CCxN_Disable. + * @retval None + */ +static void TIM_CCxNChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelNState) +{ + uint32_t tmp; + + tmp = TIM_CCER_CC1NE << (Channel & 0x1FU); /* 0x1FU = 31 bits max shift */ + + /* Reset the CCxNE Bit */ + TIMx->CCER &= ~tmp; + + /* Set or reset the CCxNE Bit */ + TIMx->CCER |= (uint32_t)(ChannelNState << (Channel & 0x1FU)); /* 0x1FU = 31 bits max shift */ +} +/** + * @} + */ + +#endif /* HAL_TIM_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c new file mode 100644 index 000000000..0e557de5e --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c @@ -0,0 +1,3618 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_uart.c + * @author MCD Application Team + * @brief UART HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Universal Asynchronous Receiver Transmitter Peripheral (UART). + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + * + @verbatim + =============================================================================== + ##### How to use this driver ##### + =============================================================================== + [..] + The UART HAL driver can be used as follows: + + (#) Declare a UART_HandleTypeDef handle structure (eg. UART_HandleTypeDef huart). + (#) Initialize the UART low level resources by implementing the HAL_UART_MspInit() API: + (++) Enable the USARTx interface clock. + (++) UART pins configuration: + (+++) Enable the clock for the UART GPIOs. + (+++) Configure these UART pins as alternate function pull-up. + (++) NVIC configuration if you need to use interrupt process (HAL_UART_Transmit_IT() + and HAL_UART_Receive_IT() APIs): + (+++) Configure the USARTx interrupt priority. + (+++) Enable the NVIC USART IRQ handle. + (++) UART interrupts handling: + -@@- The specific UART interrupts (Transmission complete interrupt, + RXNE interrupt, RX/TX FIFOs related interrupts and Error Interrupts) + are managed using the macros __HAL_UART_ENABLE_IT() and __HAL_UART_DISABLE_IT() + inside the transmit and receive processes. 
+ (++) DMA Configuration if you need to use DMA process (HAL_UART_Transmit_DMA() + and HAL_UART_Receive_DMA() APIs): + (+++) Declare a DMA handle structure for the Tx/Rx channel. + (+++) Enable the DMAx interface clock. + (+++) Configure the declared DMA handle structure with the required Tx/Rx parameters. + (+++) Configure the DMA Tx/Rx channel. + (+++) Associate the initialized DMA handle to the UART DMA Tx/Rx handle. + (+++) Configure the priority and enable the NVIC for the transfer complete interrupt on the DMA Tx/Rx channel. + + (#) Program the Baud Rate, Word Length, Stop Bit, Parity, Hardware + flow control and Mode (Receiver/Transmitter) in the huart handle Init structure. + + (#) If required, program UART advanced features (TX/RX pins swap, auto Baud rate detection,...) + in the huart handle AdvancedInit structure. + + (#) For the UART asynchronous mode, initialize the UART registers by calling + the HAL_UART_Init() API. + + (#) For the UART Half duplex mode, initialize the UART registers by calling + the HAL_HalfDuplex_Init() API. + + (#) For the UART LIN (Local Interconnection Network) mode, initialize the UART registers + by calling the HAL_LIN_Init() API. + + (#) For the UART Multiprocessor mode, initialize the UART registers + by calling the HAL_MultiProcessor_Init() API. + + (#) For the UART RS485 Driver Enabled mode, initialize the UART registers + by calling the HAL_RS485Ex_Init() API. + + [..] + (@) These API's (HAL_UART_Init(), HAL_HalfDuplex_Init(), HAL_LIN_Init(), HAL_MultiProcessor_Init(), + also configure the low level Hardware GPIO, CLOCK, CORTEX...etc) by + calling the customized HAL_UART_MspInit() API. + + ##### Callback registration ##### + ================================== + + [..] + The compilation define USE_HAL_UART_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + + [..] + Use Function @ref HAL_UART_RegisterCallback() to register a user callback. + Function @ref HAL_UART_RegisterCallback() allows to register following callbacks: + (+) TxHalfCpltCallback : Tx Half Complete Callback. + (+) TxCpltCallback : Tx Complete Callback. + (+) RxHalfCpltCallback : Rx Half Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) ErrorCallback : Error Callback. + (+) AbortCpltCallback : Abort Complete Callback. + (+) AbortTransmitCpltCallback : Abort Transmit Complete Callback. + (+) AbortReceiveCpltCallback : Abort Receive Complete Callback. + (+) WakeupCallback : Wakeup Callback. + (+) RxFifoFullCallback : Rx Fifo Full Callback. + (+) TxFifoEmptyCallback : Tx Fifo Empty Callback. + (+) MspInitCallback : UART MspInit. + (+) MspDeInitCallback : UART MspDeInit. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + [..] + Use function @ref HAL_UART_UnRegisterCallback() to reset a callback to the default + weak (surcharged) function. + @ref HAL_UART_UnRegisterCallback() takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) TxHalfCpltCallback : Tx Half Complete Callback. + (+) TxCpltCallback : Tx Complete Callback. + (+) RxHalfCpltCallback : Rx Half Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) ErrorCallback : Error Callback. + (+) AbortCpltCallback : Abort Complete Callback. + (+) AbortTransmitCpltCallback : Abort Transmit Complete Callback. + (+) AbortReceiveCpltCallback : Abort Receive Complete Callback. 
+ (+) WakeupCallback : Wakeup Callback. + (+) RxFifoFullCallback : Rx Fifo Full Callback. + (+) TxFifoEmptyCallback : Tx Fifo Empty Callback. + (+) MspInitCallback : UART MspInit. + (+) MspDeInitCallback : UART MspDeInit. + + [..] + By default, after the @ref HAL_UART_Init() and when the state is HAL_UART_STATE_RESET + all callbacks are set to the corresponding weak (surcharged) functions: + examples @ref HAL_UART_TxCpltCallback(), @ref HAL_UART_RxHalfCpltCallback(). + Exception done for MspInit and MspDeInit functions that are respectively + reset to the legacy weak (surcharged) functions in the @ref HAL_UART_Init() + and @ref HAL_UART_DeInit() only when these callbacks are null (not registered beforehand). + If not, MspInit or MspDeInit are not null, the @ref HAL_UART_Init() and @ref HAL_UART_DeInit() + keep and use the user MspInit/MspDeInit callbacks (registered beforehand). + + [..] + Callbacks can be registered/unregistered in HAL_UART_STATE_READY state only. + Exception done MspInit/MspDeInit that can be registered/unregistered + in HAL_UART_STATE_READY or HAL_UART_STATE_RESET state, thus registered (user) + MspInit/DeInit callbacks can be used during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using @ref HAL_UART_RegisterCallback() before calling @ref HAL_UART_DeInit() + or @ref HAL_UART_Init() function. + + [..] + When The compilation define USE_HAL_UART_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available + and weak (surcharged) callbacks are used. + + + @endverbatim + ****************************************************************************** + * @attention + * + *

+  * © Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup UART UART + * @brief HAL UART module driver + * @{ + */ + +#ifdef HAL_UART_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @defgroup UART_Private_Constants UART Private Constants + * @{ + */ +#define USART_CR1_FIELDS ((uint32_t)(USART_CR1_M | USART_CR1_PCE | USART_CR1_PS | \ + USART_CR1_TE | USART_CR1_RE | USART_CR1_OVER8 )) /*!< UART or USART CR1 fields of parameters set by UART_SetConfig API */ + +#define USART_CR3_FIELDS ((uint32_t)(USART_CR3_RTSE | USART_CR3_CTSE | USART_CR3_ONEBIT)) /*!< UART or USART CR3 fields of parameters set by UART_SetConfig API */ + + +#define UART_BRR_MIN 0x10U /* UART BRR minimum authorized value */ +#define UART_BRR_MAX 0x0000FFFFU /* UART BRR maximum authorized value */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup UART_Private_Functions + * @{ + */ +static void UART_EndTxTransfer(UART_HandleTypeDef *huart); +static void UART_EndRxTransfer(UART_HandleTypeDef *huart); +static void UART_DMATransmitCplt(DMA_HandleTypeDef *hdma); +static void UART_DMAReceiveCplt(DMA_HandleTypeDef *hdma); +static void UART_DMARxHalfCplt(DMA_HandleTypeDef *hdma); +static void UART_DMATxHalfCplt(DMA_HandleTypeDef *hdma); +static void UART_DMAError(DMA_HandleTypeDef *hdma); +static void UART_DMAAbortOnError(DMA_HandleTypeDef *hdma); +static void UART_DMATxAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_DMARxAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_DMATxOnlyAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_TxISR_8BIT(UART_HandleTypeDef *huart); +static void UART_TxISR_16BIT(UART_HandleTypeDef *huart); +static void UART_EndTransmit_IT(UART_HandleTypeDef *huart); +static void UART_RxISR_8BIT(UART_HandleTypeDef *huart); +static void UART_RxISR_16BIT(UART_HandleTypeDef *huart); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup UART_Exported_Functions UART Exported Functions + * @{ + */ + +/** @defgroup UART_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim +=============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to initialize the USARTx or the UARTy + in asynchronous mode. 
+ (+) For the asynchronous mode the parameters below can be configured: + (++) Baud Rate + (++) Word Length + (++) Stop Bit + (++) Parity: If the parity is enabled, then the MSB bit of the data written + in the data register is transmitted but is changed by the parity bit. + (++) Hardware flow control + (++) Receiver/transmitter modes + (++) Over Sampling Method + (++) One-Bit Sampling Method + (+) For the asynchronous mode, the following advanced features can be configured as well: + (++) TX and/or RX pin level inversion + (++) data logical level inversion + (++) RX and TX pins swap + (++) RX overrun detection disabling + (++) DMA disabling on RX error + (++) MSB first on communication line + (++) auto Baud rate detection + [..] + The HAL_UART_Init(), HAL_HalfDuplex_Init(), HAL_LIN_Init()and HAL_MultiProcessor_Init()API + follow respectively the UART asynchronous, UART Half duplex, UART LIN mode + and UART multiprocessor mode configuration procedures (details for the procedures + are available in reference manual). + +@endverbatim + + Depending on the frame length defined by the M1 and M0 bits (7-bit, + 8-bit or 9-bit), the possible UART formats are listed in the + following table. + + Table 1. UART frame format. + +-----------------------------------------------------------------------+ + | M1 bit | M0 bit | PCE bit | UART frame | + |---------|---------|-----------|---------------------------------------| + | 0 | 0 | 0 | | SB | 8 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 0 | 1 | | SB | 7 bit data | PB | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 1 | 0 | | SB | 9 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 1 | 1 | | SB | 8 bit data | PB | STB | | + |---------|---------|-----------|---------------------------------------| + | 1 | 0 | 0 | | SB | 7 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 1 | 0 | 1 | | SB | 6 bit data | PB | STB | | + +-----------------------------------------------------------------------+ + + * @{ + */ + +/** + * @brief Initialize the UART mode according to the specified + * parameters in the UART_InitTypeDef and initialize the associated handle. + * @param huart UART handle. 
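+ * @note   A minimal usage sketch (the handle name huart1, the USART1 instance and
+ *         the Error_Handler() function are assumptions for this example; pins,
+ *         clocks and NVIC are expected to be set up in HAL_UART_MspInit()):
+ * @code
+ *   UART_HandleTypeDef huart1 = {0};
+ *
+ *   huart1.Instance                    = USART1;
+ *   huart1.Init.BaudRate               = 115200U;
+ *   huart1.Init.WordLength             = UART_WORDLENGTH_8B;
+ *   huart1.Init.StopBits               = UART_STOPBITS_1;
+ *   huart1.Init.Parity                 = UART_PARITY_NONE;
+ *   huart1.Init.Mode                   = UART_MODE_TX_RX;
+ *   huart1.Init.HwFlowCtl              = UART_HWCONTROL_NONE;
+ *   huart1.Init.OverSampling           = UART_OVERSAMPLING_16;
+ *   huart1.AdvancedInit.AdvFeatureInit = UART_ADVFEATURE_NO_INIT;
+ *   if (HAL_UART_Init(&huart1) != HAL_OK)
+ *   {
+ *     Error_Handler();
+ *   }
+ * @endcode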
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Init(UART_HandleTypeDef *huart) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + if (huart->Init.HwFlowCtl != UART_HWCONTROL_NONE) + { + /* Check the parameters */ + assert_param(IS_UART_HWFLOW_INSTANCE(huart->Instance)); + } + else + { + /* Check the parameters */ + assert_param(IS_UART_INSTANCE(huart->Instance)); + } + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + if (UART_SetConfig(huart) == HAL_ERROR) + { + return HAL_ERROR; + } + + if (huart->AdvancedInit.AdvFeatureInit != UART_ADVFEATURE_NO_INIT) + { + UART_AdvFeatureConfig(huart); + } + + /* In asynchronous mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN, HDSEL and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL | USART_CR3_IREN)); + + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState and huart->RxState to Ready */ + return (UART_CheckIdleState(huart)); +} + +/** + * @brief Initialize the half-duplex mode according to the specified + * parameters in the UART_InitTypeDef and creates the associated handle. + * @param huart UART handle. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_HalfDuplex_Init(UART_HandleTypeDef *huart) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check UART instance */ + assert_param(IS_UART_HALFDUPLEX_INSTANCE(huart->Instance)); + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + if (UART_SetConfig(huart) == HAL_ERROR) + { + return HAL_ERROR; + } + + if (huart->AdvancedInit.AdvFeatureInit != UART_ADVFEATURE_NO_INIT) + { + UART_AdvFeatureConfig(huart); + } + + /* In half-duplex mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_IREN | USART_CR3_SCEN)); + + /* Enable the Half-Duplex mode by setting the HDSEL bit in the CR3 register */ + SET_BIT(huart->Instance->CR3, USART_CR3_HDSEL); + + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState and huart->RxState to Ready */ + return (UART_CheckIdleState(huart)); +} + + +/** + * @brief Initialize the LIN mode according to the specified + * parameters in the UART_InitTypeDef and creates the associated handle. + * @param huart UART handle. + * @param BreakDetectLength Specifies the LIN break detection length. 
+ * This parameter can be one of the following values: + * @arg @ref UART_LINBREAKDETECTLENGTH_10B 10-bit break detection + * @arg @ref UART_LINBREAKDETECTLENGTH_11B 11-bit break detection + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LIN_Init(UART_HandleTypeDef *huart, uint32_t BreakDetectLength) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the LIN UART instance */ + assert_param(IS_UART_LIN_INSTANCE(huart->Instance)); + /* Check the Break detection length parameter */ + assert_param(IS_UART_LIN_BREAK_DETECT_LENGTH(BreakDetectLength)); + + /* LIN mode limited to 16-bit oversampling only */ + if (huart->Init.OverSampling == UART_OVERSAMPLING_8) + { + return HAL_ERROR; + } + /* LIN mode limited to 8-bit data length */ + if (huart->Init.WordLength != UART_WORDLENGTH_8B) + { + return HAL_ERROR; + } + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + if (UART_SetConfig(huart) == HAL_ERROR) + { + return HAL_ERROR; + } + + if (huart->AdvancedInit.AdvFeatureInit != UART_ADVFEATURE_NO_INIT) + { + UART_AdvFeatureConfig(huart); + } + + /* In LIN mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(huart->Instance->CR2, USART_CR2_CLKEN); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_HDSEL | USART_CR3_IREN | USART_CR3_SCEN)); + + /* Enable the LIN mode by setting the LINEN bit in the CR2 register */ + SET_BIT(huart->Instance->CR2, USART_CR2_LINEN); + + /* Set the USART LIN Break detection length. */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_LBDL, BreakDetectLength); + + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState and huart->RxState to Ready */ + return (UART_CheckIdleState(huart)); +} + + +/** + * @brief Initialize the multiprocessor mode according to the specified + * parameters in the UART_InitTypeDef and initialize the associated handle. + * @param huart UART handle. + * @param Address UART node address (4-, 6-, 7- or 8-bit long). + * @param WakeUpMethod Specifies the UART wakeup method. + * This parameter can be one of the following values: + * @arg @ref UART_WAKEUPMETHOD_IDLELINE WakeUp by an idle line detection + * @arg @ref UART_WAKEUPMETHOD_ADDRESSMARK WakeUp by an address mark + * @note If the user resorts to idle line detection wake up, the Address parameter + * is useless and ignored by the initialization function. + * @note If the user resorts to address mark wake up, the address length detection + * is configured by default to 4 bits only. For the UART to be able to + * manage 6-, 7- or 8-bit long addresses detection, the API + * HAL_MultiProcessorEx_AddressLength_Set() must be called after + * HAL_MultiProcessor_Init(). 
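+ * @note   A minimal usage sketch (huart2, the 0x25 node address and the 7-bit
+ *         address length are assumptions for this example; huart2.Init is expected
+ *         to be filled beforehand as for HAL_UART_Init()):
+ * @code
+ *   extern UART_HandleTypeDef huart2;
+ *
+ *   HAL_MultiProcessor_Init(&huart2, 0x25U, UART_WAKEUPMETHOD_ADDRESSMARK);
+ *   HAL_MultiProcessorEx_AddressLength_Set(&huart2, UART_ADDRESS_DETECT_7B);
+ * @endcode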
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessor_Init(UART_HandleTypeDef *huart, uint8_t Address, uint32_t WakeUpMethod) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the wake up method parameter */ + assert_param(IS_UART_WAKEUPMETHOD(WakeUpMethod)); + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + if (UART_SetConfig(huart) == HAL_ERROR) + { + return HAL_ERROR; + } + + if (huart->AdvancedInit.AdvFeatureInit != UART_ADVFEATURE_NO_INIT) + { + UART_AdvFeatureConfig(huart); + } + + /* In multiprocessor mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN, HDSEL and IREN bits in the USART_CR3 register. */ + CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL | USART_CR3_IREN)); + + if (WakeUpMethod == UART_WAKEUPMETHOD_ADDRESSMARK) + { + /* If address mark wake up method is chosen, set the USART address node */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_ADD, ((uint32_t)Address << UART_CR2_ADDRESS_LSB_POS)); + } + + /* Set the wake up method by setting the WAKE bit in the CR1 register */ + MODIFY_REG(huart->Instance->CR1, USART_CR1_WAKE, WakeUpMethod); + + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState and huart->RxState to Ready */ + return (UART_CheckIdleState(huart)); +} + + +/** + * @brief DeInitialize the UART peripheral. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DeInit(UART_HandleTypeDef *huart) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_UART_INSTANCE(huart->Instance)); + + huart->gState = HAL_UART_STATE_BUSY; + + __HAL_UART_DISABLE(huart); + + huart->Instance->CR1 = 0x0U; + huart->Instance->CR2 = 0x0U; + huart->Instance->CR3 = 0x0U; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + if (huart->MspDeInitCallback == NULL) + { + huart->MspDeInitCallback = HAL_UART_MspDeInit; + } + /* DeInit the low level hardware */ + huart->MspDeInitCallback(huart); +#else + /* DeInit the low level hardware */ + HAL_UART_MspDeInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_RESET; + huart->RxState = HAL_UART_STATE_RESET; + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Initialize the UART MSP. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_MspInit(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_MspInit can be implemented in the user file + */ +} + +/** + * @brief DeInitialize the UART MSP. + * @param huart UART handle. 
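+ * @note   A possible user-side implementation sketch (USART1 on PA9/PA10 is an
+ *         assumption taken for this example; the actual instance and pins depend
+ *         on the board):
+ * @code
+ *   void HAL_UART_MspDeInit(UART_HandleTypeDef *huart)
+ *   {
+ *     if (huart->Instance == USART1)
+ *     {
+ *       __HAL_RCC_USART1_CLK_DISABLE();
+ *       HAL_GPIO_DeInit(GPIOA, GPIO_PIN_9 | GPIO_PIN_10);
+ *       HAL_NVIC_DisableIRQ(USART1_IRQn);
+ *     }
+ *   }
+ * @endcode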
+ * @retval None + */ +__weak void HAL_UART_MspDeInit(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_MspDeInit can be implemented in the user file + */ +} + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User UART Callback + * To be used instead of the weak predefined callback + * @param huart uart handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_UART_TX_HALFCOMPLETE_CB_ID Tx Half Complete Callback ID + * @arg @ref HAL_UART_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_UART_RX_HALFCOMPLETE_CB_ID Rx Half Complete Callback ID + * @arg @ref HAL_UART_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_UART_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_UART_ABORT_COMPLETE_CB_ID Abort Complete Callback ID + * @arg @ref HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID Abort Transmit Complete Callback ID + * @arg @ref HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID Abort Receive Complete Callback ID + * @arg @ref HAL_UART_WAKEUP_CB_ID Wakeup Callback ID + * @arg @ref HAL_UART_RX_FIFO_FULL_CB_ID Rx Fifo Full Callback ID + * @arg @ref HAL_UART_TX_FIFO_EMPTY_CB_ID Tx Fifo Empty Callback ID + * @arg @ref HAL_UART_MSPINIT_CB_ID MspInit Callback ID + * @arg @ref HAL_UART_MSPDEINIT_CB_ID MspDeInit Callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID, + pUART_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + if (huart->gState == HAL_UART_STATE_READY) + { + switch (CallbackID) + { + case HAL_UART_TX_HALFCOMPLETE_CB_ID : + huart->TxHalfCpltCallback = pCallback; + break; + + case HAL_UART_TX_COMPLETE_CB_ID : + huart->TxCpltCallback = pCallback; + break; + + case HAL_UART_RX_HALFCOMPLETE_CB_ID : + huart->RxHalfCpltCallback = pCallback; + break; + + case HAL_UART_RX_COMPLETE_CB_ID : + huart->RxCpltCallback = pCallback; + break; + + case HAL_UART_ERROR_CB_ID : + huart->ErrorCallback = pCallback; + break; + + case HAL_UART_ABORT_COMPLETE_CB_ID : + huart->AbortCpltCallback = pCallback; + break; + + case HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID : + huart->AbortTransmitCpltCallback = pCallback; + break; + + case HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID : + huart->AbortReceiveCpltCallback = pCallback; + break; + + case HAL_UART_WAKEUP_CB_ID : + huart->WakeupCallback = pCallback; + break; + + + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = pCallback; + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = pCallback; + break; + + default : + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + break; + } + } + else if (huart->gState == HAL_UART_STATE_RESET) + { + switch (CallbackID) + { + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = pCallback; + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = pCallback; + break; + + default : + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + break; + } + } + else + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + } + + __HAL_UNLOCK(huart); + + return status; +} + +/** + * 
@brief Unregister an UART Callback + * UART callaback is redirected to the weak predefined callback + * @param huart uart handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_UART_TX_HALFCOMPLETE_CB_ID Tx Half Complete Callback ID + * @arg @ref HAL_UART_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_UART_RX_HALFCOMPLETE_CB_ID Rx Half Complete Callback ID + * @arg @ref HAL_UART_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_UART_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_UART_ABORT_COMPLETE_CB_ID Abort Complete Callback ID + * @arg @ref HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID Abort Transmit Complete Callback ID + * @arg @ref HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID Abort Receive Complete Callback ID + * @arg @ref HAL_UART_WAKEUP_CB_ID Wakeup Callback ID + * @arg @ref HAL_UART_RX_FIFO_FULL_CB_ID Rx Fifo Full Callback ID + * @arg @ref HAL_UART_TX_FIFO_EMPTY_CB_ID Tx Fifo Empty Callback ID + * @arg @ref HAL_UART_MSPINIT_CB_ID MspInit Callback ID + * @arg @ref HAL_UART_MSPDEINIT_CB_ID MspDeInit Callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + __HAL_LOCK(huart); + + if (HAL_UART_STATE_READY == huart->gState) + { + switch (CallbackID) + { + case HAL_UART_TX_HALFCOMPLETE_CB_ID : + huart->TxHalfCpltCallback = HAL_UART_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ + break; + + case HAL_UART_TX_COMPLETE_CB_ID : + huart->TxCpltCallback = HAL_UART_TxCpltCallback; /* Legacy weak TxCpltCallback */ + break; + + case HAL_UART_RX_HALFCOMPLETE_CB_ID : + huart->RxHalfCpltCallback = HAL_UART_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ + break; + + case HAL_UART_RX_COMPLETE_CB_ID : + huart->RxCpltCallback = HAL_UART_RxCpltCallback; /* Legacy weak RxCpltCallback */ + break; + + case HAL_UART_ERROR_CB_ID : + huart->ErrorCallback = HAL_UART_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_UART_ABORT_COMPLETE_CB_ID : + huart->AbortCpltCallback = HAL_UART_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + break; + + case HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID : + huart->AbortTransmitCpltCallback = HAL_UART_AbortTransmitCpltCallback; /* Legacy weak AbortTransmitCpltCallback */ + break; + + case HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID : + huart->AbortReceiveCpltCallback = HAL_UART_AbortReceiveCpltCallback; /* Legacy weak AbortReceiveCpltCallback */ + break; + +#if defined(USART_CR1_UESM) + case HAL_UART_WAKEUP_CB_ID : + huart->WakeupCallback = HAL_UARTEx_WakeupCallback; /* Legacy weak WakeupCallback */ + break; + +#endif /* USART_CR1_UESM */ + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = HAL_UART_MspInit; /* Legacy weak MspInitCallback */ + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = HAL_UART_MspDeInit; /* Legacy weak MspDeInitCallback */ + break; + + default : + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + break; + } + } + else if (HAL_UART_STATE_RESET == huart->gState) + { + switch (CallbackID) + { + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = HAL_UART_MspInit; + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = HAL_UART_MspDeInit; + break; + + default : + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + break; + } + } + else + { + huart->ErrorCode |= 
HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + } + + __HAL_UNLOCK(huart); + + return status; +} +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup UART_Exported_Functions_Group2 IO operation functions + * @brief UART Transmit/Receive functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + This subsection provides a set of functions allowing to manage the UART asynchronous + and Half duplex data transfers. + + (#) There are two mode of transfer: + (+) Blocking mode: The communication is performed in polling mode. + The HAL status of all data processing is returned by the same function + after finishing transfer. + (+) Non-Blocking mode: The communication is performed using Interrupts + or DMA, These API's return the HAL status. + The end of the data processing will be indicated through the + dedicated UART IRQ when using Interrupt mode or the DMA IRQ when + using DMA mode. + The HAL_UART_TxCpltCallback(), HAL_UART_RxCpltCallback() user callbacks + will be executed respectively at the end of the transmit or Receive process + The HAL_UART_ErrorCallback()user callback will be executed when a communication error is detected + + (#) Blocking mode API's are : + (+) HAL_UART_Transmit() + (+) HAL_UART_Receive() + + (#) Non-Blocking mode API's with Interrupt are : + (+) HAL_UART_Transmit_IT() + (+) HAL_UART_Receive_IT() + (+) HAL_UART_IRQHandler() + + (#) Non-Blocking mode API's with DMA are : + (+) HAL_UART_Transmit_DMA() + (+) HAL_UART_Receive_DMA() + (+) HAL_UART_DMAPause() + (+) HAL_UART_DMAResume() + (+) HAL_UART_DMAStop() + + (#) A set of Transfer Complete Callbacks are provided in Non_Blocking mode: + (+) HAL_UART_TxHalfCpltCallback() + (+) HAL_UART_TxCpltCallback() + (+) HAL_UART_RxHalfCpltCallback() + (+) HAL_UART_RxCpltCallback() + (+) HAL_UART_ErrorCallback() + + (#) Non-Blocking mode transfers could be aborted using Abort API's : + (+) HAL_UART_Abort() + (+) HAL_UART_AbortTransmit() + (+) HAL_UART_AbortReceive() + (+) HAL_UART_Abort_IT() + (+) HAL_UART_AbortTransmit_IT() + (+) HAL_UART_AbortReceive_IT() + + (#) For Abort services based on interrupts (HAL_UART_Abortxxx_IT), a set of Abort Complete Callbacks are provided: + (+) HAL_UART_AbortCpltCallback() + (+) HAL_UART_AbortTransmitCpltCallback() + (+) HAL_UART_AbortReceiveCpltCallback() +#if defined(USART_CR1_UESM) + + (#) Wakeup from Stop mode Callback: + (+) HAL_UARTEx_WakeupCallback() +#endif + + (#) In Non-Blocking mode transfers, possible errors are split into 2 categories. + Errors are handled as follows : + (+) Error is considered as Recoverable and non blocking : Transfer could go till end, but error severity is + to be evaluated by user : this concerns Frame Error, Parity Error or Noise Error in Interrupt mode reception . + Received character is then retrieved and stored in Rx buffer, Error code is set to allow user to identify error type, + and HAL_UART_ErrorCallback() user callback is executed. Transfer is kept ongoing on UART side. + If user wants to abort it, Abort services should be called by user. + (+) Error is considered as Blocking : Transfer could not be completed properly and is aborted. + This concerns Overrun Error In Interrupt mode reception and all errors in DMA mode. + Error code is set to allow user to identify error type, and HAL_UART_ErrorCallback() user callback is executed. 
+ + -@- In the Half duplex communication, it is forbidden to run the transmit + and receive process in parallel, the UART state HAL_UART_STATE_BUSY_TX_RX can't be useful. + +@endverbatim + * @{ + */ + +/** + * @brief Send an amount of data in blocking mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the sent data is handled as a set of u16. In this case, Size must indicate the number + * of u16 provided through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be sent. + * @param Timeout Timeout duration. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint8_t *pdata8bits; + uint16_t *pdata16bits; + uint32_t tickstart; + + /* Check that a Tx process is not already ongoing */ + if (huart->gState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_BUSY_TX; + + /* Init tickstart for timeout managment*/ + tickstart = HAL_GetTick(); + + huart->TxXferSize = Size; + huart->TxXferCount = Size; + + /* In case of 9bits/No Parity transfer, pData needs to be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } + + __HAL_UNLOCK(huart); + + while (huart->TxXferCount > 0U) + { + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TXE, RESET, tickstart, Timeout) != HAL_OK) + { + return HAL_TIMEOUT; + } + if (pdata8bits == NULL) + { + huart->Instance->TDR = (uint16_t)(*pdata16bits & 0x01FFU); + pdata16bits++; + } + else + { + huart->Instance->TDR = (uint8_t)(*pdata8bits & 0xFFU); + pdata8bits++; + } + huart->TxXferCount--; + } + + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TC, RESET, tickstart, Timeout) != HAL_OK) + { + return HAL_TIMEOUT; + } + + /* At end of Tx process, restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in blocking mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of u16. In this case, Size must indicate the number + * of u16 available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. + * @param Timeout Timeout duration. 
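+  * @note  Illustrative call (the handle name huart1 and the buffer rxbuf are user-side
+  *        assumptions, not part of this driver): HAL_UART_Receive(&huart1, rxbuf, sizeof(rxbuf), 500U)
+  *        returns HAL_OK once Size elements have been stored in rxbuf, or HAL_TIMEOUT if the
+  *        expected data do not all arrive before Timeout milliseconds have elapsed since the call.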
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint8_t *pdata8bits; + uint16_t *pdata16bits; + uint16_t uhMask; + uint32_t tickstart; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + + /* Init tickstart for timeout managment*/ + tickstart = HAL_GetTick(); + + huart->RxXferSize = Size; + huart->RxXferCount = Size; + + /* Computation of UART mask to apply to RDR register */ + UART_MASK_COMPUTATION(huart); + uhMask = huart->Mask; + + /* In case of 9bits/No Parity transfer, pRxData needs to be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } + + __HAL_UNLOCK(huart); + + /* as long as data have to be received */ + while (huart->RxXferCount > 0U) + { + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_RXNE, RESET, tickstart, Timeout) != HAL_OK) + { + return HAL_TIMEOUT; + } + if (pdata8bits == NULL) + { + *pdata16bits = (uint16_t)(huart->Instance->RDR & uhMask); + pdata16bits++; + } + else + { + *pdata8bits = (uint8_t)(huart->Instance->RDR & (uint8_t)uhMask); + pdata8bits++; + } + huart->RxXferCount--; + } + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Send an amount of data in interrupt mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the sent data is handled as a set of u16. In this case, Size must indicate the number + * of u16 provided through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be sent. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + /* Check that a Tx process is not already ongoing */ + if (huart->gState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->pTxBuffPtr = pData; + huart->TxXferSize = Size; + huart->TxXferCount = Size; + huart->TxISR = NULL; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_BUSY_TX; + + /* Set the Tx ISR function pointer according to the data word length */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + huart->TxISR = UART_TxISR_16BIT; + } + else + { + huart->TxISR = UART_TxISR_8BIT; + } + + __HAL_UNLOCK(huart); + + /* Enable the Transmit Data Register Empty interrupt */ + SET_BIT(huart->Instance->CR1, USART_CR1_TXEIE); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in interrupt mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of u16. In this case, Size must indicate the number + * of u16 available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). 
+ * @param Size Amount of data elements (u8 or u16) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->pRxBuffPtr = pData; + huart->RxXferSize = Size; + huart->RxXferCount = Size; + huart->RxISR = NULL; + + /* Computation of UART mask to apply to RDR register */ + UART_MASK_COMPUTATION(huart); + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + + /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + SET_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Set the Rx ISR function pointer according to the data word length */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + huart->RxISR = UART_RxISR_16BIT; + } + else + { + huart->RxISR = UART_RxISR_8BIT; + } + + __HAL_UNLOCK(huart); + + /* Enable the UART Parity Error interrupt and Data Register Not Empty interrupt */ + SET_BIT(huart->Instance->CR1, USART_CR1_PEIE | USART_CR1_RXNEIE); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Send an amount of data in DMA mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the sent data is handled as a set of u16. In this case, Size must indicate the number + * of u16 provided through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be sent. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + /* Check that a Tx process is not already ongoing */ + if (huart->gState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->pTxBuffPtr = pData; + huart->TxXferSize = Size; + huart->TxXferCount = Size; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_BUSY_TX; + + if (huart->hdmatx != NULL) + { + /* Set the UART DMA transfer complete callback */ + huart->hdmatx->XferCpltCallback = UART_DMATransmitCplt; + + /* Set the UART DMA Half transfer complete callback */ + huart->hdmatx->XferHalfCpltCallback = UART_DMATxHalfCplt; + + /* Set the DMA error callback */ + huart->hdmatx->XferErrorCallback = UART_DMAError; + + /* Set the DMA abort callback */ + huart->hdmatx->XferAbortCallback = NULL; + + /* Enable the UART transmit DMA channel */ + if (HAL_DMA_Start_IT(huart->hdmatx, (uint32_t)huart->pTxBuffPtr, (uint32_t)&huart->Instance->TDR, Size) != HAL_OK) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + __HAL_UNLOCK(huart); + + /* Restore huart->gState to ready */ + huart->gState = HAL_UART_STATE_READY; + + return HAL_ERROR; + } + } + /* Clear the TC flag in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_TCF); + + __HAL_UNLOCK(huart); + + /* Enable the DMA transfer for transmit request by setting the DMAT bit + in the UART CR3 register */ + SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in DMA mode. 
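+  * @note   When the Rx DMA channel is configured in circular mode, reception restarts
+  *         automatically after Size elements and the half/complete callbacks are raised on
+  *         every wrap. Illustrative start call (handle and buffer names are user-side
+  *         assumptions): HAL_UART_Receive_DMA(&huart1, rxbuf, sizeof(rxbuf));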
+ * @note When the UART parity is enabled (PCE = 1), the received data contain + * the parity bit (MSB position). + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of u16. In this case, Size must indicate the number + * of u16 available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->pRxBuffPtr = pData; + huart->RxXferSize = Size; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + + if (huart->hdmarx != NULL) + { + /* Set the UART DMA transfer complete callback */ + huart->hdmarx->XferCpltCallback = UART_DMAReceiveCplt; + + /* Set the UART DMA Half transfer complete callback */ + huart->hdmarx->XferHalfCpltCallback = UART_DMARxHalfCplt; + + /* Set the DMA error callback */ + huart->hdmarx->XferErrorCallback = UART_DMAError; + + /* Set the DMA abort callback */ + huart->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA channel */ + if (HAL_DMA_Start_IT(huart->hdmarx, (uint32_t)&huart->Instance->RDR, (uint32_t)huart->pRxBuffPtr, Size) != HAL_OK) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + __HAL_UNLOCK(huart); + + /* Restore huart->gState to ready */ + huart->gState = HAL_UART_STATE_READY; + + return HAL_ERROR; + } + } + __HAL_UNLOCK(huart); + + /* Enable the UART Parity Error Interrupt */ + SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); + + /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + SET_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Enable the DMA transfer for the receiver request by setting the DMAR bit + in the UART CR3 register */ + SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Pause the DMA Transfer. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DMAPause(UART_HandleTypeDef *huart) +{ + const HAL_UART_StateTypeDef gstate = huart->gState; + const HAL_UART_StateTypeDef rxstate = huart->RxState; + + __HAL_LOCK(huart); + + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) && + (gstate == HAL_UART_STATE_BUSY_TX)) + { + /* Disable the UART DMA Tx request */ + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + } + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) && + (rxstate == HAL_UART_STATE_BUSY_RX)) + { + /* Disable PE and ERR (Frame error, noise error, overrun error) interrupts */ + CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the UART DMA Rx request */ + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + } + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Resume the DMA Transfer. + * @param huart UART handle. 
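+  * @note   Intended to be paired with HAL_UART_DMAPause(). Illustrative sequence (the handle
+  *         name is a user-side assumption): HAL_UART_DMAPause(&huart1); ... HAL_UART_DMAResume(&huart1);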
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DMAResume(UART_HandleTypeDef *huart) +{ + __HAL_LOCK(huart); + + if (huart->gState == HAL_UART_STATE_BUSY_TX) + { + /* Enable the UART DMA Tx request */ + SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); + } + if (huart->RxState == HAL_UART_STATE_BUSY_RX) + { + /* Clear the Overrun flag before resuming the Rx transfer */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF); + + /* Reenable PE and ERR (Frame error, noise error, overrun error) interrupts */ + SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); + SET_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Enable the UART DMA Rx request */ + SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); + } + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Stop the DMA Transfer. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart) +{ + /* The Lock is not implemented on this API to allow the user application + to call the HAL UART API under callbacks HAL_UART_TxCpltCallback() / HAL_UART_RxCpltCallback() / + HAL_UART_TxHalfCpltCallback / HAL_UART_RxHalfCpltCallback: + indeed, when HAL_DMA_Abort() API is called, the DMA TX/RX Transfer or Half Transfer complete + interrupt is generated if the DMA transfer interruption occurs at the middle or at the end of + the stream and the corresponding call back is executed. */ + + const HAL_UART_StateTypeDef gstate = huart->gState; + const HAL_UART_StateTypeDef rxstate = huart->RxState; + + /* Stop UART DMA Tx request if ongoing */ + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) && + (gstate == HAL_UART_STATE_BUSY_TX)) + { + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx channel */ + if (huart->hdmatx != NULL) + { + if (HAL_DMA_Abort(huart->hdmatx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmatx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + + UART_EndTxTransfer(huart); + } + + /* Stop UART DMA Rx request if ongoing */ + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) && + (rxstate == HAL_UART_STATE_BUSY_RX)) + { + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel */ + if (huart->hdmarx != NULL) + { + if (HAL_DMA_Abort(huart->hdmarx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmarx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + + UART_EndRxTransfer(huart); + } + + return HAL_OK; +} + +/** + * @brief Abort ongoing transfers (blocking mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx and Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart) +{ + /* Disable TXEIE, TCIE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the UART DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx channel : use blocking DMA Abort API (no callback) */ + if (huart->hdmatx != NULL) + { + /* Set the UART DMA Abort callback to Null. + No call back execution at end of DMA abort procedure */ + huart->hdmatx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmatx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmatx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Disable the UART DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel : use blocking DMA Abort API (no callback) */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback to Null. + No call back execution at end of DMA abort procedure */ + huart->hdmarx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmarx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmarx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Reset Tx and Rx transfer counters */ + huart->TxXferCount = 0U; + huart->RxXferCount = 0U; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + + return HAL_OK; +} + +/** + * @brief Abort ongoing Transmit transfer (blocking mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Tx transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_AbortTransmit(UART_HandleTypeDef *huart) +{ + /* Disable TXEIE and TCIE interrupts */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + + /* Disable the UART DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx channel : use blocking DMA Abort API (no callback) */ + if (huart->hdmatx != NULL) + { + /* Set the UART DMA Abort callback to Null. 
+ No call back execution at end of DMA abort procedure */ + huart->hdmatx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmatx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmatx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Reset Tx transfer counter */ + huart->TxXferCount = 0U; + + + /* Restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Abort ongoing Receive transfer (blocking mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Rx transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_AbortReceive(UART_HandleTypeDef *huart) +{ + /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the UART DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel : use blocking DMA Abort API (no callback) */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback to Null. + No call back execution at end of DMA abort procedure */ + huart->hdmarx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmarx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmarx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Reset Rx transfer counter */ + huart->RxXferCount = 0U; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Abort ongoing transfers (Interrupt mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx and Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) + * - Set handle State to READY + * - At abort completion, call user abort complete callback + * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be + * considered as completed only when user abort complete callback is executed (not when exiting function). 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart) +{ + uint32_t abortcplt = 1U; + + /* Disable interrupts */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If DMA Tx and/or DMA Rx Handles are associated to UART Handle, DMA Abort complete callbacks should be initialised + before any call to DMA Abort functions */ + /* DMA Tx Handle is valid */ + if (huart->hdmatx != NULL) + { + /* Set DMA Abort Complete callback if UART DMA Tx request if enabled. + Otherwise, set it to NULL */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + huart->hdmatx->XferAbortCallback = UART_DMATxAbortCallback; + } + else + { + huart->hdmatx->XferAbortCallback = NULL; + } + } + /* DMA Rx Handle is valid */ + if (huart->hdmarx != NULL) + { + /* Set DMA Abort Complete callback if UART DMA Rx request if enabled. + Otherwise, set it to NULL */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + huart->hdmarx->XferAbortCallback = UART_DMARxAbortCallback; + } + else + { + huart->hdmarx->XferAbortCallback = NULL; + } + } + + /* Disable the UART DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + /* Disable DMA Tx at UART level */ + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx channel : use non blocking DMA Abort API (callback) */ + if (huart->hdmatx != NULL) + { + /* UART Tx DMA Abort callback has already been initialised : + will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(huart->hdmatx) != HAL_OK) + { + huart->hdmatx->XferAbortCallback = NULL; + } + else + { + abortcplt = 0U; + } + } + } + + /* Disable the UART DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel : use non blocking DMA Abort API (callback) */ + if (huart->hdmarx != NULL) + { + /* UART Rx DMA Abort callback has already been initialised : + will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) + { + huart->hdmarx->XferAbortCallback = NULL; + abortcplt = 1U; + } + else + { + abortcplt = 0U; + } + } + } + + /* if no DMA abort complete callback execution is required => call user Abort Complete callback */ + if (abortcplt == 1U) + { + /* Reset Tx and Rx transfer counters */ + huart->TxXferCount = 0U; + huart->RxXferCount = 0U; + + /* Clear ISR function pointers */ + huart->RxISR = NULL; + huart->TxISR = NULL; + + /* Reset errorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort complete callback */ + huart->AbortCpltCallback(huart); +#else + /* Call legacy weak Abort complete callback */ + HAL_UART_AbortCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + + 
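+  /* Otherwise (abortcplt == 0U), at least one DMA abort is still pending: the user Abort
+     complete callback will be invoked later from UART_DMATxAbortCallback() /
+     UART_DMARxAbortCallback() once every requested DMA abort has completed. */
+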
return HAL_OK; +} + +/** + * @brief Abort ongoing Transmit transfer (Interrupt mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Tx transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) + * - Set handle State to READY + * - At abort completion, call user abort complete callback + * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be + * considered as completed only when user abort complete callback is executed (not when exiting function). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_AbortTransmit_IT(UART_HandleTypeDef *huart) +{ + /* Disable interrupts */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + + /* Disable the UART DMA Tx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx channel : use non blocking DMA Abort API (callback) */ + if (huart->hdmatx != NULL) + { + /* Set the UART DMA Abort callback : + will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ + huart->hdmatx->XferAbortCallback = UART_DMATxOnlyAbortCallback; + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(huart->hdmatx) != HAL_OK) + { + /* Call Directly huart->hdmatx->XferAbortCallback function in case of error */ + huart->hdmatx->XferAbortCallback(huart->hdmatx); + } + } + else + { + /* Reset Tx transfer counter */ + huart->TxXferCount = 0U; + + /* Clear TxISR function pointers */ + huart->TxISR = NULL; + + /* Restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Transmit Complete Callback */ + huart->AbortTransmitCpltCallback(huart); +#else + /* Call legacy weak Abort Transmit Complete Callback */ + HAL_UART_AbortTransmitCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + else + { + /* Reset Tx transfer counter */ + huart->TxXferCount = 0U; + + /* Clear TxISR function pointers */ + huart->TxISR = NULL; + + + /* Restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Transmit Complete Callback */ + huart->AbortTransmitCpltCallback(huart); +#else + /* Call legacy weak Abort Transmit Complete Callback */ + HAL_UART_AbortTransmitCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + + return HAL_OK; +} + +/** + * @brief Abort ongoing Receive transfer (Interrupt mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Rx transfer started in Interrupt or DMA mode. 
+ * This procedure performs following operations : + * - Disable UART Interrupts (Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) + * - Set handle State to READY + * - At abort completion, call user abort complete callback + * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be + * considered as completed only when user abort complete callback is executed (not when exiting function). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_AbortReceive_IT(UART_HandleTypeDef *huart) +{ + /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the UART DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel : use non blocking DMA Abort API (callback) */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback : + will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ + huart->hdmarx->XferAbortCallback = UART_DMARxOnlyAbortCallback; + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) + { + /* Call Directly huart->hdmarx->XferAbortCallback function in case of error */ + huart->hdmarx->XferAbortCallback(huart->hdmarx); + } + } + else + { + /* Reset Rx transfer counter */ + huart->RxXferCount = 0U; + + /* Clear RxISR function pointer */ + huart->pRxBuffPtr = NULL; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Receive Complete Callback */ + huart->AbortReceiveCpltCallback(huart); +#else + /* Call legacy weak Abort Receive Complete Callback */ + HAL_UART_AbortReceiveCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + else + { + /* Reset Rx transfer counter */ + huart->RxXferCount = 0U; + + /* Clear RxISR function pointer */ + huart->pRxBuffPtr = NULL; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + /* Restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Receive Complete Callback */ + huart->AbortReceiveCpltCallback(huart); +#else + /* Call legacy weak Abort Receive Complete Callback */ + HAL_UART_AbortReceiveCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + + return HAL_OK; +} + +/** + * @brief Handle UART interrupt request. + * @param huart UART handle. 
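+  * @note   This function is expected to be called from the device interrupt handler in user
+  *         code, for example (instance and handle names are user-side assumptions):
+  *         void USART1_IRQHandler(void) { HAL_UART_IRQHandler(&huart1); }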
+ * @retval None + */ +void HAL_UART_IRQHandler(UART_HandleTypeDef *huart) +{ + uint32_t isrflags = READ_REG(huart->Instance->ISR); + uint32_t cr1its = READ_REG(huart->Instance->CR1); + uint32_t cr3its = READ_REG(huart->Instance->CR3); + + uint32_t errorflags; + uint32_t errorcode; + + /* If no error occurs */ + errorflags = (isrflags & (uint32_t)(USART_ISR_PE | USART_ISR_FE | USART_ISR_ORE | USART_ISR_NE | USART_ISR_RTOF)); + if (errorflags == 0U) + { + /* UART in mode Receiver ---------------------------------------------------*/ + if (((isrflags & USART_ISR_RXNE) != 0U) + && ((cr1its & USART_CR1_RXNEIE) != 0U)) + { + if (huart->RxISR != NULL) + { + huart->RxISR(huart); + } + return; + } + } + + /* If some errors occur */ + if ((errorflags != 0U) + && (((cr3its & USART_CR3_EIE) != 0U) + || ((cr1its & (USART_CR1_RXNEIE | USART_CR1_PEIE)) != 0U))) + { + /* UART parity error interrupt occurred -------------------------------------*/ + if (((isrflags & USART_ISR_PE) != 0U) && ((cr1its & USART_CR1_PEIE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_PEF); + + huart->ErrorCode |= HAL_UART_ERROR_PE; + } + + /* UART frame error interrupt occurred --------------------------------------*/ + if (((isrflags & USART_ISR_FE) != 0U) && ((cr3its & USART_CR3_EIE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_FEF); + + huart->ErrorCode |= HAL_UART_ERROR_FE; + } + + /* UART noise error interrupt occurred --------------------------------------*/ + if (((isrflags & USART_ISR_NE) != 0U) && ((cr3its & USART_CR3_EIE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_NEF); + + huart->ErrorCode |= HAL_UART_ERROR_NE; + } + + /* UART Over-Run interrupt occurred -----------------------------------------*/ + if (((isrflags & USART_ISR_ORE) != 0U) + && (((cr1its & USART_CR1_RXNEIE) != 0U) || + ((cr3its & USART_CR3_EIE) != 0U))) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF); + + huart->ErrorCode |= HAL_UART_ERROR_ORE; + } + + /* UART Receiver Timeout interrupt occurred ---------------------------------*/ + if (((isrflags & USART_ISR_RTOF) != 0U) && ((cr1its & USART_CR1_RTOIE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_RTOF); + + huart->ErrorCode |= HAL_UART_ERROR_RTO; + } + + /* Call UART Error Call back function if need be ----------------------------*/ + if (huart->ErrorCode != HAL_UART_ERROR_NONE) + { + /* UART in mode Receiver --------------------------------------------------*/ + if (((isrflags & USART_ISR_RXNE) != 0U) + && ((cr1its & USART_CR1_RXNEIE) != 0U)) + { + if (huart->RxISR != NULL) + { + huart->RxISR(huart); + } + } + + /* If Error is to be considered as blocking : + - Receiver Timeout error in Reception + - Overrun error in Reception + - any error occurs in DMA mode reception + */ + errorcode = huart->ErrorCode; + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) || + ((errorcode & (HAL_UART_ERROR_RTO | HAL_UART_ERROR_ORE)) != 0U)) + { + /* Blocking error : transfer is aborted + Set the UART state ready to be able to start again the process, + Disable Rx Interrupts, and disable Rx DMA request, if ongoing */ + UART_EndRxTransfer(huart); + + /* Disable the UART DMA Rx request if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback : + will lead to call HAL_UART_ErrorCallback() at end of DMA abort procedure */ + huart->hdmarx->XferAbortCallback = UART_DMAAbortOnError; + + /* Abort 
DMA RX */ + if (HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) + { + /* Call Directly huart->hdmarx->XferAbortCallback function in case of error */ + huart->hdmarx->XferAbortCallback(huart->hdmarx); + } + } + else + { + /* Call user error callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + + } + } + else + { + /* Call user error callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + else + { + /* Non Blocking error : transfer could go on. + Error is notified to user through user error callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + } + } + return; + + } /* End if some error occurs */ +#if defined(USART_CR1_UESM) + + /* UART wakeup from Stop mode interrupt occurred ---------------------------*/ + if (((isrflags & USART_ISR_WUF) != 0U) && ((cr3its & USART_CR3_WUFIE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_WUF); + + /* UART Rx state is not reset as a reception process might be ongoing. + If UART handle state fields need to be reset to READY, this could be done in Wakeup callback */ + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Wakeup Callback */ + huart->WakeupCallback(huart); +#else + /* Call legacy weak Wakeup Callback */ + HAL_UARTEx_WakeupCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + return; + } +#endif /* USART_CR1_UESM */ + + /* UART in mode Transmitter ------------------------------------------------*/ + if (((isrflags & USART_ISR_TXE) != 0U) + && ((cr1its & USART_CR1_TXEIE) != 0U)) + { + if (huart->TxISR != NULL) + { + huart->TxISR(huart); + } + return; + } + + /* UART in mode Transmitter (transmission end) -----------------------------*/ + if (((isrflags & USART_ISR_TC) != 0U) && ((cr1its & USART_CR1_TCIE) != 0U)) + { + UART_EndTransmit_IT(huart); + return; + } + +} + +/** + * @brief Tx Transfer completed callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_TxCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_TxCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief Tx Half Transfer completed callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_TxHalfCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_TxHalfCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief Rx Transfer completed callback. + * @param huart UART handle. 
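+  * @note   Being declared __weak, this callback is typically overridden in the user application.
+  *         A minimal sketch that re-arms a one-byte Interrupt-mode reception (huart1 and rxbyte
+  *         are user-side assumptions):
+  *         void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart)
+  *         {
+  *           if (huart->Instance == USART1) { HAL_UART_Receive_IT(&huart1, &rxbyte, 1U); }
+  *         }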
+ * @retval None + */ +__weak void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_RxCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief Rx Half Transfer completed callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_RxHalfCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_RxHalfCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief UART error callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_ErrorCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_ErrorCallback can be implemented in the user file. + */ +} + +/** + * @brief UART Abort Complete callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_AbortCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_AbortCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief UART Abort Complete callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_AbortTransmitCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_AbortTransmitCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief UART Abort Receive Complete callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_AbortReceiveCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_AbortReceiveCpltCallback can be implemented in the user file. + */ +} + +#if defined(USART_CR1_UESM) +/** + * @brief UART wakeup from Stop mode callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UARTEx_WakeupCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UARTEx_WakeupCallback can be implemented in the user file. + */ +} + +#endif /* USART_CR1_UESM */ +/** + * @} + */ + +/** @defgroup UART_Exported_Functions_Group3 Peripheral Control functions + * @brief UART control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the UART. 
+ (+) HAL_UART_ReceiverTimeout_Config() API allows to configure the receiver timeout value on the fly + (+) HAL_UART_EnableReceiverTimeout() API enables the receiver timeout feature + (+) HAL_UART_DisableReceiverTimeout() API disables the receiver timeout feature + (+) HAL_MultiProcessor_EnableMuteMode() API enables mute mode + (+) HAL_MultiProcessor_DisableMuteMode() API disables mute mode + (+) HAL_MultiProcessor_EnterMuteMode() API enters mute mode + (+) UART_SetConfig() API configures the UART peripheral + (+) UART_AdvFeatureConfig() API optionally configures the UART advanced features + (+) UART_CheckIdleState() API ensures that TEACK and/or REACK are set after initialization + (+) HAL_HalfDuplex_EnableTransmitter() API disables receiver and enables transmitter + (+) HAL_HalfDuplex_EnableReceiver() API disables transmitter and enables receiver + (+) HAL_LIN_SendBreak() API transmits the break characters +@endverbatim + * @{ + */ + +/** + * @brief Update on the fly the receiver timeout value in RTOR register. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param TimeoutValue receiver timeout value in number of baud blocks. The timeout + * value must be less or equal to 0x0FFFFFFFF. + * @retval None + */ +void HAL_UART_ReceiverTimeout_Config(UART_HandleTypeDef *huart, uint32_t TimeoutValue) +{ + assert_param(IS_UART_RECEIVER_TIMEOUT_VALUE(TimeoutValue)); + MODIFY_REG(huart->Instance->RTOR, USART_RTOR_RTO, TimeoutValue); +} + +/** + * @brief Enable the UART receiver timeout feature. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_EnableReceiverTimeout(UART_HandleTypeDef *huart) +{ + if (huart->gState == HAL_UART_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Set the USART RTOEN bit */ + SET_BIT(huart->Instance->CR2, USART_CR2_RTOEN); + + huart->gState = HAL_UART_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Disable the UART receiver timeout feature. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DisableReceiverTimeout(UART_HandleTypeDef *huart) +{ + if (huart->gState == HAL_UART_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Clear the USART RTOEN bit */ + CLEAR_BIT(huart->Instance->CR2, USART_CR2_RTOEN); + + huart->gState = HAL_UART_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Enable UART in mute mode (does not mean UART enters mute mode; + * to enter mute mode, HAL_MultiProcessor_EnterMuteMode() API must be called). + * @param huart UART handle. 
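+  * @note   Illustrative sequence (handle name is a user-side assumption): call
+  *         HAL_MultiProcessor_EnableMuteMode(&huart1) first, then HAL_MultiProcessor_EnterMuteMode(&huart1)
+  *         to actually mute the receiver until the configured wake-up condition occurs.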
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessor_EnableMuteMode(UART_HandleTypeDef *huart) +{ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Enable USART mute mode by setting the MME bit in the CR1 register */ + SET_BIT(huart->Instance->CR1, USART_CR1_MME); + + huart->gState = HAL_UART_STATE_READY; + + return (UART_CheckIdleState(huart)); +} + +/** + * @brief Disable UART mute mode (does not mean the UART actually exits mute mode + * as it may not have been in mute mode at this very moment). + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessor_DisableMuteMode(UART_HandleTypeDef *huart) +{ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable USART mute mode by clearing the MME bit in the CR1 register */ + CLEAR_BIT(huart->Instance->CR1, USART_CR1_MME); + + huart->gState = HAL_UART_STATE_READY; + + return (UART_CheckIdleState(huart)); +} + +/** + * @brief Enter UART mute mode (means UART actually enters mute mode). + * @note To exit from mute mode, HAL_MultiProcessor_DisableMuteMode() API must be called. + * @param huart UART handle. + * @retval None + */ +void HAL_MultiProcessor_EnterMuteMode(UART_HandleTypeDef *huart) +{ + __HAL_UART_SEND_REQ(huart, UART_MUTE_MODE_REQUEST); +} + +/** + * @brief Enable the UART transmitter and disable the UART receiver. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_HalfDuplex_EnableTransmitter(UART_HandleTypeDef *huart) +{ + __HAL_LOCK(huart); + huart->gState = HAL_UART_STATE_BUSY; + + /* Clear TE and RE bits */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TE | USART_CR1_RE)); + + /* Enable the USART's transmit interface by setting the TE bit in the USART CR1 register */ + SET_BIT(huart->Instance->CR1, USART_CR1_TE); + + huart->gState = HAL_UART_STATE_READY; + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Enable the UART receiver and disable the UART transmitter. + * @param huart UART handle. + * @retval HAL status. + */ +HAL_StatusTypeDef HAL_HalfDuplex_EnableReceiver(UART_HandleTypeDef *huart) +{ + __HAL_LOCK(huart); + huart->gState = HAL_UART_STATE_BUSY; + + /* Clear TE and RE bits */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TE | USART_CR1_RE)); + + /* Enable the USART's receive interface by setting the RE bit in the USART CR1 register */ + SET_BIT(huart->Instance->CR1, USART_CR1_RE); + + huart->gState = HAL_UART_STATE_READY; + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + + +/** + * @brief Transmit break characters. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LIN_SendBreak(UART_HandleTypeDef *huart) +{ + /* Check the parameters */ + assert_param(IS_UART_LIN_INSTANCE(huart->Instance)); + + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Send break characters */ + __HAL_UART_SEND_REQ(huart, UART_SENDBREAK_REQUEST); + + huart->gState = HAL_UART_STATE_READY; + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup UART_Exported_Functions_Group4 Peripheral State and Error functions + * @brief UART Peripheral State functions + * +@verbatim + ============================================================================== + ##### Peripheral State and Error functions ##### + ============================================================================== + [..] + This subsection provides functions allowing to : + (+) Return the UART handle state. 
+ (+) Return the UART handle error code + +@endverbatim + * @{ + */ + +/** + * @brief Return the UART handle state. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART. + * @retval HAL state + */ +HAL_UART_StateTypeDef HAL_UART_GetState(UART_HandleTypeDef *huart) +{ + uint32_t temp1; + uint32_t temp2; + temp1 = huart->gState; + temp2 = huart->RxState; + + return (HAL_UART_StateTypeDef)(temp1 | temp2); +} + +/** + * @brief Return the UART handle error code. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART. + * @retval UART Error Code + */ +uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart) +{ + return huart->ErrorCode; +} +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup UART_Private_Functions UART Private Functions + * @{ + */ + +/** + * @brief Initialize the callbacks to their default values. + * @param huart UART handle. + * @retval none + */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +void UART_InitCallbacksToDefault(UART_HandleTypeDef *huart) +{ + /* Init the UART Callback settings */ + huart->TxHalfCpltCallback = HAL_UART_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ + huart->TxCpltCallback = HAL_UART_TxCpltCallback; /* Legacy weak TxCpltCallback */ + huart->RxHalfCpltCallback = HAL_UART_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ + huart->RxCpltCallback = HAL_UART_RxCpltCallback; /* Legacy weak RxCpltCallback */ + huart->ErrorCallback = HAL_UART_ErrorCallback; /* Legacy weak ErrorCallback */ + huart->AbortCpltCallback = HAL_UART_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + huart->AbortTransmitCpltCallback = HAL_UART_AbortTransmitCpltCallback; /* Legacy weak AbortTransmitCpltCallback */ + huart->AbortReceiveCpltCallback = HAL_UART_AbortReceiveCpltCallback; /* Legacy weak AbortReceiveCpltCallback */ +#if defined(USART_CR1_UESM) + huart->WakeupCallback = HAL_UARTEx_WakeupCallback; /* Legacy weak WakeupCallback */ +#endif /* USART_CR1_UESM */ + +} +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @brief Configure the UART peripheral. + * @param huart UART handle. 
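+  * @note   Worked example of the BRR computation performed below (the clock value is an
+  *         assumption chosen for illustration only): with a 54 MHz kernel clock, 115200 baud
+  *         and 16x oversampling, USARTDIV = 54000000 / 115200 rounds to 469 (0x1D5), which is
+  *         written as-is to BRR and yields an actual baud rate of about 115139 bit/s
+  *         (roughly 0.05 % error).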
+ * @retval HAL status + */ +HAL_StatusTypeDef UART_SetConfig(UART_HandleTypeDef *huart) +{ + uint32_t tmpreg; + uint16_t brrtemp; + UART_ClockSourceTypeDef clocksource; + uint32_t usartdiv = 0x00000000U; + HAL_StatusTypeDef ret = HAL_OK; + uint32_t pclk; + + /* Check the parameters */ + assert_param(IS_UART_BAUDRATE(huart->Init.BaudRate)); + assert_param(IS_UART_WORD_LENGTH(huart->Init.WordLength)); + assert_param(IS_UART_STOPBITS(huart->Init.StopBits)); + assert_param(IS_UART_ONE_BIT_SAMPLE(huart->Init.OneBitSampling)); + + assert_param(IS_UART_PARITY(huart->Init.Parity)); + assert_param(IS_UART_MODE(huart->Init.Mode)); + assert_param(IS_UART_HARDWARE_FLOW_CONTROL(huart->Init.HwFlowCtl)); + assert_param(IS_UART_OVERSAMPLING(huart->Init.OverSampling)); + + /*-------------------------- USART CR1 Configuration -----------------------*/ + /* Clear M, PCE, PS, TE, RE and OVER8 bits and configure + * the UART Word Length, Parity, Mode and oversampling: + * set the M bits according to huart->Init.WordLength value + * set PCE and PS bits according to huart->Init.Parity value + * set TE and RE bits according to huart->Init.Mode value + * set OVER8 bit according to huart->Init.OverSampling value */ + tmpreg = (uint32_t)huart->Init.WordLength | huart->Init.Parity | huart->Init.Mode | huart->Init.OverSampling ; + MODIFY_REG(huart->Instance->CR1, USART_CR1_FIELDS, tmpreg); + + /*-------------------------- USART CR2 Configuration -----------------------*/ + /* Configure the UART Stop Bits: Set STOP[13:12] bits according + * to huart->Init.StopBits value */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_STOP, huart->Init.StopBits); + + /*-------------------------- USART CR3 Configuration -----------------------*/ + /* Configure + * - UART HardWare Flow Control: set CTSE and RTSE bits according + * to huart->Init.HwFlowCtl value + * - one-bit sampling method versus three samples' majority rule according + * to huart->Init.OneBitSampling (not applicable to LPUART) */ + tmpreg = (uint32_t)huart->Init.HwFlowCtl; + + tmpreg |= huart->Init.OneBitSampling; + MODIFY_REG(huart->Instance->CR3, USART_CR3_FIELDS, tmpreg); + + + /*-------------------------- USART BRR Configuration -----------------------*/ + UART_GETCLOCKSOURCE(huart, clocksource); + + if (huart->Init.OverSampling == UART_OVERSAMPLING_8) + { + switch (clocksource) + { + case UART_CLOCKSOURCE_PCLK1: + pclk = HAL_RCC_GetPCLK1Freq(); + usartdiv = (uint16_t)(UART_DIV_SAMPLING8(pclk, huart->Init.BaudRate)); + break; + case UART_CLOCKSOURCE_PCLK2: + pclk = HAL_RCC_GetPCLK2Freq(); + usartdiv = (uint16_t)(UART_DIV_SAMPLING8(pclk, huart->Init.BaudRate)); + break; + case UART_CLOCKSOURCE_HSI: + usartdiv = (uint16_t)(UART_DIV_SAMPLING8(HSI_VALUE, huart->Init.BaudRate)); + break; + case UART_CLOCKSOURCE_SYSCLK: + pclk = HAL_RCC_GetSysClockFreq(); + usartdiv = (uint16_t)(UART_DIV_SAMPLING8(pclk, huart->Init.BaudRate)); + break; + case UART_CLOCKSOURCE_LSE: + usartdiv = (uint16_t)(UART_DIV_SAMPLING8(LSE_VALUE, huart->Init.BaudRate)); + break; + default: + ret = HAL_ERROR; + break; + } + + /* USARTDIV must be greater than or equal to 0d16 */ + if ((usartdiv >= UART_BRR_MIN) && (usartdiv <= UART_BRR_MAX)) + { + brrtemp = (uint16_t)(usartdiv & 0xFFF0U); + brrtemp |= (uint16_t)((usartdiv & (uint16_t)0x000FU) >> 1U); + huart->Instance->BRR = brrtemp; + } + else + { + ret = HAL_ERROR; + } + } + else + { + switch (clocksource) + { + case UART_CLOCKSOURCE_PCLK1: + pclk = HAL_RCC_GetPCLK1Freq(); + usartdiv = (uint16_t)(UART_DIV_SAMPLING16(pclk, huart->Init.BaudRate)); + 
break; + case UART_CLOCKSOURCE_PCLK2: + pclk = HAL_RCC_GetPCLK2Freq(); + usartdiv = (uint16_t)(UART_DIV_SAMPLING16(pclk, huart->Init.BaudRate)); + break; + case UART_CLOCKSOURCE_HSI: + usartdiv = (uint16_t)(UART_DIV_SAMPLING16(HSI_VALUE, huart->Init.BaudRate)); + break; + case UART_CLOCKSOURCE_SYSCLK: + pclk = HAL_RCC_GetSysClockFreq(); + usartdiv = (uint16_t)(UART_DIV_SAMPLING16(pclk, huart->Init.BaudRate)); + break; + case UART_CLOCKSOURCE_LSE: + usartdiv = (uint16_t)(UART_DIV_SAMPLING16(LSE_VALUE, huart->Init.BaudRate)); + break; + default: + ret = HAL_ERROR; + break; + } + + /* USARTDIV must be greater than or equal to 0d16 */ + if ((usartdiv >= UART_BRR_MIN) && (usartdiv <= UART_BRR_MAX)) + { + huart->Instance->BRR = usartdiv; + } + else + { + ret = HAL_ERROR; + } + } + + + /* Clear ISR function pointers */ + huart->RxISR = NULL; + huart->TxISR = NULL; + + return ret; +} + +/** + * @brief Configure the UART peripheral advanced features. + * @param huart UART handle. + * @retval None + */ +void UART_AdvFeatureConfig(UART_HandleTypeDef *huart) +{ + /* Check whether the set of advanced features to configure is properly set */ + assert_param(IS_UART_ADVFEATURE_INIT(huart->AdvancedInit.AdvFeatureInit)); + + /* if required, configure TX pin active level inversion */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_TXINVERT_INIT)) + { + assert_param(IS_UART_ADVFEATURE_TXINV(huart->AdvancedInit.TxPinLevelInvert)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_TXINV, huart->AdvancedInit.TxPinLevelInvert); + } + + /* if required, configure RX pin active level inversion */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_RXINVERT_INIT)) + { + assert_param(IS_UART_ADVFEATURE_RXINV(huart->AdvancedInit.RxPinLevelInvert)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_RXINV, huart->AdvancedInit.RxPinLevelInvert); + } + + /* if required, configure data inversion */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_DATAINVERT_INIT)) + { + assert_param(IS_UART_ADVFEATURE_DATAINV(huart->AdvancedInit.DataInvert)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_DATAINV, huart->AdvancedInit.DataInvert); + } + + /* if required, configure RX/TX pins swap */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_SWAP_INIT)) + { + assert_param(IS_UART_ADVFEATURE_SWAP(huart->AdvancedInit.Swap)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_SWAP, huart->AdvancedInit.Swap); + } + + /* if required, configure RX overrun detection disabling */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_RXOVERRUNDISABLE_INIT)) + { + assert_param(IS_UART_OVERRUN(huart->AdvancedInit.OverrunDisable)); + MODIFY_REG(huart->Instance->CR3, USART_CR3_OVRDIS, huart->AdvancedInit.OverrunDisable); + } + + /* if required, configure DMA disabling on reception error */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_DMADISABLEONERROR_INIT)) + { + assert_param(IS_UART_ADVFEATURE_DMAONRXERROR(huart->AdvancedInit.DMADisableonRxError)); + MODIFY_REG(huart->Instance->CR3, USART_CR3_DDRE, huart->AdvancedInit.DMADisableonRxError); + } + + /* if required, configure auto Baud rate detection scheme */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_AUTOBAUDRATE_INIT)) + { + assert_param(IS_USART_AUTOBAUDRATE_DETECTION_INSTANCE(huart->Instance)); + assert_param(IS_UART_ADVFEATURE_AUTOBAUDRATE(huart->AdvancedInit.AutoBaudRateEnable)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_ABREN, 
huart->AdvancedInit.AutoBaudRateEnable); + /* set auto Baudrate detection parameters if detection is enabled */ + if (huart->AdvancedInit.AutoBaudRateEnable == UART_ADVFEATURE_AUTOBAUDRATE_ENABLE) + { + assert_param(IS_UART_ADVFEATURE_AUTOBAUDRATEMODE(huart->AdvancedInit.AutoBaudRateMode)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_ABRMODE, huart->AdvancedInit.AutoBaudRateMode); + } + } + + /* if required, configure MSB first on communication line */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_MSBFIRST_INIT)) + { + assert_param(IS_UART_ADVFEATURE_MSBFIRST(huart->AdvancedInit.MSBFirst)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_MSBFIRST, huart->AdvancedInit.MSBFirst); + } +} + +/** + * @brief Check the UART Idle State. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef UART_CheckIdleState(UART_HandleTypeDef *huart) +{ + uint32_t tickstart; + + /* Initialize the UART ErrorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Init tickstart for timeout managment*/ + tickstart = HAL_GetTick(); + + /* Check if the Transmitter is enabled */ + if ((huart->Instance->CR1 & USART_CR1_TE) == USART_CR1_TE) + { + /* Wait until TEACK flag is set */ + if (UART_WaitOnFlagUntilTimeout(huart, USART_ISR_TEACK, RESET, tickstart, HAL_UART_TIMEOUT_VALUE) != HAL_OK) + { + /* Timeout occurred */ + return HAL_TIMEOUT; + } + } +#if defined(USART_ISR_REACK) + + /* Check if the Receiver is enabled */ + if ((huart->Instance->CR1 & USART_CR1_RE) == USART_CR1_RE) + { + /* Wait until REACK flag is set */ + if (UART_WaitOnFlagUntilTimeout(huart, USART_ISR_REACK, RESET, tickstart, HAL_UART_TIMEOUT_VALUE) != HAL_OK) + { + /* Timeout occurred */ + return HAL_TIMEOUT; + } + } +#endif + + /* Initialize the UART State */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Handle UART Communication Timeout. + * @param huart UART handle. + * @param Flag Specifies the UART flag to check + * @param Status Flag status (SET or RESET) + * @param Tickstart Tick start value + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, + uint32_t Tickstart, uint32_t Timeout) +{ + /* Wait until flag is set */ + while ((__HAL_UART_GET_FLAG(huart, Flag) ? 
SET : RESET) == Status) + { + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + /* Disable TXE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts for the interrupt process */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE)); + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + + __HAL_UNLOCK(huart); + + return HAL_TIMEOUT; + } + + if (READ_BIT(huart->Instance->CR1, USART_CR1_RE) != 0U) + { + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_RTOF) == SET) + { + /* Clear Receiver Timeout flag*/ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_RTOF); + + /* Disable TXE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts for the interrupt process */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE)); + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->ErrorCode = HAL_UART_ERROR_RTO; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_TIMEOUT; + } + } + } + } + return HAL_OK; +} + + +/** + * @brief End ongoing Tx transfer on UART peripheral (following error detection or Transmit completion). + * @param huart UART handle. + * @retval None + */ +static void UART_EndTxTransfer(UART_HandleTypeDef *huart) +{ + /* Disable TXEIE and TCIE interrupts */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + + /* At end of Tx process, restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; +} + + +/** + * @brief End ongoing Rx transfer on UART peripheral (following error detection or Reception completion). + * @param huart UART handle. + * @retval None + */ +static void UART_EndRxTransfer(UART_HandleTypeDef *huart) +{ + /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* Reset RxIsr function pointer */ + huart->RxISR = NULL; +} + + +/** + * @brief DMA UART transmit process complete callback. + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMATransmitCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + /* DMA Normal mode */ + if (hdma->Init.Mode != DMA_CIRCULAR) + { + huart->TxXferCount = 0U; + + /* Disable the DMA transfer for transmit request by resetting the DMAT bit + in the UART CR3 register */ + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Enable the UART Transmit Complete Interrupt */ + SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); + } + /* DMA Circular mode */ + else + { +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Tx complete callback*/ + huart->TxCpltCallback(huart); +#else + /*Call legacy weak Tx complete callback*/ + HAL_UART_TxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } +} + +/** + * @brief DMA UART transmit process half complete callback. + * @param hdma DMA handle. 
+ * @retval None + */ +static void UART_DMATxHalfCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Tx Half complete callback*/ + huart->TxHalfCpltCallback(huart); +#else + /*Call legacy weak Tx Half complete callback*/ + HAL_UART_TxHalfCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART receive process complete callback. + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMAReceiveCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + /* DMA Normal mode */ + if (hdma->Init.Mode != DMA_CIRCULAR) + { + huart->RxXferCount = 0U; + + /* Disable PE and ERR (Frame error, noise error, overrun error) interrupts */ + CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the DMA transfer for the receiver request by resetting the DMAR bit + in the UART CR3 register */ + CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + } + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxCpltCallback(huart); +#else + /*Call legacy weak Rx complete callback*/ + HAL_UART_RxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART receive process half complete callback. + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMARxHalfCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Half complete callback*/ + huart->RxHalfCpltCallback(huart); +#else + /*Call legacy weak Rx Half complete callback*/ + HAL_UART_RxHalfCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART communication error callback. + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMAError(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + const HAL_UART_StateTypeDef gstate = huart->gState; + const HAL_UART_StateTypeDef rxstate = huart->RxState; + + /* Stop UART DMA Tx request if ongoing */ + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) && + (gstate == HAL_UART_STATE_BUSY_TX)) + { + huart->TxXferCount = 0U; + UART_EndTxTransfer(huart); + } + + /* Stop UART DMA Rx request if ongoing */ + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) && + (rxstate == HAL_UART_STATE_BUSY_RX)) + { + huart->RxXferCount = 0U; + UART_EndRxTransfer(huart); + } + + huart->ErrorCode |= HAL_UART_ERROR_DMA; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART communication abort callback, when initiated by HAL services on Error + * (To be called at end of DMA Abort procedure following error occurrence). + * @param hdma DMA handle. 
+ * @retval None + */ +static void UART_DMAAbortOnError(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + huart->RxXferCount = 0U; + huart->TxXferCount = 0U; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART Tx communication abort callback, when initiated by user + * (To be called at end of DMA Tx Abort procedure following user abort request). + * @note When this callback is executed, User Abort complete call back is called only if no + * Abort still ongoing for Rx DMA Handle. + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMATxAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + huart->hdmatx->XferAbortCallback = NULL; + + /* Check if an Abort process is still ongoing */ + if (huart->hdmarx != NULL) + { + if (huart->hdmarx->XferAbortCallback != NULL) + { + return; + } + } + + /* No Abort process still ongoing : All DMA channels are aborted, call user Abort Complete callback */ + huart->TxXferCount = 0U; + huart->RxXferCount = 0U; + + /* Reset errorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort complete callback */ + huart->AbortCpltCallback(huart); +#else + /* Call legacy weak Abort complete callback */ + HAL_UART_AbortCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + + +/** + * @brief DMA UART Rx communication abort callback, when initiated by user + * (To be called at end of DMA Rx Abort procedure following user abort request). + * @note When this callback is executed, User Abort complete call back is called only if no + * Abort still ongoing for Tx DMA Handle. + * @param hdma DMA handle. 
+ * @retval None + */ +static void UART_DMARxAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + huart->hdmarx->XferAbortCallback = NULL; + + /* Check if an Abort process is still ongoing */ + if (huart->hdmatx != NULL) + { + if (huart->hdmatx->XferAbortCallback != NULL) + { + return; + } + } + + /* No Abort process still ongoing : All DMA channels are aborted, call user Abort Complete callback */ + huart->TxXferCount = 0U; + huart->RxXferCount = 0U; + + /* Reset errorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort complete callback */ + huart->AbortCpltCallback(huart); +#else + /* Call legacy weak Abort complete callback */ + HAL_UART_AbortCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + + +/** + * @brief DMA UART Tx communication abort callback, when initiated by user by a call to + * HAL_UART_AbortTransmit_IT API (Abort only Tx transfer) + * (This callback is executed at end of DMA Tx Abort procedure following user abort request, + * and leads to user Tx Abort Complete callback execution). + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMATxOnlyAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + huart->TxXferCount = 0U; + + + /* Restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Transmit Complete Callback */ + huart->AbortTransmitCpltCallback(huart); +#else + /* Call legacy weak Abort Transmit Complete Callback */ + HAL_UART_AbortTransmitCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART Rx communication abort callback, when initiated by user by a call to + * HAL_UART_AbortReceive_IT API (Abort only Rx transfer) + * (This callback is executed at end of DMA Rx Abort procedure following user abort request, + * and leads to user Rx Abort Complete callback execution). + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + huart->RxXferCount = 0U; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Receive Complete Callback */ + huart->AbortReceiveCpltCallback(huart); +#else + /* Call legacy weak Abort Receive Complete Callback */ + HAL_UART_AbortReceiveCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief TX interrrupt handler for 7 or 8 bits data word length . 
+ * @note Function is called under interruption only, once + * interruptions have been enabled by HAL_UART_Transmit_IT(). + * @param huart UART handle. + * @retval None + */ +static void UART_TxISR_8BIT(UART_HandleTypeDef *huart) +{ + /* Check that a Tx process is ongoing */ + if (huart->gState == HAL_UART_STATE_BUSY_TX) + { + if (huart->TxXferCount == 0U) + { + /* Disable the UART Transmit Data Register Empty Interrupt */ + CLEAR_BIT(huart->Instance->CR1, USART_CR1_TXEIE); + + /* Enable the UART Transmit Complete Interrupt */ + SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); + } + else + { + huart->Instance->TDR = (uint8_t)(*huart->pTxBuffPtr & (uint8_t)0xFF); + huart->pTxBuffPtr++; + huart->TxXferCount--; + } + } +} + +/** + * @brief TX interrrupt handler for 9 bits data word length. + * @note Function is called under interruption only, once + * interruptions have been enabled by HAL_UART_Transmit_IT(). + * @param huart UART handle. + * @retval None + */ +static void UART_TxISR_16BIT(UART_HandleTypeDef *huart) +{ + uint16_t *tmp; + + /* Check that a Tx process is ongoing */ + if (huart->gState == HAL_UART_STATE_BUSY_TX) + { + if (huart->TxXferCount == 0U) + { + /* Disable the UART Transmit Data Register Empty Interrupt */ + CLEAR_BIT(huart->Instance->CR1, USART_CR1_TXEIE); + + /* Enable the UART Transmit Complete Interrupt */ + SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); + } + else + { + tmp = (uint16_t *) huart->pTxBuffPtr; + huart->Instance->TDR = (((uint32_t)(*tmp)) & 0x01FFUL); + huart->pTxBuffPtr += 2U; + huart->TxXferCount--; + } + } +} + + +/** + * @brief Wrap up transmission in non-blocking mode. + * @param huart pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval None + */ +static void UART_EndTransmit_IT(UART_HandleTypeDef *huart) +{ + /* Disable the UART Transmit Complete Interrupt */ + CLEAR_BIT(huart->Instance->CR1, USART_CR1_TCIE); + + /* Tx process is ended, restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + /* Cleat TxISR function pointer */ + huart->TxISR = NULL; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Tx complete callback*/ + huart->TxCpltCallback(huart); +#else + /*Call legacy weak Tx complete callback*/ + HAL_UART_TxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief RX interrrupt handler for 7 or 8 bits data word length . + * @param huart UART handle. 
+ * @retval None + */ +static void UART_RxISR_8BIT(UART_HandleTypeDef *huart) +{ + uint16_t uhMask = huart->Mask; + uint16_t uhdata; + + /* Check that a Rx process is ongoing */ + if (huart->RxState == HAL_UART_STATE_BUSY_RX) + { + uhdata = (uint16_t) READ_REG(huart->Instance->RDR); + *huart->pRxBuffPtr = (uint8_t)(uhdata & (uint8_t)uhMask); + huart->pRxBuffPtr++; + huart->RxXferCount--; + + if (huart->RxXferCount == 0U) + { + /* Disable the UART Parity Error Interrupt and RXNE interrupts */ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + + /* Disable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Rx process is completed, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* Clear RxISR function pointer */ + huart->RxISR = NULL; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxCpltCallback(huart); +#else + /*Call legacy weak Rx complete callback*/ + HAL_UART_RxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + else + { + /* Clear RXNE interrupt flag */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + } +} + +/** + * @brief RX interrrupt handler for 9 bits data word length . + * @note Function is called under interruption only, once + * interruptions have been enabled by HAL_UART_Receive_IT() + * @param huart UART handle. + * @retval None + */ +static void UART_RxISR_16BIT(UART_HandleTypeDef *huart) +{ + uint16_t *tmp; + uint16_t uhMask = huart->Mask; + uint16_t uhdata; + + /* Check that a Rx process is ongoing */ + if (huart->RxState == HAL_UART_STATE_BUSY_RX) + { + uhdata = (uint16_t) READ_REG(huart->Instance->RDR); + tmp = (uint16_t *) huart->pRxBuffPtr ; + *tmp = (uint16_t)(uhdata & uhMask); + huart->pRxBuffPtr += 2U; + huart->RxXferCount--; + + if (huart->RxXferCount == 0U) + { + /* Disable the UART Parity Error Interrupt and RXNE interrupt*/ + CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + + /* Disable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Rx process is completed, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* Clear RxISR function pointer */ + huart->RxISR = NULL; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxCpltCallback(huart); +#else + /*Call legacy weak Rx complete callback*/ + HAL_UART_RxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + else + { + /* Clear RXNE interrupt flag */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + } +} + + +/** + * @} + */ + +#endif /* HAL_UART_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c new file mode 100644 index 000000000..cdc535c51 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c @@ -0,0 +1,485 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_uart_ex.c + * @author MCD Application Team + * @brief Extended UART HAL module driver. 
+ * This file provides firmware functions to manage the following extended + * functionalities of the Universal Asynchronous Receiver Transmitter Peripheral (UART). + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + * + @verbatim + ============================================================================== + ##### UART peripheral extended features ##### + ============================================================================== + + (#) Declare a UART_HandleTypeDef handle structure. + + (#) For the UART RS485 Driver Enable mode, initialize the UART registers + by calling the HAL_RS485Ex_Init() API. + + @endverbatim + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup UARTEx UARTEx + * @brief UART Extended HAL module driver + * @{ + */ + +#ifdef HAL_UART_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ + +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @defgroup UARTEx_Private_Functions UARTEx Private Functions + * @{ + */ +#if defined(USART_CR1_UESM) +static void UARTEx_Wakeup_AddressConfig(UART_HandleTypeDef *huart, UART_WakeUpTypeDef WakeUpSelection); +#endif /* USART_CR1_UESM */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup UARTEx_Exported_Functions UARTEx Exported Functions + * @{ + */ + +/** @defgroup UARTEx_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Extended Initialization and Configuration Functions + * +@verbatim +=============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to initialize the USARTx or the UARTy + in asynchronous mode. + (+) For the asynchronous mode the parameters below can be configured: + (++) Baud Rate + (++) Word Length + (++) Stop Bit + (++) Parity: If the parity is enabled, then the MSB bit of the data written + in the data register is transmitted but is changed by the parity bit. + (++) Hardware flow control + (++) Receiver/transmitter modes + (++) Over Sampling Method + (++) One-Bit Sampling Method + (+) For the asynchronous mode, the following advanced features can be configured as well: + (++) TX and/or RX pin level inversion + (++) data logical level inversion + (++) RX and TX pins swap + (++) RX overrun detection disabling + (++) DMA disabling on RX error + (++) MSB first on communication line + (++) auto Baud rate detection + [..] + The HAL_RS485Ex_Init() API follows the UART RS485 mode configuration + procedures (details for the procedures are available in reference manual). + +@endverbatim + + Depending on the frame length defined by the M1 and M0 bits (7-bit, + 8-bit or 9-bit), the possible UART formats are listed in the + following table. + + Table 1. UART frame format. 
+ +-----------------------------------------------------------------------+ + | M1 bit | M0 bit | PCE bit | UART frame | + |---------|---------|-----------|---------------------------------------| + | 0 | 0 | 0 | | SB | 8 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 0 | 1 | | SB | 7 bit data | PB | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 1 | 0 | | SB | 9 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 1 | 1 | | SB | 8 bit data | PB | STB | | + |---------|---------|-----------|---------------------------------------| + | 1 | 0 | 0 | | SB | 7 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 1 | 0 | 1 | | SB | 6 bit data | PB | STB | | + +-----------------------------------------------------------------------+ + + * @{ + */ + +/** + * @brief Initialize the RS485 Driver enable feature according to the specified + * parameters in the UART_InitTypeDef and creates the associated handle. + * @param huart UART handle. + * @param Polarity Select the driver enable polarity. + * This parameter can be one of the following values: + * @arg @ref UART_DE_POLARITY_HIGH DE signal is active high + * @arg @ref UART_DE_POLARITY_LOW DE signal is active low + * @param AssertionTime Driver Enable assertion time: + * 5-bit value defining the time between the activation of the DE (Driver Enable) + * signal and the beginning of the start bit. It is expressed in sample time + * units (1/8 or 1/16 bit time, depending on the oversampling rate) + * @param DeassertionTime Driver Enable deassertion time: + * 5-bit value defining the time between the end of the last stop bit, in a + * transmitted message, and the de-activation of the DE (Driver Enable) signal. + * It is expressed in sample time units (1/8 or 1/16 bit time, depending on the + * oversampling rate). 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RS485Ex_Init(UART_HandleTypeDef *huart, uint32_t Polarity, uint32_t AssertionTime, + uint32_t DeassertionTime) +{ + uint32_t temp; + + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + /* Check the Driver Enable UART instance */ + assert_param(IS_UART_DRIVER_ENABLE_INSTANCE(huart->Instance)); + + /* Check the Driver Enable polarity */ + assert_param(IS_UART_DE_POLARITY(Polarity)); + + /* Check the Driver Enable assertion time */ + assert_param(IS_UART_ASSERTIONTIME(AssertionTime)); + + /* Check the Driver Enable deassertion time */ + assert_param(IS_UART_DEASSERTIONTIME(DeassertionTime)); + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK, CORTEX */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the Peripheral */ + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + if (UART_SetConfig(huart) == HAL_ERROR) + { + return HAL_ERROR; + } + + if (huart->AdvancedInit.AdvFeatureInit != UART_ADVFEATURE_NO_INIT) + { + UART_AdvFeatureConfig(huart); + } + + /* Enable the Driver Enable mode by setting the DEM bit in the CR3 register */ + SET_BIT(huart->Instance->CR3, USART_CR3_DEM); + + /* Set the Driver Enable polarity */ + MODIFY_REG(huart->Instance->CR3, USART_CR3_DEP, Polarity); + + /* Set the Driver Enable assertion and deassertion times */ + temp = (AssertionTime << UART_CR1_DEAT_ADDRESS_LSB_POS); + temp |= (DeassertionTime << UART_CR1_DEDT_ADDRESS_LSB_POS); + MODIFY_REG(huart->Instance->CR1, (USART_CR1_DEDT | USART_CR1_DEAT), temp); + + /* Enable the Peripheral */ + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState and huart->RxState to Ready */ + return (UART_CheckIdleState(huart)); +} + +/** + * @} + */ + + +/** @defgroup UARTEx_Exported_Functions_Group3 Peripheral Control functions + * @brief Extended Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] This section provides the following functions: + (+) HAL_UARTEx_EnableClockStopMode() API enables the UART clock (HSI or LSE only) during stop mode + (+) HAL_UARTEx_DisableClockStopMode() API disables the above functionality + (+) HAL_MultiProcessorEx_AddressLength_Set() API optionally sets the UART node address + detection length to more than 4 bits for multiprocessor address mark wake up. +#if defined(USART_CR1_UESM) + (+) HAL_UARTEx_StopModeWakeUpSourceConfig() API defines the wake-up from stop mode + trigger: address match, Start Bit detection or RXNE bit status. + (+) HAL_UARTEx_EnableStopMode() API enables the UART to wake up the MCU from stop mode + (+) HAL_UARTEx_DisableStopMode() API disables the above functionality +#endif + +@endverbatim + * @{ + */ + +#if defined(USART_CR3_UCESM) +/** + * @brief Keep UART Clock enabled when in Stop Mode. 
+ * @note When the USART clock source is configured to be LSE or HSI, it is possible to keep enabled + * this clock during STOP mode by setting the UCESM bit in USART_CR3 control register. + * @note When LPUART is used to wakeup from stop with LSE is selected as LPUART clock source, + * and desired baud rate is 9600 baud, the bit UCESM bit in LPUART_CR3 control register must be set. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_EnableClockStopMode(UART_HandleTypeDef *huart) +{ + /* Process Locked */ + __HAL_LOCK(huart); + + /* Set UCESM bit */ + SET_BIT(huart->Instance->CR3, USART_CR3_UCESM); + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Disable UART Clock when in Stop Mode. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_DisableClockStopMode(UART_HandleTypeDef *huart) +{ + /* Process Locked */ + __HAL_LOCK(huart); + + /* Clear UCESM bit */ + CLEAR_BIT(huart->Instance->CR3, USART_CR3_UCESM); + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +#endif /* USART_CR3_UCESM */ +/** + * @brief By default in multiprocessor mode, when the wake up method is set + * to address mark, the UART handles only 4-bit long addresses detection; + * this API allows to enable longer addresses detection (6-, 7- or 8-bit + * long). + * @note Addresses detection lengths are: 6-bit address detection in 7-bit data mode, + * 7-bit address detection in 8-bit data mode, 8-bit address detection in 9-bit data mode. + * @param huart UART handle. + * @param AddressLength This parameter can be one of the following values: + * @arg @ref UART_ADDRESS_DETECT_4B 4-bit long address + * @arg @ref UART_ADDRESS_DETECT_7B 6-, 7- or 8-bit long address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessorEx_AddressLength_Set(UART_HandleTypeDef *huart, uint32_t AddressLength) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the address length parameter */ + assert_param(IS_UART_ADDRESSLENGTH_DETECT(AddressLength)); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the Peripheral */ + __HAL_UART_DISABLE(huart); + + /* Set the address length */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_ADDM7, AddressLength); + + /* Enable the Peripheral */ + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState to Ready */ + return (UART_CheckIdleState(huart)); +} + +#if defined(USART_CR1_UESM) +/** + * @brief Set Wakeup from Stop mode interrupt flag selection. + * @note It is the application responsibility to enable the interrupt used as + * usart_wkup interrupt source before entering low-power mode. + * @param huart UART handle. + * @param WakeUpSelection Address match, Start Bit detection or RXNE/RXFNE bit status. 
+ * This parameter can be one of the following values: + * @arg @ref UART_WAKEUP_ON_ADDRESS + * @arg @ref UART_WAKEUP_ON_STARTBIT + * @arg @ref UART_WAKEUP_ON_READDATA_NONEMPTY + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_StopModeWakeUpSourceConfig(UART_HandleTypeDef *huart, UART_WakeUpTypeDef WakeUpSelection) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tickstart; + + /* check the wake-up from stop mode UART instance */ + assert_param(IS_UART_WAKEUP_FROMSTOP_INSTANCE(huart->Instance)); + /* check the wake-up selection parameter */ + assert_param(IS_UART_WAKEUP_SELECTION(WakeUpSelection.WakeUpEvent)); + + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the Peripheral */ + __HAL_UART_DISABLE(huart); + + /* Set the wake-up selection scheme */ + MODIFY_REG(huart->Instance->CR3, USART_CR3_WUS, WakeUpSelection.WakeUpEvent); + + if (WakeUpSelection.WakeUpEvent == UART_WAKEUP_ON_ADDRESS) + { + UARTEx_Wakeup_AddressConfig(huart, WakeUpSelection); + } + + /* Enable the Peripheral */ + __HAL_UART_ENABLE(huart); + + /* Init tickstart for timeout managment*/ + tickstart = HAL_GetTick(); + + /* Wait until REACK flag is set */ + if (UART_WaitOnFlagUntilTimeout(huart, USART_ISR_REACK, RESET, tickstart, HAL_UART_TIMEOUT_VALUE) != HAL_OK) + { + status = HAL_TIMEOUT; + } + else + { + /* Initialize the UART State */ + huart->gState = HAL_UART_STATE_READY; + } + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return status; +} + +/** + * @brief Enable UART Stop Mode. + * @note The UART is able to wake up the MCU from Stop 1 mode as long as UART clock is HSI or LSE. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_EnableStopMode(UART_HandleTypeDef *huart) +{ + /* Process Locked */ + __HAL_LOCK(huart); + + /* Set UESM bit */ + SET_BIT(huart->Instance->CR1, USART_CR1_UESM); + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Disable UART Stop Mode. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_DisableStopMode(UART_HandleTypeDef *huart) +{ + /* Process Locked */ + __HAL_LOCK(huart); + + /* Clear UESM bit */ + CLEAR_BIT(huart->Instance->CR1, USART_CR1_UESM); + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +#endif /* USART_CR1_UESM */ +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup UARTEx_Private_Functions + * @{ + */ +#if defined(USART_CR1_UESM) + +/** + * @brief Initialize the UART wake-up from stop mode parameters when triggered by address detection. + * @param huart UART handle. + * @param WakeUpSelection UART wake up from stop mode parameters. 
+ * @retval None + */ +static void UARTEx_Wakeup_AddressConfig(UART_HandleTypeDef *huart, UART_WakeUpTypeDef WakeUpSelection) +{ + assert_param(IS_UART_ADDRESSLENGTH_DETECT(WakeUpSelection.AddressLength)); + + /* Set the USART address length */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_ADDM7, WakeUpSelection.AddressLength); + + /* Set the USART address node */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_ADD, ((uint32_t)WakeUpSelection.Address << UART_CR2_ADDRESS_LSB_POS)); +} +#endif /* USART_CR1_UESM */ + +/** + * @} + */ + +#endif /* HAL_UART_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_fmc.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_fmc.c new file mode 100644 index 000000000..79c3479f6 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_fmc.c @@ -0,0 +1,1082 @@ +/** + ****************************************************************************** + * @file stm32f7xx_ll_fmc.c + * @author MCD Application Team + * @brief FMC Low Layer HAL module driver. + * + * This file provides firmware functions to manage the following + * functionalities of the Flexible Memory Controller (FMC) peripheral memories: + * + Initialization/de-initialization functions + * + Peripheral Control functions + * + Peripheral State functions + * + @verbatim + ============================================================================== + ##### FMC peripheral features ##### + ============================================================================== + [..] The Flexible memory controller (FMC) includes three memory controllers: + (+) The NOR/PSRAM memory controller + (+) The NAND memory controller + (+) The Synchronous DRAM (SDRAM) controller + + [..] The FMC functional block makes the interface with synchronous and asynchronous static + memories, SDRAM memories, and 16-bit PC memory cards. Its main purposes are: + (+) to translate AHB transactions into the appropriate external device protocol + (+) to meet the access time requirements of the external memory devices + + [..] All external memories share the addresses, data and control signals with the controller. + Each external device is accessed by means of a unique Chip Select. The FMC performs + only one access at a time to an external device. + The main features of the FMC controller are the following: + (+) Interface with static-memory mapped devices including: + (++) Static random access memory (SRAM) + (++) Read-only memory (ROM) + (++) NOR Flash memory/OneNAND Flash memory + (++) PSRAM (4 memory banks) + (++) 16-bit PC Card compatible devices + (++) Two banks of NAND Flash memory with ECC hardware to check up to 8 Kbytes of + data + (+) Interface with synchronous DRAM (SDRAM) memories + (+) Independent Chip Select control for each memory bank + (+) Independent configuration for each memory bank + + @endverbatim + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2017 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup FMC_LL FMC Low Layer + * @brief FMC driver modules + * @{ + */ + +#if defined (HAL_SRAM_MODULE_ENABLED) || defined(HAL_NOR_MODULE_ENABLED) || defined(HAL_NAND_MODULE_ENABLED) || defined(HAL_SDRAM_MODULE_ENABLED) + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup FMC_LL_Exported_Functions FMC Low Layer Exported Functions + * @{ + */ + +/** @defgroup FMC_LL_Exported_Functions_NORSRAM FMC Low Layer NOR SRAM Exported Functions + * @brief NORSRAM Controller functions + * + @verbatim + ============================================================================== + ##### How to use NORSRAM device driver ##### + ============================================================================== + + [..] + This driver contains a set of APIs to interface with the FMC NORSRAM banks in order + to run the NORSRAM external devices. + + (+) FMC NORSRAM bank reset using the function FMC_NORSRAM_DeInit() + (+) FMC NORSRAM bank control configuration using the function FMC_NORSRAM_Init() + (+) FMC NORSRAM bank timing configuration using the function FMC_NORSRAM_Timing_Init() + (+) FMC NORSRAM bank extended timing configuration using the function + FMC_NORSRAM_Extended_Timing_Init() + (+) FMC NORSRAM bank enable/disable write operation using the functions + FMC_NORSRAM_WriteOperation_Enable()/FMC_NORSRAM_WriteOperation_Disable() + + +@endverbatim + * @{ + */ + +/** @defgroup FMC_LL_NORSRAM_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * + @verbatim + ============================================================================== + ##### Initialization and de_initialization functions ##### + ============================================================================== + [..] 
+ This section provides functions allowing to: + (+) Initialize and configure the FMC NORSRAM interface + (+) De-initialize the FMC NORSRAM interface + (+) Configure the FMC clock and associated GPIOs + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the FMC_NORSRAM device according to the specified + * control parameters in the FMC_NORSRAM_InitTypeDef + * @param Device Pointer to NORSRAM device instance + * @param Init Pointer to NORSRAM Initialization structure + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NORSRAM_Init(FMC_NORSRAM_TypeDef *Device, FMC_NORSRAM_InitTypeDef* Init) +{ + uint32_t tmpr = 0; + + /* Check the parameters */ + assert_param(IS_FMC_NORSRAM_DEVICE(Device)); + assert_param(IS_FMC_NORSRAM_BANK(Init->NSBank)); + assert_param(IS_FMC_MUX(Init->DataAddressMux)); + assert_param(IS_FMC_MEMORY(Init->MemoryType)); + assert_param(IS_FMC_NORSRAM_MEMORY_WIDTH(Init->MemoryDataWidth)); + assert_param(IS_FMC_BURSTMODE(Init->BurstAccessMode)); + assert_param(IS_FMC_WAIT_POLARITY(Init->WaitSignalPolarity)); + assert_param(IS_FMC_WAIT_SIGNAL_ACTIVE(Init->WaitSignalActive)); + assert_param(IS_FMC_WRITE_OPERATION(Init->WriteOperation)); + assert_param(IS_FMC_WAITE_SIGNAL(Init->WaitSignal)); + assert_param(IS_FMC_EXTENDED_MODE(Init->ExtendedMode)); + assert_param(IS_FMC_ASYNWAIT(Init->AsynchronousWait)); + assert_param(IS_FMC_WRITE_BURST(Init->WriteBurst)); + assert_param(IS_FMC_CONTINOUS_CLOCK(Init->ContinuousClock)); + assert_param(IS_FMC_WRITE_FIFO(Init->WriteFifo)); + assert_param(IS_FMC_PAGESIZE(Init->PageSize)); + + /* Get the BTCR register value */ + tmpr = Device->BTCR[Init->NSBank]; + + /* Clear MBKEN, MUXEN, MTYP, MWID, FACCEN, BURSTEN, WAITPOL, WAITCFG, WREN, + WAITEN, EXTMOD, ASYNCWAIT, CBURSTRW and CCLKEN bits */ + tmpr &= ((uint32_t)~(FMC_BCR1_MBKEN | FMC_BCR1_MUXEN | FMC_BCR1_MTYP | \ + FMC_BCR1_MWID | FMC_BCR1_FACCEN | FMC_BCR1_BURSTEN | \ + FMC_BCR1_WAITPOL | FMC_BCR1_CPSIZE | FMC_BCR1_WAITCFG | \ + FMC_BCR1_WREN | FMC_BCR1_WAITEN | FMC_BCR1_EXTMOD | \ + FMC_BCR1_ASYNCWAIT | FMC_BCR1_CBURSTRW | FMC_BCR1_CCLKEN | FMC_BCR1_WFDIS)); + + /* Set NORSRAM device control parameters */ + tmpr |= (uint32_t)(Init->DataAddressMux |\ + Init->MemoryType |\ + Init->MemoryDataWidth |\ + Init->BurstAccessMode |\ + Init->WaitSignalPolarity |\ + Init->WaitSignalActive |\ + Init->WriteOperation |\ + Init->WaitSignal |\ + Init->ExtendedMode |\ + Init->AsynchronousWait |\ + Init->WriteBurst |\ + Init->ContinuousClock |\ + Init->PageSize |\ + Init->WriteFifo); + + if(Init->MemoryType == FMC_MEMORY_TYPE_NOR) + { + tmpr |= (uint32_t)FMC_NORSRAM_FLASH_ACCESS_ENABLE; + } + + Device->BTCR[Init->NSBank] = tmpr; + + /* Configure synchronous mode when Continuous clock is enabled for bank2..4 */ + if((Init->ContinuousClock == FMC_CONTINUOUS_CLOCK_SYNC_ASYNC) && (Init->NSBank != FMC_NORSRAM_BANK1)) + { + Device->BTCR[FMC_NORSRAM_BANK1] |= (uint32_t)(Init->ContinuousClock); + } + if(Init->NSBank != FMC_NORSRAM_BANK1) + { + Device->BTCR[FMC_NORSRAM_BANK1] |= (uint32_t)(Init->WriteFifo); + } + + return HAL_OK; +} + + +/** + * @brief DeInitialize the FMC_NORSRAM peripheral + * @param Device Pointer to NORSRAM device instance + * @param ExDevice Pointer to NORSRAM extended mode device instance + * @param Bank NORSRAM bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NORSRAM_DeInit(FMC_NORSRAM_TypeDef *Device, FMC_NORSRAM_EXTENDED_TypeDef *ExDevice, uint32_t Bank) +{ + /* Check the parameters */ + assert_param(IS_FMC_NORSRAM_DEVICE(Device)); + 
assert_param(IS_FMC_NORSRAM_EXTENDED_DEVICE(ExDevice)); + assert_param(IS_FMC_NORSRAM_BANK(Bank)); + + /* Disable the FMC_NORSRAM device */ + __FMC_NORSRAM_DISABLE(Device, Bank); + + /* De-initialize the FMC_NORSRAM device */ + /* FMC_NORSRAM_BANK1 */ + if(Bank == FMC_NORSRAM_BANK1) + { + Device->BTCR[Bank] = 0x000030DB; + } + /* FMC_NORSRAM_BANK2, FMC_NORSRAM_BANK3 or FMC_NORSRAM_BANK4 */ + else + { + Device->BTCR[Bank] = 0x000030D2; + } + + Device->BTCR[Bank + 1] = 0x0FFFFFFF; + ExDevice->BWTR[Bank] = 0x0FFFFFFF; + + return HAL_OK; +} + + +/** + * @brief Initialize the FMC_NORSRAM Timing according to the specified + * parameters in the FMC_NORSRAM_TimingTypeDef + * @param Device Pointer to NORSRAM device instance + * @param Timing Pointer to NORSRAM Timing structure + * @param Bank NORSRAM bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NORSRAM_Timing_Init(FMC_NORSRAM_TypeDef *Device, FMC_NORSRAM_TimingTypeDef *Timing, uint32_t Bank) +{ + uint32_t tmpr = 0; + + /* Check the parameters */ + assert_param(IS_FMC_NORSRAM_DEVICE(Device)); + assert_param(IS_FMC_ADDRESS_SETUP_TIME(Timing->AddressSetupTime)); + assert_param(IS_FMC_ADDRESS_HOLD_TIME(Timing->AddressHoldTime)); + assert_param(IS_FMC_DATASETUP_TIME(Timing->DataSetupTime)); + assert_param(IS_FMC_TURNAROUND_TIME(Timing->BusTurnAroundDuration)); + assert_param(IS_FMC_CLK_DIV(Timing->CLKDivision)); + assert_param(IS_FMC_DATA_LATENCY(Timing->DataLatency)); + assert_param(IS_FMC_ACCESS_MODE(Timing->AccessMode)); + assert_param(IS_FMC_NORSRAM_BANK(Bank)); + + /* Get the BTCR register value */ + tmpr = Device->BTCR[Bank + 1]; + + /* Clear ADDSET, ADDHLD, DATAST, BUSTURN, CLKDIV, DATLAT and ACCMOD bits */ + tmpr &= ((uint32_t)~(FMC_BTR1_ADDSET | FMC_BTR1_ADDHLD | FMC_BTR1_DATAST | \ + FMC_BTR1_BUSTURN | FMC_BTR1_CLKDIV | FMC_BTR1_DATLAT | \ + FMC_BTR1_ACCMOD)); + + /* Set FMC_NORSRAM device timing parameters */ + tmpr |= (uint32_t)(Timing->AddressSetupTime |\ + ((Timing->AddressHoldTime) << 4) |\ + ((Timing->DataSetupTime) << 8) |\ + ((Timing->BusTurnAroundDuration) << 16) |\ + (((Timing->CLKDivision)-1) << 20) |\ + (((Timing->DataLatency)-2) << 24) |\ + (Timing->AccessMode) + ); + + Device->BTCR[Bank + 1] = tmpr; + + /* Configure Clock division value (in NORSRAM bank 1) when continuous clock is enabled */ + if(HAL_IS_BIT_SET(Device->BTCR[FMC_NORSRAM_BANK1], FMC_BCR1_CCLKEN)) + { + tmpr = (uint32_t)(Device->BTCR[FMC_NORSRAM_BANK1 + 1] & ~(((uint32_t)0x0F) << 20)); + tmpr |= (uint32_t)(((Timing->CLKDivision)-1) << 20); + Device->BTCR[FMC_NORSRAM_BANK1 + 1] = tmpr; + } + + return HAL_OK; +} + +/** + * @brief Initialize the FMC_NORSRAM Extended mode Timing according to the specified + * parameters in the FMC_NORSRAM_TimingTypeDef + * @param Device Pointer to NORSRAM device instance + * @param Timing Pointer to NORSRAM Timing structure + * @param Bank NORSRAM bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NORSRAM_Extended_Timing_Init(FMC_NORSRAM_EXTENDED_TypeDef *Device, FMC_NORSRAM_TimingTypeDef *Timing, uint32_t Bank, uint32_t ExtendedMode) +{ + uint32_t tmpr = 0; + + /* Check the parameters */ + assert_param(IS_FMC_EXTENDED_MODE(ExtendedMode)); + + /* Set NORSRAM device timing register for write configuration, if extended mode is used */ + if(ExtendedMode == FMC_EXTENDED_MODE_ENABLE) + { + /* Check the parameters */ + assert_param(IS_FMC_NORSRAM_EXTENDED_DEVICE(Device)); + assert_param(IS_FMC_ADDRESS_SETUP_TIME(Timing->AddressSetupTime)); + assert_param(IS_FMC_ADDRESS_HOLD_TIME(Timing->AddressHoldTime)); + 
assert_param(IS_FMC_DATASETUP_TIME(Timing->DataSetupTime)); + assert_param(IS_FMC_TURNAROUND_TIME(Timing->BusTurnAroundDuration)); + assert_param(IS_FMC_CLK_DIV(Timing->CLKDivision)); + assert_param(IS_FMC_DATA_LATENCY(Timing->DataLatency)); + assert_param(IS_FMC_ACCESS_MODE(Timing->AccessMode)); + assert_param(IS_FMC_NORSRAM_BANK(Bank)); + + /* Get the BWTR register value */ + tmpr = Device->BWTR[Bank]; + + /* Clear ADDSET, ADDHLD, DATAST, BUSTURN, CLKDIV, DATLAT and ACCMOD bits */ + tmpr &= ((uint32_t)~(FMC_BWTR1_ADDSET | FMC_BWTR1_ADDHLD | FMC_BWTR1_DATAST | \ + FMC_BWTR1_BUSTURN | FMC_BWTR1_ACCMOD)); + + tmpr |= (uint32_t)(Timing->AddressSetupTime |\ + ((Timing->AddressHoldTime) << 4) |\ + ((Timing->DataSetupTime) << 8) |\ + ((Timing->BusTurnAroundDuration) << 16) |\ + (Timing->AccessMode)); + + Device->BWTR[Bank] = tmpr; + } + else + { + Device->BWTR[Bank] = 0x0FFFFFFF; + } + + return HAL_OK; +} +/** + * @} + */ + +/** @addtogroup FMC_LL_NORSRAM_Private_Functions_Group2 + * @brief management functions + * +@verbatim + ============================================================================== + ##### FMC_NORSRAM Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control dynamically + the FMC NORSRAM interface. + +@endverbatim + * @{ + */ + +/** + * @brief Enables dynamically FMC_NORSRAM write operation. + * @param Device Pointer to NORSRAM device instance + * @param Bank NORSRAM bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NORSRAM_WriteOperation_Enable(FMC_NORSRAM_TypeDef *Device, uint32_t Bank) +{ + /* Check the parameters */ + assert_param(IS_FMC_NORSRAM_DEVICE(Device)); + assert_param(IS_FMC_NORSRAM_BANK(Bank)); + + /* Enable write operation */ + Device->BTCR[Bank] |= FMC_WRITE_OPERATION_ENABLE; + + return HAL_OK; +} + +/** + * @brief Disables dynamically FMC_NORSRAM write operation. + * @param Device Pointer to NORSRAM device instance + * @param Bank NORSRAM bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NORSRAM_WriteOperation_Disable(FMC_NORSRAM_TypeDef *Device, uint32_t Bank) +{ + /* Check the parameters */ + assert_param(IS_FMC_NORSRAM_DEVICE(Device)); + assert_param(IS_FMC_NORSRAM_BANK(Bank)); + + /* Disable write operation */ + Device->BTCR[Bank] &= ~FMC_WRITE_OPERATION_ENABLE; + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup FMC_LL_Exported_Functions_NAND FMC Low Layer NAND Exported Functions + * @brief NAND Controller functions + * + @verbatim + ============================================================================== + ##### How to use NAND device driver ##### + ============================================================================== + [..] + This driver contains a set of APIs to interface with the FMC NAND banks in order + to run the NAND external devices. 
+ + (+) FMC NAND bank reset using the function FMC_NAND_DeInit() + (+) FMC NAND bank control configuration using the function FMC_NAND_Init() + (+) FMC NAND bank common space timing configuration using the function + FMC_NAND_CommonSpace_Timing_Init() + (+) FMC NAND bank attribute space timing configuration using the function + FMC_NAND_AttributeSpace_Timing_Init() + (+) FMC NAND bank enable/disable ECC correction feature using the functions + FMC_NAND_ECC_Enable()/FMC_NAND_ECC_Disable() + (+) FMC NAND bank get ECC correction code using the function FMC_NAND_GetECC() + +@endverbatim + * @{ + */ + +/** @defgroup FMC_LL_NAND_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + ============================================================================== + ##### Initialization and de_initialization functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the FMC NAND interface + (+) De-initialize the FMC NAND interface + (+) Configure the FMC clock and associated GPIOs + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the FMC_NAND device according to the specified + * control parameters in the FMC_NAND_HandleTypeDef + * @param Device Pointer to NAND device instance + * @param Init Pointer to NAND Initialization structure + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NAND_Init(FMC_NAND_TypeDef *Device, FMC_NAND_InitTypeDef *Init) +{ + uint32_t tmpr = 0; + + /* Check the parameters */ + assert_param(IS_FMC_NAND_DEVICE(Device)); + assert_param(IS_FMC_NAND_BANK(Init->NandBank)); + assert_param(IS_FMC_WAIT_FEATURE(Init->Waitfeature)); + assert_param(IS_FMC_NAND_MEMORY_WIDTH(Init->MemoryDataWidth)); + assert_param(IS_FMC_ECC_STATE(Init->EccComputation)); + assert_param(IS_FMC_ECCPAGE_SIZE(Init->ECCPageSize)); + assert_param(IS_FMC_TCLR_TIME(Init->TCLRSetupTime)); + assert_param(IS_FMC_TAR_TIME(Init->TARSetupTime)); + + /* Get the NAND bank 3 register value */ + tmpr = Device->PCR; + + /* Clear PWAITEN, PBKEN, PTYP, PWID, ECCEN, TCLR, TAR and ECCPS bits */ + tmpr &= ((uint32_t)~(FMC_PCR_PWAITEN | FMC_PCR_PBKEN | FMC_PCR_PTYP | \ + FMC_PCR_PWID | FMC_PCR_ECCEN | FMC_PCR_TCLR | \ + FMC_PCR_TAR | FMC_PCR_ECCPS)); + /* Set NAND device control parameters */ + tmpr |= (uint32_t)(Init->Waitfeature |\ + FMC_PCR_MEMORY_TYPE_NAND |\ + Init->MemoryDataWidth |\ + Init->EccComputation |\ + Init->ECCPageSize |\ + ((Init->TCLRSetupTime) << 9) |\ + ((Init->TARSetupTime) << 13)); + + /* NAND bank 3 registers configuration */ + Device->PCR = tmpr; + + return HAL_OK; + +} + +/** + * @brief Initializes the FMC_NAND Common space Timing according to the specified + * parameters in the FMC_NAND_PCC_TimingTypeDef + * @param Device Pointer to NAND device instance + * @param Timing Pointer to NAND timing structure + * @param Bank NAND bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NAND_CommonSpace_Timing_Init(FMC_NAND_TypeDef *Device, FMC_NAND_PCC_TimingTypeDef *Timing, uint32_t Bank) +{ + uint32_t tmpr = 0; + + /* Check the parameters */ + assert_param(IS_FMC_NAND_DEVICE(Device)); + assert_param(IS_FMC_SETUP_TIME(Timing->SetupTime)); + assert_param(IS_FMC_WAIT_TIME(Timing->WaitSetupTime)); + assert_param(IS_FMC_HOLD_TIME(Timing->HoldSetupTime)); + assert_param(IS_FMC_HIZ_TIME(Timing->HiZSetupTime)); + assert_param(IS_FMC_NAND_BANK(Bank)); + + /* Get the NAND bank 3 register value */ 
+ tmpr = Device->PMEM; + + /* Clear MEMSETx, MEMWAITx, MEMHOLDx and MEMHIZx bits */ + tmpr &= ((uint32_t)~(FMC_PMEM_MEMSET3 | FMC_PMEM_MEMWAIT3 | FMC_PMEM_MEMHOLD3 | \ + FMC_PMEM_MEMHIZ3)); + /* Set FMC_NAND device timing parameters */ + tmpr |= (uint32_t)(Timing->SetupTime |\ + ((Timing->WaitSetupTime) << 8) |\ + ((Timing->HoldSetupTime) << 16) |\ + ((Timing->HiZSetupTime) << 24) + ); + + /* NAND bank 3 registers configuration */ + Device->PMEM = tmpr; + + return HAL_OK; +} + +/** + * @brief Initializes the FMC_NAND Attribute space Timing according to the specified + * parameters in the FMC_NAND_PCC_TimingTypeDef + * @param Device Pointer to NAND device instance + * @param Timing Pointer to NAND timing structure + * @param Bank NAND bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NAND_AttributeSpace_Timing_Init(FMC_NAND_TypeDef *Device, FMC_NAND_PCC_TimingTypeDef *Timing, uint32_t Bank) +{ + uint32_t tmpr = 0; + + /* Check the parameters */ + assert_param(IS_FMC_NAND_DEVICE(Device)); + assert_param(IS_FMC_SETUP_TIME(Timing->SetupTime)); + assert_param(IS_FMC_WAIT_TIME(Timing->WaitSetupTime)); + assert_param(IS_FMC_HOLD_TIME(Timing->HoldSetupTime)); + assert_param(IS_FMC_HIZ_TIME(Timing->HiZSetupTime)); + assert_param(IS_FMC_NAND_BANK(Bank)); + + /* Get the NAND bank 3 register value */ + tmpr = Device->PATT; + + /* Clear ATTSETx, ATTWAITx, ATTHOLDx and ATTHIZx bits */ + tmpr &= ((uint32_t)~(FMC_PATT_ATTSET3 | FMC_PATT_ATTWAIT3 | FMC_PATT_ATTHOLD3 | \ + FMC_PATT_ATTHIZ3)); + /* Set FMC_NAND device timing parameters */ + tmpr |= (uint32_t)(Timing->SetupTime |\ + ((Timing->WaitSetupTime) << 8) |\ + ((Timing->HoldSetupTime) << 16) |\ + ((Timing->HiZSetupTime) << 24)); + + /* NAND bank 3 registers configuration */ + Device->PATT = tmpr; + + return HAL_OK; +} + +/** + * @brief DeInitializes the FMC_NAND device + * @param Device Pointer to NAND device instance + * @param Bank NAND bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NAND_DeInit(FMC_NAND_TypeDef *Device, uint32_t Bank) +{ + /* Check the parameters */ + assert_param(IS_FMC_NAND_DEVICE(Device)); + assert_param(IS_FMC_NAND_BANK(Bank)); + + /* Disable the NAND Bank */ + __FMC_NAND_DISABLE(Device); + + /* Set the FMC_NAND_BANK3 registers to their reset values */ + Device->PCR = 0x00000018U; + Device->SR = 0x00000040U; + Device->PMEM = 0xFCFCFCFCU; + Device->PATT = 0xFCFCFCFCU; + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup HAL_FMC_NAND_Group3 Control functions + * @brief management functions + * +@verbatim + ============================================================================== + ##### FMC_NAND Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control dynamically + the FMC NAND interface. + +@endverbatim + * @{ + */ + + +/** + * @brief Enables dynamically FMC_NAND ECC feature. + * @param Device Pointer to NAND device instance + * @param Bank NAND bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NAND_ECC_Enable(FMC_NAND_TypeDef *Device, uint32_t Bank) +{ + /* Check the parameters */ + assert_param(IS_FMC_NAND_DEVICE(Device)); + assert_param(IS_FMC_NAND_BANK(Bank)); + + /* Enable ECC feature */ + Device->PCR |= FMC_PCR_ECCEN; + + return HAL_OK; +} + + +/** + * @brief Disables dynamically FMC_NAND ECC feature. 
+ * @param Device Pointer to NAND device instance + * @param Bank NAND bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NAND_ECC_Disable(FMC_NAND_TypeDef *Device, uint32_t Bank) +{ + /* Check the parameters */ + assert_param(IS_FMC_NAND_DEVICE(Device)); + assert_param(IS_FMC_NAND_BANK(Bank)); + + /* Disable ECC feature */ + Device->PCR &= ~FMC_PCR_ECCEN; + + return HAL_OK; +} + +/** + * @brief Disables dynamically FMC_NAND ECC feature. + * @param Device Pointer to NAND device instance + * @param ECCval Pointer to ECC value + * @param Bank NAND bank number + * @param Timeout Timeout wait value + * @retval HAL status + */ +HAL_StatusTypeDef FMC_NAND_GetECC(FMC_NAND_TypeDef *Device, uint32_t *ECCval, uint32_t Bank, uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Check the parameters */ + assert_param(IS_FMC_NAND_DEVICE(Device)); + assert_param(IS_FMC_NAND_BANK(Bank)); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until FIFO is empty */ + while(__FMC_NAND_GET_FLAG(Device, Bank, FMC_FLAG_FEMPT) == RESET) + { + /* Check for the Timeout */ + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + return HAL_TIMEOUT; + } + } + } + + /* Get the ECCR register value */ + *ECCval = (uint32_t)Device->ECCR; + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup FMC_LL_SDRAM + * @brief SDRAM Controller functions + * + @verbatim + ============================================================================== + ##### How to use SDRAM device driver ##### + ============================================================================== + [..] + This driver contains a set of APIs to interface with the FMC SDRAM banks in order + to run the SDRAM external devices. + + (+) FMC SDRAM bank reset using the function FMC_SDRAM_DeInit() + (+) FMC SDRAM bank control configuration using the function FMC_SDRAM_Init() + (+) FMC SDRAM bank timing configuration using the function FMC_SDRAM_Timing_Init() + (+) FMC SDRAM bank enable/disable write operation using the functions + FMC_SDRAM_WriteOperation_Enable()/FMC_SDRAM_WriteOperation_Disable() + (+) FMC SDRAM bank send command using the function FMC_SDRAM_SendCommand() + +@endverbatim + * @{ + */ + +/** @addtogroup FMC_LL_SDRAM_Private_Functions_Group1 + * @brief Initialization and Configuration functions + * +@verbatim + ============================================================================== + ##### Initialization and de_initialization functions ##### + ============================================================================== + [..] 
+ This section provides functions allowing to: + (+) Initialize and configure the FMC SDRAM interface + (+) De-initialize the FMC SDRAM interface + (+) Configure the FMC clock and associated GPIOs + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the FMC_SDRAM device according to the specified + * control parameters in the FMC_SDRAM_InitTypeDef + * @param Device Pointer to SDRAM device instance + * @param Init Pointer to SDRAM Initialization structure + * @retval HAL status + */ +HAL_StatusTypeDef FMC_SDRAM_Init(FMC_SDRAM_TypeDef *Device, FMC_SDRAM_InitTypeDef *Init) +{ + uint32_t tmpr1 = 0; + uint32_t tmpr2 = 0; + + /* Check the parameters */ + assert_param(IS_FMC_SDRAM_DEVICE(Device)); + assert_param(IS_FMC_SDRAM_BANK(Init->SDBank)); + assert_param(IS_FMC_COLUMNBITS_NUMBER(Init->ColumnBitsNumber)); + assert_param(IS_FMC_ROWBITS_NUMBER(Init->RowBitsNumber)); + assert_param(IS_FMC_SDMEMORY_WIDTH(Init->MemoryDataWidth)); + assert_param(IS_FMC_INTERNALBANK_NUMBER(Init->InternalBankNumber)); + assert_param(IS_FMC_CAS_LATENCY(Init->CASLatency)); + assert_param(IS_FMC_WRITE_PROTECTION(Init->WriteProtection)); + assert_param(IS_FMC_SDCLOCK_PERIOD(Init->SDClockPeriod)); + assert_param(IS_FMC_READ_BURST(Init->ReadBurst)); + assert_param(IS_FMC_READPIPE_DELAY(Init->ReadPipeDelay)); + + /* Set SDRAM bank configuration parameters */ + if (Init->SDBank != FMC_SDRAM_BANK2) + { + tmpr1 = Device->SDCR[FMC_SDRAM_BANK1]; + + /* Clear NC, NR, MWID, NB, CAS, WP, SDCLK, RBURST, and RPIPE bits */ + tmpr1 &= ((uint32_t)~(FMC_SDCR1_NC | FMC_SDCR1_NR | FMC_SDCR1_MWID | \ + FMC_SDCR1_NB | FMC_SDCR1_CAS | FMC_SDCR1_WP | \ + FMC_SDCR1_SDCLK | FMC_SDCR1_RBURST | FMC_SDCR1_RPIPE)); + + tmpr1 |= (uint32_t)(Init->ColumnBitsNumber |\ + Init->RowBitsNumber |\ + Init->MemoryDataWidth |\ + Init->InternalBankNumber |\ + Init->CASLatency |\ + Init->WriteProtection |\ + Init->SDClockPeriod |\ + Init->ReadBurst |\ + Init->ReadPipeDelay + ); + Device->SDCR[FMC_SDRAM_BANK1] = tmpr1; + } + else /* FMC_Bank2_SDRAM */ + { + tmpr1 = Device->SDCR[FMC_SDRAM_BANK1]; + + /* Clear SDCLK, RBURST, and RPIPE bits */ + tmpr1 &= ((uint32_t)~(FMC_SDCR1_SDCLK | FMC_SDCR1_RBURST | FMC_SDCR1_RPIPE)); + + tmpr1 |= (uint32_t)(Init->SDClockPeriod |\ + Init->ReadBurst |\ + Init->ReadPipeDelay); + + tmpr2 = Device->SDCR[FMC_SDRAM_BANK2]; + + /* Clear NC, NR, MWID, NB, CAS, WP, SDCLK, RBURST, and RPIPE bits */ + tmpr2 &= ((uint32_t)~(FMC_SDCR1_NC | FMC_SDCR1_NR | FMC_SDCR1_MWID | \ + FMC_SDCR1_NB | FMC_SDCR1_CAS | FMC_SDCR1_WP | \ + FMC_SDCR1_SDCLK | FMC_SDCR1_RBURST | FMC_SDCR1_RPIPE)); + + tmpr2 |= (uint32_t)(Init->ColumnBitsNumber |\ + Init->RowBitsNumber |\ + Init->MemoryDataWidth |\ + Init->InternalBankNumber |\ + Init->CASLatency |\ + Init->WriteProtection); + + Device->SDCR[FMC_SDRAM_BANK1] = tmpr1; + Device->SDCR[FMC_SDRAM_BANK2] = tmpr2; + } + + return HAL_OK; +} + + +/** + * @brief Initializes the FMC_SDRAM device timing according to the specified + * parameters in the FMC_SDRAM_TimingTypeDef + * @param Device Pointer to SDRAM device instance + * @param Timing Pointer to SDRAM Timing structure + * @param Bank SDRAM bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_SDRAM_Timing_Init(FMC_SDRAM_TypeDef *Device, FMC_SDRAM_TimingTypeDef *Timing, uint32_t Bank) +{ + uint32_t tmpr1 = 0; + uint32_t tmpr2 = 0; + + /* Check the parameters */ + assert_param(IS_FMC_SDRAM_DEVICE(Device)); + assert_param(IS_FMC_LOADTOACTIVE_DELAY(Timing->LoadToActiveDelay)); + assert_param(IS_FMC_EXITSELFREFRESH_DELAY(Timing->ExitSelfRefreshDelay)); + 
assert_param(IS_FMC_SELFREFRESH_TIME(Timing->SelfRefreshTime)); + assert_param(IS_FMC_ROWCYCLE_DELAY(Timing->RowCycleDelay)); + assert_param(IS_FMC_WRITE_RECOVERY_TIME(Timing->WriteRecoveryTime)); + assert_param(IS_FMC_RP_DELAY(Timing->RPDelay)); + assert_param(IS_FMC_RCD_DELAY(Timing->RCDDelay)); + assert_param(IS_FMC_SDRAM_BANK(Bank)); + + /* Set SDRAM device timing parameters */ + if (Bank != FMC_SDRAM_BANK2) + { + tmpr1 = Device->SDTR[FMC_SDRAM_BANK1]; + + /* Clear TMRD, TXSR, TRAS, TRC, TWR, TRP and TRCD bits */ + tmpr1 &= ((uint32_t)~(FMC_SDTR1_TMRD | FMC_SDTR1_TXSR | FMC_SDTR1_TRAS | \ + FMC_SDTR1_TRC | FMC_SDTR1_TWR | FMC_SDTR1_TRP | \ + FMC_SDTR1_TRCD)); + + tmpr1 |= (uint32_t)(((Timing->LoadToActiveDelay)-1) |\ + (((Timing->ExitSelfRefreshDelay)-1) << 4) |\ + (((Timing->SelfRefreshTime)-1) << 8) |\ + (((Timing->RowCycleDelay)-1) << 12) |\ + (((Timing->WriteRecoveryTime)-1) <<16) |\ + (((Timing->RPDelay)-1) << 20) |\ + (((Timing->RCDDelay)-1) << 24)); + Device->SDTR[FMC_SDRAM_BANK1] = tmpr1; + } + else /* FMC_Bank2_SDRAM */ + { + tmpr1 = Device->SDTR[FMC_SDRAM_BANK1]; + + /* Clear TRC and TRP bits */ + tmpr1 &= ((uint32_t)~(FMC_SDTR1_TRC | FMC_SDTR1_TRP)); + + tmpr1 |= (uint32_t)((((Timing->RowCycleDelay)-1) << 12) |\ + (((Timing->RPDelay)-1) << 20)); + + tmpr2 = Device->SDTR[FMC_SDRAM_BANK2]; + + /* Clear TMRD, TXSR, TRAS, TRC, TWR, TRP and TRCD bits */ + tmpr2 &= ((uint32_t)~(FMC_SDTR1_TMRD | FMC_SDTR1_TXSR | FMC_SDTR1_TRAS | \ + FMC_SDTR1_TRC | FMC_SDTR1_TWR | FMC_SDTR1_TRP | \ + FMC_SDTR1_TRCD)); + + tmpr2 |= (uint32_t)(((Timing->LoadToActiveDelay)-1) |\ + (((Timing->ExitSelfRefreshDelay)-1) << 4) |\ + (((Timing->SelfRefreshTime)-1) << 8) |\ + (((Timing->WriteRecoveryTime)-1) <<16) |\ + (((Timing->RCDDelay)-1) << 24)); + + Device->SDTR[FMC_SDRAM_BANK1] = tmpr1; + Device->SDTR[FMC_SDRAM_BANK2] = tmpr2; + } + + return HAL_OK; +} + +/** + * @brief DeInitializes the FMC_SDRAM peripheral + * @param Device Pointer to SDRAM device instance + * @retval HAL status + */ +HAL_StatusTypeDef FMC_SDRAM_DeInit(FMC_SDRAM_TypeDef *Device, uint32_t Bank) +{ + /* Check the parameters */ + assert_param(IS_FMC_SDRAM_DEVICE(Device)); + assert_param(IS_FMC_SDRAM_BANK(Bank)); + + /* De-initialize the SDRAM device */ + Device->SDCR[Bank] = 0x000002D0; + Device->SDTR[Bank] = 0x0FFFFFFF; + Device->SDCMR = 0x00000000; + Device->SDRTR = 0x00000000; + Device->SDSR = 0x00000000; + + return HAL_OK; +} + +/** + * @} + */ + +/** @addtogroup FMC_LL_SDRAMPrivate_Functions_Group2 + * @brief management functions + * +@verbatim + ============================================================================== + ##### FMC_SDRAM Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control dynamically + the FMC SDRAM interface. + +@endverbatim + * @{ + */ + +/** + * @brief Enables dynamically FMC_SDRAM write protection. + * @param Device Pointer to SDRAM device instance + * @param Bank SDRAM bank number + * @retval HAL status + */ +HAL_StatusTypeDef FMC_SDRAM_WriteProtection_Enable(FMC_SDRAM_TypeDef *Device, uint32_t Bank) +{ + /* Check the parameters */ + assert_param(IS_FMC_SDRAM_DEVICE(Device)); + assert_param(IS_FMC_SDRAM_BANK(Bank)); + + /* Enable write protection */ + Device->SDCR[Bank] |= FMC_SDRAM_WRITE_PROTECTION_ENABLE; + + return HAL_OK; +} + +/** + * @brief Disables dynamically FMC_SDRAM write protection. 
+ * @param hsdram FMC_SDRAM handle + * @retval HAL status + */ +HAL_StatusTypeDef FMC_SDRAM_WriteProtection_Disable(FMC_SDRAM_TypeDef *Device, uint32_t Bank) +{ + /* Check the parameters */ + assert_param(IS_FMC_SDRAM_DEVICE(Device)); + assert_param(IS_FMC_SDRAM_BANK(Bank)); + + /* Disable write protection */ + Device->SDCR[Bank] &= ~FMC_SDRAM_WRITE_PROTECTION_ENABLE; + + return HAL_OK; +} + +/** + * @brief Send Command to the FMC SDRAM bank + * @param Device Pointer to SDRAM device instance + * @param Command Pointer to SDRAM command structure + * @param Timing Pointer to SDRAM Timing structure + * @param Timeout Timeout wait value + * @retval HAL state + */ +HAL_StatusTypeDef FMC_SDRAM_SendCommand(FMC_SDRAM_TypeDef *Device, FMC_SDRAM_CommandTypeDef *Command, uint32_t Timeout) +{ + __IO uint32_t tmpr = 0; + + /* Check the parameters */ + assert_param(IS_FMC_SDRAM_DEVICE(Device)); + assert_param(IS_FMC_COMMAND_MODE(Command->CommandMode)); + assert_param(IS_FMC_COMMAND_TARGET(Command->CommandTarget)); + assert_param(IS_FMC_AUTOREFRESH_NUMBER(Command->AutoRefreshNumber)); + assert_param(IS_FMC_MODE_REGISTER(Command->ModeRegisterDefinition)); + + /* Set command register */ + tmpr = (uint32_t)((Command->CommandMode) |\ + (Command->CommandTarget) |\ + (((Command->AutoRefreshNumber)-1) << 5) |\ + ((Command->ModeRegisterDefinition) << 9) + ); + + Device->SDCMR = tmpr; + + return HAL_OK; +} + +/** + * @brief Program the SDRAM Memory Refresh rate. + * @param Device Pointer to SDRAM device instance + * @param RefreshRate The SDRAM refresh rate value. + * @retval HAL state + */ +HAL_StatusTypeDef FMC_SDRAM_ProgramRefreshRate(FMC_SDRAM_TypeDef *Device, uint32_t RefreshRate) +{ + /* Check the parameters */ + assert_param(IS_FMC_SDRAM_DEVICE(Device)); + assert_param(IS_FMC_REFRESH_RATE(RefreshRate)); + + /* Set the refresh rate in command register */ + Device->SDRTR |= (RefreshRate<<1); + + return HAL_OK; +} + +/** + * @brief Set the Number of consecutive SDRAM Memory auto Refresh commands. + * @param Device Pointer to SDRAM device instance + * @param AutoRefreshNumber Specifies the auto Refresh number. + * @retval None + */ +HAL_StatusTypeDef FMC_SDRAM_SetAutoRefreshNumber(FMC_SDRAM_TypeDef *Device, uint32_t AutoRefreshNumber) +{ + /* Check the parameters */ + assert_param(IS_FMC_SDRAM_DEVICE(Device)); + assert_param(IS_FMC_AUTOREFRESH_NUMBER(AutoRefreshNumber)); + + /* Set the Auto-refresh number in command register */ + Device->SDCMR |= (AutoRefreshNumber << 5); + + return HAL_OK; +} + +/** + * @brief Returns the indicated FMC SDRAM bank mode status. + * @param Device Pointer to SDRAM device instance + * @param Bank Defines the FMC SDRAM bank. This parameter can be + * FMC_Bank1_SDRAM or FMC_Bank2_SDRAM. + * @retval The FMC SDRAM bank mode status, could be on of the following values: + * FMC_SDRAM_NORMAL_MODE, FMC_SDRAM_SELF_REFRESH_MODE or + * FMC_SDRAM_POWER_DOWN_MODE. 
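+  * @note  A minimal polling sketch (FMC_SDRAM_DEVICE and the bank macro are
+  *        assumed from the FMC LL definitions; the loop itself is only an
+  *        example):
+  *        while (FMC_SDRAM_GetModeStatus(FMC_SDRAM_DEVICE, FMC_SDRAM_BANK1)
+  *               != FMC_SDRAM_NORMAL_MODE) {}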
+ */ +uint32_t FMC_SDRAM_GetModeStatus(FMC_SDRAM_TypeDef *Device, uint32_t Bank) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_FMC_SDRAM_DEVICE(Device)); + assert_param(IS_FMC_SDRAM_BANK(Bank)); + + /* Get the corresponding bank mode */ + if(Bank == FMC_SDRAM_BANK1) + { + tmpreg = (uint32_t)(Device->SDSR & FMC_SDSR_MODES1); + } + else + { + tmpreg = ((uint32_t)(Device->SDSR & FMC_SDSR_MODES2) >> 2); + } + + /* Return the mode status */ + return tmpreg; +} + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* HAL_SRAM_MODULE_ENABLED || HAL_NOR_MODULE_ENABLED || HAL_NAND_MODULE_ENABLED || HAL_SDRAM_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_sdmmc.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_sdmmc.c new file mode 100644 index 000000000..3cd5b1f59 --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_sdmmc.c @@ -0,0 +1,1521 @@ +/** + ****************************************************************************** + * @file stm32f7xx_ll_sdmmc.c + * @author MCD Application Team + * @brief SDMMC Low Layer HAL module driver. + * + * This file provides firmware functions to manage the following + * functionalities of the SDMMC peripheral: + * + Initialization/de-initialization functions + * + I/O operation functions + * + Peripheral Control functions + * + Peripheral State functions + * + @verbatim + ============================================================================== + ##### SDMMC peripheral features ##### + ============================================================================== + [..] The SD/SDMMC MMC card host interface (SDMMC) provides an interface between the AHB + peripheral bus and MultiMedia cards (MMCs), SD memory cards, SDMMC cards and CE-ATA + devices. + + [..] The SDMMC features include the following: + (+) Full compliance with MultiMedia Card System Specification Version 4.2. Card support + for three different databus modes: 1-bit (default), 4-bit and 8-bit + (+) Full compatibility with previous versions of MultiMedia Cards (forward compatibility) + (+) Full compliance with SD Memory Card Specifications Version 2.0 + (+) Full compliance with SD I/O Card Specification Version 2.0: card support for two + different data bus modes: 1-bit (default) and 4-bit + (+) Full support of the CE-ATA features (full compliance with CE-ATA digital protocol + Rev1.1) + (+) Data transfer up to 48 MHz for the 8 bit mode + (+) Data and command output enable signals to control external bidirectional drivers + + ##### How to use this driver ##### + ============================================================================== + [..] + This driver is a considered as a driver of service for external devices drivers + that interfaces with the SDMMC peripheral. + According to the device used (SD card/ MMC card / SDMMC card ...), a set of APIs + is used in the device's driver to perform SDMMC operations and functionalities. + + This driver is almost transparent for the final user, it is only used to implement other + functionalities of the external device. + + [..] + (+) The SDMMC clock (SDMMCCLK = 48 MHz) is coming from a specific output (MSI, PLLUSB1CLK, + PLLUSB2CLK). Before start working with SDMMC peripheral make sure that the + PLL is well configured. 
+ The SDMMC peripheral uses two clock signals: + (++) SDMMC adapter clock (SDMMCCLK = 48 MHz) + (++) APB2 bus clock (PCLK2) + + -@@- PCLK2 and SDMMC_CK clock frequencies must respect the following condition: + Frequency(PCLK2) >= (3 / 8 x Frequency(SDMMC_CK)) + + (+) Enable/Disable peripheral clock using RCC peripheral macros related to SDMMC + peripheral. + + (+) Enable the Power ON State using the SDMMC_PowerState_ON() + function and disable it using the function SDMMC_PowerState_OFF(). + + (+) Enable/Disable the clock using the __SDMMC_ENABLE()/__SDMMC_DISABLE() macros. + + (+) Enable/Disable the peripheral interrupts using the macros __SDMMC_ENABLE_IT() + and __SDMMC_DISABLE_IT() if you need to use interrupt mode. + + (+) When using the DMA mode + (++) Configure the DMA in the MSP layer of the external device + (++) Active the needed channel Request + (++) Enable the DMA using __SDMMC_DMA_ENABLE() macro or Disable it using the macro + __SDMMC_DMA_DISABLE(). + + (+) To control the CPSM (Command Path State Machine) and send + commands to the card use the SDMMC_SendCommand(), + SDMMC_GetCommandResponse() and SDMMC_GetResponse() functions. First, user has + to fill the command structure (pointer to SDMMC_CmdInitTypeDef) according + to the selected command to be sent. + The parameters that should be filled are: + (++) Command Argument + (++) Command Index + (++) Command Response type + (++) Command Wait + (++) CPSM Status (Enable or Disable). + + -@@- To check if the command is well received, read the SDMMC_CMDRESP + register using the SDMMC_GetCommandResponse(). + The SDMMC responses registers (SDMMC_RESP1 to SDMMC_RESP2), use the + SDMMC_GetResponse() function. + + (+) To control the DPSM (Data Path State Machine) and send/receive + data to/from the card use the SDMMC_DataConfig(), SDMMC_GetDataCounter(), + SDMMC_ReadFIFO(), SDMMC_WriteFIFO() and SDMMC_GetFIFOCount() functions. + + *** Read Operations *** + ======================= + [..] + (#) First, user has to fill the data structure (pointer to + SDMMC_DataInitTypeDef) according to the selected data type to be received. + The parameters that should be filled are: + (++) Data TimeOut + (++) Data Length + (++) Data Block size + (++) Data Transfer direction: should be from card (To SDMMC) + (++) Data Transfer mode + (++) DPSM Status (Enable or Disable) + + (#) Configure the SDMMC resources to receive the data from the card + according to selected transfer mode (Refer to Step 8, 9 and 10). + + (#) Send the selected Read command (refer to step 11). + + (#) Use the SDMMC flags/interrupts to check the transfer status. + + *** Write Operations *** + ======================== + [..] + (#) First, user has to fill the data structure (pointer to + SDMMC_DataInitTypeDef) according to the selected data type to be received. + The parameters that should be filled are: + (++) Data TimeOut + (++) Data Length + (++) Data Block size + (++) Data Transfer direction: should be to card (To CARD) + (++) Data Transfer mode + (++) DPSM Status (Enable or Disable) + + (#) Configure the SDMMC resources to send the data to the card according to + selected transfer mode. + + (#) Send the selected Write command. + + (#) Use the SDMMC flags/interrupts to check the transfer status. + + *** Command management operations *** + ===================================== + [..] + (#) The commands used for Read/Write/Erase operations are managed in + separate functions. + Each function allows to send the needed command with the related argument, + then check the response. 
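+          For example (an illustrative sketch; the SDMMC1 instance name and the
+          Error_Handler() call are assumptions, not part of this driver):
+          if (SDMMC_CmdStopTransfer(SDMMC1) != SDMMC_ERROR_NONE)
+          {
+            Error_Handler();
+          }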
+ By the same approach, you could implement a command and check the response. + + @endverbatim + ****************************************************************************** + * @attention + * + *

+  * © Copyright (c) 2017 STMicroelectronics.
+  * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +#if defined(SDMMC1) + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup SDMMC_LL SDMMC Low Layer + * @brief Low layer module for SD + * @{ + */ + +#if defined(HAL_SD_MODULE_ENABLED) || defined(HAL_MMC_MODULE_ENABLED) + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +static uint32_t SDMMC_GetCmdError(SDMMC_TypeDef *SDMMCx); +static uint32_t SDMMC_GetCmdResp1(SDMMC_TypeDef *SDMMCx, uint8_t SD_CMD, uint32_t Timeout); +static uint32_t SDMMC_GetCmdResp2(SDMMC_TypeDef *SDMMCx); +static uint32_t SDMMC_GetCmdResp3(SDMMC_TypeDef *SDMMCx); +static uint32_t SDMMC_GetCmdResp7(SDMMC_TypeDef *SDMMCx); +static uint32_t SDMMC_GetCmdResp6(SDMMC_TypeDef *SDMMCx, uint8_t SD_CMD, uint16_t *pRCA); + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup SDMMC_LL_Exported_Functions SDMMC Low Layer Exported Functions + * @{ + */ + +/** @defgroup HAL_SDMMC_LL_Group1 Initialization de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization/de-initialization functions ##### + =============================================================================== + [..] This section provides functions allowing to: + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the SDMMC according to the specified + * parameters in the SDMMC_InitTypeDef and create the associated handle. 
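+  * @note   Only the CLKCR bits covered by CLKCR_CLEAR_MASK (clock edge, clock
+  *         bypass, power saving, bus width, hardware flow control and clock
+  *         divider) are modified, so the corresponding fields of the Init
+  *         structure must be filled by the caller before this call.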
+ * @param SDMMCx: Pointer to SDMMC register base + * @param Init: SDMMC initialization structure + * @retval HAL status + */ +HAL_StatusTypeDef SDMMC_Init(SDMMC_TypeDef *SDMMCx, SDMMC_InitTypeDef Init) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_SDMMC_ALL_INSTANCE(SDMMCx)); + assert_param(IS_SDMMC_CLOCK_EDGE(Init.ClockEdge)); + assert_param(IS_SDMMC_CLOCK_BYPASS(Init.ClockBypass)); + assert_param(IS_SDMMC_CLOCK_POWER_SAVE(Init.ClockPowerSave)); + assert_param(IS_SDMMC_BUS_WIDE(Init.BusWide)); + assert_param(IS_SDMMC_HARDWARE_FLOW_CONTROL(Init.HardwareFlowControl)); + assert_param(IS_SDMMC_CLKDIV(Init.ClockDiv)); + + /* Set SDMMC configuration parameters */ + tmpreg |= (Init.ClockEdge |\ + Init.ClockBypass |\ + Init.ClockPowerSave |\ + Init.BusWide |\ + Init.HardwareFlowControl |\ + Init.ClockDiv + ); + + /* Write to SDMMC CLKCR */ + MODIFY_REG(SDMMCx->CLKCR, CLKCR_CLEAR_MASK, tmpreg); + + return HAL_OK; +} + + +/** + * @} + */ + +/** @defgroup HAL_SDMMC_LL_Group2 IO operation functions + * @brief Data transfers functions + * +@verbatim + =============================================================================== + ##### I/O operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the SDMMC data + transfers. + +@endverbatim + * @{ + */ + +/** + * @brief Read data (word) from Rx FIFO in blocking mode (polling) + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_ReadFIFO(SDMMC_TypeDef *SDMMCx) +{ + /* Read data from Rx FIFO */ + return (SDMMCx->FIFO); +} + +/** + * @brief Write data (word) to Tx FIFO in blocking mode (polling) + * @param SDMMCx: Pointer to SDMMC register base + * @param pWriteData: pointer to data to write + * @retval HAL status + */ +HAL_StatusTypeDef SDMMC_WriteFIFO(SDMMC_TypeDef *SDMMCx, uint32_t *pWriteData) +{ + /* Write data to FIFO */ + SDMMCx->FIFO = *pWriteData; + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup HAL_SDMMC_LL_Group3 Peripheral Control functions + * @brief management functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the SDMMC data + transfers. + +@endverbatim + * @{ + */ + +/** + * @brief Set SDMMC Power state to ON. + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +HAL_StatusTypeDef SDMMC_PowerState_ON(SDMMC_TypeDef *SDMMCx) +{ + /* Set power state to ON */ + SDMMCx->POWER = SDMMC_POWER_PWRCTRL; + + /* 1ms: required power up waiting time before starting the SD initialization + sequence */ + HAL_Delay(2); + + return HAL_OK; +} + +/** + * @brief Set SDMMC Power state to OFF. + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +HAL_StatusTypeDef SDMMC_PowerState_OFF(SDMMC_TypeDef *SDMMCx) +{ + /* Set power state to OFF */ + SDMMCx->POWER = (uint32_t)0x00000000; + + return HAL_OK; +} + +/** + * @brief Get SDMMC Power state. + * @param SDMMCx: Pointer to SDMMC register base + * @retval Power status of the controller. 
The returned value can be one of the + * following values: + * - 0x00: Power OFF + * - 0x02: Power UP + * - 0x03: Power ON + */ +uint32_t SDMMC_GetPowerState(SDMMC_TypeDef *SDMMCx) +{ + return (SDMMCx->POWER & SDMMC_POWER_PWRCTRL); +} + +/** + * @brief Configure the SDMMC command path according to the specified parameters in + * SDMMC_CmdInitTypeDef structure and send the command + * @param SDMMCx: Pointer to SDMMC register base + * @param Command: pointer to a SDMMC_CmdInitTypeDef structure that contains + * the configuration information for the SDMMC command + * @retval HAL status + */ +HAL_StatusTypeDef SDMMC_SendCommand(SDMMC_TypeDef *SDMMCx, SDMMC_CmdInitTypeDef *Command) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_SDMMC_CMD_INDEX(Command->CmdIndex)); + assert_param(IS_SDMMC_RESPONSE(Command->Response)); + assert_param(IS_SDMMC_WAIT(Command->WaitForInterrupt)); + assert_param(IS_SDMMC_CPSM(Command->CPSM)); + + /* Set the SDMMC Argument value */ + SDMMCx->ARG = Command->Argument; + + /* Set SDMMC command parameters */ + tmpreg |= (uint32_t)(Command->CmdIndex |\ + Command->Response |\ + Command->WaitForInterrupt |\ + Command->CPSM); + + /* Write to SDMMC CMD register */ + MODIFY_REG(SDMMCx->CMD, CMD_CLEAR_MASK, tmpreg); + + return HAL_OK; +} + +/** + * @brief Return the command index of last command for which response received + * @param SDMMCx: Pointer to SDMMC register base + * @retval Command index of the last command response received + */ +uint8_t SDMMC_GetCommandResponse(SDMMC_TypeDef *SDMMCx) +{ + return (uint8_t)(SDMMCx->RESPCMD); +} + + +/** + * @brief Return the response received from the card for the last command + * @param SDMMCx: Pointer to SDMMC register base + * @param Response: Specifies the SDMMC response register. + * This parameter can be one of the following values: + * @arg SDMMC_RESP1: Response Register 1 + * @arg SDMMC_RESP2: Response Register 2 + * @arg SDMMC_RESP3: Response Register 3 + * @arg SDMMC_RESP4: Response Register 4 + * @retval The Corresponding response register value + */ +uint32_t SDMMC_GetResponse(SDMMC_TypeDef *SDMMCx, uint32_t Response) +{ + uint32_t tmp; + + /* Check the parameters */ + assert_param(IS_SDMMC_RESP(Response)); + + /* Get the response */ + tmp = (uint32_t)(&(SDMMCx->RESP1)) + Response; + + return (*(__IO uint32_t *) tmp); +} + +/** + * @brief Configure the SDMMC data path according to the specified + * parameters in the SDMMC_DataInitTypeDef. + * @param SDMMCx: Pointer to SDMMC register base + * @param Data : pointer to a SDMMC_DataInitTypeDef structure + * that contains the configuration information for the SDMMC data. 
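+  * @note   Illustrative field values for a single 512-byte block read,
+  *         assuming the usual SDMMC LL definitions (the combination is only
+  *         an example): DataBlockSize = SDMMC_DATABLOCK_SIZE_512B,
+  *         TransferDir = SDMMC_TRANSFER_DIR_TO_SDMMC,
+  *         TransferMode = SDMMC_TRANSFER_MODE_BLOCK, DPSM = SDMMC_DPSM_ENABLE.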
+ * @retval HAL status + */ +HAL_StatusTypeDef SDMMC_ConfigData(SDMMC_TypeDef *SDMMCx, SDMMC_DataInitTypeDef* Data) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_SDMMC_DATA_LENGTH(Data->DataLength)); + assert_param(IS_SDMMC_BLOCK_SIZE(Data->DataBlockSize)); + assert_param(IS_SDMMC_TRANSFER_DIR(Data->TransferDir)); + assert_param(IS_SDMMC_TRANSFER_MODE(Data->TransferMode)); + assert_param(IS_SDMMC_DPSM(Data->DPSM)); + + /* Set the SDMMC Data TimeOut value */ + SDMMCx->DTIMER = Data->DataTimeOut; + + /* Set the SDMMC DataLength value */ + SDMMCx->DLEN = Data->DataLength; + + /* Set the SDMMC data configuration parameters */ + tmpreg |= (uint32_t)(Data->DataBlockSize |\ + Data->TransferDir |\ + Data->TransferMode |\ + Data->DPSM); + + /* Write to SDMMC DCTRL */ + MODIFY_REG(SDMMCx->DCTRL, DCTRL_CLEAR_MASK, tmpreg); + + return HAL_OK; + +} + +/** + * @brief Returns number of remaining data bytes to be transferred. + * @param SDMMCx: Pointer to SDMMC register base + * @retval Number of remaining data bytes to be transferred + */ +uint32_t SDMMC_GetDataCounter(SDMMC_TypeDef *SDMMCx) +{ + return (SDMMCx->DCOUNT); +} + +/** + * @brief Get the FIFO data + * @param SDMMCx: Pointer to SDMMC register base + * @retval Data received + */ +uint32_t SDMMC_GetFIFOCount(SDMMC_TypeDef *SDMMCx) +{ + return (SDMMCx->FIFO); +} + +/** + * @brief Sets one of the two options of inserting read wait interval. + * @param SDMMCx: Pointer to SDMMC register base + * @param SDMMC_ReadWaitMode: SDMMC Read Wait operation mode. + * This parameter can be: + * @arg SDMMC_READ_WAIT_MODE_CLK: Read Wait control by stopping SDMMCCLK + * @arg SDMMC_READ_WAIT_MODE_DATA2: Read Wait control using SDMMC_DATA2 + * @retval None + */ +HAL_StatusTypeDef SDMMC_SetSDMMCReadWaitMode(SDMMC_TypeDef *SDMMCx, uint32_t SDMMC_ReadWaitMode) +{ + /* Check the parameters */ + assert_param(IS_SDMMC_READWAIT_MODE(SDMMC_ReadWaitMode)); + + /* Set SDMMC read wait mode */ + MODIFY_REG(SDMMCx->DCTRL, SDMMC_DCTRL_RWMOD, SDMMC_ReadWaitMode); + + return HAL_OK; +} + +/** + * @} + */ + + +/** @defgroup HAL_SDMMC_LL_Group4 Command management functions + * @brief Data transfers functions + * +@verbatim + =============================================================================== + ##### Commands management functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the needed commands. 
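+
+    For example (an illustrative sketch; the SDMMC1 instance and the 512-byte
+    block length are assumptions, not fixed by this driver):
+    (+) errorstate = SDMMC_CmdBlockLength(SDMMC1, 512U);
+    (+) any value other than SDMMC_ERROR_NONE means the command failed and
+        carries the corresponding SDMMC error code.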
+ +@endverbatim + * @{ + */ + +/** + * @brief Send the Data Block Lenght command and check the response + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdBlockLength(SDMMC_TypeDef *SDMMCx, uint32_t BlockSize) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Set Block Size for Card */ + sdmmc_cmdinit.Argument = (uint32_t)BlockSize; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SET_BLOCKLEN; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_SET_BLOCKLEN, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Read Single Block command and check the response + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdReadSingleBlock(SDMMC_TypeDef *SDMMCx, uint32_t ReadAdd) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Set Block Size for Card */ + sdmmc_cmdinit.Argument = (uint32_t)ReadAdd; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_READ_SINGLE_BLOCK; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_READ_SINGLE_BLOCK, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Read Multi Block command and check the response + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdReadMultiBlock(SDMMC_TypeDef *SDMMCx, uint32_t ReadAdd) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Set Block Size for Card */ + sdmmc_cmdinit.Argument = (uint32_t)ReadAdd; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_READ_MULT_BLOCK; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_READ_MULT_BLOCK, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Write Single Block command and check the response + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdWriteSingleBlock(SDMMC_TypeDef *SDMMCx, uint32_t WriteAdd) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Set Block Size for Card */ + sdmmc_cmdinit.Argument = (uint32_t)WriteAdd; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_WRITE_SINGLE_BLOCK; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_WRITE_SINGLE_BLOCK, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Write Multi Block command and check the response + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdWriteMultiBlock(SDMMC_TypeDef *SDMMCx, uint32_t WriteAdd) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Set Block Size for Card */ + sdmmc_cmdinit.Argument = (uint32_t)WriteAdd; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_WRITE_MULT_BLOCK; + 
sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_WRITE_MULT_BLOCK, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Start Address Erase command for SD and check the response + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdSDEraseStartAdd(SDMMC_TypeDef *SDMMCx, uint32_t StartAdd) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Set Block Size for Card */ + sdmmc_cmdinit.Argument = (uint32_t)StartAdd; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SD_ERASE_GRP_START; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_SD_ERASE_GRP_START, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the End Address Erase command for SD and check the response + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdSDEraseEndAdd(SDMMC_TypeDef *SDMMCx, uint32_t EndAdd) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Set Block Size for Card */ + sdmmc_cmdinit.Argument = (uint32_t)EndAdd; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SD_ERASE_GRP_END; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_SD_ERASE_GRP_END, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Start Address Erase command and check the response + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdEraseStartAdd(SDMMC_TypeDef *SDMMCx, uint32_t StartAdd) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Set Block Size for Card */ + sdmmc_cmdinit.Argument = (uint32_t)StartAdd; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_ERASE_GRP_START; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_ERASE_GRP_START, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the End Address Erase command and check the response + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdEraseEndAdd(SDMMC_TypeDef *SDMMCx, uint32_t EndAdd) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Set Block Size for Card */ + sdmmc_cmdinit.Argument = (uint32_t)EndAdd; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_ERASE_GRP_END; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_ERASE_GRP_END, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Erase command and check the response + * @param SDMMCx: Pointer to SDMMC register base + * 
@retval HAL status + */ +uint32_t SDMMC_CmdErase(SDMMC_TypeDef *SDMMCx) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Set Block Size for Card */ + sdmmc_cmdinit.Argument = 0U; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_ERASE; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_ERASE, SDMMC_MAXERASETIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Stop Transfer command and check the response. + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdStopTransfer(SDMMC_TypeDef *SDMMCx) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Send CMD12 STOP_TRANSMISSION */ + sdmmc_cmdinit.Argument = 0U; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_STOP_TRANSMISSION; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_STOP_TRANSMISSION, SDMMC_STOPTRANSFERTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Select Deselect command and check the response. + * @param SDMMCx: Pointer to SDMMC register base + * @param addr: Address of the card to be selected + * @retval HAL status + */ +uint32_t SDMMC_CmdSelDesel(SDMMC_TypeDef *SDMMCx, uint64_t Addr) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Send CMD7 SDMMC_SEL_DESEL_CARD */ + sdmmc_cmdinit.Argument = (uint32_t)Addr; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SEL_DESEL_CARD; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_SEL_DESEL_CARD, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Go Idle State command and check the response. + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdGoIdleState(SDMMC_TypeDef *SDMMCx) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + sdmmc_cmdinit.Argument = 0U; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_GO_IDLE_STATE; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_NO; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdError(SDMMCx); + + return errorstate; +} + +/** + * @brief Send the Operating Condition command and check the response. 
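+  * @note  This sends CMD8 with the 0xAA check pattern and the 2.7-3.6 V supply
+  *        voltage indication; a valid R7 response identifies an SD V2.0
+  *        compliant card (see SDMMC_GetCmdResp7()).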
+ * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdOperCond(SDMMC_TypeDef *SDMMCx) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Send CMD8 to verify SD card interface operating condition */ + /* Argument: - [31:12]: Reserved (shall be set to '0') + - [11:8]: Supply Voltage (VHS) 0x1 (Range: 2.7-3.6 V) + - [7:0]: Check Pattern (recommended 0xAA) */ + /* CMD Response: R7 */ + sdmmc_cmdinit.Argument = SDMMC_CHECK_PATTERN; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_HS_SEND_EXT_CSD; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp7(SDMMCx); + + return errorstate; +} + +/** + * @brief Send the Application command to verify that that the next command + * is an application specific com-mand rather than a standard command + * and check the response. + * @param SDMMCx: Pointer to SDMMC register base + * @param Argument: Command Argument + * @retval HAL status + */ +uint32_t SDMMC_CmdAppCommand(SDMMC_TypeDef *SDMMCx, uint32_t Argument) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + sdmmc_cmdinit.Argument = (uint32_t)Argument; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_APP_CMD; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + /* If there is a HAL_ERROR, it is a MMC card, else + it is a SD card: SD card 2.0 (voltage range mismatch) + or SD card 1.x */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_APP_CMD, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the command asking the accessed card to send its operating + * condition register (OCR) + * @param SDMMCx: Pointer to SDMMC register base + * @param Argument: Command Argument + * @retval HAL status + */ +uint32_t SDMMC_CmdAppOperCommand(SDMMC_TypeDef *SDMMCx, uint32_t Argument) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + sdmmc_cmdinit.Argument = SDMMC_VOLTAGE_WINDOW_SD | Argument; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SD_APP_OP_COND; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp3(SDMMCx); + + return errorstate; +} + +/** + * @brief Send the Bus Width command and check the response. + * @param SDMMCx: Pointer to SDMMC register base + * @param BusWidth: BusWidth + * @retval HAL status + */ +uint32_t SDMMC_CmdBusWidth(SDMMC_TypeDef *SDMMCx, uint32_t BusWidth) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + sdmmc_cmdinit.Argument = (uint32_t)BusWidth; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_APP_SD_SET_BUSWIDTH; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_APP_SD_SET_BUSWIDTH, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Send SCR command and check the response. 
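+  * @note  CMD51 (SD_APP_SEND_SCR) is an application-specific command: it is
+  *        expected to be preceded by SDMMC_CmdAppCommand() so that the card
+  *        treats the next command as an application command.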
+ * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdSendSCR(SDMMC_TypeDef *SDMMCx) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Send CMD51 SD_APP_SEND_SCR */ + sdmmc_cmdinit.Argument = 0U; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SD_APP_SEND_SCR; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_SD_APP_SEND_SCR, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Send CID command and check the response. + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdSendCID(SDMMC_TypeDef *SDMMCx) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Send CMD2 ALL_SEND_CID */ + sdmmc_cmdinit.Argument = 0U; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_ALL_SEND_CID; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_LONG; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp2(SDMMCx); + + return errorstate; +} + +/** + * @brief Send the Send CSD command and check the response. + * @param SDMMCx: Pointer to SDMMC register base + * @param Argument: Command Argument + * @retval HAL status + */ +uint32_t SDMMC_CmdSendCSD(SDMMC_TypeDef *SDMMCx, uint32_t Argument) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Send CMD9 SEND_CSD */ + sdmmc_cmdinit.Argument = Argument; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SEND_CSD; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_LONG; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp2(SDMMCx); + + return errorstate; +} + +/** + * @brief Send the Send CSD command and check the response. + * @param SDMMCx: Pointer to SDMMC register base + * @param pRCA: Card RCA + * @retval HAL status + */ +uint32_t SDMMC_CmdSetRelAdd(SDMMC_TypeDef *SDMMCx, uint16_t *pRCA) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Send CMD3 SD_CMD_SET_REL_ADDR */ + sdmmc_cmdinit.Argument = 0U; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SET_REL_ADDR; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp6(SDMMCx, SDMMC_CMD_SET_REL_ADDR, pRCA); + + return errorstate; +} + +/** + * @brief Send the Status command and check the response. 
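+  * @note  CMD13 (SEND_STATUS) is an addressed command: the Argument parameter
+  *        is expected to carry the card relative address (RCA) in its upper
+  *        16 bits.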
+ * @param SDMMCx: Pointer to SDMMC register base + * @param Argument: Command Argument + * @retval HAL status + */ +uint32_t SDMMC_CmdSendStatus(SDMMC_TypeDef *SDMMCx, uint32_t Argument) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + sdmmc_cmdinit.Argument = Argument; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SEND_STATUS; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_SEND_STATUS, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Send the Status register command and check the response. + * @param SDMMCx: Pointer to SDMMC register base + * @retval HAL status + */ +uint32_t SDMMC_CmdStatusRegister(SDMMC_TypeDef *SDMMCx) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + sdmmc_cmdinit.Argument = 0U; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SD_APP_STATUS; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_SD_APP_STATUS, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @brief Sends host capacity support information and activates the card's + * initialization process. Send SDMMC_CMD_SEND_OP_COND command + * @param SDMMCx: Pointer to SDMMC register base + * @parame Argument: Argument used for the command + * @retval HAL status + */ +uint32_t SDMMC_CmdOpCondition(SDMMC_TypeDef *SDMMCx, uint32_t Argument) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + sdmmc_cmdinit.Argument = Argument; + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_SEND_OP_COND; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp3(SDMMCx); + + return errorstate; +} + +/** + * @brief Checks switchable function and switch card function. SDMMC_CMD_HS_SWITCH comand + * @param SDMMCx: Pointer to SDMMC register base + * @parame Argument: Argument used for the command + * @retval HAL status + */ +uint32_t SDMMC_CmdSwitch(SDMMC_TypeDef *SDMMCx, uint32_t Argument) +{ + SDMMC_CmdInitTypeDef sdmmc_cmdinit; + uint32_t errorstate; + + /* Send CMD6 to activate SDR50 Mode and Power Limit 1.44W */ + /* CMD Response: R1 */ + sdmmc_cmdinit.Argument = Argument; /* SDMMC_SDR25_SWITCH_PATTERN */ + sdmmc_cmdinit.CmdIndex = SDMMC_CMD_HS_SWITCH; + sdmmc_cmdinit.Response = SDMMC_RESPONSE_SHORT; + sdmmc_cmdinit.WaitForInterrupt = SDMMC_WAIT_NO; + sdmmc_cmdinit.CPSM = SDMMC_CPSM_ENABLE; + (void)SDMMC_SendCommand(SDMMCx, &sdmmc_cmdinit); + + /* Check for error conditions */ + errorstate = SDMMC_GetCmdResp1(SDMMCx, SDMMC_CMD_HS_SWITCH, SDMMC_CMDTIMEOUT); + + return errorstate; +} + +/** + * @} + */ + +/* Private function ----------------------------------------------------------*/ +/** @addtogroup SD_Private_Functions + * @{ + */ + +/** + * @brief Checks for error conditions for CMD0. + * @param hsd: SD handle + * @retval SD Card error state + */ +static uint32_t SDMMC_GetCmdError(SDMMC_TypeDef *SDMMCx) +{ + /* 8 is the number of required instructions cycles for the below loop statement. 
+ The SDMMC_CMDTIMEOUT is expressed in ms */ + register uint32_t count = SDMMC_CMDTIMEOUT * (SystemCoreClock / 8U /1000U); + + do + { + if (count-- == 0U) + { + return SDMMC_ERROR_TIMEOUT; + } + + }while(!__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CMDSENT)); + + /* Clear all the static flags */ + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_STATIC_CMD_FLAGS); + + return SDMMC_ERROR_NONE; +} + +/** + * @brief Checks for error conditions for R1 response. + * @param hsd: SD handle + * @param SD_CMD: The sent command index + * @retval SD Card error state + */ +static uint32_t SDMMC_GetCmdResp1(SDMMC_TypeDef *SDMMCx, uint8_t SD_CMD, uint32_t Timeout) +{ + uint32_t response_r1; + uint32_t sta_reg; + + /* 8 is the number of required instructions cycles for the below loop statement. + The Timeout is expressed in ms */ + register uint32_t count = Timeout * (SystemCoreClock / 8U /1000U); + + do + { + if (count-- == 0U) + { + return SDMMC_ERROR_TIMEOUT; + } + sta_reg = SDMMCx->STA; + }while(((sta_reg & (SDMMC_FLAG_CCRCFAIL | SDMMC_FLAG_CMDREND | SDMMC_FLAG_CTIMEOUT)) == 0U) || + ((sta_reg & SDMMC_FLAG_CMDACT) != 0U )); + + if(__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CTIMEOUT)) + { + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_FLAG_CTIMEOUT); + + return SDMMC_ERROR_CMD_RSP_TIMEOUT; + } + else if(__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CCRCFAIL)) + { + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_FLAG_CCRCFAIL); + + return SDMMC_ERROR_CMD_CRC_FAIL; + } + else + { + /* Nothing to do */ + } + + /* Clear all the static flags */ + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_STATIC_CMD_FLAGS); + + /* Check response received is of desired command */ + if(SDMMC_GetCommandResponse(SDMMCx) != SD_CMD) + { + return SDMMC_ERROR_CMD_CRC_FAIL; + } + + /* We have received response, retrieve it for analysis */ + response_r1 = SDMMC_GetResponse(SDMMCx, SDMMC_RESP1); + + if((response_r1 & SDMMC_OCR_ERRORBITS) == SDMMC_ALLZERO) + { + return SDMMC_ERROR_NONE; + } + else if((response_r1 & SDMMC_OCR_ADDR_OUT_OF_RANGE) == SDMMC_OCR_ADDR_OUT_OF_RANGE) + { + return SDMMC_ERROR_ADDR_OUT_OF_RANGE; + } + else if((response_r1 & SDMMC_OCR_ADDR_MISALIGNED) == SDMMC_OCR_ADDR_MISALIGNED) + { + return SDMMC_ERROR_ADDR_MISALIGNED; + } + else if((response_r1 & SDMMC_OCR_BLOCK_LEN_ERR) == SDMMC_OCR_BLOCK_LEN_ERR) + { + return SDMMC_ERROR_BLOCK_LEN_ERR; + } + else if((response_r1 & SDMMC_OCR_ERASE_SEQ_ERR) == SDMMC_OCR_ERASE_SEQ_ERR) + { + return SDMMC_ERROR_ERASE_SEQ_ERR; + } + else if((response_r1 & SDMMC_OCR_BAD_ERASE_PARAM) == SDMMC_OCR_BAD_ERASE_PARAM) + { + return SDMMC_ERROR_BAD_ERASE_PARAM; + } + else if((response_r1 & SDMMC_OCR_WRITE_PROT_VIOLATION) == SDMMC_OCR_WRITE_PROT_VIOLATION) + { + return SDMMC_ERROR_WRITE_PROT_VIOLATION; + } + else if((response_r1 & SDMMC_OCR_LOCK_UNLOCK_FAILED) == SDMMC_OCR_LOCK_UNLOCK_FAILED) + { + return SDMMC_ERROR_LOCK_UNLOCK_FAILED; + } + else if((response_r1 & SDMMC_OCR_COM_CRC_FAILED) == SDMMC_OCR_COM_CRC_FAILED) + { + return SDMMC_ERROR_COM_CRC_FAILED; + } + else if((response_r1 & SDMMC_OCR_ILLEGAL_CMD) == SDMMC_OCR_ILLEGAL_CMD) + { + return SDMMC_ERROR_ILLEGAL_CMD; + } + else if((response_r1 & SDMMC_OCR_CARD_ECC_FAILED) == SDMMC_OCR_CARD_ECC_FAILED) + { + return SDMMC_ERROR_CARD_ECC_FAILED; + } + else if((response_r1 & SDMMC_OCR_CC_ERROR) == SDMMC_OCR_CC_ERROR) + { + return SDMMC_ERROR_CC_ERR; + } + else if((response_r1 & SDMMC_OCR_STREAM_READ_UNDERRUN) == SDMMC_OCR_STREAM_READ_UNDERRUN) + { + return SDMMC_ERROR_STREAM_READ_UNDERRUN; + } + else if((response_r1 & SDMMC_OCR_STREAM_WRITE_OVERRUN) == SDMMC_OCR_STREAM_WRITE_OVERRUN) + { + return 
SDMMC_ERROR_STREAM_WRITE_OVERRUN; + } + else if((response_r1 & SDMMC_OCR_CID_CSD_OVERWRITE) == SDMMC_OCR_CID_CSD_OVERWRITE) + { + return SDMMC_ERROR_CID_CSD_OVERWRITE; + } + else if((response_r1 & SDMMC_OCR_WP_ERASE_SKIP) == SDMMC_OCR_WP_ERASE_SKIP) + { + return SDMMC_ERROR_WP_ERASE_SKIP; + } + else if((response_r1 & SDMMC_OCR_CARD_ECC_DISABLED) == SDMMC_OCR_CARD_ECC_DISABLED) + { + return SDMMC_ERROR_CARD_ECC_DISABLED; + } + else if((response_r1 & SDMMC_OCR_ERASE_RESET) == SDMMC_OCR_ERASE_RESET) + { + return SDMMC_ERROR_ERASE_RESET; + } + else if((response_r1 & SDMMC_OCR_AKE_SEQ_ERROR) == SDMMC_OCR_AKE_SEQ_ERROR) + { + return SDMMC_ERROR_AKE_SEQ_ERR; + } + else + { + return SDMMC_ERROR_GENERAL_UNKNOWN_ERR; + } +} + +/** + * @brief Checks for error conditions for R2 (CID or CSD) response. + * @param hsd: SD handle + * @retval SD Card error state + */ +static uint32_t SDMMC_GetCmdResp2(SDMMC_TypeDef *SDMMCx) +{ + uint32_t sta_reg; + /* 8 is the number of required instructions cycles for the below loop statement. + The SDMMC_CMDTIMEOUT is expressed in ms */ + register uint32_t count = SDMMC_CMDTIMEOUT * (SystemCoreClock / 8U /1000U); + + do + { + if (count-- == 0U) + { + return SDMMC_ERROR_TIMEOUT; + } + sta_reg = SDMMCx->STA; + }while(((sta_reg & (SDMMC_FLAG_CCRCFAIL | SDMMC_FLAG_CMDREND | SDMMC_FLAG_CTIMEOUT)) == 0U) || + ((sta_reg & SDMMC_FLAG_CMDACT) != 0U )); + + if (__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CTIMEOUT)) + { + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_FLAG_CTIMEOUT); + + return SDMMC_ERROR_CMD_RSP_TIMEOUT; + } + else if (__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CCRCFAIL)) + { + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_FLAG_CCRCFAIL); + + return SDMMC_ERROR_CMD_CRC_FAIL; + } + else + { + /* No error flag set */ + /* Clear all the static flags */ + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_STATIC_CMD_FLAGS); + } + + return SDMMC_ERROR_NONE; +} + +/** + * @brief Checks for error conditions for R3 (OCR) response. + * @param hsd: SD handle + * @retval SD Card error state + */ +static uint32_t SDMMC_GetCmdResp3(SDMMC_TypeDef *SDMMCx) +{ + uint32_t sta_reg; + /* 8 is the number of required instructions cycles for the below loop statement. + The SDMMC_CMDTIMEOUT is expressed in ms */ + register uint32_t count = SDMMC_CMDTIMEOUT * (SystemCoreClock / 8U /1000U); + + do + { + if (count-- == 0U) + { + return SDMMC_ERROR_TIMEOUT; + } + sta_reg = SDMMCx->STA; + }while(((sta_reg & (SDMMC_FLAG_CCRCFAIL | SDMMC_FLAG_CMDREND | SDMMC_FLAG_CTIMEOUT)) == 0U) || + ((sta_reg & SDMMC_FLAG_CMDACT) != 0U )); + + if(__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CTIMEOUT)) + { + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_FLAG_CTIMEOUT); + + return SDMMC_ERROR_CMD_RSP_TIMEOUT; + } + else + { + /* Clear all the static flags */ + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_STATIC_CMD_FLAGS); + } + + return SDMMC_ERROR_NONE; +} + +/** + * @brief Checks for error conditions for R6 (RCA) response. + * @param hsd: SD handle + * @param SD_CMD: The sent command index + * @param pRCA: Pointer to the variable that will contain the SD card relative + * address RCA + * @retval SD Card error state + */ +static uint32_t SDMMC_GetCmdResp6(SDMMC_TypeDef *SDMMCx, uint8_t SD_CMD, uint16_t *pRCA) +{ + uint32_t response_r1; + uint32_t sta_reg; + + /* 8 is the number of required instructions cycles for the below loop statement. 
+ The SDMMC_CMDTIMEOUT is expressed in ms */ + register uint32_t count = SDMMC_CMDTIMEOUT * (SystemCoreClock / 8U /1000U); + + do + { + if (count-- == 0U) + { + return SDMMC_ERROR_TIMEOUT; + } + sta_reg = SDMMCx->STA; + }while(((sta_reg & (SDMMC_FLAG_CCRCFAIL | SDMMC_FLAG_CMDREND | SDMMC_FLAG_CTIMEOUT)) == 0U) || + ((sta_reg & SDMMC_FLAG_CMDACT) != 0U )); + + if(__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CTIMEOUT)) + { + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_FLAG_CTIMEOUT); + + return SDMMC_ERROR_CMD_RSP_TIMEOUT; + } + else if(__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CCRCFAIL)) + { + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_FLAG_CCRCFAIL); + + return SDMMC_ERROR_CMD_CRC_FAIL; + } + else + { + /* Nothing to do */ + } + + /* Check response received is of desired command */ + if(SDMMC_GetCommandResponse(SDMMCx) != SD_CMD) + { + return SDMMC_ERROR_CMD_CRC_FAIL; + } + + /* Clear all the static flags */ + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_STATIC_CMD_FLAGS); + + /* We have received response, retrieve it. */ + response_r1 = SDMMC_GetResponse(SDMMCx, SDMMC_RESP1); + + if((response_r1 & (SDMMC_R6_GENERAL_UNKNOWN_ERROR | SDMMC_R6_ILLEGAL_CMD | SDMMC_R6_COM_CRC_FAILED)) == SDMMC_ALLZERO) + { + *pRCA = (uint16_t) (response_r1 >> 16); + + return SDMMC_ERROR_NONE; + } + else if((response_r1 & SDMMC_R6_ILLEGAL_CMD) == SDMMC_R6_ILLEGAL_CMD) + { + return SDMMC_ERROR_ILLEGAL_CMD; + } + else if((response_r1 & SDMMC_R6_COM_CRC_FAILED) == SDMMC_R6_COM_CRC_FAILED) + { + return SDMMC_ERROR_COM_CRC_FAILED; + } + else + { + return SDMMC_ERROR_GENERAL_UNKNOWN_ERR; + } +} + +/** + * @brief Checks for error conditions for R7 response. + * @param hsd: SD handle + * @retval SD Card error state + */ +static uint32_t SDMMC_GetCmdResp7(SDMMC_TypeDef *SDMMCx) +{ + uint32_t sta_reg; + /* 8 is the number of required instructions cycles for the below loop statement. + The SDMMC_CMDTIMEOUT is expressed in ms */ + register uint32_t count = SDMMC_CMDTIMEOUT * (SystemCoreClock / 8U /1000U); + + do + { + if (count-- == 0U) + { + return SDMMC_ERROR_TIMEOUT; + } + sta_reg = SDMMCx->STA; + }while(((sta_reg & (SDMMC_FLAG_CCRCFAIL | SDMMC_FLAG_CMDREND | SDMMC_FLAG_CTIMEOUT)) == 0U) || + ((sta_reg & SDMMC_FLAG_CMDACT) != 0U )); + + if(__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CTIMEOUT)) + { + /* Card is SD V2.0 compliant */ + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_FLAG_CTIMEOUT); + + return SDMMC_ERROR_CMD_RSP_TIMEOUT; + } + else if(__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CCRCFAIL)) + { + /* Card is SD V2.0 compliant */ + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_FLAG_CCRCFAIL); + + return SDMMC_ERROR_CMD_CRC_FAIL; + } + else + { + /* Nothing to do */ + } + + if(__SDMMC_GET_FLAG(SDMMCx, SDMMC_FLAG_CMDREND)) + { + /* Card is SD V2.0 compliant */ + __SDMMC_CLEAR_FLAG(SDMMCx, SDMMC_FLAG_CMDREND); + } + + return SDMMC_ERROR_NONE; + +} + +/** + * @} + */ + +#endif /* HAL_SD_MODULE_ENABLED || HAL_MMC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + +#endif /* SDMMC1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c new file mode 100644 index 000000000..cb59bad1a --- /dev/null +++ b/src/third_party/stm32_cubeide/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c @@ -0,0 +1,2119 @@ +/** + ****************************************************************************** + * @file stm32f7xx_ll_usb.c + * @author MCD Application Team + * @brief USB Low Layer HAL module driver. 
+ * + * This file provides firmware functions to manage the following + * functionalities of the USB Peripheral Controller: + * + Initialization/de-initialization functions + * + I/O operation functions + * + Peripheral Control functions + * + Peripheral State functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Fill parameters of Init structure in USB_OTG_CfgTypeDef structure. + + (#) Call USB_CoreInit() API to initialize the USB Core peripheral. + + (#) The upper HAL HCD/PCD driver will call the right routines for its internal processes. + + @endverbatim + ****************************************************************************** + * @attention + * + *
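As a quick orientation for the usage steps listed above, a minimal device-mode sketch built only from the LL calls defined in this file; the configuration values, the USB_OTG_EMBEDDED_PHY selector and the USB_OTG_FS instance are illustrative assumptions, and in this firmware the HAL PCD layer performs these calls rather than application code:

/* Illustrative sketch only -- values below are assumptions, not taken from this project */
USB_OTG_CfgTypeDef cfg = {0};

cfg.dev_endpoints       = 4U;                    /* endpoints used by the class driver */
cfg.speed               = USBD_FS_SPEED;
cfg.phy_itface          = USB_OTG_EMBEDDED_PHY;  /* FS embedded PHY (assumed selector name) */
cfg.dma_enable          = 0U;
cfg.Sof_enable          = 0U;
cfg.vbus_sensing_enable = 0U;

(void)USB_CoreInit(USB_OTG_FS, cfg);                    /* initialize the USB core, as step (#) above */
(void)USB_SetCurrentMode(USB_OTG_FS, USB_DEVICE_MODE);
(void)USB_DevInit(USB_OTG_FS, cfg);                     /* normally driven by the HAL PCD driver */
(void)USB_DevConnect(USB_OTG_FS);                       /* enable the DP pull-up */
(void)USB_EnableGlobalInt(USB_OTG_FS);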
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_LL_USB_DRIVER + * @{ + */ + +#if defined (HAL_PCD_MODULE_ENABLED) || defined (HAL_HCD_MODULE_ENABLED) +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +static HAL_StatusTypeDef USB_CoreReset(USB_OTG_GlobalTypeDef *USBx); + +#ifdef USB_HS_PHYC +static HAL_StatusTypeDef USB_HS_PHYCInit(USB_OTG_GlobalTypeDef *USBx); +#endif + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup USB_LL_Exported_Functions USB Low Layer Exported Functions + * @{ + */ + +/** @defgroup USB_LL_Exported_Functions_Group1 Initialization/de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization/de-initialization functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the USB Core + * @param USBx USB Instance + * @param cfg pointer to a USB_OTG_CfgTypeDef structure that contains + * the configuration information for the specified USBx peripheral. 
+ * @retval HAL status + */ +HAL_StatusTypeDef USB_CoreInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg) +{ + HAL_StatusTypeDef ret; + + if (cfg.phy_itface == USB_OTG_ULPI_PHY) + { + USBx->GCCFG &= ~(USB_OTG_GCCFG_PWRDWN); + + /* Init The ULPI Interface */ + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_TSDPS | USB_OTG_GUSBCFG_ULPIFSLS | USB_OTG_GUSBCFG_PHYSEL); + +#if defined(STM32F722xx) || defined(STM32F723xx) || defined(STM32F730xx) || defined(STM32F732xx) || defined(STM32F733xx) + /* Select ULPI Interface */ + USBx->GUSBCFG |= USB_OTG_GUSBCFG_ULPI_UTMI_SEL; +#endif /* defined(STM32F722xx) || defined(STM32F723xx) || defined(STM32F730xx) || defined(STM32F732xx) || defined(STM32F733xx) */ + + /* Select vbus source */ + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_ULPIEVBUSD | USB_OTG_GUSBCFG_ULPIEVBUSI); + if (cfg.use_external_vbus == 1U) + { + USBx->GUSBCFG |= USB_OTG_GUSBCFG_ULPIEVBUSD; + } + /* Reset after a PHY select */ + ret = USB_CoreReset(USBx); + } +#ifdef USB_HS_PHYC + else if (cfg.phy_itface == USB_OTG_HS_EMBEDDED_PHY) + { + USBx->GCCFG &= ~(USB_OTG_GCCFG_PWRDWN); + + /* Init The UTMI Interface */ + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_TSDPS | USB_OTG_GUSBCFG_ULPIFSLS | USB_OTG_GUSBCFG_PHYSEL); + + /* Select vbus source */ + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_ULPIEVBUSD | USB_OTG_GUSBCFG_ULPIEVBUSI); + + /* Select UTMI Interace */ + USBx->GUSBCFG &= ~USB_OTG_GUSBCFG_ULPI_UTMI_SEL; + USBx->GCCFG |= USB_OTG_GCCFG_PHYHSEN; + + /* Enables control of a High Speed USB PHY */ + if (USB_HS_PHYCInit(USBx) != HAL_OK) + { + return HAL_ERROR; + } + + if (cfg.use_external_vbus == 1U) + { + USBx->GUSBCFG |= USB_OTG_GUSBCFG_ULPIEVBUSD; + } + /* Reset after a PHY select */ + ret = USB_CoreReset(USBx); + } +#endif + else /* FS interface (embedded Phy) */ + { + /* Select FS Embedded PHY */ + USBx->GUSBCFG |= USB_OTG_GUSBCFG_PHYSEL; + + /* Reset after a PHY select and set Host mode */ + ret = USB_CoreReset(USBx); + + /* Activate the USB Transceiver */ + USBx->GCCFG |= USB_OTG_GCCFG_PWRDWN; + } + + if (cfg.dma_enable == 1U) + { + USBx->GAHBCFG |= USB_OTG_GAHBCFG_HBSTLEN_2; + USBx->GAHBCFG |= USB_OTG_GAHBCFG_DMAEN; + } + + return ret; +} + + +/** + * @brief Set the USB turnaround time + * @param USBx USB Instance + * @param hclk: AHB clock frequency + * @retval USB turnaround time In PHY Clocks number + */ +HAL_StatusTypeDef USB_SetTurnaroundTime(USB_OTG_GlobalTypeDef *USBx, + uint32_t hclk, uint8_t speed) +{ + uint32_t UsbTrd; + + /* The USBTRD is configured according to the tables below, depending on AHB frequency + used by application. 
In the low AHB frequency range it is used to stretch enough the USB response + time to IN tokens, the USB turnaround time, so to compensate for the longer AHB read access + latency to the Data FIFO */ + if (speed == USBD_FS_SPEED) + { + if ((hclk >= 14200000U) && (hclk < 15000000U)) + { + /* hclk Clock Range between 14.2-15 MHz */ + UsbTrd = 0xFU; + } + else if ((hclk >= 15000000U) && (hclk < 16000000U)) + { + /* hclk Clock Range between 15-16 MHz */ + UsbTrd = 0xEU; + } + else if ((hclk >= 16000000U) && (hclk < 17200000U)) + { + /* hclk Clock Range between 16-17.2 MHz */ + UsbTrd = 0xDU; + } + else if ((hclk >= 17200000U) && (hclk < 18500000U)) + { + /* hclk Clock Range between 17.2-18.5 MHz */ + UsbTrd = 0xCU; + } + else if ((hclk >= 18500000U) && (hclk < 20000000U)) + { + /* hclk Clock Range between 18.5-20 MHz */ + UsbTrd = 0xBU; + } + else if ((hclk >= 20000000U) && (hclk < 21800000U)) + { + /* hclk Clock Range between 20-21.8 MHz */ + UsbTrd = 0xAU; + } + else if ((hclk >= 21800000U) && (hclk < 24000000U)) + { + /* hclk Clock Range between 21.8-24 MHz */ + UsbTrd = 0x9U; + } + else if ((hclk >= 24000000U) && (hclk < 27700000U)) + { + /* hclk Clock Range between 24-27.7 MHz */ + UsbTrd = 0x8U; + } + else if ((hclk >= 27700000U) && (hclk < 32000000U)) + { + /* hclk Clock Range between 27.7-32 MHz */ + UsbTrd = 0x7U; + } + else /* if(hclk >= 32000000) */ + { + /* hclk Clock Range between 32-200 MHz */ + UsbTrd = 0x6U; + } + } + else if (speed == USBD_HS_SPEED) + { + UsbTrd = USBD_HS_TRDT_VALUE; + } + else + { + UsbTrd = USBD_DEFAULT_TRDT_VALUE; + } + + USBx->GUSBCFG &= ~USB_OTG_GUSBCFG_TRDT; + USBx->GUSBCFG |= (uint32_t)((UsbTrd << 10) & USB_OTG_GUSBCFG_TRDT); + + return HAL_OK; +} + +/** + * @brief USB_EnableGlobalInt + * Enables the controller's Global Int in the AHB Config reg + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_EnableGlobalInt(USB_OTG_GlobalTypeDef *USBx) +{ + USBx->GAHBCFG |= USB_OTG_GAHBCFG_GINT; + return HAL_OK; +} + +/** + * @brief USB_DisableGlobalInt + * Disable the controller's Global Int in the AHB Config reg + * @param USBx Selected device + * @retval HAL status +*/ +HAL_StatusTypeDef USB_DisableGlobalInt(USB_OTG_GlobalTypeDef *USBx) +{ + USBx->GAHBCFG &= ~USB_OTG_GAHBCFG_GINT; + return HAL_OK; +} + +/** + * @brief USB_SetCurrentMode : Set functional mode + * @param USBx Selected device + * @param mode current core mode + * This parameter can be one of these values: + * @arg USB_DEVICE_MODE: Peripheral mode + * @arg USB_HOST_MODE: Host mode + * @arg USB_DRD_MODE: Dual Role Device mode + * @retval HAL status + */ +HAL_StatusTypeDef USB_SetCurrentMode(USB_OTG_GlobalTypeDef *USBx, USB_OTG_ModeTypeDef mode) +{ + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_FHMOD | USB_OTG_GUSBCFG_FDMOD); + + if (mode == USB_HOST_MODE) + { + USBx->GUSBCFG |= USB_OTG_GUSBCFG_FHMOD; + } + else if (mode == USB_DEVICE_MODE) + { + USBx->GUSBCFG |= USB_OTG_GUSBCFG_FDMOD; + } + else + { + return HAL_ERROR; + } + HAL_Delay(50U); + + return HAL_OK; +} + +/** + * @brief USB_DevInit : Initializes the USB_OTG controller registers + * for device mode + * @param USBx Selected device + * @param cfg pointer to a USB_OTG_CfgTypeDef structure that contains + * the configuration information for the specified USBx peripheral. 
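A one-call illustration of the turnaround table above, assuming the standard HAL RCC helper HAL_RCC_GetHCLKFreq() is available: for any AHB clock of 32 MHz or more the full-speed branch resolves to TRDT = 0x6.

/* Illustrative: pick TRDT from the running AHB clock; HCLK >= 32 MHz yields 0x6 per the table above */
(void)USB_SetTurnaroundTime(USB_OTG_FS, HAL_RCC_GetHCLKFreq(), USBD_FS_SPEED);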
+ * @retval HAL status + */ +HAL_StatusTypeDef USB_DevInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg) +{ + HAL_StatusTypeDef ret = HAL_OK; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t i; + + for (i = 0U; i < 15U; i++) + { + USBx->DIEPTXF[i] = 0U; + } + + /* VBUS Sensing setup */ + if (cfg.vbus_sensing_enable == 0U) + { + USBx_DEVICE->DCTL |= USB_OTG_DCTL_SDIS; + + /* Deactivate VBUS Sensing B */ + USBx->GCCFG &= ~USB_OTG_GCCFG_VBDEN; + + /* B-peripheral session valid override enable */ + USBx->GOTGCTL |= USB_OTG_GOTGCTL_BVALOEN; + USBx->GOTGCTL |= USB_OTG_GOTGCTL_BVALOVAL; + } + else + { + /* Enable HW VBUS sensing */ + USBx->GCCFG |= USB_OTG_GCCFG_VBDEN; + } + + /* Restart the Phy Clock */ + USBx_PCGCCTL = 0U; + + /* Device mode configuration */ + USBx_DEVICE->DCFG |= DCFG_FRAME_INTERVAL_80; + + if (cfg.phy_itface == USB_OTG_ULPI_PHY) + { + if (cfg.speed == USBD_HS_SPEED) + { + /* Set Core speed to High speed mode */ + (void)USB_SetDevSpeed(USBx, USB_OTG_SPEED_HIGH); + } + else + { + /* Set Core speed to Full speed mode */ + (void)USB_SetDevSpeed(USBx, USB_OTG_SPEED_HIGH_IN_FULL); + } + } + else if (cfg.phy_itface == USB_OTG_HS_EMBEDDED_PHY) + { + if (cfg.speed == USBD_HS_SPEED) + { + /* Set Core speed to High speed mode */ + (void)USB_SetDevSpeed(USBx, USB_OTG_SPEED_HIGH); + } + else + { + /* Set Core speed to Full speed mode */ + (void)USB_SetDevSpeed(USBx, USB_OTG_SPEED_HIGH_IN_FULL); + } + } + else + { + /* Set Core speed to Full speed mode */ + (void)USB_SetDevSpeed(USBx, USB_OTG_SPEED_FULL); + } + + /* Flush the FIFOs */ + if (USB_FlushTxFifo(USBx, 0x10U) != HAL_OK) /* all Tx FIFOs */ + { + ret = HAL_ERROR; + } + + if (USB_FlushRxFifo(USBx) != HAL_OK) + { + ret = HAL_ERROR; + } + + /* Clear all pending Device Interrupts */ + USBx_DEVICE->DIEPMSK = 0U; + USBx_DEVICE->DOEPMSK = 0U; + USBx_DEVICE->DAINTMSK = 0U; + + for (i = 0U; i < cfg.dev_endpoints; i++) + { + if ((USBx_INEP(i)->DIEPCTL & USB_OTG_DIEPCTL_EPENA) == USB_OTG_DIEPCTL_EPENA) + { + if (i == 0U) + { + USBx_INEP(i)->DIEPCTL = USB_OTG_DIEPCTL_SNAK; + } + else + { + USBx_INEP(i)->DIEPCTL = USB_OTG_DIEPCTL_EPDIS | USB_OTG_DIEPCTL_SNAK; + } + } + else + { + USBx_INEP(i)->DIEPCTL = 0U; + } + + USBx_INEP(i)->DIEPTSIZ = 0U; + USBx_INEP(i)->DIEPINT = 0xFB7FU; + } + + for (i = 0U; i < cfg.dev_endpoints; i++) + { + if ((USBx_OUTEP(i)->DOEPCTL & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA) + { + if (i == 0U) + { + USBx_OUTEP(i)->DOEPCTL = USB_OTG_DOEPCTL_SNAK; + } + else + { + USBx_OUTEP(i)->DOEPCTL = USB_OTG_DOEPCTL_EPDIS | USB_OTG_DOEPCTL_SNAK; + } + } + else + { + USBx_OUTEP(i)->DOEPCTL = 0U; + } + + USBx_OUTEP(i)->DOEPTSIZ = 0U; + USBx_OUTEP(i)->DOEPINT = 0xFB7FU; + } + + USBx_DEVICE->DIEPMSK &= ~(USB_OTG_DIEPMSK_TXFURM); + + /* Disable all interrupts. 
*/ + USBx->GINTMSK = 0U; + + /* Clear any pending interrupts */ + USBx->GINTSTS = 0xBFFFFFFFU; + + /* Enable the common interrupts */ + if (cfg.dma_enable == 0U) + { + USBx->GINTMSK |= USB_OTG_GINTMSK_RXFLVLM; + } + + /* Enable interrupts matching to the Device mode ONLY */ + USBx->GINTMSK |= USB_OTG_GINTMSK_USBSUSPM | USB_OTG_GINTMSK_USBRST | + USB_OTG_GINTMSK_ENUMDNEM | USB_OTG_GINTMSK_IEPINT | + USB_OTG_GINTMSK_OEPINT | USB_OTG_GINTMSK_IISOIXFRM | + USB_OTG_GINTMSK_PXFRM_IISOOXFRM | USB_OTG_GINTMSK_WUIM; + + if (cfg.Sof_enable != 0U) + { + USBx->GINTMSK |= USB_OTG_GINTMSK_SOFM; + } + + if (cfg.vbus_sensing_enable == 1U) + { + USBx->GINTMSK |= (USB_OTG_GINTMSK_SRQIM | USB_OTG_GINTMSK_OTGINT); + } + + return ret; +} + +/** + * @brief USB_OTG_FlushTxFifo : Flush a Tx FIFO + * @param USBx Selected device + * @param num FIFO number + * This parameter can be a value from 1 to 15 + 15 means Flush all Tx FIFOs + * @retval HAL status + */ +HAL_StatusTypeDef USB_FlushTxFifo(USB_OTG_GlobalTypeDef *USBx, uint32_t num) +{ + uint32_t count = 0U; + + USBx->GRSTCTL = (USB_OTG_GRSTCTL_TXFFLSH | (num << 6)); + + do + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } + while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_TXFFLSH) == USB_OTG_GRSTCTL_TXFFLSH); + + return HAL_OK; +} + +/** + * @brief USB_FlushRxFifo : Flush Rx FIFO + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_FlushRxFifo(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t count = 0; + + USBx->GRSTCTL = USB_OTG_GRSTCTL_RXFFLSH; + + do + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } + while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_RXFFLSH) == USB_OTG_GRSTCTL_RXFFLSH); + + return HAL_OK; +} + +/** + * @brief USB_SetDevSpeed Initializes the DevSpd field of DCFG register + * depending the PHY type and the enumeration speed of the device. 
+ * @param USBx Selected device + * @param speed device speed + * This parameter can be one of these values: + * @arg USB_OTG_SPEED_HIGH: High speed mode + * @arg USB_OTG_SPEED_HIGH_IN_FULL: High speed core in Full Speed mode + * @arg USB_OTG_SPEED_FULL: Full speed mode + * @retval Hal status + */ +HAL_StatusTypeDef USB_SetDevSpeed(USB_OTG_GlobalTypeDef *USBx, uint8_t speed) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + USBx_DEVICE->DCFG |= speed; + return HAL_OK; +} + +/** + * @brief USB_GetDevSpeed Return the Dev Speed + * @param USBx Selected device + * @retval speed device speed + * This parameter can be one of these values: + * @arg PCD_SPEED_HIGH: High speed mode + * @arg PCD_SPEED_FULL: Full speed mode + */ +uint8_t USB_GetDevSpeed(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint8_t speed; + uint32_t DevEnumSpeed = USBx_DEVICE->DSTS & USB_OTG_DSTS_ENUMSPD; + + if (DevEnumSpeed == DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ) + { + speed = USBD_HS_SPEED; + } + else if ((DevEnumSpeed == DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ) || + (DevEnumSpeed == DSTS_ENUMSPD_FS_PHY_48MHZ)) + { + speed = USBD_FS_SPEED; + } + else + { + speed = 0xFU; + } + + return speed; +} + +/** + * @brief Activate and configure an endpoint + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_ActivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + if (ep->is_in == 1U) + { + USBx_DEVICE->DAINTMSK |= USB_OTG_DAINTMSK_IEPM & (uint32_t)(1UL << (ep->num & EP_ADDR_MSK)); + + if ((USBx_INEP(epnum)->DIEPCTL & USB_OTG_DIEPCTL_USBAEP) == 0U) + { + USBx_INEP(epnum)->DIEPCTL |= (ep->maxpacket & USB_OTG_DIEPCTL_MPSIZ) | + ((uint32_t)ep->type << 18) | (epnum << 22) | + USB_OTG_DIEPCTL_SD0PID_SEVNFRM | + USB_OTG_DIEPCTL_USBAEP; + } + } + else + { + USBx_DEVICE->DAINTMSK |= USB_OTG_DAINTMSK_OEPM & ((uint32_t)(1UL << (ep->num & EP_ADDR_MSK)) << 16); + + if (((USBx_OUTEP(epnum)->DOEPCTL) & USB_OTG_DOEPCTL_USBAEP) == 0U) + { + USBx_OUTEP(epnum)->DOEPCTL |= (ep->maxpacket & USB_OTG_DOEPCTL_MPSIZ) | + ((uint32_t)ep->type << 18) | + USB_OTG_DIEPCTL_SD0PID_SEVNFRM | + USB_OTG_DOEPCTL_USBAEP; + } + } + return HAL_OK; +} + +/** + * @brief Activate and configure a dedicated endpoint + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_ActivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + /* Read DEPCTLn register */ + if (ep->is_in == 1U) + { + if (((USBx_INEP(epnum)->DIEPCTL) & USB_OTG_DIEPCTL_USBAEP) == 0U) + { + USBx_INEP(epnum)->DIEPCTL |= (ep->maxpacket & USB_OTG_DIEPCTL_MPSIZ) | + ((uint32_t)ep->type << 18) | (epnum << 22) | + USB_OTG_DIEPCTL_SD0PID_SEVNFRM | + USB_OTG_DIEPCTL_USBAEP; + } + + USBx_DEVICE->DEACHMSK |= USB_OTG_DAINTMSK_IEPM & (uint32_t)(1UL << (ep->num & EP_ADDR_MSK)); + } + else + { + if (((USBx_OUTEP(epnum)->DOEPCTL) & USB_OTG_DOEPCTL_USBAEP) == 0U) + { + USBx_OUTEP(epnum)->DOEPCTL |= (ep->maxpacket & USB_OTG_DOEPCTL_MPSIZ) | + ((uint32_t)ep->type << 18) | (epnum << 22) | + USB_OTG_DOEPCTL_USBAEP; + } + + USBx_DEVICE->DEACHMSK |= USB_OTG_DAINTMSK_OEPM & ((uint32_t)(1UL << (ep->num & EP_ADDR_MSK)) << 16); + } + + return HAL_OK; +} + +/** + * @brief De-activate and de-initialize an endpoint + * @param USBx Selected device + * @param ep pointer to endpoint 
structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_DeactivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + /* Read DEPCTLn register */ + if (ep->is_in == 1U) + { + if ((USBx_INEP(epnum)->DIEPCTL & USB_OTG_DIEPCTL_EPENA) == USB_OTG_DIEPCTL_EPENA) + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SNAK; + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_EPDIS; + } + + USBx_DEVICE->DEACHMSK &= ~(USB_OTG_DAINTMSK_IEPM & (uint32_t)(1UL << (ep->num & EP_ADDR_MSK))); + USBx_DEVICE->DAINTMSK &= ~(USB_OTG_DAINTMSK_IEPM & (uint32_t)(1UL << (ep->num & EP_ADDR_MSK))); + USBx_INEP(epnum)->DIEPCTL &= ~(USB_OTG_DIEPCTL_USBAEP | + USB_OTG_DIEPCTL_MPSIZ | + USB_OTG_DIEPCTL_TXFNUM | + USB_OTG_DIEPCTL_SD0PID_SEVNFRM | + USB_OTG_DIEPCTL_EPTYP); + } + else + { + if ((USBx_OUTEP(epnum)->DOEPCTL & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA) + { + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_SNAK; + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_EPDIS; + } + + USBx_DEVICE->DEACHMSK &= ~(USB_OTG_DAINTMSK_OEPM & ((uint32_t)(1UL << (ep->num & EP_ADDR_MSK)) << 16)); + USBx_DEVICE->DAINTMSK &= ~(USB_OTG_DAINTMSK_OEPM & ((uint32_t)(1UL << (ep->num & EP_ADDR_MSK)) << 16)); + USBx_OUTEP(epnum)->DOEPCTL &= ~(USB_OTG_DOEPCTL_USBAEP | + USB_OTG_DOEPCTL_MPSIZ | + USB_OTG_DOEPCTL_SD0PID_SEVNFRM | + USB_OTG_DOEPCTL_EPTYP); + } + + return HAL_OK; +} + +/** + * @brief De-activate and de-initialize a dedicated endpoint + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_DeactivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + /* Read DEPCTLn register */ + if (ep->is_in == 1U) + { + if ((USBx_INEP(epnum)->DIEPCTL & USB_OTG_DIEPCTL_EPENA) == USB_OTG_DIEPCTL_EPENA) + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SNAK; + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_EPDIS; + } + + USBx_INEP(epnum)->DIEPCTL &= ~ USB_OTG_DIEPCTL_USBAEP; + USBx_DEVICE->DAINTMSK &= ~(USB_OTG_DAINTMSK_IEPM & (uint32_t)(1UL << (ep->num & EP_ADDR_MSK))); + } + else + { + if ((USBx_OUTEP(epnum)->DOEPCTL & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA) + { + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_SNAK; + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_EPDIS; + } + + USBx_OUTEP(epnum)->DOEPCTL &= ~USB_OTG_DOEPCTL_USBAEP; + USBx_DEVICE->DAINTMSK &= ~(USB_OTG_DAINTMSK_OEPM & ((uint32_t)(1UL << (ep->num & EP_ADDR_MSK)) << 16)); + } + + return HAL_OK; +} + +/** + * @brief USB_EPStartXfer : setup and starts a transfer over an EP + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @param dma USB dma enabled or disabled + * This parameter can be one of these values: + * 0 : DMA feature not used + * 1 : DMA feature used + * @retval HAL status + */ +HAL_StatusTypeDef USB_EPStartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep, uint8_t dma) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + uint16_t pktcnt; + + /* IN endpoint */ + if (ep->is_in == 1U) + { + /* Zero Length Packet? 
*/ + if (ep->xfer_len == 0U) + { + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (1U << 19)); + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); + } + else + { + /* Program the transfer size and packet count + * as follows: xfersize = N * maxpacket + + * short_packet pktcnt = N + (short_packet + * exist ? 1 : 0) + */ + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (((ep->xfer_len + ep->maxpacket - 1U) / ep->maxpacket) << 19)); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_XFRSIZ & ep->xfer_len); + + if (ep->type == EP_TYPE_ISOC) + { + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_MULCNT); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_MULCNT & (1U << 29)); + } + } + + if (dma == 1U) + { + if ((uint32_t)ep->dma_addr != 0U) + { + USBx_INEP(epnum)->DIEPDMA = (uint32_t)(ep->dma_addr); + } + + if (ep->type == EP_TYPE_ISOC) + { + if ((USBx_DEVICE->DSTS & (1U << 8)) == 0U) + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SODDFRM; + } + else + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SD0PID_SEVNFRM; + } + } + + /* EP enable, IN data in FIFO */ + USBx_INEP(epnum)->DIEPCTL |= (USB_OTG_DIEPCTL_CNAK | USB_OTG_DIEPCTL_EPENA); + } + else + { + /* EP enable, IN data in FIFO */ + USBx_INEP(epnum)->DIEPCTL |= (USB_OTG_DIEPCTL_CNAK | USB_OTG_DIEPCTL_EPENA); + + if (ep->type != EP_TYPE_ISOC) + { + /* Enable the Tx FIFO Empty Interrupt for this EP */ + if (ep->xfer_len > 0U) + { + USBx_DEVICE->DIEPEMPMSK |= 1UL << (ep->num & EP_ADDR_MSK); + } + } + else + { + if ((USBx_DEVICE->DSTS & (1U << 8)) == 0U) + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SODDFRM; + } + else + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SD0PID_SEVNFRM; + } + + (void)USB_WritePacket(USBx, ep->xfer_buff, ep->num, (uint16_t)ep->xfer_len, dma); + } + } + } + else /* OUT endpoint */ + { + /* Program the transfer size and packet count as follows: + * pktcnt = N + * xfersize = N * maxpacket + */ + USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_XFRSIZ); + USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_PKTCNT); + + if (ep->xfer_len == 0U) + { + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_XFRSIZ & ep->maxpacket); + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_PKTCNT & (1U << 19)); + } + else + { + pktcnt = (uint16_t)((ep->xfer_len + ep->maxpacket - 1U) / ep->maxpacket); + USBx_OUTEP(epnum)->DOEPTSIZ |= USB_OTG_DOEPTSIZ_PKTCNT & ((uint32_t)pktcnt << 19); + USBx_OUTEP(epnum)->DOEPTSIZ |= USB_OTG_DOEPTSIZ_XFRSIZ & (ep->maxpacket * pktcnt); + } + + if (dma == 1U) + { + if ((uint32_t)ep->xfer_buff != 0U) + { + USBx_OUTEP(epnum)->DOEPDMA = (uint32_t)(ep->xfer_buff); + } + } + + if (ep->type == EP_TYPE_ISOC) + { + if ((USBx_DEVICE->DSTS & (1U << 8)) == 0U) + { + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_SODDFRM; + } + else + { + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_SD0PID_SEVNFRM; + } + } + /* EP enable */ + USBx_OUTEP(epnum)->DOEPCTL |= (USB_OTG_DOEPCTL_CNAK | USB_OTG_DOEPCTL_EPENA); + } + + return HAL_OK; +} + +/** + * @brief USB_EP0StartXfer : setup and starts a transfer over the EP 0 + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @param dma USB dma enabled or disabled + * This parameter can be one of these values: + * 0 : DMA feature not used + * 1 : DMA feature used + * @retval HAL status + */ +HAL_StatusTypeDef USB_EP0StartXfer(USB_OTG_GlobalTypeDef 
*USBx, USB_OTG_EPTypeDef *ep, uint8_t dma) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + /* IN endpoint */ + if (ep->is_in == 1U) + { + /* Zero Length Packet? */ + if (ep->xfer_len == 0U) + { + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (1U << 19)); + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); + } + else + { + /* Program the transfer size and packet count + * as follows: xfersize = N * maxpacket + + * short_packet pktcnt = N + (short_packet + * exist ? 1 : 0) + */ + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); + + if (ep->xfer_len > ep->maxpacket) + { + ep->xfer_len = ep->maxpacket; + } + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (1U << 19)); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_XFRSIZ & ep->xfer_len); + } + + if (dma == 1U) + { + if ((uint32_t)ep->dma_addr != 0U) + { + USBx_INEP(epnum)->DIEPDMA = (uint32_t)(ep->dma_addr); + } + + /* EP enable, IN data in FIFO */ + USBx_INEP(epnum)->DIEPCTL |= (USB_OTG_DIEPCTL_CNAK | USB_OTG_DIEPCTL_EPENA); + } + else + { + /* EP enable, IN data in FIFO */ + USBx_INEP(epnum)->DIEPCTL |= (USB_OTG_DIEPCTL_CNAK | USB_OTG_DIEPCTL_EPENA); + + /* Enable the Tx FIFO Empty Interrupt for this EP */ + if (ep->xfer_len > 0U) + { + USBx_DEVICE->DIEPEMPMSK |= 1UL << (ep->num & EP_ADDR_MSK); + } + } + } + else /* OUT endpoint */ + { + /* Program the transfer size and packet count as follows: + * pktcnt = N + * xfersize = N * maxpacket + */ + USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_XFRSIZ); + USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_PKTCNT); + + if (ep->xfer_len > 0U) + { + ep->xfer_len = ep->maxpacket; + } + + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_PKTCNT & (1U << 19)); + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_XFRSIZ & (ep->maxpacket)); + + if (dma == 1U) + { + if ((uint32_t)ep->xfer_buff != 0U) + { + USBx_OUTEP(epnum)->DOEPDMA = (uint32_t)(ep->xfer_buff); + } + } + + /* EP enable */ + USBx_OUTEP(epnum)->DOEPCTL |= (USB_OTG_DOEPCTL_CNAK | USB_OTG_DOEPCTL_EPENA); + } + + return HAL_OK; +} + +/** + * @brief USB_WritePacket : Writes a packet into the Tx FIFO associated + * with the EP/channel + * @param USBx Selected device + * @param src pointer to source buffer + * @param ch_ep_num endpoint or host channel number + * @param len Number of bytes to write + * @param dma USB dma enabled or disabled + * This parameter can be one of these values: + * 0 : DMA feature not used + * 1 : DMA feature used + * @retval HAL status + */ +HAL_StatusTypeDef USB_WritePacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *src, uint8_t ch_ep_num, uint16_t len, uint8_t dma) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t *pSrc = (uint32_t *)src; + uint32_t count32b, i; + + if (dma == 0U) + { + count32b = ((uint32_t)len + 3U) / 4U; + for (i = 0U; i < count32b; i++) + { + USBx_DFIFO((uint32_t)ch_ep_num) = __UNALIGNED_UINT32_READ(pSrc); + pSrc++; + } + } + + return HAL_OK; +} + +/** + * @brief USB_ReadPacket : read a packet from the RX FIFO + * @param USBx Selected device + * @param dest source pointer + * @param len Number of bytes to read + * @retval pointer to destination buffer + */ +void *USB_ReadPacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *dest, uint16_t len) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t *pDest = (uint32_t *)dest; + uint32_t i; + uint32_t count32b = ((uint32_t)len + 3U) / 4U; + + for (i = 0U; i < count32b; 
i++) + { + __UNALIGNED_UINT32_WRITE(pDest, USBx_DFIFO(0U)); + pDest++; + } + + return ((void *)pDest); +} + +/** + * @brief USB_EPSetStall : set a stall condition over an EP + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_EPSetStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + if (ep->is_in == 1U) + { + if (((USBx_INEP(epnum)->DIEPCTL & USB_OTG_DIEPCTL_EPENA) == 0U) && (epnum != 0U)) + { + USBx_INEP(epnum)->DIEPCTL &= ~(USB_OTG_DIEPCTL_EPDIS); + } + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_STALL; + } + else + { + if (((USBx_OUTEP(epnum)->DOEPCTL & USB_OTG_DOEPCTL_EPENA) == 0U) && (epnum != 0U)) + { + USBx_OUTEP(epnum)->DOEPCTL &= ~(USB_OTG_DOEPCTL_EPDIS); + } + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_STALL; + } + + return HAL_OK; +} + +/** + * @brief USB_EPClearStall : Clear a stall condition over an EP + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_EPClearStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + if (ep->is_in == 1U) + { + USBx_INEP(epnum)->DIEPCTL &= ~USB_OTG_DIEPCTL_STALL; + if ((ep->type == EP_TYPE_INTR) || (ep->type == EP_TYPE_BULK)) + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SD0PID_SEVNFRM; /* DATA0 */ + } + } + else + { + USBx_OUTEP(epnum)->DOEPCTL &= ~USB_OTG_DOEPCTL_STALL; + if ((ep->type == EP_TYPE_INTR) || (ep->type == EP_TYPE_BULK)) + { + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_SD0PID_SEVNFRM; /* DATA0 */ + } + } + return HAL_OK; +} + +/** + * @brief USB_StopDevice : Stop the usb device mode + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_StopDevice(USB_OTG_GlobalTypeDef *USBx) +{ + HAL_StatusTypeDef ret; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t i; + + /* Clear Pending interrupt */ + for (i = 0U; i < 15U; i++) + { + USBx_INEP(i)->DIEPINT = 0xFB7FU; + USBx_OUTEP(i)->DOEPINT = 0xFB7FU; + } + + /* Clear interrupt masks */ + USBx_DEVICE->DIEPMSK = 0U; + USBx_DEVICE->DOEPMSK = 0U; + USBx_DEVICE->DAINTMSK = 0U; + + /* Flush the FIFO */ + ret = USB_FlushRxFifo(USBx); + if (ret != HAL_OK) + { + return ret; + } + + ret = USB_FlushTxFifo(USBx, 0x10U); + if (ret != HAL_OK) + { + return ret; + } + + return ret; +} + +/** + * @brief USB_SetDevAddress : Stop the usb device mode + * @param USBx Selected device + * @param address new device address to be assigned + * This parameter can be a value from 0 to 255 + * @retval HAL status + */ +HAL_StatusTypeDef USB_SetDevAddress(USB_OTG_GlobalTypeDef *USBx, uint8_t address) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + USBx_DEVICE->DCFG &= ~(USB_OTG_DCFG_DAD); + USBx_DEVICE->DCFG |= ((uint32_t)address << 4) & USB_OTG_DCFG_DAD; + + return HAL_OK; +} + +/** + * @brief USB_DevConnect : Connect the USB device by enabling the pull-up/pull-down + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_DevConnect(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + USBx_DEVICE->DCTL &= ~USB_OTG_DCTL_SDIS; + HAL_Delay(3U); + + return HAL_OK; +} + +/** + * @brief USB_DevDisconnect : Disconnect the USB device by disabling the pull-up/pull-down + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_DevDisconnect(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = 
(uint32_t)USBx; + + USBx_DEVICE->DCTL |= USB_OTG_DCTL_SDIS; + HAL_Delay(3U); + + return HAL_OK; +} + +/** + * @brief USB_ReadInterrupts: return the global USB interrupt status + * @param USBx Selected device + * @retval HAL status + */ +uint32_t USB_ReadInterrupts(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t tmpreg; + + tmpreg = USBx->GINTSTS; + tmpreg &= USBx->GINTMSK; + + return tmpreg; +} + +/** + * @brief USB_ReadDevAllOutEpInterrupt: return the USB device OUT endpoints interrupt status + * @param USBx Selected device + * @retval HAL status + */ +uint32_t USB_ReadDevAllOutEpInterrupt(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t tmpreg; + + tmpreg = USBx_DEVICE->DAINT; + tmpreg &= USBx_DEVICE->DAINTMSK; + + return ((tmpreg & 0xffff0000U) >> 16); +} + +/** + * @brief USB_ReadDevAllInEpInterrupt: return the USB device IN endpoints interrupt status + * @param USBx Selected device + * @retval HAL status + */ +uint32_t USB_ReadDevAllInEpInterrupt(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t tmpreg; + + tmpreg = USBx_DEVICE->DAINT; + tmpreg &= USBx_DEVICE->DAINTMSK; + + return ((tmpreg & 0xFFFFU)); +} + +/** + * @brief Returns Device OUT EP Interrupt register + * @param USBx Selected device + * @param epnum endpoint number + * This parameter can be a value from 0 to 15 + * @retval Device OUT EP Interrupt register + */ +uint32_t USB_ReadDevOutEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t tmpreg; + + tmpreg = USBx_OUTEP((uint32_t)epnum)->DOEPINT; + tmpreg &= USBx_DEVICE->DOEPMSK; + + return tmpreg; +} + +/** + * @brief Returns Device IN EP Interrupt register + * @param USBx Selected device + * @param epnum endpoint number + * This parameter can be a value from 0 to 15 + * @retval Device IN EP Interrupt register + */ +uint32_t USB_ReadDevInEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t tmpreg, msk, emp; + + msk = USBx_DEVICE->DIEPMSK; + emp = USBx_DEVICE->DIEPEMPMSK; + msk |= ((emp >> (epnum & EP_ADDR_MSK)) & 0x1U) << 7; + tmpreg = USBx_INEP((uint32_t)epnum)->DIEPINT & msk; + + return tmpreg; +} + +/** + * @brief USB_ClearInterrupts: clear a USB interrupt + * @param USBx Selected device + * @param interrupt interrupt flag + * @retval None + */ +void USB_ClearInterrupts(USB_OTG_GlobalTypeDef *USBx, uint32_t interrupt) +{ + USBx->GINTSTS |= interrupt; +} + +/** + * @brief Returns USB core mode + * @param USBx Selected device + * @retval return core mode : Host or Device + * This parameter can be one of these values: + * 0 : Host + * 1 : Device + */ +uint32_t USB_GetMode(USB_OTG_GlobalTypeDef *USBx) +{ + return ((USBx->GINTSTS) & 0x1U); +} + +/** + * @brief Activate EP0 for Setup transactions + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_ActivateSetup(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + /* Set the MPS of the IN EP0 to 64 bytes */ + USBx_INEP(0U)->DIEPCTL &= ~USB_OTG_DIEPCTL_MPSIZ; + + USBx_DEVICE->DCTL |= USB_OTG_DCTL_CGINAK; + + return HAL_OK; +} + +/** + * @brief Prepare the EP0 to start the first control setup + * @param USBx Selected device + * @param dma USB dma enabled or disabled + * This parameter can be one of these values: + * 0 : DMA feature not used + * 1 : DMA feature used + * @param psetup pointer to setup packet + * @retval HAL status + */ +HAL_StatusTypeDef USB_EP0_OutStart(USB_OTG_GlobalTypeDef *USBx, uint8_t dma, 
uint8_t *psetup) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t gSNPSiD = *(__IO uint32_t *)(&USBx->CID + 0x1U); + + if (gSNPSiD > USB_OTG_CORE_ID_300A) + { + if ((USBx_OUTEP(0U)->DOEPCTL & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA) + { + return HAL_OK; + } + } + + USBx_OUTEP(0U)->DOEPTSIZ = 0U; + USBx_OUTEP(0U)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_PKTCNT & (1U << 19)); + USBx_OUTEP(0U)->DOEPTSIZ |= (3U * 8U); + USBx_OUTEP(0U)->DOEPTSIZ |= USB_OTG_DOEPTSIZ_STUPCNT; + + if (dma == 1U) + { + USBx_OUTEP(0U)->DOEPDMA = (uint32_t)psetup; + /* EP enable */ + USBx_OUTEP(0U)->DOEPCTL |= USB_OTG_DOEPCTL_EPENA | USB_OTG_DOEPCTL_USBAEP; + } + + return HAL_OK; +} + +/** + * @brief Reset the USB Core (needed after USB clock settings change) + * @param USBx Selected device + * @retval HAL status + */ +static HAL_StatusTypeDef USB_CoreReset(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t count = 0U; + + /* Wait for AHB master IDLE state. */ + do + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } + while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_AHBIDL) == 0U); + + /* Core Soft Reset */ + count = 0U; + USBx->GRSTCTL |= USB_OTG_GRSTCTL_CSRST; + + do + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } + while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_CSRST) == USB_OTG_GRSTCTL_CSRST); + + return HAL_OK; +} + +#ifdef USB_HS_PHYC +/** + * @brief Enables control of a High Speed USB PHYÂ’s + * Init the low level hardware : GPIO, CLOCK, NVIC... + * @param USBx Selected device + * @retval HAL status + */ +static HAL_StatusTypeDef USB_HS_PHYCInit(USB_OTG_GlobalTypeDef *USBx) +{ + UNUSED(USBx); + uint32_t count = 0U; + + /* Enable LDO */ + USB_HS_PHYC->USB_HS_PHYC_LDO |= USB_HS_PHYC_LDO_ENABLE; + + /* wait for LDO Ready */ + while ((USB_HS_PHYC->USB_HS_PHYC_LDO & USB_HS_PHYC_LDO_STATUS) == 0U) + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } + + /* Controls PHY frequency operation selection */ + if (HSE_VALUE == 12000000U) /* HSE = 12MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x0U << 1); + } + else if (HSE_VALUE == 12500000U) /* HSE = 12.5MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x2U << 1); + } + else if (HSE_VALUE == 16000000U) /* HSE = 16MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x3U << 1); + } + else if (HSE_VALUE == 24000000U) /* HSE = 24MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x4U << 1); + } + else if (HSE_VALUE == 25000000U) /* HSE = 25MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x5U << 1); + } + else if (HSE_VALUE == 32000000U) /* HSE = 32MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x7U << 1); + } + else + { + /* ... */ + } + + /* Control the tuning interface of the High Speed PHY */ + USB_HS_PHYC->USB_HS_PHYC_TUNE |= USB_HS_PHYC_TUNE_VALUE; + + /* Enable PLL internal PHY */ + USB_HS_PHYC->USB_HS_PHYC_PLL |= USB_HS_PHYC_PLL_PLLEN; + + /* 2ms Delay required to get internal phy clock stable */ + HAL_Delay(2U); + + return HAL_OK; +} + +#endif /* USB_HS_PHYC */ +/** + * @brief USB_HostInit : Initializes the USB OTG controller registers + * for Host mode + * @param USBx Selected device + * @param cfg pointer to a USB_OTG_CfgTypeDef structure that contains + * the configuration information for the specified USBx peripheral. 
+ * @retval HAL status + */ +HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t i; + + /* Restart the Phy Clock */ + USBx_PCGCCTL = 0U; + + /* Disable VBUS sensing */ + USBx->GCCFG &= ~(USB_OTG_GCCFG_VBDEN); + + if ((USBx->CID & (0x1U << 8)) != 0U) + { + if (cfg.speed == USBH_FSLS_SPEED) + { + /* Force Device Enumeration to FS/LS mode only */ + USBx_HOST->HCFG |= USB_OTG_HCFG_FSLSS; + } + else + { + /* Set default Max speed support */ + USBx_HOST->HCFG &= ~(USB_OTG_HCFG_FSLSS); + } + } + else + { + /* Set default Max speed support */ + USBx_HOST->HCFG &= ~(USB_OTG_HCFG_FSLSS); + } + + /* Make sure the FIFOs are flushed. */ + (void)USB_FlushTxFifo(USBx, 0x10U); /* all Tx FIFOs */ + (void)USB_FlushRxFifo(USBx); + + /* Clear all pending HC Interrupts */ + for (i = 0U; i < cfg.Host_channels; i++) + { + USBx_HC(i)->HCINT = 0xFFFFFFFFU; + USBx_HC(i)->HCINTMSK = 0U; + } + + /* Enable VBUS driving */ + (void)USB_DriveVbus(USBx, 1U); + + HAL_Delay(200U); + + /* Disable all interrupts. */ + USBx->GINTMSK = 0U; + + /* Clear any pending interrupts */ + USBx->GINTSTS = 0xFFFFFFFFU; + + if ((USBx->CID & (0x1U << 8)) != 0U) + { + /* set Rx FIFO size */ + USBx->GRXFSIZ = 0x200U; + USBx->DIEPTXF0_HNPTXFSIZ = (uint32_t)(((0x100U << 16) & USB_OTG_NPTXFD) | 0x200U); + USBx->HPTXFSIZ = (uint32_t)(((0xE0U << 16) & USB_OTG_HPTXFSIZ_PTXFD) | 0x300U); + } + else + { + /* set Rx FIFO size */ + USBx->GRXFSIZ = 0x80U; + USBx->DIEPTXF0_HNPTXFSIZ = (uint32_t)(((0x60U << 16) & USB_OTG_NPTXFD) | 0x80U); + USBx->HPTXFSIZ = (uint32_t)(((0x40U << 16)& USB_OTG_HPTXFSIZ_PTXFD) | 0xE0U); + } + + /* Enable the common interrupts */ + if (cfg.dma_enable == 0U) + { + USBx->GINTMSK |= USB_OTG_GINTMSK_RXFLVLM; + } + + /* Enable interrupts matching to the Host mode ONLY */ + USBx->GINTMSK |= (USB_OTG_GINTMSK_PRTIM | USB_OTG_GINTMSK_HCIM | \ + USB_OTG_GINTMSK_SOFM | USB_OTG_GINTSTS_DISCINT | \ + USB_OTG_GINTMSK_PXFRM_IISOOXFRM | USB_OTG_GINTMSK_WUIM); + + return HAL_OK; +} + +/** + * @brief USB_InitFSLSPClkSel : Initializes the FSLSPClkSel field of the + * HCFG register on the PHY type and set the right frame interval + * @param USBx Selected device + * @param freq clock frequency + * This parameter can be one of these values: + * HCFG_48_MHZ : Full Speed 48 MHz Clock + * HCFG_6_MHZ : Low Speed 6 MHz Clock + * @retval HAL status + */ +HAL_StatusTypeDef USB_InitFSLSPClkSel(USB_OTG_GlobalTypeDef *USBx, uint8_t freq) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + USBx_HOST->HCFG &= ~(USB_OTG_HCFG_FSLSPCS); + USBx_HOST->HCFG |= (uint32_t)freq & USB_OTG_HCFG_FSLSPCS; + + if (freq == HCFG_48_MHZ) + { + USBx_HOST->HFIR = 48000U; + } + else if (freq == HCFG_6_MHZ) + { + USBx_HOST->HFIR = 6000U; + } + else + { + /* ... */ + } + + return HAL_OK; +} + +/** +* @brief USB_OTG_ResetPort : Reset Host Port + * @param USBx Selected device + * @retval HAL status + * @note (1)The application must wait at least 10 ms + * before clearing the reset bit. 
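For orientation, a minimal host-mode sequence using the functions above; in this firmware the HAL HCD layer drives these calls, and the USB_OTG_HS instance plus the configuration values are illustrative assumptions.

/* Illustrative host bring-up sketch -- values are assumptions, not taken from this project */
USB_OTG_CfgTypeDef cfg = {0};

cfg.Host_channels = 12U;
cfg.speed         = USBH_FSLS_SPEED;   /* force FS/LS enumeration, handled by USB_HostInit above */
cfg.dma_enable    = 0U;

(void)USB_HostInit(USB_OTG_HS, cfg);                 /* FIFO sizing and VBUS driving done inside */
(void)USB_InitFSLSPClkSel(USB_OTG_HS, HCFG_48_MHZ);  /* 48 MHz PHY clock for Full Speed */
(void)USB_ResetPort(USB_OTG_HS);                     /* holds PRST well over the 10 ms minimum, see note (1) */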
+ */ +HAL_StatusTypeDef USB_ResetPort(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + __IO uint32_t hprt0 = 0U; + + hprt0 = USBx_HPRT0; + + hprt0 &= ~(USB_OTG_HPRT_PENA | USB_OTG_HPRT_PCDET | + USB_OTG_HPRT_PENCHNG | USB_OTG_HPRT_POCCHNG); + + USBx_HPRT0 = (USB_OTG_HPRT_PRST | hprt0); + HAL_Delay(100U); /* See Note #1 */ + USBx_HPRT0 = ((~USB_OTG_HPRT_PRST) & hprt0); + HAL_Delay(10U); + + return HAL_OK; +} + +/** + * @brief USB_DriveVbus : activate or de-activate vbus + * @param state VBUS state + * This parameter can be one of these values: + * 0 : VBUS Active + * 1 : VBUS Inactive + * @retval HAL status +*/ +HAL_StatusTypeDef USB_DriveVbus(USB_OTG_GlobalTypeDef *USBx, uint8_t state) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + __IO uint32_t hprt0 = 0U; + + hprt0 = USBx_HPRT0; + + hprt0 &= ~(USB_OTG_HPRT_PENA | USB_OTG_HPRT_PCDET | + USB_OTG_HPRT_PENCHNG | USB_OTG_HPRT_POCCHNG); + + if (((hprt0 & USB_OTG_HPRT_PPWR) == 0U) && (state == 1U)) + { + USBx_HPRT0 = (USB_OTG_HPRT_PPWR | hprt0); + } + if (((hprt0 & USB_OTG_HPRT_PPWR) == USB_OTG_HPRT_PPWR) && (state == 0U)) + { + USBx_HPRT0 = ((~USB_OTG_HPRT_PPWR) & hprt0); + } + return HAL_OK; +} + +/** + * @brief Return Host Core speed + * @param USBx Selected device + * @retval speed : Host speed + * This parameter can be one of these values: + * @arg HCD_SPEED_HIGH: High speed mode + * @arg HCD_SPEED_FULL: Full speed mode + * @arg HCD_SPEED_LOW: Low speed mode + */ +uint32_t USB_GetHostSpeed(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + __IO uint32_t hprt0 = 0U; + + hprt0 = USBx_HPRT0; + return ((hprt0 & USB_OTG_HPRT_PSPD) >> 17); +} + +/** + * @brief Return Host Current Frame number + * @param USBx Selected device + * @retval current frame number +*/ +uint32_t USB_GetCurrentFrame(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + return (USBx_HOST->HFNUM & USB_OTG_HFNUM_FRNUM); +} + +/** + * @brief Initialize a host channel + * @param USBx Selected device + * @param ch_num Channel number + * This parameter can be a value from 1 to 15 + * @param epnum Endpoint number + * This parameter can be a value from 1 to 15 + * @param dev_address Current device address + * This parameter can be a value from 0 to 255 + * @param speed Current device speed + * This parameter can be one of these values: + * @arg USB_OTG_SPEED_HIGH: High speed mode + * @arg USB_OTG_SPEED_FULL: Full speed mode + * @arg USB_OTG_SPEED_LOW: Low speed mode + * @param ep_type Endpoint Type + * This parameter can be one of these values: + * @arg EP_TYPE_CTRL: Control type + * @arg EP_TYPE_ISOC: Isochronous type + * @arg EP_TYPE_BULK: Bulk type + * @arg EP_TYPE_INTR: Interrupt type + * @param mps Max Packet Size + * This parameter can be a value from 0 to32K + * @retval HAL state + */ +HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, + uint8_t ch_num, + uint8_t epnum, + uint8_t dev_address, + uint8_t speed, + uint8_t ep_type, + uint16_t mps) +{ + HAL_StatusTypeDef ret = HAL_OK; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t HCcharEpDir, HCcharLowSpeed; + + /* Clear old interrupt conditions for this host channel. */ + USBx_HC((uint32_t)ch_num)->HCINT = 0xFFFFFFFFU; + + /* Enable channel interrupts required for this transfer. 
*/ + switch (ep_type) + { + case EP_TYPE_CTRL: + case EP_TYPE_BULK: + USBx_HC((uint32_t)ch_num)->HCINTMSK = USB_OTG_HCINTMSK_XFRCM | + USB_OTG_HCINTMSK_STALLM | + USB_OTG_HCINTMSK_TXERRM | + USB_OTG_HCINTMSK_DTERRM | + USB_OTG_HCINTMSK_AHBERR | + USB_OTG_HCINTMSK_NAKM; + + if ((epnum & 0x80U) == 0x80U) + { + USBx_HC((uint32_t)ch_num)->HCINTMSK |= USB_OTG_HCINTMSK_BBERRM; + } + else + { + if ((USBx->CID & (0x1U << 8)) != 0U) + { + USBx_HC((uint32_t)ch_num)->HCINTMSK |= (USB_OTG_HCINTMSK_NYET | USB_OTG_HCINTMSK_ACKM); + } + } + break; + + case EP_TYPE_INTR: + USBx_HC((uint32_t)ch_num)->HCINTMSK = USB_OTG_HCINTMSK_XFRCM | + USB_OTG_HCINTMSK_STALLM | + USB_OTG_HCINTMSK_TXERRM | + USB_OTG_HCINTMSK_DTERRM | + USB_OTG_HCINTMSK_NAKM | + USB_OTG_HCINTMSK_AHBERR | + USB_OTG_HCINTMSK_FRMORM; + + if ((epnum & 0x80U) == 0x80U) + { + USBx_HC((uint32_t)ch_num)->HCINTMSK |= USB_OTG_HCINTMSK_BBERRM; + } + + break; + + case EP_TYPE_ISOC: + USBx_HC((uint32_t)ch_num)->HCINTMSK = USB_OTG_HCINTMSK_XFRCM | + USB_OTG_HCINTMSK_ACKM | + USB_OTG_HCINTMSK_AHBERR | + USB_OTG_HCINTMSK_FRMORM; + + if ((epnum & 0x80U) == 0x80U) + { + USBx_HC((uint32_t)ch_num)->HCINTMSK |= (USB_OTG_HCINTMSK_TXERRM | USB_OTG_HCINTMSK_BBERRM); + } + break; + + default: + ret = HAL_ERROR; + break; + } + + /* Enable the top level host channel interrupt. */ + USBx_HOST->HAINTMSK |= 1UL << (ch_num & 0xFU); + + /* Make sure host channel interrupts are enabled. */ + USBx->GINTMSK |= USB_OTG_GINTMSK_HCIM; + + /* Program the HCCHAR register */ + if ((epnum & 0x80U) == 0x80U) + { + HCcharEpDir = (0x1U << 15) & USB_OTG_HCCHAR_EPDIR; + } + else + { + HCcharEpDir = 0U; + } + + if (speed == HPRT0_PRTSPD_LOW_SPEED) + { + HCcharLowSpeed = (0x1U << 17) & USB_OTG_HCCHAR_LSDEV; + } + else + { + HCcharLowSpeed = 0U; + } + + USBx_HC((uint32_t)ch_num)->HCCHAR = (((uint32_t)dev_address << 22) & USB_OTG_HCCHAR_DAD) | + ((((uint32_t)epnum & 0x7FU) << 11) & USB_OTG_HCCHAR_EPNUM) | + (((uint32_t)ep_type << 18) & USB_OTG_HCCHAR_EPTYP) | + ((uint32_t)mps & USB_OTG_HCCHAR_MPSIZ) | HCcharEpDir | HCcharLowSpeed; + + if (ep_type == EP_TYPE_INTR) + { + USBx_HC((uint32_t)ch_num)->HCCHAR |= USB_OTG_HCCHAR_ODDFRM ; + } + + return ret; +} + +/** + * @brief Start a transfer over a host channel + * @param USBx Selected device + * @param hc pointer to host channel structure + * @param dma USB dma enabled or disabled + * This parameter can be one of these values: + * 0 : DMA feature not used + * 1 : DMA feature used + * @retval HAL state + */ +HAL_StatusTypeDef USB_HC_StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_HCTypeDef *hc, uint8_t dma) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t ch_num = (uint32_t)hc->ch_num; + static __IO uint32_t tmpreg = 0U; + uint8_t is_oddframe; + uint16_t len_words; + uint16_t num_packets; + uint16_t max_hc_pkt_count = 256U; + + if (((USBx->CID & (0x1U << 8)) != 0U) && (hc->speed == USBH_HS_SPEED)) + { + if ((dma == 0U) && (hc->do_ping == 1U)) + { + (void)USB_DoPing(USBx, hc->ch_num); + return HAL_OK; + } + else if (dma == 1U) + { + USBx_HC(ch_num)->HCINTMSK &= ~(USB_OTG_HCINTMSK_NYET | USB_OTG_HCINTMSK_ACKM); + hc->do_ping = 0U; + } + else + { + /* ... 
*/ + } + } + + /* Compute the expected number of packets associated to the transfer */ + if (hc->xfer_len > 0U) + { + num_packets = (uint16_t)((hc->xfer_len + hc->max_packet - 1U) / hc->max_packet); + + if (num_packets > max_hc_pkt_count) + { + num_packets = max_hc_pkt_count; + hc->xfer_len = (uint32_t)num_packets * hc->max_packet; + } + } + else + { + num_packets = 1U; + } + if (hc->ep_is_in != 0U) + { + hc->xfer_len = (uint32_t)num_packets * hc->max_packet; + } + + /* Initialize the HCTSIZn register */ + USBx_HC(ch_num)->HCTSIZ = (hc->xfer_len & USB_OTG_HCTSIZ_XFRSIZ) | + (((uint32_t)num_packets << 19) & USB_OTG_HCTSIZ_PKTCNT) | + (((uint32_t)hc->data_pid << 29) & USB_OTG_HCTSIZ_DPID); + + if (dma != 0U) + { + /* xfer_buff MUST be 32-bits aligned */ + USBx_HC(ch_num)->HCDMA = (uint32_t)hc->xfer_buff; + } + + is_oddframe = (((uint32_t)USBx_HOST->HFNUM & 0x01U) != 0U) ? 0U : 1U; + USBx_HC(ch_num)->HCCHAR &= ~USB_OTG_HCCHAR_ODDFRM; + USBx_HC(ch_num)->HCCHAR |= (uint32_t)is_oddframe << 29; + + /* Set host channel enable */ + tmpreg = USBx_HC(ch_num)->HCCHAR; + tmpreg &= ~USB_OTG_HCCHAR_CHDIS; + + /* make sure to set the correct ep direction */ + if (hc->ep_is_in != 0U) + { + tmpreg |= USB_OTG_HCCHAR_EPDIR; + } + else + { + tmpreg &= ~USB_OTG_HCCHAR_EPDIR; + } + tmpreg |= USB_OTG_HCCHAR_CHENA; + USBx_HC(ch_num)->HCCHAR = tmpreg; + + if (dma == 0U) /* Slave mode */ + { + if ((hc->ep_is_in == 0U) && (hc->xfer_len > 0U)) + { + switch (hc->ep_type) + { + /* Non periodic transfer */ + case EP_TYPE_CTRL: + case EP_TYPE_BULK: + + len_words = (uint16_t)((hc->xfer_len + 3U) / 4U); + + /* check if there is enough space in FIFO space */ + if (len_words > (USBx->HNPTXSTS & 0xFFFFU)) + { + /* need to process data in nptxfempty interrupt */ + USBx->GINTMSK |= USB_OTG_GINTMSK_NPTXFEM; + } + break; + + /* Periodic transfer */ + case EP_TYPE_INTR: + case EP_TYPE_ISOC: + len_words = (uint16_t)((hc->xfer_len + 3U) / 4U); + /* check if there is enough space in FIFO space */ + if (len_words > (USBx_HOST->HPTXSTS & 0xFFFFU)) /* split the transfer */ + { + /* need to process data in ptxfempty interrupt */ + USBx->GINTMSK |= USB_OTG_GINTMSK_PTXFEM; + } + break; + + default: + break; + } + + /* Write packet into the Tx FIFO. */ + (void)USB_WritePacket(USBx, hc->xfer_buff, hc->ch_num, (uint16_t)hc->xfer_len, 0); + } + } + + return HAL_OK; +} + +/** + * @brief Read all host channel interrupts status + * @param USBx Selected device + * @retval HAL state + */ +uint32_t USB_HC_ReadInterrupt(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + return ((USBx_HOST->HAINT) & 0xFFFFU); +} + +/** + * @brief Halt a host channel + * @param USBx Selected device + * @param hc_num Host Channel number + * This parameter can be a value from 1 to 15 + * @retval HAL state + */ +HAL_StatusTypeDef USB_HC_Halt(USB_OTG_GlobalTypeDef *USBx, uint8_t hc_num) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t hcnum = (uint32_t)hc_num; + uint32_t count = 0U; + uint32_t HcEpType = (USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_EPTYP) >> 18; + + /* Check for space in the request queue to issue the halt. 
*/ + if ((HcEpType == HCCHAR_CTRL) || (HcEpType == HCCHAR_BULK)) + { + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHDIS; + + if ((USBx->HNPTXSTS & (0xFFU << 16)) == 0U) + { + USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_CHENA; + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; + USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_EPDIR; + do + { + if (++count > 1000U) + { + break; + } + } + while ((USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_CHENA) == USB_OTG_HCCHAR_CHENA); + } + else + { + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; + } + } + else + { + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHDIS; + + if ((USBx_HOST->HPTXSTS & (0xFFU << 16)) == 0U) + { + USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_CHENA; + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; + USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_EPDIR; + do + { + if (++count > 1000U) + { + break; + } + } + while ((USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_CHENA) == USB_OTG_HCCHAR_CHENA); + } + else + { + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; + } + } + + return HAL_OK; +} + +/** + * @brief Initiate Do Ping protocol + * @param USBx Selected device + * @param hc_num Host Channel number + * This parameter can be a value from 1 to 15 + * @retval HAL state + */ +HAL_StatusTypeDef USB_DoPing(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t chnum = (uint32_t)ch_num; + uint32_t num_packets = 1U; + uint32_t tmpreg; + + USBx_HC(chnum)->HCTSIZ = ((num_packets << 19) & USB_OTG_HCTSIZ_PKTCNT) | + USB_OTG_HCTSIZ_DOPING; + + /* Set host channel enable */ + tmpreg = USBx_HC(chnum)->HCCHAR; + tmpreg &= ~USB_OTG_HCCHAR_CHDIS; + tmpreg |= USB_OTG_HCCHAR_CHENA; + USBx_HC(chnum)->HCCHAR = tmpreg; + + return HAL_OK; +} + +/** + * @brief Stop Host Core + * @param USBx Selected device + * @retval HAL state + */ +HAL_StatusTypeDef USB_StopHost(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t count = 0U; + uint32_t value; + uint32_t i; + + (void)USB_DisableGlobalInt(USBx); + + /* Flush FIFO */ + (void)USB_FlushTxFifo(USBx, 0x10U); + (void)USB_FlushRxFifo(USBx); + + /* Flush out any leftover queued requests. */ + for (i = 0U; i <= 15U; i++) + { + value = USBx_HC(i)->HCCHAR; + value |= USB_OTG_HCCHAR_CHDIS; + value &= ~USB_OTG_HCCHAR_CHENA; + value &= ~USB_OTG_HCCHAR_EPDIR; + USBx_HC(i)->HCCHAR = value; + } + + /* Halt all channels to put them into a known state. 
*/ + for (i = 0U; i <= 15U; i++) + { + value = USBx_HC(i)->HCCHAR; + value |= USB_OTG_HCCHAR_CHDIS; + value |= USB_OTG_HCCHAR_CHENA; + value &= ~USB_OTG_HCCHAR_EPDIR; + USBx_HC(i)->HCCHAR = value; + + do + { + if (++count > 1000U) + { + break; + } + } + while ((USBx_HC(i)->HCCHAR & USB_OTG_HCCHAR_CHENA) == USB_OTG_HCCHAR_CHENA); + } + + /* Clear any pending Host interrupts */ + USBx_HOST->HAINT = 0xFFFFFFFFU; + USBx->GINTSTS = 0xFFFFFFFFU; + + (void)USB_EnableGlobalInt(USBx); + + return HAL_OK; +} + +/** + * @brief USB_ActivateRemoteWakeup active remote wakeup signalling + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_ActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + if ((USBx_DEVICE->DSTS & USB_OTG_DSTS_SUSPSTS) == USB_OTG_DSTS_SUSPSTS) + { + /* active Remote wakeup signalling */ + USBx_DEVICE->DCTL |= USB_OTG_DCTL_RWUSIG; + } + + return HAL_OK; +} + +/** + * @brief USB_DeActivateRemoteWakeup de-active remote wakeup signalling + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_DeActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + /* active Remote wakeup signalling */ + USBx_DEVICE->DCTL &= ~(USB_OTG_DCTL_RWUSIG); + + return HAL_OK; +} +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/** + * @} + */ + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +#endif /* defined (HAL_PCD_MODULE_ENABLED) || defined (HAL_HCD_MODULE_ENABLED) */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/FATFS/App/fatfs.c b/src/third_party/stm32_cubeide/FATFS/App/fatfs.c new file mode 100644 index 000000000..cec41cb9d --- /dev/null +++ b/src/third_party/stm32_cubeide/FATFS/App/fatfs.c @@ -0,0 +1,72 @@ +/** + ****************************************************************************** + * @file fatfs.c + * @brief Code for fatfs applications + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +#include "fatfs.h" + +uint8_t retSD; /* Return value for SD */ +char SDPath[4]; /* SD logical drive path */ +FATFS SDFatFS; /* File system object for SD logical drive */ +FIL SDFile; /* File object for SD */ + +/* USER CODE BEGIN Variables */ + +#include +#include + +/* USER CODE END Variables */ + +void MX_FATFS_Init(void) +{ + /*## FatFS: Link the SD driver ###########################*/ + retSD = FATFS_LinkDriver(&SD_Driver, SDPath); + + /* USER CODE BEGIN Init */ + /* additional user code for init */ + /* USER CODE END Init */ +} + +/** + * @brief Gets Time from RTC + * @param None + * @retval Time in DWORD + */ +DWORD get_fattime(void) +{ + /* USER CODE BEGIN get_fattime */ + RTC_TimeTypeDef time; + HAL_RTC_GetTime(&hrtc, &time, 0); + + RTC_DateTypeDef date; + HAL_RTC_GetDate(&hrtc, &date, 0); + + DWORD res = (((DWORD)(2000 + date.Year - 1980)) << 25) + | ((DWORD)date.Month << 21) + | ((DWORD)date.Date << 16) + | (WORD)(time.Hours << 11) + | (WORD)(time.Minutes << 5) + | (WORD)(time.Seconds >> 1); + + return res; + /* USER CODE END get_fattime */ +} + +/* USER CODE BEGIN Application */ + +/* USER CODE END Application */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/FATFS/App/fatfs.h b/src/third_party/stm32_cubeide/FATFS/App/fatfs.h new file mode 100644 index 000000000..a51a42ad4 --- /dev/null +++ b/src/third_party/stm32_cubeide/FATFS/App/fatfs.h @@ -0,0 +1,49 @@ +/** + ****************************************************************************** + * @file fatfs.h + * @brief Header for fatfs applications + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __fatfs_H +#define __fatfs_H +#ifdef __cplusplus + extern "C" { +#endif + +#include "ff.h" +#include "ff_gen_drv.h" +#include "sd_diskio.h" /* defines SD_Driver as external */ + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +extern uint8_t retSD; /* Return value for SD */ +extern char SDPath[4]; /* SD logical drive path */ +extern FATFS SDFatFS; /* File system object for SD logical drive */ +extern FIL SDFile; /* File object for SD */ + +void MX_FATFS_Init(void); + +/* USER CODE BEGIN Prototypes */ + +/* USER CODE END Prototypes */ +#ifdef __cplusplus +} +#endif +#endif /*__fatfs_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/FATFS/Target/bsp_driver_sd.c b/src/third_party/stm32_cubeide/FATFS/Target/bsp_driver_sd.c new file mode 100644 index 000000000..a2a32ec9d --- /dev/null +++ b/src/third_party/stm32_cubeide/FATFS/Target/bsp_driver_sd.c @@ -0,0 +1,310 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file bsp_driver_sd.c for F7 (based on stm32756g_eval_sd.c) + * @brief This file includes a generic uSD card driver. + * To be completed by the user according to the board used for the project. + * @note Some functions generated as weak: they can be overriden by + * - code in user files + * - or BSP code from the FW pack files + * if such files are added to the generated project (by the user). + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +#ifdef OLD_API +/* kept to avoid issue when migrating old projects. */ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ +#else +/* USER CODE BEGIN FirstSection */ +/* can be used to modify / undefine following code or add new definitions */ +/* USER CODE END FirstSection */ +/* Includes ------------------------------------------------------------------*/ +#include "bsp_driver_sd.h" + +/* Extern variables ---------------------------------------------------------*/ + +extern SD_HandleTypeDef hsd1; + +/* USER CODE BEGIN BeforeInitSection */ +int g_sdCardIsPresent = 0; +/* can be used to modify / undefine following code or add code */ +/* USER CODE END BeforeInitSection */ +/** + * @brief Initializes the SD card device. + * @retval SD status + */ +__weak uint8_t BSP_SD_Init(void) +{ + uint8_t sd_state = MSD_OK; + /* Check if the SD card is plugged in the slot */ + if (BSP_SD_IsDetected() != SD_PRESENT) + { + return MSD_ERROR_SD_NOT_PRESENT; + } + /* HAL SD initialization */ + sd_state = HAL_SD_Init(&hsd1); + + return sd_state; +} +/* USER CODE BEGIN AfterInitSection */ +/* can be used to modify previous code / undefine following code / add code */ +/* USER CODE END AfterInitSection */ + +/* USER CODE BEGIN InterruptMode */ +/** + * @brief Configures Interrupt mode for SD detection pin. + * @retval Returns 0 + */ +__weak uint8_t BSP_SD_ITConfig(void) +{ + /* Code to be updated by the user or replaced by one from the FW pack (in a stmxxxx_sd.c file) */ + + return (uint8_t)0; +} + +/* USER CODE END InterruptMode */ + +/* USER CODE BEGIN BeforeReadBlocksSection */ +/* can be used to modify previous code / undefine following code / add code */ +/* USER CODE END BeforeReadBlocksSection */ +/** + * @brief Reads block(s) from a specified address in an SD card, in polling mode. + * @param pData: Pointer to the buffer that will contain the data to transmit + * @param ReadAddr: Address from where data is to be read + * @param NumOfBlocks: Number of SD blocks to read + * @param Timeout: Timeout for read operation + * @retval SD status + */ +__weak uint8_t BSP_SD_ReadBlocks(uint32_t *pData, uint32_t ReadAddr, uint32_t NumOfBlocks, uint32_t Timeout) +{ + uint8_t sd_state = MSD_OK; + + if (HAL_SD_ReadBlocks(&hsd1, (uint8_t *)pData, ReadAddr, NumOfBlocks, Timeout) != HAL_OK) + { + sd_state = MSD_ERROR; + } + + return sd_state; +} + +/* USER CODE BEGIN BeforeWriteBlocksSection */ +/* can be used to modify previous code / undefine following code / add code */ +/* USER CODE END BeforeWriteBlocksSection */ +/** + * @brief Writes block(s) to a specified address in an SD card, in polling mode. 
+ * @param pData: Pointer to the buffer that will contain the data to transmit + * @param WriteAddr: Address from where data is to be written + * @param NumOfBlocks: Number of SD blocks to write + * @param Timeout: Timeout for write operation + * @retval SD status + */ +__weak uint8_t BSP_SD_WriteBlocks(uint32_t *pData, uint32_t WriteAddr, uint32_t NumOfBlocks, uint32_t Timeout) +{ + uint8_t sd_state = MSD_OK; + + if (HAL_SD_WriteBlocks(&hsd1, (uint8_t *)pData, WriteAddr, NumOfBlocks, Timeout) != HAL_OK) + { + sd_state = MSD_ERROR; + } + + return sd_state; +} + +/* USER CODE BEGIN BeforeReadDMABlocksSection */ +/* can be used to modify previous code / undefine following code / add code */ +/* USER CODE END BeforeReadDMABlocksSection */ +/** + * @brief Reads block(s) from a specified address in an SD card, in DMA mode. + * @param pData: Pointer to the buffer that will contain the data to transmit + * @param ReadAddr: Address from where data is to be read + * @param NumOfBlocks: Number of SD blocks to read + * @retval SD status + */ +__weak uint8_t BSP_SD_ReadBlocks_DMA(uint32_t *pData, uint32_t ReadAddr, uint32_t NumOfBlocks) +{ + uint8_t sd_state = MSD_OK; + + /* Read block(s) in DMA transfer mode */ + if (HAL_SD_ReadBlocks_DMA(&hsd1, (uint8_t *)pData, ReadAddr, NumOfBlocks) != HAL_OK) + { + sd_state = MSD_ERROR; + } + + return sd_state; +} + +/* USER CODE BEGIN BeforeWriteDMABlocksSection */ +/* can be used to modify previous code / undefine following code / add code */ +/* USER CODE END BeforeWriteDMABlocksSection */ +/** + * @brief Writes block(s) to a specified address in an SD card, in DMA mode. + * @param pData: Pointer to the buffer that will contain the data to transmit + * @param WriteAddr: Address from where data is to be written + * @param NumOfBlocks: Number of SD blocks to write + * @retval SD status + */ +__weak uint8_t BSP_SD_WriteBlocks_DMA(uint32_t *pData, uint32_t WriteAddr, uint32_t NumOfBlocks) +{ + uint8_t sd_state = MSD_OK; + + /* Write block(s) in DMA transfer mode */ + if (HAL_SD_WriteBlocks_DMA(&hsd1, (uint8_t *)pData, WriteAddr, NumOfBlocks) != HAL_OK) + { + sd_state = MSD_ERROR; + } + + return sd_state; +} + +/* USER CODE BEGIN BeforeEraseSection */ +/* can be used to modify previous code / undefine following code / add code */ +/* USER CODE END BeforeEraseSection */ +/** + * @brief Erases the specified memory area of the given SD card. + * @param StartAddr: Start byte address + * @param EndAddr: End byte address + * @retval SD status + */ +__weak uint8_t BSP_SD_Erase(uint32_t StartAddr, uint32_t EndAddr) +{ + uint8_t sd_state = MSD_OK; + + if (HAL_SD_Erase(&hsd1, StartAddr, EndAddr) != HAL_OK) + { + sd_state = MSD_ERROR; + } + + return sd_state; +} + +/* USER CODE BEGIN BeforeGetCardStateSection */ +/* can be used to modify previous code / undefine following code / add code */ +/* USER CODE END BeforeGetCardStateSection */ + +/** + * @brief Gets the current SD card data status. + * @param None + * @retval Data transfer state. + * This value can be one of the following values: + * @arg SD_TRANSFER_OK: No data transfer is acting + * @arg SD_TRANSFER_BUSY: Data transfer is acting + */ +__weak uint8_t BSP_SD_GetCardState(void) +{ + return ((HAL_SD_GetCardState(&hsd1) == HAL_SD_CARD_TRANSFER ) ? SD_TRANSFER_OK : SD_TRANSFER_BUSY); +} + +/** + * @brief Get SD information about specific SD card. 
+ * @param CardInfo: Pointer to HAL_SD_CardInfoTypedef structure + * @retval None + */ +__weak void BSP_SD_GetCardInfo(HAL_SD_CardInfoTypeDef *CardInfo) +{ + /* Get SD card Information */ + HAL_SD_GetCardInfo(&hsd1, CardInfo); +} + +/* USER CODE BEGIN BeforeCallBacksSection */ +/* can be used to modify previous code / undefine following code / add code */ +/* USER CODE END BeforeCallBacksSection */ +/** + * @brief SD Abort callbacks + * @param hsd: SD handle + * @retval None + */ +void HAL_SD_AbortCallback(SD_HandleTypeDef *hsd) +{ + BSP_SD_AbortCallback(); +} + +/** + * @brief Tx Transfer completed callback + * @param hsd: SD handle + * @retval None + */ +void HAL_SD_TxCpltCallback(SD_HandleTypeDef *hsd) +{ + BSP_SD_WriteCpltCallback(); +} + +/** + * @brief Rx Transfer completed callback + * @param hsd: SD handle + * @retval None + */ +void HAL_SD_RxCpltCallback(SD_HandleTypeDef *hsd) +{ + BSP_SD_ReadCpltCallback(); +} + +/* USER CODE BEGIN CallBacksSection_C */ +/** + * @brief BSP SD Abort callback + * @retval None + * @note empty (up to the user to fill it in or to remove it if useless) + */ +__weak void BSP_SD_AbortCallback(void) +{ + +} + +/** + * @brief BSP Tx Transfer completed callback + * @retval None + * @note empty (up to the user to fill it in or to remove it if useless) + */ +__weak void BSP_SD_WriteCpltCallback(void) +{ + +} + +/** + * @brief BSP Rx Transfer completed callback + * @retval None + * @note empty (up to the user to fill it in or to remove it if useless) + */ +__weak void BSP_SD_ReadCpltCallback(void) +{ + +} +/* USER CODE END CallBacksSection_C */ +#endif + +/** + * @brief Detects if SD card is correctly plugged in the memory slot or not. + * @param None + * @retval Returns if SD is detected or not + */ +__weak uint8_t BSP_SD_IsDetected(void) +{ + __IO uint8_t status = SD_PRESENT; + + /* USER CODE BEGIN 1 */ + if (!g_sdCardIsPresent) { + g_sdCardIsPresent = SD_NOT_PRESENT; + } + /* USER CODE END 1 */ + + return status; +} + +/* USER CODE BEGIN AdditionalCode */ +/* user code can be inserted here */ +/* USER CODE END AdditionalCode */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/FATFS/Target/bsp_driver_sd.h b/src/third_party/stm32_cubeide/FATFS/Target/bsp_driver_sd.h new file mode 100644 index 000000000..656456cda --- /dev/null +++ b/src/third_party/stm32_cubeide/FATFS/Target/bsp_driver_sd.h @@ -0,0 +1,89 @@ +/** + ****************************************************************************** + * @file bsp_driver_sd.h (based on stm32756g_eval_sd.h) + * @brief This file contains the common defines and functions prototypes for + * the bsp_driver_sd.c driver. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7_SD_H +#define __STM32F7_SD_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/* Exported types --------------------------------------------------------*/ +/** + * @brief SD Card information structure + */ +#define BSP_SD_CardInfo HAL_SD_CardInfoTypeDef + +/* Exported constants --------------------------------------------------------*/ +/** + * @brief SD status structure definition + */ +#define MSD_OK ((uint8_t)0x00) +#define MSD_ERROR ((uint8_t)0x01) +#define MSD_ERROR_SD_NOT_PRESENT ((uint8_t)0x02) + +/** + * @brief SD transfer state definition + */ +#define SD_TRANSFER_OK ((uint8_t)0x00) +#define SD_TRANSFER_BUSY ((uint8_t)0x01) + +#define SD_PRESENT ((uint8_t)0x01) +#define SD_NOT_PRESENT ((uint8_t)0x00) +#define SD_DATATIMEOUT ((uint32_t)100000000) + +#ifdef OLD_API +/* kept to avoid issue when migrating old projects. */ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ +#else +/* USER CODE BEGIN BSP_H_CODE */ + +/* Exported functions --------------------------------------------------------*/ +uint8_t BSP_SD_Init(void); +uint8_t BSP_SD_ITConfig(void); +uint8_t BSP_SD_ReadBlocks(uint32_t *pData, uint32_t ReadAddr, uint32_t NumOfBlocks, uint32_t Timeout); +uint8_t BSP_SD_WriteBlocks(uint32_t *pData, uint32_t WriteAddr, uint32_t NumOfBlocks, uint32_t Timeout); +uint8_t BSP_SD_ReadBlocks_DMA(uint32_t *pData, uint32_t ReadAddr, uint32_t NumOfBlocks); +uint8_t BSP_SD_WriteBlocks_DMA(uint32_t *pData, uint32_t WriteAddr, uint32_t NumOfBlocks); +uint8_t BSP_SD_Erase(uint32_t StartAddr, uint32_t EndAddr); +uint8_t BSP_SD_GetCardState(void); +void BSP_SD_GetCardInfo(BSP_SD_CardInfo *CardInfo); +uint8_t BSP_SD_IsDetected(void); + +/* These functions can be modified in case the current settings (e.g. DMA stream) + need to be changed for specific application needs */ +void BSP_SD_AbortCallback(void); +void BSP_SD_WriteCpltCallback(void); +void BSP_SD_ReadCpltCallback(void); +/* USER CODE END BSP_H_CODE */ +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7_SD_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/FATFS/Target/ffconf.h b/src/third_party/stm32_cubeide/FATFS/Target/ffconf.h new file mode 100644 index 000000000..8356368a3 --- /dev/null +++ b/src/third_party/stm32_cubeide/FATFS/Target/ffconf.h @@ -0,0 +1,270 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * FatFs - Generic FAT file system module R0.12c (C)ChaN, 2017 + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +#ifndef _FFCONF +#define _FFCONF 68300 /* Revision ID */ + +/*-----------------------------------------------------------------------------/ +/ Additional user header to be used +/-----------------------------------------------------------------------------*/ + +#include "main.h" +#include "stm32f7xx_hal.h" +#include "bsp_driver_sd.h" +#include "cmsis_os.h" /* _FS_REENTRANT set to 1 and CMSIS API chosen */ + +/*-----------------------------------------------------------------------------/ +/ Function Configurations +/-----------------------------------------------------------------------------*/ + +#define _FS_READONLY 0 /* 0:Read/Write or 1:Read only */ +/* This option switches read-only configuration. (0:Read/Write or 1:Read-only) +/ Read-only configuration removes writing API functions, f_write(), f_sync(), +/ f_unlink(), f_mkdir(), f_chmod(), f_rename(), f_truncate(), f_getfree() +/ and optional writing functions as well. */ + +#define _FS_MINIMIZE 0 /* 0 to 3 */ +/* This option defines minimization level to remove some basic API functions. +/ +/ 0: All basic functions are enabled. +/ 1: f_stat(), f_getfree(), f_unlink(), f_mkdir(), f_truncate() and f_rename() +/ are removed. +/ 2: f_opendir(), f_readdir() and f_closedir() are removed in addition to 1. +/ 3: f_lseek() function is removed in addition to 2. */ + +#define _USE_STRFUNC 0 /* 0:Disable or 1-2:Enable */ +/* This option switches string functions, f_gets(), f_putc(), f_puts() and +/ f_printf(). +/ +/ 0: Disable string functions. +/ 1: Enable without LF-CRLF conversion. +/ 2: Enable with LF-CRLF conversion. */ + +#define _USE_FIND 1 +/* This option switches filtered directory read functions, f_findfirst() and +/ f_findnext(). (0:Disable, 1:Enable 2:Enable with matching altname[] too) */ + +#define _USE_MKFS 1 +/* This option switches f_mkfs() function. (0:Disable or 1:Enable) */ + +#define _USE_FASTSEEK 1 +/* This option switches fast seek feature. (0:Disable or 1:Enable) */ + +#define _USE_EXPAND 0 +/* This option switches f_expand function. (0:Disable or 1:Enable) */ + +#define _USE_CHMOD 0 +/* This option switches attribute manipulation functions, f_chmod() and f_utime(). +/ (0:Disable or 1:Enable) Also _FS_READONLY needs to be 0 to enable this option. */ + +#define _USE_LABEL 0 +/* This option switches volume label functions, f_getlabel() and f_setlabel(). +/ (0:Disable or 1:Enable) */ + +#define _USE_FORWARD 0 +/* This option switches f_forward() function. (0:Disable or 1:Enable) */ + +/*-----------------------------------------------------------------------------/ +/ Locale and Namespace Configurations +/-----------------------------------------------------------------------------*/ + +#define _CODE_PAGE 850 +/* This option specifies the OEM code page to be used on the target system. +/ Incorrect setting of the code page can cause a file open failure. +/ +/ 1 - ASCII (No extended character. Non-LFN cfg. only) +/ 437 - U.S. 
+/ 720 - Arabic +/ 737 - Greek +/ 771 - KBL +/ 775 - Baltic +/ 850 - Latin 1 +/ 852 - Latin 2 +/ 855 - Cyrillic +/ 857 - Turkish +/ 860 - Portuguese +/ 861 - Icelandic +/ 862 - Hebrew +/ 863 - Canadian French +/ 864 - Arabic +/ 865 - Nordic +/ 866 - Russian +/ 869 - Greek 2 +/ 932 - Japanese (DBCS) +/ 936 - Simplified Chinese (DBCS) +/ 949 - Korean (DBCS) +/ 950 - Traditional Chinese (DBCS) +*/ + +#define _USE_LFN 2 /* 0 to 3 */ +#define _MAX_LFN 255 /* Maximum LFN length to handle (12 to 255) */ +/* The _USE_LFN switches the support of long file name (LFN). +/ +/ 0: Disable support of LFN. _MAX_LFN has no effect. +/ 1: Enable LFN with static working buffer on the BSS. Always NOT thread-safe. +/ 2: Enable LFN with dynamic working buffer on the STACK. +/ 3: Enable LFN with dynamic working buffer on the HEAP. +/ +/ To enable the LFN, Unicode handling functions (option/unicode.c) must be added +/ to the project. The working buffer occupies (_MAX_LFN + 1) * 2 bytes and +/ additional 608 bytes at exFAT enabled. _MAX_LFN can be in range from 12 to 255. +/ It should be set 255 to support full featured LFN operations. +/ When use stack for the working buffer, take care on stack overflow. When use heap +/ memory for the working buffer, memory management functions, ff_memalloc() and +/ ff_memfree(), must be added to the project. */ + +#define _LFN_UNICODE 0 /* 0:ANSI/OEM or 1:Unicode */ +/* This option switches character encoding on the API. (0:ANSI/OEM or 1:UTF-16) +/ To use Unicode string for the path name, enable LFN and set _LFN_UNICODE = 1. +/ This option also affects behavior of string I/O functions. */ + +#define _STRF_ENCODE 3 +/* When _LFN_UNICODE == 1, this option selects the character encoding ON THE FILE to +/ be read/written via string I/O functions, f_gets(), f_putc(), f_puts and f_printf(). +/ +/ 0: ANSI/OEM +/ 1: UTF-16LE +/ 2: UTF-16BE +/ 3: UTF-8 +/ +/ This option has no effect when _LFN_UNICODE == 0. */ + +#define _FS_RPATH 0 /* 0 to 2 */ +/* This option configures support of relative path. +/ +/ 0: Disable relative path and remove related functions. +/ 1: Enable relative path. f_chdir() and f_chdrive() are available. +/ 2: f_getcwd() function is available in addition to 1. +*/ + +/*---------------------------------------------------------------------------/ +/ Drive/Volume Configurations +/----------------------------------------------------------------------------*/ + +#define _VOLUMES 1 +/* Number of volumes (logical drives) to be used. */ + +/* USER CODE BEGIN Volumes */ +#define _STR_VOLUME_ID 0 /* 0:Use only 0-9 for drive ID, 1:Use strings for drive ID */ +#define _VOLUME_STRS "RAM","NAND","CF","SD1","SD2","USB1","USB2","USB3" +/* _STR_VOLUME_ID switches string support of volume ID. +/ When _STR_VOLUME_ID is set to 1, also pre-defined strings can be used as drive +/ number in the path name. _VOLUME_STRS defines the drive ID strings for each +/ logical drives. Number of items must be equal to _VOLUMES. Valid characters for +/ the drive ID strings are: A-Z and 0-9. */ +/* USER CODE END Volumes */ + +#define _MULTI_PARTITION 0 /* 0:Single partition, 1:Multiple partition */ +/* This option switches support of multi-partition on a physical drive. +/ By default (0), each logical drive number is bound to the same physical drive +/ number and only an FAT volume found on the physical drive will be mounted. +/ When multi-partition is enabled (1), each logical drive number can be bound to +/ arbitrary physical drive and partition listed in the VolToPart[]. 
Also f_fdisk() +/ function will be available. */ +#define _MIN_SS 512 /* 512, 1024, 2048 or 4096 */ +#define _MAX_SS 512 /* 512, 1024, 2048 or 4096 */ +/* These options configure the range of sector size to be supported. (512, 1024, +/ 2048 or 4096) Always set both 512 for most systems, all type of memory cards and +/ harddisk. But a larger value may be required for on-board flash memory and some +/ type of optical media. When _MAX_SS is larger than _MIN_SS, FatFs is configured +/ to variable sector size and GET_SECTOR_SIZE command must be implemented to the +/ disk_ioctl() function. */ + +#define _USE_TRIM 0 +/* This option switches support of ATA-TRIM. (0:Disable or 1:Enable) +/ To enable Trim function, also CTRL_TRIM command should be implemented to the +/ disk_ioctl() function. */ + +#define _FS_NOFSINFO 0 /* 0,1,2 or 3 */ +/* If you need to know correct free space on the FAT32 volume, set bit 0 of this +/ option, and f_getfree() function at first time after volume mount will force +/ a full FAT scan. Bit 1 controls the use of last allocated cluster number. +/ +/ bit0=0: Use free cluster count in the FSINFO if available. +/ bit0=1: Do not trust free cluster count in the FSINFO. +/ bit1=0: Use last allocated cluster number in the FSINFO if available. +/ bit1=1: Do not trust last allocated cluster number in the FSINFO. +*/ + +/*---------------------------------------------------------------------------/ +/ System Configurations +/----------------------------------------------------------------------------*/ + +#define _FS_TINY 0 /* 0:Normal or 1:Tiny */ +/* This option switches tiny buffer configuration. (0:Normal or 1:Tiny) +/ At the tiny configuration, size of file object (FIL) is reduced _MAX_SS bytes. +/ Instead of private sector buffer eliminated from the file object, common sector +/ buffer in the file system object (FATFS) is used for the file data transfer. */ + +#define _FS_EXFAT 0 +/* This option switches support of exFAT file system. (0:Disable or 1:Enable) +/ When enable exFAT, also LFN needs to be enabled. (_USE_LFN >= 1) +/ Note that enabling exFAT discards C89 compatibility. */ + +#define _FS_NORTC 0 +#define _NORTC_MON 6 +#define _NORTC_MDAY 4 +#define _NORTC_YEAR 2015 +/* The option _FS_NORTC switches timestamp function. If the system does not have +/ any RTC function or valid timestamp is not needed, set _FS_NORTC = 1 to disable +/ the timestamp function. All objects modified by FatFs will have a fixed timestamp +/ defined by _NORTC_MON, _NORTC_MDAY and _NORTC_YEAR in local time. +/ To enable timestamp function (_FS_NORTC = 0), get_fattime() function need to be +/ added to the project to get current time from real-time clock. _NORTC_MON, +/ _NORTC_MDAY and _NORTC_YEAR have no effect. +/ These options have no effect at read-only configuration (_FS_READONLY = 1). */ + +#define _FS_LOCK 0 /* 0:Disable or >=1:Enable */ +/* The option _FS_LOCK switches file lock function to control duplicated file open +/ and illegal operation to open objects. This option must be 0 when _FS_READONLY +/ is 1. +/ +/ 0: Disable file lock function. To avoid volume corruption, application program +/ should avoid illegal open, remove and rename to the open objects. +/ >0: Enable file lock function. The value defines how many files/sub-directories +/ can be opened simultaneously under file lock control. Note that the file +/ lock control is independent of re-entrancy. 
*/ + +#define _FS_REENTRANT 1 /* 0:Disable or 1:Enable */ +#define _FS_TIMEOUT 1000 /* Timeout period in unit of time ticks */ +#define _SYNC_t osSemaphoreId +/* The option _FS_REENTRANT switches the re-entrancy (thread safe) of the FatFs +/ module itself. Note that regardless of this option, file access to different +/ volume is always re-entrant and volume control functions, f_mount(), f_mkfs() +/ and f_fdisk() function, are always not re-entrant. Only file/directory access +/ to the same volume is under control of this function. +/ +/ 0: Disable re-entrancy. _FS_TIMEOUT and _SYNC_t have no effect. +/ 1: Enable re-entrancy. Also user provided synchronization handlers, +/ ff_req_grant(), ff_rel_grant(), ff_del_syncobj() and ff_cre_syncobj() +/ function, must be added to the project. Samples are available in +/ option/syscall.c. +/ +/ The _FS_TIMEOUT defines timeout period in unit of time tick. +/ The _SYNC_t defines O/S dependent sync object type. e.g. HANDLE, ID, OS_EVENT*, +/ SemaphoreHandle_t and etc.. A header file for O/S definitions needs to be +/ included somewhere in the scope of ff.h. */ + +/* define the ff_malloc ff_free macros as FreeRTOS pvPortMalloc and vPortFree macros */ +#if !defined(ff_malloc) && !defined(ff_free) +#define ff_malloc pvPortMalloc +#define ff_free vPortFree +#endif + +#endif /* _FFCONF */ diff --git a/src/third_party/stm32_cubeide/FATFS/Target/sd_diskio.c b/src/third_party/stm32_cubeide/FATFS/Target/sd_diskio.c new file mode 100644 index 000000000..5bd0ad968 --- /dev/null +++ b/src/third_party/stm32_cubeide/FATFS/Target/sd_diskio.c @@ -0,0 +1,687 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file sd_diskio.c + * @brief SD Disk I/O driver + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Note: code generation based on sd_diskio_dma_rtos_template_bspv1.c v2.1.4 + as FreeRTOS is enabled. */ + +/* USER CODE BEGIN firstSection */ +/* can be used to modify / undefine following code or add new definitions */ +/* USER CODE END firstSection*/ + +/* Includes ------------------------------------------------------------------*/ +#include "ff_gen_drv.h" +#include "sd_diskio.h" + +#include +#include + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ + +#define QUEUE_SIZE (uint32_t) 10 +#define READ_CPLT_MSG (uint32_t) 1 +#define WRITE_CPLT_MSG (uint32_t) 2 +/* +================================================================== +enable the defines below to send custom rtos messages +when an error or an abort occurs. +Notice: depending on the HAL/SD driver the HAL_SD_ErrorCallback() +may not be available. +See BSP_SD_ErrorCallback() and BSP_SD_AbortCallback() below +================================================================== + +#define RW_ERROR_MSG (uint32_t) 3 +#define RW_ABORT_MSG (uint32_t) 4 +*/ +/* + * the following Timeout is useful to give the control back to the applications + * in case of errors in either BSP_SD_ReadCpltCallback() or BSP_SD_WriteCpltCallback() + * the value by default is as defined in the BSP platform driver otherwise 30 secs + */ +#define SD_TIMEOUT 250 + +#define SD_DEFAULT_BLOCK_SIZE 512 + +/* + * Depending on the use case, the SD card initialization could be done at the + * application level: if it is the case define the flag below to disable + * the BSP_SD_Init() call in the SD_Initialize() and add a call to + * BSP_SD_Init() elsewhere in the application. + */ +/* USER CODE BEGIN disableSDInit */ +/* #define DISABLE_SD_INIT */ +/* USER CODE END disableSDInit */ + +/* + * when using cachable memory region, it may be needed to maintain the cache + * validity. Enable the define below to activate a cache maintenance at each + * read and write operation. + * Notice: This is applicable only for cortex M7 based platform. 
+ */ +/* USER CODE BEGIN enableSDDmaCacheMaintenance */ +/* #define ENABLE_SD_DMA_CACHE_MAINTENANCE 1 */ +/* USER CODE BEGIN enableSDDmaCacheMaintenance */ + +/* +* Some DMA requires 4-Byte aligned address buffer to correctly read/wite data, +* in FatFs some accesses aren't thus we need a 4-byte aligned scratch buffer to correctly +* transfer data +*/ +/* USER CODE BEGIN enableScratchBuffer */ +/* #define ENABLE_SCRATCH_BUFFER */ +/* USER CODE END enableScratchBuffer */ + +/* Private variables ---------------------------------------------------------*/ +#if defined(ENABLE_SCRATCH_BUFFER) +#if defined (ENABLE_SD_DMA_CACHE_MAINTENANCE) +ALIGN_32BYTES(static uint8_t scratch[BLOCKSIZE]); // 32-Byte aligned for cache maintenance +#else +__ALIGN_BEGIN static uint8_t scratch[BLOCKSIZE] __ALIGN_END; +#endif +#endif +/* Disk status */ +static volatile DSTATUS Stat = STA_NOINIT; + +#if (osCMSIS <= 0x20000U) +static osMessageQId SDQueueID = NULL; +#else +static osMessageQueueId_t SDQueueID = NULL; +#endif +/* Private function prototypes -----------------------------------------------*/ +static DSTATUS SD_CheckStatus(BYTE lun); +DSTATUS SD_initialize (BYTE); +DSTATUS SD_status (BYTE); +DRESULT SD_read (BYTE, BYTE*, DWORD, UINT); +#if _USE_WRITE == 1 +DRESULT SD_write (BYTE, const BYTE*, DWORD, UINT); +#endif /* _USE_WRITE == 1 */ +#if _USE_IOCTL == 1 +DRESULT SD_ioctl (BYTE, BYTE, void*); +#endif /* _USE_IOCTL == 1 */ + +const Diskio_drvTypeDef SD_Driver = +{ + SD_initialize, + SD_status, + SD_read, +#if _USE_WRITE == 1 + SD_write, +#endif /* _USE_WRITE == 1 */ + +#if _USE_IOCTL == 1 + SD_ioctl, +#endif /* _USE_IOCTL == 1 */ +}; + +/* USER CODE BEGIN beforeFunctionSection */ +/* can be used to modify / undefine following code or add new code */ +/* USER CODE END beforeFunctionSection */ + +/* Private functions ---------------------------------------------------------*/ + +static int SD_CheckStatusWithTimeout(uint32_t timeout) +{ + uint32_t timer; + /* block until SDIO peripherial is ready again or a timeout occur */ +#if (osCMSIS <= 0x20000U) + timer = osKernelSysTick(); + while( osKernelSysTick() - timer < timeout) +#else + timer = osKernelGetTickCount(); + while( osKernelGetTickCount() - timer < timeout) +#endif + { + if (BSP_SD_GetCardState() == SD_TRANSFER_OK) + { + return 0; + } + } + + return -1; +} + +static DSTATUS SD_CheckStatus(BYTE lun) +{ + Stat = STA_NOINIT; + + if(BSP_SD_GetCardState() == SD_TRANSFER_OK) + { + Stat &= ~STA_NOINIT; + } + + return Stat; +} + +/** + * @brief Initializes a Drive + * @param lun : not used + * @retval DSTATUS: Operation status + */ +DSTATUS SD_initialize(BYTE lun) +{ +Stat = STA_NOINIT; + + /* + * check that the kernel has been started before continuing + * as the osMessage API will fail otherwise + */ +#if (osCMSIS <= 0x20000U) + if(osKernelRunning()) +#else + if(osKernelGetState() == osKernelRunning) +#endif + { +#if !defined(DISABLE_SD_INIT) + + if(BSP_SD_Init() == MSD_OK) + { + Stat = SD_CheckStatus(lun); + } + +#else + Stat = SD_CheckStatus(lun); +#endif + + /* + * if the SD is correctly initialized, create the operation queue + * if not already created + */ + + if (Stat != STA_NOINIT) + { + if (SDQueueID == NULL) + { + #if (osCMSIS <= 0x20000U) + osMessageQDef(SD_Queue, QUEUE_SIZE, uint16_t); + SDQueueID = osMessageCreate (osMessageQ(SD_Queue), NULL); +#else + SDQueueID = osMessageQueueNew(QUEUE_SIZE, 2, NULL); +#endif + } + + if (SDQueueID == NULL) + { + Stat |= STA_NOINIT; + } + } + } + + return Stat; +} + +/** + * @brief Gets Disk Status + * @param 
lun : not used + * @retval DSTATUS: Operation status + */ +DSTATUS SD_status(BYTE lun) +{ + return SD_CheckStatus(lun); +} + +/* USER CODE BEGIN beforeReadSection */ +/* can be used to modify previous code / undefine following code / add new code */ +/* USER CODE END beforeReadSection */ +/** + * @brief Reads Sector(s) + * @param lun : not used + * @param *buff: Data buffer to store read data + * @param sector: Sector address (LBA) + * @param count: Number of sectors to read (1..128) + * @retval DRESULT: Operation result + */ + +DRESULT SD_read(BYTE lun, BYTE *buff, DWORD sector, UINT count) +{ + DRESULT res = RES_ERROR; + uint32_t timer; +#if (osCMSIS < 0x20000U) + osEvent event; +#else + uint16_t event; + osStatus_t status; +#endif +#if (ENABLE_SD_DMA_CACHE_MAINTENANCE == 1) + uint32_t alignedAddr; +#endif + /* + * ensure the SDCard is ready for a new operation + */ + + if (SD_CheckStatusWithTimeout(SD_TIMEOUT) < 0) + { + return res; + } + +#if defined(ENABLE_SCRATCH_BUFFER) + if (!((uint32_t)buff & 0x3)) + { +#endif + /* Fast path cause destination buffer is correctly aligned */ + uint8_t ret = BSP_SD_ReadBlocks_DMA((uint32_t*)buff, (uint32_t)(sector), count); + + if (ret == MSD_OK) { +#if (osCMSIS < 0x20000U) + /* wait for a message from the queue or a timeout */ + event = osMessageGet(SDQueueID, SD_TIMEOUT); + + if (event.status == osEventMessage) + { + if (event.value.v == READ_CPLT_MSG) + { + timer = osKernelSysTick(); + /* block until SDIO IP is ready or a timeout occur */ + while(osKernelSysTick() - timer
© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Note: code generation based on sd_diskio_dma_rtos_template.h */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __SD_DISKIO_H +#define __SD_DISKIO_H + +/* USER CODE BEGIN firstSection */ +/* can be used to modify / undefine following code or add new definitions */ +/* USER CODE END firstSection */ + +/* Includes ------------------------------------------------------------------*/ +#include "bsp_driver_sd.h" +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/* Exported functions ------------------------------------------------------- */ +extern const Diskio_drvTypeDef SD_Driver; + +/* USER CODE BEGIN lastSection */ +/* can be used to modify / undefine previous code or add new definitions */ +/* USER CODE END lastSection */ + +#endif /* __SD_DISKIO_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/src/third_party/stm32_cubeide/LIBJPEG/App/libjpeg.c b/src/third_party/stm32_cubeide/LIBJPEG/App/libjpeg.c new file mode 100644 index 000000000..44702d42e --- /dev/null +++ b/src/third_party/stm32_cubeide/LIBJPEG/App/libjpeg.c @@ -0,0 +1,59 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * File Name : libjpeg.c + * Description : This file provides code for the configuration + * of the libjpeg instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "libjpeg.h" + +/* USER CODE BEGIN 0 */ +/* USER CODE END 0 */ + +/* USER CODE BEGIN 1 */ +/* USER CODE END 1 */ + +/* Global variables ---------------------------------------------------------*/ + +/* USER CODE BEGIN 2 */ +/* USER CODE END 2 */ + +/* LIBJPEG init function */ +void MX_LIBJPEG_Init(void) +{ +/***************************************/ + /** + */ + + /* USER CODE BEGIN 3 */ + /* USER CODE END 3 */ + +} + +/* USER CODE BEGIN 4 */ +/* USER CODE END 4 */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/LIBJPEG/App/libjpeg.h b/src/third_party/stm32_cubeide/LIBJPEG/App/libjpeg.h new file mode 100644 index 000000000..b90a691b9 --- /dev/null +++ b/src/third_party/stm32_cubeide/LIBJPEG/App/libjpeg.h @@ -0,0 +1,58 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * File Name : libjpeg.h + * Description : This file provides code for the configuration + * of the libjpeg instances. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __libjpeg_H +#define __libjpeg_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "jpeglib.h" + +/* USER CODE BEGIN 0 */ +/* USER CODE END 0 */ + +/* Global variables ---------------------------------------------------------*/ + +/* USER CODE BEGIN 1 */ +/* USER CODE END 1 */ + +/* LIBJPEG init function */ +void MX_LIBJPEG_Init(void); + +/* USER CODE BEGIN 2 */ +/* USER CODE END 2 */ + +#ifdef __cplusplus +} +#endif +#endif /*__libjpeg_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/LIBJPEG/Target/jconfig.h b/src/third_party/stm32_cubeide/LIBJPEG/Target/jconfig.h new file mode 100644 index 000000000..9b9141b90 --- /dev/null +++ b/src/third_party/stm32_cubeide/LIBJPEG/Target/jconfig.h @@ -0,0 +1,179 @@ +/* + * jconfig.h + * + * Copyright (C) 1991-1994, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file documents the configuration options that are required to + * customize the JPEG software for a particular system. + * + * The actual configuration options for a particular installation are stored + * in jconfig.h. On many machines, jconfig.h can be generated automatically + * or copied from one of the "canned" jconfig files that we supply. But if + * you need to generate a jconfig.h file by hand, this file tells you how. + * + */ + +/* + * these macros provide simple implementation of the system memory + * dependent portion of the JPEG memory manager. This implementation + * assumes that no backing-store files are needed: all required space + * can be obtained from malloc(). + * This is very portable in the sense that it'll compile on almost anything, + * but you'd better have lots of main memory (or virtual memory) if you want + * to process big images. + * Note that the max_memory_to_use option is ignored by this implementation. + */ + +/*Import File manager wrapping*/ +#include "jdata_conf.h" + +/* + * These symbols indicate the properties of your machine or compiler. + * #define the symbol if yes, #undef it if no. + */ + +#define NO_GETENV +#undef USE_MSDOS_MEMMGR +#undef USE_MAC_MEMMGR +/*Enabling 'USE_HEAP_MEM' disables the use of 'temp files' from 'backing-store management' : refer to 'jmemsys.h' for details. +Moreover, this porting version does not proposed validated 'temp files' support : it is recommended to keep this macro 'defined'.*/ +#define USE_HEAP_MEM +#define MAX_ALLOC_CHUNK 0x10000 /* 64kB */ + +/* Does your compiler support function prototypes? + * (If not, you also need to use ansi2knr, see install.txt) + */ +#define HAVE_PROTOTYPES + +/* Does your compiler support the declaration "unsigned char" ? + * How about "unsigned short" ? + */ +#define HAVE_UNSIGNED_CHAR +#define HAVE_UNSIGNED_SHORT + +/* Define "void" as "char" if your compiler doesn't know about type void. 
+ * NOTE: be sure to define void such that "void *" represents the most general + * pointer type, e.g., that returned by malloc(). + */ +/* #define void char */ + +/* Define "const" as empty if your compiler doesn't know the "const" keyword. + */ +/* #define const */ + +/* Define this if an ordinary "char" type is unsigned. + * If you're not sure, leaving it undefined will work at some cost in speed. + * If you defined HAVE_UNSIGNED_CHAR then the speed difference is minimal. + */ +#undef CHAR_IS_UNSIGNED + +/* Define this if your system has an ANSI-conforming <stddef.h> file. + */ +#define HAVE_STDDEF_H + +/* Define this if your system has an ANSI-conforming <stdlib.h> file. + */ +#define HAVE_STDLIB_H + +/* Define this if your system does not have an ANSI/SysV <string.h>, + * but does have a BSD-style <strings.h>. + */ +#undef NEED_BSD_STRINGS + +/* Define this if your system does not provide typedef size_t in any of the + * ANSI-standard places (stddef.h, stdlib.h, or stdio.h), but places it in + * <sys/types.h> instead. + */ +#undef NEED_SYS_TYPES_H + +/* For 80x86 machines, you need to define NEED_FAR_POINTERS, + * unless you are using a large-data memory model or 80386 flat-memory mode. + * On less brain-damaged CPUs this symbol must not be defined. + * (Defining this symbol causes large data structures to be referenced through + * "far" pointers and to be allocated with a special version of malloc.) + */ +#undef NEED_FAR_POINTERS + +/* Define this if your linker needs global names to be unique in less + * than the first 15 characters. + */ +#undef NEED_SHORT_EXTERNAL_NAMES + +/* Although a real ANSI C compiler can deal perfectly well with pointers to + * unspecified structures (see "incomplete types" in the spec), a few pre-ANSI + * and pseudo-ANSI compilers get confused. To keep one of these bozos happy, + * define INCOMPLETE_TYPES_BROKEN. This is not recommended unless you + * actually get "missing structure definition" warnings or errors while + * compiling the JPEG code. + */ +#undef INCOMPLETE_TYPES_BROKEN + +/* Define "boolean" as unsigned char, not int, on Windows systems. + */ +#ifdef _WIN32 +#ifndef __RPCNDR_H__ /* don't conflict if rpcndr.h already read */ +typedef unsigned char boolean; +#endif +#define HAVE_BOOLEAN /* prevent jmorecfg.h from redefining it */ +#endif + +/* + * The following options affect code selection within the JPEG library, + * but they don't need to be visible to applications using the library. + * To minimize application namespace pollution, the symbols won't be + * defined unless JPEG_INTERNALS has been defined. + */ + +#ifdef JPEG_INTERNALS + +/* Define this if your compiler implements ">>" on signed values as a logical + * (unsigned) shift; leave it undefined if ">>" is a signed (arithmetic) shift, + * which is the normal and rational definition. + */ +#undef RIGHT_SHIFT_IS_UNSIGNED + +#endif /* JPEG_INTERNALS */ + +/* + * The remaining options do not affect the JPEG library proper, + * but only the sample applications cjpeg/djpeg (see cjpeg.c, djpeg.c). + * Other applications can ignore these. + */ + +#ifdef JPEG_CJPEG_DJPEG + +/* These defines indicate which image (non-JPEG) file formats are allowed. 
*/ + +#define BMP_SUPPORTED /* BMP image file format */ +#define GIF_SUPPORTED /* GIF image file format */ +#define PPM_SUPPORTED /* PBMPLUS PPM/PGM image file format */ +#undef RLE_SUPPORTED /* Utah RLE image file format */ +#define TARGA_SUPPORTED /* Targa image file format */ + +/* Define this if you want to name both input and output files on the command + * line, rather than using stdout and optionally stdin. You MUST do this if + * your system can't cope with binary I/O to stdin/stdout. See comments at + * head of cjpeg.c or djpeg.c. + */ +#undef TWO_FILE_COMMANDLINE + +/* Define this if your system needs explicit cleanup of temporary files. + * This is crucial under MS-DOS, where the temporary "files" may be areas + * of extended memory; on most other systems it's not as important. + */ +#undef NEED_SIGNAL_CATCHER + +/* By default, we open image files with fopen(...,"rb") or fopen(...,"wb"). + * This is necessary on systems that distinguish text files from binary files, + * and is harmless on most systems that don't. If you have one of the rare + * systems that complains about the "b" spec, define this symbol. + */ +#undef DONT_USE_B_MODE + +/* Define this if you want percent-done progress reports from cjpeg/djpeg. + */ +#undef PROGRESS_REPORT + +#endif /* JPEG_CJPEG_DJPEG */ diff --git a/src/third_party/stm32_cubeide/LIBJPEG/Target/jdata_conf.h b/src/third_party/stm32_cubeide/LIBJPEG/Target/jdata_conf.h new file mode 100644 index 000000000..aa12d49cc --- /dev/null +++ b/src/third_party/stm32_cubeide/LIBJPEG/Target/jdata_conf.h @@ -0,0 +1,40 @@ +/** + ****************************************************************************** + * File Name : jdata_conf.h + * Description : This file provides header to "jdata_conf.h" module. + * It implements also file based read/write functions. + * + ****************************************************************************** + * + * Copyright (c) 2019 STMicroelectronics. All rights reserved. + * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** +**/ + +/* Includes ------------------------------------------------------------------*/ + +#include + +/*FreeRtos Api*/ +#include "cmsis_os.h" + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/*This defines the memory allocation methods.*/ +#define JMALLOC pvPortMalloc +#define JFREE vPortFree + +/*This defines the File data manager type.*/ +#undef JFILE + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/LIBJPEG/Target/jmorecfg.h b/src/third_party/stm32_cubeide/LIBJPEG/Target/jmorecfg.h new file mode 100644 index 000000000..633049da1 --- /dev/null +++ b/src/third_party/stm32_cubeide/LIBJPEG/Target/jmorecfg.h @@ -0,0 +1,356 @@ +/* + * jmorecfg.h + * + * Copyright (C) 1991-1997, Thomas G. Lane. 
+ * Modified 1997-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains additional configuration options that customize the + * JPEG software for special applications or support machine-dependent + * optimizations. Most users will not need to touch this file. + */ + +/* + * Define BITS_IN_JSAMPLE as either + * 8 for 8-bit sample values (the usual setting) + * 12 for 12-bit sample values + * Only 8 and 12 are legal data precisions for lossy JPEG according to the + * JPEG standard, and the IJG code does not support anything else! + * We do not support run-time selection of data precision, sorry. + */ + +#define BITS_IN_JSAMPLE 8 /* use 8 or 12 */ +/* + * Maximum number of components (color channels) allowed in JPEG image. + * To meet the letter of the JPEG spec, set this to 255. However, darn + * few applications need more than 4 channels (maybe 5 for CMYK + alpha + * mask). We recommend 10 as a reasonable compromise; use 4 if you are + * really short on memory. (Each allowed component costs a hundred or so + * bytes of storage, whether actually used in an image or not.) + */ + +#define MAX_COMPONENTS 10 /* maximum number of image components */ +/* + * Basic data types. + * You may need to change these if you have a machine with unusual data + * type sizes; for example, "char" not 8 bits, "short" not 16 bits, + * or "long" not 32 bits. We don't care whether "int" is 16 or 32 bits, + * but it had better be at least 16. + */ + +/* Representation of a single sample (pixel element value). + * We frequently allocate large arrays of these, so it's important to keep + * them small. But if you have memory to burn and access to char or short + * arrays is very slow on your hardware, you might want to change these. + */ + +#if BITS_IN_JSAMPLE == 8 +/* JSAMPLE should be the smallest type that will hold the values 0..255. + * You can use a signed char by having GETJSAMPLE mask it with 0xFF. + */ + +#ifdef HAVE_UNSIGNED_CHAR + +typedef unsigned char JSAMPLE; +#define GETJSAMPLE(value) ((int) (value)) + +#else /* not HAVE_UNSIGNED_CHAR */ + +typedef char JSAMPLE; +#ifdef CHAR_IS_UNSIGNED +#define GETJSAMPLE(value) ((int) (value)) +#else +#define GETJSAMPLE(value) ((int) (value) & 0xFF) +#endif /* CHAR_IS_UNSIGNED */ + +#endif /* HAVE_UNSIGNED_CHAR */ + +#define MAXJSAMPLE 255 +#define CENTERJSAMPLE 128 + +#endif /* BITS_IN_JSAMPLE == 8 */ + +#if BITS_IN_JSAMPLE == 12 +/* JSAMPLE should be the smallest type that will hold the values 0..4095. + * On nearly all machines "short" will do nicely. + */ + +typedef short JSAMPLE; +#define GETJSAMPLE(value) ((int) (value)) + +#define MAXJSAMPLE 4095 +#define CENTERJSAMPLE 2048 + +#endif /* BITS_IN_JSAMPLE == 12 */ + +/* Representation of a DCT frequency coefficient. + * This should be a signed value of at least 16 bits; "short" is usually OK. + * Again, we allocate large arrays of these, but you can change to int + * if you have memory to burn and "short" is really slow. + */ + +typedef short JCOEF; +/* Compressed datastreams are represented as arrays of JOCTET. + * These must be EXACTLY 8 bits wide, at least once they are written to + * external storage. Note that when using the stdio data source/destination + * managers, this is also the data type passed to fread/fwrite. 
+ */ + +#ifdef HAVE_UNSIGNED_CHAR + +typedef unsigned char JOCTET; +#define GETJOCTET(value) (value) + +#else /* not HAVE_UNSIGNED_CHAR */ + +typedef char JOCTET; +#ifdef CHAR_IS_UNSIGNED +#define GETJOCTET(value) (value) +#else +#define GETJOCTET(value) ((value) & 0xFF) +#endif /* CHAR_IS_UNSIGNED */ + +#endif /* HAVE_UNSIGNED_CHAR */ + +/* These typedefs are used for various table entries and so forth. + * They must be at least as wide as specified; but making them too big + * won't cost a huge amount of memory, so we don't provide special + * extraction code like we did for JSAMPLE. (In other words, these + * typedefs live at a different point on the speed/space tradeoff curve.) + */ + +/* UINT8 must hold at least the values 0..255. */ + +#ifdef HAVE_UNSIGNED_CHAR +typedef unsigned char UINT8; +#else /* not HAVE_UNSIGNED_CHAR */ +#ifdef CHAR_IS_UNSIGNED +typedef char UINT8; +#else /* not CHAR_IS_UNSIGNED */ +typedef short UINT8; +#endif /* CHAR_IS_UNSIGNED */ +#endif /* HAVE_UNSIGNED_CHAR */ + +/* UINT16 must hold at least the values 0..65535. */ + +#ifdef HAVE_UNSIGNED_SHORT +typedef unsigned short UINT16; +#else /* not HAVE_UNSIGNED_SHORT */ +typedef unsigned int UINT16; +#endif /* HAVE_UNSIGNED_SHORT */ + +/* INT16 must hold at least the values -32768..32767. */ + +#ifndef XMD_H /* X11/xmd.h correctly defines INT16 */ +typedef short INT16; +#endif + +/* INT32 must hold at least signed 32-bit values. */ + +#ifndef XMD_H /* X11/xmd.h correctly defines INT32 */ +#ifndef _BASETSD_H_ /* Microsoft defines it in basetsd.h */ +#ifndef _BASETSD_H /* MinGW is slightly different */ +#ifndef QGLOBAL_H /* Qt defines it in qglobal.h */ +typedef long INT32; +#endif +#endif +#endif +#endif + +/* Datatype used for image dimensions. The JPEG standard only supports + * images up to 64K*64K due to 16-bit fields in SOF markers. Therefore + * "unsigned int" is sufficient on all machines. However, if you need to + * handle larger images and you don't mind deviating from the spec, you + * can change this datatype. + */ + +typedef unsigned int JDIMENSION; + +#define JPEG_MAX_DIMENSION 65500L /* a tad under 64K to prevent overflows */ + +/* These macros are used in all function definitions and extern declarations. + * You could modify them if you need to change function linkage conventions; + * in particular, you'll need to do that to make the library a Windows DLL. + * Another application is to make all functions global for use with debuggers + * or code profilers that require it. + */ + +/* a function called through method pointers: */ +#define METHODDEF(type) static type +/* a function used only in its module: */ +#define LOCAL(type) static type +/* a function referenced thru EXTERNs: */ +#define GLOBAL(type) type +/* a reference to a GLOBAL function: */ +#define EXTERN(type) extern type + +/* This macro is used to declare a "method", that is, a function pointer. + * We want to supply prototype parameters if the compiler can cope. + * Note that the arglist parameter must be parenthesized! + * Again, you can customize this if you need special linkage keywords. + */ + +#ifdef HAVE_PROTOTYPES +#define JMETHOD(type,methodname,arglist) type (*methodname) arglist +#else +#define JMETHOD(type,methodname,arglist) type (*methodname) () +#endif + +/* Here is the pseudo-keyword for declaring pointers that must be "far" + * on 80x86 machines. Most of the specialized coding for 80x86 is handled + * by just saying "FAR *" where such a pointer is needed. 
In a few places + * explicit coding is needed; see uses of the NEED_FAR_POINTERS symbol. + */ + +#ifndef FAR +#ifdef NEED_FAR_POINTERS +#define FAR far +#else +#define FAR +#endif +#endif + +/* + * On a few systems, type boolean and/or its values FALSE, TRUE may appear + * in standard header files. Or you may have conflicts with application- + * specific header files that you want to include together with these files. + * Defining HAVE_BOOLEAN before including jpeglib.h should make it work. + */ + +#ifndef HAVE_BOOLEAN +typedef int boolean; +#endif +#ifndef FALSE /* in case these macros already exist */ +#define FALSE 0 /* values of boolean */ +#endif +#ifndef TRUE +#define TRUE 1 +#endif + +/* + * The remaining options affect code selection within the JPEG library, + * but they don't need to be visible to most applications using the library. + * To minimize application namespace pollution, the symbols won't be + * defined unless JPEG_INTERNALS or JPEG_INTERNAL_OPTIONS has been defined. + */ + +#ifdef JPEG_INTERNALS +#define JPEG_INTERNAL_OPTIONS +#endif + +#ifdef JPEG_INTERNAL_OPTIONS + +/* + * These defines indicate whether to include various optional functions. + * Undefining some of these symbols will produce a smaller but less capable + * library. Note that you can leave certain source files out of the + * compilation/linking process if you've #undef'd the corresponding symbols. + * (You may HAVE to do that if your compiler doesn't like null source files.) + */ + +/* Capability options common to encoder and decoder: */ + +#define DCT_ISLOW_SUPPORTED /* slow but accurate integer algorithm */ +#define DCT_IFAST_SUPPORTED /* faster, less accurate integer method */ +#define DCT_FLOAT_SUPPORTED /* floating-point: accurate, fast on fast HW */ + +/* Encoder capability options: */ + +#define C_ARITH_CODING_SUPPORTED /* Arithmetic coding back end? */ +#define C_MULTISCAN_FILES_SUPPORTED /* Multiple-scan JPEG files? */ +#define C_PROGRESSIVE_SUPPORTED /* Progressive JPEG? (Requires MULTISCAN) */ +#define DCT_SCALING_SUPPORTED /* Input rescaling via DCT? (Requires DCT_ISLOW) */ +#define ENTROPY_OPT_SUPPORTED /* Optimization of entropy coding parms? */ + +/* Note: if you selected 12-bit data precision, it is dangerous to turn off + * ENTROPY_OPT_SUPPORTED. The standard Huffman tables are only good for 8-bit + * precision, so jchuff.c normally uses entropy optimization to compute + * usable tables for higher precision. If you don't want to do optimization, + * you'll have to supply different default Huffman tables. + * The exact same statements apply for progressive JPEG: the default tables + * don't work for progressive mode. (This may get fixed, however.) + */ + +#define INPUT_SMOOTHING_SUPPORTED /* Input image smoothing option? */ + +/* Decoder capability options: */ + +#define D_ARITH_CODING_SUPPORTED /* Arithmetic coding back end? */ +#define D_MULTISCAN_FILES_SUPPORTED /* Multiple-scan JPEG files? */ +#define D_PROGRESSIVE_SUPPORTED /* Progressive JPEG? (Requires MULTISCAN) */ +#define IDCT_SCALING_SUPPORTED /* Output rescaling via IDCT? */ +#define SAVE_MARKERS_SUPPORTED /* jpeg_save_markers() needed? */ +#define BLOCK_SMOOTHING_SUPPORTED /* Block smoothing? (Progressive only) */ +#undef UPSAMPLE_SCALING_SUPPORTED /* Output rescaling at upsample stage? */ +#define UPSAMPLE_MERGING_SUPPORTED /* Fast path for sloppy upsampling? */ +#define QUANT_1PASS_SUPPORTED /* 1-pass color quantization? */ +#define QUANT_2PASS_SUPPORTED /* 2-pass color quantization? 
*/ + +/* more capability options later, no doubt */ + +/* + * Ordering of RGB data in scanlines passed to or from the application. + * If your application wants to deal with data in the order B,G,R, just + * change these macros. You can also deal with formats such as R,G,B,X + * (one extra byte per pixel) by changing RGB_PIXELSIZE. Note that changing + * the offsets will also change the order in which colormap data is organized. + * RESTRICTIONS: + * 1. The sample applications cjpeg,djpeg do NOT support modified RGB formats. + * 2. The color quantizer modules will not behave desirably if RGB_PIXELSIZE + * is not 3 (they don't understand about dummy color components!). So you + * can't use color quantization if you change that value. + */ + +#define RGB_RED 0 /* Offset of Red in an RGB scanline element */ +#define RGB_GREEN 1 /* Offset of Green */ +#define RGB_BLUE 2 /* Offset of Blue */ +#define RGB_PIXELSIZE 3 /* JSAMPLEs per RGB scanline element */ + +/* Definitions for speed-related optimizations. */ + +/* If your compiler supports inline functions, define INLINE + * as the inline keyword; otherwise define it as empty. + */ + +#ifndef INLINE +#if defined ( __CC_ARM ) + #define INLINE __inline /*!< inline keyword for ARM Compiler */ + +#elif defined ( __ICCARM__ ) + #define INLINE inline /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */ + +#elif defined ( __GNUC__ ) + #define INLINE inline /*!< inline keyword for GNU Compiler */ +#endif +#ifndef INLINE +#define INLINE /* default is to define it as empty */ +#endif +#endif + +/* On some machines (notably 68000 series) "int" is 32 bits, but multiplying + * two 16-bit shorts is faster than multiplying two ints. Define MULTIPLIER + * as short on such a machine. MULTIPLIER must be at least 16 bits wide. + */ + +#ifndef MULTIPLIER +#define MULTIPLIER int /* type for fastest integer multiply */ +#endif + +/* FAST_FLOAT should be either float or double, whichever is done faster + * by your compiler. (Note that this type is only used in the floating point + * DCT routines, so it only matters if you've defined DCT_FLOAT_SUPPORTED.) + * Typically, float is faster in ANSI C compilers, while double is faster in + * pre-ANSI compilers (because they insist on converting to double anyway). + * The code below therefore chooses float if we have ANSI-style prototypes. + */ + +#ifndef FAST_FLOAT +#ifdef HAVE_PROTOTYPES +#define FAST_FLOAT float +#else +#define FAST_FLOAT double +#endif +#endif + +#endif /* JPEG_INTERNAL_OPTIONS */ diff --git a/src/third_party/stm32_cubeide/LWIP/App/lwip.c b/src/third_party/stm32_cubeide/LWIP/App/lwip.c new file mode 100644 index 000000000..69bf132bc --- /dev/null +++ b/src/third_party/stm32_cubeide/LWIP/App/lwip.c @@ -0,0 +1,167 @@ +/** + ****************************************************************************** + * File Name : LWIP.c + * Description : This file provides initialization code for LWIP + * middleWare. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "lwip.h" +#include "lwip/init.h" +#include "lwip/netif.h" +#if defined ( __CC_ARM ) /* MDK ARM Compiler */ +#include "lwip/sio.h" +#endif /* MDK ARM Compiler */ + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ +/* Private function prototypes -----------------------------------------------*/ +/* ETH Variables initialization ----------------------------------------------*/ +void Error_Handler(void); + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/* Variables Initialization */ +struct netif gnetif; +ip4_addr_t ipaddr; +ip4_addr_t netmask; +ip4_addr_t gw; + +/* USER CODE BEGIN 2 */ + +/* USER CODE END 2 */ + +/** + * LwIP initialization function + */ +void MX_LWIP_Init(void) +{ + /* Initilialize the LwIP stack with RTOS */ + tcpip_init( NULL, NULL ); + + /* IP addresses initialization with DHCP (IPv4) */ + ipaddr.addr = 0; + netmask.addr = 0; + gw.addr = 0; + + /* add the network interface (IPv4/IPv6) with RTOS */ + netif_add(&gnetif, &ipaddr, &netmask, &gw, NULL, ðernetif_init, &tcpip_input); + + /* Registers the default network interface */ + netif_set_default(&gnetif); + + if (netif_is_link_up(&gnetif)) + { + /* When the netif is fully configured this function must be called */ + netif_set_up(&gnetif); + } + else + { + /* When the netif link is down this function must be called */ + netif_set_down(&gnetif); + } + + /* Start DHCP negotiation for a network interface (IPv4) */ + dhcp_start(&gnetif); + +/* USER CODE BEGIN 3 */ + +/* USER CODE END 3 */ +} + +#ifdef USE_OBSOLETE_USER_CODE_SECTION_4 +/* Kept to help code migration. (See new 4_1, 4_2... sections) */ +/* Avoid to use this user section which will become obsolete. */ +/* USER CODE BEGIN 4 */ +/* USER CODE END 4 */ +#endif + +#if defined ( __CC_ARM ) /* MDK ARM Compiler */ +/** + * Opens a serial device for communication. + * + * @param devnum device number + * @return handle to serial device if successful, NULL otherwise + */ +sio_fd_t sio_open(u8_t devnum) +{ + sio_fd_t sd; + +/* USER CODE BEGIN 7 */ + sd = 0; // dummy code +/* USER CODE END 7 */ + + return sd; +} + +/** + * Sends a single character to the serial device. + * + * @param c character to send + * @param fd serial device handle + * + * @note This function will block until the character can be sent. + */ +void sio_send(u8_t c, sio_fd_t fd) +{ +/* USER CODE BEGIN 8 */ +/* USER CODE END 8 */ +} + +/** + * Reads from the serial device. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received - may be 0 if aborted by sio_read_abort + * + * @note This function will block until data can be received. The blocking + * can be cancelled by calling sio_read_abort(). + */ +u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len) +{ + u32_t recved_bytes; + +/* USER CODE BEGIN 9 */ + recved_bytes = 0; // dummy code +/* USER CODE END 9 */ + return recved_bytes; +} + +/** + * Tries to read from the serial device. Same as sio_read but returns + * immediately if no data is available and never blocks. 
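MX_LWIP_Init() above brings the interface up and calls dhcp_start(), but it returns immediately, before any address has been obtained. A minimal sketch of how application code might wait for the lease, assuming a FreeRTOS task context, the global gnetif defined above, and lwIP's standard dhcp_supplied_address() helper:

    #include "lwip/dhcp.h"
    #include "lwip/netif.h"
    #include "cmsis_os.h"

    extern struct netif gnetif;          /* defined in lwip.c above */

    /* Poll until the DHCP client has bound an address to the interface. */
    static void wait_for_dhcp(void)
    {
        while (!dhcp_supplied_address(&gnetif)) {
            osDelay(100);                /* check every 100 ms; a timeout could be added */
        }
    }

A common refinement is to fall back to a static address via netif_set_addr() if the loop does not complete within a few seconds.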
+ * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received + */ +u32_t sio_tryread(sio_fd_t fd, u8_t *data, u32_t len) +{ + u32_t recved_bytes; + +/* USER CODE BEGIN 10 */ + recved_bytes = 0; // dummy code +/* USER CODE END 10 */ + return recved_bytes; +} +#endif /* MDK ARM Compiler */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/LWIP/App/lwip.h b/src/third_party/stm32_cubeide/LWIP/App/lwip.h new file mode 100644 index 000000000..94bed650c --- /dev/null +++ b/src/third_party/stm32_cubeide/LWIP/App/lwip.h @@ -0,0 +1,77 @@ +/** + ****************************************************************************** + * File Name : LWIP.h + * Description : This file provides code for the configuration + * of the LWIP. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ************************************************************************* + + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __mx_lwip_H +#define __mx_lwip_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "lwip/opt.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "netif/etharp.h" +#include "lwip/dhcp.h" +#include "lwip/netif.h" +#include "lwip/timeouts.h" +#include "ethernetif.h" + +/* Includes for RTOS ---------------------------------------------------------*/ +#if WITH_RTOS +#include "lwip/tcpip.h" +#endif /* WITH_RTOS */ + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* Global Variables ----------------------------------------------------------*/ +extern ETH_HandleTypeDef heth; + +/* LWIP init function */ +void MX_LWIP_Init(void); + +#if !WITH_RTOS +/* USER CODE BEGIN 1 */ +/* Function defined in lwip.c to: + * - Read a received packet from the Ethernet buffers + * - Send it to the lwIP stack for handling + * - Handle timeouts if NO_SYS_NO_TIMERS not set + */ +void MX_LWIP_Process(void); + +/* USER CODE END 1 */ +#endif /* WITH_RTOS */ + +#ifdef __cplusplus +} +#endif +#endif /*__ mx_lwip_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/LWIP/Target/ethernetif.c b/src/third_party/stm32_cubeide/LWIP/Target/ethernetif.c new file mode 100644 index 000000000..ca44dcf24 --- /dev/null +++ b/src/third_party/stm32_cubeide/LWIP/Target/ethernetif.c @@ -0,0 +1,822 @@ +/** + ****************************************************************************** + * File Name : ethernetif.c + * Description : This file provides code for the configuration + * of the Target/ethernetif.c MiddleWare. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "lwip/opt.h" + +#include "lwip/timeouts.h" +#include "netif/ethernet.h" +#include "netif/etharp.h" +#include "lwip/ethip6.h" +#include "ethernetif.h" +#include +#include "cmsis_os.h" +#include "lwip/tcpip.h" +/* Within 'USER CODE' section, code will be kept by default at each generation */ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* Private define ------------------------------------------------------------*/ +/* The time to block waiting for input. */ +#define TIME_WAITING_FOR_INPUT ( portMAX_DELAY ) +/* USER CODE BEGIN OS_THREAD_STACK_SIZE_WITH_RTOS */ +/* Stack size of the interface thread */ +#define INTERFACE_THREAD_STACK_SIZE ( 350 ) +/* USER CODE END OS_THREAD_STACK_SIZE_WITH_RTOS */ +/* Network interface name */ +#define IFNAME0 's' +#define IFNAME1 't' + +/* USER CODE BEGIN 1 */ + +// mvladic: Without this patch it works really slow and unreliable!!! +// https://community.st.com/s/question/0D50X00009XkYKOSA3/problem-with-slow-response-from-lwip-to-incoming-data-on-stm32f7 + +/* + @Note: The DMARxDscrTab and DMATxDscrTab must be declared in a device non cacheable memory region + In this example they are declared in the first 256 Byte of SRAM2 memory, so this + memory region is configured by MPU as a device memory. + + In this example the ETH buffers are located in the SRAM2 with MPU configured as normal + not cacheable memory. + + Please refer to MPU_Config() in main.c file. 
+ */ +#if defined ( __CC_ARM ) +ETH_DMADescTypeDef DMARxDscrTab[ETH_RXBUFNB] __attribute__((at(0x2004C000)));/* Ethernet Rx DMA Descriptors */ + +ETH_DMADescTypeDef DMATxDscrTab[ETH_TXBUFNB] __attribute__((at(0x2004C080)));/* Ethernet Tx DMA Descriptors */ + +uint8_t Rx_Buff[ETH_RXBUFNB][ETH_RX_BUF_SIZE] __attribute__((at(0x2004C100))); /* Ethernet Receive Buffers */ + +uint8_t Tx_Buff[ETH_TXBUFNB][ETH_TX_BUF_SIZE] __attribute__((at(0x2004D8D0))); /* Ethernet Transmit Buffers */ + +#elif defined ( __ICCARM__ ) /*!< IAR Compiler */ + #pragma data_alignment=4 + +#pragma location=0x2004C000 +__no_init ETH_DMADescTypeDef DMARxDscrTab[ETH_RXBUFNB];/* Ethernet Rx DMA Descriptors */ + +#pragma location=0x2004C080 +__no_init ETH_DMADescTypeDef DMATxDscrTab[ETH_TXBUFNB];/* Ethernet Tx DMA Descriptors */ + +#pragma location=0x2004C100 +__no_init uint8_t Rx_Buff[ETH_RXBUFNB][ETH_RX_BUF_SIZE]; /* Ethernet Receive Buffers */ + +#pragma location=0x2004D8D0 +__no_init uint8_t Tx_Buff[ETH_TXBUFNB][ETH_TX_BUF_SIZE]; /* Ethernet Transmit Buffers */ + +#elif defined ( __GNUC__ ) /*!< GNU Compiler */ + +ETH_DMADescTypeDef DMARxDscrTab[ETH_RXBUFNB] __attribute__((section(".RxDecripSection")));/* Ethernet Rx DMA Descriptors */ + +ETH_DMADescTypeDef DMATxDscrTab[ETH_TXBUFNB] __attribute__((section(".TxDescripSection")));/* Ethernet Tx DMA Descriptors */ + +uint8_t Rx_Buff[ETH_RXBUFNB][ETH_RX_BUF_SIZE] __attribute__((section(".RxarraySection"))); /* Ethernet Receive Buffers */ + +uint8_t Tx_Buff[ETH_TXBUFNB][ETH_TX_BUF_SIZE] __attribute__((section(".TxarraySection"))); /* Ethernet Transmit Buffers */ + +#endif + +// mvladic: Patch end + +/* USER CODE END 1 */ + +/* Private variables ---------------------------------------------------------*/ +#if defined ( __ICCARM__ ) /*!< IAR Compiler */ + #pragma data_alignment=4 +#endif +__ALIGN_BEGIN ETH_DMADescTypeDef DMARxDscrTab[ETH_RXBUFNB] __ALIGN_END;/* Ethernet Rx MA Descriptor */ + +#if defined ( __ICCARM__ ) /*!< IAR Compiler */ + #pragma data_alignment=4 +#endif +__ALIGN_BEGIN ETH_DMADescTypeDef DMATxDscrTab[ETH_TXBUFNB] __ALIGN_END;/* Ethernet Tx DMA Descriptor */ + +#if defined ( __ICCARM__ ) /*!< IAR Compiler */ + #pragma data_alignment=4 +#endif +__ALIGN_BEGIN uint8_t Rx_Buff[ETH_RXBUFNB][ETH_RX_BUF_SIZE] __ALIGN_END; /* Ethernet Receive Buffer */ + +#if defined ( __ICCARM__ ) /*!< IAR Compiler */ + #pragma data_alignment=4 +#endif +__ALIGN_BEGIN uint8_t Tx_Buff[ETH_TXBUFNB][ETH_TX_BUF_SIZE] __ALIGN_END; /* Ethernet Transmit Buffer */ + +/* USER CODE BEGIN 2 */ + +/* USER CODE END 2 */ + +/* Semaphore to signal incoming packets */ +osSemaphoreId s_xSemaphore = NULL; +/* Global Ethernet handle */ +ETH_HandleTypeDef heth; + +/* USER CODE BEGIN 3 */ + +/* USER CODE END 3 */ + +/* Private functions ---------------------------------------------------------*/ + +void HAL_ETH_MspInit(ETH_HandleTypeDef* ethHandle) +{ + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(ethHandle->Instance==ETH) + { + /* USER CODE BEGIN ETH_MspInit 0 */ + + /* USER CODE END ETH_MspInit 0 */ + /* Enable Peripheral clock */ + __HAL_RCC_ETH_CLK_ENABLE(); + + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOA_CLK_ENABLE(); + __HAL_RCC_GPIOB_CLK_ENABLE(); + __HAL_RCC_GPIOH_CLK_ENABLE(); + /**ETH GPIO Configuration + PC1 ------> ETH_MDC + PC2 ------> ETH_TXD2 + PC3 ------> ETH_TX_CLK + PA0/WKUP ------> ETH_CRS + PA1 ------> ETH_RX_CLK + PA2 ------> ETH_MDIO + PA3 ------> ETH_COL + PA7 ------> ETH_RX_DV + PC4 ------> ETH_RXD0 + PC5 ------> ETH_RXD1 + PB10 ------> ETH_RX_ER + PB11 ------> ETH_TX_EN + 
PH6 ------> ETH_RXD2 + PH7 ------> ETH_RXD3 + PB12 ------> ETH_TXD0 + PB13 ------> ETH_TXD1 + PB8 ------> ETH_TXD3 + */ + GPIO_InitStruct.Pin = GPIO_PIN_1|GPIO_PIN_2|GPIO_PIN_3|GPIO_PIN_4 + |GPIO_PIN_5; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(GPIOC, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_0|GPIO_PIN_1|GPIO_PIN_2|GPIO_PIN_3 + |GPIO_PIN_7; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_10|GPIO_PIN_11|GPIO_PIN_12|GPIO_PIN_13 + |GPIO_PIN_8; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_6|GPIO_PIN_7; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(GPIOH, &GPIO_InitStruct); + + /* Peripheral interrupt init */ + HAL_NVIC_SetPriority(ETH_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(ETH_IRQn); + /* USER CODE BEGIN ETH_MspInit 1 */ + + /* USER CODE END ETH_MspInit 1 */ + } +} + +void HAL_ETH_MspDeInit(ETH_HandleTypeDef* ethHandle) +{ + if(ethHandle->Instance==ETH) + { + /* USER CODE BEGIN ETH_MspDeInit 0 */ + + /* USER CODE END ETH_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_ETH_CLK_DISABLE(); + + /**ETH GPIO Configuration + PC1 ------> ETH_MDC + PC2 ------> ETH_TXD2 + PC3 ------> ETH_TX_CLK + PA0/WKUP ------> ETH_CRS + PA1 ------> ETH_RX_CLK + PA2 ------> ETH_MDIO + PA3 ------> ETH_COL + PA7 ------> ETH_RX_DV + PC4 ------> ETH_RXD0 + PC5 ------> ETH_RXD1 + PB10 ------> ETH_RX_ER + PB11 ------> ETH_TX_EN + PH6 ------> ETH_RXD2 + PH7 ------> ETH_RXD3 + PB12 ------> ETH_TXD0 + PB13 ------> ETH_TXD1 + PB8 ------> ETH_TXD3 + */ + HAL_GPIO_DeInit(GPIOC, GPIO_PIN_1|GPIO_PIN_2|GPIO_PIN_3|GPIO_PIN_4 + |GPIO_PIN_5); + + HAL_GPIO_DeInit(GPIOA, GPIO_PIN_0|GPIO_PIN_1|GPIO_PIN_2|GPIO_PIN_3 + |GPIO_PIN_7); + + HAL_GPIO_DeInit(GPIOB, GPIO_PIN_10|GPIO_PIN_11|GPIO_PIN_12|GPIO_PIN_13 + |GPIO_PIN_8); + + HAL_GPIO_DeInit(GPIOH, GPIO_PIN_6|GPIO_PIN_7); + + /* Peripheral interrupt Deinit*/ + HAL_NVIC_DisableIRQ(ETH_IRQn); + + /* USER CODE BEGIN ETH_MspDeInit 1 */ + + /* USER CODE END ETH_MspDeInit 1 */ + } +} + +/** + * @brief Ethernet Rx Transfer completed callback + * @param heth: ETH handle + * @retval None + */ +void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth) +{ + osSemaphoreRelease(s_xSemaphore); +} + +/* USER CODE BEGIN 4 */ +extern void getMacAddress(uint8_t macAddr[]); +/* USER CODE END 4 */ + +/******************************************************************************* + LL Driver Interface ( LwIP stack --> ETH) +*******************************************************************************/ +/** + * In this function, the hardware should be initialized. + * Called from ethernetif_init(). 
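low_level_init() below first loads a fixed ST default into MACAddr and then lets the user hook getMacAddress(), declared just above, overwrite it. The hook itself is provided elsewhere in the firmware; purely as a hypothetical sketch (not how this project actually implements it), one way to write such a hook is to derive a stable, locally administered address from the MCU's 96-bit unique ID, which the Cube HAL exposes through HAL_GetUIDw0/1/2():

    #include "main.h"                    /* brings in the STM32 HAL used by this project */

    /* Hypothetical example only; the real getMacAddress() is defined elsewhere. */
    void getMacAddress(uint8_t macAddr[])
    {
        uint32_t uid = HAL_GetUIDw0() ^ HAL_GetUIDw1() ^ HAL_GetUIDw2();

        macAddr[0] = 0x02;               /* locally administered, unicast */
        macAddr[1] = 0x80;
        macAddr[2] = 0xE1;
        macAddr[3] = (uint8_t)(uid >> 16);
        macAddr[4] = (uint8_t)(uid >> 8);
        macAddr[5] = (uint8_t)(uid);
    }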
+ * + * @param netif the already initialized lwip network interface structure + * for this ethernetif + */ +static void low_level_init(struct netif *netif) +{ + uint32_t regvalue = 0; + HAL_StatusTypeDef hal_eth_init_status; + +/* Init ETH */ + + uint8_t MACAddr[6] ; + heth.Instance = ETH; + heth.Init.AutoNegotiation = ETH_AUTONEGOTIATION_DISABLE; + heth.Init.Speed = ETH_SPEED_100M; + heth.Init.DuplexMode = ETH_MODE_FULLDUPLEX; + heth.Init.PhyAddress = DP83848_PHY_ADDRESS; + MACAddr[0] = 0x00; + MACAddr[1] = 0x80; + MACAddr[2] = 0xE1; + MACAddr[3] = 0x00; + MACAddr[4] = 0x00; + MACAddr[5] = 0x00; + heth.Init.MACAddr = &MACAddr[0]; + heth.Init.RxMode = ETH_RXINTERRUPT_MODE; + heth.Init.ChecksumMode = ETH_CHECKSUM_BY_HARDWARE; + heth.Init.MediaInterface = ETH_MEDIA_INTERFACE_MII; + + /* USER CODE BEGIN MACADDRESS */ + getMacAddress(MACAddr); + /* USER CODE END MACADDRESS */ + + hal_eth_init_status = HAL_ETH_Init(&heth); + + if (hal_eth_init_status == HAL_OK) + { + /* Set netif link flag */ + netif->flags |= NETIF_FLAG_LINK_UP; + } + /* Initialize Tx Descriptors list: Chain Mode */ + HAL_ETH_DMATxDescListInit(&heth, DMATxDscrTab, &Tx_Buff[0][0], ETH_TXBUFNB); + + /* Initialize Rx Descriptors list: Chain Mode */ + HAL_ETH_DMARxDescListInit(&heth, DMARxDscrTab, &Rx_Buff[0][0], ETH_RXBUFNB); + +#if LWIP_ARP || LWIP_ETHERNET + + /* set MAC hardware address length */ + netif->hwaddr_len = ETH_HWADDR_LEN; + + /* set MAC hardware address */ + netif->hwaddr[0] = heth.Init.MACAddr[0]; + netif->hwaddr[1] = heth.Init.MACAddr[1]; + netif->hwaddr[2] = heth.Init.MACAddr[2]; + netif->hwaddr[3] = heth.Init.MACAddr[3]; + netif->hwaddr[4] = heth.Init.MACAddr[4]; + netif->hwaddr[5] = heth.Init.MACAddr[5]; + + /* maximum transfer unit */ + netif->mtu = 1500; + + /* Accept broadcast address and ARP traffic */ + /* don't set NETIF_FLAG_ETHARP if this device is not an ethernet one */ + #if LWIP_ARP + netif->flags |= NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP; + #else + netif->flags |= NETIF_FLAG_BROADCAST; + #endif /* LWIP_ARP */ + +/* create a binary semaphore used for informing ethernetif of frame reception */ + osSemaphoreDef(SEM); + s_xSemaphore = osSemaphoreCreate(osSemaphore(SEM), 1); + +/* create the task that handles the ETH_MAC */ +/* USER CODE BEGIN OS_THREAD_DEF_CREATE_CMSIS_RTOS_V1 */ + osThreadDef(EthIf, ethernetif_input, osPriorityRealtime, 0, INTERFACE_THREAD_STACK_SIZE); + osThreadCreate (osThread(EthIf), netif); +/* USER CODE END OS_THREAD_DEF_CREATE_CMSIS_RTOS_V1 */ + /* Enable MAC and DMA transmission and reception */ + HAL_ETH_Start(&heth); + +/* USER CODE BEGIN PHY_PRE_CONFIG */ + +/* USER CODE END PHY_PRE_CONFIG */ + + /**** Configure PHY to generate an interrupt when Eth Link state changes ****/ + /* Read Register Configuration */ + HAL_ETH_ReadPHYRegister(&heth, PHY_MICR, ®value); + + regvalue |= (PHY_MICR_INT_EN | PHY_MICR_INT_OE); + + /* Enable Interrupts */ + HAL_ETH_WritePHYRegister(&heth, PHY_MICR, regvalue ); + + /* Read Register Configuration */ + HAL_ETH_ReadPHYRegister(&heth, PHY_MISR, ®value); + + regvalue |= PHY_MISR_LINK_INT_EN; + + /* Enable Interrupt on change of link status */ + HAL_ETH_WritePHYRegister(&heth, PHY_MISR, regvalue); + +/* USER CODE BEGIN PHY_POST_CONFIG */ + +/* USER CODE END PHY_POST_CONFIG */ + +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +/* USER CODE BEGIN LOW_LEVEL_INIT */ + +/* USER CODE END LOW_LEVEL_INIT */ +} + +/** + * This function should do the actual transmission of the packet. The packet is + * contained in the pbuf that is passed to the function. 
This pbuf + * might be chained. + * + * @param netif the lwip network interface structure for this ethernetif + * @param p the MAC packet to send (e.g. IP packet including MAC addresses and type) + * @return ERR_OK if the packet could be sent + * an err_t value if the packet couldn't be sent + * + * @note Returning ERR_MEM here if a DMA queue of your MAC is full can lead to + * strange results. You might consider waiting for space in the DMA queue + * to become availale since the stack doesn't retry to send a packet + * dropped because of memory failure (except for the TCP timers). + */ + +static err_t low_level_output(struct netif *netif, struct pbuf *p) +{ + err_t errval; + struct pbuf *q; + uint8_t *buffer = (uint8_t *)(heth.TxDesc->Buffer1Addr); + __IO ETH_DMADescTypeDef *DmaTxDesc; + uint32_t framelength = 0; + uint32_t bufferoffset = 0; + uint32_t byteslefttocopy = 0; + uint32_t payloadoffset = 0; + DmaTxDesc = heth.TxDesc; + bufferoffset = 0; + + /* copy frame from pbufs to driver buffers */ + for(q = p; q != NULL; q = q->next) + { + /* Is this buffer available? If not, goto error */ + if((DmaTxDesc->Status & ETH_DMATXDESC_OWN) != (uint32_t)RESET) + { + errval = ERR_USE; + goto error; + } + + /* Get bytes in current lwIP buffer */ + byteslefttocopy = q->len; + payloadoffset = 0; + + /* Check if the length of data to copy is bigger than Tx buffer size*/ + while( (byteslefttocopy + bufferoffset) > ETH_TX_BUF_SIZE ) + { + /* Copy data to Tx buffer*/ + memcpy( (uint8_t*)((uint8_t*)buffer + bufferoffset), (uint8_t*)((uint8_t*)q->payload + payloadoffset), (ETH_TX_BUF_SIZE - bufferoffset) ); + + /* Point to next descriptor */ + DmaTxDesc = (ETH_DMADescTypeDef *)(DmaTxDesc->Buffer2NextDescAddr); + + /* Check if the buffer is available */ + if((DmaTxDesc->Status & ETH_DMATXDESC_OWN) != (uint32_t)RESET) + { + errval = ERR_USE; + goto error; + } + + buffer = (uint8_t *)(DmaTxDesc->Buffer1Addr); + + byteslefttocopy = byteslefttocopy - (ETH_TX_BUF_SIZE - bufferoffset); + payloadoffset = payloadoffset + (ETH_TX_BUF_SIZE - bufferoffset); + framelength = framelength + (ETH_TX_BUF_SIZE - bufferoffset); + bufferoffset = 0; + } + + /* Copy the remaining bytes */ + memcpy( (uint8_t*)((uint8_t*)buffer + bufferoffset), (uint8_t*)((uint8_t*)q->payload + payloadoffset), byteslefttocopy ); + bufferoffset = bufferoffset + byteslefttocopy; + framelength = framelength + byteslefttocopy; + } + + /* Prepare transmit descriptors to give to DMA */ + HAL_ETH_TransmitFrame(&heth, framelength); + + errval = ERR_OK; + +error: + + /* When Transmit Underflow flag is set, clear it and issue a Transmit Poll Demand to resume transmission */ + if ((heth.Instance->DMASR & ETH_DMASR_TUS) != (uint32_t)RESET) + { + /* Clear TUS ETHERNET DMA flag */ + heth.Instance->DMASR = ETH_DMASR_TUS; + + /* Resume DMA transmission*/ + heth.Instance->DMATPDR = 0; + } + return errval; +} + +/** + * Should allocate a pbuf and transfer the bytes of the incoming + * packet from the interface into the pbuf. 
+ * + * @param netif the lwip network interface structure for this ethernetif + * @return a pbuf filled with the received packet (including MAC header) + * NULL on memory error + */ +static struct pbuf * low_level_input(struct netif *netif) +{ + struct pbuf *p = NULL; + struct pbuf *q = NULL; + uint16_t len = 0; + uint8_t *buffer; + __IO ETH_DMADescTypeDef *dmarxdesc; + uint32_t bufferoffset = 0; + uint32_t payloadoffset = 0; + uint32_t byteslefttocopy = 0; + uint32_t i=0; + + + /* get received frame */ + if (HAL_ETH_GetReceivedFrame_IT(&heth) != HAL_OK) + + return NULL; + + /* Obtain the size of the packet and put it into the "len" variable. */ + len = heth.RxFrameInfos.length; + buffer = (uint8_t *)heth.RxFrameInfos.buffer; + + if (len > 0) + { + /* We allocate a pbuf chain of pbufs from the Lwip buffer pool */ + p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL); + } + + if (p != NULL) + { + dmarxdesc = heth.RxFrameInfos.FSRxDesc; + bufferoffset = 0; + for(q = p; q != NULL; q = q->next) + { + byteslefttocopy = q->len; + payloadoffset = 0; + + /* Check if the length of bytes to copy in current pbuf is bigger than Rx buffer size*/ + while( (byteslefttocopy + bufferoffset) > ETH_RX_BUF_SIZE ) + { + /* Copy data to pbuf */ + memcpy( (uint8_t*)((uint8_t*)q->payload + payloadoffset), (uint8_t*)((uint8_t*)buffer + bufferoffset), (ETH_RX_BUF_SIZE - bufferoffset)); + + /* Point to next descriptor */ + dmarxdesc = (ETH_DMADescTypeDef *)(dmarxdesc->Buffer2NextDescAddr); + buffer = (uint8_t *)(dmarxdesc->Buffer1Addr); + + byteslefttocopy = byteslefttocopy - (ETH_RX_BUF_SIZE - bufferoffset); + payloadoffset = payloadoffset + (ETH_RX_BUF_SIZE - bufferoffset); + bufferoffset = 0; + } + /* Copy remaining data in pbuf */ + memcpy( (uint8_t*)((uint8_t*)q->payload + payloadoffset), (uint8_t*)((uint8_t*)buffer + bufferoffset), byteslefttocopy); + bufferoffset = bufferoffset + byteslefttocopy; + } + } + + /* Release descriptors to DMA */ + /* Point to first descriptor */ + dmarxdesc = heth.RxFrameInfos.FSRxDesc; + /* Set Own bit in Rx descriptors: gives the buffers back to DMA */ + for (i=0; i< heth.RxFrameInfos.SegCount; i++) + { + dmarxdesc->Status |= ETH_DMARXDESC_OWN; + dmarxdesc = (ETH_DMADescTypeDef *)(dmarxdesc->Buffer2NextDescAddr); + } + + /* Clear Segment_Count */ + heth.RxFrameInfos.SegCount =0; + + /* When Rx Buffer unavailable flag is set: clear it and resume reception */ + if ((heth.Instance->DMASR & ETH_DMASR_RBUS) != (uint32_t)RESET) + { + /* Clear RBUS ETHERNET DMA flag */ + heth.Instance->DMASR = ETH_DMASR_RBUS; + /* Resume DMA reception */ + heth.Instance->DMARPDR = 0; + } + return p; +} + +/** + * This function should be called when a packet is ready to be read + * from the interface. It uses the function low_level_input() that + * should handle the actual reception of bytes from the network + * interface. Then the type of the received packet is determined and + * the appropriate input function is called. + * + * @param netif the lwip network interface structure for this ethernetif + */ +void ethernetif_input(void const * argument) +{ + struct pbuf *p; + struct netif *netif = (struct netif *) argument; + + for( ;; ) + { + if (osSemaphoreWait(s_xSemaphore, TIME_WAITING_FOR_INPUT) == osOK) + { + do + { + LOCK_TCPIP_CORE(); + p = low_level_input( netif ); + if (p != NULL) + { + if (netif->input( p, netif) != ERR_OK ) + { + pbuf_free(p); + } + } + UNLOCK_TCPIP_CORE(); + } while(p!=NULL); + } + } +} + +#if !LWIP_ARP +/** + * This function has to be completed by user in case of ARP OFF. 
+ * + * @param netif the lwip network interface structure for this ethernetif + * @return ERR_OK if ... + */ +static err_t low_level_output_arp_off(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) +{ + err_t errval; + errval = ERR_OK; + +/* USER CODE BEGIN 5 */ + +/* USER CODE END 5 */ + + return errval; + +} +#endif /* LWIP_ARP */ + +/** + * Should be called at the beginning of the program to set up the + * network interface. It calls the function low_level_init() to do the + * actual setup of the hardware. + * + * This function should be passed as a parameter to netif_add(). + * + * @param netif the lwip network interface structure for this ethernetif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + * any other err_t on error + */ +err_t ethernetif_init(struct netif *netif) +{ + LWIP_ASSERT("netif != NULL", (netif != NULL)); + +#if LWIP_NETIF_HOSTNAME + /* Initialize interface hostname */ + netif->hostname = "lwip"; +#endif /* LWIP_NETIF_HOSTNAME */ + + netif->name[0] = IFNAME0; + netif->name[1] = IFNAME1; + /* We directly use etharp_output() here to save a function call. + * You can instead declare your own function an call etharp_output() + * from it if you have to do some checks before sending (e.g. if link + * is available...) */ + +#if LWIP_IPV4 +#if LWIP_ARP || LWIP_ETHERNET +#if LWIP_ARP + netif->output = etharp_output; +#else + /* The user should write ist own code in low_level_output_arp_off function */ + netif->output = low_level_output_arp_off; +#endif /* LWIP_ARP */ +#endif /* LWIP_ARP || LWIP_ETHERNET */ +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 + netif->output_ip6 = ethip6_output; +#endif /* LWIP_IPV6 */ + + netif->linkoutput = low_level_output; + + /* initialize the hardware */ + low_level_init(netif); + + return ERR_OK; +} + +/* USER CODE BEGIN 6 */ + +/** +* @brief Returns the current time in milliseconds +* when LWIP_TIMERS == 1 and NO_SYS == 1 +* @param None +* @retval Time +*/ +u32_t sys_jiffies(void) +{ + return HAL_GetTick(); +} + +/** +* @brief Returns the current time in milliseconds +* when LWIP_TIMERS == 1 and NO_SYS == 1 +* @param None +* @retval Time +*/ +u32_t sys_now(void) +{ + return HAL_GetTick(); +} + +/* USER CODE END 6 */ + +/* USER CODE BEGIN 7 */ + +/* USER CODE END 7 */ + +#if LWIP_NETIF_LINK_CALLBACK +/** + * @brief Link callback function, this function is called on change of link status + * to update low level driver configuration. 
+* @param netif: The network interface + * @retval None + */ +void ethernetif_update_config(struct netif *netif) +{ + __IO uint32_t tickstart = 0; + uint32_t regvalue = 0; + + if(netif_is_link_up(netif)) + { + /* Restart the auto-negotiation */ + if(heth.Init.AutoNegotiation != ETH_AUTONEGOTIATION_DISABLE) + { + /* Enable Auto-Negotiation */ + HAL_ETH_WritePHYRegister(&heth, PHY_BCR, PHY_AUTONEGOTIATION); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until the auto-negotiation will be completed */ + do + { + HAL_ETH_ReadPHYRegister(&heth, PHY_BSR, ®value); + + /* Check for the Timeout ( 1s ) */ + if((HAL_GetTick() - tickstart ) > 1000) + { + /* In case of timeout */ + goto error; + } + } while (((regvalue & PHY_AUTONEGO_COMPLETE) != PHY_AUTONEGO_COMPLETE)); + + /* Read the result of the auto-negotiation */ + HAL_ETH_ReadPHYRegister(&heth, PHY_SR, ®value); + + /* Configure the MAC with the Duplex Mode fixed by the auto-negotiation process */ + if((regvalue & PHY_DUPLEX_STATUS) != (uint32_t)RESET) + { + /* Set Ethernet duplex mode to Full-duplex following the auto-negotiation */ + heth.Init.DuplexMode = ETH_MODE_FULLDUPLEX; + } + else + { + /* Set Ethernet duplex mode to Half-duplex following the auto-negotiation */ + heth.Init.DuplexMode = ETH_MODE_HALFDUPLEX; + } + /* Configure the MAC with the speed fixed by the auto-negotiation process */ + if(regvalue & PHY_SPEED_STATUS) + { + /* Set Ethernet speed to 10M following the auto-negotiation */ + heth.Init.Speed = ETH_SPEED_10M; + } + else + { + /* Set Ethernet speed to 100M following the auto-negotiation */ + heth.Init.Speed = ETH_SPEED_100M; + } + } + else /* AutoNegotiation Disable */ + { + error : + /* Check parameters */ + assert_param(IS_ETH_SPEED(heth.Init.Speed)); + assert_param(IS_ETH_DUPLEX_MODE(heth.Init.DuplexMode)); + + /* Set MAC Speed and Duplex Mode to PHY */ + HAL_ETH_WritePHYRegister(&heth, PHY_BCR, ((uint16_t)(heth.Init.DuplexMode >> 3) | + (uint16_t)(heth.Init.Speed >> 1))); + } + + /* ETHERNET MAC Re-Configuration */ + HAL_ETH_ConfigMAC(&heth, (ETH_MACInitTypeDef *) NULL); + + /* Restart MAC interface */ + HAL_ETH_Start(&heth); + } + else + { + /* Stop MAC interface */ + HAL_ETH_Stop(&heth); + } + + ethernetif_notify_conn_changed(netif); +} + +/* USER CODE BEGIN 8 */ +/** + * @brief This function notify user about link status changement. + * @param netif: the network interface + * @retval None + */ +__weak void ethernetif_notify_conn_changed(struct netif *netif) +{ + /* NOTE : This is function could be implemented in user file + when the callback is needed, + */ + +} +/* USER CODE END 8 */ +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +/* USER CODE BEGIN 9 */ + +/* USER CODE END 9 */ +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/src/third_party/stm32_cubeide/LWIP/Target/ethernetif.h b/src/third_party/stm32_cubeide/LWIP/Target/ethernetif.h new file mode 100644 index 000000000..385ce7d3f --- /dev/null +++ b/src/third_party/stm32_cubeide/LWIP/Target/ethernetif.h @@ -0,0 +1,56 @@ +/** + ****************************************************************************** + * File Name : ethernetif.h + * Description : This file provides initialization code for LWIP + * middleWare. + ****************************************************************************** + * @attention + * + *
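ethernetif_update_config() above finishes by calling ethernetif_notify_conn_changed(), whose default, defined just above it, is an empty __weak stub. A minimal sketch of an application-side override, under the assumption that the firmware simply wants the lwIP interface state to track the cable state:

    #include "lwip/netif.h"
    #include "ethernetif.h"

    /* Example override of the __weak stub: mirror the PHY link state into the
     * lwIP interface state so traffic stops and resumes cleanly. */
    void ethernetif_notify_conn_changed(struct netif *netif)
    {
        if (netif_is_link_up(netif)) {
            netif_set_up(netif);
        } else {
            netif_set_down(netif);
        }
    }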

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + + +#ifndef __ETHERNETIF_H__ +#define __ETHERNETIF_H__ + +#include "lwip/err.h" +#include "lwip/netif.h" +#include "cmsis_os.h" + +/* Exported types ------------------------------------------------------------*/ +/* Structure that include link thread parameters */ +struct link_str { + struct netif *netif; + osSemaphoreId semaphore; +}; + +/* Within 'USER CODE' section, code will be kept by default at each generation */ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* Exported functions ------------------------------------------------------- */ +err_t ethernetif_init(struct netif *netif); + +void ethernetif_input(void const * argument); +void ethernetif_set_link(void const *argument); +void ethernetif_update_config(struct netif *netif); +void ethernetif_notify_conn_changed(struct netif *netif); + +u32_t sys_jiffies(void); +u32_t sys_now(void); + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ +#endif + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/LWIP/Target/lwipopts.h b/src/third_party/stm32_cubeide/LWIP/Target/lwipopts.h new file mode 100644 index 000000000..2697bac25 --- /dev/null +++ b/src/third_party/stm32_cubeide/LWIP/Target/lwipopts.h @@ -0,0 +1,135 @@ + +/** + ****************************************************************************** + * File Name : Target/lwipopts.h + * Description : This file overrides LwIP stack default configuration + * done in opt.h file. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion --------------------------------------*/ +#ifndef __LWIPOPTS__H__ +#define __LWIPOPTS__H__ + +#include "main.h" + +/*-----------------------------------------------------------------------------*/ +/* Current version of LwIP supported by CubeMx: 2.1.2 -*/ +/*-----------------------------------------------------------------------------*/ + +/* Within 'USER CODE' section, code will be kept by default at each generation */ +/* USER CODE BEGIN 0 */ + +#ifdef __cplusplus +extern "C" { +#endif +void sntpSetSystemTimeUs(uint32_t t, uint32_t us); +#ifdef __cplusplus +} +#endif + +#define SNTP_SET_SYSTEM_TIME_US(t, us) sntpSetSystemTimeUs(t, us) +#define SNTP_SERVER_DNS 1 + +/* USER CODE END 0 */ + +#ifdef __cplusplus + extern "C" { +#endif + +/* STM32CubeMX Specific Parameters (not defined in opt.h) ---------------------*/ +/* Parameters set in STM32CubeMX LwIP Configuration GUI -*/ +/*----- WITH_RTOS enabled (Since FREERTOS is set) -----*/ +#define WITH_RTOS 1 +/*----- CHECKSUM_BY_HARDWARE disabled -----*/ +#define CHECKSUM_BY_HARDWARE 0 +/*-----------------------------------------------------------------------------*/ + +/* LwIP Stack Parameters (modified compared to initialization value in opt.h) -*/ +/* Parameters set in STM32CubeMX LwIP Configuration GUI -*/ +/*----- Value in opt.h for LWIP_DHCP: 0 -----*/ +#define LWIP_DHCP 1 +/*----- Default Value for LWIP_DNS: 0 ---*/ +#define LWIP_DNS 1 +/*----- Value in opt.h for MEM_ALIGNMENT: 1 -----*/ +#define MEM_ALIGNMENT 4 +/*----- Value in opt.h for LWIP_ETHERNET: LWIP_ARP || PPPOE_SUPPORT -*/ +#define LWIP_ETHERNET 1 +/*----- Value in opt.h for LWIP_DNS_SECURE: (LWIP_DNS_SECURE_RAND_XID | LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT) -*/ +#define LWIP_DNS_SECURE 7 +/*----- Value in opt.h for TCP_SND_QUEUELEN: (4*TCP_SND_BUF + (TCP_MSS - 1))/TCP_MSS -----*/ +#define TCP_SND_QUEUELEN 9 +/*----- Value in opt.h for TCP_SNDLOWAT: LWIP_MIN(LWIP_MAX(((TCP_SND_BUF)/2), (2 * TCP_MSS) + 1), (TCP_SND_BUF) - 1) -*/ +#define TCP_SNDLOWAT 1071 +/*----- Value in opt.h for TCP_SNDQUEUELOWAT: LWIP_MAX(TCP_SND_QUEUELEN)/2, 5) -*/ +#define TCP_SNDQUEUELOWAT 5 +/*----- Value in opt.h for TCP_WND_UPDATE_THRESHOLD: LWIP_MIN(TCP_WND/4, TCP_MSS*4) -----*/ +#define TCP_WND_UPDATE_THRESHOLD 536 +/*----- Value in opt.h for TCPIP_THREAD_STACKSIZE: 0 -----*/ +#define TCPIP_THREAD_STACKSIZE 1024 +/*----- Value in opt.h for TCPIP_THREAD_PRIO: 1 -----*/ +#define TCPIP_THREAD_PRIO osPriorityNormal +/*----- Value in opt.h for TCPIP_MBOX_SIZE: 0 -----*/ +#define TCPIP_MBOX_SIZE 6 +/*----- Value in opt.h for SLIPIF_THREAD_STACKSIZE: 0 -----*/ +#define SLIPIF_THREAD_STACKSIZE 1024 +/*----- Value in opt.h for SLIPIF_THREAD_PRIO: 1 -----*/ +#define SLIPIF_THREAD_PRIO 3 +/*----- Value in opt.h for DEFAULT_THREAD_STACKSIZE: 0 -----*/ +#define DEFAULT_THREAD_STACKSIZE 1024 +/*----- Value in opt.h for DEFAULT_THREAD_PRIO: 1 -----*/ +#define DEFAULT_THREAD_PRIO 3 +/*----- Value in opt.h for DEFAULT_UDP_RECVMBOX_SIZE: 0 -----*/ +#define DEFAULT_UDP_RECVMBOX_SIZE 6 +/*----- Value in opt.h for DEFAULT_TCP_RECVMBOX_SIZE: 0 -----*/ +#define DEFAULT_TCP_RECVMBOX_SIZE 6 +/*----- Value in opt.h for 
DEFAULT_ACCEPTMBOX_SIZE: 0 -----*/ +#define DEFAULT_ACCEPTMBOX_SIZE 6 +/*----- Value in opt.h for RECV_BUFSIZE_DEFAULT: INT_MAX -----*/ +#define RECV_BUFSIZE_DEFAULT 2000000000 +/*----- Default Value for LWIP_SNTP: 0 ---*/ +#define LWIP_SNTP 1 +/*----- Value in opt.h for LWIP_STATS: 1 -----*/ +#define LWIP_STATS 0 +/*----- Value in opt.h for CHECKSUM_GEN_IP: 1 -----*/ +#define CHECKSUM_GEN_IP 0 +/*----- Value in opt.h for CHECKSUM_GEN_UDP: 1 -----*/ +#define CHECKSUM_GEN_UDP 0 +/*----- Value in opt.h for CHECKSUM_GEN_TCP: 1 -----*/ +#define CHECKSUM_GEN_TCP 0 +/*----- Value in opt.h for CHECKSUM_GEN_ICMP: 1 -----*/ +#define CHECKSUM_GEN_ICMP 0 +/*----- Value in opt.h for CHECKSUM_GEN_ICMP6: 1 -----*/ +#define CHECKSUM_GEN_ICMP6 0 +/*----- Value in opt.h for CHECKSUM_CHECK_IP: 1 -----*/ +#define CHECKSUM_CHECK_IP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_UDP: 1 -----*/ +#define CHECKSUM_CHECK_UDP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_TCP: 1 -----*/ +#define CHECKSUM_CHECK_TCP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_ICMP: 1 -----*/ +#define CHECKSUM_CHECK_ICMP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_ICMP6: 1 -----*/ +#define CHECKSUM_CHECK_ICMP6 0 +/*-----------------------------------------------------------------------------*/ +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +#ifdef __cplusplus +} +#endif +#endif /*__LWIPOPTS__H__ */ + +/************************* (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Inc/usbd_cdc.h b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Inc/usbd_cdc.h new file mode 100644 index 000000000..b484218e0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Inc/usbd_cdc.h @@ -0,0 +1,174 @@ +/** + ****************************************************************************** + * @file usbd_cdc.h + * @author MCD Application Team + * @brief header file for the usbd_cdc.c file. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2015 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __USB_CDC_H +#define __USB_CDC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_ioreq.h" + +/** @addtogroup STM32_USB_DEVICE_LIBRARY + * @{ + */ + +/** @defgroup usbd_cdc + * @brief This file is the Header file for usbd_cdc.c + * @{ + */ + + +/** @defgroup usbd_cdc_Exported_Defines + * @{ + */ +#define CDC_IN_EP 0x81U /* EP1 for data IN */ +#define CDC_OUT_EP 0x01U /* EP1 for data OUT */ +#define CDC_CMD_EP 0x82U /* EP2 for CDC commands */ + +#ifndef CDC_HS_BINTERVAL +#define CDC_HS_BINTERVAL 0x10U +#endif /* CDC_HS_BINTERVAL */ + +#ifndef CDC_FS_BINTERVAL +#define CDC_FS_BINTERVAL 0x10U +#endif /* CDC_FS_BINTERVAL */ + +/* CDC Endpoints parameters: you can fine tune these values depending on the needed baudrates and performance. */ +#define CDC_DATA_HS_MAX_PACKET_SIZE 512U /* Endpoint IN & OUT Packet size */ +#define CDC_DATA_FS_MAX_PACKET_SIZE 64U /* Endpoint IN & OUT Packet size */ +#define CDC_CMD_PACKET_SIZE 8U /* Control Endpoint Packet size */ + +#define USB_CDC_CONFIG_DESC_SIZ 67U +#define CDC_DATA_HS_IN_PACKET_SIZE CDC_DATA_HS_MAX_PACKET_SIZE +#define CDC_DATA_HS_OUT_PACKET_SIZE CDC_DATA_HS_MAX_PACKET_SIZE + +#define CDC_DATA_FS_IN_PACKET_SIZE CDC_DATA_FS_MAX_PACKET_SIZE +#define CDC_DATA_FS_OUT_PACKET_SIZE CDC_DATA_FS_MAX_PACKET_SIZE + +/*---------------------------------------------------------------------*/ +/* CDC definitions */ +/*---------------------------------------------------------------------*/ +#define CDC_SEND_ENCAPSULATED_COMMAND 0x00U +#define CDC_GET_ENCAPSULATED_RESPONSE 0x01U +#define CDC_SET_COMM_FEATURE 0x02U +#define CDC_GET_COMM_FEATURE 0x03U +#define CDC_CLEAR_COMM_FEATURE 0x04U +#define CDC_SET_LINE_CODING 0x20U +#define CDC_GET_LINE_CODING 0x21U +#define CDC_SET_CONTROL_LINE_STATE 0x22U +#define CDC_SEND_BREAK 0x23U + +/** + * @} + */ + + +/** @defgroup USBD_CORE_Exported_TypesDefinitions + * @{ + */ + +/** + * @} + */ +typedef struct +{ + uint32_t bitrate; + uint8_t format; + uint8_t paritytype; + uint8_t datatype; +} USBD_CDC_LineCodingTypeDef; + +typedef struct _USBD_CDC_Itf +{ + int8_t (* Init)(void); + int8_t (* DeInit)(void); + int8_t (* Control)(uint8_t cmd, uint8_t *pbuf, uint16_t length); + int8_t (* Receive)(uint8_t *Buf, uint32_t *Len); + int8_t (* TransmitCplt)(uint8_t *Buf, uint32_t *Len, uint8_t epnum); +} USBD_CDC_ItfTypeDef; + + +typedef struct +{ + uint32_t data[CDC_DATA_HS_MAX_PACKET_SIZE / 4U]; /* Force 32bits alignment */ + uint8_t CmdOpCode; + uint8_t CmdLength; + uint8_t *RxBuffer; + uint8_t *TxBuffer; + uint32_t RxLength; + uint32_t TxLength; + + __IO uint32_t TxState; + __IO uint32_t RxState; +} USBD_CDC_HandleTypeDef; + + + +/** @defgroup USBD_CORE_Exported_Macros + * @{ + */ + +/** + * @} + */ + +/** @defgroup USBD_CORE_Exported_Variables + * @{ + */ + +extern USBD_ClassTypeDef USBD_CDC; +#define USBD_CDC_CLASS &USBD_CDC +/** + * @} + */ + +/** @defgroup USB_CORE_Exported_Functions + * @{ + */ +uint8_t USBD_CDC_RegisterInterface(USBD_HandleTypeDef *pdev, + USBD_CDC_ItfTypeDef *fops); + +uint8_t 
USBD_CDC_SetTxBuffer(USBD_HandleTypeDef *pdev, uint8_t *pbuff, + uint32_t length); + +uint8_t USBD_CDC_SetRxBuffer(USBD_HandleTypeDef *pdev, uint8_t *pbuff); +uint8_t USBD_CDC_ReceivePacket(USBD_HandleTypeDef *pdev); +uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev); +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __USB_CDC_H */ +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c new file mode 100644 index 000000000..017e4cdc9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c @@ -0,0 +1,935 @@ +/** + ****************************************************************************** + * @file usbd_cdc.c + * @author MCD Application Team + * @brief This file provides the high layer firmware functions to manage the + * following functionalities of the USB CDC Class: + * - Initialization and Configuration of high and low layer + * - Enumeration as CDC Device (and enumeration for each implemented memory interface) + * - OUT/IN data transfer + * - Command IN transfer (class requests management) + * - Error management + * + * @verbatim + * + * =================================================================== + * CDC Class Driver Description + * =================================================================== + * This driver manages the "Universal Serial Bus Class Definitions for Communications Devices + * Revision 1.2 November 16, 2007" and the sub-protocol specification of "Universal Serial Bus + * Communications Class Subclass Specification for PSTN Devices Revision 1.2 February 9, 2007" + * This driver implements the following aspects of the specification: + * - Device descriptor management + * - Configuration descriptor management + * - Enumeration as CDC device with 2 data endpoints (IN and OUT) and 1 command endpoint (IN) + * - Requests management (as described in section 6.2 in specification) + * - Abstract Control Model compliant + * - Union Functional collection (using 1 IN endpoint for control) + * - Data interface class + * + * These aspects may be enriched or modified for a specific user application. + * + * This driver doesn't implement the following aspects of the specification + * (but it is possible to manage these features with some modifications on this driver): + * - Any class-specific aspect relative to communication classes should be managed by user application. + * - All communication classes other than PSTN are not managed + * + * @endverbatim + * + ****************************************************************************** + * @attention + * + *
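The handful of exported functions above is the whole surface an application needs from this class driver. A sketch of how a user source file might wire up the USBD_CDC_ItfTypeDef callback table and transmit data (the names hUsbDeviceFS, cdcItf and CDC_Send are illustrative assumptions, not part of this diff):

    #include "usbd_cdc.h"

    extern USBD_HandleTypeDef hUsbDeviceFS;          /* assumed device handle name */

    static uint8_t rxBuf[CDC_DATA_FS_MAX_PACKET_SIZE];

    static int8_t CDC_Init(void)
    {
        /* Give the class driver its receive buffer; the class arms reception itself. */
        USBD_CDC_SetRxBuffer(&hUsbDeviceFS, rxBuf);
        return 0;
    }

    static int8_t CDC_DeInit(void)
    {
        return 0;
    }

    static int8_t CDC_Control(uint8_t cmd, uint8_t *pbuf, uint16_t length)
    {
        (void)cmd; (void)pbuf; (void)length;         /* e.g. honour CDC_SET_LINE_CODING here */
        return 0;
    }

    static int8_t CDC_Receive(uint8_t *buf, uint32_t *len)
    {
        /* Consume *len bytes from buf here, then re-arm the OUT endpoint. */
        (void)len;
        USBD_CDC_SetRxBuffer(&hUsbDeviceFS, buf);
        USBD_CDC_ReceivePacket(&hUsbDeviceFS);
        return 0;
    }

    static int8_t CDC_TransmitCplt(uint8_t *buf, uint32_t *len, uint8_t epnum)
    {
        (void)buf; (void)len; (void)epnum;
        return 0;
    }

    USBD_CDC_ItfTypeDef cdcItf = { CDC_Init, CDC_DeInit, CDC_Control, CDC_Receive, CDC_TransmitCplt };

    /* Sending: point the class at the data, then kick the IN endpoint.
     * A production version would first check that the previous transfer finished. */
    uint8_t CDC_Send(uint8_t *buf, uint16_t len)
    {
        USBD_CDC_SetTxBuffer(&hUsbDeviceFS, buf, len);
        return USBD_CDC_TransmitPacket(&hUsbDeviceFS);
    }

Registration is then a single call during USB device initialization: USBD_CDC_RegisterInterface(&hUsbDeviceFS, &cdcItf).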

© Copyright (c) 2015 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* BSPDependencies +- "stm32xxxxx_{eval}{discovery}{nucleo_144}.c" +- "stm32xxxxx_{eval}{discovery}_io.c" +EndBSPDependencies */ + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_cdc.h" +#include "usbd_ctlreq.h" + + +/** @addtogroup STM32_USB_DEVICE_LIBRARY + * @{ + */ + + +/** @defgroup USBD_CDC + * @brief usbd core module + * @{ + */ + +/** @defgroup USBD_CDC_Private_TypesDefinitions + * @{ + */ +/** + * @} + */ + + +/** @defgroup USBD_CDC_Private_Defines + * @{ + */ +/** + * @} + */ + + +/** @defgroup USBD_CDC_Private_Macros + * @{ + */ + +/** + * @} + */ + + +/** @defgroup USBD_CDC_Private_FunctionPrototypes + * @{ + */ + +static uint8_t USBD_CDC_Init(USBD_HandleTypeDef *pdev, uint8_t cfgidx); +static uint8_t USBD_CDC_DeInit(USBD_HandleTypeDef *pdev, uint8_t cfgidx); +static uint8_t USBD_CDC_Setup(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +static uint8_t USBD_CDC_DataIn(USBD_HandleTypeDef *pdev, uint8_t epnum); +static uint8_t USBD_CDC_DataOut(USBD_HandleTypeDef *pdev, uint8_t epnum); +static uint8_t USBD_CDC_EP0_RxReady(USBD_HandleTypeDef *pdev); + +static uint8_t *USBD_CDC_GetFSCfgDesc(uint16_t *length); +static uint8_t *USBD_CDC_GetHSCfgDesc(uint16_t *length); +static uint8_t *USBD_CDC_GetOtherSpeedCfgDesc(uint16_t *length); +static uint8_t *USBD_CDC_GetOtherSpeedCfgDesc(uint16_t *length); +uint8_t *USBD_CDC_GetDeviceQualifierDescriptor(uint16_t *length); + +/* USB Standard Device Descriptor */ +__ALIGN_BEGIN static uint8_t USBD_CDC_DeviceQualifierDesc[USB_LEN_DEV_QUALIFIER_DESC] __ALIGN_END = +{ + USB_LEN_DEV_QUALIFIER_DESC, + USB_DESC_TYPE_DEVICE_QUALIFIER, + 0x00, + 0x02, + 0x00, + 0x00, + 0x00, + 0x40, + 0x01, + 0x00, +}; + +/** + * @} + */ + +/** @defgroup USBD_CDC_Private_Variables + * @{ + */ + + +/* CDC interface class callbacks structure */ +USBD_ClassTypeDef USBD_CDC = +{ + USBD_CDC_Init, + USBD_CDC_DeInit, + USBD_CDC_Setup, + NULL, /* EP0_TxSent, */ + USBD_CDC_EP0_RxReady, + USBD_CDC_DataIn, + USBD_CDC_DataOut, + NULL, + NULL, + NULL, + USBD_CDC_GetHSCfgDesc, + USBD_CDC_GetFSCfgDesc, + USBD_CDC_GetOtherSpeedCfgDesc, + USBD_CDC_GetDeviceQualifierDescriptor, +}; + +/* USB CDC device Configuration Descriptor */ +__ALIGN_BEGIN static uint8_t USBD_CDC_CfgHSDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN_END = +{ + /* Configuration Descriptor */ + 0x09, /* bLength: Configuration Descriptor size */ + USB_DESC_TYPE_CONFIGURATION, /* bDescriptorType: Configuration */ + USB_CDC_CONFIG_DESC_SIZ, /* wTotalLength:no of returned bytes */ + 0x00, + 0x02, /* bNumInterfaces: 2 interface */ + 0x01, /* bConfigurationValue: Configuration value */ + 0x00, /* iConfiguration: Index of string descriptor describing the configuration */ + 0xC0, /* bmAttributes: self powered */ + 0x32, /* MaxPower 0 mA */ + + /*---------------------------------------------------------------------------*/ + + /* Interface Descriptor */ + 0x09, /* bLength: Interface Descriptor size */ + USB_DESC_TYPE_INTERFACE, /* bDescriptorType: Interface */ + /* Interface descriptor type */ + 0x00, /* bInterfaceNumber: Number of Interface */ + 0x00, /* bAlternateSetting: Alternate setting */ + 0x01, /* bNumEndpoints: One endpoints used */ + 0x02, /* 
bInterfaceClass: Communication Interface Class */ + 0x02, /* bInterfaceSubClass: Abstract Control Model */ + 0x01, /* bInterfaceProtocol: Common AT commands */ + 0x00, /* iInterface: */ + + /* Header Functional Descriptor */ + 0x05, /* bLength: Endpoint Descriptor size */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x00, /* bDescriptorSubtype: Header Func Desc */ + 0x10, /* bcdCDC: spec release number */ + 0x01, + + /* Call Management Functional Descriptor */ + 0x05, /* bFunctionLength */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x01, /* bDescriptorSubtype: Call Management Func Desc */ + 0x00, /* bmCapabilities: D0+D1 */ + 0x01, /* bDataInterface: 1 */ + + /* ACM Functional Descriptor */ + 0x04, /* bFunctionLength */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x02, /* bDescriptorSubtype: Abstract Control Management desc */ + 0x02, /* bmCapabilities */ + + /* Union Functional Descriptor */ + 0x05, /* bFunctionLength */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x06, /* bDescriptorSubtype: Union func desc */ + 0x00, /* bMasterInterface: Communication class interface */ + 0x01, /* bSlaveInterface0: Data Class Interface */ + + /* Endpoint 2 Descriptor */ + 0x07, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ + CDC_CMD_EP, /* bEndpointAddress */ + 0x03, /* bmAttributes: Interrupt */ + LOBYTE(CDC_CMD_PACKET_SIZE), /* wMaxPacketSize: */ + HIBYTE(CDC_CMD_PACKET_SIZE), + CDC_HS_BINTERVAL, /* bInterval: */ + /*---------------------------------------------------------------------------*/ + + /* Data class interface descriptor */ + 0x09, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_INTERFACE, /* bDescriptorType: */ + 0x01, /* bInterfaceNumber: Number of Interface */ + 0x00, /* bAlternateSetting: Alternate setting */ + 0x02, /* bNumEndpoints: Two endpoints used */ + 0x0A, /* bInterfaceClass: CDC */ + 0x00, /* bInterfaceSubClass: */ + 0x00, /* bInterfaceProtocol: */ + 0x00, /* iInterface: */ + + /* Endpoint OUT Descriptor */ + 0x07, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ + CDC_OUT_EP, /* bEndpointAddress */ + 0x02, /* bmAttributes: Bulk */ + LOBYTE(CDC_DATA_HS_MAX_PACKET_SIZE), /* wMaxPacketSize: */ + HIBYTE(CDC_DATA_HS_MAX_PACKET_SIZE), + 0x00, /* bInterval: ignore for Bulk transfer */ + + /* Endpoint IN Descriptor */ + 0x07, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ + CDC_IN_EP, /* bEndpointAddress */ + 0x02, /* bmAttributes: Bulk */ + LOBYTE(CDC_DATA_HS_MAX_PACKET_SIZE), /* wMaxPacketSize: */ + HIBYTE(CDC_DATA_HS_MAX_PACKET_SIZE), + 0x00 /* bInterval: ignore for Bulk transfer */ +}; + + +/* USB CDC device Configuration Descriptor */ +__ALIGN_BEGIN static uint8_t USBD_CDC_CfgFSDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN_END = +{ + /* Configuration Descriptor */ + 0x09, /* bLength: Configuration Descriptor size */ + USB_DESC_TYPE_CONFIGURATION, /* bDescriptorType: Configuration */ + USB_CDC_CONFIG_DESC_SIZ, /* wTotalLength:no of returned bytes */ + 0x00, + 0x02, /* bNumInterfaces: 2 interface */ + 0x01, /* bConfigurationValue: Configuration value */ + 0x00, /* iConfiguration: Index of string descriptor describing the configuration */ + 0xC0, /* bmAttributes: self powered */ + 0x32, /* MaxPower 0 mA */ + + /*---------------------------------------------------------------------------*/ + + /* Interface Descriptor */ + 0x09, /* bLength: Interface Descriptor size */ + USB_DESC_TYPE_INTERFACE, /* bDescriptorType: Interface */ 
+ /* Interface descriptor type */ + 0x00, /* bInterfaceNumber: Number of Interface */ + 0x00, /* bAlternateSetting: Alternate setting */ + 0x01, /* bNumEndpoints: One endpoints used */ + 0x02, /* bInterfaceClass: Communication Interface Class */ + 0x02, /* bInterfaceSubClass: Abstract Control Model */ + 0x01, /* bInterfaceProtocol: Common AT commands */ + 0x00, /* iInterface: */ + + /* Header Functional Descriptor */ + 0x05, /* bLength: Endpoint Descriptor size */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x00, /* bDescriptorSubtype: Header Func Desc */ + 0x10, /* bcdCDC: spec release number */ + 0x01, + + /* Call Management Functional Descriptor */ + 0x05, /* bFunctionLength */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x01, /* bDescriptorSubtype: Call Management Func Desc */ + 0x00, /* bmCapabilities: D0+D1 */ + 0x01, /* bDataInterface: 1 */ + + /* ACM Functional Descriptor */ + 0x04, /* bFunctionLength */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x02, /* bDescriptorSubtype: Abstract Control Management desc */ + 0x02, /* bmCapabilities */ + + /* Union Functional Descriptor */ + 0x05, /* bFunctionLength */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x06, /* bDescriptorSubtype: Union func desc */ + 0x00, /* bMasterInterface: Communication class interface */ + 0x01, /* bSlaveInterface0: Data Class Interface */ + + /* Endpoint 2 Descriptor */ + 0x07, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ + CDC_CMD_EP, /* bEndpointAddress */ + 0x03, /* bmAttributes: Interrupt */ + LOBYTE(CDC_CMD_PACKET_SIZE), /* wMaxPacketSize: */ + HIBYTE(CDC_CMD_PACKET_SIZE), + CDC_FS_BINTERVAL, /* bInterval: */ + /*---------------------------------------------------------------------------*/ + + /* Data class interface descriptor */ + 0x09, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_INTERFACE, /* bDescriptorType: */ + 0x01, /* bInterfaceNumber: Number of Interface */ + 0x00, /* bAlternateSetting: Alternate setting */ + 0x02, /* bNumEndpoints: Two endpoints used */ + 0x0A, /* bInterfaceClass: CDC */ + 0x00, /* bInterfaceSubClass: */ + 0x00, /* bInterfaceProtocol: */ + 0x00, /* iInterface: */ + + /* Endpoint OUT Descriptor */ + 0x07, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ + CDC_OUT_EP, /* bEndpointAddress */ + 0x02, /* bmAttributes: Bulk */ + LOBYTE(CDC_DATA_FS_MAX_PACKET_SIZE), /* wMaxPacketSize: */ + HIBYTE(CDC_DATA_FS_MAX_PACKET_SIZE), + 0x00, /* bInterval: ignore for Bulk transfer */ + + /* Endpoint IN Descriptor */ + 0x07, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ + CDC_IN_EP, /* bEndpointAddress */ + 0x02, /* bmAttributes: Bulk */ + LOBYTE(CDC_DATA_FS_MAX_PACKET_SIZE), /* wMaxPacketSize: */ + HIBYTE(CDC_DATA_FS_MAX_PACKET_SIZE), + 0x00 /* bInterval: ignore for Bulk transfer */ +}; + +__ALIGN_BEGIN static uint8_t USBD_CDC_OtherSpeedCfgDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN_END = +{ + 0x09, /* bLength: Configuation Descriptor size */ + USB_DESC_TYPE_OTHER_SPEED_CONFIGURATION, + USB_CDC_CONFIG_DESC_SIZ, + 0x00, + 0x02, /* bNumInterfaces: 2 interfaces */ + 0x01, /* bConfigurationValue: */ + 0x04, /* iConfiguration: */ + 0xC0, /* bmAttributes: */ + 0x32, /* MaxPower 100 mA */ + + /*Interface Descriptor */ + 0x09, /* bLength: Interface Descriptor size */ + USB_DESC_TYPE_INTERFACE, /* bDescriptorType: Interface */ + /* Interface descriptor type */ + 0x00, /* bInterfaceNumber: Number of Interface */ + 0x00, /* 
bAlternateSetting: Alternate setting */ + 0x01, /* bNumEndpoints: One endpoints used */ + 0x02, /* bInterfaceClass: Communication Interface Class */ + 0x02, /* bInterfaceSubClass: Abstract Control Model */ + 0x01, /* bInterfaceProtocol: Common AT commands */ + 0x00, /* iInterface: */ + + /* Header Functional Descriptor */ + 0x05, /* bLength: Endpoint Descriptor size */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x00, /* bDescriptorSubtype: Header Func Desc */ + 0x10, /* bcdCDC: spec release number */ + 0x01, + + /*Call Management Functional Descriptor*/ + 0x05, /* bFunctionLength */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x01, /* bDescriptorSubtype: Call Management Func Desc */ + 0x00, /* bmCapabilities: D0+D1 */ + 0x01, /* bDataInterface: 1 */ + + /*ACM Functional Descriptor*/ + 0x04, /* bFunctionLength */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x02, /* bDescriptorSubtype: Abstract Control Management desc */ + 0x02, /* bmCapabilities */ + + /*Union Functional Descriptor*/ + 0x05, /* bFunctionLength */ + 0x24, /* bDescriptorType: CS_INTERFACE */ + 0x06, /* bDescriptorSubtype: Union func desc */ + 0x00, /* bMasterInterface: Communication class interface */ + 0x01, /* bSlaveInterface0: Data Class Interface */ + + /*Endpoint 2 Descriptor*/ + 0x07, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ + CDC_CMD_EP, /* bEndpointAddress */ + 0x03, /* bmAttributes: Interrupt */ + LOBYTE(CDC_CMD_PACKET_SIZE), /* wMaxPacketSize: */ + HIBYTE(CDC_CMD_PACKET_SIZE), + CDC_FS_BINTERVAL, /* bInterval: */ + + /*---------------------------------------------------------------------------*/ + + /*Data class interface descriptor*/ + 0x09, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_INTERFACE, /* bDescriptorType: */ + 0x01, /* bInterfaceNumber: Number of Interface */ + 0x00, /* bAlternateSetting: Alternate setting */ + 0x02, /* bNumEndpoints: Two endpoints used */ + 0x0A, /* bInterfaceClass: CDC */ + 0x00, /* bInterfaceSubClass: */ + 0x00, /* bInterfaceProtocol: */ + 0x00, /* iInterface: */ + + /*Endpoint OUT Descriptor*/ + 0x07, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ + CDC_OUT_EP, /* bEndpointAddress */ + 0x02, /* bmAttributes: Bulk */ + 0x40, /* wMaxPacketSize: */ + 0x00, + 0x00, /* bInterval: ignore for Bulk transfer */ + + /*Endpoint IN Descriptor*/ + 0x07, /* bLength: Endpoint Descriptor size */ + USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ + CDC_IN_EP, /* bEndpointAddress */ + 0x02, /* bmAttributes: Bulk */ + 0x40, /* wMaxPacketSize: */ + 0x00, + 0x00 /* bInterval */ +}; + +/** + * @} + */ + +/** @defgroup USBD_CDC_Private_Functions + * @{ + */ + +/** + * @brief USBD_CDC_Init + * Initialize the CDC interface + * @param pdev: device instance + * @param cfgidx: Configuration index + * @retval status + */ +static uint8_t USBD_CDC_Init(USBD_HandleTypeDef *pdev, uint8_t cfgidx) +{ + UNUSED(cfgidx); + USBD_CDC_HandleTypeDef *hcdc; + + hcdc = USBD_malloc(sizeof(USBD_CDC_HandleTypeDef)); + + if (hcdc == NULL) + { + pdev->pClassData = NULL; + return (uint8_t)USBD_EMEM; + } + + pdev->pClassData = (void *)hcdc; + + if (pdev->dev_speed == USBD_SPEED_HIGH) + { + /* Open EP IN */ + (void)USBD_LL_OpenEP(pdev, CDC_IN_EP, USBD_EP_TYPE_BULK, + CDC_DATA_HS_IN_PACKET_SIZE); + + pdev->ep_in[CDC_IN_EP & 0xFU].is_used = 1U; + + /* Open EP OUT */ + (void)USBD_LL_OpenEP(pdev, CDC_OUT_EP, USBD_EP_TYPE_BULK, + CDC_DATA_HS_OUT_PACKET_SIZE); + + pdev->ep_out[CDC_OUT_EP & 0xFU].is_used = 1U; 
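+ /* High-speed device: the bulk data endpoints opened above use the HS packet sizes (CDC_DATA_HS_IN/OUT_PACKET_SIZE, 512 bytes max for USB 2.0 high-speed bulk) */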
+ + /* Set bInterval for CDC CMD Endpoint */ + pdev->ep_in[CDC_CMD_EP & 0xFU].bInterval = CDC_HS_BINTERVAL; + } + else + { + /* Open EP IN */ + (void)USBD_LL_OpenEP(pdev, CDC_IN_EP, USBD_EP_TYPE_BULK, + CDC_DATA_FS_IN_PACKET_SIZE); + + pdev->ep_in[CDC_IN_EP & 0xFU].is_used = 1U; + + /* Open EP OUT */ + (void)USBD_LL_OpenEP(pdev, CDC_OUT_EP, USBD_EP_TYPE_BULK, + CDC_DATA_FS_OUT_PACKET_SIZE); + + pdev->ep_out[CDC_OUT_EP & 0xFU].is_used = 1U; + + /* Set bInterval for CMD Endpoint */ + pdev->ep_in[CDC_CMD_EP & 0xFU].bInterval = CDC_FS_BINTERVAL; + } + + /* Open Command IN EP */ + (void)USBD_LL_OpenEP(pdev, CDC_CMD_EP, USBD_EP_TYPE_INTR, CDC_CMD_PACKET_SIZE); + pdev->ep_in[CDC_CMD_EP & 0xFU].is_used = 1U; + + /* Init physical Interface components */ + ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->Init(); + + /* Init Xfer states */ + hcdc->TxState = 0U; + hcdc->RxState = 0U; + + if (pdev->dev_speed == USBD_SPEED_HIGH) + { + /* Prepare Out endpoint to receive next packet */ + (void)USBD_LL_PrepareReceive(pdev, CDC_OUT_EP, hcdc->RxBuffer, + CDC_DATA_HS_OUT_PACKET_SIZE); + } + else + { + /* Prepare Out endpoint to receive next packet */ + (void)USBD_LL_PrepareReceive(pdev, CDC_OUT_EP, hcdc->RxBuffer, + CDC_DATA_FS_OUT_PACKET_SIZE); + } + + return (uint8_t)USBD_OK; +} + +/** + * @brief USBD_CDC_Init + * DeInitialize the CDC layer + * @param pdev: device instance + * @param cfgidx: Configuration index + * @retval status + */ +static uint8_t USBD_CDC_DeInit(USBD_HandleTypeDef *pdev, uint8_t cfgidx) +{ + UNUSED(cfgidx); + uint8_t ret = 0U; + + /* Close EP IN */ + (void)USBD_LL_CloseEP(pdev, CDC_IN_EP); + pdev->ep_in[CDC_IN_EP & 0xFU].is_used = 0U; + + /* Close EP OUT */ + (void)USBD_LL_CloseEP(pdev, CDC_OUT_EP); + pdev->ep_out[CDC_OUT_EP & 0xFU].is_used = 0U; + + /* Close Command IN EP */ + (void)USBD_LL_CloseEP(pdev, CDC_CMD_EP); + pdev->ep_in[CDC_CMD_EP & 0xFU].is_used = 0U; + pdev->ep_in[CDC_CMD_EP & 0xFU].bInterval = 0U; + + /* DeInit physical Interface components */ + if (pdev->pClassData != NULL) + { + ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->DeInit(); + (void)USBD_free(pdev->pClassData); + pdev->pClassData = NULL; + } + + return ret; +} + +/** + * @brief USBD_CDC_Setup + * Handle the CDC specific requests + * @param pdev: instance + * @param req: usb requests + * @retval status + */ +static uint8_t USBD_CDC_Setup(USBD_HandleTypeDef *pdev, + USBD_SetupReqTypedef *req) +{ + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + uint8_t ifalt = 0U; + uint16_t status_info = 0U; + USBD_StatusTypeDef ret = USBD_OK; + + switch (req->bmRequest & USB_REQ_TYPE_MASK) + { + case USB_REQ_TYPE_CLASS: + if (req->wLength != 0U) + { + if ((req->bmRequest & 0x80U) != 0U) + { + ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->Control(req->bRequest, + (uint8_t *)hcdc->data, + req->wLength); + + (void)USBD_CtlSendData(pdev, (uint8_t *)hcdc->data, req->wLength); + } + else + { + hcdc->CmdOpCode = req->bRequest; + hcdc->CmdLength = (uint8_t)req->wLength; + + (void)USBD_CtlPrepareRx(pdev, (uint8_t *)hcdc->data, req->wLength); + } + } + else + { + ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->Control(req->bRequest, + (uint8_t *)req, 0U); + } + break; + + case USB_REQ_TYPE_STANDARD: + switch (req->bRequest) + { + case USB_REQ_GET_STATUS: + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + (void)USBD_CtlSendData(pdev, (uint8_t *)&status_info, 2U); + } + else + { + USBD_CtlError(pdev, req); + ret = USBD_FAIL; + } + break; + + case USB_REQ_GET_INTERFACE: + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + 
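+ /* the CDC function defines only alternate setting 0, so report ifalt (initialized to 0) */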
(void)USBD_CtlSendData(pdev, &ifalt, 1U); + } + else + { + USBD_CtlError(pdev, req); + ret = USBD_FAIL; + } + break; + + case USB_REQ_SET_INTERFACE: + if (pdev->dev_state != USBD_STATE_CONFIGURED) + { + USBD_CtlError(pdev, req); + ret = USBD_FAIL; + } + break; + + case USB_REQ_CLEAR_FEATURE: + break; + + default: + USBD_CtlError(pdev, req); + ret = USBD_FAIL; + break; + } + break; + + default: + USBD_CtlError(pdev, req); + ret = USBD_FAIL; + break; + } + + return (uint8_t)ret; +} + +/** + * @brief USBD_CDC_DataIn + * Data sent on non-control IN endpoint + * @param pdev: device instance + * @param epnum: endpoint number + * @retval status + */ +static uint8_t USBD_CDC_DataIn(USBD_HandleTypeDef *pdev, uint8_t epnum) +{ + USBD_CDC_HandleTypeDef *hcdc; + PCD_HandleTypeDef *hpcd = pdev->pData; + + if (pdev->pClassData == NULL) + { + return (uint8_t)USBD_FAIL; + } + + hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + + if ((pdev->ep_in[epnum].total_length > 0U) && + ((pdev->ep_in[epnum].total_length % hpcd->IN_ep[epnum].maxpacket) == 0U)) + { + /* Update the packet total length */ + pdev->ep_in[epnum].total_length = 0U; + + /* Send ZLP */ + (void)USBD_LL_Transmit(pdev, epnum, NULL, 0U); + } + else + { + hcdc->TxState = 0U; + ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->TransmitCplt(hcdc->TxBuffer, &hcdc->TxLength, epnum); + } + + return (uint8_t)USBD_OK; +} + +/** + * @brief USBD_CDC_DataOut + * Data received on non-control Out endpoint + * @param pdev: device instance + * @param epnum: endpoint number + * @retval status + */ +static uint8_t USBD_CDC_DataOut(USBD_HandleTypeDef *pdev, uint8_t epnum) +{ + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + + if (pdev->pClassData == NULL) + { + return (uint8_t)USBD_FAIL; + } + + /* Get the received data length */ + hcdc->RxLength = USBD_LL_GetRxDataSize(pdev, epnum); + + /* USB data will be immediately processed, this allow next USB traffic being + NAKed till the end of the application Xfer */ + + ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->Receive(hcdc->RxBuffer, &hcdc->RxLength); + + return (uint8_t)USBD_OK; +} + +/** + * @brief USBD_CDC_EP0_RxReady + * Handle EP0 Rx Ready event + * @param pdev: device instance + * @retval status + */ +static uint8_t USBD_CDC_EP0_RxReady(USBD_HandleTypeDef *pdev) +{ + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + + if ((pdev->pUserData != NULL) && (hcdc->CmdOpCode != 0xFFU)) + { + ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->Control(hcdc->CmdOpCode, + (uint8_t *)hcdc->data, + (uint16_t)hcdc->CmdLength); + hcdc->CmdOpCode = 0xFFU; + + } + + return (uint8_t)USBD_OK; +} + +/** + * @brief USBD_CDC_GetFSCfgDesc + * Return configuration descriptor + * @param speed : current device speed + * @param length : pointer data length + * @retval pointer to descriptor buffer + */ +static uint8_t *USBD_CDC_GetFSCfgDesc(uint16_t *length) +{ + *length = (uint16_t)sizeof(USBD_CDC_CfgFSDesc); + + return USBD_CDC_CfgFSDesc; +} + +/** + * @brief USBD_CDC_GetHSCfgDesc + * Return configuration descriptor + * @param speed : current device speed + * @param length : pointer data length + * @retval pointer to descriptor buffer + */ +static uint8_t *USBD_CDC_GetHSCfgDesc(uint16_t *length) +{ + *length = (uint16_t)sizeof(USBD_CDC_CfgHSDesc); + + return USBD_CDC_CfgHSDesc; +} + +/** + * @brief USBD_CDC_GetCfgDesc + * Return configuration descriptor + * @param speed : current device speed + * @param length : pointer data length + * @retval pointer to descriptor buffer + */ +static uint8_t 
*USBD_CDC_GetOtherSpeedCfgDesc(uint16_t *length) +{ + *length = (uint16_t)sizeof(USBD_CDC_OtherSpeedCfgDesc); + + return USBD_CDC_OtherSpeedCfgDesc; +} + +/** +* @brief DeviceQualifierDescriptor +* return Device Qualifier descriptor +* @param length : pointer data length +* @retval pointer to descriptor buffer +*/ +uint8_t *USBD_CDC_GetDeviceQualifierDescriptor(uint16_t *length) +{ + *length = (uint16_t)sizeof(USBD_CDC_DeviceQualifierDesc); + + return USBD_CDC_DeviceQualifierDesc; +} + +/** +* @brief USBD_CDC_RegisterInterface + * @param pdev: device instance + * @param fops: CD Interface callback + * @retval status + */ +uint8_t USBD_CDC_RegisterInterface(USBD_HandleTypeDef *pdev, + USBD_CDC_ItfTypeDef *fops) +{ + if (fops == NULL) + { + return (uint8_t)USBD_FAIL; + } + + pdev->pUserData = fops; + + return (uint8_t)USBD_OK; +} + +/** + * @brief USBD_CDC_SetTxBuffer + * @param pdev: device instance + * @param pbuff: Tx Buffer + * @retval status + */ +uint8_t USBD_CDC_SetTxBuffer(USBD_HandleTypeDef *pdev, + uint8_t *pbuff, uint32_t length) +{ + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + + hcdc->TxBuffer = pbuff; + hcdc->TxLength = length; + + return (uint8_t)USBD_OK; +} + + +/** + * @brief USBD_CDC_SetRxBuffer + * @param pdev: device instance + * @param pbuff: Rx Buffer + * @retval status + */ +uint8_t USBD_CDC_SetRxBuffer(USBD_HandleTypeDef *pdev, uint8_t *pbuff) +{ + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + + hcdc->RxBuffer = pbuff; + + return (uint8_t)USBD_OK; +} + +/** + * @brief USBD_CDC_TransmitPacket + * Transmit packet on IN endpoint + * @param pdev: device instance + * @retval status + */ +uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev) +{ + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + USBD_StatusTypeDef ret = USBD_BUSY; + + if (pdev->pClassData == NULL) + { + return (uint8_t)USBD_FAIL; + } + + if (hcdc->TxState == 0U) + { + /* Tx Transfer in progress */ + hcdc->TxState = 1U; + + /* Update the packet total length */ + pdev->ep_in[CDC_IN_EP & 0xFU].total_length = hcdc->TxLength; + + /* Transmit next packet */ + (void)USBD_LL_Transmit(pdev, CDC_IN_EP, hcdc->TxBuffer, hcdc->TxLength); + + ret = USBD_OK; + } + + return (uint8_t)ret; +} + + +/** + * @brief USBD_CDC_ReceivePacket + * prepare OUT Endpoint for reception + * @param pdev: device instance + * @retval status + */ +uint8_t USBD_CDC_ReceivePacket(USBD_HandleTypeDef *pdev) +{ + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + + if (pdev->pClassData == NULL) + { + return (uint8_t)USBD_FAIL; + } + + if (pdev->dev_speed == USBD_SPEED_HIGH) + { + /* Prepare Out endpoint to receive next packet */ + (void)USBD_LL_PrepareReceive(pdev, CDC_OUT_EP, hcdc->RxBuffer, + CDC_DATA_HS_OUT_PACKET_SIZE); + } + else + { + /* Prepare Out endpoint to receive next packet */ + (void)USBD_LL_PrepareReceive(pdev, CDC_OUT_EP, hcdc->RxBuffer, + CDC_DATA_FS_OUT_PACKET_SIZE); + } + + return (uint8_t)USBD_OK; +} +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_core.h b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_core.h new file mode 100644 index 000000000..c7d2ba39e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_core.h @@ -0,0 +1,158 @@ +/** + 
****************************************************************************** + * @file usbd_core.h + * @author MCD Application Team + * @brief Header file for usbd_core.c file + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2015 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __USBD_CORE_H +#define __USBD_CORE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_conf.h" +#include "usbd_def.h" +#include "usbd_ioreq.h" +#include "usbd_ctlreq.h" + +/** @addtogroup STM32_USB_DEVICE_LIBRARY + * @{ + */ + +/** @defgroup USBD_CORE + * @brief This file is the Header file for usbd_core.c file + * @{ + */ + + +/** @defgroup USBD_CORE_Exported_Defines + * @{ + */ +#ifndef USBD_DEBUG_LEVEL +#define USBD_DEBUG_LEVEL 0U +#endif /* USBD_DEBUG_LEVEL */ +/** + * @} + */ + + +/** @defgroup USBD_CORE_Exported_TypesDefinitions + * @{ + */ + + +/** + * @} + */ + + + +/** @defgroup USBD_CORE_Exported_Macros + * @{ + */ + +/** + * @} + */ + +/** @defgroup USBD_CORE_Exported_Variables + * @{ + */ +#define USBD_SOF USBD_LL_SOF +/** + * @} + */ + +/** @defgroup USBD_CORE_Exported_FunctionsPrototype + * @{ + */ +USBD_StatusTypeDef USBD_Init(USBD_HandleTypeDef *pdev, USBD_DescriptorsTypeDef *pdesc, uint8_t id); +USBD_StatusTypeDef USBD_DeInit(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_Start(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_Stop(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_RegisterClass(USBD_HandleTypeDef *pdev, USBD_ClassTypeDef *pclass); + +USBD_StatusTypeDef USBD_RunTestMode(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_SetClassConfig(USBD_HandleTypeDef *pdev, uint8_t cfgidx); +USBD_StatusTypeDef USBD_ClrClassConfig(USBD_HandleTypeDef *pdev, uint8_t cfgidx); + +USBD_StatusTypeDef USBD_LL_SetupStage(USBD_HandleTypeDef *pdev, uint8_t *psetup); +USBD_StatusTypeDef USBD_LL_DataOutStage(USBD_HandleTypeDef *pdev, uint8_t epnum, uint8_t *pdata); +USBD_StatusTypeDef USBD_LL_DataInStage(USBD_HandleTypeDef *pdev, uint8_t epnum, uint8_t *pdata); + +USBD_StatusTypeDef USBD_LL_Reset(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_LL_SetSpeed(USBD_HandleTypeDef *pdev, USBD_SpeedTypeDef speed); +USBD_StatusTypeDef USBD_LL_Suspend(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_LL_Resume(USBD_HandleTypeDef *pdev); + +USBD_StatusTypeDef USBD_LL_SOF(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_LL_IsoINIncomplete(USBD_HandleTypeDef *pdev, uint8_t epnum); +USBD_StatusTypeDef USBD_LL_IsoOUTIncomplete(USBD_HandleTypeDef *pdev, uint8_t epnum); + +USBD_StatusTypeDef USBD_LL_DevConnected(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_LL_DevDisconnected(USBD_HandleTypeDef *pdev); + +/* USBD Low Level Driver */ +USBD_StatusTypeDef USBD_LL_Init(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_LL_DeInit(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_LL_Start(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_LL_Stop(USBD_HandleTypeDef *pdev); + +USBD_StatusTypeDef USBD_LL_OpenEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr, + uint8_t ep_type, uint16_t ep_mps); + +USBD_StatusTypeDef USBD_LL_CloseEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr); +USBD_StatusTypeDef USBD_LL_FlushEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr); +USBD_StatusTypeDef USBD_LL_StallEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr); +USBD_StatusTypeDef 
USBD_LL_ClearStallEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr); +USBD_StatusTypeDef USBD_LL_SetUSBAddress(USBD_HandleTypeDef *pdev, uint8_t dev_addr); + +USBD_StatusTypeDef USBD_LL_Transmit(USBD_HandleTypeDef *pdev, uint8_t ep_addr, + uint8_t *pbuf, uint32_t size); + +USBD_StatusTypeDef USBD_LL_PrepareReceive(USBD_HandleTypeDef *pdev, uint8_t ep_addr, + uint8_t *pbuf, uint32_t size); + +uint8_t USBD_LL_IsStallEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr); +uint32_t USBD_LL_GetRxDataSize(USBD_HandleTypeDef *pdev, uint8_t ep_addr); + +void USBD_LL_Delay(uint32_t Delay); + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __USBD_CORE_H */ + +/** + * @} + */ + +/** +* @} +*/ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + + + diff --git a/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ctlreq.h b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ctlreq.h new file mode 100644 index 000000000..f973a8b1b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ctlreq.h @@ -0,0 +1,103 @@ +/** + ****************************************************************************** + * @file usbd_req.h + * @author MCD Application Team + * @brief Header file for the usbd_req.c file + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2015 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __USB_REQUEST_H +#define __USB_REQUEST_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_def.h" + + +/** @addtogroup STM32_USB_DEVICE_LIBRARY + * @{ + */ + +/** @defgroup USBD_REQ + * @brief header file for the usbd_req.c file + * @{ + */ + +/** @defgroup USBD_REQ_Exported_Defines + * @{ + */ +/** + * @} + */ + + +/** @defgroup USBD_REQ_Exported_Types + * @{ + */ +/** + * @} + */ + + + +/** @defgroup USBD_REQ_Exported_Macros + * @{ + */ +/** + * @} + */ + +/** @defgroup USBD_REQ_Exported_Variables + * @{ + */ +/** + * @} + */ + +/** @defgroup USBD_REQ_Exported_FunctionsPrototype + * @{ + */ + +USBD_StatusTypeDef USBD_StdDevReq(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +USBD_StatusTypeDef USBD_StdItfReq(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +USBD_StatusTypeDef USBD_StdEPReq(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); + +void USBD_CtlError(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +void USBD_ParseSetupRequest(USBD_SetupReqTypedef *req, uint8_t *pdata); +void USBD_GetString(uint8_t *desc, uint8_t *unicode, uint16_t *len); + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __USB_REQUEST_H */ + +/** + * @} + */ + +/** +* @} +*/ + + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_def.h b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_def.h new file mode 100644 index 000000000..0463e2f31 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_def.h @@ -0,0 +1,393 @@ +/** + ****************************************************************************** + * @file usbd_def.h + * @author MCD Application Team + * @brief General defines for the usb device library + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2015 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __USBD_DEF_H +#define __USBD_DEF_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_conf.h" + +/** @addtogroup STM32_USBD_DEVICE_LIBRARY + * @{ + */ + +/** @defgroup USB_DEF + * @brief general defines for the usb device library file + * @{ + */ + +/** @defgroup USB_DEF_Exported_Defines + * @{ + */ + +#ifndef NULL +#define NULL 0U +#endif /* NULL */ + +#ifndef USBD_MAX_NUM_INTERFACES +#define USBD_MAX_NUM_INTERFACES 1U +#endif /* USBD_MAX_NUM_CONFIGURATION */ + +#ifndef USBD_MAX_NUM_CONFIGURATION +#define USBD_MAX_NUM_CONFIGURATION 1U +#endif /* USBD_MAX_NUM_CONFIGURATION */ + +#ifndef USBD_LPM_ENABLED +#define USBD_LPM_ENABLED 0U +#endif /* USBD_LPM_ENABLED */ + +#ifndef USBD_SELF_POWERED +#define USBD_SELF_POWERED 1U +#endif /*USBD_SELF_POWERED */ + +#ifndef USBD_SUPPORT_USER_STRING_DESC +#define USBD_SUPPORT_USER_STRING_DESC 0U +#endif /* USBD_SUPPORT_USER_STRING_DESC */ + +#ifndef USBD_CLASS_USER_STRING_DESC +#define USBD_CLASS_USER_STRING_DESC 0U +#endif /* USBD_CLASS_USER_STRING_DESC */ + +#define USB_LEN_DEV_QUALIFIER_DESC 0x0AU +#define USB_LEN_DEV_DESC 0x12U +#define USB_LEN_CFG_DESC 0x09U +#define USB_LEN_IF_DESC 0x09U +#define USB_LEN_EP_DESC 0x07U +#define USB_LEN_OTG_DESC 0x03U +#define USB_LEN_LANGID_STR_DESC 0x04U +#define USB_LEN_OTHER_SPEED_DESC_SIZ 0x09U + +#define USBD_IDX_LANGID_STR 0x00U +#define USBD_IDX_MFC_STR 0x01U +#define USBD_IDX_PRODUCT_STR 0x02U +#define USBD_IDX_SERIAL_STR 0x03U +#define USBD_IDX_CONFIG_STR 0x04U +#define USBD_IDX_INTERFACE_STR 0x05U + +#define USB_REQ_TYPE_STANDARD 0x00U +#define USB_REQ_TYPE_CLASS 0x20U +#define USB_REQ_TYPE_VENDOR 0x40U +#define USB_REQ_TYPE_MASK 0x60U + +#define USB_REQ_RECIPIENT_DEVICE 0x00U +#define USB_REQ_RECIPIENT_INTERFACE 0x01U +#define USB_REQ_RECIPIENT_ENDPOINT 0x02U +#define USB_REQ_RECIPIENT_MASK 0x03U + +#define USB_REQ_GET_STATUS 0x00U +#define USB_REQ_CLEAR_FEATURE 0x01U +#define USB_REQ_SET_FEATURE 0x03U +#define USB_REQ_SET_ADDRESS 0x05U +#define USB_REQ_GET_DESCRIPTOR 0x06U +#define USB_REQ_SET_DESCRIPTOR 0x07U +#define USB_REQ_GET_CONFIGURATION 0x08U +#define USB_REQ_SET_CONFIGURATION 0x09U +#define USB_REQ_GET_INTERFACE 0x0AU +#define USB_REQ_SET_INTERFACE 0x0BU +#define USB_REQ_SYNCH_FRAME 0x0CU + +#define USB_DESC_TYPE_DEVICE 0x01U +#define USB_DESC_TYPE_CONFIGURATION 0x02U +#define USB_DESC_TYPE_STRING 0x03U +#define USB_DESC_TYPE_INTERFACE 0x04U +#define USB_DESC_TYPE_ENDPOINT 0x05U +#define USB_DESC_TYPE_DEVICE_QUALIFIER 0x06U +#define USB_DESC_TYPE_OTHER_SPEED_CONFIGURATION 0x07U +#define USB_DESC_TYPE_BOS 0x0FU + +#define USB_CONFIG_REMOTE_WAKEUP 0x02U +#define USB_CONFIG_SELF_POWERED 0x01U + +#define USB_FEATURE_EP_HALT 0x00U +#define USB_FEATURE_REMOTE_WAKEUP 0x01U +#define USB_FEATURE_TEST_MODE 0x02U + +#define USB_DEVICE_CAPABITY_TYPE 0x10U + +#define USB_HS_MAX_PACKET_SIZE 512U +#define USB_FS_MAX_PACKET_SIZE 64U +#define USB_MAX_EP0_SIZE 64U + +/* Device Status */ +#define USBD_STATE_DEFAULT 0x01U +#define USBD_STATE_ADDRESSED 0x02U +#define USBD_STATE_CONFIGURED 0x03U +#define 
USBD_STATE_SUSPENDED 0x04U + + +/* EP0 State */ +#define USBD_EP0_IDLE 0x00U +#define USBD_EP0_SETUP 0x01U +#define USBD_EP0_DATA_IN 0x02U +#define USBD_EP0_DATA_OUT 0x03U +#define USBD_EP0_STATUS_IN 0x04U +#define USBD_EP0_STATUS_OUT 0x05U +#define USBD_EP0_STALL 0x06U + +#define USBD_EP_TYPE_CTRL 0x00U +#define USBD_EP_TYPE_ISOC 0x01U +#define USBD_EP_TYPE_BULK 0x02U +#define USBD_EP_TYPE_INTR 0x03U + + +/** + * @} + */ + + +/** @defgroup USBD_DEF_Exported_TypesDefinitions + * @{ + */ + +typedef struct usb_setup_req +{ + uint8_t bmRequest; + uint8_t bRequest; + uint16_t wValue; + uint16_t wIndex; + uint16_t wLength; +} USBD_SetupReqTypedef; + +typedef struct +{ + uint8_t bLength; + uint8_t bDescriptorType; + uint8_t wDescriptorLengthLow; + uint8_t wDescriptorLengthHigh; + uint8_t bNumInterfaces; + uint8_t bConfigurationValue; + uint8_t iConfiguration; + uint8_t bmAttributes; + uint8_t bMaxPower; +} USBD_ConfigDescTypedef; + +typedef struct +{ + uint8_t bLength; + uint8_t bDescriptorType; + uint16_t wTotalLength; + uint8_t bNumDeviceCaps; +} USBD_BosDescTypedef; + + +struct _USBD_HandleTypeDef; + +typedef struct _Device_cb +{ + uint8_t (*Init)(struct _USBD_HandleTypeDef *pdev, uint8_t cfgidx); + uint8_t (*DeInit)(struct _USBD_HandleTypeDef *pdev, uint8_t cfgidx); + /* Control Endpoints*/ + uint8_t (*Setup)(struct _USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); + uint8_t (*EP0_TxSent)(struct _USBD_HandleTypeDef *pdev); + uint8_t (*EP0_RxReady)(struct _USBD_HandleTypeDef *pdev); + /* Class Specific Endpoints*/ + uint8_t (*DataIn)(struct _USBD_HandleTypeDef *pdev, uint8_t epnum); + uint8_t (*DataOut)(struct _USBD_HandleTypeDef *pdev, uint8_t epnum); + uint8_t (*SOF)(struct _USBD_HandleTypeDef *pdev); + uint8_t (*IsoINIncomplete)(struct _USBD_HandleTypeDef *pdev, uint8_t epnum); + uint8_t (*IsoOUTIncomplete)(struct _USBD_HandleTypeDef *pdev, uint8_t epnum); + + uint8_t *(*GetHSConfigDescriptor)(uint16_t *length); + uint8_t *(*GetFSConfigDescriptor)(uint16_t *length); + uint8_t *(*GetOtherSpeedConfigDescriptor)(uint16_t *length); + uint8_t *(*GetDeviceQualifierDescriptor)(uint16_t *length); +#if (USBD_SUPPORT_USER_STRING_DESC == 1U) + uint8_t *(*GetUsrStrDescriptor)(struct _USBD_HandleTypeDef *pdev, uint8_t index, uint16_t *length); +#endif + +} USBD_ClassTypeDef; + +/* Following USB Device Speed */ +typedef enum +{ + USBD_SPEED_HIGH = 0U, + USBD_SPEED_FULL = 1U, + USBD_SPEED_LOW = 2U, +} USBD_SpeedTypeDef; + +/* Following USB Device status */ +typedef enum +{ + USBD_OK = 0U, + USBD_BUSY, + USBD_EMEM, + USBD_FAIL, +} USBD_StatusTypeDef; + +/* USB Device descriptors structure */ +typedef struct +{ + uint8_t *(*GetDeviceDescriptor)(USBD_SpeedTypeDef speed, uint16_t *length); + uint8_t *(*GetLangIDStrDescriptor)(USBD_SpeedTypeDef speed, uint16_t *length); + uint8_t *(*GetManufacturerStrDescriptor)(USBD_SpeedTypeDef speed, uint16_t *length); + uint8_t *(*GetProductStrDescriptor)(USBD_SpeedTypeDef speed, uint16_t *length); + uint8_t *(*GetSerialStrDescriptor)(USBD_SpeedTypeDef speed, uint16_t *length); + uint8_t *(*GetConfigurationStrDescriptor)(USBD_SpeedTypeDef speed, uint16_t *length); + uint8_t *(*GetInterfaceStrDescriptor)(USBD_SpeedTypeDef speed, uint16_t *length); +#if (USBD_CLASS_USER_STRING_DESC == 1) + uint8_t *(*GetUserStrDescriptor)(USBD_SpeedTypeDef speed, uint8_t idx, uint16_t *length); +#endif +#if ((USBD_LPM_ENABLED == 1U) || (USBD_CLASS_BOS_ENABLED == 1)) + uint8_t *(*GetBOSDescriptor)(USBD_SpeedTypeDef speed, uint16_t *length); +#endif +} USBD_DescriptorsTypeDef; + +/* USB 
Device handle structure */ +typedef struct +{ + uint32_t status; + uint32_t total_length; + uint32_t rem_length; + uint32_t maxpacket; + uint16_t is_used; + uint16_t bInterval; +} USBD_EndpointTypeDef; + +/* USB Device handle structure */ +typedef struct _USBD_HandleTypeDef +{ + uint8_t id; + uint32_t dev_config; + uint32_t dev_default_config; + uint32_t dev_config_status; + USBD_SpeedTypeDef dev_speed; + USBD_EndpointTypeDef ep_in[16]; + USBD_EndpointTypeDef ep_out[16]; + uint32_t ep0_state; + uint32_t ep0_data_len; + uint8_t dev_state; + uint8_t dev_old_state; + uint8_t dev_address; + uint8_t dev_connection_status; + uint8_t dev_test_mode; + uint32_t dev_remote_wakeup; + uint8_t ConfIdx; + + USBD_SetupReqTypedef request; + USBD_DescriptorsTypeDef *pDesc; + USBD_ClassTypeDef *pClass; + void *pClassData; + void *pUserData; + void *pData; + void *pBosDesc; + void *pConfDesc; +} USBD_HandleTypeDef; + +/** + * @} + */ + + + +/** @defgroup USBD_DEF_Exported_Macros + * @{ + */ +__STATIC_INLINE uint16_t SWAPBYTE(uint8_t *addr) +{ + uint16_t _SwapVal, _Byte1, _Byte2; + uint8_t *_pbuff = addr; + + _Byte1 = *(uint8_t *)_pbuff; + _pbuff++; + _Byte2 = *(uint8_t *)_pbuff; + + _SwapVal = (_Byte2 << 8) | _Byte1; + + return _SwapVal; +} + +#define LOBYTE(x) ((uint8_t)((x) & 0x00FFU)) +#define HIBYTE(x) ((uint8_t)(((x) & 0xFF00U) >> 8U)) +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) + + +#if defined ( __GNUC__ ) +#ifndef __weak +#define __weak __attribute__((weak)) +#endif /* __weak */ +#ifndef __packed +#define __packed __attribute__((__packed__)) +#endif /* __packed */ +#endif /* __GNUC__ */ + + +/* In HS mode and when the DMA is used, all variables and data structures dealing + with the DMA during the transaction process should be 4-bytes aligned */ + +#if defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ +#ifndef __ALIGN_END +#define __ALIGN_END __attribute__ ((aligned (4U))) +#endif /* __ALIGN_END */ +#ifndef __ALIGN_BEGIN +#define __ALIGN_BEGIN +#endif /* __ALIGN_BEGIN */ +#else +#ifndef __ALIGN_END +#define __ALIGN_END +#endif /* __ALIGN_END */ +#ifndef __ALIGN_BEGIN +#if defined (__CC_ARM) /* ARM Compiler */ +#define __ALIGN_BEGIN __align(4U) +#elif defined (__ICCARM__) /* IAR Compiler */ +#define __ALIGN_BEGIN +#endif /* __CC_ARM */ +#endif /* __ALIGN_BEGIN */ +#endif /* __GNUC__ */ + + +/** + * @} + */ + +/** @defgroup USBD_DEF_Exported_Variables + * @{ + */ + +/** + * @} + */ + +/** @defgroup USBD_DEF_Exported_FunctionsPrototype + * @{ + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __USBD_DEF_H */ + +/** + * @} + */ + +/** +* @} +*/ +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ioreq.h b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ioreq.h new file mode 100644 index 000000000..b7159d53b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ioreq.h @@ -0,0 +1,114 @@ +/** + ****************************************************************************** + * @file usbd_ioreq.h + * @author MCD Application Team + * @brief Header file for the usbd_ioreq.c file + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2015 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __USBD_IOREQ_H +#define __USBD_IOREQ_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_def.h" +#include "usbd_core.h" + +/** @addtogroup STM32_USB_DEVICE_LIBRARY + * @{ + */ + +/** @defgroup USBD_IOREQ + * @brief header file for the usbd_ioreq.c file + * @{ + */ + +/** @defgroup USBD_IOREQ_Exported_Defines + * @{ + */ +/** + * @} + */ + + +/** @defgroup USBD_IOREQ_Exported_Types + * @{ + */ + + +/** + * @} + */ + + + +/** @defgroup USBD_IOREQ_Exported_Macros + * @{ + */ + +/** + * @} + */ + +/** @defgroup USBD_IOREQ_Exported_Variables + * @{ + */ + +/** + * @} + */ + +/** @defgroup USBD_IOREQ_Exported_FunctionsPrototype + * @{ + */ + +USBD_StatusTypeDef USBD_CtlSendData(USBD_HandleTypeDef *pdev, + uint8_t *pbuf, uint32_t len); + +USBD_StatusTypeDef USBD_CtlContinueSendData(USBD_HandleTypeDef *pdev, + uint8_t *pbuf, uint32_t len); + +USBD_StatusTypeDef USBD_CtlPrepareRx(USBD_HandleTypeDef *pdev, + uint8_t *pbuf, uint32_t len); + +USBD_StatusTypeDef USBD_CtlContinueRx(USBD_HandleTypeDef *pdev, + uint8_t *pbuf, uint32_t len); + +USBD_StatusTypeDef USBD_CtlSendStatus(USBD_HandleTypeDef *pdev); +USBD_StatusTypeDef USBD_CtlReceiveStatus(USBD_HandleTypeDef *pdev); + +uint32_t USBD_GetRxCount(USBD_HandleTypeDef *pdev, uint8_t ep_addr); + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __USBD_IOREQ_H */ + +/** + * @} + */ + +/** +* @} +*/ +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c new file mode 100644 index 000000000..3faed3520 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c @@ -0,0 +1,669 @@ +/** + ****************************************************************************** + * @file usbd_core.c + * @author MCD Application Team + * @brief This file provides all the USBD core functions. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2015 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_core.h" + +/** @addtogroup STM32_USBD_DEVICE_LIBRARY +* @{ +*/ + + +/** @defgroup USBD_CORE +* @brief usbd core module +* @{ +*/ + +/** @defgroup USBD_CORE_Private_TypesDefinitions +* @{ +*/ + +/** +* @} +*/ + + +/** @defgroup USBD_CORE_Private_Defines +* @{ +*/ + +/** +* @} +*/ + + +/** @defgroup USBD_CORE_Private_Macros +* @{ +*/ + +/** +* @} +*/ + + +/** @defgroup USBD_CORE_Private_FunctionPrototypes +* @{ +*/ + +/** +* @} +*/ + +/** @defgroup USBD_CORE_Private_Variables +* @{ +*/ + +/** +* @} +*/ + + +/** @defgroup USBD_CORE_Private_Functions +* @{ +*/ + +/** +* @brief USBD_Init +* Initializes the device stack and load the class driver +* @param pdev: device instance +* @param pdesc: Descriptor structure address +* @param id: Low level core index +* @retval None +*/ +USBD_StatusTypeDef USBD_Init(USBD_HandleTypeDef *pdev, + USBD_DescriptorsTypeDef *pdesc, uint8_t id) +{ + USBD_StatusTypeDef ret; + + /* Check whether the USB Host handle is valid */ + if (pdev == NULL) + { +#if (USBD_DEBUG_LEVEL > 1U) + USBD_ErrLog("Invalid Device handle"); +#endif + return USBD_FAIL; + } + + /* Unlink previous class */ + if (pdev->pClass != NULL) + { + pdev->pClass = NULL; + } + + if (pdev->pConfDesc != NULL) + { + pdev->pConfDesc = NULL; + } + + /* Assign USBD Descriptors */ + if (pdesc != NULL) + { + pdev->pDesc = pdesc; + } + + /* Set Device initial State */ + pdev->dev_state = USBD_STATE_DEFAULT; + pdev->id = id; + + /* Initialize low level driver */ + ret = USBD_LL_Init(pdev); + + return ret; +} + +/** +* @brief USBD_DeInit +* Re-Initialize th device library +* @param pdev: device instance +* @retval status: status +*/ +USBD_StatusTypeDef USBD_DeInit(USBD_HandleTypeDef *pdev) +{ + USBD_StatusTypeDef ret; + + /* Set Default State */ + pdev->dev_state = USBD_STATE_DEFAULT; + + /* Free Class Resources */ + if (pdev->pClass != NULL) + { + pdev->pClass->DeInit(pdev, (uint8_t)pdev->dev_config); + } + + if (pdev->pConfDesc != NULL) + { + pdev->pConfDesc = NULL; + } + + /* Stop the low level driver */ + ret = USBD_LL_Stop(pdev); + + if (ret != USBD_OK) + { + return ret; + } + + /* Initialize low level driver */ + ret = USBD_LL_DeInit(pdev); + + return ret; +} + +/** + * @brief USBD_RegisterClass + * Link class driver to Device Core. + * @param pDevice : Device Handle + * @param pclass: Class handle + * @retval USBD Status + */ +USBD_StatusTypeDef USBD_RegisterClass(USBD_HandleTypeDef *pdev, USBD_ClassTypeDef *pclass) +{ + uint16_t len = 0U; + + if (pclass == NULL) + { +#if (USBD_DEBUG_LEVEL > 1U) + USBD_ErrLog("Invalid Class handle"); +#endif + return USBD_FAIL; + } + + /* link the class to the USB Device handle */ + pdev->pClass = pclass; + + /* Get Device Configuration Descriptor */ +#ifdef USE_USB_FS + pdev->pConfDesc = (void *)pdev->pClass->GetFSConfigDescriptor(&len); +#else /* USE_USB_HS */ + pdev->pConfDesc = (void *)pdev->pClass->GetHSConfigDescriptor(&len); +#endif /* USE_USB_FS */ + + + return USBD_OK; +} + +/** + * @brief USBD_Start + * Start the USB Device Core. 
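+ * (this simply forwards to the low level driver via USBD_LL_Start)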
+ * @param pdev: Device Handle + * @retval USBD Status + */ +USBD_StatusTypeDef USBD_Start(USBD_HandleTypeDef *pdev) +{ + /* Start the low level driver */ + return USBD_LL_Start(pdev); +} + +/** + * @brief USBD_Stop + * Stop the USB Device Core. + * @param pdev: Device Handle + * @retval USBD Status + */ +USBD_StatusTypeDef USBD_Stop(USBD_HandleTypeDef *pdev) +{ + USBD_StatusTypeDef ret; + + /* Free Class Resources */ + if (pdev->pClass != NULL) + { + pdev->pClass->DeInit(pdev, (uint8_t)pdev->dev_config); + } + + if (pdev->pConfDesc != NULL) + { + pdev->pConfDesc = NULL; + } + + /* Stop the low level driver */ + ret = USBD_LL_Stop(pdev); + + return ret; +} + +/** +* @brief USBD_RunTestMode +* Launch test mode process +* @param pdev: device instance +* @retval status +*/ +USBD_StatusTypeDef USBD_RunTestMode(USBD_HandleTypeDef *pdev) +{ + /* Prevent unused argument compilation warning */ + UNUSED(pdev); + + return USBD_OK; +} + +/** +* @brief USBD_SetClassConfig +* Configure device and start the interface +* @param pdev: device instance +* @param cfgidx: configuration index +* @retval status +*/ + +USBD_StatusTypeDef USBD_SetClassConfig(USBD_HandleTypeDef *pdev, uint8_t cfgidx) +{ + USBD_StatusTypeDef ret = USBD_FAIL; + + if (pdev->pClass != NULL) + { + /* Set configuration and Start the Class */ + ret = (USBD_StatusTypeDef)pdev->pClass->Init(pdev, cfgidx); + } + + return ret; +} + +/** +* @brief USBD_ClrClassConfig +* Clear current configuration +* @param pdev: device instance +* @param cfgidx: configuration index +* @retval status: USBD_StatusTypeDef +*/ +USBD_StatusTypeDef USBD_ClrClassConfig(USBD_HandleTypeDef *pdev, uint8_t cfgidx) +{ + /* Clear configuration and De-initialize the Class process */ + if (pdev->pClass != NULL) + { + pdev->pClass->DeInit(pdev, cfgidx); + } + + return USBD_OK; +} + + +/** +* @brief USBD_SetupStage +* Handle the setup stage +* @param pdev: device instance +* @retval status +*/ +USBD_StatusTypeDef USBD_LL_SetupStage(USBD_HandleTypeDef *pdev, uint8_t *psetup) +{ + USBD_StatusTypeDef ret; + + USBD_ParseSetupRequest(&pdev->request, psetup); + + pdev->ep0_state = USBD_EP0_SETUP; + + pdev->ep0_data_len = pdev->request.wLength; + + switch (pdev->request.bmRequest & 0x1FU) + { + case USB_REQ_RECIPIENT_DEVICE: + ret = USBD_StdDevReq(pdev, &pdev->request); + break; + + case USB_REQ_RECIPIENT_INTERFACE: + ret = USBD_StdItfReq(pdev, &pdev->request); + break; + + case USB_REQ_RECIPIENT_ENDPOINT: + ret = USBD_StdEPReq(pdev, &pdev->request); + break; + + default: + ret = USBD_LL_StallEP(pdev, (pdev->request.bmRequest & 0x80U)); + break; + } + + return ret; +} + +/** +* @brief USBD_DataOutStage +* Handle data OUT stage +* @param pdev: device instance +* @param epnum: endpoint index +* @retval status +*/ +USBD_StatusTypeDef USBD_LL_DataOutStage(USBD_HandleTypeDef *pdev, + uint8_t epnum, uint8_t *pdata) +{ + USBD_EndpointTypeDef *pep; + USBD_StatusTypeDef ret; + + if (epnum == 0U) + { + pep = &pdev->ep_out[0]; + + if (pdev->ep0_state == USBD_EP0_DATA_OUT) + { + if (pep->rem_length > pep->maxpacket) + { + pep->rem_length -= pep->maxpacket; + + (void)USBD_CtlContinueRx(pdev, pdata, MIN(pep->rem_length, pep->maxpacket)); + } + else + { + if ((pdev->pClass->EP0_RxReady != NULL) && + (pdev->dev_state == USBD_STATE_CONFIGURED)) + { + pdev->pClass->EP0_RxReady(pdev); + } + (void)USBD_CtlSendStatus(pdev); + } + } + else + { +#if 0 + if (pdev->ep0_state == USBD_EP0_STATUS_OUT) + { + /* + * STATUS PHASE completed, update ep0_state to idle + */ + pdev->ep0_state = USBD_EP0_IDLE; + 
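+ /* (inside the disabled #if 0 block) a stall set on EP0 OUT here would be cleared by the next SETUP packet */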
(void)USBD_LL_StallEP(pdev, 0U); + } +#endif + } + } + else if ((pdev->pClass->DataOut != NULL) && + (pdev->dev_state == USBD_STATE_CONFIGURED)) + { + ret = (USBD_StatusTypeDef)pdev->pClass->DataOut(pdev, epnum); + + if (ret != USBD_OK) + { + return ret; + } + } + else + { + /* should never be in this condition */ + return USBD_FAIL; + } + + return USBD_OK; +} + +/** +* @brief USBD_DataInStage +* Handle data in stage +* @param pdev: device instance +* @param epnum: endpoint index +* @retval status +*/ +USBD_StatusTypeDef USBD_LL_DataInStage(USBD_HandleTypeDef *pdev, + uint8_t epnum, uint8_t *pdata) +{ + USBD_EndpointTypeDef *pep; + USBD_StatusTypeDef ret; + + if (epnum == 0U) + { + pep = &pdev->ep_in[0]; + + if (pdev->ep0_state == USBD_EP0_DATA_IN) + { + if (pep->rem_length > pep->maxpacket) + { + pep->rem_length -= pep->maxpacket; + + (void)USBD_CtlContinueSendData(pdev, pdata, pep->rem_length); + + /* Prepare endpoint for premature end of transfer */ + (void)USBD_LL_PrepareReceive(pdev, 0U, NULL, 0U); + } + else + { + /* last packet is MPS multiple, so send ZLP packet */ + if ((pep->maxpacket == pep->rem_length) && + (pep->total_length >= pep->maxpacket) && + (pep->total_length < pdev->ep0_data_len)) + { + (void)USBD_CtlContinueSendData(pdev, NULL, 0U); + pdev->ep0_data_len = 0U; + + /* Prepare endpoint for premature end of transfer */ + (void)USBD_LL_PrepareReceive(pdev, 0U, NULL, 0U); + } + else + { + if ((pdev->pClass->EP0_TxSent != NULL) && + (pdev->dev_state == USBD_STATE_CONFIGURED)) + { + pdev->pClass->EP0_TxSent(pdev); + } + (void)USBD_LL_StallEP(pdev, 0x80U); + (void)USBD_CtlReceiveStatus(pdev); + } + } + } + else + { +#if 0 + if ((pdev->ep0_state == USBD_EP0_STATUS_IN) || + (pdev->ep0_state == USBD_EP0_IDLE)) + { + (void)USBD_LL_StallEP(pdev, 0x80U); + } +#endif + } + + if (pdev->dev_test_mode == 1U) + { + (void)USBD_RunTestMode(pdev); + pdev->dev_test_mode = 0U; + } + } + else if ((pdev->pClass->DataIn != NULL) && + (pdev->dev_state == USBD_STATE_CONFIGURED)) + { + ret = (USBD_StatusTypeDef)pdev->pClass->DataIn(pdev, epnum); + + if (ret != USBD_OK) + { + return ret; + } + } + else + { + /* should never be in this condition */ + return USBD_FAIL; + } + + return USBD_OK; +} + +/** +* @brief USBD_LL_Reset +* Handle Reset event +* @param pdev: device instance +* @retval status +*/ + +USBD_StatusTypeDef USBD_LL_Reset(USBD_HandleTypeDef *pdev) +{ + /* Upon Reset call user call back */ + pdev->dev_state = USBD_STATE_DEFAULT; + pdev->ep0_state = USBD_EP0_IDLE; + pdev->dev_config = 0U; + pdev->dev_remote_wakeup = 0U; + + if (pdev->pClassData != NULL) + { + pdev->pClass->DeInit(pdev, (uint8_t)pdev->dev_config); + } + + /* Open EP0 OUT */ + (void)USBD_LL_OpenEP(pdev, 0x00U, USBD_EP_TYPE_CTRL, USB_MAX_EP0_SIZE); + pdev->ep_out[0x00U & 0xFU].is_used = 1U; + + pdev->ep_out[0].maxpacket = USB_MAX_EP0_SIZE; + + /* Open EP0 IN */ + (void)USBD_LL_OpenEP(pdev, 0x80U, USBD_EP_TYPE_CTRL, USB_MAX_EP0_SIZE); + pdev->ep_in[0x80U & 0xFU].is_used = 1U; + + pdev->ep_in[0].maxpacket = USB_MAX_EP0_SIZE; + + return USBD_OK; +} + +/** +* @brief USBD_LL_Reset +* Handle Reset event +* @param pdev: device instance +* @retval status +*/ +USBD_StatusTypeDef USBD_LL_SetSpeed(USBD_HandleTypeDef *pdev, + USBD_SpeedTypeDef speed) +{ + pdev->dev_speed = speed; + + return USBD_OK; +} + +/** +* @brief USBD_Suspend +* Handle Suspend event +* @param pdev: device instance +* @retval status +*/ + +USBD_StatusTypeDef USBD_LL_Suspend(USBD_HandleTypeDef *pdev) +{ + pdev->dev_old_state = pdev->dev_state; + pdev->dev_state = 
USBD_STATE_SUSPENDED; + + return USBD_OK; +} + +/** +* @brief USBD_Resume +* Handle Resume event +* @param pdev: device instance +* @retval status +*/ + +USBD_StatusTypeDef USBD_LL_Resume(USBD_HandleTypeDef *pdev) +{ + if (pdev->dev_state == USBD_STATE_SUSPENDED) + { + pdev->dev_state = pdev->dev_old_state; + } + + return USBD_OK; +} + +/** +* @brief USBD_SOF +* Handle SOF event +* @param pdev: device instance +* @retval status +*/ + +USBD_StatusTypeDef USBD_LL_SOF(USBD_HandleTypeDef *pdev) +{ + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + if (pdev->pClass->SOF != NULL) + { + pdev->pClass->SOF(pdev); + } + } + + return USBD_OK; +} + +/** +* @brief USBD_IsoINIncomplete +* Handle iso in incomplete event +* @param pdev: device instance +* @retval status +*/ +USBD_StatusTypeDef USBD_LL_IsoINIncomplete(USBD_HandleTypeDef *pdev, + uint8_t epnum) +{ + /* Prevent unused arguments compilation warning */ + UNUSED(pdev); + UNUSED(epnum); + + return USBD_OK; +} + +/** +* @brief USBD_IsoOUTIncomplete +* Handle iso out incomplete event +* @param pdev: device instance +* @retval status +*/ +USBD_StatusTypeDef USBD_LL_IsoOUTIncomplete(USBD_HandleTypeDef *pdev, + uint8_t epnum) +{ + /* Prevent unused arguments compilation warning */ + UNUSED(pdev); + UNUSED(epnum); + + return USBD_OK; +} + +/** +* @brief USBD_DevConnected +* Handle device connection event +* @param pdev: device instance +* @retval status +*/ +USBD_StatusTypeDef USBD_LL_DevConnected(USBD_HandleTypeDef *pdev) +{ + /* Prevent unused argument compilation warning */ + UNUSED(pdev); + + return USBD_OK; +} + +/** +* @brief USBD_DevDisconnected +* Handle device disconnection event +* @param pdev: device instance +* @retval status +*/ +USBD_StatusTypeDef USBD_LL_DevDisconnected(USBD_HandleTypeDef *pdev) +{ + /* Free Class Resources */ + pdev->dev_state = USBD_STATE_DEFAULT; + + if (pdev->pClass != NULL) + { + pdev->pClass->DeInit(pdev, (uint8_t)pdev->dev_config); + } + + return USBD_OK; +} +/** +* @} +*/ + + +/** +* @} +*/ + + +/** +* @} +*/ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c new file mode 100644 index 000000000..c31d40e0a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c @@ -0,0 +1,944 @@ +/** + ****************************************************************************** + * @file usbd_req.c + * @author MCD Application Team + * @brief This file provides the standard USB requests following chapter 9. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2015 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_ctlreq.h" +#include "usbd_ioreq.h" + + +/** @addtogroup STM32_USBD_STATE_DEVICE_LIBRARY + * @{ + */ + + +/** @defgroup USBD_REQ + * @brief USB standard requests module + * @{ + */ + +/** @defgroup USBD_REQ_Private_TypesDefinitions + * @{ + */ + +/** + * @} + */ + + +/** @defgroup USBD_REQ_Private_Defines + * @{ + */ + +/** + * @} + */ + + +/** @defgroup USBD_REQ_Private_Macros + * @{ + */ + +/** + * @} + */ + + +/** @defgroup USBD_REQ_Private_Variables + * @{ + */ + +/** + * @} + */ + + +/** @defgroup USBD_REQ_Private_FunctionPrototypes + * @{ + */ +static void USBD_GetDescriptor(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +static void USBD_SetAddress(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +static USBD_StatusTypeDef USBD_SetConfig(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +static void USBD_GetConfig(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +static void USBD_GetStatus(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +static void USBD_SetFeature(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +static void USBD_ClrFeature(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req); +static uint8_t USBD_GetLen(uint8_t *buf); + +/** + * @} + */ + + +/** @defgroup USBD_REQ_Private_Functions + * @{ + */ + + +/** +* @brief USBD_StdDevReq +* Handle standard usb device requests +* @param pdev: device instance +* @param req: usb request +* @retval status +*/ +USBD_StatusTypeDef USBD_StdDevReq(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + USBD_StatusTypeDef ret = USBD_OK; + + switch (req->bmRequest & USB_REQ_TYPE_MASK) + { + case USB_REQ_TYPE_CLASS: + case USB_REQ_TYPE_VENDOR: + ret = (USBD_StatusTypeDef)pdev->pClass->Setup(pdev, req); + break; + + case USB_REQ_TYPE_STANDARD: + switch (req->bRequest) + { + case USB_REQ_GET_DESCRIPTOR: + USBD_GetDescriptor(pdev, req); + break; + + case USB_REQ_SET_ADDRESS: + USBD_SetAddress(pdev, req); + break; + + case USB_REQ_SET_CONFIGURATION: + ret = USBD_SetConfig(pdev, req); + break; + + case USB_REQ_GET_CONFIGURATION: + USBD_GetConfig(pdev, req); + break; + + case USB_REQ_GET_STATUS: + USBD_GetStatus(pdev, req); + break; + + case USB_REQ_SET_FEATURE: + USBD_SetFeature(pdev, req); + break; + + case USB_REQ_CLEAR_FEATURE: + USBD_ClrFeature(pdev, req); + break; + + default: + USBD_CtlError(pdev, req); + break; + } + break; + + default: + USBD_CtlError(pdev, req); + break; + } + + return ret; +} + +/** +* @brief USBD_StdItfReq +* Handle standard usb interface requests +* @param pdev: device instance +* @param req: usb request +* @retval status +*/ +USBD_StatusTypeDef USBD_StdItfReq(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + USBD_StatusTypeDef ret = USBD_OK; + + switch (req->bmRequest & USB_REQ_TYPE_MASK) + { + case USB_REQ_TYPE_CLASS: + case USB_REQ_TYPE_VENDOR: + case USB_REQ_TYPE_STANDARD: + switch (pdev->dev_state) + { + case USBD_STATE_DEFAULT: + case USBD_STATE_ADDRESSED: + case USBD_STATE_CONFIGURED: + + if (LOBYTE(req->wIndex) <= USBD_MAX_NUM_INTERFACES) + { + ret = (USBD_StatusTypeDef)pdev->pClass->Setup(pdev, req); + + if ((req->wLength == 0U) && (ret == 
USBD_OK)) + { + (void)USBD_CtlSendStatus(pdev); + } + } + else + { + USBD_CtlError(pdev, req); + } + break; + + default: + USBD_CtlError(pdev, req); + break; + } + break; + + default: + USBD_CtlError(pdev, req); + break; + } + + return ret; +} + +/** +* @brief USBD_StdEPReq +* Handle standard usb endpoint requests +* @param pdev: device instance +* @param req: usb request +* @retval status +*/ +USBD_StatusTypeDef USBD_StdEPReq(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + USBD_EndpointTypeDef *pep; + uint8_t ep_addr; + USBD_StatusTypeDef ret = USBD_OK; + ep_addr = LOBYTE(req->wIndex); + + switch (req->bmRequest & USB_REQ_TYPE_MASK) + { + case USB_REQ_TYPE_CLASS: + case USB_REQ_TYPE_VENDOR: + ret = (USBD_StatusTypeDef)pdev->pClass->Setup(pdev, req); + break; + + case USB_REQ_TYPE_STANDARD: + switch (req->bRequest) + { + case USB_REQ_SET_FEATURE: + switch (pdev->dev_state) + { + case USBD_STATE_ADDRESSED: + if ((ep_addr != 0x00U) && (ep_addr != 0x80U)) + { + (void)USBD_LL_StallEP(pdev, ep_addr); + (void)USBD_LL_StallEP(pdev, 0x80U); + } + else + { + USBD_CtlError(pdev, req); + } + break; + + case USBD_STATE_CONFIGURED: + if (req->wValue == USB_FEATURE_EP_HALT) + { + if ((ep_addr != 0x00U) && (ep_addr != 0x80U) && (req->wLength == 0x00U)) + { + (void)USBD_LL_StallEP(pdev, ep_addr); + } + } + (void)USBD_CtlSendStatus(pdev); + + break; + + default: + USBD_CtlError(pdev, req); + break; + } + break; + + case USB_REQ_CLEAR_FEATURE: + + switch (pdev->dev_state) + { + case USBD_STATE_ADDRESSED: + if ((ep_addr != 0x00U) && (ep_addr != 0x80U)) + { + (void)USBD_LL_StallEP(pdev, ep_addr); + (void)USBD_LL_StallEP(pdev, 0x80U); + } + else + { + USBD_CtlError(pdev, req); + } + break; + + case USBD_STATE_CONFIGURED: + if (req->wValue == USB_FEATURE_EP_HALT) + { + if ((ep_addr & 0x7FU) != 0x00U) + { + (void)USBD_LL_ClearStallEP(pdev, ep_addr); + } + (void)USBD_CtlSendStatus(pdev); + (USBD_StatusTypeDef)pdev->pClass->Setup(pdev, req); + } + break; + + default: + USBD_CtlError(pdev, req); + break; + } + break; + + case USB_REQ_GET_STATUS: + switch (pdev->dev_state) + { + case USBD_STATE_ADDRESSED: + if ((ep_addr != 0x00U) && (ep_addr != 0x80U)) + { + USBD_CtlError(pdev, req); + break; + } + pep = ((ep_addr & 0x80U) == 0x80U) ? &pdev->ep_in[ep_addr & 0x7FU] : \ + &pdev->ep_out[ep_addr & 0x7FU]; + + pep->status = 0x0000U; + + (void)USBD_CtlSendData(pdev, (uint8_t *)&pep->status, 2U); + break; + + case USBD_STATE_CONFIGURED: + if ((ep_addr & 0x80U) == 0x80U) + { + if (pdev->ep_in[ep_addr & 0xFU].is_used == 0U) + { + USBD_CtlError(pdev, req); + break; + } + } + else + { + if (pdev->ep_out[ep_addr & 0xFU].is_used == 0U) + { + USBD_CtlError(pdev, req); + break; + } + } + + pep = ((ep_addr & 0x80U) == 0x80U) ? 
&pdev->ep_in[ep_addr & 0x7FU] : \ + &pdev->ep_out[ep_addr & 0x7FU]; + + if ((ep_addr == 0x00U) || (ep_addr == 0x80U)) + { + pep->status = 0x0000U; + } + else if (USBD_LL_IsStallEP(pdev, ep_addr) != 0U) + { + pep->status = 0x0001U; + } + else + { + pep->status = 0x0000U; + } + + (void)USBD_CtlSendData(pdev, (uint8_t *)&pep->status, 2U); + break; + + default: + USBD_CtlError(pdev, req); + break; + } + break; + + default: + USBD_CtlError(pdev, req); + break; + } + break; + + default: + USBD_CtlError(pdev, req); + break; + } + + return ret; +} + + +/** +* @brief USBD_GetDescriptor +* Handle Get Descriptor requests +* @param pdev: device instance +* @param req: usb request +* @retval status +*/ +static void USBD_GetDescriptor(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + uint16_t len = 0U; + uint8_t *pbuf = NULL; + uint8_t err = 0U; + + switch (req->wValue >> 8) + { +#if ((USBD_LPM_ENABLED == 1U) || (USBD_CLASS_BOS_ENABLED == 1U)) + case USB_DESC_TYPE_BOS: + if (pdev->pDesc->GetBOSDescriptor != NULL) + { + pbuf = pdev->pDesc->GetBOSDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; +#endif + case USB_DESC_TYPE_DEVICE: + pbuf = pdev->pDesc->GetDeviceDescriptor(pdev->dev_speed, &len); + break; + + case USB_DESC_TYPE_CONFIGURATION: + if (pdev->dev_speed == USBD_SPEED_HIGH) + { + pbuf = pdev->pClass->GetHSConfigDescriptor(&len); + pbuf[1] = USB_DESC_TYPE_CONFIGURATION; + } + else + { + pbuf = pdev->pClass->GetFSConfigDescriptor(&len); + pbuf[1] = USB_DESC_TYPE_CONFIGURATION; + } + break; + + case USB_DESC_TYPE_STRING: + switch ((uint8_t)(req->wValue)) + { + case USBD_IDX_LANGID_STR: + if (pdev->pDesc->GetLangIDStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetLangIDStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USBD_IDX_MFC_STR: + if (pdev->pDesc->GetManufacturerStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetManufacturerStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USBD_IDX_PRODUCT_STR: + if (pdev->pDesc->GetProductStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetProductStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USBD_IDX_SERIAL_STR: + if (pdev->pDesc->GetSerialStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetSerialStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USBD_IDX_CONFIG_STR: + if (pdev->pDesc->GetConfigurationStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetConfigurationStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USBD_IDX_INTERFACE_STR: + if (pdev->pDesc->GetInterfaceStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetInterfaceStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + default: +#if (USBD_SUPPORT_USER_STRING_DESC == 1U) + if (pdev->pClass->GetUsrStrDescriptor != NULL) + { + pbuf = pdev->pClass->GetUsrStrDescriptor(pdev, (req->wValue), &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } +#elif (USBD_CLASS_USER_STRING_DESC == 1U) + if (pdev->pDesc->GetUserStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetUserStrDescriptor(pdev->dev_speed, (req->wValue), &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } +#else + USBD_CtlError(pdev, req); + err++; +#endif + break; + } + break; + + case 
USB_DESC_TYPE_DEVICE_QUALIFIER: + if (pdev->dev_speed == USBD_SPEED_HIGH) + { + pbuf = pdev->pClass->GetDeviceQualifierDescriptor(&len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USB_DESC_TYPE_OTHER_SPEED_CONFIGURATION: + if (pdev->dev_speed == USBD_SPEED_HIGH) + { + pbuf = pdev->pClass->GetOtherSpeedConfigDescriptor(&len); + pbuf[1] = USB_DESC_TYPE_OTHER_SPEED_CONFIGURATION; + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + default: + USBD_CtlError(pdev, req); + err++; + break; + } + + if (err != 0U) + { + return; + } + else + { + if (req->wLength != 0U) + { + if (len != 0U) + { + len = MIN(len, req->wLength); + (void)USBD_CtlSendData(pdev, pbuf, len); + } + else + { + USBD_CtlError(pdev, req); + } + } + else + { + (void)USBD_CtlSendStatus(pdev); + } + } +} + +/** +* @brief USBD_SetAddress +* Set device address +* @param pdev: device instance +* @param req: usb request +* @retval status +*/ +static void USBD_SetAddress(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + uint8_t dev_addr; + + if ((req->wIndex == 0U) && (req->wLength == 0U) && (req->wValue < 128U)) + { + dev_addr = (uint8_t)(req->wValue) & 0x7FU; + + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + USBD_CtlError(pdev, req); + } + else + { + pdev->dev_address = dev_addr; + (void)USBD_LL_SetUSBAddress(pdev, dev_addr); + (void)USBD_CtlSendStatus(pdev); + + if (dev_addr != 0U) + { + pdev->dev_state = USBD_STATE_ADDRESSED; + } + else + { + pdev->dev_state = USBD_STATE_DEFAULT; + } + } + } + else + { + USBD_CtlError(pdev, req); + } +} + +/** +* @brief USBD_SetConfig +* Handle Set device configuration request +* @param pdev: device instance +* @param req: usb request +* @retval status +*/ +static USBD_StatusTypeDef USBD_SetConfig(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + USBD_StatusTypeDef ret = USBD_OK; + static uint8_t cfgidx; + + cfgidx = (uint8_t)(req->wValue); + + if (cfgidx > USBD_MAX_NUM_CONFIGURATION) + { + USBD_CtlError(pdev, req); + return USBD_FAIL; + } + + switch (pdev->dev_state) + { + case USBD_STATE_ADDRESSED: + if (cfgidx != 0U) + { + pdev->dev_config = cfgidx; + + ret = USBD_SetClassConfig(pdev, cfgidx); + + if (ret != USBD_OK) + { + USBD_CtlError(pdev, req); + } + else + { + (void)USBD_CtlSendStatus(pdev); + pdev->dev_state = USBD_STATE_CONFIGURED; + } + } + else + { + (void)USBD_CtlSendStatus(pdev); + } + break; + + case USBD_STATE_CONFIGURED: + if (cfgidx == 0U) + { + pdev->dev_state = USBD_STATE_ADDRESSED; + pdev->dev_config = cfgidx; + (void)USBD_ClrClassConfig(pdev, cfgidx); + (void)USBD_CtlSendStatus(pdev); + } + else if (cfgidx != pdev->dev_config) + { + /* Clear old configuration */ + (void)USBD_ClrClassConfig(pdev, (uint8_t)pdev->dev_config); + + /* set new configuration */ + pdev->dev_config = cfgidx; + + ret = USBD_SetClassConfig(pdev, cfgidx); + + if (ret != USBD_OK) + { + USBD_CtlError(pdev, req); + (void)USBD_ClrClassConfig(pdev, (uint8_t)pdev->dev_config); + pdev->dev_state = USBD_STATE_ADDRESSED; + } + else + { + (void)USBD_CtlSendStatus(pdev); + } + } + else + { + (void)USBD_CtlSendStatus(pdev); + } + break; + + default: + USBD_CtlError(pdev, req); + (void)USBD_ClrClassConfig(pdev, cfgidx); + ret = USBD_FAIL; + break; + } + + return ret; +} + +/** +* @brief USBD_GetConfig +* Handle Get device configuration request +* @param pdev: device instance +* @param req: usb request +* @retval status +*/ +static void USBD_GetConfig(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + if (req->wLength != 1U) + { + 
USBD_CtlError(pdev, req); + } + else + { + switch (pdev->dev_state) + { + case USBD_STATE_DEFAULT: + case USBD_STATE_ADDRESSED: + pdev->dev_default_config = 0U; + (void)USBD_CtlSendData(pdev, (uint8_t *)&pdev->dev_default_config, 1U); + break; + + case USBD_STATE_CONFIGURED: + (void)USBD_CtlSendData(pdev, (uint8_t *)&pdev->dev_config, 1U); + break; + + default: + USBD_CtlError(pdev, req); + break; + } + } +} + +/** +* @brief USBD_GetStatus +* Handle Get Status request +* @param pdev: device instance +* @param req: usb request +* @retval status +*/ +static void USBD_GetStatus(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + switch (pdev->dev_state) + { + case USBD_STATE_DEFAULT: + case USBD_STATE_ADDRESSED: + case USBD_STATE_CONFIGURED: + if (req->wLength != 0x2U) + { + USBD_CtlError(pdev, req); + break; + } + +#if (USBD_SELF_POWERED == 1U) + pdev->dev_config_status = USB_CONFIG_SELF_POWERED; +#else + pdev->dev_config_status = 0U; +#endif + + if (pdev->dev_remote_wakeup != 0U) + { + pdev->dev_config_status |= USB_CONFIG_REMOTE_WAKEUP; + } + + (void)USBD_CtlSendData(pdev, (uint8_t *)&pdev->dev_config_status, 2U); + break; + + default: + USBD_CtlError(pdev, req); + break; + } +} + + +/** +* @brief USBD_SetFeature +* Handle Set device feature request +* @param pdev: device instance +* @param req: usb request +* @retval status +*/ +static void USBD_SetFeature(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + if (req->wValue == USB_FEATURE_REMOTE_WAKEUP) + { + pdev->dev_remote_wakeup = 1U; + (void)USBD_CtlSendStatus(pdev); + } +} + + +/** +* @brief USBD_ClrFeature +* Handle clear device feature request +* @param pdev: device instance +* @param req: usb request +* @retval status +*/ +static void USBD_ClrFeature(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + switch (pdev->dev_state) + { + case USBD_STATE_DEFAULT: + case USBD_STATE_ADDRESSED: + case USBD_STATE_CONFIGURED: + if (req->wValue == USB_FEATURE_REMOTE_WAKEUP) + { + pdev->dev_remote_wakeup = 0U; + (void)USBD_CtlSendStatus(pdev); + } + break; + + default: + USBD_CtlError(pdev, req); + break; + } +} + +/** +* @brief USBD_ParseSetupRequest +* Copy buffer into setup structure +* @param pdev: device instance +* @param req: usb request +* @retval None +*/ + +void USBD_ParseSetupRequest(USBD_SetupReqTypedef *req, uint8_t *pdata) +{ + uint8_t *pbuff = pdata; + + req->bmRequest = *(uint8_t *)(pbuff); + + pbuff++; + req->bRequest = *(uint8_t *)(pbuff); + + pbuff++; + req->wValue = SWAPBYTE(pbuff); + + pbuff++; + pbuff++; + req->wIndex = SWAPBYTE(pbuff); + + pbuff++; + pbuff++; + req->wLength = SWAPBYTE(pbuff); +} + +/** +* @brief USBD_CtlError +* Handle USB low level Error +* @param pdev: device instance +* @param req: usb request +* @retval None +*/ + +void USBD_CtlError(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) +{ + UNUSED(req); + + (void)USBD_LL_StallEP(pdev, 0x80U); + (void)USBD_LL_StallEP(pdev, 0U); +} + + +/** + * @brief USBD_GetString + * Convert Ascii string into unicode one + * @param desc : descriptor buffer + * @param unicode : Formatted string buffer (unicode) + * @param len : descriptor length + * @retval None + */ +void USBD_GetString(uint8_t *desc, uint8_t *unicode, uint16_t *len) +{ + uint8_t idx = 0U; + uint8_t *pdesc; + + if (desc == NULL) + { + return; + } + + pdesc = desc; + *len = ((uint16_t)USBD_GetLen(pdesc) * 2U) + 2U; + + unicode[idx] = *(uint8_t *)len; + idx++; + unicode[idx] = USB_DESC_TYPE_STRING; + idx++; + + while (*pdesc != (uint8_t)'\0') + { + unicode[idx] = *pdesc; + pdesc++; + 
idx++; + + unicode[idx] = 0U; + idx++; + } +} + +/** + * @brief USBD_GetLen + * return the string length + * @param buf : pointer to the ascii string buffer + * @retval string length + */ +static uint8_t USBD_GetLen(uint8_t *buf) +{ + uint8_t len = 0U; + uint8_t *pbuff = buf; + + while (*pbuff != (uint8_t)'\0') + { + len++; + pbuff++; + } + + return len; +} +/** + * @} + */ + + +/** + * @} + */ + + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c new file mode 100644 index 000000000..8ac5491ff --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c @@ -0,0 +1,216 @@ +/** + ****************************************************************************** + * @file usbd_ioreq.c + * @author MCD Application Team + * @brief This file provides the IO requests APIs for control endpoints. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2015 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_ioreq.h" + +/** @addtogroup STM32_USB_DEVICE_LIBRARY + * @{ + */ + + +/** @defgroup USBD_IOREQ + * @brief control I/O requests module + * @{ + */ + +/** @defgroup USBD_IOREQ_Private_TypesDefinitions + * @{ + */ +/** + * @} + */ + + +/** @defgroup USBD_IOREQ_Private_Defines + * @{ + */ + +/** + * @} + */ + + +/** @defgroup USBD_IOREQ_Private_Macros + * @{ + */ +/** + * @} + */ + + +/** @defgroup USBD_IOREQ_Private_Variables + * @{ + */ + +/** + * @} + */ + + +/** @defgroup USBD_IOREQ_Private_FunctionPrototypes + * @{ + */ +/** + * @} + */ + + +/** @defgroup USBD_IOREQ_Private_Functions + * @{ + */ + +/** +* @brief USBD_CtlSendData +* send data on the ctl pipe +* @param pdev: device instance +* @param buff: pointer to data buffer +* @param len: length of data to be sent +* @retval status +*/ +USBD_StatusTypeDef USBD_CtlSendData(USBD_HandleTypeDef *pdev, + uint8_t *pbuf, uint32_t len) +{ + /* Set EP0 State */ + pdev->ep0_state = USBD_EP0_DATA_IN; + pdev->ep_in[0].total_length = len; + pdev->ep_in[0].rem_length = len; + + /* Start the transfer */ + (void)USBD_LL_Transmit(pdev, 0x00U, pbuf, len); + + return USBD_OK; +} + +/** +* @brief USBD_CtlContinueSendData +* continue sending data on the ctl pipe +* @param pdev: device instance +* @param buff: pointer to data buffer +* @param len: length of data to be sent +* @retval status +*/ +USBD_StatusTypeDef USBD_CtlContinueSendData(USBD_HandleTypeDef *pdev, + uint8_t *pbuf, uint32_t len) +{ + /* Start the next transfer */ + (void)USBD_LL_Transmit(pdev, 0x00U, pbuf, len); + + return USBD_OK; +} + +/** +* @brief USBD_CtlPrepareRx +* receive data on the ctl pipe +* @param pdev: device instance +* @param buff: pointer to data buffer +* @param len: length of data to be received +* @retval status +*/ +USBD_StatusTypeDef USBD_CtlPrepareRx(USBD_HandleTypeDef *pdev, + uint8_t *pbuf, uint32_t len) +{ + /* Set EP0 State */ + pdev->ep0_state = USBD_EP0_DATA_OUT; + pdev->ep_out[0].total_length = len; + pdev->ep_out[0].rem_length = len; + + /* Start the transfer */ + (void)USBD_LL_PrepareReceive(pdev, 0U, pbuf, len); + + return USBD_OK; +} + +/** +* @brief USBD_CtlContinueRx +* continue receive data on the ctl pipe +* @param pdev: device instance +* @param buff: pointer to data buffer +* @param len: length of data to be received +* @retval status +*/ +USBD_StatusTypeDef USBD_CtlContinueRx(USBD_HandleTypeDef *pdev, + uint8_t *pbuf, uint32_t len) +{ + (void)USBD_LL_PrepareReceive(pdev, 0U, pbuf, len); + + return USBD_OK; +} + +/** +* @brief USBD_CtlSendStatus +* send zero lzngth packet on the ctl pipe +* @param pdev: device instance +* @retval status +*/ +USBD_StatusTypeDef USBD_CtlSendStatus(USBD_HandleTypeDef *pdev) +{ + /* Set EP0 State */ + pdev->ep0_state = USBD_EP0_STATUS_IN; + + /* Start the transfer */ + (void)USBD_LL_Transmit(pdev, 0x00U, NULL, 0U); + + return USBD_OK; +} + +/** +* @brief USBD_CtlReceiveStatus +* receive zero lzngth packet on the ctl pipe +* @param pdev: device instance +* @retval status +*/ +USBD_StatusTypeDef USBD_CtlReceiveStatus(USBD_HandleTypeDef *pdev) +{ + /* Set EP0 State */ + pdev->ep0_state = 
USBD_EP0_STATUS_OUT; + + /* Start the transfer */ + (void)USBD_LL_PrepareReceive(pdev, 0U, NULL, 0U); + + return USBD_OK; +} + +/** +* @brief USBD_GetRxCount +* returns the received data length +* @param pdev: device instance +* @param ep_addr: endpoint address +* @retval Rx Data blength +*/ +uint32_t USBD_GetRxCount(USBD_HandleTypeDef *pdev, uint8_t ep_addr) +{ + return USBD_LL_GetRxDataSize(pdev, ep_addr); +} + +/** + * @} + */ + + +/** + * @} + */ + + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/diskio.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/diskio.c new file mode 100644 index 000000000..5cf8edf46 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/diskio.c @@ -0,0 +1,141 @@ +/*-----------------------------------------------------------------------*/ +/* Low level disk I/O module skeleton for FatFs (C)ChaN, 2017 */ +/* */ +/* Portions COPYRIGHT 2017 STMicroelectronics */ +/* Portions Copyright (C) 2017, ChaN, all right reserved */ +/*-----------------------------------------------------------------------*/ +/* If a working storage control module is available, it should be */ +/* attached to the FatFs via a glue function rather than modifying it. */ +/* This is an example of glue functions to attach various existing */ +/* storage control modules to the FatFs module with a defined API. */ +/*-----------------------------------------------------------------------*/ + +/* Includes ------------------------------------------------------------------*/ +#include "diskio.h" +#include "ff_gen_drv.h" + +#if defined ( __GNUC__ ) +#ifndef __weak +#define __weak __attribute__((weak)) +#endif +#endif + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +extern Disk_drvTypeDef disk; + +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** + * @brief Gets Disk Status + * @param pdrv: Physical drive number (0..) + * @retval DSTATUS: Operation status + */ +DSTATUS disk_status ( + BYTE pdrv /* Physical drive number to identify the drive */ +) +{ + DSTATUS stat; + + stat = disk.drv[pdrv]->disk_status(disk.lun[pdrv]); + return stat; +} + +/** + * @brief Initializes a Drive + * @param pdrv: Physical drive number (0..) + * @retval DSTATUS: Operation status + */ +DSTATUS disk_initialize ( + BYTE pdrv /* Physical drive nmuber to identify the drive */ +) +{ + DSTATUS stat = RES_OK; + + if(disk.is_initialized[pdrv] == 0) + { + disk.is_initialized[pdrv] = 1; + stat = disk.drv[pdrv]->disk_initialize(disk.lun[pdrv]); + } + return stat; +} + +/** + * @brief Reads Sector(s) + * @param pdrv: Physical drive number (0..) 
+ * @param *buff: Data buffer to store read data + * @param sector: Sector address (LBA) + * @param count: Number of sectors to read (1..128) + * @retval DRESULT: Operation result + */ +DRESULT disk_read ( + BYTE pdrv, /* Physical drive nmuber to identify the drive */ + BYTE *buff, /* Data buffer to store read data */ + DWORD sector, /* Sector address in LBA */ + UINT count /* Number of sectors to read */ +) +{ + DRESULT res; + + res = disk.drv[pdrv]->disk_read(disk.lun[pdrv], buff, sector, count); + return res; +} + +/** + * @brief Writes Sector(s) + * @param pdrv: Physical drive number (0..) + * @param *buff: Data to be written + * @param sector: Sector address (LBA) + * @param count: Number of sectors to write (1..128) + * @retval DRESULT: Operation result + */ +#if _USE_WRITE == 1 +DRESULT disk_write ( + BYTE pdrv, /* Physical drive nmuber to identify the drive */ + const BYTE *buff, /* Data to be written */ + DWORD sector, /* Sector address in LBA */ + UINT count /* Number of sectors to write */ +) +{ + DRESULT res; + + res = disk.drv[pdrv]->disk_write(disk.lun[pdrv], buff, sector, count); + return res; +} +#endif /* _USE_WRITE == 1 */ + +/** + * @brief I/O control operation + * @param pdrv: Physical drive number (0..) + * @param cmd: Control code + * @param *buff: Buffer to send/receive control data + * @retval DRESULT: Operation result + */ +#if _USE_IOCTL == 1 +DRESULT disk_ioctl ( + BYTE pdrv, /* Physical drive nmuber (0..) */ + BYTE cmd, /* Control code */ + void *buff /* Buffer to send/receive control data */ +) +{ + DRESULT res; + + res = disk.drv[pdrv]->disk_ioctl(disk.lun[pdrv], cmd, buff); + return res; +} +#endif /* _USE_IOCTL == 1 */ + +/** + * @brief Gets Time from RTC + * @param None + * @retval Time in DWORD + */ +__weak DWORD get_fattime (void) +{ + return 0; +} + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/diskio.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/diskio.h new file mode 100644 index 000000000..5b61e570b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/diskio.h @@ -0,0 +1,80 @@ +/*-----------------------------------------------------------------------/ +/ Low level disk interface modlue include file (C)ChaN, 2014 / +/-----------------------------------------------------------------------*/ + +#ifndef _DISKIO_DEFINED +#define _DISKIO_DEFINED + +#ifdef __cplusplus +extern "C" { +#endif + +#define _USE_WRITE 1 /* 1: Enable disk_write function */ +#define _USE_IOCTL 1 /* 1: Enable disk_ioctl function */ + +#include "integer.h" + + +/* Status of Disk Functions */ +typedef BYTE DSTATUS; + +/* Results of Disk Functions */ +typedef enum { + RES_OK = 0, /* 0: Successful */ + RES_ERROR, /* 1: R/W Error */ + RES_WRPRT, /* 2: Write Protected */ + RES_NOTRDY, /* 3: Not Ready */ + RES_PARERR /* 4: Invalid Parameter */ +} DRESULT; + + +/*---------------------------------------*/ +/* Prototypes for disk control functions */ + + +DSTATUS disk_initialize (BYTE pdrv); +DSTATUS disk_status (BYTE pdrv); +DRESULT disk_read (BYTE pdrv, BYTE* buff, DWORD sector, UINT count); +DRESULT disk_write (BYTE pdrv, const BYTE* buff, DWORD sector, UINT count); +DRESULT disk_ioctl (BYTE pdrv, BYTE cmd, void* buff); +DWORD get_fattime (void); + +/* Disk Status Bits (DSTATUS) */ + +#define STA_NOINIT 0x01 /* Drive not initialized */ +#define STA_NODISK 0x02 /* No medium in the drive */ +#define STA_PROTECT 0x04 /* Write 
protected */ + + +/* Command code for disk_ioctrl fucntion */ + +/* Generic command (Used by FatFs) */ +#define CTRL_SYNC 0 /* Complete pending write process (needed at _FS_READONLY == 0) */ +#define GET_SECTOR_COUNT 1 /* Get media size (needed at _USE_MKFS == 1) */ +#define GET_SECTOR_SIZE 2 /* Get sector size (needed at _MAX_SS != _MIN_SS) */ +#define GET_BLOCK_SIZE 3 /* Get erase block size (needed at _USE_MKFS == 1) */ +#define CTRL_TRIM 4 /* Inform device that the data on the block of sectors is no longer used (needed at _USE_TRIM == 1) */ + +/* Generic command (Not used by FatFs) */ +#define CTRL_POWER 5 /* Get/Set power status */ +#define CTRL_LOCK 6 /* Lock/Unlock media removal */ +#define CTRL_EJECT 7 /* Eject media */ +#define CTRL_FORMAT 8 /* Create physical format on the media */ + +/* MMC/SDC specific ioctl command */ +#define MMC_GET_TYPE 10 /* Get card type */ +#define MMC_GET_CSD 11 /* Get CSD */ +#define MMC_GET_CID 12 /* Get CID */ +#define MMC_GET_OCR 13 /* Get OCR */ +#define MMC_GET_SDSTAT 14 /* Get SD status */ + +/* ATA/CF specific ioctl command */ +#define ATA_GET_REV 20 /* Get F/W revision */ +#define ATA_GET_MODEL 21 /* Get model name */ +#define ATA_GET_SN 22 /* Get serial number */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff.c new file mode 100644 index 000000000..b0bd43662 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff.c @@ -0,0 +1,6140 @@ +/*----------------------------------------------------------------------------/ +/ FatFs - Generic FAT file system module R0.12c / +/-----------------------------------------------------------------------------/ +/ +/ Copyright (C) 2017, ChaN, all right reserved. +/ +/ FatFs module is an open source software. Redistribution and use of FatFs in +/ source and binary forms, with or without modification, are permitted provided +/ that the following condition is met: +/ +/ 1. Redistributions of source code must retain the above copyright notice, +/ this condition and the following disclaimer. +/ +/ This software is provided by the copyright holder and contributors "AS IS" +/ and any warranties related to this software are DISCLAIMED. +/ The copyright owner or contributors be NOT LIABLE for any damages caused +/ by use of this software. +/----------------------------------------------------------------------------*/ + + +#include "ff.h" /* Declarations of FatFs API */ +#include "diskio.h" /* Declarations of device I/O functions */ + + +/*-------------------------------------------------------------------------- + + Module Private Definitions + +---------------------------------------------------------------------------*/ + +#if _FATFS != 68300 /* Revision ID */ +#error Wrong include file (ff.h). 
+#endif + + +/* DBCS code ranges and SBCS upper conversion tables */ + +#if _CODE_PAGE == 932 /* Japanese Shift-JIS */ +#define _DF1S 0x81 /* DBC 1st byte range 1 start */ +#define _DF1E 0x9F /* DBC 1st byte range 1 end */ +#define _DF2S 0xE0 /* DBC 1st byte range 2 start */ +#define _DF2E 0xFC /* DBC 1st byte range 2 end */ +#define _DS1S 0x40 /* DBC 2nd byte range 1 start */ +#define _DS1E 0x7E /* DBC 2nd byte range 1 end */ +#define _DS2S 0x80 /* DBC 2nd byte range 2 start */ +#define _DS2E 0xFC /* DBC 2nd byte range 2 end */ + +#elif _CODE_PAGE == 936 /* Simplified Chinese GBK */ +#define _DF1S 0x81 +#define _DF1E 0xFE +#define _DS1S 0x40 +#define _DS1E 0x7E +#define _DS2S 0x80 +#define _DS2E 0xFE + +#elif _CODE_PAGE == 949 /* Korean */ +#define _DF1S 0x81 +#define _DF1E 0xFE +#define _DS1S 0x41 +#define _DS1E 0x5A +#define _DS2S 0x61 +#define _DS2E 0x7A +#define _DS3S 0x81 +#define _DS3E 0xFE + +#elif _CODE_PAGE == 950 /* Traditional Chinese Big5 */ +#define _DF1S 0x81 +#define _DF1E 0xFE +#define _DS1S 0x40 +#define _DS1E 0x7E +#define _DS2S 0xA1 +#define _DS2E 0xFE + +#elif _CODE_PAGE == 437 /* U.S. */ +#define _DF1S 0 +#define _EXCVT {0x80,0x9A,0x45,0x41,0x8E,0x41,0x8F,0x80,0x45,0x45,0x45,0x49,0x49,0x49,0x8E,0x8F, \ + 0x90,0x92,0x92,0x4F,0x99,0x4F,0x55,0x55,0x59,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F, \ + 0x41,0x49,0x4F,0x55,0xA5,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 720 /* Arabic */ +#define _DF1S 0 +#define _EXCVT {0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F, \ + 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F, \ + 0xA0,0xA1,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 737 /* Greek */ +#define _DF1S 0 +#define _EXCVT {0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F, \ + 0x90,0x92,0x92,0x93,0x94,0x95,0x96,0x97,0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87, \ + 0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F,0x90,0x91,0xAA,0x92,0x93,0x94,0x95,0x96, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0x97,0xEA,0xEB,0xEC,0xE4,0xED,0xEE,0xEF,0xF5,0xF0,0xEA,0xEB,0xEC,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 771 /* KBL */ +#define _DF1S 0 +#define _EXCVT {0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F, \ + 
0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F, \ + 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDC,0xDE,0xDE, \ + 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F, \ + 0xF0,0xF0,0xF2,0xF2,0xF4,0xF4,0xF6,0xF6,0xF8,0xF8,0xFA,0xFA,0xFC,0xFC,0xFE,0xFF} + +#elif _CODE_PAGE == 775 /* Baltic */ +#define _DF1S 0 +#define _EXCVT {0x80,0x9A,0x91,0xA0,0x8E,0x95,0x8F,0x80,0xAD,0xED,0x8A,0x8A,0xA1,0x8D,0x8E,0x8F, \ + 0x90,0x92,0x92,0xE2,0x99,0x95,0x96,0x97,0x97,0x99,0x9A,0x9D,0x9C,0x9D,0x9E,0x9F, \ + 0xA0,0xA1,0xE0,0xA3,0xA3,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xB5,0xB6,0xB7,0xB8,0xBD,0xBE,0xC6,0xC7,0xA5,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE5,0xE5,0xE6,0xE3,0xE8,0xE8,0xEA,0xEA,0xEE,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 850 /* Latin 1 */ +#define _DF1S 0 +#define _EXCVT {0x43,0x55,0x45,0x41,0x41,0x41,0x41,0x43,0x45,0x45,0x45,0x49,0x49,0x49,0x41,0x41, \ + 0x45,0x92,0x92,0x4F,0x4F,0x4F,0x55,0x55,0x59,0x4F,0x55,0x4F,0x9C,0x4F,0x9E,0x9F, \ + 0x41,0x49,0x4F,0x55,0xA5,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0x41,0x41,0x41,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0x41,0x41,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD1,0xD1,0x45,0x45,0x45,0x49,0x49,0x49,0x49,0xD9,0xDA,0xDB,0xDC,0xDD,0x49,0xDF, \ + 0x4F,0xE1,0x4F,0x4F,0x4F,0x4F,0xE6,0xE8,0xE8,0x55,0x55,0x55,0x59,0x59,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 852 /* Latin 2 */ +#define _DF1S 0 +#define _EXCVT {0x80,0x9A,0x90,0xB6,0x8E,0xDE,0x8F,0x80,0x9D,0xD3,0x8A,0x8A,0xD7,0x8D,0x8E,0x8F, \ + 0x90,0x91,0x91,0xE2,0x99,0x95,0x95,0x97,0x97,0x99,0x9A,0x9B,0x9B,0x9D,0x9E,0xAC, \ + 0xB5,0xD6,0xE0,0xE9,0xA4,0xA4,0xA6,0xA6,0xA8,0xA8,0xAA,0x8D,0xAC,0xB8,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBD,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC6,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD1,0xD1,0xD2,0xD3,0xD2,0xD5,0xD6,0xD7,0xB7,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE3,0xD5,0xE6,0xE6,0xE8,0xE9,0xE8,0xEB,0xED,0xED,0xDD,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xEB,0xFC,0xFC,0xFE,0xFF} + +#elif _CODE_PAGE == 855 /* Cyrillic */ +#define _DF1S 0 +#define _EXCVT {0x81,0x81,0x83,0x83,0x85,0x85,0x87,0x87,0x89,0x89,0x8B,0x8B,0x8D,0x8D,0x8F,0x8F, \ + 0x91,0x91,0x93,0x93,0x95,0x95,0x97,0x97,0x99,0x99,0x9B,0x9B,0x9D,0x9D,0x9F,0x9F, \ + 0xA1,0xA1,0xA3,0xA3,0xA5,0xA5,0xA7,0xA7,0xA9,0xA9,0xAB,0xAB,0xAD,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB6,0xB6,0xB8,0xB8,0xB9,0xBA,0xBB,0xBC,0xBE,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC7,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD1,0xD1,0xD3,0xD3,0xD5,0xD5,0xD7,0xD7,0xDD,0xD9,0xDA,0xDB,0xDC,0xDD,0xE0,0xDF, \ + 0xE0,0xE2,0xE2,0xE4,0xE4,0xE6,0xE6,0xE8,0xE8,0xEA,0xEA,0xEC,0xEC,0xEE,0xEE,0xEF, \ + 
0xF0,0xF2,0xF2,0xF4,0xF4,0xF6,0xF6,0xF8,0xF8,0xFA,0xFA,0xFC,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 857 /* Turkish */ +#define _DF1S 0 +#define _EXCVT {0x80,0x9A,0x90,0xB6,0x8E,0xB7,0x8F,0x80,0xD2,0xD3,0xD4,0xD8,0xD7,0x49,0x8E,0x8F, \ + 0x90,0x92,0x92,0xE2,0x99,0xE3,0xEA,0xEB,0x98,0x99,0x9A,0x9D,0x9C,0x9D,0x9E,0x9E, \ + 0xB5,0xD6,0xE0,0xE9,0xA5,0xA5,0xA6,0xA6,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC7,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0x49,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE5,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xDE,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 860 /* Portuguese */ +#define _DF1S 0 +#define _EXCVT {0x80,0x9A,0x90,0x8F,0x8E,0x91,0x86,0x80,0x89,0x89,0x92,0x8B,0x8C,0x98,0x8E,0x8F, \ + 0x90,0x91,0x92,0x8C,0x99,0xA9,0x96,0x9D,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F, \ + 0x86,0x8B,0x9F,0x96,0xA5,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 861 /* Icelandic */ +#define _DF1S 0 +#define _EXCVT {0x80,0x9A,0x90,0x41,0x8E,0x41,0x8F,0x80,0x45,0x45,0x45,0x8B,0x8B,0x8D,0x8E,0x8F, \ + 0x90,0x92,0x92,0x4F,0x99,0x8D,0x55,0x97,0x97,0x99,0x9A,0x9D,0x9C,0x9D,0x9E,0x9F, \ + 0xA4,0xA5,0xA6,0xA7,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 862 /* Hebrew */ +#define _DF1S 0 +#define _EXCVT {0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F, \ + 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F, \ + 0x41,0x49,0x4F,0x55,0xA5,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 863 /* Canadian-French */ +#define _DF1S 0 +#define _EXCVT {0x43,0x55,0x45,0x41,0x41,0x41,0x86,0x43,0x45,0x45,0x45,0x49,0x49,0x8D,0x41,0x8F, \ + 0x45,0x45,0x45,0x4F,0x45,0x49,0x55,0x55,0x98,0x4F,0x55,0x9B,0x9C,0x55,0x55,0x9F, \ + 0xA0,0xA1,0x4F,0x55,0xA4,0xA5,0xA6,0xA7,0x49,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 
0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 864 /* Arabic */ +#define _DF1S 0 +#define _EXCVT {0x80,0x9A,0x45,0x41,0x8E,0x41,0x8F,0x80,0x45,0x45,0x45,0x49,0x49,0x49,0x8E,0x8F, \ + 0x90,0x92,0x92,0x4F,0x99,0x4F,0x55,0x55,0x59,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F, \ + 0x41,0x49,0x4F,0x55,0xA5,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 865 /* Nordic */ +#define _DF1S 0 +#define _EXCVT {0x80,0x9A,0x90,0x41,0x8E,0x41,0x8F,0x80,0x45,0x45,0x45,0x49,0x49,0x49,0x8E,0x8F, \ + 0x90,0x92,0x92,0x4F,0x99,0x4F,0x55,0x55,0x59,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F, \ + 0x41,0x49,0x4F,0x55,0xA5,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF, \ + 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 866 /* Russian */ +#define _DF1S 0 +#define _EXCVT {0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F, \ + 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F, \ + 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, \ + 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F, \ + 0xF0,0xF0,0xF2,0xF2,0xF4,0xF4,0xF6,0xF6,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF} + +#elif _CODE_PAGE == 869 /* Greek 2 */ +#define _DF1S 0 +#define _EXCVT {0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F, \ + 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x86,0x9C,0x8D,0x8F,0x90, \ + 0x91,0x90,0x92,0x95,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF, \ + 0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF, \ + 0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF, \ + 0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xA4,0xA5,0xA6,0xD9,0xDA,0xDB,0xDC,0xA7,0xA8,0xDF, \ + 0xA9,0xAA,0xAC,0xAD,0xB5,0xB6,0xB7,0xB8,0xBD,0xBE,0xC6,0xC7,0xCF,0xCF,0xD0,0xEF, \ + 0xF0,0xF1,0xD1,0xD2,0xD3,0xF5,0xD4,0xF7,0xF8,0xF9,0xD5,0x96,0x95,0x98,0xFE,0xFF} + +#elif _CODE_PAGE == 1 /* ASCII (for only non-LFN cfg) */ +#if _USE_LFN != 0 +#error Cannot enable LFN without valid code page. 
+#endif +#define _DF1S 0 + +#else +#error Unknown code page + +#endif + + +/* Character code support macros */ +#define IsUpper(c) (((c)>='A')&&((c)<='Z')) +#define IsLower(c) (((c)>='a')&&((c)<='z')) +#define IsDigit(c) (((c)>='0')&&((c)<='9')) + +#if _DF1S != 0 /* Code page is DBCS */ + +#ifdef _DF2S /* Two 1st byte areas */ +#define IsDBCS1(c) (((BYTE)(c) >= _DF1S && (BYTE)(c) <= _DF1E) || ((BYTE)(c) >= _DF2S && (BYTE)(c) <= _DF2E)) +#else /* One 1st byte area */ +#define IsDBCS1(c) ((BYTE)(c) >= _DF1S && (BYTE)(c) <= _DF1E) +#endif + +#ifdef _DS3S /* Three 2nd byte areas */ +#define IsDBCS2(c) (((BYTE)(c) >= _DS1S && (BYTE)(c) <= _DS1E) || ((BYTE)(c) >= _DS2S && (BYTE)(c) <= _DS2E) || ((BYTE)(c) >= _DS3S && (BYTE)(c) <= _DS3E)) +#else /* Two 2nd byte areas */ +#define IsDBCS2(c) (((BYTE)(c) >= _DS1S && (BYTE)(c) <= _DS1E) || ((BYTE)(c) >= _DS2S && (BYTE)(c) <= _DS2E)) +#endif + +#else /* Code page is SBCS */ + +#define IsDBCS1(c) 0 +#define IsDBCS2(c) 0 + +#endif /* _DF1S */ + + +/* Additional file attribute bits for internal use */ +#define AM_VOL 0x08 /* Volume label */ +#define AM_LFN 0x0F /* LFN entry */ +#define AM_MASK 0x3F /* Mask of defined bits */ + + +/* Additional file access control and file status flags for internal use */ +#define FA_SEEKEND 0x20 /* Seek to end of the file on file open */ +#define FA_MODIFIED 0x40 /* File has been modified */ +#define FA_DIRTY 0x80 /* FIL.buf[] needs to be written-back */ + + +/* Name status flags in fn[] */ +#define NSFLAG 11 /* Index of the name status byte */ +#define NS_LOSS 0x01 /* Out of 8.3 format */ +#define NS_LFN 0x02 /* Force to create LFN entry */ +#define NS_LAST 0x04 /* Last segment */ +#define NS_BODY 0x08 /* Lower case flag (body) */ +#define NS_EXT 0x10 /* Lower case flag (ext) */ +#define NS_DOT 0x20 /* Dot entry */ +#define NS_NOLFN 0x40 /* Do not find LFN */ +#define NS_NONAME 0x80 /* Not followed */ + + +/* Limits and boundaries */ +#define MAX_DIR 0x200000 /* Max size of FAT directory */ +#define MAX_DIR_EX 0x10000000 /* Max size of exFAT directory */ +#define MAX_FAT12 0xFF5 /* Max FAT12 clusters (differs from specs, but correct for real DOS/Windows behavior) */ +#define MAX_FAT16 0xFFF5 /* Max FAT16 clusters (differs from specs, but correct for real DOS/Windows behavior) */ +#define MAX_FAT32 0x0FFFFFF5 /* Max FAT32 clusters (not specified, practical limit) */ +#define MAX_EXFAT 0x7FFFFFFD /* Max exFAT clusters (differs from specs, implementation limit) */ + + +/* FatFs refers the FAT structure as simple byte array instead of structure member +/ because the C structure is not binary compatible between different platforms */ + +#define BS_JmpBoot 0 /* x86 jump instruction (3-byte) */ +#define BS_OEMName 3 /* OEM name (8-byte) */ +#define BPB_BytsPerSec 11 /* Sector size [byte] (WORD) */ +#define BPB_SecPerClus 13 /* Cluster size [sector] (BYTE) */ +#define BPB_RsvdSecCnt 14 /* Size of reserved area [sector] (WORD) */ +#define BPB_NumFATs 16 /* Number of FATs (BYTE) */ +#define BPB_RootEntCnt 17 /* Size of root directory area for FAT12/16 [entry] (WORD) */ +#define BPB_TotSec16 19 /* Volume size (16-bit) [sector] (WORD) */ +#define BPB_Media 21 /* Media descriptor byte (BYTE) */ +#define BPB_FATSz16 22 /* FAT size (16-bit) [sector] (WORD) */ +#define BPB_SecPerTrk 24 /* Track size for int13h [sector] (WORD) */ +#define BPB_NumHeads 26 /* Number of heads for int13h (WORD) */ +#define BPB_HiddSec 28 /* Volume offset from top of the drive (DWORD) */ +#define BPB_TotSec32 32 /* Volume size (32-bit) [sector] (DWORD) */ 
+#define BS_DrvNum 36 /* Physical drive number for int13h (BYTE) */ +#define BS_NTres 37 /* Error flag (BYTE) */ +#define BS_BootSig 38 /* Extended boot signature (BYTE) */ +#define BS_VolID 39 /* Volume serial number (DWORD) */ +#define BS_VolLab 43 /* Volume label string (8-byte) */ +#define BS_FilSysType 54 /* File system type string (8-byte) */ +#define BS_BootCode 62 /* Boot code (448-byte) */ +#define BS_55AA 510 /* Signature word (WORD) */ + +#define BPB_FATSz32 36 /* FAT32: FAT size [sector] (DWORD) */ +#define BPB_ExtFlags32 40 /* FAT32: Extended flags (WORD) */ +#define BPB_FSVer32 42 /* FAT32: File system version (WORD) */ +#define BPB_RootClus32 44 /* FAT32: Root directory cluster (DWORD) */ +#define BPB_FSInfo32 48 /* FAT32: Offset of FSINFO sector (WORD) */ +#define BPB_BkBootSec32 50 /* FAT32: Offset of backup boot sector (WORD) */ +#define BS_DrvNum32 64 /* FAT32: Physical drive number for int13h (BYTE) */ +#define BS_NTres32 65 /* FAT32: Error flag (BYTE) */ +#define BS_BootSig32 66 /* FAT32: Extended boot signature (BYTE) */ +#define BS_VolID32 67 /* FAT32: Volume serial number (DWORD) */ +#define BS_VolLab32 71 /* FAT32: Volume label string (8-byte) */ +#define BS_FilSysType32 82 /* FAT32: File system type string (8-byte) */ +#define BS_BootCode32 90 /* FAT32: Boot code (420-byte) */ + +#define BPB_ZeroedEx 11 /* exFAT: MBZ field (53-byte) */ +#define BPB_VolOfsEx 64 /* exFAT: Volume offset from top of the drive [sector] (QWORD) */ +#define BPB_TotSecEx 72 /* exFAT: Volume size [sector] (QWORD) */ +#define BPB_FatOfsEx 80 /* exFAT: FAT offset from top of the volume [sector] (DWORD) */ +#define BPB_FatSzEx 84 /* exFAT: FAT size [sector] (DWORD) */ +#define BPB_DataOfsEx 88 /* exFAT: Data offset from top of the volume [sector] (DWORD) */ +#define BPB_NumClusEx 92 /* exFAT: Number of clusters (DWORD) */ +#define BPB_RootClusEx 96 /* exFAT: Root directory start cluster (DWORD) */ +#define BPB_VolIDEx 100 /* exFAT: Volume serial number (DWORD) */ +#define BPB_FSVerEx 104 /* exFAT: File system version (WORD) */ +#define BPB_VolFlagEx 106 /* exFAT: Volume flags (BYTE) */ +#define BPB_ActFatEx 107 /* exFAT: Active FAT flags (BYTE) */ +#define BPB_BytsPerSecEx 108 /* exFAT: Log2 of sector size in unit of byte (BYTE) */ +#define BPB_SecPerClusEx 109 /* exFAT: Log2 of cluster size in unit of sector (BYTE) */ +#define BPB_NumFATsEx 110 /* exFAT: Number of FATs (BYTE) */ +#define BPB_DrvNumEx 111 /* exFAT: Physical drive number for int13h (BYTE) */ +#define BPB_PercInUseEx 112 /* exFAT: Percent in use (BYTE) */ +#define BPB_RsvdEx 113 /* exFAT: Reserved (7-byte) */ +#define BS_BootCodeEx 120 /* exFAT: Boot code (390-byte) */ + +#define DIR_Name 0 /* Short file name (11-byte) */ +#define DIR_Attr 11 /* Attribute (BYTE) */ +#define DIR_NTres 12 /* Lower case flag (BYTE) */ +#define DIR_CrtTime10 13 /* Created time sub-second (BYTE) */ +#define DIR_CrtTime 14 /* Created time (DWORD) */ +#define DIR_LstAccDate 18 /* Last accessed date (WORD) */ +#define DIR_FstClusHI 20 /* Higher 16-bit of first cluster (WORD) */ +#define DIR_ModTime 22 /* Modified time (DWORD) */ +#define DIR_FstClusLO 26 /* Lower 16-bit of first cluster (WORD) */ +#define DIR_FileSize 28 /* File size (DWORD) */ +#define LDIR_Ord 0 /* LFN: LFN order and LLE flag (BYTE) */ +#define LDIR_Attr 11 /* LFN: LFN attribute (BYTE) */ +#define LDIR_Type 12 /* LFN: Entry type (BYTE) */ +#define LDIR_Chksum 13 /* LFN: Checksum of the SFN (BYTE) */ +#define LDIR_FstClusLO 26 /* LFN: MBZ field (WORD) */ +#define XDIR_Type 0 /* exFAT: 
Type of exFAT directory entry (BYTE) */ +#define XDIR_NumLabel 1 /* exFAT: Number of volume label characters (BYTE) */ +#define XDIR_Label 2 /* exFAT: Volume label (11-WORD) */ +#define XDIR_CaseSum 4 /* exFAT: Sum of case conversion table (DWORD) */ +#define XDIR_NumSec 1 /* exFAT: Number of secondary entries (BYTE) */ +#define XDIR_SetSum 2 /* exFAT: Sum of the set of directory entries (WORD) */ +#define XDIR_Attr 4 /* exFAT: File attribute (WORD) */ +#define XDIR_CrtTime 8 /* exFAT: Created time (DWORD) */ +#define XDIR_ModTime 12 /* exFAT: Modified time (DWORD) */ +#define XDIR_AccTime 16 /* exFAT: Last accessed time (DWORD) */ +#define XDIR_CrtTime10 20 /* exFAT: Created time subsecond (BYTE) */ +#define XDIR_ModTime10 21 /* exFAT: Modified time subsecond (BYTE) */ +#define XDIR_CrtTZ 22 /* exFAT: Created timezone (BYTE) */ +#define XDIR_ModTZ 23 /* exFAT: Modified timezone (BYTE) */ +#define XDIR_AccTZ 24 /* exFAT: Last accessed timezone (BYTE) */ +#define XDIR_GenFlags 33 /* exFAT: General secondary flags (WORD) */ +#define XDIR_NumName 35 /* exFAT: Number of file name characters (BYTE) */ +#define XDIR_NameHash 36 /* exFAT: Hash of file name (WORD) */ +#define XDIR_ValidFileSize 40 /* exFAT: Valid file size (QWORD) */ +#define XDIR_FstClus 52 /* exFAT: First cluster of the file data (DWORD) */ +#define XDIR_FileSize 56 /* exFAT: File/Directory size (QWORD) */ + +#define SZDIRE 32 /* Size of a directory entry */ +#define DDEM 0xE5 /* Deleted directory entry mark set to DIR_Name[0] */ +#define RDDEM 0x05 /* Replacement of the character collides with DDEM */ +#define LLEF 0x40 /* Last long entry flag in LDIR_Ord */ + +#define FSI_LeadSig 0 /* FAT32 FSI: Leading signature (DWORD) */ +#define FSI_StrucSig 484 /* FAT32 FSI: Structure signature (DWORD) */ +#define FSI_Free_Count 488 /* FAT32 FSI: Number of free clusters (DWORD) */ +#define FSI_Nxt_Free 492 /* FAT32 FSI: Last allocated cluster (DWORD) */ + +#define MBR_Table 446 /* MBR: Offset of partition table in the MBR */ +#define SZ_PTE 16 /* MBR: Size of a partition table entry */ +#define PTE_Boot 0 /* MBR PTE: Boot indicator */ +#define PTE_StHead 1 /* MBR PTE: Start head */ +#define PTE_StSec 2 /* MBR PTE: Start sector */ +#define PTE_StCyl 3 /* MBR PTE: Start cylinder */ +#define PTE_System 4 /* MBR PTE: System ID */ +#define PTE_EdHead 5 /* MBR PTE: End head */ +#define PTE_EdSec 6 /* MBR PTE: End sector */ +#define PTE_EdCyl 7 /* MBR PTE: End cylinder */ +#define PTE_StLba 8 /* MBR PTE: Start in LBA */ +#define PTE_SizLba 12 /* MBR PTE: Size in LBA */ + + +/* Post process after fatal error on file operation */ +#define ABORT(fs, res) { fp->err = (BYTE)(res); LEAVE_FF(fs, res); } + + +/* Reentrancy related */ +#if _FS_REENTRANT +#if _USE_LFN == 1 +#error Static LFN work area cannot be used at thread-safe configuration +#endif +#define ENTER_FF(fs) { if (!lock_fs(fs)) return FR_TIMEOUT; } +#define LEAVE_FF(fs, res) { unlock_fs(fs, res); return res; } +#else +#define ENTER_FF(fs) +#define LEAVE_FF(fs, res) return res +#endif + + +/* Definitions of volume - partition conversion */ +#if _MULTI_PARTITION +#define LD2PD(vol) VolToPart[vol].pd /* Get physical drive number */ +#define LD2PT(vol) VolToPart[vol].pt /* Get partition index */ +#else +#define LD2PD(vol) (BYTE)(vol) /* Each logical drive is bound to the same physical drive number */ +#define LD2PT(vol) 0 /* Find first valid partition or in SFD */ +#endif + + +/* Definitions of sector size */ +#if (_MAX_SS < _MIN_SS) || (_MAX_SS != 512 && _MAX_SS != 1024 && _MAX_SS != 2048 && 
_MAX_SS != 4096) || (_MIN_SS != 512 && _MIN_SS != 1024 && _MIN_SS != 2048 && _MIN_SS != 4096) +#error Wrong sector size configuration +#endif +#if _MAX_SS == _MIN_SS +#define SS(fs) ((UINT)_MAX_SS) /* Fixed sector size */ +#else +#define SS(fs) ((fs)->ssize) /* Variable sector size */ +#endif + + +/* Timestamp */ +#if _FS_NORTC == 1 +#if _NORTC_YEAR < 1980 || _NORTC_YEAR > 2107 || _NORTC_MON < 1 || _NORTC_MON > 12 || _NORTC_MDAY < 1 || _NORTC_MDAY > 31 +#error Invalid _FS_NORTC settings +#endif +#define GET_FATTIME() ((DWORD)(_NORTC_YEAR - 1980) << 25 | (DWORD)_NORTC_MON << 21 | (DWORD)_NORTC_MDAY << 16) +#else +#define GET_FATTIME() get_fattime() +#endif + + +/* File lock controls */ +#if _FS_LOCK != 0 +#if _FS_READONLY +#error _FS_LOCK must be 0 at read-only configuration +#endif +typedef struct { + FATFS *fs; /* Object ID 1, volume (NULL:blank entry) */ + DWORD clu; /* Object ID 2, containing directory (0:root) */ + DWORD ofs; /* Object ID 3, offset in the directory */ + WORD ctr; /* Object open counter, 0:none, 0x01..0xFF:read mode open count, 0x100:write mode */ +} FILESEM; +#endif + + + + + +/*-------------------------------------------------------------------------- + + Module Private Work Area + +---------------------------------------------------------------------------*/ + +/* Remark: Variables defined here without initial value shall be guaranteed +/ zero/null at start-up. If not, the linker option or start-up routine is +/ not compliance with C standard. */ + +#if _VOLUMES < 1 || _VOLUMES > 10 +#error Wrong _VOLUMES setting +#endif +static FATFS *FatFs[_VOLUMES]; /* Pointer to the file system objects (logical drives) */ +static WORD Fsid; /* File system mount ID */ + +#if _FS_RPATH != 0 && _VOLUMES >= 2 +static BYTE CurrVol; /* Current drive */ +#endif + +#if _FS_LOCK != 0 +static FILESEM Files[_FS_LOCK]; /* Open object lock semaphores */ +#endif + +#if _USE_LFN == 0 /* Non-LFN configuration */ +#define DEF_NAMBUF +#define INIT_NAMBUF(fs) +#define FREE_NAMBUF() + +#else /* LFN configuration */ +#if _MAX_LFN < 12 || _MAX_LFN > 255 +#error Wrong _MAX_LFN value +#endif +#define MAXDIRB(nc) ((nc + 44U) / 15 * SZDIRE) + +#if _USE_LFN == 1 /* LFN enabled with static working buffer */ +#if _FS_EXFAT +static BYTE DirBuf[MAXDIRB(_MAX_LFN)]; /* Directory entry block scratchpad buffer */ +#endif +static WCHAR LfnBuf[_MAX_LFN + 1]; /* LFN enabled with static working buffer */ +#define DEF_NAMBUF +#define INIT_NAMBUF(fs) +#define FREE_NAMBUF() + +#elif _USE_LFN == 2 /* LFN enabled with dynamic working buffer on the stack */ +#if _FS_EXFAT +#define DEF_NAMBUF WCHAR lbuf[_MAX_LFN+1]; BYTE dbuf[MAXDIRB(_MAX_LFN)]; +#define INIT_NAMBUF(fs) { (fs)->lfnbuf = lbuf; (fs)->dirbuf = dbuf; } +#define FREE_NAMBUF() +#else +#define DEF_NAMBUF WCHAR lbuf[_MAX_LFN+1]; +#define INIT_NAMBUF(fs) { (fs)->lfnbuf = lbuf; } +#define FREE_NAMBUF() +#endif + +#elif _USE_LFN == 3 /* LFN enabled with dynamic working buffer on the heap */ +#if _FS_EXFAT +#define DEF_NAMBUF WCHAR *lfn; +#define INIT_NAMBUF(fs) { lfn = ff_memalloc((_MAX_LFN+1)*2 + MAXDIRB(_MAX_LFN)); if (!lfn) LEAVE_FF(fs, FR_NOT_ENOUGH_CORE); (fs)->lfnbuf = lfn; (fs)->dirbuf = (BYTE*)(lfn+_MAX_LFN+1); } +#define FREE_NAMBUF() ff_memfree(lfn) +#else +#define DEF_NAMBUF WCHAR *lfn; +#define INIT_NAMBUF(fs) { lfn = ff_memalloc((_MAX_LFN+1)*2); if (!lfn) LEAVE_FF(fs, FR_NOT_ENOUGH_CORE); (fs)->lfnbuf = lfn; } +#define FREE_NAMBUF() ff_memfree(lfn) +#endif + +#else +#error Wrong _USE_LFN setting + +#endif +#endif /* else _USE_LFN == 0 */ + +#ifdef _EXCVT 
+static const BYTE ExCvt[] = _EXCVT; /* Upper conversion table for SBCS extended characters */ +#endif + + + + + + +/*-------------------------------------------------------------------------- + + Module Private Functions + +---------------------------------------------------------------------------*/ + + +/*-----------------------------------------------------------------------*/ +/* Load/Store multi-byte word in the FAT structure */ +/*-----------------------------------------------------------------------*/ + +static +WORD ld_word (const BYTE* ptr) /* Load a 2-byte little-endian word */ +{ + WORD rv; + + rv = ptr[1]; + rv = rv << 8 | ptr[0]; + return rv; +} + +static +DWORD ld_dword (const BYTE* ptr) /* Load a 4-byte little-endian word */ +{ + DWORD rv; + + rv = ptr[3]; + rv = rv << 8 | ptr[2]; + rv = rv << 8 | ptr[1]; + rv = rv << 8 | ptr[0]; + return rv; +} + +#if _FS_EXFAT +static +QWORD ld_qword (const BYTE* ptr) /* Load an 8-byte little-endian word */ +{ + QWORD rv; + + rv = ptr[7]; + rv = rv << 8 | ptr[6]; + rv = rv << 8 | ptr[5]; + rv = rv << 8 | ptr[4]; + rv = rv << 8 | ptr[3]; + rv = rv << 8 | ptr[2]; + rv = rv << 8 | ptr[1]; + rv = rv << 8 | ptr[0]; + return rv; +} +#endif + +#if !_FS_READONLY +static +void st_word (BYTE* ptr, WORD val) /* Store a 2-byte word in little-endian */ +{ + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; +} + +static +void st_dword (BYTE* ptr, DWORD val) /* Store a 4-byte word in little-endian */ +{ + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; +} + +#if _FS_EXFAT +static +void st_qword (BYTE* ptr, QWORD val) /* Store an 8-byte word in little-endian */ +{ + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; val >>= 8; + *ptr++ = (BYTE)val; +} +#endif +#endif /* !_FS_READONLY */ + + + +/*-----------------------------------------------------------------------*/ +/* String functions */ +/*-----------------------------------------------------------------------*/ + +/* Copy memory to memory */ +static +void mem_cpy (void* dst, const void* src, UINT cnt) { + BYTE *d = (BYTE*)dst; + const BYTE *s = (const BYTE*)src; + + if (cnt) { + do { + *d++ = *s++; + } while (--cnt); + } +} + +/* Fill memory block */ +static +void mem_set (void* dst, int val, UINT cnt) { + BYTE *d = (BYTE*)dst; + + do { + *d++ = (BYTE)val; + } while (--cnt); +} + +/* Compare memory block */ +static +int mem_cmp (const void* dst, const void* src, UINT cnt) { /* ZR:same, NZ:different */ + const BYTE *d = (const BYTE *)dst, *s = (const BYTE *)src; + int r = 0; + + do { + r = *d++ - *s++; + } while (--cnt && r == 0); + + return r; +} + +/* Check if chr is contained in the string */ +static +int chk_chr (const char* str, int chr) { /* NZ:contained, ZR:not contained */ + while (*str && *str != chr) str++; + return *str; +} + + + + +#if _FS_REENTRANT +/*-----------------------------------------------------------------------*/ +/* Request/Release grant to access the volume */ +/*-----------------------------------------------------------------------*/ +static +int lock_fs ( + FATFS* fs /* File system object */ +) +{ + return (fs && ff_req_grant(fs->sobj)) ? 
1 : 0; +} + + +static +void unlock_fs ( + FATFS* fs, /* File system object */ + FRESULT res /* Result code to be returned */ +) +{ + if (fs && res != FR_NOT_ENABLED && res != FR_INVALID_DRIVE && res != FR_TIMEOUT) { + ff_rel_grant(fs->sobj); + } +} + +#endif + + + +#if _FS_LOCK != 0 +/*-----------------------------------------------------------------------*/ +/* File lock control functions */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT chk_lock ( /* Check if the file can be accessed */ + DIR* dp, /* Directory object pointing the file to be checked */ + int acc /* Desired access type (0:Read, 1:Write, 2:Delete/Rename) */ +) +{ + UINT i, be; + + /* Search file semaphore table */ + for (i = be = 0; i < _FS_LOCK; i++) { + if (Files[i].fs) { /* Existing entry */ + if (Files[i].fs == dp->obj.fs && /* Check if the object matched with an open object */ + Files[i].clu == dp->obj.sclust && + Files[i].ofs == dp->dptr) break; + } else { /* Blank entry */ + be = 1; + } + } + if (i == _FS_LOCK) { /* The object is not opened */ + return (be || acc == 2) ? FR_OK : FR_TOO_MANY_OPEN_FILES; /* Is there a blank entry for new object? */ + } + + /* The object has been opened. Reject any open against writing file and all write mode open */ + return (acc || Files[i].ctr == 0x100) ? FR_LOCKED : FR_OK; +} + + +static +int enq_lock (void) /* Check if an entry is available for a new object */ +{ + UINT i; + + for (i = 0; i < _FS_LOCK && Files[i].fs; i++) ; + return (i == _FS_LOCK) ? 0 : 1; +} + + +static +UINT inc_lock ( /* Increment object open counter and returns its index (0:Internal error) */ + DIR* dp, /* Directory object pointing the file to register or increment */ + int acc /* Desired access (0:Read, 1:Write, 2:Delete/Rename) */ +) +{ + UINT i; + + + for (i = 0; i < _FS_LOCK; i++) { /* Find the object */ + if (Files[i].fs == dp->obj.fs && + Files[i].clu == dp->obj.sclust && + Files[i].ofs == dp->dptr) break; + } + + if (i == _FS_LOCK) { /* Not opened. Register it as new. */ + for (i = 0; i < _FS_LOCK && Files[i].fs; i++) ; + if (i == _FS_LOCK) return 0; /* No free entry to register (int err) */ + Files[i].fs = dp->obj.fs; + Files[i].clu = dp->obj.sclust; + Files[i].ofs = dp->dptr; + Files[i].ctr = 0; + } + + if (acc && Files[i].ctr) return 0; /* Access violation (int err) */ + + Files[i].ctr = acc ? 0x100 : Files[i].ctr + 1; /* Set semaphore value */ + + return i + 1; +} + + +static +FRESULT dec_lock ( /* Decrement object open counter */ + UINT i /* Semaphore index (1..) 
*/ +) +{ + WORD n; + FRESULT res; + + + if (--i < _FS_LOCK) { /* Shift index number origin from 0 */ + n = Files[i].ctr; + if (n == 0x100) n = 0; /* If write mode open, delete the entry */ + if (n > 0) n--; /* Decrement read mode open count */ + Files[i].ctr = n; + if (n == 0) Files[i].fs = 0; /* Delete the entry if open count gets zero */ + res = FR_OK; + } else { + res = FR_INT_ERR; /* Invalid index nunber */ + } + return res; +} + + +static +void clear_lock ( /* Clear lock entries of the volume */ + FATFS *fs +) +{ + UINT i; + + for (i = 0; i < _FS_LOCK; i++) { + if (Files[i].fs == fs) Files[i].fs = 0; + } +} + +#endif /* _FS_LOCK != 0 */ + + + +/*-----------------------------------------------------------------------*/ +/* Move/Flush disk access window in the file system object */ +/*-----------------------------------------------------------------------*/ +#if !_FS_READONLY +static +FRESULT sync_window ( /* Returns FR_OK or FR_DISK_ERROR */ + FATFS* fs /* File system object */ +) +{ + DWORD wsect; + UINT nf; + FRESULT res = FR_OK; + + + if (fs->wflag) { /* Write back the sector if it is dirty */ + wsect = fs->winsect; /* Current sector number */ + if (disk_write(fs->drv, fs->win, wsect, 1) != RES_OK) { + res = FR_DISK_ERR; + } else { + fs->wflag = 0; + if (wsect - fs->fatbase < fs->fsize) { /* Is it in the FAT area? */ + for (nf = fs->n_fats; nf >= 2; nf--) { /* Reflect the change to all FAT copies */ + wsect += fs->fsize; + disk_write(fs->drv, fs->win, wsect, 1); + } + } + } + } + return res; +} +#endif + + +static +FRESULT move_window ( /* Returns FR_OK or FR_DISK_ERROR */ + FATFS* fs, /* File system object */ + DWORD sector /* Sector number to make appearance in the fs->win[] */ +) +{ + FRESULT res = FR_OK; + + + if (sector != fs->winsect) { /* Window offset changed? 
*/ +#if !_FS_READONLY + res = sync_window(fs); /* Write-back changes */ +#endif + if (res == FR_OK) { /* Fill sector window with new data */ + if (disk_read(fs->drv, fs->win, sector, 1) != RES_OK) { + sector = 0xFFFFFFFF; /* Invalidate window if data is not reliable */ + res = FR_DISK_ERR; + } + fs->winsect = sector; + } + } + return res; +} + + + + +#if !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* Synchronize file system and strage device */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT sync_fs ( /* FR_OK:succeeded, !=0:error */ + FATFS* fs /* File system object */ +) +{ + FRESULT res; + + + res = sync_window(fs); + if (res == FR_OK) { + /* Update FSInfo sector if needed */ + if (fs->fs_type == FS_FAT32 && fs->fsi_flag == 1) { + /* Create FSInfo structure */ + mem_set(fs->win, 0, SS(fs)); + st_word(fs->win + BS_55AA, 0xAA55); + st_dword(fs->win + FSI_LeadSig, 0x41615252); + st_dword(fs->win + FSI_StrucSig, 0x61417272); + st_dword(fs->win + FSI_Free_Count, fs->free_clst); + st_dword(fs->win + FSI_Nxt_Free, fs->last_clst); + /* Write it into the FSInfo sector */ + fs->winsect = fs->volbase + 1; + disk_write(fs->drv, fs->win, fs->winsect, 1); + fs->fsi_flag = 0; + } + /* Make sure that no pending write process in the physical drive */ + if (disk_ioctl(fs->drv, CTRL_SYNC, 0) != RES_OK) res = FR_DISK_ERR; + } + + return res; +} + +#endif + + + +/*-----------------------------------------------------------------------*/ +/* Get sector# from cluster# */ +/*-----------------------------------------------------------------------*/ + +static +DWORD clust2sect ( /* !=0:Sector number, 0:Failed (invalid cluster#) */ + FATFS* fs, /* File system object */ + DWORD clst /* Cluster# to be converted */ +) +{ + clst -= 2; + if (clst >= fs->n_fatent - 2) return 0; /* Invalid cluster# */ + return clst * fs->csize + fs->database; +} + + + + +/*-----------------------------------------------------------------------*/ +/* FAT access - Read value of a FAT entry */ +/*-----------------------------------------------------------------------*/ + +static +DWORD get_fat ( /* 0xFFFFFFFF:Disk error, 1:Internal error, 2..0x7FFFFFFF:Cluster status */ + _FDID* obj, /* Corresponding object */ + DWORD clst /* Cluster number to get the value */ +) +{ + UINT wc, bc; + DWORD val; + FATFS *fs = obj->fs; + + + if (clst < 2 || clst >= fs->n_fatent) { /* Check if in valid range */ + val = 1; /* Internal error */ + + } else { + val = 0xFFFFFFFF; /* Default value falls on disk error */ + + switch (fs->fs_type) { + case FS_FAT12 : + bc = (UINT)clst; bc += bc / 2; + if (move_window(fs, fs->fatbase + (bc / SS(fs))) != FR_OK) break; + wc = fs->win[bc++ % SS(fs)]; + if (move_window(fs, fs->fatbase + (bc / SS(fs))) != FR_OK) break; + wc |= fs->win[bc % SS(fs)] << 8; + val = (clst & 1) ? (wc >> 4) : (wc & 0xFFF); + break; + + case FS_FAT16 : + if (move_window(fs, fs->fatbase + (clst / (SS(fs) / 2))) != FR_OK) break; + val = ld_word(fs->win + clst * 2 % SS(fs)); + break; + + case FS_FAT32 : + if (move_window(fs, fs->fatbase + (clst / (SS(fs) / 4))) != FR_OK) break; + val = ld_dword(fs->win + clst * 4 % SS(fs)) & 0x0FFFFFFF; + break; +#if _FS_EXFAT + case FS_EXFAT : + if (obj->objsize) { + DWORD cofs = clst - obj->sclust; /* Offset from start cluster */ + DWORD clen = (DWORD)((obj->objsize - 1) / SS(fs)) / fs->csize; /* Number of clusters - 1 */ + + if (obj->stat == 2) { /* Is there no valid chain on the FAT? 
*/ + if (cofs <= clen) { + val = (cofs == clen) ? 0x7FFFFFFF : clst + 1; /* Generate the value */ + break; + } + } + if (obj->stat == 3 && cofs < obj->n_cont) { /* Is it in the 1st fragment? */ + val = clst + 1; /* Generate the value */ + break; + } + if (obj->stat != 2) { /* Get value from FAT if FAT chain is valid */ + if (obj->n_frag != 0) { /* Is it on the growing edge? */ + val = 0x7FFFFFFF; /* Generate EOC */ + } else { + if (move_window(fs, fs->fatbase + (clst / (SS(fs) / 4))) != FR_OK) break; + val = ld_dword(fs->win + clst * 4 % SS(fs)) & 0x7FFFFFFF; + } + break; + } + } + /* go to default */ +#endif + default: + val = 1; /* Internal error */ + } + } + + return val; +} + + + + +#if !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* FAT access - Change value of a FAT entry */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT put_fat ( /* FR_OK(0):succeeded, !=0:error */ + FATFS* fs, /* Corresponding file system object */ + DWORD clst, /* FAT index number (cluster number) to be changed */ + DWORD val /* New value to be set to the entry */ +) +{ + UINT bc; + BYTE *p; + FRESULT res = FR_INT_ERR; + + if (clst >= 2 && clst < fs->n_fatent) { /* Check if in valid range */ + switch (fs->fs_type) { + case FS_FAT12 : /* Bitfield items */ + bc = (UINT)clst; bc += bc / 2; + res = move_window(fs, fs->fatbase + (bc / SS(fs))); + if (res != FR_OK) break; + p = fs->win + bc++ % SS(fs); + *p = (clst & 1) ? ((*p & 0x0F) | ((BYTE)val << 4)) : (BYTE)val; + fs->wflag = 1; + res = move_window(fs, fs->fatbase + (bc / SS(fs))); + if (res != FR_OK) break; + p = fs->win + bc % SS(fs); + *p = (clst & 1) ? (BYTE)(val >> 4) : ((*p & 0xF0) | ((BYTE)(val >> 8) & 0x0F)); + fs->wflag = 1; + break; + + case FS_FAT16 : /* WORD aligned items */ + res = move_window(fs, fs->fatbase + (clst / (SS(fs) / 2))); + if (res != FR_OK) break; + st_word(fs->win + clst * 2 % SS(fs), (WORD)val); + fs->wflag = 1; + break; + + case FS_FAT32 : /* DWORD aligned items */ +#if _FS_EXFAT + case FS_EXFAT : +#endif + res = move_window(fs, fs->fatbase + (clst / (SS(fs) / 4))); + if (res != FR_OK) break; + if (!_FS_EXFAT || fs->fs_type != FS_EXFAT) { + val = (val & 0x0FFFFFFF) | (ld_dword(fs->win + clst * 4 % SS(fs)) & 0xF0000000); + } + st_dword(fs->win + clst * 4 % SS(fs), val); + fs->wflag = 1; + break; + } + } + return res; +} + +#endif /* !_FS_READONLY */ + + + + +#if _FS_EXFAT && !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* exFAT: Accessing FAT and Allocation Bitmap */ +/*-----------------------------------------------------------------------*/ + +/*--------------------------------------*/ +/* Find a contiguous free cluster block */ +/*--------------------------------------*/ + +static +DWORD find_bitmap ( /* 0:Not found, 2..:Cluster block found, 0xFFFFFFFF:Disk error */ + FATFS* fs, /* File system object */ + DWORD clst, /* Cluster number to scan from */ + DWORD ncl /* Number of contiguous clusters to find (1..) 
*/ +) +{ + BYTE bm, bv; + UINT i; + DWORD val, scl, ctr; + + + clst -= 2; /* The first bit in the bitmap corresponds to cluster #2 */ + if (clst >= fs->n_fatent - 2) clst = 0; + scl = val = clst; ctr = 0; + for (;;) { + if (move_window(fs, fs->database + val / 8 / SS(fs)) != FR_OK) return 0xFFFFFFFF; /* (assuming bitmap is located top of the cluster heap) */ + i = val / 8 % SS(fs); bm = 1 << (val % 8); + do { + do { + bv = fs->win[i] & bm; bm <<= 1; /* Get bit value */ + if (++val >= fs->n_fatent - 2) { /* Next cluster (with wrap-around) */ + val = 0; bm = 0; i = SS(fs); + } + if (!bv) { /* Is it a free cluster? */ + if (++ctr == ncl) return scl + 2; /* Check if run length is sufficient for required */ + } else { + scl = val; ctr = 0; /* Encountered a cluster in-use, restart to scan */ + } + if (val == clst) return 0; /* All cluster scanned? */ + } while (bm); + bm = 1; + } while (++i < SS(fs)); + } +} + + +/*----------------------------------------*/ +/* Set/Clear a block of allocation bitmap */ +/*----------------------------------------*/ + +static +FRESULT change_bitmap ( + FATFS* fs, /* File system object */ + DWORD clst, /* Cluster number to change from */ + DWORD ncl, /* Number of clusters to be changed */ + int bv /* bit value to be set (0 or 1) */ +) +{ + BYTE bm; + UINT i; + DWORD sect; + + clst -= 2; /* The first bit corresponds to cluster #2 */ + sect = fs->database + clst / 8 / SS(fs); /* Sector address (assuming bitmap is located top of the cluster heap) */ + i = clst / 8 % SS(fs); /* Byte offset in the sector */ + bm = 1 << (clst % 8); /* Bit mask in the byte */ + for (;;) { + if (move_window(fs, sect++) != FR_OK) return FR_DISK_ERR; + do { + do { + if (bv == (int)((fs->win[i] & bm) != 0)) return FR_INT_ERR; /* Is the bit expected value? */ + fs->win[i] ^= bm; /* Flip the bit */ + fs->wflag = 1; + if (--ncl == 0) return FR_OK; /* All bits processed? */ + } while (bm <<= 1); /* Next bit */ + bm = 1; + } while (++i < SS(fs)); /* Next byte */ + i = 0; + } +} + + +/*---------------------------------------------*/ +/* Fill the first fragment of the FAT chain */ +/*---------------------------------------------*/ + +static +FRESULT fill_first_frag ( + _FDID* obj /* Pointer to the corresponding object */ +) +{ + FRESULT res; + DWORD cl, n; + + if (obj->stat == 3) { /* Has the object been changed 'fragmented'? */ + for (cl = obj->sclust, n = obj->n_cont; n; cl++, n--) { /* Create cluster chain on the FAT */ + res = put_fat(obj->fs, cl, cl + 1); + if (res != FR_OK) return res; + } + obj->stat = 0; /* Change status 'FAT chain is valid' */ + } + return FR_OK; +} + + +/*---------------------------------------------*/ +/* Fill the last fragment of the FAT chain */ +/*---------------------------------------------*/ + +static +FRESULT fill_last_frag ( + _FDID* obj, /* Pointer to the corresponding object */ + DWORD lcl, /* Last cluster of the fragment */ + DWORD term /* Value to set the last FAT entry */ +) +{ + FRESULT res; + + while (obj->n_frag > 0) { /* Create the last chain on the FAT */ + res = put_fat(obj->fs, lcl - obj->n_frag + 1, (obj->n_frag > 1) ? 
lcl - obj->n_frag + 2 : term); + if (res != FR_OK) return res; + obj->n_frag--; + } + return FR_OK; +} + +#endif /* _FS_EXFAT && !_FS_READONLY */ + + + +#if !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* FAT handling - Remove a cluster chain */ +/*-----------------------------------------------------------------------*/ +static +FRESULT remove_chain ( /* FR_OK(0):succeeded, !=0:error */ + _FDID* obj, /* Corresponding object */ + DWORD clst, /* Cluster to remove a chain from */ + DWORD pclst /* Previous cluster of clst (0:an entire chain) */ +) +{ + FRESULT res = FR_OK; + DWORD nxt; + FATFS *fs = obj->fs; +#if _FS_EXFAT || _USE_TRIM + DWORD scl = clst, ecl = clst; +#endif +#if _USE_TRIM + DWORD rt[2]; +#endif + + if (clst < 2 || clst >= fs->n_fatent) return FR_INT_ERR; /* Check if in valid range */ + + /* Mark the previous cluster 'EOC' on the FAT if it exists */ + if (pclst && (!_FS_EXFAT || fs->fs_type != FS_EXFAT || obj->stat != 2)) { + res = put_fat(fs, pclst, 0xFFFFFFFF); + if (res != FR_OK) return res; + } + + /* Remove the chain */ + do { + nxt = get_fat(obj, clst); /* Get cluster status */ + if (nxt == 0) break; /* Empty cluster? */ + if (nxt == 1) return FR_INT_ERR; /* Internal error? */ + if (nxt == 0xFFFFFFFF) return FR_DISK_ERR; /* Disk error? */ + if (!_FS_EXFAT || fs->fs_type != FS_EXFAT) { + res = put_fat(fs, clst, 0); /* Mark the cluster 'free' on the FAT */ + if (res != FR_OK) return res; + } + if (fs->free_clst < fs->n_fatent - 2) { /* Update FSINFO */ + fs->free_clst++; + fs->fsi_flag |= 1; + } +#if _FS_EXFAT || _USE_TRIM + if (ecl + 1 == nxt) { /* Is next cluster contiguous? */ + ecl = nxt; + } else { /* End of contiguous cluster block */ +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + res = change_bitmap(fs, scl, ecl - scl + 1, 0); /* Mark the cluster block 'free' on the bitmap */ + if (res != FR_OK) return res; + } +#endif +#if _USE_TRIM + rt[0] = clust2sect(fs, scl); /* Start sector */ + rt[1] = clust2sect(fs, ecl) + fs->csize - 1; /* End sector */ + disk_ioctl(fs->drv, CTRL_TRIM, rt); /* Inform device the block can be erased */ +#endif + scl = ecl = nxt; + } +#endif + clst = nxt; /* Next cluster */ + } while (clst < fs->n_fatent); /* Repeat while not the last link */ + +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + if (pclst == 0) { /* Does the object have no chain? */ + obj->stat = 0; /* Change the object status 'initial' */ + } else { + if (obj->stat == 3 && pclst >= obj->sclust && pclst <= obj->sclust + obj->n_cont) { /* Did the chain get contiguous? 
*/ + obj->stat = 2; /* Change the object status 'contiguous' */ + } + } + } +#endif + return FR_OK; +} + + + + +/*-----------------------------------------------------------------------*/ +/* FAT handling - Stretch a chain or Create a new chain */ +/*-----------------------------------------------------------------------*/ +static +DWORD create_chain ( /* 0:No free cluster, 1:Internal error, 0xFFFFFFFF:Disk error, >=2:New cluster# */ + _FDID* obj, /* Corresponding object */ + DWORD clst /* Cluster# to stretch, 0:Create a new chain */ +) +{ + DWORD cs, ncl, scl; + FRESULT res; + FATFS *fs = obj->fs; + + + if (clst == 0) { /* Create a new chain */ + scl = fs->last_clst; /* Get suggested cluster to start from */ + if (scl == 0 || scl >= fs->n_fatent) scl = 1; + } + else { /* Stretch current chain */ + cs = get_fat(obj, clst); /* Check the cluster status */ + if (cs < 2) return 1; /* Invalid FAT value */ + if (cs == 0xFFFFFFFF) return cs; /* A disk error occurred */ + if (cs < fs->n_fatent) return cs; /* It is already followed by next cluster */ + scl = clst; + } + +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { /* On the exFAT volume */ + ncl = find_bitmap(fs, scl, 1); /* Find a free cluster */ + if (ncl == 0 || ncl == 0xFFFFFFFF) return ncl; /* No free cluster or hard error? */ + res = change_bitmap(fs, ncl, 1, 1); /* Mark the cluster 'in use' */ + if (res == FR_INT_ERR) return 1; + if (res == FR_DISK_ERR) return 0xFFFFFFFF; + if (clst == 0) { /* Is it a new chain? */ + obj->stat = 2; /* Set status 'contiguous' */ + } else { /* It is a stretched chain */ + if (obj->stat == 2 && ncl != scl + 1) { /* Is the chain got fragmented? */ + obj->n_cont = scl - obj->sclust; /* Set size of the contiguous part */ + obj->stat = 3; /* Change status 'just fragmented' */ + } + } + if (obj->stat != 2) { /* Is the file non-contiguous? */ + if (ncl == clst + 1) { /* Is the cluster next to previous one? */ + obj->n_frag = obj->n_frag ? obj->n_frag + 1 : 2; /* Increment size of last framgent */ + } else { /* New fragment */ + if (obj->n_frag == 0) obj->n_frag = 1; + res = fill_last_frag(obj, clst, ncl); /* Fill last fragment on the FAT and link it to new one */ + if (res == FR_OK) obj->n_frag = 1; + } + } + } else +#endif + { /* On the FAT12/16/32 volume */ + ncl = scl; /* Start cluster */ + for (;;) { + ncl++; /* Next cluster */ + if (ncl >= fs->n_fatent) { /* Check wrap-around */ + ncl = 2; + if (ncl > scl) return 0; /* No free cluster */ + } + cs = get_fat(obj, ncl); /* Get the cluster status */ + if (cs == 0) break; /* Found a free cluster */ + if (cs == 1 || cs == 0xFFFFFFFF) return cs; /* An error occurred */ + if (ncl == scl) return 0; /* No free cluster */ + } + res = put_fat(fs, ncl, 0xFFFFFFFF); /* Mark the new cluster 'EOC' */ + if (res == FR_OK && clst != 0) { + res = put_fat(fs, clst, ncl); /* Link it from the previous one if needed */ + } + } + + if (res == FR_OK) { /* Update FSINFO if function succeeded. */ + fs->last_clst = ncl; + if (fs->free_clst <= fs->n_fatent - 2) fs->free_clst--; + fs->fsi_flag |= 1; + } else { + ncl = (res == FR_DISK_ERR) ? 0xFFFFFFFF : 1; /* Failed. 
Generate error status */ + } + + return ncl; /* Return new cluster number or error status */ +} + +#endif /* !_FS_READONLY */ + + + + +#if _USE_FASTSEEK +/*-----------------------------------------------------------------------*/ +/* FAT handling - Convert offset into cluster with link map table */ +/*-----------------------------------------------------------------------*/ + +static +DWORD clmt_clust ( /* <2:Error, >=2:Cluster number */ + FIL* fp, /* Pointer to the file object */ + FSIZE_t ofs /* File offset to be converted to cluster# */ +) +{ + DWORD cl, ncl, *tbl; + FATFS *fs = fp->obj.fs; + + + tbl = fp->cltbl + 1; /* Top of CLMT */ + cl = (DWORD)(ofs / SS(fs) / fs->csize); /* Cluster order from top of the file */ + for (;;) { + ncl = *tbl++; /* Number of cluters in the fragment */ + if (ncl == 0) return 0; /* End of table? (error) */ + if (cl < ncl) break; /* In this fragment? */ + cl -= ncl; tbl++; /* Next fragment */ + } + return cl + *tbl; /* Return the cluster number */ +} + +#endif /* _USE_FASTSEEK */ + + + + +/*-----------------------------------------------------------------------*/ +/* Directory handling - Set directory index */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT dir_sdi ( /* FR_OK(0):succeeded, !=0:error */ + DIR* dp, /* Pointer to directory object */ + DWORD ofs /* Offset of directory table */ +) +{ + DWORD csz, clst; + FATFS *fs = dp->obj.fs; + + + if (ofs >= (DWORD)((_FS_EXFAT && fs->fs_type == FS_EXFAT) ? MAX_DIR_EX : MAX_DIR) || ofs % SZDIRE) { /* Check range of offset and alignment */ + return FR_INT_ERR; + } + dp->dptr = ofs; /* Set current offset */ + clst = dp->obj.sclust; /* Table start cluster (0:root) */ + if (clst == 0 && fs->fs_type >= FS_FAT32) { /* Replace cluster# 0 with root cluster# */ + clst = fs->dirbase; + if (_FS_EXFAT) dp->obj.stat = 0; /* exFAT: Root dir has an FAT chain */ + } + + if (clst == 0) { /* Static table (root-directory in FAT12/16) */ + if (ofs / SZDIRE >= fs->n_rootdir) return FR_INT_ERR; /* Is index out of range? */ + dp->sect = fs->dirbase; + + } else { /* Dynamic table (sub-directory or root-directory in FAT32+) */ + csz = (DWORD)fs->csize * SS(fs); /* Bytes per cluster */ + while (ofs >= csz) { /* Follow cluster chain */ + clst = get_fat(&dp->obj, clst); /* Get next cluster */ + if (clst == 0xFFFFFFFF) return FR_DISK_ERR; /* Disk error */ + if (clst < 2 || clst >= fs->n_fatent) return FR_INT_ERR; /* Reached to end of table or internal error */ + ofs -= csz; + } + dp->sect = clust2sect(fs, clst); + } + dp->clust = clst; /* Current cluster# */ + if (!dp->sect) return FR_INT_ERR; + dp->sect += ofs / SS(fs); /* Sector# of the directory entry */ + dp->dir = fs->win + (ofs % SS(fs)); /* Pointer to the entry in the win[] */ + + return FR_OK; +} + + + + +/*-----------------------------------------------------------------------*/ +/* Directory handling - Move directory table index next */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT dir_next ( /* FR_OK(0):succeeded, FR_NO_FILE:End of table, FR_DENIED:Could not stretch */ + DIR* dp, /* Pointer to the directory object */ + int stretch /* 0: Do not stretch table, 1: Stretch table if needed */ +) +{ + DWORD ofs, clst; + FATFS *fs = dp->obj.fs; +#if !_FS_READONLY + UINT n; +#endif + + ofs = dp->dptr + SZDIRE; /* Next entry */ + if (!dp->sect || ofs >= (DWORD)((_FS_EXFAT && fs->fs_type == FS_EXFAT) ? 
MAX_DIR_EX : MAX_DIR)) return FR_NO_FILE; /* Report EOT when offset has reached max value */ + + if (ofs % SS(fs) == 0) { /* Sector changed? */ + dp->sect++; /* Next sector */ + + if (!dp->clust) { /* Static table */ + if (ofs / SZDIRE >= fs->n_rootdir) { /* Report EOT if it reached end of static table */ + dp->sect = 0; return FR_NO_FILE; + } + } + else { /* Dynamic table */ + if ((ofs / SS(fs) & (fs->csize - 1)) == 0) { /* Cluster changed? */ + clst = get_fat(&dp->obj, dp->clust); /* Get next cluster */ + if (clst <= 1) return FR_INT_ERR; /* Internal error */ + if (clst == 0xFFFFFFFF) return FR_DISK_ERR; /* Disk error */ + if (clst >= fs->n_fatent) { /* Reached end of dynamic table */ +#if !_FS_READONLY + if (!stretch) { /* If no stretch, report EOT */ + dp->sect = 0; return FR_NO_FILE; + } + clst = create_chain(&dp->obj, dp->clust); /* Allocate a cluster */ + if (clst == 0) return FR_DENIED; /* No free cluster */ + if (clst == 1) return FR_INT_ERR; /* Internal error */ + if (clst == 0xFFFFFFFF) return FR_DISK_ERR; /* Disk error */ + /* Clean-up the stretched table */ + if (_FS_EXFAT) dp->obj.stat |= 4; /* The directory needs to be updated */ + if (sync_window(fs) != FR_OK) return FR_DISK_ERR; /* Flush disk access window */ + mem_set(fs->win, 0, SS(fs)); /* Clear window buffer */ + for (n = 0, fs->winsect = clust2sect(fs, clst); n < fs->csize; n++, fs->winsect++) { /* Fill the new cluster with 0 */ + fs->wflag = 1; + if (sync_window(fs) != FR_OK) return FR_DISK_ERR; + } + fs->winsect -= n; /* Restore window offset */ +#else + if (!stretch) dp->sect = 0; /* (this line is to suppress compiler warning) */ + dp->sect = 0; return FR_NO_FILE; /* Report EOT */ +#endif + } + dp->clust = clst; /* Initialize data for new cluster */ + dp->sect = clust2sect(fs, clst); + } + } + } + dp->dptr = ofs; /* Current entry */ + dp->dir = fs->win + ofs % SS(fs); /* Pointer to the entry in the win[] */ + + return FR_OK; +} + + + + +#if !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* Directory handling - Reserve a block of directory entries */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT dir_alloc ( /* FR_OK(0):succeeded, !=0:error */ + DIR* dp, /* Pointer to the directory object */ + UINT nent /* Number of contiguous entries to allocate */ +) +{ + FRESULT res; + UINT n; + FATFS *fs = dp->obj.fs; + + + res = dir_sdi(dp, 0); + if (res == FR_OK) { + n = 0; + do { + res = move_window(fs, dp->sect); + if (res != FR_OK) break; +#if _FS_EXFAT + if ((fs->fs_type == FS_EXFAT) ? (int)((dp->dir[XDIR_Type] & 0x80) == 0) : (int)(dp->dir[DIR_Name] == DDEM || dp->dir[DIR_Name] == 0)) { +#else + if (dp->dir[DIR_Name] == DDEM || dp->dir[DIR_Name] == 0) { +#endif + if (++n == nent) break; /* A block of contiguous free entries is found */ + } else { + n = 0; /* Not a blank entry. 
Restart to search */ + } + res = dir_next(dp, 1); + } while (res == FR_OK); /* Next entry with table stretch enabled */ + } + + if (res == FR_NO_FILE) res = FR_DENIED; /* No directory entry to allocate */ + return res; +} + +#endif /* !_FS_READONLY */ + + + + +/*-----------------------------------------------------------------------*/ +/* FAT: Directory handling - Load/Store start cluster number */ +/*-----------------------------------------------------------------------*/ + +static +DWORD ld_clust ( /* Returns the top cluster value of the SFN entry */ + FATFS* fs, /* Pointer to the fs object */ + const BYTE* dir /* Pointer to the key entry */ +) +{ + DWORD cl; + + cl = ld_word(dir + DIR_FstClusLO); + if (fs->fs_type == FS_FAT32) { + cl |= (DWORD)ld_word(dir + DIR_FstClusHI) << 16; + } + + return cl; +} + + +#if !_FS_READONLY +static +void st_clust ( + FATFS* fs, /* Pointer to the fs object */ + BYTE* dir, /* Pointer to the key entry */ + DWORD cl /* Value to be set */ +) +{ + st_word(dir + DIR_FstClusLO, (WORD)cl); + if (fs->fs_type == FS_FAT32) { + st_word(dir + DIR_FstClusHI, (WORD)(cl >> 16)); + } +} +#endif + + + +#if _USE_LFN != 0 +/*------------------------------------------------------------------------*/ +/* FAT-LFN: LFN handling */ +/*------------------------------------------------------------------------*/ +static +const BYTE LfnOfs[] = {1,3,5,7,9,14,16,18,20,22,24,28,30}; /* Offset of LFN characters in the directory entry */ + + +/*--------------------------------------------------------*/ +/* FAT-LFN: Compare a part of file name with an LFN entry */ +/*--------------------------------------------------------*/ +static +int cmp_lfn ( /* 1:matched, 0:not matched */ + const WCHAR* lfnbuf, /* Pointer to the LFN working buffer to be compared */ + BYTE* dir /* Pointer to the directory entry containing the part of LFN */ +) +{ + UINT i, s; + WCHAR wc, uc; + + + if (ld_word(dir + LDIR_FstClusLO) != 0) return 0; /* Check LDIR_FstClusLO */ + + i = ((dir[LDIR_Ord] & 0x3F) - 1) * 13; /* Offset in the LFN buffer */ + + for (wc = 1, s = 0; s < 13; s++) { /* Process all characters in the entry */ + uc = ld_word(dir + LfnOfs[s]); /* Pick an LFN character */ + if (wc) { + if (i >= _MAX_LFN || ff_wtoupper(uc) != ff_wtoupper(lfnbuf[i++])) { /* Compare it */ + return 0; /* Not matched */ + } + wc = uc; + } else { + if (uc != 0xFFFF) return 0; /* Check filler */ + } + } + + if ((dir[LDIR_Ord] & LLEF) && wc && lfnbuf[i]) return 0; /* Last segment matched but different length */ + + return 1; /* The part of LFN matched */ +} + + +#if _FS_MINIMIZE <= 1 || _FS_RPATH >= 2 || _USE_LABEL || _FS_EXFAT +/*-----------------------------------------------------*/ +/* FAT-LFN: Pick a part of file name from an LFN entry */ +/*-----------------------------------------------------*/ +static +int pick_lfn ( /* 1:succeeded, 0:buffer overflow or invalid LFN entry */ + WCHAR* lfnbuf, /* Pointer to the LFN working buffer */ + BYTE* dir /* Pointer to the LFN entry */ +) +{ + UINT i, s; + WCHAR wc, uc; + + + if (ld_word(dir + LDIR_FstClusLO) != 0) return 0; /* Check LDIR_FstClusLO is 0 */ + + i = ((dir[LDIR_Ord] & ~LLEF) - 1) * 13; /* Offset in the LFN buffer */ + + for (wc = 1, s = 0; s < 13; s++) { /* Process all characters in the entry */ + uc = ld_word(dir + LfnOfs[s]); /* Pick an LFN character */ + if (wc) { + if (i >= _MAX_LFN) return 0; /* Buffer overflow? 
*/ + lfnbuf[i++] = wc = uc; /* Store it */ + } else { + if (uc != 0xFFFF) return 0; /* Check filler */ + } + } + + if (dir[LDIR_Ord] & LLEF) { /* Put terminator if it is the last LFN part */ + if (i >= _MAX_LFN) return 0; /* Buffer overflow? */ + lfnbuf[i] = 0; + } + + return 1; /* The part of LFN is valid */ +} +#endif + + +#if !_FS_READONLY +/*-----------------------------------------*/ +/* FAT-LFN: Create an entry of LFN entries */ +/*-----------------------------------------*/ +static +void put_lfn ( + const WCHAR* lfn, /* Pointer to the LFN */ + BYTE* dir, /* Pointer to the LFN entry to be created */ + BYTE ord, /* LFN order (1-20) */ + BYTE sum /* Checksum of the corresponding SFN */ +) +{ + UINT i, s; + WCHAR wc; + + + dir[LDIR_Chksum] = sum; /* Set checksum */ + dir[LDIR_Attr] = AM_LFN; /* Set attribute. LFN entry */ + dir[LDIR_Type] = 0; + st_word(dir + LDIR_FstClusLO, 0); + + i = (ord - 1) * 13; /* Get offset in the LFN working buffer */ + s = wc = 0; + do { + if (wc != 0xFFFF) wc = lfn[i++]; /* Get an effective character */ + st_word(dir + LfnOfs[s], wc); /* Put it */ + if (wc == 0) wc = 0xFFFF; /* Padding characters for left locations */ + } while (++s < 13); + if (wc == 0xFFFF || !lfn[i]) ord |= LLEF; /* Last LFN part is the start of LFN sequence */ + dir[LDIR_Ord] = ord; /* Set the LFN order */ +} + +#endif /* !_FS_READONLY */ +#endif /* _USE_LFN != 0 */ + + + +#if _USE_LFN != 0 && !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* FAT-LFN: Create a Numbered SFN */ +/*-----------------------------------------------------------------------*/ + +static +void gen_numname ( + BYTE* dst, /* Pointer to the buffer to store numbered SFN */ + const BYTE* src, /* Pointer to SFN */ + const WCHAR* lfn, /* Pointer to LFN */ + UINT seq /* Sequence number */ +) +{ + BYTE ns[8], c; + UINT i, j; + WCHAR wc; + DWORD sr; + + + mem_cpy(dst, src, 11); + + if (seq > 5) { /* In case of many collisions, generate a hash number instead of sequential number */ + sr = seq; + while (*lfn) { /* Create a CRC */ + wc = *lfn++; + for (i = 0; i < 16; i++) { + sr = (sr << 1) + (wc & 1); + wc >>= 1; + if (sr & 0x10000) sr ^= 0x11021; + } + } + seq = (UINT)sr; + } + + /* itoa (hexdecimal) */ + i = 7; + do { + c = (BYTE)((seq % 16) + '0'); + if (c > '9') c += 7; + ns[i--] = c; + seq /= 16; + } while (seq); + ns[i] = '~'; + + /* Append the number */ + for (j = 0; j < i && dst[j] != ' '; j++) { + if (IsDBCS1(dst[j])) { + if (j == i - 1) break; + j++; + } + } + do { + dst[j++] = (i < 8) ? 
ns[i++] : ' '; + } while (j < 8); +} +#endif /* _USE_LFN != 0 && !_FS_READONLY */ + + + +#if _USE_LFN != 0 +/*-----------------------------------------------------------------------*/ +/* FAT-LFN: Calculate checksum of an SFN entry */ +/*-----------------------------------------------------------------------*/ + +static +BYTE sum_sfn ( + const BYTE* dir /* Pointer to the SFN entry */ +) +{ + BYTE sum = 0; + UINT n = 11; + + do { + sum = (sum >> 1) + (sum << 7) + *dir++; + } while (--n); + return sum; +} + +#endif /* _USE_LFN != 0 */ + + + +#if _FS_EXFAT +/*-----------------------------------------------------------------------*/ +/* exFAT: Checksum */ +/*-----------------------------------------------------------------------*/ + +static +WORD xdir_sum ( /* Get checksum of the directoly block */ + const BYTE* dir /* Directory entry block to be calculated */ +) +{ + UINT i, szblk; + WORD sum; + + + szblk = (dir[XDIR_NumSec] + 1) * SZDIRE; + for (i = sum = 0; i < szblk; i++) { + if (i == XDIR_SetSum) { /* Skip sum field */ + i++; + } else { + sum = ((sum & 1) ? 0x8000 : 0) + (sum >> 1) + dir[i]; + } + } + return sum; +} + + + +static +WORD xname_sum ( /* Get check sum (to be used as hash) of the name */ + const WCHAR* name /* File name to be calculated */ +) +{ + WCHAR chr; + WORD sum = 0; + + + while ((chr = *name++) != 0) { + chr = ff_wtoupper(chr); /* File name needs to be ignored case */ + sum = ((sum & 1) ? 0x8000 : 0) + (sum >> 1) + (chr & 0xFF); + sum = ((sum & 1) ? 0x8000 : 0) + (sum >> 1) + (chr >> 8); + } + return sum; +} + + +#if !_FS_READONLY && _USE_MKFS +static +DWORD xsum32 ( + BYTE dat, /* Data to be sumed */ + DWORD sum /* Previous value */ +) +{ + sum = ((sum & 1) ? 0x80000000 : 0) + (sum >> 1) + dat; + return sum; +} +#endif + + +#if _FS_MINIMIZE <= 1 || _FS_RPATH >= 2 +/*------------------------------------------------------*/ +/* exFAT: Get object information from a directory block */ +/*------------------------------------------------------*/ + +static +void get_xdir_info ( + BYTE* dirb, /* Pointer to the direcotry entry block 85+C0+C1s */ + FILINFO* fno /* Buffer to store the extracted file information */ +) +{ + UINT di, si; + WCHAR w; +#if !_LFN_UNICODE + UINT nc; +#endif + + /* Get file name */ + di = 0; +#if _LFN_UNICODE + for (si = SZDIRE * 2; di < dirb[XDIR_NumName]; si += 2, di++) { + if ((si % SZDIRE) == 0) si += 2; /* Skip entry type field */ + w = ld_word(dirb + si); /* Get a character */ + if (di >= _MAX_LFN) { di = 0; break; } /* Buffer overflow --> inaccessible object name */ + fno->fname[di] = w; /* Store it */ + } +#else + for (si = SZDIRE * 2, nc = 0; nc < dirb[XDIR_NumName]; si += 2, nc++) { + if ((si % SZDIRE) == 0) si += 2; /* Skip entry type field */ + w = ff_convert(ld_word(dirb + si), 0); /* Get a character and Unicode -> OEM */ + if (_DF1S && w >= 0x100) { /* Is it a double byte char? (always false at SBCS cfg) */ + fno->fname[di++] = (char)(w >> 8); /* Put 1st byte of the DBC */ + } + if (w == 0 || di >= _MAX_LFN) { di = 0; break; } /* Invalid char or buffer overflow --> inaccessible object name */ + fno->fname[di++] = (char)w; + } +#endif + if (di == 0) fno->fname[di++] = '?'; /* Inaccessible object name? */ + fno->fname[di] = 0; /* Terminate file name */ + + fno->altname[0] = 0; /* No SFN */ + fno->fattrib = dirb[XDIR_Attr]; /* Attribute */ + fno->fsize = (fno->fattrib & AM_DIR) ? 
0 : ld_qword(dirb + XDIR_FileSize); /* Size */ + fno->ftime = ld_word(dirb + XDIR_ModTime + 0); /* Time */ + fno->fdate = ld_word(dirb + XDIR_ModTime + 2); /* Date */ +} + +#endif /* _FS_MINIMIZE <= 1 || _FS_RPATH >= 2 */ + + +/*-----------------------------------*/ +/* exFAT: Get a directry entry block */ +/*-----------------------------------*/ + +static +FRESULT load_xdir ( /* FR_INT_ERR: invalid entry block */ + DIR* dp /* Pointer to the reading direcotry object pointing the 85 entry */ +) +{ + FRESULT res; + UINT i, sz_ent; + BYTE* dirb = dp->obj.fs->dirbuf; /* Pointer to the on-memory direcotry entry block 85+C0+C1s */ + + + /* Load 85 entry */ + res = move_window(dp->obj.fs, dp->sect); + if (res != FR_OK) return res; + if (dp->dir[XDIR_Type] != 0x85) return FR_INT_ERR; + mem_cpy(dirb + 0, dp->dir, SZDIRE); + sz_ent = (dirb[XDIR_NumSec] + 1) * SZDIRE; + if (sz_ent < 3 * SZDIRE || sz_ent > 19 * SZDIRE) return FR_INT_ERR; + + /* Load C0 entry */ + res = dir_next(dp, 0); + if (res != FR_OK) return res; + res = move_window(dp->obj.fs, dp->sect); + if (res != FR_OK) return res; + if (dp->dir[XDIR_Type] != 0xC0) return FR_INT_ERR; + mem_cpy(dirb + SZDIRE, dp->dir, SZDIRE); + if (MAXDIRB(dirb[XDIR_NumName]) > sz_ent) return FR_INT_ERR; + + /* Load C1 entries */ + i = SZDIRE * 2; /* C1 offset */ + do { + res = dir_next(dp, 0); + if (res != FR_OK) return res; + res = move_window(dp->obj.fs, dp->sect); + if (res != FR_OK) return res; + if (dp->dir[XDIR_Type] != 0xC1) return FR_INT_ERR; + if (i < MAXDIRB(_MAX_LFN)) mem_cpy(dirb + i, dp->dir, SZDIRE); + } while ((i += SZDIRE) < sz_ent); + + /* Sanity check (do it when accessible object name) */ + if (i <= MAXDIRB(_MAX_LFN)) { + if (xdir_sum(dirb) != ld_word(dirb + XDIR_SetSum)) return FR_INT_ERR; + } + return FR_OK; +} + + +#if !_FS_READONLY || _FS_RPATH != 0 +/*------------------------------------------------*/ +/* exFAT: Load the object's directory entry block */ +/*------------------------------------------------*/ +static +FRESULT load_obj_dir ( + DIR* dp, /* Blank directory object to be used to access containing direcotry */ + const _FDID* obj /* Object with its containing directory information */ +) +{ + FRESULT res; + + /* Open object containing directory */ + dp->obj.fs = obj->fs; + dp->obj.sclust = obj->c_scl; + dp->obj.stat = (BYTE)obj->c_size; + dp->obj.objsize = obj->c_size & 0xFFFFFF00; + dp->blk_ofs = obj->c_ofs; + + res = dir_sdi(dp, dp->blk_ofs); /* Goto object's entry block */ + if (res == FR_OK) { + res = load_xdir(dp); /* Load the object's entry block */ + } + return res; +} +#endif + + +#if !_FS_READONLY +/*-----------------------------------------------*/ +/* exFAT: Store the directory block to the media */ +/*-----------------------------------------------*/ +static +FRESULT store_xdir ( + DIR* dp /* Pointer to the direcotry object */ +) +{ + FRESULT res; + UINT nent; + BYTE* dirb = dp->obj.fs->dirbuf; /* Pointer to the direcotry entry block 85+C0+C1s */ + + /* Create set sum */ + st_word(dirb + XDIR_SetSum, xdir_sum(dirb)); + nent = dirb[XDIR_NumSec] + 1; + + /* Store the set of directory to the volume */ + res = dir_sdi(dp, dp->blk_ofs); + while (res == FR_OK) { + res = move_window(dp->obj.fs, dp->sect); + if (res != FR_OK) break; + mem_cpy(dp->dir, dirb, SZDIRE); + dp->obj.fs->wflag = 1; + if (--nent == 0) break; + dirb += SZDIRE; + res = dir_next(dp, 0); + } + return (res == FR_OK || res == FR_DISK_ERR) ? 
res : FR_INT_ERR; +} + + + +/*-------------------------------------------*/ +/* exFAT: Create a new directory enrty block */ +/*-------------------------------------------*/ + +static +void create_xdir ( + BYTE* dirb, /* Pointer to the direcotry entry block buffer */ + const WCHAR* lfn /* Pointer to the nul terminated file name */ +) +{ + UINT i; + BYTE nb, nc; + WCHAR chr; + + + /* Create 85+C0 entry */ + mem_set(dirb, 0, 2 * SZDIRE); + dirb[XDIR_Type] = 0x85; + dirb[XDIR_Type + SZDIRE] = 0xC0; + + /* Create C1 entries */ + nc = 0; nb = 1; chr = 1; i = SZDIRE * 2; + do { + dirb[i++] = 0xC1; dirb[i++] = 0; /* Entry type C1 */ + do { /* Fill name field */ + if (chr && (chr = lfn[nc]) != 0) nc++; /* Get a character if exist */ + st_word(dirb + i, chr); /* Store it */ + } while ((i += 2) % SZDIRE != 0); + nb++; + } while (lfn[nc]); /* Fill next entry if any char follows */ + + dirb[XDIR_NumName] = nc; /* Set name length */ + dirb[XDIR_NumSec] = nb; /* Set block length */ + st_word(dirb + XDIR_NameHash, xname_sum(lfn)); /* Set name hash */ +} + +#endif /* !_FS_READONLY */ +#endif /* _FS_EXFAT */ + + + +#if _FS_MINIMIZE <= 1 || _FS_RPATH >= 2 || _USE_LABEL || _FS_EXFAT +/*-----------------------------------------------------------------------*/ +/* Read an object from the directory */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT dir_read ( + DIR* dp, /* Pointer to the directory object */ + int vol /* Filtered by 0:file/directory or 1:volume label */ +) +{ + FRESULT res = FR_NO_FILE; + FATFS *fs = dp->obj.fs; + BYTE a, c; +#if _USE_LFN != 0 + BYTE ord = 0xFF, sum = 0xFF; +#endif + + while (dp->sect) { + res = move_window(fs, dp->sect); + if (res != FR_OK) break; + c = dp->dir[DIR_Name]; /* Test for the entry type */ + if (c == 0) { + res = FR_NO_FILE; break; /* Reached to end of the directory */ + } +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { /* On the exFAT volume */ + if (_USE_LABEL && vol) { + if (c == 0x83) break; /* Volume label entry? */ + } else { + if (c == 0x85) { /* Start of the file entry block? */ + dp->blk_ofs = dp->dptr; /* Get location of the block */ + res = load_xdir(dp); /* Load the entry block */ + if (res == FR_OK) { + dp->obj.attr = fs->dirbuf[XDIR_Attr] & AM_MASK; /* Get attribute */ + } + break; + } + } + } else +#endif + { /* On the FAT12/16/32 volume */ + dp->obj.attr = a = dp->dir[DIR_Attr] & AM_MASK; /* Get attribute */ +#if _USE_LFN != 0 /* LFN configuration */ + if (c == DDEM || c == '.' || (int)((a & ~AM_ARC) == AM_VOL) != vol) { /* An entry without valid data */ + ord = 0xFF; + } else { + if (a == AM_LFN) { /* An LFN entry is found */ + if (c & LLEF) { /* Is it start of an LFN sequence? */ + sum = dp->dir[LDIR_Chksum]; + c &= (BYTE)~LLEF; ord = c; + dp->blk_ofs = dp->dptr; + } + /* Check LFN validity and capture it */ + ord = (c == ord && sum == dp->dir[LDIR_Chksum] && pick_lfn(fs->lfnbuf, dp->dir)) ? ord - 1 : 0xFF; + } else { /* An SFN entry is found */ + if (ord || sum != sum_sfn(dp->dir)) { /* Is there a valid LFN? */ + dp->blk_ofs = 0xFFFFFFFF; /* It has no LFN. */ + } + break; + } + } +#else /* Non LFN configuration */ + if (c != DDEM && c != '.' && a != AM_LFN && (int)((a & ~AM_ARC) == AM_VOL) == vol) { /* Is it a valid entry? 
*/ + break; + } +#endif + } + res = dir_next(dp, 0); /* Next entry */ + if (res != FR_OK) break; + } + + if (res != FR_OK) dp->sect = 0; /* Terminate the read operation on error or EOT */ + return res; +} + +#endif /* _FS_MINIMIZE <= 1 || _USE_LABEL || _FS_RPATH >= 2 */ + + + +/*-----------------------------------------------------------------------*/ +/* Directory handling - Find an object in the directory */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT dir_find ( /* FR_OK(0):succeeded, !=0:error */ + DIR* dp /* Pointer to the directory object with the file name */ +) +{ + FRESULT res; + FATFS *fs = dp->obj.fs; + BYTE c; +#if _USE_LFN != 0 + BYTE a, ord, sum; +#endif + + res = dir_sdi(dp, 0); /* Rewind directory object */ + if (res != FR_OK) return res; +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { /* On the exFAT volume */ + BYTE nc; + UINT di, ni; + WORD hash = xname_sum(fs->lfnbuf); /* Hash value of the name to find */ + + while ((res = dir_read(dp, 0)) == FR_OK) { /* Read an item */ +#if _MAX_LFN < 255 + if (fs->dirbuf[XDIR_NumName] > _MAX_LFN) continue; /* Skip comparison if inaccessible object name */ +#endif + if (ld_word(fs->dirbuf + XDIR_NameHash) != hash) continue; /* Skip comparison if hash mismatched */ + for (nc = fs->dirbuf[XDIR_NumName], di = SZDIRE * 2, ni = 0; nc; nc--, di += 2, ni++) { /* Compare the name */ + if ((di % SZDIRE) == 0) di += 2; + if (ff_wtoupper(ld_word(fs->dirbuf + di)) != ff_wtoupper(fs->lfnbuf[ni])) break; + } + if (nc == 0 && !fs->lfnbuf[ni]) break; /* Name matched? */ + } + return res; + } +#endif + /* On the FAT12/16/32 volume */ +#if _USE_LFN != 0 + ord = sum = 0xFF; dp->blk_ofs = 0xFFFFFFFF; /* Reset LFN sequence */ +#endif + do { + res = move_window(fs, dp->sect); + if (res != FR_OK) break; + c = dp->dir[DIR_Name]; + if (c == 0) { res = FR_NO_FILE; break; } /* Reached to end of table */ +#if _USE_LFN != 0 /* LFN configuration */ + dp->obj.attr = a = dp->dir[DIR_Attr] & AM_MASK; + if (c == DDEM || ((a & AM_VOL) && a != AM_LFN)) { /* An entry without valid data */ + ord = 0xFF; dp->blk_ofs = 0xFFFFFFFF; /* Reset LFN sequence */ + } else { + if (a == AM_LFN) { /* An LFN entry is found */ + if (!(dp->fn[NSFLAG] & NS_NOLFN)) { + if (c & LLEF) { /* Is it start of LFN sequence? */ + sum = dp->dir[LDIR_Chksum]; + c &= (BYTE)~LLEF; ord = c; /* LFN start order */ + dp->blk_ofs = dp->dptr; /* Start offset of LFN */ + } + /* Check validity of the LFN entry and compare it with given name */ + ord = (c == ord && sum == dp->dir[LDIR_Chksum] && cmp_lfn(fs->lfnbuf, dp->dir)) ? ord - 1 : 0xFF; + } + } else { /* An SFN entry is found */ + if (!ord && sum == sum_sfn(dp->dir)) break; /* LFN matched? */ + if (!(dp->fn[NSFLAG] & NS_LOSS) && !mem_cmp(dp->dir, dp->fn, 11)) break; /* SFN matched? */ + ord = 0xFF; dp->blk_ofs = 0xFFFFFFFF; /* Reset LFN sequence */ + } + } +#else /* Non LFN configuration */ + dp->obj.attr = dp->dir[DIR_Attr] & AM_MASK; + if (!(dp->dir[DIR_Attr] & AM_VOL) && !mem_cmp(dp->dir, dp->fn, 11)) break; /* Is it a valid entry? 
*/ +#endif + res = dir_next(dp, 0); /* Next entry */ + } while (res == FR_OK); + + return res; +} + + + + +#if !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* Register an object to the directory */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT dir_register ( /* FR_OK:succeeded, FR_DENIED:no free entry or too many SFN collision, FR_DISK_ERR:disk error */ + DIR* dp /* Target directory with object name to be created */ +) +{ + FRESULT res; + FATFS *fs = dp->obj.fs; +#if _USE_LFN != 0 /* LFN configuration */ + UINT n, nlen, nent; + BYTE sn[12], sum; + + + if (dp->fn[NSFLAG] & (NS_DOT | NS_NONAME)) return FR_INVALID_NAME; /* Check name validity */ + for (nlen = 0; fs->lfnbuf[nlen]; nlen++) ; /* Get lfn length */ + +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { /* On the exFAT volume */ + DIR dj; + + nent = (nlen + 14) / 15 + 2; /* Number of entries to allocate (85+C0+C1s) */ + res = dir_alloc(dp, nent); /* Allocate entries */ + if (res != FR_OK) return res; + dp->blk_ofs = dp->dptr - SZDIRE * (nent - 1); /* Set the allocated entry block offset */ + + if (dp->obj.sclust != 0 && (dp->obj.stat & 4)) { /* Has the sub-directory been stretched? */ + dp->obj.objsize += (DWORD)fs->csize * SS(fs); /* Increase the directory size by cluster size */ + res = fill_first_frag(&dp->obj); /* Fill first fragment on the FAT if needed */ + if (res != FR_OK) return res; + res = fill_last_frag(&dp->obj, dp->clust, 0xFFFFFFFF); /* Fill last fragment on the FAT if needed */ + if (res != FR_OK) return res; + res = load_obj_dir(&dj, &dp->obj); /* Load the object status */ + if (res != FR_OK) return res; + st_qword(fs->dirbuf + XDIR_FileSize, dp->obj.objsize); /* Update the allocation status */ + st_qword(fs->dirbuf + XDIR_ValidFileSize, dp->obj.objsize); + fs->dirbuf[XDIR_GenFlags] = dp->obj.stat | 1; + res = store_xdir(&dj); /* Store the object status */ + if (res != FR_OK) return res; + } + + create_xdir(fs->dirbuf, fs->lfnbuf); /* Create on-memory directory block to be written later */ + return FR_OK; + } +#endif + /* On the FAT12/16/32 volume */ + mem_cpy(sn, dp->fn, 12); + if (sn[NSFLAG] & NS_LOSS) { /* When LFN is out of 8.3 format, generate a numbered name */ + dp->fn[NSFLAG] = NS_NOLFN; /* Find only SFN */ + for (n = 1; n < 100; n++) { + gen_numname(dp->fn, sn, fs->lfnbuf, n); /* Generate a numbered name */ + res = dir_find(dp); /* Check if the name collides with existing SFN */ + if (res != FR_OK) break; + } + if (n == 100) return FR_DENIED; /* Abort if too many collisions */ + if (res != FR_NO_FILE) return res; /* Abort if the result is other than 'not collided' */ + dp->fn[NSFLAG] = sn[NSFLAG]; + } + + /* Create an SFN with/without LFNs. */ + nent = (sn[NSFLAG] & NS_LFN) ? 
(nlen + 12) / 13 + 1 : 1; /* Number of entries to allocate */ + res = dir_alloc(dp, nent); /* Allocate entries */ + if (res == FR_OK && --nent) { /* Set LFN entry if needed */ + res = dir_sdi(dp, dp->dptr - nent * SZDIRE); + if (res == FR_OK) { + sum = sum_sfn(dp->fn); /* Checksum value of the SFN tied to the LFN */ + do { /* Store LFN entries in bottom first */ + res = move_window(fs, dp->sect); + if (res != FR_OK) break; + put_lfn(fs->lfnbuf, dp->dir, (BYTE)nent, sum); + fs->wflag = 1; + res = dir_next(dp, 0); /* Next entry */ + } while (res == FR_OK && --nent); + } + } + +#else /* Non LFN configuration */ + res = dir_alloc(dp, 1); /* Allocate an entry for SFN */ + +#endif + + /* Set SFN entry */ + if (res == FR_OK) { + res = move_window(fs, dp->sect); + if (res == FR_OK) { + mem_set(dp->dir, 0, SZDIRE); /* Clean the entry */ + mem_cpy(dp->dir + DIR_Name, dp->fn, 11); /* Put SFN */ +#if _USE_LFN != 0 + dp->dir[DIR_NTres] = dp->fn[NSFLAG] & (NS_BODY | NS_EXT); /* Put NT flag */ +#endif + fs->wflag = 1; + } + } + + return res; +} + +#endif /* !_FS_READONLY */ + + + +#if !_FS_READONLY && _FS_MINIMIZE == 0 +/*-----------------------------------------------------------------------*/ +/* Remove an object from the directory */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT dir_remove ( /* FR_OK:Succeeded, FR_DISK_ERR:A disk error */ + DIR* dp /* Directory object pointing the entry to be removed */ +) +{ + FRESULT res; + FATFS *fs = dp->obj.fs; +#if _USE_LFN != 0 /* LFN configuration */ + DWORD last = dp->dptr; + + res = (dp->blk_ofs == 0xFFFFFFFF) ? FR_OK : dir_sdi(dp, dp->blk_ofs); /* Goto top of the entry block if LFN is exist */ + if (res == FR_OK) { + do { + res = move_window(fs, dp->sect); + if (res != FR_OK) break; + /* Mark an entry 'deleted' */ + if (_FS_EXFAT && fs->fs_type == FS_EXFAT) { /* On the exFAT volume */ + dp->dir[XDIR_Type] &= 0x7F; + } else { /* On the FAT12/16/32 volume */ + dp->dir[DIR_Name] = DDEM; + } + fs->wflag = 1; + if (dp->dptr >= last) break; /* If reached last entry then all entries of the object has been deleted. 
*/ + res = dir_next(dp, 0); /* Next entry */ + } while (res == FR_OK); + if (res == FR_NO_FILE) res = FR_INT_ERR; + } +#else /* Non LFN configuration */ + + res = move_window(fs, dp->sect); + if (res == FR_OK) { + dp->dir[DIR_Name] = DDEM; + fs->wflag = 1; + } +#endif + + return res; +} + +#endif /* !_FS_READONLY && _FS_MINIMIZE == 0 */ + + + +#if _FS_MINIMIZE <= 1 || _FS_RPATH >= 2 +/*-----------------------------------------------------------------------*/ +/* Get file information from directory entry */ +/*-----------------------------------------------------------------------*/ + +static +void get_fileinfo ( /* No return code */ + DIR* dp, /* Pointer to the directory object */ + FILINFO* fno /* Pointer to the file information to be filled */ +) +{ + UINT i, j; + TCHAR c; + DWORD tm; +#if _USE_LFN != 0 + WCHAR w, lfv; + FATFS *fs = dp->obj.fs; +#endif + + + fno->fname[0] = 0; /* Invaidate file info */ + if (!dp->sect) return; /* Exit if read pointer has reached end of directory */ + +#if _USE_LFN != 0 /* LFN configuration */ +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { /* On the exFAT volume */ + get_xdir_info(fs->dirbuf, fno); + return; + } else +#endif + { /* On the FAT12/16/32 volume */ + if (dp->blk_ofs != 0xFFFFFFFF) { /* Get LFN if available */ + i = j = 0; + while ((w = fs->lfnbuf[j++]) != 0) { /* Get an LFN character */ +#if !_LFN_UNICODE + w = ff_convert(w, 0); /* Unicode -> OEM */ + if (w == 0) { i = 0; break; } /* No LFN if it could not be converted */ + if (_DF1S && w >= 0x100) { /* Put 1st byte if it is a DBC (always false at SBCS cfg) */ + fno->fname[i++] = (char)(w >> 8); + } +#endif + if (i >= _MAX_LFN) { i = 0; break; } /* No LFN if buffer overflow */ + fno->fname[i++] = (TCHAR)w; + } + fno->fname[i] = 0; /* Terminate the LFN */ + } + } + + i = j = 0; + lfv = fno->fname[i]; /* LFN is exist if non-zero */ + while (i < 11) { /* Copy name body and extension */ + c = (TCHAR)dp->dir[i++]; + if (c == ' ') continue; /* Skip padding spaces */ + if (c == RDDEM) c = (TCHAR)DDEM; /* Restore replaced DDEM character */ + if (i == 9) { /* Insert a . if extension is exist */ + if (!lfv) fno->fname[j] = '.'; + fno->altname[j++] = '.'; + } +#if _LFN_UNICODE + if (IsDBCS1(c) && i != 8 && i != 11 && IsDBCS2(dp->dir[i])) { + c = c << 8 | dp->dir[i++]; + } + c = ff_convert(c, 1); /* OEM -> Unicode */ + if (!c) c = '?'; +#endif + fno->altname[j] = c; + if (!lfv) { + if (IsUpper(c) && (dp->dir[DIR_NTres] & ((i >= 9) ? NS_EXT : NS_BODY))) { + c += 0x20; /* To lower */ + } + fno->fname[j] = c; + } + j++; + } + if (!lfv) { + fno->fname[j] = 0; + if (!dp->dir[DIR_NTres]) j = 0; /* Altname is no longer needed if neither LFN nor case info is exist. */ + } + fno->altname[j] = 0; /* Terminate the SFN */ + +#else /* Non-LFN configuration */ + i = j = 0; + while (i < 11) { /* Copy name body and extension */ + c = (TCHAR)dp->dir[i++]; + if (c == ' ') continue; /* Skip padding spaces */ + if (c == RDDEM) c = (TCHAR)DDEM; /* Restore replaced DDEM character */ + if (i == 9) fno->fname[j++] = '.'; /* Insert a . 
if extension is exist */ + fno->fname[j++] = c; + } + fno->fname[j] = 0; +#endif + + fno->fattrib = dp->dir[DIR_Attr]; /* Attribute */ + fno->fsize = ld_dword(dp->dir + DIR_FileSize); /* Size */ + tm = ld_dword(dp->dir + DIR_ModTime); /* Timestamp */ + fno->ftime = (WORD)tm; fno->fdate = (WORD)(tm >> 16); +} + +#endif /* _FS_MINIMIZE <= 1 || _FS_RPATH >= 2 */ + + + +#if _USE_FIND && _FS_MINIMIZE <= 1 +/*-----------------------------------------------------------------------*/ +/* Pattern matching */ +/*-----------------------------------------------------------------------*/ + +static +WCHAR get_achar ( /* Get a character and advances ptr 1 or 2 */ + const TCHAR** ptr /* Pointer to pointer to the SBCS/DBCS/Unicode string */ +) +{ +#if !_LFN_UNICODE + WCHAR chr; + + chr = (BYTE)*(*ptr)++; /* Get a byte */ + if (IsLower(chr)) chr -= 0x20; /* To upper ASCII char */ +#ifdef _EXCVT + if (chr >= 0x80) chr = ExCvt[chr - 0x80]; /* To upper SBCS extended char */ +#else + if (IsDBCS1(chr) && IsDBCS2(**ptr)) { /* Get DBC 2nd byte if needed */ + chr = chr << 8 | (BYTE)*(*ptr)++; + } +#endif + return chr; +#else + return ff_wtoupper(*(*ptr)++); /* Get a word and to upper */ +#endif +} + + +static +int pattern_matching ( /* 0:not matched, 1:matched */ + const TCHAR* pat, /* Matching pattern */ + const TCHAR* nam, /* String to be tested */ + int skip, /* Number of pre-skip chars (number of ?s) */ + int inf /* Infinite search (* specified) */ +) +{ + const TCHAR *pp, *np; + WCHAR pc, nc; + int nm, nx; + + + while (skip--) { /* Pre-skip name chars */ + if (!get_achar(&nam)) return 0; /* Branch mismatched if less name chars */ + } + if (!*pat && inf) return 1; /* (short circuit) */ + + do { + pp = pat; np = nam; /* Top of pattern and name to match */ + for (;;) { + if (*pp == '?' || *pp == '*') { /* Wildcard? */ + nm = nx = 0; + do { /* Analyze the wildcard chars */ + if (*pp++ == '?') nm++; else nx = 1; + } while (*pp == '?' || *pp == '*'); + if (pattern_matching(pp, np, nm, nx)) return 1; /* Test new branch (recurs upto number of wildcard blocks in the pattern) */ + nc = *np; break; /* Branch mismatched */ + } + pc = get_achar(&pp); /* Get a pattern char */ + nc = get_achar(&np); /* Get a name char */ + if (pc != nc) break; /* Branch mismatched? */ + if (pc == 0) return 1; /* Branch matched? 
(matched at end of both strings) */ + } + get_achar(&nam); /* nam++ */ + } while (inf && nc); /* Retry until end of name if infinite search is specified */ + + return 0; +} + +#endif /* _USE_FIND && _FS_MINIMIZE <= 1 */ + + + +/*-----------------------------------------------------------------------*/ +/* Pick a top segment and create the object name in directory form */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT create_name ( /* FR_OK: successful, FR_INVALID_NAME: could not create */ + DIR* dp, /* Pointer to the directory object */ + const TCHAR** path /* Pointer to pointer to the segment in the path string */ +) +{ +#if _USE_LFN != 0 /* LFN configuration */ + BYTE b, cf; + WCHAR w, *lfn; + UINT i, ni, si, di; + const TCHAR *p; + + /* Create LFN in Unicode */ + p = *path; lfn = dp->obj.fs->lfnbuf; si = di = 0; + for (;;) { + w = p[si++]; /* Get a character */ + if (w < ' ') break; /* Break if end of the path name */ + if (w == '/' || w == '\\') { /* Break if a separator is found */ + while (p[si] == '/' || p[si] == '\\') si++; /* Skip duplicated separator if exist */ + break; + } + if (di >= _MAX_LFN) return FR_INVALID_NAME; /* Reject too long name */ +#if !_LFN_UNICODE + w &= 0xFF; + if (IsDBCS1(w)) { /* Check if it is a DBC 1st byte (always false on SBCS cfg) */ + b = (BYTE)p[si++]; /* Get 2nd byte */ + w = (w << 8) + b; /* Create a DBC */ + if (!IsDBCS2(b)) return FR_INVALID_NAME; /* Reject invalid sequence */ + } + w = ff_convert(w, 1); /* Convert ANSI/OEM to Unicode */ + if (!w) return FR_INVALID_NAME; /* Reject invalid code */ +#endif + if (w < 0x80 && chk_chr("\"*:<>\?|\x7F", w)) return FR_INVALID_NAME; /* Reject illegal characters for LFN */ + lfn[di++] = w; /* Store the Unicode character */ + } + *path = &p[si]; /* Return pointer to the next segment */ + cf = (w < ' ') ? NS_LAST : 0; /* Set last segment flag if end of the path */ +#if _FS_RPATH != 0 + if ((di == 1 && lfn[di - 1] == '.') || + (di == 2 && lfn[di - 1] == '.' && lfn[di - 2] == '.')) { /* Is this segment a dot name? */ + lfn[di] = 0; + for (i = 0; i < 11; i++) /* Create dot name for SFN entry */ + dp->fn[i] = (i < di) ? '.' : ' '; + dp->fn[i] = cf | NS_DOT; /* This is a dot entry */ + return FR_OK; + } +#endif + while (di) { /* Snip off trailing spaces and dots if exist */ + w = lfn[di - 1]; + if (w != ' ' && w != '.') break; + di--; + } + lfn[di] = 0; /* LFN is created */ + if (di == 0) return FR_INVALID_NAME; /* Reject nul name */ + + /* Create SFN in directory form */ + mem_set(dp->fn, ' ', 11); + for (si = 0; lfn[si] == ' ' || lfn[si] == '.'; si++) ; /* Strip leading spaces and dots */ + if (si) cf |= NS_LOSS | NS_LFN; + while (di && lfn[di - 1] != '.') di--; /* Find extension (di<=si: no extension) */ + + i = b = 0; ni = 8; + for (;;) { + w = lfn[si++]; /* Get an LFN character */ + if (!w) break; /* Break on end of the LFN */ + if (w == ' ' || (w == '.' 
&& si != di)) { /* Remove spaces and dots */ + cf |= NS_LOSS | NS_LFN; continue; + } + + if (i >= ni || si == di) { /* Extension or end of SFN */ + if (ni == 11) { /* Long extension */ + cf |= NS_LOSS | NS_LFN; break; + } + if (si != di) cf |= NS_LOSS | NS_LFN; /* Out of 8.3 format */ + if (si > di) break; /* No extension */ + si = di; i = 8; ni = 11; /* Enter extension section */ + b <<= 2; continue; + } + + if (w >= 0x80) { /* Non ASCII character */ +#ifdef _EXCVT + w = ff_convert(w, 0); /* Unicode -> OEM code */ + if (w) w = ExCvt[w - 0x80]; /* Convert extended character to upper (SBCS) */ +#else + w = ff_convert(ff_wtoupper(w), 0); /* Upper converted Unicode -> OEM code */ +#endif + cf |= NS_LFN; /* Force create LFN entry */ + } + + if (_DF1S && w >= 0x100) { /* Is this DBC? (always false at SBCS cfg) */ + if (i >= ni - 1) { + cf |= NS_LOSS | NS_LFN; i = ni; continue; + } + dp->fn[i++] = (BYTE)(w >> 8); + } else { /* SBC */ + if (!w || chk_chr("+,;=[]", w)) { /* Replace illegal characters for SFN */ + w = '_'; cf |= NS_LOSS | NS_LFN;/* Lossy conversion */ + } else { + if (IsUpper(w)) { /* ASCII large capital */ + b |= 2; + } else { + if (IsLower(w)) { /* ASCII small capital */ + b |= 1; w -= 0x20; + } + } + } + } + dp->fn[i++] = (BYTE)w; + } + + if (dp->fn[0] == DDEM) dp->fn[0] = RDDEM; /* If the first character collides with DDEM, replace it with RDDEM */ + + if (ni == 8) b <<= 2; + if ((b & 0x0C) == 0x0C || (b & 0x03) == 0x03) cf |= NS_LFN; /* Create LFN entry when there are composite capitals */ + if (!(cf & NS_LFN)) { /* When LFN is in 8.3 format without extended character, NT flags are created */ + if ((b & 0x03) == 0x01) cf |= NS_EXT; /* NT flag (Extension has only small capital) */ + if ((b & 0x0C) == 0x04) cf |= NS_BODY; /* NT flag (Filename has only small capital) */ + } + + dp->fn[NSFLAG] = cf; /* SFN is created */ + + return FR_OK; + + +#else /* _USE_LFN != 0 : Non-LFN configuration */ + BYTE c, d, *sfn; + UINT ni, si, i; + const char *p; + + /* Create file name in directory form */ + p = *path; sfn = dp->fn; + mem_set(sfn, ' ', 11); + si = i = 0; ni = 8; +#if _FS_RPATH != 0 + if (p[si] == '.') { /* Is this a dot entry? */ + for (;;) { + c = (BYTE)p[si++]; + if (c != '.' || si >= 3) break; + sfn[i++] = c; + } + if (c != '/' && c != '\\' && c > ' ') return FR_INVALID_NAME; + *path = p + si; /* Return pointer to the next segment */ + sfn[NSFLAG] = (c <= ' ') ? NS_LAST | NS_DOT : NS_DOT; /* Set last segment flag if end of the path */ + return FR_OK; + } +#endif + for (;;) { + c = (BYTE)p[si++]; + if (c <= ' ') break; /* Break if end of the path name */ + if (c == '/' || c == '\\') { /* Break if a separator is found */ + while (p[si] == '/' || p[si] == '\\') si++; /* Skip duplicated separator if exist */ + break; + } + if (c == '.' || i >= ni) { /* End of body or over size? */ + if (ni == 11 || c != '.') return FR_INVALID_NAME; /* Over size or invalid dot */ + i = 8; ni = 11; /* Goto extension */ + continue; + } + if (c >= 0x80) { /* Extended character? */ +#ifdef _EXCVT + c = ExCvt[c - 0x80]; /* To upper extended characters (SBCS cfg) */ +#else +#if !_DF1S + return FR_INVALID_NAME; /* Reject extended characters (ASCII only cfg) */ +#endif +#endif + } + if (IsDBCS1(c)) { /* Check if it is a DBC 1st byte (always false at SBCS cfg.) 
*/ + d = (BYTE)p[si++]; /* Get 2nd byte */ + if (!IsDBCS2(d) || i >= ni - 1) return FR_INVALID_NAME; /* Reject invalid DBC */ + sfn[i++] = c; + sfn[i++] = d; + } else { /* SBC */ + if (chk_chr("\"*+,:;<=>\?[]|\x7F", c)) return FR_INVALID_NAME; /* Reject illegal chrs for SFN */ + if (IsLower(c)) c -= 0x20; /* To upper */ + sfn[i++] = c; + } + } + *path = p + si; /* Return pointer to the next segment */ + if (i == 0) return FR_INVALID_NAME; /* Reject nul string */ + + if (sfn[0] == DDEM) sfn[0] = RDDEM; /* If the first character collides with DDEM, replace it with RDDEM */ + sfn[NSFLAG] = (c <= ' ') ? NS_LAST : 0; /* Set last segment flag if end of the path */ + + return FR_OK; +#endif /* _USE_LFN != 0 */ +} + + + + +/*-----------------------------------------------------------------------*/ +/* Follow a file path */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT follow_path ( /* FR_OK(0): successful, !=0: error code */ + DIR* dp, /* Directory object to return last directory and found object */ + const TCHAR* path /* Full-path string to find a file or directory */ +) +{ + FRESULT res; + BYTE ns; + _FDID *obj = &dp->obj; + FATFS *fs = obj->fs; + + +#if _FS_RPATH != 0 + if (*path != '/' && *path != '\\') { /* Without heading separator */ + obj->sclust = fs->cdir; /* Start from current directory */ + } else +#endif + { /* With heading separator */ + while (*path == '/' || *path == '\\') path++; /* Strip heading separator */ + obj->sclust = 0; /* Start from root directory */ + } +#if _FS_EXFAT + obj->n_frag = 0; /* Invalidate last fragment counter of the object */ +#if _FS_RPATH != 0 + if (fs->fs_type == FS_EXFAT && obj->sclust) { /* Retrieve the sub-directory status if needed */ + DIR dj; + + obj->c_scl = fs->cdc_scl; + obj->c_size = fs->cdc_size; + obj->c_ofs = fs->cdc_ofs; + res = load_obj_dir(&dj, obj); + if (res != FR_OK) return res; + obj->objsize = ld_dword(fs->dirbuf + XDIR_FileSize); + obj->stat = fs->dirbuf[XDIR_GenFlags] & 2; + } +#endif +#endif + + if ((UINT)*path < ' ') { /* Null path name is the origin directory itself */ + dp->fn[NSFLAG] = NS_NONAME; + res = dir_sdi(dp, 0); + + } else { /* Follow path */ + for (;;) { + res = create_name(dp, &path); /* Get a segment name of the path */ + if (res != FR_OK) break; + res = dir_find(dp); /* Find an object with the segment name */ + ns = dp->fn[NSFLAG]; + if (res != FR_OK) { /* Failed to find the object */ + if (res == FR_NO_FILE) { /* Object is not found */ + if (_FS_RPATH && (ns & NS_DOT)) { /* If dot entry is not exist, stay there */ + if (!(ns & NS_LAST)) continue; /* Continue to follow if not last segment */ + dp->fn[NSFLAG] = NS_NONAME; + res = FR_OK; + } else { /* Could not find the object */ + if (!(ns & NS_LAST)) res = FR_NO_PATH; /* Adjust error code if not last segment */ + } + } + break; + } + if (ns & NS_LAST) break; /* Last segment matched. Function completed. 
*/ + /* Get into the sub-directory */ + if (!(obj->attr & AM_DIR)) { /* It is not a sub-directory and cannot follow */ + res = FR_NO_PATH; break; + } +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { /* Save containing directory information for next dir */ + obj->c_scl = obj->sclust; + obj->c_size = ((DWORD)obj->objsize & 0xFFFFFF00) | obj->stat; + obj->c_ofs = dp->blk_ofs; + obj->sclust = ld_dword(fs->dirbuf + XDIR_FstClus); /* Open next directory */ + obj->stat = fs->dirbuf[XDIR_GenFlags] & 2; + obj->objsize = ld_qword(fs->dirbuf + XDIR_FileSize); + } else +#endif + { + obj->sclust = ld_clust(fs, fs->win + dp->dptr % SS(fs)); /* Open next directory */ + } + } + } + + return res; +} + + + + +/*-----------------------------------------------------------------------*/ +/* Get logical drive number from path name */ +/*-----------------------------------------------------------------------*/ + +static +int get_ldnumber ( /* Returns logical drive number (-1:invalid drive) */ + const TCHAR** path /* Pointer to pointer to the path name */ +) +{ + const TCHAR *tp, *tt; + UINT i; + int vol = -1; +#if _STR_VOLUME_ID /* Find string drive id */ + static const char* const volid[] = {_VOLUME_STRS}; + const char *sp; + char c; + TCHAR tc; +#endif + + + if (*path) { /* If the pointer is not a null */ + for (tt = *path; (UINT)*tt >= (_USE_LFN ? ' ' : '!') && *tt != ':'; tt++) ; /* Find ':' in the path */ + if (*tt == ':') { /* If a ':' is exist in the path name */ + tp = *path; + i = *tp++ - '0'; + if (i < 10 && tp == tt) { /* Is there a numeric drive id? */ + if (i < _VOLUMES) { /* If a drive id is found, get the value and strip it */ + vol = (int)i; + *path = ++tt; + } + } +#if _STR_VOLUME_ID + else { /* No numeric drive number, find string drive id */ + i = 0; tt++; + do { + sp = volid[i]; tp = *path; + do { /* Compare a string drive id with path name */ + c = *sp++; tc = *tp++; + if (IsLower(tc)) tc -= 0x20; + } while (c && (TCHAR)c == tc); + } while ((c || tp != tt) && ++i < _VOLUMES); /* Repeat for each id until pattern match */ + if (i < _VOLUMES) { /* If a drive id is found, get the value and strip it */ + vol = (int)i; + *path = tt; + } + } +#endif + return vol; + } +#if _FS_RPATH != 0 && _VOLUMES >= 2 + vol = CurrVol; /* Current drive */ +#else + vol = 0; /* Drive 0 */ +#endif + } + return vol; +} + + + + +/*-----------------------------------------------------------------------*/ +/* Load a sector and check if it is an FAT boot sector */ +/*-----------------------------------------------------------------------*/ + +static +BYTE check_fs ( /* 0:FAT, 1:exFAT, 2:Valid BS but not FAT, 3:Not a BS, 4:Disk error */ + FATFS* fs, /* File system object */ + DWORD sect /* Sector# (lba) to load and check if it is an FAT-VBR or not */ +) +{ + fs->wflag = 0; fs->winsect = 0xFFFFFFFF; /* Invaidate window */ + if (move_window(fs, sect) != FR_OK) return 4; /* Load boot record */ + + if (ld_word(fs->win + BS_55AA) != 0xAA55) return 3; /* Check boot record signature (always placed here even if the sector size is >512) */ + + if (fs->win[BS_JmpBoot] == 0xE9 || (fs->win[BS_JmpBoot] == 0xEB && fs->win[BS_JmpBoot + 2] == 0x90)) { + if ((ld_dword(fs->win + BS_FilSysType) & 0xFFFFFF) == 0x544146) return 0; /* Check "FAT" string */ + if (ld_dword(fs->win + BS_FilSysType32) == 0x33544146) return 0; /* Check "FAT3" string */ + } +#if _FS_EXFAT + if (!mem_cmp(fs->win + BS_JmpBoot, "\xEB\x76\x90" "EXFAT ", 11)) return 1; +#endif + return 2; +} + + + + 
+/*-----------------------------------------------------------------------*/ +/* Find logical drive and check if the volume is mounted */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT find_volume ( /* FR_OK(0): successful, !=0: any error occurred */ + const TCHAR** path, /* Pointer to pointer to the path name (drive number) */ + FATFS** rfs, /* Pointer to pointer to the found file system object */ + BYTE mode /* !=0: Check write protection for write access */ +) +{ + BYTE fmt, *pt; + int vol; + DSTATUS stat; + DWORD bsect, fasize, tsect, sysect, nclst, szbfat, br[4]; + WORD nrsv; + FATFS *fs; + UINT i; + + + /* Get logical drive number */ + *rfs = 0; + vol = get_ldnumber(path); + if (vol < 0) return FR_INVALID_DRIVE; + + /* Check if the file system object is valid or not */ + fs = FatFs[vol]; /* Get pointer to the file system object */ + if (!fs) return FR_NOT_ENABLED; /* Is the file system object available? */ + + ENTER_FF(fs); /* Lock the volume */ + *rfs = fs; /* Return pointer to the file system object */ + + mode &= (BYTE)~FA_READ; /* Desired access mode, write access or not */ + if (fs->fs_type) { /* If the volume has been mounted */ + stat = disk_status(fs->drv); + if (!(stat & STA_NOINIT)) { /* and the physical drive is kept initialized */ + if (!_FS_READONLY && mode && (stat & STA_PROTECT)) { /* Check write protection if needed */ + return FR_WRITE_PROTECTED; + } + return FR_OK; /* The file system object is valid */ + } + } + + /* The file system object is not valid. */ + /* Following code attempts to mount the volume. (analyze BPB and initialize the fs object) */ + + fs->fs_type = 0; /* Clear the file system object */ + fs->drv = LD2PD(vol); /* Bind the logical drive and a physical drive */ + stat = disk_initialize(fs->drv); /* Initialize the physical drive */ + if (stat & STA_NOINIT) { /* Check if the initialization succeeded */ + return FR_NOT_READY; /* Failed to initialize due to no medium or hard error */ + } + if (!_FS_READONLY && mode && (stat & STA_PROTECT)) { /* Check disk write protection if needed */ + return FR_WRITE_PROTECTED; + } +#if _MAX_SS != _MIN_SS /* Get sector size (multiple sector size cfg only) */ + if (disk_ioctl(fs->drv, GET_SECTOR_SIZE, &SS(fs)) != RES_OK) return FR_DISK_ERR; + if (SS(fs) > _MAX_SS || SS(fs) < _MIN_SS || (SS(fs) & (SS(fs) - 1))) return FR_DISK_ERR; +#endif + + /* Find an FAT partition on the drive. Supports only generic partitioning rules, FDISK and SFD. */ + bsect = 0; + fmt = check_fs(fs, bsect); /* Load sector 0 and check if it is an FAT-VBR as SFD */ + if (fmt == 2 || (fmt < 2 && LD2PT(vol) != 0)) { /* Not an FAT-VBR or forced partition number */ + for (i = 0; i < 4; i++) { /* Get partition offset */ + pt = fs->win + (MBR_Table + i * SZ_PTE); + br[i] = pt[PTE_System] ? ld_dword(pt + PTE_StLba) : 0; + } + i = LD2PT(vol); /* Partition number: 0:auto, 1-4:forced */ + if (i) i--; + do { /* Find an FAT volume */ + bsect = br[i]; + fmt = bsect ? check_fs(fs, bsect) : 3; /* Check the partition */ + } while (LD2PT(vol) == 0 && fmt >= 2 && ++i < 4); + } + if (fmt == 4) return FR_DISK_ERR; /* An error occured in the disk I/O layer */ + if (fmt >= 2) return FR_NO_FILESYSTEM; /* No FAT volume is found */ + + /* An FAT volume is found (bsect). 
Following code initializes the file system object */ + +#if _FS_EXFAT + if (fmt == 1) { + QWORD maxlba; + + for (i = BPB_ZeroedEx; i < BPB_ZeroedEx + 53 && fs->win[i] == 0; i++) ; /* Check zero filler */ + if (i < BPB_ZeroedEx + 53) return FR_NO_FILESYSTEM; + + if (ld_word(fs->win + BPB_FSVerEx) != 0x100) return FR_NO_FILESYSTEM; /* Check exFAT revision (Must be 1.0) */ + + if (1 << fs->win[BPB_BytsPerSecEx] != SS(fs)) { /* (BPB_BytsPerSecEx must be equal to the physical sector size) */ + return FR_NO_FILESYSTEM; + } + + maxlba = ld_qword(fs->win + BPB_TotSecEx) + bsect; /* Last LBA + 1 of the volume */ + if (maxlba >= 0x100000000) return FR_NO_FILESYSTEM; /* (It cannot be handled in 32-bit LBA) */ + + fs->fsize = ld_dword(fs->win + BPB_FatSzEx); /* Number of sectors per FAT */ + + fs->n_fats = fs->win[BPB_NumFATsEx]; /* Number of FATs */ + if (fs->n_fats != 1) return FR_NO_FILESYSTEM; /* (Supports only 1 FAT) */ + + fs->csize = 1 << fs->win[BPB_SecPerClusEx]; /* Cluster size */ + if (fs->csize == 0) return FR_NO_FILESYSTEM; /* (Must be 1..32768) */ + + nclst = ld_dword(fs->win + BPB_NumClusEx); /* Number of clusters */ + if (nclst > MAX_EXFAT) return FR_NO_FILESYSTEM; /* (Too many clusters) */ + fs->n_fatent = nclst + 2; + + /* Boundaries and Limits */ + fs->volbase = bsect; + fs->database = bsect + ld_dword(fs->win + BPB_DataOfsEx); + fs->fatbase = bsect + ld_dword(fs->win + BPB_FatOfsEx); + if (maxlba < (QWORD)fs->database + nclst * fs->csize) return FR_NO_FILESYSTEM; /* (Volume size must not be smaller than the size requiered) */ + fs->dirbase = ld_dword(fs->win + BPB_RootClusEx); + + /* Check if bitmap location is in assumption (at the first cluster) */ + if (move_window(fs, clust2sect(fs, fs->dirbase)) != FR_OK) return FR_DISK_ERR; + for (i = 0; i < SS(fs); i += SZDIRE) { + if (fs->win[i] == 0x81 && ld_dword(fs->win + i + 20) == 2) break; /* 81 entry with cluster #2? 
*/ + } + if (i == SS(fs)) return FR_NO_FILESYSTEM; +#if !_FS_READONLY + fs->last_clst = fs->free_clst = 0xFFFFFFFF; /* Initialize cluster allocation information */ +#endif + fmt = FS_EXFAT; /* FAT sub-type */ + } else +#endif /* _FS_EXFAT */ + { + if (ld_word(fs->win + BPB_BytsPerSec) != SS(fs)) return FR_NO_FILESYSTEM; /* (BPB_BytsPerSec must be equal to the physical sector size) */ + + fasize = ld_word(fs->win + BPB_FATSz16); /* Number of sectors per FAT */ + if (fasize == 0) fasize = ld_dword(fs->win + BPB_FATSz32); + fs->fsize = fasize; + + fs->n_fats = fs->win[BPB_NumFATs]; /* Number of FATs */ + if (fs->n_fats != 1 && fs->n_fats != 2) return FR_NO_FILESYSTEM; /* (Must be 1 or 2) */ + fasize *= fs->n_fats; /* Number of sectors for FAT area */ + + fs->csize = fs->win[BPB_SecPerClus]; /* Cluster size */ + if (fs->csize == 0 || (fs->csize & (fs->csize - 1))) return FR_NO_FILESYSTEM; /* (Must be power of 2) */ + + fs->n_rootdir = ld_word(fs->win + BPB_RootEntCnt); /* Number of root directory entries */ + if (fs->n_rootdir % (SS(fs) / SZDIRE)) return FR_NO_FILESYSTEM; /* (Must be sector aligned) */ + + tsect = ld_word(fs->win + BPB_TotSec16); /* Number of sectors on the volume */ + if (tsect == 0) tsect = ld_dword(fs->win + BPB_TotSec32); + + nrsv = ld_word(fs->win + BPB_RsvdSecCnt); /* Number of reserved sectors */ + if (nrsv == 0) return FR_NO_FILESYSTEM; /* (Must not be 0) */ + + /* Determine the FAT sub type */ + sysect = nrsv + fasize + fs->n_rootdir / (SS(fs) / SZDIRE); /* RSV + FAT + DIR */ + if (tsect < sysect) return FR_NO_FILESYSTEM; /* (Invalid volume size) */ + nclst = (tsect - sysect) / fs->csize; /* Number of clusters */ + if (nclst == 0) return FR_NO_FILESYSTEM; /* (Invalid volume size) */ + fmt = FS_FAT32; + if (nclst <= MAX_FAT16) fmt = FS_FAT16; + if (nclst <= MAX_FAT12) fmt = FS_FAT12; + + /* Boundaries and Limits */ + fs->n_fatent = nclst + 2; /* Number of FAT entries */ + fs->volbase = bsect; /* Volume start sector */ + fs->fatbase = bsect + nrsv; /* FAT start sector */ + fs->database = bsect + sysect; /* Data start sector */ + if (fmt == FS_FAT32) { + if (ld_word(fs->win + BPB_FSVer32) != 0) return FR_NO_FILESYSTEM; /* (Must be FAT32 revision 0.0) */ + if (fs->n_rootdir) return FR_NO_FILESYSTEM; /* (BPB_RootEntCnt must be 0) */ + fs->dirbase = ld_dword(fs->win + BPB_RootClus32); /* Root directory start cluster */ + szbfat = fs->n_fatent * 4; /* (Needed FAT size) */ + } else { + if (fs->n_rootdir == 0) return FR_NO_FILESYSTEM;/* (BPB_RootEntCnt must not be 0) */ + fs->dirbase = fs->fatbase + fasize; /* Root directory start sector */ + szbfat = (fmt == FS_FAT16) ? 
/* (Needed FAT size) */ + fs->n_fatent * 2 : fs->n_fatent * 3 / 2 + (fs->n_fatent & 1); + } + if (fs->fsize < (szbfat + (SS(fs) - 1)) / SS(fs)) return FR_NO_FILESYSTEM; /* (BPB_FATSz must not be less than the size needed) */ + +#if !_FS_READONLY + /* Get FSINFO if available */ + fs->last_clst = fs->free_clst = 0xFFFFFFFF; /* Initialize cluster allocation information */ + fs->fsi_flag = 0x80; +#if (_FS_NOFSINFO & 3) != 3 + if (fmt == FS_FAT32 /* Enable FSINFO only if FAT32 and BPB_FSInfo32 == 1 */ + && ld_word(fs->win + BPB_FSInfo32) == 1 + && move_window(fs, bsect + 1) == FR_OK) + { + fs->fsi_flag = 0; + if (ld_word(fs->win + BS_55AA) == 0xAA55 /* Load FSINFO data if available */ + && ld_dword(fs->win + FSI_LeadSig) == 0x41615252 + && ld_dword(fs->win + FSI_StrucSig) == 0x61417272) + { +#if (_FS_NOFSINFO & 1) == 0 + fs->free_clst = ld_dword(fs->win + FSI_Free_Count); +#endif +#if (_FS_NOFSINFO & 2) == 0 + fs->last_clst = ld_dword(fs->win + FSI_Nxt_Free); +#endif + } + } +#endif /* (_FS_NOFSINFO & 3) != 3 */ +#endif /* !_FS_READONLY */ + } + + fs->fs_type = fmt; /* FAT sub-type */ + fs->id = ++Fsid; /* File system mount ID */ +#if _USE_LFN == 1 + fs->lfnbuf = LfnBuf; /* Static LFN working buffer */ +#if _FS_EXFAT + fs->dirbuf = DirBuf; /* Static directory block scratchpad buuffer */ +#endif +#endif +#if _FS_RPATH != 0 + fs->cdir = 0; /* Initialize current directory */ +#endif +#if _FS_LOCK != 0 /* Clear file lock semaphores */ + clear_lock(fs); +#endif + return FR_OK; +} + + + + +/*-----------------------------------------------------------------------*/ +/* Check if the file/directory object is valid or not */ +/*-----------------------------------------------------------------------*/ + +static +FRESULT validate ( /* Returns FR_OK or FR_INVALID_OBJECT */ + _FDID* obj, /* Pointer to the _OBJ, the 1st member in the FIL/DIR object, to check validity */ + FATFS** fs /* Pointer to pointer to the owner file system object to return */ +) +{ + FRESULT res = FR_INVALID_OBJECT; + + + if (obj && obj->fs && obj->fs->fs_type && obj->id == obj->fs->id) { /* Test if the object is valid */ +#if _FS_REENTRANT + if (lock_fs(obj->fs)) { /* Obtain the filesystem object */ + if (!(disk_status(obj->fs->drv) & STA_NOINIT)) { /* Test if the phsical drive is kept initialized */ + res = FR_OK; + } else { + unlock_fs(obj->fs, FR_OK); + } + } else { + res = FR_TIMEOUT; + } +#else + if (!(disk_status(obj->fs->drv) & STA_NOINIT)) { /* Test if the phsical drive is kept initialized */ + res = FR_OK; + } +#endif + } + *fs = (res == FR_OK) ? 
obj->fs : 0; /* Corresponding filesystem object */ + return res; +} + + + + +/*--------------------------------------------------------------------------- + + Public Functions (FatFs API) + +----------------------------------------------------------------------------*/ + + + +/*-----------------------------------------------------------------------*/ +/* Mount/Unmount a Logical Drive */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_mount ( + FATFS* fs, /* Pointer to the file system object (NULL:unmount)*/ + const TCHAR* path, /* Logical drive number to be mounted/unmounted */ + BYTE opt /* Mode option 0:Do not mount (delayed mount), 1:Mount immediately */ +) +{ + FATFS *cfs; + int vol; + FRESULT res; + const TCHAR *rp = path; + + + /* Get logical drive number */ + vol = get_ldnumber(&rp); + if (vol < 0) return FR_INVALID_DRIVE; + cfs = FatFs[vol]; /* Pointer to fs object */ + + if (cfs) { +#if _FS_LOCK != 0 + clear_lock(cfs); +#endif +#if _FS_REENTRANT /* Discard sync object of the current volume */ + if (!ff_del_syncobj(cfs->sobj)) return FR_INT_ERR; +#endif + cfs->fs_type = 0; /* Clear old fs object */ + } + + if (fs) { + fs->fs_type = 0; /* Clear new fs object */ +#if _FS_REENTRANT /* Create sync object for the new volume */ + if (!ff_cre_syncobj((BYTE)vol, &fs->sobj)) return FR_INT_ERR; +#endif + } + FatFs[vol] = fs; /* Register new fs object */ + + if (!fs || opt != 1) return FR_OK; /* Do not mount now, it will be mounted later */ + + res = find_volume(&path, &fs, 0); /* Force mounted the volume */ + LEAVE_FF(fs, res); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Open or Create a File */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_open ( + FIL* fp, /* Pointer to the blank file object */ + const TCHAR* path, /* Pointer to the file name */ + BYTE mode /* Access mode and file open mode flags */ +) +{ + FRESULT res; + DIR dj; + FATFS *fs; +#if !_FS_READONLY + DWORD dw, cl, bcs, clst, sc; + FSIZE_t ofs; +#endif + DEF_NAMBUF + + + if (!fp) return FR_INVALID_OBJECT; + + /* Get logical drive */ + mode &= _FS_READONLY ? FA_READ : FA_READ | FA_WRITE | FA_CREATE_ALWAYS | FA_CREATE_NEW | FA_OPEN_ALWAYS | FA_OPEN_APPEND | FA_SEEKEND; + res = find_volume(&path, &fs, mode); + if (res == FR_OK) { + dj.obj.fs = fs; + INIT_NAMBUF(fs); + res = follow_path(&dj, path); /* Follow the file path */ +#if !_FS_READONLY /* R/W configuration */ + if (res == FR_OK) { + if (dj.fn[NSFLAG] & NS_NONAME) { /* Origin directory itself? */ + res = FR_INVALID_NAME; + } +#if _FS_LOCK != 0 + else { + res = chk_lock(&dj, (mode & ~FA_READ) ? 1 : 0); + } +#endif + } + /* Create or Open a file */ + if (mode & (FA_CREATE_ALWAYS | FA_OPEN_ALWAYS | FA_CREATE_NEW)) { + if (res != FR_OK) { /* No file, create new */ + if (res == FR_NO_FILE) { /* There is no file to open, create a new entry */ +#if _FS_LOCK != 0 + res = enq_lock() ? 
dir_register(&dj) : FR_TOO_MANY_OPEN_FILES; +#else + res = dir_register(&dj); +#endif + } + mode |= FA_CREATE_ALWAYS; /* File is created */ + } + else { /* Any object is already existing */ + if (dj.obj.attr & (AM_RDO | AM_DIR)) { /* Cannot overwrite it (R/O or DIR) */ + res = FR_DENIED; + } else { + if (mode & FA_CREATE_NEW) res = FR_EXIST; /* Cannot create as new file */ + } + } + if (res == FR_OK && (mode & FA_CREATE_ALWAYS)) { /* Truncate it if overwrite mode */ + dw = GET_FATTIME(); +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + /* Get current allocation info */ + fp->obj.fs = fs; + fp->obj.sclust = ld_dword(fs->dirbuf + XDIR_FstClus); + fp->obj.objsize = ld_qword(fs->dirbuf + XDIR_FileSize); + fp->obj.stat = fs->dirbuf[XDIR_GenFlags] & 2; + fp->obj.n_frag = 0; + /* Initialize directory entry block */ + st_dword(fs->dirbuf + XDIR_CrtTime, dw); /* Set created time */ + fs->dirbuf[XDIR_CrtTime10] = 0; + st_dword(fs->dirbuf + XDIR_ModTime, dw); /* Set modified time */ + fs->dirbuf[XDIR_ModTime10] = 0; + fs->dirbuf[XDIR_Attr] = AM_ARC; /* Reset attribute */ + st_dword(fs->dirbuf + XDIR_FstClus, 0); /* Reset file allocation info */ + st_qword(fs->dirbuf + XDIR_FileSize, 0); + st_qword(fs->dirbuf + XDIR_ValidFileSize, 0); + fs->dirbuf[XDIR_GenFlags] = 1; + res = store_xdir(&dj); + if (res == FR_OK && fp->obj.sclust) { /* Remove the cluster chain if exist */ + res = remove_chain(&fp->obj, fp->obj.sclust, 0); + fs->last_clst = fp->obj.sclust - 1; /* Reuse the cluster hole */ + } + } else +#endif + { + /* Clean directory info */ + st_dword(dj.dir + DIR_CrtTime, dw); /* Set created time */ + st_dword(dj.dir + DIR_ModTime, dw); /* Set modified time */ + dj.dir[DIR_Attr] = AM_ARC; /* Reset attribute */ + cl = ld_clust(fs, dj.dir); /* Get cluster chain */ + st_clust(fs, dj.dir, 0); /* Reset file allocation info */ + st_dword(dj.dir + DIR_FileSize, 0); + fs->wflag = 1; + + if (cl) { /* Remove the cluster chain if exist */ + dw = fs->winsect; + res = remove_chain(&dj.obj, cl, 0); + if (res == FR_OK) { + res = move_window(fs, dw); + fs->last_clst = cl - 1; /* Reuse the cluster hole */ + } + } + } + } + } + else { /* Open an existing file */ + if (res == FR_OK) { /* Following succeeded */ + if (dj.obj.attr & AM_DIR) { /* It is a directory */ + res = FR_NO_FILE; + } else { + if ((mode & FA_WRITE) && (dj.obj.attr & AM_RDO)) { /* R/O violation */ + res = FR_DENIED; + } + } + } + } + if (res == FR_OK) { + if (mode & FA_CREATE_ALWAYS) /* Set file change flag if created or overwritten */ + mode |= FA_MODIFIED; + fp->dir_sect = fs->winsect; /* Pointer to the directory entry */ + fp->dir_ptr = dj.dir; +#if _FS_LOCK != 0 + fp->obj.lockid = inc_lock(&dj, (mode & ~FA_READ) ? 1 : 0); + if (!fp->obj.lockid) res = FR_INT_ERR; +#endif + } +#else /* R/O configuration */ + if (res == FR_OK) { + if (dj.fn[NSFLAG] & NS_NONAME) { /* Origin directory itself? 
*/ + res = FR_INVALID_NAME; + } else { + if (dj.obj.attr & AM_DIR) { /* It is a directory */ + res = FR_NO_FILE; + } + } + } +#endif + + if (res == FR_OK) { +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + fp->obj.c_scl = dj.obj.sclust; /* Get containing directory info */ + fp->obj.c_size = ((DWORD)dj.obj.objsize & 0xFFFFFF00) | dj.obj.stat; + fp->obj.c_ofs = dj.blk_ofs; + fp->obj.sclust = ld_dword(fs->dirbuf + XDIR_FstClus); /* Get object allocation info */ + fp->obj.objsize = ld_qword(fs->dirbuf + XDIR_FileSize); + fp->obj.stat = fs->dirbuf[XDIR_GenFlags] & 2; + } else +#endif + { + fp->obj.sclust = ld_clust(fs, dj.dir); /* Get object allocation info */ + fp->obj.objsize = ld_dword(dj.dir + DIR_FileSize); + } +#if _USE_FASTSEEK + fp->cltbl = 0; /* Disable fast seek mode */ +#endif + fp->obj.fs = fs; /* Validate the file object */ + fp->obj.id = fs->id; + fp->flag = mode; /* Set file access mode */ + fp->err = 0; /* Clear error flag */ + fp->sect = 0; /* Invalidate current data sector */ + fp->fptr = 0; /* Set file pointer top of the file */ +#if !_FS_READONLY +#if !_FS_TINY + mem_set(fp->buf, 0, _MAX_SS); /* Clear sector buffer */ +#endif + if ((mode & FA_SEEKEND) && fp->obj.objsize > 0) { /* Seek to end of file if FA_OPEN_APPEND is specified */ + fp->fptr = fp->obj.objsize; /* Offset to seek */ + bcs = (DWORD)fs->csize * SS(fs); /* Cluster size in byte */ + clst = fp->obj.sclust; /* Follow the cluster chain */ + for (ofs = fp->obj.objsize; res == FR_OK && ofs > bcs; ofs -= bcs) { + clst = get_fat(&fp->obj, clst); + if (clst <= 1) res = FR_INT_ERR; + if (clst == 0xFFFFFFFF) res = FR_DISK_ERR; + } + fp->clust = clst; + if (res == FR_OK && ofs % SS(fs)) { /* Fill sector buffer if not on the sector boundary */ + if ((sc = clust2sect(fs, clst)) == 0) { + res = FR_INT_ERR; + } else { + fp->sect = sc + (DWORD)(ofs / SS(fs)); +#if !_FS_TINY + if (disk_read(fs->drv, fp->buf, fp->sect, 1) != RES_OK) res = FR_DISK_ERR; +#endif + } + } + } +#endif + } + + FREE_NAMBUF(); + } + + if (res != FR_OK) fp->obj.fs = 0; /* Invalidate file object on error */ + + LEAVE_FF(fs, res); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Read File */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_read ( + FIL* fp, /* Pointer to the file object */ + void* buff, /* Pointer to data buffer */ + UINT btr, /* Number of bytes to read */ + UINT* br /* Pointer to number of bytes read */ +) +{ + FRESULT res; + FATFS *fs; + DWORD clst, sect; + FSIZE_t remain; + UINT rcnt, cc, csect; + BYTE *rbuff = (BYTE*)buff; + + + *br = 0; /* Clear read byte counter */ + res = validate(&fp->obj, &fs); /* Check validity of the file object */ + if (res != FR_OK || (res = (FRESULT)fp->err) != FR_OK) LEAVE_FF(fs, res); /* Check validity */ + if (!(fp->flag & FA_READ)) LEAVE_FF(fs, FR_DENIED); /* Check access mode */ + remain = fp->obj.objsize - fp->fptr; + if (btr > remain) btr = (UINT)remain; /* Truncate btr by remaining bytes */ + + for ( ; btr; /* Repeat until all data read */ + rbuff += rcnt, fp->fptr += rcnt, *br += rcnt, btr -= rcnt) { + if (fp->fptr % SS(fs) == 0) { /* On the sector boundary? */ + csect = (UINT)(fp->fptr / SS(fs) & (fs->csize - 1)); /* Sector offset in the cluster */ + if (csect == 0) { /* On the cluster boundary? */ + if (fp->fptr == 0) { /* On the top of the file? 
*/ + clst = fp->obj.sclust; /* Follow cluster chain from the origin */ + } else { /* Middle or end of the file */ +#if _USE_FASTSEEK + if (fp->cltbl) { + clst = clmt_clust(fp, fp->fptr); /* Get cluster# from the CLMT */ + } else +#endif + { + clst = get_fat(&fp->obj, fp->clust); /* Follow cluster chain on the FAT */ + } + } + if (clst < 2) ABORT(fs, FR_INT_ERR); + if (clst == 0xFFFFFFFF) ABORT(fs, FR_DISK_ERR); + fp->clust = clst; /* Update current cluster */ + } + sect = clust2sect(fs, fp->clust); /* Get current sector */ + if (!sect) ABORT(fs, FR_INT_ERR); + sect += csect; + cc = btr / SS(fs); /* When remaining bytes >= sector size, */ + if (cc) { /* Read maximum contiguous sectors directly */ + if (csect + cc > fs->csize) { /* Clip at cluster boundary */ + cc = fs->csize - csect; + } + if (disk_read(fs->drv, rbuff, sect, cc) != RES_OK) ABORT(fs, FR_DISK_ERR); +#if !_FS_READONLY && _FS_MINIMIZE <= 2 /* Replace one of the read sectors with cached data if it contains a dirty sector */ +#if _FS_TINY + if (fs->wflag && fs->winsect - sect < cc) { + mem_cpy(rbuff + ((fs->winsect - sect) * SS(fs)), fs->win, SS(fs)); + } +#else + if ((fp->flag & FA_DIRTY) && fp->sect - sect < cc) { + mem_cpy(rbuff + ((fp->sect - sect) * SS(fs)), fp->buf, SS(fs)); + } +#endif +#endif + rcnt = SS(fs) * cc; /* Number of bytes transferred */ + continue; + } +#if !_FS_TINY + if (fp->sect != sect) { /* Load data sector if not in cache */ +#if !_FS_READONLY + if (fp->flag & FA_DIRTY) { /* Write-back dirty sector cache */ + if (disk_write(fs->drv, fp->buf, fp->sect, 1) != RES_OK) ABORT(fs, FR_DISK_ERR); + fp->flag &= (BYTE)~FA_DIRTY; + } +#endif + if (disk_read(fs->drv, fp->buf, sect, 1) != RES_OK) ABORT(fs, FR_DISK_ERR); /* Fill sector cache */ + } +#endif + fp->sect = sect; + } + rcnt = SS(fs) - (UINT)fp->fptr % SS(fs); /* Number of bytes left in the sector */ + if (rcnt > btr) rcnt = btr; /* Clip it by btr if needed */ +#if _FS_TINY + if (move_window(fs, fp->sect) != FR_OK) ABORT(fs, FR_DISK_ERR); /* Move sector window */ + mem_cpy(rbuff, fs->win + fp->fptr % SS(fs), rcnt); /* Extract partial sector */ +#else + mem_cpy(rbuff, fp->buf + fp->fptr % SS(fs), rcnt); /* Extract partial sector */ +#endif + } + + LEAVE_FF(fs, FR_OK); +} + + + + +#if !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* Write File */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_write ( + FIL* fp, /* Pointer to the file object */ + const void* buff, /* Pointer to the data to be written */ + UINT btw, /* Number of bytes to write */ + UINT* bw /* Pointer to number of bytes written */ +) +{ + FRESULT res; + FATFS *fs; + DWORD clst, sect; + UINT wcnt, cc, csect; + const BYTE *wbuff = (const BYTE*)buff; + + + *bw = 0; /* Clear write byte counter */ + res = validate(&fp->obj, &fs); /* Check validity of the file object */ + if (res != FR_OK || (res = (FRESULT)fp->err) != FR_OK) LEAVE_FF(fs, res); /* Check validity */ + if (!(fp->flag & FA_WRITE)) LEAVE_FF(fs, FR_DENIED); /* Check access mode */ + + /* Check fptr wrap-around (file size cannot reach 4GiB on FATxx) */ + if ((!_FS_EXFAT || fs->fs_type != FS_EXFAT) && (DWORD)(fp->fptr + btw) < (DWORD)fp->fptr) { + btw = (UINT)(0xFFFFFFFF - (DWORD)fp->fptr); + } + + for ( ; btw; /* Repeat until all data written */ + wbuff += wcnt, fp->fptr += wcnt, fp->obj.objsize = (fp->fptr > fp->obj.objsize) ? fp->fptr : fp->obj.objsize, *bw += wcnt, btw -= wcnt) { + if (fp->fptr % SS(fs) == 0) { /* On the sector boundary? 
*/ + csect = (UINT)(fp->fptr / SS(fs)) & (fs->csize - 1); /* Sector offset in the cluster */ + if (csect == 0) { /* On the cluster boundary? */ + if (fp->fptr == 0) { /* On the top of the file? */ + clst = fp->obj.sclust; /* Follow from the origin */ + if (clst == 0) { /* If no cluster is allocated, */ + clst = create_chain(&fp->obj, 0); /* create a new cluster chain */ + } + } else { /* On the middle or end of the file */ +#if _USE_FASTSEEK + if (fp->cltbl) { + clst = clmt_clust(fp, fp->fptr); /* Get cluster# from the CLMT */ + } else +#endif + { + clst = create_chain(&fp->obj, fp->clust); /* Follow or stretch cluster chain on the FAT */ + } + } + if (clst == 0) break; /* Could not allocate a new cluster (disk full) */ + if (clst == 1) ABORT(fs, FR_INT_ERR); + if (clst == 0xFFFFFFFF) ABORT(fs, FR_DISK_ERR); + fp->clust = clst; /* Update current cluster */ + if (fp->obj.sclust == 0) fp->obj.sclust = clst; /* Set start cluster if the first write */ + } +#if _FS_TINY + if (fs->winsect == fp->sect && sync_window(fs) != FR_OK) ABORT(fs, FR_DISK_ERR); /* Write-back sector cache */ +#else + if (fp->flag & FA_DIRTY) { /* Write-back sector cache */ + if (disk_write(fs->drv, fp->buf, fp->sect, 1) != RES_OK) ABORT(fs, FR_DISK_ERR); + fp->flag &= (BYTE)~FA_DIRTY; + } +#endif + sect = clust2sect(fs, fp->clust); /* Get current sector */ + if (!sect) ABORT(fs, FR_INT_ERR); + sect += csect; + cc = btw / SS(fs); /* When remaining bytes >= sector size, */ + if (cc) { /* Write maximum contiguous sectors directly */ + if (csect + cc > fs->csize) { /* Clip at cluster boundary */ + cc = fs->csize - csect; + } + if (disk_write(fs->drv, wbuff, sect, cc) != RES_OK) ABORT(fs, FR_DISK_ERR); +#if _FS_MINIMIZE <= 2 +#if _FS_TINY + if (fs->winsect - sect < cc) { /* Refill sector cache if it gets invalidated by the direct write */ + mem_cpy(fs->win, wbuff + ((fs->winsect - sect) * SS(fs)), SS(fs)); + fs->wflag = 0; + } +#else + if (fp->sect - sect < cc) { /* Refill sector cache if it gets invalidated by the direct write */ + mem_cpy(fp->buf, wbuff + ((fp->sect - sect) * SS(fs)), SS(fs)); + fp->flag &= (BYTE)~FA_DIRTY; + } +#endif +#endif + wcnt = SS(fs) * cc; /* Number of bytes transferred */ + continue; + } +#if _FS_TINY + if (fp->fptr >= fp->obj.objsize) { /* Avoid silly cache filling on the growing edge */ + if (sync_window(fs) != FR_OK) ABORT(fs, FR_DISK_ERR); + fs->winsect = sect; + } +#else + if (fp->sect != sect && /* Fill sector cache with file data */ + fp->fptr < fp->obj.objsize && + disk_read(fs->drv, fp->buf, sect, 1) != RES_OK) { + ABORT(fs, FR_DISK_ERR); + } +#endif + fp->sect = sect; + } + wcnt = SS(fs) - (UINT)fp->fptr % SS(fs); /* Number of bytes left in the sector */ + if (wcnt > btw) wcnt = btw; /* Clip it by btw if needed */ +#if _FS_TINY + if (move_window(fs, fp->sect) != FR_OK) ABORT(fs, FR_DISK_ERR); /* Move sector window */ + mem_cpy(fs->win + fp->fptr % SS(fs), wbuff, wcnt); /* Fit data to the sector */ + fs->wflag = 1; +#else + mem_cpy(fp->buf + fp->fptr % SS(fs), wbuff, wcnt); /* Fit data to the sector */ + fp->flag |= FA_DIRTY; +#endif + } + + fp->flag |= FA_MODIFIED; /* Set file change flag */ + + LEAVE_FF(fs, FR_OK); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Synchronize the File */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_sync ( + FIL* fp /* Pointer to the file object */ +) +{ + FRESULT res; + FATFS *fs; + DWORD tm; + BYTE *dir; +#if _FS_EXFAT + DIR dj; + DEF_NAMBUF +#endif + + res = 
validate(&fp->obj, &fs); /* Check validity of the file object */ + if (res == FR_OK) { + if (fp->flag & FA_MODIFIED) { /* Is there any change to the file? */ +#if !_FS_TINY + if (fp->flag & FA_DIRTY) { /* Write-back cached data if needed */ + if (disk_write(fs->drv, fp->buf, fp->sect, 1) != RES_OK) LEAVE_FF(fs, FR_DISK_ERR); + fp->flag &= (BYTE)~FA_DIRTY; + } +#endif + /* Update the directory entry */ + tm = GET_FATTIME(); /* Modified time */ +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + res = fill_first_frag(&fp->obj); /* Fill first fragment on the FAT if needed */ + if (res == FR_OK) { + res = fill_last_frag(&fp->obj, fp->clust, 0xFFFFFFFF); /* Fill last fragment on the FAT if needed */ + } + if (res == FR_OK) { + INIT_NAMBUF(fs); + res = load_obj_dir(&dj, &fp->obj); /* Load directory entry block */ + if (res == FR_OK) { + fs->dirbuf[XDIR_Attr] |= AM_ARC; /* Set archive bit */ + fs->dirbuf[XDIR_GenFlags] = fp->obj.stat | 1; /* Update file allocation info */ + st_dword(fs->dirbuf + XDIR_FstClus, fp->obj.sclust); + st_qword(fs->dirbuf + XDIR_FileSize, fp->obj.objsize); + st_qword(fs->dirbuf + XDIR_ValidFileSize, fp->obj.objsize); + st_dword(fs->dirbuf + XDIR_ModTime, tm); /* Update modified time */ + fs->dirbuf[XDIR_ModTime10] = 0; + st_dword(fs->dirbuf + XDIR_AccTime, 0); + res = store_xdir(&dj); /* Restore it to the directory */ + if (res == FR_OK) { + res = sync_fs(fs); + fp->flag &= (BYTE)~FA_MODIFIED; + } + } + FREE_NAMBUF(); + } + } else +#endif + { + res = move_window(fs, fp->dir_sect); + if (res == FR_OK) { + dir = fp->dir_ptr; + dir[DIR_Attr] |= AM_ARC; /* Set archive bit */ + st_clust(fp->obj.fs, dir, fp->obj.sclust); /* Update file allocation info */ + st_dword(dir + DIR_FileSize, (DWORD)fp->obj.objsize); /* Update file size */ + st_dword(dir + DIR_ModTime, tm); /* Update modified time */ + st_word(dir + DIR_LstAccDate, 0); + fs->wflag = 1; + res = sync_fs(fs); /* Restore it to the directory */ + fp->flag &= (BYTE)~FA_MODIFIED; + } + } + } + } + + LEAVE_FF(fs, res); +} + +#endif /* !_FS_READONLY */ + + + + +/*-----------------------------------------------------------------------*/ +/* Close File */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_close ( + FIL* fp /* Pointer to the file object to be closed */ +) +{ + FRESULT res; + FATFS *fs; + +#if !_FS_READONLY + res = f_sync(fp); /* Flush cached data */ + if (res == FR_OK) +#endif + { + res = validate(&fp->obj, &fs); /* Lock volume */ + if (res == FR_OK) { +#if _FS_LOCK != 0 + res = dec_lock(fp->obj.lockid); /* Decrement file open counter */ + if (res == FR_OK) +#endif + { + fp->obj.fs = 0; /* Invalidate file object */ + } +#if _FS_REENTRANT + unlock_fs(fs, FR_OK); /* Unlock volume */ +#endif + } + } + return res; +} + + + + +#if _FS_RPATH >= 1 +/*-----------------------------------------------------------------------*/ +/* Change Current Directory or Current Drive, Get Current Directory */ +/*-----------------------------------------------------------------------*/ + +#if _VOLUMES >= 2 +FRESULT f_chdrive ( + const TCHAR* path /* Drive number */ +) +{ + int vol; + + + /* Get logical drive number */ + vol = get_ldnumber(&path); + if (vol < 0) return FR_INVALID_DRIVE; + + CurrVol = (BYTE)vol; /* Set it as current volume */ + + return FR_OK; +} +#endif + + +FRESULT f_chdir ( + const TCHAR* path /* Pointer to the directory path */ +) +{ + FRESULT res; + DIR dj; + FATFS *fs; + DEF_NAMBUF + + /* Get logical drive */ + res = find_volume(&path, &fs, 0); + if (res == FR_OK) { + dj.obj.fs = 
fs; + INIT_NAMBUF(fs); + res = follow_path(&dj, path); /* Follow the path */ + if (res == FR_OK) { /* Follow completed */ + if (dj.fn[NSFLAG] & NS_NONAME) { + fs->cdir = dj.obj.sclust; /* It is the start directory itself */ +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + fs->cdc_scl = dj.obj.c_scl; + fs->cdc_size = dj.obj.c_size; + fs->cdc_ofs = dj.obj.c_ofs; + } +#endif + } else { + if (dj.obj.attr & AM_DIR) { /* It is a sub-directory */ +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + fs->cdir = ld_dword(fs->dirbuf + XDIR_FstClus); /* Sub-directory cluster */ + fs->cdc_scl = dj.obj.sclust; /* Save containing directory information */ + fs->cdc_size = ((DWORD)dj.obj.objsize & 0xFFFFFF00) | dj.obj.stat; + fs->cdc_ofs = dj.blk_ofs; + } else +#endif + { + fs->cdir = ld_clust(fs, dj.dir); /* Sub-directory cluster */ + } + } else { + res = FR_NO_PATH; /* Reached but a file */ + } + } + } + FREE_NAMBUF(); + if (res == FR_NO_FILE) res = FR_NO_PATH; + } + + LEAVE_FF(fs, res); +} + + +#if _FS_RPATH >= 2 +FRESULT f_getcwd ( + TCHAR* buff, /* Pointer to the directory path */ + UINT len /* Size of path */ +) +{ + FRESULT res; + DIR dj; + FATFS *fs; + UINT i, n; + DWORD ccl; + TCHAR *tp; + FILINFO fno; + DEF_NAMBUF + + + *buff = 0; + /* Get logical drive */ + res = find_volume((const TCHAR**)&buff, &fs, 0); /* Get current volume */ + if (res == FR_OK) { + dj.obj.fs = fs; + INIT_NAMBUF(fs); + i = len; /* Bottom of buffer (directory stack base) */ + if (!_FS_EXFAT || fs->fs_type != FS_EXFAT) { /* (Cannot do getcwd on exFAT and returns root path) */ + dj.obj.sclust = fs->cdir; /* Start to follow upper directory from current directory */ + while ((ccl = dj.obj.sclust) != 0) { /* Repeat while current directory is a sub-directory */ + res = dir_sdi(&dj, 1 * SZDIRE); /* Get parent directory */ + if (res != FR_OK) break; + res = move_window(fs, dj.sect); + if (res != FR_OK) break; + dj.obj.sclust = ld_clust(fs, dj.dir); /* Goto parent directory */ + res = dir_sdi(&dj, 0); + if (res != FR_OK) break; + do { /* Find the entry links to the child directory */ + res = dir_read(&dj, 0); + if (res != FR_OK) break; + if (ccl == ld_clust(fs, dj.dir)) break; /* Found the entry */ + res = dir_next(&dj, 0); + } while (res == FR_OK); + if (res == FR_NO_FILE) res = FR_INT_ERR;/* It cannot be 'not found'. 
*/ + if (res != FR_OK) break; + get_fileinfo(&dj, &fno); /* Get the directory name and push it to the buffer */ + for (n = 0; fno.fname[n]; n++) ; + if (i < n + 3) { + res = FR_NOT_ENOUGH_CORE; break; + } + while (n) buff[--i] = fno.fname[--n]; + buff[--i] = '/'; + } + } + tp = buff; + if (res == FR_OK) { +#if _VOLUMES >= 2 + *tp++ = '0' + CurrVol; /* Put drive number */ + *tp++ = ':'; +#endif + if (i == len) { /* Root-directory */ + *tp++ = '/'; + } else { /* Sub-directroy */ + do /* Add stacked path str */ + *tp++ = buff[i++]; + while (i < len); + } + } + *tp = 0; + FREE_NAMBUF(); + } + + LEAVE_FF(fs, res); +} + +#endif /* _FS_RPATH >= 2 */ +#endif /* _FS_RPATH >= 1 */ + + + +#if _FS_MINIMIZE <= 2 +/*-----------------------------------------------------------------------*/ +/* Seek File R/W Pointer */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_lseek ( + FIL* fp, /* Pointer to the file object */ + FSIZE_t ofs /* File pointer from top of file */ +) +{ + FRESULT res; + FATFS *fs; + DWORD clst, bcs, nsect; + FSIZE_t ifptr; +#if _USE_FASTSEEK + DWORD cl, pcl, ncl, tcl, dsc, tlen, ulen, *tbl; +#endif + + res = validate(&fp->obj, &fs); /* Check validity of the file object */ + if (res == FR_OK) res = (FRESULT)fp->err; +#if _FS_EXFAT && !_FS_READONLY + if (res == FR_OK && fs->fs_type == FS_EXFAT) { + res = fill_last_frag(&fp->obj, fp->clust, 0xFFFFFFFF); /* Fill last fragment on the FAT if needed */ + } +#endif + if (res != FR_OK) LEAVE_FF(fs, res); + +#if _USE_FASTSEEK + if (fp->cltbl) { /* Fast seek */ + if (ofs == CREATE_LINKMAP) { /* Create CLMT */ + tbl = fp->cltbl; + tlen = *tbl++; ulen = 2; /* Given table size and required table size */ + cl = fp->obj.sclust; /* Origin of the chain */ + if (cl) { + do { + /* Get a fragment */ + tcl = cl; ncl = 0; ulen += 2; /* Top, length and used items */ + do { + pcl = cl; ncl++; + cl = get_fat(&fp->obj, cl); + if (cl <= 1) ABORT(fs, FR_INT_ERR); + if (cl == 0xFFFFFFFF) ABORT(fs, FR_DISK_ERR); + } while (cl == pcl + 1); + if (ulen <= tlen) { /* Store the length and top of the fragment */ + *tbl++ = ncl; *tbl++ = tcl; + } + } while (cl < fs->n_fatent); /* Repeat until end of chain */ + } + *fp->cltbl = ulen; /* Number of items used */ + if (ulen <= tlen) { + *tbl = 0; /* Terminate table */ + } else { + res = FR_NOT_ENOUGH_CORE; /* Given table size is smaller than required */ + } + } else { /* Fast seek */ + if (ofs > fp->obj.objsize) ofs = fp->obj.objsize; /* Clip offset at the file size */ + fp->fptr = ofs; /* Set file pointer */ + if (ofs) { + fp->clust = clmt_clust(fp, ofs - 1); + dsc = clust2sect(fs, fp->clust); + if (!dsc) ABORT(fs, FR_INT_ERR); + dsc += (DWORD)((ofs - 1) / SS(fs)) & (fs->csize - 1); + if (fp->fptr % SS(fs) && dsc != fp->sect) { /* Refill sector cache if needed */ +#if !_FS_TINY +#if !_FS_READONLY + if (fp->flag & FA_DIRTY) { /* Write-back dirty sector cache */ + if (disk_write(fs->drv, fp->buf, fp->sect, 1) != RES_OK) ABORT(fs, FR_DISK_ERR); + fp->flag &= (BYTE)~FA_DIRTY; + } +#endif + if (disk_read(fs->drv, fp->buf, dsc, 1) != RES_OK) ABORT(fs, FR_DISK_ERR); /* Load current sector */ +#endif + fp->sect = dsc; + } + } + } + } else +#endif + + /* Normal Seek */ + { +#if _FS_EXFAT + if (fs->fs_type != FS_EXFAT && ofs >= 0x100000000) ofs = 0xFFFFFFFF; /* Clip at 4GiB-1 if at FATxx */ +#endif + if (ofs > fp->obj.objsize && (_FS_READONLY || !(fp->flag & FA_WRITE))) { /* In read-only mode, clip offset with the file size */ + ofs = fp->obj.objsize; + } + ifptr = fp->fptr; + fp->fptr = nsect = 
0; + if (ofs) { + bcs = (DWORD)fs->csize * SS(fs); /* Cluster size (byte) */ + if (ifptr > 0 && + (ofs - 1) / bcs >= (ifptr - 1) / bcs) { /* When seek to same or following cluster, */ + fp->fptr = (ifptr - 1) & ~(FSIZE_t)(bcs - 1); /* start from the current cluster */ + ofs -= fp->fptr; + clst = fp->clust; + } else { /* When seek to back cluster, */ + clst = fp->obj.sclust; /* start from the first cluster */ +#if !_FS_READONLY + if (clst == 0) { /* If no cluster chain, create a new chain */ + clst = create_chain(&fp->obj, 0); + if (clst == 1) ABORT(fs, FR_INT_ERR); + if (clst == 0xFFFFFFFF) ABORT(fs, FR_DISK_ERR); + fp->obj.sclust = clst; + } +#endif + fp->clust = clst; + } + if (clst != 0) { + while (ofs > bcs) { /* Cluster following loop */ + ofs -= bcs; fp->fptr += bcs; +#if !_FS_READONLY + if (fp->flag & FA_WRITE) { /* Check if in write mode or not */ + if (_FS_EXFAT && fp->fptr > fp->obj.objsize) { /* No FAT chain object needs correct objsize to generate FAT value */ + fp->obj.objsize = fp->fptr; + fp->flag |= FA_MODIFIED; + } + clst = create_chain(&fp->obj, clst); /* Follow chain with forceed stretch */ + if (clst == 0) { /* Clip file size in case of disk full */ + ofs = 0; break; + } + } else +#endif + { + clst = get_fat(&fp->obj, clst); /* Follow cluster chain if not in write mode */ + } + if (clst == 0xFFFFFFFF) ABORT(fs, FR_DISK_ERR); + if (clst <= 1 || clst >= fs->n_fatent) ABORT(fs, FR_INT_ERR); + fp->clust = clst; + } + fp->fptr += ofs; + if (ofs % SS(fs)) { + nsect = clust2sect(fs, clst); /* Current sector */ + if (!nsect) ABORT(fs, FR_INT_ERR); + nsect += (DWORD)(ofs / SS(fs)); + } + } + } + if (!_FS_READONLY && fp->fptr > fp->obj.objsize) { /* Set file change flag if the file size is extended */ + fp->obj.objsize = fp->fptr; + fp->flag |= FA_MODIFIED; + } + if (fp->fptr % SS(fs) && nsect != fp->sect) { /* Fill sector cache if needed */ +#if !_FS_TINY +#if !_FS_READONLY + if (fp->flag & FA_DIRTY) { /* Write-back dirty sector cache */ + if (disk_write(fs->drv, fp->buf, fp->sect, 1) != RES_OK) ABORT(fs, FR_DISK_ERR); + fp->flag &= (BYTE)~FA_DIRTY; + } +#endif + if (disk_read(fs->drv, fp->buf, nsect, 1) != RES_OK) ABORT(fs, FR_DISK_ERR); /* Fill sector cache */ +#endif + fp->sect = nsect; + } + } + + LEAVE_FF(fs, res); +} + + + +#if _FS_MINIMIZE <= 1 +/*-----------------------------------------------------------------------*/ +/* Create a Directory Object */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_opendir ( + DIR* dp, /* Pointer to directory object to create */ + const TCHAR* path /* Pointer to the directory path */ +) +{ + FRESULT res; + FATFS *fs; + _FDID *obj; + DEF_NAMBUF + + + if (!dp) return FR_INVALID_OBJECT; + + /* Get logical drive */ + obj = &dp->obj; + res = find_volume(&path, &fs, 0); + if (res == FR_OK) { + obj->fs = fs; + INIT_NAMBUF(fs); + res = follow_path(dp, path); /* Follow the path to the directory */ + if (res == FR_OK) { /* Follow completed */ + if (!(dp->fn[NSFLAG] & NS_NONAME)) { /* It is not the origin directory itself */ + if (obj->attr & AM_DIR) { /* This object is a sub-directory */ +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + obj->c_scl = obj->sclust; /* Get containing directory inforamation */ + obj->c_size = ((DWORD)obj->objsize & 0xFFFFFF00) | obj->stat; + obj->c_ofs = dp->blk_ofs; + obj->sclust = ld_dword(fs->dirbuf + XDIR_FstClus); /* Get object allocation info */ + obj->objsize = ld_qword(fs->dirbuf + XDIR_FileSize); + obj->stat = fs->dirbuf[XDIR_GenFlags] & 2; + } else +#endif + { + 
obj->sclust = ld_clust(fs, dp->dir); /* Get object allocation info */ + } + } else { /* This object is a file */ + res = FR_NO_PATH; + } + } + if (res == FR_OK) { + obj->id = fs->id; + res = dir_sdi(dp, 0); /* Rewind directory */ +#if _FS_LOCK != 0 + if (res == FR_OK) { + if (obj->sclust) { + obj->lockid = inc_lock(dp, 0); /* Lock the sub directory */ + if (!obj->lockid) res = FR_TOO_MANY_OPEN_FILES; + } else { + obj->lockid = 0; /* Root directory need not to be locked */ + } + } +#endif + } + } + FREE_NAMBUF(); + if (res == FR_NO_FILE) res = FR_NO_PATH; + } + if (res != FR_OK) obj->fs = 0; /* Invalidate the directory object if function faild */ + + LEAVE_FF(fs, res); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Close Directory */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_closedir ( + DIR *dp /* Pointer to the directory object to be closed */ +) +{ + FRESULT res; + FATFS *fs; + + + res = validate(&dp->obj, &fs); /* Check validity of the file object */ + if (res == FR_OK) { +#if _FS_LOCK != 0 + if (dp->obj.lockid) { /* Decrement sub-directory open counter */ + res = dec_lock(dp->obj.lockid); + } + if (res == FR_OK) +#endif + { + dp->obj.fs = 0; /* Invalidate directory object */ + } +#if _FS_REENTRANT + unlock_fs(fs, FR_OK); /* Unlock volume */ +#endif + } + return res; +} + + + + +/*-----------------------------------------------------------------------*/ +/* Read Directory Entries in Sequence */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_readdir ( + DIR* dp, /* Pointer to the open directory object */ + FILINFO* fno /* Pointer to file information to return */ +) +{ + FRESULT res; + FATFS *fs; + DEF_NAMBUF + + + res = validate(&dp->obj, &fs); /* Check validity of the directory object */ + if (res == FR_OK) { + if (!fno) { + res = dir_sdi(dp, 0); /* Rewind the directory object */ + } else { + INIT_NAMBUF(fs); + res = dir_read(dp, 0); /* Read an item */ + if (res == FR_NO_FILE) res = FR_OK; /* Ignore end of directory */ + if (res == FR_OK) { /* A valid entry is found */ + get_fileinfo(dp, fno); /* Get the object information */ + res = dir_next(dp, 0); /* Increment index for next */ + if (res == FR_NO_FILE) res = FR_OK; /* Ignore end of directory now */ + } + FREE_NAMBUF(); + } + } + LEAVE_FF(fs, res); +} + + + +#if _USE_FIND +/*-----------------------------------------------------------------------*/ +/* Find Next File */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_findnext ( + DIR* dp, /* Pointer to the open directory object */ + FILINFO* fno /* Pointer to the file information structure */ +) +{ + FRESULT res; + + + for (;;) { + res = f_readdir(dp, fno); /* Get a directory item */ + if (res != FR_OK || !fno || !fno->fname[0]) break; /* Terminate if any error or end of directory */ + if (pattern_matching(dp->pat, fno->fname, 0, 0)) break; /* Test for the file name */ +#if _USE_LFN != 0 && _USE_FIND == 2 + if (pattern_matching(dp->pat, fno->altname, 0, 0)) break; /* Test for alternative name if exist */ +#endif + } + return res; +} + + + +/*-----------------------------------------------------------------------*/ +/* Find First File */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_findfirst ( + DIR* dp, /* Pointer to the blank directory object */ + FILINFO* fno, /* Pointer to the file information structure */ + const TCHAR* path, /* Pointer to the directory to open 
*/ + const TCHAR* pattern /* Pointer to the matching pattern */ +) +{ + FRESULT res; + + + dp->pat = pattern; /* Save pointer to pattern string */ + res = f_opendir(dp, path); /* Open the target directory */ + if (res == FR_OK) { + res = f_findnext(dp, fno); /* Find the first item */ + } + return res; +} + +#endif /* _USE_FIND */ + + + +#if _FS_MINIMIZE == 0 +/*-----------------------------------------------------------------------*/ +/* Get File Status */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_stat ( + const TCHAR* path, /* Pointer to the file path */ + FILINFO* fno /* Pointer to file information to return */ +) +{ + FRESULT res; + DIR dj; + DEF_NAMBUF + + + /* Get logical drive */ + res = find_volume(&path, &dj.obj.fs, 0); + if (res == FR_OK) { + INIT_NAMBUF(dj.obj.fs); + res = follow_path(&dj, path); /* Follow the file path */ + if (res == FR_OK) { /* Follow completed */ + if (dj.fn[NSFLAG] & NS_NONAME) { /* It is origin directory */ + res = FR_INVALID_NAME; + } else { /* Found an object */ + if (fno) get_fileinfo(&dj, fno); + } + } + FREE_NAMBUF(); + } + + LEAVE_FF(dj.obj.fs, res); +} + + + +#if !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* Get Number of Free Clusters */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_getfree ( + const TCHAR* path, /* Path name of the logical drive number */ + DWORD* nclst, /* Pointer to a variable to return number of free clusters */ + FATFS** fatfs /* Pointer to return pointer to corresponding file system object */ +) +{ + FRESULT res; + FATFS *fs; + DWORD nfree, clst, sect, stat; + UINT i; + BYTE *p; + _FDID obj; + + + /* Get logical drive */ + res = find_volume(&path, &fs, 0); + if (res == FR_OK) { + *fatfs = fs; /* Return ptr to the fs object */ + /* If free_clst is valid, return it without full cluster scan */ + if (fs->free_clst <= fs->n_fatent - 2) { + *nclst = fs->free_clst; + } else { + /* Get number of free clusters */ + nfree = 0; + if (fs->fs_type == FS_FAT12) { /* FAT12: Sector unalighed FAT entries */ + clst = 2; obj.fs = fs; + do { + stat = get_fat(&obj, clst); + if (stat == 0xFFFFFFFF) { res = FR_DISK_ERR; break; } + if (stat == 1) { res = FR_INT_ERR; break; } + if (stat == 0) nfree++; + } while (++clst < fs->n_fatent); + } else { +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { /* exFAT: Scan bitmap table */ + BYTE bm; + UINT b; + + clst = fs->n_fatent - 2; + sect = fs->database; + i = 0; + do { + if (i == 0 && (res = move_window(fs, sect++)) != FR_OK) break; + for (b = 8, bm = fs->win[i]; b && clst; b--, clst--) { + if (!(bm & 1)) nfree++; + bm >>= 1; + } + i = (i + 1) % SS(fs); + } while (clst); + } else +#endif + { /* FAT16/32: Sector alighed FAT entries */ + clst = fs->n_fatent; sect = fs->fatbase; + i = 0; p = 0; + do { + if (i == 0) { + res = move_window(fs, sect++); + if (res != FR_OK) break; + p = fs->win; + i = SS(fs); + } + if (fs->fs_type == FS_FAT16) { + if (ld_word(p) == 0) nfree++; + p += 2; i -= 2; + } else { + if ((ld_dword(p) & 0x0FFFFFFF) == 0) nfree++; + p += 4; i -= 4; + } + } while (--clst); + } + } + *nclst = nfree; /* Return the free clusters */ + fs->free_clst = nfree; /* Now free_clst is valid */ + fs->fsi_flag |= 1; /* FSInfo is to be updated */ + } + } + + LEAVE_FF(fs, res); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Truncate File */ +/*-----------------------------------------------------------------------*/ + +FRESULT 
f_truncate ( + FIL* fp /* Pointer to the file object */ +) +{ + FRESULT res; + FATFS *fs; + DWORD ncl; + + + res = validate(&fp->obj, &fs); /* Check validity of the file object */ + if (res != FR_OK || (res = (FRESULT)fp->err) != FR_OK) LEAVE_FF(fs, res); + if (!(fp->flag & FA_WRITE)) LEAVE_FF(fs, FR_DENIED); /* Check access mode */ + + if (fp->fptr < fp->obj.objsize) { /* Process when fptr is not on the eof */ + if (fp->fptr == 0) { /* When set file size to zero, remove entire cluster chain */ + res = remove_chain(&fp->obj, fp->obj.sclust, 0); + fp->obj.sclust = 0; + } else { /* When truncate a part of the file, remove remaining clusters */ + ncl = get_fat(&fp->obj, fp->clust); + res = FR_OK; + if (ncl == 0xFFFFFFFF) res = FR_DISK_ERR; + if (ncl == 1) res = FR_INT_ERR; + if (res == FR_OK && ncl < fs->n_fatent) { + res = remove_chain(&fp->obj, ncl, fp->clust); + } + } + fp->obj.objsize = fp->fptr; /* Set file size to current R/W point */ + fp->flag |= FA_MODIFIED; +#if !_FS_TINY + if (res == FR_OK && (fp->flag & FA_DIRTY)) { + if (disk_write(fs->drv, fp->buf, fp->sect, 1) != RES_OK) { + res = FR_DISK_ERR; + } else { + fp->flag &= (BYTE)~FA_DIRTY; + } + } +#endif + if (res != FR_OK) ABORT(fs, res); + } + + LEAVE_FF(fs, res); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Delete a File/Directory */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_unlink ( + const TCHAR* path /* Pointer to the file or directory path */ +) +{ + FRESULT res; + DIR dj, sdj; + DWORD dclst = 0; + FATFS *fs; +#if _FS_EXFAT + _FDID obj; +#endif + DEF_NAMBUF + + + /* Get logical drive */ + res = find_volume(&path, &fs, FA_WRITE); + dj.obj.fs = fs; + if (res == FR_OK) { + INIT_NAMBUF(fs); + res = follow_path(&dj, path); /* Follow the file path */ + if (_FS_RPATH && res == FR_OK && (dj.fn[NSFLAG] & NS_DOT)) { + res = FR_INVALID_NAME; /* Cannot remove dot entry */ + } +#if _FS_LOCK != 0 + if (res == FR_OK) res = chk_lock(&dj, 2); /* Check if it is an open object */ +#endif + if (res == FR_OK) { /* The object is accessible */ + if (dj.fn[NSFLAG] & NS_NONAME) { + res = FR_INVALID_NAME; /* Cannot remove the origin directory */ + } else { + if (dj.obj.attr & AM_RDO) { + res = FR_DENIED; /* Cannot remove R/O object */ + } + } + if (res == FR_OK) { +#if _FS_EXFAT + obj.fs = fs; + if (fs->fs_type == FS_EXFAT) { + obj.sclust = dclst = ld_dword(fs->dirbuf + XDIR_FstClus); + obj.objsize = ld_qword(fs->dirbuf + XDIR_FileSize); + obj.stat = fs->dirbuf[XDIR_GenFlags] & 2; + } else +#endif + { + dclst = ld_clust(fs, dj.dir); + } + if (dj.obj.attr & AM_DIR) { /* Is it a sub-directory? */ +#if _FS_RPATH != 0 + if (dclst == fs->cdir) { /* Is it the current directory? */ + res = FR_DENIED; + } else +#endif + { + sdj.obj.fs = fs; /* Open the sub-directory */ + sdj.obj.sclust = dclst; +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + sdj.obj.objsize = obj.objsize; + sdj.obj.stat = obj.stat; + } +#endif + res = dir_sdi(&sdj, 0); + if (res == FR_OK) { + res = dir_read(&sdj, 0); /* Read an item */ + if (res == FR_OK) res = FR_DENIED; /* Not empty? */ + if (res == FR_NO_FILE) res = FR_OK; /* Empty? 
*/ + } + } + } + } + if (res == FR_OK) { + res = dir_remove(&dj); /* Remove the directory entry */ + if (res == FR_OK && dclst) { /* Remove the cluster chain if exist */ +#if _FS_EXFAT + res = remove_chain(&obj, dclst, 0); +#else + res = remove_chain(&dj.obj, dclst, 0); +#endif + } + if (res == FR_OK) res = sync_fs(fs); + } + } + FREE_NAMBUF(); + } + + LEAVE_FF(fs, res); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Create a Directory */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_mkdir ( + const TCHAR* path /* Pointer to the directory path */ +) +{ + FRESULT res; + DIR dj; + FATFS *fs; + BYTE *dir; + UINT n; + DWORD dsc, dcl, pcl, tm; + DEF_NAMBUF + + + /* Get logical drive */ + res = find_volume(&path, &fs, FA_WRITE); + dj.obj.fs = fs; + if (res == FR_OK) { + INIT_NAMBUF(fs); + res = follow_path(&dj, path); /* Follow the file path */ + if (res == FR_OK) res = FR_EXIST; /* Any object with same name is already existing */ + if (_FS_RPATH && res == FR_NO_FILE && (dj.fn[NSFLAG] & NS_DOT)) { + res = FR_INVALID_NAME; + } + if (res == FR_NO_FILE) { /* Can create a new directory */ + dcl = create_chain(&dj.obj, 0); /* Allocate a cluster for the new directory table */ + dj.obj.objsize = (DWORD)fs->csize * SS(fs); + res = FR_OK; + if (dcl == 0) res = FR_DENIED; /* No space to allocate a new cluster */ + if (dcl == 1) res = FR_INT_ERR; + if (dcl == 0xFFFFFFFF) res = FR_DISK_ERR; + if (res == FR_OK) res = sync_window(fs); /* Flush FAT */ + tm = GET_FATTIME(); + if (res == FR_OK) { /* Initialize the new directory table */ + dsc = clust2sect(fs, dcl); + dir = fs->win; + mem_set(dir, 0, SS(fs)); + if (!_FS_EXFAT || fs->fs_type != FS_EXFAT) { + mem_set(dir + DIR_Name, ' ', 11); /* Create "." entry */ + dir[DIR_Name] = '.'; + dir[DIR_Attr] = AM_DIR; + st_dword(dir + DIR_ModTime, tm); + st_clust(fs, dir, dcl); + mem_cpy(dir + SZDIRE, dir, SZDIRE); /* Create ".." 
entry */ + dir[SZDIRE + 1] = '.'; pcl = dj.obj.sclust; + if (fs->fs_type == FS_FAT32 && pcl == fs->dirbase) pcl = 0; + st_clust(fs, dir + SZDIRE, pcl); + } + for (n = fs->csize; n; n--) { /* Write dot entries and clear following sectors */ + fs->winsect = dsc++; + fs->wflag = 1; + res = sync_window(fs); + if (res != FR_OK) break; + mem_set(dir, 0, SS(fs)); + } + } + if (res == FR_OK) { + res = dir_register(&dj); /* Register the object to the directoy */ + } + if (res == FR_OK) { +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { /* Initialize directory entry block */ + st_dword(fs->dirbuf + XDIR_ModTime, tm); /* Created time */ + st_dword(fs->dirbuf + XDIR_FstClus, dcl); /* Table start cluster */ + st_dword(fs->dirbuf + XDIR_FileSize, (DWORD)dj.obj.objsize); /* File size needs to be valid */ + st_dword(fs->dirbuf + XDIR_ValidFileSize, (DWORD)dj.obj.objsize); + fs->dirbuf[XDIR_GenFlags] = 3; /* Initialize the object flag (contiguous) */ + fs->dirbuf[XDIR_Attr] = AM_DIR; /* Attribute */ + res = store_xdir(&dj); + } else +#endif + { + dir = dj.dir; + st_dword(dir + DIR_ModTime, tm); /* Created time */ + st_clust(fs, dir, dcl); /* Table start cluster */ + dir[DIR_Attr] = AM_DIR; /* Attribute */ + fs->wflag = 1; + } + if (res == FR_OK) { + res = sync_fs(fs); + } + } else { + remove_chain(&dj.obj, dcl, 0); /* Could not register, remove cluster chain */ + } + } + FREE_NAMBUF(); + } + + LEAVE_FF(fs, res); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Rename a File/Directory */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_rename ( + const TCHAR* path_old, /* Pointer to the object name to be renamed */ + const TCHAR* path_new /* Pointer to the new name */ +) +{ + FRESULT res; + DIR djo, djn; + FATFS *fs; + BYTE buf[_FS_EXFAT ? SZDIRE * 2 : 24], *dir; + DWORD dw; + DEF_NAMBUF + + + get_ldnumber(&path_new); /* Snip drive number of new name off */ + res = find_volume(&path_old, &fs, FA_WRITE); /* Get logical drive of the old object */ + if (res == FR_OK) { + djo.obj.fs = fs; + INIT_NAMBUF(fs); + res = follow_path(&djo, path_old); /* Check old object */ + if (res == FR_OK && (djo.fn[NSFLAG] & (NS_DOT | NS_NONAME))) res = FR_INVALID_NAME; /* Check validity of name */ +#if _FS_LOCK != 0 + if (res == FR_OK) { + res = chk_lock(&djo, 2); + } +#endif + if (res == FR_OK) { /* Object to be renamed is found */ +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { /* At exFAT */ + BYTE nf, nn; + WORD nh; + + mem_cpy(buf, fs->dirbuf, SZDIRE * 2); /* Save 85+C0 entry of old object */ + mem_cpy(&djn, &djo, sizeof djo); + res = follow_path(&djn, path_new); /* Make sure if new object name is not in use */ + if (res == FR_OK) { /* Is new name already in use by any other object? */ + res = (djn.obj.sclust == djo.obj.sclust && djn.dptr == djo.dptr) ? 
FR_NO_FILE : FR_EXIST; + } + if (res == FR_NO_FILE) { /* It is a valid path and no name collision */ + res = dir_register(&djn); /* Register the new entry */ + if (res == FR_OK) { + nf = fs->dirbuf[XDIR_NumSec]; nn = fs->dirbuf[XDIR_NumName]; + nh = ld_word(fs->dirbuf + XDIR_NameHash); + mem_cpy(fs->dirbuf, buf, SZDIRE * 2); + fs->dirbuf[XDIR_NumSec] = nf; fs->dirbuf[XDIR_NumName] = nn; + st_word(fs->dirbuf + XDIR_NameHash, nh); +/* Start of critical section where an interruption can cause a cross-link */ + res = store_xdir(&djn); + } + } + } else +#endif + { /* At FAT12/FAT16/FAT32 */ + mem_cpy(buf, djo.dir + DIR_Attr, 21); /* Save information about the object except name */ + mem_cpy(&djn, &djo, sizeof (DIR)); /* Duplicate the directory object */ + res = follow_path(&djn, path_new); /* Make sure if new object name is not in use */ + if (res == FR_OK) { /* Is new name already in use by any other object? */ + res = (djn.obj.sclust == djo.obj.sclust && djn.dptr == djo.dptr) ? FR_NO_FILE : FR_EXIST; + } + if (res == FR_NO_FILE) { /* It is a valid path and no name collision */ + res = dir_register(&djn); /* Register the new entry */ + if (res == FR_OK) { + dir = djn.dir; /* Copy information about object except name */ + mem_cpy(dir + 13, buf + 2, 19); + dir[DIR_Attr] = buf[0] | AM_ARC; + fs->wflag = 1; + if ((dir[DIR_Attr] & AM_DIR) && djo.obj.sclust != djn.obj.sclust) { /* Update .. entry in the sub-directory if needed */ + dw = clust2sect(fs, ld_clust(fs, dir)); + if (!dw) { + res = FR_INT_ERR; + } else { +/* Start of critical section where an interruption can cause a cross-link */ + res = move_window(fs, dw); + dir = fs->win + SZDIRE * 1; /* Ptr to .. entry */ + if (res == FR_OK && dir[1] == '.') { + st_clust(fs, dir, djn.obj.sclust); + fs->wflag = 1; + } + } + } + } + } + } + if (res == FR_OK) { + res = dir_remove(&djo); /* Remove old entry */ + if (res == FR_OK) { + res = sync_fs(fs); + } + } +/* End of the critical section */ + } + FREE_NAMBUF(); + } + + LEAVE_FF(fs, res); +} + +#endif /* !_FS_READONLY */ +#endif /* _FS_MINIMIZE == 0 */ +#endif /* _FS_MINIMIZE <= 1 */ +#endif /* _FS_MINIMIZE <= 2 */ + + + +#if _USE_CHMOD && !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* Change Attribute */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_chmod ( + const TCHAR* path, /* Pointer to the file path */ + BYTE attr, /* Attribute bits */ + BYTE mask /* Attribute mask to change */ +) +{ + FRESULT res; + DIR dj; + FATFS *fs; + DEF_NAMBUF + + + res = find_volume(&path, &fs, FA_WRITE); /* Get logical drive */ + dj.obj.fs = fs; + if (res == FR_OK) { + INIT_NAMBUF(fs); + res = follow_path(&dj, path); /* Follow the file path */ + if (res == FR_OK && (dj.fn[NSFLAG] & (NS_DOT | NS_NONAME))) res = FR_INVALID_NAME; /* Check object validity */ + if (res == FR_OK) { + mask &= AM_RDO|AM_HID|AM_SYS|AM_ARC; /* Valid attribute mask */ +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + fs->dirbuf[XDIR_Attr] = (attr & mask) | (fs->dirbuf[XDIR_Attr] & (BYTE)~mask); /* Apply attribute change */ + res = store_xdir(&dj); + } else +#endif + { + dj.dir[DIR_Attr] = (attr & mask) | (dj.dir[DIR_Attr] & (BYTE)~mask); /* Apply attribute change */ + fs->wflag = 1; + } + if (res == FR_OK) { + res = sync_fs(fs); + } + } + FREE_NAMBUF(); + } + + LEAVE_FF(fs, res); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Change Timestamp */ 
+/*-----------------------------------------------------------------------*/ + +FRESULT f_utime ( + const TCHAR* path, /* Pointer to the file/directory name */ + const FILINFO* fno /* Pointer to the time stamp to be set */ +) +{ + FRESULT res; + DIR dj; + FATFS *fs; + DEF_NAMBUF + + + res = find_volume(&path, &fs, FA_WRITE); /* Get logical drive */ + dj.obj.fs = fs; + if (res == FR_OK) { + INIT_NAMBUF(fs); + res = follow_path(&dj, path); /* Follow the file path */ + if (res == FR_OK && (dj.fn[NSFLAG] & (NS_DOT | NS_NONAME))) res = FR_INVALID_NAME; /* Check object validity */ + if (res == FR_OK) { +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + st_dword(fs->dirbuf + XDIR_ModTime, (DWORD)fno->fdate << 16 | fno->ftime); + res = store_xdir(&dj); + } else +#endif + { + st_dword(dj.dir + DIR_ModTime, (DWORD)fno->fdate << 16 | fno->ftime); + fs->wflag = 1; + } + if (res == FR_OK) { + res = sync_fs(fs); + } + } + FREE_NAMBUF(); + } + + LEAVE_FF(fs, res); +} + +#endif /* _USE_CHMOD && !_FS_READONLY */ + + + +#if _USE_LABEL +/*-----------------------------------------------------------------------*/ +/* Get Volume Label */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_getlabel ( + const TCHAR* path, /* Path name of the logical drive number */ + TCHAR* label, /* Pointer to a buffer to return the volume label */ + DWORD* vsn /* Pointer to a variable to return the volume serial number */ +) +{ + FRESULT res; + DIR dj; + FATFS *fs; + UINT si, di; +#if _LFN_UNICODE || _FS_EXFAT + WCHAR w; +#endif + + /* Get logical drive */ + res = find_volume(&path, &fs, 0); + + /* Get volume label */ + if (res == FR_OK && label) { + dj.obj.fs = fs; dj.obj.sclust = 0; /* Open root directory */ + res = dir_sdi(&dj, 0); + if (res == FR_OK) { + res = dir_read(&dj, 1); /* Find a volume label entry */ + if (res == FR_OK) { +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + for (si = di = 0; si < dj.dir[XDIR_NumLabel]; si++) { /* Extract volume label from 83 entry */ + w = ld_word(dj.dir + XDIR_Label + si * 2); +#if _LFN_UNICODE + label[di++] = w; +#else + w = ff_convert(w, 0); /* Unicode -> OEM */ + if (w == 0) w = '?'; /* Replace wrong character */ + if (_DF1S && w >= 0x100) label[di++] = (char)(w >> 8); + label[di++] = (char)w; +#endif + } + label[di] = 0; + } else +#endif + { + si = di = 0; /* Extract volume label from AM_VOL entry with code comversion */ + do { +#if _LFN_UNICODE + w = (si < 11) ? 
dj.dir[si++] : ' '; + if (IsDBCS1(w) && si < 11 && IsDBCS2(dj.dir[si])) { + w = w << 8 | dj.dir[si++]; + } + label[di++] = ff_convert(w, 1); /* OEM -> Unicode */ +#else + label[di++] = dj.dir[si++]; +#endif + } while (di < 11); + do { /* Truncate trailing spaces */ + label[di] = 0; + if (di == 0) break; + } while (label[--di] == ' '); + } + } + } + if (res == FR_NO_FILE) { /* No label entry and return nul string */ + label[0] = 0; + res = FR_OK; + } + } + + /* Get volume serial number */ + if (res == FR_OK && vsn) { + res = move_window(fs, fs->volbase); + if (res == FR_OK) { + switch (fs->fs_type) { + case FS_EXFAT: + di = BPB_VolIDEx; break; + + case FS_FAT32: + di = BS_VolID32; break; + + default: + di = BS_VolID; + } + *vsn = ld_dword(fs->win + di); + } + } + + LEAVE_FF(fs, res); +} + + + +#if !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* Set Volume Label */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_setlabel ( + const TCHAR* label /* Pointer to the volume label to set */ +) +{ + FRESULT res; + DIR dj; + FATFS *fs; + BYTE dirvn[22]; + UINT i, j, slen; + WCHAR w; + static const char badchr[] = "\"*+,.:;<=>\?[]|\x7F"; + + + /* Get logical drive */ + res = find_volume(&label, &fs, FA_WRITE); + if (res != FR_OK) LEAVE_FF(fs, res); + dj.obj.fs = fs; + + /* Get length of given volume label */ + for (slen = 0; (UINT)label[slen] >= ' '; slen++) ; /* Get name length */ + +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { /* On the exFAT volume */ + for (i = j = 0; i < slen; ) { /* Create volume label in directory form */ + w = label[i++]; +#if !_LFN_UNICODE + if (IsDBCS1(w)) { + w = (i < slen && IsDBCS2(label[i])) ? w << 8 | (BYTE)label[i++] : 0; + } + w = ff_convert(w, 1); +#endif + if (w == 0 || chk_chr(badchr, w) || j == 22) { /* Check validity check validity of the volume label */ + LEAVE_FF(fs, FR_INVALID_NAME); + } + st_word(dirvn + j, w); j += 2; + } + slen = j; + } else +#endif + { /* On the FAT12/16/32 volume */ + for ( ; slen && label[slen - 1] == ' '; slen--) ; /* Remove trailing spaces */ + if (slen) { /* Is there a volume label to be set? */ + dirvn[0] = 0; i = j = 0; /* Create volume label in directory form */ + do { +#if _LFN_UNICODE + w = ff_convert(ff_wtoupper(label[i++]), 0); +#else + w = (BYTE)label[i++]; + if (IsDBCS1(w)) { + w = (j < 10 && i < slen && IsDBCS2(label[i])) ? w << 8 | (BYTE)label[i++] : 0; + } +#if _USE_LFN != 0 + w = ff_convert(ff_wtoupper(ff_convert(w, 1)), 0); +#else + if (IsLower(w)) w -= 0x20; /* To upper ASCII characters */ +#ifdef _EXCVT + if (w >= 0x80) w = ExCvt[w - 0x80]; /* To upper extended characters (SBCS cfg) */ +#else + if (!_DF1S && w >= 0x80) w = 0; /* Reject extended characters (ASCII cfg) */ +#endif +#endif +#endif + if (w == 0 || chk_chr(badchr, w) || j >= (UINT)((w >= 0x100) ? 
10 : 11)) { /* Reject invalid characters for volume label */ + LEAVE_FF(fs, FR_INVALID_NAME); + } + if (w >= 0x100) dirvn[j++] = (BYTE)(w >> 8); + dirvn[j++] = (BYTE)w; + } while (i < slen); + while (j < 11) dirvn[j++] = ' '; /* Fill remaining name field */ + if (dirvn[0] == DDEM) LEAVE_FF(fs, FR_INVALID_NAME); /* Reject illegal name (heading DDEM) */ + } + } + + /* Set volume label */ + dj.obj.sclust = 0; /* Open root directory */ + res = dir_sdi(&dj, 0); + if (res == FR_OK) { + res = dir_read(&dj, 1); /* Get volume label entry */ + if (res == FR_OK) { + if (_FS_EXFAT && fs->fs_type == FS_EXFAT) { + dj.dir[XDIR_NumLabel] = (BYTE)(slen / 2); /* Change the volume label */ + mem_cpy(dj.dir + XDIR_Label, dirvn, slen); + } else { + if (slen) { + mem_cpy(dj.dir, dirvn, 11); /* Change the volume label */ + } else { + dj.dir[DIR_Name] = DDEM; /* Remove the volume label */ + } + } + fs->wflag = 1; + res = sync_fs(fs); + } else { /* No volume label entry is found or error */ + if (res == FR_NO_FILE) { + res = FR_OK; + if (slen) { /* Create a volume label entry */ + res = dir_alloc(&dj, 1); /* Allocate an entry */ + if (res == FR_OK) { + mem_set(dj.dir, 0, SZDIRE); /* Clear the entry */ + if (_FS_EXFAT && fs->fs_type == FS_EXFAT) { + dj.dir[XDIR_Type] = 0x83; /* Create 83 entry */ + dj.dir[XDIR_NumLabel] = (BYTE)(slen / 2); + mem_cpy(dj.dir + XDIR_Label, dirvn, slen); + } else { + dj.dir[DIR_Attr] = AM_VOL; /* Create volume label entry */ + mem_cpy(dj.dir, dirvn, 11); + } + fs->wflag = 1; + res = sync_fs(fs); + } + } + } + } + } + + LEAVE_FF(fs, res); +} + +#endif /* !_FS_READONLY */ +#endif /* _USE_LABEL */ + + + +#if _USE_EXPAND && !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* Allocate a Contiguous Blocks to the File */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_expand ( + FIL* fp, /* Pointer to the file object */ + FSIZE_t fsz, /* File size to be expanded to */ + BYTE opt /* Operation mode 0:Find and prepare or 1:Find and allocate */ +) +{ + FRESULT res; + FATFS *fs; + DWORD n, clst, stcl, scl, ncl, tcl, lclst; + + + res = validate(&fp->obj, &fs); /* Check validity of the file object */ + if (res != FR_OK || (res = (FRESULT)fp->err) != FR_OK) LEAVE_FF(fs, res); + if (fsz == 0 || fp->obj.objsize != 0 || !(fp->flag & FA_WRITE)) LEAVE_FF(fs, FR_DENIED); +#if _FS_EXFAT + if (fs->fs_type != FS_EXFAT && fsz >= 0x100000000) LEAVE_FF(fs, FR_DENIED); /* Check if in size limit */ +#endif + n = (DWORD)fs->csize * SS(fs); /* Cluster size */ + tcl = (DWORD)(fsz / n) + ((fsz & (n - 1)) ? 
1 : 0); /* Number of clusters required */ + stcl = fs->last_clst; lclst = 0; + if (stcl < 2 || stcl >= fs->n_fatent) stcl = 2; + +#if _FS_EXFAT + if (fs->fs_type == FS_EXFAT) { + scl = find_bitmap(fs, stcl, tcl); /* Find a contiguous cluster block */ + if (scl == 0) res = FR_DENIED; /* No contiguous cluster block was found */ + if (scl == 0xFFFFFFFF) res = FR_DISK_ERR; + if (res == FR_OK) { /* A contiguous free area is found */ + if (opt) { /* Allocate it now */ + res = change_bitmap(fs, scl, tcl, 1); /* Mark the cluster block 'in use' */ + lclst = scl + tcl - 1; + } else { /* Set it as suggested point for next allocation */ + lclst = scl - 1; + } + } + } else +#endif + { + scl = clst = stcl; ncl = 0; + for (;;) { /* Find a contiguous cluster block */ + n = get_fat(&fp->obj, clst); + if (++clst >= fs->n_fatent) clst = 2; + if (n == 1) { res = FR_INT_ERR; break; } + if (n == 0xFFFFFFFF) { res = FR_DISK_ERR; break; } + if (n == 0) { /* Is it a free cluster? */ + if (++ncl == tcl) break; /* Break if a contiguous cluster block is found */ + } else { + scl = clst; ncl = 0; /* Not a free cluster */ + } + if (clst == stcl) { res = FR_DENIED; break; } /* No contiguous cluster? */ + } + if (res == FR_OK) { /* A contiguous free area is found */ + if (opt) { /* Allocate it now */ + for (clst = scl, n = tcl; n; clst++, n--) { /* Create a cluster chain on the FAT */ + res = put_fat(fs, clst, (n == 1) ? 0xFFFFFFFF : clst + 1); + if (res != FR_OK) break; + lclst = clst; + } + } else { /* Set it as suggested point for next allocation */ + lclst = scl - 1; + } + } + } + + if (res == FR_OK) { + fs->last_clst = lclst; /* Set suggested start cluster to start next */ + if (opt) { /* Is it allocated now? */ + fp->obj.sclust = scl; /* Update object allocation information */ + fp->obj.objsize = fsz; + if (_FS_EXFAT) fp->obj.stat = 2; /* Set status 'contiguous chain' */ + fp->flag |= FA_MODIFIED; + if (fs->free_clst <= fs->n_fatent - 2) { /* Update FSINFO */ + fs->free_clst -= tcl; + fs->fsi_flag |= 1; + } + } + } + + LEAVE_FF(fs, res); +} + +#endif /* _USE_EXPAND && !_FS_READONLY */ + + + +#if _USE_FORWARD +/*-----------------------------------------------------------------------*/ +/* Forward data to the stream directly */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_forward ( + FIL* fp, /* Pointer to the file object */ + UINT (*func)(const BYTE*,UINT), /* Pointer to the streaming function */ + UINT btf, /* Number of bytes to forward */ + UINT* bf /* Pointer to number of bytes forwarded */ +) +{ + FRESULT res; + FATFS *fs; + DWORD clst, sect; + FSIZE_t remain; + UINT rcnt, csect; + BYTE *dbuf; + + + *bf = 0; /* Clear transfer byte counter */ + res = validate(&fp->obj, &fs); /* Check validity of the file object */ + if (res != FR_OK || (res = (FRESULT)fp->err) != FR_OK) LEAVE_FF(fs, res); + if (!(fp->flag & FA_READ)) LEAVE_FF(fs, FR_DENIED); /* Check access mode */ + + remain = fp->obj.objsize - fp->fptr; + if (btf > remain) btf = (UINT)remain; /* Truncate btf by remaining bytes */ + + for ( ; btf && (*func)(0, 0); /* Repeat until all data transferred or stream goes busy */ + fp->fptr += rcnt, *bf += rcnt, btf -= rcnt) { + csect = (UINT)(fp->fptr / SS(fs) & (fs->csize - 1)); /* Sector offset in the cluster */ + if (fp->fptr % SS(fs) == 0) { /* On the sector boundary? */ + if (csect == 0) { /* On the cluster boundary? */ + clst = (fp->fptr == 0) ? /* On the top of the file? 
*/ + fp->obj.sclust : get_fat(&fp->obj, fp->clust); + if (clst <= 1) ABORT(fs, FR_INT_ERR); + if (clst == 0xFFFFFFFF) ABORT(fs, FR_DISK_ERR); + fp->clust = clst; /* Update current cluster */ + } + } + sect = clust2sect(fs, fp->clust); /* Get current data sector */ + if (!sect) ABORT(fs, FR_INT_ERR); + sect += csect; +#if _FS_TINY + if (move_window(fs, sect) != FR_OK) ABORT(fs, FR_DISK_ERR); /* Move sector window to the file data */ + dbuf = fs->win; +#else + if (fp->sect != sect) { /* Fill sector cache with file data */ +#if !_FS_READONLY + if (fp->flag & FA_DIRTY) { /* Write-back dirty sector cache */ + if (disk_write(fs->drv, fp->buf, fp->sect, 1) != RES_OK) ABORT(fs, FR_DISK_ERR); + fp->flag &= (BYTE)~FA_DIRTY; + } +#endif + if (disk_read(fs->drv, fp->buf, sect, 1) != RES_OK) ABORT(fs, FR_DISK_ERR); + } + dbuf = fp->buf; +#endif + fp->sect = sect; + rcnt = SS(fs) - (UINT)fp->fptr % SS(fs); /* Number of bytes left in the sector */ + if (rcnt > btf) rcnt = btf; /* Clip it by btr if needed */ + rcnt = (*func)(dbuf + ((UINT)fp->fptr % SS(fs)), rcnt); /* Forward the file data */ + if (!rcnt) ABORT(fs, FR_INT_ERR); + } + + LEAVE_FF(fs, FR_OK); +} +#endif /* _USE_FORWARD */ + + + +#if _USE_MKFS && !_FS_READONLY +/*-----------------------------------------------------------------------*/ +/* Create an FAT/exFAT volume */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_mkfs ( + const TCHAR* path, /* Logical drive number */ + BYTE opt, /* Format option */ + DWORD au, /* Size of allocation unit (cluster) [byte] */ + void* work, /* Pointer to working buffer */ + UINT len /* Size of working buffer */ +) +{ + const UINT n_fats = 1; /* Number of FATs for FAT12/16/32 volume (1 or 2) */ + const UINT n_rootdir = 512; /* Number of root directory entries for FAT12/16 volume */ + static const WORD cst[] = {1, 4, 16, 64, 256, 512, 0}; /* Cluster size boundary for FAT12/16 volume (4Ks unit) */ + static const WORD cst32[] = {1, 2, 4, 8, 16, 32, 0}; /* Cluster size boundary for FAT32 volume (128Ks unit) */ + BYTE fmt, sys, *buf, *pte, pdrv, part; + WORD ss; + DWORD szb_buf, sz_buf, sz_blk, n_clst, pau, sect, nsect, n; + DWORD b_vol, b_fat, b_data; /* Base LBA for volume, fat, data */ + DWORD sz_vol, sz_rsv, sz_fat, sz_dir; /* Size for volume, fat, dir, data */ + UINT i; + int vol; + DSTATUS stat; +#if _USE_TRIM || _FS_EXFAT + DWORD tbl[3]; +#endif + + + /* Check mounted drive and clear work area */ + vol = get_ldnumber(&path); /* Get target logical drive */ + if (vol < 0) return FR_INVALID_DRIVE; + if (FatFs[vol]) FatFs[vol]->fs_type = 0; /* Clear the volume */ + pdrv = LD2PD(vol); /* Physical drive */ + part = LD2PT(vol); /* Partition (0:create as new, 1-4:get from partition table) */ + + /* Check physical drive status */ + stat = disk_initialize(pdrv); + if (stat & STA_NOINIT) return FR_NOT_READY; + if (stat & STA_PROTECT) return FR_WRITE_PROTECTED; + if (disk_ioctl(pdrv, GET_BLOCK_SIZE, &sz_blk) != RES_OK || !sz_blk || sz_blk > 32768 || (sz_blk & (sz_blk - 1))) sz_blk = 1; /* Erase block to align data area */ +#if _MAX_SS != _MIN_SS /* Get sector size of the medium if variable sector size cfg. 
*/ + if (disk_ioctl(pdrv, GET_SECTOR_SIZE, &ss) != RES_OK) return FR_DISK_ERR; + if (ss > _MAX_SS || ss < _MIN_SS || (ss & (ss - 1))) return FR_DISK_ERR; +#else + ss = _MAX_SS; +#endif + if ((au != 0 && au < ss) || au > 0x1000000 || (au & (au - 1))) return FR_INVALID_PARAMETER; /* Check if au is valid */ + au /= ss; /* Cluster size in unit of sector */ + + /* Get working buffer */ + buf = (BYTE*)work; /* Working buffer */ + sz_buf = len / ss; /* Size of working buffer (sector) */ + szb_buf = sz_buf * ss; /* Size of working buffer (byte) */ + if (!szb_buf) return FR_MKFS_ABORTED; + + /* Determine where the volume to be located (b_vol, sz_vol) */ + if (_MULTI_PARTITION && part != 0) { + /* Get partition information from partition table in the MBR */ + if (disk_read(pdrv, buf, 0, 1) != RES_OK) return FR_DISK_ERR; /* Load MBR */ + if (ld_word(buf + BS_55AA) != 0xAA55) return FR_MKFS_ABORTED; /* Check if MBR is valid */ + pte = buf + (MBR_Table + (part - 1) * SZ_PTE); + if (!pte[PTE_System]) return FR_MKFS_ABORTED; /* No partition? */ + b_vol = ld_dword(pte + PTE_StLba); /* Get volume start sector */ + sz_vol = ld_dword(pte + PTE_SizLba); /* Get volume size */ + } else { + /* Create a single-partition in this function */ + if (disk_ioctl(pdrv, GET_SECTOR_COUNT, &sz_vol) != RES_OK) return FR_DISK_ERR; + b_vol = (opt & FM_SFD) ? 0 : 63; /* Volume start sector */ + if (sz_vol < b_vol) return FR_MKFS_ABORTED; + sz_vol -= b_vol; /* Volume size */ + } + if (sz_vol < 128) return FR_MKFS_ABORTED; /* Check if volume size is >=128s */ + + /* Pre-determine the FAT type */ + do { + if (_FS_EXFAT && (opt & FM_EXFAT)) { /* exFAT possible? */ + if ((opt & FM_ANY) == FM_EXFAT || sz_vol >= 0x4000000 || au > 128) { /* exFAT only, vol >= 64Ms or au > 128s ? */ + fmt = FS_EXFAT; break; + } + } + if (au > 128) return FR_INVALID_PARAMETER; /* Too large au for FAT/FAT32 */ + if (opt & FM_FAT32) { /* FAT32 possible? */ + if ((opt & FM_ANY) == FM_FAT32 || !(opt & FM_FAT)) { /* FAT32 only or no-FAT? */ + fmt = FS_FAT32; break; + } + } + if (!(opt & FM_FAT)) return FR_INVALID_PARAMETER; /* no-FAT? */ + fmt = FS_FAT16; + } while (0); + +#if _FS_EXFAT + if (fmt == FS_EXFAT) { /* Create an exFAT volume */ + DWORD szb_bit, szb_case, sum, nb, cl; + WCHAR ch, si; + UINT j, st; + BYTE b; + + if (sz_vol < 0x1000) return FR_MKFS_ABORTED; /* Too small volume? */ +#if _USE_TRIM + tbl[0] = b_vol; tbl[1] = b_vol + sz_vol - 1; /* Inform the device the volume area may be erased */ + disk_ioctl(pdrv, CTRL_TRIM, tbl); +#endif + /* Determine FAT location, data location and number of clusters */ + if (!au) { /* au auto-selection */ + au = 8; + if (sz_vol >= 0x80000) au = 64; /* >= 512Ks */ + if (sz_vol >= 0x4000000) au = 256; /* >= 64Ms */ + } + b_fat = b_vol + 32; /* FAT start at offset 32 */ + sz_fat = ((sz_vol / au + 2) * 4 + ss - 1) / ss; /* Number of FAT sectors */ + b_data = (b_fat + sz_fat + sz_blk - 1) & ~(sz_blk - 1); /* Align data area to the erase block boundary */ + if (b_data >= sz_vol / 2) return FR_MKFS_ABORTED; /* Too small volume? */ + n_clst = (sz_vol - (b_data - b_vol)) / au; /* Number of clusters */ + if (n_clst <16) return FR_MKFS_ABORTED; /* Too few clusters? */ + if (n_clst > MAX_EXFAT) return FR_MKFS_ABORTED; /* Too many clusters? 
*/ + + szb_bit = (n_clst + 7) / 8; /* Size of allocation bitmap */ + tbl[0] = (szb_bit + au * ss - 1) / (au * ss); /* Number of allocation bitmap clusters */ + + /* Create a compressed up-case table */ + sect = b_data + au * tbl[0]; /* Table start sector */ + sum = 0; /* Table checksum to be stored in the 82 entry */ + st = si = i = j = szb_case = 0; + do { + switch (st) { + case 0: + ch = ff_wtoupper(si); /* Get an up-case char */ + if (ch != si) { + si++; break; /* Store the up-case char if exist */ + } + for (j = 1; (WCHAR)(si + j) && (WCHAR)(si + j) == ff_wtoupper((WCHAR)(si + j)); j++) ; /* Get run length of no-case block */ + if (j >= 128) { + ch = 0xFFFF; st = 2; break; /* Compress the no-case block if run is >= 128 */ + } + st = 1; /* Do not compress short run */ + /* go to next case */ + case 1: + ch = si++; /* Fill the short run */ + if (--j == 0) st = 0; + break; + + default: + ch = (WCHAR)j; si += j; /* Number of chars to skip */ + st = 0; + } + sum = xsum32(buf[i + 0] = (BYTE)ch, sum); /* Put it into the write buffer */ + sum = xsum32(buf[i + 1] = (BYTE)(ch >> 8), sum); + i += 2; szb_case += 2; + if (!si || i == szb_buf) { /* Write buffered data when buffer full or end of process */ + n = (i + ss - 1) / ss; + if (disk_write(pdrv, buf, sect, n) != RES_OK) return FR_DISK_ERR; + sect += n; i = 0; + } + } while (si); + tbl[1] = (szb_case + au * ss - 1) / (au * ss); /* Number of up-case table clusters */ + tbl[2] = 1; /* Number of root dir clusters */ + + /* Initialize the allocation bitmap */ + sect = b_data; nsect = (szb_bit + ss - 1) / ss; /* Start of bitmap and number of sectors */ + nb = tbl[0] + tbl[1] + tbl[2]; /* Number of clusters in-use by system */ + do { + mem_set(buf, 0, szb_buf); + for (i = 0; nb >= 8 && i < szb_buf; buf[i++] = 0xFF, nb -= 8) ; + for (b = 1; nb && i < szb_buf; buf[i] |= b, b <<= 1, nb--) ; + n = (nsect > sz_buf) ? sz_buf : nsect; /* Write the buffered data */ + if (disk_write(pdrv, buf, sect, n) != RES_OK) return FR_DISK_ERR; + sect += n; nsect -= n; + } while (nsect); + + /* Initialize the FAT */ + sect = b_fat; nsect = sz_fat; /* Start of FAT and number of FAT sectors */ + j = nb = cl = 0; + do { + mem_set(buf, 0, szb_buf); i = 0; /* Clear work area and reset write index */ + if (cl == 0) { /* Set entry 0 and 1 */ + st_dword(buf + i, 0xFFFFFFF8); i += 4; cl++; + st_dword(buf + i, 0xFFFFFFFF); i += 4; cl++; + } + do { /* Create chains of bitmap, up-case and root dir */ + while (nb && i < szb_buf) { /* Create a chain */ + st_dword(buf + i, (nb > 1) ? cl + 1 : 0xFFFFFFFF); + i += 4; cl++; nb--; + } + if (!nb && j < 3) nb = tbl[j++]; /* Next chain */ + } while (nb && i < szb_buf); + n = (nsect > sz_buf) ? sz_buf : nsect; /* Write the buffered data */ + if (disk_write(pdrv, buf, sect, n) != RES_OK) return FR_DISK_ERR; + sect += n; nsect -= n; + } while (nsect); + + /* Initialize the root directory */ + mem_set(buf, 0, szb_buf); + buf[SZDIRE * 0 + 0] = 0x83; /* 83 entry (volume label) */ + buf[SZDIRE * 1 + 0] = 0x81; /* 81 entry (allocation bitmap) */ + st_dword(buf + SZDIRE * 1 + 20, 2); + st_dword(buf + SZDIRE * 1 + 24, szb_bit); + buf[SZDIRE * 2 + 0] = 0x82; /* 82 entry (up-case table) */ + st_dword(buf + SZDIRE * 2 + 4, sum); + st_dword(buf + SZDIRE * 2 + 20, 2 + tbl[0]); + st_dword(buf + SZDIRE * 2 + 24, szb_case); + sect = b_data + au * (tbl[0] + tbl[1]); nsect = au; /* Start of the root directory and number of sectors */ + do { /* Fill root directory sectors */ + n = (nsect > sz_buf) ? 
sz_buf : nsect; + if (disk_write(pdrv, buf, sect, n) != RES_OK) return FR_DISK_ERR; + mem_set(buf, 0, ss); + sect += n; nsect -= n; + } while (nsect); + + /* Create two set of the exFAT VBR blocks */ + sect = b_vol; + for (n = 0; n < 2; n++) { + /* Main record (+0) */ + mem_set(buf, 0, ss); + mem_cpy(buf + BS_JmpBoot, "\xEB\x76\x90" "EXFAT ", 11); /* Boot jump code (x86), OEM name */ + st_dword(buf + BPB_VolOfsEx, b_vol); /* Volume offset in the physical drive [sector] */ + st_dword(buf + BPB_TotSecEx, sz_vol); /* Volume size [sector] */ + st_dword(buf + BPB_FatOfsEx, b_fat - b_vol); /* FAT offset [sector] */ + st_dword(buf + BPB_FatSzEx, sz_fat); /* FAT size [sector] */ + st_dword(buf + BPB_DataOfsEx, b_data - b_vol); /* Data offset [sector] */ + st_dword(buf + BPB_NumClusEx, n_clst); /* Number of clusters */ + st_dword(buf + BPB_RootClusEx, 2 + tbl[0] + tbl[1]); /* Root dir cluster # */ + st_dword(buf + BPB_VolIDEx, GET_FATTIME()); /* VSN */ + st_word(buf + BPB_FSVerEx, 0x100); /* File system version (1.00) */ + for (buf[BPB_BytsPerSecEx] = 0, i = ss; i >>= 1; buf[BPB_BytsPerSecEx]++) ; /* Log2 of sector size [byte] */ + for (buf[BPB_SecPerClusEx] = 0, i = au; i >>= 1; buf[BPB_SecPerClusEx]++) ; /* Log2 of cluster size [sector] */ + buf[BPB_NumFATsEx] = 1; /* Number of FATs */ + buf[BPB_DrvNumEx] = 0x80; /* Drive number (for int13) */ + st_word(buf + BS_BootCodeEx, 0xFEEB); /* Boot code (x86) */ + st_word(buf + BS_55AA, 0xAA55); /* Signature (placed here regardless of sector size) */ + for (i = sum = 0; i < ss; i++) { /* VBR checksum */ + if (i != BPB_VolFlagEx && i != BPB_VolFlagEx + 1 && i != BPB_PercInUseEx) sum = xsum32(buf[i], sum); + } + if (disk_write(pdrv, buf, sect++, 1) != RES_OK) return FR_DISK_ERR; + /* Extended bootstrap record (+1..+8) */ + mem_set(buf, 0, ss); + st_word(buf + ss - 2, 0xAA55); /* Signature (placed at end of sector) */ + for (j = 1; j < 9; j++) { + for (i = 0; i < ss; sum = xsum32(buf[i++], sum)) ; /* VBR checksum */ + if (disk_write(pdrv, buf, sect++, 1) != RES_OK) return FR_DISK_ERR; + } + /* OEM/Reserved record (+9..+10) */ + mem_set(buf, 0, ss); + for ( ; j < 11; j++) { + for (i = 0; i < ss; sum = xsum32(buf[i++], sum)) ; /* VBR checksum */ + if (disk_write(pdrv, buf, sect++, 1) != RES_OK) return FR_DISK_ERR; + } + /* Sum record (+11) */ + for (i = 0; i < ss; i += 4) st_dword(buf + i, sum); /* Fill with checksum value */ + if (disk_write(pdrv, buf, sect++, 1) != RES_OK) return FR_DISK_ERR; + } + + } else +#endif /* _FS_EXFAT */ + { /* Create an FAT12/16/32 volume */ + do { + pau = au; + /* Pre-determine number of clusters and FAT sub-type */ + if (fmt == FS_FAT32) { /* FAT32 volume */ + if (!pau) { /* au auto-selection */ + n = sz_vol / 0x20000; /* Volume size in unit of 128KS */ + for (i = 0, pau = 1; cst32[i] && cst32[i] <= n; i++, pau <<= 1) ; /* Get from table */ + } + n_clst = sz_vol / pau; /* Number of clusters */ + sz_fat = (n_clst * 4 + 8 + ss - 1) / ss; /* FAT size [sector] */ + sz_rsv = 32; /* Number of reserved sectors */ + sz_dir = 0; /* No static directory */ + if (n_clst <= MAX_FAT16 || n_clst > MAX_FAT32) return FR_MKFS_ABORTED; + } else { /* FAT12/16 volume */ + if (!pau) { /* au auto-selection */ + n = sz_vol / 0x1000; /* Volume size in unit of 4KS */ + for (i = 0, pau = 1; cst[i] && cst[i] <= n; i++, pau <<= 1) ; /* Get from table */ + } + n_clst = sz_vol / pau; + if (n_clst > MAX_FAT12) { + n = n_clst * 2 + 4; /* FAT size [byte] */ + } else { + fmt = FS_FAT12; + n = (n_clst * 3 + 1) / 2 + 3; /* FAT size [byte] */ + } + sz_fat = (n + ss 
- 1) / ss; /* FAT size [sector] */ + sz_rsv = 1; /* Number of reserved sectors */ + sz_dir = (DWORD)n_rootdir * SZDIRE / ss; /* Rootdir size [sector] */ + } + b_fat = b_vol + sz_rsv; /* FAT base */ + b_data = b_fat + sz_fat * n_fats + sz_dir; /* Data base */ + + /* Align data base to erase block boundary (for flash memory media) */ + n = ((b_data + sz_blk - 1) & ~(sz_blk - 1)) - b_data; /* Next nearest erase block from current data base */ + if (fmt == FS_FAT32) { /* FAT32: Move FAT base */ + sz_rsv += n; b_fat += n; + } else { /* FAT12/16: Expand FAT size */ + sz_fat += n / n_fats; + } + + /* Determine number of clusters and final check of validity of the FAT sub-type */ + if (sz_vol < b_data + pau * 16 - b_vol) return FR_MKFS_ABORTED; /* Too small volume */ + n_clst = (sz_vol - sz_rsv - sz_fat * n_fats - sz_dir) / pau; + if (fmt == FS_FAT32) { + if (n_clst <= MAX_FAT16) { /* Too few clusters for FAT32 */ + if (!au && (au = pau / 2) != 0) continue; /* Adjust cluster size and retry */ + return FR_MKFS_ABORTED; + } + } + if (fmt == FS_FAT16) { + if (n_clst > MAX_FAT16) { /* Too many clusters for FAT16 */ + if (!au && (pau * 2) <= 64) { + au = pau * 2; continue; /* Adjust cluster size and retry */ + } + if ((opt & FM_FAT32)) { + fmt = FS_FAT32; continue; /* Switch type to FAT32 and retry */ + } + if (!au && (au = pau * 2) <= 128) continue; /* Adjust cluster size and retry */ + return FR_MKFS_ABORTED; + } + if (n_clst <= MAX_FAT12) { /* Too few clusters for FAT16 */ + if (!au && (au = pau * 2) <= 128) continue; /* Adjust cluster size and retry */ + return FR_MKFS_ABORTED; + } + } + if (fmt == FS_FAT12 && n_clst > MAX_FAT12) return FR_MKFS_ABORTED; /* Too many clusters for FAT12 */ + + /* Ok, it is the valid cluster configuration */ + break; + } while (1); + +#if _USE_TRIM + tbl[0] = b_vol; tbl[1] = b_vol + sz_vol - 1; /* Inform the device the volume area can be erased */ + disk_ioctl(pdrv, CTRL_TRIM, tbl); +#endif + /* Create FAT VBR */ + mem_set(buf, 0, ss); + mem_cpy(buf + BS_JmpBoot, "\xEB\xFE\x90" "MSDOS5.0", 11);/* Boot jump code (x86), OEM name */ + st_word(buf + BPB_BytsPerSec, ss); /* Sector size [byte] */ + buf[BPB_SecPerClus] = (BYTE)pau; /* Cluster size [sector] */ + st_word(buf + BPB_RsvdSecCnt, (WORD)sz_rsv); /* Size of reserved area */ + buf[BPB_NumFATs] = (BYTE)n_fats; /* Number of FATs */ + st_word(buf + BPB_RootEntCnt, (WORD)((fmt == FS_FAT32) ? 
0 : n_rootdir)); /* Number of root directory entries */ + if (sz_vol < 0x10000) { + st_word(buf + BPB_TotSec16, (WORD)sz_vol); /* Volume size in 16-bit LBA */ + } else { + st_dword(buf + BPB_TotSec32, sz_vol); /* Volume size in 32-bit LBA */ + } + buf[BPB_Media] = 0xF8; /* Media descriptor byte */ + st_word(buf + BPB_SecPerTrk, 63); /* Number of sectors per track (for int13) */ + st_word(buf + BPB_NumHeads, 255); /* Number of heads (for int13) */ + st_dword(buf + BPB_HiddSec, b_vol); /* Volume offset in the physical drive [sector] */ + if (fmt == FS_FAT32) { + st_dword(buf + BS_VolID32, GET_FATTIME()); /* VSN */ + st_dword(buf + BPB_FATSz32, sz_fat); /* FAT size [sector] */ + st_dword(buf + BPB_RootClus32, 2); /* Root directory cluster # (2) */ + st_word(buf + BPB_FSInfo32, 1); /* Offset of FSINFO sector (VBR + 1) */ + st_word(buf + BPB_BkBootSec32, 6); /* Offset of backup VBR (VBR + 6) */ + buf[BS_DrvNum32] = 0x80; /* Drive number (for int13) */ + buf[BS_BootSig32] = 0x29; /* Extended boot signature */ + mem_cpy(buf + BS_VolLab32, "NO NAME " "FAT32 ", 19); /* Volume label, FAT signature */ + } else { + st_dword(buf + BS_VolID, GET_FATTIME()); /* VSN */ + st_word(buf + BPB_FATSz16, (WORD)sz_fat); /* FAT size [sector] */ + buf[BS_DrvNum] = 0x80; /* Drive number (for int13) */ + buf[BS_BootSig] = 0x29; /* Extended boot signature */ + mem_cpy(buf + BS_VolLab, "NO NAME " "FAT ", 19); /* Volume label, FAT signature */ + } + st_word(buf + BS_55AA, 0xAA55); /* Signature (offset is fixed here regardless of sector size) */ + if (disk_write(pdrv, buf, b_vol, 1) != RES_OK) return FR_DISK_ERR; /* Write it to the VBR sector */ + + /* Create FSINFO record if needed */ + if (fmt == FS_FAT32) { + disk_write(pdrv, buf, b_vol + 6, 1); /* Write backup VBR (VBR + 6) */ + mem_set(buf, 0, ss); + st_dword(buf + FSI_LeadSig, 0x41615252); + st_dword(buf + FSI_StrucSig, 0x61417272); + st_dword(buf + FSI_Free_Count, n_clst - 1); /* Number of free clusters */ + st_dword(buf + FSI_Nxt_Free, 2); /* Last allocated cluster# */ + st_word(buf + BS_55AA, 0xAA55); + disk_write(pdrv, buf, b_vol + 7, 1); /* Write backup FSINFO (VBR + 7) */ + disk_write(pdrv, buf, b_vol + 1, 1); /* Write original FSINFO (VBR + 1) */ + } + + /* Initialize FAT area */ + mem_set(buf, 0, (UINT)szb_buf); + sect = b_fat; /* FAT start sector */ + for (i = 0; i < n_fats; i++) { /* Initialize FATs each */ + if (fmt == FS_FAT32) { + st_dword(buf + 0, 0xFFFFFFF8); /* Entry 0 */ + st_dword(buf + 4, 0xFFFFFFFF); /* Entry 1 */ + st_dword(buf + 8, 0x0FFFFFFF); /* Entry 2 (root directory) */ + } else { + st_dword(buf + 0, (fmt == FS_FAT12) ? 0xFFFFF8 : 0xFFFFFFF8); /* Entry 0 and 1 */ + } + nsect = sz_fat; /* Number of FAT sectors */ + do { /* Fill FAT sectors */ + n = (nsect > sz_buf) ? sz_buf : nsect; + if (disk_write(pdrv, buf, sect, (UINT)n) != RES_OK) return FR_DISK_ERR; + mem_set(buf, 0, ss); + sect += n; nsect -= n; + } while (nsect); + } + + /* Initialize root directory (fill with zero) */ + nsect = (fmt == FS_FAT32) ? pau : sz_dir; /* Number of root directory sectors */ + do { + n = (nsect > sz_buf) ? sz_buf : nsect; + if (disk_write(pdrv, buf, sect, (UINT)n) != RES_OK) return FR_DISK_ERR; + sect += n; nsect -= n; + } while (nsect); + } + + /* Determine system ID in the partition table */ + if (_FS_EXFAT && fmt == FS_EXFAT) { + sys = 0x07; /* HPFS/NTFS/exFAT */ + } else { + if (fmt == FS_FAT32) { + sys = 0x0C; /* FAT32X */ + } else { + if (sz_vol >= 0x10000) { + sys = 0x06; /* FAT12/16 (>=64KS) */ + } else { + sys = (fmt == FS_FAT16) ? 
0x04 : 0x01; /* FAT16 (<64KS) : FAT12 (<64KS) */ + } + } + } + + /* Update partition information */ + if (_MULTI_PARTITION && part != 0) { /* Created in the existing partition */ + /* Update system ID in the partition table */ + if (disk_read(pdrv, buf, 0, 1) != RES_OK) return FR_DISK_ERR; /* Read the MBR */ + buf[MBR_Table + (part - 1) * SZ_PTE + PTE_System] = sys; /* Set system ID */ + if (disk_write(pdrv, buf, 0, 1) != RES_OK) return FR_DISK_ERR; /* Write it back to the MBR */ + } else { /* Created as a new single partition */ + if (!(opt & FM_SFD)) { /* Create partition table if in FDISK format */ + mem_set(buf, 0, ss); + st_word(buf + BS_55AA, 0xAA55); /* MBR signature */ + pte = buf + MBR_Table; /* Create partition table for single partition in the drive */ + pte[PTE_Boot] = 0; /* Boot indicator */ + pte[PTE_StHead] = 1; /* Start head */ + pte[PTE_StSec] = 1; /* Start sector */ + pte[PTE_StCyl] = 0; /* Start cylinder */ + pte[PTE_System] = sys; /* System type */ + n = (b_vol + sz_vol) / (63 * 255); /* (End CHS may be invalid) */ + pte[PTE_EdHead] = 254; /* End head */ + pte[PTE_EdSec] = (BYTE)(n >> 2 | 63); /* End sector */ + pte[PTE_EdCyl] = (BYTE)n; /* End cylinder */ + st_dword(pte + PTE_StLba, b_vol); /* Start offset in LBA */ + st_dword(pte + PTE_SizLba, sz_vol); /* Size in sectors */ + if (disk_write(pdrv, buf, 0, 1) != RES_OK) return FR_DISK_ERR; /* Write it to the MBR */ + } + } + + if (disk_ioctl(pdrv, CTRL_SYNC, 0) != RES_OK) return FR_DISK_ERR; + + return FR_OK; +} + + + +#if _MULTI_PARTITION +/*-----------------------------------------------------------------------*/ +/* Create partition table on the physical drive */ +/*-----------------------------------------------------------------------*/ + +FRESULT f_fdisk ( + BYTE pdrv, /* Physical drive number */ + const DWORD* szt, /* Pointer to the size table for each partitions */ + void* work /* Pointer to the working buffer */ +) +{ + UINT i, n, sz_cyl, tot_cyl, b_cyl, e_cyl, p_cyl; + BYTE s_hd, e_hd, *p, *buf = (BYTE*)work; + DSTATUS stat; + DWORD sz_disk, sz_part, s_part; + + + stat = disk_initialize(pdrv); + if (stat & STA_NOINIT) return FR_NOT_READY; + if (stat & STA_PROTECT) return FR_WRITE_PROTECTED; + if (disk_ioctl(pdrv, GET_SECTOR_COUNT, &sz_disk)) return FR_DISK_ERR; + + /* Determine the CHS without any consideration of the drive geometry */ + for (n = 16; n < 256 && sz_disk / n / 63 > 1024; n *= 2) ; + if (n == 256) n--; + e_hd = n - 1; + sz_cyl = 63 * n; + tot_cyl = sz_disk / sz_cyl; + + /* Create partition table */ + mem_set(buf, 0, _MAX_SS); + p = buf + MBR_Table; b_cyl = 0; + for (i = 0; i < 4; i++, p += SZ_PTE) { + p_cyl = (szt[i] <= 100U) ? 
(DWORD)tot_cyl * szt[i] / 100 : szt[i] / sz_cyl; /* Number of cylinders */ + if (!p_cyl) continue; + s_part = (DWORD)sz_cyl * b_cyl; + sz_part = (DWORD)sz_cyl * p_cyl; + if (i == 0) { /* Exclude first track of cylinder 0 */ + s_hd = 1; + s_part += 63; sz_part -= 63; + } else { + s_hd = 0; + } + e_cyl = b_cyl + p_cyl - 1; /* End cylinder */ + if (e_cyl >= tot_cyl) return FR_INVALID_PARAMETER; + + /* Set partition table */ + p[1] = s_hd; /* Start head */ + p[2] = (BYTE)((b_cyl >> 2) + 1); /* Start sector */ + p[3] = (BYTE)b_cyl; /* Start cylinder */ + p[4] = 0x07; /* System type (temporary setting) */ + p[5] = e_hd; /* End head */ + p[6] = (BYTE)((e_cyl >> 2) + 63); /* End sector */ + p[7] = (BYTE)e_cyl; /* End cylinder */ + st_dword(p + 8, s_part); /* Start sector in LBA */ + st_dword(p + 12, sz_part); /* Number of sectors */ + + /* Next partition */ + b_cyl += p_cyl; + } + st_word(p, 0xAA55); + + /* Write it to the MBR */ + return (disk_write(pdrv, buf, 0, 1) != RES_OK || disk_ioctl(pdrv, CTRL_SYNC, 0) != RES_OK) ? FR_DISK_ERR : FR_OK; +} + +#endif /* _MULTI_PARTITION */ +#endif /* _USE_MKFS && !_FS_READONLY */ + + + + +#if _USE_STRFUNC +/*-----------------------------------------------------------------------*/ +/* Get a string from the file */ +/*-----------------------------------------------------------------------*/ + +TCHAR* f_gets ( + TCHAR* buff, /* Pointer to the string buffer to read */ + int len, /* Size of string buffer (characters) */ + FIL* fp /* Pointer to the file object */ +) +{ + int n = 0; + TCHAR c, *p = buff; + BYTE s[2]; + UINT rc; + + + while (n < len - 1) { /* Read characters until buffer gets filled */ +#if _LFN_UNICODE +#if _STRF_ENCODE == 3 /* Read a character in UTF-8 */ + f_read(fp, s, 1, &rc); + if (rc != 1) break; + c = s[0]; + if (c >= 0x80) { + if (c < 0xC0) continue; /* Skip stray trailer */ + if (c < 0xE0) { /* Two-byte sequence (0x80-0x7FF) */ + f_read(fp, s, 1, &rc); + if (rc != 1) break; + c = (c & 0x1F) << 6 | (s[0] & 0x3F); + if (c < 0x80) c = '?'; /* Reject invalid code range */ + } else { + if (c < 0xF0) { /* Three-byte sequence (0x800-0xFFFF) */ + f_read(fp, s, 2, &rc); + if (rc != 2) break; + c = c << 12 | (s[0] & 0x3F) << 6 | (s[1] & 0x3F); + if (c < 0x800) c = '?'; /* Reject invalid code range */ + } else { /* Reject four-byte sequence */ + c = '?'; + } + } + } +#elif _STRF_ENCODE == 2 /* Read a character in UTF-16BE */ + f_read(fp, s, 2, &rc); + if (rc != 2) break; + c = s[1] + (s[0] << 8); +#elif _STRF_ENCODE == 1 /* Read a character in UTF-16LE */ + f_read(fp, s, 2, &rc); + if (rc != 2) break; + c = s[0] + (s[1] << 8); +#else /* Read a character in ANSI/OEM */ + f_read(fp, s, 1, &rc); + if (rc != 1) break; + c = s[0]; + if (IsDBCS1(c)) { + f_read(fp, s, 1, &rc); + if (rc != 1) break; + c = (c << 8) + s[0]; + } + c = ff_convert(c, 1); /* OEM -> Unicode */ + if (!c) c = '?'; +#endif +#else /* Read a character without conversion */ + f_read(fp, s, 1, &rc); + if (rc != 1) break; + c = s[0]; +#endif + if (_USE_STRFUNC == 2 && c == '\r') continue; /* Strip '\r' */ + *p++ = c; + n++; + if (c == '\n') break; /* Break on EOL */ + } + *p = 0; + return n ? buff : 0; /* When no data read (eof or error), return with error. 
*/ +} + + + + +#if !_FS_READONLY +#include <stdarg.h> +/*-----------------------------------------------------------------------*/ +/* Put a character to the file */ +/*-----------------------------------------------------------------------*/ + +typedef struct { + FIL *fp; /* Ptr to the writing file */ + int idx, nchr; /* Write index of buf[] (-1:error), number of chars written */ + BYTE buf[64]; /* Write buffer */ +} putbuff; + + +static +void putc_bfd ( /* Buffered write with code conversion */ + putbuff* pb, + TCHAR c +) +{ + UINT bw; + int i; + + + if (_USE_STRFUNC == 2 && c == '\n') { /* LF -> CRLF conversion */ + putc_bfd(pb, '\r'); + } + + i = pb->idx; /* Write index of pb->buf[] */ + if (i < 0) return; + +#if _LFN_UNICODE +#if _STRF_ENCODE == 3 /* Write a character in UTF-8 */ + if (c < 0x80) { /* 7-bit */ + pb->buf[i++] = (BYTE)c; + } else { + if (c < 0x800) { /* 11-bit */ + pb->buf[i++] = (BYTE)(0xC0 | c >> 6); + } else { /* 16-bit */ + pb->buf[i++] = (BYTE)(0xE0 | c >> 12); + pb->buf[i++] = (BYTE)(0x80 | (c >> 6 & 0x3F)); + } + pb->buf[i++] = (BYTE)(0x80 | (c & 0x3F)); + } +#elif _STRF_ENCODE == 2 /* Write a character in UTF-16BE */ + pb->buf[i++] = (BYTE)(c >> 8); + pb->buf[i++] = (BYTE)c; +#elif _STRF_ENCODE == 1 /* Write a character in UTF-16LE */ + pb->buf[i++] = (BYTE)c; + pb->buf[i++] = (BYTE)(c >> 8); +#else /* Write a character in ANSI/OEM */ + c = ff_convert(c, 0); /* Unicode -> OEM */ + if (!c) c = '?'; + if (c >= 0x100) + pb->buf[i++] = (BYTE)(c >> 8); + pb->buf[i++] = (BYTE)c; +#endif +#else /* Write a character without conversion */ + pb->buf[i++] = (BYTE)c; +#endif + + if (i >= (int)(sizeof pb->buf) - 3) { /* Write buffered characters to the file */ + f_write(pb->fp, pb->buf, (UINT)i, &bw); + i = (bw == (UINT)i) ? 0 : -1; + } + pb->idx = i; + pb->nchr++; +} + + +static +int putc_flush ( /* Flush left characters in the buffer */ + putbuff* pb +) +{ + UINT nw; + + if ( pb->idx >= 0 /* Flush buffered characters to the file */ + && f_write(pb->fp, pb->buf, (UINT)pb->idx, &nw) == FR_OK + && (UINT)pb->idx == nw) return pb->nchr; + return EOF; +} + + +static +void putc_init ( /* Initialize write buffer */ + putbuff* pb, + FIL* fp +) +{ + pb->fp = fp; + pb->nchr = pb->idx = 0; +} + + + +int f_putc ( + TCHAR c, /* A character to be output */ + FIL* fp /* Pointer to the file object */ +) +{ + putbuff pb; + + + putc_init(&pb, fp); + putc_bfd(&pb, c); /* Put the character */ + return putc_flush(&pb); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Put a string to the file */ +/*-----------------------------------------------------------------------*/ + +int f_puts ( + const TCHAR* str, /* Pointer to the string to be output */ + FIL* fp /* Pointer to the file object */ +) +{ + putbuff pb; + + + putc_init(&pb, fp); + while (*str) putc_bfd(&pb, *str++); /* Put the string */ + return putc_flush(&pb); +} + + + + +/*-----------------------------------------------------------------------*/ +/* Put a formatted string to the file */ +/*-----------------------------------------------------------------------*/ + +int f_printf ( + FIL* fp, /* Pointer to the file object */ + const TCHAR* fmt, /* Pointer to the format string */ + ... /* Optional arguments... 
*/ +) +{ + va_list arp; + putbuff pb; + BYTE f, r; + UINT i, j, w; + DWORD v; + TCHAR c, d, str[32], *p; + + + putc_init(&pb, fp); + + va_start(arp, fmt); + + for (;;) { + c = *fmt++; + if (c == 0) break; /* End of string */ + if (c != '%') { /* Non escape character */ + putc_bfd(&pb, c); + continue; + } + w = f = 0; + c = *fmt++; + if (c == '0') { /* Flag: '0' padding */ + f = 1; c = *fmt++; + } else { + if (c == '-') { /* Flag: left justified */ + f = 2; c = *fmt++; + } + } + while (IsDigit(c)) { /* Precision */ + w = w * 10 + c - '0'; + c = *fmt++; + } + if (c == 'l' || c == 'L') { /* Prefix: Size is long int */ + f |= 4; c = *fmt++; + } + if (!c) break; + d = c; + if (IsLower(d)) d -= 0x20; + switch (d) { /* Type is... */ + case 'S' : /* String */ + p = va_arg(arp, TCHAR*); + for (j = 0; p[j]; j++) ; + if (!(f & 2)) { + while (j++ < w) putc_bfd(&pb, ' '); + } + while (*p) putc_bfd(&pb, *p++); + while (j++ < w) putc_bfd(&pb, ' '); + continue; + + case 'C' : /* Character */ + putc_bfd(&pb, (TCHAR)va_arg(arp, int)); continue; + + case 'B' : /* Binary */ + r = 2; break; + + case 'O' : /* Octal */ + r = 8; break; + + case 'D' : /* Signed decimal */ + case 'U' : /* Unsigned decimal */ + r = 10; break; + + case 'X' : /* Hexdecimal */ + r = 16; break; + + default: /* Unknown type (pass-through) */ + putc_bfd(&pb, c); continue; + } + + /* Get an argument and put it in numeral */ + v = (f & 4) ? (DWORD)va_arg(arp, long) : ((d == 'D') ? (DWORD)(long)va_arg(arp, int) : (DWORD)va_arg(arp, unsigned int)); + if (d == 'D' && (v & 0x80000000)) { + v = 0 - v; + f |= 8; + } + i = 0; + do { + d = (TCHAR)(v % r); v /= r; + if (d > 9) d += (c == 'x') ? 0x27 : 0x07; + str[i++] = d + '0'; + } while (v && i < sizeof str / sizeof str[0]); + if (f & 8) str[i++] = '-'; + j = i; d = (f & 1) ? '0' : ' '; + while (!(f & 2) && j++ < w) putc_bfd(&pb, d); + do { + putc_bfd(&pb, str[--i]); + } while (i); + while (j++ < w) putc_bfd(&pb, d); + } + + va_end(arp); + + return putc_flush(&pb); +} + +#endif /* !_FS_READONLY */ +#endif /* _USE_STRFUNC */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff.h new file mode 100644 index 000000000..b14c3ce18 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff.h @@ -0,0 +1,361 @@ +/*----------------------------------------------------------------------------/ +/ FatFs - Generic FAT file system module R0.12c / +/-----------------------------------------------------------------------------/ +/ +/ Copyright (C) 2017, ChaN, all right reserved. +/ +/ FatFs module is an open source software. Redistribution and use of FatFs in +/ source and binary forms, with or without modification, are permitted provided +/ that the following condition is met: + +/ 1. Redistributions of source code must retain the above copyright notice, +/ this condition and the following disclaimer. +/ +/ This software is provided by the copyright holder and contributors "AS IS" +/ and any warranties related to this software are DISCLAIMED. +/ The copyright owner or contributors be NOT LIABLE for any damages caused +/ by use of this software. 
+/----------------------------------------------------------------------------*/ + + +#ifndef _FATFS +#define _FATFS 68300 /* Revision ID */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "integer.h" /* Basic integer types */ +#include "ffconf.h" /* FatFs configuration options */ + +#if _FATFS != _FFCONF +#error Wrong configuration file (ffconf.h). +#endif + + + +/* Definitions of volume management */ + +#if _MULTI_PARTITION /* Multiple partition configuration */ +typedef struct { + BYTE pd; /* Physical drive number */ + BYTE pt; /* Partition: 0:Auto detect, 1-4:Forced partition) */ +} PARTITION; +extern PARTITION VolToPart[]; /* Volume - Partition resolution table */ +#endif + + + +/* Type of path name strings on FatFs API */ + +#if _LFN_UNICODE /* Unicode (UTF-16) string */ +#if _USE_LFN == 0 +#error _LFN_UNICODE must be 0 at non-LFN cfg. +#endif +#ifndef _INC_TCHAR +typedef WCHAR TCHAR; +#define _T(x) L ## x +#define _TEXT(x) L ## x +#endif +#else /* ANSI/OEM string */ +#ifndef _INC_TCHAR +typedef char TCHAR; +#define _T(x) x +#define _TEXT(x) x +#endif +#endif + + + +/* Type of file size variables */ + +#if _FS_EXFAT +#if _USE_LFN == 0 +#error LFN must be enabled when enable exFAT +#endif +typedef QWORD FSIZE_t; +#else +typedef DWORD FSIZE_t; +#endif + + + +/* File system object structure (FATFS) */ + +typedef struct { + BYTE fs_type; /* File system type (0:N/A) */ + BYTE drv; /* Physical drive number */ + BYTE n_fats; /* Number of FATs (1 or 2) */ + BYTE wflag; /* win[] flag (b0:dirty) */ + BYTE fsi_flag; /* FSINFO flags (b7:disabled, b0:dirty) */ + WORD id; /* File system mount ID */ + WORD n_rootdir; /* Number of root directory entries (FAT12/16) */ + WORD csize; /* Cluster size [sectors] */ +#if _MAX_SS != _MIN_SS + WORD ssize; /* Sector size (512, 1024, 2048 or 4096) */ +#endif +#if _USE_LFN != 0 + WCHAR* lfnbuf; /* LFN working buffer */ +#endif +#if _FS_EXFAT + BYTE* dirbuf; /* Directory entry block scratchpad buffer */ +#endif +#if _FS_REENTRANT + _SYNC_t sobj; /* Identifier of sync object */ +#endif +#if !_FS_READONLY + DWORD last_clst; /* Last allocated cluster */ + DWORD free_clst; /* Number of free clusters */ +#endif +#if _FS_RPATH != 0 + DWORD cdir; /* Current directory start cluster (0:root) */ +#if _FS_EXFAT + DWORD cdc_scl; /* Containing directory start cluster (invalid when cdir is 0) */ + DWORD cdc_size; /* b31-b8:Size of containing directory, b7-b0: Chain status */ + DWORD cdc_ofs; /* Offset in the containing directory (invalid when cdir is 0) */ +#endif +#endif + DWORD n_fatent; /* Number of FAT entries (number of clusters + 2) */ + DWORD fsize; /* Size of an FAT [sectors] */ + DWORD volbase; /* Volume base sector */ + DWORD fatbase; /* FAT base sector */ + DWORD dirbase; /* Root directory base sector/cluster */ + DWORD database; /* Data base sector */ + DWORD winsect; /* Current sector appearing in the win[] */ + BYTE win[_MAX_SS]; /* Disk access window for Directory, FAT (and file data at tiny cfg) */ +} FATFS; + + + +/* Object ID and allocation information (_FDID) */ + +typedef struct { + FATFS* fs; /* Pointer to the owner file system object */ + WORD id; /* Owner file system mount ID */ + BYTE attr; /* Object attribute */ + BYTE stat; /* Object chain status (b1-0: =0:not contiguous, =2:contiguous (no data on FAT), =3:flagmented in this session, b2:sub-directory stretched) */ + DWORD sclust; /* Object start cluster (0:no cluster or root directory) */ + FSIZE_t objsize; /* Object size (valid when sclust != 0) */ +#if _FS_EXFAT + DWORD n_cont; /* Size of 
first fragment, clusters - 1 (valid when stat == 3) */ + DWORD n_frag; /* Size of last fragment needs to be written (valid when not zero) */ + DWORD c_scl; /* Containing directory start cluster (valid when sclust != 0) */ + DWORD c_size; /* b31-b8:Size of containing directory, b7-b0: Chain status (valid when c_scl != 0) */ + DWORD c_ofs; /* Offset in the containing directory (valid when sclust != 0 and non-directory object) */ +#endif +#if _FS_LOCK != 0 + UINT lockid; /* File lock ID origin from 1 (index of file semaphore table Files[]) */ +#endif +} _FDID; + + + +/* File object structure (FIL) */ + +typedef struct { + _FDID obj; /* Object identifier (must be the 1st member to detect invalid object pointer) */ + BYTE flag; /* File status flags */ + BYTE err; /* Abort flag (error code) */ + FSIZE_t fptr; /* File read/write pointer (Zeroed on file open) */ + DWORD clust; /* Current cluster of fpter (invalid when fptr is 0) */ + DWORD sect; /* Sector number appearing in buf[] (0:invalid) */ +#if !_FS_READONLY + DWORD dir_sect; /* Sector number containing the directory entry */ + BYTE* dir_ptr; /* Pointer to the directory entry in the win[] */ +#endif +#if _USE_FASTSEEK + DWORD* cltbl; /* Pointer to the cluster link map table (nulled on open, set by application) */ +#endif +#if !_FS_TINY + BYTE buf[_MAX_SS]; /* File private data read/write window */ +#endif +} FIL; + + + +/* Directory object structure (DIR) */ + +typedef struct { + _FDID obj; /* Object identifier */ + DWORD dptr; /* Current read/write offset */ + DWORD clust; /* Current cluster */ + DWORD sect; /* Current sector (0:Read operation has terminated) */ + BYTE* dir; /* Pointer to the directory item in the win[] */ + BYTE fn[12]; /* SFN (in/out) {body[8],ext[3],status[1]} */ +#if _USE_LFN != 0 + DWORD blk_ofs; /* Offset of current entry block being processed (0xFFFFFFFF:Invalid) */ +#endif +#if _USE_FIND + const TCHAR* pat; /* Pointer to the name matching pattern */ +#endif +} DIR; + + + +/* File information structure (FILINFO) */ + +typedef struct { + FSIZE_t fsize; /* File size */ + WORD fdate; /* Modified date */ + WORD ftime; /* Modified time */ + BYTE fattrib; /* File attribute */ +#if _USE_LFN != 0 + TCHAR altname[13]; /* Alternative file name */ + TCHAR fname[_MAX_LFN + 1]; /* Primary file name */ +#else + TCHAR fname[13]; /* File name */ +#endif +} FILINFO; + + + +/* File function return code (FRESULT) */ + +typedef enum { + FR_OK = 0, /* (0) Succeeded */ + FR_DISK_ERR, /* (1) A hard error occurred in the low level disk I/O layer */ + FR_INT_ERR, /* (2) Assertion failed */ + FR_NOT_READY, /* (3) The physical drive cannot work */ + FR_NO_FILE, /* (4) Could not find the file */ + FR_NO_PATH, /* (5) Could not find the path */ + FR_INVALID_NAME, /* (6) The path name format is invalid */ + FR_DENIED, /* (7) Access denied due to prohibited access or directory full */ + FR_EXIST, /* (8) Access denied due to prohibited access */ + FR_INVALID_OBJECT, /* (9) The file/directory object is invalid */ + FR_WRITE_PROTECTED, /* (10) The physical drive is write protected */ + FR_INVALID_DRIVE, /* (11) The logical drive number is invalid */ + FR_NOT_ENABLED, /* (12) The volume has no work area */ + FR_NO_FILESYSTEM, /* (13) There is no valid FAT volume */ + FR_MKFS_ABORTED, /* (14) The f_mkfs() aborted due to any problem */ + FR_TIMEOUT, /* (15) Could not get a grant to access the volume within defined period */ + FR_LOCKED, /* (16) The operation is rejected according to the file sharing policy */ + FR_NOT_ENOUGH_CORE, /* (17) LFN working 
buffer could not be allocated */ + FR_TOO_MANY_OPEN_FILES, /* (18) Number of open files > _FS_LOCK */ + FR_INVALID_PARAMETER /* (19) Given parameter is invalid */ +} FRESULT; + + + +/*--------------------------------------------------------------*/ +/* FatFs module application interface */ + +FRESULT f_open (FIL* fp, const TCHAR* path, BYTE mode); /* Open or create a file */ +FRESULT f_close (FIL* fp); /* Close an open file object */ +FRESULT f_read (FIL* fp, void* buff, UINT btr, UINT* br); /* Read data from the file */ +FRESULT f_write (FIL* fp, const void* buff, UINT btw, UINT* bw); /* Write data to the file */ +FRESULT f_lseek (FIL* fp, FSIZE_t ofs); /* Move file pointer of the file object */ +FRESULT f_truncate (FIL* fp); /* Truncate the file */ +FRESULT f_sync (FIL* fp); /* Flush cached data of the writing file */ +FRESULT f_opendir (DIR* dp, const TCHAR* path); /* Open a directory */ +FRESULT f_closedir (DIR* dp); /* Close an open directory */ +FRESULT f_readdir (DIR* dp, FILINFO* fno); /* Read a directory item */ +FRESULT f_findfirst (DIR* dp, FILINFO* fno, const TCHAR* path, const TCHAR* pattern); /* Find first file */ +FRESULT f_findnext (DIR* dp, FILINFO* fno); /* Find next file */ +FRESULT f_mkdir (const TCHAR* path); /* Create a sub directory */ +FRESULT f_unlink (const TCHAR* path); /* Delete an existing file or directory */ +FRESULT f_rename (const TCHAR* path_old, const TCHAR* path_new); /* Rename/Move a file or directory */ +FRESULT f_stat (const TCHAR* path, FILINFO* fno); /* Get file status */ +FRESULT f_chmod (const TCHAR* path, BYTE attr, BYTE mask); /* Change attribute of a file/dir */ +FRESULT f_utime (const TCHAR* path, const FILINFO* fno); /* Change timestamp of a file/dir */ +FRESULT f_chdir (const TCHAR* path); /* Change current directory */ +FRESULT f_chdrive (const TCHAR* path); /* Change current drive */ +FRESULT f_getcwd (TCHAR* buff, UINT len); /* Get current directory */ +FRESULT f_getfree (const TCHAR* path, DWORD* nclst, FATFS** fatfs); /* Get number of free clusters on the drive */ +FRESULT f_getlabel (const TCHAR* path, TCHAR* label, DWORD* vsn); /* Get volume label */ +FRESULT f_setlabel (const TCHAR* label); /* Set volume label */ +FRESULT f_forward (FIL* fp, UINT(*func)(const BYTE*,UINT), UINT btf, UINT* bf); /* Forward data to the stream */ +FRESULT f_expand (FIL* fp, FSIZE_t szf, BYTE opt); /* Allocate a contiguous block to the file */ +FRESULT f_mount (FATFS* fs, const TCHAR* path, BYTE opt); /* Mount/Unmount a logical drive */ +FRESULT f_mkfs (const TCHAR* path, BYTE opt, DWORD au, void* work, UINT len); /* Create a FAT volume */ +FRESULT f_fdisk (BYTE pdrv, const DWORD* szt, void* work); /* Divide a physical drive into some partitions */ +int f_putc (TCHAR c, FIL* fp); /* Put a character to the file */ +int f_puts (const TCHAR* str, FIL* cp); /* Put a string to the file */ +int f_printf (FIL* fp, const TCHAR* str, ...); /* Put a formatted string to the file */ +TCHAR* f_gets (TCHAR* buff, int len, FIL* fp); /* Get a string from the file */ + +#define f_eof(fp) ((int)((fp)->fptr == (fp)->obj.objsize)) +#define f_error(fp) ((fp)->err) +#define f_tell(fp) ((fp)->fptr) +#define f_size(fp) ((fp)->obj.objsize) +#define f_rewind(fp) f_lseek((fp), 0) +#define f_rewinddir(dp) f_readdir((dp), 0) +#define f_rmdir(path) f_unlink(path) + +#ifndef EOF +#define EOF (-1) +#endif + + + + +/*--------------------------------------------------------------*/ +/* Additional user defined functions */ + +/* RTC function */ +#if !_FS_READONLY && !_FS_NORTC +DWORD 
get_fattime (void); +#endif + +/* Unicode support functions */ +#if _USE_LFN != 0 /* Unicode - OEM code conversion */ +WCHAR ff_convert (WCHAR chr, UINT dir); /* OEM-Unicode bidirectional conversion */ +WCHAR ff_wtoupper (WCHAR chr); /* Unicode upper-case conversion */ +#if _USE_LFN == 3 /* Memory functions */ +void* ff_memalloc (UINT msize); /* Allocate memory block */ +void ff_memfree (void* mblock); /* Free memory block */ +#endif +#endif + +/* Sync functions */ +#if _FS_REENTRANT +int ff_cre_syncobj (BYTE vol, _SYNC_t* sobj); /* Create a sync object */ +int ff_req_grant (_SYNC_t sobj); /* Lock sync object */ +void ff_rel_grant (_SYNC_t sobj); /* Unlock sync object */ +int ff_del_syncobj (_SYNC_t sobj); /* Delete a sync object */ +#endif + + + + +/*--------------------------------------------------------------*/ +/* Flags and offset address */ + + +/* File access mode and open method flags (3rd argument of f_open) */ +#define FA_READ 0x01 +#define FA_WRITE 0x02 +#define FA_OPEN_EXISTING 0x00 +#define FA_CREATE_NEW 0x04 +#define FA_CREATE_ALWAYS 0x08 +#define FA_OPEN_ALWAYS 0x10 +#define FA_OPEN_APPEND 0x30 + +/* Fast seek controls (2nd argument of f_lseek) */ +#define CREATE_LINKMAP ((FSIZE_t)0 - 1) + +/* Format options (2nd argument of f_mkfs) */ +#define FM_FAT 0x01 +#define FM_FAT32 0x02 +#define FM_EXFAT 0x04 +#define FM_ANY 0x07 +#define FM_SFD 0x08 + +/* Filesystem type (FATFS.fs_type) */ +#define FS_FAT12 1 +#define FS_FAT16 2 +#define FS_FAT32 3 +#define FS_EXFAT 4 + +/* File attribute bits for directory entry (FILINFO.fattrib) */ +#define AM_RDO 0x01 /* Read only */ +#define AM_HID 0x02 /* Hidden */ +#define AM_SYS 0x04 /* System */ +#define AM_DIR 0x10 /* Directory */ +#define AM_ARC 0x20 /* Archive */ + + +#ifdef __cplusplus +} +#endif + +#endif /* _FATFS */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff_gen_drv.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff_gen_drv.c new file mode 100644 index 000000000..ccd595bba --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff_gen_drv.c @@ -0,0 +1,122 @@ +/** + ****************************************************************************** + * @file ff_gen_drv.c + * @author MCD Application Team + * @brief FatFs generic low level driver. + ***************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. All rights reserved. + * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** +**/ +/* Includes ------------------------------------------------------------------*/ +#include "ff_gen_drv.h" + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +Disk_drvTypeDef disk = {{0},{0},{0},0}; + +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** + * @brief Links a compatible diskio driver/lun id and increments the number of active + * linked drivers. 
+ * @note The number of linked drivers (volumes) is up to 10 due to FatFs limits. + * @param drv: pointer to the disk IO Driver structure + * @param path: pointer to the logical drive path + * @param lun : only used for USB Key Disk to add multi-lun management + else the parameter must be equal to 0 + * @retval Returns 0 in case of success, otherwise 1. + */ +uint8_t FATFS_LinkDriverEx(const Diskio_drvTypeDef *drv, char *path, uint8_t lun) +{ + uint8_t ret = 1; + uint8_t DiskNum = 0; + + if(disk.nbr < _VOLUMES) + { + disk.is_initialized[disk.nbr] = 0; + disk.drv[disk.nbr] = drv; + disk.lun[disk.nbr] = lun; + DiskNum = disk.nbr++; + path[0] = DiskNum + '0'; + path[1] = ':'; + path[2] = '/'; + path[3] = 0; + ret = 0; + } + + return ret; +} + +/** + * @brief Links a compatible diskio driver and increments the number of active + * linked drivers. + * @note The number of linked drivers (volumes) is up to 10 due to FatFs limits + * @param drv: pointer to the disk IO Driver structure + * @param path: pointer to the logical drive path + * @retval Returns 0 in case of success, otherwise 1. + */ +uint8_t FATFS_LinkDriver(const Diskio_drvTypeDef *drv, char *path) +{ + return FATFS_LinkDriverEx(drv, path, 0); +} + +/** + * @brief Unlinks a diskio driver and decrements the number of active linked + * drivers. + * @param path: pointer to the logical drive path + * @param lun : not used + * @retval Returns 0 in case of success, otherwise 1. + */ +uint8_t FATFS_UnLinkDriverEx(char *path, uint8_t lun) +{ + uint8_t DiskNum = 0; + uint8_t ret = 1; + + if(disk.nbr >= 1) + { + DiskNum = path[0] - '0'; + if(disk.drv[DiskNum] != 0) + { + disk.drv[DiskNum] = 0; + disk.lun[DiskNum] = 0; + disk.nbr--; + ret = 0; + } + } + + return ret; +} + +/** + * @brief Unlinks a diskio driver and decrements the number of active linked + * drivers. + * @param path: pointer to the logical drive path + * @retval Returns 0 in case of success, otherwise 1. + */ +uint8_t FATFS_UnLinkDriver(char *path) +{ + return FATFS_UnLinkDriverEx(path, 0); +} + +/** + * @brief Gets number of linked drivers to the FatFs module. + * @param None + * @retval Number of attached drivers. + */ +uint8_t FATFS_GetAttachedDriversNbr(void) +{ + return disk.nbr; +} + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff_gen_drv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff_gen_drv.h new file mode 100644 index 000000000..5172e0de9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/ff_gen_drv.h @@ -0,0 +1,80 @@ +/** + ****************************************************************************** + * @file ff_gen_drv.h + * @author MCD Application Team + * @brief Header for ff_gen_drv.c module. + ***************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. All rights reserved. + * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** +**/ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __FF_GEN_DRV_H +#define __FF_GEN_DRV_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "diskio.h" +#include "ff.h" +#include "stdint.h" + + +/* Exported types ------------------------------------------------------------*/ + +/** + * @brief Disk IO Driver structure definition + */ +typedef struct +{ + DSTATUS (*disk_initialize) (BYTE); /*!< Initialize Disk Drive */ + DSTATUS (*disk_status) (BYTE); /*!< Get Disk Status */ + DRESULT (*disk_read) (BYTE, BYTE*, DWORD, UINT); /*!< Read Sector(s) */ +#if _USE_WRITE == 1 + DRESULT (*disk_write) (BYTE, const BYTE*, DWORD, UINT); /*!< Write Sector(s) when _USE_WRITE = 0 */ +#endif /* _USE_WRITE == 1 */ +#if _USE_IOCTL == 1 + DRESULT (*disk_ioctl) (BYTE, BYTE, void*); /*!< I/O control operation when _USE_IOCTL = 1 */ +#endif /* _USE_IOCTL == 1 */ + +}Diskio_drvTypeDef; + +/** + * @brief Global Disk IO Drivers structure definition + */ +typedef struct +{ + uint8_t is_initialized[_VOLUMES]; + const Diskio_drvTypeDef *drv[_VOLUMES]; + uint8_t lun[_VOLUMES]; + volatile uint8_t nbr; + +}Disk_drvTypeDef; + +/* Exported constants --------------------------------------------------------*/ +/* Exported macro ------------------------------------------------------------*/ +/* Exported functions ------------------------------------------------------- */ +uint8_t FATFS_LinkDriver(const Diskio_drvTypeDef *drv, char *path); +uint8_t FATFS_UnLinkDriver(char *path); +uint8_t FATFS_LinkDriverEx(const Diskio_drvTypeDef *drv, char *path, BYTE lun); +uint8_t FATFS_UnLinkDriverEx(char *path, BYTE lun); +uint8_t FATFS_GetAttachedDriversNbr(void); + +#ifdef __cplusplus +} +#endif + +#endif /* __FF_GEN_DRV_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/integer.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/integer.h new file mode 100644 index 000000000..9ce7865bd --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/integer.h @@ -0,0 +1,38 @@ +/*-------------------------------------------*/ +/* Integer type definitions for FatFs module */ +/*-------------------------------------------*/ + +#ifndef _FF_INTEGER +#define _FF_INTEGER + +#ifdef _WIN32 /* FatFs development platform */ + +#include +#include +typedef unsigned __int64 QWORD; + + +#else /* Embedded platform */ + +/* These types MUST be 16-bit or 32-bit */ +typedef int INT; +typedef unsigned int UINT; + +/* This type MUST be 8-bit */ +typedef unsigned char BYTE; + +/* These types MUST be 16-bit */ +typedef short SHORT; +typedef unsigned short WORD; +typedef unsigned short WCHAR; + +/* These types MUST be 32-bit */ +typedef long LONG; +typedef unsigned long DWORD; + +/* This type MUST be 64-bit (Remove this for ANSI C (C89) compatibility) */ +typedef unsigned long long QWORD; + +#endif + +#endif diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/option/ccsbcs.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/option/ccsbcs.c new file mode 100644 index 000000000..e2762dc65 --- /dev/null +++ 
b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/option/ccsbcs.c @@ -0,0 +1,388 @@ +/*------------------------------------------------------------------------*/ +/* Unicode - Local code bidirectional converter (C)ChaN, 2015 */ +/* (SBCS code pages) */ +/*------------------------------------------------------------------------*/ +/* 437 U.S. +/ 720 Arabic +/ 737 Greek +/ 771 KBL +/ 775 Baltic +/ 850 Latin 1 +/ 852 Latin 2 +/ 855 Cyrillic +/ 857 Turkish +/ 860 Portuguese +/ 861 Icelandic +/ 862 Hebrew +/ 863 Canadian French +/ 864 Arabic +/ 865 Nordic +/ 866 Russian +/ 869 Greek 2 +*/ + +#include "../ff.h" + + +#if _CODE_PAGE == 437 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP437(0x80-0xFF) to Unicode conversion table */ + 0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x00E0, 0x00E5, 0x00E7, 0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x00EC, 0x00C4, 0x00C5, + 0x00C9, 0x00E6, 0x00C6, 0x00F4, 0x00F6, 0x00F2, 0x00FB, 0x00F9, 0x00FF, 0x00D6, 0x00DC, 0x00A2, 0x00A3, 0x00A5, 0x20A7, 0x0192, + 0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x00AA, 0x00BA, 0x00BF, 0x2310, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567, + 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580, + 0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4, 0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2229, + 0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248, 0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 720 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP720(0x80-0xFF) to Unicode conversion table */ + 0x0000, 0x0000, 0x00E9, 0x00E2, 0x0000, 0x00E0, 0x0000, 0x00E7, 0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0651, 0x0652, 0x00F4, 0x00A4, 0x0640, 0x00FB, 0x00F9, 0x0621, 0x0622, 0x0623, 0x0624, 0x00A3, 0x0625, 0x0626, 0x0627, + 0x0628, 0x0629, 0x062A, 0x062B, 0x062C, 0x062D, 0x062E, 0x062F, 0x0630, 0x0631, 0x0632, 0x0633, 0x0634, 0x0635, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567, + 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580, + 0x0636, 0x0637, 0x0638, 0x0639, 0x063A, 0x0641, 0x00B5, 0x0642, 0x0643, 0x0644, 0x0645, 0x0646, 0x0647, 0x0648, 0x0649, 0x064A, + 0x2261, 0x064B, 0x064C, 0x064D, 0x064E, 0x064F, 0x0650, 0x2248, 0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 737 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP737(0x80-0xFF) to Unicode conversion table */ + 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397, 0x0398, 0x0399, 0x039A, 0x039B, 0x039C, 0x039D, 0x039E, 0x039F, 0x03A0, + 0x03A1, 0x03A3, 0x03A4, 0x03A5, 0x03A6, 0x03A7, 0x03A8, 0x03A9, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, 0x03B8, + 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, 0x03C0, 0x03C1, 0x03C3, 0x03C2, 0x03C4, 0x03C5, 0x03C6, 0x03C7, 0x03C8, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 
0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567, + 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580, + 0x03C9, 0x03AC, 0x03AD, 0x03AE, 0x03CA, 0x03AF, 0x03CC, 0x03CD, 0x03CB, 0x03CE, 0x0386, 0x0388, 0x0389, 0x038A, 0x038C, 0x038E, + 0x038F, 0x00B1, 0x2265, 0x2264, 0x03AA, 0x03AB, 0x00F7, 0x2248, 0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 771 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP771(0x80-0xFF) to Unicode conversion table */ + 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041A, 0x041B, 0x041C, 0x041D, 0x041E, 0x041F, + 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042A, 0x042B, 0x042C, 0x042D, 0x042E, 0x042F, + 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x2558, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567, + 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x0104, 0x0105, 0x010C, 0x010D, + 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, + 0x0118, 0x0119, 0x0116, 0x0117, 0x012E, 0x012F, 0x0160, 0x0161, 0x0172, 0x0173, 0x016A, 0x016B, 0x017D, 0x017E, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 775 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP775(0x80-0xFF) to Unicode conversion table */ + 0x0106, 0x00FC, 0x00E9, 0x0101, 0x00E4, 0x0123, 0x00E5, 0x0107, 0x0142, 0x0113, 0x0156, 0x0157, 0x012B, 0x0179, 0x00C4, 0x00C5, + 0x00C9, 0x00E6, 0x00C6, 0x014D, 0x00F6, 0x0122, 0x00A2, 0x015A, 0x015B, 0x00D6, 0x00DC, 0x00F8, 0x00A3, 0x00D8, 0x00D7, 0x00A4, + 0x0100, 0x012A, 0x00F3, 0x017B, 0x017C, 0x017A, 0x201D, 0x00A6, 0x00A9, 0x00AE, 0x00AC, 0x00BD, 0x00BC, 0x0141, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x0104, 0x010C, 0x0118, 0x0116, 0x2563, 0x2551, 0x2557, 0x255D, 0x012E, 0x0160, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x0172, 0x016A, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x017D, + 0x0105, 0x010D, 0x0119, 0x0117, 0x012F, 0x0161, 0x0173, 0x016B, 0x017E, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580, + 0x00D3, 0x00DF, 0x014C, 0x0143, 0x00F5, 0x00D5, 0x00B5, 0x0144, 0x0136, 0x0137, 0x013B, 0x013C, 0x0146, 0x0112, 0x0145, 0x2019, + 0x00AD, 0x00B1, 0x201C, 0x00BE, 0x00B6, 0x00A7, 0x00F7, 0x201E, 0x00B0, 0x2219, 0x00B7, 0x00B9, 0x00B3, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 850 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP850(0x80-0xFF) to Unicode conversion table */ + 0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x00E0, 0x00E5, 0x00E7, 0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x00EC, 0x00C4, 0x00C5, + 0x00C9, 0x00E6, 0x00C6, 0x00F4, 0x00F6, 0x00F2, 0x00FB, 0x00F9, 0x00FF, 0x00D6, 0x00DC, 0x00F8, 0x00A3, 0x00D8, 0x00D7, 0x0192, + 0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x00AA, 0x00BA, 0x00BF, 0x00AE, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x00C1, 0x00C2, 0x00C0, 0x00A9, 0x2563, 0x2551, 0x2557, 0x255D, 
0x00A2, 0x00A5, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x00E3, 0x00C3, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x00A4, + 0x00F0, 0x00D0, 0x00CA, 0x00CB, 0x00C8, 0x0131, 0x00CD, 0x00CE, 0x00CF, 0x2518, 0x250C, 0x2588, 0x2584, 0x00A6, 0x00CC, 0x2580, + 0x00D3, 0x00DF, 0x00D4, 0x00D2, 0x00F5, 0x00D5, 0x00B5, 0x00FE, 0x00DE, 0x00DA, 0x00DB, 0x00D9, 0x00FD, 0x00DD, 0x00AF, 0x00B4, + 0x00AD, 0x00B1, 0x2017, 0x00BE, 0x00B6, 0x00A7, 0x00F7, 0x00B8, 0x00B0, 0x00A8, 0x00B7, 0x00B9, 0x00B3, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 852 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP852(0x80-0xFF) to Unicode conversion table */ + 0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x016F, 0x0107, 0x00E7, 0x0142, 0x00EB, 0x0150, 0x0151, 0x00EE, 0x0179, 0x00C4, 0x0106, + 0x00C9, 0x0139, 0x013A, 0x00F4, 0x00F6, 0x013D, 0x013E, 0x015A, 0x015B, 0x00D6, 0x00DC, 0x0164, 0x0165, 0x0141, 0x00D7, 0x010D, + 0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x0104, 0x0105, 0x017D, 0x017E, 0x0118, 0x0119, 0x00AC, 0x017A, 0x010C, 0x015F, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x00C1, 0x00C2, 0x011A, 0x015E, 0x2563, 0x2551, 0x2557, 0x255D, 0x017B, 0x017C, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x0102, 0x0103, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x00A4, + 0x0111, 0x0110, 0x010E, 0x00CB, 0x010F, 0x0147, 0x00CD, 0x00CE, 0x011B, 0x2518, 0x250C, 0x2588, 0x2584, 0x0162, 0x016E, 0x2580, + 0x00D3, 0x00DF, 0x00D4, 0x0143, 0x0144, 0x0148, 0x0160, 0x0161, 0x0154, 0x00DA, 0x0155, 0x0170, 0x00FD, 0x00DD, 0x0163, 0x00B4, + 0x00AD, 0x02DD, 0x02DB, 0x02C7, 0x02D8, 0x00A7, 0x00F7, 0x00B8, 0x00B0, 0x00A8, 0x02D9, 0x0171, 0x0158, 0x0159, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 855 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP855(0x80-0xFF) to Unicode conversion table */ + 0x0452, 0x0402, 0x0453, 0x0403, 0x0451, 0x0401, 0x0454, 0x0404, 0x0455, 0x0405, 0x0456, 0x0406, 0x0457, 0x0407, 0x0458, 0x0408, + 0x0459, 0x0409, 0x045A, 0x040A, 0x045B, 0x040B, 0x045C, 0x040C, 0x045E, 0x040E, 0x045F, 0x040F, 0x044E, 0x042E, 0x044A, 0x042A, + 0x0430, 0x0410, 0x0431, 0x0411, 0x0446, 0x0426, 0x0434, 0x0414, 0x0435, 0x0415, 0x0444, 0x0424, 0x0433, 0x0413, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x0445, 0x0425, 0x0438, 0x0418, 0x2563, 0x2551, 0x2557, 0x255D, 0x0439, 0x0419, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x043A, 0x041A, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x00A4, + 0x043B, 0x041B, 0x043C, 0x041C, 0x043D, 0x041D, 0x043E, 0x041E, 0x043F, 0x2518, 0x250C, 0x2588, 0x2584, 0x041F, 0x044F, 0x2580, + 0x042F, 0x0440, 0x0420, 0x0441, 0x0421, 0x0442, 0x0422, 0x0443, 0x0423, 0x0436, 0x0416, 0x0432, 0x0412, 0x044C, 0x042C, 0x2116, + 0x00AD, 0x044B, 0x042B, 0x0437, 0x0417, 0x0448, 0x0428, 0x044D, 0x042D, 0x0449, 0x0429, 0x0447, 0x0427, 0x00A7, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 857 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP857(0x80-0xFF) to Unicode conversion table */ + 0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x00E0, 0x00E5, 0x00E7, 0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x0131, 0x00C4, 0x00C5, + 0x00C9, 0x00E6, 0x00C6, 0x00F4, 0x00F6, 0x00F2, 0x00FB, 0x00F9, 0x0130, 0x00D6, 0x00DC, 0x00F8, 0x00A3, 0x00D8, 0x015E, 0x015F, + 0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x011E, 0x011F, 0x00BF, 0x00AE, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x00C1, 0x00C2, 0x00C0, 0x00A9, 0x2563, 0x2551, 0x2557, 0x255D, 0x00A2, 0x00A5, 0x2510, + 0x2514, 0x2534, 
0x252C, 0x251C, 0x2500, 0x253C, 0x00E3, 0x00C3, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x00A4, + 0x00BA, 0x00AA, 0x00CA, 0x00CB, 0x00C8, 0x0000, 0x00CD, 0x00CE, 0x00CF, 0x2518, 0x250C, 0x2588, 0x2584, 0x00A6, 0x00CC, 0x2580, + 0x00D3, 0x00DF, 0x00D4, 0x00D2, 0x00F5, 0x00D5, 0x00B5, 0x0000, 0x00D7, 0x00DA, 0x00DB, 0x00D9, 0x00EC, 0x00FF, 0x00AF, 0x00B4, + 0x00AD, 0x00B1, 0x0000, 0x00BE, 0x00B6, 0x00A7, 0x00F7, 0x00B8, 0x00B0, 0x00A8, 0x00B7, 0x00B9, 0x00B3, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 860 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP860(0x80-0xFF) to Unicode conversion table */ + 0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E3, 0x00E0, 0x00C1, 0x00E7, 0x00EA, 0x00CA, 0x00E8, 0x00CD, 0x00D4, 0x00EC, 0x00C3, 0x00C2, + 0x00C9, 0x00C0, 0x00C8, 0x00F4, 0x00F5, 0x00F2, 0x00DA, 0x00F9, 0x00CC, 0x00D5, 0x00DC, 0x00A2, 0x00A3, 0x00D9, 0x20A7, 0x00D3, + 0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x00AA, 0x00BA, 0x00BF, 0x00D2, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x2558, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567, + 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580, + 0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4, 0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2229, + 0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248, 0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 861 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP861(0x80-0xFF) to Unicode conversion table */ + 0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x00E0, 0x00E6, 0x00E7, 0x00EA, 0x00EB, 0x00E8, 0x00D0, 0x00F0, 0x00DE, 0x00C4, 0x00C5, + 0x00C9, 0x00E6, 0x00C6, 0x00F4, 0x00F6, 0x00FE, 0x00FB, 0x00DD, 0x00FD, 0x00D6, 0x00DC, 0x00F8, 0x00A3, 0x00D8, 0x20A7, 0x0192, + 0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00C1, 0x00CD, 0x00D3, 0x00DA, 0x00BF, 0x2310, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567, + 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580, + 0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4, 0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2229, + 0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248, 0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 862 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP862(0x80-0xFF) to Unicode conversion table */ + 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7, 0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF, + 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7, 0x05E8, 0x05E9, 0x05EA, 0x00A2, 0x00A3, 0x00A5, 0x20A7, 0x0192, + 0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x00AA, 0x00BA, 0x00BF, 0x2310, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 
0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567, + 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580, + 0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4, 0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2229, + 0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248, 0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 863 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP863(0x80-0xFF) to Unicode conversion table */ + 0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00C2, 0x00E0, 0x00B6, 0x00E7, 0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x00EC, 0x2017, 0x00C0, + 0x00C9, 0x00C8, 0x00CA, 0x00F4, 0x00CB, 0x00CF, 0x00FB, 0x00F9, 0x00A4, 0x00D4, 0x00DC, 0x00A2, 0x00A3, 0x00D9, 0x00DB, 0x0192, + 0x00A6, 0x00B4, 0x00F3, 0x00FA, 0x00A8, 0x00BB, 0x00B3, 0x00AF, 0x00CE, 0x3210, 0x00AC, 0x00BD, 0x00BC, 0x00BE, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567, + 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580, + 0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4, 0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2219, + 0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248, 0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 864 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP864(0x80-0xFF) to Unicode conversion table */ + 0x00B0, 0x00B7, 0x2219, 0x221A, 0x2592, 0x2500, 0x2502, 0x253C, 0x2524, 0x252C, 0x251C, 0x2534, 0x2510, 0x250C, 0x2514, 0x2518, + 0x03B2, 0x221E, 0x03C6, 0x00B1, 0x00BD, 0x00BC, 0x2248, 0x00AB, 0x00BB, 0xFEF7, 0xFEF8, 0x0000, 0x0000, 0xFEFB, 0xFEFC, 0x0000, + 0x00A0, 0x00AD, 0xFE82, 0x00A3, 0x00A4, 0xFE84, 0x0000, 0x20AC, 0xFE8E, 0xFE8F, 0xFE95, 0xFE99, 0x060C, 0xFE9D, 0xFEA1, 0xFEA5, + 0x0660, 0x0661, 0x0662, 0x0663, 0x0664, 0x0665, 0x0666, 0x0667, 0x0668, 0x0669, 0xFED1, 0x061B, 0xFEB1, 0xFEB5, 0xFEB9, 0x061F, + 0x00A2, 0xFE80, 0xFE81, 0xFE83, 0xFE85, 0xFECA, 0xFE8B, 0xFE8D, 0xFE91, 0xFE93, 0xFE97, 0xFE9B, 0xFE9F, 0xFEA3, 0xFEA7, 0xFEA9, + 0xFEAB, 0xFEAD, 0xFEAF, 0xFEB3, 0xFEB7, 0xFEBB, 0xFEBF, 0xFEC1, 0xFEC5, 0xFECB, 0xFECF, 0x00A6, 0x00AC, 0x00F7, 0x00D7, 0xFEC9, + 0x0640, 0xFED3, 0xFED7, 0xFEDB, 0xFEDF, 0xFEE3, 0xFEE7, 0xFEEB, 0xFEED, 0xFEEF, 0xFEF3, 0xFEBD, 0xFECC, 0xFECE, 0xFECD, 0xFEE1, + 0xFE7D, 0x0651, 0xFEE5, 0xFEE9, 0xFEEC, 0xFEF0, 0xFEF2, 0xFED0, 0xFED5, 0xFEF5, 0xFEF6, 0xFEDD, 0xFED9, 0xFEF1, 0x25A0, 0x0000 +}; + +#elif _CODE_PAGE == 865 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP865(0x80-0xFF) to Unicode conversion table */ + 0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x00E0, 0x00E5, 0x00E7, 0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x00EC, 0x00C4, 0x00C5, + 0x00C5, 0x00E6, 0x00C6, 0x00F4, 0x00F6, 0x00F2, 0x00FB, 0x00F9, 0x00FF, 0x00D6, 0x00DC, 0x00F8, 0x00A3, 0x00D8, 0x20A7, 0x0192, + 0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x00AA, 0x00BA, 0x00BF, 0x2310, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00A4, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x2558, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 
0x2560, 0x2550, 0x256C, 0x2567, + 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580, + 0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4, 0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2229, + 0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248, 0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 866 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP866(0x80-0xFF) to Unicode conversion table */ + 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041A, 0x041B, 0x041C, 0x041D, 0x041E, 0x041F, + 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042A, 0x042B, 0x042C, 0x042D, 0x042E, 0x042F, + 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567, + 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580, + 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, + 0x0401, 0x0451, 0x0404, 0x0454, 0x0407, 0x0457, 0x040E, 0x045E, 0x00B0, 0x2219, 0x00B7, 0x221A, 0x2116, 0x00A4, 0x25A0, 0x00A0 +}; + +#elif _CODE_PAGE == 869 +#define _TBLDEF 1 +static +const WCHAR Tbl[] = { /* CP869(0x80-0xFF) to Unicode conversion table */ + 0x00B7, 0x00B7, 0x00B7, 0x00B7, 0x00B7, 0x00B7, 0x0386, 0x00B7, 0x00B7, 0x00AC, 0x00A6, 0x2018, 0x2019, 0x0388, 0x2015, 0x0389, + 0x038A, 0x03AA, 0x038C, 0x00B7, 0x00B7, 0x038E, 0x03AB, 0x00A9, 0x038F, 0x00B2, 0x00B3, 0x03AC, 0x00A3, 0x03AD, 0x03AE, 0x03AF, + 0x03CA, 0x0390, 0x03CC, 0x03CD, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397, 0x00BD, 0x0398, 0x0399, 0x00AB, 0x00BB, + 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x039A, 0x039B, 0x039C, 0x039D, 0x2563, 0x2551, 0x2557, 0x255D, 0x039E, 0x039F, 0x2510, + 0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x0A30, 0x03A1, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x03A3, + 0x03A4, 0x03A5, 0x03A6, 0x03A7, 0x03A8, 0x03A9, 0x03B1, 0x03B2, 0x03B3, 0x2518, 0x250C, 0x2588, 0x2584, 0x03B4, 0x03B5, 0x2580, + 0x03B6, 0x03B7, 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, 0x03C0, 0x03C1, 0x03C3, 0x03C2, 0x03C4, 0x0384, + 0x00AD, 0x00B1, 0x03C5, 0x03C6, 0x03C7, 0x00A7, 0x03C8, 0x0385, 0x00B0, 0x00A8, 0x03C9, 0x03CB, 0x03B0, 0x03CE, 0x25A0, 0x00A0 +}; + +#endif + + +#if !_TBLDEF || !_USE_LFN +#error This file is not needed at current configuration. Remove from the project. +#endif + + + + +WCHAR ff_convert ( /* Converted character, Returns zero on error */ + WCHAR chr, /* Character code to be converted */ + UINT dir /* 0: Unicode to OEM code, 1: OEM code to Unicode */ +) +{ + WCHAR c; + + + if (chr < 0x80) { /* ASCII */ + c = chr; + + } else { + if (dir) { /* OEM code to Unicode */ + c = (chr >= 0x100) ? 
0 : Tbl[chr - 0x80]; + + } else { /* Unicode to OEM code */ + for (c = 0; c < 0x80; c++) { + if (chr == Tbl[c]) break; + } + c = (c + 0x80) & 0xFF; + } + } + + return c; +} + + + +WCHAR ff_wtoupper ( /* Returns upper converted character */ + WCHAR chr /* Unicode character to be upper converted (BMP only) */ +) +{ + /* Compressed upper conversion table */ + static const WCHAR cvt1[] = { /* U+0000 - U+0FFF */ + /* Basic Latin */ + 0x0061,0x031A, + /* Latin-1 Supplement */ + 0x00E0,0x0317, 0x00F8,0x0307, 0x00FF,0x0001,0x0178, + /* Latin Extended-A */ + 0x0100,0x0130, 0x0132,0x0106, 0x0139,0x0110, 0x014A,0x012E, 0x0179,0x0106, + /* Latin Extended-B */ + 0x0180,0x004D,0x0243,0x0181,0x0182,0x0182,0x0184,0x0184,0x0186,0x0187,0x0187,0x0189,0x018A,0x018B,0x018B,0x018D,0x018E,0x018F,0x0190,0x0191,0x0191,0x0193,0x0194,0x01F6,0x0196,0x0197,0x0198,0x0198,0x023D,0x019B,0x019C,0x019D,0x0220,0x019F,0x01A0,0x01A0,0x01A2,0x01A2,0x01A4,0x01A4,0x01A6,0x01A7,0x01A7,0x01A9,0x01AA,0x01AB,0x01AC,0x01AC,0x01AE,0x01AF,0x01AF,0x01B1,0x01B2,0x01B3,0x01B3,0x01B5,0x01B5,0x01B7,0x01B8,0x01B8,0x01BA,0x01BB,0x01BC,0x01BC,0x01BE,0x01F7,0x01C0,0x01C1,0x01C2,0x01C3,0x01C4,0x01C5,0x01C4,0x01C7,0x01C8,0x01C7,0x01CA,0x01CB,0x01CA, + 0x01CD,0x0110, 0x01DD,0x0001,0x018E, 0x01DE,0x0112, 0x01F3,0x0003,0x01F1,0x01F4,0x01F4, 0x01F8,0x0128, + 0x0222,0x0112, 0x023A,0x0009,0x2C65,0x023B,0x023B,0x023D,0x2C66,0x023F,0x0240,0x0241,0x0241, 0x0246,0x010A, + /* IPA Extensions */ + 0x0253,0x0040,0x0181,0x0186,0x0255,0x0189,0x018A,0x0258,0x018F,0x025A,0x0190,0x025C,0x025D,0x025E,0x025F,0x0193,0x0261,0x0262,0x0194,0x0264,0x0265,0x0266,0x0267,0x0197,0x0196,0x026A,0x2C62,0x026C,0x026D,0x026E,0x019C,0x0270,0x0271,0x019D,0x0273,0x0274,0x019F,0x0276,0x0277,0x0278,0x0279,0x027A,0x027B,0x027C,0x2C64,0x027E,0x027F,0x01A6,0x0281,0x0282,0x01A9,0x0284,0x0285,0x0286,0x0287,0x01AE,0x0244,0x01B1,0x01B2,0x0245,0x028D,0x028E,0x028F,0x0290,0x0291,0x01B7, + /* Greek, Coptic */ + 0x037B,0x0003,0x03FD,0x03FE,0x03FF, 0x03AC,0x0004,0x0386,0x0388,0x0389,0x038A, 0x03B1,0x0311, + 0x03C2,0x0002,0x03A3,0x03A3, 0x03C4,0x0308, 0x03CC,0x0003,0x038C,0x038E,0x038F, 0x03D8,0x0118, + 0x03F2,0x000A,0x03F9,0x03F3,0x03F4,0x03F5,0x03F6,0x03F7,0x03F7,0x03F9,0x03FA,0x03FA, + /* Cyrillic */ + 0x0430,0x0320, 0x0450,0x0710, 0x0460,0x0122, 0x048A,0x0136, 0x04C1,0x010E, 0x04CF,0x0001,0x04C0, 0x04D0,0x0144, + /* Armenian */ + 0x0561,0x0426, + + 0x0000 + }; + static const WCHAR cvt2[] = { /* U+1000 - U+FFFF */ + /* Phonetic Extensions */ + 0x1D7D,0x0001,0x2C63, + /* Latin Extended Additional */ + 0x1E00,0x0196, 0x1EA0,0x015A, + /* Greek Extended */ + 0x1F00,0x0608, 0x1F10,0x0606, 0x1F20,0x0608, 0x1F30,0x0608, 0x1F40,0x0606, + 0x1F51,0x0007,0x1F59,0x1F52,0x1F5B,0x1F54,0x1F5D,0x1F56,0x1F5F, 0x1F60,0x0608, + 0x1F70,0x000E,0x1FBA,0x1FBB,0x1FC8,0x1FC9,0x1FCA,0x1FCB,0x1FDA,0x1FDB,0x1FF8,0x1FF9,0x1FEA,0x1FEB,0x1FFA,0x1FFB, + 0x1F80,0x0608, 0x1F90,0x0608, 0x1FA0,0x0608, 0x1FB0,0x0004,0x1FB8,0x1FB9,0x1FB2,0x1FBC, + 0x1FCC,0x0001,0x1FC3, 0x1FD0,0x0602, 0x1FE0,0x0602, 0x1FE5,0x0001,0x1FEC, 0x1FF2,0x0001,0x1FFC, + /* Letterlike Symbols */ + 0x214E,0x0001,0x2132, + /* Number forms */ + 0x2170,0x0210, 0x2184,0x0001,0x2183, + /* Enclosed Alphanumerics */ + 0x24D0,0x051A, 0x2C30,0x042F, + /* Latin Extended-C */ + 0x2C60,0x0102, 0x2C67,0x0106, 0x2C75,0x0102, + /* Coptic */ + 0x2C80,0x0164, + /* Georgian Supplement */ + 0x2D00,0x0826, + /* Full-width */ + 0xFF41,0x031A, + + 0x0000 + }; + const WCHAR *p; + WCHAR bc, nc, cmd; + + + p = chr < 0x1000 ? 
cvt1 : cvt2; + for (;;) { + bc = *p++; /* Get block base */ + if (!bc || chr < bc) break; + nc = *p++; cmd = nc >> 8; nc &= 0xFF; /* Get processing command and block size */ + if (chr < bc + nc) { /* In the block? */ + switch (cmd) { + case 0: chr = p[chr - bc]; break; /* Table conversion */ + case 1: chr -= (chr - bc) & 1; break; /* Case pairs */ + case 2: chr -= 16; break; /* Shift -16 */ + case 3: chr -= 32; break; /* Shift -32 */ + case 4: chr -= 48; break; /* Shift -48 */ + case 5: chr -= 26; break; /* Shift -26 */ + case 6: chr += 8; break; /* Shift +8 */ + case 7: chr -= 80; break; /* Shift -80 */ + case 8: chr -= 0x1C60; break; /* Shift -0x1C60 */ + } + break; + } + if (!cmd) p += nc; + } + + return chr; +} + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/option/syscall.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/option/syscall.c new file mode 100644 index 000000000..cd6370dc9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FatFs/src/option/syscall.c @@ -0,0 +1,177 @@ +/*------------------------------------------------------------------------*/ +/* Sample code of OS dependent controls for FatFs */ +/* (C)ChaN, 2014 */ +/* Portions COPYRIGHT 2017 STMicroelectronics */ +/* Portions Copyright (C) 2014, ChaN, all right reserved */ +/*------------------------------------------------------------------------*/ + +/** + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. All rights reserved. + * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** +**/ + + + +#include "../ff.h" + + +#if _FS_REENTRANT +/*------------------------------------------------------------------------*/ +/* Create a Synchronization Object */ +/*------------------------------------------------------------------------*/ +/* This function is called in f_mount() function to create a new +/ synchronization object, such as semaphore and mutex. When a 0 is returned, +/ the f_mount() function fails with FR_INT_ERR. +*/ + +int ff_cre_syncobj ( /* 1:Function succeeded, 0:Could not create the sync object */ + BYTE vol, /* Corresponding volume (logical drive number) */ + _SYNC_t *sobj /* Pointer to return the created sync object */ +) +{ + + int ret; +#if _USE_MUTEX + +#if (osCMSIS < 0x20000U) + osMutexDef(MTX); + *sobj = osMutexCreate(osMutex(MTX)); +#else + *sobj = osMutexNew(NULL); +#endif + +#else + +#if (osCMSIS < 0x20000U) + osSemaphoreDef(SEM); + *sobj = osSemaphoreCreate(osSemaphore(SEM), 1); +#else + *sobj = osSemaphoreNew(1, 1, NULL); +#endif + +#endif + ret = (*sobj != NULL); + + return ret; +} + + + +/*------------------------------------------------------------------------*/ +/* Delete a Synchronization Object */ +/*------------------------------------------------------------------------*/ +/* This function is called in f_mount() function to delete a synchronization +/ object that created with ff_cre_syncobj() function. When a 0 is returned, +/ the f_mount() function fails with FR_INT_ERR. 
+*/ + +int ff_del_syncobj ( /* 1:Function succeeded, 0:Could not delete due to any error */ + _SYNC_t sobj /* Sync object tied to the logical drive to be deleted */ +) +{ +#if _USE_MUTEX + osMutexDelete (sobj); +#else + osSemaphoreDelete (sobj); +#endif + return 1; +} + + + +/*------------------------------------------------------------------------*/ +/* Request Grant to Access the Volume */ +/*------------------------------------------------------------------------*/ +/* This function is called on entering file functions to lock the volume. +/ When a 0 is returned, the file function fails with FR_TIMEOUT. +*/ + +int ff_req_grant ( /* 1:Got a grant to access the volume, 0:Could not get a grant */ + _SYNC_t sobj /* Sync object to wait */ +) +{ + int ret = 0; +#if (osCMSIS < 0x20000U) + +#if _USE_MUTEX + if(osMutexWait(sobj, _FS_TIMEOUT) == osOK) +#else + if(osSemaphoreWait(sobj, _FS_TIMEOUT) == osOK) +#endif + +#else + +#if _USE_MUTEX + if(osMutexAcquire(sobj, _FS_TIMEOUT) == osOK) +#else + if(osSemaphoreAcquire(sobj, _FS_TIMEOUT) == osOK) +#endif + +#endif + { + ret = 1; + } + + return ret; +} + + + +/*------------------------------------------------------------------------*/ +/* Release Grant to Access the Volume */ +/*------------------------------------------------------------------------*/ +/* This function is called on leaving file functions to unlock the volume. +*/ + +void ff_rel_grant ( + _SYNC_t sobj /* Sync object to be signaled */ +) +{ +#if _USE_MUTEX + osMutexRelease(sobj); +#else + osSemaphoreRelease(sobj); +#endif +} + +#endif + + + + +#if _USE_LFN == 3 /* LFN with a working buffer on the heap */ +/*------------------------------------------------------------------------*/ +/* Allocate a memory block */ +/*------------------------------------------------------------------------*/ +/* If a NULL is returned, the file function fails with FR_NOT_ENOUGH_CORE. +*/ + +void* ff_memalloc ( /* Returns pointer to the allocated memory block */ + UINT msize /* Number of bytes to allocate */ +) +{ + return ff_malloc(msize); /* Allocate a new memory block with POSIX API */ +} + + +/*------------------------------------------------------------------------*/ +/* Free a memory block */ +/*------------------------------------------------------------------------*/ + +void ff_memfree ( + void* mblock /* Pointer to the memory block to free */ +) +{ + ff_free(mblock); /* Discard the memory block with POSIX API */ +} + +#endif diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.c new file mode 100644 index 000000000..89c36334e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.c @@ -0,0 +1,1727 @@ +/* ---------------------------------------------------------------------- + * $Date: 5. 
February 2013 + * $Revision: V1.02 + * + * Project: CMSIS-RTOS API + * Title: cmsis_os.c + * + * Version 0.02 + * Initial Proposal Phase + * Version 0.03 + * osKernelStart added, optional feature: main started as thread + * osSemaphores have standard behavior + * osTimerCreate does not start the timer, added osTimerStart + * osThreadPass is renamed to osThreadYield + * Version 1.01 + * Support for C++ interface + * - const attribute removed from the osXxxxDef_t typedef's + * - const attribute added to the osXxxxDef macros + * Added: osTimerDelete, osMutexDelete, osSemaphoreDelete + * Added: osKernelInitialize + * Version 1.02 + * Control functions for short timeouts in microsecond resolution: + * Added: osKernelSysTick, osKernelSysTickFrequency, osKernelSysTickMicroSec + * Removed: osSignalGet + * + * + *---------------------------------------------------------------------------- + * + * Portions Copyright © 2016 STMicroelectronics International N.V. All rights reserved. + * Portions Copyright (c) 2013 ARM LIMITED + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * - Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ *---------------------------------------------------------------------------*/ + +#include +#include "cmsis_os.h" + +/* + * ARM Compiler 4/5 + */ +#if defined ( __CC_ARM ) + + #define __ASM __asm + #define __INLINE __inline + #define __STATIC_INLINE static __inline + + #include "cmsis_armcc.h" + +/* + * GNU Compiler + */ +#elif defined ( __GNUC__ ) + + #define __ASM __asm /*!< asm keyword for GNU Compiler */ + #define __INLINE inline /*!< inline keyword for GNU Compiler */ + #define __STATIC_INLINE static inline + + #include "cmsis_gcc.h" + + +/* + * IAR Compiler + */ +#elif defined ( __ICCARM__ ) + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + + #include +#endif + +extern void xPortSysTickHandler(void); + +/* Convert from CMSIS type osPriority to FreeRTOS priority number */ +static unsigned portBASE_TYPE makeFreeRtosPriority (osPriority priority) +{ + unsigned portBASE_TYPE fpriority = tskIDLE_PRIORITY; + + if (priority != osPriorityError) { + fpriority += (priority - osPriorityIdle); + } + + return fpriority; +} + +#if (INCLUDE_uxTaskPriorityGet == 1) +/* Convert from FreeRTOS priority number to CMSIS type osPriority */ +static osPriority makeCmsisPriority (unsigned portBASE_TYPE fpriority) +{ + osPriority priority = osPriorityError; + + if ((fpriority - tskIDLE_PRIORITY) <= (osPriorityRealtime - osPriorityIdle)) { + priority = (osPriority)((int)osPriorityIdle + (int)(fpriority - tskIDLE_PRIORITY)); + } + + return priority; +} +#endif + + +/* Determine whether we are in thread mode or handler mode. */ +static int inHandlerMode (void) +{ + return __get_IPSR() != 0; +} + +/*********************** Kernel Control Functions *****************************/ +/** +* @brief Initialize the RTOS Kernel for creating objects. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osKernelInitialize shall be consistent in every CMSIS-RTOS. +*/ +osStatus osKernelInitialize (void); + +/** +* @brief Start the RTOS Kernel with executing the specified thread. +* @param thread_def thread definition referenced with \ref osThread. +* @param argument pointer that is passed to the thread function as start argument. +* @retval status code that indicates the execution status of the function +* @note MUST REMAIN UNCHANGED: \b osKernelStart shall be consistent in every CMSIS-RTOS. +*/ +osStatus osKernelStart (void) +{ + vTaskStartScheduler(); + + return osOK; +} + +/** +* @brief Check if the RTOS kernel is already started +* @param None +* @retval (0) RTOS is not started +* (1) RTOS is started +* (-1) if this feature is disabled in FreeRTOSConfig.h +* @note MUST REMAIN UNCHANGED: \b osKernelRunning shall be consistent in every CMSIS-RTOS. +*/ +int32_t osKernelRunning(void) +{ +#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) + return 0; + else + return 1; +#else + return (-1); +#endif +} + +#if (defined (osFeature_SysTick) && (osFeature_SysTick != 0)) // System Timer available +/** +* @brief Get the value of the Kernel SysTick timer +* @param None +* @retval None +* @note MUST REMAIN UNCHANGED: \b osKernelSysTick shall be consistent in every CMSIS-RTOS. 
+*/ +uint32_t osKernelSysTick(void) +{ + if (inHandlerMode()) { + return xTaskGetTickCountFromISR(); + } + else { + return xTaskGetTickCount(); + } +} +#endif // System Timer available +/*********************** Thread Management *****************************/ +/** +* @brief Create a thread and add it to Active Threads and set it to state READY. +* @param thread_def thread definition referenced with \ref osThread. +* @param argument pointer that is passed to the thread function as start argument. +* @retval thread ID for reference by other functions or NULL in case of error. +* @note MUST REMAIN UNCHANGED: \b osThreadCreate shall be consistent in every CMSIS-RTOS. +*/ +osThreadId osThreadCreate (const osThreadDef_t *thread_def, void *argument) +{ + TaskHandle_t handle; + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + if((thread_def->buffer != NULL) && (thread_def->controlblock != NULL)) { + handle = xTaskCreateStatic((TaskFunction_t)thread_def->pthread,(const portCHAR *)thread_def->name, + thread_def->stacksize, argument, makeFreeRtosPriority(thread_def->tpriority), + thread_def->buffer, thread_def->controlblock); + } + else { + if (xTaskCreate((TaskFunction_t)thread_def->pthread,(const portCHAR *)thread_def->name, + thread_def->stacksize, argument, makeFreeRtosPriority(thread_def->tpriority), + &handle) != pdPASS) { + return NULL; + } + } +#elif( configSUPPORT_STATIC_ALLOCATION == 1 ) + + handle = xTaskCreateStatic((TaskFunction_t)thread_def->pthread,(const portCHAR *)thread_def->name, + thread_def->stacksize, argument, makeFreeRtosPriority(thread_def->tpriority), + thread_def->buffer, thread_def->controlblock); +#else + if (xTaskCreate((TaskFunction_t)thread_def->pthread,(const portCHAR *)thread_def->name, + thread_def->stacksize, argument, makeFreeRtosPriority(thread_def->tpriority), + &handle) != pdPASS) { + return NULL; + } +#endif + + return handle; +} + +/** +* @brief Return the thread ID of the current running thread. +* @retval thread ID for reference by other functions or NULL in case of error. +* @note MUST REMAIN UNCHANGED: \b osThreadGetId shall be consistent in every CMSIS-RTOS. +*/ +osThreadId osThreadGetId (void) +{ +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + return xTaskGetCurrentTaskHandle(); +#else + return NULL; +#endif +} + +/** +* @brief Terminate execution of a thread and remove it from Active Threads. +* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osThreadTerminate shall be consistent in every CMSIS-RTOS. +*/ +osStatus osThreadTerminate (osThreadId thread_id) +{ +#if (INCLUDE_vTaskDelete == 1) + vTaskDelete(thread_id); + return osOK; +#else + return osErrorOS; +#endif +} + +/** +* @brief Pass control to next thread that is in state \b READY. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osThreadYield shall be consistent in every CMSIS-RTOS. +*/ +osStatus osThreadYield (void) +{ + taskYIELD(); + + return osOK; +} + +/** +* @brief Change priority of an active thread. +* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +* @param priority new priority value for the thread function. +* @retval status code that indicates the execution status of the function. 
+* @note MUST REMAIN UNCHANGED: \b osThreadSetPriority shall be consistent in every CMSIS-RTOS. +*/ +osStatus osThreadSetPriority (osThreadId thread_id, osPriority priority) +{ +#if (INCLUDE_vTaskPrioritySet == 1) + vTaskPrioritySet(thread_id, makeFreeRtosPriority(priority)); + return osOK; +#else + return osErrorOS; +#endif +} + +/** +* @brief Get current priority of an active thread. +* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +* @retval current priority value of the thread function. +* @note MUST REMAIN UNCHANGED: \b osThreadGetPriority shall be consistent in every CMSIS-RTOS. +*/ +osPriority osThreadGetPriority (osThreadId thread_id) +{ +#if (INCLUDE_uxTaskPriorityGet == 1) + if (inHandlerMode()) + { + return makeCmsisPriority(uxTaskPriorityGetFromISR(thread_id)); + } + else + { + return makeCmsisPriority(uxTaskPriorityGet(thread_id)); + } +#else + return osPriorityError; +#endif +} + +/*********************** Generic Wait Functions *******************************/ +/** +* @brief Wait for Timeout (Time Delay) +* @param millisec time delay value +* @retval status code that indicates the execution status of the function. +*/ +osStatus osDelay (uint32_t millisec) +{ +#if INCLUDE_vTaskDelay + TickType_t ticks = millisec / portTICK_PERIOD_MS; + + vTaskDelay(ticks ? ticks : 1); /* Minimum delay = 1 tick */ + + return osOK; +#else + (void) millisec; + + return osErrorResource; +#endif +} + +#if (defined (osFeature_Wait) && (osFeature_Wait != 0)) /* Generic Wait available */ +/** +* @brief Wait for Signal, Message, Mail, or Timeout +* @param millisec timeout value or 0 in case of no time-out +* @retval event that contains signal, message, or mail information or error code. +* @note MUST REMAIN UNCHANGED: \b osWait shall be consistent in every CMSIS-RTOS. +*/ +osEvent osWait (uint32_t millisec); + +#endif /* Generic Wait available */ + +/*********************** Timer Management Functions ***************************/ +/** +* @brief Create a timer. +* @param timer_def timer object referenced with \ref osTimer. +* @param type osTimerOnce for one-shot or osTimerPeriodic for periodic behavior. +* @param argument argument to the timer call back function. +* @retval timer ID for reference by other functions or NULL in case of error. +* @note MUST REMAIN UNCHANGED: \b osTimerCreate shall be consistent in every CMSIS-RTOS. +*/ +osTimerId osTimerCreate (const osTimerDef_t *timer_def, os_timer_type type, void *argument) +{ +#if (configUSE_TIMERS == 1) + +#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + if(timer_def->controlblock != NULL) { + return xTimerCreateStatic((const char *)"", + 1, // period should be filled when starting the Timer using osTimerStart + (type == osTimerPeriodic) ? pdTRUE : pdFALSE, + (void *) argument, + (TimerCallbackFunction_t)timer_def->ptimer, + (StaticTimer_t *)timer_def->controlblock); + } + else { + return xTimerCreate((const char *)"", + 1, // period should be filled when starting the Timer using osTimerStart + (type == osTimerPeriodic) ? pdTRUE : pdFALSE, + (void *) argument, + (TimerCallbackFunction_t)timer_def->ptimer); + } +#elif( configSUPPORT_STATIC_ALLOCATION == 1 ) + return xTimerCreateStatic((const char *)"", + 1, // period should be filled when starting the Timer using osTimerStart + (type == osTimerPeriodic) ? 
pdTRUE : pdFALSE, + (void *) argument, + (TimerCallbackFunction_t)timer_def->ptimer, + (StaticTimer_t *)timer_def->controlblock); +#else + return xTimerCreate((const char *)"", + 1, // period should be filled when starting the Timer using osTimerStart + (type == osTimerPeriodic) ? pdTRUE : pdFALSE, + (void *) argument, + (TimerCallbackFunction_t)timer_def->ptimer); +#endif + +#else + return NULL; +#endif +} + +/** +* @brief Start or restart a timer. +* @param timer_id timer ID obtained by \ref osTimerCreate. +* @param millisec time delay value of the timer. +* @retval status code that indicates the execution status of the function +* @note MUST REMAIN UNCHANGED: \b osTimerStart shall be consistent in every CMSIS-RTOS. +*/ +osStatus osTimerStart (osTimerId timer_id, uint32_t millisec) +{ + osStatus result = osOK; +#if (configUSE_TIMERS == 1) + portBASE_TYPE taskWoken = pdFALSE; + TickType_t ticks = millisec / portTICK_PERIOD_MS; + + if (ticks == 0) + ticks = 1; + + if (inHandlerMode()) + { + if (xTimerChangePeriodFromISR(timer_id, ticks, &taskWoken) != pdPASS) + { + result = osErrorOS; + } + else + { + portEND_SWITCHING_ISR(taskWoken); + } + } + else + { + if (xTimerChangePeriod(timer_id, ticks, 0) != pdPASS) + result = osErrorOS; + } + +#else + result = osErrorOS; +#endif + return result; +} + +/** +* @brief Stop a timer. +* @param timer_id timer ID obtained by \ref osTimerCreate +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osTimerStop shall be consistent in every CMSIS-RTOS. +*/ +osStatus osTimerStop (osTimerId timer_id) +{ + osStatus result = osOK; +#if (configUSE_TIMERS == 1) + portBASE_TYPE taskWoken = pdFALSE; + + if (inHandlerMode()) { + if (xTimerStopFromISR(timer_id, &taskWoken) != pdPASS) { + return osErrorOS; + } + portEND_SWITCHING_ISR(taskWoken); + } + else { + if (xTimerStop(timer_id, 0) != pdPASS) { + result = osErrorOS; + } + } +#else + result = osErrorOS; +#endif + return result; +} + +/** +* @brief Delete a timer. +* @param timer_id timer ID obtained by \ref osTimerCreate +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osTimerDelete shall be consistent in every CMSIS-RTOS. +*/ +osStatus osTimerDelete (osTimerId timer_id) +{ +osStatus result = osOK; + +#if (configUSE_TIMERS == 1) + + if (inHandlerMode()) { + return osErrorISR; + } + else { + if ((xTimerDelete(timer_id, osWaitForever )) != pdPASS) { + result = osErrorOS; + } + } + +#else + result = osErrorOS; +#endif + + return result; +} + +/*************************** Signal Management ********************************/ +/** +* @brief Set the specified Signal Flags of an active thread. +* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +* @param signals specifies the signal flags of the thread that should be set. +* @retval previous signal flags of the specified thread or 0x80000000 in case of incorrect parameters. +* @note MUST REMAIN UNCHANGED: \b osSignalSet shall be consistent in every CMSIS-RTOS. 
+*/ +int32_t osSignalSet (osThreadId thread_id, int32_t signal) +{ +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + BaseType_t xHigherPriorityTaskWoken = pdFALSE; + uint32_t ulPreviousNotificationValue = 0; + + if (inHandlerMode()) + { + if(xTaskGenericNotifyFromISR( thread_id , (uint32_t)signal, eSetBits, &ulPreviousNotificationValue, &xHigherPriorityTaskWoken ) != pdPASS ) + return 0x80000000; + + portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); + } + else if(xTaskGenericNotify( thread_id , (uint32_t)signal, eSetBits, &ulPreviousNotificationValue) != pdPASS ) + return 0x80000000; + + return ulPreviousNotificationValue; +#else + (void) thread_id; + (void) signal; + + return 0x80000000; /* Task Notification not supported */ +#endif +} + +/** +* @brief Clear the specified Signal Flags of an active thread. +* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +* @param signals specifies the signal flags of the thread that shall be cleared. +* @retval previous signal flags of the specified thread or 0x80000000 in case of incorrect parameters. +* @note MUST REMAIN UNCHANGED: \b osSignalClear shall be consistent in every CMSIS-RTOS. +*/ +int32_t osSignalClear (osThreadId thread_id, int32_t signal); + +/** +* @brief Wait for one or more Signal Flags to become signaled for the current \b RUNNING thread. +* @param signals wait until all specified signal flags set or 0 for any single signal flag. +* @param millisec timeout value or 0 in case of no time-out. +* @retval event flag information or error code. +* @note MUST REMAIN UNCHANGED: \b osSignalWait shall be consistent in every CMSIS-RTOS. +*/ +osEvent osSignalWait (int32_t signals, uint32_t millisec) +{ + osEvent ret; + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + TickType_t ticks; + + ret.value.signals = 0; + ticks = 0; + if (millisec == osWaitForever) { + ticks = portMAX_DELAY; + } + else if (millisec != 0) { + ticks = millisec / portTICK_PERIOD_MS; + if (ticks == 0) { + ticks = 1; + } + } + + if (inHandlerMode()) + { + ret.status = osErrorISR; /*Not allowed in ISR*/ + } + else + { + if(xTaskNotifyWait( 0,(uint32_t) signals, (uint32_t *)&ret.value.signals, ticks) != pdTRUE) + { + if(ticks == 0) ret.status = osOK; + else ret.status = osEventTimeout; + } + else if(ret.value.signals < 0) + { + ret.status = osErrorValue; + } + else ret.status = osEventSignal; + } +#else + (void) signals; + (void) millisec; + + ret.status = osErrorOS; /* Task Notification not supported */ +#endif + + return ret; +} + +/**************************** Mutex Management ********************************/ +/** +* @brief Create and Initialize a Mutex object +* @param mutex_def mutex definition referenced with \ref osMutex. +* @retval mutex ID for reference by other functions or NULL in case of error. +* @note MUST REMAIN UNCHANGED: \b osMutexCreate shall be consistent in every CMSIS-RTOS. +*/ +osMutexId osMutexCreate (const osMutexDef_t *mutex_def) +{ +#if ( configUSE_MUTEXES == 1) + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + if (mutex_def->controlblock != NULL) { + return xSemaphoreCreateMutexStatic( mutex_def->controlblock ); + } + else { + return xSemaphoreCreateMutex(); + } +#elif ( configSUPPORT_STATIC_ALLOCATION == 1 ) + return xSemaphoreCreateMutexStatic( mutex_def->controlblock ); +#else + return xSemaphoreCreateMutex(); +#endif +#else + return NULL; +#endif +} + +/** +* @brief Wait until a Mutex becomes available +* @param mutex_id mutex ID obtained by \ref osMutexCreate. 
+* @param millisec timeout value or 0 in case of no time-out. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osMutexWait shall be consistent in every CMSIS-RTOS. +*/ +osStatus osMutexWait (osMutexId mutex_id, uint32_t millisec) +{ + TickType_t ticks; + portBASE_TYPE taskWoken = pdFALSE; + + + if (mutex_id == NULL) { + return osErrorParameter; + } + + ticks = 0; + if (millisec == osWaitForever) { + ticks = portMAX_DELAY; + } + else if (millisec != 0) { + ticks = millisec / portTICK_PERIOD_MS; + if (ticks == 0) { + ticks = 1; + } + } + + if (inHandlerMode()) { + if (xSemaphoreTakeFromISR(mutex_id, &taskWoken) != pdTRUE) { + return osErrorOS; + } + portEND_SWITCHING_ISR(taskWoken); + } + else if (xSemaphoreTake(mutex_id, ticks) != pdTRUE) { + return osErrorOS; + } + + return osOK; +} + +/** +* @brief Release a Mutex that was obtained by \ref osMutexWait +* @param mutex_id mutex ID obtained by \ref osMutexCreate. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osMutexRelease shall be consistent in every CMSIS-RTOS. +*/ +osStatus osMutexRelease (osMutexId mutex_id) +{ + osStatus result = osOK; + portBASE_TYPE taskWoken = pdFALSE; + + if (inHandlerMode()) { + if (xSemaphoreGiveFromISR(mutex_id, &taskWoken) != pdTRUE) { + return osErrorOS; + } + portEND_SWITCHING_ISR(taskWoken); + } + else if (xSemaphoreGive(mutex_id) != pdTRUE) + { + result = osErrorOS; + } + return result; +} + +/** +* @brief Delete a Mutex +* @param mutex_id mutex ID obtained by \ref osMutexCreate. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osMutexDelete shall be consistent in every CMSIS-RTOS. +*/ +osStatus osMutexDelete (osMutexId mutex_id) +{ + if (inHandlerMode()) { + return osErrorISR; + } + + vQueueDelete(mutex_id); + + return osOK; +} + +/******************** Semaphore Management Functions **************************/ + +#if (defined (osFeature_Semaphore) && (osFeature_Semaphore != 0)) + +/** +* @brief Create and Initialize a Semaphore object used for managing resources +* @param semaphore_def semaphore definition referenced with \ref osSemaphore. +* @param count number of available resources. +* @retval semaphore ID for reference by other functions or NULL in case of error. +* @note MUST REMAIN UNCHANGED: \b osSemaphoreCreate shall be consistent in every CMSIS-RTOS. 
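+* @note A short usage sketch (editorial illustration, not part of the generated file;
+*       the definition name txSem is hypothetical). With count == 1 this wrapper creates
+*       a binary semaphore, otherwise a counting semaphore:
+* \code
+* #include "cmsis_os.h"
+*
+* osSemaphoreDef(txSem);
+* osSemaphoreId txSemId;
+*
+* void init_tx (void) { txSemId = osSemaphoreCreate(osSemaphore(txSem), 1); }
+*
+* void send_packet (void)
+* {
+*   if (osSemaphoreWait(txSemId, osWaitForever) == osOK) {  // this wrapper returns osOK on success
+*     /* use the transmitter */
+*     osSemaphoreRelease(txSemId);
+*   }
+* }
+* \endcode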
+*/ +osSemaphoreId osSemaphoreCreate (const osSemaphoreDef_t *semaphore_def, int32_t count) +{ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + osSemaphoreId sema; + + if (semaphore_def->controlblock != NULL){ + if (count == 1) { + return xSemaphoreCreateBinaryStatic( semaphore_def->controlblock ); + } + else { +#if (configUSE_COUNTING_SEMAPHORES == 1 ) + return xSemaphoreCreateCountingStatic( count, count, semaphore_def->controlblock ); +#else + return NULL; +#endif + } + } + else { + if (count == 1) { + vSemaphoreCreateBinary(sema); + return sema; + } + else { +#if (configUSE_COUNTING_SEMAPHORES == 1 ) + return xSemaphoreCreateCounting(count, count); +#else + return NULL; +#endif + } + } +#elif ( configSUPPORT_STATIC_ALLOCATION == 1 ) // configSUPPORT_DYNAMIC_ALLOCATION == 0 + if(count == 1) { + return xSemaphoreCreateBinaryStatic( semaphore_def->controlblock ); + } + else + { +#if (configUSE_COUNTING_SEMAPHORES == 1 ) + return xSemaphoreCreateCountingStatic( count, count, semaphore_def->controlblock ); +#else + return NULL; +#endif + } +#else // configSUPPORT_STATIC_ALLOCATION == 0 && configSUPPORT_DYNAMIC_ALLOCATION == 1 + osSemaphoreId sema; + + if (count == 1) { + vSemaphoreCreateBinary(sema); + return sema; + } + else { +#if (configUSE_COUNTING_SEMAPHORES == 1 ) + return xSemaphoreCreateCounting(count, count); +#else + return NULL; +#endif + } +#endif +} + +/** +* @brief Wait until a Semaphore token becomes available +* @param semaphore_id semaphore object referenced with \ref osSemaphore. +* @param millisec timeout value or 0 in case of no time-out. +* @retval number of available tokens, or -1 in case of incorrect parameters. +* @note MUST REMAIN UNCHANGED: \b osSemaphoreWait shall be consistent in every CMSIS-RTOS. +*/ +int32_t osSemaphoreWait (osSemaphoreId semaphore_id, uint32_t millisec) +{ + TickType_t ticks; + portBASE_TYPE taskWoken = pdFALSE; + + + if (semaphore_id == NULL) { + return osErrorParameter; + } + + ticks = 0; + if (millisec == osWaitForever) { + ticks = portMAX_DELAY; + } + else if (millisec != 0) { + ticks = millisec / portTICK_PERIOD_MS; + if (ticks == 0) { + ticks = 1; + } + } + + if (inHandlerMode()) { + if (xSemaphoreTakeFromISR(semaphore_id, &taskWoken) != pdTRUE) { + return osErrorOS; + } + portEND_SWITCHING_ISR(taskWoken); + } + else if (xSemaphoreTake(semaphore_id, ticks) != pdTRUE) { + return osErrorOS; + } + + return osOK; +} + +/** +* @brief Release a Semaphore token +* @param semaphore_id semaphore object referenced with \ref osSemaphore. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osSemaphoreRelease shall be consistent in every CMSIS-RTOS. +*/ +osStatus osSemaphoreRelease (osSemaphoreId semaphore_id) +{ + osStatus result = osOK; + portBASE_TYPE taskWoken = pdFALSE; + + + if (inHandlerMode()) { + if (xSemaphoreGiveFromISR(semaphore_id, &taskWoken) != pdTRUE) { + return osErrorOS; + } + portEND_SWITCHING_ISR(taskWoken); + } + else { + if (xSemaphoreGive(semaphore_id) != pdTRUE) { + result = osErrorOS; + } + } + + return result; +} + +/** +* @brief Delete a Semaphore +* @param semaphore_id semaphore object referenced with \ref osSemaphore. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osSemaphoreDelete shall be consistent in every CMSIS-RTOS. 
+*/ +osStatus osSemaphoreDelete (osSemaphoreId semaphore_id) +{ + if (inHandlerMode()) { + return osErrorISR; + } + + vSemaphoreDelete(semaphore_id); + + return osOK; +} + +#endif /* Use Semaphores */ + +/******************* Memory Pool Management Functions ***********************/ + +#if (defined (osFeature_Pool) && (osFeature_Pool != 0)) + +//TODO +//This is a primitive and inefficient wrapper around the existing FreeRTOS memory management. +//A better implementation will have to modify heap_x.c! + + +typedef struct os_pool_cb { + void *pool; + uint8_t *markers; + uint32_t pool_sz; + uint32_t item_sz; + uint32_t currentIndex; +} os_pool_cb_t; + + +/** +* @brief Create and Initialize a memory pool +* @param pool_def memory pool definition referenced with \ref osPool. +* @retval memory pool ID for reference by other functions or NULL in case of error. +* @note MUST REMAIN UNCHANGED: \b osPoolCreate shall be consistent in every CMSIS-RTOS. +*/ +osPoolId osPoolCreate (const osPoolDef_t *pool_def) +{ +#if (configSUPPORT_DYNAMIC_ALLOCATION == 1) + osPoolId thePool; + int itemSize = 4 * ((pool_def->item_sz + 3) / 4); + uint32_t i; + + /* First have to allocate memory for the pool control block. */ + thePool = pvPortMalloc(sizeof(os_pool_cb_t)); + + + if (thePool) { + thePool->pool_sz = pool_def->pool_sz; + thePool->item_sz = itemSize; + thePool->currentIndex = 0; + + /* Memory for markers */ + thePool->markers = pvPortMalloc(pool_def->pool_sz); + + if (thePool->markers) { + /* Now allocate the pool itself. */ + thePool->pool = pvPortMalloc(pool_def->pool_sz * itemSize); + + if (thePool->pool) { + for (i = 0; i < pool_def->pool_sz; i++) { + thePool->markers[i] = 0; + } + } + else { + vPortFree(thePool->markers); + vPortFree(thePool); + thePool = NULL; + } + } + else { + vPortFree(thePool); + thePool = NULL; + } + } + + return thePool; + +#else + return NULL; +#endif +} + +/** +* @brief Allocate a memory block from a memory pool +* @param pool_id memory pool ID obtain referenced with \ref osPoolCreate. +* @retval address of the allocated memory block or NULL in case of no memory available. +* @note MUST REMAIN UNCHANGED: \b osPoolAlloc shall be consistent in every CMSIS-RTOS. +*/ +void *osPoolAlloc (osPoolId pool_id) +{ + int dummy = 0; + void *p = NULL; + uint32_t i; + uint32_t index; + + if (inHandlerMode()) { + dummy = portSET_INTERRUPT_MASK_FROM_ISR(); + } + else { + vPortEnterCritical(); + } + + for (i = 0; i < pool_id->pool_sz; i++) { + index = (pool_id->currentIndex + i) % pool_id->pool_sz; + + if (pool_id->markers[index] == 0) { + pool_id->markers[index] = 1; + p = (void *)((uint32_t)(pool_id->pool) + (index * pool_id->item_sz)); + pool_id->currentIndex = index; + break; + } + } + + if (inHandlerMode()) { + portCLEAR_INTERRUPT_MASK_FROM_ISR(dummy); + } + else { + vPortExitCritical(); + } + + return p; +} + +/** +* @brief Allocate a memory block from a memory pool and set memory block to zero +* @param pool_id memory pool ID obtain referenced with \ref osPoolCreate. +* @retval address of the allocated memory block or NULL in case of no memory available. +* @note MUST REMAIN UNCHANGED: \b osPoolCAlloc shall be consistent in every CMSIS-RTOS. +*/ +void *osPoolCAlloc (osPoolId pool_id) +{ + void *p = osPoolAlloc(pool_id); + + if (p != NULL) + { + memset(p, 0, sizeof(pool_id->pool_sz)); + } + + return p; +} + +/** +* @brief Return an allocated memory block back to a specific memory pool +* @param pool_id memory pool ID obtain referenced with \ref osPoolCreate. 
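+* @note A usage sketch for the pool wrapper above (editorial illustration, not part of the
+*       generated file; the block type and pool name are hypothetical):
+* \code
+* #include <string.h>
+* #include "cmsis_os.h"
+*
+* typedef struct { uint32_t id; float value; } sample_t;
+*
+* osPoolDef(samplePool, 8, sample_t);          // 8 blocks of sizeof(sample_t)
+* osPoolId samplePoolId;
+*
+* void pool_demo (void)
+* {
+*   samplePoolId = osPoolCreate(osPool(samplePool));
+*   sample_t *s = (sample_t *)osPoolAlloc(samplePoolId);
+*   if (s != NULL) {
+*     memset(s, 0, sizeof(*s));                // osPoolCAlloc above clears only sizeof(pool_id->pool_sz) bytes
+*     s->id = 1u;
+*     osPoolFree(samplePoolId, s);
+*   }
+* }
+* \endcode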
+* @param block address of the allocated memory block that is returned to the memory pool. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osPoolFree shall be consistent in every CMSIS-RTOS. +*/ +osStatus osPoolFree (osPoolId pool_id, void *block) +{ + uint32_t index; + + if (pool_id == NULL) { + return osErrorParameter; + } + + if (block == NULL) { + return osErrorParameter; + } + + if (block < pool_id->pool) { + return osErrorParameter; + } + + index = (uint32_t)block - (uint32_t)(pool_id->pool); + if (index % pool_id->item_sz) { + return osErrorParameter; + } + index = index / pool_id->item_sz; + if (index >= pool_id->pool_sz) { + return osErrorParameter; + } + + pool_id->markers[index] = 0; + + return osOK; +} + + +#endif /* Use Memory Pool Management */ + +/******************* Message Queue Management Functions *********************/ + +#if (defined (osFeature_MessageQ) && (osFeature_MessageQ != 0)) /* Use Message Queues */ + +/** +* @brief Create and Initialize a Message Queue +* @param queue_def queue definition referenced with \ref osMessageQ. +* @param thread_id thread ID (obtained by \ref osThreadCreate or \ref osThreadGetId) or NULL. +* @retval message queue ID for reference by other functions or NULL in case of error. +* @note MUST REMAIN UNCHANGED: \b osMessageCreate shall be consistent in every CMSIS-RTOS. +*/ +osMessageQId osMessageCreate (const osMessageQDef_t *queue_def, osThreadId thread_id) +{ + (void) thread_id; + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + if ((queue_def->buffer != NULL) && (queue_def->controlblock != NULL)) { + return xQueueCreateStatic(queue_def->queue_sz, queue_def->item_sz, queue_def->buffer, queue_def->controlblock); + } + else { + return xQueueCreate(queue_def->queue_sz, queue_def->item_sz); + } +#elif ( configSUPPORT_STATIC_ALLOCATION == 1 ) + return xQueueCreateStatic(queue_def->queue_sz, queue_def->item_sz, queue_def->buffer, queue_def->controlblock); +#else + return xQueueCreate(queue_def->queue_sz, queue_def->item_sz); +#endif +} + +/** +* @brief Put a Message to a Queue. +* @param queue_id message queue ID obtained with \ref osMessageCreate. +* @param info message information. +* @param millisec timeout value or 0 in case of no time-out. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osMessagePut shall be consistent in every CMSIS-RTOS. +*/ +osStatus osMessagePut (osMessageQId queue_id, uint32_t info, uint32_t millisec) +{ + portBASE_TYPE taskWoken = pdFALSE; + TickType_t ticks; + + ticks = millisec / portTICK_PERIOD_MS; + if (ticks == 0) { + ticks = 1; + } + + if (inHandlerMode()) { + if (xQueueSendFromISR(queue_id, &info, &taskWoken) != pdTRUE) { + return osErrorOS; + } + portEND_SWITCHING_ISR(taskWoken); + } + else { + if (xQueueSend(queue_id, &info, ticks) != pdTRUE) { + return osErrorOS; + } + } + + return osOK; +} + +/** +* @brief Get a Message or Wait for a Message from a Queue. +* @param queue_id message queue ID obtained with \ref osMessageCreate. +* @param millisec timeout value or 0 in case of no time-out. +* @retval event information that includes status code. +* @note MUST REMAIN UNCHANGED: \b osMessageGet shall be consistent in every CMSIS-RTOS. 
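+* @note A usage sketch (editorial illustration, not part of the generated file; the queue
+*       name evtQueue is hypothetical, and osMessageQDef/osMessageQ are the macros declared
+*       in cmsis_os.h):
+* \code
+* #include "cmsis_os.h"
+*
+* osMessageQDef(evtQueue, 16, uint32_t);       // 16 entries, 32-bit messages
+* osMessageQId evtQueueId;
+*
+* void producer (void)
+* {
+*   evtQueueId = osMessageCreate(osMessageQ(evtQueue), NULL);
+*   osMessagePut(evtQueueId, 0x42u, 0);        // post the value 0x42
+* }
+*
+* void consumer (void)
+* {
+*   osEvent evt = osMessageGet(evtQueueId, osWaitForever);
+*   if (evt.status == osEventMessage) {
+*     uint32_t value = evt.value.v;            // received 32-bit message
+*     (void)value;
+*   }
+* }
+* \endcode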
+*/ +osEvent osMessageGet (osMessageQId queue_id, uint32_t millisec) +{ + portBASE_TYPE taskWoken; + TickType_t ticks; + osEvent event; + + event.def.message_id = queue_id; + event.value.v = 0; + + if (queue_id == NULL) { + event.status = osErrorParameter; + return event; + } + + taskWoken = pdFALSE; + + ticks = 0; + if (millisec == osWaitForever) { + ticks = portMAX_DELAY; + } + else if (millisec != 0) { + ticks = millisec / portTICK_PERIOD_MS; + if (ticks == 0) { + ticks = 1; + } + } + + if (inHandlerMode()) { + if (xQueueReceiveFromISR(queue_id, &event.value.v, &taskWoken) == pdTRUE) { + /* We have mail */ + event.status = osEventMessage; + } + else { + event.status = osOK; + } + portEND_SWITCHING_ISR(taskWoken); + } + else { + if (xQueueReceive(queue_id, &event.value.v, ticks) == pdTRUE) { + /* We have mail */ + event.status = osEventMessage; + } + else { + event.status = (ticks == 0) ? osOK : osEventTimeout; + } + } + + return event; +} + +#endif /* Use Message Queues */ + +/******************** Mail Queue Management Functions ***********************/ +#if (defined (osFeature_MailQ) && (osFeature_MailQ != 0)) /* Use Mail Queues */ + + +typedef struct os_mailQ_cb { + const osMailQDef_t *queue_def; + QueueHandle_t handle; + osPoolId pool; +} os_mailQ_cb_t; + +/** +* @brief Create and Initialize mail queue +* @param queue_def reference to the mail queue definition obtain with \ref osMailQ +* @param thread_id thread ID (obtained by \ref osThreadCreate or \ref osThreadGetId) or NULL. +* @retval mail queue ID for reference by other functions or NULL in case of error. +* @note MUST REMAIN UNCHANGED: \b osMailCreate shall be consistent in every CMSIS-RTOS. +*/ +osMailQId osMailCreate (const osMailQDef_t *queue_def, osThreadId thread_id) +{ +#if (configSUPPORT_DYNAMIC_ALLOCATION == 1) + (void) thread_id; + + osPoolDef_t pool_def = {queue_def->queue_sz, queue_def->item_sz, NULL}; + + /* Create a mail queue control block */ + + *(queue_def->cb) = pvPortMalloc(sizeof(struct os_mailQ_cb)); + + if (*(queue_def->cb) == NULL) { + return NULL; + } + (*(queue_def->cb))->queue_def = queue_def; + + /* Create a queue in FreeRTOS */ + (*(queue_def->cb))->handle = xQueueCreate(queue_def->queue_sz, sizeof(void *)); + + + if ((*(queue_def->cb))->handle == NULL) { + vPortFree(*(queue_def->cb)); + return NULL; + } + + /* Create a mail pool */ + (*(queue_def->cb))->pool = osPoolCreate(&pool_def); + if ((*(queue_def->cb))->pool == NULL) { + //TODO: Delete queue. How to do it in FreeRTOS? + vPortFree(*(queue_def->cb)); + return NULL; + } + + return *(queue_def->cb); +#else + return NULL; +#endif +} + +/** +* @brief Allocate a memory block from a mail +* @param queue_id mail queue ID obtained with \ref osMailCreate. +* @param millisec timeout value or 0 in case of no time-out. +* @retval pointer to memory block that can be filled with mail or NULL in case error. +* @note MUST REMAIN UNCHANGED: \b osMailAlloc shall be consistent in every CMSIS-RTOS. +*/ +void *osMailAlloc (osMailQId queue_id, uint32_t millisec) +{ + (void) millisec; + void *p; + + + if (queue_id == NULL) { + return NULL; + } + + p = osPoolAlloc(queue_id->pool); + + return p; +} + +/** +* @brief Allocate a memory block from a mail and set memory block to zero +* @param queue_id mail queue ID obtained with \ref osMailCreate. +* @param millisec timeout value or 0 in case of no time-out. +* @retval pointer to memory block that can be filled with mail or NULL in case error. 
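+* @note A usage sketch for the mail queue wrappers (editorial illustration, not part of the
+*       generated file; the mail type and queue name are hypothetical, and osMailQDef/osMailQ
+*       are the macros declared in cmsis_os.h):
+* \code
+* #include "cmsis_os.h"
+*
+* typedef struct { uint8_t channel; float voltage; } mail_t;
+*
+* osMailQDef(measQ, 8, mail_t);
+* osMailQId measQId;
+*
+* void sender (void)
+* {
+*   measQId = osMailCreate(osMailQ(measQ), NULL);
+*   mail_t *m = (mail_t *)osMailCAlloc(measQId, osWaitForever);
+*   if (m != NULL) {
+*     m->channel = 1u;
+*     m->voltage = 3.3f;
+*     osMailPut(measQId, m);
+*   }
+* }
+*
+* void receiver (void)
+* {
+*   osEvent evt = osMailGet(measQId, osWaitForever);
+*   if (evt.status == osEventMail) {
+*     mail_t *m = (mail_t *)evt.value.p;
+*     /* consume *m, then return the block to the pool */
+*     osMailFree(measQId, m);
+*   }
+* }
+* \endcode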
+* @note MUST REMAIN UNCHANGED: \b osMailCAlloc shall be consistent in every CMSIS-RTOS. +*/ +void *osMailCAlloc (osMailQId queue_id, uint32_t millisec) +{ + uint32_t i; + void *p = osMailAlloc(queue_id, millisec); + + if (p) { + for (i = 0; i < queue_id->queue_def->item_sz; i++) { + ((uint8_t *)p)[i] = 0; + } + } + + return p; +} + +/** +* @brief Put a mail to a queue +* @param queue_id mail queue ID obtained with \ref osMailCreate. +* @param mail memory block previously allocated with \ref osMailAlloc or \ref osMailCAlloc. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osMailPut shall be consistent in every CMSIS-RTOS. +*/ +osStatus osMailPut (osMailQId queue_id, void *mail) +{ + portBASE_TYPE taskWoken; + + + if (queue_id == NULL) { + return osErrorParameter; + } + + taskWoken = pdFALSE; + + if (inHandlerMode()) { + if (xQueueSendFromISR(queue_id->handle, &mail, &taskWoken) != pdTRUE) { + return osErrorOS; + } + portEND_SWITCHING_ISR(taskWoken); + } + else { + if (xQueueSend(queue_id->handle, &mail, 0) != pdTRUE) { + return osErrorOS; + } + } + + return osOK; +} + +/** +* @brief Get a mail from a queue +* @param queue_id mail queue ID obtained with \ref osMailCreate. +* @param millisec timeout value or 0 in case of no time-out +* @retval event that contains mail information or error code. +* @note MUST REMAIN UNCHANGED: \b osMailGet shall be consistent in every CMSIS-RTOS. +*/ +osEvent osMailGet (osMailQId queue_id, uint32_t millisec) +{ + portBASE_TYPE taskWoken; + TickType_t ticks; + osEvent event; + + event.def.mail_id = queue_id; + + if (queue_id == NULL) { + event.status = osErrorParameter; + return event; + } + + taskWoken = pdFALSE; + + ticks = 0; + if (millisec == osWaitForever) { + ticks = portMAX_DELAY; + } + else if (millisec != 0) { + ticks = millisec / portTICK_PERIOD_MS; + if (ticks == 0) { + ticks = 1; + } + } + + if (inHandlerMode()) { + if (xQueueReceiveFromISR(queue_id->handle, &event.value.p, &taskWoken) == pdTRUE) { + /* We have mail */ + event.status = osEventMail; + } + else { + event.status = osOK; + } + portEND_SWITCHING_ISR(taskWoken); + } + else { + if (xQueueReceive(queue_id->handle, &event.value.p, ticks) == pdTRUE) { + /* We have mail */ + event.status = osEventMail; + } + else { + event.status = (ticks == 0) ? osOK : osEventTimeout; + } + } + + return event; +} + +/** +* @brief Free a memory block from a mail +* @param queue_id mail queue ID obtained with \ref osMailCreate. +* @param mail pointer to the memory block that was obtained with \ref osMailGet. +* @retval status code that indicates the execution status of the function. +* @note MUST REMAIN UNCHANGED: \b osMailFree shall be consistent in every CMSIS-RTOS. +*/ +osStatus osMailFree (osMailQId queue_id, void *mail) +{ + if (queue_id == NULL) { + return osErrorParameter; + } + + return osPoolFree(queue_id->pool, mail); +} +#endif /* Use Mail Queues */ + +/*************************** Additional specific APIs to Free RTOS ************/ +/** +* @brief Handles the tick increment +* @param none. +* @retval none. +*/ +void osSystickHandler(void) +{ + +#if (INCLUDE_xTaskGetSchedulerState == 1 ) + if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) + { +#endif /* INCLUDE_xTaskGetSchedulerState */ + xPortSysTickHandler(); +#if (INCLUDE_xTaskGetSchedulerState == 1 ) + } +#endif /* INCLUDE_xTaskGetSchedulerState */ +} + +#if ( INCLUDE_eTaskGetState == 1 ) +/** +* @brief Obtain the state of any thread. 
+* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +* @retval the stae of the thread, states are encoded by the osThreadState enumerated type. +*/ +osThreadState osThreadGetState(osThreadId thread_id) +{ + eTaskState ThreadState; + osThreadState result; + + ThreadState = eTaskGetState(thread_id); + + switch (ThreadState) + { + case eRunning : + result = osThreadRunning; + break; + case eReady : + result = osThreadReady; + break; + case eBlocked : + result = osThreadBlocked; + break; + case eSuspended : + result = osThreadSuspended; + break; + case eDeleted : + result = osThreadDeleted; + break; + default: + result = osThreadError; + } + + return result; +} +#endif /* INCLUDE_eTaskGetState */ + +#if (INCLUDE_eTaskGetState == 1) +/** +* @brief Check if a thread is already suspended or not. +* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +* @retval status code that indicates the execution status of the function. +*/ +osStatus osThreadIsSuspended(osThreadId thread_id) +{ + if (eTaskGetState(thread_id) == eSuspended) + return osOK; + else + return osErrorOS; +} +#endif /* INCLUDE_eTaskGetState */ +/** +* @brief Suspend execution of a thread. +* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +* @retval status code that indicates the execution status of the function. +*/ +osStatus osThreadSuspend (osThreadId thread_id) +{ +#if (INCLUDE_vTaskSuspend == 1) + vTaskSuspend(thread_id); + + return osOK; +#else + return osErrorResource; +#endif +} + +/** +* @brief Resume execution of a suspended thread. +* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +* @retval status code that indicates the execution status of the function. +*/ +osStatus osThreadResume (osThreadId thread_id) +{ +#if (INCLUDE_vTaskSuspend == 1) + if(inHandlerMode()) + { + if (xTaskResumeFromISR(thread_id) == pdTRUE) + { + portYIELD_FROM_ISR(pdTRUE); + } + } + else + { + vTaskResume(thread_id); + } + return osOK; +#else + return osErrorResource; +#endif +} + +/** +* @brief Suspend execution of a all active threads. +* @retval status code that indicates the execution status of the function. +*/ +osStatus osThreadSuspendAll (void) +{ + vTaskSuspendAll(); + + return osOK; +} + +/** +* @brief Resume execution of a all suspended threads. +* @retval status code that indicates the execution status of the function. +*/ +osStatus osThreadResumeAll (void) +{ + if (xTaskResumeAll() == pdTRUE) + return osOK; + else + return osErrorOS; + +} + +/** +* @brief Delay a task until a specified time +* @param PreviousWakeTime Pointer to a variable that holds the time at which the +* task was last unblocked. PreviousWakeTime must be initialised with the current time +* prior to its first use (PreviousWakeTime = osKernelSysTick() ) +* @param millisec time delay value +* @retval status code that indicates the execution status of the function. +*/ +osStatus osDelayUntil (uint32_t *PreviousWakeTime, uint32_t millisec) +{ +#if INCLUDE_vTaskDelayUntil + TickType_t ticks = (millisec / portTICK_PERIOD_MS); + vTaskDelayUntil((TickType_t *) PreviousWakeTime, ticks ? ticks : 1); + + return osOK; +#else + (void) millisec; + (void) PreviousWakeTime; + + return osErrorResource; +#endif +} + +/** +* @brief Abort the delay for a specific thread +* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId +* @retval status code that indicates the execution status of the function. 
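+* @note A usage sketch combining osDelayUntil and osAbortDelay (editorial illustration, not
+*       part of the generated file; the thread function and handle names are hypothetical):
+* \code
+* #include "cmsis_os.h"
+*
+* void sampling_thread (void const *argument)
+* {
+*   uint32_t wake = osKernelSysTick();     // initialise before first use, as documented above
+*   for (;;) {
+*     osDelayUntil(&wake, 10u);            // wake every 10 ms
+*     /* periodic work */
+*   }
+* }
+*
+* /* another thread can cut the wait short with: osAbortDelay(samplingThreadId); */
+* \endcode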
+*/ +osStatus osAbortDelay(osThreadId thread_id) +{ +#if INCLUDE_xTaskAbortDelay + + xTaskAbortDelay(thread_id); + + return osOK; +#else + (void) thread_id; + + return osErrorResource; +#endif +} + +/** +* @brief Lists all the current threads, along with their current state +* and stack usage high water mark. +* @param buffer A buffer into which the above mentioned details +* will be written +* @retval status code that indicates the execution status of the function. +*/ +osStatus osThreadList (uint8_t *buffer) +{ +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) ) + vTaskList((char *)buffer); +#endif + return osOK; +} + +/** +* @brief Receive an item from a queue without removing the item from the queue. +* @param queue_id message queue ID obtained with \ref osMessageCreate. +* @param millisec timeout value or 0 in case of no time-out. +* @retval event information that includes status code. +*/ +osEvent osMessagePeek (osMessageQId queue_id, uint32_t millisec) +{ + TickType_t ticks; + osEvent event; + + event.def.message_id = queue_id; + + if (queue_id == NULL) { + event.status = osErrorParameter; + return event; + } + + ticks = 0; + if (millisec == osWaitForever) { + ticks = portMAX_DELAY; + } + else if (millisec != 0) { + ticks = millisec / portTICK_PERIOD_MS; + if (ticks == 0) { + ticks = 1; + } + } + + if (xQueuePeek(queue_id, &event.value.v, ticks) == pdTRUE) + { + /* We have mail */ + event.status = osEventMessage; + } + else + { + event.status = (ticks == 0) ? osOK : osEventTimeout; + } + + return event; +} + +/** +* @brief Get the number of messaged stored in a queue. +* @param queue_id message queue ID obtained with \ref osMessageCreate. +* @retval number of messages stored in a queue. +*/ +uint32_t osMessageWaiting(osMessageQId queue_id) +{ + if (inHandlerMode()) { + return uxQueueMessagesWaitingFromISR(queue_id); + } + else + { + return uxQueueMessagesWaiting(queue_id); + } +} + +/** +* @brief Get the available space in a message queue. +* @param queue_id message queue ID obtained with \ref osMessageCreate. +* @retval available space in a message queue. +*/ +uint32_t osMessageAvailableSpace(osMessageQId queue_id) +{ + return uxQueueSpacesAvailable(queue_id); +} + +/** +* @brief Delete a Message Queue +* @param queue_id message queue ID obtained with \ref osMessageCreate. +* @retval status code that indicates the execution status of the function. +*/ +osStatus osMessageDelete (osMessageQId queue_id) +{ + if (inHandlerMode()) { + return osErrorISR; + } + + vQueueDelete(queue_id); + + return osOK; +} + +/** +* @brief Create and Initialize a Recursive Mutex +* @param mutex_def mutex definition referenced with \ref osMutex. +* @retval mutex ID for reference by other functions or NULL in case of error.. +*/ +osMutexId osRecursiveMutexCreate (const osMutexDef_t *mutex_def) +{ +#if (configUSE_RECURSIVE_MUTEXES == 1) +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + if (mutex_def->controlblock != NULL){ + return xSemaphoreCreateRecursiveMutexStatic( mutex_def->controlblock ); + } + else { + return xSemaphoreCreateRecursiveMutex(); + } +#elif ( configSUPPORT_STATIC_ALLOCATION == 1 ) + return xSemaphoreCreateRecursiveMutexStatic( mutex_def->controlblock ); +#else + return xSemaphoreCreateRecursiveMutex(); +#endif +#else + return NULL; +#endif +} + +/** +* @brief Release a Recursive Mutex +* @param mutex_id mutex ID obtained by \ref osRecursiveMutexCreate. 
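+* @note A usage sketch for the recursive mutex wrappers (editorial illustration, not part of
+*       the generated file; cfgLock is a hypothetical name):
+* \code
+* #include "cmsis_os.h"
+*
+* osMutexDef(cfgLock);
+* osMutexId cfgLockId;
+*
+* void nested_access (void)
+* {
+*   cfgLockId = osRecursiveMutexCreate(osMutex(cfgLock));
+*   if (osRecursiveMutexWait(cfgLockId, osWaitForever) == osOK) {
+*     /* a helper called from here may take the same lock again ... */
+*     osRecursiveMutexWait(cfgLockId, osWaitForever);
+*     osRecursiveMutexRelease(cfgLockId);  // ... and must release once per take
+*     osRecursiveMutexRelease(cfgLockId);
+*   }
+* }
+* \endcode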
+* @retval status code that indicates the execution status of the function. +*/ +osStatus osRecursiveMutexRelease (osMutexId mutex_id) +{ +#if (configUSE_RECURSIVE_MUTEXES == 1) + osStatus result = osOK; + + if (xSemaphoreGiveRecursive(mutex_id) != pdTRUE) + { + result = osErrorOS; + } + return result; +#else + return osErrorResource; +#endif +} + +/** +* @brief Release a Recursive Mutex +* @param mutex_id mutex ID obtained by \ref osRecursiveMutexCreate. +* @param millisec timeout value or 0 in case of no time-out. +* @retval status code that indicates the execution status of the function. +*/ +osStatus osRecursiveMutexWait (osMutexId mutex_id, uint32_t millisec) +{ +#if (configUSE_RECURSIVE_MUTEXES == 1) + TickType_t ticks; + + if (mutex_id == NULL) + { + return osErrorParameter; + } + + ticks = 0; + if (millisec == osWaitForever) + { + ticks = portMAX_DELAY; + } + else if (millisec != 0) + { + ticks = millisec / portTICK_PERIOD_MS; + if (ticks == 0) + { + ticks = 1; + } + } + + if (xSemaphoreTakeRecursive(mutex_id, ticks) != pdTRUE) + { + return osErrorOS; + } + return osOK; +#else + return osErrorResource; +#endif +} + +/** +* @brief Returns the current count value of a counting semaphore +* @param semaphore_id semaphore_id ID obtained by \ref osSemaphoreCreate. +* @retval count value +*/ +uint32_t osSemaphoreGetCount(osSemaphoreId semaphore_id) +{ + return uxSemaphoreGetCount(semaphore_id); +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.h new file mode 100644 index 000000000..f53a132a1 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS/cmsis_os.h @@ -0,0 +1,1026 @@ +/* ---------------------------------------------------------------------- + * $Date: 5. February 2013 + * $Revision: V1.02 + * + * Project: CMSIS-RTOS API + * Title: cmsis_os.h header file + * + * Version 0.02 + * Initial Proposal Phase + * Version 0.03 + * osKernelStart added, optional feature: main started as thread + * osSemaphores have standard behavior + * osTimerCreate does not start the timer, added osTimerStart + * osThreadPass is renamed to osThreadYield + * Version 1.01 + * Support for C++ interface + * - const attribute removed from the osXxxxDef_t typedef's + * - const attribute added to the osXxxxDef macros + * Added: osTimerDelete, osMutexDelete, osSemaphoreDelete + * Added: osKernelInitialize + * Version 1.02 + * Control functions for short timeouts in microsecond resolution: + * Added: osKernelSysTick, osKernelSysTickFrequency, osKernelSysTickMicroSec + * Removed: osSignalGet + * + * + *---------------------------------------------------------------------------- + * + * Portions Copyright © 2016 STMicroelectronics International N.V. All rights reserved. + * Portions Copyright (c) 2013 ARM LIMITED + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * - Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + *---------------------------------------------------------------------------*/ + +#include "FreeRTOS.h" +#include "task.h" +#include "timers.h" +#include "queue.h" +#include "semphr.h" +#include "event_groups.h" + +/** +\page cmsis_os_h Header File Template: cmsis_os.h + +The file \b cmsis_os.h is a template header file for a CMSIS-RTOS compliant Real-Time Operating System (RTOS). +Each RTOS that is compliant with CMSIS-RTOS shall provide a specific \b cmsis_os.h header file that represents +its implementation. + +The file cmsis_os.h contains: + - CMSIS-RTOS API function definitions + - struct definitions for parameters and return types + - status and priority values used by CMSIS-RTOS API functions + - macros for defining threads and other kernel objects + + +Name conventions and header file modifications + +All definitions are prefixed with \b os to give an unique name space for CMSIS-RTOS functions. +Definitions that are prefixed \b os_ are not used in the application code but local to this header file. +All definitions and functions that belong to a module are grouped and have a common prefix, i.e. \b osThread. + +Definitions that are marked with CAN BE CHANGED can be adapted towards the needs of the actual CMSIS-RTOS implementation. +These definitions can be specific to the underlying RTOS kernel. + +Definitions that are marked with MUST REMAIN UNCHANGED cannot be altered. Otherwise the CMSIS-RTOS implementation is no longer +compliant to the standard. Note that some functions are optional and need not to be provided by every CMSIS-RTOS implementation. + + +Function calls from interrupt service routines + +The following CMSIS-RTOS functions can be called from threads and interrupt service routines (ISR): + - \ref osSignalSet + - \ref osSemaphoreRelease + - \ref osPoolAlloc, \ref osPoolCAlloc, \ref osPoolFree + - \ref osMessagePut, \ref osMessageGet + - \ref osMailAlloc, \ref osMailCAlloc, \ref osMailGet, \ref osMailPut, \ref osMailFree + +Functions that cannot be called from an ISR are verifying the interrupt status and return in case that they are called +from an ISR context the status code \b osErrorISR. In some implementations this condition might be caught using the HARD FAULT vector. + +Some CMSIS-RTOS implementations support CMSIS-RTOS function calls from multiple ISR at the same time. +If this is impossible, the CMSIS-RTOS rejects calls by nested ISR functions with the status code \b osErrorISRRecursive. 
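+
+For example, a peripheral interrupt handler may post an event to a message queue that a thread
+is blocking on; this implementation detects the ISR context internally and uses the FromISR
+variants of the FreeRTOS primitives. A brief sketch (illustrative only; the handler name, queue
+ID and event value are hypothetical):
+\code
+void EXTI0_IRQHandler (void)
+{
+  osMessagePut(buttonQueueId, 0x01u, 0);   // timeout must be 0 when called from an ISR
+}
+\endcode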
+ + +Define and reference object definitions + +With \#define osObjectsExternal objects are defined as external symbols. This allows to create a consistent header file +that is used throughout a project as shown below: + +Header File +\code +#include // CMSIS RTOS header file + +// Thread definition +extern void thread_sample (void const *argument); // function prototype +osThreadDef (thread_sample, osPriorityBelowNormal, 1, 100); + +// Pool definition +osPoolDef(MyPool, 10, long); +\endcode + + +This header file defines all objects when included in a C/C++ source file. When \#define osObjectsExternal is +present before the header file, the objects are defined as external symbols. A single consistent header file can therefore be +used throughout the whole project. + +Example +\code +#include "osObjects.h" // Definition of the CMSIS-RTOS objects +\endcode + +\code +#define osObjectExternal // Objects will be defined as external symbols +#include "osObjects.h" // Reference to the CMSIS-RTOS objects +\endcode + +*/ + +#ifndef _CMSIS_OS_H +#define _CMSIS_OS_H + +/// \note MUST REMAIN UNCHANGED: \b osCMSIS identifies the CMSIS-RTOS API version. +#define osCMSIS 0x10002 ///< API version (main [31:16] .sub [15:0]) + +/// \note CAN BE CHANGED: \b osCMSIS_KERNEL identifies the underlying RTOS kernel and version number. +#define osCMSIS_KERNEL 0x10000 ///< RTOS identification and version (main [31:16] .sub [15:0]) + +/// \note MUST REMAIN UNCHANGED: \b osKernelSystemId shall be consistent in every CMSIS-RTOS. +#define osKernelSystemId "KERNEL V1.00" ///< RTOS identification string + +/// \note MUST REMAIN UNCHANGED: \b osFeature_xxx shall be consistent in every CMSIS-RTOS. +#define osFeature_MainThread 1 ///< main thread 1=main can be thread, 0=not available +#define osFeature_Pool 1 ///< Memory Pools: 1=available, 0=not available +#define osFeature_MailQ 1 ///< Mail Queues: 1=available, 0=not available +#define osFeature_MessageQ 1 ///< Message Queues: 1=available, 0=not available +#define osFeature_Signals 8 ///< maximum number of Signal Flags available per thread +#define osFeature_Semaphore 1 ///< osFeature_Semaphore function: 1=available, 0=not available +#define osFeature_Wait 0 ///< osWait function: 1=available, 0=not available +#define osFeature_SysTick 1 ///< osKernelSysTick functions: 1=available, 0=not available + +#ifdef __cplusplus +extern "C" +{ +#endif + + +// ==== Enumeration, structures, defines ==== + +/// Priority used for thread control. +/// \note MUST REMAIN UNCHANGED: \b osPriority shall be consistent in every CMSIS-RTOS. +typedef enum { + osPriorityIdle = -3, ///< priority: idle (lowest) + osPriorityLow = -2, ///< priority: low + osPriorityBelowNormal = -1, ///< priority: below normal + osPriorityNormal = 0, ///< priority: normal (default) + osPriorityAboveNormal = +1, ///< priority: above normal + osPriorityHigh = +2, ///< priority: high + osPriorityRealtime = +3, ///< priority: realtime (highest) + osPriorityError = 0x84 ///< system cannot determine priority or thread has illegal priority +} osPriority; + +/// Timeout value. +/// \note MUST REMAIN UNCHANGED: \b osWaitForever shall be consistent in every CMSIS-RTOS. +#define osWaitForever 0xFFFFFFFF ///< wait forever timeout value + +/// Status code values returned by CMSIS-RTOS functions. +/// \note MUST REMAIN UNCHANGED: \b osStatus shall be consistent in every CMSIS-RTOS. +typedef enum { + osOK = 0, ///< function completed; no error or event occurred. + osEventSignal = 0x08, ///< function completed; signal event occurred. 
+  osEventMessage          = 0x10,       ///< function completed; message event occurred.
+  osEventMail             = 0x20,       ///< function completed; mail event occurred.
+  osEventTimeout          = 0x40,       ///< function completed; timeout occurred.
+  osErrorParameter        = 0x80,       ///< parameter error: a mandatory parameter was missing or specified an incorrect object.
+  osErrorResource         = 0x81,       ///< resource not available: a specified resource was not available.
+  osErrorTimeoutResource  = 0xC1,       ///< resource not available within given time: a specified resource was not available within the timeout period.
+  osErrorISR              = 0x82,       ///< not allowed in ISR context: the function cannot be called from interrupt service routines.
+  osErrorISRRecursive     = 0x83,       ///< function called multiple times from ISR with same object.
+  osErrorPriority         = 0x84,       ///< system cannot determine priority or thread has illegal priority.
+  osErrorNoMemory         = 0x85,       ///< system is out of memory: it was impossible to allocate or reserve memory for the operation.
+  osErrorValue            = 0x86,       ///< value of a parameter is out of range.
+  osErrorOS               = 0xFF,       ///< unspecified RTOS error: run-time error but no other error message fits.
+  os_status_reserved      = 0x7FFFFFFF  ///< prevent from enum down-size compiler optimization.
+} osStatus;
+
+#if ( INCLUDE_eTaskGetState == 1 )
+/* Thread state returned by osThreadGetState */
+typedef enum {
+  osThreadRunning   = 0x0,         /* A thread is querying the state of itself, so must be running. */
+  osThreadReady     = 0x1,         /* The thread being queried is in a ready or pending ready list. */
+  osThreadBlocked   = 0x2,         /* The thread being queried is in the Blocked state. */
+  osThreadSuspended = 0x3,         /* The thread being queried is in the Suspended state, or is in the Blocked state with an infinite time out. */
+  osThreadDeleted   = 0x4,         /* The thread being queried has been deleted, but its TCB has not yet been freed. */
+  osThreadError     = 0x7FFFFFFF
+} osThreadState;
+#endif /* INCLUDE_eTaskGetState */
+
+/// Timer type value for the timer definition.
+/// \note MUST REMAIN UNCHANGED: \b os_timer_type shall be consistent in every CMSIS-RTOS.
+typedef enum {
+  osTimerOnce     = 0,          ///< one-shot timer
+  osTimerPeriodic = 1           ///< repeating timer
+} os_timer_type;
+
+/// Entry point of a thread.
+/// \note MUST REMAIN UNCHANGED: \b os_pthread shall be consistent in every CMSIS-RTOS.
+typedef void (*os_pthread) (void const *argument);
+
+/// Entry point of a timer call back function.
+/// \note MUST REMAIN UNCHANGED: \b os_ptimer shall be consistent in every CMSIS-RTOS.
+typedef void (*os_ptimer) (void const *argument);
+
+// >>> the following data type definitions shall be adapted towards a specific RTOS
+
+/// Thread ID identifies the thread (pointer to a thread control block).
+/// \note CAN BE CHANGED: \b os_thread_cb is implementation specific in every CMSIS-RTOS.
+typedef TaskHandle_t osThreadId;
+
+/// Timer ID identifies the timer (pointer to a timer control block).
+/// \note CAN BE CHANGED: \b os_timer_cb is implementation specific in every CMSIS-RTOS.
+typedef TimerHandle_t osTimerId;
+
+/// Mutex ID identifies the mutex (pointer to a mutex control block).
+/// \note CAN BE CHANGED: \b os_mutex_cb is implementation specific in every CMSIS-RTOS.
+typedef SemaphoreHandle_t osMutexId;
+
+/// Semaphore ID identifies the semaphore (pointer to a semaphore control block).
+/// \note CAN BE CHANGED: \b os_semaphore_cb is implementation specific in every CMSIS-RTOS.
+typedef SemaphoreHandle_t osSemaphoreId; + +/// Pool ID identifies the memory pool (pointer to a memory pool control block). +/// \note CAN BE CHANGED: \b os_pool_cb is implementation specific in every CMSIS-RTOS. +typedef struct os_pool_cb *osPoolId; + +/// Message ID identifies the message queue (pointer to a message queue control block). +/// \note CAN BE CHANGED: \b os_messageQ_cb is implementation specific in every CMSIS-RTOS. +typedef QueueHandle_t osMessageQId; + +/// Mail ID identifies the mail queue (pointer to a mail queue control block). +/// \note CAN BE CHANGED: \b os_mailQ_cb is implementation specific in every CMSIS-RTOS. +typedef struct os_mailQ_cb *osMailQId; + + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + +typedef StaticTask_t osStaticThreadDef_t; +typedef StaticTimer_t osStaticTimerDef_t; +typedef StaticSemaphore_t osStaticMutexDef_t; +typedef StaticSemaphore_t osStaticSemaphoreDef_t; +typedef StaticQueue_t osStaticMessageQDef_t; + +#endif + + + + +/// Thread Definition structure contains startup information of a thread. +/// \note CAN BE CHANGED: \b os_thread_def is implementation specific in every CMSIS-RTOS. +typedef struct os_thread_def { + char *name; ///< Thread name + os_pthread pthread; ///< start address of thread function + osPriority tpriority; ///< initial thread priority + uint32_t instances; ///< maximum number of instances of that thread function + uint32_t stacksize; ///< stack size requirements in bytes; 0 is default stack size +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + uint32_t *buffer; ///< stack buffer for static allocation; NULL for dynamic allocation + osStaticThreadDef_t *controlblock; ///< control block to hold thread's data for static allocation; NULL for dynamic allocation +#endif +} osThreadDef_t; + +/// Timer Definition structure contains timer parameters. +/// \note CAN BE CHANGED: \b os_timer_def is implementation specific in every CMSIS-RTOS. +typedef struct os_timer_def { + os_ptimer ptimer; ///< start address of a timer function +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + osStaticTimerDef_t *controlblock; ///< control block to hold timer's data for static allocation; NULL for dynamic allocation +#endif +} osTimerDef_t; + +/// Mutex Definition structure contains setup information for a mutex. +/// \note CAN BE CHANGED: \b os_mutex_def is implementation specific in every CMSIS-RTOS. +typedef struct os_mutex_def { + uint32_t dummy; ///< dummy value. +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + osStaticMutexDef_t *controlblock; ///< control block for static allocation; NULL for dynamic allocation +#endif +} osMutexDef_t; + +/// Semaphore Definition structure contains setup information for a semaphore. +/// \note CAN BE CHANGED: \b os_semaphore_def is implementation specific in every CMSIS-RTOS. +typedef struct os_semaphore_def { + uint32_t dummy; ///< dummy value. +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + osStaticSemaphoreDef_t *controlblock; ///< control block for static allocation; NULL for dynamic allocation +#endif +} osSemaphoreDef_t; + +/// Definition structure for memory block allocation. +/// \note CAN BE CHANGED: \b os_pool_def is implementation specific in every CMSIS-RTOS. +typedef struct os_pool_def { + uint32_t pool_sz; ///< number of items (elements) in the pool + uint32_t item_sz; ///< size of an item + void *pool; ///< pointer to memory for pool +} osPoolDef_t; + +/// Definition structure for message queue. +/// \note CAN BE CHANGED: \b os_messageQ_def is implementation specific in every CMSIS-RTOS. 
+typedef struct os_messageQ_def { + uint32_t queue_sz; ///< number of elements in the queue + uint32_t item_sz; ///< size of an item +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + uint8_t *buffer; ///< buffer for static allocation; NULL for dynamic allocation + osStaticMessageQDef_t *controlblock; ///< control block to hold queue's data for static allocation; NULL for dynamic allocation +#endif + //void *pool; ///< memory array for messages +} osMessageQDef_t; + +/// Definition structure for mail queue. +/// \note CAN BE CHANGED: \b os_mailQ_def is implementation specific in every CMSIS-RTOS. +typedef struct os_mailQ_def { + uint32_t queue_sz; ///< number of elements in the queue + uint32_t item_sz; ///< size of an item + struct os_mailQ_cb **cb; +} osMailQDef_t; + +/// Event structure contains detailed information about an event. +/// \note MUST REMAIN UNCHANGED: \b os_event shall be consistent in every CMSIS-RTOS. +/// However the struct may be extended at the end. +typedef struct { + osStatus status; ///< status code: event or error information + union { + uint32_t v; ///< message as 32-bit value + void *p; ///< message or mail as void pointer + int32_t signals; ///< signal flags + } value; ///< event value + union { + osMailQId mail_id; ///< mail id obtained by \ref osMailCreate + osMessageQId message_id; ///< message id obtained by \ref osMessageCreate + } def; ///< event definition +} osEvent; + + +// ==== Kernel Control Functions ==== + +/// Initialize the RTOS Kernel for creating objects. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osKernelInitialize shall be consistent in every CMSIS-RTOS. +osStatus osKernelInitialize (void); + +/// Start the RTOS Kernel. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osKernelStart shall be consistent in every CMSIS-RTOS. +osStatus osKernelStart (void); + +/// Check if the RTOS kernel is already started. +/// \note MUST REMAIN UNCHANGED: \b osKernelRunning shall be consistent in every CMSIS-RTOS. +/// \return 0 RTOS is not started, 1 RTOS is started. +int32_t osKernelRunning(void); + +#if (defined (osFeature_SysTick) && (osFeature_SysTick != 0)) // System Timer available + +/// Get the RTOS kernel system timer counter +/// \note MUST REMAIN UNCHANGED: \b osKernelSysTick shall be consistent in every CMSIS-RTOS. +/// \return RTOS kernel system timer as 32-bit value +uint32_t osKernelSysTick (void); + +/// The RTOS kernel system timer frequency in Hz +/// \note Reflects the system timer setting and is typically defined in a configuration file. +#define osKernelSysTickFrequency (configTICK_RATE_HZ) + +/// Convert a microseconds value to a RTOS kernel system timer value. +/// \param microsec time value in microseconds. +/// \return time value normalized to the \ref osKernelSysTickFrequency +#define osKernelSysTickMicroSec(microsec) (((uint64_t)microsec * (osKernelSysTickFrequency)) / 1000000) + +#endif // System Timer available + +// ==== Thread Management ==== + +/// Create a Thread Definition with function, priority, and stack requirements. +/// \param name name of the thread function. +/// \param priority initial priority of the thread function. +/// \param instances number of possible thread instances. +/// \param stacksz stack size (in bytes) requirements for the thread function. 
+/// \note CAN BE CHANGED: The parameters to \b osThreadDef shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#if defined (osObjectsExternal) // object is external +#define osThreadDef(name, thread, priority, instances, stacksz) \ +extern const osThreadDef_t os_thread_def_##name +#else // define the object + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) +#define osThreadDef(name, thread, priority, instances, stacksz) \ +const osThreadDef_t os_thread_def_##name = \ +{ #name, (thread), (priority), (instances), (stacksz), NULL, NULL } + +#define osThreadStaticDef(name, thread, priority, instances, stacksz, buffer, control) \ +const osThreadDef_t os_thread_def_##name = \ +{ #name, (thread), (priority), (instances), (stacksz), (buffer), (control) } +#else //configSUPPORT_STATIC_ALLOCATION == 0 + +#define osThreadDef(name, thread, priority, instances, stacksz) \ +const osThreadDef_t os_thread_def_##name = \ +{ #name, (thread), (priority), (instances), (stacksz)} +#endif +#endif + +/// Access a Thread definition. +/// \param name name of the thread definition object. +/// \note CAN BE CHANGED: The parameter to \b osThread shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#define osThread(name) \ +&os_thread_def_##name + +/// Create a thread and add it to Active Threads and set it to state READY. +/// \param[in] thread_def thread definition referenced with \ref osThread. +/// \param[in] argument pointer that is passed to the thread function as start argument. +/// \return thread ID for reference by other functions or NULL in case of error. +/// \note MUST REMAIN UNCHANGED: \b osThreadCreate shall be consistent in every CMSIS-RTOS. +osThreadId osThreadCreate (const osThreadDef_t *thread_def, void *argument); + +/// Return the thread ID of the current running thread. +/// \return thread ID for reference by other functions or NULL in case of error. +/// \note MUST REMAIN UNCHANGED: \b osThreadGetId shall be consistent in every CMSIS-RTOS. +osThreadId osThreadGetId (void); + +/// Terminate execution of a thread and remove it from Active Threads. +/// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osThreadTerminate shall be consistent in every CMSIS-RTOS. +osStatus osThreadTerminate (osThreadId thread_id); + +/// Pass control to next thread that is in state \b READY. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osThreadYield shall be consistent in every CMSIS-RTOS. +osStatus osThreadYield (void); + +/// Change priority of an active thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +/// \param[in] priority new priority value for the thread function. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osThreadSetPriority shall be consistent in every CMSIS-RTOS. +osStatus osThreadSetPriority (osThreadId thread_id, osPriority priority); + +/// Get current priority of an active thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +/// \return current priority value of the thread function. +/// \note MUST REMAIN UNCHANGED: \b osThreadGetPriority shall be consistent in every CMSIS-RTOS. 
+osPriority osThreadGetPriority (osThreadId thread_id); + + +// ==== Generic Wait Functions ==== + +/// Wait for Timeout (Time Delay). +/// \param[in] millisec time delay value +/// \return status code that indicates the execution status of the function. +osStatus osDelay (uint32_t millisec); + +#if (defined (osFeature_Wait) && (osFeature_Wait != 0)) // Generic Wait available + +/// Wait for Signal, Message, Mail, or Timeout. +/// \param[in] millisec timeout value or 0 in case of no time-out +/// \return event that contains signal, message, or mail information or error code. +/// \note MUST REMAIN UNCHANGED: \b osWait shall be consistent in every CMSIS-RTOS. +osEvent osWait (uint32_t millisec); + +#endif // Generic Wait available + + +// ==== Timer Management Functions ==== +/// Define a Timer object. +/// \param name name of the timer object. +/// \param function name of the timer call back function. +/// \note CAN BE CHANGED: The parameter to \b osTimerDef shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#if defined (osObjectsExternal) // object is external +#define osTimerDef(name, function) \ +extern const osTimerDef_t os_timer_def_##name +#else // define the object + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) +#define osTimerDef(name, function) \ +const osTimerDef_t os_timer_def_##name = \ +{ (function), NULL } + +#define osTimerStaticDef(name, function, control) \ +const osTimerDef_t os_timer_def_##name = \ +{ (function), (control) } +#else //configSUPPORT_STATIC_ALLOCATION == 0 +#define osTimerDef(name, function) \ +const osTimerDef_t os_timer_def_##name = \ +{ (function) } +#endif +#endif + +/// Access a Timer definition. +/// \param name name of the timer object. +/// \note CAN BE CHANGED: The parameter to \b osTimer shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#define osTimer(name) \ +&os_timer_def_##name + +/// Create a timer. +/// \param[in] timer_def timer object referenced with \ref osTimer. +/// \param[in] type osTimerOnce for one-shot or osTimerPeriodic for periodic behavior. +/// \param[in] argument argument to the timer call back function. +/// \return timer ID for reference by other functions or NULL in case of error. +/// \note MUST REMAIN UNCHANGED: \b osTimerCreate shall be consistent in every CMSIS-RTOS. +osTimerId osTimerCreate (const osTimerDef_t *timer_def, os_timer_type type, void *argument); + +/// Start or restart a timer. +/// \param[in] timer_id timer ID obtained by \ref osTimerCreate. +/// \param[in] millisec time delay value of the timer. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osTimerStart shall be consistent in every CMSIS-RTOS. +osStatus osTimerStart (osTimerId timer_id, uint32_t millisec); + +/// Stop the timer. +/// \param[in] timer_id timer ID obtained by \ref osTimerCreate. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osTimerStop shall be consistent in every CMSIS-RTOS. +osStatus osTimerStop (osTimerId timer_id); + +/// Delete a timer that was created by \ref osTimerCreate. +/// \param[in] timer_id timer ID obtained by \ref osTimerCreate. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osTimerDelete shall be consistent in every CMSIS-RTOS. 
+osStatus osTimerDelete (osTimerId timer_id); + + +// ==== Signal Management ==== + +/// Set the specified Signal Flags of an active thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +/// \param[in] signals specifies the signal flags of the thread that should be set. +/// \return osOK if successful, osErrorOS if failed. +/// \note MUST REMAIN UNCHANGED: \b osSignalSet shall be consistent in every CMSIS-RTOS. +int32_t osSignalSet (osThreadId thread_id, int32_t signals); + +/// Clear the specified Signal Flags of an active thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +/// \param[in] signals specifies the signal flags of the thread that shall be cleared. +/// \return previous signal flags of the specified thread or 0x80000000 in case of incorrect parameters. +/// \note MUST REMAIN UNCHANGED: \b osSignalClear shall be consistent in every CMSIS-RTOS. +int32_t osSignalClear (osThreadId thread_id, int32_t signals); + +/// Wait for one or more Signal Flags to become signaled for the current \b RUNNING thread. +/// \param[in] signals wait until all specified signal flags set or 0 for any single signal flag. +/// \param[in] millisec timeout value or 0 in case of no time-out. +/// \return event flag information or error code. +/// \note MUST REMAIN UNCHANGED: \b osSignalWait shall be consistent in every CMSIS-RTOS. +osEvent osSignalWait (int32_t signals, uint32_t millisec); + + +// ==== Mutex Management ==== + +/// Define a Mutex. +/// \param name name of the mutex object. +/// \note CAN BE CHANGED: The parameter to \b osMutexDef shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#if defined (osObjectsExternal) // object is external +#define osMutexDef(name) \ +extern const osMutexDef_t os_mutex_def_##name +#else // define the object + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) +#define osMutexDef(name) \ +const osMutexDef_t os_mutex_def_##name = { 0, NULL } + +#define osMutexStaticDef(name, control) \ +const osMutexDef_t os_mutex_def_##name = { 0, (control) } +#else //configSUPPORT_STATIC_ALLOCATION == 0 +#define osMutexDef(name) \ +const osMutexDef_t os_mutex_def_##name = { 0 } + +#endif + +#endif + +/// Access a Mutex definition. +/// \param name name of the mutex object. +/// \note CAN BE CHANGED: The parameter to \b osMutex shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#define osMutex(name) \ +&os_mutex_def_##name + +/// Create and Initialize a Mutex object. +/// \param[in] mutex_def mutex definition referenced with \ref osMutex. +/// \return mutex ID for reference by other functions or NULL in case of error. +/// \note MUST REMAIN UNCHANGED: \b osMutexCreate shall be consistent in every CMSIS-RTOS. +osMutexId osMutexCreate (const osMutexDef_t *mutex_def); + +/// Wait until a Mutex becomes available. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexCreate. +/// \param[in] millisec timeout value or 0 in case of no time-out. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osMutexWait shall be consistent in every CMSIS-RTOS. +osStatus osMutexWait (osMutexId mutex_id, uint32_t millisec); + +/// Release a Mutex that was obtained by \ref osMutexWait. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexCreate. +/// \return status code that indicates the execution status of the function. 
+/// \note MUST REMAIN UNCHANGED: \b osMutexRelease shall be consistent in every CMSIS-RTOS. +osStatus osMutexRelease (osMutexId mutex_id); + +/// Delete a Mutex that was created by \ref osMutexCreate. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexCreate. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osMutexDelete shall be consistent in every CMSIS-RTOS. +osStatus osMutexDelete (osMutexId mutex_id); + + +// ==== Semaphore Management Functions ==== + +#if (defined (osFeature_Semaphore) && (osFeature_Semaphore != 0)) // Semaphore available + +/// Define a Semaphore object. +/// \param name name of the semaphore object. +/// \note CAN BE CHANGED: The parameter to \b osSemaphoreDef shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#if defined (osObjectsExternal) // object is external +#define osSemaphoreDef(name) \ +extern const osSemaphoreDef_t os_semaphore_def_##name +#else // define the object + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) +#define osSemaphoreDef(name) \ +const osSemaphoreDef_t os_semaphore_def_##name = { 0, NULL } + +#define osSemaphoreStaticDef(name, control) \ +const osSemaphoreDef_t os_semaphore_def_##name = { 0, (control) } + +#else //configSUPPORT_STATIC_ALLOCATION == 0 +#define osSemaphoreDef(name) \ +const osSemaphoreDef_t os_semaphore_def_##name = { 0 } +#endif +#endif + +/// Access a Semaphore definition. +/// \param name name of the semaphore object. +/// \note CAN BE CHANGED: The parameter to \b osSemaphore shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#define osSemaphore(name) \ +&os_semaphore_def_##name + +/// Create and Initialize a Semaphore object used for managing resources. +/// \param[in] semaphore_def semaphore definition referenced with \ref osSemaphore. +/// \param[in] count number of available resources. +/// \return semaphore ID for reference by other functions or NULL in case of error. +/// \note MUST REMAIN UNCHANGED: \b osSemaphoreCreate shall be consistent in every CMSIS-RTOS. +osSemaphoreId osSemaphoreCreate (const osSemaphoreDef_t *semaphore_def, int32_t count); + +/// Wait until a Semaphore token becomes available. +/// \param[in] semaphore_id semaphore object referenced with \ref osSemaphoreCreate. +/// \param[in] millisec timeout value or 0 in case of no time-out. +/// \return number of available tokens, or -1 in case of incorrect parameters. +/// \note MUST REMAIN UNCHANGED: \b osSemaphoreWait shall be consistent in every CMSIS-RTOS. +int32_t osSemaphoreWait (osSemaphoreId semaphore_id, uint32_t millisec); + +/// Release a Semaphore token. +/// \param[in] semaphore_id semaphore object referenced with \ref osSemaphoreCreate. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osSemaphoreRelease shall be consistent in every CMSIS-RTOS. +osStatus osSemaphoreRelease (osSemaphoreId semaphore_id); + +/// Delete a Semaphore that was created by \ref osSemaphoreCreate. +/// \param[in] semaphore_id semaphore object referenced with \ref osSemaphoreCreate. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osSemaphoreDelete shall be consistent in every CMSIS-RTOS. 
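A minimal sketch of the mutex API (osMutexDef/osMutexCreate/osMutexWait/osMutexRelease); the configMutex name and the 50 ms timeout are placeholders.

    #include "cmsis_os.h"

    osMutexDef(configMutex);            /* creates os_mutex_def_configMutex at file scope */
    static osMutexId configMutexId;

    void ConfigInit(void)
    {
        configMutexId = osMutexCreate(osMutex(configMutex));
    }

    void ConfigUpdate(void)
    {
        /* Wait up to 50 ms; passing osWaitForever would block indefinitely. */
        if (osMutexWait(configMutexId, 50) == osOK) {
            /* critical section: touch the shared configuration here */
            osMutexRelease(configMutexId);
        }
    }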
+osStatus osSemaphoreDelete (osSemaphoreId semaphore_id); + +#endif // Semaphore available + + +// ==== Memory Pool Management Functions ==== + +#if (defined (osFeature_Pool) && (osFeature_Pool != 0)) // Memory Pool Management available + +/// \brief Define a Memory Pool. +/// \param name name of the memory pool. +/// \param no maximum number of blocks (objects) in the memory pool. +/// \param type data type of a single block (object). +/// \note CAN BE CHANGED: The parameter to \b osPoolDef shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#if defined (osObjectsExternal) // object is external +#define osPoolDef(name, no, type) \ +extern const osPoolDef_t os_pool_def_##name +#else // define the object +#define osPoolDef(name, no, type) \ +const osPoolDef_t os_pool_def_##name = \ +{ (no), sizeof(type), NULL } +#endif + +/// \brief Access a Memory Pool definition. +/// \param name name of the memory pool +/// \note CAN BE CHANGED: The parameter to \b osPool shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#define osPool(name) \ +&os_pool_def_##name + +/// Create and Initialize a memory pool. +/// \param[in] pool_def memory pool definition referenced with \ref osPool. +/// \return memory pool ID for reference by other functions or NULL in case of error. +/// \note MUST REMAIN UNCHANGED: \b osPoolCreate shall be consistent in every CMSIS-RTOS. +osPoolId osPoolCreate (const osPoolDef_t *pool_def); + +/// Allocate a memory block from a memory pool. +/// \param[in] pool_id memory pool ID obtain referenced with \ref osPoolCreate. +/// \return address of the allocated memory block or NULL in case of no memory available. +/// \note MUST REMAIN UNCHANGED: \b osPoolAlloc shall be consistent in every CMSIS-RTOS. +void *osPoolAlloc (osPoolId pool_id); + +/// Allocate a memory block from a memory pool and set memory block to zero. +/// \param[in] pool_id memory pool ID obtain referenced with \ref osPoolCreate. +/// \return address of the allocated memory block or NULL in case of no memory available. +/// \note MUST REMAIN UNCHANGED: \b osPoolCAlloc shall be consistent in every CMSIS-RTOS. +void *osPoolCAlloc (osPoolId pool_id); + +/// Return an allocated memory block back to a specific memory pool. +/// \param[in] pool_id memory pool ID obtain referenced with \ref osPoolCreate. +/// \param[in] block address of the allocated memory block that is returned to the memory pool. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osPoolFree shall be consistent in every CMSIS-RTOS. +osStatus osPoolFree (osPoolId pool_id, void *block); + +#endif // Memory Pool Management available + + +// ==== Message Queue Management Functions ==== + +#if (defined (osFeature_MessageQ) && (osFeature_MessageQ != 0)) // Message Queues available + +/// \brief Create a Message Queue Definition. +/// \param name name of the queue. +/// \param queue_sz maximum number of messages in the queue. +/// \param type data type of a single message element (for debugger). +/// \note CAN BE CHANGED: The parameter to \b osMessageQDef shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. 
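Because osPoolDef takes a block type rather than a byte size, the memory pool API above is easiest to see in a sketch. Everything below (Sample_t and the pool of 8 blocks) is hypothetical, and it assumes osFeature_Pool is enabled in this port.

    #include <stdint.h>
    #include "cmsis_os.h"

    typedef struct {
        uint32_t timestamp;
        uint8_t  payload[32];
    } Sample_t;                             /* hypothetical block type */

    osPoolDef(samplePool, 8, Sample_t);     /* fixed pool of 8 Sample_t blocks */
    static osPoolId samplePoolId;

    void SamplePoolInit(void)
    {
        samplePoolId = osPoolCreate(osPool(samplePool));
    }

    Sample_t *SampleAcquire(void)
    {
        /* osPoolCAlloc would additionally zero the block; NULL means the pool is exhausted. */
        return (Sample_t *)osPoolAlloc(samplePoolId);
    }

    void SampleRelease(Sample_t *sample)
    {
        osPoolFree(samplePoolId, sample);
    }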
+#if defined (osObjectsExternal) // object is external +#define osMessageQDef(name, queue_sz, type) \ +extern const osMessageQDef_t os_messageQ_def_##name +#else // define the object +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) +#define osMessageQDef(name, queue_sz, type) \ +const osMessageQDef_t os_messageQ_def_##name = \ +{ (queue_sz), sizeof (type), NULL, NULL } + +#define osMessageQStaticDef(name, queue_sz, type, buffer, control) \ +const osMessageQDef_t os_messageQ_def_##name = \ +{ (queue_sz), sizeof (type) , (buffer), (control)} +#else //configSUPPORT_STATIC_ALLOCATION == 1 +#define osMessageQDef(name, queue_sz, type) \ +const osMessageQDef_t os_messageQ_def_##name = \ +{ (queue_sz), sizeof (type) } + +#endif +#endif + +/// \brief Access a Message Queue Definition. +/// \param name name of the queue +/// \note CAN BE CHANGED: The parameter to \b osMessageQ shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#define osMessageQ(name) \ +&os_messageQ_def_##name + +/// Create and Initialize a Message Queue. +/// \param[in] queue_def queue definition referenced with \ref osMessageQ. +/// \param[in] thread_id thread ID (obtained by \ref osThreadCreate or \ref osThreadGetId) or NULL. +/// \return message queue ID for reference by other functions or NULL in case of error. +/// \note MUST REMAIN UNCHANGED: \b osMessageCreate shall be consistent in every CMSIS-RTOS. +osMessageQId osMessageCreate (const osMessageQDef_t *queue_def, osThreadId thread_id); + +/// Put a Message to a Queue. +/// \param[in] queue_id message queue ID obtained with \ref osMessageCreate. +/// \param[in] info message information. +/// \param[in] millisec timeout value or 0 in case of no time-out. +/// \return status code that indicates the execution status of the function. +/// \note MUST REMAIN UNCHANGED: \b osMessagePut shall be consistent in every CMSIS-RTOS. +osStatus osMessagePut (osMessageQId queue_id, uint32_t info, uint32_t millisec); + +/// Get a Message or Wait for a Message from a Queue. +/// \param[in] queue_id message queue ID obtained with \ref osMessageCreate. +/// \param[in] millisec timeout value or 0 in case of no time-out. +/// \return event information that includes status code. +/// \note MUST REMAIN UNCHANGED: \b osMessageGet shall be consistent in every CMSIS-RTOS. +osEvent osMessageGet (osMessageQId queue_id, uint32_t millisec); + +#endif // Message Queues available + + +// ==== Mail Queue Management Functions ==== + +#if (defined (osFeature_MailQ) && (osFeature_MailQ != 0)) // Mail Queues available + +/// \brief Create a Mail Queue Definition. +/// \param name name of the queue +/// \param queue_sz maximum number of messages in queue +/// \param type data type of a single message element +/// \note CAN BE CHANGED: The parameter to \b osMailQDef shall be consistent but the +/// macro body is implementation specific in every CMSIS-RTOS. +#if defined (osObjectsExternal) // object is external +#define osMailQDef(name, queue_sz, type) \ +extern struct os_mailQ_cb *os_mailQ_cb_##name \ +extern osMailQDef_t os_mailQ_def_##name +#else // define the object +#define osMailQDef(name, queue_sz, type) \ +struct os_mailQ_cb *os_mailQ_cb_##name; \ +const osMailQDef_t os_mailQ_def_##name = \ +{ (queue_sz), sizeof (type), (&os_mailQ_cb_##name) } +#endif + +/// \brief Access a Mail Queue Definition. 
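A short sketch of the message queue API above; the queue name, the depth of 16 and the uint32_t payload are illustrative (osMessagePut/osMessageGet always transfer a single 32-bit value).

    #include <stdint.h>
    #include "cmsis_os.h"

    osMessageQDef(adcQueue, 16, uint32_t);  /* up to 16 pending 32-bit messages */
    static osMessageQId adcQueueId;

    void AdcQueueInit(void)
    {
        adcQueueId = osMessageCreate(osMessageQ(adcQueue), NULL);
    }

    /* Producer: post a value, waiting at most 10 ms if the queue is full. */
    void AdcQueueSend(uint32_t value)
    {
        osMessagePut(adcQueueId, value, 10);
    }

    /* Consumer: block until a value arrives; evt.value.v carries the 32-bit payload. */
    uint32_t AdcQueueReceive(void)
    {
        osEvent evt = osMessageGet(adcQueueId, osWaitForever);
        return (evt.status == osEventMessage) ? evt.value.v : 0;
    }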
+/// \param name name of the queue
+/// \note CAN BE CHANGED: The parameter to \b osMailQ shall be consistent but the
+/// macro body is implementation specific in every CMSIS-RTOS.
+#define osMailQ(name) \
+&os_mailQ_def_##name
+
+/// Create and Initialize mail queue.
+/// \param[in] queue_def reference to the mail queue definition obtained with \ref osMailQ
+/// \param[in] thread_id thread ID (obtained by \ref osThreadCreate or \ref osThreadGetId) or NULL.
+/// \return mail queue ID for reference by other functions or NULL in case of error.
+/// \note MUST REMAIN UNCHANGED: \b osMailCreate shall be consistent in every CMSIS-RTOS.
+osMailQId osMailCreate (const osMailQDef_t *queue_def, osThreadId thread_id);
+
+/// Allocate a memory block from a mail.
+/// \param[in] queue_id mail queue ID obtained with \ref osMailCreate.
+/// \param[in] millisec timeout value or 0 in case of no time-out
+/// \return pointer to memory block that can be filled with mail or NULL in case of error.
+/// \note MUST REMAIN UNCHANGED: \b osMailAlloc shall be consistent in every CMSIS-RTOS.
+void *osMailAlloc (osMailQId queue_id, uint32_t millisec);
+
+/// Allocate a memory block from a mail and set memory block to zero.
+/// \param[in] queue_id mail queue ID obtained with \ref osMailCreate.
+/// \param[in] millisec timeout value or 0 in case of no time-out
+/// \return pointer to memory block that can be filled with mail or NULL in case of error.
+/// \note MUST REMAIN UNCHANGED: \b osMailCAlloc shall be consistent in every CMSIS-RTOS.
+void *osMailCAlloc (osMailQId queue_id, uint32_t millisec);
+
+/// Put a mail to a queue.
+/// \param[in] queue_id mail queue ID obtained with \ref osMailCreate.
+/// \param[in] mail memory block previously allocated with \ref osMailAlloc or \ref osMailCAlloc.
+/// \return status code that indicates the execution status of the function.
+/// \note MUST REMAIN UNCHANGED: \b osMailPut shall be consistent in every CMSIS-RTOS.
+osStatus osMailPut (osMailQId queue_id, void *mail);
+
+/// Get a mail from a queue.
+/// \param[in] queue_id mail queue ID obtained with \ref osMailCreate.
+/// \param[in] millisec timeout value or 0 in case of no time-out
+/// \return event that contains mail information or error code.
+/// \note MUST REMAIN UNCHANGED: \b osMailGet shall be consistent in every CMSIS-RTOS.
+osEvent osMailGet (osMailQId queue_id, uint32_t millisec);
+
+/// Free a memory block from a mail.
+/// \param[in] queue_id mail queue ID obtained with \ref osMailCreate.
+/// \param[in] mail pointer to the memory block that was obtained with \ref osMailGet.
+/// \return status code that indicates the execution status of the function.
+/// \note MUST REMAIN UNCHANGED: \b osMailFree shall be consistent in every CMSIS-RTOS.
+osStatus osMailFree (osMailQId queue_id, void *mail);
+
+#endif // Mail Queues available
+
+/*************************** Additional specific APIs to Free RTOS ************/
+/**
+* @brief Handles the tick increment
+* @param none.
+* @retval none.
+*/
+void osSystickHandler(void);
+
+#if ( INCLUDE_eTaskGetState == 1 )
+/**
+* @brief Obtain the state of any thread.
+* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId.
+* @retval the state of the thread, states are encoded by the osThreadState enumerated type.
+*/
+osThreadState osThreadGetState(osThreadId thread_id);
+#endif /* INCLUDE_eTaskGetState */
+
+#if ( INCLUDE_eTaskGetState == 1 )
+/**
+* @brief Check if a thread is already suspended or not.
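Mail queues differ from message queues in that the payload lives in blocks allocated from the queue itself; the sketch below shows the usual alloc/put/get/free cycle with a hypothetical Measurement_t payload.

    #include <stdint.h>
    #include "cmsis_os.h"

    typedef struct {
        uint32_t id;
        float    value;
    } Measurement_t;                        /* hypothetical mail payload */

    osMailQDef(measMail, 8, Measurement_t); /* 8 mail blocks */
    static osMailQId measMailId;

    void MeasMailInit(void)
    {
        measMailId = osMailCreate(osMailQ(measMail), NULL);
    }

    /* Producer: allocate a block, fill it, post it. */
    void MeasMailSend(uint32_t id, float value)
    {
        Measurement_t *m = (Measurement_t *)osMailAlloc(measMailId, osWaitForever);
        if (m != NULL) {
            m->id = id;
            m->value = value;
            osMailPut(measMailId, m);
        }
    }

    /* Consumer: wait for mail, use it, then return the block to the queue. */
    void MeasMailReceive(void)
    {
        osEvent evt = osMailGet(measMailId, osWaitForever);
        if (evt.status == osEventMail) {
            Measurement_t *m = (Measurement_t *)evt.value.p;
            /* process *m here */
            osMailFree(measMailId, m);
        }
    }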
+* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId.
+* @retval status code that indicates the execution status of the function.
+*/
+
+osStatus osThreadIsSuspended(osThreadId thread_id);
+
+#endif /* INCLUDE_eTaskGetState */
+
+/**
+* @brief Suspend execution of a thread.
+* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId.
+* @retval status code that indicates the execution status of the function.
+*/
+osStatus osThreadSuspend (osThreadId thread_id);
+
+/**
+* @brief Resume execution of a suspended thread.
+* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId.
+* @retval status code that indicates the execution status of the function.
+*/
+osStatus osThreadResume (osThreadId thread_id);
+
+/**
+* @brief Suspend execution of all active threads.
+* @retval status code that indicates the execution status of the function.
+*/
+osStatus osThreadSuspendAll (void);
+
+/**
+* @brief Resume execution of all suspended threads.
+* @retval status code that indicates the execution status of the function.
+*/
+osStatus osThreadResumeAll (void);
+
+/**
+* @brief Delay a task until a specified time
+* @param PreviousWakeTime Pointer to a variable that holds the time at which the
+* task was last unblocked. PreviousWakeTime must be initialised with the current time
+* prior to its first use (PreviousWakeTime = osKernelSysTick() )
+* @param millisec time delay value
+* @retval status code that indicates the execution status of the function.
+*/
+osStatus osDelayUntil (uint32_t *PreviousWakeTime, uint32_t millisec);
+
+/**
+* @brief Abort the delay for a specific thread
+* @param thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId
+* @retval status code that indicates the execution status of the function.
+*/
+osStatus osAbortDelay(osThreadId thread_id);
+
+/**
+* @brief Lists all the current threads, along with their current state
+* and stack usage high water mark.
+* @param buffer A buffer into which the above mentioned details
+* will be written
+* @retval status code that indicates the execution status of the function.
+*/
+osStatus osThreadList (uint8_t *buffer);
+
+/**
+* @brief Receive an item from a queue without removing the item from the queue.
+* @param queue_id message queue ID obtained with \ref osMessageCreate.
+* @param millisec timeout value or 0 in case of no time-out.
+* @retval event information that includes status code.
+*/
+osEvent osMessagePeek (osMessageQId queue_id, uint32_t millisec);
+
+/**
+* @brief Get the number of messages stored in a queue.
+* @param queue_id message queue ID obtained with \ref osMessageCreate.
+* @retval number of messages stored in a queue.
+*/
+uint32_t osMessageWaiting(osMessageQId queue_id);
+
+/**
+* @brief Get the available space in a message queue.
+* @param queue_id message queue ID obtained with \ref osMessageCreate.
+* @retval available space in a message queue.
+*/
+uint32_t osMessageAvailableSpace(osMessageQId queue_id);
+
+/**
+* @brief Delete a Message Queue
+* @param queue_id message queue ID obtained with \ref osMessageCreate.
+* @retval status code that indicates the execution status of the function.
+*/
+osStatus osMessageDelete (osMessageQId queue_id);
+
+/**
+* @brief Create and Initialize a Recursive Mutex
+* @param mutex_def mutex definition referenced with \ref osMutex.
+* @retval mutex ID for reference by other functions or NULL in case of error.
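Of the FreeRTOS-specific additions above, osDelayUntil is the one most often misused; unlike osDelay it keeps an absolute reference time, so the loop period does not drift. A hedged sketch follows (the thread body name and the 10 ms period are illustrative).

    #include "cmsis_os.h"

    /* Hypothetical thread body that runs its work every 10 ms without drift,
       provided the work itself completes in under 10 ms. */
    void PeriodicTask(void const *argument)
    {
        uint32_t previousWake = osKernelSysTick();  /* initialise with the current time */

        (void)argument;
        for (;;) {
            /* periodic work here */
            osDelayUntil(&previousWake, 10);
        }
    }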
+*/
+osMutexId osRecursiveMutexCreate (const osMutexDef_t *mutex_def);
+
+/**
+* @brief Release a Recursive Mutex
+* @param mutex_id mutex ID obtained by \ref osRecursiveMutexCreate.
+* @retval status code that indicates the execution status of the function.
+*/
+osStatus osRecursiveMutexRelease (osMutexId mutex_id);
+
+/**
+* @brief Wait until a Recursive Mutex becomes available
+* @param mutex_id mutex ID obtained by \ref osRecursiveMutexCreate.
+* @param millisec timeout value or 0 in case of no time-out.
+* @retval status code that indicates the execution status of the function.
+*/
+osStatus osRecursiveMutexWait (osMutexId mutex_id, uint32_t millisec);
+
+/**
+* @brief Returns the current count value of a counting semaphore
+* @param semaphore_id semaphore_id ID obtained by \ref osSemaphoreCreate.
+* @retval count value
+*/
+uint32_t osSemaphoreGetCount(osSemaphoreId semaphore_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // _CMSIS_OS_H
diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/croutine.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/croutine.c
new file mode 100644
index 000000000..56c8ac290
--- /dev/null
+++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/croutine.c
@@ -0,0 +1,353 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#include "FreeRTOS.h"
+#include "task.h"
+#include "croutine.h"
+
+/* Remove the whole file if co-routines are not being used. */
+#if( configUSE_CO_ROUTINES != 0 )
+
+/*
+ * Some kernel aware debuggers require data to be viewed to be global, rather
+ * than file scope.
+ */
+#ifdef portREMOVE_STATIC_QUALIFIER
+ #define static
+#endif
+
+
+/* Lists for ready and blocked co-routines. --------------------*/
+static List_t pxReadyCoRoutineLists[ configMAX_CO_ROUTINE_PRIORITIES ]; /*< Prioritised ready co-routines. */
+static List_t xDelayedCoRoutineList1; /*< Delayed co-routines. */
+static List_t xDelayedCoRoutineList2; /*< Delayed co-routines (two lists are used - one for delays that have overflowed the current tick count. */
+static List_t * pxDelayedCoRoutineList; /*< Points to the delayed co-routine list currently being used.
*/ +static List_t * pxOverflowDelayedCoRoutineList; /*< Points to the delayed co-routine list currently being used to hold co-routines that have overflowed the current tick count. */ +static List_t xPendingReadyCoRoutineList; /*< Holds co-routines that have been readied by an external event. They cannot be added directly to the ready lists as the ready lists cannot be accessed by interrupts. */ + +/* Other file private variables. --------------------------------*/ +CRCB_t * pxCurrentCoRoutine = NULL; +static UBaseType_t uxTopCoRoutineReadyPriority = 0; +static TickType_t xCoRoutineTickCount = 0, xLastTickCount = 0, xPassedTicks = 0; + +/* The initial state of the co-routine when it is created. */ +#define corINITIAL_STATE ( 0 ) + +/* + * Place the co-routine represented by pxCRCB into the appropriate ready queue + * for the priority. It is inserted at the end of the list. + * + * This macro accesses the co-routine ready lists and therefore must not be + * used from within an ISR. + */ +#define prvAddCoRoutineToReadyQueue( pxCRCB ) \ +{ \ + if( pxCRCB->uxPriority > uxTopCoRoutineReadyPriority ) \ + { \ + uxTopCoRoutineReadyPriority = pxCRCB->uxPriority; \ + } \ + vListInsertEnd( ( List_t * ) &( pxReadyCoRoutineLists[ pxCRCB->uxPriority ] ), &( pxCRCB->xGenericListItem ) ); \ +} + +/* + * Utility to ready all the lists used by the scheduler. This is called + * automatically upon the creation of the first co-routine. + */ +static void prvInitialiseCoRoutineLists( void ); + +/* + * Co-routines that are readied by an interrupt cannot be placed directly into + * the ready lists (there is no mutual exclusion). Instead they are placed in + * in the pending ready list in order that they can later be moved to the ready + * list by the co-routine scheduler. + */ +static void prvCheckPendingReadyList( void ); + +/* + * Macro that looks at the list of co-routines that are currently delayed to + * see if any require waking. + * + * Co-routines are stored in the queue in the order of their wake time - + * meaning once one co-routine has been found whose timer has not expired + * we need not look any further down the list. + */ +static void prvCheckDelayedList( void ); + +/*-----------------------------------------------------------*/ + +BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, UBaseType_t uxPriority, UBaseType_t uxIndex ) +{ +BaseType_t xReturn; +CRCB_t *pxCoRoutine; + + /* Allocate the memory that will store the co-routine control block. */ + pxCoRoutine = ( CRCB_t * ) pvPortMalloc( sizeof( CRCB_t ) ); + if( pxCoRoutine ) + { + /* If pxCurrentCoRoutine is NULL then this is the first co-routine to + be created and the co-routine data structures need initialising. */ + if( pxCurrentCoRoutine == NULL ) + { + pxCurrentCoRoutine = pxCoRoutine; + prvInitialiseCoRoutineLists(); + } + + /* Check the priority is within limits. */ + if( uxPriority >= configMAX_CO_ROUTINE_PRIORITIES ) + { + uxPriority = configMAX_CO_ROUTINE_PRIORITIES - 1; + } + + /* Fill out the co-routine control block from the function parameters. */ + pxCoRoutine->uxState = corINITIAL_STATE; + pxCoRoutine->uxPriority = uxPriority; + pxCoRoutine->uxIndex = uxIndex; + pxCoRoutine->pxCoRoutineFunction = pxCoRoutineCode; + + /* Initialise all the other co-routine control block parameters. */ + vListInitialiseItem( &( pxCoRoutine->xGenericListItem ) ); + vListInitialiseItem( &( pxCoRoutine->xEventListItem ) ); + + /* Set the co-routine control block as a link back from the ListItem_t. 
+ This is so we can get back to the containing CRCB from a generic item + in a list. */ + listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xGenericListItem ), pxCoRoutine ); + listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xEventListItem ), pxCoRoutine ); + + /* Event lists are always in priority order. */ + listSET_LIST_ITEM_VALUE( &( pxCoRoutine->xEventListItem ), ( ( TickType_t ) configMAX_CO_ROUTINE_PRIORITIES - ( TickType_t ) uxPriority ) ); + + /* Now the co-routine has been initialised it can be added to the ready + list at the correct priority. */ + prvAddCoRoutineToReadyQueue( pxCoRoutine ); + + xReturn = pdPASS; + } + else + { + xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, List_t *pxEventList ) +{ +TickType_t xTimeToWake; + + /* Calculate the time to wake - this may overflow but this is + not a problem. */ + xTimeToWake = xCoRoutineTickCount + xTicksToDelay; + + /* We must remove ourselves from the ready list before adding + ourselves to the blocked list as the same list item is used for + both lists. */ + ( void ) uxListRemove( ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentCoRoutine->xGenericListItem ), xTimeToWake ); + + if( xTimeToWake < xCoRoutineTickCount ) + { + /* Wake time has overflowed. Place this item in the + overflow list. */ + vListInsert( ( List_t * ) pxOverflowDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + } + else + { + /* The wake time has not overflowed, so we can use the + current block list. */ + vListInsert( ( List_t * ) pxDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + } + + if( pxEventList ) + { + /* Also add the co-routine to an event list. If this is done then the + function must be called with interrupts disabled. */ + vListInsert( pxEventList, &( pxCurrentCoRoutine->xEventListItem ) ); + } +} +/*-----------------------------------------------------------*/ + +static void prvCheckPendingReadyList( void ) +{ + /* Are there any co-routines waiting to get moved to the ready list? These + are co-routines that have been readied by an ISR. The ISR cannot access + the ready lists itself. */ + while( listLIST_IS_EMPTY( &xPendingReadyCoRoutineList ) == pdFALSE ) + { + CRCB_t *pxUnblockedCRCB; + + /* The pending ready list can be accessed by an ISR. */ + portDISABLE_INTERRUPTS(); + { + pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( (&xPendingReadyCoRoutineList) ); + ( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) ); + } + portENABLE_INTERRUPTS(); + + ( void ) uxListRemove( &( pxUnblockedCRCB->xGenericListItem ) ); + prvAddCoRoutineToReadyQueue( pxUnblockedCRCB ); + } +} +/*-----------------------------------------------------------*/ + +static void prvCheckDelayedList( void ) +{ +CRCB_t *pxCRCB; + + xPassedTicks = xTaskGetTickCount() - xLastTickCount; + while( xPassedTicks ) + { + xCoRoutineTickCount++; + xPassedTicks--; + + /* If the tick count has overflowed we need to swap the ready lists. */ + if( xCoRoutineTickCount == 0 ) + { + List_t * pxTemp; + + /* Tick count has overflowed so we need to swap the delay lists. If there are + any items in pxDelayedCoRoutineList here then there is an error! 
*/ + pxTemp = pxDelayedCoRoutineList; + pxDelayedCoRoutineList = pxOverflowDelayedCoRoutineList; + pxOverflowDelayedCoRoutineList = pxTemp; + } + + /* See if this tick has made a timeout expire. */ + while( listLIST_IS_EMPTY( pxDelayedCoRoutineList ) == pdFALSE ) + { + pxCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedCoRoutineList ); + + if( xCoRoutineTickCount < listGET_LIST_ITEM_VALUE( &( pxCRCB->xGenericListItem ) ) ) + { + /* Timeout not yet expired. */ + break; + } + + portDISABLE_INTERRUPTS(); + { + /* The event could have occurred just before this critical + section. If this is the case then the generic list item will + have been moved to the pending ready list and the following + line is still valid. Also the pvContainer parameter will have + been set to NULL so the following lines are also valid. */ + ( void ) uxListRemove( &( pxCRCB->xGenericListItem ) ); + + /* Is the co-routine waiting on an event also? */ + if( pxCRCB->xEventListItem.pxContainer ) + { + ( void ) uxListRemove( &( pxCRCB->xEventListItem ) ); + } + } + portENABLE_INTERRUPTS(); + + prvAddCoRoutineToReadyQueue( pxCRCB ); + } + } + + xLastTickCount = xCoRoutineTickCount; +} +/*-----------------------------------------------------------*/ + +void vCoRoutineSchedule( void ) +{ + /* See if any co-routines readied by events need moving to the ready lists. */ + prvCheckPendingReadyList(); + + /* See if any delayed co-routines have timed out. */ + prvCheckDelayedList(); + + /* Find the highest priority queue that contains ready co-routines. */ + while( listLIST_IS_EMPTY( &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) ) ) + { + if( uxTopCoRoutineReadyPriority == 0 ) + { + /* No more co-routines to check. */ + return; + } + --uxTopCoRoutineReadyPriority; + } + + /* listGET_OWNER_OF_NEXT_ENTRY walks through the list, so the co-routines + of the same priority get an equal share of the processor time. */ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentCoRoutine, &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) ); + + /* Call the co-routine. */ + ( pxCurrentCoRoutine->pxCoRoutineFunction )( pxCurrentCoRoutine, pxCurrentCoRoutine->uxIndex ); + + return; +} +/*-----------------------------------------------------------*/ + +static void prvInitialiseCoRoutineLists( void ) +{ +UBaseType_t uxPriority; + + for( uxPriority = 0; uxPriority < configMAX_CO_ROUTINE_PRIORITIES; uxPriority++ ) + { + vListInitialise( ( List_t * ) &( pxReadyCoRoutineLists[ uxPriority ] ) ); + } + + vListInitialise( ( List_t * ) &xDelayedCoRoutineList1 ); + vListInitialise( ( List_t * ) &xDelayedCoRoutineList2 ); + vListInitialise( ( List_t * ) &xPendingReadyCoRoutineList ); + + /* Start with pxDelayedCoRoutineList using list1 and the + pxOverflowDelayedCoRoutineList using list2. */ + pxDelayedCoRoutineList = &xDelayedCoRoutineList1; + pxOverflowDelayedCoRoutineList = &xDelayedCoRoutineList2; +} +/*-----------------------------------------------------------*/ + +BaseType_t xCoRoutineRemoveFromEventList( const List_t *pxEventList ) +{ +CRCB_t *pxUnblockedCRCB; +BaseType_t xReturn; + + /* This function is called from within an interrupt. It can only access + event lists and the pending ready list. This function assumes that a + check has already been made to ensure pxEventList is not empty. 
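croutine.c only compiles when configUSE_CO_ROUTINES is 1 (see the guard near the top of the file). For readers unfamiliar with the model, a hedged sketch of the application side follows; it assumes configUSE_IDLE_HOOK is also 1 so that vCoRoutineSchedule() can be driven from the idle hook, and the blink co-routine and its 200-tick delay are illustrative.

    #include "FreeRTOS.h"
    #include "croutine.h"

    /* Co-routine bodies are wrapped in crSTART()/crEND(); local variables do not
       survive a blocking macro such as crDELAY(), so persistent state must be static. */
    static void prvBlinkCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
    {
        ( void ) uxIndex;

        crSTART( xHandle );
        for( ;; )
        {
            /* toggle an LED or similar here */
            crDELAY( xHandle, 200 );
        }
        crEND();
    }

    void vSetupCoRoutines( void )
    {
        xCoRoutineCreate( prvBlinkCoRoutine, 0, 0 );   /* priority 0, index 0 */
    }

    /* Called by the idle task when configUSE_IDLE_HOOK == 1. */
    void vApplicationIdleHook( void )
    {
        vCoRoutineSchedule();
    }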
*/ + pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); + ( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) ); + vListInsertEnd( ( List_t * ) &( xPendingReadyCoRoutineList ), &( pxUnblockedCRCB->xEventListItem ) ); + + if( pxUnblockedCRCB->uxPriority >= pxCurrentCoRoutine->uxPriority ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} + +#endif /* configUSE_CO_ROUTINES == 0 */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/event_groups.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/event_groups.c new file mode 100644 index 000000000..65a5ff259 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/event_groups.c @@ -0,0 +1,753 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* Standard includes. */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* FreeRTOS includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "timers.h" +#include "event_groups.h" + +/* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified +because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined +for the header files above, but not in this file, in order to generate the +correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021 See comment above. */ + +/* The following bit fields convey control information in a task's event list +item value. It is important they don't clash with the +taskEVENT_LIST_ITEM_VALUE_IN_USE definition. 
*/ +#if configUSE_16_BIT_TICKS == 1 + #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100U + #define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200U + #define eventWAIT_FOR_ALL_BITS 0x0400U + #define eventEVENT_BITS_CONTROL_BYTES 0xff00U +#else + #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x01000000UL + #define eventUNBLOCKED_DUE_TO_BIT_SET 0x02000000UL + #define eventWAIT_FOR_ALL_BITS 0x04000000UL + #define eventEVENT_BITS_CONTROL_BYTES 0xff000000UL +#endif + +typedef struct EventGroupDef_t +{ + EventBits_t uxEventBits; + List_t xTasksWaitingForBits; /*< List of tasks waiting for a bit to be set. */ + + #if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxEventGroupNumber; + #endif + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ + #endif +} EventGroup_t; + +/*-----------------------------------------------------------*/ + +/* + * Test the bits set in uxCurrentEventBits to see if the wait condition is met. + * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is + * pdTRUE then the wait condition is met if all the bits set in uxBitsToWaitFor + * are also set in uxCurrentEventBits. If xWaitForAllBits is pdFALSE then the + * wait condition is met if any of the bits set in uxBitsToWait for are also set + * in uxCurrentEventBits. + */ +static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION; + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) + { + EventGroup_t *pxEventBits; + + /* A StaticEventGroup_t object must be provided. */ + configASSERT( pxEventGroupBuffer ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticEventGroup_t equals the size of the real + event group structure. */ + volatile size_t xSize = sizeof( StaticEventGroup_t ); + configASSERT( xSize == sizeof( EventGroup_t ) ); + } /*lint !e529 xSize is referenced if configASSERT() is defined. */ + #endif /* configASSERT_DEFINED */ + + /* The user has provided a statically allocated event group - use it. */ + pxEventBits = ( EventGroup_t * ) pxEventGroupBuffer; /*lint !e740 !e9087 EventGroup_t and StaticEventGroup_t are deliberately aliased for data hiding purposes and guaranteed to have the same size and alignment requirement - checked by configASSERT(). */ + + if( pxEventBits != NULL ) + { + pxEventBits->uxEventBits = 0; + vListInitialise( &( pxEventBits->xTasksWaitingForBits ) ); + + #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Both static and dynamic allocation can be used, so note that + this event group was created statically in case the event group + is later deleted. */ + pxEventBits->ucStaticallyAllocated = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + traceEVENT_GROUP_CREATE( pxEventBits ); + } + else + { + /* xEventGroupCreateStatic should only ever be called with + pxEventGroupBuffer pointing to a pre-allocated (compile time + allocated) StaticEventGroup_t variable. 
*/ + traceEVENT_GROUP_CREATE_FAILED(); + } + + return pxEventBits; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + EventGroupHandle_t xEventGroupCreate( void ) + { + EventGroup_t *pxEventBits; + + /* Allocate the event group. Justification for MISRA deviation as + follows: pvPortMalloc() always ensures returned memory blocks are + aligned per the requirements of the MCU stack. In this case + pvPortMalloc() must return a pointer that is guaranteed to meet the + alignment requirements of the EventGroup_t structure - which (if you + follow it through) is the alignment requirements of the TickType_t type + (EventBits_t being of TickType_t itself). Therefore, whenever the + stack alignment requirements are greater than or equal to the + TickType_t alignment requirements the cast is safe. In other cases, + where the natural word size of the architecture is less than + sizeof( TickType_t ), the TickType_t variables will be accessed in two + or more reads operations, and the alignment requirements is only that + of each individual read. */ + pxEventBits = ( EventGroup_t * ) pvPortMalloc( sizeof( EventGroup_t ) ); /*lint !e9087 !e9079 see comment above. */ + + if( pxEventBits != NULL ) + { + pxEventBits->uxEventBits = 0; + vListInitialise( &( pxEventBits->xTasksWaitingForBits ) ); + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* Both static and dynamic allocation can be used, so note this + event group was allocated statically in case the event group is + later deleted. */ + pxEventBits->ucStaticallyAllocated = pdFALSE; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + traceEVENT_GROUP_CREATE( pxEventBits ); + } + else + { + traceEVENT_GROUP_CREATE_FAILED(); /*lint !e9063 Else branch only exists to allow tracing and does not generate code if trace macros are not defined. */ + } + + return pxEventBits; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) +{ +EventBits_t uxOriginalBitValue, uxReturn; +EventGroup_t *pxEventBits = xEventGroup; +BaseType_t xAlreadyYielded; +BaseType_t xTimeoutOccurred = pdFALSE; + + configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); + configASSERT( uxBitsToWaitFor != 0 ); + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + vTaskSuspendAll(); + { + uxOriginalBitValue = pxEventBits->uxEventBits; + + ( void ) xEventGroupSetBits( xEventGroup, uxBitsToSet ); + + if( ( ( uxOriginalBitValue | uxBitsToSet ) & uxBitsToWaitFor ) == uxBitsToWaitFor ) + { + /* All the rendezvous bits are now set - no need to block. */ + uxReturn = ( uxOriginalBitValue | uxBitsToSet ); + + /* Rendezvous always clear the bits. They will have been cleared + already unless this is the only task in the rendezvous. */ + pxEventBits->uxEventBits &= ~uxBitsToWaitFor; + + xTicksToWait = 0; + } + else + { + if( xTicksToWait != ( TickType_t ) 0 ) + { + traceEVENT_GROUP_SYNC_BLOCK( xEventGroup, uxBitsToSet, uxBitsToWaitFor ); + + /* Store the bits that the calling task is waiting for in the + task's event list item so the kernel knows when a match is + found. Then enter the blocked state. 
*/ + vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | eventCLEAR_EVENTS_ON_EXIT_BIT | eventWAIT_FOR_ALL_BITS ), xTicksToWait ); + + /* This assignment is obsolete as uxReturn will get set after + the task unblocks, but some compilers mistakenly generate a + warning about uxReturn being returned without being set if the + assignment is omitted. */ + uxReturn = 0; + } + else + { + /* The rendezvous bits were not set, but no block time was + specified - just return the current event bit value. */ + uxReturn = pxEventBits->uxEventBits; + xTimeoutOccurred = pdTRUE; + } + } + } + xAlreadyYielded = xTaskResumeAll(); + + if( xTicksToWait != ( TickType_t ) 0 ) + { + if( xAlreadyYielded == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The task blocked to wait for its required bits to be set - at this + point either the required bits were set or the block time expired. If + the required bits were set they will have been stored in the task's + event list item, and they should now be retrieved then cleared. */ + uxReturn = uxTaskResetEventItemValue(); + + if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) + { + /* The task timed out, just return the current event bit value. */ + taskENTER_CRITICAL(); + { + uxReturn = pxEventBits->uxEventBits; + + /* Although the task got here because it timed out before the + bits it was waiting for were set, it is possible that since it + unblocked another task has set the bits. If this is the case + then it needs to clear the bits before exiting. */ + if( ( uxReturn & uxBitsToWaitFor ) == uxBitsToWaitFor ) + { + pxEventBits->uxEventBits &= ~uxBitsToWaitFor; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + xTimeoutOccurred = pdTRUE; + } + else + { + /* The task unblocked because the bits were set. */ + } + + /* Control bits might be set as the task had blocked should not be + returned. */ + uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES; + } + + traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred ); + + /* Prevent compiler warnings when trace macros are not used. */ + ( void ) xTimeoutOccurred; + + return uxReturn; +} +/*-----------------------------------------------------------*/ + +EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) +{ +EventGroup_t *pxEventBits = xEventGroup; +EventBits_t uxReturn, uxControlBits = 0; +BaseType_t xWaitConditionMet, xAlreadyYielded; +BaseType_t xTimeoutOccurred = pdFALSE; + + /* Check the user is not attempting to wait on the bits used by the kernel + itself, and that at least one bit is being requested. */ + configASSERT( xEventGroup ); + configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); + configASSERT( uxBitsToWaitFor != 0 ); + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + vTaskSuspendAll(); + { + const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits; + + /* Check to see if the wait condition is already met or not. */ + xWaitConditionMet = prvTestWaitCondition( uxCurrentEventBits, uxBitsToWaitFor, xWaitForAllBits ); + + if( xWaitConditionMet != pdFALSE ) + { + /* The wait condition has already been met so there is no need to + block. 
*/ + uxReturn = uxCurrentEventBits; + xTicksToWait = ( TickType_t ) 0; + + /* Clear the wait bits if requested to do so. */ + if( xClearOnExit != pdFALSE ) + { + pxEventBits->uxEventBits &= ~uxBitsToWaitFor; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The wait condition has not been met, but no block time was + specified, so just return the current value. */ + uxReturn = uxCurrentEventBits; + xTimeoutOccurred = pdTRUE; + } + else + { + /* The task is going to block to wait for its required bits to be + set. uxControlBits are used to remember the specified behaviour of + this call to xEventGroupWaitBits() - for use when the event bits + unblock the task. */ + if( xClearOnExit != pdFALSE ) + { + uxControlBits |= eventCLEAR_EVENTS_ON_EXIT_BIT; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xWaitForAllBits != pdFALSE ) + { + uxControlBits |= eventWAIT_FOR_ALL_BITS; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Store the bits that the calling task is waiting for in the + task's event list item so the kernel knows when a match is + found. Then enter the blocked state. */ + vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | uxControlBits ), xTicksToWait ); + + /* This is obsolete as it will get set after the task unblocks, but + some compilers mistakenly generate a warning about the variable + being returned without being set if it is not done. */ + uxReturn = 0; + + traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ); + } + } + xAlreadyYielded = xTaskResumeAll(); + + if( xTicksToWait != ( TickType_t ) 0 ) + { + if( xAlreadyYielded == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The task blocked to wait for its required bits to be set - at this + point either the required bits were set or the block time expired. If + the required bits were set they will have been stored in the task's + event list item, and they should now be retrieved then cleared. */ + uxReturn = uxTaskResetEventItemValue(); + + if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) + { + taskENTER_CRITICAL(); + { + /* The task timed out, just return the current event bit value. */ + uxReturn = pxEventBits->uxEventBits; + + /* It is possible that the event bits were updated between this + task leaving the Blocked state and running again. */ + if( prvTestWaitCondition( uxReturn, uxBitsToWaitFor, xWaitForAllBits ) != pdFALSE ) + { + if( xClearOnExit != pdFALSE ) + { + pxEventBits->uxEventBits &= ~uxBitsToWaitFor; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + xTimeoutOccurred = pdTRUE; + } + taskEXIT_CRITICAL(); + } + else + { + /* The task unblocked because the bits were set. */ + } + + /* The task blocked so control bits may have been set. */ + uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES; + } + traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred ); + + /* Prevent compiler warnings when trace macros are not used. */ + ( void ) xTimeoutOccurred; + + return uxReturn; +} +/*-----------------------------------------------------------*/ + +EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) +{ +EventGroup_t *pxEventBits = xEventGroup; +EventBits_t uxReturn; + + /* Check the user is not attempting to clear the bits used by the kernel + itself. 
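xEventGroupWaitBits, implemented above, is typically paired with xEventGroupSetBits as in the following sketch. The bit names, the 100-tick timeout and the dynamically created group are illustrative; dynamic creation assumes configSUPPORT_DYNAMIC_ALLOCATION is 1, and xEventGroupSetBitsFromISR must be used instead when setting bits from an interrupt.

    #include "FreeRTOS.h"
    #include "event_groups.h"

    #define RX_DONE_BIT    ( 1UL << 0 )     /* illustrative event bits */
    #define TX_DONE_BIT    ( 1UL << 1 )

    static EventGroupHandle_t xCommEvents;

    void vCommEventsInit( void )
    {
        xCommEvents = xEventGroupCreate();
    }

    /* Driver or worker-task side: announce that reception has finished. */
    void vCommRxComplete( void )
    {
        ( void ) xEventGroupSetBits( xCommEvents, RX_DONE_BIT );
    }

    /* Waiting side: block for up to 100 ticks until both transfers are done,
       clearing the bits on exit. */
    BaseType_t xCommWaitForBoth( void )
    {
        EventBits_t uxBits = xEventGroupWaitBits( xCommEvents,
                                                  RX_DONE_BIT | TX_DONE_BIT,
                                                  pdTRUE,   /* clear on exit */
                                                  pdTRUE,   /* wait for all bits */
                                                  100 );

        return ( ( uxBits & ( RX_DONE_BIT | TX_DONE_BIT ) ) == ( RX_DONE_BIT | TX_DONE_BIT ) ) ? pdTRUE : pdFALSE;
    }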
*/ + configASSERT( xEventGroup ); + configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); + + taskENTER_CRITICAL(); + { + traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear ); + + /* The value returned is the event group value prior to the bits being + cleared. */ + uxReturn = pxEventBits->uxEventBits; + + /* Clear the bits. */ + pxEventBits->uxEventBits &= ~uxBitsToClear; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) + + BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) + { + BaseType_t xReturn; + + traceEVENT_GROUP_CLEAR_BITS_FROM_ISR( xEventGroup, uxBitsToClear ); + xReturn = xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL ); /*lint !e9087 Can't avoid cast to void* as a generic callback function not specific to this use case. Callback casts back to original type so safe. */ + + return xReturn; + } + +#endif +/*-----------------------------------------------------------*/ + +EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) +{ +UBaseType_t uxSavedInterruptStatus; +EventGroup_t const * const pxEventBits = xEventGroup; +EventBits_t uxReturn; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + uxReturn = pxEventBits->uxEventBits; + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return uxReturn; +} /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */ +/*-----------------------------------------------------------*/ + +EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) +{ +ListItem_t *pxListItem, *pxNext; +ListItem_t const *pxListEnd; +List_t const * pxList; +EventBits_t uxBitsToClear = 0, uxBitsWaitedFor, uxControlBits; +EventGroup_t *pxEventBits = xEventGroup; +BaseType_t xMatchFound = pdFALSE; + + /* Check the user is not attempting to set the bits used by the kernel + itself. */ + configASSERT( xEventGroup ); + configASSERT( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); + + pxList = &( pxEventBits->xTasksWaitingForBits ); + pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + vTaskSuspendAll(); + { + traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); + + pxListItem = listGET_HEAD_ENTRY( pxList ); + + /* Set the bits. */ + pxEventBits->uxEventBits |= uxBitsToSet; + + /* See if the new bit value should unblock any tasks. */ + while( pxListItem != pxListEnd ) + { + pxNext = listGET_NEXT( pxListItem ); + uxBitsWaitedFor = listGET_LIST_ITEM_VALUE( pxListItem ); + xMatchFound = pdFALSE; + + /* Split the bits waited for from the control bits. */ + uxControlBits = uxBitsWaitedFor & eventEVENT_BITS_CONTROL_BYTES; + uxBitsWaitedFor &= ~eventEVENT_BITS_CONTROL_BYTES; + + if( ( uxControlBits & eventWAIT_FOR_ALL_BITS ) == ( EventBits_t ) 0 ) + { + /* Just looking for single bit being set. */ + if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) != ( EventBits_t ) 0 ) + { + xMatchFound = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) == uxBitsWaitedFor ) + { + /* All bits are set. 
*/ + xMatchFound = pdTRUE; + } + else + { + /* Need all bits to be set, but not all the bits were set. */ + } + + if( xMatchFound != pdFALSE ) + { + /* The bits match. Should the bits be cleared on exit? */ + if( ( uxControlBits & eventCLEAR_EVENTS_ON_EXIT_BIT ) != ( EventBits_t ) 0 ) + { + uxBitsToClear |= uxBitsWaitedFor; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Store the actual event flag value in the task's event list + item before removing the task from the event list. The + eventUNBLOCKED_DUE_TO_BIT_SET bit is set so the task knows + that is was unblocked due to its required bits matching, rather + than because it timed out. */ + vTaskRemoveFromUnorderedEventList( pxListItem, pxEventBits->uxEventBits | eventUNBLOCKED_DUE_TO_BIT_SET ); + } + + /* Move onto the next list item. Note pxListItem->pxNext is not + used here as the list item may have been removed from the event list + and inserted into the ready/pending reading list. */ + pxListItem = pxNext; + } + + /* Clear any bits that matched when the eventCLEAR_EVENTS_ON_EXIT_BIT + bit was set in the control word. */ + pxEventBits->uxEventBits &= ~uxBitsToClear; + } + ( void ) xTaskResumeAll(); + + return pxEventBits->uxEventBits; +} +/*-----------------------------------------------------------*/ + +void vEventGroupDelete( EventGroupHandle_t xEventGroup ) +{ +EventGroup_t *pxEventBits = xEventGroup; +const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits ); + + vTaskSuspendAll(); + { + traceEVENT_GROUP_DELETE( xEventGroup ); + + while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 ) + { + /* Unblock the task, returning 0 as the event list is being deleted + and cannot therefore have any bits set. */ + configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) ); + vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET ); + } + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) + { + /* The event group can only have been allocated dynamically - free + it again. */ + vPortFree( pxEventBits ); + } + #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + { + /* The event group could have been allocated statically or + dynamically, so check before attempting to free the memory. */ + if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE ) + { + vPortFree( pxEventBits ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + } + ( void ) xTaskResumeAll(); +} +/*-----------------------------------------------------------*/ + +/* For internal use only - execute a 'set bits' command that was pended from +an interrupt. */ +void vEventGroupSetBitsCallback( void *pvEventGroup, const uint32_t ulBitsToSet ) +{ + ( void ) xEventGroupSetBits( pvEventGroup, ( EventBits_t ) ulBitsToSet ); /*lint !e9079 Can't avoid cast to void* as a generic timer callback prototype. Callback casts back to original type so safe. */ +} +/*-----------------------------------------------------------*/ + +/* For internal use only - execute a 'clear bits' command that was pended from +an interrupt. */ +void vEventGroupClearBitsCallback( void *pvEventGroup, const uint32_t ulBitsToClear ) +{ + ( void ) xEventGroupClearBits( pvEventGroup, ( EventBits_t ) ulBitsToClear ); /*lint !e9079 Can't avoid cast to void* as a generic timer callback prototype. 
Callback casts back to original type so safe. */ +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, const BaseType_t xWaitForAllBits ) +{ +BaseType_t xWaitConditionMet = pdFALSE; + + if( xWaitForAllBits == pdFALSE ) + { + /* Task only has to wait for one bit within uxBitsToWaitFor to be + set. Is one already set? */ + if( ( uxCurrentEventBits & uxBitsToWaitFor ) != ( EventBits_t ) 0 ) + { + xWaitConditionMet = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Task has to wait for all the bits in uxBitsToWaitFor to be set. + Are they set already? */ + if( ( uxCurrentEventBits & uxBitsToWaitFor ) == uxBitsToWaitFor ) + { + xWaitConditionMet = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return xWaitConditionMet; +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) + + BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken ) + { + BaseType_t xReturn; + + traceEVENT_GROUP_SET_BITS_FROM_ISR( xEventGroup, uxBitsToSet ); + xReturn = xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken ); /*lint !e9087 Can't avoid cast to void* as a generic callback function not specific to this use case. Callback casts back to original type so safe. */ + + return xReturn; + } + +#endif +/*-----------------------------------------------------------*/ + +#if (configUSE_TRACE_FACILITY == 1) + + UBaseType_t uxEventGroupGetNumber( void* xEventGroup ) + { + UBaseType_t xReturn; + EventGroup_t const *pxEventBits = ( EventGroup_t * ) xEventGroup; /*lint !e9087 !e9079 EventGroupHandle_t is a pointer to an EventGroup_t, but EventGroupHandle_t is kept opaque outside of this file for data hiding purposes. */ + + if( xEventGroup == NULL ) + { + xReturn = 0; + } + else + { + xReturn = pxEventBits->uxEventGroupNumber; + } + + return xReturn; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vEventGroupSetNumber( void * xEventGroup, UBaseType_t uxEventGroupNumber ) + { + ( ( EventGroup_t * ) xEventGroup )->uxEventGroupNumber = uxEventGroupNumber; /*lint !e9087 !e9079 EventGroupHandle_t is a pointer to an EventGroup_t, but EventGroupHandle_t is kept opaque outside of this file for data hiding purposes. */ + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/FreeRTOS.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/FreeRTOS.h new file mode 100644 index 000000000..9d09d91af --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/FreeRTOS.h @@ -0,0 +1,1278 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef INC_FREERTOS_H +#define INC_FREERTOS_H + +/* + * Include the generic headers required for the FreeRTOS port being used. + */ +#include + +/* + * If stdint.h cannot be located then: + * + If using GCC ensure the -nostdint options is *not* being used. + * + Ensure the project's include path includes the directory in which your + * compiler stores stdint.h. + * + Set any compiler options necessary for it to support C99, as technically + * stdint.h is only mandatory with C99 (FreeRTOS does not require C99 in any + * other way). + * + The FreeRTOS download includes a simple stdint.h definition that can be + * used in cases where none is provided by the compiler. The files only + * contains the typedefs required to build FreeRTOS. Read the instructions + * in FreeRTOS/source/stdint.readme for more information. + */ +#include /* READ COMMENT ABOVE. */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Application specific configuration options. */ +#include "FreeRTOSConfig.h" + +/* Basic FreeRTOS definitions. */ +#include "projdefs.h" + +/* Definitions specific to the port being used. */ +#include "portable.h" + +/* Must be defaulted before configUSE_NEWLIB_REENTRANT is used below. */ +#ifndef configUSE_NEWLIB_REENTRANT + #define configUSE_NEWLIB_REENTRANT 0 +#endif + +/* Required if struct _reent is used. */ +#if ( configUSE_NEWLIB_REENTRANT == 1 ) + #include +#endif +/* + * Check all the required application specific macros have been defined. + * These macros are application specific and (as downloaded) are defined + * within FreeRTOSConfig.h. + */ + +#ifndef configMINIMAL_STACK_SIZE + #error Missing definition: configMINIMAL_STACK_SIZE must be defined in FreeRTOSConfig.h. configMINIMAL_STACK_SIZE defines the size (in words) of the stack allocated to the idle task. Refer to the demo project provided for your port for a suitable value. +#endif + +#ifndef configMAX_PRIORITIES + #error Missing definition: configMAX_PRIORITIES must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#if configMAX_PRIORITIES < 1 + #error configMAX_PRIORITIES must be defined to be greater than or equal to 1. +#endif + +#ifndef configUSE_PREEMPTION + #error Missing definition: configUSE_PREEMPTION must be defined in FreeRTOSConfig.h as either 1 or 0. 
See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#ifndef configUSE_IDLE_HOOK + #error Missing definition: configUSE_IDLE_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#ifndef configUSE_TICK_HOOK + #error Missing definition: configUSE_TICK_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#ifndef configUSE_16_BIT_TICKS + #error Missing definition: configUSE_16_BIT_TICKS must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#ifndef configUSE_CO_ROUTINES + #define configUSE_CO_ROUTINES 0 +#endif + +#ifndef INCLUDE_vTaskPrioritySet + #define INCLUDE_vTaskPrioritySet 0 +#endif + +#ifndef INCLUDE_uxTaskPriorityGet + #define INCLUDE_uxTaskPriorityGet 0 +#endif + +#ifndef INCLUDE_vTaskDelete + #define INCLUDE_vTaskDelete 0 +#endif + +#ifndef INCLUDE_vTaskSuspend + #define INCLUDE_vTaskSuspend 0 +#endif + +#ifndef INCLUDE_vTaskDelayUntil + #define INCLUDE_vTaskDelayUntil 0 +#endif + +#ifndef INCLUDE_vTaskDelay + #define INCLUDE_vTaskDelay 0 +#endif + +#ifndef INCLUDE_xTaskGetIdleTaskHandle + #define INCLUDE_xTaskGetIdleTaskHandle 0 +#endif + +#ifndef INCLUDE_xTaskAbortDelay + #define INCLUDE_xTaskAbortDelay 0 +#endif + +#ifndef INCLUDE_xQueueGetMutexHolder + #define INCLUDE_xQueueGetMutexHolder 0 +#endif + +#ifndef INCLUDE_xSemaphoreGetMutexHolder + #define INCLUDE_xSemaphoreGetMutexHolder INCLUDE_xQueueGetMutexHolder +#endif + +#ifndef INCLUDE_xTaskGetHandle + #define INCLUDE_xTaskGetHandle 0 +#endif + +#ifndef INCLUDE_uxTaskGetStackHighWaterMark + #define INCLUDE_uxTaskGetStackHighWaterMark 0 +#endif + +#ifndef INCLUDE_uxTaskGetStackHighWaterMark2 + #define INCLUDE_uxTaskGetStackHighWaterMark2 0 +#endif + +#ifndef INCLUDE_eTaskGetState + #define INCLUDE_eTaskGetState 0 +#endif + +#ifndef INCLUDE_xTaskResumeFromISR + #define INCLUDE_xTaskResumeFromISR 1 +#endif + +#ifndef INCLUDE_xTimerPendFunctionCall + #define INCLUDE_xTimerPendFunctionCall 0 +#endif + +#ifndef INCLUDE_xTaskGetSchedulerState + #define INCLUDE_xTaskGetSchedulerState 0 +#endif + +#ifndef INCLUDE_xTaskGetCurrentTaskHandle + #define INCLUDE_xTaskGetCurrentTaskHandle 0 +#endif + +#if configUSE_CO_ROUTINES != 0 + #ifndef configMAX_CO_ROUTINE_PRIORITIES + #error configMAX_CO_ROUTINE_PRIORITIES must be greater than or equal to 1. 
+ #endif +#endif + +#ifndef configUSE_DAEMON_TASK_STARTUP_HOOK + #define configUSE_DAEMON_TASK_STARTUP_HOOK 0 +#endif + +#ifndef configUSE_APPLICATION_TASK_TAG + #define configUSE_APPLICATION_TASK_TAG 0 +#endif + +#ifndef configNUM_THREAD_LOCAL_STORAGE_POINTERS + #define configNUM_THREAD_LOCAL_STORAGE_POINTERS 0 +#endif + +#ifndef configUSE_RECURSIVE_MUTEXES + #define configUSE_RECURSIVE_MUTEXES 0 +#endif + +#ifndef configUSE_MUTEXES + #define configUSE_MUTEXES 0 +#endif + +#ifndef configUSE_TIMERS + #define configUSE_TIMERS 0 +#endif + +#ifndef configUSE_COUNTING_SEMAPHORES + #define configUSE_COUNTING_SEMAPHORES 0 +#endif + +#ifndef configUSE_ALTERNATIVE_API + #define configUSE_ALTERNATIVE_API 0 +#endif + +#ifndef portCRITICAL_NESTING_IN_TCB + #define portCRITICAL_NESTING_IN_TCB 0 +#endif + +#ifndef configMAX_TASK_NAME_LEN + #define configMAX_TASK_NAME_LEN 16 +#endif + +#ifndef configIDLE_SHOULD_YIELD + #define configIDLE_SHOULD_YIELD 1 +#endif + +#if configMAX_TASK_NAME_LEN < 1 + #error configMAX_TASK_NAME_LEN must be set to a minimum of 1 in FreeRTOSConfig.h +#endif + +#ifndef configASSERT + #define configASSERT( x ) + #define configASSERT_DEFINED 0 +#else + #define configASSERT_DEFINED 1 +#endif + +#ifndef portMEMORY_BARRIER + #define portMEMORY_BARRIER() +#endif + +/* The timers module relies on xTaskGetSchedulerState(). */ +#if configUSE_TIMERS == 1 + + #ifndef configTIMER_TASK_PRIORITY + #error If configUSE_TIMERS is set to 1 then configTIMER_TASK_PRIORITY must also be defined. + #endif /* configTIMER_TASK_PRIORITY */ + + #ifndef configTIMER_QUEUE_LENGTH + #error If configUSE_TIMERS is set to 1 then configTIMER_QUEUE_LENGTH must also be defined. + #endif /* configTIMER_QUEUE_LENGTH */ + + #ifndef configTIMER_TASK_STACK_DEPTH + #error If configUSE_TIMERS is set to 1 then configTIMER_TASK_STACK_DEPTH must also be defined. + #endif /* configTIMER_TASK_STACK_DEPTH */ + +#endif /* configUSE_TIMERS */ + +#ifndef portSET_INTERRUPT_MASK_FROM_ISR + #define portSET_INTERRUPT_MASK_FROM_ISR() 0 +#endif + +#ifndef portCLEAR_INTERRUPT_MASK_FROM_ISR + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedStatusValue ) ( void ) uxSavedStatusValue +#endif + +#ifndef portCLEAN_UP_TCB + #define portCLEAN_UP_TCB( pxTCB ) ( void ) pxTCB +#endif + +#ifndef portPRE_TASK_DELETE_HOOK + #define portPRE_TASK_DELETE_HOOK( pvTaskToDelete, pxYieldPending ) +#endif + +#ifndef portSETUP_TCB + #define portSETUP_TCB( pxTCB ) ( void ) pxTCB +#endif + +#ifndef configQUEUE_REGISTRY_SIZE + #define configQUEUE_REGISTRY_SIZE 0U +#endif + +#if ( configQUEUE_REGISTRY_SIZE < 1 ) + #define vQueueAddToRegistry( xQueue, pcName ) + #define vQueueUnregisterQueue( xQueue ) + #define pcQueueGetName( xQueue ) +#endif + +#ifndef portPOINTER_SIZE_TYPE + #define portPOINTER_SIZE_TYPE uint32_t +#endif + +/* Remove any unused trace macros. */ +#ifndef traceSTART + /* Used to perform any necessary initialisation - for example, open a file + into which trace is to be written. */ + #define traceSTART() +#endif + +#ifndef traceEND + /* Use to close a trace, for example close a file into which trace has been + written. */ + #define traceEND() +#endif + +#ifndef traceTASK_SWITCHED_IN + /* Called after a task has been selected to run. pxCurrentTCB holds a pointer + to the task control block of the selected task. */ + #define traceTASK_SWITCHED_IN() +#endif + +#ifndef traceINCREASE_TICK_COUNT + /* Called before stepping the tick count after waking from tickless idle + sleep. 
*/ + #define traceINCREASE_TICK_COUNT( x ) +#endif + +#ifndef traceLOW_POWER_IDLE_BEGIN + /* Called immediately before entering tickless idle. */ + #define traceLOW_POWER_IDLE_BEGIN() +#endif + +#ifndef traceLOW_POWER_IDLE_END + /* Called when returning to the Idle task after a tickless idle. */ + #define traceLOW_POWER_IDLE_END() +#endif + +#ifndef traceTASK_SWITCHED_OUT + /* Called before a task has been selected to run. pxCurrentTCB holds a pointer + to the task control block of the task being switched out. */ + #define traceTASK_SWITCHED_OUT() +#endif + +#ifndef traceTASK_PRIORITY_INHERIT + /* Called when a task attempts to take a mutex that is already held by a + lower priority task. pxTCBOfMutexHolder is a pointer to the TCB of the task + that holds the mutex. uxInheritedPriority is the priority the mutex holder + will inherit (the priority of the task that is attempting to obtain the + muted. */ + #define traceTASK_PRIORITY_INHERIT( pxTCBOfMutexHolder, uxInheritedPriority ) +#endif + +#ifndef traceTASK_PRIORITY_DISINHERIT + /* Called when a task releases a mutex, the holding of which had resulted in + the task inheriting the priority of a higher priority task. + pxTCBOfMutexHolder is a pointer to the TCB of the task that is releasing the + mutex. uxOriginalPriority is the task's configured (base) priority. */ + #define traceTASK_PRIORITY_DISINHERIT( pxTCBOfMutexHolder, uxOriginalPriority ) +#endif + +#ifndef traceBLOCKING_ON_QUEUE_RECEIVE + /* Task is about to block because it cannot read from a + queue/mutex/semaphore. pxQueue is a pointer to the queue/mutex/semaphore + upon which the read was attempted. pxCurrentTCB points to the TCB of the + task that attempted the read. */ + #define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) +#endif + +#ifndef traceBLOCKING_ON_QUEUE_PEEK + /* Task is about to block because it cannot read from a + queue/mutex/semaphore. pxQueue is a pointer to the queue/mutex/semaphore + upon which the read was attempted. pxCurrentTCB points to the TCB of the + task that attempted the read. */ + #define traceBLOCKING_ON_QUEUE_PEEK( pxQueue ) +#endif + +#ifndef traceBLOCKING_ON_QUEUE_SEND + /* Task is about to block because it cannot write to a + queue/mutex/semaphore. pxQueue is a pointer to the queue/mutex/semaphore + upon which the write was attempted. pxCurrentTCB points to the TCB of the + task that attempted the write. */ + #define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) +#endif + +#ifndef configCHECK_FOR_STACK_OVERFLOW + #define configCHECK_FOR_STACK_OVERFLOW 0 +#endif + +#ifndef configRECORD_STACK_HIGH_ADDRESS + #define configRECORD_STACK_HIGH_ADDRESS 0 +#endif + +#ifndef configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H + #define configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H 0 +#endif + +/* The following event macros are embedded in the kernel API calls. 
*/ + +#ifndef traceMOVED_TASK_TO_READY_STATE + #define traceMOVED_TASK_TO_READY_STATE( pxTCB ) +#endif + +#ifndef tracePOST_MOVED_TASK_TO_READY_STATE + #define tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ) +#endif + +#ifndef traceQUEUE_CREATE + #define traceQUEUE_CREATE( pxNewQueue ) +#endif + +#ifndef traceQUEUE_CREATE_FAILED + #define traceQUEUE_CREATE_FAILED( ucQueueType ) +#endif + +#ifndef traceCREATE_MUTEX + #define traceCREATE_MUTEX( pxNewQueue ) +#endif + +#ifndef traceCREATE_MUTEX_FAILED + #define traceCREATE_MUTEX_FAILED() +#endif + +#ifndef traceGIVE_MUTEX_RECURSIVE + #define traceGIVE_MUTEX_RECURSIVE( pxMutex ) +#endif + +#ifndef traceGIVE_MUTEX_RECURSIVE_FAILED + #define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) +#endif + +#ifndef traceTAKE_MUTEX_RECURSIVE + #define traceTAKE_MUTEX_RECURSIVE( pxMutex ) +#endif + +#ifndef traceTAKE_MUTEX_RECURSIVE_FAILED + #define traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex ) +#endif + +#ifndef traceCREATE_COUNTING_SEMAPHORE + #define traceCREATE_COUNTING_SEMAPHORE() +#endif + +#ifndef traceCREATE_COUNTING_SEMAPHORE_FAILED + #define traceCREATE_COUNTING_SEMAPHORE_FAILED() +#endif + +#ifndef traceQUEUE_SEND + #define traceQUEUE_SEND( pxQueue ) +#endif + +#ifndef traceQUEUE_SEND_FAILED + #define traceQUEUE_SEND_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_RECEIVE + #define traceQUEUE_RECEIVE( pxQueue ) +#endif + +#ifndef traceQUEUE_PEEK + #define traceQUEUE_PEEK( pxQueue ) +#endif + +#ifndef traceQUEUE_PEEK_FAILED + #define traceQUEUE_PEEK_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_PEEK_FROM_ISR + #define traceQUEUE_PEEK_FROM_ISR( pxQueue ) +#endif + +#ifndef traceQUEUE_RECEIVE_FAILED + #define traceQUEUE_RECEIVE_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_SEND_FROM_ISR + #define traceQUEUE_SEND_FROM_ISR( pxQueue ) +#endif + +#ifndef traceQUEUE_SEND_FROM_ISR_FAILED + #define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_RECEIVE_FROM_ISR + #define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) +#endif + +#ifndef traceQUEUE_RECEIVE_FROM_ISR_FAILED + #define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_PEEK_FROM_ISR_FAILED + #define traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_DELETE + #define traceQUEUE_DELETE( pxQueue ) +#endif + +#ifndef traceTASK_CREATE + #define traceTASK_CREATE( pxNewTCB ) +#endif + +#ifndef traceTASK_CREATE_FAILED + #define traceTASK_CREATE_FAILED() +#endif + +#ifndef traceTASK_DELETE + #define traceTASK_DELETE( pxTaskToDelete ) +#endif + +#ifndef traceTASK_DELAY_UNTIL + #define traceTASK_DELAY_UNTIL( x ) +#endif + +#ifndef traceTASK_DELAY + #define traceTASK_DELAY() +#endif + +#ifndef traceTASK_PRIORITY_SET + #define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) +#endif + +#ifndef traceTASK_SUSPEND + #define traceTASK_SUSPEND( pxTaskToSuspend ) +#endif + +#ifndef traceTASK_RESUME + #define traceTASK_RESUME( pxTaskToResume ) +#endif + +#ifndef traceTASK_RESUME_FROM_ISR + #define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) +#endif + +#ifndef traceTASK_INCREMENT_TICK + #define traceTASK_INCREMENT_TICK( xTickCount ) +#endif + +#ifndef traceTIMER_CREATE + #define traceTIMER_CREATE( pxNewTimer ) +#endif + +#ifndef traceTIMER_CREATE_FAILED + #define traceTIMER_CREATE_FAILED() +#endif + +#ifndef traceTIMER_COMMAND_SEND + #define traceTIMER_COMMAND_SEND( xTimer, xMessageID, xMessageValueValue, xReturn ) +#endif + +#ifndef traceTIMER_EXPIRED + #define traceTIMER_EXPIRED( pxTimer ) +#endif + +#ifndef traceTIMER_COMMAND_RECEIVED + #define 
traceTIMER_COMMAND_RECEIVED( pxTimer, xMessageID, xMessageValue ) +#endif + +#ifndef traceMALLOC + #define traceMALLOC( pvAddress, uiSize ) +#endif + +#ifndef traceFREE + #define traceFREE( pvAddress, uiSize ) +#endif + +#ifndef traceEVENT_GROUP_CREATE + #define traceEVENT_GROUP_CREATE( xEventGroup ) +#endif + +#ifndef traceEVENT_GROUP_CREATE_FAILED + #define traceEVENT_GROUP_CREATE_FAILED() +#endif + +#ifndef traceEVENT_GROUP_SYNC_BLOCK + #define traceEVENT_GROUP_SYNC_BLOCK( xEventGroup, uxBitsToSet, uxBitsToWaitFor ) +#endif + +#ifndef traceEVENT_GROUP_SYNC_END + #define traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred ) ( void ) xTimeoutOccurred +#endif + +#ifndef traceEVENT_GROUP_WAIT_BITS_BLOCK + #define traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ) +#endif + +#ifndef traceEVENT_GROUP_WAIT_BITS_END + #define traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred ) ( void ) xTimeoutOccurred +#endif + +#ifndef traceEVENT_GROUP_CLEAR_BITS + #define traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear ) +#endif + +#ifndef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR + #define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR( xEventGroup, uxBitsToClear ) +#endif + +#ifndef traceEVENT_GROUP_SET_BITS + #define traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ) +#endif + +#ifndef traceEVENT_GROUP_SET_BITS_FROM_ISR + #define traceEVENT_GROUP_SET_BITS_FROM_ISR( xEventGroup, uxBitsToSet ) +#endif + +#ifndef traceEVENT_GROUP_DELETE + #define traceEVENT_GROUP_DELETE( xEventGroup ) +#endif + +#ifndef tracePEND_FUNC_CALL + #define tracePEND_FUNC_CALL(xFunctionToPend, pvParameter1, ulParameter2, ret) +#endif + +#ifndef tracePEND_FUNC_CALL_FROM_ISR + #define tracePEND_FUNC_CALL_FROM_ISR(xFunctionToPend, pvParameter1, ulParameter2, ret) +#endif + +#ifndef traceQUEUE_REGISTRY_ADD + #define traceQUEUE_REGISTRY_ADD(xQueue, pcQueueName) +#endif + +#ifndef traceTASK_NOTIFY_TAKE_BLOCK + #define traceTASK_NOTIFY_TAKE_BLOCK() +#endif + +#ifndef traceTASK_NOTIFY_TAKE + #define traceTASK_NOTIFY_TAKE() +#endif + +#ifndef traceTASK_NOTIFY_WAIT_BLOCK + #define traceTASK_NOTIFY_WAIT_BLOCK() +#endif + +#ifndef traceTASK_NOTIFY_WAIT + #define traceTASK_NOTIFY_WAIT() +#endif + +#ifndef traceTASK_NOTIFY + #define traceTASK_NOTIFY() +#endif + +#ifndef traceTASK_NOTIFY_FROM_ISR + #define traceTASK_NOTIFY_FROM_ISR() +#endif + +#ifndef traceTASK_NOTIFY_GIVE_FROM_ISR + #define traceTASK_NOTIFY_GIVE_FROM_ISR() +#endif + +#ifndef traceSTREAM_BUFFER_CREATE_FAILED + #define traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_CREATE_STATIC_FAILED + #define traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_CREATE + #define traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_DELETE + #define traceSTREAM_BUFFER_DELETE( xStreamBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_RESET + #define traceSTREAM_BUFFER_RESET( xStreamBuffer ) +#endif + +#ifndef traceBLOCKING_ON_STREAM_BUFFER_SEND + #define traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_SEND + #define traceSTREAM_BUFFER_SEND( xStreamBuffer, xBytesSent ) +#endif + +#ifndef traceSTREAM_BUFFER_SEND_FAILED + #define traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_SEND_FROM_ISR + #define traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xBytesSent ) +#endif + +#ifndef 
traceBLOCKING_ON_STREAM_BUFFER_RECEIVE + #define traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_RECEIVE + #define traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ) +#endif + +#ifndef traceSTREAM_BUFFER_RECEIVE_FAILED + #define traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_RECEIVE_FROM_ISR + #define traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ) +#endif + +#ifndef configGENERATE_RUN_TIME_STATS + #define configGENERATE_RUN_TIME_STATS 0 +#endif + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + #ifndef portCONFIGURE_TIMER_FOR_RUN_TIME_STATS + #error If configGENERATE_RUN_TIME_STATS is defined then portCONFIGURE_TIMER_FOR_RUN_TIME_STATS must also be defined. portCONFIGURE_TIMER_FOR_RUN_TIME_STATS should call a port layer function to setup a peripheral timer/counter that can then be used as the run time counter time base. + #endif /* portCONFIGURE_TIMER_FOR_RUN_TIME_STATS */ + + #ifndef portGET_RUN_TIME_COUNTER_VALUE + #ifndef portALT_GET_RUN_TIME_COUNTER_VALUE + #error If configGENERATE_RUN_TIME_STATS is defined then either portGET_RUN_TIME_COUNTER_VALUE or portALT_GET_RUN_TIME_COUNTER_VALUE must also be defined. See the examples provided and the FreeRTOS web site for more information. + #endif /* portALT_GET_RUN_TIME_COUNTER_VALUE */ + #endif /* portGET_RUN_TIME_COUNTER_VALUE */ + +#endif /* configGENERATE_RUN_TIME_STATS */ + +#ifndef portCONFIGURE_TIMER_FOR_RUN_TIME_STATS + #define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() +#endif + +#ifndef configUSE_MALLOC_FAILED_HOOK + #define configUSE_MALLOC_FAILED_HOOK 0 +#endif + +#ifndef portPRIVILEGE_BIT + #define portPRIVILEGE_BIT ( ( UBaseType_t ) 0x00 ) +#endif + +#ifndef portYIELD_WITHIN_API + #define portYIELD_WITHIN_API portYIELD +#endif + +#ifndef portSUPPRESS_TICKS_AND_SLEEP + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) +#endif + +#ifndef configEXPECTED_IDLE_TIME_BEFORE_SLEEP + #define configEXPECTED_IDLE_TIME_BEFORE_SLEEP 2 +#endif + +#if configEXPECTED_IDLE_TIME_BEFORE_SLEEP < 2 + #error configEXPECTED_IDLE_TIME_BEFORE_SLEEP must not be less than 2 +#endif + +#ifndef configUSE_TICKLESS_IDLE + #define configUSE_TICKLESS_IDLE 0 +#endif + +#ifndef configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING + #define configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( x ) +#endif + +#ifndef configPRE_SLEEP_PROCESSING + #define configPRE_SLEEP_PROCESSING( x ) +#endif + +#ifndef configPOST_SLEEP_PROCESSING + #define configPOST_SLEEP_PROCESSING( x ) +#endif + +#ifndef configUSE_QUEUE_SETS + #define configUSE_QUEUE_SETS 0 +#endif + +#ifndef portTASK_USES_FLOATING_POINT + #define portTASK_USES_FLOATING_POINT() +#endif + +#ifndef portALLOCATE_SECURE_CONTEXT + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) +#endif + +#ifndef portDONT_DISCARD + #define portDONT_DISCARD +#endif + +#ifndef configUSE_TIME_SLICING + #define configUSE_TIME_SLICING 1 +#endif + +#ifndef configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS + #define configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS 0 +#endif + +#ifndef configUSE_STATS_FORMATTING_FUNCTIONS + #define configUSE_STATS_FORMATTING_FUNCTIONS 0 +#endif + +#ifndef portASSERT_IF_INTERRUPT_PRIORITY_INVALID + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() +#endif + +#ifndef configUSE_TRACE_FACILITY + #define configUSE_TRACE_FACILITY 0 +#endif + +#ifndef mtCOVERAGE_TEST_MARKER + #define mtCOVERAGE_TEST_MARKER() +#endif + +#ifndef mtCOVERAGE_TEST_DELAY + #define mtCOVERAGE_TEST_DELAY() 
+#endif + +#ifndef portASSERT_IF_IN_ISR + #define portASSERT_IF_IN_ISR() +#endif + +#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION + #define configUSE_PORT_OPTIMISED_TASK_SELECTION 0 +#endif + +#ifndef configAPPLICATION_ALLOCATED_HEAP + #define configAPPLICATION_ALLOCATED_HEAP 0 +#endif + +#ifndef configUSE_TASK_NOTIFICATIONS + #define configUSE_TASK_NOTIFICATIONS 1 +#endif + +#ifndef configUSE_POSIX_ERRNO + #define configUSE_POSIX_ERRNO 0 +#endif + +#ifndef portTICK_TYPE_IS_ATOMIC + #define portTICK_TYPE_IS_ATOMIC 0 +#endif + +#ifndef configSUPPORT_STATIC_ALLOCATION + /* Defaults to 0 for backward compatibility. */ + #define configSUPPORT_STATIC_ALLOCATION 0 +#endif + +#ifndef configSUPPORT_DYNAMIC_ALLOCATION + /* Defaults to 1 for backward compatibility. */ + #define configSUPPORT_DYNAMIC_ALLOCATION 1 +#endif + +#ifndef configSTACK_DEPTH_TYPE + /* Defaults to uint16_t for backward compatibility, but can be overridden + in FreeRTOSConfig.h if uint16_t is too restrictive. */ + #define configSTACK_DEPTH_TYPE uint16_t +#endif + +#ifndef configMESSAGE_BUFFER_LENGTH_TYPE + /* Defaults to size_t for backward compatibility, but can be overridden + in FreeRTOSConfig.h if lengths will always be less than the number of bytes + in a size_t. */ + #define configMESSAGE_BUFFER_LENGTH_TYPE size_t +#endif + +/* Sanity check the configuration. */ +#if( configUSE_TICKLESS_IDLE != 0 ) + #if( INCLUDE_vTaskSuspend != 1 ) + #error INCLUDE_vTaskSuspend must be set to 1 if configUSE_TICKLESS_IDLE is not set to 0 + #endif /* INCLUDE_vTaskSuspend */ +#endif /* configUSE_TICKLESS_IDLE */ + +#if( ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) ) + #error configSUPPORT_STATIC_ALLOCATION and configSUPPORT_DYNAMIC_ALLOCATION cannot both be 0, but can both be 1. +#endif + +#if( ( configUSE_RECURSIVE_MUTEXES == 1 ) && ( configUSE_MUTEXES != 1 ) ) + #error configUSE_MUTEXES must be set to 1 to use recursive mutexes +#endif + +#ifndef configINITIAL_TICK_COUNT + #define configINITIAL_TICK_COUNT 0 +#endif + +#if( portTICK_TYPE_IS_ATOMIC == 0 ) + /* Either variables of tick type cannot be read atomically, or + portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when + the tick count is returned to the standard critical section macros. */ + #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL() + #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL() + #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() + #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( ( x ) ) +#else + /* The tick type can be read atomically, so critical sections used when the + tick count is returned can be defined away. */ + #define portTICK_TYPE_ENTER_CRITICAL() + #define portTICK_TYPE_EXIT_CRITICAL() + #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR() 0 + #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) ( void ) x +#endif + +/* Definitions to allow backward compatibility with FreeRTOS versions prior to +V8 if desired. */ +#ifndef configENABLE_BACKWARD_COMPATIBILITY + #define configENABLE_BACKWARD_COMPATIBILITY 1 +#endif + +#ifndef configPRINTF + /* configPRINTF() was not defined, so define it away to nothing. To use + configPRINTF() then define it as follows (where MyPrintFunction() is + provided by the application writer): + + void MyPrintFunction(const char *pcFormat, ... 
); + #define configPRINTF( X ) MyPrintFunction X + + Then call like a standard printf() function, but placing brackets around + all parameters so they are passed as a single parameter. For example: + configPRINTF( ("Value = %d", MyVariable) ); */ + #define configPRINTF( X ) +#endif + +#ifndef configMAX + /* The application writer has not provided their own MAX macro, so define + the following generic implementation. */ + #define configMAX( a, b ) ( ( ( a ) > ( b ) ) ? ( a ) : ( b ) ) +#endif + +#ifndef configMIN + /* The application writer has not provided their own MAX macro, so define + the following generic implementation. */ + #define configMIN( a, b ) ( ( ( a ) < ( b ) ) ? ( a ) : ( b ) ) +#endif + +#if configENABLE_BACKWARD_COMPATIBILITY == 1 + #define eTaskStateGet eTaskGetState + #define portTickType TickType_t + #define xTaskHandle TaskHandle_t + #define xQueueHandle QueueHandle_t + #define xSemaphoreHandle SemaphoreHandle_t + #define xQueueSetHandle QueueSetHandle_t + #define xQueueSetMemberHandle QueueSetMemberHandle_t + #define xTimeOutType TimeOut_t + #define xMemoryRegion MemoryRegion_t + #define xTaskParameters TaskParameters_t + #define xTaskStatusType TaskStatus_t + #define xTimerHandle TimerHandle_t + #define xCoRoutineHandle CoRoutineHandle_t + #define pdTASK_HOOK_CODE TaskHookFunction_t + #define portTICK_RATE_MS portTICK_PERIOD_MS + #define pcTaskGetTaskName pcTaskGetName + #define pcTimerGetTimerName pcTimerGetName + #define pcQueueGetQueueName pcQueueGetName + #define vTaskGetTaskInfo vTaskGetInfo + + /* Backward compatibility within the scheduler code only - these definitions + are not really required but are included for completeness. */ + #define tmrTIMER_CALLBACK TimerCallbackFunction_t + #define pdTASK_CODE TaskFunction_t + #define xListItem ListItem_t + #define xList List_t + + /* For libraries that break the list data hiding, and access list structure + members directly (which is not supposed to be done). */ + #define pxContainer pvContainer +#endif /* configENABLE_BACKWARD_COMPATIBILITY */ + +#if( configUSE_ALTERNATIVE_API != 0 ) + #error The alternative API was deprecated some time ago, and was removed in FreeRTOS V9.0 0 +#endif + +/* Set configUSE_TASK_FPU_SUPPORT to 0 to omit floating point support even +if floating point hardware is otherwise supported by the FreeRTOS port in use. +This constant is not supported by all FreeRTOS ports that include floating +point support. */ +#ifndef configUSE_TASK_FPU_SUPPORT + #define configUSE_TASK_FPU_SUPPORT 1 +#endif + +/* Set configENABLE_MPU to 1 to enable MPU support and 0 to disable it. This is +currently used in ARMv8M ports. */ +#ifndef configENABLE_MPU + #define configENABLE_MPU 0 +#endif + +/* Set configENABLE_FPU to 1 to enable FPU support and 0 to disable it. This is +currently used in ARMv8M ports. */ +#ifndef configENABLE_FPU + #define configENABLE_FPU 1 +#endif + +/* Set configENABLE_TRUSTZONE to 1 enable TrustZone support and 0 to disable it. +This is currently used in ARMv8M ports. */ +#ifndef configENABLE_TRUSTZONE + #define configENABLE_TRUSTZONE 1 +#endif + +/* Set configRUN_FREERTOS_SECURE_ONLY to 1 to run the FreeRTOS ARMv8M port on +the Secure Side only. */ +#ifndef configRUN_FREERTOS_SECURE_ONLY + #define configRUN_FREERTOS_SECURE_ONLY 0 +#endif + +/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using + * dynamically allocated RAM, in which case when any task is deleted it is known + * that both the task's stack and TCB need to be freed. 
Sometimes the + * FreeRTOSConfig.h settings only allow a task to be created using statically + * allocated RAM, in which case when any task is deleted it is known that neither + * the task's stack or TCB should be freed. Sometimes the FreeRTOSConfig.h + * settings allow a task to be created using either statically or dynamically + * allocated RAM, in which case a member of the TCB is used to record whether the + * stack and/or TCB were allocated statically or dynamically, so when a task is + * deleted the RAM that was allocated dynamically is freed again and no attempt is + * made to free the RAM that was allocated statically. + * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a + * task to be created using either statically or dynamically allocated RAM. Note + * that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with + * a statically allocated stack and a dynamically allocated TCB. + * + * The following table lists various combinations of portUSING_MPU_WRAPPERS, + * configSUPPORT_DYNAMIC_ALLOCATION and configSUPPORT_STATIC_ALLOCATION and + * when it is possible to have both static and dynamic allocation: + * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+ + * | MPU | Dynamic | Static | Available Functions | Possible Allocations | Both Dynamic and | Need Free | + * | | | | | | Static Possible | | + * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+ + * | 0 | 0 | 1 | xTaskCreateStatic | TCB - Static, Stack - Static | No | No | + * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------| + * | 0 | 1 | 0 | xTaskCreate | TCB - Dynamic, Stack - Dynamic | No | Yes | + * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------| + * | 0 | 1 | 1 | xTaskCreate, | 1. TCB - Dynamic, Stack - Dynamic | Yes | Yes | + * | | | | xTaskCreateStatic | 2. TCB - Static, Stack - Static | | | + * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------| + * | 1 | 0 | 1 | xTaskCreateStatic, | TCB - Static, Stack - Static | No | No | + * | | | | xTaskCreateRestrictedStatic | | | | + * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------| + * | 1 | 1 | 0 | xTaskCreate, | 1. TCB - Dynamic, Stack - Dynamic | Yes | Yes | + * | | | | xTaskCreateRestricted | 2. TCB - Dynamic, Stack - Static | | | + * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------| + * | 1 | 1 | 1 | xTaskCreate, | 1. TCB - Dynamic, Stack - Dynamic | Yes | Yes | + * | | | | xTaskCreateStatic, | 2. TCB - Dynamic, Stack - Static | | | + * | | | | xTaskCreateRestricted, | 3. 
TCB - Static, Stack - Static | | | + * | | | | xTaskCreateRestrictedStatic | | | | + * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+ + */ +#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE ( ( ( portUSING_MPU_WRAPPERS == 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) || \ + ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) ) + +/* + * In line with software engineering best practice, FreeRTOS implements a strict + * data hiding policy, so the real structures used by FreeRTOS to maintain the + * state of tasks, queues, semaphores, etc. are not accessible to the application + * code. However, if the application writer wants to statically allocate such + * an object then the size of the object needs to be know. Dummy structures + * that are guaranteed to have the same size and alignment requirements of the + * real objects are used for this purpose. The dummy list and list item + * structures below are used for inclusion in such a dummy structure. + */ +struct xSTATIC_LIST_ITEM +{ + #if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1 ) + TickType_t xDummy1; + #endif + TickType_t xDummy2; + void *pvDummy3[ 4 ]; + #if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1 ) + TickType_t xDummy4; + #endif +}; +typedef struct xSTATIC_LIST_ITEM StaticListItem_t; + +/* See the comments above the struct xSTATIC_LIST_ITEM definition. */ +struct xSTATIC_MINI_LIST_ITEM +{ + #if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1 ) + TickType_t xDummy1; + #endif + TickType_t xDummy2; + void *pvDummy3[ 2 ]; +}; +typedef struct xSTATIC_MINI_LIST_ITEM StaticMiniListItem_t; + +/* See the comments above the struct xSTATIC_LIST_ITEM definition. */ +typedef struct xSTATIC_LIST +{ + #if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1 ) + TickType_t xDummy1; + #endif + UBaseType_t uxDummy2; + void *pvDummy3; + StaticMiniListItem_t xDummy4; + #if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1 ) + TickType_t xDummy5; + #endif +} StaticList_t; + +/* + * In line with software engineering best practice, especially when supplying a + * library that is likely to change in future versions, FreeRTOS implements a + * strict data hiding policy. This means the Task structure used internally by + * FreeRTOS is not accessible to application code. However, if the application + * writer wants to statically allocate the memory required to create a task then + * the size of the task object needs to be know. The StaticTask_t structure + * below is provided for this purpose. Its sizes and alignment requirements are + * guaranteed to match those of the genuine structure, no matter which + * architecture is being used, and no matter how the values in FreeRTOSConfig.h + * are set. Its contents are somewhat obfuscated in the hope users will + * recognise that it would be unwise to make direct use of the structure members. 
+ */ +typedef struct xSTATIC_TCB +{ + void *pxDummy1; + #if ( portUSING_MPU_WRAPPERS == 1 ) + xMPU_SETTINGS xDummy2; + #endif + StaticListItem_t xDummy3[ 2 ]; + UBaseType_t uxDummy5; + void *pxDummy6; + uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; + #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) + void *pxDummy8; + #endif + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + UBaseType_t uxDummy9; + #endif + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy10[ 2 ]; + #endif + #if ( configUSE_MUTEXES == 1 ) + UBaseType_t uxDummy12[ 2 ]; + #endif + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + void *pxDummy14; + #endif + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + void *pvDummy15[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; + #endif + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + uint32_t ulDummy16; + #endif + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + struct _reent xDummy17; + #endif + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + uint32_t ulDummy18; + uint8_t ucDummy19; + #endif + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + uint8_t uxDummy20; + #endif + + #if( INCLUDE_xTaskAbortDelay == 1 ) + uint8_t ucDummy21; + #endif + #if ( configUSE_POSIX_ERRNO == 1 ) + int iDummy22; + #endif +} StaticTask_t; + +/* + * In line with software engineering best practice, especially when supplying a + * library that is likely to change in future versions, FreeRTOS implements a + * strict data hiding policy. This means the Queue structure used internally by + * FreeRTOS is not accessible to application code. However, if the application + * writer wants to statically allocate the memory required to create a queue + * then the size of the queue object needs to be know. The StaticQueue_t + * structure below is provided for this purpose. Its sizes and alignment + * requirements are guaranteed to match those of the genuine structure, no + * matter which architecture is being used, and no matter how the values in + * FreeRTOSConfig.h are set. Its contents are somewhat obfuscated in the hope + * users will recognise that it would be unwise to make direct use of the + * structure members. + */ +typedef struct xSTATIC_QUEUE +{ + void *pvDummy1[ 3 ]; + + union + { + void *pvDummy2; + UBaseType_t uxDummy2; + } u; + + StaticList_t xDummy3[ 2 ]; + UBaseType_t uxDummy4[ 3 ]; + uint8_t ucDummy5[ 2 ]; + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucDummy6; + #endif + + #if ( configUSE_QUEUE_SETS == 1 ) + void *pvDummy7; + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy8; + uint8_t ucDummy9; + #endif + +} StaticQueue_t; +typedef StaticQueue_t StaticSemaphore_t; + +/* + * In line with software engineering best practice, especially when supplying a + * library that is likely to change in future versions, FreeRTOS implements a + * strict data hiding policy. This means the event group structure used + * internally by FreeRTOS is not accessible to application code. However, if + * the application writer wants to statically allocate the memory required to + * create an event group then the size of the event group object needs to be + * know. The StaticEventGroup_t structure below is provided for this purpose. + * Its sizes and alignment requirements are guaranteed to match those of the + * genuine structure, no matter which architecture is being used, and no matter + * how the values in FreeRTOSConfig.h are set. 
Its contents are somewhat + * obfuscated in the hope users will recognise that it would be unwise to make + * direct use of the structure members. + */ +typedef struct xSTATIC_EVENT_GROUP +{ + TickType_t xDummy1; + StaticList_t xDummy2; + + #if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy3; + #endif + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucDummy4; + #endif + +} StaticEventGroup_t; + +/* + * In line with software engineering best practice, especially when supplying a + * library that is likely to change in future versions, FreeRTOS implements a + * strict data hiding policy. This means the software timer structure used + * internally by FreeRTOS is not accessible to application code. However, if + * the application writer wants to statically allocate the memory required to + * create a software timer then the size of the queue object needs to be know. + * The StaticTimer_t structure below is provided for this purpose. Its sizes + * and alignment requirements are guaranteed to match those of the genuine + * structure, no matter which architecture is being used, and no matter how the + * values in FreeRTOSConfig.h are set. Its contents are somewhat obfuscated in + * the hope users will recognise that it would be unwise to make direct use of + * the structure members. + */ +typedef struct xSTATIC_TIMER +{ + void *pvDummy1; + StaticListItem_t xDummy2; + TickType_t xDummy3; + void *pvDummy5; + TaskFunction_t pvDummy6; + #if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy7; + #endif + uint8_t ucDummy8; + +} StaticTimer_t; + +/* +* In line with software engineering best practice, especially when supplying a +* library that is likely to change in future versions, FreeRTOS implements a +* strict data hiding policy. This means the stream buffer structure used +* internally by FreeRTOS is not accessible to application code. However, if +* the application writer wants to statically allocate the memory required to +* create a stream buffer then the size of the stream buffer object needs to be +* know. The StaticStreamBuffer_t structure below is provided for this purpose. +* Its size and alignment requirements are guaranteed to match those of the +* genuine structure, no matter which architecture is being used, and no matter +* how the values in FreeRTOSConfig.h are set. Its contents are somewhat +* obfuscated in the hope users will recognise that it would be unwise to make +* direct use of the structure members. +*/ +typedef struct xSTATIC_STREAM_BUFFER +{ + size_t uxDummy1[ 4 ]; + void * pvDummy2[ 3 ]; + uint8_t ucDummy3; + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy4; + #endif +} StaticStreamBuffer_t; + +/* Message buffers are built on stream buffers. */ +typedef StaticStreamBuffer_t StaticMessageBuffer_t; + +#ifdef __cplusplus +} +#endif + +#endif /* INC_FREERTOS_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/StackMacros.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/StackMacros.h new file mode 100644 index 000000000..3ed8b22d1 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/StackMacros.h @@ -0,0 +1,133 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef STACK_MACROS_H +#define STACK_MACROS_H + +#ifndef _MSC_VER /* Visual Studio doesn't support #warning. */ + #warning The name of this file has changed to stack_macros.h. Please update your code accordingly. This source file (which has the original name) will be removed in future released. +#endif + +/* + * Call the stack overflow hook function if the stack of the task being swapped + * out is currently overflowed, or looks like it might have overflowed in the + * past. + * + * Setting configCHECK_FOR_STACK_OVERFLOW to 1 will cause the macro to check + * the current stack state only - comparing the current top of stack value to + * the stack limit. Setting configCHECK_FOR_STACK_OVERFLOW to greater than 1 + * will also cause the last few stack bytes to be checked to ensure the value + * to which the bytes were set when the task was created have not been + * overwritten. Note this second test does not guarantee that an overflowed + * stack will always be recognised. + */ + +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + /* Is the currently saved stack pointer within the stack limit? */ \ + if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + \ + /* Is the currently saved stack pointer within the stack limit? 
*/ \ + if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ + const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ + \ + if( ( pulStack[ 0 ] != ulCheckValue ) || \ + ( pulStack[ 1 ] != ulCheckValue ) || \ + ( pulStack[ 2 ] != ulCheckValue ) || \ + ( pulStack[ 3 ] != ulCheckValue ) ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + int8_t *pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \ + static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE }; \ + \ + \ + pcEndOfStack -= sizeof( ucExpectedStackBytes ); \ + \ + /* Has the extremity of the task stack ever been written over? */ \ + if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +/* Remove stack overflow macro if not being used. */ +#ifndef taskCHECK_FOR_STACK_OVERFLOW + #define taskCHECK_FOR_STACK_OVERFLOW() +#endif + + + +#endif /* STACK_MACROS_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h new file mode 100644 index 000000000..8b3b41b90 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h @@ -0,0 +1,720 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef CO_ROUTINE_H +#define CO_ROUTINE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include croutine.h" +#endif + +#include "list.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Used to hide the implementation of the co-routine control block. The +control block structure however has to be included in the header due to +the macro implementation of the co-routine functionality. */ +typedef void * CoRoutineHandle_t; + +/* Defines the prototype to which co-routine functions must conform. */ +typedef void (*crCOROUTINE_CODE)( CoRoutineHandle_t, UBaseType_t ); + +typedef struct corCoRoutineControlBlock +{ + crCOROUTINE_CODE pxCoRoutineFunction; + ListItem_t xGenericListItem; /*< List item used to place the CRCB in ready and blocked queues. */ + ListItem_t xEventListItem; /*< List item used to place the CRCB in event lists. */ + UBaseType_t uxPriority; /*< The priority of the co-routine in relation to other co-routines. */ + UBaseType_t uxIndex; /*< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */ + uint16_t uxState; /*< Used internally by the co-routine implementation. */ +} CRCB_t; /* Co-routine control block. Note must be identical in size down to uxPriority with TCB_t. */ + +/** + * croutine. h + *
+ BaseType_t xCoRoutineCreate(
+                                 crCOROUTINE_CODE pxCoRoutineCode,
+                                 UBaseType_t uxPriority,
+                                 UBaseType_t uxIndex
+                               );
+ * + * Create a new co-routine and add it to the list of co-routines that are + * ready to run. + * + * @param pxCoRoutineCode Pointer to the co-routine function.  Co-routine + * functions require special syntax - see the co-routine section of the WEB + * documentation for more information. + * + * @param uxPriority The priority with respect to other co-routines at which + * the co-routine will run. + * + * @param uxIndex Used to distinguish between different co-routines that + * execute the same function.  See the example below and the co-routine section + * of the WEB documentation for further information. + * + * @return pdPASS if the co-routine was successfully created and added to a ready + * list, otherwise an error code defined within ProjDefs.h. + * + * Example usage: +
+ // Co-routine to be created.
+ void vFlashCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ // This may not be necessary for const variables.
+ static const char cLedToFlash[ 2 ] = { 5, 6 };
+ static const TickType_t uxFlashRates[ 2 ] = { 200, 400 };
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // This co-routine just delays for a fixed period, then toggles
+         // an LED.  Two co-routines are created using this function, so
+         // the uxIndex parameter is used to tell the co-routine which
+         // LED to flash and how long to delay.
+         vParTestToggleLED( cLedToFlash[ uxIndex ] );
+         crDELAY( xHandle, uxFlashRates[ uxIndex ] );
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+
+ // Function that creates two co-routines.
+ void vOtherFunction( void )
+ {
+ UBaseType_t uxIndex;
+
+     // Create two co-routines at priority 0.  The first is given index 0
+     // so (from the code above) toggles LED 5 every 200 ticks.  The second
+     // is given index 1 so toggles LED 6 every 400 ticks.
+     for( uxIndex = 0; uxIndex < 2; uxIndex++ )
+     {
+         xCoRoutineCreate( vFlashCoRoutine, 0, uxIndex );
+     }
+ }
+
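The example above discards the value returned by xCoRoutineCreate() and relies on co-routine support being enabled in FreeRTOSConfig.h. Below is a minimal sketch of a creation function that checks for pdPASS; the function name vCreateFlashCoRoutines and the configuration values are illustrative only, not part of the upstream example.

    /* Assumed FreeRTOSConfig.h settings for this sketch:
       configUSE_CO_ROUTINES           1
       configMAX_CO_ROUTINE_PRIORITIES 1
       configUSE_IDLE_HOOK             1   (so vCoRoutineSchedule() can be called from the idle hook) */
    void vCreateFlashCoRoutines( void )
    {
    UBaseType_t uxIndex;

        for( uxIndex = 0; uxIndex < 2; uxIndex++ )
        {
            if( xCoRoutineCreate( vFlashCoRoutine, 0, uxIndex ) != pdPASS )
            {
                /* Creation failed, most likely because memory for the
                co-routine control block could not be allocated. */
            }
        }
    }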
+ * \defgroup xCoRoutineCreate xCoRoutineCreate + * \ingroup Tasks + */ +BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, UBaseType_t uxPriority, UBaseType_t uxIndex ); + + +/** + * croutine. h + *
+ void vCoRoutineSchedule( void );
+ * + * Run a co-routine. + * + * vCoRoutineSchedule() executes the highest priority co-routine that is able + * to run.  The co-routine will execute until it either blocks, yields or is + * preempted by a task.  Co-routines execute cooperatively so one + * co-routine cannot be preempted by another, but can be preempted by a task. + * + * If an application comprises both tasks and co-routines then + * vCoRoutineSchedule should be called from the idle task (in an idle task + * hook). + * + * Example usage: +
+ // This idle task hook will schedule a co-routine each time it is called.
+ // The rest of the idle task will execute between co-routine calls.
+ void vApplicationIdleHook( void )
+ {
+	vCoRoutineSchedule();
+ }
+
+ // Alternatively, if you do not require any other part of the idle task to
+ // execute, the idle task hook can call vCoRoutineSchedule() within an
+ // infinite loop.
+ void vApplicationIdleHook( void )
+ {
+    for( ;; )
+    {
+        vCoRoutineSchedule();
+    }
+ }
+
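Either idle hook variant above is only called if configUSE_IDLE_HOOK is set to 1 in FreeRTOSConfig.h, and the idle task itself only exists once the scheduler has been started. A minimal, illustrative start-up sketch (vTaskStartScheduler() is declared in task.h, outside this file; the creation function is the hypothetical one sketched earlier):

    int main( void )
    {
        /* Create the co-routines first, then start the scheduler.  The idle
        task's hook, shown above, then drives the co-routines. */
        vCreateFlashCoRoutines();
        vTaskStartScheduler();

        /* Should never be reached. */
        for( ;; );
    }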
+ * \defgroup vCoRoutineSchedule vCoRoutineSchedule + * \ingroup Tasks + */ +void vCoRoutineSchedule( void ); + +/** + * croutine. h + *
+ crSTART( CoRoutineHandle_t xHandle );
+ * + * This macro MUST always be called at the start of a co-routine function. + * + * Example usage: +
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static int32_t ulAVariable;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+          // Co-routine functionality goes here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+ * \defgroup crSTART crSTART + * \ingroup Tasks + */ +#define crSTART( pxCRCB ) switch( ( ( CRCB_t * )( pxCRCB ) )->uxState ) { case 0: + +/** + * croutine. h + *
+ crEND();
+ * + * This macro MUST always be called at the end of a co-routine function. + * + * Example usage: +
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static int32_t ulAVariable;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+          // Co-routine functionality goes here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+ * \defgroup crEND crEND + * \ingroup Tasks + */ +#define crEND() } + +/* + * These macros are intended for internal use by the co-routine implementation + * only.  The macros should not be used directly by application writers. + */ +#define crSET_STATE0( xHandle ) ( ( CRCB_t * )( xHandle ) )->uxState = (__LINE__ * 2); return; case (__LINE__ * 2): +#define crSET_STATE1( xHandle ) ( ( CRCB_t * )( xHandle ) )->uxState = ((__LINE__ * 2)+1); return; case ((__LINE__ * 2)+1): + +/** + * croutine. h + *
+ crDELAY( CoRoutineHandle_t xHandle, TickType_t xTicksToDelay );
+ * + * Delay a co-routine for a fixed period of time. + * + * crDELAY can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * @param xHandle The handle of the co-routine to delay. This is the xHandle + * parameter of the co-routine function. + * + * @param xTickToDelay The number of ticks that the co-routine should delay + * for. The actual amount of time this equates to is defined by + * configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant portTICK_PERIOD_MS + * can be used to convert ticks to milliseconds. + * + * Example usage: +
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ // This may not be necessary for const variables.
+ // We are to delay for 200ms.
+ static const TickType_t xDelayTime = 200 / portTICK_PERIOD_MS;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+        // Delay for 200ms.
+        crDELAY( xHandle, xDelayTime );
+
+        // Do something here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
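+
+ // As a small aside: where the pdMS_TO_TICKS() macro is available (it is
+ // provided by projdefs.h in this FreeRTOS version) the 200ms delay used
+ // above can be written without dividing by portTICK_PERIOD_MS:
+ static const TickType_t xDelayTime = pdMS_TO_TICKS( 200 );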
+ * \defgroup crDELAY crDELAY
+ * \ingroup Tasks
+ */
+#define crDELAY( xHandle, xTicksToDelay ) \
+    if( ( xTicksToDelay ) > 0 ) \
+    { \
+        vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \
+    } \
+    crSET_STATE0( ( xHandle ) );
+
+/**
+ *
+ crQUEUE_SEND(
+                  CoRoutineHandle_t xHandle,
+                  QueueHandle_t pxQueue,
+                  void *pvItemToQueue,
+                  TickType_t xTicksToWait,
+                  BaseType_t *pxResult
+             )
+ * + * The macro's crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine + * equivalent to the xQueueSend() and xQueueReceive() functions used by tasks. + * + * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas + * xQueueSend() and xQueueReceive() can only be used from tasks. + * + * crQUEUE_SEND can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xHandle The handle of the calling co-routine. This is the xHandle + * parameter of the co-routine function. + * + * @param pxQueue The handle of the queue on which the data will be posted. + * The handle is obtained as the return value when the queue is created using + * the xQueueCreate() API function. + * + * @param pvItemToQueue A pointer to the data being posted onto the queue. + * The number of bytes of each queued item is specified when the queue is + * created. This number of bytes is copied from pvItemToQueue into the queue + * itself. + * + * @param xTickToDelay The number of ticks that the co-routine should block + * to wait for space to become available on the queue, should space not be + * available immediately. The actual amount of time this equates to is defined + * by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant + * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see example + * below). + * + * @param pxResult The variable pointed to by pxResult will be set to pdPASS if + * data was successfully posted onto the queue, otherwise it will be set to an + * error defined within ProjDefs.h. + * + * Example usage: +
+ // Co-routine function that blocks for a fixed period then posts a number onto
+ // a queue.
+ static void prvCoRoutineFlashTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static BaseType_t xNumberToPost = 0;
+ static BaseType_t xResult;
+
+    // Co-routines must begin with a call to crSTART().
+    crSTART( xHandle );
+
+    for( ;; )
+    {
+        // This assumes the queue has already been created.
+        crQUEUE_SEND( xHandle, xCoRoutineQueue, &xNumberToPost, NO_DELAY, &xResult );
+
+        if( xResult != pdPASS )
+        {
+            // The message was not posted!
+        }
+
+        // Increment the number to be posted onto the queue.
+        xNumberToPost++;
+
+        // Delay for 100 ticks.
+        crDELAY( xHandle, 100 );
+    }
+
+    // Co-routines must end with a call to crEND().
+    crEND();
+ }
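+
+ // The example above assumes xCoRoutineQueue already exists.  A possible way
+ // to create it and the co-routine (the queue length of 8 and the helper
+ // function name are arbitrary choices used purely for illustration):
+ QueueHandle_t xCoRoutineQueue;
+
+ void vCreateQueueAndCoRoutine( void )
+ {
+     // Each item on the queue is a BaseType_t, matching &xNumberToPost above.
+     xCoRoutineQueue = xQueueCreate( 8, sizeof( BaseType_t ) );
+
+     if( xCoRoutineQueue != NULL )
+     {
+         // Create the co-routine shown above at the lowest priority, index 0.
+         xCoRoutineCreate( prvCoRoutineFlashTask, 0, 0 );
+     }
+ }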
+ * \defgroup crQUEUE_SEND crQUEUE_SEND
+ * \ingroup Tasks
+ */
+#define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \
+{ \
+    *( pxResult ) = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), ( xTicksToWait ) ); \
+    if( *( pxResult ) == errQUEUE_BLOCKED ) \
+    { \
+        crSET_STATE0( ( xHandle ) ); \
+        *pxResult = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), 0 ); \
+    } \
+    if( *pxResult == errQUEUE_YIELD ) \
+    { \
+        crSET_STATE1( ( xHandle ) ); \
+        *pxResult = pdPASS; \
+    } \
+}
+
+/**
+ * croutine.h
+ *
+  crQUEUE_RECEIVE(
+                     CoRoutineHandle_t xHandle,
+                     QueueHandle_t pxQueue,
+                     void *pvBuffer,
+                     TickType_t xTicksToWait,
+                     BaseType_t *pxResult
+                 )
+ * + * The macro's crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine + * equivalent to the xQueueSend() and xQueueReceive() functions used by tasks. + * + * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas + * xQueueSend() and xQueueReceive() can only be used from tasks. + * + * crQUEUE_RECEIVE can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xHandle The handle of the calling co-routine. This is the xHandle + * parameter of the co-routine function. + * + * @param pxQueue The handle of the queue from which the data will be received. + * The handle is obtained as the return value when the queue is created using + * the xQueueCreate() API function. + * + * @param pvBuffer The buffer into which the received item is to be copied. + * The number of bytes of each queued item is specified when the queue is + * created. This number of bytes is copied into pvBuffer. + * + * @param xTickToDelay The number of ticks that the co-routine should block + * to wait for data to become available from the queue, should data not be + * available immediately. The actual amount of time this equates to is defined + * by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant + * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see the + * crQUEUE_SEND example). + * + * @param pxResult The variable pointed to by pxResult will be set to pdPASS if + * data was successfully retrieved from the queue, otherwise it will be set to + * an error code as defined within ProjDefs.h. + * + * Example usage: +
+ // A co-routine receives the number of an LED to flash from a queue.  It
+ // blocks on the queue until the number is received.
+ static void prvCoRoutineFlashWorkTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static BaseType_t xResult;
+ static UBaseType_t uxLEDToFlash;
+
+    // All co-routines must start with a call to crSTART().
+    crSTART( xHandle );
+
+    for( ;; )
+    {
+        // Wait for data to become available on the queue.
+        crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxLEDToFlash, portMAX_DELAY, &xResult );
+
+        if( xResult == pdPASS )
+        {
+            // We received the LED to flash - flash it!
+            vParTestToggleLED( uxLEDToFlash );
+        }
+    }
+
+    crEND();
+ }
+ * \defgroup crQUEUE_RECEIVE crQUEUE_RECEIVE
+ * \ingroup Tasks
+ */
+#define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \
+{ \
+    *( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), ( xTicksToWait ) ); \
+    if( *( pxResult ) == errQUEUE_BLOCKED ) \
+    { \
+        crSET_STATE0( ( xHandle ) ); \
+        *( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), 0 ); \
+    } \
+    if( *( pxResult ) == errQUEUE_YIELD ) \
+    { \
+        crSET_STATE1( ( xHandle ) ); \
+        *( pxResult ) = pdPASS; \
+    } \
+}
+
+/**
+ * croutine.h
+ *
+  crQUEUE_SEND_FROM_ISR(
+                            QueueHandle_t pxQueue,
+                            void *pvItemToQueue,
+                            BaseType_t xCoRoutinePreviouslyWoken
+                       )
+ * + * The macro's crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the + * co-routine equivalent to the xQueueSendFromISR() and xQueueReceiveFromISR() + * functions used by tasks. + * + * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to + * pass data between a co-routine and and ISR, whereas xQueueSendFromISR() and + * xQueueReceiveFromISR() can only be used to pass data between a task and and + * ISR. + * + * crQUEUE_SEND_FROM_ISR can only be called from an ISR to send data to a queue + * that is being used from within a co-routine. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xCoRoutinePreviouslyWoken This is included so an ISR can post onto + * the same queue multiple times from a single interrupt. The first call + * should always pass in pdFALSE. Subsequent calls should pass in + * the value returned from the previous call. + * + * @return pdTRUE if a co-routine was woken by posting onto the queue. This is + * used by the ISR to determine if a context switch may be required following + * the ISR. + * + * Example usage: +
+ // A co-routine that blocks on a queue waiting for characters to be received.
+ static void vReceivingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ char cRxedChar;
+ BaseType_t xResult;
+
+     // All co-routines must start with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Wait for data to become available on the queue.  This assumes the
+         // queue xCommsRxQueue has already been created!
+         crQUEUE_RECEIVE( xHandle, xCommsRxQueue, &cRxedChar, portMAX_DELAY, &xResult );
+
+         // Was a character received?
+         if( xResult == pdPASS )
+         {
+             // Process the character here.
+         }
+     }
+
+     // All co-routines must end with a call to crEND().
+     crEND();
+ }
+
+ // An ISR that uses a queue to send characters received on a serial port to
+ // a co-routine.
+ void vUART_ISR( void )
+ {
+ char cRxedChar;
+ BaseType_t xCRWokenByPost = pdFALSE;
+
+     // We loop around reading characters until there are none left in the UART.
+     while( UART_RX_REG_NOT_EMPTY() )
+     {
+         // Obtain the character from the UART.
+         cRxedChar = UART_RX_REG;
+
+         // Post the character onto a queue.  xCRWokenByPost will be pdFALSE
+         // the first time around the loop.  If the post causes a co-routine
+         // to be woken (unblocked) then xCRWokenByPost will be set to pdTRUE.
+         // In this manner we can ensure that if more than one co-routine is
+         // blocked on the queue only one is woken by this ISR no matter how
+         // many characters are posted to the queue.
+         xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCRWokenByPost );
+     }
+ }
+ * \defgroup crQUEUE_SEND_FROM_ISR crQUEUE_SEND_FROM_ISR
+ * \ingroup Tasks
+ */
+#define crQUEUE_SEND_FROM_ISR( pxQueue, pvItemToQueue, xCoRoutinePreviouslyWoken ) xQueueCRSendFromISR( ( pxQueue ), ( pvItemToQueue ), ( xCoRoutinePreviouslyWoken ) )
+
+
+/**
+ * croutine.h
+ *
+  crQUEUE_RECEIVE_FROM_ISR(
+                            QueueHandle_t pxQueue,
+                            void *pvBuffer,
+                            BaseType_t * pxCoRoutineWoken
+                       )
+ * + * The macro's crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the + * co-routine equivalent to the xQueueSendFromISR() and xQueueReceiveFromISR() + * functions used by tasks. + * + * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to + * pass data between a co-routine and and ISR, whereas xQueueSendFromISR() and + * xQueueReceiveFromISR() can only be used to pass data between a task and and + * ISR. + * + * crQUEUE_RECEIVE_FROM_ISR can only be called from an ISR to receive data + * from a queue that is being used from within a co-routine (a co-routine + * posted to the queue). + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvBuffer A pointer to a buffer into which the received item will be + * placed. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from the queue into + * pvBuffer. + * + * @param pxCoRoutineWoken A co-routine may be blocked waiting for space to become + * available on the queue. If crQUEUE_RECEIVE_FROM_ISR causes such a + * co-routine to unblock *pxCoRoutineWoken will get set to pdTRUE, otherwise + * *pxCoRoutineWoken will remain unchanged. + * + * @return pdTRUE an item was successfully received from the queue, otherwise + * pdFALSE. + * + * Example usage: +
+ // A co-routine that posts a character to a queue then blocks for a fixed
+ // period.  The character is incremented each time.
+ static void vSendingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // cChar holds its value while this co-routine is blocked and must therefore
+ // be declared static.
+ static char cCharToTx = 'a';
+ BaseType_t xResult;
+
+     // All co-routines must start with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Send the next character to the queue.
+         crQUEUE_SEND( xHandle, xCoRoutineQueue, &cCharToTx, NO_DELAY, &xResult );
+
+         if( xResult == pdPASS )
+         {
+             // The character was successfully posted to the queue.
+         }
+		 else
+		 {
+			// Could not post the character to the queue.
+		 }
+
+         // Enable the UART Tx interrupt to cause an interrupt in this
+		 // hypothetical UART.  The interrupt will obtain the character
+		 // from the queue and send it.
+		 ENABLE_TX_INTERRUPT();
+
+		 // Increment to the next character then block for a fixed period.
+		 // cCharToTx will maintain its value across the delay as it is
+		 // declared static.
+		 cCharToTx++;
+		 if( cCharToTx > 'x' )
+		 {
+			cCharToTx = 'a';
+		 }
+		 crDELAY( xHandle, 100 );
+     }
+
+     // All co-routines must end with a call to crEND().
+     crEND();
+ }
+
+ // An ISR that uses a queue to receive characters to send on a UART.
+ void vUART_ISR( void )
+ {
+ char cCharToTx;
+ BaseType_t xCRWokenByPost = pdFALSE;
+
+     while( UART_TX_REG_EMPTY() )
+     {
+         // Are there any characters in the queue waiting to be sent?
+		 // xCRWokenByPost will automatically be set to pdTRUE if a co-routine
+		 // is woken by the post - ensuring that only a single co-routine is
+		 // woken no matter how many times we go around this loop.
+         if( crQUEUE_RECEIVE_FROM_ISR( xCoRoutineQueue, &cCharToTx, &xCRWokenByPost ) )
+		 {
+			 SEND_CHARACTER( cCharToTx );
+		 }
+     }
+ }
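+
+ // A possible way of wiring the two routines above together.  The queue
+ // length of 16 and the vSetupUART() routine are placeholders used purely
+ // for illustration; vCoRoutineSchedule() must still be called repeatedly,
+ // typically from vApplicationIdleHook(), for the co-routine to run.
+ void vStartUARTTxCoRoutine( void )
+ {
+     xCoRoutineQueue = xQueueCreate( 16, sizeof( char ) );
+
+     if( xCoRoutineQueue != NULL )
+     {
+         vSetupUART();
+         xCoRoutineCreate( vSendingCoRoutine, 0, 0 );
+     }
+ }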
+ * \defgroup crQUEUE_RECEIVE_FROM_ISR crQUEUE_RECEIVE_FROM_ISR + * \ingroup Tasks + */ +#define crQUEUE_RECEIVE_FROM_ISR( pxQueue, pvBuffer, pxCoRoutineWoken ) xQueueCRReceiveFromISR( ( pxQueue ), ( pvBuffer ), ( pxCoRoutineWoken ) ) + +/* + * This function is intended for internal use by the co-routine macros only. + * The macro nature of the co-routine implementation requires that the + * prototype appears here. The function should not be used by application + * writers. + * + * Removes the current co-routine from its ready list and places it in the + * appropriate delayed list. + */ +void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, List_t *pxEventList ); + +/* + * This function is intended for internal use by the queue implementation only. + * The function should not be used by application writers. + * + * Removes the highest priority co-routine from the event list and places it in + * the pending ready list. + */ +BaseType_t xCoRoutineRemoveFromEventList( const List_t *pxEventList ); + +#ifdef __cplusplus +} +#endif + +#endif /* CO_ROUTINE_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h new file mode 100644 index 000000000..9cece988f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h @@ -0,0 +1,279 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef DEPRECATED_DEFINITIONS_H +#define DEPRECATED_DEFINITIONS_H + + +/* Each FreeRTOS port has a unique portmacro.h header file. Originally a +pre-processor definition was used to ensure the pre-processor found the correct +portmacro.h file for the port being used. That scheme was deprecated in favour +of setting the compiler's include path such that it found the correct +portmacro.h file - removing the need for the constant and allowing the +portmacro.h file to be located anywhere in relation to the port being used. The +definitions below remain in the code for backward compatibility only. New +projects should not use them. 
*/ + +#ifdef OPEN_WATCOM_INDUSTRIAL_PC_PORT + #include "..\..\Source\portable\owatcom\16bitdos\pc\portmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef OPEN_WATCOM_FLASH_LITE_186_PORT + #include "..\..\Source\portable\owatcom\16bitdos\flsh186\portmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef GCC_MEGA_AVR + #include "../portable/GCC/ATMega323/portmacro.h" +#endif + +#ifdef IAR_MEGA_AVR + #include "../portable/IAR/ATMega323/portmacro.h" +#endif + +#ifdef MPLAB_PIC24_PORT + #include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h" +#endif + +#ifdef MPLAB_DSPIC_PORT + #include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h" +#endif + +#ifdef MPLAB_PIC18F_PORT + #include "../../Source/portable/MPLAB/PIC18F/portmacro.h" +#endif + +#ifdef MPLAB_PIC32MX_PORT + #include "../../Source/portable/MPLAB/PIC32MX/portmacro.h" +#endif + +#ifdef _FEDPICC + #include "libFreeRTOS/Include/portmacro.h" +#endif + +#ifdef SDCC_CYGNAL + #include "../../Source/portable/SDCC/Cygnal/portmacro.h" +#endif + +#ifdef GCC_ARM7 + #include "../../Source/portable/GCC/ARM7_LPC2000/portmacro.h" +#endif + +#ifdef GCC_ARM7_ECLIPSE + #include "portmacro.h" +#endif + +#ifdef ROWLEY_LPC23xx + #include "../../Source/portable/GCC/ARM7_LPC23xx/portmacro.h" +#endif + +#ifdef IAR_MSP430 + #include "..\..\Source\portable\IAR\MSP430\portmacro.h" +#endif + +#ifdef GCC_MSP430 + #include "../../Source/portable/GCC/MSP430F449/portmacro.h" +#endif + +#ifdef ROWLEY_MSP430 + #include "../../Source/portable/Rowley/MSP430F449/portmacro.h" +#endif + +#ifdef ARM7_LPC21xx_KEIL_RVDS + #include "..\..\Source\portable\RVDS\ARM7_LPC21xx\portmacro.h" +#endif + +#ifdef SAM7_GCC + #include "../../Source/portable/GCC/ARM7_AT91SAM7S/portmacro.h" +#endif + +#ifdef SAM7_IAR + #include "..\..\Source\portable\IAR\AtmelSAM7S64\portmacro.h" +#endif + +#ifdef SAM9XE_IAR + #include "..\..\Source\portable\IAR\AtmelSAM9XE\portmacro.h" +#endif + +#ifdef LPC2000_IAR + #include "..\..\Source\portable\IAR\LPC2000\portmacro.h" +#endif + +#ifdef STR71X_IAR + #include "..\..\Source\portable\IAR\STR71x\portmacro.h" +#endif + +#ifdef STR75X_IAR + #include "..\..\Source\portable\IAR\STR75x\portmacro.h" +#endif + +#ifdef STR75X_GCC + #include "..\..\Source\portable\GCC\STR75x\portmacro.h" +#endif + +#ifdef STR91X_IAR + #include "..\..\Source\portable\IAR\STR91x\portmacro.h" +#endif + +#ifdef GCC_H8S + #include "../../Source/portable/GCC/H8S2329/portmacro.h" +#endif + +#ifdef GCC_AT91FR40008 + #include "../../Source/portable/GCC/ARM7_AT91FR40008/portmacro.h" +#endif + +#ifdef RVDS_ARMCM3_LM3S102 + #include "../../Source/portable/RVDS/ARM_CM3/portmacro.h" +#endif + +#ifdef GCC_ARMCM3_LM3S102 + #include "../../Source/portable/GCC/ARM_CM3/portmacro.h" +#endif + +#ifdef GCC_ARMCM3 + #include "../../Source/portable/GCC/ARM_CM3/portmacro.h" +#endif + +#ifdef IAR_ARM_CM3 + #include "../../Source/portable/IAR/ARM_CM3/portmacro.h" +#endif + +#ifdef IAR_ARMCM3_LM + #include "../../Source/portable/IAR/ARM_CM3/portmacro.h" +#endif + +#ifdef HCS12_CODE_WARRIOR + #include "../../Source/portable/CodeWarrior/HCS12/portmacro.h" +#endif + +#ifdef MICROBLAZE_GCC + #include "../../Source/portable/GCC/MicroBlaze/portmacro.h" +#endif + +#ifdef TERN_EE + #include "..\..\Source\portable\Paradigm\Tern_EE\small\portmacro.h" +#endif + +#ifdef GCC_HCS12 + #include "../../Source/portable/GCC/HCS12/portmacro.h" +#endif + +#ifdef GCC_MCF5235 + #include "../../Source/portable/GCC/MCF5235/portmacro.h" +#endif + +#ifdef COLDFIRE_V2_GCC + #include 
"../../../Source/portable/GCC/ColdFire_V2/portmacro.h" +#endif + +#ifdef COLDFIRE_V2_CODEWARRIOR + #include "../../Source/portable/CodeWarrior/ColdFire_V2/portmacro.h" +#endif + +#ifdef GCC_PPC405 + #include "../../Source/portable/GCC/PPC405_Xilinx/portmacro.h" +#endif + +#ifdef GCC_PPC440 + #include "../../Source/portable/GCC/PPC440_Xilinx/portmacro.h" +#endif + +#ifdef _16FX_SOFTUNE + #include "..\..\Source\portable\Softune\MB96340\portmacro.h" +#endif + +#ifdef BCC_INDUSTRIAL_PC_PORT + /* A short file name has to be used in place of the normal + FreeRTOSConfig.h when using the Borland compiler. */ + #include "frconfig.h" + #include "..\portable\BCC\16BitDOS\PC\prtmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef BCC_FLASH_LITE_186_PORT + /* A short file name has to be used in place of the normal + FreeRTOSConfig.h when using the Borland compiler. */ + #include "frconfig.h" + #include "..\portable\BCC\16BitDOS\flsh186\prtmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef __GNUC__ + #ifdef __AVR32_AVR32A__ + #include "portmacro.h" + #endif +#endif + +#ifdef __ICCAVR32__ + #ifdef __CORE__ + #if __CORE__ == __AVR32A__ + #include "portmacro.h" + #endif + #endif +#endif + +#ifdef __91467D + #include "portmacro.h" +#endif + +#ifdef __96340 + #include "portmacro.h" +#endif + + +#ifdef __IAR_V850ES_Fx3__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Jx3__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Jx3_L__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Jx2__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Hx2__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_78K0R_Kx3__ + #include "../../Source/portable/IAR/78K0R/portmacro.h" +#endif + +#ifdef __IAR_78K0R_Kx3L__ + #include "../../Source/portable/IAR/78K0R/portmacro.h" +#endif + +#endif /* DEPRECATED_DEFINITIONS_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h new file mode 100644 index 000000000..1f38bdb76 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h @@ -0,0 +1,757 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef EVENT_GROUPS_H +#define EVENT_GROUPS_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h" must appear in source files before "include event_groups.h" +#endif + +/* FreeRTOS includes. */ +#include "timers.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * An event group is a collection of bits to which an application can assign a + * meaning. For example, an application may create an event group to convey + * the status of various CAN bus related events in which bit 0 might mean "A CAN + * message has been received and is ready for processing", bit 1 might mean "The + * application has queued a message that is ready for sending onto the CAN + * network", and bit 2 might mean "It is time to send a SYNC message onto the + * CAN network" etc. A task can then test the bit values to see which events + * are active, and optionally enter the Blocked state to wait for a specified + * bit or a group of specified bits to be active. To continue the CAN bus + * example, a CAN controlling task can enter the Blocked state (and therefore + * not consume any processing time) until either bit 0, bit 1 or bit 2 are + * active, at which time the bit that was actually active would inform the task + * which action it had to take (process a received message, send a message, or + * send a SYNC). + * + * The event groups implementation contains intelligence to avoid race + * conditions that would otherwise occur were an application to use a simple + * variable for the same purpose. This is particularly important with respect + * to when a bit within an event group is to be cleared, and when bits have to + * be set and then tested atomically - as is the case where event groups are + * used to create a synchronisation point between multiple tasks (a + * 'rendezvous'). + * + * \defgroup EventGroup + */ + + + +/** + * event_groups.h + * + * Type by which event groups are referenced. For example, a call to + * xEventGroupCreate() returns an EventGroupHandle_t variable that can then + * be used as a parameter to other event group functions. + * + * \defgroup EventGroupHandle_t EventGroupHandle_t + * \ingroup EventGroup + */ +struct EventGroupDef_t; +typedef struct EventGroupDef_t * EventGroupHandle_t; + +/* + * The type that holds event bits always matches TickType_t - therefore the + * number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1, + * 32 bits if set to 0. + * + * \defgroup EventBits_t EventBits_t + * \ingroup EventGroup + */ +typedef TickType_t EventBits_t; + +/** + * event_groups.h + *
+ EventGroupHandle_t xEventGroupCreate( void );
+ 
+ *
+ * Create a new event group.
+ *
+ * Internally, within the FreeRTOS implementation, event groups use a [small]
+ * block of memory, in which the event group's structure is stored.  If an
+ * event group is created using xEventGroupCreate() then the required memory is
+ * automatically dynamically allocated inside the xEventGroupCreate() function
+ * (see http://www.freertos.org/a00111.html).  If an event group is created
+ * using xEventGroupCreateStatic() then the application writer must instead
+ * provide the memory that will get used by the event group.
+ * xEventGroupCreateStatic() therefore allows an event group to be created
+ * without using any dynamic memory allocation.
+ *
+ * Although event groups are not related to ticks, for internal implementation
+ * reasons the number of bits available for use in an event group is dependent
+ * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h.  If
+ * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
+ * 0 to bit 7).  If configUSE_16_BIT_TICKS is set to 0 then each event group has
+ * 24 usable bits (bit 0 to bit 23).  The EventBits_t type is used to store
+ * event bits within an event group.
+ *
+ * @return If the event group was created then a handle to the event group is
+ * returned.  If there was insufficient FreeRTOS heap available to create the
+ * event group then NULL is returned.  See http://www.freertos.org/a00111.html
+ *
+ * Example usage:
+
+	// Declare a variable to hold the created event group.
+	EventGroupHandle_t xCreatedEventGroup;
+
+	// Attempt to create the event group.
+	xCreatedEventGroup = xEventGroupCreate();
+
+	// Was the event group created successfully?
+	if( xCreatedEventGroup == NULL )
+	{
+		// The event group was not created because there was insufficient
+		// FreeRTOS heap available.
+	}
+	else
+	{
+		// The event group was created.
+	}
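+
+	// If the event group is later no longer required it can be deleted again.
+	// Any tasks still blocked on it are unblocked and receive 0 as its value
+	// (see vEventGroupDelete() towards the end of this file).
+	if( xCreatedEventGroup != NULL )
+	{
+		vEventGroupDelete( xCreatedEventGroup );
+	}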
+   
+ * \defgroup xEventGroupCreate xEventGroupCreate + * \ingroup EventGroup + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + EventGroupHandle_t xEventGroupCreate( void ) PRIVILEGED_FUNCTION; +#endif + +/** + * event_groups.h + *
+ EventGroupHandle_t xEventGroupCreateStatic( EventGroupHandle_t * pxEventGroupBuffer );
+ 
+ * + * Create a new event group. + * + * Internally, within the FreeRTOS implementation, event groups use a [small] + * block of memory, in which the event group's structure is stored. If an event + * groups is created using xEventGropuCreate() then the required memory is + * automatically dynamically allocated inside the xEventGroupCreate() function. + * (see http://www.freertos.org/a00111.html). If an event group is created + * using xEventGropuCreateStatic() then the application writer must instead + * provide the memory that will get used by the event group. + * xEventGroupCreateStatic() therefore allows an event group to be created + * without using any dynamic memory allocation. + * + * Although event groups are not related to ticks, for internal implementation + * reasons the number of bits available for use in an event group is dependent + * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If + * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit + * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has + * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store + * event bits within an event group. + * + * @param pxEventGroupBuffer pxEventGroupBuffer must point to a variable of type + * StaticEventGroup_t, which will be then be used to hold the event group's data + * structures, removing the need for the memory to be allocated dynamically. + * + * @return If the event group was created then a handle to the event group is + * returned. If pxEventGroupBuffer was NULL then NULL is returned. + * + * Example usage: +
+	// StaticEventGroup_t is a publicly accessible structure that has the same
+	// size and alignment requirements as the real event group structure.  It is
+	// provided as a mechanism for applications to know the size of the event
+	// group (which is dependent on the architecture and configuration file
+	// settings) without breaking the strict data hiding policy by exposing the
+	// real event group internals.  This StaticEventGroup_t variable is passed
+	// into the xEventGroupCreateStatic() function and is used to store
+	// the event group's data structures
+	StaticEventGroup_t xEventGroupBuffer;
+
+	// The handle used to refer to the event group once it has been created.
+	EventGroupHandle_t xEventGroup;
+
+	// Create the event group without dynamically allocating any memory.
+	xEventGroup = xEventGroupCreateStatic( &xEventGroupBuffer );
+   
+ */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) PRIVILEGED_FUNCTION; +#endif + +/** + * event_groups.h + *
+	EventBits_t xEventGroupWaitBits( 	EventGroupHandle_t xEventGroup,
+										const EventBits_t uxBitsToWaitFor,
+										const BaseType_t xClearOnExit,
+										const BaseType_t xWaitForAllBits,
+										const TickType_t xTicksToWait );
+ 
+ * + * [Potentially] block to wait for one or more bits to be set within a + * previously created event group. + * + * This function cannot be called from an interrupt. + * + * @param xEventGroup The event group in which the bits are being tested. The + * event group must have previously been created using a call to + * xEventGroupCreate(). + * + * @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test + * inside the event group. For example, to wait for bit 0 and/or bit 2 set + * uxBitsToWaitFor to 0x05. To wait for bits 0 and/or bit 1 and/or bit 2 set + * uxBitsToWaitFor to 0x07. Etc. + * + * @param xClearOnExit If xClearOnExit is set to pdTRUE then any bits within + * uxBitsToWaitFor that are set within the event group will be cleared before + * xEventGroupWaitBits() returns if the wait condition was met (if the function + * returns for a reason other than a timeout). If xClearOnExit is set to + * pdFALSE then the bits set in the event group are not altered when the call to + * xEventGroupWaitBits() returns. + * + * @param xWaitForAllBits If xWaitForAllBits is set to pdTRUE then + * xEventGroupWaitBits() will return when either all the bits in uxBitsToWaitFor + * are set or the specified block time expires. If xWaitForAllBits is set to + * pdFALSE then xEventGroupWaitBits() will return when any one of the bits set + * in uxBitsToWaitFor is set or the specified block time expires. The block + * time is specified by the xTicksToWait parameter. + * + * @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait + * for one/all (depending on the xWaitForAllBits value) of the bits specified by + * uxBitsToWaitFor to become set. + * + * @return The value of the event group at the time either the bits being waited + * for became set, or the block time expired. Test the return value to know + * which bits were set. If xEventGroupWaitBits() returned because its timeout + * expired then not all the bits being waited for will be set. If + * xEventGroupWaitBits() returned because the bits it was waiting for were set + * then the returned value is the event group value before any bits were + * automatically cleared in the case that xClearOnExit parameter was set to + * pdTRUE. + * + * Example usage: +
+   #define BIT_0	( 1 << 0 )
+   #define BIT_4	( 1 << 4 )
+
+   void aFunction( EventGroupHandle_t xEventGroup )
+   {
+   EventBits_t uxBits;
+   const TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS;
+
+		// Wait a maximum of 100ms for either bit 0 or bit 4 to be set within
+		// the event group.  Clear the bits before exiting.
+		uxBits = xEventGroupWaitBits(
+					xEventGroup,	// The event group being tested.
+					BIT_0 | BIT_4,	// The bits within the event group to wait for.
+					pdTRUE,			// BIT_0 and BIT_4 should be cleared before returning.
+					pdFALSE,		// Don't wait for both bits, either bit will do.
+					xTicksToWait );	// Wait a maximum of 100ms for either bit to be set.
+
+		if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
+		{
+			// xEventGroupWaitBits() returned because both bits were set.
+		}
+		else if( ( uxBits & BIT_0 ) != 0 )
+		{
+			// xEventGroupWaitBits() returned because just BIT_0 was set.
+		}
+		else if( ( uxBits & BIT_4 ) != 0 )
+		{
+			// xEventGroupWaitBits() returned because just BIT_4 was set.
+		}
+		else
+		{
+			// xEventGroupWaitBits() returned because xTicksToWait ticks passed
+			// without either BIT_0 or BIT_4 becoming set.
+		}
+   }
+   
+ * \defgroup xEventGroupWaitBits xEventGroupWaitBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+	EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
+ 
+ * + * Clear bits within an event group. This function cannot be called from an + * interrupt. + * + * @param xEventGroup The event group in which the bits are to be cleared. + * + * @param uxBitsToClear A bitwise value that indicates the bit or bits to clear + * in the event group. For example, to clear bit 3 only, set uxBitsToClear to + * 0x08. To clear bit 3 and bit 0 set uxBitsToClear to 0x09. + * + * @return The value of the event group before the specified bits were cleared. + * + * Example usage: +
+   #define BIT_0	( 1 << 0 )
+   #define BIT_4	( 1 << 4 )
+
+   void aFunction( EventGroupHandle_t xEventGroup )
+   {
+   EventBits_t uxBits;
+
+		// Clear bit 0 and bit 4 in xEventGroup.
+		uxBits = xEventGroupClearBits(
+								xEventGroup,	// The event group being updated.
+								BIT_0 | BIT_4 );// The bits being cleared.
+
+		if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
+		{
+			// Both bit 0 and bit 4 were set before xEventGroupClearBits() was
+			// called.  Both will now be clear (not set).
+		}
+		else if( ( uxBits & BIT_0 ) != 0 )
+		{
+			// Bit 0 was set before xEventGroupClearBits() was called.  It will
+			// now be clear.
+		}
+		else if( ( uxBits & BIT_4 ) != 0 )
+		{
+			// Bit 4 was set before xEventGroupClearBits() was called.  It will
+			// now be clear.
+		}
+		else
+		{
+			// Neither bit 0 nor bit 4 were set in the first place.
+		}
+   }
+   
+ * \defgroup xEventGroupClearBits xEventGroupClearBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+	BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
+ 
+ * + * A version of xEventGroupClearBits() that can be called from an interrupt. + * + * Setting bits in an event group is not a deterministic operation because there + * are an unknown number of tasks that may be waiting for the bit or bits being + * set. FreeRTOS does not allow nondeterministic operations to be performed + * while interrupts are disabled, so protects event groups that are accessed + * from tasks by suspending the scheduler rather than disabling interrupts. As + * a result event groups cannot be accessed directly from an interrupt service + * routine. Therefore xEventGroupClearBitsFromISR() sends a message to the + * timer task to have the clear operation performed in the context of the timer + * task. + * + * @param xEventGroup The event group in which the bits are to be cleared. + * + * @param uxBitsToClear A bitwise value that indicates the bit or bits to clear. + * For example, to clear bit 3 only, set uxBitsToClear to 0x08. To clear bit 3 + * and bit 0 set uxBitsToClear to 0x09. + * + * @return If the request to execute the function was posted successfully then + * pdPASS is returned, otherwise pdFALSE is returned. pdFALSE will be returned + * if the timer service queue was full. + * + * Example usage: +
+   #define BIT_0	( 1 << 0 )
+   #define BIT_4	( 1 << 4 )
+
+   // An event group which it is assumed has already been created by a call to
+   // xEventGroupCreate().
+   EventGroupHandle_t xEventGroup;
+
+   void anInterruptHandler( void )
+   {
+   BaseType_t xResult;
+
+		// Clear bit 0 and bit 4 in xEventGroup.
+		xResult = xEventGroupClearBitsFromISR(
+							xEventGroup,	 // The event group being updated.
+							BIT_0 | BIT_4 ); // The bits being cleared.
+
+		if( xResult == pdPASS )
+		{
+			// The message was posted successfully.
+		}
+  }
+   
+ * \defgroup xEventGroupClearBitsFromISR xEventGroupClearBitsFromISR + * \ingroup EventGroup + */ +#if( configUSE_TRACE_FACILITY == 1 ) + BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; +#else + #define xEventGroupClearBitsFromISR( xEventGroup, uxBitsToClear ) xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL ) +#endif + +/** + * event_groups.h + *
+	EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
+ 
+ * + * Set bits within an event group. + * This function cannot be called from an interrupt. xEventGroupSetBitsFromISR() + * is a version that can be called from an interrupt. + * + * Setting bits in an event group will automatically unblock tasks that are + * blocked waiting for the bits. + * + * @param xEventGroup The event group in which the bits are to be set. + * + * @param uxBitsToSet A bitwise value that indicates the bit or bits to set. + * For example, to set bit 3 only, set uxBitsToSet to 0x08. To set bit 3 + * and bit 0 set uxBitsToSet to 0x09. + * + * @return The value of the event group at the time the call to + * xEventGroupSetBits() returns. There are two reasons why the returned value + * might have the bits specified by the uxBitsToSet parameter cleared. First, + * if setting a bit results in a task that was waiting for the bit leaving the + * blocked state then it is possible the bit will be cleared automatically + * (see the xClearBitOnExit parameter of xEventGroupWaitBits()). Second, any + * unblocked (or otherwise Ready state) task that has a priority above that of + * the task that called xEventGroupSetBits() will execute and may change the + * event group value before the call to xEventGroupSetBits() returns. + * + * Example usage: +
+   #define BIT_0	( 1 << 0 )
+   #define BIT_4	( 1 << 4 )
+
+   void aFunction( EventGroupHandle_t xEventGroup )
+   {
+   EventBits_t uxBits;
+
+		// Set bit 0 and bit 4 in xEventGroup.
+		uxBits = xEventGroupSetBits(
+							xEventGroup,	// The event group being updated.
+							BIT_0 | BIT_4 );// The bits being set.
+
+		if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
+		{
+			// Both bit 0 and bit 4 remained set when the function returned.
+		}
+		else if( ( uxBits & BIT_0 ) != 0 )
+		{
+			// Bit 0 remained set when the function returned, but bit 4 was
+			// cleared.  It might be that bit 4 was cleared automatically as a
+			// task that was waiting for bit 4 was removed from the Blocked
+			// state.
+		}
+		else if( ( uxBits & BIT_4 ) != 0 )
+		{
+			// Bit 4 remained set when the function returned, but bit 0 was
+			// cleared.  It might be that bit 0 was cleared automatically as a
+			// task that was waiting for bit 0 was removed from the Blocked
+			// state.
+		}
+		else
+		{
+			// Neither bit 0 nor bit 4 remained set.  It might be that a task
+			// was waiting for both of the bits to be set, and the bits were
+			// cleared as the task left the Blocked state.
+		}
+   }
+   
+ * \defgroup xEventGroupSetBits xEventGroupSetBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+	BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken );
+ 
+ * + * A version of xEventGroupSetBits() that can be called from an interrupt. + * + * Setting bits in an event group is not a deterministic operation because there + * are an unknown number of tasks that may be waiting for the bit or bits being + * set. FreeRTOS does not allow nondeterministic operations to be performed in + * interrupts or from critical sections. Therefore xEventGroupSetBitsFromISR() + * sends a message to the timer task to have the set operation performed in the + * context of the timer task - where a scheduler lock is used in place of a + * critical section. + * + * @param xEventGroup The event group in which the bits are to be set. + * + * @param uxBitsToSet A bitwise value that indicates the bit or bits to set. + * For example, to set bit 3 only, set uxBitsToSet to 0x08. To set bit 3 + * and bit 0 set uxBitsToSet to 0x09. + * + * @param pxHigherPriorityTaskWoken As mentioned above, calling this function + * will result in a message being sent to the timer daemon task. If the + * priority of the timer daemon task is higher than the priority of the + * currently running task (the task the interrupt interrupted) then + * *pxHigherPriorityTaskWoken will be set to pdTRUE by + * xEventGroupSetBitsFromISR(), indicating that a context switch should be + * requested before the interrupt exits. For that reason + * *pxHigherPriorityTaskWoken must be initialised to pdFALSE. See the + * example code below. + * + * @return If the request to execute the function was posted successfully then + * pdPASS is returned, otherwise pdFALSE is returned. pdFALSE will be returned + * if the timer service queue was full. + * + * Example usage: +
+   #define BIT_0	( 1 << 0 )
+   #define BIT_4	( 1 << 4 )
+
+   // An event group which it is assumed has already been created by a call to
+   // xEventGroupCreate().
+   EventGroupHandle_t xEventGroup;
+
+   void anInterruptHandler( void )
+   {
+   BaseType_t xHigherPriorityTaskWoken, xResult;
+
+		// xHigherPriorityTaskWoken must be initialised to pdFALSE.
+		xHigherPriorityTaskWoken = pdFALSE;
+
+		// Set bit 0 and bit 4 in xEventGroup.
+		xResult = xEventGroupSetBitsFromISR(
+							xEventGroup,	// The event group being updated.
+							BIT_0 | BIT_4,  // The bits being set.
+							&xHigherPriorityTaskWoken );
+
+		// Was the message posted successfully?
+		if( xResult == pdPASS )
+		{
+			// If xHigherPriorityTaskWoken is now set to pdTRUE then a context
+			// switch should be requested.  The macro used is port specific and
+			// will be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() -
+			// refer to the documentation page for the port being used.
+			portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+		}
+  }
+   
+ * \defgroup xEventGroupSetBitsFromISR xEventGroupSetBitsFromISR + * \ingroup EventGroup + */ +#if( configUSE_TRACE_FACILITY == 1 ) + BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +#else + #define xEventGroupSetBitsFromISR( xEventGroup, uxBitsToSet, pxHigherPriorityTaskWoken ) xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken ) +#endif + +/** + * event_groups.h + *
+	EventBits_t xEventGroupSync(	EventGroupHandle_t xEventGroup,
+									const EventBits_t uxBitsToSet,
+									const EventBits_t uxBitsToWaitFor,
+									TickType_t xTicksToWait );
+ 
+ * + * Atomically set bits within an event group, then wait for a combination of + * bits to be set within the same event group. This functionality is typically + * used to synchronise multiple tasks, where each task has to wait for the other + * tasks to reach a synchronisation point before proceeding. + * + * This function cannot be used from an interrupt. + * + * The function will return before its block time expires if the bits specified + * by the uxBitsToWait parameter are set, or become set within that time. In + * this case all the bits specified by uxBitsToWait will be automatically + * cleared before the function returns. + * + * @param xEventGroup The event group in which the bits are being tested. The + * event group must have previously been created using a call to + * xEventGroupCreate(). + * + * @param uxBitsToSet The bits to set in the event group before determining + * if, and possibly waiting for, all the bits specified by the uxBitsToWait + * parameter are set. + * + * @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test + * inside the event group. For example, to wait for bit 0 and bit 2 set + * uxBitsToWaitFor to 0x05. To wait for bits 0 and bit 1 and bit 2 set + * uxBitsToWaitFor to 0x07. Etc. + * + * @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait + * for all of the bits specified by uxBitsToWaitFor to become set. + * + * @return The value of the event group at the time either the bits being waited + * for became set, or the block time expired. Test the return value to know + * which bits were set. If xEventGroupSync() returned because its timeout + * expired then not all the bits being waited for will be set. If + * xEventGroupSync() returned because all the bits it was waiting for were + * set then the returned value is the event group value before any bits were + * automatically cleared. + * + * Example usage: +
+ // Bits used by the three tasks.
+ #define TASK_0_BIT		( 1 << 0 )
+ #define TASK_1_BIT		( 1 << 1 )
+ #define TASK_2_BIT		( 1 << 2 )
+
+ #define ALL_SYNC_BITS ( TASK_0_BIT | TASK_1_BIT | TASK_2_BIT )
+
+ // Use an event group to synchronise three tasks.  It is assumed this event
+ // group has already been created elsewhere.
+ EventGroupHandle_t xEventBits;
+
+ void vTask0( void *pvParameters )
+ {
+ EventBits_t uxReturn;
+ TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS;
+
+	 for( ;; )
+	 {
+		// Perform task functionality here.
+
+		// Set bit 0 in the event flag to note this task has reached the
+		// sync point.  The other two tasks will set the other two bits defined
+		// by ALL_SYNC_BITS.  All three tasks have reached the synchronisation
+		// point when all the ALL_SYNC_BITS are set.  Wait a maximum of 100ms
+		// for this to happen.
+		uxReturn = xEventGroupSync( xEventBits, TASK_0_BIT, ALL_SYNC_BITS, xTicksToWait );
+
+		if( ( uxReturn & ALL_SYNC_BITS ) == ALL_SYNC_BITS )
+		{
+			// All three tasks reached the synchronisation point before the call
+			// to xEventGroupSync() timed out.
+		}
+	}
+ }
+
+ void vTask1( void *pvParameters )
+ {
+	 for( ;; )
+	 {
+		// Perform task functionality here.
+
+		// Set bit 1 in the event flag to note this task has reached the
+		// synchronisation point.  The other two tasks will set the other two
+		// bits defined by ALL_SYNC_BITS.  All three tasks have reached the
+		// synchronisation point when all the ALL_SYNC_BITS are set.  Wait
+		// indefinitely for this to happen.
+		xEventGroupSync( xEventBits, TASK_1_BIT, ALL_SYNC_BITS, portMAX_DELAY );
+
+		// xEventGroupSync() was called with an indefinite block time, so
+		// this task will only reach here if the synchronisation was made by all
+		// three tasks, so there is no need to test the return value.
+	 }
+ }
+
+ void vTask2( void *pvParameters )
+ {
+	 for( ;; )
+	 {
+		// Perform task functionality here.
+
+		// Set bit 2 in the event flag to note this task has reached the
+		// synchronisation point.  The other two tasks will set the other two
+		// bits defined by ALL_SYNC_BITS.  All three tasks have reached the
+		// synchronisation point when all the ALL_SYNC_BITS are set.  Wait
+		// indefinitely for this to happen.
+		xEventGroupSync( xEventBits, TASK_2_BIT, ALL_SYNC_BITS, portMAX_DELAY );
+
+		// xEventGroupSync() was called with an indefinite block time, so
+		// this task will only reach here if the synchronisation was made by all
+		// three tasks, so there is no need to test the return value.
+	}
+ }
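+
+ // One possible way of creating the event group and the three tasks used
+ // above.  The stack sizes, priorities and task names are arbitrary values
+ // chosen for illustration only.
+ void vStartSyncTasks( void )
+ {
+     xEventBits = xEventGroupCreate();
+
+     if( xEventBits != NULL )
+     {
+         xTaskCreate( vTask0, "Task0", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, NULL );
+         xTaskCreate( vTask1, "Task1", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, NULL );
+         xTaskCreate( vTask2, "Task2", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, NULL );
+     }
+ }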
+
+ 
+ * \defgroup xEventGroupSync xEventGroupSync + * \ingroup EventGroup + */ +EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + +/** + * event_groups.h + *
+	EventBits_t xEventGroupGetBits( EventGroupHandle_t xEventGroup );
+ 
+ * + * Returns the current value of the bits in an event group. This function + * cannot be used from an interrupt. + * + * @param xEventGroup The event group being queried. + * + * @return The event group bits at the time xEventGroupGetBits() was called. + * + * \defgroup xEventGroupGetBits xEventGroupGetBits + * \ingroup EventGroup + */ +#define xEventGroupGetBits( xEventGroup ) xEventGroupClearBits( xEventGroup, 0 ) + +/** + * event_groups.h + *
+	EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup );
+ 
+ * + * A version of xEventGroupGetBits() that can be called from an ISR. + * + * @param xEventGroup The event group being queried. + * + * @return The event group bits at the time xEventGroupGetBitsFromISR() was called. + * + * \defgroup xEventGroupGetBitsFromISR xEventGroupGetBitsFromISR + * \ingroup EventGroup + */ +EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+	void vEventGroupDelete( EventGroupHandle_t xEventGroup );
+ 
+ * + * Delete an event group that was previously created by a call to + * xEventGroupCreate(). Tasks that are blocked on the event group will be + * unblocked and obtain 0 as the event group's value. + * + * @param xEventGroup The event group being deleted. + */ +void vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; + +/* For internal use only. */ +void vEventGroupSetBitsCallback( void *pvEventGroup, const uint32_t ulBitsToSet ) PRIVILEGED_FUNCTION; +void vEventGroupClearBitsCallback( void *pvEventGroup, const uint32_t ulBitsToClear ) PRIVILEGED_FUNCTION; + + +#if (configUSE_TRACE_FACILITY == 1) + UBaseType_t uxEventGroupGetNumber( void* xEventGroup ) PRIVILEGED_FUNCTION; + void vEventGroupSetNumber( void* xEventGroup, UBaseType_t uxEventGroupNumber ) PRIVILEGED_FUNCTION; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT_GROUPS_H */ + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/list.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/list.h new file mode 100644 index 000000000..2fb6775ff --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/list.h @@ -0,0 +1,412 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * This is the list implementation used by the scheduler. While it is tailored + * heavily for the schedulers needs, it is also available for use by + * application code. + * + * list_ts can only store pointers to list_item_ts. Each ListItem_t contains a + * numeric value (xItemValue). Most of the time the lists are sorted in + * descending item value order. + * + * Lists are created already containing one list item. The value of this + * item is the maximum possible that can be stored, it is therefore always at + * the end of the list and acts as a marker. The list member pxHead always + * points to this marker - even though it is at the tail of the list. This + * is because the tail contains a wrap back pointer to the true head of + * the list. + * + * In addition to it's value, each list item contains a pointer to the next + * item in the list (pxNext), a pointer to the list it is in (pxContainer) + * and a pointer to back to the object that contains it. 
These later two + * pointers are included for efficiency of list manipulation. There is + * effectively a two way link between the object containing the list item and + * the list item itself. + * + * + * \page ListIntroduction List Implementation + * \ingroup FreeRTOSIntro + */ + +#ifndef INC_FREERTOS_H + #error FreeRTOS.h must be included before list.h +#endif + +#ifndef LIST_H +#define LIST_H + +/* + * The list structure members are modified from within interrupts, and therefore + * by rights should be declared volatile. However, they are only modified in a + * functionally atomic way (within critical sections of with the scheduler + * suspended) and are either passed by reference into a function or indexed via + * a volatile variable. Therefore, in all use cases tested so far, the volatile + * qualifier can be omitted in order to provide a moderate performance + * improvement without adversely affecting functional behaviour. The assembly + * instructions generated by the IAR, ARM and GCC compilers when the respective + * compiler's options were set for maximum optimisation has been inspected and + * deemed to be as intended. That said, as compiler technology advances, and + * especially if aggressive cross module optimisation is used (a use case that + * has not been exercised to any great extend) then it is feasible that the + * volatile qualifier will be needed for correct optimisation. It is expected + * that a compiler removing essential code because, without the volatile + * qualifier on the list structure members and with aggressive cross module + * optimisation, the compiler deemed the code unnecessary will result in + * complete and obvious failure of the scheduler. If this is ever experienced + * then the volatile qualifier can be inserted in the relevant places within the + * list structures by simply defining configLIST_VOLATILE to volatile in + * FreeRTOSConfig.h (as per the example at the bottom of this comment block). + * If configLIST_VOLATILE is not defined then the preprocessor directives below + * will simply #define configLIST_VOLATILE away completely. + * + * To use volatile list structure members then add the following line to + * FreeRTOSConfig.h (without the quotes): + * "#define configLIST_VOLATILE volatile" + */ +#ifndef configLIST_VOLATILE + #define configLIST_VOLATILE +#endif /* configSUPPORT_CROSS_MODULE_OPTIMISATION */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Macros that can be used to place known values within the list structures, +then check that the known values do not get corrupted during the execution of +the application. These may catch the list data structures being overwritten in +memory. They will not catch data errors caused by incorrect configuration or +use of FreeRTOS.*/ +#if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 0 ) + /* Define the macros to do nothing. */ + #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE + #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE + #define listFIRST_LIST_INTEGRITY_CHECK_VALUE + #define listSECOND_LIST_INTEGRITY_CHECK_VALUE + #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) + #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) + #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) + #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) + #define listTEST_LIST_ITEM_INTEGRITY( pxItem ) + #define listTEST_LIST_INTEGRITY( pxList ) +#else + /* Define macros that add new members into the list structures. 
*/ + #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue1; + #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue2; + #define listFIRST_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue1; + #define listSECOND_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue2; + + /* Define macros that set the new structure members to known values. */ + #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue1 = pdINTEGRITY_CHECK_VALUE + #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue2 = pdINTEGRITY_CHECK_VALUE + #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) ( pxList )->xListIntegrityValue1 = pdINTEGRITY_CHECK_VALUE + #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) ( pxList )->xListIntegrityValue2 = pdINTEGRITY_CHECK_VALUE + + /* Define macros that will assert if one of the structure members does not + contain its expected value. */ + #define listTEST_LIST_ITEM_INTEGRITY( pxItem ) configASSERT( ( ( pxItem )->xListItemIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxItem )->xListItemIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) ) + #define listTEST_LIST_INTEGRITY( pxList ) configASSERT( ( ( pxList )->xListIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxList )->xListIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) ) +#endif /* configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES */ + + +/* + * Definition of the only type of object that a list can contain. + */ +struct xLIST; +struct xLIST_ITEM +{ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + configLIST_VOLATILE TickType_t xItemValue; /*< The value being listed. In most cases this is used to sort the list in descending order. */ + struct xLIST_ITEM * configLIST_VOLATILE pxNext; /*< Pointer to the next ListItem_t in the list. */ + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /*< Pointer to the previous ListItem_t in the list. */ + void * pvOwner; /*< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */ + struct xLIST * configLIST_VOLATILE pxContainer; /*< Pointer to the list in which this list item is placed (if any). */ + listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ +}; +typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two separate definitions. */ + +struct xMINI_LIST_ITEM +{ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + configLIST_VOLATILE TickType_t xItemValue; + struct xLIST_ITEM * configLIST_VOLATILE pxNext; + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; +}; +typedef struct xMINI_LIST_ITEM MiniListItem_t; + +/* + * Definition of the type of queue used by the scheduler. + */ +typedef struct xLIST +{ + listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + volatile UBaseType_t uxNumberOfItems; + ListItem_t * configLIST_VOLATILE pxIndex; /*< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). 
*/ + MiniListItem_t xListEnd; /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */ + listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ +} List_t; + +/* + * Access macro to set the owner of a list item. The owner of a list item + * is the object (usually a TCB) that contains the list item. + * + * \page listSET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER + * \ingroup LinkedList + */ +#define listSET_LIST_ITEM_OWNER( pxListItem, pxOwner ) ( ( pxListItem )->pvOwner = ( void * ) ( pxOwner ) ) + +/* + * Access macro to get the owner of a list item. The owner of a list item + * is the object (usually a TCB) that contains the list item. + * + * \page listSET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER + * \ingroup LinkedList + */ +#define listGET_LIST_ITEM_OWNER( pxListItem ) ( ( pxListItem )->pvOwner ) + +/* + * Access macro to set the value of the list item. In most cases the value is + * used to sort the list in descending order. + * + * \page listSET_LIST_ITEM_VALUE listSET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listSET_LIST_ITEM_VALUE( pxListItem, xValue ) ( ( pxListItem )->xItemValue = ( xValue ) ) + +/* + * Access macro to retrieve the value of the list item. The value can + * represent anything - for example the priority of a task, or the time at + * which a task should be unblocked. + * + * \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listGET_LIST_ITEM_VALUE( pxListItem ) ( ( pxListItem )->xItemValue ) + +/* + * Access macro to retrieve the value of the list item at the head of a given + * list. + * + * \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext->xItemValue ) + +/* + * Return the list item at the head of the list. + * + * \page listGET_HEAD_ENTRY listGET_HEAD_ENTRY + * \ingroup LinkedList + */ +#define listGET_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext ) + +/* + * Return the list item at the head of the list. + * + * \page listGET_NEXT listGET_NEXT + * \ingroup LinkedList + */ +#define listGET_NEXT( pxListItem ) ( ( pxListItem )->pxNext ) + +/* + * Return the list item that marks the end of the list + * + * \page listGET_END_MARKER listGET_END_MARKER + * \ingroup LinkedList + */ +#define listGET_END_MARKER( pxList ) ( ( ListItem_t const * ) ( &( ( pxList )->xListEnd ) ) ) + +/* + * Access macro to determine if a list contains any items. The macro will + * only have the value true if the list is empty. + * + * \page listLIST_IS_EMPTY listLIST_IS_EMPTY + * \ingroup LinkedList + */ +#define listLIST_IS_EMPTY( pxList ) ( ( ( pxList )->uxNumberOfItems == ( UBaseType_t ) 0 ) ? pdTRUE : pdFALSE ) + +/* + * Access macro to return the number of items in the list. + */ +#define listCURRENT_LIST_LENGTH( pxList ) ( ( pxList )->uxNumberOfItems ) + +/* + * Access function to obtain the owner of the next entry in a list. + * + * The list member pxIndex is used to walk through a list. Calling + * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list + * and returns that entry's pxOwner parameter. Using multiple calls to this + * function it is therefore possible to move through every item contained in + * a list. + * + * The pxOwner parameter of a list item is a pointer to the object that owns + * the list item. 
In the scheduler this is normally a task control block. + * The pxOwner parameter effectively creates a two way link between the list + * item and its owner. + * + * @param pxTCB pxTCB is set to the address of the owner of the next list item. + * @param pxList The list from which the next item owner is to be returned. + * + * \page listGET_OWNER_OF_NEXT_ENTRY listGET_OWNER_OF_NEXT_ENTRY + * \ingroup LinkedList + */ +#define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList ) \ +{ \ +List_t * const pxConstList = ( pxList ); \ + /* Increment the index to the next item and return the item, ensuring */ \ + /* we don't return the marker used at the end of the list. */ \ + ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + if( ( void * ) ( pxConstList )->pxIndex == ( void * ) &( ( pxConstList )->xListEnd ) ) \ + { \ + ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + } \ + ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \ +} + + +/* + * Access function to obtain the owner of the first entry in a list. Lists + * are normally sorted in ascending item value order. + * + * This function returns the pxOwner member of the first item in the list. + * The pxOwner parameter of a list item is a pointer to the object that owns + * the list item. In the scheduler this is normally a task control block. + * The pxOwner parameter effectively creates a two way link between the list + * item and its owner. + * + * @param pxList The list from which the owner of the head item is to be + * returned. + * + * \page listGET_OWNER_OF_HEAD_ENTRY listGET_OWNER_OF_HEAD_ENTRY + * \ingroup LinkedList + */ +#define listGET_OWNER_OF_HEAD_ENTRY( pxList ) ( (&( ( pxList )->xListEnd ))->pxNext->pvOwner ) + +/* + * Check to see if a list item is within a list. The list item maintains a + * "container" pointer that points to the list it is in. All this macro does + * is check to see if the container and the list match. + * + * @param pxList The list we want to know if the list item is within. + * @param pxListItem The list item we want to know if is in the list. + * @return pdTRUE if the list item is in the list, otherwise pdFALSE. + */ +#define listIS_CONTAINED_WITHIN( pxList, pxListItem ) ( ( ( pxListItem )->pxContainer == ( pxList ) ) ? ( pdTRUE ) : ( pdFALSE ) ) + +/* + * Return the list a list item is contained within (referenced from). + * + * @param pxListItem The list item being queried. + * @return A pointer to the List_t object that references the pxListItem + */ +#define listLIST_ITEM_CONTAINER( pxListItem ) ( ( pxListItem )->pxContainer ) + +/* + * This provides a crude means of knowing if a list has been initialised, as + * pxList->xListEnd.xItemValue is set to portMAX_DELAY by the vListInitialise() + * function. + */ +#define listLIST_IS_INITIALISED( pxList ) ( ( pxList )->xListEnd.xItemValue == portMAX_DELAY ) + +/* + * Must be called before a list is used! This initialises all the members + * of the list structure and inserts the xListEnd item into the list as a + * marker to the back of the list. + * + * @param pxList Pointer to the list being initialised. + * + * \page vListInitialise vListInitialise + * \ingroup LinkedList + */ +void vListInitialise( List_t * const pxList ) PRIVILEGED_FUNCTION; + +/* + * Must be called before a list item is used. This sets the list container to + * null so the item does not think that it is already contained in a list. + * + * @param pxItem Pointer to the list item being initialised. 
+ * + * \page vListInitialiseItem vListInitialiseItem + * \ingroup LinkedList + */ +void vListInitialiseItem( ListItem_t * const pxItem ) PRIVILEGED_FUNCTION; + +/* + * Insert a list item into a list. The item will be inserted into the list in + * a position determined by its item value (descending item value order). + * + * @param pxList The list into which the item is to be inserted. + * + * @param pxNewListItem The item that is to be placed in the list. + * + * \page vListInsert vListInsert + * \ingroup LinkedList + */ +void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION; + +/* + * Insert a list item into a list. The item will be inserted in a position + * such that it will be the last item within the list returned by multiple + * calls to listGET_OWNER_OF_NEXT_ENTRY. + * + * The list member pxIndex is used to walk through a list. Calling + * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list. + * Placing an item in a list using vListInsertEnd effectively places the item + * in the list position pointed to by pxIndex. This means that every other + * item within the list will be returned by listGET_OWNER_OF_NEXT_ENTRY before + * the pxIndex parameter again points to the item being inserted. + * + * @param pxList The list into which the item is to be inserted. + * + * @param pxNewListItem The list item to be inserted into the list. + * + * \page vListInsertEnd vListInsertEnd + * \ingroup LinkedList + */ +void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION; + +/* + * Remove an item from a list. The list item has a pointer to the list that + * it is in, so only the list item need be passed into the function. + * + * @param uxListRemove The item to be removed. The item will remove itself from + * the list pointed to by it's pxContainer parameter. + * + * @return The number of items that remain in the list after the list item has + * been removed. + * + * \page uxListRemove uxListRemove + * \ingroup LinkedList + */ +UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) PRIVILEGED_FUNCTION; + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h new file mode 100644 index 000000000..cfa08cb93 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h @@ -0,0 +1,799 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +/* + * Message buffers build functionality on top of FreeRTOS stream buffers. + * Whereas stream buffers are used to send a continuous stream of data from one + * task or interrupt to another, message buffers are used to send variable + * length discrete messages from one task or interrupt to another. Their + * implementation is light weight, making them particularly suited for interrupt + * to task and core to core communication scenarios. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferRead()) inside a critical section and set the receive + * timeout to 0. + * + * Message buffers hold variable length messages. To enable that, when a + * message is written to the message buffer an additional sizeof( size_t ) bytes + * are also written to store the message's length (that happens internally, with + * the API function). sizeof( size_t ) is typically 4 bytes on a 32-bit + * architecture, so writing a 10 byte message to a message buffer on a 32-bit + * architecture will actually reduce the available space in the message buffer + * by 14 bytes (10 byte are used by the message, and 4 bytes to hold the length + * of the message). + */ + +#ifndef FREERTOS_MESSAGE_BUFFER_H +#define FREERTOS_MESSAGE_BUFFER_H + +/* Message buffers are built onto of stream buffers. */ +#include "stream_buffer.h" + +#if defined( __cplusplus ) +extern "C" { +#endif + +/** + * Type by which message buffers are referenced. For example, a call to + * xMessageBufferCreate() returns an MessageBufferHandle_t variable that can + * then be used as a parameter to xMessageBufferSend(), xMessageBufferReceive(), + * etc. + */ +typedef void * MessageBufferHandle_t; + +/*-----------------------------------------------------------*/ + +/** + * message_buffer.h + * +
+MessageBufferHandle_t xMessageBufferCreate( size_t xBufferSizeBytes );
+
+ * + * Creates a new message buffer using dynamically allocated memory. See + * xMessageBufferCreateStatic() for a version that uses statically allocated + * memory (memory that is allocated at compile time). + * + * configSUPPORT_DYNAMIC_ALLOCATION must be set to 1 or left undefined in + * FreeRTOSConfig.h for xMessageBufferCreate() to be available. + * + * @param xBufferSizeBytes The total number of bytes (not messages) the message + * buffer will be able to hold at any one time. When a message is written to + * the message buffer an additional sizeof( size_t ) bytes are also written to + * store the message's length. sizeof( size_t ) is typically 4 bytes on a + * 32-bit architecture, so on most 32-bit architectures a 10 byte message will + * take up 14 bytes of message buffer space. + * + * @return If NULL is returned, then the message buffer cannot be created + * because there is insufficient heap memory available for FreeRTOS to allocate + * the message buffer data structures and storage area. A non-NULL value being + * returned indicates that the message buffer has been created successfully - + * the returned value should be stored as the handle to the created message + * buffer. + * + * Example use: +
+
+void vAFunction( void )
+{
+MessageBufferHandle_t xMessageBuffer;
+const size_t xMessageBufferSizeBytes = 100;
+
+    // Create a message buffer that can hold 100 bytes.  The memory used to hold
+    // both the message buffer structure and the messages themselves is allocated
+    // dynamically.  Each message added to the buffer consumes an additional 4
+    // bytes which are used to hold the length of the message.
+    xMessageBuffer = xMessageBufferCreate( xMessageBufferSizeBytes );
+
+    if( xMessageBuffer == NULL )
+    {
+        // There was not enough heap memory space available to create the
+        // message buffer.
+    }
+    else
+    {
+        // The message buffer was created successfully and can now be used.
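+
+        // Illustrative addition, not part of the original example: query how
+        // much free space the new buffer reports.  Assuming a 32-bit port where
+        // sizeof( size_t ) is 4, a 10 byte message would consume 14 bytes of
+        // this space, because the message length is stored alongside the data.
+        size_t xFreeSpace = xMessageBufferSpaceAvailable( xMessageBuffer );
+        ( void ) xFreeSpace; // Initially the full xMessageBufferSizeBytes should be reported free.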
+    }
+}
+
+
+ * \defgroup xMessageBufferCreate xMessageBufferCreate + * \ingroup MessageBufferManagement + */ +#define xMessageBufferCreate( xBufferSizeBytes ) ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE ) + +/** + * message_buffer.h + * +
+MessageBufferHandle_t xMessageBufferCreateStatic( size_t xBufferSizeBytes,
+                                                  uint8_t *pucMessageBufferStorageArea,
+                                                  StaticMessageBuffer_t *pxStaticMessageBuffer );
+
+ * Creates a new message buffer using statically allocated memory. See + * xMessageBufferCreate() for a version that uses dynamically allocated memory. + * + * @param xBufferSizeBytes The size, in bytes, of the buffer pointed to by the + * pucMessageBufferStorageArea parameter. When a message is written to the + * message buffer an additional sizeof( size_t ) bytes are also written to store + * the message's length. sizeof( size_t ) is typically 4 bytes on a 32-bit + * architecture, so on most 32-bit architecture a 10 byte message will take up + * 14 bytes of message buffer space. The maximum number of bytes that can be + * stored in the message buffer is actually (xBufferSizeBytes - 1). + * + * @param pucMessageBufferStorageArea Must point to a uint8_t array that is at + * least xBufferSizeBytes + 1 big. This is the array to which messages are + * copied when they are written to the message buffer. + * + * @param pxStaticMessageBuffer Must point to a variable of type + * StaticMessageBuffer_t, which will be used to hold the message buffer's data + * structure. + * + * @return If the message buffer is created successfully then a handle to the + * created message buffer is returned. If either pucMessageBufferStorageArea or + * pxStaticmessageBuffer are NULL then NULL is returned. + * + * Example use: +
+
+// Used to dimension the array used to hold the messages.  The available space
+// will actually be one less than this, so 999.
+#define STORAGE_SIZE_BYTES 1000
+
+// Defines the memory that will actually hold the messages within the message
+// buffer.
+static uint8_t ucStorageBuffer[ STORAGE_SIZE_BYTES ];
+
+// The variable used to hold the message buffer structure.
+StaticMessageBuffer_t xMessageBufferStruct;
+
+void MyFunction( void )
+{
+MessageBufferHandle_t xMessageBuffer;
+
+    xMessageBuffer = xMessageBufferCreateStatic( sizeof( ucStorageBuffer ),
+                                                 ucStorageBuffer,
+                                                 &xMessageBufferStruct );
+
+    // As neither the pucMessageBufferStorageArea nor pxStaticMessageBuffer
+    // parameters were NULL, xMessageBuffer will not be NULL, and can be used to
+    // reference the created message buffer in other message buffer API calls.
+
+    // Other code that uses the message buffer can go here.
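+
+    // Illustrative addition, not part of the original example: a statically
+    // created message buffer is used through exactly the same API as a
+    // dynamically created one.
+    const uint8_t ucData[] = { 1, 2, 3 };
+    xMessageBufferSend( xMessageBuffer, ucData, sizeof( ucData ), 0 );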
+}
+
+
+ * \defgroup xMessageBufferCreateStatic xMessageBufferCreateStatic + * \ingroup MessageBufferManagement + */ +#define xMessageBufferCreateStatic( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer ) ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer ) + +/** + * message_buffer.h + * +
+size_t xMessageBufferSend( MessageBufferHandle_t xMessageBuffer,
+                           const void *pvTxData,
+                           size_t xDataLengthBytes,
+                           TickType_t xTicksToWait );
+
+ *
+ * Sends a discrete message to the message buffer.  The message can be any
+ * length that fits within the buffer's free space, and is copied into the
+ * buffer.
+ *
+ * ***NOTE***:  Uniquely among FreeRTOS objects, the stream buffer
+ * implementation (so also the message buffer implementation, as message buffers
+ * are built on top of stream buffers) assumes there is only one task or
+ * interrupt that will write to the buffer (the writer), and only one task or
+ * interrupt that will read from the buffer (the reader).  It is safe for the
+ * writer and reader to be different tasks or interrupts, but, unlike other
+ * FreeRTOS objects, it is not safe to have multiple different writers or
+ * multiple different readers.  If there are to be multiple different writers
+ * then the application writer must place each call to a writing API function
+ * (such as xMessageBufferSend()) inside a critical section and set the send
+ * block time to 0.  Likewise, if there are to be multiple different readers
+ * then the application writer must place each call to a reading API function
+ * (such as xMessageBufferRead()) inside a critical section and set the receive
+ * block time to 0.
+ *
+ * Use xMessageBufferSend() to write to a message buffer from a task.  Use
+ * xMessageBufferSendFromISR() to write to a message buffer from an interrupt
+ * service routine (ISR).
+ *
+ * @param xMessageBuffer The handle of the message buffer to which a message is
+ * being sent.
+ *
+ * @param pvTxData A pointer to the message that is to be copied into the
+ * message buffer.
+ *
+ * @param xDataLengthBytes The length of the message.  That is, the number of
+ * bytes to copy from pvTxData into the message buffer.  When a message is
+ * written to the message buffer an additional sizeof( size_t ) bytes are also
+ * written to store the message's length.  sizeof( size_t ) is typically 4 bytes
+ * on a 32-bit architecture, so on most 32-bit architectures setting
+ * xDataLengthBytes to 20 will reduce the free space in the message buffer by 24
+ * bytes (20 bytes of message data and 4 bytes to hold the message length).
+ *
+ * @param xTicksToWait The maximum amount of time the calling task should remain
+ * in the Blocked state to wait for enough space to become available in the
+ * message buffer, should the message buffer have insufficient space when
+ * xMessageBufferSend() is called.  The calling task will never block if
+ * xTicksToWait is zero.  The block time is specified in tick periods, so the
+ * absolute time it represents is dependent on the tick frequency.  The macro
+ * pdMS_TO_TICKS() can be used to convert a time specified in milliseconds into
+ * a time specified in ticks.  Setting xTicksToWait to portMAX_DELAY will cause
+ * the task to wait indefinitely (without timing out), provided
+ * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h.  Tasks do not use any
+ * CPU time when they are in the Blocked state.
+ *
+ * @return The number of bytes written to the message buffer.  If the call to
+ * xMessageBufferSend() times out before there was enough space to write the
+ * message into the message buffer then zero is returned.  If the call did not
+ * time out then xDataLengthBytes is returned.
+ *
+ * Example use:
+
+void vAFunction( MessageBufferHandle_t xMessageBuffer )
+{
+size_t xBytesSent;
+uint8_t ucArrayToSend[] = { 0, 1, 2, 3 };
+char *pcStringToSend = "String to send";
+const TickType_t x100ms = pdMS_TO_TICKS( 100 );
+
+    // Send an array to the message buffer, blocking for a maximum of 100ms to
+    // wait for enough space to be available in the message buffer.
+    xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), x100ms );
+
+    if( xBytesSent != sizeof( ucArrayToSend ) )
+    {
+        // The call to xMessageBufferSend() timed out before there was enough
+        // space in the buffer for the data to be written.
+    }
+
+    // Send the string to the message buffer.  Return immediately if there is
+    // not enough space in the buffer.
+    xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) pcStringToSend, strlen( pcStringToSend ), 0 );
+
+    if( xBytesSent != strlen( pcStringToSend ) )
+    {
+        // The string could not be added to the message buffer because there was
+        // not enough free space in the buffer.
+    }
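+
+    // Illustrative addition, not part of the original example: the amount of
+    // free space can be checked before sending.  A message of N bytes needs
+    // N + sizeof( size_t ) bytes of free space to be accepted.
+    if( xMessageBufferSpaceAvailable( xMessageBuffer ) < ( sizeof( ucArrayToSend ) + sizeof( size_t ) ) )
+    {
+        // There is currently not enough room for another copy of ucArrayToSend.
+    }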
+}
+
+ * \defgroup xMessageBufferSend xMessageBufferSend + * \ingroup MessageBufferManagement + */ +#define xMessageBufferSend( xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) xStreamBufferSend( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) + +/** + * message_buffer.h + * +
+size_t xMessageBufferSendFromISR( MessageBufferHandle_t xMessageBuffer,
+                                  const void *pvTxData,
+                                  size_t xDataLengthBytes,
+                                  BaseType_t *pxHigherPriorityTaskWoken );
+
+ *
+ * Interrupt safe version of the API function that sends a discrete message to
+ * the message buffer.  The message can be any length that fits within the
+ * buffer's free space, and is copied into the buffer.
+ *
+ * ***NOTE***:  Uniquely among FreeRTOS objects, the stream buffer
+ * implementation (so also the message buffer implementation, as message buffers
+ * are built on top of stream buffers) assumes there is only one task or
+ * interrupt that will write to the buffer (the writer), and only one task or
+ * interrupt that will read from the buffer (the reader).  It is safe for the
+ * writer and reader to be different tasks or interrupts, but, unlike other
+ * FreeRTOS objects, it is not safe to have multiple different writers or
+ * multiple different readers.  If there are to be multiple different writers
+ * then the application writer must place each call to a writing API function
+ * (such as xMessageBufferSend()) inside a critical section and set the send
+ * block time to 0.  Likewise, if there are to be multiple different readers
+ * then the application writer must place each call to a reading API function
+ * (such as xMessageBufferRead()) inside a critical section and set the receive
+ * block time to 0.
+ *
+ * Use xMessageBufferSend() to write to a message buffer from a task.  Use
+ * xMessageBufferSendFromISR() to write to a message buffer from an interrupt
+ * service routine (ISR).
+ *
+ * @param xMessageBuffer The handle of the message buffer to which a message is
+ * being sent.
+ *
+ * @param pvTxData A pointer to the message that is to be copied into the
+ * message buffer.
+ *
+ * @param xDataLengthBytes The length of the message.  That is, the number of
+ * bytes to copy from pvTxData into the message buffer.  When a message is
+ * written to the message buffer an additional sizeof( size_t ) bytes are also
+ * written to store the message's length.  sizeof( size_t ) is typically 4 bytes
+ * on a 32-bit architecture, so on most 32-bit architectures setting
+ * xDataLengthBytes to 20 will reduce the free space in the message buffer by 24
+ * bytes (20 bytes of message data and 4 bytes to hold the message length).
+ *
+ * @param pxHigherPriorityTaskWoken  It is possible that a message buffer will
+ * have a task blocked on it waiting for data.  Calling
+ * xMessageBufferSendFromISR() can make data available, and so cause a task that
+ * was waiting for data to leave the Blocked state.  If calling
+ * xMessageBufferSendFromISR() causes a task to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently executing task (the
+ * task that was interrupted), then, internally, xMessageBufferSendFromISR()
+ * will set *pxHigherPriorityTaskWoken to pdTRUE.  If
+ * xMessageBufferSendFromISR() sets this value to pdTRUE, then normally a
+ * context switch should be performed before the interrupt is exited.  This will
+ * ensure that the interrupt returns directly to the highest priority Ready
+ * state task.  *pxHigherPriorityTaskWoken should be set to pdFALSE before it
+ * is passed into the function.  See the code example below for an example.
+ *
+ * @return The number of bytes actually written to the message buffer.  If the
+ * message buffer didn't have enough free space for the message to be stored
+ * then 0 is returned, otherwise xDataLengthBytes is returned.
+ *
+ * Example use:
+
+// A message buffer that has already been created.
+MessageBufferHandle_t xMessageBuffer;
+
+void vAnInterruptServiceRoutine( void )
+{
+size_t xBytesSent;
+char *pcStringToSend = "String to send";
+BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
+
+    // Attempt to send the string to the message buffer.
+    xBytesSent = xMessageBufferSendFromISR( xMessageBuffer,
+                                            ( void * ) pcStringToSend,
+                                            strlen( pcStringToSend ),
+                                            &xHigherPriorityTaskWoken );
+
+    if( xBytesSent != strlen( pcStringToSend ) )
+    {
+        // The string could not be added to the message buffer because there was
+        // not enough free space in the buffer.
+    }
+
+    // If xHigherPriorityTaskWoken was set to pdTRUE inside
+    // xMessageBufferSendFromISR() then a task that has a priority above the
+    // priority of the currently executing task was unblocked and a context
+    // switch should be performed to ensure the ISR returns to the unblocked
+    // task.  In most FreeRTOS ports this is done by simply passing
+    // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
+    // variable's value, and perform the context switch if necessary.  Check the
+    // documentation for the port in use for port specific instructions.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+
+ * \defgroup xMessageBufferSendFromISR xMessageBufferSendFromISR + * \ingroup MessageBufferManagement + */ +#define xMessageBufferSendFromISR( xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) xStreamBufferSendFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) + +/** + * message_buffer.h + * +
+size_t xMessageBufferReceive( MessageBufferHandle_t xMessageBuffer,
+                              void *pvRxData,
+                              size_t xBufferLengthBytes,
+                              TickType_t xTicksToWait );
+
+ * + * Receives a discrete message from a message buffer. Messages can be of + * variable length and are copied out of the buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xMessageBufferReceive() to read from a message buffer from a task. Use + * xMessageBufferReceiveFromISR() to read from a message buffer from an + * interrupt service routine (ISR). + * + * @param xMessageBuffer The handle of the message buffer from which a message + * is being received. + * + * @param pvRxData A pointer to the buffer into which the received message is + * to be copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the pvRxData + * parameter. This sets the maximum length of the message that can be received. + * If xBufferLengthBytes is too small to hold the next message then the message + * will be left in the message buffer and 0 will be returned. + * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for a message, should the message buffer be empty. + * xMessageBufferReceive() will return immediately if xTicksToWait is zero and + * the message buffer is empty. The block time is specified in tick periods, so + * the absolute time it represents is dependent on the tick frequency. The + * macro pdMS_TO_TICKS() can be used to convert a time specified in milliseconds + * into a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will + * cause the task to wait indefinitely (without timing out), provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. Tasks do not use any + * CPU time when they are in the Blocked state. + * + * @return The length, in bytes, of the message read from the message buffer, if + * any. If xMessageBufferReceive() times out before a message became available + * then zero is returned. If the length of the message is greater than + * xBufferLengthBytes then the message will be left in the message buffer and + * zero is returned. + * + * Example use: +
+void vAFunction( MessageBufferHandle_t xMessageBuffer )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+const TickType_t xBlockTime = pdMS_TO_TICKS( 20 );
+
+    // Receive the next message from the message buffer.  Wait in the Blocked
+    // state (so not using any CPU processing time) for a maximum of 20ms for
+    // a message to become available.
+    xReceivedBytes = xMessageBufferReceive( xMessageBuffer,
+                                            ( void * ) ucRxData,
+                                            sizeof( ucRxData ),
+                                            xBlockTime );
+
+    if( xReceivedBytes > 0 )
+    {
+        // ucRxData contains a message that is xReceivedBytes long.  Process
+        // the message here....
+    }
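+
+    // Illustrative addition, not part of the original example: a return value
+    // of 0 can also mean the next message is larger than ucRxData.  The size
+    // of that message can be queried as below (0 is returned if the buffer is
+    // simply empty).
+    if( xReceivedBytes == 0 )
+    {
+        size_t xNextLength = xMessageBufferNextLengthBytes( xMessageBuffer );
+        ( void ) xNextLength;
+    }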
+}
+
+ * \defgroup xMessageBufferReceive xMessageBufferReceive + * \ingroup MessageBufferManagement + */ +#define xMessageBufferReceive( xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) xStreamBufferReceive( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) + + +/** + * message_buffer.h + * +
+size_t xMessageBufferReceiveFromISR( MessageBufferHandle_t xMessageBuffer,
+                                     void *pvRxData,
+                                     size_t xBufferLengthBytes,
+                                     BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * An interrupt safe version of the API function that receives a discrete + * message from a message buffer. Messages can be of variable length and are + * copied out of the buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xMessageBufferReceive() to read from a message buffer from a task. Use + * xMessageBufferReceiveFromISR() to read from a message buffer from an + * interrupt service routine (ISR). + * + * @param xMessageBuffer The handle of the message buffer from which a message + * is being received. + * + * @param pvRxData A pointer to the buffer into which the received message is + * to be copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the pvRxData + * parameter. This sets the maximum length of the message that can be received. + * If xBufferLengthBytes is too small to hold the next message then the message + * will be left in the message buffer and 0 will be returned. + * + * @param pxHigherPriorityTaskWoken It is possible that a message buffer will + * have a task blocked on it waiting for space to become available. Calling + * xMessageBufferReceiveFromISR() can make space available, and so cause a task + * that is waiting for space to leave the Blocked state. If calling + * xMessageBufferReceiveFromISR() causes a task to leave the Blocked state, and + * the unblocked task has a priority higher than the currently executing task + * (the task that was interrupted), then, internally, + * xMessageBufferReceiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE. + * If xMessageBufferReceiveFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. That will + * ensure the interrupt returns directly to the highest priority Ready state + * task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it is + * passed into the function. See the code example below for an example. + * + * @return The length, in bytes, of the message read from the message buffer, if + * any. + * + * Example use: +
+// A message buffer that has already been created.
+MessageBufferHandle_t xMessageBuffer;
+
+void vAnInterruptServiceRoutine( void )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+BaseType_t xHigherPriorityTaskWoken = pdFALSE;  // Initialised to pdFALSE.
+
+    // Receive the next message from the message buffer.
+    xReceivedBytes = xMessageBufferReceiveFromISR( xMessageBuffer,
+                                                  ( void * ) ucRxData,
+                                                  sizeof( ucRxData ),
+                                                  &xHigherPriorityTaskWoken );
+
+    if( xReceivedBytes > 0 )
+    {
+        // ucRxData contains a message that is xReceivedBytes long.  Process
+        // the message here....
+    }
+
+    // If xHigherPriorityTaskWoken was set to pdTRUE inside
+    // xMessageBufferReceiveFromISR() then a task that has a priority above the
+    // priority of the currently executing task was unblocked and a context
+    // switch should be performed to ensure the ISR returns to the unblocked
+    // task.  In most FreeRTOS ports this is done by simply passing
+    // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
+    // variable's value, and perform the context switch if necessary.  Check the
+    // documentation for the port in use for port specific instructions.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+
+ * \defgroup xMessageBufferReceiveFromISR xMessageBufferReceiveFromISR + * \ingroup MessageBufferManagement + */ +#define xMessageBufferReceiveFromISR( xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) xStreamBufferReceiveFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) + +/** + * message_buffer.h + * +
+void vMessageBufferDelete( MessageBufferHandle_t xMessageBuffer );
+
+ * + * Deletes a message buffer that was previously created using a call to + * xMessageBufferCreate() or xMessageBufferCreateStatic(). If the message + * buffer was created using dynamic memory (that is, by xMessageBufferCreate()), + * then the allocated memory is freed. + * + * A message buffer handle must not be used after the message buffer has been + * deleted. + * + * @param xMessageBuffer The handle of the message buffer to be deleted. + * + */ +#define vMessageBufferDelete( xMessageBuffer ) vStreamBufferDelete( ( StreamBufferHandle_t ) xMessageBuffer ) + +/** + * message_buffer.h +
+BaseType_t xMessageBufferIsFull( MessageBufferHandle_t xMessageBuffer );
+
+ * + * Tests to see if a message buffer is full. A message buffer is full if it + * cannot accept any more messages, of any size, until space is made available + * by a message being removed from the message buffer. + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return If the message buffer referenced by xMessageBuffer is full then + * pdTRUE is returned. Otherwise pdFALSE is returned. + */ +#define xMessageBufferIsFull( xMessageBuffer ) xStreamBufferIsFull( ( StreamBufferHandle_t ) xMessageBuffer ) + +/** + * message_buffer.h +
+BaseType_t xMessageBufferIsEmpty( MessageBufferHandle_t xMessageBuffer );
+
+ * + * Tests to see if a message buffer is empty (does not contain any messages). + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return If the message buffer referenced by xMessageBuffer is empty then + * pdTRUE is returned. Otherwise pdFALSE is returned. + * + */ +#define xMessageBufferIsEmpty( xMessageBuffer ) xStreamBufferIsEmpty( ( StreamBufferHandle_t ) xMessageBuffer ) + +/** + * message_buffer.h +
+BaseType_t xMessageBufferReset( MessageBufferHandle_t xMessageBuffer );
+
+ * + * Resets a message buffer to its initial empty state, discarding any message it + * contained. + * + * A message buffer can only be reset if there are no tasks blocked on it. + * + * @param xMessageBuffer The handle of the message buffer being reset. + * + * @return If the message buffer was reset then pdPASS is returned. If the + * message buffer could not be reset because either there was a task blocked on + * the message queue to wait for space to become available, or to wait for a + * a message to be available, then pdFAIL is returned. + * + * \defgroup xMessageBufferReset xMessageBufferReset + * \ingroup MessageBufferManagement + */ +#define xMessageBufferReset( xMessageBuffer ) xStreamBufferReset( ( StreamBufferHandle_t ) xMessageBuffer ) + + +/** + * message_buffer.h +
+size_t xMessageBufferSpaceAvailable( MessageBufferHandle_t xMessageBuffer );
+
+ * Returns the number of bytes of free space in the message buffer. + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return The number of bytes that can be written to the message buffer before + * the message buffer would be full. When a message is written to the message + * buffer an additional sizeof( size_t ) bytes are also written to store the + * message's length. sizeof( size_t ) is typically 4 bytes on a 32-bit + * architecture, so if xMessageBufferSpacesAvailable() returns 10, then the size + * of the largest message that can be written to the message buffer is 6 bytes. + * + * \defgroup xMessageBufferSpaceAvailable xMessageBufferSpaceAvailable + * \ingroup MessageBufferManagement + */ +#define xMessageBufferSpaceAvailable( xMessageBuffer ) xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) +#define xMessageBufferSpacesAvailable( xMessageBuffer ) xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) /* Corrects typo in original macro name. */ + +/** + * message_buffer.h +
+ size_t xMessageBufferNextLengthBytes( MessageBufferHandle_t xMessageBuffer );
+ 
+ * Returns the length (in bytes) of the next message in a message buffer. + * Useful if xMessageBufferReceive() returned 0 because the size of the buffer + * passed into xMessageBufferReceive() was too small to hold the next message. + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return The length (in bytes) of the next message in the message buffer, or 0 + * if the message buffer is empty. + * + * \defgroup xMessageBufferNextLengthBytes xMessageBufferNextLengthBytes + * \ingroup MessageBufferManagement + */ +#define xMessageBufferNextLengthBytes( xMessageBuffer ) xStreamBufferNextMessageLengthBytes( ( StreamBufferHandle_t ) xMessageBuffer ) PRIVILEGED_FUNCTION; + +/** + * message_buffer.h + * +
+BaseType_t xMessageBufferSendCompletedFromISR( MessageBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * For advanced users only. + * + * The sbSEND_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is sent to a message buffer or stream buffer. If there was a task that + * was blocked on the message or stream buffer waiting for data to arrive then + * the sbSEND_COMPLETED() macro sends a notification to the task to remove it + * from the Blocked state. xMessageBufferSendCompletedFromISR() does the same + * thing. It is provided to enable application writers to implement their own + * version of sbSEND_COMPLETED(), and MUST NOT BE USED AT ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer to which data was + * written. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xMessageBufferSendCompletedFromISR(). If calling + * xMessageBufferSendCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xMessageBufferSendCompletedFromISR xMessageBufferSendCompletedFromISR + * \ingroup StreamBufferManagement + */ +#define xMessageBufferSendCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) xStreamBufferSendCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken ) + +/** + * message_buffer.h + * +
+BaseType_t xMessageBufferReceiveCompletedFromISR( MessageBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * For advanced users only. + * + * The sbRECEIVE_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is read out of a message buffer or stream buffer. If there was a task + * that was blocked on the message or stream buffer waiting for data to arrive + * then the sbRECEIVE_COMPLETED() macro sends a notification to the task to + * remove it from the Blocked state. xMessageBufferReceiveCompletedFromISR() + * does the same thing. It is provided to enable application writers to + * implement their own version of sbRECEIVE_COMPLETED(), and MUST NOT BE USED AT + * ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer from which data was + * read. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xMessageBufferReceiveCompletedFromISR(). If calling + * xMessageBufferReceiveCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xMessageBufferReceiveCompletedFromISR xMessageBufferReceiveCompletedFromISR + * \ingroup StreamBufferManagement + */ +#define xMessageBufferReceiveCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) xStreamBufferReceiveCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken ) + +#if defined( __cplusplus ) +} /* extern "C" */ +#endif + +#endif /* !defined( FREERTOS_MESSAGE_BUFFER_H ) */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h new file mode 100644 index 000000000..5d7490719 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h @@ -0,0 +1,157 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! 
+ */ + +/* + * When the MPU is used the standard (non MPU) API functions are mapped to + * equivalents that start "MPU_", the prototypes for which are defined in this + * header files. This will cause the application code to call the MPU_ version + * which wraps the non-MPU version with privilege promoting then demoting code, + * so the kernel code always runs will full privileges. + */ + + +#ifndef MPU_PROTOTYPES_H +#define MPU_PROTOTYPES_H + +/* MPU versions of tasks.h API functions. */ +BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode, const char * const pcName, const uint16_t usStackDepth, void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, const char * const pcName, const uint32_t ulStackDepth, void * const pvParameters, UBaseType_t uxPriority, StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskStartScheduler( void ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSuspendAll( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskResumeAll( void ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL; +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetHandle( const char *pcNameToQuery ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) FREERTOS_SYSTEM_CALL; +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) FREERTOS_SYSTEM_CALL; +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetIdleTaskHandle( 
void ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskList( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskGetRunTimeStats( char *pcWriteBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +uint32_t MPU_ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskNotifyStateClear( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskIncrementTick( void ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskMissedYield( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of queue.h API functions. */ +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueDelete( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcName ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const 
UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; +QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxQueueGetQueueNumber( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +uint8_t MPU_ucQueueGetQueueType( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of timers.h API functions. */ +TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction ) FREERTOS_SYSTEM_CALL; +TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction, StaticTimer_t *pxTimerBuffer ) FREERTOS_SYSTEM_CALL; +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerCreateTimerTask( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of event_group.h API functions. 
*/ +EventGroupHandle_t MPU_xEventGroupCreate( void ) FREERTOS_SYSTEM_CALL; +EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxEventGroupGetNumber( void* xEventGroup ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of message/stream_buffer.h API functions. */ +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, const void *pvTxData, size_t xDataLengthBytes, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, void *pvRxData, size_t xBufferLengthBytes, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL; +StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer ) FREERTOS_SYSTEM_CALL; +StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, StaticStreamBuffer_t * const pxStaticStreamBuffer ) FREERTOS_SYSTEM_CALL; + + + +#endif /* MPU_PROTOTYPES_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h new file mode 100644 index 000000000..711393f6a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h @@ -0,0 +1,186 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef MPU_WRAPPERS_H +#define MPU_WRAPPERS_H + +/* This file redefines API functions to be called through a wrapper macro, but +only for ports that are using the MPU. */ +#ifdef portUSING_MPU_WRAPPERS + + /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE will be defined when this file is + included from queue.c or task.c to prevent it from having an effect within + those files. */ + #ifndef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + + /* + * Map standard (non MPU) API functions to equivalents that start + * "MPU_". This will cause the application code to call the MPU_ + * version, which wraps the non-MPU version with privilege promoting + * then demoting code, so the kernel code always runs will full + * privileges. + */ + + /* Map standard tasks.h API functions to the MPU equivalents. 
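+	   As an illustrative sketch only (assuming portUSING_MPU_WRAPPERS is
+	   defined by the port, and using a hypothetical task function named
+	   vATaskFunction), an ordinary application call such as:
+
+	       xTaskCreate( vATaskFunction, "ATask", 128, NULL, 1, NULL );
+
+	   is compiled as a call to MPU_xTaskCreate(), which promotes the
+	   privilege level, calls the real kernel implementation, then demotes
+	   the privilege level again before returning to the caller.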
*/ + #define xTaskCreate MPU_xTaskCreate + #define xTaskCreateStatic MPU_xTaskCreateStatic + #define xTaskCreateRestricted MPU_xTaskCreateRestricted + #define vTaskAllocateMPURegions MPU_vTaskAllocateMPURegions + #define vTaskDelete MPU_vTaskDelete + #define vTaskDelay MPU_vTaskDelay + #define vTaskDelayUntil MPU_vTaskDelayUntil + #define xTaskAbortDelay MPU_xTaskAbortDelay + #define uxTaskPriorityGet MPU_uxTaskPriorityGet + #define eTaskGetState MPU_eTaskGetState + #define vTaskGetInfo MPU_vTaskGetInfo + #define vTaskPrioritySet MPU_vTaskPrioritySet + #define vTaskSuspend MPU_vTaskSuspend + #define vTaskResume MPU_vTaskResume + #define vTaskSuspendAll MPU_vTaskSuspendAll + #define xTaskResumeAll MPU_xTaskResumeAll + #define xTaskGetTickCount MPU_xTaskGetTickCount + #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks + #define pcTaskGetName MPU_pcTaskGetName + #define xTaskGetHandle MPU_xTaskGetHandle + #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark + #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2 + #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag + #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag + #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer + #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer + #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook + #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle + #define uxTaskGetSystemState MPU_uxTaskGetSystemState + #define vTaskList MPU_vTaskList + #define vTaskGetRunTimeStats MPU_vTaskGetRunTimeStats + #define xTaskGetIdleRunTimeCounter MPU_xTaskGetIdleRunTimeCounter + #define xTaskGenericNotify MPU_xTaskGenericNotify + #define xTaskNotifyWait MPU_xTaskNotifyWait + #define ulTaskNotifyTake MPU_ulTaskNotifyTake + #define xTaskNotifyStateClear MPU_xTaskNotifyStateClear + + #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle + #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState + #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut + #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState + + /* Map standard queue.h API functions to the MPU equivalents. 
*/ + #define xQueueGenericSend MPU_xQueueGenericSend + #define xQueueReceive MPU_xQueueReceive + #define xQueuePeek MPU_xQueuePeek + #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake + #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting + #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable + #define vQueueDelete MPU_vQueueDelete + #define xQueueCreateMutex MPU_xQueueCreateMutex + #define xQueueCreateMutexStatic MPU_xQueueCreateMutexStatic + #define xQueueCreateCountingSemaphore MPU_xQueueCreateCountingSemaphore + #define xQueueCreateCountingSemaphoreStatic MPU_xQueueCreateCountingSemaphoreStatic + #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder + #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive + #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive + #define xQueueGenericCreate MPU_xQueueGenericCreate + #define xQueueGenericCreateStatic MPU_xQueueGenericCreateStatic + #define xQueueCreateSet MPU_xQueueCreateSet + #define xQueueAddToSet MPU_xQueueAddToSet + #define xQueueRemoveFromSet MPU_xQueueRemoveFromSet + #define xQueueSelectFromSet MPU_xQueueSelectFromSet + #define xQueueGenericReset MPU_xQueueGenericReset + + #if( configQUEUE_REGISTRY_SIZE > 0 ) + #define vQueueAddToRegistry MPU_vQueueAddToRegistry + #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue + #define pcQueueGetName MPU_pcQueueGetName + #endif + + /* Map standard timer.h API functions to the MPU equivalents. */ + #define xTimerCreate MPU_xTimerCreate + #define xTimerCreateStatic MPU_xTimerCreateStatic + #define pvTimerGetTimerID MPU_pvTimerGetTimerID + #define vTimerSetTimerID MPU_vTimerSetTimerID + #define xTimerIsTimerActive MPU_xTimerIsTimerActive + #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle + #define xTimerPendFunctionCall MPU_xTimerPendFunctionCall + #define pcTimerGetName MPU_pcTimerGetName + #define vTimerSetReloadMode MPU_vTimerSetReloadMode + #define xTimerGetPeriod MPU_xTimerGetPeriod + #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime + #define xTimerGenericCommand MPU_xTimerGenericCommand + + /* Map standard event_group.h API functions to the MPU equivalents. */ + #define xEventGroupCreate MPU_xEventGroupCreate + #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic + #define xEventGroupWaitBits MPU_xEventGroupWaitBits + #define xEventGroupClearBits MPU_xEventGroupClearBits + #define xEventGroupSetBits MPU_xEventGroupSetBits + #define xEventGroupSync MPU_xEventGroupSync + #define vEventGroupDelete MPU_vEventGroupDelete + + /* Map standard message/stream_buffer.h API functions to the MPU + equivalents. 
*/ + #define xStreamBufferSend MPU_xStreamBufferSend + #define xStreamBufferReceive MPU_xStreamBufferReceive + #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes + #define vStreamBufferDelete MPU_vStreamBufferDelete + #define xStreamBufferIsFull MPU_xStreamBufferIsFull + #define xStreamBufferIsEmpty MPU_xStreamBufferIsEmpty + #define xStreamBufferReset MPU_xStreamBufferReset + #define xStreamBufferSpacesAvailable MPU_xStreamBufferSpacesAvailable + #define xStreamBufferBytesAvailable MPU_xStreamBufferBytesAvailable + #define xStreamBufferSetTriggerLevel MPU_xStreamBufferSetTriggerLevel + #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate + #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic + + + /* Remove the privileged function macro, but keep the PRIVILEGED_DATA + macro so applications can place data in privileged access sections + (useful when using statically allocated objects). */ + #define PRIVILEGED_FUNCTION + #define PRIVILEGED_DATA __attribute__((section("privileged_data"))) + #define FREERTOS_SYSTEM_CALL + + #else /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */ + + /* Ensure API functions go in the privileged execution section. */ + #define PRIVILEGED_FUNCTION __attribute__((section("privileged_functions"))) + #define PRIVILEGED_DATA __attribute__((section("privileged_data"))) + #define FREERTOS_SYSTEM_CALL __attribute__((section( "freertos_system_calls"))) + + #endif /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */ + +#else /* portUSING_MPU_WRAPPERS */ + + #define PRIVILEGED_FUNCTION + #define PRIVILEGED_DATA + #define FREERTOS_SYSTEM_CALL + #define portUSING_MPU_WRAPPERS 0 + +#endif /* portUSING_MPU_WRAPPERS */ + + +#endif /* MPU_WRAPPERS_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h new file mode 100644 index 000000000..59e816947 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h @@ -0,0 +1,181 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/*----------------------------------------------------------- + * Portable layer API. Each function must be defined for each port. 
+ *----------------------------------------------------------*/ + +#ifndef PORTABLE_H +#define PORTABLE_H + +/* Each FreeRTOS port has a unique portmacro.h header file. Originally a +pre-processor definition was used to ensure the pre-processor found the correct +portmacro.h file for the port being used. That scheme was deprecated in favour +of setting the compiler's include path such that it found the correct +portmacro.h file - removing the need for the constant and allowing the +portmacro.h file to be located anywhere in relation to the port being used. +Purely for reasons of backward compatibility the old method is still valid, but +to make it clear that new projects should not use it, support for the port +specific constants has been moved into the deprecated_definitions.h header +file. */ +#include "deprecated_definitions.h" + +/* If portENTER_CRITICAL is not defined then including deprecated_definitions.h +did not result in a portmacro.h header file being included - and it should be +included here. In this case the path to the correct portmacro.h header file +must be set in the compiler's include path. */ +#ifndef portENTER_CRITICAL + #include "portmacro.h" +#endif + +#if portBYTE_ALIGNMENT == 32 + #define portBYTE_ALIGNMENT_MASK ( 0x001f ) +#endif + +#if portBYTE_ALIGNMENT == 16 + #define portBYTE_ALIGNMENT_MASK ( 0x000f ) +#endif + +#if portBYTE_ALIGNMENT == 8 + #define portBYTE_ALIGNMENT_MASK ( 0x0007 ) +#endif + +#if portBYTE_ALIGNMENT == 4 + #define portBYTE_ALIGNMENT_MASK ( 0x0003 ) +#endif + +#if portBYTE_ALIGNMENT == 2 + #define portBYTE_ALIGNMENT_MASK ( 0x0001 ) +#endif + +#if portBYTE_ALIGNMENT == 1 + #define portBYTE_ALIGNMENT_MASK ( 0x0000 ) +#endif + +#ifndef portBYTE_ALIGNMENT_MASK + #error "Invalid portBYTE_ALIGNMENT definition" +#endif + +#ifndef portNUM_CONFIGURABLE_REGIONS + #define portNUM_CONFIGURABLE_REGIONS 1 +#endif + +#ifndef portHAS_STACK_OVERFLOW_CHECKING + #define portHAS_STACK_OVERFLOW_CHECKING 0 +#endif + +#ifndef portARCH_NAME + #define portARCH_NAME NULL +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include "mpu_wrappers.h" + +/* + * Setup the stack of a new task so it is ready to be placed under the + * scheduler control. The registers have to be placed on the stack in + * the order that the port expects to find them. + * + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; + #else + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; + #endif +#else + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION; + #else + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION; + #endif +#endif + +/* Used by heap_5.c. */ +typedef struct HeapRegion +{ + uint8_t *pucStartAddress; + size_t xSizeInBytes; +} HeapRegion_t; + +/* + * Used to define multiple heap regions for use by heap_5.c. This function + * must be called before any calls to pvPortMalloc() - not creating a task, + * queue, semaphore, mutex, software timer, event group, etc. will result in + * pvPortMalloc being called. 
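+ *
+ * As a minimal illustrative sketch (the buffer name and size below are
+ * invented for illustration and are not taken from any port or demo):
+ *
+ *     static uint8_t ucHeap[ 0x10000 ];
+ *     const HeapRegion_t xHeapRegions[] =
+ *     {
+ *         { ucHeap, sizeof( ucHeap ) },
+ *         { NULL, 0 } // Terminates the array.
+ *     };
+ *     vPortDefineHeapRegions( xHeapRegions );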
+ * + * pxHeapRegions passes in an array of HeapRegion_t structures - each of which + * defines a region of memory that can be used as the heap. The array is + * terminated by a HeapRegions_t structure that has a size of 0. The region + * with the lowest start address must appear first in the array. + */ +void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) PRIVILEGED_FUNCTION; + + +/* + * Map to the memory management routines required for the port. + */ +void *pvPortMalloc( size_t xSize ) PRIVILEGED_FUNCTION; +void vPortFree( void *pv ) PRIVILEGED_FUNCTION; +void vPortInitialiseBlocks( void ) PRIVILEGED_FUNCTION; +size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION; +size_t xPortGetMinimumEverFreeHeapSize( void ) PRIVILEGED_FUNCTION; + +/* + * Setup the hardware ready for the scheduler to take control. This generally + * sets up a tick interrupt and sets timers for the correct tick frequency. + */ +BaseType_t xPortStartScheduler( void ) PRIVILEGED_FUNCTION; + +/* + * Undo any hardware/ISR setup that was performed by xPortStartScheduler() so + * the hardware is left in its original condition after the scheduler stops + * executing. + */ +void vPortEndScheduler( void ) PRIVILEGED_FUNCTION; + +/* + * The structures and methods of manipulating the MPU are contained within the + * port layer. + * + * Fills the xMPUSettings structure with the memory region information + * contained in xRegions. + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + struct xMEMORY_REGION; + void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION * const xRegions, StackType_t *pxBottomOfStack, uint32_t ulStackDepth ) PRIVILEGED_FUNCTION; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* PORTABLE_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h new file mode 100644 index 000000000..e0458619e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h @@ -0,0 +1,124 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef PROJDEFS_H +#define PROJDEFS_H + +/* + * Defines the prototype to which task functions must conform. 
Defined in this + * file to ensure the type is known before portable.h is included. + */ +typedef void (*TaskFunction_t)( void * ); + +/* Converts a time in milliseconds to a time in ticks. This macro can be +overridden by a macro of the same name defined in FreeRTOSConfig.h in case the +definition here is not suitable for your application. */ +#ifndef pdMS_TO_TICKS + #define pdMS_TO_TICKS( xTimeInMs ) ( ( TickType_t ) ( ( ( TickType_t ) ( xTimeInMs ) * ( TickType_t ) configTICK_RATE_HZ ) / ( TickType_t ) 1000 ) ) +#endif + +#define pdFALSE ( ( BaseType_t ) 0 ) +#define pdTRUE ( ( BaseType_t ) 1 ) + +#define pdPASS ( pdTRUE ) +#define pdFAIL ( pdFALSE ) +#define errQUEUE_EMPTY ( ( BaseType_t ) 0 ) +#define errQUEUE_FULL ( ( BaseType_t ) 0 ) + +/* FreeRTOS error definitions. */ +#define errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY ( -1 ) +#define errQUEUE_BLOCKED ( -4 ) +#define errQUEUE_YIELD ( -5 ) + +/* Macros used for basic data corruption checks. */ +#ifndef configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES + #define configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES 0 +#endif + +#if( configUSE_16_BIT_TICKS == 1 ) + #define pdINTEGRITY_CHECK_VALUE 0x5a5a +#else + #define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5aUL +#endif + +/* The following errno values are used by FreeRTOS+ components, not FreeRTOS +itself. */ +#define pdFREERTOS_ERRNO_NONE 0 /* No errors */ +#define pdFREERTOS_ERRNO_ENOENT 2 /* No such file or directory */ +#define pdFREERTOS_ERRNO_EINTR 4 /* Interrupted system call */ +#define pdFREERTOS_ERRNO_EIO 5 /* I/O error */ +#define pdFREERTOS_ERRNO_ENXIO 6 /* No such device or address */ +#define pdFREERTOS_ERRNO_EBADF 9 /* Bad file number */ +#define pdFREERTOS_ERRNO_EAGAIN 11 /* No more processes */ +#define pdFREERTOS_ERRNO_EWOULDBLOCK 11 /* Operation would block */ +#define pdFREERTOS_ERRNO_ENOMEM 12 /* Not enough memory */ +#define pdFREERTOS_ERRNO_EACCES 13 /* Permission denied */ +#define pdFREERTOS_ERRNO_EFAULT 14 /* Bad address */ +#define pdFREERTOS_ERRNO_EBUSY 16 /* Mount device busy */ +#define pdFREERTOS_ERRNO_EEXIST 17 /* File exists */ +#define pdFREERTOS_ERRNO_EXDEV 18 /* Cross-device link */ +#define pdFREERTOS_ERRNO_ENODEV 19 /* No such device */ +#define pdFREERTOS_ERRNO_ENOTDIR 20 /* Not a directory */ +#define pdFREERTOS_ERRNO_EISDIR 21 /* Is a directory */ +#define pdFREERTOS_ERRNO_EINVAL 22 /* Invalid argument */ +#define pdFREERTOS_ERRNO_ENOSPC 28 /* No space left on device */ +#define pdFREERTOS_ERRNO_ESPIPE 29 /* Illegal seek */ +#define pdFREERTOS_ERRNO_EROFS 30 /* Read only file system */ +#define pdFREERTOS_ERRNO_EUNATCH 42 /* Protocol driver not attached */ +#define pdFREERTOS_ERRNO_EBADE 50 /* Invalid exchange */ +#define pdFREERTOS_ERRNO_EFTYPE 79 /* Inappropriate file type or format */ +#define pdFREERTOS_ERRNO_ENMFILE 89 /* No more files */ +#define pdFREERTOS_ERRNO_ENOTEMPTY 90 /* Directory not empty */ +#define pdFREERTOS_ERRNO_ENAMETOOLONG 91 /* File or path name too long */ +#define pdFREERTOS_ERRNO_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define pdFREERTOS_ERRNO_ENOBUFS 105 /* No buffer space available */ +#define pdFREERTOS_ERRNO_ENOPROTOOPT 109 /* Protocol not available */ +#define pdFREERTOS_ERRNO_EADDRINUSE 112 /* Address already in use */ +#define pdFREERTOS_ERRNO_ETIMEDOUT 116 /* Connection timed out */ +#define pdFREERTOS_ERRNO_EINPROGRESS 119 /* Connection already in progress */ +#define pdFREERTOS_ERRNO_EALREADY 120 /* Socket already connected */ +#define pdFREERTOS_ERRNO_EADDRNOTAVAIL 125 /* Address not available */ +#define 
pdFREERTOS_ERRNO_EISCONN 127 /* Socket is already connected */ +#define pdFREERTOS_ERRNO_ENOTCONN 128 /* Socket is not connected */ +#define pdFREERTOS_ERRNO_ENOMEDIUM 135 /* No medium inserted */ +#define pdFREERTOS_ERRNO_EILSEQ 138 /* An invalid UTF-16 sequence was encountered. */ +#define pdFREERTOS_ERRNO_ECANCELED 140 /* Operation canceled. */ + +/* The following endian values are used by FreeRTOS+ components, not FreeRTOS +itself. */ +#define pdFREERTOS_LITTLE_ENDIAN 0 +#define pdFREERTOS_BIG_ENDIAN 1 + +/* Re-defining endian values for generic naming. */ +#define pdLITTLE_ENDIAN pdFREERTOS_LITTLE_ENDIAN +#define pdBIG_ENDIAN pdFREERTOS_BIG_ENDIAN + + +#endif /* PROJDEFS_H */ + + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h new file mode 100644 index 000000000..3b9da937a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h @@ -0,0 +1,1655 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef QUEUE_H +#define QUEUE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h" must appear in source files before "include queue.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include "task.h" + +/** + * Type by which queues are referenced. For example, a call to xQueueCreate() + * returns an QueueHandle_t variable that can then be used as a parameter to + * xQueueSend(), xQueueReceive(), etc. + */ +struct QueueDefinition; /* Using old naming convention so as not to break kernel aware debuggers. */ +typedef struct QueueDefinition * QueueHandle_t; + +/** + * Type by which queue sets are referenced. For example, a call to + * xQueueCreateSet() returns an xQueueSet variable that can then be used as a + * parameter to xQueueSelectFromSet(), xQueueAddToSet(), etc. + */ +typedef struct QueueDefinition * QueueSetHandle_t; + +/** + * Queue sets can contain both queues and semaphores, so the + * QueueSetMemberHandle_t is defined as a type to be used where a parameter or + * return value can be either an QueueHandle_t or an SemaphoreHandle_t. + */ +typedef struct QueueDefinition * QueueSetMemberHandle_t; + +/* For internal use only. 
*/ +#define queueSEND_TO_BACK ( ( BaseType_t ) 0 ) +#define queueSEND_TO_FRONT ( ( BaseType_t ) 1 ) +#define queueOVERWRITE ( ( BaseType_t ) 2 ) + +/* For internal use only. These definitions *must* match those in queue.c. */ +#define queueQUEUE_TYPE_BASE ( ( uint8_t ) 0U ) +#define queueQUEUE_TYPE_SET ( ( uint8_t ) 0U ) +#define queueQUEUE_TYPE_MUTEX ( ( uint8_t ) 1U ) +#define queueQUEUE_TYPE_COUNTING_SEMAPHORE ( ( uint8_t ) 2U ) +#define queueQUEUE_TYPE_BINARY_SEMAPHORE ( ( uint8_t ) 3U ) +#define queueQUEUE_TYPE_RECURSIVE_MUTEX ( ( uint8_t ) 4U ) + +/** + * queue. h + *
+ QueueHandle_t xQueueCreate(
+							  UBaseType_t uxQueueLength,
+							  UBaseType_t uxItemSize
+						  );
+ * 
+ * + * Creates a new queue instance, and returns a handle by which the new queue + * can be referenced. + * + * Internally, within the FreeRTOS implementation, queues use two blocks of + * memory. The first block is used to hold the queue's data structures. The + * second block is used to hold items placed into the queue. If a queue is + * created using xQueueCreate() then both blocks of memory are automatically + * dynamically allocated inside the xQueueCreate() function. (see + * http://www.freertos.org/a00111.html). If a queue is created using + * xQueueCreateStatic() then the application writer must provide the memory that + * will get used by the queue. xQueueCreateStatic() therefore allows a queue to + * be created without using any dynamic memory allocation. + * + * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html + * + * @param uxQueueLength The maximum number of items that the queue can contain. + * + * @param uxItemSize The number of bytes each item in the queue will require. + * Items are queued by copy, not by reference, so this is the number of bytes + * that will be copied for each posted item. Each item on the queue must be + * the same size. + * + * @return If the queue is successfully create then a handle to the newly + * created queue is returned. If the queue cannot be created then 0 is + * returned. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ };
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+	if( xQueue1 == 0 )
+	{
+		// Queue was not created and must not be used.
+	}
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+	if( xQueue2 == 0 )
+	{
+		// Queue was not created and must not be used.
+	}
+
+	// ... Rest of task code.
+ }
+ 
+ * \defgroup xQueueCreate xQueueCreate + * \ingroup QueueManagement + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xQueueCreate( uxQueueLength, uxItemSize ) xQueueGenericCreate( ( uxQueueLength ), ( uxItemSize ), ( queueQUEUE_TYPE_BASE ) ) +#endif + +/** + * queue. h + *
+ QueueHandle_t xQueueCreateStatic(
+							  UBaseType_t uxQueueLength,
+							  UBaseType_t uxItemSize,
+							  uint8_t *pucQueueStorageBuffer,
+							  StaticQueue_t *pxQueueBuffer
+						  );
+ * 
+ * + * Creates a new queue instance, and returns a handle by which the new queue + * can be referenced. + * + * Internally, within the FreeRTOS implementation, queues use two blocks of + * memory. The first block is used to hold the queue's data structures. The + * second block is used to hold items placed into the queue. If a queue is + * created using xQueueCreate() then both blocks of memory are automatically + * dynamically allocated inside the xQueueCreate() function. (see + * http://www.freertos.org/a00111.html). If a queue is created using + * xQueueCreateStatic() then the application writer must provide the memory that + * will get used by the queue. xQueueCreateStatic() therefore allows a queue to + * be created without using any dynamic memory allocation. + * + * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html + * + * @param uxQueueLength The maximum number of items that the queue can contain. + * + * @param uxItemSize The number of bytes each item in the queue will require. + * Items are queued by copy, not by reference, so this is the number of bytes + * that will be copied for each posted item. Each item on the queue must be + * the same size. + * + * @param pucQueueStorageBuffer If uxItemSize is not zero then + * pucQueueStorageBuffer must point to a uint8_t array that is at least large + * enough to hold the maximum number of items that can be in the queue at any + * one time - which is ( uxQueueLength * uxItemsSize ) bytes. If uxItemSize is + * zero then pucQueueStorageBuffer can be NULL. + * + * @param pxQueueBuffer Must point to a variable of type StaticQueue_t, which + * will be used to hold the queue's data structure. + * + * @return If the queue is created then a handle to the created queue is + * returned. If pxQueueBuffer is NULL then NULL is returned. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ };
+
+ #define QUEUE_LENGTH 10
+ #define ITEM_SIZE sizeof( uint32_t )
+
+ // xQueueBuffer will hold the queue structure.
+ StaticQueue_t xQueueBuffer;
+
+ // ucQueueStorage will hold the items posted to the queue.  Must be at least
+ // [(queue length) * ( queue item size)] bytes long.
+ uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreateStatic( QUEUE_LENGTH, // The number of items the queue can hold.
+							ITEM_SIZE,	  // The size of each item in the queue
+							&( ucQueueStorage[ 0 ] ), // The buffer that will hold the items in the queue.
+							&xQueueBuffer ); // The buffer that will hold the queue structure.
+
+	// The queue is guaranteed to be created successfully as no dynamic memory
+	// allocation is used.  Therefore xQueue1 is now a handle to a valid queue.
+
+	// ... Rest of task code.
+ }
+ 
+ * \defgroup xQueueCreateStatic xQueueCreateStatic + * \ingroup QueueManagement + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xQueueCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxQueueBuffer ) xQueueGenericCreateStatic( ( uxQueueLength ), ( uxItemSize ), ( pucQueueStorage ), ( pxQueueBuffer ), ( queueQUEUE_TYPE_BASE ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * queue. h + *
+ BaseType_t xQueueSendToFront(
+								   QueueHandle_t	xQueue,
+								   const void		*pvItemToQueue,
+								   TickType_t		xTicksToWait
+							   );
+ * 
+ * + * Post an item to the front of a queue. The item is queued by copy, not by + * reference. This function must not be called from an interrupt service + * routine. See xQueueSendFromISR () for an alternative which may be used + * in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the + * queue is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+	// ...
+
+	if( xQueue1 != 0 )
+	{
+		// Send a uint32_t.  Wait for 10 ticks for space to become
+		// available if necessary.
+		if( xQueueSendToFront( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
+		{
+			// Failed to post the message, even after 10 ticks.
+		}
+	}
+
+	if( xQueue2 != 0 )
+	{
+		// Send a pointer to a struct AMessage object.  Don't block if the
+		// queue is already full.
+		pxMessage = & xMessage;
+		xQueueSendToFront( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
+	}
+
+	// ... Rest of task code.
+ }
+ 
+ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSendToFront( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_FRONT ) + +/** + * queue. h + *
+ BaseType_t xQueueSendToBack(
+								   QueueHandle_t	xQueue,
+								   const void		*pvItemToQueue,
+								   TickType_t		xTicksToWait
+							   );
+ * 
+ * + * This is a macro that calls xQueueGenericSend(). + * + * Post an item to the back of a queue. The item is queued by copy, not by + * reference. This function must not be called from an interrupt service + * routine. See xQueueSendFromISR () for an alternative which may be used + * in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the queue + * is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+	// ...
+
+	if( xQueue1 != 0 )
+	{
+		// Send a uint32_t.  Wait for 10 ticks for space to become
+		// available if necessary.
+		if( xQueueSendToBack( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
+		{
+			// Failed to post the message, even after 10 ticks.
+		}
+	}
+
+	if( xQueue2 != 0 )
+	{
+		// Send a pointer to a struct AMessage object.  Don't block if the
+		// queue is already full.
+		pxMessage = & xMessage;
+		xQueueSendToBack( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
+	}
+
+	// ... Rest of task code.
+ }
+ 
+ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSendToBack( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK ) + +/** + * queue. h + *
+ BaseType_t xQueueSend(
+							  QueueHandle_t xQueue,
+							  const void * pvItemToQueue,
+							  TickType_t xTicksToWait
+						 );
+ * 
+ * + * This is a macro that calls xQueueGenericSend(). It is included for + * backward compatibility with versions of FreeRTOS.org that did not + * include the xQueueSendToFront() and xQueueSendToBack() macros. It is + * equivalent to xQueueSendToBack(). + * + * Post an item on a queue. The item is queued by copy, not by reference. + * This function must not be called from an interrupt service routine. + * See xQueueSendFromISR () for an alternative which may be used in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the + * queue is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+	// ...
+
+	if( xQueue1 != 0 )
+	{
+		// Send a uint32_t.  Wait for 10 ticks for space to become
+		// available if necessary.
+		if( xQueueSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
+		{
+			// Failed to post the message, even after 10 ticks.
+		}
+	}
+
+	if( xQueue2 != 0 )
+	{
+		// Send a pointer to a struct AMessage object.  Don't block if the
+		// queue is already full.
+		pxMessage = & xMessage;
+		xQueueSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
+	}
+
+	// ... Rest of task code.
+ }
+ 
+ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSend( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK ) + +/** + * queue. h + *
+ BaseType_t xQueueOverwrite(
+							  QueueHandle_t xQueue,
+							  const void * pvItemToQueue
+						 );
+ * 
+ * + * Only for use with queues that have a length of one - so the queue is either + * empty or full. + * + * Post an item on a queue. If the queue is already full then overwrite the + * value held in the queue. The item is queued by copy, not by reference. + * + * This function must not be called from an interrupt service routine. + * See xQueueOverwriteFromISR () for an alternative which may be used in an ISR. + * + * @param xQueue The handle of the queue to which the data is being sent. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @return xQueueOverwrite() is a macro that calls xQueueGenericSend(), and + * therefore has the same return values as xQueueSendToFront(). However, pdPASS + * is the only value that can be returned because xQueueOverwrite() will write + * to the queue even when the queue is already full. + * + * Example usage: +
+
+ void vFunction( void *pvParameters )
+ {
+ QueueHandle_t xQueue;
+ uint32_t ulVarToSend, ulValReceived;
+
+	// Create a queue to hold one uint32_t value.  It is strongly
+	// recommended *not* to use xQueueOverwrite() on queues that can
+	// contain more than one value, and doing so will trigger an assertion
+	// if configASSERT() is defined.
+	xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
+
+	// Write the value 10 to the queue using xQueueOverwrite().
+	ulVarToSend = 10;
+	xQueueOverwrite( xQueue, &ulVarToSend );
+
+	// Peeking the queue should now return 10, but leave the value 10 in
+	// the queue.  A block time of zero is used as it is known that the
+	// queue holds a value.
+	ulValReceived = 0;
+	xQueuePeek( xQueue, &ulValReceived, 0 );
+
+	if( ulValReceived != 10 )
+	{
+		// Error unless the item was removed by a different task.
+	}
+
+	// The queue is still full.  Use xQueueOverwrite() to overwrite the
+	// value held in the queue with 100.
+	ulVarToSend = 100;
+	xQueueOverwrite( xQueue, &ulVarToSend );
+
+	// This time read from the queue, leaving the queue empty once more.
+	// A block time of 0 is used again.
+	xQueueReceive( xQueue, &ulValReceived, 0 );
+
+	// The value read should be the last value written, even though the
+	// queue was already full when the value was written.
+	if( ulValReceived != 100 )
+	{
+		// Error!
+	}
+
+	// ...
+}
+ 
+ * \defgroup xQueueOverwrite xQueueOverwrite + * \ingroup QueueManagement + */ +#define xQueueOverwrite( xQueue, pvItemToQueue ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), 0, queueOVERWRITE ) + + +/** + * queue. h + *
+ BaseType_t xQueueGenericSend(
+									QueueHandle_t xQueue,
+									const void * pvItemToQueue,
+									TickType_t xTicksToWait,
+									BaseType_t xCopyPosition
+								);
+ * 
+ * + * It is preferred that the macros xQueueSend(), xQueueSendToFront() and + * xQueueSendToBack() are used in place of calling this function directly. + * + * Post an item on a queue. The item is queued by copy, not by reference. + * This function must not be called from an interrupt service routine. + * See xQueueSendFromISR () for an alternative which may be used in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the + * queue is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the + * item at the back of the queue, or queueSEND_TO_FRONT to place the item + * at the front of the queue (for high priority messages). + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+	// ...
+
+	if( xQueue1 != 0 )
+	{
+		// Send a uint32_t.  Wait for 10 ticks for space to become
+		// available if necessary.
+		if( xQueueGenericSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10, queueSEND_TO_BACK ) != pdPASS )
+		{
+			// Failed to post the message, even after 10 ticks.
+		}
+	}
+
+	if( xQueue2 != 0 )
+	{
+		// Send a pointer to a struct AMessage object.  Don't block if the
+		// queue is already full.
+		pxMessage = & xMessage;
+		xQueueGenericSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0, queueSEND_TO_BACK );
+	}
+
+	// ... Rest of task code.
+ }
+ 
+ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
+ BaseType_t xQueuePeek(
+							 QueueHandle_t xQueue,
+							 void * const pvBuffer,
+							 TickType_t xTicksToWait
+						 );
+ * + * Receive an item from a queue without removing the item from the queue. + * The item is received by copy so a buffer of adequate size must be + * provided. The number of bytes copied into the buffer was defined when + * the queue was created. + * + * Successfully received items remain on the queue so will be returned again + * by the next call, or a call to xQueueReceive(). + * + * This macro must not be used in an interrupt service routine. See + * xQueuePeekFromISR() for an alternative that can be called from an interrupt + * service routine. + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for an item to receive should the queue be empty at the time + * of the call. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * xQueuePeek() will return immediately if xTicksToWait is 0 and the queue + * is empty. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ QueueHandle_t xQueue;
+
+ // Task to create a queue and post a value.
+ void vATask( void *pvParameters )
+ {
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
+	if( xQueue == 0 )
+	{
+		// Failed to create the queue.
+	}
+
+	// ...
+
+	// Send a pointer to a struct AMessage object.  Don't block if the
+	// queue is already full.
+	pxMessage = & xMessage;
+	xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
+
+	// ... Rest of task code.
+ }
+
+ // Task to peek the data from the queue.
+ void vADifferentTask( void *pvParameters )
+ {
+ struct AMessage *pxRxedMessage;
+
+	if( xQueue != 0 )
+	{
+		// Peek a message on the created queue.  Block for 10 ticks if a
+		// message is not immediately available.
+		if( xQueuePeek( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
+		{
+			// pxRxedMessage now points to the struct AMessage variable posted
+			// by vATask, but the item still remains on the queue.
+		}
+	}
+
+	// ... Rest of task code.
+ }
+ 
+ * \defgroup xQueuePeek xQueuePeek + * \ingroup QueueManagement + */ +BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
+ BaseType_t xQueuePeekFromISR(
+									QueueHandle_t xQueue,
+									void *pvBuffer
+								);
+ * + * A version of xQueuePeek() that can be called from an interrupt service + * routine (ISR). + * + * Receive an item from a queue without removing the item from the queue. + * The item is received by copy so a buffer of adequate size must be + * provided. The number of bytes copied into the buffer was defined when + * the queue was created. + * + * Successfully received items remain on the queue so will be returned again + * by the next call, or a call to xQueueReceive(). + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * \defgroup xQueuePeekFromISR xQueuePeekFromISR + * \ingroup QueueManagement + */ +BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
+ BaseType_t xQueueReceive(
+								 QueueHandle_t xQueue,
+								 void *pvBuffer,
+								 TickType_t xTicksToWait
+							);
+ * + * Receive an item from a queue. The item is received by copy so a buffer of + * adequate size must be provided. The number of bytes copied into the buffer + * was defined when the queue was created. + * + * Successfully received items are removed from the queue. + * + * This function must not be used in an interrupt service routine. See + * xQueueReceiveFromISR for an alternative that can. + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for an item to receive should the queue be empty at the time + * of the call. xQueueReceive() will return immediately if xTicksToWait + * is zero and the queue is empty. The time is defined in tick periods so the + * constant portTICK_PERIOD_MS should be used to convert to real time if this is + * required. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ QueueHandle_t xQueue;
+
+ // Task to create a queue and post a value.
+ void vATask( void *pvParameters )
+ {
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
+	if( xQueue == 0 )
+	{
+		// Failed to create the queue.
+	}
+
+	// ...
+
+	// Send a pointer to a struct AMessage object.  Don't block if the
+	// queue is already full.
+	pxMessage = & xMessage;
+	xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
+
+	// ... Rest of task code.
+ }
+
+ // Task to receive from the queue.
+ void vADifferentTask( void *pvParameters )
+ {
+ struct AMessage *pxRxedMessage;
+
+	if( xQueue != 0 )
+	{
+		// Receive a message on the created queue.  Block for 10 ticks if a
+		// message is not immediately available.
+		if( xQueueReceive( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
+		{
+			// pxRxedMessage now points to the struct AMessage variable posted
+			// by vATask.
+		}
+	}
+
+	// ... Rest of task code.
+ }
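+
+ // Illustrative sketch (the task name is made up): uxQueueMessagesWaiting()
+ // can be used to poll the queue so xQueueReceive() is only called when data
+ // is known to be available, avoiding any block time.
+ void vAPollingTask( void *pvParameters )
+ {
+ struct AMessage *pxRxedMessage;
+
+	if( ( xQueue != 0 ) && ( uxQueueMessagesWaiting( xQueue ) > ( UBaseType_t ) 0 ) )
+	{
+		// Data is already waiting, so a zero block time is sufficient.
+		xQueueReceive( xQueue, &( pxRxedMessage ), ( TickType_t ) 0 );
+	}
+
+	// ... Rest of task code.
+ }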
+ 
+ * \defgroup xQueueReceive xQueueReceive
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ *
+ * UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue );
+ *
+ * Return the number of messages stored in a queue.
+ *
+ * @param xQueue A handle to the queue being queried.
+ *
+ * @return The number of messages available in the queue.
+ *
+ * \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting
+ * \ingroup QueueManagement
+ */
+UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ *
+ * UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue );
+ *
+ * Return the number of free spaces available in a queue. This is equal to the
+ * number of items that can be sent to the queue before the queue becomes full
+ * if no items are removed.
+ *
+ * @param xQueue A handle to the queue being queried.
+ *
+ * @return The number of spaces available in the queue.
+ *
+ * \defgroup uxQueueSpacesAvailable uxQueueSpacesAvailable
+ * \ingroup QueueManagement
+ */
+UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ *
+ * void vQueueDelete( QueueHandle_t xQueue );
+ *
+ * Delete a queue - freeing all the memory allocated for storing of items
+ * placed on the queue.
+ *
+ * @param xQueue A handle to the queue to be deleted.
+ *
+ * \defgroup vQueueDelete vQueueDelete
+ * \ingroup QueueManagement
+ */
+void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueSendToFrontFromISR(
+										 QueueHandle_t xQueue,
+										 const void *pvItemToQueue,
+										 BaseType_t *pxHigherPriorityTaskWoken
+									  );
+ 
+ * + * This is a macro that calls xQueueGenericSendFromISR(). + * + * Post an item to the front of a queue. It is safe to use this macro from + * within an interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueSendToFrontFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueSendToFromFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): +
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+	// We have not woken a task at the start of the ISR.
+	xHigherPriorityTaskWoken = pdFALSE;
+
+	// Loop until the buffer is empty.
+	do
+	{
+		// Obtain a byte from the buffer.
+		cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+		// Post the byte.
+		xQueueSendToFrontFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+	} while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+	// Now the buffer is empty we can switch context if necessary.
+	if( xHigherPriorityTaskWoken )
+	{
+		taskYIELD ();
+	}
+ }
+ 
+ *
+ * \defgroup xQueueSendToFrontFromISR xQueueSendToFrontFromISR
+ * \ingroup QueueManagement
+ */
+#define xQueueSendToFrontFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_FRONT )
+
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueSendToBackFromISR(
+										 QueueHandle_t xQueue,
+										 const void *pvItemToQueue,
+										 BaseType_t *pxHigherPriorityTaskWoken
+									  );
+ 
+ * + * This is a macro that calls xQueueGenericSendFromISR(). + * + * Post an item to the back of a queue. It is safe to use this macro from + * within an interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueSendToBackFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueSendToBackFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): +
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+	// We have not woken a task at the start of the ISR.
+	xHigherPriorityTaskWoken = pdFALSE;
+
+	// Loop until the buffer is empty.
+	do
+	{
+		// Obtain a byte from the buffer.
+		cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+		// Post the byte.
+		xQueueSendToBackFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+	} while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+	// Now the buffer is empty we can switch context if necessary.
+	if( xHigherPriorityTaskWoken )
+	{
+		taskYIELD ();
+	}
+ }
+ 
+ *
+ * \defgroup xQueueSendToBackFromISR xQueueSendToBackFromISR
+ * \ingroup QueueManagement
+ */
+#define xQueueSendToBackFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueOverwriteFromISR(
+							  QueueHandle_t xQueue,
+							  const void * pvItemToQueue,
+							  BaseType_t *pxHigherPriorityTaskWoken
+						 );
+ * 
+ * + * A version of xQueueOverwrite() that can be used in an interrupt service + * routine (ISR). + * + * Only for use with queues that can hold a single item - so the queue is either + * empty or full. + * + * Post an item on a queue. If the queue is already full then overwrite the + * value held in the queue. The item is queued by copy, not by reference. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueOverwriteFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueOverwriteFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return xQueueOverwriteFromISR() is a macro that calls + * xQueueGenericSendFromISR(), and therefore has the same return values as + * xQueueSendToFrontFromISR(). However, pdPASS is the only value that can be + * returned because xQueueOverwriteFromISR() will write to the queue even when + * the queue is already full. + * + * Example usage: +
+
+ QueueHandle_t xQueue;
+
+ void vFunction( void *pvParameters )
+ {
+ 	// Create a queue to hold one uint32_t value.  It is strongly
+	// recommended *not* to use xQueueOverwriteFromISR() on queues that can
+	// contain more than one value, and doing so will trigger an assertion
+	// if configASSERT() is defined.
+	xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
+}
+
+void vAnInterruptHandler( void )
+{
+// xHigherPriorityTaskWoken must be set to pdFALSE before it is used.
+BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+uint32_t ulVarToSend, ulValReceived;
+
+	// Write the value 10 to the queue using xQueueOverwriteFromISR().
+	ulVarToSend = 10;
+	xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken );
+
+	// The queue is full, but calling xQueueOverwriteFromISR() again will still
+	// pass because the value held in the queue will be overwritten with the
+	// new value.
+	ulVarToSend = 100;
+	xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken );
+
+	// Reading from the queue will now return 100.
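+	// For example (illustrative sketch, reusing the ulValReceived variable
+	// declared above), the value can be inspected from this ISR without
+	// removing it from the queue:
+	( void ) xQueuePeekFromISR( xQueue, &ulValReceived );
+	// ulValReceived now holds 100.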
+
+	// ...
+
+	if( xHigherPriorityTaskWoken == pdTRUE )
+	{
+		// Writing to the queue caused a task to unblock and the unblocked task
+		// has a priority higher than or equal to the priority of the currently
+		// executing task (the task this interrupt interrupted).  Perform a context
+		// switch so this interrupt returns directly to the unblocked task.
+		portYIELD_FROM_ISR(); // or portEND_SWITCHING_ISR() depending on the port.
+	}
+}
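+
+// Illustrative sketch (the task name is made up): a task treats the single
+// entry queue as a mailbox, using xQueuePeek() so the latest value written by
+// the ISR remains available to any other reader.
+void vAReaderTask( void *pvParameters )
+{
+uint32_t ulLatestValue;
+
+	for( ;; )
+	{
+		// Wait up to 100 ticks for the ISR to write a value.
+		if( xQueuePeek( xQueue, &ulLatestValue, ( TickType_t ) 100 ) == pdTRUE )
+		{
+			// ulLatestValue holds the most recent value written by
+			// vAnInterruptHandler().
+		}
+	}
+}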
+ 
+ * \defgroup xQueueOverwriteFromISR xQueueOverwriteFromISR
+ * \ingroup QueueManagement
+ */
+#define xQueueOverwriteFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueOVERWRITE )
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueSendFromISR(
+									 QueueHandle_t xQueue,
+									 const void *pvItemToQueue,
+									 BaseType_t *pxHigherPriorityTaskWoken
+								);
+ 
+ * + * This is a macro that calls xQueueGenericSendFromISR(). It is included + * for backward compatibility with versions of FreeRTOS.org that did not + * include the xQueueSendToBackFromISR() and xQueueSendToFrontFromISR() + * macros. + * + * Post an item to the back of a queue. It is safe to use this function from + * within an interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueSendFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueSendFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): +
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+	// We have not woken a task at the start of the ISR.
+	xHigherPriorityTaskWoken = pdFALSE;
+
+	// Loop until the buffer is empty.
+	do
+	{
+		// Obtain a byte from the buffer.
+		cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+		// Post the byte.
+		xQueueSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+	} while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+	// Now the buffer is empty we can switch context if necessary.
+	if( xHigherPriorityTaskWoken )
+	{
+		// Actual macro used here is port specific.
+		portYIELD_FROM_ISR ();
+	}
+ }
+ 
+ *
+ * \defgroup xQueueSendFromISR xQueueSendFromISR
+ * \ingroup QueueManagement
+ */
+#define xQueueSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueGenericSendFromISR(
+										   QueueHandle_t		xQueue,
+										   const	void	*pvItemToQueue,
+										   BaseType_t	*pxHigherPriorityTaskWoken,
+										   BaseType_t	xCopyPosition
+									   );
+ 
+ * + * It is preferred that the macros xQueueSendFromISR(), + * xQueueSendToFrontFromISR() and xQueueSendToBackFromISR() be used in place + * of calling this function directly. xQueueGiveFromISR() is an + * equivalent for use by semaphores that don't actually copy any data. + * + * Post an item on a queue. It is safe to use this function from within an + * interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueGenericSendFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueGenericSendFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the + * item at the back of the queue, or queueSEND_TO_FRONT to place the item + * at the front of the queue (for high priority messages). + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): +
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWokenByPost;
+
+	// We have not woken a task at the start of the ISR.
+	xHigherPriorityTaskWokenByPost = pdFALSE;
+
+	// Loop until the buffer is empty.
+	do
+	{
+		// Obtain a byte from the buffer.
+		cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+		// Post each byte.
+		xQueueGenericSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWokenByPost, queueSEND_TO_BACK );
+
+	} while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+	// Now the buffer is empty we can switch context if necessary.  Note that the
+	// name of the yield function required is port specific.
+	if( xHigherPriorityTaskWokenByPost )
+	{
+		portYIELD_FROM_ISR();
+	}
+ }
+ 
+ *
+ * \defgroup xQueueSendFromISR xQueueSendFromISR
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
+BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueReceiveFromISR(
+									   QueueHandle_t	xQueue,
+									   void	*pvBuffer,
+									   BaseType_t *pxTaskWoken
+								   );
+ * 
+ * + * Receive an item from a queue. It is safe to use this function from within an + * interrupt service routine. + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied. + * + * @param pxTaskWoken A task may be blocked waiting for space to become + * available on the queue. If xQueueReceiveFromISR causes such a task to + * unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will + * remain unchanged. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * Example usage: +
+
+ QueueHandle_t xQueue;
+
+ // Function to create a queue and post some values.
+ void vAFunction( void *pvParameters )
+ {
+ char cValueToPost;
+ const TickType_t xTicksToWait = ( TickType_t )0xff;
+
+	// Create a queue capable of containing 10 characters.
+	xQueue = xQueueCreate( 10, sizeof( char ) );
+	if( xQueue == 0 )
+	{
+		// Failed to create the queue.
+	}
+
+	// ...
+
+	// Post some characters that will be used within an ISR.  If the queue
+	// is full then this task will block for xTicksToWait ticks.
+	cValueToPost = 'a';
+	xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+	cValueToPost = 'b';
+	xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+
+	// ... keep posting characters ... this task may block when the queue
+	// becomes full.
+
+	cValueToPost = 'c';
+	xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+ }
+
+ // ISR that outputs all the characters received on the queue.
+ void vISR_Routine( void )
+ {
+ BaseType_t xTaskWokenByReceive = pdFALSE;
+ char cRxedChar;
+
+	while( xQueueReceiveFromISR( xQueue, ( void * ) &cRxedChar, &xTaskWokenByReceive) )
+	{
+		// A character was received.  Output the character now.
+		vOutputCharacter( cRxedChar );
+
+		// If removing the character from the queue woke the task that was
+		// posting onto the queue xTaskWokenByReceive will have been set to
+		// pdTRUE.  No matter how many times this loop iterates only one
+		// task will be woken.
+	}
+
+	if( xTaskWokenByReceive != pdFALSE )
+	{
+		taskYIELD ();
+	}
+ }
+ 
+ * \defgroup xQueueReceiveFromISR xQueueReceiveFromISR + * \ingroup QueueManagement + */ +BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/* + * Utilities to query queues that are safe to use from an ISR. These utilities + * should be used only from witin an ISR, or within a critical section. + */ +BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + +/* + * The functions defined above are for passing data to and from tasks. The + * functions below are the equivalents for passing data to and from + * co-routines. + * + * These functions are called from the co-routine macro implementation and + * should not be called directly from application code. Instead use the macro + * wrappers defined within croutine.h. + */ +BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken ); +BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxTaskWoken ); +BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait ); +BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait ); + +/* + * For internal use only. Use xSemaphoreCreateMutex(), + * xSemaphoreCreateCounting() or xSemaphoreGetMutexHolder() instead of calling + * these functions directly. + */ +QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) PRIVILEGED_FUNCTION; +BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; +TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; + +/* + * For internal use only. Use xSemaphoreTakeMutexRecursive() or + * xSemaphoreGiveMutexRecursive() instead of calling these functions directly. + */ +BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) PRIVILEGED_FUNCTION; + +/* + * Reset a queue back to its original empty state. The return value is now + * obsolete and is always set to pdPASS. + */ +#define xQueueReset( xQueue ) xQueueGenericReset( xQueue, pdFALSE ) + +/* + * The registry is provided as a means for kernel aware debuggers to + * locate queues, semaphores and mutexes. Call vQueueAddToRegistry() add + * a queue, semaphore or mutex handle to the registry if you want the handle + * to be available to a kernel aware debugger. If you are not using a kernel + * aware debugger then this function can be ignored. + * + * configQUEUE_REGISTRY_SIZE defines the maximum number of handles the + * registry can hold. configQUEUE_REGISTRY_SIZE must be greater than 0 + * within FreeRTOSConfig.h for the registry to be available. 
Its value + * does not effect the number of queues, semaphores and mutexes that can be + * created - just the number that the registry can hold. + * + * @param xQueue The handle of the queue being added to the registry. This + * is the handle returned by a call to xQueueCreate(). Semaphore and mutex + * handles can also be passed in here. + * + * @param pcName The name to be associated with the handle. This is the + * name that the kernel aware debugger will display. The queue registry only + * stores a pointer to the string - so the string must be persistent (global or + * preferably in ROM/Flash), not on the stack. + */ +#if( configQUEUE_REGISTRY_SIZE > 0 ) + void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +#endif + +/* + * The registry is provided as a means for kernel aware debuggers to + * locate queues, semaphores and mutexes. Call vQueueAddToRegistry() add + * a queue, semaphore or mutex handle to the registry if you want the handle + * to be available to a kernel aware debugger, and vQueueUnregisterQueue() to + * remove the queue, semaphore or mutex from the register. If you are not using + * a kernel aware debugger then this function can be ignored. + * + * @param xQueue The handle of the queue being removed from the registry. + */ +#if( configQUEUE_REGISTRY_SIZE > 0 ) + void vQueueUnregisterQueue( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +#endif + +/* + * The queue registry is provided as a means for kernel aware debuggers to + * locate queues, semaphores and mutexes. Call pcQueueGetName() to look + * up and return the name of a queue in the queue registry from the queue's + * handle. + * + * @param xQueue The handle of the queue the name of which will be returned. + * @return If the queue is in the registry then a pointer to the name of the + * queue is returned. If the queue is not in the registry then NULL is + * returned. + */ +#if( configQUEUE_REGISTRY_SIZE > 0 ) + const char *pcQueueGetName( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +#endif + +/* + * Generic version of the function used to creaet a queue using dynamic memory + * allocation. This is called by other functions and macros that create other + * RTOS objects that use the queue structure as their base. + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +#endif + +/* + * Generic version of the function used to creaet a queue using dynamic memory + * allocation. This is called by other functions and macros that create other + * RTOS objects that use the queue structure as their base. + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +#endif + +/* + * Queue sets provide a mechanism to allow a task to block (pend) on a read + * operation from multiple queues or semaphores simultaneously. + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * A queue set must be explicitly created using a call to xQueueCreateSet() + * before it can be used. 
Once created, standard FreeRTOS queues and semaphores + * can be added to the set using calls to xQueueAddToSet(). + * xQueueSelectFromSet() is then used to determine which, if any, of the queues + * or semaphores contained in the set is in a state where a queue read or + * semaphore take operation would be successful. + * + * Note 1: See the documentation on http://wwwFreeRTOS.org/RTOS-queue-sets.html + * for reasons why queue sets are very rarely needed in practice as there are + * simpler methods of blocking on multiple objects. + * + * Note 2: Blocking on a queue set that contains a mutex will not cause the + * mutex holder to inherit the priority of the blocked task. + * + * Note 3: An additional 4 bytes of RAM is required for each space in a every + * queue added to a queue set. Therefore counting semaphores that have a high + * maximum count value should not be added to a queue set. + * + * Note 4: A receive (in the case of a queue) or take (in the case of a + * semaphore) operation must not be performed on a member of a queue set unless + * a call to xQueueSelectFromSet() has first returned a handle to that set member. + * + * @param uxEventQueueLength Queue sets store events that occur on + * the queues and semaphores contained in the set. uxEventQueueLength specifies + * the maximum number of events that can be queued at once. To be absolutely + * certain that events are not lost uxEventQueueLength should be set to the + * total sum of the length of the queues added to the set, where binary + * semaphores and mutexes have a length of 1, and counting semaphores have a + * length set by their maximum count value. Examples: + * + If a queue set is to hold a queue of length 5, another queue of length 12, + * and a binary semaphore, then uxEventQueueLength should be set to + * (5 + 12 + 1), or 18. + * + If a queue set is to hold three binary semaphores then uxEventQueueLength + * should be set to (1 + 1 + 1 ), or 3. + * + If a queue set is to hold a counting semaphore that has a maximum count of + * 5, and a counting semaphore that has a maximum count of 3, then + * uxEventQueueLength should be set to (5 + 3), or 8. + * + * @return If the queue set is created successfully then a handle to the created + * queue set is returned. Otherwise NULL is returned. + */ +QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) PRIVILEGED_FUNCTION; + +/* + * Adds a queue or semaphore to a queue set that was previously created by a + * call to xQueueCreateSet(). + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * Note 1: A receive (in the case of a queue) or take (in the case of a + * semaphore) operation must not be performed on a member of a queue set unless + * a call to xQueueSelectFromSet() has first returned a handle to that set member. + * + * @param xQueueOrSemaphore The handle of the queue or semaphore being added to + * the queue set (cast to an QueueSetMemberHandle_t type). + * + * @param xQueueSet The handle of the queue set to which the queue or semaphore + * is being added. + * + * @return If the queue or semaphore was successfully added to the queue set + * then pdPASS is returned. If the queue could not be successfully added to the + * queue set because it is already a member of a different queue set then pdFAIL + * is returned. + */ +BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + +/* + * Removes a queue or semaphore from a queue set. 
A queue or semaphore can only + * be removed from a set if the queue or semaphore is empty. + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * @param xQueueOrSemaphore The handle of the queue or semaphore being removed + * from the queue set (cast to an QueueSetMemberHandle_t type). + * + * @param xQueueSet The handle of the queue set in which the queue or semaphore + * is included. + * + * @return If the queue or semaphore was successfully removed from the queue set + * then pdPASS is returned. If the queue was not in the queue set, or the + * queue (or semaphore) was not empty, then pdFAIL is returned. + */ +BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + +/* + * xQueueSelectFromSet() selects from the members of a queue set a queue or + * semaphore that either contains data (in the case of a queue) or is available + * to take (in the case of a semaphore). xQueueSelectFromSet() effectively + * allows a task to block (pend) on a read operation on all the queues and + * semaphores in a queue set simultaneously. + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * Note 1: See the documentation on http://wwwFreeRTOS.org/RTOS-queue-sets.html + * for reasons why queue sets are very rarely needed in practice as there are + * simpler methods of blocking on multiple objects. + * + * Note 2: Blocking on a queue set that contains a mutex will not cause the + * mutex holder to inherit the priority of the blocked task. + * + * Note 3: A receive (in the case of a queue) or take (in the case of a + * semaphore) operation must not be performed on a member of a queue set unless + * a call to xQueueSelectFromSet() has first returned a handle to that set member. + * + * @param xQueueSet The queue set on which the task will (potentially) block. + * + * @param xTicksToWait The maximum time, in ticks, that the calling task will + * remain in the Blocked state (with other tasks executing) to wait for a member + * of the queue set to be ready for a successful queue read or semaphore take + * operation. + * + * @return xQueueSelectFromSet() will return the handle of a queue (cast to + * a QueueSetMemberHandle_t type) contained in the queue set that contains data, + * or the handle of a semaphore (cast to a QueueSetMemberHandle_t type) contained + * in the queue set that is available, or NULL if no such queue or semaphore + * exists before before the specified block time expires. + */ +QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/* + * A version of xQueueSelectFromSet() that can be used from an ISR. + */ +QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + +/* Not public API functions. 
*/ +void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION; +BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) PRIVILEGED_FUNCTION; +void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) PRIVILEGED_FUNCTION; +UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + + +#ifdef __cplusplus +} +#endif + +#endif /* QUEUE_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h new file mode 100644 index 000000000..2c106eac0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h @@ -0,0 +1,1140 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef SEMAPHORE_H +#define SEMAPHORE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h" must appear in source files before "include semphr.h" +#endif + +#include "queue.h" + +typedef QueueHandle_t SemaphoreHandle_t; + +#define semBINARY_SEMAPHORE_QUEUE_LENGTH ( ( uint8_t ) 1U ) +#define semSEMAPHORE_QUEUE_ITEM_LENGTH ( ( uint8_t ) 0U ) +#define semGIVE_BLOCK_TIME ( ( TickType_t ) 0U ) + + +/** + * semphr. h + *
+ * vSemaphoreCreateBinary( SemaphoreHandle_t xSemaphore )
+ * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * This old vSemaphoreCreateBinary() macro is now deprecated in favour of the + * xSemaphoreCreateBinary() function. Note that binary semaphores created using + * the vSemaphoreCreateBinary() macro are created in a state such that the + * first call to 'take' the semaphore would pass, whereas binary semaphores + * created using xSemaphoreCreateBinary() are created in a state such that the + * the semaphore must first be 'given' before it can be 'taken'. + * + * Macro that implements a semaphore by using the existing queue mechanism. + * The queue length is 1 as this is a binary semaphore. The data size is 0 + * as we don't want to actually store any data - we just want to know if the + * queue is empty or full. + * + * This type of semaphore can be used for pure synchronisation between tasks or + * between an interrupt and a task. The semaphore need not be given back once + * obtained, so one task/interrupt can continuously 'give' the semaphore while + * another continuously 'takes' the semaphore. For this reason this type of + * semaphore does not use a priority inheritance mechanism. For an alternative + * that does use priority inheritance see xSemaphoreCreateMutex(). + * + * @param xSemaphore Handle to the created semaphore. Should be of type SemaphoreHandle_t. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to vSemaphoreCreateBinary ().
+    // This is a macro so pass the variable in directly.
+    vSemaphoreCreateBinary( xSemaphore );
+
+    if( xSemaphore != NULL )
+    {
+        // The semaphore was created successfully.
+        // The semaphore can now be used.
+    }
+ }
+ 
+ * \defgroup vSemaphoreCreateBinary vSemaphoreCreateBinary + * \ingroup Semaphores + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define vSemaphoreCreateBinary( xSemaphore ) \ + { \ + ( xSemaphore ) = xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ); \ + if( ( xSemaphore ) != NULL ) \ + { \ + ( void ) xSemaphoreGive( ( xSemaphore ) ); \ + } \ + } +#endif + +/** + * semphr. h + *
+ * SemaphoreHandle_t xSemaphoreCreateBinary( void )
+ * + * Creates a new binary semaphore instance, and returns a handle by which the + * new semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, binary semaphores use a block + * of memory, in which the semaphore structure is stored. If a binary semaphore + * is created using xSemaphoreCreateBinary() then the required memory is + * automatically dynamically allocated inside the xSemaphoreCreateBinary() + * function. (see http://www.freertos.org/a00111.html). If a binary semaphore + * is created using xSemaphoreCreateBinaryStatic() then the application writer + * must provide the memory. xSemaphoreCreateBinaryStatic() therefore allows a + * binary semaphore to be created without using any dynamic memory allocation. + * + * The old vSemaphoreCreateBinary() macro is now deprecated in favour of this + * xSemaphoreCreateBinary() function. Note that binary semaphores created using + * the vSemaphoreCreateBinary() macro are created in a state such that the + * first call to 'take' the semaphore would pass, whereas binary semaphores + * created using xSemaphoreCreateBinary() are created in a state such that the + * the semaphore must first be 'given' before it can be 'taken'. + * + * This type of semaphore can be used for pure synchronisation between tasks or + * between an interrupt and a task. The semaphore need not be given back once + * obtained, so one task/interrupt can continuously 'give' the semaphore while + * another continuously 'takes' the semaphore. For this reason this type of + * semaphore does not use a priority inheritance mechanism. For an alternative + * that does use priority inheritance see xSemaphoreCreateMutex(). + * + * @return Handle to the created semaphore, or NULL if the memory required to + * hold the semaphore's data structures could not be allocated. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to xSemaphoreCreateBinary().
+    // The handle of the created semaphore is returned and stored in the variable.
+    xSemaphore = xSemaphoreCreateBinary();
+
+    if( xSemaphore != NULL )
+    {
+        // The semaphore was created successfully.
+        // The semaphore can now be used.
+    }
+ }
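+
+ // Illustrative sketch (the task name is made up): a semaphore created with
+ // xSemaphoreCreateBinary() starts 'empty', so it must be given before the
+ // first take can succeed.
+ void vAnotherTask( void * pvParameters )
+ {
+    if( xSemaphore != NULL )
+    {
+        // Make the semaphore available...
+        xSemaphoreGive( xSemaphore );
+
+        // ...so this take now succeeds without blocking.
+        if( xSemaphoreTake( xSemaphore, ( TickType_t ) 0 ) == pdTRUE )
+        {
+            // The semaphore was obtained and can be used for synchronisation.
+        }
+    }
+ }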
+ 
+ * \defgroup xSemaphoreCreateBinary xSemaphoreCreateBinary + * \ingroup Semaphores + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xSemaphoreCreateBinary() xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ) +#endif + +/** + * semphr. h + *
+ * SemaphoreHandle_t xSemaphoreCreateBinaryStatic( StaticSemaphore_t *pxSemaphoreBuffer )
+ * + * Creates a new binary semaphore instance, and returns a handle by which the + * new semaphore can be referenced. + * + * NOTE: In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, binary semaphores use a block + * of memory, in which the semaphore structure is stored. If a binary semaphore + * is created using xSemaphoreCreateBinary() then the required memory is + * automatically dynamically allocated inside the xSemaphoreCreateBinary() + * function. (see http://www.freertos.org/a00111.html). If a binary semaphore + * is created using xSemaphoreCreateBinaryStatic() then the application writer + * must provide the memory. xSemaphoreCreateBinaryStatic() therefore allows a + * binary semaphore to be created without using any dynamic memory allocation. + * + * This type of semaphore can be used for pure synchronisation between tasks or + * between an interrupt and a task. The semaphore need not be given back once + * obtained, so one task/interrupt can continuously 'give' the semaphore while + * another continuously 'takes' the semaphore. For this reason this type of + * semaphore does not use a priority inheritance mechanism. For an alternative + * that does use priority inheritance see xSemaphoreCreateMutex(). + * + * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the semaphore's data structure, removing the + * need for the memory to be allocated dynamically. + * + * @return If the semaphore is created then a handle to the created semaphore is + * returned. If pxSemaphoreBuffer is NULL then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+ StaticSemaphore_t xSemaphoreBuffer;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to xSemaphoreCreateBinaryStatic().
+    // The semaphore's data structures will be placed in the xSemaphoreBuffer
+    // variable, the address of which is passed into the function.  The
+    // function's parameter is not NULL, so the function will not attempt any
+    // dynamic memory allocation, and therefore the function will not
+    // return NULL.
+    xSemaphore = xSemaphoreCreateBinaryStatic( &xSemaphoreBuffer );
+
+    // Rest of task code goes here.
+ }
+ 
+ * \defgroup xSemaphoreCreateBinaryStatic xSemaphoreCreateBinaryStatic + * \ingroup Semaphores + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreCreateBinaryStatic( pxStaticSemaphore ) xQueueGenericCreateStatic( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticSemaphore, queueQUEUE_TYPE_BINARY_SEMAPHORE ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * semphr. h + *
+ * xSemaphoreTake(
+ *                   SemaphoreHandle_t xSemaphore,
+ *                   TickType_t xBlockTime
+ *               )
+ * + * Macro to obtain a semaphore. The semaphore must have previously been + * created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or + * xSemaphoreCreateCounting(). + * + * @param xSemaphore A handle to the semaphore being taken - obtained when + * the semaphore was created. + * + * @param xBlockTime The time in ticks to wait for the semaphore to become + * available. The macro portTICK_PERIOD_MS can be used to convert this to a + * real time. A block time of zero can be used to poll the semaphore. A block + * time of portMAX_DELAY can be used to block indefinitely (provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h). + * + * @return pdTRUE if the semaphore was obtained. pdFALSE + * if xBlockTime expired without the semaphore becoming available. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ // A task that creates a semaphore.
+ void vATask( void * pvParameters )
+ {
+    // Create the semaphore to guard a shared resource.
+    xSemaphore = xSemaphoreCreateBinary();
+ }
+
+ // A task that uses the semaphore.
+ void vAnotherTask( void * pvParameters )
+ {
+    // ... Do other things.
+
+    if( xSemaphore != NULL )
+    {
+        // See if we can obtain the semaphore.  If the semaphore is not available
+        // wait 10 ticks to see if it becomes free.
+        if( xSemaphoreTake( xSemaphore, ( TickType_t ) 10 ) == pdTRUE )
+        {
+            // We were able to obtain the semaphore and can now access the
+            // shared resource.
+
+            // ...
+
+            // We have finished accessing the shared resource.  Release the
+            // semaphore.
+            xSemaphoreGive( xSemaphore );
+        }
+        else
+        {
+            // We could not obtain the semaphore and can therefore not access
+            // the shared resource safely.
+        }
+    }
+ }
+ 
+ * \defgroup xSemaphoreTake xSemaphoreTake + * \ingroup Semaphores + */ +#define xSemaphoreTake( xSemaphore, xBlockTime ) xQueueSemaphoreTake( ( xSemaphore ), ( xBlockTime ) ) + +/** + * semphr. h + * xSemaphoreTakeRecursive( + * SemaphoreHandle_t xMutex, + * TickType_t xBlockTime + * ) + * + * Macro to recursively obtain, or 'take', a mutex type semaphore. + * The mutex must have previously been created using a call to + * xSemaphoreCreateRecursiveMutex(); + * + * configUSE_RECURSIVE_MUTEXES must be set to 1 in FreeRTOSConfig.h for this + * macro to be available. + * + * This macro must not be used on mutexes created using xSemaphoreCreateMutex(). + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * @param xMutex A handle to the mutex being obtained. This is the + * handle returned by xSemaphoreCreateRecursiveMutex(); + * + * @param xBlockTime The time in ticks to wait for the semaphore to become + * available. The macro portTICK_PERIOD_MS can be used to convert this to a + * real time. A block time of zero can be used to poll the semaphore. If + * the task already owns the semaphore then xSemaphoreTakeRecursive() will + * return immediately no matter what the value of xBlockTime. + * + * @return pdTRUE if the semaphore was obtained. pdFALSE if xBlockTime + * expired without the semaphore becoming available. + * + * Example usage: +
+ SemaphoreHandle_t xMutex = NULL;
+
+ // A task that creates a mutex.
+ void vATask( void * pvParameters )
+ {
+    // Create the mutex to guard a shared resource.
+    xMutex = xSemaphoreCreateRecursiveMutex();
+ }
+
+ // A task that uses the mutex.
+ void vAnotherTask( void * pvParameters )
+ {
+    // ... Do other things.
+
+    if( xMutex != NULL )
+    {
+        // See if we can obtain the mutex.  If the mutex is not available
+        // wait 10 ticks to see if it becomes free.
+        if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE )
+        {
+            // We were able to obtain the mutex and can now access the
+            // shared resource.
+
+            // ...
+            // For some reason due to the nature of the code further calls to
+            // xSemaphoreTakeRecursive() are made on the same mutex.  In real
+            // code these would not be just sequential calls as this would make
+            // no sense.  Instead the calls are likely to be buried inside
+            // a more complex call structure.
+            xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
+            xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
+
+            // The mutex has now been 'taken' three times, so will not be
+            // available to another task until it has also been given back
+            // three times.  Again it is unlikely that real code would have
+            // these calls sequentially, but instead buried in a more complex
+            // call structure.  This is just for illustrative purposes.
+            xSemaphoreGiveRecursive( xMutex );
+            xSemaphoreGiveRecursive( xMutex );
+            xSemaphoreGiveRecursive( xMutex );
+
+            // Now the mutex can be taken by other tasks.
+        }
+        else
+        {
+            // We could not obtain the mutex and can therefore not access
+            // the shared resource safely.
+        }
+    }
+ }
+ 
+ * \defgroup xSemaphoreTakeRecursive xSemaphoreTakeRecursive + * \ingroup Semaphores + */ +#if( configUSE_RECURSIVE_MUTEXES == 1 ) + #define xSemaphoreTakeRecursive( xMutex, xBlockTime ) xQueueTakeMutexRecursive( ( xMutex ), ( xBlockTime ) ) +#endif + +/** + * semphr. h + *
+ * xSemaphoreGive( SemaphoreHandle_t xSemaphore )
+ * + * Macro to release a semaphore. The semaphore must have previously been + * created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or + * xSemaphoreCreateCounting(). and obtained using sSemaphoreTake(). + * + * This macro must not be used from an ISR. See xSemaphoreGiveFromISR () for + * an alternative which can be used from an ISR. + * + * This macro must also not be used on semaphores created using + * xSemaphoreCreateRecursiveMutex(). + * + * @param xSemaphore A handle to the semaphore being released. This is the + * handle returned when the semaphore was created. + * + * @return pdTRUE if the semaphore was released. pdFALSE if an error occurred. + * Semaphores are implemented using queues. An error can occur if there is + * no space on the queue to post a message - indicating that the + * semaphore was not first obtained correctly. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ void vATask( void * pvParameters )
+ {
+    // Create the semaphore to guard a shared resource.
+    vSemaphoreCreateBinary( xSemaphore );
+
+    if( xSemaphore != NULL )
+    {
+        if( xSemaphoreGive( xSemaphore ) != pdTRUE )
+        {
+            // We would expect this call to fail because we cannot give
+            // a semaphore without first "taking" it!
+        }
+
+        // Obtain the semaphore - don't block if the semaphore is not
+        // immediately available.
+        if( xSemaphoreTake( xSemaphore, ( TickType_t ) 0 ) )
+        {
+            // We now have the semaphore and can access the shared resource.
+
+            // ...
+
+            // We have finished accessing the shared resource so can free the
+            // semaphore.
+            if( xSemaphoreGive( xSemaphore ) != pdTRUE )
+            {
+                // We would not expect this call to fail because we must have
+                // obtained the semaphore to get here.
+            }
+        }
+    }
+ }
+ 
+ * \defgroup xSemaphoreGive xSemaphoreGive + * \ingroup Semaphores + */ +#define xSemaphoreGive( xSemaphore ) xQueueGenericSend( ( QueueHandle_t ) ( xSemaphore ), NULL, semGIVE_BLOCK_TIME, queueSEND_TO_BACK ) + +/** + * semphr. h + *
+ * xSemaphoreGiveRecursive( SemaphoreHandle_t xMutex )
+ * + * Macro to recursively release, or 'give', a mutex type semaphore. + * The mutex must have previously been created using a call to + * xSemaphoreCreateRecursiveMutex(); + * + * configUSE_RECURSIVE_MUTEXES must be set to 1 in FreeRTOSConfig.h for this + * macro to be available. + * + * This macro must not be used on mutexes created using xSemaphoreCreateMutex(). + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * @param xMutex A handle to the mutex being released, or 'given'. This is the + * handle returned by xSemaphoreCreateMutex(); + * + * @return pdTRUE if the semaphore was given. + * + * Example usage: +
+ SemaphoreHandle_t xMutex = NULL;
+
+ // A task that creates a mutex.
+ void vATask( void * pvParameters )
+ {
+    // Create the mutex to guard a shared resource.
+    xMutex = xSemaphoreCreateRecursiveMutex();
+ }
+
+ // A task that uses the mutex.
+ void vAnotherTask( void * pvParameters )
+ {
+    // ... Do other things.
+
+    if( xMutex != NULL )
+    {
+        // See if we can obtain the mutex.  If the mutex is not available
+        // wait 10 ticks to see if it becomes free.
+        if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE )
+        {
+            // We were able to obtain the mutex and can now access the
+            // shared resource.
+
+            // ...
+            // For some reason due to the nature of the code further calls to
+			// xSemaphoreTakeRecursive() are made on the same mutex.  In real
+			// code these would not be just sequential calls as this would make
+			// no sense.  Instead the calls are likely to be buried inside
+			// a more complex call structure.
+            xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
+            xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
+
+            // The mutex has now been 'taken' three times, so will not be
+			// available to another task until it has also been given back
+			// three times.  Again it is unlikely that real code would have
+			// these calls sequentially, it would be more likely that the calls
+			// to xSemaphoreGiveRecursive() would be called as a call stack
+			// unwound.  This is just for demonstrative purposes.
+            xSemaphoreGiveRecursive( xMutex );
+			xSemaphoreGiveRecursive( xMutex );
+			xSemaphoreGiveRecursive( xMutex );
+
+			// Now the mutex can be taken by other tasks.
+        }
+        else
+        {
+            // We could not obtain the mutex and can therefore not access
+            // the shared resource safely.
+        }
+    }
+ }
+ 
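 + // A minimal additional sketch of the call-structure point made above,
 + // assuming the xMutex created earlier and a purely illustrative helper name:
 + // the caller is assumed to already hold the mutex, so taking it again from a
 + // nested call simply increments the recursion count and cannot block the owner.
 + void prvAccessSharedResource( void )
 + {
 +    if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE )
 +    {
 +        // ... access the shared resource at this level of the call chain ...
 +
 +        // Give the mutex back once for this level of the call chain.
 +        xSemaphoreGiveRecursive( xMutex );
 +    }
 + }
 +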
+ * \defgroup xSemaphoreGiveRecursive xSemaphoreGiveRecursive + * \ingroup Semaphores + */ +#if( configUSE_RECURSIVE_MUTEXES == 1 ) + #define xSemaphoreGiveRecursive( xMutex ) xQueueGiveMutexRecursive( ( xMutex ) ) +#endif + +/** + * semphr. h + *
+ xSemaphoreGiveFromISR(
+                          SemaphoreHandle_t xSemaphore,
+                          BaseType_t *pxHigherPriorityTaskWoken
+                      )
+ * + * Macro to release a semaphore. The semaphore must have previously been + * created with a call to xSemaphoreCreateBinary() or xSemaphoreCreateCounting(). + * + * Mutex type semaphores (those created using a call to xSemaphoreCreateMutex()) + * must not be used with this macro. + * + * This macro can be used from an ISR. + * + * @param xSemaphore A handle to the semaphore being released. This is the + * handle returned when the semaphore was created. + * + * @param pxHigherPriorityTaskWoken xSemaphoreGiveFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if giving the semaphore caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xSemaphoreGiveFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the semaphore was successfully given, otherwise errQUEUE_FULL. + * + * Example usage: +
+ \#define LONG_TIME 0xffff
+ \#define TICKS_TO_WAIT	10
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ // Repetitive task.
+ void vATask( void * pvParameters )
+ {
+    for( ;; )
+    {
+        // We want this task to run every 10 ticks of a timer.  The semaphore
+        // was created before this task was started.
+
+        // Block waiting for the semaphore to become available.
+        if( xSemaphoreTake( xSemaphore, LONG_TIME ) == pdTRUE )
+        {
+            // It is time to execute.
+
+            // ...
+
+            // We have finished our task.  Return to the top of the loop where
+            // we will block on the semaphore until it is time to execute
+            // again.  Note when using the semaphore for synchronisation with an
+            // ISR in this manner there is no need to 'give' the semaphore back.
+        }
+    }
+ }
+
+ // Timer ISR
+ void vTimerISR( void * pvParameters )
+ {
+ static uint8_t ucLocalTickCount = 0;
+ static BaseType_t xHigherPriorityTaskWoken;
+
+    // A timer tick has occurred.
+
+    // ... Do other time functions.
+
+    // Is it time for vATask() to run?
+    xHigherPriorityTaskWoken = pdFALSE;
+    ucLocalTickCount++;
+    if( ucLocalTickCount >= TICKS_TO_WAIT )
+    {
+        // Unblock the task by releasing the semaphore.
+        xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
+
+        // Reset the count so we release the semaphore again in 10 ticks time.
+        ucLocalTickCount = 0;
+    }
+
+    if( xHigherPriorityTaskWoken != pdFALSE )
+    {
+        // We can force a context switch here.  Context switching from an
+        // ISR uses port specific syntax.  Check the demo task for your port
+        // to find the syntax required.
+    }
+ }
+ 
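 + // A minimal sketch of the port specific step described above, assuming a port
 + // (such as the ARM Cortex-M ports) that provides the portYIELD_FROM_ISR()
 + // macro; other ports use different syntax, so check the demo for your port.
 + void vAnotherTimerISR( void )
 + {
 + BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 +
 +    // Unblock the task by releasing the semaphore.
 +    xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
 +
 +    // Request a context switch before the interrupt exits if giving the
 +    // semaphore unblocked a higher priority task.
 +    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 + }
 +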
+ * \defgroup xSemaphoreGiveFromISR xSemaphoreGiveFromISR + * \ingroup Semaphores + */ +#define xSemaphoreGiveFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueGiveFromISR( ( QueueHandle_t ) ( xSemaphore ), ( pxHigherPriorityTaskWoken ) ) + +/** + * semphr. h + *
+ xSemaphoreTakeFromISR(
+                          SemaphoreHandle_t xSemaphore,
+                          BaseType_t *pxHigherPriorityTaskWoken
+                      )
+ * + * Macro to take a semaphore from an ISR. The semaphore must have + * previously been created with a call to xSemaphoreCreateBinary() or + * xSemaphoreCreateCounting(). + * + * Mutex type semaphores (those created using a call to xSemaphoreCreateMutex()) + * must not be used with this macro. + * + * This macro can be used from an ISR, however taking a semaphore from an ISR + * is not a common operation. It is likely to only be useful when taking a + * counting semaphore when an interrupt is obtaining an object from a resource + * pool (when the semaphore count indicates the number of resources available). + * + * @param xSemaphore A handle to the semaphore being taken. This is the + * handle returned when the semaphore was created. + * + * @param pxHigherPriorityTaskWoken xSemaphoreTakeFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if taking the semaphore caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xSemaphoreTakeFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the semaphore was successfully taken, otherwise + * pdFALSE + */ +#define xSemaphoreTakeFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueReceiveFromISR( ( QueueHandle_t ) ( xSemaphore ), NULL, ( pxHigherPriorityTaskWoken ) ) + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateMutex( void )
+ * + * Creates a new mutex type semaphore instance, and returns a handle by which + * the new mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, mutex semaphores use a block + * of memory, in which the mutex structure is stored. If a mutex is created + * using xSemaphoreCreateMutex() then the required memory is automatically + * dynamically allocated inside the xSemaphoreCreateMutex() function. (see + * http://www.freertos.org/a00111.html). If a mutex is created using + * xSemaphoreCreateMutexStatic() then the application writer must provided the + * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created + * without using any dynamic memory allocation. + * + * Mutexes created using this function can be accessed using the xSemaphoreTake() + * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and + * xSemaphoreGiveRecursive() macros must not be used. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @return If the mutex was successfully created then a handle to the created + * semaphore is returned. If there was not enough heap to allocate the mutex + * data structures then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to xSemaphoreCreateMutex().
+    // This is a macro so pass the variable in directly.
+    xSemaphore = xSemaphoreCreateMutex();
+
+    if( xSemaphore != NULL )
+    {
+        // The semaphore was created successfully.
+        // The semaphore can now be used.
+    }
+ }
+ 
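 + // A minimal follow-on sketch, assuming the xSemaphore mutex created above and
 + // an illustrative task name: the mutex is used with xSemaphoreTake() and
 + // xSemaphoreGive() (never the recursive versions), and is always given back
 + // once the guarded resource is no longer required.
 + void vResourceUserTask( void * pvParameters )
 + {
 +    // Wait up to 10 ticks for the mutex to become available.
 +    if( xSemaphoreTake( xSemaphore, ( TickType_t ) 10 ) == pdTRUE )
 +    {
 +        // ... access the resource guarded by the mutex ...
 +
 +        // Give the mutex back as soon as the resource is no longer required.
 +        xSemaphoreGive( xSemaphore );
 +    }
 + }
 +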
+ * \defgroup xSemaphoreCreateMutex xSemaphoreCreateMutex + * \ingroup Semaphores + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xSemaphoreCreateMutex() xQueueCreateMutex( queueQUEUE_TYPE_MUTEX ) +#endif + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateMutexStatic( StaticSemaphore_t *pxMutexBuffer )
+ * + * Creates a new mutex type semaphore instance, and returns a handle by which + * the new mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, mutex semaphores use a block + * of memory, in which the mutex structure is stored. If a mutex is created + * using xSemaphoreCreateMutex() then the required memory is automatically + * dynamically allocated inside the xSemaphoreCreateMutex() function. (see + * http://www.freertos.org/a00111.html). If a mutex is created using + * xSemaphoreCreateMutexStatic() then the application writer must provided the + * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created + * without using any dynamic memory allocation. + * + * Mutexes created using this function can be accessed using the xSemaphoreTake() + * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and + * xSemaphoreGiveRecursive() macros must not be used. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t, + * which will be used to hold the mutex's data structure, removing the need for + * the memory to be allocated dynamically. + * + * @return If the mutex was successfully created then a handle to the created + * mutex is returned. If pxMutexBuffer was NULL then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xMutexBuffer;
+
+ void vATask( void * pvParameters )
+ {
+    // A mutex cannot be used before it has been created.  xMutexBuffer is
+    // passed into xSemaphoreCreateMutexStatic() so no dynamic memory
+    // allocation is attempted.
+    xSemaphore = xSemaphoreCreateMutexStatic( &xMutexBuffer );
+
+    // As no dynamic memory allocation was performed, xSemaphore cannot be NULL,
+    // so there is no need to check it.
+ }
+ 
+ * \defgroup xSemaphoreCreateMutexStatic xSemaphoreCreateMutexStatic + * \ingroup Semaphores + */ + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreCreateMutexStatic( pxMutexBuffer ) xQueueCreateMutexStatic( queueQUEUE_TYPE_MUTEX, ( pxMutexBuffer ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateRecursiveMutex( void )
+ * + * Creates a new recursive mutex type semaphore instance, and returns a handle + * by which the new recursive mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, recursive mutexs use a block + * of memory, in which the mutex structure is stored. If a recursive mutex is + * created using xSemaphoreCreateRecursiveMutex() then the required memory is + * automatically dynamically allocated inside the + * xSemaphoreCreateRecursiveMutex() function. (see + * http://www.freertos.org/a00111.html). If a recursive mutex is created using + * xSemaphoreCreateRecursiveMutexStatic() then the application writer must + * provide the memory that will get used by the mutex. + * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to + * be created without using any dynamic memory allocation. + * + * Mutexes created using this macro can be accessed using the + * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros. The + * xSemaphoreTake() and xSemaphoreGive() macros must not be used. + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @return xSemaphore Handle to the created mutex semaphore. Should be of type + * SemaphoreHandle_t. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to xSemaphoreCreateRecursiveMutex().
+    // This is a macro so pass the variable in directly.
+    xSemaphore = xSemaphoreCreateRecursiveMutex();
+
+    if( xSemaphore != NULL )
+    {
+        // The semaphore was created successfully.
+        // The semaphore can now be used.
+    }
+ }
+ 
+ * \defgroup xSemaphoreCreateRecursiveMutex xSemaphoreCreateRecursiveMutex + * \ingroup Semaphores + */ +#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) ) + #define xSemaphoreCreateRecursiveMutex() xQueueCreateMutex( queueQUEUE_TYPE_RECURSIVE_MUTEX ) +#endif + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateRecursiveMutexStatic( StaticSemaphore_t *pxMutexBuffer )
+ * + * Creates a new recursive mutex type semaphore instance, and returns a handle + * by which the new recursive mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, recursive mutexs use a block + * of memory, in which the mutex structure is stored. If a recursive mutex is + * created using xSemaphoreCreateRecursiveMutex() then the required memory is + * automatically dynamically allocated inside the + * xSemaphoreCreateRecursiveMutex() function. (see + * http://www.freertos.org/a00111.html). If a recursive mutex is created using + * xSemaphoreCreateRecursiveMutexStatic() then the application writer must + * provide the memory that will get used by the mutex. + * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to + * be created without using any dynamic memory allocation. + * + * Mutexes created using this macro can be accessed using the + * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros. The + * xSemaphoreTake() and xSemaphoreGive() macros must not be used. + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the recursive mutex's data structure, + * removing the need for the memory to be allocated dynamically. + * + * @return If the recursive mutex was successfully created then a handle to the + * created recursive mutex is returned. If pxMutexBuffer was NULL then NULL is + * returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xMutexBuffer;
+
+ void vATask( void * pvParameters )
+ {
+    // A recursive semaphore cannot be used before it is created.  Here a
+    // recursive mutex is created using xSemaphoreCreateRecursiveMutexStatic().
+    // The address of xMutexBuffer is passed into the function, and will hold
+    // the mutex's data structure - so no dynamic memory allocation will be
+    // attempted.
+    xSemaphore = xSemaphoreCreateRecursiveMutexStatic( &xMutexBuffer );
+
+    // As no dynamic memory allocation was performed, xSemaphore cannot be NULL,
+    // so there is no need to check it.
+ }
+ 
+ * \defgroup xSemaphoreCreateRecursiveMutexStatic xSemaphoreCreateRecursiveMutexStatic + * \ingroup Semaphores + */ +#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) ) + #define xSemaphoreCreateRecursiveMutexStatic( pxStaticSemaphore ) xQueueCreateMutexStatic( queueQUEUE_TYPE_RECURSIVE_MUTEX, pxStaticSemaphore ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateCounting( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount )
+ * + * Creates a new counting semaphore instance, and returns a handle by which the + * new counting semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a counting semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, counting semaphores use a + * block of memory, in which the counting semaphore structure is stored. If a + * counting semaphore is created using xSemaphoreCreateCounting() then the + * required memory is automatically dynamically allocated inside the + * xSemaphoreCreateCounting() function. (see + * http://www.freertos.org/a00111.html). If a counting semaphore is created + * using xSemaphoreCreateCountingStatic() then the application writer can + * instead optionally provide the memory that will get used by the counting + * semaphore. xSemaphoreCreateCountingStatic() therefore allows a counting + * semaphore to be created without using any dynamic memory allocation. + * + * Counting semaphores are typically used for two things: + * + * 1) Counting events. + * + * In this usage scenario an event handler will 'give' a semaphore each time + * an event occurs (incrementing the semaphore count value), and a handler + * task will 'take' a semaphore each time it processes an event + * (decrementing the semaphore count value). The count value is therefore + * the difference between the number of events that have occurred and the + * number that have been processed. In this case it is desirable for the + * initial count value to be zero. + * + * 2) Resource management. + * + * In this usage scenario the count value indicates the number of resources + * available. To obtain control of a resource a task must first obtain a + * semaphore - decrementing the semaphore count value. When the count value + * reaches zero there are no free resources. When a task finishes with the + * resource it 'gives' the semaphore back - incrementing the semaphore count + * value. In this case it is desirable for the initial count value to be + * equal to the maximum count value, indicating that all resources are free. + * + * @param uxMaxCount The maximum count value that can be reached. When the + * semaphore reaches this value it can no longer be 'given'. + * + * @param uxInitialCount The count value assigned to the semaphore when it is + * created. + * + * @return Handle to the created semaphore. Null if the semaphore could not be + * created. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+
+ void vATask( void * pvParameters )
+ {
+
+    // Semaphore cannot be used before a call to xSemaphoreCreateCounting().
+    // The max value to which the semaphore can count should be 10, and the
+    // initial value assigned to the count should be 0.
+    xSemaphore = xSemaphoreCreateCounting( 10, 0 );
+
+    if( xSemaphore != NULL )
+    {
+        // The semaphore was created successfully.
+        // The semaphore can now be used.
+    }
+ }
+ 
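 + // A minimal follow-on sketch of the 'resource management' usage described
 + // above, assuming instead a semaphore created with its count equal to the
 + // maximum (all resources free), e.g. xSemaphoreCreateCounting( 10, 10 ), and
 + // an illustrative helper name.
 + void vUseResourceFromPool( void )
 + {
 +    // Wait up to 10 ticks for a resource to become free (count greater than 0).
 +    if( xSemaphoreTake( xSemaphore, ( TickType_t ) 10 ) == pdTRUE )
 +    {
 +        // ... use one resource from the pool ...
 +
 +        // Return the resource to the pool, incrementing the count again.
 +        xSemaphoreGive( xSemaphore );
 +    }
 + }
 +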
+ * \defgroup xSemaphoreCreateCounting xSemaphoreCreateCounting + * \ingroup Semaphores + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xSemaphoreCreateCounting( uxMaxCount, uxInitialCount ) xQueueCreateCountingSemaphore( ( uxMaxCount ), ( uxInitialCount ) ) +#endif + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateCountingStatic( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount, StaticSemaphore_t *pxSemaphoreBuffer )
+ * + * Creates a new counting semaphore instance, and returns a handle by which the + * new counting semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a counting semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, counting semaphores use a + * block of memory, in which the counting semaphore structure is stored. If a + * counting semaphore is created using xSemaphoreCreateCounting() then the + * required memory is automatically dynamically allocated inside the + * xSemaphoreCreateCounting() function. (see + * http://www.freertos.org/a00111.html). If a counting semaphore is created + * using xSemaphoreCreateCountingStatic() then the application writer must + * provide the memory. xSemaphoreCreateCountingStatic() therefore allows a + * counting semaphore to be created without using any dynamic memory allocation. + * + * Counting semaphores are typically used for two things: + * + * 1) Counting events. + * + * In this usage scenario an event handler will 'give' a semaphore each time + * an event occurs (incrementing the semaphore count value), and a handler + * task will 'take' a semaphore each time it processes an event + * (decrementing the semaphore count value). The count value is therefore + * the difference between the number of events that have occurred and the + * number that have been processed. In this case it is desirable for the + * initial count value to be zero. + * + * 2) Resource management. + * + * In this usage scenario the count value indicates the number of resources + * available. To obtain control of a resource a task must first obtain a + * semaphore - decrementing the semaphore count value. When the count value + * reaches zero there are no free resources. When a task finishes with the + * resource it 'gives' the semaphore back - incrementing the semaphore count + * value. In this case it is desirable for the initial count value to be + * equal to the maximum count value, indicating that all resources are free. + * + * @param uxMaxCount The maximum count value that can be reached. When the + * semaphore reaches this value it can no longer be 'given'. + * + * @param uxInitialCount The count value assigned to the semaphore when it is + * created. + * + * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the semaphore's data structure, removing the + * need for the memory to be allocated dynamically. + * + * @return If the counting semaphore was successfully created then a handle to + * the created counting semaphore is returned. If pxSemaphoreBuffer was NULL + * then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xSemaphoreBuffer;
+
+ void vATask( void * pvParameters )
+ {
+
+    // Counting semaphores cannot be used before they have been created.  Create
+    // a counting semaphore using xSemaphoreCreateCountingStatic().  The max
+    // value to which the semaphore can count is 10, and the initial value
+    // assigned to the count will be 0.  The address of xSemaphoreBuffer is
+    // passed in and will be used to hold the semaphore structure, so no dynamic
+    // memory allocation will be used.
+    xSemaphore = xSemaphoreCreateCountingStatic( 10, 0, &xSemaphoreBuffer );
+
+    // No memory allocation was attempted so xSemaphore cannot be NULL, so there
+    // is no need to check its value.
+ }
+ 
+ * \defgroup xSemaphoreCreateCountingStatic xSemaphoreCreateCountingStatic + * \ingroup Semaphores + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreCreateCountingStatic( uxMaxCount, uxInitialCount, pxSemaphoreBuffer ) xQueueCreateCountingSemaphoreStatic( ( uxMaxCount ), ( uxInitialCount ), ( pxSemaphoreBuffer ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * semphr. h + *
void vSemaphoreDelete( SemaphoreHandle_t xSemaphore );
+ * + * Delete a semaphore. This function must be used with care. For example, + * do not delete a mutex type semaphore if the mutex is held by a task. + * + * @param xSemaphore A handle to the semaphore to be deleted. + * + * \defgroup vSemaphoreDelete vSemaphoreDelete + * \ingroup Semaphores + */ +#define vSemaphoreDelete( xSemaphore ) vQueueDelete( ( QueueHandle_t ) ( xSemaphore ) ) + +/** + * semphr.h + *
TaskHandle_t xSemaphoreGetMutexHolder( SemaphoreHandle_t xMutex );
+ * + * If xMutex is indeed a mutex type semaphore, return the current mutex holder. + * If xMutex is not a mutex type semaphore, or the mutex is available (not held + * by a task), return NULL. + * + * Note: This is a good way of determining if the calling task is the mutex + * holder, but not a good way of determining the identity of the mutex holder as + * the holder may change between the function exiting and the returned value + * being tested. + */ +#define xSemaphoreGetMutexHolder( xSemaphore ) xQueueGetMutexHolder( ( xSemaphore ) ) + +/** + * semphr.h + *
TaskHandle_t xSemaphoreGetMutexHolderFromISR( SemaphoreHandle_t xMutex );
+ * + * If xMutex is indeed a mutex type semaphore, return the current mutex holder. + * If xMutex is not a mutex type semaphore, or the mutex is available (not held + * by a task), return NULL. + * + */ +#define xSemaphoreGetMutexHolderFromISR( xSemaphore ) xQueueGetMutexHolderFromISR( ( xSemaphore ) ) + +/** + * semphr.h + *
UBaseType_t uxSemaphoreGetCount( SemaphoreHandle_t xSemaphore );
+ * + * If the semaphore is a counting semaphore then uxSemaphoreGetCount() returns + * its current count value. If the semaphore is a binary semaphore then + * uxSemaphoreGetCount() returns 1 if the semaphore is available, and 0 if the + * semaphore is not available. + * + */ +#define uxSemaphoreGetCount( xSemaphore ) uxQueueMessagesWaiting( ( QueueHandle_t ) ( xSemaphore ) ) + +#endif /* SEMAPHORE_H */ + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h new file mode 100644 index 000000000..18406bbf9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h @@ -0,0 +1,129 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef STACK_MACROS_H +#define STACK_MACROS_H + +/* + * Call the stack overflow hook function if the stack of the task being swapped + * out is currently overflowed, or looks like it might have overflowed in the + * past. + * + * Setting configCHECK_FOR_STACK_OVERFLOW to 1 will cause the macro to check + * the current stack state only - comparing the current top of stack value to + * the stack limit. Setting configCHECK_FOR_STACK_OVERFLOW to greater than 1 + * will also cause the last few stack bytes to be checked to ensure the value + * to which the bytes were set when the task was created have not been + * overwritten. Note this second test does not guarantee that an overflowed + * stack will always be recognised. + */ + +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + /* Is the currently saved stack pointer within the stack limit? 
*/ \ + if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + \ + /* Is the currently saved stack pointer within the stack limit? */ \ + if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ + const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ + \ + if( ( pulStack[ 0 ] != ulCheckValue ) || \ + ( pulStack[ 1 ] != ulCheckValue ) || \ + ( pulStack[ 2 ] != ulCheckValue ) || \ + ( pulStack[ 3 ] != ulCheckValue ) ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + int8_t *pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \ + static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE }; \ + \ + \ + pcEndOfStack -= sizeof( ucExpectedStackBytes ); \ + \ + /* Has the extremity of the task stack ever been written over? */ \ + if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +/* Remove stack overflow macro if not being used. */ +#ifndef taskCHECK_FOR_STACK_OVERFLOW + #define taskCHECK_FOR_STACK_OVERFLOW() +#endif + + + +#endif /* STACK_MACROS_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h new file mode 100644 index 000000000..0f00119ee --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h @@ -0,0 +1,855 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * Stream buffers are used to send a continuous stream of data from one task or + * interrupt to another. Their implementation is light weight, making them + * particularly suited for interrupt to task and core to core communication + * scenarios. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section section and set the + * receive block time to 0. + * + */ + +#ifndef STREAM_BUFFER_H +#define STREAM_BUFFER_H + +#if defined( __cplusplus ) +extern "C" { +#endif + +/** + * Type by which stream buffers are referenced. For example, a call to + * xStreamBufferCreate() returns an StreamBufferHandle_t variable that can + * then be used as a parameter to xStreamBufferSend(), xStreamBufferReceive(), + * etc. + */ +struct StreamBufferDef_t; +typedef struct StreamBufferDef_t * StreamBufferHandle_t; + + +/** + * message_buffer.h + * +
+StreamBufferHandle_t xStreamBufferCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes );
+
+ * + * Creates a new stream buffer using dynamically allocated memory. See + * xStreamBufferCreateStatic() for a version that uses statically allocated + * memory (memory that is allocated at compile time). + * + * configSUPPORT_DYNAMIC_ALLOCATION must be set to 1 or left undefined in + * FreeRTOSConfig.h for xStreamBufferCreate() to be available. + * + * @param xBufferSizeBytes The total number of bytes the stream buffer will be + * able to hold at any one time. + * + * @param xTriggerLevelBytes The number of bytes that must be in the stream + * buffer before a task that is blocked on the stream buffer to wait for data is + * moved out of the blocked state. For example, if a task is blocked on a read + * of an empty stream buffer that has a trigger level of 1 then the task will be + * unblocked when a single byte is written to the buffer or the task's block + * time expires. As another example, if a task is blocked on a read of an empty + * stream buffer that has a trigger level of 10 then the task will not be + * unblocked until the stream buffer contains at least 10 bytes or the task's + * block time expires. If a reading task's block time expires before the + * trigger level is reached then the task will still receive however many bytes + * are actually available. Setting a trigger level of 0 will result in a + * trigger level of 1 being used. It is not valid to specify a trigger level + * that is greater than the buffer size. + * + * @return If NULL is returned, then the stream buffer cannot be created + * because there is insufficient heap memory available for FreeRTOS to allocate + * the stream buffer data structures and storage area. A non-NULL value being + * returned indicates that the stream buffer has been created successfully - + * the returned value should be stored as the handle to the created stream + * buffer. + * + * Example use: +
+
+void vAFunction( void )
+{
+StreamBufferHandle_t xStreamBuffer;
+const size_t xStreamBufferSizeBytes = 100, xTriggerLevel = 10;
+
+    // Create a stream buffer that can hold 100 bytes.  The memory used to hold
+    // both the stream buffer structure and the data in the stream buffer is
+    // allocated dynamically.
+    xStreamBuffer = xStreamBufferCreate( xStreamBufferSizeBytes, xTriggerLevel );
+
+    if( xStreamBuffer == NULL )
+    {
+        // There was not enough heap memory space available to create the
+        // stream buffer.
+    }
+    else
+    {
+        // The stream buffer was created successfully and can now be used.
+    }
+}
+
+ * \defgroup xStreamBufferCreate xStreamBufferCreate + * \ingroup StreamBufferManagement + */ +#define xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes ) xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE ) + +/** + * stream_buffer.h + * +
+StreamBufferHandle_t xStreamBufferCreateStatic( size_t xBufferSizeBytes,
+                                                size_t xTriggerLevelBytes,
+                                                uint8_t *pucStreamBufferStorageArea,
+                                                StaticStreamBuffer_t *pxStaticStreamBuffer );
+
+ * Creates a new stream buffer using statically allocated memory. See + * xStreamBufferCreate() for a version that uses dynamically allocated memory. + * + * configSUPPORT_STATIC_ALLOCATION must be set to 1 in FreeRTOSConfig.h for + * xStreamBufferCreateStatic() to be available. + * + * @param xBufferSizeBytes The size, in bytes, of the buffer pointed to by the + * pucStreamBufferStorageArea parameter. + * + * @param xTriggerLevelBytes The number of bytes that must be in the stream + * buffer before a task that is blocked on the stream buffer to wait for data is + * moved out of the blocked state. For example, if a task is blocked on a read + * of an empty stream buffer that has a trigger level of 1 then the task will be + * unblocked when a single byte is written to the buffer or the task's block + * time expires. As another example, if a task is blocked on a read of an empty + * stream buffer that has a trigger level of 10 then the task will not be + * unblocked until the stream buffer contains at least 10 bytes or the task's + * block time expires. If a reading task's block time expires before the + * trigger level is reached then the task will still receive however many bytes + * are actually available. Setting a trigger level of 0 will result in a + * trigger level of 1 being used. It is not valid to specify a trigger level + * that is greater than the buffer size. + * + * @param pucStreamBufferStorageArea Must point to a uint8_t array that is at + * least xBufferSizeBytes + 1 big. This is the array to which streams are + * copied when they are written to the stream buffer. + * + * @param pxStaticStreamBuffer Must point to a variable of type + * StaticStreamBuffer_t, which will be used to hold the stream buffer's data + * structure. + * + * @return If the stream buffer is created successfully then a handle to the + * created stream buffer is returned. If either pucStreamBufferStorageArea or + * pxStaticstreamBuffer are NULL then NULL is returned. + * + * Example use: +
+
+// Used to dimension the array used to hold the streams.  The available space
+// will actually be one less than this, so 999.
+#define STORAGE_SIZE_BYTES 1000
+
+// Defines the memory that will actually hold the streams within the stream
+// buffer.
+static uint8_t ucStorageBuffer[ STORAGE_SIZE_BYTES ];
+
+// The variable used to hold the stream buffer structure.
+StaticStreamBuffer_t xStreamBufferStruct;
+
+void MyFunction( void )
+{
+StreamBufferHandle_t xStreamBuffer;
+const size_t xTriggerLevel = 1;
+
+    xStreamBuffer = xStreamBufferCreateStatic( sizeof( ucStorageBuffer ),
+                                               xTriggerLevel,
+                                               ucStorageBuffer,
+                                               &xStreamBufferStruct );
+
+    // As neither the pucStreamBufferStorageArea nor pxStaticStreamBuffer
+    // parameters were NULL, xStreamBuffer will not be NULL, and can be used to
+    // reference the created stream buffer in other stream buffer API calls.
+
+    // Other code that uses the stream buffer can go here.
+}
+
+
+ * \defgroup xStreamBufferCreateStatic xStreamBufferCreateStatic + * \ingroup StreamBufferManagement + */ +#define xStreamBufferCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pucStreamBufferStorageArea, pxStaticStreamBuffer ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer ) + +/** + * stream_buffer.h + * +
+size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                          const void *pvTxData,
+                          size_t xDataLengthBytes,
+                          TickType_t xTicksToWait );
+
+ * + * Sends bytes to a stream buffer. The bytes are copied into the stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferSend() to write to a stream buffer from a task. Use + * xStreamBufferSendFromISR() to write to a stream buffer from an interrupt + * service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer to which a stream is + * being sent. + * + * @param pvTxData A pointer to the buffer that holds the bytes to be copied + * into the stream buffer. + * + * @param xDataLengthBytes The maximum number of bytes to copy from pvTxData + * into the stream buffer. + * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for enough space to become available in the stream + * buffer, should the stream buffer contain too little space to hold the + * another xDataLengthBytes bytes. The block time is specified in tick periods, + * so the absolute time it represents is dependent on the tick frequency. The + * macro pdMS_TO_TICKS() can be used to convert a time specified in milliseconds + * into a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will + * cause the task to wait indefinitely (without timing out), provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. If a task times out + * before it can write all xDataLengthBytes into the buffer it will still write + * as many bytes as possible. A task does not use any CPU time when it is in + * the blocked state. + * + * @return The number of bytes written to the stream buffer. If a task times + * out before it can write all xDataLengthBytes into the buffer it will still + * write as many bytes as possible. + * + * Example use: +
+void vAFunction( StreamBufferHandle_t xStreamBuffer )
+{
+size_t xBytesSent;
+uint8_t ucArrayToSend[] = { 0, 1, 2, 3 };
+char *pcStringToSend = "String to send";
+const TickType_t x100ms = pdMS_TO_TICKS( 100 );
+
+    // Send an array to the stream buffer, blocking for a maximum of 100ms to
+    // wait for enough space to be available in the stream buffer.
+    xBytesSent = xStreamBufferSend( xStreamBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), x100ms );
+
+    if( xBytesSent != sizeof( ucArrayToSend ) )
+    {
+        // The call to xStreamBufferSend() timed out before there was enough
+        // space in the buffer for the data to be written, but it did
+        // successfully write xBytesSent bytes.
+    }
+
+    // Send the string to the stream buffer.  Return immediately if there is not
+    // enough space in the buffer.
+    xBytesSent = xStreamBufferSend( xStreamBuffer, ( void * ) pcStringToSend, strlen( pcStringToSend ), 0 );
+
+    if( xBytesSent != strlen( pcStringToSend ) )
+    {
+        // The entire string could not be added to the stream buffer because
+        // there was not enough free space in the buffer, but xBytesSent bytes
+        // were sent.  Could try again to send the remaining bytes.
+    }
+}
+
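+// A minimal follow-on sketch of the 'try again' case noted above, using an
+// illustrative helper: bytes not accepted by the first call are offered to
+// the stream buffer again, this time with a block time.
+void vSendAllBytes( StreamBufferHandle_t xStreamBuffer, const uint8_t *pucData, size_t xLength )
+{
+size_t xBytesSent;
+const TickType_t x100ms = pdMS_TO_TICKS( 100 );
+
+    // First attempt, returning immediately if there is not enough free space.
+    xBytesSent = xStreamBufferSend( xStreamBuffer, ( const void * ) pucData, xLength, 0 );
+
+    if( xBytesSent < xLength )
+    {
+        // Send the remaining bytes, blocking for a maximum of 100ms for space
+        // to become available in the stream buffer.
+        xStreamBufferSend( xStreamBuffer, ( const void * ) &( pucData[ xBytesSent ] ), xLength - xBytesSent, x100ms );
+    }
+}
+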
+ * \defgroup xStreamBufferSend xStreamBufferSend + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
+                                 const void *pvTxData,
+                                 size_t xDataLengthBytes,
+                                 BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * Interrupt safe version of the API function that sends a stream of bytes to + * the stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferSend() to write to a stream buffer from a task. Use + * xStreamBufferSendFromISR() to write to a stream buffer from an interrupt + * service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer to which a stream is + * being sent. + * + * @param pvTxData A pointer to the data that is to be copied into the stream + * buffer. + * + * @param xDataLengthBytes The maximum number of bytes to copy from pvTxData + * into the stream buffer. + * + * @param pxHigherPriorityTaskWoken It is possible that a stream buffer will + * have a task blocked on it waiting for data. Calling + * xStreamBufferSendFromISR() can make data available, and so cause a task that + * was waiting for data to leave the Blocked state. If calling + * xStreamBufferSendFromISR() causes a task to leave the Blocked state, and the + * unblocked task has a priority higher than the currently executing task (the + * task that was interrupted), then, internally, xStreamBufferSendFromISR() + * will set *pxHigherPriorityTaskWoken to pdTRUE. If + * xStreamBufferSendFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. This will + * ensure that the interrupt returns directly to the highest priority Ready + * state task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it + * is passed into the function. See the example code below for an example. + * + * @return The number of bytes actually written to the stream buffer, which will + * be less than xDataLengthBytes if the stream buffer didn't have enough free + * space for all the bytes to be written. + * + * Example use: +
+// A stream buffer that has already been created.
+StreamBufferHandle_t xStreamBuffer;
+
+void vAnInterruptServiceRoutine( void )
+{
+size_t xBytesSent;
+char *pcStringToSend = "String to send";
+BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
+
+    // Attempt to send the string to the stream buffer.
+    xBytesSent = xStreamBufferSendFromISR( xStreamBuffer,
+                                           ( void * ) pcStringToSend,
+                                           strlen( pcStringToSend ),
+                                           &xHigherPriorityTaskWoken );
+
+    if( xBytesSent != strlen( pcStringToSend ) )
+    {
+        // There was not enough free space in the stream buffer for the entire
+        // string to be written, but xBytesSent bytes were written.
+    }
+
+    // If xHigherPriorityTaskWoken was set to pdTRUE inside
+    // xStreamBufferSendFromISR() then a task that has a priority above the
+    // priority of the currently executing task was unblocked and a context
+    // switch should be performed to ensure the ISR returns to the unblocked
+    // task.  In most FreeRTOS ports this is done by simply passing
+    // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
+    // variable's value, and perform the context switch if necessary.  Check the
+    // documentation for the port in use for port specific instructions.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+
+ * \defgroup xStreamBufferSendFromISR xStreamBufferSendFromISR + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                             void *pvRxData,
+                             size_t xBufferLengthBytes,
+                             TickType_t xTicksToWait );
+
+ * + * Receives bytes from a stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferReceive() to read from a stream buffer from a task. Use + * xStreamBufferReceiveFromISR() to read from a stream buffer from an + * interrupt service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer from which bytes are to + * be received. + * + * @param pvRxData A pointer to the buffer into which the received bytes will be + * copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the + * pvRxData parameter. This sets the maximum number of bytes to receive in one + * call. xStreamBufferReceive will return as many bytes as possible up to a + * maximum set by xBufferLengthBytes. + * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for data to become available if the stream buffer is + * empty. xStreamBufferReceive() will return immediately if xTicksToWait is + * zero. The block time is specified in tick periods, so the absolute time it + * represents is dependent on the tick frequency. The macro pdMS_TO_TICKS() can + * be used to convert a time specified in milliseconds into a time specified in + * ticks. Setting xTicksToWait to portMAX_DELAY will cause the task to wait + * indefinitely (without timing out), provided INCLUDE_vTaskSuspend is set to 1 + * in FreeRTOSConfig.h. A task does not use any CPU time when it is in the + * Blocked state. + * + * @return The number of bytes actually read from the stream buffer, which will + * be less than xBufferLengthBytes if the call to xStreamBufferReceive() timed + * out before xBufferLengthBytes were available. + * + * Example use: +
+void vAFunction( StreamBufferHandle_t xStreamBuffer )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+const TickType_t xBlockTime = pdMS_TO_TICKS( 20 );
+
+    // Receive up to another sizeof( ucRxData ) bytes from the stream buffer.
+    // Wait in the Blocked state (so not using any CPU processing time) for a
+    // maximum of 20ms (xBlockTime) for the full sizeof( ucRxData ) number of
+    // bytes to be available.
+    xReceivedBytes = xStreamBufferReceive( xStreamBuffer,
+                                           ( void * ) ucRxData,
+                                           sizeof( ucRxData ),
+                                           xBlockTime );
+
+    if( xReceivedBytes > 0 )
+    {
+        // ucRxData contains another xReceivedBytes bytes of data, which can
+        // be processed here....
+    }
+}
+
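+// A minimal follow-on sketch, assuming the same stream buffer and an
+// illustrative helper name: receive repeatedly without blocking until the
+// stream buffer has been drained.
+void vDrainStreamBuffer( StreamBufferHandle_t xStreamBuffer )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+
+    do
+    {
+        // A block time of 0 causes the call to return immediately with
+        // however many bytes are currently available, up to sizeof( ucRxData ).
+        xReceivedBytes = xStreamBufferReceive( xStreamBuffer, ( void * ) ucRxData, sizeof( ucRxData ), 0 );
+
+        // ... process xReceivedBytes bytes from ucRxData here ...
+
+    } while( xReceivedBytes > 0 );
+}
+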
+ * \defgroup xStreamBufferReceive xStreamBufferReceive + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
+                                    void *pvRxData,
+                                    size_t xBufferLengthBytes,
+                                    BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * An interrupt safe version of the API function that receives bytes from a + * stream buffer. + * + * Use xStreamBufferReceive() to read bytes from a stream buffer from a task. + * Use xStreamBufferReceiveFromISR() to read bytes from a stream buffer from an + * interrupt service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer from which a stream + * is being received. + * + * @param pvRxData A pointer to the buffer into which the received bytes are + * copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the + * pvRxData parameter. This sets the maximum number of bytes to receive in one + * call. xStreamBufferReceive will return as many bytes as possible up to a + * maximum set by xBufferLengthBytes. + * + * @param pxHigherPriorityTaskWoken It is possible that a stream buffer will + * have a task blocked on it waiting for space to become available. Calling + * xStreamBufferReceiveFromISR() can make space available, and so cause a task + * that is waiting for space to leave the Blocked state. If calling + * xStreamBufferReceiveFromISR() causes a task to leave the Blocked state, and + * the unblocked task has a priority higher than the currently executing task + * (the task that was interrupted), then, internally, + * xStreamBufferReceiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE. + * If xStreamBufferReceiveFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. That will + * ensure the interrupt returns directly to the highest priority Ready state + * task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it is + * passed into the function. See the code example below for an example. + * + * @return The number of bytes read from the stream buffer, if any. + * + * Example use: +
+// A stream buffer that has already been created.
+StreamBufferHandle_t xStreamBuffer;
+
+void vAnInterruptServiceRoutine( void )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+BaseType_t xHigherPriorityTaskWoken = pdFALSE;  // Initialised to pdFALSE.
+
+    // Receive the next stream from the stream buffer.
+    xReceivedBytes = xStreamBufferReceiveFromISR( xStreamBuffer,
+                                                  ( void * ) ucRxData,
+                                                  sizeof( ucRxData ),
+                                                  &xHigherPriorityTaskWoken );
+
+    if( xReceivedBytes > 0 )
+    {
+        // ucRxData contains xReceivedBytes read from the stream buffer.
+        // Process the stream here....
+    }
+
+    // If xHigherPriorityTaskWoken was set to pdTRUE inside
+    // xStreamBufferReceiveFromISR() then a task that has a priority above the
+    // priority of the currently executing task was unblocked and a context
+    // switch should be performed to ensure the ISR returns to the unblocked
+    // task.  In most FreeRTOS ports this is done by simply passing
+    // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
+    // variable's value, and perform the context switch if necessary.  Check the
+    // documentation for the port in use for port specific instructions.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+
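+// A minimal, purely illustrative sketch of the writer side of the same buffer,
+// this time from an ISR (the function name vAnInterruptThatSends is
+// hypothetical, and ucTxData is assumed to already hold the data to send).
+void vAnInterruptThatSends( void )
+{
+uint8_t ucTxData[ 20 ];
+size_t xBytesSent;
+BaseType_t xHigherPriorityTaskWoken = pdFALSE;  // Initialised to pdFALSE.
+
+    // Attempt to write ucTxData to the stream buffer.  An ISR must not block,
+    // so there is no block time parameter.
+    xBytesSent = xStreamBufferSendFromISR( xStreamBuffer,
+                                           ( void * ) ucTxData,
+                                           sizeof( ucTxData ),
+                                           &xHigherPriorityTaskWoken );
+
+    if( xBytesSent != sizeof( ucTxData ) )
+    {
+        // There was not enough free space for all of ucTxData.
+    }
+
+    // As in the example above, request a context switch if one is now required.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+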
+ * \defgroup xStreamBufferReceiveFromISR xStreamBufferReceiveFromISR + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer );
+
+ * + * Deletes a stream buffer that was previously created using a call to + * xStreamBufferCreate() or xStreamBufferCreateStatic(). If the stream + * buffer was created using dynamic memory (that is, by xStreamBufferCreate()), + * then the allocated memory is freed. + * + * A stream buffer handle must not be used after the stream buffer has been + * deleted. + * + * @param xStreamBuffer The handle of the stream buffer to be deleted. + * + * \defgroup vStreamBufferDelete vStreamBufferDelete + * \ingroup StreamBufferManagement + */ +void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer );
+
+ * + * Queries a stream buffer to see if it is full. A stream buffer is full if it + * does not have any free space, and therefore cannot accept any more data. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return If the stream buffer is full then pdTRUE is returned. Otherwise + * pdFALSE is returned. + * + * \defgroup xStreamBufferIsFull xStreamBufferIsFull + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer );
+
+ * + * Queries a stream buffer to see if it is empty. A stream buffer is empty if + * it does not contain any data. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return If the stream buffer is empty then pdTRUE is returned. Otherwise + * pdFALSE is returned. + * + * \defgroup xStreamBufferIsEmpty xStreamBufferIsEmpty + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer );
+
+ * + * Resets a stream buffer to its initial, empty, state. Any data that was in + * the stream buffer is discarded. A stream buffer can only be reset if there + * are no tasks blocked waiting to either send to or receive from the stream + * buffer. + * + * @param xStreamBuffer The handle of the stream buffer being reset. + * + * @return If the stream buffer is reset then pdPASS is returned. If there was + * a task blocked waiting to send to or read from the stream buffer then the + * stream buffer is not reset and pdFAIL is returned. + * + * \defgroup xStreamBufferReset xStreamBufferReset + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer );
+
+ * + * Queries a stream buffer to see how much free space it contains, which is + * equal to the amount of data that can be sent to the stream buffer before it + * is full. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return The number of bytes that can be written to the stream buffer before + * the stream buffer would be full. + * + * \defgroup xStreamBufferSpacesAvailable xStreamBufferSpacesAvailable + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer );
+
+ * + * Queries a stream buffer to see how much data it contains, which is equal to + * the number of bytes that can be read from the stream buffer before the stream + * buffer would be empty. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return The number of bytes that can be read from the stream buffer before + * the stream buffer would be empty. + * + * \defgroup xStreamBufferBytesAvailable xStreamBufferBytesAvailable + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel );
+
+ * + * A stream buffer's trigger level is the number of bytes that must be in the + * stream buffer before a task that is blocked on the stream buffer to + * wait for data is moved out of the blocked state. For example, if a task is + * blocked on a read of an empty stream buffer that has a trigger level of 1 + * then the task will be unblocked when a single byte is written to the buffer + * or the task's block time expires. As another example, if a task is blocked + * on a read of an empty stream buffer that has a trigger level of 10 then the + * task will not be unblocked until the stream buffer contains at least 10 bytes + * or the task's block time expires. If a reading task's block time expires + * before the trigger level is reached then the task will still receive however + * many bytes are actually available. Setting a trigger level of 0 will result + * in a trigger level of 1 being used. It is not valid to specify a trigger + * level that is greater than the buffer size. + * + * A trigger level is set when the stream buffer is created, and can be modified + * using xStreamBufferSetTriggerLevel(). + * + * @param xStreamBuffer The handle of the stream buffer being updated. + * + * @param xTriggerLevel The new trigger level for the stream buffer. + * + * @return If xTriggerLevel was less than or equal to the stream buffer's length + * then the trigger level will be updated and pdTRUE is returned. Otherwise + * pdFALSE is returned. + * + * \defgroup xStreamBufferSetTriggerLevel xStreamBufferSetTriggerLevel + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * For advanced users only. + * + * The sbSEND_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is sent to a message buffer or stream buffer. If there was a task that + * was blocked on the message or stream buffer waiting for data to arrive then + * the sbSEND_COMPLETED() macro sends a notification to the task to remove it + * from the Blocked state. xStreamBufferSendCompletedFromISR() does the same + * thing. It is provided to enable application writers to implement their own + * version of sbSEND_COMPLETED(), and MUST NOT BE USED AT ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer to which data was + * written. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xStreamBufferSendCompletedFromISR(). If calling + * xStreamBufferSendCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xStreamBufferSendCompletedFromISR xStreamBufferSendCompletedFromISR + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * For advanced users only. + * + * The sbRECEIVE_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is read out of a message buffer or stream buffer. If there was a task + * that was blocked on the message or stream buffer waiting for data to arrive + * then the sbRECEIVE_COMPLETED() macro sends a notification to the task to + * remove it from the Blocked state. xStreamBufferReceiveCompletedFromISR() + * does the same thing. It is provided to enable application writers to + * implement their own version of sbRECEIVE_COMPLETED(), and MUST NOT BE USED AT + * ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer from which data was + * read. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xStreamBufferReceiveCompletedFromISR(). If calling + * xStreamBufferReceiveCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xStreamBufferReceiveCompletedFromISR xStreamBufferReceiveCompletedFromISR + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/* Functions below here are not part of the public API. */ +StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer ) PRIVILEGED_FUNCTION; + +StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + uint8_t * const pucStreamBufferStorageArea, + StaticStreamBuffer_t * const pxStaticStreamBuffer ) PRIVILEGED_FUNCTION; + +size_t xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +#if( configUSE_TRACE_FACILITY == 1 ) + void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber ) PRIVILEGED_FUNCTION; + UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; +#endif + +#if defined( __cplusplus ) +} +#endif + +#endif /* !defined( STREAM_BUFFER_H ) */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/task.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/task.h new file mode 100644 index 000000000..f3cf118f8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/task.h @@ -0,0 +1,2421 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef INC_TASK_H +#define INC_TASK_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include task.h" +#endif + +#include "list.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------- + * MACROS AND DEFINITIONS + *----------------------------------------------------------*/ + +#define tskKERNEL_VERSION_NUMBER "V10.2.0" +#define tskKERNEL_VERSION_MAJOR 10 +#define tskKERNEL_VERSION_MINOR 2 +#define tskKERNEL_VERSION_BUILD 0 + +/* MPU region parameters passed in ulParameters + * of MemoryRegion_t struct. */ +#define tskMPU_REGION_READ_ONLY ( 1UL << 0UL ) +#define tskMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define tskMPU_REGION_EXECUTE_NEVER ( 1UL << 2UL ) +#define tskMPU_REGION_NORMAL_MEMORY ( 1UL << 3UL ) +#define tskMPU_REGION_DEVICE_MEMORY ( 1UL << 4UL ) + +/** + * task. h + * + * Type by which tasks are referenced. For example, a call to xTaskCreate + * returns (via a pointer parameter) an TaskHandle_t variable that can then + * be used as a parameter to vTaskDelete to delete the task. + * + * \defgroup TaskHandle_t TaskHandle_t + * \ingroup Tasks + */ +struct tskTaskControlBlock; /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +typedef struct tskTaskControlBlock* TaskHandle_t; + +/* + * Defines the prototype to which the application task hook function must + * conform. + */ +typedef BaseType_t (*TaskHookFunction_t)( void * ); + +/* Task states returned by eTaskGetState. */ +typedef enum +{ + eRunning = 0, /* A task is querying the state of itself, so must be running. */ + eReady, /* The task being queried is in a read or pending ready list. */ + eBlocked, /* The task being queried is in the Blocked state. */ + eSuspended, /* The task being queried is in the Suspended state, or is in the Blocked state with an infinite time out. */ + eDeleted, /* The task being queried has been deleted, but its TCB has not yet been freed. */ + eInvalid /* Used as an 'invalid state' value. */ +} eTaskState; + +/* Actions that can be performed when vTaskNotify() is called. */ +typedef enum +{ + eNoAction = 0, /* Notify the task without updating its notify value. */ + eSetBits, /* Set bits in the task's notification value. */ + eIncrement, /* Increment the task's notification value. 
*/ + eSetValueWithOverwrite, /* Set the task's notification value to a specific value even if the previous value has not yet been read by the task. */ + eSetValueWithoutOverwrite /* Set the task's notification value if the previous value has been read by the task. */ +} eNotifyAction; + +/* + * Used internally only. + */ +typedef struct xTIME_OUT +{ + BaseType_t xOverflowCount; + TickType_t xTimeOnEntering; +} TimeOut_t; + +/* + * Defines the memory ranges allocated to the task when an MPU is used. + */ +typedef struct xMEMORY_REGION +{ + void *pvBaseAddress; + uint32_t ulLengthInBytes; + uint32_t ulParameters; +} MemoryRegion_t; + +/* + * Parameters required to create an MPU protected task. + */ +typedef struct xTASK_PARAMETERS +{ + TaskFunction_t pvTaskCode; + const char * const pcName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + configSTACK_DEPTH_TYPE usStackDepth; + void *pvParameters; + UBaseType_t uxPriority; + StackType_t *puxStackBuffer; + MemoryRegion_t xRegions[ portNUM_CONFIGURABLE_REGIONS ]; + #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + StaticTask_t * const pxTaskBuffer; + #endif +} TaskParameters_t; + +/* Used with the uxTaskGetSystemState() function to return the state of each task +in the system. */ +typedef struct xTASK_STATUS +{ + TaskHandle_t xHandle; /* The handle of the task to which the rest of the information in the structure relates. */ + const char *pcTaskName; /* A pointer to the task's name. This value will be invalid if the task was deleted since the structure was populated! */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + UBaseType_t xTaskNumber; /* A number unique to the task. */ + eTaskState eCurrentState; /* The state in which the task existed when the structure was populated. */ + UBaseType_t uxCurrentPriority; /* The priority at which the task was running (may be inherited) when the structure was populated. */ + UBaseType_t uxBasePriority; /* The priority to which the task will return if the task's current priority has been inherited to avoid unbounded priority inversion when obtaining a mutex. Only valid if configUSE_MUTEXES is defined as 1 in FreeRTOSConfig.h. */ + uint32_t ulRunTimeCounter; /* The total run time allocated to the task so far, as defined by the run time stats clock. See http://www.freertos.org/rtos-run-time-stats.html. Only valid when configGENERATE_RUN_TIME_STATS is defined as 1 in FreeRTOSConfig.h. */ + StackType_t *pxStackBase; /* Points to the lowest address of the task's stack area. */ + configSTACK_DEPTH_TYPE usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. */ +} TaskStatus_t; + +/* Possible return values for eTaskConfirmSleepModeStatus(). */ +typedef enum +{ + eAbortSleep = 0, /* A task has been made ready or a context switch pended since portSUPPORESS_TICKS_AND_SLEEP() was called - abort entering a sleep mode. */ + eStandardSleep, /* Enter a sleep mode that will not last any longer than the expected idle time. */ + eNoTasksWaitingTimeout /* No tasks are waiting for a timeout so it is safe to enter a sleep mode that can only be exited by an external interrupt. */ +} eSleepModeStatus; + +/** + * Defines the priority used by the idle task. This must not be modified. 
+ * + * \ingroup TaskUtils + */ +#define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U ) + +/** + * task. h + * + * Macro for forcing a context switch. + * + * \defgroup taskYIELD taskYIELD + * \ingroup SchedulerControl + */ +#define taskYIELD() portYIELD() + +/** + * task. h + * + * Macro to mark the start of a critical code region. Preemptive context + * switches cannot occur when in a critical region. + * + * NOTE: This may alter the stack (depending on the portable implementation) + * so must be used with care! + * + * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL + * \ingroup SchedulerControl + */ +#define taskENTER_CRITICAL() portENTER_CRITICAL() +#define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() + +/** + * task. h + * + * Macro to mark the end of a critical code region. Preemptive context + * switches cannot occur when in a critical region. + * + * NOTE: This may alter the stack (depending on the portable implementation) + * so must be used with care! + * + * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL + * \ingroup SchedulerControl + */ +#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) +/** + * task. h + * + * Macro to disable all maskable interrupts. + * + * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS() + +/** + * task. h + * + * Macro to enable microcontroller interrupts. + * + * \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS() + +/* Definitions returned by xTaskGetSchedulerState(). taskSCHEDULER_SUSPENDED is +0 to generate more optimal code when configASSERT() is defined as the constant +is used in assert() statements. */ +#define taskSCHEDULER_SUSPENDED ( ( BaseType_t ) 0 ) +#define taskSCHEDULER_NOT_STARTED ( ( BaseType_t ) 1 ) +#define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) + + +/*----------------------------------------------------------- + * TASK CREATION API + *----------------------------------------------------------*/ + +/** + * task. h + *
+ BaseType_t xTaskCreate(
+							  TaskFunction_t pvTaskCode,
+							  const char * const pcName,
+							  configSTACK_DEPTH_TYPE usStackDepth,
+							  void *pvParameters,
+							  UBaseType_t uxPriority,
+							  TaskHandle_t *pvCreatedTask
+						  );
+ * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * http://www.freertos.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * See xTaskCreateStatic() for a version that does not use any dynamic memory + * allocation. + * + * xTaskCreate() can only be used to create a task that has unrestricted + * access to the entire microcontroller memory map. Systems that include MPU + * support can alternatively create an MPU constrained task using + * xTaskCreateRestricted(). + * + * @param pvTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. Max length defined by configMAX_TASK_NAME_LEN - default + * is 16. + * + * @param usStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 16 bits wide and usStackDepth is defined as 100, 200 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task should run. Systems that + * include MPU support can optionally create tasks in a privileged (system) + * mode by setting bit portPRIVILEGE_BIT of the priority parameter. For + * example, to create a privileged task at priority 2 the uxPriority parameter + * should be set to ( 2 | portPRIVILEGE_BIT ). + * + * @param pvCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: +
+ // Task to be created.
+ void vTaskCode( void * pvParameters )
+ {
+	 for( ;; )
+	 {
+		 // Task code goes here.
+	 }
+ }
+
+ // Function that creates a task.
+ void vOtherFunction( void )
+ {
+ static uint8_t ucParameterToPass;
+ TaskHandle_t xHandle = NULL;
+
+	 // Create the task, storing the handle.  Note that the passed parameter ucParameterToPass
+	 // must exist for the lifetime of the task, so in this case is declared static.  If it was just
+	 // an automatic stack variable it might no longer exist, or at least have been corrupted, by the time
+	 // the new task attempts to access it.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, &ucParameterToPass, tskIDLE_PRIORITY, &xHandle );
+     configASSERT( xHandle );
+
+	 // Use the handle to delete the task.
+     if( xHandle != NULL )
+     {
+	     vTaskDelete( xHandle );
+     }
+ }
+   
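+ // A hypothetical variation on the above (vYetAnotherFunction is an
+ // illustrative name): because xTaskCreate() allocates the stack and TCB
+ // dynamically, its return value can be checked against pdPASS rather than,
+ // or as well as, asserting on the returned handle.
+ void vYetAnotherFunction( void )
+ {
+ BaseType_t xReturned;
+ TaskHandle_t xHandle = NULL;
+
+	 xReturned = xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 if( xReturned != pdPASS )
+	 {
+		 // The task was not created, most likely because there was insufficient
+		 // FreeRTOS heap available for the task's stack and data structures.
+	 }
+ }
+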
+ * \defgroup xTaskCreate xTaskCreate + * \ingroup Tasks + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *
+ TaskHandle_t xTaskCreateStatic( TaskFunction_t pvTaskCode,
+								 const char * const pcName,
+								 uint32_t ulStackDepth,
+								 void *pvParameters,
+								 UBaseType_t uxPriority,
+								 StackType_t *pxStackBuffer,
+								 StaticTask_t *pxTaskBuffer );
+ * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * http://www.freertos.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * @param pvTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. The maximum length of the string is defined by + * configMAX_TASK_NAME_LEN in FreeRTOSConfig.h. + * + * @param ulStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 32-bits wide and ulStackDepth is defined as 100 then 400 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task will run. + * + * @param pxStackBuffer Must point to a StackType_t array that has at least + * ulStackDepth indexes - the array will then be used as the task's stack, + * removing the need for the stack to be allocated dynamically. + * + * @param pxTaskBuffer Must point to a variable of type StaticTask_t, which will + * then be used to hold the task's data structures, removing the need for the + * memory to be allocated dynamically. + * + * @return If neither pxStackBuffer or pxTaskBuffer are NULL, then the task will + * be created and a handle to the created task is returned. If either + * pxStackBuffer or pxTaskBuffer are NULL then the task will not be created and + * NULL is returned. + * + * Example usage: +
+
+    // Dimensions the buffer that the task being created will use as its stack.
+    // NOTE:  This is the number of words the stack will hold, not the number of
+    // bytes.  For example, if each stack item is 32-bits, and this is set to 100,
+    // then 400 bytes (100 * 32-bits) will be allocated.
+    #define STACK_SIZE 200
+
+    // Structure that will hold the TCB of the task being created.
+    StaticTask_t xTaskBuffer;
+
+    // Buffer that the task being created will use as its stack.  Note this is
+    // an array of StackType_t variables.  The size of StackType_t is dependent on
+    // the RTOS port.
+    StackType_t xStack[ STACK_SIZE ];
+
+    // Function that implements the task being created.
+    void vTaskCode( void * pvParameters )
+    {
+        // The parameter value is expected to be 1 as 1 is passed in the
+        // pvParameters value in the call to xTaskCreateStatic().
+        configASSERT( ( uint32_t ) pvParameters == 1UL );
+
+        for( ;; )
+        {
+            // Task code goes here.
+        }
+    }
+
+    // Function that creates a task.
+    void vOtherFunction( void )
+    {
+        TaskHandle_t xHandle = NULL;
+
+        // Create the task without using any dynamic memory allocation.
+        xHandle = xTaskCreateStatic(
+                      vTaskCode,       // Function that implements the task.
+                      "NAME",          // Text name for the task.
+                      STACK_SIZE,      // Stack size in words, not bytes.
+                      ( void * ) 1,    // Parameter passed into the task.
+                      tskIDLE_PRIORITY,// Priority at which the task is created.
+                      xStack,          // Array to use as the task's stack.
+                      &xTaskBuffer );  // Variable to hold the task's data structure.
+
+        // puxStackBuffer and pxTaskBuffer were not NULL, so the task will have
+        // been created, and xHandle will be the task's handle.  Use the handle
+        // to suspend the task.
+        vTaskSuspend( xHandle );
+    }
+   
+ * \defgroup xTaskCreateStatic xTaskCreateStatic + * \ingroup Tasks + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * task. h + *
+ BaseType_t xTaskCreateRestricted( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
+ * + * Only available when configSUPPORT_DYNAMIC_ALLOCATION is set to 1. + * + * xTaskCreateRestricted() should only be used in systems that include an MPU + * implementation. + * + * Create a new task and add it to the list of tasks that are ready to run. + * The function parameters define the memory regions and associated access + * permissions allocated to the task. + * + * See xTaskCreateRestrictedStatic() for a version that does not use any + * dynamic memory allocation. + * + * @param pxTaskDefinition Pointer to a structure that contains a member + * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API + * documentation) plus an optional stack buffer and the memory region + * definitions. + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: +
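+// The example below assumes that vATask(), cStackBuffer, cReadWriteArray,
+// cReadOnlyArray and cPrivilegedOnlyAccessArray are defined elsewhere, for
+// example (illustrative only - MPU ports place port specific size and
+// alignment requirements on these buffers, so refer to the documentation for
+// the port in use):
+//
+//    static void vATask( void *pvParameters );
+//    static StackType_t cStackBuffer[ 100 ];
+//    static uint8_t cReadWriteArray[ 32 ];
+//    static uint8_t cReadOnlyArray[ 32 ];
+//    static uint8_t cPrivilegedOnlyAccessArray[ 128 ];
+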
+// Create a TaskParameters_t structure that defines the task to be created.
+static const TaskParameters_t xCheckTaskParameters =
+{
+	vATask,		// pvTaskCode - the function that implements the task.
+	"ATask",	// pcName - just a text name for the task to assist debugging.
+	100,		// usStackDepth	- the stack size DEFINED IN WORDS.
+	NULL,		// pvParameters - passed into the task function as the function parameters.
+	( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the portPRIVILEGE_BIT if the task should run in a privileged state.
+	cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack.
+
+	// xRegions - Allocate up to three separate memory regions for access by
+	// the task, with appropriate access permissions.  Different processors have
+	// different memory alignment requirements - refer to the FreeRTOS documentation
+	// for full information.
+	{
+		// Base address					Length	Parameters
+        { cReadWriteArray,				32,		portMPU_REGION_READ_WRITE },
+        { cReadOnlyArray,				32,		portMPU_REGION_READ_ONLY },
+        { cPrivilegedOnlyAccessArray,	128,	portMPU_REGION_PRIVILEGED_READ_WRITE }
+	}
+};
+
+int main( void )
+{
+TaskHandle_t xHandle;
+
+	// Create a task from the const structure defined above.  The task handle
+	// is requested (the second parameter is not NULL) but in this case just for
+	// demonstration purposes as it's not actually used.
+	xTaskCreateRestricted( &xCheckTaskParameters, &xHandle );
+
+	// Start the scheduler.
+	vTaskStartScheduler();
+
+	// Will only get here if there was insufficient memory to create the idle
+	// and/or timer task.
+	for( ;; );
+}
+   
+ * \defgroup xTaskCreateRestricted xTaskCreateRestricted + * \ingroup Tasks + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *
+ BaseType_t xTaskCreateRestrictedStatic( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
+ * + * Only available when configSUPPORT_STATIC_ALLOCATION is set to 1. + * + * xTaskCreateRestrictedStatic() should only be used in systems that include an + * MPU implementation. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreateRestricted() then the stack is provided by the application writer, + * and the memory used to hold the task's data structure is automatically + * dynamically allocated inside the xTaskCreateRestricted() function. If a task + * is created using xTaskCreateRestrictedStatic() then the application writer + * must provide the memory used to hold the task's data structures too. + * xTaskCreateRestrictedStatic() therefore allows a memory protected task to be + * created without using any dynamic memory allocation. + * + * @param pxTaskDefinition Pointer to a structure that contains a member + * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API + * documentation) plus an optional stack buffer and the memory region + * definitions. If configSUPPORT_STATIC_ALLOCATION is set to 1 the structure + * contains an additional member, which is used to point to a variable of type + * StaticTask_t - which is then used to hold the task's data structure. + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: +
+// Create a TaskParameters_t structure that defines the task to be created.
+// The StaticTask_t variable is only included in the structure when
+// configSUPPORT_STATIC_ALLOCATION is set to 1.  The PRIVILEGED_DATA macro can
+// be used to force the variable into the RTOS kernel's privileged data area.
+static PRIVILEGED_DATA StaticTask_t xTaskBuffer;
+static const TaskParameters_t xCheckTaskParameters =
+{
+	vATask,		// pvTaskCode - the function that implements the task.
+	"ATask",	// pcName - just a text name for the task to assist debugging.
+	100,		// usStackDepth	- the stack size DEFINED IN WORDS.
+	NULL,		// pvParameters - passed into the task function as the function parameters.
+	( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the portPRIVILEGE_BIT if the task should run in a privileged state.
+	cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack.
+
+	// xRegions - Allocate up to three separate memory regions for access by
+	// the task, with appropriate access permissions.  Different processors have
+	// different memory alignment requirements - refer to the FreeRTOS documentation
+	// for full information.
+	{
+		// Base address					Length	Parameters
+        { cReadWriteArray,				32,		portMPU_REGION_READ_WRITE },
+        { cReadOnlyArray,				32,		portMPU_REGION_READ_ONLY },
+        { cPrivilegedOnlyAccessArray,	128,	portMPU_REGION_PRIVILEGED_READ_WRITE }
+	},
+
+	&xTaskBuffer // Holds the task's data structure.
+};
+
+int main( void )
+{
+TaskHandle_t xHandle;
+
+	// Create a task from the const structure defined above.  The task handle
+	// is requested (the second parameter is not NULL) but in this case just for
+	// demonstration purposes as it's not actually used.
+	xTaskCreateRestrictedStatic( &xCheckTaskParameters, &xHandle );
+
+	// Start the scheduler.
+	vTaskStartScheduler();
+
+	// Will only get here if there was insufficient memory to create the idle
+	// and/or timer task.
+	for( ;; );
+}
+   
+ * \defgroup xTaskCreateRestrictedStatic xTaskCreateRestrictedStatic + * \ingroup Tasks + */ +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *
+ void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions );
+ * + * Memory regions are assigned to a restricted task when the task is created by + * a call to xTaskCreateRestricted(). These regions can be redefined using + * vTaskAllocateMPURegions(). + * + * @param xTask The handle of the task being updated. + * + * @param xRegions A pointer to an MemoryRegion_t structure that contains the + * new memory region definitions. + * + * Example usage: +
+// Define an array of MemoryRegion_t structures that configures an MPU region
+// allowing read/write access for 1024 bytes starting at the beginning of the
+// ucOneKByte array.  The other two of the maximum 3 definable regions are
+// unused so set to zero.
+static const MemoryRegion_t xAltRegions[ portNUM_CONFIGURABLE_REGIONS ] =
+{
+	// Base address		Length		Parameters
+	{ ucOneKByte,		1024,		portMPU_REGION_READ_WRITE },
+	{ 0,				0,			0 },
+	{ 0,				0,			0 }
+};
+
+void vATask( void *pvParameters )
+{
+	// This task was created such that it has access to certain regions of
+	// memory as defined by the MPU configuration.  At some point it is
+	// desired that these MPU regions are replaced with those defined in the
+	// xAltRegions const struct above.  Use a call to vTaskAllocateMPURegions()
+	// for this purpose.  NULL is used as the task handle to indicate that this
+	// function should modify the MPU regions of the calling task.
+	vTaskAllocateMPURegions( NULL, xAltRegions );
+
+	// Now the task can continue its function, but from this point on can only
+	// access its stack and the ucOneKByte array (unless any other statically
+	// defined or shared regions have been declared elsewhere).
+}
+   
+ * \defgroup xTaskCreateRestricted xTaskCreateRestricted + * \ingroup Tasks + */ +void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ void vTaskDelete( TaskHandle_t xTask );
+ * + * INCLUDE_vTaskDelete must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Remove a task from the RTOS real time kernel's management. The task being + * deleted will be removed from all ready, blocked, suspended and event lists. + * + * NOTE: The idle task is responsible for freeing the kernel allocated + * memory from tasks that have been deleted. It is therefore important that + * the idle task is not starved of microcontroller processing time if your + * application makes any calls to vTaskDelete (). Memory allocated by the + * task code is not automatically freed, and should be freed before the task + * is deleted. + * + * See the demo application file death.c for sample code that utilises + * vTaskDelete (). + * + * @param xTask The handle of the task to be deleted. Passing NULL will + * cause the calling task to be deleted. + * + * Example usage: +
+ void vOtherFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Create the task, storing the handle.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 // Use the handle to delete the task.
+	 vTaskDelete( xHandle );
+ }
+   
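+ // As described above, a task can also delete itself by passing NULL.  A
+ // purely illustrative sketch (vOneShotTask is a hypothetical task function):
+ void vOneShotTask( void * pvParameters )
+ {
+	 // Perform the task's work here.  Any memory allocated by the task itself
+	 // must be freed before the task is deleted, as vTaskDelete() will not free
+	 // it; the kernel allocated stack and TCB are later freed by the idle task.
+
+	 // Delete the calling task.
+	 vTaskDelete( NULL );
+ }
+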
+ * \defgroup vTaskDelete vTaskDelete + * \ingroup Tasks + */ +void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * TASK CONTROL API + *----------------------------------------------------------*/ + +/** + * task. h + *
+ void vTaskDelay( const TickType_t xTicksToDelay );
+ * + * Delay a task for a given number of ticks. The actual time that the + * task remains blocked depends on the tick rate. The constant + * portTICK_PERIOD_MS can be used to calculate real time from the tick + * rate - with the resolution of one tick period. + * + * INCLUDE_vTaskDelay must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * + * vTaskDelay() specifies a time at which the task wishes to unblock relative to + * the time at which vTaskDelay() is called. For example, specifying a block + * period of 100 ticks will cause the task to unblock 100 ticks after + * vTaskDelay() is called. vTaskDelay() does not therefore provide a good method + * of controlling the frequency of a periodic task as the path taken through the + * code, as well as other task and interrupt activity, will effect the frequency + * at which vTaskDelay() gets called and therefore the time at which the task + * next executes. See vTaskDelayUntil() for an alternative API function designed + * to facilitate fixed frequency execution. It does this by specifying an + * absolute time (rather than a relative time) at which the calling task should + * unblock. + * + * @param xTicksToDelay The amount of time, in tick periods, that + * the calling task should block. + * + * Example usage: + + void vTaskFunction( void * pvParameters ) + { + // Block for 500ms. + const TickType_t xDelay = 500 / portTICK_PERIOD_MS; + + for( ;; ) + { + // Simply toggle the LED every 500ms, blocking between each toggle. + vToggleLED(); + vTaskDelay( xDelay ); + } + } + + * \defgroup vTaskDelay vTaskDelay + * \ingroup TaskCtrl + */ +void vTaskDelay( const TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ void vTaskDelayUntil( TickType_t *pxPreviousWakeTime, const TickType_t xTimeIncrement );
+ * + * INCLUDE_vTaskDelayUntil must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Delay a task until a specified time. This function can be used by periodic + * tasks to ensure a constant execution frequency. + * + * This function differs from vTaskDelay () in one important aspect: vTaskDelay () will + * cause a task to block for the specified number of ticks from the time vTaskDelay () is + * called. It is therefore difficult to use vTaskDelay () by itself to generate a fixed + * execution frequency as the time between a task starting to execute and that task + * calling vTaskDelay () may not be fixed [the task may take a different path though the + * code between calls, or may get interrupted or preempted a different number of times + * each time it executes]. + * + * Whereas vTaskDelay () specifies a wake time relative to the time at which the function + * is called, vTaskDelayUntil () specifies the absolute (exact) time at which it wishes to + * unblock. + * + * The constant portTICK_PERIOD_MS can be used to calculate real time from the tick + * rate - with the resolution of one tick period. + * + * @param pxPreviousWakeTime Pointer to a variable that holds the time at which the + * task was last unblocked. The variable must be initialised with the current time + * prior to its first use (see the example below). Following this the variable is + * automatically updated within vTaskDelayUntil (). + * + * @param xTimeIncrement The cycle time period. The task will be unblocked at + * time *pxPreviousWakeTime + xTimeIncrement. Calling vTaskDelayUntil with the + * same xTimeIncrement parameter value will cause the task to execute with + * a fixed interface period. + * + * Example usage: +
+ // Perform an action every 10 ticks.
+ void vTaskFunction( void * pvParameters )
+ {
+ TickType_t xLastWakeTime;
+ const TickType_t xFrequency = 10;
+
+	 // Initialise the xLastWakeTime variable with the current time.
+	 xLastWakeTime = xTaskGetTickCount ();
+	 for( ;; )
+	 {
+		 // Wait for the next cycle.
+		 vTaskDelayUntil( &xLastWakeTime, xFrequency );
+
+		 // Perform action here.
+	 }
+ }
+   
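+ // The same pattern with the cycle period expressed in milliseconds rather
+ // than raw ticks (illustrative only) - pdMS_TO_TICKS() converts a time in
+ // milliseconds into the equivalent number of tick periods:
+ //
+ //     const TickType_t xFrequency = pdMS_TO_TICKS( 10 );
+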
+ * \defgroup vTaskDelayUntil vTaskDelayUntil + * \ingroup TaskCtrl + */ +void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ BaseType_t xTaskAbortDelay( TaskHandle_t xTask );
+ * + * INCLUDE_xTaskAbortDelay must be defined as 1 in FreeRTOSConfig.h for this + * function to be available. + * + * A task will enter the Blocked state when it is waiting for an event. The + * event it is waiting for can be a temporal event (waiting for a time), such + * as when vTaskDelay() is called, or an event on an object, such as when + * xQueueReceive() or ulTaskNotifyTake() is called. If the handle of a task + * that is in the Blocked state is used in a call to xTaskAbortDelay() then the + * task will leave the Blocked state, and return from whichever function call + * placed the task into the Blocked state. + * + * @param xTask The handle of the task to remove from the Blocked state. + * + * @return If the task referenced by xTask was not in the Blocked state then + * pdFAIL is returned. Otherwise pdPASS is returned. + * + * \defgroup xTaskAbortDelay xTaskAbortDelay + * \ingroup TaskCtrl + */ +BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask );
+ * + * INCLUDE_uxTaskPriorityGet must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Obtain the priority of any task. + * + * @param xTask Handle of the task to be queried. Passing a NULL + * handle results in the priority of the calling task being returned. + * + * @return The priority of xTask. + * + * Example usage: +
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Create a task, storing the handle.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 // ...
+
+	 // Use the handle to obtain the priority of the created task.
+	 // It was created with tskIDLE_PRIORITY, but may have changed
+	 // it itself.
+	 if( uxTaskPriorityGet( xHandle ) != tskIDLE_PRIORITY )
+	 {
+		 // The task has changed its priority.
+	 }
+
+	 // ...
+
+	 // Is our priority higher than the created task?
+	 if( uxTaskPriorityGet( xHandle ) < uxTaskPriorityGet( NULL ) )
+	 {
+		 // Our priority (obtained using NULL handle) is higher.
+	 }
+ }
+   
+ * \defgroup uxTaskPriorityGet uxTaskPriorityGet + * \ingroup TaskCtrl + */ +UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask );
+ * + * A version of uxTaskPriorityGet() that can be used from an ISR. + */ +UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ eTaskState eTaskGetState( TaskHandle_t xTask );
+ * + * INCLUDE_eTaskGetState must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Obtain the state of any task. States are encoded by the eTaskState + * enumerated type. + * + * @param xTask Handle of the task to be queried. + * + * @return The state of xTask at the time the function was called. Note the + * state of the task might change between the function being called, and the + * functions return value being tested by the calling task. + */ +eTaskState eTaskGetState( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState );
+ * + * configUSE_TRACE_FACILITY must be defined as 1 for this function to be + * available. See the configuration section for more information. + * + * Populates a TaskStatus_t structure with information about a task. + * + * @param xTask Handle of the task being queried. If xTask is NULL then + * information will be returned about the calling task. + * + * @param pxTaskStatus A pointer to the TaskStatus_t structure that will be + * filled with information about the task referenced by the handle passed using + * the xTask parameter. + * + * @xGetFreeStackSpace The TaskStatus_t structure contains a member to report + * the stack high water mark of the task being queried. Calculating the stack + * high water mark takes a relatively long time, and can make the system + * temporarily unresponsive - so the xGetFreeStackSpace parameter is provided to + * allow the high water mark checking to be skipped. The high watermark value + * will only be written to the TaskStatus_t structure if xGetFreeStackSpace is + * not set to pdFALSE; + * + * @param eState The TaskStatus_t structure contains a member to report the + * state of the task being queried. Obtaining the task state is not as fast as + * a simple assignment - so the eState parameter is provided to allow the state + * information to be omitted from the TaskStatus_t structure. To obtain state + * information then set eState to eInvalid - otherwise the value passed in + * eState will be reported as the task state in the TaskStatus_t structure. + * + * Example usage: +
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+ TaskStatus_t xTaskDetails;
+
+    // Obtain the handle of a task from its name.
+    xHandle = xTaskGetHandle( "Task_Name" );
+
+    // Check the handle is not NULL.
+    configASSERT( xHandle );
+
+    // Use the handle to obtain further information about the task.
+    vTaskGetInfo( xHandle,
+                  &xTaskDetails,
+                  pdTRUE, // Include the high water mark in xTaskDetails.
+                  eInvalid ); // Include the task state in xTaskDetails.
+ }
+   
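+ // Once populated, the structure's members can be examined directly, for
+ // example (illustrative only):
+ //
+ //     if( xTaskDetails.eCurrentState == eSuspended )
+ //     {
+ //         // The queried task is currently suspended.
+ //     }
+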
+ * \defgroup vTaskGetInfo vTaskGetInfo + * \ingroup TaskCtrl + */ +void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority );
+ * + * INCLUDE_vTaskPrioritySet must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Set the priority of any task. + * + * A context switch will occur before the function returns if the priority + * being set is higher than the currently executing task. + * + * @param xTask Handle to the task for which the priority is being set. + * Passing a NULL handle results in the priority of the calling task being set. + * + * @param uxNewPriority The priority to which the task will be set. + * + * Example usage: +
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Create a task, storing the handle.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 // ...
+
+	 // Use the handle to raise the priority of the created task.
+	 vTaskPrioritySet( xHandle, tskIDLE_PRIORITY + 1 );
+
+	 // ...
+
+	 // Use a NULL handle to raise our priority to the same value.
+	 vTaskPrioritySet( NULL, tskIDLE_PRIORITY + 1 );
+ }
+   
+ * \defgroup vTaskPrioritySet vTaskPrioritySet + * \ingroup TaskCtrl + */ +void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ void vTaskSuspend( TaskHandle_t xTaskToSuspend );
+ * + * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Suspend any task. When suspended a task will never get any microcontroller + * processing time, no matter what its priority. + * + * Calls to vTaskSuspend are not accumulative - + * i.e. calling vTaskSuspend () twice on the same task still only requires one + * call to vTaskResume () to ready the suspended task. + * + * @param xTaskToSuspend Handle to the task being suspended. Passing a NULL + * handle will cause the calling task to be suspended. + * + * Example usage: +
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Create a task, storing the handle.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 // ...
+
+	 // Use the handle to suspend the created task.
+	 vTaskSuspend( xHandle );
+
+	 // ...
+
+	 // The created task will not run during this period, unless
+	 // another task calls vTaskResume( xHandle ).
+
+	 //...
+
+
+	 // Suspend ourselves.
+	 vTaskSuspend( NULL );
+
+	 // We cannot get here unless another task calls vTaskResume
+	 // with our handle as the parameter.
+ }
+   
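+ // As noted above, suspension does not nest: even after two calls to
+ // vTaskSuspend() on the same handle, a single call to vTaskResume() is enough
+ // to ready the task again.  Illustrative only:
+ //
+ //     vTaskSuspend( xHandle );
+ //     vTaskSuspend( xHandle );
+ //     vTaskResume( xHandle );  // The task is ready again after this one call.
+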
+ * \defgroup vTaskSuspend vTaskSuspend + * \ingroup TaskCtrl + */ +void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ void vTaskResume( TaskHandle_t xTaskToResume );
+ * + * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Resumes a suspended task. + * + * A task that has been suspended by one or more calls to vTaskSuspend () + * will be made available for running again by a single call to + * vTaskResume (). + * + * @param xTaskToResume Handle to the task being readied. + * + * Example usage: +
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Create a task, storing the handle.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 // ...
+
+	 // Use the handle to suspend the created task.
+	 vTaskSuspend( xHandle );
+
+	 // ...
+
+	 // The created task will not run during this period, unless
+	 // another task calls vTaskResume( xHandle ).
+
+	 //...
+
+
+	 // Resume the suspended task ourselves.
+	 vTaskResume( xHandle );
+
+	 // The created task will once again get microcontroller processing
+	 // time in accordance with its priority within the system.
+ }
+   
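+ // If the resume has to happen from an interrupt, the interrupt safe variant
+ // documented below must be used instead.  A minimal, illustrative sketch,
+ // assuming xHandle is the handle of the suspended task and that the port in
+ // use provides portYIELD_FROM_ISR():
+ void vAnExampleISR( void )
+ {
+ BaseType_t xYieldRequired;
+
+	 // Resume the suspended task from this ISR.
+	 xYieldRequired = xTaskResumeFromISR( xHandle );
+
+	 // If resuming the task unblocked a task with a priority above that of the
+	 // interrupted task, request a context switch before exiting the ISR.
+	 portYIELD_FROM_ISR( xYieldRequired );
+ }
+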
+ * \defgroup vTaskResume vTaskResume + * \ingroup TaskCtrl + */ +void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume );
+ * + * INCLUDE_xTaskResumeFromISR must be defined as 1 for this function to be + * available. See the configuration section for more information. + * + * An implementation of vTaskResume() that can be called from within an ISR. + * + * A task that has been suspended by one or more calls to vTaskSuspend () + * will be made available for running again by a single call to + * xTaskResumeFromISR (). + * + * xTaskResumeFromISR() should not be used to synchronise a task with an + * interrupt if there is a chance that the interrupt could arrive prior to the + * task being suspended - as this can lead to interrupts being missed. Use of a + * semaphore as a synchronisation mechanism would avoid this eventuality. + * + * @param xTaskToResume Handle to the task being readied. + * + * @return pdTRUE if resuming the task should result in a context switch, + * otherwise pdFALSE. This is used by the ISR to determine if a context switch + * may be required following the ISR. + * + * \defgroup vTaskResumeFromISR vTaskResumeFromISR + * \ingroup TaskCtrl + */ +BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * SCHEDULER CONTROL + *----------------------------------------------------------*/ + +/** + * task. h + *
void vTaskStartScheduler( void );
+ * + * Starts the real time kernel tick processing. After calling the kernel + * has control over which tasks are executed and when. + * + * See the demo application file main.c for an example of creating + * tasks and starting the kernel. + * + * Example usage: +
+ void vAFunction( void )
+ {
+	 // Create at least one task before starting the kernel.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL );
+
+	 // Start the real time kernel with preemption.
+	 vTaskStartScheduler ();
+
+	 // Will not get here unless a task calls vTaskEndScheduler ()
+ }
+   
+ * + * \defgroup vTaskStartScheduler vTaskStartScheduler + * \ingroup SchedulerControl + */ +void vTaskStartScheduler( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskEndScheduler( void );
+ * + * NOTE: At the time of writing only the x86 real mode port, which runs on a PC + * in place of DOS, implements this function. + * + * Stops the real time kernel tick. All created tasks will be automatically + * deleted and multitasking (either preemptive or cooperative) will + * stop. Execution then resumes from the point where vTaskStartScheduler () + * was called, as if vTaskStartScheduler () had just returned. + * + * See the demo application file main. c in the demo/PC directory for an + * example that uses vTaskEndScheduler (). + * + * vTaskEndScheduler () requires an exit function to be defined within the + * portable layer (see vPortEndScheduler () in port. c for the PC port). This + * performs hardware specific operations such as stopping the kernel tick. + * + * vTaskEndScheduler () will cause all of the resources allocated by the + * kernel to be freed - but will not free resources allocated by application + * tasks. + * + * Example usage: +
+ void vTaskCode( void * pvParameters )
+ {
+	 for( ;; )
+	 {
+		 // Task code goes here.
+
+		 // At some point we want to end the real time kernel processing
+		 // so call ...
+		 vTaskEndScheduler ();
+	 }
+ }
+
+ void vAFunction( void )
+ {
+	 // Create at least one task before starting the kernel.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL );
+
+	 // Start the real time kernel with preemption.
+	 vTaskStartScheduler ();
+
+	 // Will only get here when the vTaskCode () task has called
+	 // vTaskEndScheduler ().  When we get here we are back to single task
+	 // execution.
+ }
+   
+ * + * \defgroup vTaskEndScheduler vTaskEndScheduler + * \ingroup SchedulerControl + */ +void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskSuspendAll( void );
+ * + * Suspends the scheduler without disabling interrupts. Context switches will + * not occur while the scheduler is suspended. + * + * After calling vTaskSuspendAll () the calling task will continue to execute + * without risk of being swapped out until a call to xTaskResumeAll () has been + * made. + * + * API functions that have the potential to cause a context switch (for example, + * vTaskDelayUntil(), xQueueSend(), etc.) must not be called while the scheduler + * is suspended. + * + * Example usage: +
+ void vTask1( void * pvParameters )
+ {
+	 for( ;; )
+	 {
+		 // Task code goes here.
+
+		 // ...
+
+		 // At some point the task wants to perform a long operation during
+		 // which it does not want to get swapped out.  It cannot use
+		 // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the
+		 // operation may cause interrupts to be missed - including the
+		 // ticks.
+
+		 // Prevent the real time kernel swapping out the task.
+		 vTaskSuspendAll ();
+
+		 // Perform the operation here.  There is no need to use critical
+		 // sections as we have all the microcontroller processing time.
+		 // During this time interrupts will still operate and the kernel
+		 // tick count will be maintained.
+
+		 // ...
+
+		 // The operation is complete.  Restart the kernel.
+		 xTaskResumeAll ();
+	 }
+ }
+   
+ * \defgroup vTaskSuspendAll vTaskSuspendAll + * \ingroup SchedulerControl + */ +void vTaskSuspendAll( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
BaseType_t xTaskResumeAll( void );
+ * + * Resumes scheduler activity after it was suspended by a call to + * vTaskSuspendAll(). + * + * xTaskResumeAll() only resumes the scheduler. It does not unsuspend tasks + * that were previously suspended by a call to vTaskSuspend(). + * + * @return If resuming the scheduler caused a context switch then pdTRUE is + * returned, otherwise pdFALSE is returned. + * + * Example usage: +
+ void vTask1( void * pvParameters )
+ {
+	 for( ;; )
+	 {
+		 // Task code goes here.
+
+		 // ...
+
+		 // At some point the task wants to perform a long operation during
+		 // which it does not want to get swapped out.  It cannot use
+		 // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the
+		 // operation may cause interrupts to be missed - including the
+		 // ticks.
+
+		 // Prevent the real time kernel swapping out the task.
+		 vTaskSuspendAll ();
+
+		 // Perform the operation here.  There is no need to use critical
+		 // sections as we have all the microcontroller processing time.
+		 // During this time interrupts will still operate and the real
+		 // time kernel tick count will be maintained.
+
+		 // ...
+
+		 // The operation is complete.  Restart the kernel.  We want to force
+		 // a context switch - but there is no point if resuming the scheduler
+		 // caused a context switch already.
+		 if( !xTaskResumeAll () )
+		 {
+			  taskYIELD ();
+		 }
+	 }
+ }
+   
+ * \defgroup xTaskResumeAll xTaskResumeAll + * \ingroup SchedulerControl + */ +BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * TASK UTILITIES + *----------------------------------------------------------*/ + +/** + * task. h + *
TickType_t xTaskGetTickCount( void );
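+ *
+ * Example usage (a minimal sketch that measures an elapsed period in ticks;
+ * the function name is hypothetical):
+
+ void vAFunction( void )
+ {
+ TickType_t xStartTick, xElapsedTicks;
+
+	 xStartTick = xTaskGetTickCount();
+
+	 // ... perform some processing ...
+
+	 // Unsigned tick arithmetic remains valid across a counter wrap provided
+	 // the measured period is shorter than the range of TickType_t.
+	 xElapsedTicks = xTaskGetTickCount() - xStartTick;
+	 ( void ) xElapsedTicks;
+ }
+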
+ * + * @return The count of ticks since vTaskStartScheduler was called. + * + * \defgroup xTaskGetTickCount xTaskGetTickCount + * \ingroup TaskUtils + */ +TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
TickType_t xTaskGetTickCountFromISR( void );
+ * + * @return The count of ticks since vTaskStartScheduler was called. + * + * This is a version of xTaskGetTickCount() that is safe to be called from an + * ISR - provided that TickType_t is the natural word size of the + * microcontroller being used or interrupt nesting is either not supported or + * not being used. + * + * \defgroup xTaskGetTickCountFromISR xTaskGetTickCountFromISR + * \ingroup TaskUtils + */ +TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
 UBaseType_t uxTaskGetNumberOfTasks( void );
+ * + * @return The number of tasks that the real time kernel is currently managing. + * This includes all ready, blocked and suspended tasks. A task that + * has been deleted but not yet freed by the idle task will also be + * included in the count. + * + * \defgroup uxTaskGetNumberOfTasks uxTaskGetNumberOfTasks + * \ingroup TaskUtils + */ +UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
char *pcTaskGetName( TaskHandle_t xTaskToQuery );
+ * + * @return The text (human readable) name of the task referenced by the handle + * xTaskToQuery. A task can query its own name by either passing in its own + * handle, or by setting xTaskToQuery to NULL. + * + * \defgroup pcTaskGetName pcTaskGetName + * \ingroup TaskUtils + */ +char *pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task. h + *
TaskHandle_t xTaskGetHandle( const char *pcNameToQuery );
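+ *
+ * Example usage (a minimal sketch; "NAME" is assumed to be the text name that
+ * was given to a task when it was created):
+
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Look the task up by its human readable name.  NULL is returned if no
+	 // task with that name exists.
+	 xHandle = xTaskGetHandle( "NAME" );
+
+	 if( xHandle != NULL )
+	 {
+		 // The handle can now be used with other API functions, for
+		 // example to suspend the task.
+		 vTaskSuspend( xHandle );
+	 }
+ }
+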
+ * + * NOTE: This function takes a relatively long time to complete and should be + * used sparingly. + * + * @return The handle of the task that has the human readable name pcNameToQuery. + * NULL is returned if no matching name is found. INCLUDE_xTaskGetHandle + * must be set to 1 in FreeRTOSConfig.h for xTaskGetHandle() to be available. + * + * \defgroup xTaskGetHandle xTaskGetHandle + * \ingroup TaskUtils + */ +TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task.h + *
UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask );
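+ *
+ * Example usage (a minimal sketch; the task simply inspects its own stack):
+
+ void vATask( void * pvParameters )
+ {
+ UBaseType_t uxHighWaterMark;
+
+	 for( ;; )
+	 {
+		 // Perform the task's work here.
+
+		 // Query the minimum amount of stack (in words) that has remained
+		 // unused since the task started.  Passing NULL checks the stack of
+		 // the calling task.
+		 uxHighWaterMark = uxTaskGetStackHighWaterMark( NULL );
+		 ( void ) uxHighWaterMark;
+	 }
+ }
+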
+ * + * INCLUDE_uxTaskGetStackHighWaterMark must be set to 1 in FreeRTOSConfig.h for + * this function to be available. + * + * Returns the high water mark of the stack associated with xTask. That is, + * the minimum free stack space there has been (in words, so on a 32 bit machine + * a value of 1 means 4 bytes) since the task started. The smaller the returned + * number the closer the task has come to overflowing its stack. + * + * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + * same except for their return type. Using configSTACK_DEPTH_TYPE allows the + * user to determine the return type. It gets around the problem of the value + * overflowing on 8-bit types without breaking backward compatibility for + * applications that expect an 8-bit return type. + * + * @param xTask Handle of the task associated with the stack to be checked. + * Set xTask to NULL to check the stack of the calling task. + * + * @return The smallest amount of free stack space there has been (in words, so + * actual spaces on the stack rather than bytes) since the task referenced by + * xTask was created. + */ +UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task.h + *
configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask );
+ * + * INCLUDE_uxTaskGetStackHighWaterMark2 must be set to 1 in FreeRTOSConfig.h for + * this function to be available. + * + * Returns the high water mark of the stack associated with xTask. That is, + * the minimum free stack space there has been (in words, so on a 32 bit machine + * a value of 1 means 4 bytes) since the task started. The smaller the returned + * number the closer the task has come to overflowing its stack. + * + * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + * same except for their return type. Using configSTACK_DEPTH_TYPE allows the + * user to determine the return type. It gets around the problem of the value + * overflowing on 8-bit types without breaking backward compatibility for + * applications that expect an 8-bit return type. + * + * @param xTask Handle of the task associated with the stack to be checked. + * Set xTask to NULL to check the stack of the calling task. + * + * @return The smallest amount of free stack space there has been (in words, so + * actual spaces on the stack rather than bytes) since the task referenced by + * xTask was created. + */ +configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/* When using trace macros it is sometimes necessary to include task.h before +FreeRTOS.h. When this is done TaskHookFunction_t will not yet have been defined, +so the following two prototypes will cause a compilation error. This can be +fixed by simply guarding against the inclusion of these two prototypes unless +they are explicitly required by the configUSE_APPLICATION_TASK_TAG configuration +constant. */ +#ifdef configUSE_APPLICATION_TASK_TAG + #if configUSE_APPLICATION_TASK_TAG == 1 + /** + * task.h + *
void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction );
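+ *
+ * Example usage (a minimal sketch; the hook function name is hypothetical and
+ * must match the TaskHookFunction_t prototype):
+
+ static BaseType_t prvExampleTaskHook( void * pvParameter )
+ {
+	 // Perform an action here.  pvParameter is the value passed in by
+	 // xTaskCallApplicationTaskHook().
+	 ( void ) pvParameter;
+	 return 0;
+ }
+
+ void vAFunction( void )
+ {
+	 // Register the hook (tag) function for the calling task.
+	 vTaskSetApplicationTaskTag( NULL, prvExampleTaskHook );
+ }
+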
+ * + * Sets pxHookFunction to be the task hook function used by the task xTask. + * Passing xTask as NULL has the effect of setting the calling tasks hook + * function. + */ + void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) PRIVILEGED_FUNCTION; + + /** + * task.h + *
 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask );
+ * + * Returns the pxHookFunction value assigned to the task xTask. Do not + * call from an interrupt service routine - call + * xTaskGetApplicationTaskTagFromISR() instead. + */ + TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + /** + * task.h + *
 TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask );
+ * + * Returns the pxHookFunction value assigned to the task xTask. Can + * be called from an interrupt service routine. + */ + TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + #endif /* configUSE_APPLICATION_TASK_TAG ==1 */ +#endif /* ifdef configUSE_APPLICATION_TASK_TAG */ + +#if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + + /* Each task contains an array of pointers that is dimensioned by the + configNUM_THREAD_LOCAL_STORAGE_POINTERS setting in FreeRTOSConfig.h. The + kernel does not use the pointers itself, so the application writer can use + the pointers for any purpose they wish. The following two functions are + used to set and query a pointer respectively. */ + void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) PRIVILEGED_FUNCTION; + void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) PRIVILEGED_FUNCTION; + +#endif + +/** + * task.h + *
BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter );
+ * + * Calls the hook function associated with xTask. Passing xTask as NULL has + * the effect of calling the Running tasks (the calling task) hook function. + * + * pvParameter is passed to the hook function for the task to interpret as it + * wants. The return value is the value returned by the task hook function + * registered by the user. + */ +BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) PRIVILEGED_FUNCTION; + +/** + * xTaskGetIdleTaskHandle() is only available if + * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h. + * + * Simply returns the handle of the idle task. It is not valid to call + * xTaskGetIdleTaskHandle() before the scheduler has been started. + */ +TaskHandle_t xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION; + +/** + * configUSE_TRACE_FACILITY must be defined as 1 in FreeRTOSConfig.h for + * uxTaskGetSystemState() to be available. + * + * uxTaskGetSystemState() populates an TaskStatus_t structure for each task in + * the system. TaskStatus_t structures contain, among other things, members + * for the task handle, task name, task priority, task state, and total amount + * of run time consumed by the task. See the TaskStatus_t structure + * definition in this file for the full member list. + * + * NOTE: This function is intended for debugging use only as its use results in + * the scheduler remaining suspended for an extended period. + * + * @param pxTaskStatusArray A pointer to an array of TaskStatus_t structures. + * The array must contain at least one TaskStatus_t structure for each task + * that is under the control of the RTOS. The number of tasks under the control + * of the RTOS can be determined using the uxTaskGetNumberOfTasks() API function. + * + * @param uxArraySize The size of the array pointed to by the pxTaskStatusArray + * parameter. The size is specified as the number of indexes in the array, or + * the number of TaskStatus_t structures contained in the array, not by the + * number of bytes in the array. + * + * @param pulTotalRunTime If configGENERATE_RUN_TIME_STATS is set to 1 in + * FreeRTOSConfig.h then *pulTotalRunTime is set by uxTaskGetSystemState() to the + * total run time (as defined by the run time stats clock, see + * http://www.freertos.org/rtos-run-time-stats.html) since the target booted. + * pulTotalRunTime can be set to NULL to omit the total run time information. + * + * @return The number of TaskStatus_t structures that were populated by + * uxTaskGetSystemState(). This should equal the number returned by the + * uxTaskGetNumberOfTasks() API function, but will be zero if the value passed + * in the uxArraySize parameter was too small. + * + * Example usage: +
+    // This example demonstrates how a human readable table of run time stats
+	// information is generated from raw data provided by uxTaskGetSystemState().
+	// The human readable table is written to pcWriteBuffer
+	void vTaskGetRunTimeStats( char *pcWriteBuffer )
+	{
+	TaskStatus_t *pxTaskStatusArray;
+	volatile UBaseType_t uxArraySize, x;
+	uint32_t ulTotalRunTime, ulStatsAsPercentage;
+
+		// Make sure the write buffer does not contain a string.
+		*pcWriteBuffer = 0x00;
+
+		// Take a snapshot of the number of tasks in case it changes while this
+		// function is executing.
+		uxArraySize = uxTaskGetNumberOfTasks();
+
+		// Allocate a TaskStatus_t structure for each task.  An array could be
+		// allocated statically at compile time.
+		pxTaskStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );
+
+		if( pxTaskStatusArray != NULL )
+		{
+			// Generate raw status information about each task.
+			uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalRunTime );
+
+			// For percentage calculations.
+			ulTotalRunTime /= 100UL;
+
+			// Avoid divide by zero errors.
+			if( ulTotalRunTime > 0 )
+			{
+				// For each populated position in the pxTaskStatusArray array,
+				// format the raw data as human readable ASCII data
+				for( x = 0; x < uxArraySize; x++ )
+				{
+					// What percentage of the total run time has the task used?
+					// This will always be rounded down to the nearest integer.
+					// ulTotalRunTime has already been divided by 100.
+					ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalRunTime;
+
+					if( ulStatsAsPercentage > 0UL )
+					{
+						sprintf( pcWriteBuffer, "%s\t\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
+					}
+					else
+					{
+						// If the percentage is zero here then the task has
+						// consumed less than 1% of the total run time.
+						sprintf( pcWriteBuffer, "%s\t\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter );
+					}
+
+					pcWriteBuffer += strlen( ( char * ) pcWriteBuffer );
+				}
+			}
+
+			// The array is no longer needed, free the memory it consumes.
+			vPortFree( pxTaskStatusArray );
+		}
+	}
+	
+ */ +UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskList( char *pcWriteBuffer );
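+ *
+ * Example usage (a minimal sketch; the 512 byte buffer is a hypothetical size
+ * chosen for a small number of tasks - see the note on buffer sizing below):
+
+ void vPrintTaskTable( void )
+ {
+ static char cBuffer[ 512 ];
+
+	 // Write a human readable table of task names, states, priorities and
+	 // stack high water marks into cBuffer.
+	 vTaskList( cBuffer );
+ }
+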
+ * + * configUSE_TRACE_FACILITY and configUSE_STATS_FORMATTING_FUNCTIONS must + * both be defined as 1 for this function to be available. See the + * configuration section of the FreeRTOS.org website for more information. + * + * NOTE 1: This function will disable interrupts for its duration. It is + * not intended for normal application runtime use but as a debug aid. + * + * Lists all the current tasks, along with their current state and stack + * usage high water mark. + * + * Tasks are reported as blocked ('B'), ready ('R'), deleted ('D') or + * suspended ('S'). + * + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many of the + * demo applications. Do not consider it to be part of the scheduler. + * + * vTaskList() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that displays task + * names, states and stack usage. + * + * vTaskList() has a dependency on the sprintf() C library function that might + * bloat the code size, use a lot of stack, and provide different results on + * different platforms. An alternative, tiny, third party, and limited + * functionality implementation of sprintf() is provided in many of the + * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note + * printf-stdarg.c does not provide a full snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly through a + * call to vTaskList(). + * + * @param pcWriteBuffer A buffer into which the above mentioned details + * will be written, in ASCII form. This buffer is assumed to be large + * enough to contain the generated report. Approximately 40 bytes per + * task should be sufficient. + * + * \defgroup vTaskList vTaskList + * \ingroup TaskUtils + */ +void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task. h + *
void vTaskGetRunTimeStats( char *pcWriteBuffer );
+ * + * configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS + * must both be defined as 1 for this function to be available. The application + * must also then provide definitions for + * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE() + * to configure a peripheral timer/counter and return the timers current count + * value respectively. The counter should be at least 10 times the frequency of + * the tick count. + * + * NOTE 1: This function will disable interrupts for its duration. It is + * not intended for normal application runtime use but as a debug aid. + * + * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total + * accumulated execution time being stored for each task. The resolution + * of the accumulated time value depends on the frequency of the timer + * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro. + * Calling vTaskGetRunTimeStats() writes the total execution time of each + * task into a buffer, both as an absolute count value and as a percentage + * of the total system execution time. + * + * NOTE 2: + * + * This function is provided for convenience only, and is used by many of the + * demo applications. Do not consider it to be part of the scheduler. + * + * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that displays the + * amount of time each task has spent in the Running state in both absolute and + * percentage terms. + * + * vTaskGetRunTimeStats() has a dependency on the sprintf() C library function + * that might bloat the code size, use a lot of stack, and provide different + * results on different platforms. An alternative, tiny, third party, and + * limited functionality implementation of sprintf() is provided in many of the + * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note + * printf-stdarg.c does not provide a full snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() directly + * to get access to raw stats data, rather than indirectly through a call to + * vTaskGetRunTimeStats(). + * + * @param pcWriteBuffer A buffer into which the execution times will be + * written, in ASCII form. This buffer is assumed to be large enough to + * contain the generated report. Approximately 40 bytes per task should + * be sufficient. + * + * \defgroup vTaskGetRunTimeStats vTaskGetRunTimeStats + * \ingroup TaskUtils + */ +void vTaskGetRunTimeStats( char *pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** +* task. h +*
TickType_t xTaskGetIdleRunTimeCounter( void );
+* +* configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS +* must both be defined as 1 for this function to be available. The application +* must also then provide definitions for +* portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE() +* to configure a peripheral timer/counter and return the timers current count +* value respectively. The counter should be at least 10 times the frequency of +* the tick count. +* +* Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total +* accumulated execution time being stored for each task. The resolution +* of the accumulated time value depends on the frequency of the timer +* configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro. +* While uxTaskGetSystemState() and vTaskGetRunTimeStats() writes the total +* execution time of each task into a buffer, xTaskGetIdleRunTimeCounter() +* returns the total execution time of just the idle task. +* +* @return The total run time of the idle task. This is the amount of time the +* idle task has actually been executing. The unit of time is dependent on the +* frequency configured using the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and +* portGET_RUN_TIME_COUNTER_VALUE() macros. +* +* \defgroup xTaskGetIdleRunTimeCounter xTaskGetIdleRunTimeCounter +* \ingroup TaskUtils +*/ +TickType_t xTaskGetIdleRunTimeCounter( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction );
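+ *
+ * Example usage (a minimal sketch; xHandle and the bit value are hypothetical,
+ * and the parameters are described in full below):
+
+ void vAFunction( TaskHandle_t xHandle )
+ {
+	 // Set bit 0 in the notification value of the task referenced by xHandle.
+	 xTaskNotify( xHandle, 0x01, eSetBits );
+ }
+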
+ * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this + * function to be available. + * + * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private + * "notification value", which is a 32-bit unsigned integer (uint32_t). + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment the task's notification value. In that way + * task notifications can be used to send data to a task, or be used as light + * weight and fast binary or counting semaphores. + * + * A notification sent to a task will remain pending until it is cleared by the + * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was + * already in the Blocked state to wait for a notification when the notification + * arrives then the task will automatically be removed from the Blocked state + * (unblocked) and the notification cleared. + * + * A task can use xTaskNotifyWait() to [optionally] block to wait for a + * notification to be pending, or ulTaskNotifyTake() to [optionally] block + * to wait for its notification value to have a non-zero value. The task does + * not consume any CPU time while it is in the Blocked state. + * + * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * @param xTaskToNotify The handle of the task being notified. The handle to a + * task can be returned from the xTaskCreate() API function used to create the + * task, and the handle of the currently running task can be obtained by calling + * xTaskGetCurrentTaskHandle(). + * + * @param ulValue Data that can be sent with the notification. How the data is + * used depends on the value of the eAction parameter. + * + * @param eAction Specifies how the notification updates the task's notification + * value, if at all. Valid values for eAction are as follows: + * + * eSetBits - + * The task's notification value is bitwise ORed with ulValue. xTaskNofify() + * always returns pdPASS in this case. + * + * eIncrement - + * The task's notification value is incremented. ulValue is not used and + * xTaskNotify() always returns pdPASS in this case. + * + * eSetValueWithOverwrite - + * The task's notification value is set to the value of ulValue, even if the + * task being notified had not yet processed the previous notification (the + * task already had a notification pending). xTaskNotify() always returns + * pdPASS in this case. + * + * eSetValueWithoutOverwrite - + * If the task being notified did not already have a notification pending then + * the task's notification value is set to ulValue and xTaskNotify() will + * return pdPASS. If the task being notified already had a notification + * pending then no action is performed and pdFAIL is returned. + * + * eNoAction - + * The task receives a notification without its notification value being + * updated. ulValue is not used and xTaskNotify() always returns pdPASS in + * this case. + * + * pulPreviousNotificationValue - + * Can be used to pass out the subject task's notification value before any + * bits are modified by the notify function. + * + * @return Dependent on the value of eAction. See the description of the + * eAction parameter. 
+ * + * \defgroup xTaskNotify xTaskNotify + * \ingroup TaskNotifications + */ +BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) PRIVILEGED_FUNCTION; +#define xTaskNotify( xTaskToNotify, ulValue, eAction ) xTaskGenericNotify( ( xTaskToNotify ), ( ulValue ), ( eAction ), NULL ) +#define xTaskNotifyAndQuery( xTaskToNotify, ulValue, eAction, pulPreviousNotifyValue ) xTaskGenericNotify( ( xTaskToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotifyValue ) ) + +/** + * task. h + *
BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );
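+ *
+ * Example usage (a minimal sketch; xHandleOfTaskToNotify is a hypothetical
+ * handle stored when the task was created, and portYIELD_FROM_ISR() is the
+ * port specific yield macro used by most ports):
+
+ void vAnExampleISR( void )
+ {
+ BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+
+	 // Set bit 0 in the notification value of the task being notified.
+	 xTaskNotifyFromISR( xHandleOfTaskToNotify, 0x01, eSetBits, &xHigherPriorityTaskWoken );
+
+	 // Request a context switch before the interrupt exits if a higher
+	 // priority task was unblocked by the notification.
+	 portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ }
+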
+ * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this + * function to be available. + * + * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private + * "notification value", which is a 32-bit unsigned integer (uint32_t). + * + * A version of xTaskNotify() that can be used from an interrupt service routine + * (ISR). + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment the task's notification value. In that way + * task notifications can be used to send data to a task, or be used as light + * weight and fast binary or counting semaphores. + * + * A notification sent to a task will remain pending until it is cleared by the + * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was + * already in the Blocked state to wait for a notification when the notification + * arrives then the task will automatically be removed from the Blocked state + * (unblocked) and the notification cleared. + * + * A task can use xTaskNotifyWait() to [optionally] block to wait for a + * notification to be pending, or ulTaskNotifyTake() to [optionally] block + * to wait for its notification value to have a non-zero value. The task does + * not consume any CPU time while it is in the Blocked state. + * + * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * @param xTaskToNotify The handle of the task being notified. The handle to a + * task can be returned from the xTaskCreate() API function used to create the + * task, and the handle of the currently running task can be obtained by calling + * xTaskGetCurrentTaskHandle(). + * + * @param ulValue Data that can be sent with the notification. How the data is + * used depends on the value of the eAction parameter. + * + * @param eAction Specifies how the notification updates the task's notification + * value, if at all. Valid values for eAction are as follows: + * + * eSetBits - + * The task's notification value is bitwise ORed with ulValue. xTaskNofify() + * always returns pdPASS in this case. + * + * eIncrement - + * The task's notification value is incremented. ulValue is not used and + * xTaskNotify() always returns pdPASS in this case. + * + * eSetValueWithOverwrite - + * The task's notification value is set to the value of ulValue, even if the + * task being notified had not yet processed the previous notification (the + * task already had a notification pending). xTaskNotify() always returns + * pdPASS in this case. + * + * eSetValueWithoutOverwrite - + * If the task being notified did not already have a notification pending then + * the task's notification value is set to ulValue and xTaskNotify() will + * return pdPASS. If the task being notified already had a notification + * pending then no action is performed and pdFAIL is returned. + * + * eNoAction - + * The task receives a notification without its notification value being + * updated. ulValue is not used and xTaskNotify() always returns pdPASS in + * this case. 
+ * + * @param pxHigherPriorityTaskWoken xTaskNotifyFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the + * task to which the notification was sent to leave the Blocked state, and the + * unblocked task has a priority higher than the currently running task. If + * xTaskNotifyFromISR() sets this value to pdTRUE then a context switch should + * be requested before the interrupt is exited. How a context switch is + * requested from an ISR is dependent on the port - see the documentation page + * for the port in use. + * + * @return Dependent on the value of eAction. See the description of the + * eAction parameter. + * + * \defgroup xTaskNotify xTaskNotify + * \ingroup TaskNotifications + */ +BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +#define xTaskNotifyFromISR( xTaskToNotify, ulValue, eAction, pxHigherPriorityTaskWoken ) xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( ulValue ), ( eAction ), NULL, ( pxHigherPriorityTaskWoken ) ) +#define xTaskNotifyAndQueryFromISR( xTaskToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ) xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotificationValue ), ( pxHigherPriorityTaskWoken ) ) + +/** + * task. h + *
BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
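+ *
+ * Example usage (a minimal sketch; the bit masks are hypothetical application
+ * defined values, and the parameters are described in full below):
+
+ void vATask( void * pvParameters )
+ {
+ uint32_t ulNotifiedValue;
+
+	 for( ;; )
+	 {
+		 // Block until a notification is pending.  Leave the notification
+		 // value unchanged on entry and clear all bits on exit.
+		 if( xTaskNotifyWait( 0x00, 0xffffffffUL, &ulNotifiedValue, portMAX_DELAY ) == pdPASS )
+		 {
+			 // ulNotifiedValue holds the bits set by the notifying task or
+			 // interrupt - process them here.
+		 }
+	 }
+ }
+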
+ * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this + * function to be available. + * + * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private + * "notification value", which is a 32-bit unsigned integer (uint32_t). + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment the task's notification value. In that way + * task notifications can be used to send data to a task, or be used as light + * weight and fast binary or counting semaphores. + * + * A notification sent to a task will remain pending until it is cleared by the + * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was + * already in the Blocked state to wait for a notification when the notification + * arrives then the task will automatically be removed from the Blocked state + * (unblocked) and the notification cleared. + * + * A task can use xTaskNotifyWait() to [optionally] block to wait for a + * notification to be pending, or ulTaskNotifyTake() to [optionally] block + * to wait for its notification value to have a non-zero value. The task does + * not consume any CPU time while it is in the Blocked state. + * + * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * @param ulBitsToClearOnEntry Bits that are set in ulBitsToClearOnEntry value + * will be cleared in the calling task's notification value before the task + * checks to see if any notifications are pending, and optionally blocks if no + * notifications are pending. Setting ulBitsToClearOnEntry to ULONG_MAX (if + * limits.h is included) or 0xffffffffUL (if limits.h is not included) will have + * the effect of resetting the task's notification value to 0. Setting + * ulBitsToClearOnEntry to 0 will leave the task's notification value unchanged. + * + * @param ulBitsToClearOnExit If a notification is pending or received before + * the calling task exits the xTaskNotifyWait() function then the task's + * notification value (see the xTaskNotify() API function) is passed out using + * the pulNotificationValue parameter. Then any bits that are set in + * ulBitsToClearOnExit will be cleared in the task's notification value (note + * *pulNotificationValue is set before any bits are cleared). Setting + * ulBitsToClearOnExit to ULONG_MAX (if limits.h is included) or 0xffffffffUL + * (if limits.h is not included) will have the effect of resetting the task's + * notification value to 0 before the function exits. Setting + * ulBitsToClearOnExit to 0 will leave the task's notification value unchanged + * when the function exits (in which case the value passed out in + * pulNotificationValue will match the task's notification value). + * + * @param pulNotificationValue Used to pass the task's notification value out + * of the function. Note the value passed out will not be effected by the + * clearing of any bits caused by ulBitsToClearOnExit being non-zero. + * + * @param xTicksToWait The maximum amount of time that the task should wait in + * the Blocked state for a notification to be received, should a notification + * not already be pending when xTaskNotifyWait() was called. 
The task + * will not consume any processing time while it is in the Blocked state. This + * is specified in kernel ticks; the macro pdMS_TO_TICKS( value_in_ms ) can be + * used to convert a time specified in milliseconds to a time specified in + * ticks. + * + * @return If a notification was received (including notifications that were + * already pending when xTaskNotifyWait was called) then pdPASS is + * returned. Otherwise pdFAIL is returned. + * + * \defgroup xTaskNotifyWait xTaskNotifyWait + * \ingroup TaskNotifications + */ +BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
BaseType_t xTaskNotifyGive( TaskHandle_t xTaskToNotify );
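+ *
+ * Example usage (a minimal sketch; xHandleOfReceivingTask is hypothetical):
+
+ void vAFunction( TaskHandle_t xHandleOfReceivingTask )
+ {
+	 // Increment the notification value of the receiving task, which should
+	 // be waiting on the notification in ulTaskNotifyTake().
+	 xTaskNotifyGive( xHandleOfReceivingTask );
+ }
+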
+ * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro + * to be available. + * + * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private + * "notification value", which is a 32-bit unsigned integer (uint32_t). + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment the task's notification value. In that way + * task notifications can be used to send data to a task, or be used as light + * weight and fast binary or counting semaphores. + * + * xTaskNotifyGive() is a helper macro intended for use when task notifications + * are used as light weight and faster binary or counting semaphore equivalents. + * Actual FreeRTOS semaphores are given using the xSemaphoreGive() API function, + * the equivalent action that instead uses a task notification is + * xTaskNotifyGive(). + * + * When task notifications are being used as a binary or counting semaphore + * equivalent then the task being notified should wait for the notification + * using the ulTaskNotificationTake() API function rather than the + * xTaskNotifyWait() API function. + * + * See http://www.FreeRTOS.org/RTOS-task-notifications.html for more details. + * + * @param xTaskToNotify The handle of the task being notified. The handle to a + * task can be returned from the xTaskCreate() API function used to create the + * task, and the handle of the currently running task can be obtained by calling + * xTaskGetCurrentTaskHandle(). + * + * @return xTaskNotifyGive() is a macro that calls xTaskNotify() with the + * eAction parameter set to eIncrement - so pdPASS is always returned. + * + * \defgroup xTaskNotifyGive xTaskNotifyGive + * \ingroup TaskNotifications + */ +#define xTaskNotifyGive( xTaskToNotify ) xTaskGenericNotify( ( xTaskToNotify ), ( 0 ), eIncrement, NULL ) + +/** + * task. h + *
 void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken );
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro
+ * to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * A version of xTaskNotifyGive() that can be called from an interrupt service
+ * routine (ISR).
+ *
+ * Events can be sent to a task using an intermediary object.  Examples of such
+ * objects are queues, semaphores, mutexes and event groups.  Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value.  In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * vTaskNotifyGiveFromISR() is intended for use when task notifications are
+ * used as light weight and faster binary or counting semaphore equivalents.
+ * Actual FreeRTOS semaphores are given from an ISR using the
+ * xSemaphoreGiveFromISR() API function, the equivalent action that instead uses
+ * a task notification is vTaskNotifyGiveFromISR().
+ *
+ * When task notifications are being used as a binary or counting semaphore
+ * equivalent then the task being notified should wait for the notification
+ * using the ulTaskNotifyTake() API function rather than the
+ * xTaskNotifyWait() API function.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for more details.
+ *
+ * @param xTaskToNotify The handle of the task being notified.  The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param pxHigherPriorityTaskWoken  vTaskNotifyGiveFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the
+ * task to which the notification was sent to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently running task.  If
+ * vTaskNotifyGiveFromISR() sets this value to pdTRUE then a context switch
+ * should be requested before the interrupt is exited.  How a context switch is
+ * requested from an ISR is dependent on the port - see the documentation page
+ * for the port in use.
+ *
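+ * Example usage (a minimal sketch; the handle and function names are
+ * hypothetical, and portYIELD_FROM_ISR() is the port specific yield macro
+ * used by most ports):
+
+ // Interrupt handler that defers its processing to vHandlingTask.
+ void vAnExampleISR( void )
+ {
+ BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+
+	 // Unblock the handling task by incrementing its notification value.
+	 vTaskNotifyGiveFromISR( xHandlingTaskHandle, &xHigherPriorityTaskWoken );
+
+	 portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ }
+
+ // Task that waits for the notification using ulTaskNotifyTake().
+ void vHandlingTask( void * pvParameters )
+ {
+	 for( ;; )
+	 {
+		 // Block until the ISR gives a notification.  Clearing the count to
+		 // zero on exit makes the notification behave like a binary semaphore.
+		 ulTaskNotifyTake( pdTRUE, portMAX_DELAY );
+
+		 // Process the event here.
+	 }
+ }
+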
+ * \defgroup vTaskNotifyGiveFromISR vTaskNotifyGiveFromISR
+ * \ingroup TaskNotifications
+ */
+void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * 
uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
+ * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this + * function to be available. + * + * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private + * "notification value", which is a 32-bit unsigned integer (uint32_t). + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment the task's notification value. In that way + * task notifications can be used to send data to a task, or be used as light + * weight and fast binary or counting semaphores. + * + * ulTaskNotifyTake() is intended for use when a task notification is used as a + * faster and lighter weight binary or counting semaphore alternative. Actual + * FreeRTOS semaphores are taken using the xSemaphoreTake() API function, the + * equivalent action that instead uses a task notification is + * ulTaskNotifyTake(). + * + * When a task is using its notification value as a binary or counting semaphore + * other tasks should send notifications to it using the xTaskNotifyGive() + * macro, or xTaskNotify() function with the eAction parameter set to + * eIncrement. + * + * ulTaskNotifyTake() can either clear the task's notification value to + * zero on exit, in which case the notification value acts like a binary + * semaphore, or decrement the task's notification value on exit, in which case + * the notification value acts like a counting semaphore. + * + * A task can use ulTaskNotifyTake() to [optionally] block to wait for a + * the task's notification value to be non-zero. The task does not consume any + * CPU time while it is in the Blocked state. + * + * Where as xTaskNotifyWait() will return when a notification is pending, + * ulTaskNotifyTake() will return when the task's notification value is + * not zero. + * + * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * @param xClearCountOnExit if xClearCountOnExit is pdFALSE then the task's + * notification value is decremented when the function exits. In this way the + * notification value acts like a counting semaphore. If xClearCountOnExit is + * not pdFALSE then the task's notification value is cleared to zero when the + * function exits. In this way the notification value acts like a binary + * semaphore. + * + * @param xTicksToWait The maximum amount of time that the task should wait in + * the Blocked state for the task's notification value to be greater than zero, + * should the count not already be greater than zero when + * ulTaskNotifyTake() was called. The task will not consume any processing + * time while it is in the Blocked state. This is specified in kernel ticks, + * the macro pdMS_TO_TICSK( value_in_ms ) can be used to convert a time + * specified in milliseconds to a time specified in ticks. + * + * @return The task's notification count before it is either cleared to zero or + * decremented (see the xClearCountOnExit parameter). + * + * \defgroup ulTaskNotifyTake ulTaskNotifyTake + * \ingroup TaskNotifications + */ +uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );
+ * + * If the notification state of the task referenced by the handle xTask is + * eNotified, then set the task's notification state to eNotWaitingNotification. + * The task's notification value is not altered. Set xTask to NULL to clear the + * notification state of the calling task. + * + * @return pdTRUE if the task's notification state was set to + * eNotWaitingNotification, otherwise pdFALSE. + * \defgroup xTaskNotifyStateClear xTaskNotifyStateClear + * \ingroup TaskNotifications + */ +BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask ); + +/*----------------------------------------------------------- + * SCHEDULER INTERNALS AVAILABLE FOR PORTING PURPOSES + *----------------------------------------------------------*/ + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY + * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS + * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * Called from the real time kernel tick (either preemptive or cooperative), + * this increments the tick count and checks if any tasks that are blocked + * for a finite period required removing from a blocked list and placing on + * a ready list. If a non-zero value is returned then a context switch is + * required because either: + * + A task was removed from a blocked list because its timeout had expired, + * or + * + Time slicing is in use and there is a task of equal priority to the + * currently running task. + */ +BaseType_t xTaskIncrementTick( void ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN + * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED. + * + * Removes the calling task from the ready list and places it both + * on the list of tasks waiting for a particular event, and the + * list of delayed tasks. The task will be removed from both lists + * and replaced on the ready list should either the event occur (and + * there be no higher priority tasks waiting on the same event) or + * the delay period expires. + * + * The 'unordered' version replaces the event list item value with the + * xItemValue value, and inserts the list item at the end of the list. + * + * The 'ordered' version uses the existing event list item value (which is the + * owning tasks priority) to insert the list item into the event list is task + * priority order. + * + * @param pxEventList The list containing tasks that are blocked waiting + * for the event to occur. + * + * @param xItemValue The item value to use for the event list item when the + * event list is not ordered by task priority. + * + * @param xTicksToWait The maximum amount of time that the task should wait + * for the event to occur. This is specified in kernel ticks,the constant + * portTICK_PERIOD_MS can be used to convert kernel ticks into a real time + * period. + */ +void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN + * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED. + * + * This function performs nearly the same function as vTaskPlaceOnEventList(). 
+ * The difference being that this function does not permit tasks to block + * indefinitely, whereas vTaskPlaceOnEventList() does. + * + */ +void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN + * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED. + * + * Removes a task from both the specified event list and the list of blocked + * tasks, and places it on a ready queue. + * + * xTaskRemoveFromEventList()/vTaskRemoveFromUnorderedEventList() will be called + * if either an event occurs to unblock a task, or the block timeout period + * expires. + * + * xTaskRemoveFromEventList() is used when the event list is in task priority + * order. It removes the list item from the head of the event list as that will + * have the highest priority owning task of all the tasks on the event list. + * vTaskRemoveFromUnorderedEventList() is used when the event list is not + * ordered and the event list items hold something other than the owning tasks + * priority. In this case the event list item value is updated to the value + * passed in the xItemValue parameter. + * + * @return pdTRUE if the task being removed has a higher priority than the task + * making the call, otherwise pdFALSE. + */ +BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION; +void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY + * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS + * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * Sets the pointer to the current TCB to the TCB of the highest priority task + * that is ready to run. + */ +portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION; + +/* + * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. THEY ARE USED BY + * THE EVENT BITS MODULE. + */ +TickType_t uxTaskResetEventItemValue( void ) PRIVILEGED_FUNCTION; + +/* + * Return the handle of the calling task. + */ +TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION; + +/* + * Capture the current time status for future reference. + */ +void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; + +/* + * Compare the time status now with that previously captured to see if the + * timeout has expired. + */ +BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION; + +/* + * Shortcut used by the queue implementation to prevent unnecessary call to + * taskYIELD(); + */ +void vTaskMissedYield( void ) PRIVILEGED_FUNCTION; + +/* + * Returns the scheduler state as taskSCHEDULER_RUNNING, + * taskSCHEDULER_NOT_STARTED or taskSCHEDULER_SUSPENDED. + */ +BaseType_t xTaskGetSchedulerState( void ) PRIVILEGED_FUNCTION; + +/* + * Raises the priority of the mutex holder to that of the calling task should + * the mutex holder have a priority less than the calling task. + */ +BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION; + +/* + * Set the priority of a task back to its proper priority in the case that it + * inherited a higher priority while it was holding a semaphore. 
+ */ +BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION; + +/* + * If a higher priority task attempting to obtain a mutex caused a lower + * priority task to inherit the higher priority task's priority - but the higher + * priority task then timed out without obtaining the mutex, then the lower + * priority task will disinherit the priority again - but only down as far as + * the highest priority task that is still waiting for the mutex (if there were + * more than one task waiting for the mutex). + */ +void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask ) PRIVILEGED_FUNCTION; + +/* + * Get the uxTCBNumber assigned to the task referenced by the xTask parameter. + */ +UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/* + * Set the uxTaskNumber of the task referenced by the xTask parameter to + * uxHandle. + */ +void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle ) PRIVILEGED_FUNCTION; + +/* + * Only available when configUSE_TICKLESS_IDLE is set to 1. + * If tickless mode is being used, or a low power mode is implemented, then + * the tick interrupt will not execute during idle periods. When this is the + * case, the tick count value maintained by the scheduler needs to be kept up + * to date with the actual execution time by being skipped forward by a time + * equal to the idle period. + */ +void vTaskStepTick( const TickType_t xTicksToJump ) PRIVILEGED_FUNCTION; + +/* + * Only available when configUSE_TICKLESS_IDLE is set to 1. + * Provided for use within portSUPPRESS_TICKS_AND_SLEEP() to allow the port + * specific sleep function to determine if it is ok to proceed with the sleep, + * and if it is ok to proceed, if it is ok to sleep indefinitely. + * + * This function is necessary because portSUPPRESS_TICKS_AND_SLEEP() is only + * called with the scheduler suspended, not from within a critical section. It + * is therefore possible for an interrupt to request a context switch between + * portSUPPRESS_TICKS_AND_SLEEP() and the low power mode actually being + * entered. eTaskConfirmSleepModeStatus() should be called from a short + * critical section between the timer being stopped and the sleep mode being + * entered to ensure it is ok to proceed into the sleep mode. + */ +eSleepModeStatus eTaskConfirmSleepModeStatus( void ) PRIVILEGED_FUNCTION; + +/* + * For internal use only. Increment the mutex held count when a mutex is + * taken and return the handle of the task that has taken the mutex. + */ +TaskHandle_t pvTaskIncrementMutexHeldCount( void ) PRIVILEGED_FUNCTION; + +/* + * For internal use only. Same as vTaskSetTimeOutState(), but without a critial + * section. + */ +void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; + + +#ifdef __cplusplus +} +#endif +#endif /* INC_TASK_H */ + + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h new file mode 100644 index 000000000..cb721797f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h @@ -0,0 +1,1295 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef TIMERS_H +#define TIMERS_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include timers.h" +#endif + +/*lint -save -e537 This headers are only multiply included if the application code +happens to also be including task.h. */ +#include "task.h" +/*lint -restore */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------- + * MACROS AND DEFINITIONS + *----------------------------------------------------------*/ + +/* IDs for commands that can be sent/received on the timer queue. These are to +be used solely through the macros that make up the public software timer API, +as defined below. The commands that are sent from interrupts must use the +highest numbers as tmrFIRST_FROM_ISR_COMMAND is used to determine if the task +or interrupt version of the queue send function should be used. */ +#define tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR ( ( BaseType_t ) -2 ) +#define tmrCOMMAND_EXECUTE_CALLBACK ( ( BaseType_t ) -1 ) +#define tmrCOMMAND_START_DONT_TRACE ( ( BaseType_t ) 0 ) +#define tmrCOMMAND_START ( ( BaseType_t ) 1 ) +#define tmrCOMMAND_RESET ( ( BaseType_t ) 2 ) +#define tmrCOMMAND_STOP ( ( BaseType_t ) 3 ) +#define tmrCOMMAND_CHANGE_PERIOD ( ( BaseType_t ) 4 ) +#define tmrCOMMAND_DELETE ( ( BaseType_t ) 5 ) + +#define tmrFIRST_FROM_ISR_COMMAND ( ( BaseType_t ) 6 ) +#define tmrCOMMAND_START_FROM_ISR ( ( BaseType_t ) 6 ) +#define tmrCOMMAND_RESET_FROM_ISR ( ( BaseType_t ) 7 ) +#define tmrCOMMAND_STOP_FROM_ISR ( ( BaseType_t ) 8 ) +#define tmrCOMMAND_CHANGE_PERIOD_FROM_ISR ( ( BaseType_t ) 9 ) + + +/** + * Type by which software timers are referenced. For example, a call to + * xTimerCreate() returns an TimerHandle_t variable that can then be used to + * reference the subject timer in calls to other software timer API functions + * (for example, xTimerStart(), xTimerReset(), etc.). + */ +struct tmrTimerControl; /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +typedef struct tmrTimerControl * TimerHandle_t; + +/* + * Defines the prototype to which timer callback functions must conform. + */ +typedef void (*TimerCallbackFunction_t)( TimerHandle_t xTimer ); + +/* + * Defines the prototype to which functions used with the + * xTimerPendFunctionCallFromISR() function must conform. 
+ */ +typedef void (*PendedFunction_t)( void *, uint32_t ); + +/** + * TimerHandle_t xTimerCreate( const char * const pcTimerName, + * TickType_t xTimerPeriodInTicks, + * UBaseType_t uxAutoReload, + * void * pvTimerID, + * TimerCallbackFunction_t pxCallbackFunction ); + * + * Creates a new software timer instance, and returns a handle by which the + * created software timer can be referenced. + * + * Internally, within the FreeRTOS implementation, software timers use a block + * of memory, in which the timer data structure is stored. If a software timer + * is created using xTimerCreate() then the required memory is automatically + * dynamically allocated inside the xTimerCreate() function. (see + * http://www.freertos.org/a00111.html). If a software timer is created using + * xTimerCreateStatic() then the application writer must provide the memory that + * will get used by the software timer. xTimerCreateStatic() therefore allows a + * software timer to be created without using any dynamic memory allocation. + * + * Timers are created in the dormant state. The xTimerStart(), xTimerReset(), + * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and + * xTimerChangePeriodFromISR() API functions can all be used to transition a + * timer into the active state. + * + * @param pcTimerName A text name that is assigned to the timer. This is done + * purely to assist debugging. The kernel itself only ever references a timer + * by its handle, and never by its name. + * + * @param xTimerPeriodInTicks The timer period. The time is defined in tick + * periods so the constant portTICK_PERIOD_MS can be used to convert a time that + * has been specified in milliseconds. For example, if the timer must expire + * after 100 ticks, then xTimerPeriodInTicks should be set to 100. + * Alternatively, if the timer must expire after 500ms, then xPeriod can be set + * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or + * equal to 1000. + * + * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will + * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter. + * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + * + * @param pvTimerID An identifier that is assigned to the timer being created. + * Typically this would be used in the timer callback function to identify which + * timer expired when the same callback function is assigned to more than one + * timer. + * + * @param pxCallbackFunction The function to call when the timer expires. + * Callback functions must have the prototype defined by TimerCallbackFunction_t, + * which is "void vCallbackFunction( TimerHandle_t xTimer );". + * + * @return If the timer is successfully created then a handle to the newly + * created timer is returned. If the timer cannot be created (because either + * there is insufficient FreeRTOS heap remaining to allocate the timer + * structures, or the timer period was set to 0) then NULL is returned. + * + * Example usage: + * @verbatim + * #define NUM_TIMERS 5 + * + * // An array to hold handles to the created timers. + * TimerHandle_t xTimers[ NUM_TIMERS ]; + * + * // An array to hold a count of the number of times each timer expires. + * int32_t lExpireCounters[ NUM_TIMERS ] = { 0 }; + * + * // Define a callback function that will be used by multiple timer instances. 
+ * // The callback function does nothing but count the number of times the + * // associated timer expires, and stop the timer once the timer has expired + * // 10 times. + * void vTimerCallback( TimerHandle_t pxTimer ) + * { + * int32_t lArrayIndex; + * const int32_t xMaxExpiryCountBeforeStopping = 10; + * + * // Optionally do something if the pxTimer parameter is NULL. + * configASSERT( pxTimer ); + * + * // Which timer expired? + * lArrayIndex = ( int32_t ) pvTimerGetTimerID( pxTimer ); + * + * // Increment the number of times that pxTimer has expired. + * lExpireCounters[ lArrayIndex ] += 1; + * + * // If the timer has expired 10 times then stop it from running. + * if( lExpireCounters[ lArrayIndex ] == xMaxExpiryCountBeforeStopping ) + * { + * // Do not use a block time if calling a timer API function from a + * // timer callback function, as doing so could cause a deadlock! + * xTimerStop( pxTimer, 0 ); + * } + * } + * + * void main( void ) + * { + * int32_t x; + * + * // Create then start some timers. Starting the timers before the scheduler + * // has been started means the timers will start running immediately that + * // the scheduler starts. + * for( x = 0; x < NUM_TIMERS; x++ ) + * { + * xTimers[ x ] = xTimerCreate( "Timer", // Just a text name, not used by the kernel. + * ( 100 * x ), // The timer period in ticks. + * pdTRUE, // The timers will auto-reload themselves when they expire. + * ( void * ) x, // Assign each timer a unique id equal to its array index. + * vTimerCallback // Each timer calls the same callback when it expires. + * ); + * + * if( xTimers[ x ] == NULL ) + * { + * // The timer was not created. + * } + * else + * { + * // Start the timer. No block time is specified, and even if one was + * // it would be ignored because the scheduler has not yet been + * // started. + * if( xTimerStart( xTimers[ x ], 0 ) != pdPASS ) + * { + * // The timer could not be set into the Active state. + * } + * } + * } + * + * // ... + * // Create tasks here. + * // ... + * + * // Starting the scheduler will start the timers running as they have already + * // been set into the active state. + * vTaskStartScheduler(); + * + * // Should not reach here. + * for( ;; ); + * } + * @endverbatim + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + TimerHandle_t xTimerCreate( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) PRIVILEGED_FUNCTION; +#endif + +/** + * TimerHandle_t xTimerCreateStatic(const char * const pcTimerName, + * TickType_t xTimerPeriodInTicks, + * UBaseType_t uxAutoReload, + * void * pvTimerID, + * TimerCallbackFunction_t pxCallbackFunction, + * StaticTimer_t *pxTimerBuffer ); + * + * Creates a new software timer instance, and returns a handle by which the + * created software timer can be referenced. + * + * Internally, within the FreeRTOS implementation, software timers use a block + * of memory, in which the timer data structure is stored. If a software timer + * is created using xTimerCreate() then the required memory is automatically + * dynamically allocated inside the xTimerCreate() function. (see + * http://www.freertos.org/a00111.html). If a software timer is created using + * xTimerCreateStatic() then the application writer must provide the memory that + * will get used by the software timer. 
xTimerCreateStatic() therefore allows a + * software timer to be created without using any dynamic memory allocation. + * + * Timers are created in the dormant state. The xTimerStart(), xTimerReset(), + * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and + * xTimerChangePeriodFromISR() API functions can all be used to transition a + * timer into the active state. + * + * @param pcTimerName A text name that is assigned to the timer. This is done + * purely to assist debugging. The kernel itself only ever references a timer + * by its handle, and never by its name. + * + * @param xTimerPeriodInTicks The timer period. The time is defined in tick + * periods so the constant portTICK_PERIOD_MS can be used to convert a time that + * has been specified in milliseconds. For example, if the timer must expire + * after 100 ticks, then xTimerPeriodInTicks should be set to 100. + * Alternatively, if the timer must expire after 500ms, then xPeriod can be set + * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or + * equal to 1000. + * + * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will + * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter. + * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + * + * @param pvTimerID An identifier that is assigned to the timer being created. + * Typically this would be used in the timer callback function to identify which + * timer expired when the same callback function is assigned to more than one + * timer. + * + * @param pxCallbackFunction The function to call when the timer expires. + * Callback functions must have the prototype defined by TimerCallbackFunction_t, + * which is "void vCallbackFunction( TimerHandle_t xTimer );". + * + * @param pxTimerBuffer Must point to a variable of type StaticTimer_t, which + * will be then be used to hold the software timer's data structures, removing + * the need for the memory to be allocated dynamically. + * + * @return If the timer is created then a handle to the created timer is + * returned. If pxTimerBuffer was NULL then NULL is returned. + * + * Example usage: + * @verbatim + * + * // The buffer used to hold the software timer's data structure. + * static StaticTimer_t xTimerBuffer; + * + * // A variable that will be incremented by the software timer's callback + * // function. + * UBaseType_t uxVariableToIncrement = 0; + * + * // A software timer callback function that increments a variable passed to + * // it when the software timer was created. After the 5th increment the + * // callback function stops the software timer. + * static void prvTimerCallback( TimerHandle_t xExpiredTimer ) + * { + * UBaseType_t *puxVariableToIncrement; + * BaseType_t xReturned; + * + * // Obtain the address of the variable to increment from the timer ID. + * puxVariableToIncrement = ( UBaseType_t * ) pvTimerGetTimerID( xExpiredTimer ); + * + * // Increment the variable to show the timer callback has executed. + * ( *puxVariableToIncrement )++; + * + * // If this callback has executed the required number of times, stop the + * // timer. + * if( *puxVariableToIncrement == 5 ) + * { + * // This is called from a timer callback so must not block. + * xTimerStop( xExpiredTimer, staticDONT_BLOCK ); + * } + * } + * + * + * void main( void ) + * { + * // Create the software time. xTimerCreateStatic() has an extra parameter + * // than the normal xTimerCreate() API function. 
The parameter is a pointer + * // to the StaticTimer_t structure that will hold the software timer + * // structure. If the parameter is passed as NULL then the structure will be + * // allocated dynamically, just as if xTimerCreate() had been called. + * xTimer = xTimerCreateStatic( "T1", // Text name for the task. Helps debugging only. Not used by FreeRTOS. + * xTimerPeriod, // The period of the timer in ticks. + * pdTRUE, // This is an auto-reload timer. + * ( void * ) &uxVariableToIncrement, // A variable incremented by the software timer's callback function + * prvTimerCallback, // The function to execute when the timer expires. + * &xTimerBuffer ); // The buffer that will hold the software timer structure. + * + * // The scheduler has not started yet so a block time is not used. + * xReturned = xTimerStart( xTimer, 0 ); + * + * // ... + * // Create tasks here. + * // ... + * + * // Starting the scheduler will start the timers running as they have already + * // been set into the active state. + * vTaskStartScheduler(); + * + * // Should not reach here. + * for( ;; ); + * } + * @endverbatim + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + TimerHandle_t xTimerCreateStatic( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + StaticTimer_t *pxTimerBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * void *pvTimerGetTimerID( TimerHandle_t xTimer ); + * + * Returns the ID assigned to the timer. + * + * IDs are assigned to timers using the pvTimerID parameter of the call to + * xTimerCreated() that was used to create the timer, and by calling the + * vTimerSetTimerID() API function. + * + * If the same callback function is assigned to multiple timers then the timer + * ID can be used as time specific (timer local) storage. + * + * @param xTimer The timer being queried. + * + * @return The ID assigned to the timer being queried. + * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + */ +void *pvTimerGetTimerID( const TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/** + * void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ); + * + * Sets the ID assigned to the timer. + * + * IDs are assigned to timers using the pvTimerID parameter of the call to + * xTimerCreated() that was used to create the timer. + * + * If the same callback function is assigned to multiple timers then the timer + * ID can be used as time specific (timer local) storage. + * + * @param xTimer The timer being updated. + * + * @param pvNewID The ID to assign to the timer. + * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + */ +void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) PRIVILEGED_FUNCTION; + +/** + * BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ); + * + * Queries a timer to see if it is active or dormant. + * + * A timer will be dormant if: + * 1) It has been created but not started, or + * 2) It is an expired one-shot timer that has not been restarted. + * + * Timers are created in the dormant state. The xTimerStart(), xTimerReset(), + * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and + * xTimerChangePeriodFromISR() API functions can all be used to transition a timer into the + * active state. + * + * @param xTimer The timer being queried. 
+ * + * @return pdFALSE will be returned if the timer is dormant. A value other than + * pdFALSE will be returned if the timer is active. + * + * Example usage: + * @verbatim + * // This function assumes xTimer has already been created. + * void vAFunction( TimerHandle_t xTimer ) + * { + * if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and equivalently "if( xTimerIsTimerActive( xTimer ) )" + * { + * // xTimer is active, do something. + * } + * else + * { + * // xTimer is not active, do something else. + * } + * } + * @endverbatim + */ +BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/** + * TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ); + * + * Simply returns the handle of the timer service/daemon task. It it not valid + * to call xTimerGetTimerDaemonTaskHandle() before the scheduler has been started. + */ +TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ) PRIVILEGED_FUNCTION; + +/** + * BaseType_t xTimerStart( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerStart() starts a timer that was previously created using the + * xTimerCreate() API function. If the timer had already been started and was + * already in the active state, then xTimerStart() has equivalent functionality + * to the xTimerReset() API function. + * + * Starting a timer ensures the timer is in the active state. If the timer + * is not stopped, deleted, or reset in the mean time, the callback function + * associated with the timer will get called 'n' ticks after xTimerStart() was + * called, where 'n' is the timers defined period. + * + * It is valid to call xTimerStart() before the scheduler has been started, but + * when this is done the timer will not actually start until the scheduler is + * started, and the timers expiry time will be relative to when the scheduler is + * started, not relative to when xTimerStart() was called. + * + * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStart() + * to be available. + * + * @param xTimer The handle of the timer being started/restarted. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the start command to be successfully + * sent to the timer command queue, should the queue already be full when + * xTimerStart() was called. xTicksToWait is ignored if xTimerStart() is called + * before the scheduler is started. + * + * @return pdFAIL will be returned if the start command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system, although the + * timers expiry time is relative to when xTimerStart() is actually called. The + * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. 
+ * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + * + */ +#define xTimerStart( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerStop( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerStop() stops a timer that was previously started using either of the + * The xTimerStart(), xTimerReset(), xTimerStartFromISR(), xTimerResetFromISR(), + * xTimerChangePeriod() or xTimerChangePeriodFromISR() API functions. + * + * Stopping a timer ensures the timer is not in the active state. + * + * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStop() + * to be available. + * + * @param xTimer The handle of the timer being stopped. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the stop command to be successfully + * sent to the timer command queue, should the queue already be full when + * xTimerStop() was called. xTicksToWait is ignored if xTimerStop() is called + * before the scheduler is started. + * + * @return pdFAIL will be returned if the stop command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system. The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + * + */ +#define xTimerStop( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP, 0U, NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerChangePeriod( TimerHandle_t xTimer, + * TickType_t xNewPeriod, + * TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerChangePeriod() changes the period of a timer that was previously + * created using the xTimerCreate() API function. + * + * xTimerChangePeriod() can be called to change the period of an active or + * dormant state timer. + * + * The configUSE_TIMERS configuration constant must be set to 1 for + * xTimerChangePeriod() to be available. + * + * @param xTimer The handle of the timer that is having its period changed. + * + * @param xNewPeriod The new period for xTimer. Timer periods are specified in + * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a time + * that has been specified in milliseconds. 
For example, if the timer must + * expire after 100 ticks, then xNewPeriod should be set to 100. Alternatively, + * if the timer must expire after 500ms, then xNewPeriod can be set to + * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than + * or equal to 1000. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the change period command to be + * successfully sent to the timer command queue, should the queue already be + * full when xTimerChangePeriod() was called. xTicksToWait is ignored if + * xTimerChangePeriod() is called before the scheduler is started. + * + * @return pdFAIL will be returned if the change period command could not be + * sent to the timer command queue even after xTicksToWait ticks had passed. + * pdPASS will be returned if the command was successfully sent to the timer + * command queue. When the command is actually processed will depend on the + * priority of the timer service/daemon task relative to other tasks in the + * system. The timer service/daemon task priority is set by the + * configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This function assumes xTimer has already been created. If the timer + * // referenced by xTimer is already active when it is called, then the timer + * // is deleted. If the timer referenced by xTimer is not active when it is + * // called, then the period of the timer is set to 500ms and the timer is + * // started. + * void vAFunction( TimerHandle_t xTimer ) + * { + * if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and equivalently "if( xTimerIsTimerActive( xTimer ) )" + * { + * // xTimer is already active - delete it. + * xTimerDelete( xTimer ); + * } + * else + * { + * // xTimer is not active, change its period to 500ms. This will also + * // cause the timer to start. Block for a maximum of 100 ticks if the + * // change period command cannot immediately be sent to the timer + * // command queue. + * if( xTimerChangePeriod( xTimer, 500 / portTICK_PERIOD_MS, 100 ) == pdPASS ) + * { + * // The command was successfully sent. + * } + * else + * { + * // The command could not be sent, even after waiting for 100 ticks + * // to pass. Take appropriate action here. + * } + * } + * } + * @endverbatim + */ + #define xTimerChangePeriod( xTimer, xNewPeriod, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_CHANGE_PERIOD, ( xNewPeriod ), NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerDelete( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerDelete() deletes a timer that was previously created using the + * xTimerCreate() API function. + * + * The configUSE_TIMERS configuration constant must be set to 1 for + * xTimerDelete() to be available. + * + * @param xTimer The handle of the timer being deleted. 
+ * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the delete command to be + * successfully sent to the timer command queue, should the queue already be + * full when xTimerDelete() was called. xTicksToWait is ignored if xTimerDelete() + * is called before the scheduler is started. + * + * @return pdFAIL will be returned if the delete command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system. The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * + * See the xTimerChangePeriod() API function example usage scenario. + */ +#define xTimerDelete( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_DELETE, 0U, NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerReset( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerReset() re-starts a timer that was previously created using the + * xTimerCreate() API function. If the timer had already been started and was + * already in the active state, then xTimerReset() will cause the timer to + * re-evaluate its expiry time so that it is relative to when xTimerReset() was + * called. If the timer was in the dormant state then xTimerReset() has + * equivalent functionality to the xTimerStart() API function. + * + * Resetting a timer ensures the timer is in the active state. If the timer + * is not stopped, deleted, or reset in the mean time, the callback function + * associated with the timer will get called 'n' ticks after xTimerReset() was + * called, where 'n' is the timers defined period. + * + * It is valid to call xTimerReset() before the scheduler has been started, but + * when this is done the timer will not actually start until the scheduler is + * started, and the timers expiry time will be relative to when the scheduler is + * started, not relative to when xTimerReset() was called. + * + * The configUSE_TIMERS configuration constant must be set to 1 for xTimerReset() + * to be available. + * + * @param xTimer The handle of the timer being reset/started/restarted. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the reset command to be successfully + * sent to the timer command queue, should the queue already be full when + * xTimerReset() was called. xTicksToWait is ignored if xTimerReset() is called + * before the scheduler is started. + * + * @return pdFAIL will be returned if the reset command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. 
+ * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system, although the + * timers expiry time is relative to when xTimerStart() is actually called. The + * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * @verbatim + * // When a key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer. + * + * TimerHandle_t xBacklightTimer = NULL; + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press event handler. + * void vKeyPressEventHandler( char cKey ) + * { + * // Ensure the LCD back-light is on, then reset the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. Wait 10 ticks for the command to be successfully sent + * // if it cannot be sent immediately. + * vSetBacklightState( BACKLIGHT_ON ); + * if( xTimerReset( xBacklightTimer, 100 ) != pdPASS ) + * { + * // The reset command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * } + * + * void main( void ) + * { + * int32_t x; + * + * // Create then start the one-shot timer that is responsible for turning + * // the back-light off if no keys are pressed within a 5 second period. + * xBacklightTimer = xTimerCreate( "BacklightTimer", // Just a text name, not used by the kernel. + * ( 5000 / portTICK_PERIOD_MS), // The timer period in ticks. + * pdFALSE, // The timer is a one-shot timer. + * 0, // The id is not used by the callback so can take any value. + * vBacklightTimerCallback // The callback function that switches the LCD back-light off. + * ); + * + * if( xBacklightTimer == NULL ) + * { + * // The timer was not created. + * } + * else + * { + * // Start the timer. No block time is specified, and even if one was + * // it would be ignored because the scheduler has not yet been + * // started. + * if( xTimerStart( xBacklightTimer, 0 ) != pdPASS ) + * { + * // The timer could not be set into the Active state. + * } + * } + * + * // ... + * // Create tasks here. + * // ... + * + * // Starting the scheduler will start the timer running as it has already + * // been set into the active state. + * vTaskStartScheduler(); + * + * // Should not reach here. + * for( ;; ); + * } + * @endverbatim + */ +#define xTimerReset( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_RESET, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerStartFromISR( TimerHandle_t xTimer, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerStart() that can be called from an interrupt service + * routine. + * + * @param xTimer The handle of the timer being started/restarted. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. 
Calling xTimerStartFromISR() writes a message to the timer + * command queue, so has the potential to transition the timer service/daemon + * task out of the Blocked state. If calling xTimerStartFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerStartFromISR() function. If + * xTimerStartFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the start command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system, although the timers expiry time is + * relative to when xTimerStartFromISR() is actually called. The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xBacklightTimer has already been created. When a + * // key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer, and unlike the example given for + * // the xTimerReset() function, the key press event handler is an interrupt + * // service routine. + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press interrupt service routine. + * void vKeyPressEventInterruptHandler( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // Ensure the LCD back-light is on, then restart the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. This is an interrupt service routine so can only + * // call FreeRTOS API functions that end in "FromISR". + * vSetBacklightState( BACKLIGHT_ON ); + * + * // xTimerStartFromISR() or xTimerResetFromISR() could be called here + * // as both cause the timer to re-calculate its expiry time. + * // xHigherPriorityTaskWoken was initialised to pdFALSE when it was + * // declared (in this function). + * if( xTimerStartFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The start command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). 
+ * } + * } + * @endverbatim + */ +#define xTimerStartFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerStopFromISR( TimerHandle_t xTimer, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerStop() that can be called from an interrupt service + * routine. + * + * @param xTimer The handle of the timer being stopped. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. Calling xTimerStopFromISR() writes a message to the timer + * command queue, so has the potential to transition the timer service/daemon + * task out of the Blocked state. If calling xTimerStopFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerStopFromISR() function. If + * xTimerStopFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the stop command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system. The timer service/daemon task + * priority is set by the configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xTimer has already been created and started. When + * // an interrupt occurs, the timer should be simply stopped. + * + * // The interrupt service routine that stops the timer. + * void vAnExampleInterruptServiceRoutine( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // The interrupt has occurred - simply stop the timer. + * // xHigherPriorityTaskWoken was set to pdFALSE where it was defined + * // (within this function). As this is an interrupt service routine, only + * // FreeRTOS API functions that end in "FromISR" can be used. + * if( xTimerStopFromISR( xTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The stop command was not executed successfully. Take appropriate + * // action here. + * } + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). + * } + * } + * @endverbatim + */ +#define xTimerStopFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP_FROM_ISR, 0, ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerChangePeriodFromISR( TimerHandle_t xTimer, + * TickType_t xNewPeriod, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerChangePeriod() that can be called from an interrupt + * service routine. 
+ *
+ * @param xTimer The handle of the timer that is having its period changed.
+ *
+ * @param xNewPeriod The new period for xTimer. Timer periods are specified in
+ * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a time
+ * that has been specified in milliseconds. For example, if the timer must
+ * expire after 100 ticks, then xNewPeriod should be set to 100. Alternatively,
+ * if the timer must expire after 500ms, then xNewPeriod can be set to
+ * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than
+ * or equal to 1000.
+ *
+ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
+ * of its time in the Blocked state, waiting for messages to arrive on the timer
+ * command queue. Calling xTimerChangePeriodFromISR() writes a message to the
+ * timer command queue, so has the potential to transition the timer service/
+ * daemon task out of the Blocked state. If calling xTimerChangePeriodFromISR()
+ * causes the timer service/daemon task to leave the Blocked state, and the
+ * timer service/daemon task has a priority equal to or greater than the
+ * currently executing task (the task that was interrupted), then
+ * *pxHigherPriorityTaskWoken will get set to pdTRUE internally within the
+ * xTimerChangePeriodFromISR() function. If xTimerChangePeriodFromISR() sets
+ * this value to pdTRUE then a context switch should be performed before the
+ * interrupt exits.
+ *
+ * @return pdFAIL will be returned if the command to change the timers period
+ * could not be sent to the timer command queue. pdPASS will be returned if the
+ * command was successfully sent to the timer command queue. When the command
+ * is actually processed will depend on the priority of the timer service/daemon
+ * task relative to other tasks in the system. The timer service/daemon task
+ * priority is set by the configTIMER_TASK_PRIORITY configuration constant.
+ *
+ * Example usage:
+ * @verbatim
+ * // This scenario assumes xTimer has already been created and started. When
+ * // an interrupt occurs, the period of xTimer should be changed to 500ms.
+ *
+ * // The interrupt service routine that changes the period of xTimer.
+ * void vAnExampleInterruptServiceRoutine( void )
+ * {
+ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *
+ * // The interrupt has occurred - change the period of xTimer to 500ms.
+ * // xHigherPriorityTaskWoken was set to pdFALSE where it was defined
+ * // (within this function). As this is an interrupt service routine, only
+ * // FreeRTOS API functions that end in "FromISR" can be used.
+ * if( xTimerChangePeriodFromISR( xTimer, 500 / portTICK_PERIOD_MS, &xHigherPriorityTaskWoken ) != pdPASS )
+ * {
+ * // The command to change the timers period was not executed
+ * // successfully. Take appropriate action here.
+ * }
+ *
+ * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch
+ * // should be performed. The syntax required to perform a context switch
+ * // from inside an ISR varies from port to port, and from compiler to
+ * // compiler. Inspect the demos for the port you are using to find the
+ * // actual syntax required.
+ * if( xHigherPriorityTaskWoken != pdFALSE )
+ * {
+ * // Call the interrupt safe yield function here (actual function
+ * // depends on the FreeRTOS port being used).
+ * } + * } + * @endverbatim + */ +#define xTimerChangePeriodFromISR( xTimer, xNewPeriod, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_CHANGE_PERIOD_FROM_ISR, ( xNewPeriod ), ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerResetFromISR( TimerHandle_t xTimer, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerReset() that can be called from an interrupt service + * routine. + * + * @param xTimer The handle of the timer that is to be started, reset, or + * restarted. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. Calling xTimerResetFromISR() writes a message to the timer + * command queue, so has the potential to transition the timer service/daemon + * task out of the Blocked state. If calling xTimerResetFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerResetFromISR() function. If + * xTimerResetFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the reset command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system, although the timers expiry time is + * relative to when xTimerResetFromISR() is actually called. The timer service/daemon + * task priority is set by the configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xBacklightTimer has already been created. When a + * // key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer, and unlike the example given for + * // the xTimerReset() function, the key press event handler is an interrupt + * // service routine. + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press interrupt service routine. + * void vKeyPressEventInterruptHandler( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // Ensure the LCD back-light is on, then reset the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. This is an interrupt service routine so can only + * // call FreeRTOS API functions that end in "FromISR". + * vSetBacklightState( BACKLIGHT_ON ); + * + * // xTimerStartFromISR() or xTimerResetFromISR() could be called here + * // as both cause the timer to re-calculate its expiry time. + * // xHigherPriorityTaskWoken was initialised to pdFALSE when it was + * // declared (in this function). 
+ * if( xTimerResetFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The reset command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). + * } + * } + * @endverbatim + */ +#define xTimerResetFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_RESET_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U ) + + +/** + * BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, + * void *pvParameter1, + * uint32_t ulParameter2, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * + * Used from application interrupt service routines to defer the execution of a + * function to the RTOS daemon task (the timer service task, hence this function + * is implemented in timers.c and is prefixed with 'Timer'). + * + * Ideally an interrupt service routine (ISR) is kept as short as possible, but + * sometimes an ISR either has a lot of processing to do, or needs to perform + * processing that is not deterministic. In these cases + * xTimerPendFunctionCallFromISR() can be used to defer processing of a function + * to the RTOS daemon task. + * + * A mechanism is provided that allows the interrupt to return directly to the + * task that will subsequently execute the pended callback function. This + * allows the callback function to execute contiguously in time with the + * interrupt - just as if the callback had executed in the interrupt itself. + * + * @param xFunctionToPend The function to execute from the timer service/ + * daemon task. The function must conform to the PendedFunction_t + * prototype. + * + * @param pvParameter1 The value of the callback function's first parameter. + * The parameter has a void * type to allow it to be used to pass any type. + * For example, unsigned longs can be cast to a void *, or the void * can be + * used to point to a structure. + * + * @param ulParameter2 The value of the callback function's second parameter. + * + * @param pxHigherPriorityTaskWoken As mentioned above, calling this function + * will result in a message being sent to the timer daemon task. If the + * priority of the timer daemon task (which is set using + * configTIMER_TASK_PRIORITY in FreeRTOSConfig.h) is higher than the priority of + * the currently running task (the task the interrupt interrupted) then + * *pxHigherPriorityTaskWoken will be set to pdTRUE within + * xTimerPendFunctionCallFromISR(), indicating that a context switch should be + * requested before the interrupt exits. For that reason + * *pxHigherPriorityTaskWoken must be initialised to pdFALSE. See the + * example code below. + * + * @return pdPASS is returned if the message was successfully sent to the + * timer daemon task, otherwise pdFALSE is returned. + * + * Example usage: + * @verbatim + * + * // The callback function that will execute in the context of the daemon task. + * // Note callback functions must all use this same prototype. 
+ * void vProcessInterface( void *pvParameter1, uint32_t ulParameter2 ) + * { + * BaseType_t xInterfaceToService; + * + * // The interface that requires servicing is passed in the second + * // parameter. The first parameter is not used in this case. + * xInterfaceToService = ( BaseType_t ) ulParameter2; + * + * // ...Perform the processing here... + * } + * + * // An ISR that receives data packets from multiple interfaces + * void vAnISR( void ) + * { + * BaseType_t xInterfaceToService, xHigherPriorityTaskWoken; + * + * // Query the hardware to determine which interface needs processing. + * xInterfaceToService = prvCheckInterfaces(); + * + * // The actual processing is to be deferred to a task. Request the + * // vProcessInterface() callback function is executed, passing in the + * // number of the interface that needs processing. The interface to + * // service is passed in the second parameter. The first parameter is + * // not used in this case. + * xHigherPriorityTaskWoken = pdFALSE; + * xTimerPendFunctionCallFromISR( vProcessInterface, NULL, ( uint32_t ) xInterfaceToService, &xHigherPriorityTaskWoken ); + * + * // If xHigherPriorityTaskWoken is now set to pdTRUE then a context + * // switch should be requested. The macro used is port specific and will + * // be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() - refer to + * // the documentation page for the port being used. + * portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); + * + * } + * @endverbatim + */ +BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + + /** + * BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, + * void *pvParameter1, + * uint32_t ulParameter2, + * TickType_t xTicksToWait ); + * + * + * Used to defer the execution of a function to the RTOS daemon task (the timer + * service task, hence this function is implemented in timers.c and is prefixed + * with 'Timer'). + * + * @param xFunctionToPend The function to execute from the timer service/ + * daemon task. The function must conform to the PendedFunction_t + * prototype. + * + * @param pvParameter1 The value of the callback function's first parameter. + * The parameter has a void * type to allow it to be used to pass any type. + * For example, unsigned longs can be cast to a void *, or the void * can be + * used to point to a structure. + * + * @param ulParameter2 The value of the callback function's second parameter. + * + * @param xTicksToWait Calling this function will result in a message being + * sent to the timer daemon task on a queue. xTicksToWait is the amount of + * time the calling task should remain in the Blocked state (so not using any + * processing time) for space to become available on the timer queue if the + * queue is found to be full. + * + * @return pdPASS is returned if the message was successfully sent to the + * timer daemon task, otherwise pdFALSE is returned. + * + */ +BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * const char * const pcTimerGetName( TimerHandle_t xTimer ); + * + * Returns the name that was assigned to a timer when the timer was created. + * + * @param xTimer The handle of the timer being queried. + * + * @return The name assigned to the timer specified by the xTimer parameter. 
+ */ +const char * pcTimerGetName( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ); + * + * Updates a timer to be either an autoreload timer, in which case the timer + * automatically resets itself each time it expires, or a one shot timer, in + * which case the timer will only expire once unless it is manually restarted. + * + * @param xTimer The handle of the timer being updated. + * + * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will + * expire repeatedly with a frequency set by the timer's period (see the + * xTimerPeriodInTicks parameter of the xTimerCreate() API function). If + * uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + */ +void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) PRIVILEGED_FUNCTION; + +/** + * TickType_t xTimerGetPeriod( TimerHandle_t xTimer ); + * + * Returns the period of a timer. + * + * @param xTimer The handle of the timer being queried. + * + * @return The period of the timer in ticks. + */ +TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/** +* TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ); +* +* Returns the time in ticks at which the timer will expire. If this is less +* than the current tick count then the expiry time has overflowed from the +* current time. +* +* @param xTimer The handle of the timer being queried. +* +* @return If the timer is running then the time in ticks at which the timer +* will next expire is returned. If the timer is not running then the return +* value is undefined. +*/ +TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/* + * Functions beyond this part are not part of the public API and are intended + * for use by the kernel only. + */ +BaseType_t xTimerCreateTimerTask( void ) PRIVILEGED_FUNCTION; +BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +#if( configUSE_TRACE_FACILITY == 1 ) + void vTimerSetTimerNumber( TimerHandle_t xTimer, UBaseType_t uxTimerNumber ) PRIVILEGED_FUNCTION; + UBaseType_t uxTimerGetTimerNumber( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* TIMERS_H */ + + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/list.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/list.c new file mode 100644 index 000000000..21dabdecd --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/list.c @@ -0,0 +1,198 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#include +#include "FreeRTOS.h" +#include "list.h" + +/*----------------------------------------------------------- + * PUBLIC LIST API documented in list.h + *----------------------------------------------------------*/ + +void vListInitialise( List_t * const pxList ) +{ + /* The list structure contains a list item which is used to mark the + end of the list. To initialise the list the list end is inserted + as the only list entry. */ + pxList->pxIndex = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + + /* The list end value is the highest possible value in the list to + ensure it remains at the end of the list. */ + pxList->xListEnd.xItemValue = portMAX_DELAY; + + /* The list end next and previous pointers point to itself so we know + when the list is empty. */ + pxList->xListEnd.pxNext = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + pxList->xListEnd.pxPrevious = ( ListItem_t * ) &( pxList->xListEnd );/*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + + pxList->uxNumberOfItems = ( UBaseType_t ) 0U; + + /* Write known values into the list if + configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ); + listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ); +} +/*-----------------------------------------------------------*/ + +void vListInitialiseItem( ListItem_t * const pxItem ) +{ + /* Make sure the list item is not recorded as being on a list. */ + pxItem->pxContainer = NULL; + + /* Write known values into the list item if + configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ); + listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ); +} +/*-----------------------------------------------------------*/ + +void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem ) +{ +ListItem_t * const pxIndex = pxList->pxIndex; + + /* Only effective when configASSERT() is also defined, these tests may catch + the list data structures being overwritten in memory. 
They will not catch + data errors caused by incorrect configuration or use of FreeRTOS. */ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + + /* Insert a new list item into pxList, but rather than sort the list, + makes the new list item the last item to be removed by a call to + listGET_OWNER_OF_NEXT_ENTRY(). */ + pxNewListItem->pxNext = pxIndex; + pxNewListItem->pxPrevious = pxIndex->pxPrevious; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + pxIndex->pxPrevious->pxNext = pxNewListItem; + pxIndex->pxPrevious = pxNewListItem; + + /* Remember which list the item is in. */ + pxNewListItem->pxContainer = pxList; + + ( pxList->uxNumberOfItems )++; +} +/*-----------------------------------------------------------*/ + +void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem ) +{ +ListItem_t *pxIterator; +const TickType_t xValueOfInsertion = pxNewListItem->xItemValue; + + /* Only effective when configASSERT() is also defined, these tests may catch + the list data structures being overwritten in memory. They will not catch + data errors caused by incorrect configuration or use of FreeRTOS. */ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + + /* Insert the new list item into the list, sorted in xItemValue order. + + If the list already contains a list item with the same item value then the + new list item should be placed after it. This ensures that TCBs which are + stored in ready lists (all of which have the same xItemValue value) get a + share of the CPU. However, if the xItemValue is the same as the back marker + the iteration loop below will not end. Therefore the value is checked + first, and the algorithm slightly modified if necessary. */ + if( xValueOfInsertion == portMAX_DELAY ) + { + pxIterator = pxList->xListEnd.pxPrevious; + } + else + { + /* *** NOTE *********************************************************** + If you find your application is crashing here then likely causes are + listed below. In addition see https://www.freertos.org/FAQHelp.html for + more tips, and ensure configASSERT() is defined! + https://www.freertos.org/a00110.html#configASSERT + + 1) Stack overflow - + see https://www.freertos.org/Stacks-and-stack-overflow-checking.html + 2) Incorrect interrupt priority assignment, especially on Cortex-M + parts where numerically high priority values denote low actual + interrupt priorities, which can seem counter intuitive. See + https://www.freertos.org/RTOS-Cortex-M3-M4.html and the definition + of configMAX_SYSCALL_INTERRUPT_PRIORITY on + https://www.freertos.org/a00110.html + 3) Calling an API function from within a critical section or when + the scheduler is suspended, or calling an API function that does + not end in "FromISR" from an interrupt. + 4) Using a queue or semaphore before it has been initialised or + before the scheduler has been started (are interrupts firing + before vTaskStartScheduler() has been called?). + **********************************************************************/ + + for( pxIterator = ( ListItem_t * ) &( pxList->xListEnd ); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator = pxIterator->pxNext ) /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. *//*lint !e440 The iterator moves to a different value, not xValueOfInsertion. */ + { + /* There is nothing to do here, just iterating to the wanted + insertion position. 
*/ + } + } + + pxNewListItem->pxNext = pxIterator->pxNext; + pxNewListItem->pxNext->pxPrevious = pxNewListItem; + pxNewListItem->pxPrevious = pxIterator; + pxIterator->pxNext = pxNewListItem; + + /* Remember which list the item is in. This allows fast removal of the + item later. */ + pxNewListItem->pxContainer = pxList; + + ( pxList->uxNumberOfItems )++; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) +{ +/* The list item knows which list it is in. Obtain the list from the list +item. */ +List_t * const pxList = pxItemToRemove->pxContainer; + + pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious; + pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + /* Make sure the index is left pointing to a valid item. */ + if( pxList->pxIndex == pxItemToRemove ) + { + pxList->pxIndex = pxItemToRemove->pxPrevious; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxItemToRemove->pxContainer = NULL; + ( pxList->uxNumberOfItems )--; + + return pxList->uxNumberOfItems; +} +/*-----------------------------------------------------------*/ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c new file mode 100644 index 000000000..ce867ee67 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c @@ -0,0 +1,765 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/*----------------------------------------------------------- + * Implementation of functions defined in portable.h for the ARM CM4F port. + *----------------------------------------------------------*/ + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +#ifndef __VFP_FP__ + #error This port can only be used when the project options are configured to enable hardware floating point support. +#endif + +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ + /* Ensure the SysTick is clocked at the same frequency as the core. 
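[Editorial note: a sketch of how the sorted list primitives in list.c above fit together. Outside the kernel these are normally private, so this is purely illustrative.]

#include "FreeRTOS.h"
#include "list.h"

static List_t xDemoList;
static ListItem_t xItemA, xItemB;

void vListSketch( void )
{
    vListInitialise( &xDemoList );
    vListInitialiseItem( &xItemA );
    vListInitialiseItem( &xItemB );

    /* vListInsert() keeps items in ascending xItemValue order, so B ends up
       at the head even though it is inserted second. */
    listSET_LIST_ITEM_VALUE( &xItemA, 200 );
    listSET_LIST_ITEM_VALUE( &xItemB, 100 );
    vListInsert( &xDemoList, &xItemA );
    vListInsert( &xDemoList, &xItemB );

    /* The head entry is the item with the smallest value (100 here). */
    configASSERT( listGET_ITEM_VALUE_OF_HEAD_ENTRY( &xDemoList ) == 100 );

    /* uxListRemove() returns the number of items left on the list. */
    configASSERT( uxListRemove( &xItemB ) == 1 );
}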
*/ + #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#else + /* The way the SysTick is clocked is not modified in case it is not the same + as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 0 ) +#endif + +/* Constants required to manipulate the core. Registers first... */ +#define portNVIC_SYSTICK_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( * ( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( * ( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SYSPRI2_REG ( * ( ( volatile uint32_t * ) 0xe000ed20 ) ) +/* ...then bits in the registers. */ +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) + +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) + +/* Constants required to check the validity of an interrupt priority. */ +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( * ( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) + +/* Masks off all bits but the VECTACTIVE bits in the ICSR register. */ +#define portVECTACTIVE_MASK ( 0xFFUL ) + +/* Constants required to manipulate the VFP. */ +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating point context control register. */ +#define portASPEN_AND_LSPEN_BITS ( 0x3UL << 30UL ) + +/* Constants required to set up the initial stack. */ +#define portINITIAL_XPSR ( 0x01000000 ) +#define portINITIAL_EXC_RETURN ( 0xfffffffd ) + +/* The systick is a 24-bit counter. */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/* For strict compliance with the Cortex-M spec the task start address should +have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ +#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) + +/* A fiddle factor to estimate the number of SysTick counts that would have +occurred while the SysTick counter is stopped during tickless idle +calculations. */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) + +/* Let the user override the pre-loading of the initial LR with the address of +prvTaskExitError() in case it messes up unwinding of the stack in the +debugger. */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/* + * Setup the timer to generate the tick interrupts. The implementation in this + * file is weak to allow application writers to change the timer used to + * generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ); + +/* + * Exception handlers. + */ +void xPortPendSVHandler( void ) __attribute__ (( naked )); +void xPortSysTickHandler( void ); +void vPortSVCHandler( void ) __attribute__ (( naked )); + +/* + * Start first task is a separate function so it can be tested in isolation. + */ +static void prvPortStartFirstTask( void ) __attribute__ (( naked )); + +/* + * Function to enable the VFP. 
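[Editorial note, with assumed numbers: taking the 216 MHz core clock typical of the STM32F7 used in this project and the usual 1 kHz FreeRTOS tick, the SysTick reload programmed by vPortSetupTimerInterrupt() further down is 216 000 000 / 1000 - 1 = 215 999, well inside the 24-bit limit portMAX_24_BIT_NUMBER (16 777 215). With tickless idle enabled the same numbers would cap xMaximumPossibleSuppressedTicks at 16 777 215 / 216 000, i.e. about 77 ticks. A compile-time sanity check of that arithmetic could look like this:]

/* Illustrative only - values assume a 216 MHz core clock and a 1 kHz tick. */
#if ( ( ( 216000000UL / 1000UL ) - 1UL ) > 0xFFFFFFUL )
    #error SysTick reload value would overflow the 24-bit counter
#endif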
+ */ +static void vPortEnableVFP( void ) __attribute__ (( naked )); + +/* + * Used to catch tasks that attempt to return from their implementing function. + */ +static void prvTaskExitError( void ); + +/*-----------------------------------------------------------*/ + +/* Each task maintains its own interrupt status in the critical nesting +variable. */ +static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; + +/* + * The number of SysTick increments that make up one tick period. + */ +#if( configUSE_TICKLESS_IDLE == 1 ) + static uint32_t ulTimerCountsForOneTick = 0; +#endif /* configUSE_TICKLESS_IDLE */ + +/* + * The maximum number of tick periods that can be suppressed is limited by the + * 24 bit resolution of the SysTick timer. + */ +#if( configUSE_TICKLESS_IDLE == 1 ) + static uint32_t xMaximumPossibleSuppressedTicks = 0; +#endif /* configUSE_TICKLESS_IDLE */ + +/* + * Compensate for the CPU cycles that pass while the SysTick is stopped (low + * power functionality only. + */ +#if( configUSE_TICKLESS_IDLE == 1 ) + static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ + +/* + * Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if( configASSERT_DEFINED == 1 ) + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * const ) portNVIC_IP_REGISTERS_OFFSET_16; +#endif /* configASSERT_DEFINED */ + +/*-----------------------------------------------------------*/ + +/* + * See header file for description. + */ +StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) +{ + /* Simulate the stack frame as it would be created by a context switch + interrupt. */ + + /* Offset added to account for the way the MCU uses the stack on entry/exit + of interrupts, and to ensure alignment. */ + pxTopOfStack--; + + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + + /* Save code space by skipping register initialisation. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + + /* A save method is being used that requires each task to maintain its + own exec return value. */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; + + pxTopOfStack -= 8; /* R11, R10, R9, R8, R7, R6, R5 and R4. */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ +volatile uint32_t ulDummy = 0; + + /* A function that implements a task must not exit or attempt to return to + its caller as there is nothing to return to. If a task wants to exit it + should instead call vTaskDelete( NULL ). + + Artificially force an assert() to be triggered if configASSERT() is + defined, then stop here so application writers can catch the error. */ + configASSERT( uxCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + started to remove a compiler warning about the function being defined + but never called. 
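[Editorial note: for readability, the frame built by pxPortInitialiseStack() above ends up laid out as follows, highest address first. This is only a restatement of the code.]

/*  xPSR              = 0x01000000 (Thumb bit set)
    PC                = task entry point with bit 0 cleared (portSTART_ADDRESS_MASK)
    LR                = portTASK_RETURN_ADDRESS (prvTaskExitError unless overridden)
    R12, R3, R2, R1   = not initialised (skipped to save code space)
    R0                = pvParameters
    EXC_RETURN        = 0xfffffffd (return to thread mode, use PSP, no FPU state yet)
    R11 .. R4         = not initialised                                              */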
ulDummy is used purely to quieten other warnings + about code appearing after this function is called - making ulDummy + volatile makes the compiler think the function could return and + therefore not output an 'unreachable code' warning for code that appears + after it. */ + } +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler( void ) +{ + __asm volatile ( + " ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */ + " ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ + " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */ + " msr psp, r0 \n" /* Restore the task stack pointer. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " bx r14 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + ); +} +/*-----------------------------------------------------------*/ + +static void prvPortStartFirstTask( void ) +{ + /* Start the first task. This also clears the bit that indicates the FPU is + in use in case the FPU was used before the scheduler was started - which + would otherwise result in the unnecessary leaving of space in the SVC stack + for lazy saving of FPU registers. */ + __asm volatile( + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n" + " ldr r0, [r0] \n" + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */ + " msr control, r0 \n" + " cpsie i \n" /* Globally enable interrupts. */ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc 0 \n" /* System call to start first task. */ + " nop \n" + ); +} +/*-----------------------------------------------------------*/ + +/* + * See header file for description. + */ +BaseType_t xPortStartScheduler( void ) +{ + /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. + See http://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); + + #if( configASSERT_DEFINED == 1 ) + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + functions can be called. ISR safe functions are those that end in + "FromISR". FreeRTOS maintains separate thread and ISR API functions to + ensure interrupt entry is as fast and simple as possible. + + Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; + + /* Determine the number of priority bits available. First write to all + possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Calculate the maximum acceptable priority group value for the number + of bits read back. 
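[Editorial note, worked through with the values typical of an STM32F7 and stated here as assumptions: the probe above writes 0xFF to the first user priority register and reads back 0xF0, because only the upper 4 priority bits are implemented. ucMaxSysCallPriority therefore becomes configMAX_SYSCALL_INTERRUPT_PRIORITY & 0xF0, which is 0x50 when configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY is 5 and configPRIO_BITS is 4. The loop that follows then shifts the top bit out of 0xF0 four times, leaving ulMaxPRIGROUPValue = 7 - 4 = 3 before it is shifted into the AIRCR PRIGROUP field.]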
*/ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + priority bits matches the number of priority bits actually queried + from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif + + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + priority bits matches the number of priority bits actually queried + from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif + + /* Shift the priority group value back to its position within the AIRCR + register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } + #endif /* conifgASSERT_DEFINED */ + + /* Make PendSV and SysTick the lowest priority interrupts. */ + portNVIC_SYSPRI2_REG |= portNVIC_PENDSV_PRI; + portNVIC_SYSPRI2_REG |= portNVIC_SYSTICK_PRI; + + /* Start the timer that generates the tick ISR. Interrupts are disabled + here already. */ + vPortSetupTimerInterrupt(); + + /* Initialise the critical nesting count ready for the first task. */ + uxCriticalNesting = 0; + + /* Ensure the VFP is enabled - it should be anyway. */ + vPortEnableVFP(); + + /* Lazy save always. */ + *( portFPCCR ) |= portASPEN_AND_LSPEN_BITS; + + /* Start the first task. */ + prvPortStartFirstTask(); + + /* Should never get here as the tasks will now be executing! Call the task + exit error function to prevent compiler warnings about a static function + not being called in the case that the application writer overrides this + functionality by defining configTASK_RETURN_ADDRESS. Call + vTaskSwitchContext() so link time optimisation does not remove the + symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here! */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) +{ + /* Not implemented in ports where there is nothing to return to. + Artificially force an assert. */ + configASSERT( uxCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) +{ + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + + /* This is not the interrupt safe version of the enter critical function so + assert() if it is being called from an interrupt context. Only API + functions that end in "FromISR" can be used in an interrupt. Only assert if + the critical nesting count is 1 to protect against recursive calls if the + assert function also uses a critical section. */ + if( uxCriticalNesting == 1 ) + { + configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + } +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) +{ + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void xPortPendSVHandler( void ) +{ + /* This is a naked function. 
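[Editorial note: a minimal sketch of how the nesting count maintained by vPortEnterCritical()/vPortExitCritical() above is exercised from application code. taskENTER_CRITICAL()/taskEXIT_CRITICAL() are the public wrappers; the function and variable names here are illustrative.]

#include "FreeRTOS.h"
#include "task.h"

static volatile uint32_t ulSharedCounter;

void vBumpSharedCounter( void )
{
    taskENTER_CRITICAL();       /* uxCriticalNesting: 0 -> 1, interrupts masked. */
    {
        ulSharedCounter++;

        taskENTER_CRITICAL();   /* Nested call: 1 -> 2, still masked. */
        {
            /* Calls may nest; interrupts stay masked until the count
               returns to zero. */
        }
        taskEXIT_CRITICAL();    /* 2 -> 1. */
    }
    taskEXIT_CRITICAL();        /* 1 -> 0, interrupts re-enabled. */
}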
*/ + + __asm volatile + ( + " mrs r0, psp \n" + " isb \n" + " \n" + " ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */ + " ldr r2, [r3] \n" + " \n" + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */ + " it eq \n" + " vstmdbeq r0!, {s16-s31} \n" + " \n" + " stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */ + " str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. */ + " \n" + " stmdb sp!, {r0, r3} \n" + " mov r0, %0 \n" + " cpsid i \n" /* Errata workaround. */ + " msr basepri, r0 \n" + " dsb \n" + " isb \n" + " cpsie i \n" /* Errata workaround. */ + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " ldmia sp!, {r0, r3} \n" + " \n" + " ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldr r0, [r1] \n" + " \n" + " ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */ + " \n" + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */ + " it eq \n" + " vldmiaeq r0!, {s16-s31} \n" + " \n" + " msr psp, r0 \n" + " isb \n" + " \n" + #ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata workaround. */ + #if WORKAROUND_PMU_CM001 == 1 + " push { r14 } \n" + " pop { pc } \n" + #endif + #endif + " \n" + " bx r14 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) + ); +} +/*-----------------------------------------------------------*/ + +void xPortSysTickHandler( void ) +{ + /* The SysTick runs at the lowest interrupt priority, so when this interrupt + executes all interrupts must be unmasked. There is therefore no need to + save and then restore the interrupt mask value as its value is already + known. */ + portDISABLE_INTERRUPTS(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* A context switch is required. Context switching is performed in + the PendSV interrupt. Pend the PendSV interrupt. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portENABLE_INTERRUPTS(); +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TICKLESS_IDLE == 1 ) + + __attribute__((weak)) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for + is accounted for as best it can be, but using the tickless mode will + inevitably result in some tiny drift of the time maintained by the + kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + tick periods. -1 is used because this code will execute part way + through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + method as that will mask interrupts that should exit sleep mode. 
*/ + __asm volatile( "cpsid i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + this tick period. */ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + above. */ + __asm volatile( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + set its parameter to 0 to indicate that its implementation contains + its own wait for interrupt or wait for event instruction, and so wfi + should not be executed again. However, the original expected idle + time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( &xModifiableIdleTime ); + if( xModifiableIdleTime > 0 ) + { + __asm volatile( "dsb" ::: "memory" ); + __asm volatile( "wfi" ); + __asm volatile( "isb" ); + } + configPOST_SLEEP_PROCESSING( &xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + out of sleep mode to execute immediately. see comments above + __disable_interrupt() call above. */ + __asm volatile( "cpsie i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + and interrupts that execute while the clock is stopped will increase + any slippage between the time maintained by the RTOS and calendar + time. */ + __asm volatile( "cpsid i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* Disable the SysTick clock without reading the + portNVIC_SYSTICK_CTRL_REG register to ensure the + portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + the time the SysTick is stopped for is accounted for as best it can + be, but using the tickless mode will inevitably result in some tiny + drift of the time maintained by the kernel with respect to calendar + time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + been set back to the current reload value (the reload back being + correct for the entire expected idle time) or if the SysTick is yet + to count to zero (in which case an interrupt other than the SysTick + must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + reloaded with ulReloadValue. Reset the + portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + period. 
*/ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + underflowed because the post sleep hook did something + that took too long. */ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + function exits, the tick value maintained by the tick is stepped + forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + Work out how long the sleep lasted rounded to complete tick + periods (not the ulReload value which accounted for part + ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrpts enabled. */ + __asm volatile( "cpsie i" ::: "memory" ); + } + } + +#endif /* #if configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +/* + * Setup the systick timer to generate the tick interrupts at the required + * frequency. + */ +__attribute__(( weak )) void vPortSetupTimerInterrupt( void ) +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and clear the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); +} +/*-----------------------------------------------------------*/ + +/* This is a naked function. */ +static void vPortEnableVFP( void ) +{ + __asm volatile + ( + " ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */ + " ldr r1, [r0] \n" + " \n" + " orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. 
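[Editorial note: when tickless idle is enabled, the configPRE_SLEEP_PROCESSING()/configPOST_SLEEP_PROCESSING() hooks used above can be routed to application functions. The sketch below follows the wiring CubeMX-generated FreeRTOSConfig.h files typically use; the hook names and bodies are illustrative, and note that this port passes a pointer to the idle time.]

/* In FreeRTOSConfig.h (typical CubeMX-style mapping, shown here as an assumption):
   extern void PreSleepProcessing( uint32_t *ulExpectedIdleTime );
   extern void PostSleepProcessing( uint32_t *ulExpectedIdleTime );
   #define configPRE_SLEEP_PROCESSING    PreSleepProcessing
   #define configPOST_SLEEP_PROCESSING   PostSleepProcessing
*/

#include <stdint.h>

void PreSleepProcessing( uint32_t *ulExpectedIdleTime )
{
    /* Gate clocks or drop regulator voltage here. Writing 0 through the
       pointer tells the port this hook executed its own WFI, so the port's
       own wfi instruction is skipped. */
    ( void ) ulExpectedIdleTime;
}

void PostSleepProcessing( uint32_t *ulExpectedIdleTime )
{
    /* Undo whatever PreSleepProcessing() changed. */
    ( void ) ulExpectedIdleTime;
}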
*/ + " str r1, [r0] \n" + " bx r14 " + ); +} +/*-----------------------------------------------------------*/ + +#if( configASSERT_DEFINED == 1 ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile( "mrs %0, ipsr" : "=r"( ulCurrentInterrupt ) :: "memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + an interrupt that has been assigned a priority above + configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + function. ISR safe FreeRTOS API functions must *only* be called + from interrupts that have been assigned a priority at or below + configMAX_SYSCALL_INTERRUPT_PRIORITY. + + Numerically low interrupt priority numbers represent logically high + interrupt priorities, therefore the priority of the interrupt must + be set to a value equal to or numerically *higher* than + configMAX_SYSCALL_INTERRUPT_PRIORITY. + + Interrupts that use the FreeRTOS API must not be left at their + default priority of zero as that is the highest possible priority, + which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + and therefore also guaranteed to be invalid. + + FreeRTOS maintains separate thread and ISR API functions to ensure + interrupt entry is as fast and simple as possible. + + The following links provide detailed information: + http://www.freertos.org/RTOS-Cortex-M3-M4.html + http://www.freertos.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + that define each interrupt's priority to be split between bits that + define the interrupt's pre-emption priority bits and bits that define + the interrupt's sub-priority. For simplicity all bits must be defined + to be pre-emption priority bits. The following assertion will fail if + this is not the case (if some bits represent a sub-priority). + + If the application only uses CMSIS libraries for interrupt + configuration then the correct setting can be achieved on all Cortex-M + devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + scheduler. Note however that some vendor specific peripheral libraries + assume a non-zero priority group setting, in which cases using a value + of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* configASSERT_DEFINED */ + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h new file mode 100644 index 000000000..62543ac74 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -0,0 +1,247 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
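[Editorial note: the assertions above are easiest to satisfy by configuring interrupts through CMSIS/HAL with no sub-priority bits. A sketch for an STM32Cube project like this one; the IRQ choice and the priority value 6 are assumptions, the only requirement being that the numeric priority is at or above configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY (commonly 5 in CubeMX FreeRTOS configurations).]

#include "stm32f7xx_hal.h"   /* Assumed device family for this project. */

static void prvConfigureSdmmcIrq( void )
{
    /* All 4 priority bits are pre-emption bits, no sub-priority, which keeps
       the PRIGROUP assertion above satisfied. */
    HAL_NVIC_SetPriorityGrouping( NVIC_PRIORITYGROUP_4 );

    /* 6 is numerically above (i.e. logically lower than) the maximum syscall
       priority, so this ISR may call ...FromISR() APIs. */
    HAL_NVIC_SetPriority( SDMMC1_IRQn, 6, 0 );
    HAL_NVIC_EnableIRQ( SDMMC1_IRQn );
}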
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------- + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the + * given hardware and compiler. + * + * These settings should not be altered. + *----------------------------------------------------------- + */ + +/* Type definitions. */ +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + + /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 +#endif +/*-----------------------------------------------------------*/ + +/* Architecture specifics. */ +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +/*-----------------------------------------------------------*/ + +/* Scheduler utilities. */ +#define portYIELD() \ +{ \ + /* Set a PendSV to request a context switch. */ \ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; \ + \ + /* Barriers are normally not required but do ensure the code is completely \ + within the specified behaviour for the architecture. */ \ + __asm volatile( "dsb" ::: "memory" ); \ + __asm volatile( "isb" ); \ +} + +#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired != pdFALSE ) portYIELD() +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/* Critical section management. 
*/ +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortRaiseBASEPRI() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vPortSetBASEPRI(x) +#define portDISABLE_INTERRUPTS() vPortRaiseBASEPRI() +#define portENABLE_INTERRUPTS() vPortSetBASEPRI(0) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() + +/*-----------------------------------------------------------*/ + +/* Task function macros as described on the FreeRTOS.org WEB site. These are +not necessary for to use this port. They are defined so the common demo files +(which build with all the ports) will build. */ +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/*-----------------------------------------------------------*/ + +/* Tickless idle/low power functionality. */ +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif +/*-----------------------------------------------------------*/ + +/* Architecture specific optimisations. */ +#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION + #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1 +#endif + +#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1 + + /* Generic helper function. */ + __attribute__( ( always_inline ) ) static inline uint8_t ucPortCountLeadingZeros( uint32_t ulBitmap ) + { + uint8_t ucReturn; + + __asm volatile ( "clz %0, %1" : "=r" ( ucReturn ) : "r" ( ulBitmap ) : "memory" ); + return ucReturn; + } + + /* Check the configuration. */ + #if( configMAX_PRIORITIES > 32 ) + #error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 difference priorities as tasks that share a priority will time slice. + #endif + + /* Store/clear the ready priorities in a bit map. */ + #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) + #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) + + /*-----------------------------------------------------------*/ + + #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) ucPortCountLeadingZeros( ( uxReadyPriorities ) ) ) + +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + +/*-----------------------------------------------------------*/ + +#ifdef configASSERT + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() +#endif + +/* portNOP() is not required by this port. */ +#define portNOP() + +#define portINLINE __inline + +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__(( always_inline)) +#endif + +portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) +{ +uint32_t ulCurrentInterrupt; +BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. 
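[Editorial note, a worked example of the CLZ-based selection above: if tasks are ready at priorities 1 and 4, uxReadyPriorities is (1UL << 1) | (1UL << 4) = 0x12. clz(0x00000012) is 27, so portGET_HIGHEST_PRIORITY() yields 31 - 27 = 4, the highest ready priority, in a handful of cycles and without a loop.]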
*/ + __asm volatile( "mrs %0, ipsr" : "=r"( ulCurrentInterrupt ) :: "memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} + +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static void vPortRaiseBASEPRI( void ) +{ +uint32_t ulNewBASEPRI; + + __asm volatile + ( + " mov %0, %1 \n" \ + " cpsid i \n" \ + " msr basepri, %0 \n" \ + " isb \n" \ + " dsb \n" \ + " cpsie i \n" \ + :"=r" (ulNewBASEPRI) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} + +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static uint32_t ulPortRaiseBASEPRI( void ) +{ +uint32_t ulOriginalBASEPRI, ulNewBASEPRI; + + __asm volatile + ( + " mrs %0, basepri \n" \ + " mov %1, %2 \n" \ + " cpsid i \n" \ + " msr basepri, %1 \n" \ + " isb \n" \ + " dsb \n" \ + " cpsie i \n" \ + :"=r" (ulOriginalBASEPRI), "=r" (ulNewBASEPRI) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); + + /* This return will not be reached but is necessary to prevent compiler + warnings. */ + return ulOriginalBASEPRI; +} +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static void vPortSetBASEPRI( uint32_t ulNewMaskValue ) +{ + __asm volatile + ( + " msr basepri, %0 " :: "r" ( ulNewMaskValue ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) + +#ifdef __cplusplus +} +#endif + +#endif /* PORTMACRO_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c new file mode 100644 index 000000000..d7cd8a5b4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c @@ -0,0 +1,436 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * A sample implementation of pvPortMalloc() and vPortFree() that combines + * (coalescences) adjacent memory blocks as they are freed, and in so doing + * limits memory fragmentation. 
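[Editorial note: the paired ulPortRaiseBASEPRI()/vPortSetBASEPRI() above are what the ...FROM_ISR critical-section macros expand to. A minimal sketch of the usual save/restore pattern inside an interrupt handler; the handler name is illustrative.]

#include "FreeRTOS.h"
#include "task.h"

void DMA2_Stream0_IRQHandler( void )
{
    UBaseType_t uxSavedInterruptStatus;

    /* Mask interrupts at or below configMAX_SYSCALL_INTERRUPT_PRIORITY via
       BASEPRI, remembering the previous mask so nesting is safe. */
    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        /* Access data shared with tasks here. */
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
}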
+ * + * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the + * memory management pages of http://www.FreeRTOS.org for more information. + */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) + #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0 +#endif + +/* Block sizes must not get too small. */ +#define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define heapBITS_PER_BYTE ( ( size_t ) 8 ) + +/* Allocate the memory for the heap. */ +#if( configAPPLICATION_ALLOCATED_HEAP == 1 ) + /* The application writer has already defined the array used for the RTOS + heap - probably so it can be placed in a special segment or address. */ + extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; +#else + static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; +#endif /* configAPPLICATION_ALLOCATED_HEAP */ + +/* Define the linked list structure. This is used to link free blocks in order +of their memory address. */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK *pxNextFreeBlock; /*<< The next free block in the list. */ + size_t xBlockSize; /*<< The size of the free block. */ +} BlockLink_t; + +/*-----------------------------------------------------------*/ + +/* + * Inserts a block of memory that is being freed into the correct position in + * the list of free memory blocks. The block being freed will be merged with + * the block in front it and/or the block behind it if the memory blocks are + * adjacent to each other. + */ +static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert ); + +/* + * Called automatically to setup the required heap structures the first time + * pvPortMalloc() is called. + */ +static void prvHeapInit( void ); + +/*-----------------------------------------------------------*/ + +/* The size of the structure placed at the beginning of each allocated memory +block must by correctly byte aligned. */ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + +/* Create a couple of list links to mark the start and end of the list. */ +static BlockLink_t xStart, *pxEnd = NULL; + +/* Keeps track of the number of free bytes remaining, but says nothing about +fragmentation. */ +static size_t xFreeBytesRemaining = 0U; +static size_t xMinimumEverFreeBytesRemaining = 0U; + +/* Gets set to the top bit of an size_t type. When this bit in the xBlockSize +member of an BlockLink_t structure is set then the block belongs to the +application. When the bit is free the block is still part of the free heap +space. */ +static size_t xBlockAllocatedBit = 0; + +/*-----------------------------------------------------------*/ + +void *pvPortMalloc( size_t xWantedSize ) +{ +BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink; +void *pvReturn = NULL; + + vTaskSuspendAll(); + { + /* If this is the first call to malloc then the heap will require + initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is + set. 
The top bit of the block size member of the BlockLink_t structure + is used to determine who owns the block - the application or the + kernel, so it must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number + of bytes. */ + if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); + configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size + was not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + block following the number of bytes requested. The void + cast is used to prevent byte alignment warnings from the + compiler. */ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the + single block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned + by the application and has no "next" block. 
*/ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + } + ( void ) xTaskResumeAll(); + + #if( configUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif + + configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void *pv ) +{ +uint8_t *puc = ( uint8_t * ) pv; +BlockLink_t *pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. */ + configASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + configASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + vTaskSuspendAll(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +void vPortInitialiseBlocks( void ) +{ + /* This just exists to keep the linker quiet. */ +} +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ +BlockLink_t *pxFirstFreeBlock; +uint8_t *pucAlignedHeap; +size_t uxAddress; +size_t xTotalHeapSize = configTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & portBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( portBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + entire heap space, minus the space taken by pxEnd. 
*/ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert ) +{ +BlockLink_t *pxIterator; +uint8_t *puc; + + /* Iterate through the list until a block is found that has a higher address + than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. */ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + before and the block after, then it's pxNextFreeBlock pointer will have + already been set, and should not be set here as that would make it point + to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/queue.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/queue.c new file mode 100644 index 000000000..d882bf670 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/queue.c @@ -0,0 +1,2941 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#include <stdlib.h> +#include <string.h> + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" + +#if ( configUSE_CO_ROUTINES == 1 ) + #include "croutine.h" +#endif + +/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified +because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined +for the header files above, but not in this file, in order to generate the +correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ + + +/* Constants used with the cRxLock and cTxLock structure members. */ +#define queueUNLOCKED ( ( int8_t ) -1 ) +#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) + +/* When the Queue_t structure is used to represent a base queue its pcHead and +pcTail members are used as pointers into the queue storage area. When the +Queue_t structure is used to represent a mutex pcHead and pcTail pointers are +not necessary, and the pcHead pointer is set to NULL to indicate that the +structure instead holds a pointer to the mutex holder (if any). Map alternative +names to the pcHead and structure member to ensure the readability of the code +is maintained. The QueuePointers_t and SemaphoreData_t types are used to form +a union as their usage is mutually exclusive dependent on what the queue is +being used for. */ +#define uxQueueType pcHead +#define queueQUEUE_IS_MUTEX NULL + +typedef struct QueuePointers +{ + int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ + int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */ +} QueuePointers_t; + +typedef struct SemaphoreData +{ + TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */ + UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ +} SemaphoreData_t; + +/* Semaphores do not actually store or copy data, so have an item size of +zero. */ +#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 ) +#define queueMUTEX_GIVE_BLOCK_TIME ( ( TickType_t ) 0U ) + +#if( configUSE_PREEMPTION == 0 ) + /* If the cooperative scheduler is being used then a yield should not be + performed just because a higher priority task has been woken. */ + #define queueYIELD_IF_USING_PREEMPTION() +#else + #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() +#endif + +/* + * Definition of the queue used by the scheduler. + * Items are queued by copy, not reference. 
See the following link for the + * rationale: https://www.freertos.org/Embedded-RTOS-Queues.html + */ +typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +{ + int8_t *pcHead; /*< Points to the beginning of the queue storage area. */ + int8_t *pcWriteTo; /*< Points to the free next place in the storage area. */ + + union + { + QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */ + SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */ + } u; + + List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ + List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ + + volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */ + UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ + UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ + + volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ + #endif + + #if ( configUSE_QUEUE_SETS == 1 ) + struct QueueDefinition *pxQueueSetContainer; + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxQueueNumber; + uint8_t ucQueueType; + #endif + +} xQUEUE; + +/* The old xQUEUE name is maintained above then typedefed to the new Queue_t +name below to enable the use of older kernel aware debuggers. */ +typedef xQUEUE Queue_t; + +/*-----------------------------------------------------------*/ + +/* + * The queue registry is just a means for kernel aware debuggers to locate + * queue structures. It has no other purpose so is an optional component. + */ +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + /* The type stored within the queue registry array. This allows a name + to be assigned to each queue making kernel aware debugging a little + more user friendly. */ + typedef struct QUEUE_REGISTRY_ITEM + { + const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + QueueHandle_t xHandle; + } xQueueRegistryItem; + + /* The old xQueueRegistryItem name is maintained above then typedefed to the + new xQueueRegistryItem name below to enable the use of older kernel aware + debuggers. */ + typedef xQueueRegistryItem QueueRegistryItem_t; + + /* The queue registry is simply an array of QueueRegistryItem_t structures. + The pcQueueName member of a structure being NULL is indicative of the + array position being vacant. */ + PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ]; + +#endif /* configQUEUE_REGISTRY_SIZE */ + +/* + * Unlocks a queue locked by a call to prvLockQueue. 
Locking a queue does not + * prevent an ISR from adding or removing items to the queue, but does prevent + * an ISR from removing tasks from the queue event lists. If an ISR finds a + * queue is locked it will instead increment the appropriate queue lock count + * to indicate that a task may require unblocking. When the queue in unlocked + * these lock counts are inspected, and the appropriate action taken. + */ +static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Uses a critical section to determine if there is any data in a queue. + * + * @return pdTRUE if the queue contains no items, otherwise pdFALSE. + */ +static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Uses a critical section to determine if there is any space in a queue. + * + * @return pdTRUE if there is no space, otherwise pdFALSE; + */ +static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Copies an item into the queue, either at the front of the queue or the + * back of the queue. + */ +static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION; + +/* + * Copies an item out of a queue. + */ +static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION; + +#if ( configUSE_QUEUE_SETS == 1 ) + /* + * Checks to see if a queue is a member of a queue set, and if so, notifies + * the queue set that the queue contains data. + */ + static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; +#endif + +/* + * Called after a Queue_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION; + +/* + * Mutexes are a special type of queue. When a mutex is created, first the + * queue is created, then prvInitialiseMutex() is called to configure the queue + * as a mutex. + */ +#if( configUSE_MUTEXES == 1 ) + static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION; +#endif + +#if( configUSE_MUTEXES == 1 ) + /* + * If a task waiting for a mutex causes the mutex holder to inherit a + * priority, but the waiting task times out, then the holder should + * disinherit the priority - but only down to the highest priority of any + * other tasks that are waiting for the same mutex. This function returns + * that priority. + */ + static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; +#endif +/*-----------------------------------------------------------*/ + +/* + * Macro to mark a queue as locked. Locking a queue prevents an ISR from + * accessing the queue event lists. 
+ */ +#define prvLockQueue( pxQueue ) \ + taskENTER_CRITICAL(); \ + { \ + if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ + { \ + ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \ + } \ + if( ( pxQueue )->cTxLock == queueUNLOCKED ) \ + { \ + ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \ + } \ + } \ + taskEXIT_CRITICAL() +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) +{ +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + + taskENTER_CRITICAL(); + { + pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; + pxQueue->pcWriteTo = pxQueue->pcHead; + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + pxQueue->cRxLock = queueUNLOCKED; + pxQueue->cTxLock = queueUNLOCKED; + + if( xNewQueue == pdFALSE ) + { + /* If there are tasks blocked waiting to read from the queue, then + the tasks will remain blocked as after this function exits the queue + will still be empty. If there are tasks blocked waiting to write to + the queue, then one should be unblocked as after this function exits + it will be possible to write to it. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Ensure the event queues start in the correct state. */ + vListInitialise( &( pxQueue->xTasksWaitingToSend ) ); + vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); + } + } + taskEXIT_CRITICAL(); + + /* A value is returned for calling semantic consistency with previous + versions. */ + return pdPASS; +} +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) + { + Queue_t *pxNewQueue; + + configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); + + /* The StaticQueue_t structure and the queue storage area must be + supplied. */ + configASSERT( pxStaticQueue != NULL ); + + /* A queue storage area should be provided if the item size is not 0, and + should not be provided if the item size is 0. */ + configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ); + configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticQueue_t or StaticSemaphore_t equals the size of + the real queue and semaphore structures. */ + volatile size_t xSize = sizeof( StaticQueue_t ); + configASSERT( xSize == sizeof( Queue_t ) ); + ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */ + } + #endif /* configASSERT_DEFINED */ + + /* The address of a statically allocated queue was passed in, use it. + The address of a statically allocated storage area was also passed in + but is already set. 
*/ + pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + + if( pxNewQueue != NULL ) + { + #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Queues can be allocated wither statically or dynamically, so + note this queue was allocated statically in case the queue is + later deleted. */ + pxNewQueue->ucStaticallyAllocated = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); + } + else + { + traceQUEUE_CREATE_FAILED( ucQueueType ); + mtCOVERAGE_TEST_MARKER(); + } + + return pxNewQueue; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) + { + Queue_t *pxNewQueue; + size_t xQueueSizeInBytes; + uint8_t *pucQueueStorage; + + configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); + + if( uxItemSize == ( UBaseType_t ) 0 ) + { + /* There is not going to be a queue storage area. */ + xQueueSizeInBytes = ( size_t ) 0; + } + else + { + /* Allocate enough space to hold the maximum number of items that + can be in the queue at any time. */ + xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + + /* Allocate the queue and storage area. Justification for MISRA + deviation as follows: pvPortMalloc() always ensures returned memory + blocks are aligned per the requirements of the MCU stack. In this case + pvPortMalloc() must return a pointer that is guaranteed to meet the + alignment requirements of the Queue_t structure - which in this case + is an int8_t *. Therefore, whenever the stack alignment requirements + are greater than or equal to the pointer to char requirements the cast + is safe. In other cases alignment requirements are not strict (one or + two bytes). */ + pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */ + + if( pxNewQueue != NULL ) + { + /* Jump past the queue structure to find the location of the queue + storage area. */ + pucQueueStorage = ( uint8_t * ) pxNewQueue; + pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* Queues can be created either statically or dynamically, so + note this task was created dynamically in case it is later + deleted. */ + pxNewQueue->ucStaticallyAllocated = pdFALSE; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); + } + else + { + traceQUEUE_CREATE_FAILED( ucQueueType ); + mtCOVERAGE_TEST_MARKER(); + } + + return pxNewQueue; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) +{ + /* Remove compiler warnings about unused parameters should + configUSE_TRACE_FACILITY not be set to 1. 
*/ + ( void ) ucQueueType; + + if( uxItemSize == ( UBaseType_t ) 0 ) + { + /* No RAM was allocated for the queue storage area, but PC head cannot + be set to NULL because NULL is used as a key to say the queue is used as + a mutex. Therefore just set pcHead to point to the queue as a benign + value that is known to be within the memory map. */ + pxNewQueue->pcHead = ( int8_t * ) pxNewQueue; + } + else + { + /* Set the head to the start of the queue storage area. */ + pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage; + } + + /* Initialise the queue members as described where the queue type is + defined. */ + pxNewQueue->uxLength = uxQueueLength; + pxNewQueue->uxItemSize = uxItemSize; + ( void ) xQueueGenericReset( pxNewQueue, pdTRUE ); + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + pxNewQueue->ucQueueType = ucQueueType; + } + #endif /* configUSE_TRACE_FACILITY */ + + #if( configUSE_QUEUE_SETS == 1 ) + { + pxNewQueue->pxQueueSetContainer = NULL; + } + #endif /* configUSE_QUEUE_SETS */ + + traceQUEUE_CREATE( pxNewQueue ); +} +/*-----------------------------------------------------------*/ + +#if( configUSE_MUTEXES == 1 ) + + static void prvInitialiseMutex( Queue_t *pxNewQueue ) + { + if( pxNewQueue != NULL ) + { + /* The queue create function will set all the queue structure members + correctly for a generic queue, but this function is creating a + mutex. Overwrite those members that need to be set differently - + in particular the information required for priority inheritance. */ + pxNewQueue->u.xSemaphore.xMutexHolder = NULL; + pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX; + + /* In case this is a recursive mutex. */ + pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0; + + traceCREATE_MUTEX( pxNewQueue ); + + /* Start with the semaphore in the expected state. */ + ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK ); + } + else + { + traceCREATE_MUTEX_FAILED(); + } + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) + { + QueueHandle_t xNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType ); + prvInitialiseMutex( ( Queue_t * ) xNewQueue ); + + return xNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) + { + QueueHandle_t xNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + /* Prevent compiler warnings about unused parameters if + configUSE_TRACE_FACILITY does not equal 1. 
*/ + ( void ) ucQueueType; + + xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType ); + prvInitialiseMutex( ( Queue_t * ) xNewQueue ); + + return xNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore ) + { + TaskHandle_t pxReturn; + Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore; + + /* This function is called by xSemaphoreGetMutexHolder(), and should not + be called directly. Note: This is a good way of determining if the + calling task is the mutex holder, but not a good way of determining the + identity of the mutex holder, as the holder may change between the + following critical section exiting and the function returning. */ + taskENTER_CRITICAL(); + { + if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) + { + pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder; + } + else + { + pxReturn = NULL; + } + } + taskEXIT_CRITICAL(); + + return pxReturn; + } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */ + +#endif +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) + { + TaskHandle_t pxReturn; + + configASSERT( xSemaphore ); + + /* Mutexes cannot be used in interrupt service routines, so the mutex + holder should not change in an ISR, and therefore a critical section is + not required here. */ + if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX ) + { + pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder; + } + else + { + pxReturn = NULL; + } + + return pxReturn; + } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */ + +#endif +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) + { + BaseType_t xReturn; + Queue_t * const pxMutex = ( Queue_t * ) xMutex; + + configASSERT( pxMutex ); + + /* If this is the task that holds the mutex then xMutexHolder will not + change outside of this task. If this task does not hold the mutex then + pxMutexHolder can never coincidentally equal the tasks handle, and as + this is the only condition we are interested in it does not matter if + pxMutexHolder is accessed simultaneously by another task. Therefore no + mutual exclusion is required to test the pxMutexHolder variable. */ + if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) + { + traceGIVE_MUTEX_RECURSIVE( pxMutex ); + + /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to + the task handle, therefore no underflow check is required. Also, + uxRecursiveCallCount is only modified by the mutex holder, and as + there can only be one, no mutual exclusion is required to modify the + uxRecursiveCallCount member. */ + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--; + + /* Has the recursive call count unwound to 0? */ + if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 ) + { + /* Return the mutex. This will automatically unblock any other + task that might be waiting to access the mutex. 
*/ + ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = pdPASS; + } + else + { + /* The mutex cannot be given because the calling task is not the + holder. */ + xReturn = pdFAIL; + + traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ); + } + + return xReturn; + } + +#endif /* configUSE_RECURSIVE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxMutex = ( Queue_t * ) xMutex; + + configASSERT( pxMutex ); + + /* Comments regarding mutual exclusion as per those within + xQueueGiveMutexRecursive(). */ + + traceTAKE_MUTEX_RECURSIVE( pxMutex ); + + if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) + { + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++; + xReturn = pdPASS; + } + else + { + xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait ); + + /* pdPASS will only be returned if the mutex was successfully + obtained. The calling task may have entered the Blocked state + before reaching here. */ + if( xReturn != pdFAIL ) + { + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++; + } + else + { + traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex ); + } + } + + return xReturn; + } + +#endif /* configUSE_RECURSIVE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) + { + QueueHandle_t xHandle; + + configASSERT( uxMaxCount != 0 ); + configASSERT( uxInitialCount <= uxMaxCount ); + + xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) + { + QueueHandle_t xHandle; + + configASSERT( uxMaxCount != 0 ); + configASSERT( uxInitialCount <= uxMaxCount ); + + xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) +{ +BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvItemToQueue 
== NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to + allow return statements within the function itself. This is done in the + interest of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + /* Is there room on the queue now? The running task must be the + highest priority task wanting to access the queue. If the head item + in the queue is to be overwritten then it does not matter if the + queue is full. */ + if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) + { + traceQUEUE_SEND( pxQueue ); + + #if ( configUSE_QUEUE_SETS == 1 ) + { + UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; + + xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) ) + { + /* Do not notify the queue set as an existing item + was overwritten in the queue so the number of items + in the queue has not changed. */ + mtCOVERAGE_TEST_MARKER(); + } + else if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting + to the queue set caused a higher priority task to + unblock. A context switch is required. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If there was a task waiting for data to arrive on the + queue then unblock it now. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The unblocked task has a priority higher than + our own so yield immediately. Yes it is ok to + do this from within the critical section - the + kernel takes care of that. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xYieldRequired != pdFALSE ) + { + /* This path is a special case that will only get + executed if the task was holding multiple mutexes + and the mutexes were given back in an order that is + different to that in which they were taken. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + /* If there was a task waiting for data to arrive on the + queue then unblock it now. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The unblocked task has a priority higher than + our own so yield immediately. Yes it is ok to do + this from within the critical section - the kernel + takes care of that. 
*/ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xYieldRequired != pdFALSE ) + { + /* This path is a special case that will only get + executed if the task was holding multiple mutexes and + the mutexes were given back in an order that is + different to that in which they were taken. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was full and no block time is specified (or + the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + + /* Return to the original privilege level before exiting + the function. */ + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was full and a block time was specified so + configure the timeout structure. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_SEND( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); + + /* Unlocking the queue means queue events can effect the + event list. It is possible that interrupts occurring now + remove this task from the event list again - but as the + scheduler is suspended the task will go onto the pending + ready last instead of the actual ready list. */ + prvUnlockQueue( pxQueue ); + + /* Resuming the scheduler will move tasks from the pending + ready list into the ready list - so it is feasible that this + task is already in a ready list before it yields - in which + case the yield will not cause a context switch unless there + is also a higher priority task in the pending ready list. */ + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + } + else + { + /* Try again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. 
If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + /* Similar to xQueueGenericSend, except without blocking if there is no room + in the queue. Also don't directly wake a task that was blocked on a queue + read, instead return a flag to say whether a context switch is required or + not (i.e. has a task with a higher priority than us been woken by this + post). */ + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) + { + const int8_t cTxLock = pxQueue->cTxLock; + + traceQUEUE_SEND_FROM_ISR( pxQueue ); + + /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a + semaphore or mutex. That means prvCopyDataToQueue() cannot result + in a task disinheriting a priority and prvCopyDataToQueue() can be + called here even though the disinherit function does not check if + the scheduler is suspended before accessing the ready lists. */ + ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + /* The event list is not altered if the queue is locked. This will + be done when the queue is unlocked later. */ + if( cTxLock == queueUNLOCKED ) + { + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting + to the queue set caused a higher priority task to + unblock. A context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so + record that a context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. 
*/ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was posted while it was locked. */ + pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + xReturn = errQUEUE_FULL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = xQueue; + + /* Similar to xQueueGenericSendFromISR() but used with semaphores where the + item size is 0. Don't directly wake a task that was blocked on a queue + read, instead return a flag to say whether a context switch is required or + not (i.e. has a task with a higher priority than us been woken by this + post). */ + + configASSERT( pxQueue ); + + /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR() + if the item size is not 0. */ + configASSERT( pxQueue->uxItemSize == 0 ); + + /* Normally a mutex would not be given from an interrupt, especially if + there is a mutex holder, as priority inheritance makes no sense for an + interrupts, only tasks. */ + configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* When the queue is used to implement a semaphore no data is ever + moved through the queue but it is still valid to see if the queue 'has + space'. */ + if( uxMessagesWaiting < pxQueue->uxLength ) + { + const int8_t cTxLock = pxQueue->cTxLock; + + traceQUEUE_SEND_FROM_ISR( pxQueue ); + + /* A task can only have an inherited priority if it is a mutex + holder - and if there is a mutex holder then the mutex cannot be + given from an ISR. As this is the ISR version of the function it + can be assumed there is no mutex holder and no need to determine if + priority disinheritance is needed. 
Simply increase the count of + messages (semaphores) available. */ + pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; + + /* The event list is not altered if the queue is locked. This will + be done when the queue is unlocked later. */ + if( cTxLock == queueUNLOCKED ) + { + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE ) + { + /* The semaphore is a member of a queue set, and + posting to the queue set caused a higher priority + task to unblock. A context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so + record that a context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was posted while it was locked. */ + pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + xReturn = errQUEUE_FULL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = xQueue; + + /* Check the pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* The buffer into which data is received can only be NULL if the data size + is zero (so no data is copied into the buffer. */ + configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to + allow return statements within the function itself. This is done in the + interest of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. 
*/ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data available, remove one item. */ + prvCopyDataFromQueue( pxQueue, pvBuffer ); + traceQUEUE_RECEIVE( pxQueue ); + pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; + + /* There is now space in the queue, were any tasks waiting to + post to the queue? If so, unblock the highest priority waiting + task. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was empty and no block time is specified (or + the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was empty and a block time was specified so + configure the timeout structure. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* The timeout has not expired. If the queue is still empty place + the task on the list of tasks waiting to receive from the queue. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The queue contains data again. Loop back to try and read the + data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. If there is no data in the queue exit, otherwise loop + back and attempt to read the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = xQueue; + +#if( configUSE_MUTEXES == 1 ) + BaseType_t xInheritanceOccurred = pdFALSE; +#endif + + /* Check the queue pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* Check this really is a semaphore, in which case the item size will be + 0. */ + configASSERT( pxQueue->uxItemSize == 0 ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to allow return + statements within the function itself. 
This is done in the interest + of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + /* Semaphores are queues with an item size of 0, and where the + number of messages in the queue is the semaphore's count value. */ + const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. */ + if( uxSemaphoreCount > ( UBaseType_t ) 0 ) + { + traceQUEUE_RECEIVE( pxQueue ); + + /* Semaphores are queues with a data size of zero and where the + messages waiting is the semaphore's count. Reduce the count. */ + pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1; + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + /* Record the information required to implement + priority inheritance should it become necessary. */ + pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_MUTEXES */ + + /* Check to see if other tasks are blocked waiting to give the + semaphore, and if so, unblock the highest priority such task. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* For inheritance to have occurred there must have been an + initial timeout, and an adjusted timeout cannot become 0, as + if it were 0 the function would have exited. */ + #if( configUSE_MUTEXES == 1 ) + { + configASSERT( xInheritanceOccurred == pdFALSE ); + } + #endif /* configUSE_MUTEXES */ + + /* The semaphore count was 0 and no block time is specified + (or the block time has expired) so exit now. */ + taskEXIT_CRITICAL(); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The semaphore count was 0 and a block time was specified + so configure the timeout structure ready to block. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can give to and take from the semaphore + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* A block time is specified and not expired. If the semaphore + count is 0 then enter the Blocked state to wait for a semaphore to + become available. As semaphores are implemented with queues the + queue being empty is equivalent to the semaphore count being 0. 
*/ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + taskENTER_CRITICAL(); + { + xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif + + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There was no timeout and the semaphore count was not 0, so + attempt to take the semaphore again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + /* If the semaphore count is 0 exit now as the timeout has + expired. Otherwise return to attempt to take the semaphore that is + known to be available. As semaphores are implemented by queues the + queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + #if ( configUSE_MUTEXES == 1 ) + { + /* xInheritanceOccurred could only have be set if + pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to + test the mutex type again to check it is actually a mutex. */ + if( xInheritanceOccurred != pdFALSE ) + { + taskENTER_CRITICAL(); + { + UBaseType_t uxHighestWaitingPriority; + + /* This task blocking on the mutex caused another + task to inherit this task's priority. Now this task + has timed out the priority should be disinherited + again, but only as low as the next highest priority + task that is waiting for the same mutex. */ + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); + } + taskEXIT_CRITICAL(); + } + } + #endif /* configUSE_MUTEXES */ + + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +int8_t *pcOriginalReadPosition; +Queue_t * const pxQueue = xQueue; + + /* Check the pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* The buffer into which data is received can only be NULL if the data size + is zero (so no data is copied into the buffer. */ + configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to + allow return statements within the function itself. This is done in the + interest of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. 
*/ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Remember the read position so it can be reset after the data + is read from the queue as this function is only peeking the + data, not removing it. */ + pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; + + prvCopyDataFromQueue( pxQueue, pvBuffer ); + traceQUEUE_PEEK( pxQueue ); + + /* The data is not being removed, so reset the read pointer. */ + pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; + + /* The data is being left in the queue, so see if there are + any other tasks waiting for the data. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority than this task. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was empty and no block time is specified (or + the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was empty and a block time was specified so + configure the timeout structure ready to enter the blocked + state. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Timeout has not expired yet, check to see if there is data in the + queue now, and if not enter the Blocked state to wait for data. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There is data in the queue now, so don't enter the blocked + state, instead return to try and obtain the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. If there is still no data in the queue + exit, otherwise go back and try to read the data again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. 
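xQueuePeek(), completed above, copies an item out of the queue without removing it and then restores the read pointer. A short usage sketch, assuming a hypothetical uint32_t event queue created elsewhere (all names here are illustrative):

#include "FreeRTOS.h"
#include "queue.h"

/* Hypothetical queue created elsewhere with xQueueCreate( 8, sizeof( uint32_t ) ). */
extern QueueHandle_t xEventQueue;

void vExamplePeekThenDecide( void )
{
uint32_t ulEvent;

    /* Peek waits up to 10 ms but leaves the item on the queue, so tasks
    blocked on xTasksWaitingToReceive are still woken as shown above. */
    if( xQueuePeek( xEventQueue, &ulEvent, pdMS_TO_TICKS( 10 ) ) == pdPASS )
    {
        if( ulEvent == 0x01UL )
        {
            /* Remove the item only when this task really wants to consume it. */
            ( void ) xQueueReceive( xEventQueue, &ulEvent, 0 );
        }
    }
}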
Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Cannot block in an ISR, so check there is data available. */ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + const int8_t cRxLock = pxQueue->cRxLock; + + traceQUEUE_RECEIVE_FROM_ISR( pxQueue ); + + prvCopyDataFromQueue( pxQueue, pvBuffer ); + pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; + + /* If the queue is locked the event list will not be modified. + Instead update the lock count so the task that unlocks the queue + will know that an ISR has removed data while the queue was + locked. */ + if( cRxLock == queueUNLOCKED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + /* The task waiting has a higher priority than us so + force a context switch. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was removed while it was locked. */ + pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +int8_t *pcOriginalReadPosition; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */ + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. 
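xQueueReceiveFromISR(), shown above, never blocks and reports through pxHigherPriorityTaskWoken whether a context switch should be requested. A typical ISR-side sketch; the queue handle and ISR name are hypothetical, and the final yield macro is the port-provided one (portYIELD_FROM_ISR() on the Cortex-M ports used in this project):

#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xTxQueue;    /* Hypothetical queue of uint8_t bytes. */

void vExampleTxCompleteISR( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;
uint8_t ucByte;

    /* Cannot block in an ISR - just try to take the next byte. */
    if( xQueueReceiveFromISR( xTxQueue, &ucByte, &xHigherPriorityTaskWoken ) == pdPASS )
    {
        /* ... write ucByte to the peripheral data register ... */
    }

    /* Yield if removing the byte unblocked a higher-priority sender. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}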
If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Cannot block in an ISR, so check there is data available. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + traceQUEUE_PEEK_FROM_ISR( pxQueue ); + + /* Remember the read position so it can be reset as nothing is + actually being removed from the queue. */ + pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; + prvCopyDataFromQueue( pxQueue, pvBuffer ); + pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; + + configASSERT( xQueue ); + + taskENTER_CRITICAL(); + { + uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + + taskENTER_CRITICAL(); + { + uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + uxReturn = pxQueue->uxMessagesWaiting; + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +void vQueueDelete( QueueHandle_t xQueue ) +{ +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + traceQUEUE_DELETE( pxQueue ); + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + { + vQueueUnregisterQueue( pxQueue ); + } + #endif + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) + { + /* The queue can only have been allocated dynamically - free it + again. */ + vPortFree( pxQueue ); + } + #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + { + /* The queue could have been allocated statically or dynamically, so + check before attempting to free the memory. 
*/ + if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE ) + { + vPortFree( pxQueue ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else + { + /* The queue must have been statically allocated, so is not going to be + deleted. Avoid compiler warnings about the unused parameter. */ + ( void ) pxQueue; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) + { + return ( ( Queue_t * ) xQueue )->uxQueueNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) + { + ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) + { + return ( ( Queue_t * ) xQueue )->ucQueueType; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if( configUSE_MUTEXES == 1 ) + + static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) + { + UBaseType_t uxHighestPriorityOfWaitingTasks; + + /* If a task waiting for a mutex causes the mutex holder to inherit a + priority, but the waiting task times out, then the holder should + disinherit the priority - but only down to the highest priority of any + other tasks that are waiting for the same mutex. For this purpose, + return the priority of the highest priority task that is waiting for the + mutex. */ + if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U ) + { + uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ); + } + else + { + uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY; + } + + return uxHighestPriorityOfWaitingTasks; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) +{ +BaseType_t xReturn = pdFALSE; +UBaseType_t uxMessagesWaiting; + + /* This function is called from a critical section. */ + + uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + if( pxQueue->uxItemSize == ( UBaseType_t ) 0 ) + { + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + /* The mutex is no longer being held. */ + xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder ); + pxQueue->u.xSemaphore.xMutexHolder = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_MUTEXES */ + } + else if( xPosition == queueSEND_TO_BACK ) + { + ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. 
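vQueueDelete(), whose allocation handling is shown above, frees the control block only when it was created dynamically, and nothing in it guards against tasks still being blocked on the queue, so the caller must guarantee that. A small sketch with a hypothetical queue name:

#include "FreeRTOS.h"
#include "queue.h"

void vExampleCreateUseDelete( void )
{
QueueHandle_t xScratchQueue = xQueueCreate( 4, sizeof( uint32_t ) );

    if( xScratchQueue != NULL )
    {
        /* ... use the queue, ensuring no task is left blocked on it ... */

        /* With configSUPPORT_DYNAMIC_ALLOCATION == 1 this returns the
        storage to the FreeRTOS heap; statically created queues are only
        unregistered, not freed. */
        vQueueDelete( xScratchQueue );
    }
}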
*/ + pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */ + if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */ + { + pxQueue->pcWriteTo = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */ + pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize; + if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */ + { + pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xPosition == queueOVERWRITE ) + { + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* An item is not being added but overwritten, so subtract + one from the recorded number of items in the queue so when + one is added again below the number of recorded items remains + correct. */ + --uxMessagesWaiting; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) +{ + if( pxQueue->uxItemSize != ( UBaseType_t ) 0 ) + { + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */ + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */ + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */ + } +} +/*-----------------------------------------------------------*/ + +static void prvUnlockQueue( Queue_t * const pxQueue ) +{ + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ + + /* The lock counts contains the number of extra data items placed or + removed from the queue while the queue was locked. When a queue is + locked items can be added or removed, but the event lists cannot be + updated. */ + taskENTER_CRITICAL(); + { + int8_t cTxLock = pxQueue->cTxLock; + + /* See if data was added to the queue while it was locked. */ + while( cTxLock > queueLOCKED_UNMODIFIED ) + { + /* Data was posted while the queue was locked. Are any tasks + blocked waiting for data to become available? 
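The queueOVERWRITE branch in prvCopyDataToQueue() above decrements the message count before re-incrementing it, which is what lets xQueueOverwrite() behave as a single-item "mailbox". A sketch under the assumption of a hypothetical length-one queue:

#include "FreeRTOS.h"
#include "queue.h"

void vExampleMailbox( void )
{
/* A length-one queue holding only the most recent sample. */
QueueHandle_t xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );
uint32_t ulLatestSample = 123UL;

    if( xMailbox != NULL )
    {
        /* Never blocks: any existing item is simply overwritten. */
        ( void ) xQueueOverwrite( xMailbox, &ulLatestSample );

        /* Readers usually peek so the value remains available to others. */
        ( void ) xQueuePeek( xMailbox, &ulLatestSample, 0 );
    }
}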
*/ + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting to + the queue set caused a higher priority task to unblock. + A context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Tasks that are removed from the event list will get + added to the pending ready list as the scheduler is still + suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + /* Tasks that are removed from the event list will get added to + the pending ready list as the scheduler is still suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that + a context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + #endif /* configUSE_QUEUE_SETS */ + + --cTxLock; + } + + pxQueue->cTxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL(); + + /* Do the same for the Rx lock. */ + taskENTER_CRITICAL(); + { + int8_t cRxLock = pxQueue->cRxLock; + + while( cRxLock > queueLOCKED_UNMODIFIED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --cRxLock; + } + else + { + break; + } + } + + pxQueue->cRxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) +{ +BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) +{ +BaseType_t xReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} /*lint !e818 xQueue could not be pointer to const because it is a typedef. 
*/ +/*-----------------------------------------------------------*/ + +static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) +{ +BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) +{ +BaseType_t xReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* If the queue is already full we may have to block. A critical section + is required to prevent an interrupt removing something from the queue + between the check to see if the queue is full and blocking on the queue. */ + portDISABLE_INTERRUPTS(); + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + /* The queue is full - do we want to block or just leave without + posting? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is called from a coroutine we cannot block directly, but + return indicating that we need to block. */ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + /* There is room in the queue, copy the data into the queue. */ + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + xReturn = pdPASS; + + /* Were any co-routines waiting for data to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + into the ready list as we are within a critical section. + Instead the same pending ready list mechanism is used as if + the event were caused from within an interrupt. */ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The co-routine waiting has a higher priority so record + that a yield might be appropriate. */ + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = errQUEUE_FULL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* If the queue is already empty we may have to block. A critical section + is required to prevent an interrupt adding something to the queue + between the check to see if the queue is empty and blocking on the queue. 
*/ + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + /* There are no messages in the queue, do we want to block or just + leave with nothing? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is a co-routine we cannot block directly, but return + indicating that we need to block. */ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data is available from the queue. */ + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + xReturn = pdPASS; + + /* Were any co-routines waiting for space to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + into the ready list as we are within a critical section. + Instead the same pending ready list mechanism is used as if + the event were caused from within an interrupt. */ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = pdFAIL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken ) + { + Queue_t * const pxQueue = xQueue; + + /* Cannot block within an ISR so if there is no space on the queue then + exit without doing anything. */ + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + + /* We only want to wake one co-routine per ISR, so check that a + co-routine has not already been woken. */ + if( xCoRoutinePreviouslyWoken == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + return pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCoRoutinePreviouslyWoken; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* We cannot block from an ISR, so check there is data available. If + not then just leave without doing anything. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Copy the data from the queue. 
*/ + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + if( ( *pxCoRoutineWoken ) == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + *pxCoRoutineWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t ux; + + /* See if there is an empty space in the registry. A NULL name denotes + a free slot. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].pcQueueName == NULL ) + { + /* Store the information on this queue. */ + xQueueRegistry[ ux ].pcQueueName = pcQueueName; + xQueueRegistry[ ux ].xHandle = xQueue; + + traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName ); + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t ux; + const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + + /* Note there is nothing here to protect against another task adding or + removing entries from the registry while it is being searched. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + pcReturn = xQueueRegistry[ ux ].pcQueueName; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return pcReturn; + } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */ + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + void vQueueUnregisterQueue( QueueHandle_t xQueue ) + { + UBaseType_t ux; + + /* See if the handle of the queue being unregistered in actually in the + registry. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + /* Set the name to NULL to show that this slot if free again. */ + xQueueRegistry[ ux ].pcQueueName = NULL; + + /* Set the handle to NULL to ensure the same queue handle cannot + appear in the registry twice if it is added, removed, then + added again. */ + xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + } /*lint !e818 xQueue could not be pointer to const because it is a typedef. 
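The registry functions above (built only when configQUEUE_REGISTRY_SIZE > 0) simply associate a name string with a handle for kernel-aware debuggers, and they store the pointer rather than a copy, so the string must remain valid while the entry exists. A small sketch with hypothetical names:

#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xCommandQueue;    /* Hypothetical, created elsewhere. */

void vExampleRegisterQueues( void )
{
    /* The string literal has static storage duration, so the stored
    pointer stays valid for the lifetime of the registry entry. */
    vQueueAddToRegistry( xCommandQueue, "CommandQ" );

    /* ... and, if the entry is no longer wanted ... */
    vQueueUnregisterQueue( xCommandQueue );
}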
*/ + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + + void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) + { + Queue_t * const pxQueue = xQueue; + + /* This function should not be called by application code hence the + 'Restricted' in its name. It is not part of the public API. It is + designed for use by kernel code, and has special calling requirements. + It can result in vListInsert() being called on a list that can only + possibly ever have one item in it, so the list will be fast, but even + so it should be called with the scheduler locked and not from a critical + section. */ + + /* Only do anything if there are no messages in the queue. This function + will not actually cause the task to block, just place it on a blocked + list. It will not block until the scheduler is unlocked - at which + time a yield will be performed. If an item is added to the queue while + the queue is locked, and the calling task blocks on the queue, then the + calling task will be immediately unblocked when the queue is unlocked. */ + prvLockQueue( pxQueue ); + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) + { + /* There is nothing in the queue, block for the specified period. */ + vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + prvUnlockQueue( pxQueue ); + } + +#endif /* configUSE_TIMERS */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) + { + QueueSetHandle_t pxQueue; + + pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET ); + + return pxQueue; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) + { + BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL ) + { + /* Cannot add a queue/semaphore to more than one queue set. */ + xReturn = pdFAIL; + } + else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 ) + { + /* Cannot add a queue/semaphore to a queue set if there are already + items in the queue/semaphore. */ + xReturn = pdFAIL; + } + else + { + ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet; + xReturn = pdPASS; + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) + { + BaseType_t xReturn; + Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore; + + if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet ) + { + /* The queue was not a member of the set. */ + xReturn = pdFAIL; + } + else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 ) + { + /* It is dangerous to remove a queue from a set when the queue is + not empty because the queue set will still hold pending events for + the queue. 
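xQueueCreateSet() and xQueueAddToSet() above, together with xQueueSelectFromSet() defined just below, allow one task to block on several queues and semaphores at once. Members must be added while they are empty, and the set length must cover the combined lengths of its members. A sketch with hypothetical handles, assuming configUSE_QUEUE_SETS == 1:

#include "FreeRTOS.h"
#include "queue.h"
#include "semphr.h"

extern QueueHandle_t xDataQueue;          /* Hypothetical, length 8. */
extern SemaphoreHandle_t xWakeSemaphore;  /* Hypothetical binary semaphore. */

void vExampleEventLoop( void )
{
QueueSetHandle_t xSet = xQueueCreateSet( 8 + 1 ); /* Sum of member lengths. */
QueueSetMemberHandle_t xActivated;
uint32_t ulData;

    /* Members must be empty when they are added to the set. */
    ( void ) xQueueAddToSet( xDataQueue, xSet );
    ( void ) xQueueAddToSet( xWakeSemaphore, xSet );

    for( ;; )
    {
        /* Blocks until one of the members contains something. */
        xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

        if( xActivated == ( QueueSetMemberHandle_t ) xDataQueue )
        {
            ( void ) xQueueReceive( xDataQueue, &ulData, 0 );
        }
        else if( xActivated == ( QueueSetMemberHandle_t ) xWakeSemaphore )
        {
            ( void ) xSemaphoreTake( xWakeSemaphore, 0 );
        }
    }
}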
*/ + xReturn = pdFAIL; + } + else + { + taskENTER_CRITICAL(); + { + /* The queue is no longer contained in the set. */ + pxQueueOrSemaphore->pxQueueSetContainer = NULL; + } + taskEXIT_CRITICAL(); + xReturn = pdPASS; + } + + return xReturn; + } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */ + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait ) + { + QueueSetMemberHandle_t xReturn = NULL; + + ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */ + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) + { + QueueSetMemberHandle_t xReturn = NULL; + + ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */ + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) + { + Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer; + BaseType_t xReturn = pdFALSE; + + /* This function must be called form a critical section. */ + + configASSERT( pxQueueSetContainer ); + configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); + + if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) + { + const int8_t cTxLock = pxQueueSetContainer->cTxLock; + + traceQUEUE_SEND( pxQueueSetContainer ); + + /* The data copied is the handle of the queue that contains data. */ + xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition ); + + if( cTxLock == queueUNLOCKED ) + { + if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ + + + + + + + + + + + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c new file mode 100644 index 000000000..85519707b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c @@ -0,0 +1,1263 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* Standard includes. */ +#include +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* FreeRTOS includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "stream_buffer.h" + +#if( configUSE_TASK_NOTIFICATIONS != 1 ) + #error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c +#endif + +/* Lint e961, e9021 and e750 are suppressed as a MISRA exception justified +because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined +for the header files above, but not in this file, in order to generate the +correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ + +/* If the user has not provided application specific Rx notification macros, +or #defined the notification macros away, them provide default implementations +that uses task notifications. */ +/*lint -save -e9026 Function like macros allowed and needed here so they can be overidden. 
*/ +#ifndef sbRECEIVE_COMPLETED + #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ + vTaskSuspendAll(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ + { \ + ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \ + ( uint32_t ) 0, \ + eNoAction ); \ + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ + } \ + } \ + ( void ) xTaskResumeAll(); +#endif /* sbRECEIVE_COMPLETED */ + +#ifndef sbRECEIVE_COMPLETED_FROM_ISR + #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ + pxHigherPriorityTaskWoken ) \ + { \ + UBaseType_t uxSavedInterruptStatus; \ + \ + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ + { \ + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, \ + ( uint32_t ) 0, \ + eNoAction, \ + pxHigherPriorityTaskWoken ); \ + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ + } \ + } \ + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ + } +#endif /* sbRECEIVE_COMPLETED_FROM_ISR */ + +/* If the user has not provided an application specific Tx notification macro, +or #defined the notification macro away, them provide a default implementation +that uses task notifications. */ +#ifndef sbSEND_COMPLETED + #define sbSEND_COMPLETED( pxStreamBuffer ) \ + vTaskSuspendAll(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ + { \ + ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \ + ( uint32_t ) 0, \ + eNoAction ); \ + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ + } \ + } \ + ( void ) xTaskResumeAll(); +#endif /* sbSEND_COMPLETED */ + +#ifndef sbSEND_COMPLETE_FROM_ISR + #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ + { \ + UBaseType_t uxSavedInterruptStatus; \ + \ + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ + { \ + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, \ + ( uint32_t ) 0, \ + eNoAction, \ + pxHigherPriorityTaskWoken ); \ + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ + } \ + } \ + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ + } +#endif /* sbSEND_COMPLETE_FROM_ISR */ +/*lint -restore (9026) */ + +/* The number of bytes used to hold the length of a message in the buffer. */ +#define sbBYTES_TO_STORE_MESSAGE_LENGTH ( sizeof( configMESSAGE_BUFFER_LENGTH_TYPE ) ) + +/* Bits stored in the ucFlags field of the stream buffer. */ +#define sbFLAGS_IS_MESSAGE_BUFFER ( ( uint8_t ) 1 ) /* Set if the stream buffer was created as a message buffer, in which case it holds discrete messages rather than a stream. */ +#define sbFLAGS_IS_STATICALLY_ALLOCATED ( ( uint8_t ) 2 ) /* Set if the stream buffer was created using statically allocated memory. */ + +/*-----------------------------------------------------------*/ + +/* Structure that hold state information on the buffer. */ +typedef struct StreamBufferDef_t /*lint !e9058 Style convention uses tag. */ +{ + volatile size_t xTail; /* Index to the next item to read within the buffer. */ + volatile size_t xHead; /* Index to the next item to write within the buffer. */ + size_t xLength; /* The length of the buffer pointed to by pucBuffer. */ + size_t xTriggerLevelBytes; /* The number of bytes that must be in the stream buffer before a task that is waiting for data is unblocked. 
*/ + volatile TaskHandle_t xTaskWaitingToReceive; /* Holds the handle of a task waiting for data, or NULL if no tasks are waiting. */ + volatile TaskHandle_t xTaskWaitingToSend; /* Holds the handle of a task waiting to send data to a message buffer that is full. */ + uint8_t *pucBuffer; /* Points to the buffer itself - that is - the RAM that stores the data passed through the buffer. */ + uint8_t ucFlags; + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxStreamBufferNumber; /* Used for tracing purposes. */ + #endif +} StreamBuffer_t; + +/* + * The number of bytes available to be read from the buffer. + */ +static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; + +/* + * Add xCount bytes from pucData into the pxStreamBuffer message buffer. + * Returns the number of bytes written, which will either equal xCount in the + * success case, or 0 if there was not enough space in the buffer (in which case + * no data is written into the buffer). + */ +static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) PRIVILEGED_FUNCTION; + +/* + * If the stream buffer is being used as a message buffer, then reads an entire + * message out of the buffer. If the stream buffer is being used as a stream + * buffer then read as many bytes as possible from the buffer. + * prvReadBytesFromBuffer() is called to actually extract the bytes from the + * buffer's data storage area. + */ +static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + size_t xBytesAvailable, + size_t xBytesToStoreMessageLength ) PRIVILEGED_FUNCTION; + +/* + * If the stream buffer is being used as a message buffer, then writes an entire + * message to the buffer. If the stream buffer is being used as a stream + * buffer then write as many bytes as possible to the buffer. + * prvWriteBytestoBuffer() is called to actually send the bytes to the buffer's + * data storage area. + */ +static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + size_t xSpace, + size_t xRequiredSpace ) PRIVILEGED_FUNCTION; + +/* + * Read xMaxCount bytes from the pxStreamBuffer message buffer and write them + * to pucData. + */ +static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, + uint8_t *pucData, + size_t xMaxCount, + size_t xBytesAvailable ) PRIVILEGED_FUNCTION; + +/* + * Called by both pxStreamBufferCreate() and pxStreamBufferCreateStatic() to + * initialise the members of the newly created stream buffer structure. + */ +static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, + uint8_t * const pucBuffer, + size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + uint8_t ucFlags ) PRIVILEGED_FUNCTION; + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer ) + { + uint8_t *pucAllocatedMemory; + uint8_t ucFlags; + + /* In case the stream buffer is going to be used as a message buffer + (that is, it will hold discrete messages with a little meta data that + says how big the next message is) check the buffer will be large enough + to hold at least one message. */ + if( xIsMessageBuffer == pdTRUE ) + { + /* Is a message buffer but not statically allocated. 
*/ + ucFlags = sbFLAGS_IS_MESSAGE_BUFFER; + configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); + } + else + { + /* Not a message buffer and not statically allocated. */ + ucFlags = 0; + configASSERT( xBufferSizeBytes > 0 ); + } + configASSERT( xTriggerLevelBytes <= xBufferSizeBytes ); + + /* A trigger level of 0 would cause a waiting task to unblock even when + the buffer was empty. */ + if( xTriggerLevelBytes == ( size_t ) 0 ) + { + xTriggerLevelBytes = ( size_t ) 1; + } + + /* A stream buffer requires a StreamBuffer_t structure and a buffer. + Both are allocated in a single call to pvPortMalloc(). The + StreamBuffer_t structure is placed at the start of the allocated memory + and the buffer follows immediately after. The requested size is + incremented so the free space is returned as the user would expect - + this is a quirk of the implementation that means otherwise the free + space would be reported as one byte smaller than would be logically + expected. */ + xBufferSizeBytes++; + pucAllocatedMemory = ( uint8_t * ) pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); /*lint !e9079 malloc() only returns void*. */ + + if( pucAllocatedMemory != NULL ) + { + prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ + pucAllocatedMemory + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */ + xBufferSizeBytes, + xTriggerLevelBytes, + ucFlags ); + + traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer ); + } + else + { + traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ); + } + + return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + uint8_t * const pucStreamBufferStorageArea, + StaticStreamBuffer_t * const pxStaticStreamBuffer ) + { + StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) pxStaticStreamBuffer; /*lint !e740 !e9087 Safe cast as StaticStreamBuffer_t is opaque Streambuffer_t. */ + StreamBufferHandle_t xReturn; + uint8_t ucFlags; + + configASSERT( pucStreamBufferStorageArea ); + configASSERT( pxStaticStreamBuffer ); + configASSERT( xTriggerLevelBytes <= xBufferSizeBytes ); + + /* A trigger level of 0 would cause a waiting task to unblock even when + the buffer was empty. */ + if( xTriggerLevelBytes == ( size_t ) 0 ) + { + xTriggerLevelBytes = ( size_t ) 1; + } + + if( xIsMessageBuffer != pdFALSE ) + { + /* Statically allocated message buffer. */ + ucFlags = sbFLAGS_IS_MESSAGE_BUFFER | sbFLAGS_IS_STATICALLY_ALLOCATED; + } + else + { + /* Statically allocated stream buffer. */ + ucFlags = sbFLAGS_IS_STATICALLY_ALLOCATED; + } + + /* In case the stream buffer is going to be used as a message buffer + (that is, it will hold discrete messages with a little meta data that + says how big the next message is) check the buffer will be large enough + to hold at least one message. 
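xStreamBufferGenericCreate() above backs the xStreamBufferCreate() and xMessageBufferCreate() API macros; the control structure and the storage area come from a single pvPortMalloc() call, and the trigger level sets how many bytes must arrive before a blocked reader is woken. A short creation sketch (the sizes are arbitrary example values):

#include "FreeRTOS.h"
#include "stream_buffer.h"
#include "message_buffer.h"

void vExampleCreateBuffers( void )
{
/* 512-byte stream buffer; a blocked reader is woken once at least
   16 bytes are available (the trigger level). */
StreamBufferHandle_t xStream = xStreamBufferCreate( 512, 16 );

/* Message buffer: each write is stored as a discrete message preceded
   by its length. */
MessageBufferHandle_t xMessages = xMessageBufferCreate( 512 );

    if( ( xStream == NULL ) || ( xMessages == NULL ) )
    {
        /* Not enough FreeRTOS heap for the structure plus storage area. */
    }
}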
*/ + configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticStreamBuffer_t equals the size of the real + message buffer structure. */ + volatile size_t xSize = sizeof( StaticStreamBuffer_t ); + configASSERT( xSize == sizeof( StreamBuffer_t ) ); + } /*lint !e529 xSize is referenced is configASSERT() is defined. */ + #endif /* configASSERT_DEFINED */ + + if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) + { + prvInitialiseNewStreamBuffer( pxStreamBuffer, + pucStreamBufferStorageArea, + xBufferSizeBytes, + xTriggerLevelBytes, + ucFlags ); + + /* Remember this was statically allocated in case it is ever deleted + again. */ + pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED; + + traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ); + + xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */ + } + else + { + xReturn = NULL; + traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ); + } + + return xReturn; + } + +#endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + +void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) +{ +StreamBuffer_t * pxStreamBuffer = xStreamBuffer; + + configASSERT( pxStreamBuffer ); + + traceSTREAM_BUFFER_DELETE( xStreamBuffer ); + + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) pdFALSE ) + { + #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Both the structure and the buffer were allocated using a single call + to pvPortMalloc(), hence only one call to vPortFree() is required. */ + vPortFree( ( void * ) pxStreamBuffer ); /*lint !e9087 Standard free() semantics require void *, plus pxStreamBuffer was allocated by pvPortMalloc(). */ + } + #else + { + /* Should not be possible to get here, ucFlags must be corrupt. + Force an assert. */ + configASSERT( xStreamBuffer == ( StreamBufferHandle_t ) ~0 ); + } + #endif + } + else + { + /* The structure and buffer were not allocated dynamically and cannot be + freed - just scrub the structure so future use will assert. */ + ( void ) memset( pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); + } +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn = pdFAIL; + +#if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxStreamBufferNumber; +#endif + + configASSERT( pxStreamBuffer ); + + #if( configUSE_TRACE_FACILITY == 1 ) + { + /* Store the stream buffer number so it can be restored after the + reset. */ + uxStreamBufferNumber = pxStreamBuffer->uxStreamBufferNumber; + } + #endif + + /* Can only reset a message buffer if there are no tasks blocked on it. 
*/ + taskENTER_CRITICAL(); + { + if( pxStreamBuffer->xTaskWaitingToReceive == NULL ) + { + if( pxStreamBuffer->xTaskWaitingToSend == NULL ) + { + prvInitialiseNewStreamBuffer( pxStreamBuffer, + pxStreamBuffer->pucBuffer, + pxStreamBuffer->xLength, + pxStreamBuffer->xTriggerLevelBytes, + pxStreamBuffer->ucFlags ); + xReturn = pdPASS; + + #if( configUSE_TRACE_FACILITY == 1 ) + { + pxStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; + } + #endif + + traceSTREAM_BUFFER_RESET( xStreamBuffer ); + } + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; + + configASSERT( pxStreamBuffer ); + + /* It is not valid for the trigger level to be 0. */ + if( xTriggerLevel == ( size_t ) 0 ) + { + xTriggerLevel = ( size_t ) 1; + } + + /* The trigger level is the number of bytes that must be in the stream + buffer before a task that is waiting for data is unblocked. */ + if( xTriggerLevel <= pxStreamBuffer->xLength ) + { + pxStreamBuffer->xTriggerLevelBytes = xTriggerLevel; + xReturn = pdPASS; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xSpace; + + configASSERT( pxStreamBuffer ); + + xSpace = pxStreamBuffer->xLength + pxStreamBuffer->xTail; + xSpace -= pxStreamBuffer->xHead; + xSpace -= ( size_t ) 1; + + if( xSpace >= pxStreamBuffer->xLength ) + { + xSpace -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xSpace; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn; + + configASSERT( pxStreamBuffer ); + + xReturn = prvBytesInBuffer( pxStreamBuffer ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn, xSpace = 0; +size_t xRequiredSpace = xDataLengthBytes; +TimeOut_t xTimeOut; + + configASSERT( pvTxData ); + configASSERT( pxStreamBuffer ); + + /* This send function is used to write to both message buffers and stream + buffers. If this is a message buffer then the space needed must be + increased by the amount of bytes needed to store the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH; + + /* Overflow? */ + configASSERT( xRequiredSpace > xDataLengthBytes ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xTicksToWait != ( TickType_t ) 0 ) + { + vTaskSetTimeOutState( &xTimeOut ); + + do + { + /* Wait until the required number of bytes are free in the message + buffer. */ + taskENTER_CRITICAL(); + { + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + + if( xSpace < xRequiredSpace ) + { + /* Clear notification state as going to wait for space. */ + ( void ) xTaskNotifyStateClear( NULL ); + + /* Should only be one writer. 
*/ + configASSERT( pxStreamBuffer->xTaskWaitingToSend == NULL ); + pxStreamBuffer->xTaskWaitingToSend = xTaskGetCurrentTaskHandle(); + } + else + { + taskEXIT_CRITICAL(); + break; + } + } + taskEXIT_CRITICAL(); + + traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ); + ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); + pxStreamBuffer->xTaskWaitingToSend = NULL; + + } while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xSpace == ( size_t ) 0 ) + { + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace ); + + if( xReturn > ( size_t ) 0 ) + { + traceSTREAM_BUFFER_SEND( xStreamBuffer, xReturn ); + + /* Was a task waiting for the data? */ + if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) + { + sbSEND_COMPLETED( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ); + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn, xSpace; +size_t xRequiredSpace = xDataLengthBytes; + + configASSERT( pvTxData ); + configASSERT( pxStreamBuffer ); + + /* This send function is used to write to both message buffers and stream + buffers. If this is a message buffer then the space needed must be + increased by the amount of bytes needed to store the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace ); + + if( xReturn > ( size_t ) 0 ) + { + /* Was a task waiting for the data? */ + if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) + { + sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xReturn ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + size_t xSpace, + size_t xRequiredSpace ) +{ + BaseType_t xShouldWrite; + size_t xReturn; + + if( xSpace == ( size_t ) 0 ) + { + /* Doesn't matter if this is a stream buffer or a message buffer, there + is no space to write. */ + xShouldWrite = pdFALSE; + } + else if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) == ( uint8_t ) 0 ) + { + /* This is a stream buffer, as opposed to a message buffer, so writing a + stream of bytes rather than discrete messages. Write as many bytes as + possible. 
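xStreamBufferSend(), completed above, blocks on a task notification until enough space is free, and the configASSERT() documents that only a single writer task is supported at a time. A minimal sender sketch with a hypothetical buffer handle:

#include "FreeRTOS.h"
#include "stream_buffer.h"

extern StreamBufferHandle_t xTelemetryStream;    /* Hypothetical. */

void vExampleSendTelemetry( const uint8_t *pucData, size_t xLength )
{
size_t xSent;

    /* Waits up to 50 ms for space; returns the number of bytes actually
    written, which can be less than xLength for a stream buffer. */
    xSent = xStreamBufferSend( xTelemetryStream, pucData, xLength, pdMS_TO_TICKS( 50 ) );

    if( xSent < xLength )
    {
        /* The buffer filled up before everything could be queued. */
    }
}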
*/ + xShouldWrite = pdTRUE; + xDataLengthBytes = configMIN( xDataLengthBytes, xSpace ); + } + else if( xSpace >= xRequiredSpace ) + { + /* This is a message buffer, as opposed to a stream buffer, and there + is enough space to write both the message length and the message itself + into the buffer. Start by writing the length of the data, the data + itself will be written later in this function. */ + xShouldWrite = pdTRUE; + ( void ) prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) &( xDataLengthBytes ), sbBYTES_TO_STORE_MESSAGE_LENGTH ); + } + else + { + /* There is space available, but not enough space. */ + xShouldWrite = pdFALSE; + } + + if( xShouldWrite != pdFALSE ) + { + /* Writes the data itself. */ + xReturn = prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) pvTxData, xDataLengthBytes ); /*lint !e9079 Storage buffer is implemented as uint8_t for ease of sizing, alighment and access. */ + } + else + { + xReturn = 0; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength; + + configASSERT( pvRxData ); + configASSERT( pxStreamBuffer ); + + /* This receive function is used by both message buffers, which store + discrete messages, and stream buffers, which store a continuous stream of + bytes. Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + if( xTicksToWait != ( TickType_t ) 0 ) + { + /* Checking if there is data and clearing the notification state must be + performed atomically. */ + taskENTER_CRITICAL(); + { + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + + /* If this function was invoked by a message buffer read then + xBytesToStoreMessageLength holds the number of bytes used to hold + the length of the next discrete message. If this function was + invoked by a stream buffer read then xBytesToStoreMessageLength will + be 0. */ + if( xBytesAvailable <= xBytesToStoreMessageLength ) + { + /* Clear notification state as going to wait for data. */ + ( void ) xTaskNotifyStateClear( NULL ); + + /* Should only be one reader. */ + configASSERT( pxStreamBuffer->xTaskWaitingToReceive == NULL ); + pxStreamBuffer->xTaskWaitingToReceive = xTaskGetCurrentTaskHandle(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + if( xBytesAvailable <= xBytesToStoreMessageLength ) + { + /* Wait for data to be available. */ + traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer ); + ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); + pxStreamBuffer->xTaskWaitingToReceive = NULL; + + /* Recheck the data available after blocking. 
*/ + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + } + + /* Whether receiving a discrete message (where xBytesToStoreMessageLength + holds the number of bytes used to store the message length) or a stream of + bytes (where xBytesToStoreMessageLength is zero), the number of bytes + available must be greater than xBytesToStoreMessageLength to be able to + read bytes from the buffer. */ + if( xBytesAvailable > xBytesToStoreMessageLength ) + { + xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength ); + + /* Was a task waiting for space in the buffer? */ + if( xReceivedLength != ( size_t ) 0 ) + { + traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ); + sbRECEIVE_COMPLETED( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ); + mtCOVERAGE_TEST_MARKER(); + } + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn, xBytesAvailable, xOriginalTail; +configMESSAGE_BUFFER_LENGTH_TYPE xTempReturn; + + configASSERT( pxStreamBuffer ); + + /* Ensure the stream buffer is being used as a message buffer. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + if( xBytesAvailable > sbBYTES_TO_STORE_MESSAGE_LENGTH ) + { + /* The number of bytes available is greater than the number of bytes + required to hold the length of the next message, so another message + is available. Return its length without removing the length bytes + from the buffer. A copy of the tail is stored so the buffer can be + returned to its prior state as the message is not actually being + removed from the buffer. */ + xOriginalTail = pxStreamBuffer->xTail; + ( void ) prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) &xTempReturn, sbBYTES_TO_STORE_MESSAGE_LENGTH, xBytesAvailable ); + xReturn = ( size_t ) xTempReturn; + pxStreamBuffer->xTail = xOriginalTail; + } + else + { + /* The minimum amount of bytes in a message buffer is + ( sbBYTES_TO_STORE_MESSAGE_LENGTH + 1 ), so if xBytesAvailable is + less than sbBYTES_TO_STORE_MESSAGE_LENGTH the only other valid + value is 0. */ + configASSERT( xBytesAvailable == 0 ); + xReturn = 0; + } + } + else + { + xReturn = 0; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength; + + configASSERT( pvRxData ); + configASSERT( pxStreamBuffer ); + + /* This receive function is used by both message buffers, which store + discrete messages, and stream buffers, which store a continuous stream of + bytes. Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the + message. 
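The receiving side of the same illustrative stream buffer might look as follows. Again this is a sketch rather than imported code; hStreamBuffer, the buffer sizes and the task name are assumptions carried over from the sender sketch above.

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"
#include "stream_buffer.h"

extern StreamBufferHandle_t hStreamBuffer;   /* Created in the sender sketch. */

static void vStreamReceiverTask( void *pvParameters )
{
    uint8_t ucRxBuffer[ 32 ];
    size_t xReceivedBytes;

    ( void ) pvParameters;

    for( ;; )
    {
        /* Block for up to 500 ms. The task notification based wait and the
        trigger level check implemented by xStreamBufferReceive() above mean
        the task is only woken once enough data has arrived, or on timeout. */
        xReceivedBytes = xStreamBufferReceive( hStreamBuffer, ucRxBuffer, sizeof( ucRxBuffer ), pdMS_TO_TICKS( 500 ) );

        if( xReceivedBytes > ( size_t ) 0 )
        {
            /* Process the xReceivedBytes bytes now held in ucRxBuffer. */
        }
    }
}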
*/ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + + /* Whether receiving a discrete message (where xBytesToStoreMessageLength + holds the number of bytes used to store the message length) or a stream of + bytes (where xBytesToStoreMessageLength is zero), the number of bytes + available must be greater than xBytesToStoreMessageLength to be able to + read bytes from the buffer. */ + if( xBytesAvailable > xBytesToStoreMessageLength ) + { + xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength ); + + /* Was a task waiting for space in the buffer? */ + if( xReceivedLength != ( size_t ) 0 ) + { + sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ); + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + size_t xBytesAvailable, + size_t xBytesToStoreMessageLength ) +{ +size_t xOriginalTail, xReceivedLength, xNextMessageLength; +configMESSAGE_BUFFER_LENGTH_TYPE xTempNextMessageLength; + + if( xBytesToStoreMessageLength != ( size_t ) 0 ) + { + /* A discrete message is being received. First receive the length + of the message. A copy of the tail is stored so the buffer can be + returned to its prior state if the length of the message is too + large for the provided buffer. */ + xOriginalTail = pxStreamBuffer->xTail; + ( void ) prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) &xTempNextMessageLength, xBytesToStoreMessageLength, xBytesAvailable ); + xNextMessageLength = ( size_t ) xTempNextMessageLength; + + /* Reduce the number of bytes available by the number of bytes just + read out. */ + xBytesAvailable -= xBytesToStoreMessageLength; + + /* Check there is enough space in the buffer provided by the + user. */ + if( xNextMessageLength > xBufferLengthBytes ) + { + /* The user has provided insufficient space to read the message + so return the buffer to its previous state (so the length of + the message is in the buffer again). */ + pxStreamBuffer->xTail = xOriginalTail; + xNextMessageLength = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* A stream of bytes is being received (as opposed to a discrete + message), so read as many bytes as possible. */ + xNextMessageLength = xBufferLengthBytes; + } + + /* Read the actual data. */ + xReceivedLength = prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) pvRxData, xNextMessageLength, xBytesAvailable ); /*lint !e9079 Data storage area is implemented as uint8_t array for ease of sizing, indexing and alignment. */ + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; +size_t xTail; + + configASSERT( pxStreamBuffer ); + + /* True if no bytes are available. 
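The message buffer API in message_buffer.h is a thin wrapper over the functions above with the sbFLAGS_IS_MESSAGE_BUFFER flag set, so each message is stored with a sizeof( configMESSAGE_BUFFER_LENGTH_TYPE ) length prefix (size_t by default), exactly as handled by prvWriteMessageToBuffer() and prvReadMessageFromBuffer(). A hedged usage sketch, with an assumed 128 byte buffer and a hypothetical function name:

#include "FreeRTOS.h"
#include "message_buffer.h"

void vMessageBufferExample( void )
{
    MessageBufferHandle_t hMessages = xMessageBufferCreate( 128 );
    char cRx[ 32 ];
    size_t xLen;

    configASSERT( hMessages != NULL );

    /* Send one discrete 5 byte message; a zero block time means the call
    never waits for space. Each message costs 5 + sizeof( size_t ) bytes. */
    ( void ) xMessageBufferSend( hMessages, "hello", 5, 0 );

    /* Messages are returned whole: xLen will be 5 here, or 0 if cRx were too
    small to hold the next message (the message then stays in the buffer). */
    xLen = xMessageBufferReceive( hMessages, cRx, sizeof( cRx ), 0 );
    ( void ) xLen;
}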
*/ + xTail = pxStreamBuffer->xTail; + if( pxStreamBuffer->xHead == xTail ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) +{ +BaseType_t xReturn; +size_t xBytesToStoreMessageLength; +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; + + configASSERT( pxStreamBuffer ); + + /* This generic version of the receive function is used by both message + buffers, which store discrete messages, and stream buffers, which store a + continuous stream of bytes. Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + /* True if the available space equals zero. */ + if( xStreamBufferSpacesAvailable( xStreamBuffer ) <= xBytesToStoreMessageLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + configASSERT( pxStreamBuffer ); + + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) + { + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, + ( uint32_t ) 0, + eNoAction, + pxHigherPriorityTaskWoken ); + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + configASSERT( pxStreamBuffer ); + + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) + { + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, + ( uint32_t ) 0, + eNoAction, + pxHigherPriorityTaskWoken ); + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) +{ +size_t xNextHead, xFirstLength; + + configASSERT( xCount > ( size_t ) 0 ); + + xNextHead = pxStreamBuffer->xHead; + + /* Calculate the number of bytes that can be added in the first write - + which may be less than the total number of bytes that need to be added if + the buffer will wrap back to the beginning. */ + xFirstLength = configMIN( pxStreamBuffer->xLength - xNextHead, xCount ); + + /* Write as many bytes as can be written in the first write. 
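A common pattern is to feed a stream buffer from an interrupt, for example a UART receive interrupt, using the FromISR variant above. The handler name, the data source and the buffer handle below are illustrative assumptions, not part of the imported source.

#include <stdint.h>
#include "FreeRTOS.h"
#include "stream_buffer.h"

extern StreamBufferHandle_t hStreamBuffer;   /* Same illustrative buffer as in the earlier sketches. */

/* Hypothetical handler called with bytes already read from the peripheral. */
void vExampleUartRxHandler( const uint8_t *pucRxData, size_t xLength )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Never blocks; returns the number of bytes actually written. If the
    trigger level is reached, the waiting reader is notified via
    sbSEND_COMPLETE_FROM_ISR(), which may set xHigherPriorityTaskWoken. */
    ( void ) xStreamBufferSendFromISR( hStreamBuffer, pucRxData, xLength, &xHigherPriorityTaskWoken );

    /* Request a context switch on interrupt exit if a higher priority task
    was unblocked by the send. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}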
*/ + configASSERT( ( xNextHead + xFirstLength ) <= pxStreamBuffer->xLength ); + ( void ) memcpy( ( void* ) ( &( pxStreamBuffer->pucBuffer[ xNextHead ] ) ), ( const void * ) pucData, xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + + /* If the number of bytes written was less than the number that could be + written in the first write... */ + if( xCount > xFirstLength ) + { + /* ...then write the remaining bytes to the start of the buffer. */ + configASSERT( ( xCount - xFirstLength ) <= pxStreamBuffer->xLength ); + ( void ) memcpy( ( void * ) pxStreamBuffer->pucBuffer, ( const void * ) &( pucData[ xFirstLength ] ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xNextHead += xCount; + if( xNextHead >= pxStreamBuffer->xLength ) + { + xNextHead -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxStreamBuffer->xHead = xNextHead; + + return xCount; +} +/*-----------------------------------------------------------*/ + +static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, uint8_t *pucData, size_t xMaxCount, size_t xBytesAvailable ) +{ +size_t xCount, xFirstLength, xNextTail; + + /* Use the minimum of the wanted bytes and the available bytes. */ + xCount = configMIN( xBytesAvailable, xMaxCount ); + + if( xCount > ( size_t ) 0 ) + { + xNextTail = pxStreamBuffer->xTail; + + /* Calculate the number of bytes that can be read - which may be + less than the number wanted if the data wraps around to the start of + the buffer. */ + xFirstLength = configMIN( pxStreamBuffer->xLength - xNextTail, xCount ); + + /* Obtain the number of bytes it is possible to obtain in the first + read. Asserts check bounds of read and write. */ + configASSERT( xFirstLength <= xMaxCount ); + configASSERT( ( xNextTail + xFirstLength ) <= pxStreamBuffer->xLength ); + ( void ) memcpy( ( void * ) pucData, ( const void * ) &( pxStreamBuffer->pucBuffer[ xNextTail ] ), xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + + /* If the total number of wanted bytes is greater than the number + that could be read in the first read... */ + if( xCount > xFirstLength ) + { + /*...then read the remaining bytes from the start of the buffer. */ + configASSERT( xCount <= xMaxCount ); + ( void ) memcpy( ( void * ) &( pucData[ xFirstLength ] ), ( void * ) ( pxStreamBuffer->pucBuffer ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Move the tail pointer to effectively remove the data read from + the buffer. */ + xNextTail += xCount; + + if( xNextTail >= pxStreamBuffer->xLength ) + { + xNextTail -= pxStreamBuffer->xLength; + } + + pxStreamBuffer->xTail = xNextTail; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCount; +} +/*-----------------------------------------------------------*/ + +static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) +{ +/* Returns the distance between xTail and xHead. 
*/ +size_t xCount; + + xCount = pxStreamBuffer->xLength + pxStreamBuffer->xHead; + xCount -= pxStreamBuffer->xTail; + if ( xCount >= pxStreamBuffer->xLength ) + { + xCount -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCount; +} +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, + uint8_t * const pucBuffer, + size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + uint8_t ucFlags ) +{ + /* Assert here is deliberately writing to the entire buffer to ensure it can + be written to without generating exceptions, and is setting the buffer to a + known value to assist in development/debugging. */ + #if( configASSERT_DEFINED == 1 ) + { + /* The value written just has to be identifiable when looking at the + memory. Don't use 0xA5 as that is the stack fill value and could + result in confusion as to what is actually being observed. */ + const BaseType_t xWriteValue = 0x55; + configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer ); + } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */ + #endif + + ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */ + pxStreamBuffer->pucBuffer = pucBuffer; + pxStreamBuffer->xLength = xBufferSizeBytes; + pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes; + pxStreamBuffer->ucFlags = ucFlags; +} + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer ) + { + return xStreamBuffer->uxStreamBufferNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber ) + { + xStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer ) + { + return ( xStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ); + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/tasks.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/tasks.c new file mode 100644 index 000000000..db0516d70 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/tasks.c @@ -0,0 +1,5214 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/* Standard includes. */
+#include <stdlib.h>
+#include <string.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+all the API functions to use the MPU wrappers. That should only be done when
+task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* FreeRTOS includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "timers.h"
+#include "stack_macros.h"
+
+/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
+because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
+for the header files above, but not in this file, in order to generate the
+correct privileged Vs unprivileged linkage and placement. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
+
+/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
+functions but without including stdio.h here. */
+#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
+ /* At the bottom of this file are two optional functions that can be used
+ to generate human readable text from the raw data generated by the
+ uxTaskGetSystemState() function. Note the formatting functions are provided
+ for convenience only, and are NOT considered part of the kernel. */
+ #include <stdio.h>
+#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
+
+#if( configUSE_PREEMPTION == 0 )
+ /* If the cooperative scheduler is being used then a yield should not be
+ performed just because a higher priority task has been woken. */
+ #define taskYIELD_IF_USING_PREEMPTION()
+#else
+ #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
+#endif
+
+/* Values that can be assigned to the ucNotifyState member of the TCB. */
+#define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 )
+#define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 )
+#define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 )
+
+/*
+ * The value used to fill the stack of a task when the task is created. This
+ * is used purely for checking the high water mark for tasks.
+ */
+#define tskSTACK_FILL_BYTE ( 0xa5U )
+
+/* Bits used to record how a task's stack and TCB were allocated. */
+#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
+#define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
+#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
+
+/* If any of the following are set then task stacks are filled with a known
+value so the high water mark can be determined. If none of the following are
+set then don't fill the stack so there is no unnecessary dependency on memset. */
+#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
+ #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1
+#else
+ #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0
+#endif
+
+/*
+ * Macros used by vListTask to indicate which state a task is in. 
+ */ +#define tskRUNNING_CHAR ( 'X' ) +#define tskBLOCKED_CHAR ( 'B' ) +#define tskREADY_CHAR ( 'R' ) +#define tskDELETED_CHAR ( 'D' ) +#define tskSUSPENDED_CHAR ( 'S' ) + +/* + * Some kernel aware debuggers require the data the debugger needs access to be + * global, rather than file scope. + */ +#ifdef portREMOVE_STATIC_QUALIFIER + #define static +#endif + +/* The name allocated to the Idle task. This can be overridden by defining +configIDLE_TASK_NAME in FreeRTOSConfig.h. */ +#ifndef configIDLE_TASK_NAME + #define configIDLE_TASK_NAME "IDLE" +#endif + +#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is + performed in a generic way that is not optimised to any particular + microcontroller architecture. */ + + /* uxTopReadyPriority holds the priority of the highest priority ready + state task. */ + #define taskRECORD_READY_PRIORITY( uxPriority ) \ + { \ + if( ( uxPriority ) > uxTopReadyPriority ) \ + { \ + uxTopReadyPriority = ( uxPriority ); \ + } \ + } /* taskRECORD_READY_PRIORITY */ + + /*-----------------------------------------------------------*/ + + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority = uxTopReadyPriority; \ + \ + /* Find the highest priority queue that contains ready tasks. */ \ + while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ + { \ + configASSERT( uxTopPriority ); \ + --uxTopPriority; \ + } \ + \ + /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ + the same priority get an equal share of the processor time. */ \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + uxTopReadyPriority = uxTopPriority; \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + + /*-----------------------------------------------------------*/ + + /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as + they are only required when a port optimised method of task selection is + being used. */ + #define taskRESET_READY_PRIORITY( uxPriority ) + #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + +#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is + performed in a way that is tailored to the particular microcontroller + architecture being used. */ + + /* A port optimised version is provided. Call the port defined macros. */ + #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + + /*-----------------------------------------------------------*/ + + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority; \ + \ + /* Find the highest priority list that contains ready tasks. */ \ + portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ + configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK() */ + + /*-----------------------------------------------------------*/ + + /* A port optimised version is provided, call it only if the TCB being reset + is being referenced from a ready list. If it is referenced from a delayed + or suspended list then it won't be in a ready list. 
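The generic taskSELECT_HIGHEST_PRIORITY_TASK() above walks down from the highest recorded priority looking for a non-empty ready list. When configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 the port's portmacro.h supplies the macros instead; the Cortex-M ports typically keep a 32-bit bitmap of non-empty ready lists and use a count-leading-zeros instruction, roughly as in the sketch below. The example* names and __builtin_clz (GCC/Clang) are assumptions, not the port's actual definitions.

/* One bit per priority: bit N is set while ready list N is non-empty, which
   is why port optimised selection limits configMAX_PRIORITIES to 32. */
#define exampleRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) \
    ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )

#define exampleRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) \
    ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )

/* The highest set bit is the highest ready priority, found in constant time. */
#define exampleGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) \
    ( uxTopPriority ) = ( 31UL - ( uint32_t ) __builtin_clz( uxReadyPriorities ) )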
*/ + #define taskRESET_READY_PRIORITY( uxPriority ) \ + { \ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \ + { \ + portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \ + } \ + } + +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + +/*-----------------------------------------------------------*/ + +/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick +count overflows. */ +#define taskSWITCH_DELAYED_LISTS() \ +{ \ + List_t *pxTemp; \ + \ + /* The delayed tasks list should be empty when the lists are switched. */ \ + configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \ + \ + pxTemp = pxDelayedTaskList; \ + pxDelayedTaskList = pxOverflowDelayedTaskList; \ + pxOverflowDelayedTaskList = pxTemp; \ + xNumOfOverflows++; \ + prvResetNextTaskUnblockTime(); \ +} + +/*-----------------------------------------------------------*/ + +/* + * Place the task represented by pxTCB into the appropriate ready list for + * the task. It is inserted at the end of the list. + */ +#define prvAddTaskToReadyList( pxTCB ) \ + traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ + taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ + vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ + tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ) +/*-----------------------------------------------------------*/ + +/* + * Several functions take an TaskHandle_t parameter that can optionally be NULL, + * where NULL is used to indicate that the handle of the currently executing + * task should be used in place of the parameter. This macro simply checks to + * see if the parameter is NULL and returns a pointer to the appropriate TCB. + */ +#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) ) + +/* The item value of the event list item is normally used to hold the priority +of the task to which it belongs (coded to allow it to be held in reverse +priority order). However, it is occasionally borrowed for other purposes. It +is important its value is not updated due to a task priority change while it is +being used for another purpose. The following bit definition is used to inform +the scheduler that the value should not be changed - in which case it is the +responsibility of whichever module is using the value to ensure it gets set back +to its original value when it is released. */ +#if( configUSE_16_BIT_TICKS == 1 ) + #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U +#else + #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL +#endif + +/* + * Task control block. A task control block (TCB) is allocated for each task, + * and stores task state information, including a pointer to the task's context + * (the task's run time environment, including register values) + */ +typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +{ + volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ + + #if ( portUSING_MPU_WRAPPERS == 1 ) + xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ + #endif + + ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). 
*/ + ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ + UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ + StackType_t *pxStack; /*< Points to the start of the stack. */ + char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + + #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) + StackType_t *pxEndOfStack; /*< Points to the highest valid address for the stack. */ + #endif + + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */ + UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */ + #endif + + #if ( configUSE_MUTEXES == 1 ) + UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */ + UBaseType_t uxMutexesHeld; + #endif + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + TaskHookFunction_t pxTaskTag; + #endif + + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; + #endif + + #if( configGENERATE_RUN_TIME_STATS == 1 ) + uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + /* Allocate a Newlib reent structure that is specific to this task. + Note Newlib support has been included by popular demand, but is not + used by the FreeRTOS maintainers themselves. FreeRTOS is not + responsible for resulting newlib operation. User must be familiar with + newlib and must provide system-wide implementations of the necessary + stubs. Be warned that (at the time of writing) the current newlib design + implements a system-wide malloc() that must be provided with locks. */ + struct _reent xNewLib_reent; + #endif + + #if( configUSE_TASK_NOTIFICATIONS == 1 ) + volatile uint32_t ulNotifiedValue; + volatile uint8_t ucNotifyState; + #endif + + /* See the comments in FreeRTOS.h with the definition of + tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */ + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ + #endif + + #if( INCLUDE_xTaskAbortDelay == 1 ) + uint8_t ucDelayAborted; + #endif + + #if( configUSE_POSIX_ERRNO == 1 ) + int iTaskErrno; + #endif + +} tskTCB; + +/* The old tskTCB name is maintained above then typedefed to the new TCB_t name +below to enable the use of older kernel aware debuggers. */ +typedef tskTCB TCB_t; + +/*lint -save -e956 A manual analysis and inspection has been used to determine +which static variables must be declared volatile. */ +PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; + +/* Lists for ready and blocked tasks. 
-------------------- +xDelayedTaskList1 and xDelayedTaskList2 could be move to function scople but +doing so breaks some kernel aware debuggers and debuggers that rely on removing +the static qualifier. */ +PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ] = { 0 };/*< Prioritised ready tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList1 = { 0 }; /*< Delayed tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList2 = { 0 }; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList = NULL; /*< Points to the delayed task list currently being used. */ +PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList = NULL; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t xPendingReadyList = { 0 }; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ + +#if( INCLUDE_vTaskDelete == 1 ) + +PRIVILEGED_DATA static List_t xTasksWaitingTermination = { 0 }; /*< Tasks that have been deleted - but their memory not yet freed. */ + PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U; + +#endif + +#if ( INCLUDE_vTaskSuspend == 1 ) + + PRIVILEGED_DATA static List_t xSuspendedTaskList = { 0 }; /*< Tasks that are currently suspended. */ + +#endif + +/* Global POSIX errno. Its value is changed upon context switching to match +the errno of the currently running task. */ +#if ( configUSE_POSIX_ERRNO == 1 ) + int FreeRTOS_errno = 0; +#endif + +/* Other file private variables. --------------------------------*/ +PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U; +PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; +PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; +PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; +PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U; +PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE; +PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; +PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; +PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ + +/* Context switches are held pending while the scheduler is suspended. Also, +interrupts must not manipulate the xStateListItem of a TCB, or any of the +lists the xStateListItem can be referenced from, if the scheduler is suspended. +If an interrupt needs to unblock a task while the scheduler is suspended then it +moves the task's event list item into the xPendingReadyList, ready for the +kernel to move the task from the pending ready list into the real ready list +when the scheduler is unsuspended. The pending ready list itself can only be +accessed from a critical section. 
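Two delayed lists are kept because a wake time computed as xTickCount + xTicksToWait can numerically wrap around before the tick counter itself does. A simplified sketch of that decision follows; the real logic lives in prvAddCurrentTaskToDelayedList() later in this file, and the hypothetical function below only illustrates the arithmetic.

static void vExampleChooseDelayedList( TickType_t xConstTickCount, TickType_t xTicksToWait )
{
    /* Unsigned arithmetic, so the sum wraps when the delay crosses the point
    at which the tick counter will overflow. */
    const TickType_t xTimeToWake = xConstTickCount + xTicksToWait;

    if( xTimeToWake < xConstTickCount )
    {
        /* The wake time has wrapped: the task belongs on
        pxOverflowDelayedTaskList. The two lists are swapped by
        taskSWITCH_DELAYED_LISTS() when xTickCount itself overflows. */
    }
    else
    {
        /* The wake time is in the current tick epoch: use pxDelayedTaskList. */
    }
}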
*/ +PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + /* Do not move these variables to function scope as doing so prevents the + code working with debuggers that need to remove the static qualifier. */ + PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ + PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ + +#endif + +/*lint -restore */ + +/*-----------------------------------------------------------*/ + +/* Callback function prototypes. --------------------------*/ +#if( configCHECK_FOR_STACK_OVERFLOW > 0 ) + + extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName ); + +#endif + +#if( configUSE_TICK_HOOK > 0 ) + + extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */ + +#endif + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */ + +#endif + +/* File private functions. --------------------------------*/ + +/** + * Utility task that simply returns pdTRUE if the task referenced by xTask is + * currently in the Suspended state, or pdFALSE if the task referenced by xTask + * is in any other state. + */ +#if ( INCLUDE_vTaskSuspend == 1 ) + + static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +#endif /* INCLUDE_vTaskSuspend */ + +/* + * Utility to ready all the lists used by the scheduler. This is called + * automatically upon the creation of the first task. + */ +static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; + +/* + * The idle task, which as all tasks is implemented as a never ending loop. + * The idle task is automatically created and added to the ready lists upon + * creation of the first user task. + * + * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: + * + * void prvIdleTask( void *pvParameters ); + * + */ +static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ); + +/* + * Utility to free all memory allocated by the scheduler to hold a TCB, + * including the stack pointed to by the TCB. + * + * This does not free memory allocated by the task itself (i.e. memory + * allocated by calls to pvPortMalloc from within the tasks application code). + */ +#if ( INCLUDE_vTaskDelete == 1 ) + + static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Used only by the idle task. This checks to see if anything has been placed + * in the list of tasks waiting to be deleted. If so the task is cleaned up + * and its TCB deleted. + */ +static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION; + +/* + * The currently executing task is entering the Blocked state. Add the task to + * either the current or the overflow delayed task list. 
+ */ +static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION; + +/* + * Fills an TaskStatus_t structure with information on each task that is + * referenced from the pxList list (which may be a ready list, a delayed list, + * a suspended list, etc.). + * + * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM + * NORMAL APPLICATION CODE. + */ +#if ( configUSE_TRACE_FACILITY == 1 ) + + static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Searches pxList for a task with name pcNameToQuery - returning a handle to + * the task if it is found, or NULL if the task is not found. + */ +#if ( INCLUDE_xTaskGetHandle == 1 ) + + static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION; + +#endif + +/* + * When a task is created, the stack of the task is filled with a known value. + * This function determines the 'high water mark' of the task stack by + * determining how much of the stack remains at the original preset value. + */ +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) + + static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Return the amount of time, in ticks, that will pass before the kernel will + * next move a task from the Blocked state to the Running state. + * + * This conditional compilation should use inequality to 0, not equality to 1. + * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user + * defined low power mode implementations require configUSE_TICKLESS_IDLE to be + * set to a value other than 1. + */ +#if ( configUSE_TICKLESS_IDLE != 0 ) + + static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Set xNextTaskUnblockTime to the time at which the next Blocked state task + * will exit the Blocked state. + */ +static void prvResetNextTaskUnblockTime( void ); + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) + + /* + * Helper function used to pad task names with spaces when printing out + * human readable tables of task information. + */ + static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Called after a Task_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t *pxNewTCB, + const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION; + +/* + * Called after a new task has been created and initialised to place the task + * under the control of the scheduler. + */ +static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION; + +/* + * freertos_tasks_c_additions_init() should only be called if the user definable + * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro + * called by the function. 
+ */ +#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + + static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION; + +#endif + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) + { + TCB_t *pxNewTCB; + TaskHandle_t xReturn; + + configASSERT( puxStackBuffer != NULL ); + configASSERT( pxTaskBuffer != NULL ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticTask_t equals the size of the real task + structure. */ + volatile size_t xSize = sizeof( StaticTask_t ); + configASSERT( xSize == sizeof( TCB_t ) ); + ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */ + } + #endif /* configASSERT_DEFINED */ + + + if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) ) + { + /* The memory used for the task's TCB and stack are passed into this + function - use them. */ + pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ + { + /* Tasks can be created statically or dynamically, so note this + task was created statically in case the task is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); + prvAddNewTaskToReadyList( pxNewTCB ); + } + else + { + xReturn = NULL; + } + + return xReturn; + } + +#endif /* SUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer != NULL ); + configASSERT( pxTaskDefinition->pxTaskBuffer != NULL ); + + if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) ) + { + /* Allocate space for the TCB. Where the memory comes from depends + on the implementation of the port malloc function and whether or + not static allocation is being used. */ + pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer; + + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note this + task was created statically in case the task is later deleted. 
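A usage sketch for the static creation path above, which requires configSUPPORT_STATIC_ALLOCATION to be 1 in FreeRTOSConfig.h. The stack depth, names and the task body are illustrative assumptions.

#include "FreeRTOS.h"
#include "task.h"

#define exampleSTACK_DEPTH    200   /* Measured in StackType_t words, not bytes. */

static StackType_t uxExampleStack[ exampleSTACK_DEPTH ];
static StaticTask_t xExampleTaskBuffer;   /* Holds the task's TCB. */

static void vExampleTask( void *pvParameters )
{
    ( void ) pvParameters;

    for( ;; )
    {
        /* Application work would go here. */
        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
    }
}

void vCreateExampleStaticTask( void )
{
    /* No heap is used: the caller supplies both the stack and the
    StaticTask_t buffer, and the handle is returned directly. */
    TaskHandle_t xHandle = xTaskCreateStatic( vExampleTask,
                                              "ExStatic",
                                              exampleSTACK_DEPTH,
                                              NULL,
                                              tskIDLE_PRIORITY + 1,
                                              uxExampleStack,
                                              &xExampleTaskBuffer );
    configASSERT( xHandle != NULL );
}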
*/ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + + return xReturn; + } + +#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer ); + + if( pxTaskDefinition->puxStackBuffer != NULL ) + { + /* Allocate space for the TCB. Where the memory comes from depends + on the implementation of the port malloc function and whether or + not static allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note + this task had a statically allocated stack in case it is + later deleted. The TCB was allocated dynamically. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + } + + return xReturn; + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn; + + /* If the stack grows down then allocate the stack then the TCB so the stack + does not grow into the TCB. Likewise if the stack grows up then allocate + the TCB then the stack. */ + #if( portSTACK_GROWTH > 0 ) + { + /* Allocate space for the TCB. Where the memory comes from depends on + the implementation of the port malloc function and whether or not static + allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Allocate space for the stack used by the task being created. + The base of the stack memory stored in the TCB so the task can + be deleted later if required. */ + pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
*/ + + if( pxNewTCB->pxStack == NULL ) + { + /* Could not allocate the stack. Delete the allocated TCB. */ + vPortFree( pxNewTCB ); + pxNewTCB = NULL; + } + } + } + #else /* portSTACK_GROWTH */ + { + StackType_t *pxStack; + + /* Allocate space for the stack used by the task being created. */ + pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */ + + if( pxStack != NULL ) + { + /* Allocate space for the TCB. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */ + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxStack; + } + else + { + /* The stack cannot be used as the TCB was not created. Free + it again. */ + vPortFree( pxStack ); + } + } + else + { + pxNewTCB = NULL; + } + } + #endif /* portSTACK_GROWTH */ + + if( pxNewTCB != NULL ) + { + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */ + { + /* Tasks can be created statically or dynamically, so note this + task was created dynamically in case it is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + else + { + xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + } + + return xReturn; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t *pxNewTCB, + const MemoryRegion_t * const xRegions ) +{ +StackType_t *pxTopOfStack; +UBaseType_t x; + + #if( portUSING_MPU_WRAPPERS == 1 ) + /* Should the task be created in privileged mode? */ + BaseType_t xRunPrivileged; + if( ( uxPriority & portPRIVILEGE_BIT ) != 0U ) + { + xRunPrivileged = pdTRUE; + } + else + { + xRunPrivileged = pdFALSE; + } + uxPriority &= ~portPRIVILEGE_BIT; + #endif /* portUSING_MPU_WRAPPERS == 1 */ + + /* Avoid dependency on memset() if it is not required. */ + #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 ) + { + /* Fill the stack with a known value to assist debugging. */ + ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) ); + } + #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */ + + /* Calculate the top of stack address. This depends on whether the stack + grows from high memory to low (as per the 80x86) or vice versa. + portSTACK_GROWTH is used to make the result positive or negative as required + by the port. 
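The dynamic counterpart, xTaskCreate(), allocates both the TCB and the stack from the FreeRTOS heap in the order shown above (stack first when the stack grows down, so the stack cannot grow into the TCB). A hedged usage sketch with assumed names and sizes; it requires configSUPPORT_DYNAMIC_ALLOCATION and one of the heap_n.c implementations.

#include "FreeRTOS.h"
#include "task.h"

static void vBlinkTask( void *pvParameters )
{
    ( void ) pvParameters;

    for( ;; )
    {
        /* Application work would go here. */
        vTaskDelay( pdMS_TO_TICKS( 500 ) );
    }
}

void vCreateExampleDynamicTask( void )
{
    TaskHandle_t xHandle = NULL;

    /* Returns pdPASS on success, or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if
    either the TCB or the 200 word stack could not be allocated. */
    if( xTaskCreate( vBlinkTask, "Blink", 200, NULL, tskIDLE_PRIORITY + 1, &xHandle ) == pdPASS )
    {
        /* xHandle can later be passed to vTaskDelete(), vTaskPrioritySet(), etc. */
    }
}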
*/ + #if( portSTACK_GROWTH < 0 ) + { + pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] ); + pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */ + + /* Check the alignment of the calculated top of stack is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + #if( configRECORD_STACK_HIGH_ADDRESS == 1 ) + { + /* Also record the stack's high address, which may assist + debugging. */ + pxNewTCB->pxEndOfStack = pxTopOfStack; + } + #endif /* configRECORD_STACK_HIGH_ADDRESS */ + } + #else /* portSTACK_GROWTH */ + { + pxTopOfStack = pxNewTCB->pxStack; + + /* Check the alignment of the stack buffer is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + /* The other extreme of the stack space is required if stack checking is + performed. */ + pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 ); + } + #endif /* portSTACK_GROWTH */ + + /* Store the task name in the TCB. */ + if( pcName != NULL ) + { + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + pxNewTCB->pcTaskName[ x ] = pcName[ x ]; + + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + configMAX_TASK_NAME_LEN characters just in case the memory after the + string is not accessible (extremely unlikely). */ + if( pcName[ x ] == ( char ) 0x00 ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Ensure the name string is terminated in the case that the string length + was greater or equal to configMAX_TASK_NAME_LEN. */ + pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0'; + } + else + { + /* The task has not been given a name, so just ensure there is a NULL + terminator when it is read out. */ + pxNewTCB->pcTaskName[ 0 ] = 0x00; + } + + /* This is used as an array index so must ensure it's not too large. First + remove the privilege bit if one is present. */ + if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) + { + uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxNewTCB->uxPriority = uxPriority; + #if ( configUSE_MUTEXES == 1 ) + { + pxNewTCB->uxBasePriority = uxPriority; + pxNewTCB->uxMutexesHeld = 0; + } + #endif /* configUSE_MUTEXES */ + + vListInitialiseItem( &( pxNewTCB->xStateListItem ) ); + vListInitialiseItem( &( pxNewTCB->xEventListItem ) ); + + /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get + back to the containing TCB from a generic item in a list. */ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB ); + + /* Event lists are always in priority order. */ + listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
*/ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB ); + + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + { + pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U; + } + #endif /* portCRITICAL_NESTING_IN_TCB */ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + { + pxNewTCB->pxTaskTag = NULL; + } + #endif /* configUSE_APPLICATION_TASK_TAG */ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxNewTCB->ulRunTimeCounter = 0UL; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + #if ( portUSING_MPU_WRAPPERS == 1 ) + { + vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth ); + } + #else + { + /* Avoid compiler warning about unreferenced parameter. */ + ( void ) xRegions; + } + #endif + + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + { + for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ ) + { + pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL; + } + } + #endif + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + { + pxNewTCB->ulNotifiedValue = 0; + pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Initialise this task's Newlib reent structure. */ + _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) ); + } + #endif + + #if( INCLUDE_xTaskAbortDelay == 1 ) + { + pxNewTCB->ucDelayAborted = pdFALSE; + } + #endif + + /* Initialize the TCB stack to look as if the task was already running, + but had been interrupted by the scheduler. The return address is set + to the start of the task function. Once the stack has been initialised + the top of stack variable is updated. */ + #if( portUSING_MPU_WRAPPERS == 1 ) + { + /* If the port has capability to detect stack overflow, + pass the stack end address to the stack initialization + function as well. */ + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + { + #if( portSTACK_GROWTH < 0 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #else /* portSTACK_GROWTH */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #endif /* portSTACK_GROWTH */ + } + #else /* portHAS_STACK_OVERFLOW_CHECKING */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #endif /* portHAS_STACK_OVERFLOW_CHECKING */ + } + #else /* portUSING_MPU_WRAPPERS */ + { + /* If the port has capability to detect stack overflow, + pass the stack end address to the stack initialization + function as well. */ + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + { + #if( portSTACK_GROWTH < 0 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters ); + } + #else /* portSTACK_GROWTH */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters ); + } + #endif /* portSTACK_GROWTH */ + } + #else /* portHAS_STACK_OVERFLOW_CHECKING */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters ); + } + #endif /* portHAS_STACK_OVERFLOW_CHECKING */ + } + #endif /* portUSING_MPU_WRAPPERS */ + + if( pxCreatedTask != NULL ) + { + /* Pass the handle out in an anonymous way. 
The handle can be used to + change the created task's priority, delete the created task, etc.*/ + *pxCreatedTask = ( TaskHandle_t ) pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) +{ + /* Ensure interrupts don't access the task lists while the lists are being + updated. */ + taskENTER_CRITICAL(); + { + uxCurrentNumberOfTasks++; + if( pxCurrentTCB == NULL ) + { + /* There are no other tasks, or all the other tasks are in + the suspended state - make this the current task. */ + pxCurrentTCB = pxNewTCB; + + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + { + /* This is the first task to be created so do the preliminary + initialisation required. We will not recover if this call + fails, but we will report the failure. */ + prvInitialiseTaskLists(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If the scheduler is not already running, make this task the + current task if it is the highest priority task to be created + so far. */ + if( xSchedulerRunning == pdFALSE ) + { + if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + { + pxCurrentTCB = pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + uxTaskNumber++; + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + /* Add a counter into the TCB for tracing only. */ + pxNewTCB->uxTCBNumber = uxTaskNumber; + } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); + + prvAddTaskToReadyList( pxNewTCB ); + + portSETUP_TCB( pxNewTCB ); + } + taskEXIT_CRITICAL(); + + if( xSchedulerRunning != pdFALSE ) + { + /* If the created task is of a higher priority than the current task + then it should run now. */ + if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelete == 1 ) + + void vTaskDelete( TaskHandle_t xTaskToDelete ) + { + TCB_t *pxTCB; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the calling task that is + being deleted. */ + pxTCB = prvGetTCBFromHandle( xTaskToDelete ); + + /* Remove task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Increment the uxTaskNumber also so kernel aware debuggers can + detect that the task lists need re-generating. This is done before + portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will + not return. */ + uxTaskNumber++; + + if( pxTCB == pxCurrentTCB ) + { + /* A task is deleting itself. This cannot complete within the + task itself, as a context switch to another task is required. + Place the task in the termination list. The idle task will + check the termination list and free up any memory allocated by + the scheduler for the TCB and stack of the deleted task. 
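+				Note that only the memory the kernel allocated for the task (its
+				TCB and stack) is recovered this way; any memory the task
+				allocated for its own use must be freed by the application before
+				the task is deleted.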
*/ + vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) ); + + /* Increment the ucTasksDeleted variable so the idle task knows + there is a task that has been deleted and that it should therefore + check the xTasksWaitingTermination list. */ + ++uxDeletedTasksWaitingCleanUp; + + /* The pre-delete hook is primarily for the Windows simulator, + in which Windows specific clean up operations are performed, + after which it is not possible to yield away from this task - + hence xYieldPending is used to latch that a context switch is + required. */ + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending ); + } + else + { + --uxCurrentNumberOfTasks; + prvDeleteTCB( pxTCB ); + + /* Reset the next expected unblock time in case it referred to + the task that has just been deleted. */ + prvResetNextTaskUnblockTime(); + } + + traceTASK_DELETE( pxTCB ); + } + taskEXIT_CRITICAL(); + + /* Force a reschedule if it is the currently running task that has just + been deleted. */ + if( xSchedulerRunning != pdFALSE ) + { + if( pxTCB == pxCurrentTCB ) + { + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + +#endif /* INCLUDE_vTaskDelete */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelayUntil == 1 ) + + void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) + { + TickType_t xTimeToWake; + BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE; + + configASSERT( pxPreviousWakeTime ); + configASSERT( ( xTimeIncrement > 0U ) ); + configASSERT( uxSchedulerSuspended == 0 ); + + vTaskSuspendAll(); + { + /* Minor optimisation. The tick count cannot change in this + block. */ + const TickType_t xConstTickCount = xTickCount; + + /* Generate the tick time at which the task wants to wake. */ + xTimeToWake = *pxPreviousWakeTime + xTimeIncrement; + + if( xConstTickCount < *pxPreviousWakeTime ) + { + /* The tick count has overflowed since this function was + lasted called. In this case the only time we should ever + actually delay is if the wake time has also overflowed, + and the wake time is greater than the tick time. When this + is the case it is as if neither time had overflowed. */ + if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) ) + { + xShouldDelay = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The tick time has not overflowed. In this case we will + delay if either the wake time has overflowed, and/or the + tick time is less than the wake time. */ + if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) ) + { + xShouldDelay = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Update the wake time ready for the next call. */ + *pxPreviousWakeTime = xTimeToWake; + + if( xShouldDelay != pdFALSE ) + { + traceTASK_DELAY_UNTIL( xTimeToWake ); + + /* prvAddCurrentTaskToDelayedList() needs the block time, not + the time to wake, so subtract the current tick count. */ + prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + xAlreadyYielded = xTaskResumeAll(); + + /* Force a reschedule if xTaskResumeAll has not already done so, we may + have put ourselves to sleep. 
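+		As an illustrative usage sketch (application code, not part of the
+		kernel): a task that must unblock at a fixed 10 tick period, free of
+		drift, would typically be written as
+
+			TickType_t xLastWakeTime = xTaskGetTickCount();
+			for( ;; )
+			{
+				vTaskDelayUntil( &xLastWakeTime, 10 );
+				-- perform the periodic work here --
+			}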
*/ + if( xAlreadyYielded == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskDelayUntil */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + + void vTaskDelay( const TickType_t xTicksToDelay ) + { + BaseType_t xAlreadyYielded = pdFALSE; + + /* A delay time of zero just forces a reschedule. */ + if( xTicksToDelay > ( TickType_t ) 0U ) + { + configASSERT( uxSchedulerSuspended == 0 ); + vTaskSuspendAll(); + { + traceTASK_DELAY(); + + /* A task that is removed from the event list while the + scheduler is suspended will not get placed in the ready + list or removed from the blocked list until the scheduler + is resumed. + + This task cannot be in an event list as it is the currently + executing task. */ + prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE ); + } + xAlreadyYielded = xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Force a reschedule if xTaskResumeAll has not already done so, we may + have put ourselves to sleep. */ + if( xAlreadyYielded == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskDelay */ +/*-----------------------------------------------------------*/ + +#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) ) + + eTaskState eTaskGetState( TaskHandle_t xTask ) + { + eTaskState eReturn; + List_t const * pxStateList, *pxDelayedList, *pxOverflowedDelayedList; + const TCB_t * const pxTCB = xTask; + + configASSERT( pxTCB ); + + if( pxTCB == pxCurrentTCB ) + { + /* The task calling this function is querying its own state. */ + eReturn = eRunning; + } + else + { + taskENTER_CRITICAL(); + { + pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); + pxDelayedList = pxDelayedTaskList; + pxOverflowedDelayedList = pxOverflowDelayedTaskList; + } + taskEXIT_CRITICAL(); + + if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) + { + /* The task being queried is referenced from one of the Blocked + lists. */ + eReturn = eBlocked; + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + else if( pxStateList == &xSuspendedTaskList ) + { + /* The task being queried is referenced from the suspended + list. Is it genuinely suspended or is it blocked + indefinitely? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ) + { + #if( configUSE_TASK_NOTIFICATIONS == 1 ) + { + /* The task does not appear on the event list item of + and of the RTOS objects, but could still be in the + blocked state if it is waiting on its notification + rather than waiting on an object. */ + if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION ) + { + eReturn = eBlocked; + } + else + { + eReturn = eSuspended; + } + } + #else + { + eReturn = eSuspended; + } + #endif + } + else + { + eReturn = eBlocked; + } + } + #endif + + #if ( INCLUDE_vTaskDelete == 1 ) + else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) ) + { + /* The task being queried is referenced from the deleted + tasks list, or it is not referenced from any lists at + all. */ + eReturn = eDeleted; + } + #endif + + else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */ + { + /* If the task is not in any other state, it must be in the + Ready (including pending ready) state. 
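+				In summary the mapping is: the calling task's own handle maps to
+				eRunning, the delayed lists map to eBlocked, the suspended list
+				maps to eSuspended (or to eBlocked if the task is waiting on a
+				notification or blocked with an infinite timeout), the termination
+				list or no list at all maps to eDeleted, and anything else maps to
+				eReady.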
*/ + eReturn = eReady; + } + } + + return eReturn; + } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */ + +#endif /* INCLUDE_eTaskGetState */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + + UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask ) + { + TCB_t const *pxTCB; + UBaseType_t uxReturn; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the priority of the task + that called uxTaskPriorityGet() that is being queried. */ + pxTCB = prvGetTCBFromHandle( xTask ); + uxReturn = pxTCB->uxPriority; + } + taskEXIT_CRITICAL(); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskPriorityGet */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + + UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) + { + TCB_t const *pxTCB; + UBaseType_t uxReturn, uxSavedInterruptState; + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are keep + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. More information (albeit Cortex-M specific) is + provided on the following link: + https://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* If null is passed in here then it is the priority of the calling + task that is being queried. */ + pxTCB = prvGetTCBFromHandle( xTask ); + uxReturn = pxTCB->uxPriority; + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskPriorityGet */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskPrioritySet == 1 ) + + void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) + { + TCB_t *pxTCB; + UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; + BaseType_t xYieldRequired = pdFALSE; + + configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) ); + + /* Ensure the new priority is valid. */ + if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) + { + uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the priority of the calling + task that is being changed. 
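+			As an illustrative example (assuming INCLUDE_uxTaskPriorityGet is set
+			to 1), vTaskPrioritySet( NULL, uxTaskPriorityGet( NULL ) + 1 ) raises
+			the calling task's priority by one level, subject to the
+			configMAX_PRIORITIES clamp applied above.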
*/ + pxTCB = prvGetTCBFromHandle( xTask ); + + traceTASK_PRIORITY_SET( pxTCB, uxNewPriority ); + + #if ( configUSE_MUTEXES == 1 ) + { + uxCurrentBasePriority = pxTCB->uxBasePriority; + } + #else + { + uxCurrentBasePriority = pxTCB->uxPriority; + } + #endif + + if( uxCurrentBasePriority != uxNewPriority ) + { + /* The priority change may have readied a task of higher + priority than the calling task. */ + if( uxNewPriority > uxCurrentBasePriority ) + { + if( pxTCB != pxCurrentTCB ) + { + /* The priority of a task other than the currently + running task is being raised. Is the priority being + raised above that of the running task? */ + if( uxNewPriority >= pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The priority of the running task is being raised, + but the running task must already be the highest + priority task able to run so no yield is required. */ + } + } + else if( pxTCB == pxCurrentTCB ) + { + /* Setting the priority of the running task down means + there may now be another task of higher priority that + is ready to execute. */ + xYieldRequired = pdTRUE; + } + else + { + /* Setting the priority of any other task down does not + require a yield as the running task must be above the + new priority of the task being modified. */ + } + + /* Remember the ready list the task might be referenced from + before its uxPriority member is changed so the + taskRESET_READY_PRIORITY() macro can function correctly. */ + uxPriorityUsedOnEntry = pxTCB->uxPriority; + + #if ( configUSE_MUTEXES == 1 ) + { + /* Only change the priority being used if the task is not + currently using an inherited priority. */ + if( pxTCB->uxBasePriority == pxTCB->uxPriority ) + { + pxTCB->uxPriority = uxNewPriority; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The base priority gets set whatever. */ + pxTCB->uxBasePriority = uxNewPriority; + } + #else + { + pxTCB->uxPriority = uxNewPriority; + } + #endif + + /* Only reset the event list item value if the value is not + being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the task is in the blocked or suspended list we need do + nothing more than change its priority variable. However, if + the task is in a ready list it needs to be removed and placed + in the list appropriate to its new priority. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + /* The task is currently in its ready list - remove before + adding it to it's new ready list. As we are in a critical + section we can do this even if the scheduler is suspended. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + there is no need to check again and the port level + reset macro can be called directly. 
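+						With port optimised task selection this clears the bit for
+						uxPriorityUsedOnEntry in the uxTopReadyPriority bit map;
+						the bit for the new priority is set again when the task is
+						added back to a ready list by prvAddTaskToReadyList() below.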
*/ + portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + prvAddTaskToReadyList( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xYieldRequired != pdFALSE ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Remove compiler warning about unused variables when the port + optimised task selection is not being used. */ + ( void ) uxPriorityUsedOnEntry; + } + } + taskEXIT_CRITICAL(); + } + +#endif /* INCLUDE_vTaskPrioritySet */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + void vTaskSuspend( TaskHandle_t xTaskToSuspend ) + { + TCB_t *pxTCB; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the running task that is + being suspended. */ + pxTCB = prvGetTCBFromHandle( xTaskToSuspend ); + + traceTASK_SUSPEND( pxTCB ); + + /* Remove task from the ready/delayed list and place in the + suspended list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ); + + #if( configUSE_TASK_NOTIFICATIONS == 1 ) + { + if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task was blocked to wait for a notification, but is + now suspended, so no notification was received. */ + pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + } + #endif + } + taskEXIT_CRITICAL(); + + if( xSchedulerRunning != pdFALSE ) + { + /* Reset the next expected unblock time in case it referred to the + task that is now in the Suspended state. */ + taskENTER_CRITICAL(); + { + prvResetNextTaskUnblockTime(); + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( pxTCB == pxCurrentTCB ) + { + if( xSchedulerRunning != pdFALSE ) + { + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); + } + else + { + /* The scheduler is not running, but the task that was pointed + to by pxCurrentTCB has just been suspended and pxCurrentTCB + must be adjusted to point to a different task. */ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + { + /* No other tasks are ready, so set pxCurrentTCB back to + NULL so when the next task is created pxCurrentTCB will + be set to point to it no matter what its relative priority + is. */ + pxCurrentTCB = NULL; + } + else + { + vTaskSwitchContext(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskSuspend */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) + { + BaseType_t xReturn = pdFALSE; + const TCB_t * const pxTCB = xTask; + + /* Accesses xPendingReadyList so must be called from a critical + section. */ + + /* It does not make sense to check if the calling task is suspended. */ + configASSERT( xTask ); + + /* Is the task being resumed actually in the suspended list? 
*/ + if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + /* Has the task already been resumed from within an ISR? */ + if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE ) + { + /* Is it in the suspended list because it is in the Suspended + state, or because is is blocked with no timeout? */ + if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */ + { + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */ + +#endif /* INCLUDE_vTaskSuspend */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + void vTaskResume( TaskHandle_t xTaskToResume ) + { + TCB_t * const pxTCB = xTaskToResume; + + /* It does not make sense to resume the calling task. */ + configASSERT( xTaskToResume ); + + /* The parameter cannot be NULL as it is impossible to resume the + currently executing task. */ + if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) + { + taskENTER_CRITICAL(); + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + { + traceTASK_RESUME( pxTCB ); + + /* The ready list can be accessed even if the scheduler is + suspended because this is inside a critical section. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* A higher priority task may have just been resumed. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + /* This yield may not cause the task just resumed to run, + but will leave the lists in the correct state for the + next yield. */ + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskSuspend */ + +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) + + BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) + { + BaseType_t xYieldRequired = pdFALSE; + TCB_t * const pxTCB = xTaskToResume; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToResume ); + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are keep + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. 
More information (albeit Cortex-M specific) is + provided on the following link: + https://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + { + traceTASK_RESUME_FROM_ISR( pxTCB ); + + /* Check the ready lists can be accessed. */ + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Ready lists can be accessed so move the task from the + suspended list to the ready list directly. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed or ready lists cannot be accessed so the task + is held in the pending ready list until the scheduler is + unsuspended. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xYieldRequired; + } + +#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */ +/*-----------------------------------------------------------*/ + +void vTaskStartScheduler( void ) +{ +BaseType_t xReturn; + + /* Add the idle task at the lowest priority. */ + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + StaticTask_t *pxIdleTaskTCBBuffer = NULL; + StackType_t *pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + + /* The Idle task is created using user provided RAM - obtain the + address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandle = xTaskCreateStatic( prvIdleTask, + configIDLE_TASK_NAME, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + + if( xIdleTaskHandle != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + #else + { + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + configIDLE_TASK_NAME, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + #if ( configUSE_TIMERS == 1 ) + { + if( xReturn == pdPASS ) + { + xReturn = xTimerCreateTimerTask(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TIMERS */ + + if( xReturn == pdPASS ) + { + /* freertos_tasks_c_additions_init() should only be called if the user + definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is + the only macro called by the function. */ + #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + { + freertos_tasks_c_additions_init(); + } + #endif + + /* Interrupts are turned off here, to ensure a tick does not occur + before or during the call to xPortStartScheduler(). 
The stacks of + the created tasks contain a status word with interrupts switched on + so interrupts will automatically get re-enabled when the first task + starts to run. */ + portDISABLE_INTERRUPTS(); + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + structure specific to the task that will run first. */ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + + xNextTaskUnblockTime = portMAX_DELAY; + xSchedulerRunning = pdTRUE; + xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; + + /* If configGENERATE_RUN_TIME_STATS is defined then the following + macro must be defined to configure the timer/counter used to generate + the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS + is set to 0 and the following line fails to build then ensure you do not + have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your + FreeRTOSConfig.h file. */ + portCONFIGURE_TIMER_FOR_RUN_TIME_STATS(); + + traceTASK_SWITCHED_IN(); + + /* Setting up the timer tick is hardware specific and thus in the + portable interface. */ + if( xPortStartScheduler() != pdFALSE ) + { + /* Should not reach here as if the scheduler is running the + function will not return. */ + } + else + { + /* Should only reach here if a task calls xTaskEndScheduler(). */ + } + } + else + { + /* This line will only be reached if the kernel could not be started, + because there was not enough FreeRTOS heap to create the idle task + or the timer task. */ + configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY ); + } + + /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0, + meaning xIdleTaskHandle is not used anywhere else. */ + ( void ) xIdleTaskHandle; +} +/*-----------------------------------------------------------*/ + +void vTaskEndScheduler( void ) +{ + /* Stop the scheduler interrupts and call the portable scheduler end + routine so the original ISRs can be restored if necessary. The port + layer must ensure interrupts enable bit is left in the correct state. */ + portDISABLE_INTERRUPTS(); + xSchedulerRunning = pdFALSE; + vPortEndScheduler(); +} +/*----------------------------------------------------------*/ + +void vTaskSuspendAll( void ) +{ + /* A critical section is not required as the variable is of type + BaseType_t. Please read Richard Barry's reply in the following link to a + post in the FreeRTOS support forum before reporting this as a bug! - + http://goo.gl/wu4acr */ + ++uxSchedulerSuspended; + portMEMORY_BARRIER(); +} +/*----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE != 0 ) + + static TickType_t prvGetExpectedIdleTime( void ) + { + TickType_t xReturn; + UBaseType_t uxHigherPriorityReadyTasks = pdFALSE; + + /* uxHigherPriorityReadyTasks takes care of the case where + configUSE_PREEMPTION is 0, so there may be tasks above the idle priority + task that are in the Ready state, even though the idle task is + running. */ + #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + { + if( uxTopReadyPriority > tskIDLE_PRIORITY ) + { + uxHigherPriorityReadyTasks = pdTRUE; + } + } + #else + { + const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01; + + /* When port optimised task selection is used the uxTopReadyPriority + variable is used as a bit map. If bits other than the least + significant bit are set then there are tasks that have a priority + above the idle priority that are in the Ready state. 
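+		(For example, if only the idle task is ready then uxTopReadyPriority
+		holds just the least significant bit, 0x01; a value such as 0x05 would
+		indicate that a priority 2 task is also ready.)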
This takes + care of the case where the co-operative scheduler is in use. */ + if( uxTopReadyPriority > uxLeastSignificantBit ) + { + uxHigherPriorityReadyTasks = pdTRUE; + } + } + #endif + + if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY ) + { + xReturn = 0; + } + else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 ) + { + /* There are other idle priority tasks in the ready state. If + time slicing is used then the very next tick interrupt must be + processed. */ + xReturn = 0; + } + else if( uxHigherPriorityReadyTasks != pdFALSE ) + { + /* There are tasks in the Ready state that have a priority above the + idle priority. This path can only be reached if + configUSE_PREEMPTION is 0. */ + xReturn = 0; + } + else + { + xReturn = xNextTaskUnblockTime - xTickCount; + } + + return xReturn; + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskResumeAll( void ) +{ +TCB_t *pxTCB = NULL; +BaseType_t xAlreadyYielded = pdFALSE; + + /* If uxSchedulerSuspended is zero then this function does not match a + previous call to vTaskSuspendAll(). */ + configASSERT( uxSchedulerSuspended ); + + /* It is possible that an ISR caused a task to be removed from an event + list while the scheduler was suspended. If this was the case then the + removed task will have been added to the xPendingReadyList. Once the + scheduler has been resumed it is safe to move all the pending ready + tasks from this list into their appropriate ready list. */ + taskENTER_CRITICAL(); + { + --uxSchedulerSuspended; + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) + { + /* Move any readied tasks from the pending list into the + appropriate ready list. */ + while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* If the moved task has a priority higher than the current + task then a yield must be performed. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( pxTCB != NULL ) + { + /* A task was unblocked while the scheduler was suspended, + which may have prevented the next unblock time from being + re-calculated, in which case re-calculate it now. Mainly + important for low power tickless implementations, where + this can prevent an unnecessary exit from low power + state. */ + prvResetNextTaskUnblockTime(); + } + + /* If any ticks occurred while the scheduler was suspended then + they should be processed now. This ensures the tick count does + not slip, and that any delayed tasks are resumed at the correct + time. */ + { + UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. 
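+				uxPendedTicks itself is declared volatile; taking a local copy
+				avoids re-reading it on every pass of the loop below, which is
+				safe because this code runs inside a critical section.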
*/ + + if( uxPendedCounts > ( UBaseType_t ) 0U ) + { + do + { + if( xTaskIncrementTick() != pdFALSE ) + { + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --uxPendedCounts; + } while( uxPendedCounts > ( UBaseType_t ) 0U ); + + uxPendedTicks = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( xYieldPending != pdFALSE ) + { + #if( configUSE_PREEMPTION != 0 ) + { + xAlreadyYielded = pdTRUE; + } + #endif + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + return xAlreadyYielded; +} +/*-----------------------------------------------------------*/ + +TickType_t xTaskGetTickCount( void ) +{ +TickType_t xTicks; + + /* Critical section required if running on a 16 bit processor. */ + portTICK_TYPE_ENTER_CRITICAL(); + { + xTicks = xTickCount; + } + portTICK_TYPE_EXIT_CRITICAL(); + + return xTicks; +} +/*-----------------------------------------------------------*/ + +TickType_t xTaskGetTickCountFromISR( void ) +{ +TickType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = xTickCount; + } + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxTaskGetNumberOfTasks( void ) +{ + /* A critical section is not required because the variables are of type + BaseType_t. */ + return uxCurrentNumberOfTasks; +} +/*-----------------------------------------------------------*/ + +char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +{ +TCB_t *pxTCB; + + /* If null is passed in here then the name of the calling task is being + queried. */ + pxTCB = prvGetTCBFromHandle( xTaskToQuery ); + configASSERT( pxTCB ); + return &( pxTCB->pcTaskName[ 0 ] ); +} +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetHandle == 1 ) + + static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) + { + TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL; + UBaseType_t x; + char cNextChar; + BaseType_t xBreakLoop; + + /* This function is called with the scheduler suspended. 
*/ + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + do + { + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Check each character in the name looking for a match or + mismatch. */ + xBreakLoop = pdFALSE; + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + cNextChar = pxNextTCB->pcTaskName[ x ]; + + if( cNextChar != pcNameToQuery[ x ] ) + { + /* Characters didn't match. */ + xBreakLoop = pdTRUE; + } + else if( cNextChar == ( char ) 0x00 ) + { + /* Both strings terminated, a match must have been + found. */ + pxReturn = pxNextTCB; + xBreakLoop = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xBreakLoop != pdFALSE ) + { + break; + } + } + + if( pxReturn != NULL ) + { + /* The handle has been found. */ + break; + } + + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return pxReturn; + } + +#endif /* INCLUDE_xTaskGetHandle */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetHandle == 1 ) + + TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t uxQueue = configMAX_PRIORITIES; + TCB_t* pxTCB; + + /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */ + configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN ); + + vTaskSuspendAll(); + { + /* Search the ready lists. */ + do + { + uxQueue--; + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery ); + + if( pxTCB != NULL ) + { + /* Found the handle. */ + break; + } + + } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + /* Search the delayed lists. */ + if( pxTCB == NULL ) + { + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery ); + } + + if( pxTCB == NULL ) + { + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery ); + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + if( pxTCB == NULL ) + { + /* Search the suspended list. */ + pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery ); + } + } + #endif + + #if( INCLUDE_vTaskDelete == 1 ) + { + if( pxTCB == NULL ) + { + /* Search the deleted list. */ + pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery ); + } + } + #endif + } + ( void ) xTaskResumeAll(); + + return pxTCB; + } + +#endif /* INCLUDE_xTaskGetHandle */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) + { + UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES; + + vTaskSuspendAll(); + { + /* Is there a space in the array for each task in the system? 
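+		An illustrative usage sketch (application code, not part of the kernel),
+		assuming enough FreeRTOS heap is available for the status array:
+
+			UBaseType_t uxCount = uxTaskGetNumberOfTasks();
+			TaskStatus_t *pxStatusArray = pvPortMalloc( uxCount * sizeof( TaskStatus_t ) );
+
+			if( pxStatusArray != NULL )
+			{
+				uxCount = uxTaskGetSystemState( pxStatusArray, uxCount, NULL );
+				-- inspect pxStatusArray[ 0 ] .. pxStatusArray[ uxCount - 1 ] --
+				vPortFree( pxStatusArray );
+			}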
*/ + if( uxArraySize >= uxCurrentNumberOfTasks ) + { + /* Fill in an TaskStatus_t structure with information on each + task in the Ready state. */ + do + { + uxQueue--; + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady ); + + } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + /* Fill in an TaskStatus_t structure with information on each + task in the Blocked state. */ + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked ); + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked ); + + #if( INCLUDE_vTaskDelete == 1 ) + { + /* Fill in an TaskStatus_t structure with information on + each task that has been deleted but not yet cleaned up. */ + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted ); + } + #endif + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + /* Fill in an TaskStatus_t structure with information on + each task in the Suspended state. */ + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended ); + } + #endif + + #if ( configGENERATE_RUN_TIME_STATS == 1) + { + if( pulTotalRunTime != NULL ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) ); + #else + *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + } + } + #else + { + if( pulTotalRunTime != NULL ) + { + *pulTotalRunTime = 0; + } + } + #endif + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + ( void ) xTaskResumeAll(); + + return uxTask; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + + TaskHandle_t xTaskGetIdleTaskHandle( void ) + { + /* If xTaskGetIdleTaskHandle() is called before the scheduler has been + started, then xIdleTaskHandle will be NULL. */ + configASSERT( ( xIdleTaskHandle != NULL ) ); + return xIdleTaskHandle; + } + +#endif /* INCLUDE_xTaskGetIdleTaskHandle */ +/*----------------------------------------------------------*/ + +/* This conditional compilation should use inequality to 0, not equality to 1. +This is to ensure vTaskStepTick() is available when user defined low power mode +implementations require configUSE_TICKLESS_IDLE to be set to a value other than +1. */ +#if ( configUSE_TICKLESS_IDLE != 0 ) + + void vTaskStepTick( const TickType_t xTicksToJump ) + { + /* Correct the tick count value after a period during which the tick + was suppressed. Note this does *not* call the tick hook function for + each stepped tick. */ + configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime ); + xTickCount += xTicksToJump; + traceINCREASE_TICK_COUNT( xTicksToJump ); + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + + BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) + { + TCB_t *pxTCB = xTask; + BaseType_t xReturn; + + configASSERT( pxTCB ); + + vTaskSuspendAll(); + { + /* A task can only be prematurely removed from the Blocked state if + it is actually in the Blocked state. */ + if( eTaskGetState( xTask ) == eBlocked ) + { + xReturn = pdPASS; + + /* Remove the reference to the task from the blocked list. 
An + interrupt won't touch the xStateListItem because the + scheduler is suspended. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove it from + the event list too. Interrupts can touch the event list item, + even though the scheduler is suspended, so a critical section + is used. */ + taskENTER_CRITICAL(); + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + pxTCB->ucDelayAborted = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + /* Place the unblocked task into the appropriate ready list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate context + switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + /* Preemption is on, but a context switch should only be + performed if the unblocked task has a priority that is + equal to or higher than the currently executing task. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* Pend the yield to be performed when the scheduler + is unsuspended. */ + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ + } + else + { + xReturn = pdFAIL; + } + } + ( void ) xTaskResumeAll(); + + return xReturn; + } + +#endif /* INCLUDE_xTaskAbortDelay */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskIncrementTick( void ) +{ +TCB_t * pxTCB; +TickType_t xItemValue; +BaseType_t xSwitchRequired = pdFALSE; + + /* Called by the portable layer each time a tick interrupt occurs. + Increments the tick then checks to see if the new tick value will cause any + tasks to be unblocked. */ + traceTASK_INCREMENT_TICK( xTickCount ); + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Minor optimisation. The tick count cannot change in this + block. */ + const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; + + /* Increment the RTOS tick, switching the delayed and overflowed + delayed lists if it wraps to 0. */ + xTickCount = xConstTickCount; + + if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ + { + taskSWITCH_DELAYED_LISTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* See if this tick has made a timeout expire. Tasks are stored in + the queue in the order of their wake time - meaning once one task + has been found whose block time has not expired there is no need to + look any further down the list. */ + if( xConstTickCount >= xNextTaskUnblockTime ) + { + for( ;; ) + { + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The delayed list is empty. Set xNextTaskUnblockTime + to the maximum possible value so it is extremely + unlikely that the + if( xTickCount >= xNextTaskUnblockTime ) test will pass + next time through. */ + xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + break; + } + else + { + /* The delayed list is not empty, get the value of the + item at the head of the delayed list. This is the time + at which the task at the head of the delayed list must + be removed from the Blocked state. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. 
Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); + + if( xConstTickCount < xItemValue ) + { + /* It is not time to unblock this item yet, but the + item value is the time at which the task at the head + of the blocked list must be removed from the Blocked + state - so record the item value in + xNextTaskUnblockTime. */ + xNextTaskUnblockTime = xItemValue; + break; /*lint !e9011 Code structure here is deedmed easier to understand with multiple breaks. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* It is time to remove the item from the Blocked state. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove + it from the event list. */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Place the unblocked task into the appropriate ready + list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate + context switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + /* Preemption is on, but a context switch should + only be performed if the unblocked task has a + priority that is equal to or higher than the + currently executing task. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ + } + } + } + + /* Tasks of equal priority to the currently running task will share + processing time (time slice) if preemption is on, and the application + writer has not explicitly turned time slicing off. */ + #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + + #if ( configUSE_TICK_HOOK == 1 ) + { + /* Guard against the tick hook being called when the pended tick + count is being unwound (when the scheduler is being unlocked). */ + if( uxPendedTicks == ( UBaseType_t ) 0U ) + { + vApplicationTickHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICK_HOOK */ + } + else + { + ++uxPendedTicks; + + /* The tick hook gets called at regular intervals, even if the + scheduler is locked. */ + #if ( configUSE_TICK_HOOK == 1 ) + { + vApplicationTickHook(); + } + #endif + } + + #if ( configUSE_PREEMPTION == 1 ) + { + if( xYieldPending != pdFALSE ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ + + return xSwitchRequired; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) + { + TCB_t *xTCB; + + /* If xTask is NULL then it is the task hook of the calling task that is + getting set. */ + if( xTask == NULL ) + { + xTCB = ( TCB_t * ) pxCurrentTCB; + } + else + { + xTCB = xTask; + } + + /* Save the hook function in the TCB. A critical section is required as + the value can be accessed from an interrupt. 
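+		As an illustrative example (prvExampleHook is a hypothetical application
+		function with the TaskHookFunction_t prototype), calling
+
+			vTaskSetApplicationTaskTag( NULL, prvExampleHook );
+
+		tags the calling task, after which
+		xTaskCallApplicationTaskHook( NULL, pvData ) will invoke
+		prvExampleHook( pvData ).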
*/ + taskENTER_CRITICAL(); + { + xTCB->pxTaskTag = pxHookFunction; + } + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + TaskHookFunction_t xReturn; + + /* If xTask is NULL then set the calling task's hook. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + /* Save the hook function in the TCB. A critical section is required as + the value can be accessed from an interrupt. */ + taskENTER_CRITICAL(); + { + xReturn = pxTCB->pxTaskTag; + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + TaskHookFunction_t xReturn; + UBaseType_t uxSavedInterruptStatus; + + /* If xTask is NULL then set the calling task's hook. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + /* Save the hook function in the TCB. A critical section is required as + the value can be accessed from an interrupt. */ + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = pxTCB->pxTaskTag; + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) + { + TCB_t *xTCB; + BaseType_t xReturn; + + /* If xTask is NULL then we are calling our own task hook. */ + if( xTask == NULL ) + { + xTCB = pxCurrentTCB; + } + else + { + xTCB = xTask; + } + + if( xTCB->pxTaskTag != NULL ) + { + xReturn = xTCB->pxTaskTag( pvParameter ); + } + else + { + xReturn = pdFAIL; + } + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +void vTaskSwitchContext( void ) +{ + if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + { + /* The scheduler is currently suspended - do not allow a context + switch. */ + xYieldPending = pdTRUE; + } + else + { + xYieldPending = pdFALSE; + traceTASK_SWITCHED_OUT(); + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + + /* Add the amount of time the task has been running to the + accumulated time so far. The time the task started running was + stored in ulTaskSwitchedInTime. Note that there is no overflow + protection here so count values are only valid until the timer + overflows. The guard against negative values is to protect + against suspect run time stat counter implementations - which + are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + ulTaskSwitchedInTime = ulTotalRunTime; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); + + /* Before the currently running task is switched out, save its errno. 
*/ + #if( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; + } + #endif + + /* Select a new task to run using either the generic C or port + optimised asm code. */ + taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + traceTASK_SWITCHED_IN(); + + /* After the new task is switched in, update the global errno. */ + #if( configUSE_POSIX_ERRNO == 1 ) + { + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; + } + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + structure specific to this task. */ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + } +} +/*-----------------------------------------------------------*/ + +void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait ) +{ + configASSERT( pxEventList ); + + /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE + SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ + + /* Place the event list item of the TCB in the appropriate event list. + This is placed in the list in priority order so the highest priority task + is the first to be woken by the event. The queue that contains the event + list is locked, preventing simultaneous access from interrupts. */ + vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); +} +/*-----------------------------------------------------------*/ + +void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait ) +{ + configASSERT( pxEventList ); + + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by + the event groups implementation. */ + configASSERT( uxSchedulerSuspended != 0 ); + + /* Store the item value in the event list item. It is safe to access the + event list item here as interrupts won't access the event list item of a + task that is not in the Blocked state. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); + + /* Place the event list item of the TCB at the end of the appropriate event + list. It is safe to access the event list here because it is part of an + event group implementation - and interrupts don't access event groups + directly (instead they access them indirectly by pending function calls to + the task level). */ + vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TIMERS == 1 ) + + void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) + { + configASSERT( pxEventList ); + + /* This function should not be called by application code hence the + 'Restricted' in its name. It is not part of the public API. It is + designed for use by kernel code, and has special calling requirements - + it should be called with the scheduler suspended. */ + + + /* Place the event list item of the TCB in the appropriate event list. + In this case it is assume that this is the only task that is going to + be waiting on this event list, so the faster vListInsertEnd() function + can be used in place of vListInsert. 
*/ + vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + + /* If the task should block indefinitely then set the block time to a + value that will be recognised as an indefinite delay inside the + prvAddCurrentTaskToDelayedList() function. */ + if( xWaitIndefinitely != pdFALSE ) + { + xTicksToWait = portMAX_DELAY; + } + + traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); + prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); + } + +#endif /* configUSE_TIMERS */ +/*-----------------------------------------------------------*/ + +BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) +{ +TCB_t *pxUnblockedTCB; +BaseType_t xReturn; + + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be + called from a critical section within an ISR. */ + + /* The event list is sorted in priority order, so the first in the list can + be removed as it is known to be the highest priority. Remove the TCB from + the delayed list, and add it to the ready list. + + If an event is for a queue that is locked then this function will never + get called - the lock count on the queue will get modified instead. This + means exclusive access to the event list is guaranteed here. + + This function assumes that a check has already been made to ensure that + pxEventList is not empty. */ + pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + configASSERT( pxUnblockedTCB ); + ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + #if( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked on a kernel object then xNextTaskUnblockTime + might be set to the blocked task's time out time. If the task is + unblocked for a reason other than a timeout xNextTaskUnblockTime is + normally left unchanged, because it is automatically reset to a new + value when the tick count equals xNextTaskUnblockTime. However if + tickless idling is used it might be more important to enter sleep mode + at the earliest possible time - so reset xNextTaskUnblockTime here to + ensure it is updated at the earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif + } + else + { + /* The delayed and ready lists cannot be accessed, so hold this task + pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) ); + } + + if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* Return true if the task removed from the event list has a higher + priority than the calling task. This allows the calling task to know if + it should force a context switch now. */ + xReturn = pdTRUE; + + /* Mark that a yield is pending in case the user is not using the + "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) +{ +TCB_t *pxUnblockedTCB; + + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by + the event flags implementation. 
*/ + configASSERT( uxSchedulerSuspended != pdFALSE ); + + /* Store the new item value in the event list. */ + listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); + + /* Remove the event list form the event flag. Interrupts do not access + event flags. */ + pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + configASSERT( pxUnblockedTCB ); + ( void ) uxListRemove( pxEventListItem ); + + /* Remove the task from the delayed list and add it to the ready list. The + scheduler is suspended so interrupts will not be accessing the ready + lists. */ + ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The unblocked task has a priority above that of the calling task, so + a context switch is required. This function is called with the + scheduler suspended so xYieldPending is set so the context switch + occurs immediately that the scheduler is resumed (unsuspended). */ + xYieldPending = pdTRUE; + } +} +/*-----------------------------------------------------------*/ + +void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) +{ + configASSERT( pxTimeOut ); + taskENTER_CRITICAL(); + { + pxTimeOut->xOverflowCount = xNumOfOverflows; + pxTimeOut->xTimeOnEntering = xTickCount; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) +{ + /* For internal use only as it does not use a critical section. */ + pxTimeOut->xOverflowCount = xNumOfOverflows; + pxTimeOut->xTimeOnEntering = xTickCount; +} +/*-----------------------------------------------------------*/ + +BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) +{ +BaseType_t xReturn; + + configASSERT( pxTimeOut ); + configASSERT( pxTicksToWait ); + + taskENTER_CRITICAL(); + { + /* Minor optimisation. The tick count cannot change in this block. */ + const TickType_t xConstTickCount = xTickCount; + const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering; + + #if( INCLUDE_xTaskAbortDelay == 1 ) + if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE ) + { + /* The delay was aborted, which is not the same as a time out, + but has the same result. */ + pxCurrentTCB->ucDelayAborted = pdFALSE; + xReturn = pdTRUE; + } + else + #endif + + #if ( INCLUDE_vTaskSuspend == 1 ) + if( *pxTicksToWait == portMAX_DELAY ) + { + /* If INCLUDE_vTaskSuspend is set to 1 and the block time + specified is the maximum block time then the task should block + indefinitely, and therefore never time out. */ + xReturn = pdFALSE; + } + else + #endif + + if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */ + { + /* The tick count is greater than the time at which + vTaskSetTimeout() was called, but has also overflowed since + vTaskSetTimeOut() was called. It must have wrapped all the way + around and gone past again. This passed since vTaskSetTimeout() + was called. 
*/ + xReturn = pdTRUE; + } + else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */ + { + /* Not a genuine timeout. Adjust parameters for time remaining. */ + *pxTicksToWait -= xElapsedTime; + vTaskInternalSetTimeOutState( pxTimeOut ); + xReturn = pdFALSE; + } + else + { + *pxTicksToWait = 0; + xReturn = pdTRUE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vTaskMissedYield( void ) +{ + xYieldPending = pdTRUE; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) + { + UBaseType_t uxReturn; + TCB_t const *pxTCB; + + if( xTask != NULL ) + { + pxTCB = xTask; + uxReturn = pxTCB->uxTaskNumber; + } + else + { + uxReturn = 0U; + } + + return uxReturn; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle ) + { + TCB_t * pxTCB; + + if( xTask != NULL ) + { + pxTCB = xTask; + pxTCB->uxTaskNumber = uxHandle; + } + } + +#endif /* configUSE_TRACE_FACILITY */ + +/* + * ----------------------------------------------------------- + * The Idle task. + * ---------------------------------------------------------- + * + * The portTASK_FUNCTION() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: + * + * void prvIdleTask( void *pvParameters ); + * + */ +static portTASK_FUNCTION( prvIdleTask, pvParameters ) +{ + /* Stop warnings. */ + ( void ) pvParameters; + + /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE + SCHEDULER IS STARTED. **/ + + /* In case a task that has a secure context deletes itself, in which case + the idle task is responsible for deleting the task's secure context, if + any. */ + portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE ); + + for( ;; ) + { + /* See if any tasks have deleted themselves - if so then the idle task + is responsible for freeing the deleted task's TCB and stack. */ + prvCheckTasksWaitingTermination(); + + #if ( configUSE_PREEMPTION == 0 ) + { + /* If we are not using preemption we keep forcing a task switch to + see if any other task has become available. If we are using + preemption we don't need to do this as any task becoming available + will automatically get the processor anyway. */ + taskYIELD(); + } + #endif /* configUSE_PREEMPTION */ + + #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + { + /* When using preemption tasks of equal priority will be + timesliced. If a task that is sharing the idle priority is ready + to run then the idle task should yield before the end of the + timeslice. + + A critical region is not required here as we are just reading from + the list, and an occasional incorrect value will not matter. If + the ready list at the idle priority contains more than one task + then a task other than the idle task is ready to execute. 
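vTaskSetTimeOutState() and xTaskCheckForTimeOut(), implemented above, are the pattern used to make a total block time survive several partial waits without drifting. A hedged application-side sketch of that pattern (xDataAvailable() is a hypothetical polling helper):

#include "FreeRTOS.h"
#include "task.h"

extern BaseType_t xDataAvailable( void );   /* Hypothetical helper. */

/* Wait up to xTicksToWait for data, re-checking after every wake-up
   without the total wait time growing. */
BaseType_t xWaitForData( TickType_t xTicksToWait )
{
    TimeOut_t xTimeOut;

    vTaskSetTimeOutState( &xTimeOut );   /* Capture the start time and overflow count. */

    while( xDataAvailable() == pdFALSE )
    {
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
        {
            return pdFALSE;   /* The full timeout elapsed. */
        }

        /* xTicksToWait now holds only the time remaining. */
        vTaskDelay( 1 );
    }

    return pdTRUE;
}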
*/ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 ) + { + taskYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ + + #if ( configUSE_IDLE_HOOK == 1 ) + { + extern void vApplicationIdleHook( void ); + + /* Call the user defined function from within the idle task. This + allows the application designer to add background functionality + without the overhead of a separate task. + NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationIdleHook(); + } + #endif /* configUSE_IDLE_HOOK */ + + /* This conditional compilation should use inequality to 0, not equality + to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when + user defined low power mode implementations require + configUSE_TICKLESS_IDLE to be set to a value other than 1. */ + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + TickType_t xExpectedIdleTime; + + /* It is not desirable to suspend then resume the scheduler on + each iteration of the idle task. Therefore, a preliminary + test of the expected idle time is performed without the + scheduler suspended. The result here is not necessarily + valid. */ + xExpectedIdleTime = prvGetExpectedIdleTime(); + + if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) + { + vTaskSuspendAll(); + { + /* Now the scheduler is suspended, the expected idle + time can be sampled again, and this time its value can + be used. */ + configASSERT( xNextTaskUnblockTime >= xTickCount ); + xExpectedIdleTime = prvGetExpectedIdleTime(); + + /* Define the following macro to set xExpectedIdleTime to 0 + if the application does not want + portSUPPRESS_TICKS_AND_SLEEP() to be called. */ + configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime ); + + if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) + { + traceLOW_POWER_IDLE_BEGIN(); + portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ); + traceLOW_POWER_IDLE_END(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICKLESS_IDLE */ + } +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TICKLESS_IDLE != 0 ) + + eSleepModeStatus eTaskConfirmSleepModeStatus( void ) + { + /* The idle task exists in addition to the application tasks. */ + const UBaseType_t uxNonApplicationTasks = 1; + eSleepModeStatus eReturn = eStandardSleep; + + if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 ) + { + /* A task was made ready while the scheduler was suspended. */ + eReturn = eAbortSleep; + } + else if( xYieldPending != pdFALSE ) + { + /* A yield was pended while the scheduler was suspended. */ + eReturn = eAbortSleep; + } + else + { + /* If all the tasks are in the suspended list (which might mean they + have an infinite block time rather than actually being suspended) + then it is safe to turn all clocks off and just wait for external + interrupts. 
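The idle task above calls vApplicationIdleHook() on every pass of its loop when configUSE_IDLE_HOOK is 1. A minimal sketch of such an application-supplied hook (the counter and its use are hypothetical); the hook must never call a blocking API:

#include <stdint.h>
#include "FreeRTOS.h"

static volatile uint32_t ulIdleCycleCount = 0;

/* Called from prvIdleTask() when configUSE_IDLE_HOOK == 1. */
void vApplicationIdleHook( void )
{
    /* Non-blocking background work only - here a counter the application
       could sample to estimate CPU load. */
    ulIdleCycleCount++;
}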
*/ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) ) + { + eReturn = eNoTasksWaitingTimeout; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return eReturn; + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) + { + TCB_t *pxTCB; + + if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + { + pxTCB = prvGetTCBFromHandle( xTaskToSet ); + pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue; + } + } + +#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) + { + void *pvReturn = NULL; + TCB_t *pxTCB; + + if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + { + pxTCB = prvGetTCBFromHandle( xTaskToQuery ); + pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ]; + } + else + { + pvReturn = NULL; + } + + return pvReturn; + } + +#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */ +/*-----------------------------------------------------------*/ + +#if ( portUSING_MPU_WRAPPERS == 1 ) + + void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions ) + { + TCB_t *pxTCB; + + /* If null is passed in here then we are modifying the MPU settings of + the calling task. */ + pxTCB = prvGetTCBFromHandle( xTaskToModify ); + + vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 ); + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseTaskLists( void ) +{ +UBaseType_t uxPriority; + + for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ ) + { + vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) ); + } + + vListInitialise( &xDelayedTaskList1 ); + vListInitialise( &xDelayedTaskList2 ); + vListInitialise( &xPendingReadyList ); + + #if ( INCLUDE_vTaskDelete == 1 ) + { + vListInitialise( &xTasksWaitingTermination ); + } + #endif /* INCLUDE_vTaskDelete */ + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + vListInitialise( &xSuspendedTaskList ); + } + #endif /* INCLUDE_vTaskSuspend */ + + /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList + using list2. */ + pxDelayedTaskList = &xDelayedTaskList1; + pxOverflowDelayedTaskList = &xDelayedTaskList2; +} +/*-----------------------------------------------------------*/ + +static void prvCheckTasksWaitingTermination( void ) +{ + + /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/ + + #if ( INCLUDE_vTaskDelete == 1 ) + { + TCB_t *pxTCB; + + /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL() + being called too often in the idle task. */ + while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) + { + taskENTER_CRITICAL(); + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
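The thread-local storage accessors above give every task configNUM_THREAD_LOCAL_STORAGE_POINTERS private void pointer slots. An application-side sketch (slot index 0 and the context structure are arbitrary choices; assumes configNUM_THREAD_LOCAL_STORAGE_POINTERS >= 1):

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"

typedef struct { uint32_t ulErrorCount; } TaskContext_t;

void vExampleTask( void *pvParameters )
{
    static TaskContext_t xContext = { 0 };
    ( void ) pvParameters;

    /* Store a pointer that only this task will read back (NULL = calling task). */
    vTaskSetThreadLocalStoragePointer( NULL, 0, &xContext );

    for( ;; )
    {
        TaskContext_t *pxCtx = pvTaskGetThreadLocalStoragePointer( NULL, 0 );
        pxCtx->ulErrorCount++;
        vTaskDelay( pdMS_TO_TICKS( 100 ) );
    }
}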
*/ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + } + taskEXIT_CRITICAL(); + + prvDeleteTCB( pxTCB ); + } + } + #endif /* INCLUDE_vTaskDelete */ +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TRACE_FACILITY == 1 ) + + void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) + { + TCB_t *pxTCB; + + /* xTask is NULL then get the state of the calling task. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB; + pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] ); + pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority; + pxTaskStatus->pxStackBase = pxTCB->pxStack; + pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; + + #if ( configUSE_MUTEXES == 1 ) + { + pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority; + } + #else + { + pxTaskStatus->uxBasePriority = 0; + } + #endif + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter; + } + #else + { + pxTaskStatus->ulRunTimeCounter = 0; + } + #endif + + /* Obtaining the task state is a little fiddly, so is only done if the + value of eState passed into this function is eInvalid - otherwise the + state is just set to whatever is passed in. */ + if( eState != eInvalid ) + { + if( pxTCB == pxCurrentTCB ) + { + pxTaskStatus->eCurrentState = eRunning; + } + else + { + pxTaskStatus->eCurrentState = eState; + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + /* If the task is in the suspended list then there is a + chance it is actually just blocked indefinitely - so really + it should be reported as being in the Blocked state. */ + if( eState == eSuspended ) + { + vTaskSuspendAll(); + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + pxTaskStatus->eCurrentState = eBlocked; + } + } + ( void ) xTaskResumeAll(); + } + } + #endif /* INCLUDE_vTaskSuspend */ + } + } + else + { + pxTaskStatus->eCurrentState = eTaskGetState( pxTCB ); + } + + /* Obtaining the stack space takes some time, so the xGetFreeStackSpace + parameter is provided to allow it to be skipped. */ + if( xGetFreeStackSpace != pdFALSE ) + { + #if ( portSTACK_GROWTH > 0 ) + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack ); + } + #else + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack ); + } + #endif + } + else + { + pxTaskStatus->usStackHighWaterMark = 0; + } + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) + { + configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB; + UBaseType_t uxTask = 0; + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Populate an TaskStatus_t structure within the + pxTaskStatusArray array for each task that is referenced from + pxList. See the definition of TaskStatus_t in task.h for the + meaning of each TaskStatus_t structure member. 
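vTaskGetInfo(), implemented above, fills a single TaskStatus_t for one task. A short sketch of querying the calling task (requires configUSE_TRACE_FACILITY set to 1; the function name is hypothetical):

#include "FreeRTOS.h"
#include "task.h"

void vInspectOwnStatus( void )
{
    TaskStatus_t xStatus;

    /* NULL = the calling task, pdTRUE = also compute the stack high water
       mark, eInvalid = let the kernel work out the task state itself. */
    vTaskGetInfo( NULL, &xStatus, pdTRUE, eInvalid );

    /* xStatus.pcTaskName, xStatus.uxCurrentPriority and
       xStatus.usStackHighWaterMark can now be inspected or logged. */
}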
*/ + do + { + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState ); + uxTask++; + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return uxTask; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) + + static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) + { + uint32_t ulCount = 0U; + + while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE ) + { + pucStackByte -= portSTACK_GROWTH; + ulCount++; + } + + ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */ + + return ( configSTACK_DEPTH_TYPE ) ulCount; + } + +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + + /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + same except for their return type. Using configSTACK_DEPTH_TYPE allows the + user to determine the return type. It gets around the problem of the value + overflowing on 8-bit types without breaking backward compatibility for + applications that expect an 8-bit return type. */ + configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + uint8_t *pucEndOfStack; + configSTACK_DEPTH_TYPE uxReturn; + + /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are + the same except for their return type. Using configSTACK_DEPTH_TYPE + allows the user to determine the return type. It gets around the + problem of the value overflowing on 8-bit types without breaking + backward compatibility for applications that expect an 8-bit return + type. */ + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if portSTACK_GROWTH < 0 + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxStack; + } + #else + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack; + } + #endif + + uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + + UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + uint8_t *pucEndOfStack; + UBaseType_t uxReturn; + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if portSTACK_GROWTH < 0 + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxStack; + } + #else + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack; + } + #endif + + uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskGetStackHighWaterMark */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelete == 1 ) + + static void prvDeleteTCB( TCB_t *pxTCB ) + { + /* This call is required specifically for the TriCore port. It must be + above the vPortFree() calls. 
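uxTaskGetStackHighWaterMark(), implemented above, reports the smallest amount of stack that has ever remained unused, in StackType_t words rather than bytes. A hedged sketch (the 32-word threshold is an arbitrary example):

#include "FreeRTOS.h"
#include "task.h"

void vCheckOwnStack( void )
{
    /* NULL = the calling task; the result is in words, not bytes. */
    UBaseType_t uxUnusedWords = uxTaskGetStackHighWaterMark( NULL );

    if( uxUnusedWords < ( UBaseType_t ) 32 )
    {
        /* Hypothetical response - trap so the marginal stack allocation
           is noticed during development. */
        configASSERT( pdFALSE );
    }
}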
The call is also used by ports/demos that + want to allocate and clean RAM statically. */ + portCLEAN_UP_TCB( pxTCB ); + + /* Free up the memory allocated by the scheduler for the task. It is up + to the task to free any memory allocated at the application level. */ + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + _reclaim_reent( &( pxTCB->xNewLib_reent ) ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) + { + /* The task can only have been allocated dynamically - free both + the stack and TCB. */ + vPortFree( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ + { + /* The task could have been allocated statically or dynamically, so + check what was statically allocated before trying to free the + memory. */ + if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ) + { + /* Both the stack and TCB were allocated dynamically, so both + must be freed. */ + vPortFree( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY ) + { + /* Only the stack was statically allocated, so the TCB is the + only memory that must be freed. */ + vPortFree( pxTCB ); + } + else + { + /* Neither the stack nor the TCB were allocated dynamically, so + nothing needs to be freed. */ + configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB ); + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + } + +#endif /* INCLUDE_vTaskDelete */ +/*-----------------------------------------------------------*/ + +static void prvResetNextTaskUnblockTime( void ) +{ +TCB_t *pxTCB; + + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The new current delayed list is empty. Set xNextTaskUnblockTime to + the maximum possible value so it is extremely unlikely that the + if( xTickCount >= xNextTaskUnblockTime ) test will pass until + there is an item in the delayed list. */ + xNextTaskUnblockTime = portMAX_DELAY; + } + else + { + /* The new current delayed list is not empty, get the value of + the item at the head of the delayed list. This is the time at + which the task at the head of the delayed list should be removed + from the Blocked state. */ + ( pxTCB ) = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) ); + } +} +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + + TaskHandle_t xTaskGetCurrentTaskHandle( void ) + { + TaskHandle_t xReturn; + + /* A critical section is not required as this is not called from + an interrupt and the current TCB will always be the same for any + individual execution thread. 
*/ + xReturn = pxCurrentTCB; + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + + BaseType_t xTaskGetSchedulerState( void ) + { + BaseType_t xReturn; + + if( xSchedulerRunning == pdFALSE ) + { + xReturn = taskSCHEDULER_NOT_STARTED; + } + else + { + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + xReturn = taskSCHEDULER_RUNNING; + } + else + { + xReturn = taskSCHEDULER_SUSPENDED; + } + } + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) + { + TCB_t * const pxMutexHolderTCB = pxMutexHolder; + BaseType_t xReturn = pdFALSE; + + /* If the mutex was given back by an interrupt while the queue was + locked then the mutex holder might now be NULL. _RB_ Is this still + needed as interrupts can no longer use mutexes? */ + if( pxMutexHolder != NULL ) + { + /* If the holder of the mutex has a priority below the priority of + the task attempting to obtain the mutex then it will temporarily + inherit the priority of the task attempting to obtain the mutex. */ + if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority ) + { + /* Adjust the mutex holder state to account for its new + priority. Only reset the event list item value if the value is + not being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the task being modified is in the ready state it will need + to be moved into a new list. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Inherit the priority before being moved into the new list. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + prvAddTaskToReadyList( pxMutexHolderTCB ); + } + else + { + /* Just inherit the priority. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + } + + traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority ); + + /* Inheritance occurred. */ + xReturn = pdTRUE; + } + else + { + if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority ) + { + /* The base priority of the mutex holder is lower than the + priority of the task attempting to take the mutex, but the + current priority of the mutex holder is not lower than the + priority of the task attempting to take the mutex. + Therefore the mutex holder must have already inherited a + priority, but inheritance would have occurred if that had + not been the case. 
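xTaskGetSchedulerState(), implemented above, is useful in code that may run before the scheduler is started or while it is suspended. A sketch under that assumption (vBusyWaitMs() is a hypothetical non-RTOS delay helper):

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"

extern void vBusyWaitMs( uint32_t ulMs );   /* Hypothetical helper. */

void vSafeDelayMs( uint32_t ulMs )
{
    if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING )
    {
        vTaskDelay( pdMS_TO_TICKS( ulMs ) );   /* Normal blocking delay. */
    }
    else
    {
        /* Scheduler not started or suspended - blocking calls are not
           allowed, so fall back to a busy wait. */
        vBusyWaitMs( ulMs );
    }
}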
*/ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) + { + TCB_t * const pxTCB = pxMutexHolder; + BaseType_t xReturn = pdFALSE; + + if( pxMutexHolder != NULL ) + { + /* A task can only have an inherited priority if it holds the mutex. + If the mutex is held by a task then it cannot be given from an + interrupt, and if a mutex is given by the holding task then it must + be the running state task. */ + configASSERT( pxTCB == pxCurrentTCB ); + configASSERT( pxTCB->uxMutexesHeld ); + ( pxTCB->uxMutexesHeld )--; + + /* Has the holder of the mutex inherited the priority of another + task? */ + if( pxTCB->uxPriority != pxTCB->uxBasePriority ) + { + /* Only disinherit if no other mutexes are held. */ + if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 ) + { + /* A task can only have an inherited priority if it holds + the mutex. If the mutex is held by a task then it cannot be + given from an interrupt, and if a mutex is given by the + holding task then it must be the running state task. Remove + the holding task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Disinherit the priority before adding the task into the + new ready list. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); + pxTCB->uxPriority = pxTCB->uxBasePriority; + + /* Reset the event list item value. It cannot be in use for + any other purpose if this task is running, and it must be + running to give back the mutex. */ + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + prvAddTaskToReadyList( pxTCB ); + + /* Return true to indicate that a context switch is required. + This is only actually required in the corner case whereby + multiple mutexes were held and the mutexes were given back + in an order different to that in which they were taken. + If a context switch did not occur when the first mutex was + returned, even if a task was waiting on it, then a context + switch should occur when the last mutex is returned whether + a task is waiting on it or not. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask ) + { + TCB_t * const pxTCB = pxMutexHolder; + UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse; + const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1; + + if( pxMutexHolder != NULL ) + { + /* If pxMutexHolder is not NULL then the holder must hold at least + one mutex. */ + configASSERT( pxTCB->uxMutexesHeld ); + + /* Determine the priority to which the priority of the task that + holds the mutex should be set. 
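xTaskPriorityInherit() and xTaskPriorityDisinherit() above are called by the queue code when a mutex is taken and given back; application code exercises the mechanism simply by using a mutex from semphr.h. A sketch (names are hypothetical):

#include "FreeRTOS.h"
#include "semphr.h"

static SemaphoreHandle_t xBusMutex = NULL;

void vInitBus( void )
{
    /* Mutexes, unlike plain binary semaphores, enable priority inheritance. */
    xBusMutex = xSemaphoreCreateMutex();
    configASSERT( xBusMutex != NULL );
}

void vLowPriorityTask( void *pvParameters )
{
    ( void ) pvParameters;

    for( ;; )
    {
        if( xSemaphoreTake( xBusMutex, portMAX_DELAY ) == pdTRUE )
        {
            /* While a higher priority task is blocked on xBusMutex this task
               temporarily runs at that task's priority (inheritance above),
               and drops back when the mutex is given. */
            xSemaphoreGive( xBusMutex );
        }
    }
}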
This will be the greater of the + holding task's base priority and the priority of the highest + priority task that is waiting to obtain the mutex. */ + if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) + { + uxPriorityToUse = uxHighestPriorityWaitingTask; + } + else + { + uxPriorityToUse = pxTCB->uxBasePriority; + } + + /* Does the priority need to change? */ + if( pxTCB->uxPriority != uxPriorityToUse ) + { + /* Only disinherit if no other mutexes are held. This is a + simplification in the priority inheritance implementation. If + the task that holds the mutex is also holding other mutexes then + the other mutexes may have caused the priority inheritance. */ + if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + { + /* If a task has timed out because it already holds the + mutex it was trying to obtain then it cannot of inherited + its own priority. */ + configASSERT( pxTCB != pxCurrentTCB ); + + /* Disinherit the priority, remembering the previous + priority to facilitate determining the subject task's + state. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); + uxPriorityUsedOnEntry = pxTCB->uxPriority; + pxTCB->uxPriority = uxPriorityToUse; + + /* Only reset the event list item value if the value is not + being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the running task is not the task that holds the mutex + then the task that holds the mutex could be in either the + Ready, Blocked or Suspended states. Only remove the task + from its current state list if it is in the Ready state as + the task's priority is going to change and there is one + Ready list per priority. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + prvAddTaskToReadyList( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) + + void vTaskEnterCritical( void ) + { + portDISABLE_INTERRUPTS(); + + if( xSchedulerRunning != pdFALSE ) + { + ( pxCurrentTCB->uxCriticalNesting )++; + + /* This is not the interrupt safe version of the enter critical + function so assert() if it is being called from an interrupt + context. Only API functions that end in "FromISR" can be used in an + interrupt. Only assert if the critical nesting count is 1 to + protect against recursive calls if the assert function also uses a + critical section. 
*/ + if( pxCurrentTCB->uxCriticalNesting == 1 ) + { + portASSERT_IF_IN_ISR(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) + + void vTaskExitCritical( void ) + { + if( xSchedulerRunning != pdFALSE ) + { + if( pxCurrentTCB->uxCriticalNesting > 0U ) + { + ( pxCurrentTCB->uxCriticalNesting )--; + + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + portENABLE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) + + static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) + { + size_t x; + + /* Start by copying the entire string. */ + strcpy( pcBuffer, pcTaskName ); + + /* Pad the end of the string with spaces to ensure columns line up when + printed out. */ + for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ ) + { + pcBuffer[ x ] = ' '; + } + + /* Terminate. */ + pcBuffer[ x ] = ( char ) 0x00; + + /* Return the new end of string. */ + return &( pcBuffer[ x ] ); + } + +#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + void vTaskList( char * pcWriteBuffer ) + { + TaskStatus_t *pxTaskStatusArray; + UBaseType_t uxArraySize, x; + char cStatus; + + /* + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many + * of the demo applications. Do not consider it to be part of the + * scheduler. + * + * vTaskList() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that + * displays task names, states and stack usage. + * + * vTaskList() has a dependency on the sprintf() C library function that + * might bloat the code size, use a lot of stack, and provide different + * results on different platforms. An alternative, tiny, third party, + * and limited functionality implementation of sprintf() is provided in + * many of the FreeRTOS/Demo sub-directories in a file called + * printf-stdarg.c (note printf-stdarg.c does not provide a full + * snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly + * through a call to vTaskList(). + */ + + + /* Make sure the write buffer does not contain a string. */ + *pcWriteBuffer = ( char ) 0x00; + + /* Take a snapshot of the number of tasks in case it changes while this + function is executing. */ + uxArraySize = uxCurrentNumberOfTasks; + + /* Allocate an array index for each task. NOTE! if + configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + equate to NULL. */ + pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. 
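vTaskEnterCritical() and vTaskExitCritical() above back the taskENTER_CRITICAL()/taskEXIT_CRITICAL() macros on ports that keep the nesting count in the TCB. A short application-side sketch of typical usage, keeping the section as short as possible and never calling it from an ISR:

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"

static volatile uint32_t ulSharedCounter = 0;

void vBumpSharedCounter( void )
{
    /* Nesting is handled by the kernel, so this is safe even if the caller
       is already inside a critical section. */
    taskENTER_CRITICAL();
    {
        ulSharedCounter++;
    }
    taskEXIT_CRITICAL();
}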
*/ + + if( pxTaskStatusArray != NULL ) + { + /* Generate the (binary) data. */ + uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL ); + + /* Create a human readable table from the binary data. */ + for( x = 0; x < uxArraySize; x++ ) + { + switch( pxTaskStatusArray[ x ].eCurrentState ) + { + case eRunning: cStatus = tskRUNNING_CHAR; + break; + + case eReady: cStatus = tskREADY_CHAR; + break; + + case eBlocked: cStatus = tskBLOCKED_CHAR; + break; + + case eSuspended: cStatus = tskSUSPENDED_CHAR; + break; + + case eDeleted: cStatus = tskDELETED_CHAR; + break; + + case eInvalid: /* Fall through. */ + default: /* Should not get here, but it is included + to prevent static checking errors. */ + cStatus = ( char ) 0x00; + break; + } + + /* Write the task name to the string, padding with spaces so it + can be printed in tabular form more easily. */ + pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName ); + + /* Write the rest of the string. */ + sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */ + } + + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + is 0 then vPortFree() will be #defined to nothing. */ + vPortFree( pxTaskStatusArray ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + void vTaskGetRunTimeStats( char *pcWriteBuffer ) + { + TaskStatus_t *pxTaskStatusArray; + UBaseType_t uxArraySize, x; + uint32_t ulTotalTime, ulStatsAsPercentage; + + #if( configUSE_TRACE_FACILITY != 1 ) + { + #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats(). + } + #endif + + /* + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many + * of the demo applications. Do not consider it to be part of the + * scheduler. + * + * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part + * of the uxTaskGetSystemState() output into a human readable table that + * displays the amount of time each task has spent in the Running state + * in both absolute and percentage terms. + * + * vTaskGetRunTimeStats() has a dependency on the sprintf() C library + * function that might bloat the code size, use a lot of stack, and + * provide different results on different platforms. An alternative, + * tiny, third party, and limited functionality implementation of + * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in + * a file called printf-stdarg.c (note printf-stdarg.c does not provide + * a full snprintf() implementation!). 
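As the comment above notes, vTaskList() is a convenience helper rather than part of the scheduler proper, and it requires configUSE_TRACE_FACILITY and configUSE_STATS_FORMATTING_FUNCTIONS. A hedged calling sketch (buffer size and the use of printf() are assumptions; roughly 40 bytes per task is usually enough):

#include <stdio.h>
#include "FreeRTOS.h"
#include "task.h"

void vDumpTaskTable( void )
{
    /* Sized for up to 10 tasks at roughly 40 characters per row. */
    static char cBuffer[ 40 * 10 ];

    vTaskList( cBuffer );
    printf( "Name          State  Prio  Stack  Num\r\n%s", cBuffer );
}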
+ * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly + * through a call to vTaskGetRunTimeStats(). + */ + + /* Make sure the write buffer does not contain a string. */ + *pcWriteBuffer = ( char ) 0x00; + + /* Take a snapshot of the number of tasks in case it changes while this + function is executing. */ + uxArraySize = uxCurrentNumberOfTasks; + + /* Allocate an array index for each task. NOTE! If + configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + equate to NULL. */ + pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */ + + if( pxTaskStatusArray != NULL ) + { + /* Generate the (binary) data. */ + uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime ); + + /* For percentage calculations. */ + ulTotalTime /= 100UL; + + /* Avoid divide by zero errors. */ + if( ulTotalTime > 0UL ) + { + /* Create a human readable table from the binary data. */ + for( x = 0; x < uxArraySize; x++ ) + { + /* What percentage of the total run time has the task used? + This will always be rounded down to the nearest integer. + ulTotalRunTimeDiv100 has already been divided by 100. */ + ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime; + + /* Write the task name to the string, padding with + spaces so it can be printed in tabular form more + easily. */ + pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName ); + + if( ulStatsAsPercentage > 0UL ) + { + #ifdef portLU_PRINTF_SPECIFIER_REQUIRED + { + sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage ); + } + #else + { + /* sizeof( int ) == sizeof( long ) so a smaller + printf() library can be used. */ + sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + } + #endif + } + else + { + /* If the percentage is zero here then the task has + consumed less than 1% of the total run time. */ + #ifdef portLU_PRINTF_SPECIFIER_REQUIRED + { + sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter ); + } + #else + { + /* sizeof( int ) == sizeof( long ) so a smaller + printf() library can be used. */ + sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + } + #endif + } + + pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */ + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + is 0 then vPortFree() will be #defined to nothing. 
*/ + vPortFree( pxTaskStatusArray ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +TickType_t uxTaskResetEventItemValue( void ) +{ +TickType_t uxReturn; + + uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) ); + + /* Reset the event list item to its normal value - so it can be used with + queues and semaphores. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + return uxReturn; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + TaskHandle_t pvTaskIncrementMutexHeldCount( void ) + { + /* If xSemaphoreCreateMutex() is called before any tasks have been created + then pxCurrentTCB will be NULL. */ + if( pxCurrentTCB != NULL ) + { + ( pxCurrentTCB->uxMutexesHeld )++; + } + + return pxCurrentTCB; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) + { + uint32_t ulReturn; + + taskENTER_CRITICAL(); + { + /* Only block if the notification count is not already non-zero. */ + if( pxCurrentTCB->ulNotifiedValue == 0UL ) + { + /* Mark this task as waiting for a notification. */ + pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION; + + if( xTicksToWait > ( TickType_t ) 0 ) + { + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + traceTASK_NOTIFY_TAKE_BLOCK(); + + /* All ports are written to allow a yield in a critical + section (some will yield immediately, others wait until the + critical section exits) - but it is not something that + application code should ever do. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + taskENTER_CRITICAL(); + { + traceTASK_NOTIFY_TAKE(); + ulReturn = pxCurrentTCB->ulNotifiedValue; + + if( ulReturn != 0UL ) + { + if( xClearCountOnExit != pdFALSE ) + { + pxCurrentTCB->ulNotifiedValue = 0UL; + } + else + { + pxCurrentTCB->ulNotifiedValue = ulReturn - ( uint32_t ) 1; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + taskEXIT_CRITICAL(); + + return ulReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + /* Only block if a notification is not already pending. */ + if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED ) + { + /* Clear bits in the task's notification value as bits may get + set by the notifying task or interrupt. This can be used to + clear the value to zero. */ + pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry; + + /* Mark this task as waiting for a notification. 
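ulTaskNotifyTake(), implemented above, lets a direct-to-task notification stand in for a binary or counting semaphore. The classic deferred-interrupt sketch under that assumption (the interrupt handler name and the handle wiring are hypothetical; xHandlerTask is expected to be set when the task is created):

#include "FreeRTOS.h"
#include "task.h"

static TaskHandle_t xHandlerTask = NULL;   /* Set via xTaskCreate(). */

void EXTI0_IRQHandler( void )              /* Hypothetical interrupt handler. */
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Equivalent to "giving" a counting semaphore to the handler task. */
    vTaskNotifyGiveFromISR( xHandlerTask, &xHigherPriorityTaskWoken );
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}

void vHandlerTask( void *pvParameters )
{
    ( void ) pvParameters;

    for( ;; )
    {
        /* Block until notified; pdTRUE clears the count on exit so one
           wake-up services all outstanding events. Zero means timeout. */
        if( ulTaskNotifyTake( pdTRUE, pdMS_TO_TICKS( 500 ) ) != 0 )
        {
            /* Process the event(s) here. */
        }
    }
}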
*/ + pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION; + + if( xTicksToWait > ( TickType_t ) 0 ) + { + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + traceTASK_NOTIFY_WAIT_BLOCK(); + + /* All ports are written to allow a yield in a critical + section (some will yield immediately, others wait until the + critical section exits) - but it is not something that + application code should ever do. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + taskENTER_CRITICAL(); + { + traceTASK_NOTIFY_WAIT(); + + if( pulNotificationValue != NULL ) + { + /* Output the current notification value, which may or may not + have changed. */ + *pulNotificationValue = pxCurrentTCB->ulNotifiedValue; + } + + /* If ucNotifyValue is set then either the task never entered the + blocked state (because a notification was already pending) or the + task unblocked because of a notification. Otherwise the task + unblocked because of a timeout. */ + if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED ) + { + /* A notification was not received. */ + xReturn = pdFALSE; + } + else + { + /* A notification was already pending or a notification was + received while the task was waiting. */ + pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit; + xReturn = pdTRUE; + } + + pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) + { + TCB_t * pxTCB; + BaseType_t xReturn = pdPASS; + uint8_t ucOriginalNotifyState; + + configASSERT( xTaskToNotify ); + pxTCB = xTaskToNotify; + + taskENTER_CRITICAL(); + { + if( pulPreviousNotificationValue != NULL ) + { + *pulPreviousNotificationValue = pxTCB->ulNotifiedValue; + } + + ucOriginalNotifyState = pxTCB->ucNotifyState; + + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + switch( eAction ) + { + case eSetBits : + pxTCB->ulNotifiedValue |= ulValue; + break; + + case eIncrement : + ( pxTCB->ulNotifiedValue )++; + break; + + case eSetValueWithOverwrite : + pxTCB->ulNotifiedValue = ulValue; + break; + + case eSetValueWithoutOverwrite : + if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED ) + { + pxTCB->ulNotifiedValue = ulValue; + } + else + { + /* The value could not be written to the task. */ + xReturn = pdFAIL; + } + break; + + case eNoAction: + /* The task is being notified without its notify value being + updated. */ + break; + + default: + /* Should not get here if all enums are handled. + Artificially force an assert by testing a value the + compiler can't assume is const. */ + configASSERT( pxTCB->ulNotifiedValue == ~0UL ); + + break; + } + + traceTASK_NOTIFY(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* The task should not have been on an event list. 
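xTaskNotifyWait(), implemented above, can treat the 32-bit notification value as a light-weight event group when paired with xTaskNotify() and eSetBits. A sketch under that assumption (the bit definitions and task names are hypothetical):

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"

#define RX_COMPLETE_BIT    ( 1UL << 0 )    /* Hypothetical event bits. */
#define TX_COMPLETE_BIT    ( 1UL << 1 )

static TaskHandle_t xCommsTask = NULL;     /* Set via xTaskCreate(). */

void vSignalRxComplete( void )
{
    /* Set one bit in the target task's notification value. */
    ( void ) xTaskNotify( xCommsTask, RX_COMPLETE_BIT, eSetBits );
}

void vCommsTask( void *pvParameters )
{
    uint32_t ulNotifiedValue;
    ( void ) pvParameters;

    for( ;; )
    {
        /* Clear every bit on exit so each event is only seen once. */
        if( xTaskNotifyWait( 0UL, 0xFFFFFFFFUL, &ulNotifiedValue, portMAX_DELAY ) == pdTRUE )
        {
            if( ( ulNotifiedValue & RX_COMPLETE_BIT ) != 0UL )
            {
                /* Handle received data. */
            }

            if( ( ulNotifiedValue & TX_COMPLETE_BIT ) != 0UL )
            {
                /* Handle transmit completion. */
            }
        }
    }
}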
*/ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + #if( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked waiting for a notification then + xNextTaskUnblockTime might be set to the blocked task's time + out time. If the task is unblocked for a reason other than + a timeout xNextTaskUnblockTime is normally left unchanged, + because it will automatically get reset to a new value when + the tick count equals xNextTaskUnblockTime. However if + tickless idling is used it might be more important to enter + sleep mode at the earliest possible time - so reset + xNextTaskUnblockTime here to ensure it is updated at the + earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken ) + { + TCB_t * pxTCB; + uint8_t ucOriginalNotifyState; + BaseType_t xReturn = pdPASS; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToNotify ); + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are keep + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. More information (albeit Cortex-M specific) is + provided on the following link: + http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + pxTCB = xTaskToNotify; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( pulPreviousNotificationValue != NULL ) + { + *pulPreviousNotificationValue = pxTCB->ulNotifiedValue; + } + + ucOriginalNotifyState = pxTCB->ucNotifyState; + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + switch( eAction ) + { + case eSetBits : + pxTCB->ulNotifiedValue |= ulValue; + break; + + case eIncrement : + ( pxTCB->ulNotifiedValue )++; + break; + + case eSetValueWithOverwrite : + pxTCB->ulNotifiedValue = ulValue; + break; + + case eSetValueWithoutOverwrite : + if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED ) + { + pxTCB->ulNotifiedValue = ulValue; + } + else + { + /* The value could not be written to the task. */ + xReturn = pdFAIL; + } + break; + + case eNoAction : + /* The task is being notified without its notify value being + updated. 
*/ + break; + + default: + /* Should not get here if all enums are handled. + Artificially force an assert by testing a value the + compiler can't assume is const. */ + configASSERT( pxTCB->ulNotifiedValue == ~0UL ); + break; + } + + traceTASK_NOTIFY_FROM_ISR(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task should not have been on an event list. */ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed and ready lists cannot be accessed, so hold + this task pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + + /* Mark that a yield is pending in case the user is not + using the "xHigherPriorityTaskWoken" parameter to an ISR + safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken ) + { + TCB_t * pxTCB; + uint8_t ucOriginalNotifyState; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToNotify ); + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are keep + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. More information (albeit Cortex-M specific) is + provided on the following link: + http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + pxTCB = xTaskToNotify; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + ucOriginalNotifyState = pxTCB->ucNotifyState; + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + /* 'Giving' is equivalent to incrementing a count in a counting + semaphore. */ + ( pxTCB->ulNotifiedValue )++; + + traceTASK_NOTIFY_GIVE_FROM_ISR(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task should not have been on an event list. 
*/ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed and ready lists cannot be accessed, so hold + this task pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + + /* Mark that a yield is pending in case the user is not + using the "xHigherPriorityTaskWoken" parameter in an ISR + safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ + +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + BaseType_t xReturn; + + /* If null is passed in here then it is the calling task that is having + its notification state cleared. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + taskENTER_CRITICAL(); + { + if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED ) + { + pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + TickType_t xTaskGetIdleRunTimeCounter( void ) + { + return xIdleTaskHandle->ulRunTimeCounter; + } +#endif +/*-----------------------------------------------------------*/ + +static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) +{ +TickType_t xTimeToWake; +const TickType_t xConstTickCount = xTickCount; + + #if( INCLUDE_xTaskAbortDelay == 1 ) + { + /* About to enter a delayed list, so ensure the ucDelayAborted flag is + reset to pdFALSE so it can be detected as having been set to pdTRUE + when the task leaves the Blocked state. */ + pxCurrentTCB->ucDelayAborted = pdFALSE; + } + #endif + + /* Remove the task from the ready list before adding it to the blocked list + as the same list item is used for both lists. */ + if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* The current task must be in a ready list, so there is no need to + check, and the port reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) ) + { + /* Add the task to the suspended task list instead of a delayed task + list to ensure it is not woken by a timing event. It will block + indefinitely. 
*/ + vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* Calculate the time at which the task should be woken if the event + does not occur. This may overflow but this doesn't matter, the + kernel will manage it correctly. */ + xTimeToWake = xConstTickCount + xTicksToWait; + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + + if( xTimeToWake < xConstTickCount ) + { + /* Wake time has overflowed. Place this item in the overflow + list. */ + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* The wake time has not overflowed, so the current block list + is used. */ + vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + + /* If the task entering the blocked state was placed at the + head of the list of blocked tasks then xNextTaskUnblockTime + needs to be updated too. */ + if( xTimeToWake < xNextTaskUnblockTime ) + { + xNextTaskUnblockTime = xTimeToWake; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + } + #else /* INCLUDE_vTaskSuspend */ + { + /* Calculate the time at which the task should be woken if the event + does not occur. This may overflow but this doesn't matter, the kernel + will manage it correctly. */ + xTimeToWake = xConstTickCount + xTicksToWait; + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + + if( xTimeToWake < xConstTickCount ) + { + /* Wake time has overflowed. Place this item in the overflow list. */ + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* The wake time has not overflowed, so the current block list is used. */ + vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + + /* If the task entering the blocked state was placed at the head of the + list of blocked tasks then xNextTaskUnblockTime needs to be updated + too. */ + if( xTimeToWake < xNextTaskUnblockTime ) + { + xNextTaskUnblockTime = xTimeToWake; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */ + ( void ) xCanBlockIndefinitely; + } + #endif /* INCLUDE_vTaskSuspend */ +} + +/* Code below here allows additional code to be inserted into this source file, +especially where access to file scope functions and data is needed (for example +when performing module tests). */ + +#ifdef FREERTOS_MODULE_TEST + #include "tasks_test_access_functions.h" +#endif + + +#if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) + + #include "freertos_tasks_c_additions.h" + + #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + static void freertos_tasks_c_additions_init( void ) + { + FREERTOS_TASKS_C_ADDITIONS_INIT(); + } + #endif + +#endif + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/timers.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/timers.c new file mode 100644 index 000000000..59b3840d7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/FreeRTOS/Source/timers.c @@ -0,0 +1,1102 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* Standard includes. */ +#include <stdlib.h> + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" + +#if ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 0 ) + #error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available. +#endif + +/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified +because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined +for the header files above, but not in this file, in order to generate the +correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e9021 !e961 !e750. */ + + +/* This entire source file will be skipped if the application is not configured +to include software timer functionality. This #if is closed at the very bottom +of this file. If you want to include software timer functionality then ensure +configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */ +#if ( configUSE_TIMERS == 1 ) + +/* Misc definitions. */ +#define tmrNO_DELAY ( TickType_t ) 0U + +/* The name assigned to the timer service task. This can be overridden by +defining configTIMER_SERVICE_TASK_NAME in FreeRTOSConfig.h. */ +#ifndef configTIMER_SERVICE_TASK_NAME + #define configTIMER_SERVICE_TASK_NAME "Tmr Svc" +#endif + +/* Bit definitions used in the ucStatus member of a timer structure. */ +#define tmrSTATUS_IS_ACTIVE ( ( uint8_t ) 0x01 ) +#define tmrSTATUS_IS_STATICALLY_ALLOCATED ( ( uint8_t ) 0x02 ) +#define tmrSTATUS_IS_AUTORELOAD ( ( uint8_t ) 0x04 ) + +/* The definition of the timers themselves. */ +typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +{ + const char *pcTimerName; /*<< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management.
*/ + TickType_t xTimerPeriodInTicks;/*<< How quickly and often the timer expires. */ + void *pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ + TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ + #if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */ + #endif + uint8_t ucStatus; /*<< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ +} xTIMER; + +/* The old xTIMER name is maintained above then typedefed to the new Timer_t +name below to enable the use of older kernel aware debuggers. */ +typedef xTIMER Timer_t; + +/* The definition of messages that can be sent and received on the timer queue. +Two types of message can be queued - messages that manipulate a software timer, +and messages that request the execution of a non-timer related callback. The +two message types are defined in two separate structures, xTimerParametersType +and xCallbackParametersType respectively. */ +typedef struct tmrTimerParameters +{ + TickType_t xMessageValue; /*<< An optional value used by a subset of commands, for example, when changing the period of a timer. */ + Timer_t * pxTimer; /*<< The timer to which the command will be applied. */ +} TimerParameter_t; + + +typedef struct tmrCallbackParameters +{ + PendedFunction_t pxCallbackFunction; /* << The callback function to execute. */ + void *pvParameter1; /* << The value that will be used as the callback functions first parameter. */ + uint32_t ulParameter2; /* << The value that will be used as the callback functions second parameter. */ +} CallbackParameters_t; + +/* The structure that contains the two message types, along with an identifier +that is used to determine which message type is valid. */ +typedef struct tmrTimerQueueMessage +{ + BaseType_t xMessageID; /*<< The command being sent to the timer service task. */ + union + { + TimerParameter_t xTimerParameters; + + /* Don't include xCallbackParameters if it is not going to be used as + it makes the structure (and therefore the timer queue) larger. */ + #if ( INCLUDE_xTimerPendFunctionCall == 1 ) + CallbackParameters_t xCallbackParameters; + #endif /* INCLUDE_xTimerPendFunctionCall */ + } u; +} DaemonTaskMessage_t; + +/*lint -save -e956 A manual analysis and inspection has been used to determine +which static variables must be declared volatile. */ + +/* The list in which active timers are stored. Timers are referenced in expire +time order, with the nearest expiry time at the front of the list. Only the +timer service task is allowed to access these lists. +xActiveTimerList1 and xActiveTimerList2 could be at function scope but that +breaks some kernel aware debuggers, and debuggers that reply on removing the +static qualifier. */ +PRIVILEGED_DATA static List_t xActiveTimerList1 = { 0 }; +PRIVILEGED_DATA static List_t xActiveTimerList2 = { 0 }; +PRIVILEGED_DATA static List_t *pxCurrentTimerList = NULL; +PRIVILEGED_DATA static List_t *pxOverflowTimerList = NULL; + +/* A queue that is used to send commands to the timer service task. 
*/ +PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL; +PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; + +/*lint -restore */ + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + /* If static allocation is supported then the application must provide the + following callback function - which enables the application to optionally + provide the memory that will be used by the timer task as the task's stack + and TCB. */ + extern void vApplicationGetTimerTaskMemory( StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize ); + +#endif + +/* + * Initialise the infrastructure used by the timer service task if it has not + * been initialised already. + */ +static void prvCheckForValidListAndQueue( void ) PRIVILEGED_FUNCTION; + +/* + * The timer service task (daemon). Timer functionality is controlled by this + * task. Other tasks communicate with the timer service task using the + * xTimerQueue queue. + */ +static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) PRIVILEGED_FUNCTION; + +/* + * Called by the timer service task to interpret and process a command it + * received on the timer queue. + */ +static void prvProcessReceivedCommands( void ) PRIVILEGED_FUNCTION; + +/* + * Insert the timer into either xActiveTimerList1, or xActiveTimerList2, + * depending on if the expire time causes a timer counter overflow. + */ +static BaseType_t prvInsertTimerInActiveList( Timer_t * const pxTimer, const TickType_t xNextExpiryTime, const TickType_t xTimeNow, const TickType_t xCommandTime ) PRIVILEGED_FUNCTION; + +/* + * An active timer has reached its expire time. Reload the timer if it is an + * auto reload timer, then call its callback. + */ +static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) PRIVILEGED_FUNCTION; + +/* + * The tick count has overflowed. Switch the timer lists after ensuring the + * current timer list does not still reference some timers. + */ +static void prvSwitchTimerLists( void ) PRIVILEGED_FUNCTION; + +/* + * Obtain the current tick count, setting *pxTimerListsWereSwitched to pdTRUE + * if a tick count overflow occurred since prvSampleTimeNow() was last called. + */ +static TickType_t prvSampleTimeNow( BaseType_t * const pxTimerListsWereSwitched ) PRIVILEGED_FUNCTION; + +/* + * If the timer list contains any active timers then return the expire time of + * the timer that will expire first and set *pxListWasEmpty to false. If the + * timer list does not contain any timers then return 0 and set *pxListWasEmpty + * to pdTRUE. + */ +static TickType_t prvGetNextExpireTime( BaseType_t * const pxListWasEmpty ) PRIVILEGED_FUNCTION; + +/* + * If a timer has expired, process it. Otherwise, block the timer service task + * until either a timer does expire or a command is received. + */ +static void prvProcessTimerOrBlockTask( const TickType_t xNextExpireTime, BaseType_t xListWasEmpty ) PRIVILEGED_FUNCTION; + +/* + * Called after a Timer_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewTimer( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + Timer_t *pxNewTimer ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +BaseType_t xTimerCreateTimerTask( void ) +{ +BaseType_t xReturn = pdFAIL; + + /* This function is called when the scheduler is started if + configUSE_TIMERS is set to 1. Check that the infrastructure used by the + timer service task has been created/initialised. If timers have already + been created then the initialisation will already have been performed. */ + prvCheckForValidListAndQueue(); + + if( xTimerQueue != NULL ) + { + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + StaticTask_t *pxTimerTaskTCBBuffer = NULL; + StackType_t *pxTimerTaskStackBuffer = NULL; + uint32_t ulTimerTaskStackSize; + + vApplicationGetTimerTaskMemory( &pxTimerTaskTCBBuffer, &pxTimerTaskStackBuffer, &ulTimerTaskStackSize ); + xTimerTaskHandle = xTaskCreateStatic( prvTimerTask, + configTIMER_SERVICE_TASK_NAME, + ulTimerTaskStackSize, + NULL, + ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, + pxTimerTaskStackBuffer, + pxTimerTaskTCBBuffer ); + + if( xTimerTaskHandle != NULL ) + { + xReturn = pdPASS; + } + } + #else + { + xReturn = xTaskCreate( prvTimerTask, + configTIMER_SERVICE_TASK_NAME, + configTIMER_TASK_STACK_DEPTH, + NULL, + ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, + &xTimerTaskHandle ); + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + configASSERT( xReturn ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + TimerHandle_t xTimerCreate( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) + { + Timer_t *pxNewTimer; + + pxNewTimer = ( Timer_t * ) pvPortMalloc( sizeof( Timer_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of Timer_t is always a pointer to the timer's mame. */ + + if( pxNewTimer != NULL ) + { + /* Status is thus far zero as the timer is not created statically + and has not been started. The autoreload bit may get set in + prvInitialiseNewTimer. */ + pxNewTimer->ucStatus = 0x00; + prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer ); + } + + return pxNewTimer; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TimerHandle_t xTimerCreateStatic( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + StaticTimer_t *pxTimerBuffer ) + { + Timer_t *pxNewTimer; + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticTimer_t equals the size of the real timer + structure. 
*/ + volatile size_t xSize = sizeof( StaticTimer_t ); + configASSERT( xSize == sizeof( Timer_t ) ); + ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */ + } + #endif /* configASSERT_DEFINED */ + + /* A pointer to a StaticTimer_t structure MUST be provided, use it. */ + configASSERT( pxTimerBuffer ); + pxNewTimer = ( Timer_t * ) pxTimerBuffer; /*lint !e740 !e9087 StaticTimer_t is a pointer to a Timer_t, so guaranteed to be aligned and sized correctly (checked by an assert()), so this is safe. */ + + if( pxNewTimer != NULL ) + { + /* Timers can be created statically or dynamically so note this + timer was created statically in case it is later deleted. The + autoreload bit may get set in prvInitialiseNewTimer(). */ + pxNewTimer->ucStatus = tmrSTATUS_IS_STATICALLY_ALLOCATED; + + prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer ); + } + + return pxNewTimer; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewTimer( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + Timer_t *pxNewTimer ) +{ + /* 0 is not a valid value for xTimerPeriodInTicks. */ + configASSERT( ( xTimerPeriodInTicks > 0 ) ); + + if( pxNewTimer != NULL ) + { + /* Ensure the infrastructure used by the timer service task has been + created/initialised. */ + prvCheckForValidListAndQueue(); + + /* Initialise the timer structure members using the function + parameters. */ + pxNewTimer->pcTimerName = pcTimerName; + pxNewTimer->xTimerPeriodInTicks = xTimerPeriodInTicks; + pxNewTimer->pvTimerID = pvTimerID; + pxNewTimer->pxCallbackFunction = pxCallbackFunction; + vListInitialiseItem( &( pxNewTimer->xTimerListItem ) ); + if( uxAutoReload != pdFALSE ) + { + pxNewTimer->ucStatus |= tmrSTATUS_IS_AUTORELOAD; + } + traceTIMER_CREATE( pxNewTimer ); + } +} +/*-----------------------------------------------------------*/ + +BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) +{ +BaseType_t xReturn = pdFAIL; +DaemonTaskMessage_t xMessage; + + configASSERT( xTimer ); + + /* Send a message to the timer service task to perform a particular action + on a particular timer definition. */ + if( xTimerQueue != NULL ) + { + /* Send a command to the timer service task to start the xTimer timer. 
*/ + xMessage.xMessageID = xCommandID; + xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; + xMessage.u.xTimerParameters.pxTimer = xTimer; + + if( xCommandID < tmrFIRST_FROM_ISR_COMMAND ) + { + if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING ) + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); + } + else + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, tmrNO_DELAY ); + } + } + else + { + xReturn = xQueueSendToBackFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); + } + + traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ) +{ + /* If xTimerGetTimerDaemonTaskHandle() is called before the scheduler has been + started, then xTimerTaskHandle will be NULL. */ + configASSERT( ( xTimerTaskHandle != NULL ) ); + return xTimerTaskHandle; +} +/*-----------------------------------------------------------*/ + +TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) +{ +Timer_t *pxTimer = xTimer; + + configASSERT( xTimer ); + return pxTimer->xTimerPeriodInTicks; +} +/*-----------------------------------------------------------*/ + +void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) +{ +Timer_t * pxTimer = xTimer; + + configASSERT( xTimer ); + taskENTER_CRITICAL(); + { + if( uxAutoReload != pdFALSE ) + { + pxTimer->ucStatus |= tmrSTATUS_IS_AUTORELOAD; + } + else + { + pxTimer->ucStatus &= ~tmrSTATUS_IS_AUTORELOAD; + } + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) +{ +Timer_t * pxTimer = xTimer; +TickType_t xReturn; + + configASSERT( xTimer ); + xReturn = listGET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ) ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +const char * pcTimerGetName( TimerHandle_t xTimer ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +{ +Timer_t *pxTimer = xTimer; + + configASSERT( xTimer ); + return pxTimer->pcTimerName; +} +/*-----------------------------------------------------------*/ + +static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) +{ +BaseType_t xResult; +Timer_t * const pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); /*lint !e9087 !e9079 void * is used as this macro is used with tasks and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Remove the timer from the list of active timers. A check has already + been performed to ensure the list is not empty. */ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + traceTIMER_EXPIRED( pxTimer ); + + /* If the timer is an auto reload timer then calculate the next + expiry time and re-insert the timer in the list of active timers. */ + if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 ) + { + /* The timer is inserted into a list using a time relative to anything + other than the current time. It will therefore be inserted into the + correct list relative to the time this task thinks it is now. 
*/ + if( prvInsertTimerInActiveList( pxTimer, ( xNextExpireTime + pxTimer->xTimerPeriodInTicks ), xTimeNow, xNextExpireTime ) != pdFALSE ) + { + /* The timer expired before it was added to the active timer + list. Reload it now. */ + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xNextExpireTime, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + mtCOVERAGE_TEST_MARKER(); + } + + /* Call the timer callback. */ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); +} +/*-----------------------------------------------------------*/ + +static portTASK_FUNCTION( prvTimerTask, pvParameters ) +{ +TickType_t xNextExpireTime; +BaseType_t xListWasEmpty; + + /* Just to avoid compiler warnings. */ + ( void ) pvParameters; + + #if( configUSE_DAEMON_TASK_STARTUP_HOOK == 1 ) + { + extern void vApplicationDaemonTaskStartupHook( void ); + + /* Allow the application writer to execute some code in the context of + this task at the point the task starts executing. This is useful if the + application includes initialisation code that would benefit from + executing after the scheduler has been started. */ + vApplicationDaemonTaskStartupHook(); + } + #endif /* configUSE_DAEMON_TASK_STARTUP_HOOK */ + + for( ;; ) + { + /* Query the timers list to see if it contains any timers, and if so, + obtain the time at which the next timer will expire. */ + xNextExpireTime = prvGetNextExpireTime( &xListWasEmpty ); + + /* If a timer has expired, process it. Otherwise, block this task + until either a timer does expire, or a command is received. */ + prvProcessTimerOrBlockTask( xNextExpireTime, xListWasEmpty ); + + /* Empty the command queue. */ + prvProcessReceivedCommands(); + } +} +/*-----------------------------------------------------------*/ + +static void prvProcessTimerOrBlockTask( const TickType_t xNextExpireTime, BaseType_t xListWasEmpty ) +{ +TickType_t xTimeNow; +BaseType_t xTimerListsWereSwitched; + + vTaskSuspendAll(); + { + /* Obtain the time now to make an assessment as to whether the timer + has expired or not. If obtaining the time causes the lists to switch + then don't process this timer as any timers that remained in the list + when the lists were switched will have been processed within the + prvSampleTimeNow() function. */ + xTimeNow = prvSampleTimeNow( &xTimerListsWereSwitched ); + if( xTimerListsWereSwitched == pdFALSE ) + { + /* The tick count has not overflowed, has the timer expired? */ + if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) ) + { + ( void ) xTaskResumeAll(); + prvProcessExpiredTimer( xNextExpireTime, xTimeNow ); + } + else + { + /* The tick count has not overflowed, and the next expire + time has not been reached yet. This task should therefore + block to wait for the next expire time or a command to be + received - whichever comes first. The following line cannot + be reached unless xNextExpireTime > xTimeNow, except in the + case when the current timer list is empty. */ + if( xListWasEmpty != pdFALSE ) + { + /* The current timer list is empty - is the overflow list + also empty? */ + xListWasEmpty = listLIST_IS_EMPTY( pxOverflowTimerList ); + } + + vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty ); + + if( xTaskResumeAll() == pdFALSE ) + { + /* Yield to wait for either a command to arrive, or the + block time to expire. 
If a command arrived between the + critical section being exited and this yield then the yield + will not cause the task to block. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + ( void ) xTaskResumeAll(); + } + } +} +/*-----------------------------------------------------------*/ + +static TickType_t prvGetNextExpireTime( BaseType_t * const pxListWasEmpty ) +{ +TickType_t xNextExpireTime; + + /* Timers are listed in expiry time order, with the head of the list + referencing the task that will expire first. Obtain the time at which + the timer with the nearest expiry time will expire. If there are no + active timers then just set the next expire time to 0. That will cause + this task to unblock when the tick count overflows, at which point the + timer lists will be switched and the next expiry time can be + re-assessed. */ + *pxListWasEmpty = listLIST_IS_EMPTY( pxCurrentTimerList ); + if( *pxListWasEmpty == pdFALSE ) + { + xNextExpireTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxCurrentTimerList ); + } + else + { + /* Ensure the task unblocks when the tick count rolls over. */ + xNextExpireTime = ( TickType_t ) 0U; + } + + return xNextExpireTime; +} +/*-----------------------------------------------------------*/ + +static TickType_t prvSampleTimeNow( BaseType_t * const pxTimerListsWereSwitched ) +{ +TickType_t xTimeNow; +PRIVILEGED_DATA static TickType_t xLastTime = ( TickType_t ) 0U; /*lint !e956 Variable is only accessible to one task. */ + + xTimeNow = xTaskGetTickCount(); + + if( xTimeNow < xLastTime ) + { + prvSwitchTimerLists(); + *pxTimerListsWereSwitched = pdTRUE; + } + else + { + *pxTimerListsWereSwitched = pdFALSE; + } + + xLastTime = xTimeNow; + + return xTimeNow; +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvInsertTimerInActiveList( Timer_t * const pxTimer, const TickType_t xNextExpiryTime, const TickType_t xTimeNow, const TickType_t xCommandTime ) +{ +BaseType_t xProcessTimerNow = pdFALSE; + + listSET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ), xNextExpiryTime ); + listSET_LIST_ITEM_OWNER( &( pxTimer->xTimerListItem ), pxTimer ); + + if( xNextExpiryTime <= xTimeNow ) + { + /* Has the expiry time elapsed between the command to start/reset a + timer was issued, and the time the command was processed? */ + if( ( ( TickType_t ) ( xTimeNow - xCommandTime ) ) >= pxTimer->xTimerPeriodInTicks ) /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + { + /* The time between a command being issued and the command being + processed actually exceeds the timers period. */ + xProcessTimerNow = pdTRUE; + } + else + { + vListInsert( pxOverflowTimerList, &( pxTimer->xTimerListItem ) ); + } + } + else + { + if( ( xTimeNow < xCommandTime ) && ( xNextExpiryTime >= xCommandTime ) ) + { + /* If, since the command was issued, the tick count has overflowed + but the expiry time has not, then the timer must have already passed + its expiry time and should be processed immediately. 
*/ + xProcessTimerNow = pdTRUE; + } + else + { + vListInsert( pxCurrentTimerList, &( pxTimer->xTimerListItem ) ); + } + } + + return xProcessTimerNow; +} +/*-----------------------------------------------------------*/ + +static void prvProcessReceivedCommands( void ) +{ +DaemonTaskMessage_t xMessage; +Timer_t *pxTimer; +BaseType_t xTimerListsWereSwitched, xResult; +TickType_t xTimeNow; + + while( xQueueReceive( xTimerQueue, &xMessage, tmrNO_DELAY ) != pdFAIL ) /*lint !e603 xMessage does not have to be initialised as it is passed out, not in, and it is not used unless xQueueReceive() returns pdTRUE. */ + { + #if ( INCLUDE_xTimerPendFunctionCall == 1 ) + { + /* Negative commands are pended function calls rather than timer + commands. */ + if( xMessage.xMessageID < ( BaseType_t ) 0 ) + { + const CallbackParameters_t * const pxCallback = &( xMessage.u.xCallbackParameters ); + + /* The timer uses the xCallbackParameters member to request a + callback be executed. Check the callback is not NULL. */ + configASSERT( pxCallback ); + + /* Call the function. */ + pxCallback->pxCallbackFunction( pxCallback->pvParameter1, pxCallback->ulParameter2 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* INCLUDE_xTimerPendFunctionCall */ + + /* Commands that are positive are timer commands rather than pended + function calls. */ + if( xMessage.xMessageID >= ( BaseType_t ) 0 ) + { + /* The messages uses the xTimerParameters member to work on a + software timer. */ + pxTimer = xMessage.u.xTimerParameters.pxTimer; + + if( listIS_CONTAINED_WITHIN( NULL, &( pxTimer->xTimerListItem ) ) == pdFALSE ) /*lint !e961. The cast is only redundant when NULL is passed into the macro. */ + { + /* The timer is in a list, remove it. */ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceTIMER_COMMAND_RECEIVED( pxTimer, xMessage.xMessageID, xMessage.u.xTimerParameters.xMessageValue ); + + /* In this case the xTimerListsWereSwitched parameter is not used, but + it must be present in the function call. prvSampleTimeNow() must be + called after the message is received from xTimerQueue so there is no + possibility of a higher priority task adding a message to the message + queue with a time that is ahead of the timer daemon task (because it + pre-empted the timer daemon task after the xTimeNow value was set). */ + xTimeNow = prvSampleTimeNow( &xTimerListsWereSwitched ); + + switch( xMessage.xMessageID ) + { + case tmrCOMMAND_START : + case tmrCOMMAND_START_FROM_ISR : + case tmrCOMMAND_RESET : + case tmrCOMMAND_RESET_FROM_ISR : + case tmrCOMMAND_START_DONT_TRACE : + /* Start or restart a timer. */ + pxTimer->ucStatus |= tmrSTATUS_IS_ACTIVE; + if( prvInsertTimerInActiveList( pxTimer, xMessage.u.xTimerParameters.xMessageValue + pxTimer->xTimerPeriodInTicks, xTimeNow, xMessage.u.xTimerParameters.xMessageValue ) != pdFALSE ) + { + /* The timer expired before it was added to the active + timer list. Process it now. 
*/ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); + traceTIMER_EXPIRED( pxTimer ); + + if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 ) + { + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xMessage.u.xTimerParameters.xMessageValue + pxTimer->xTimerPeriodInTicks, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + break; + + case tmrCOMMAND_STOP : + case tmrCOMMAND_STOP_FROM_ISR : + /* The timer has already been removed from the active list. */ + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + break; + + case tmrCOMMAND_CHANGE_PERIOD : + case tmrCOMMAND_CHANGE_PERIOD_FROM_ISR : + pxTimer->ucStatus |= tmrSTATUS_IS_ACTIVE; + pxTimer->xTimerPeriodInTicks = xMessage.u.xTimerParameters.xMessageValue; + configASSERT( ( pxTimer->xTimerPeriodInTicks > 0 ) ); + + /* The new period does not really have a reference, and can + be longer or shorter than the old one. The command time is + therefore set to the current time, and as the period cannot + be zero the next expiry time can only be in the future, + meaning (unlike for the xTimerStart() case above) there is + no fail case that needs to be handled here. */ + ( void ) prvInsertTimerInActiveList( pxTimer, ( xTimeNow + pxTimer->xTimerPeriodInTicks ), xTimeNow, xTimeNow ); + break; + + case tmrCOMMAND_DELETE : + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* The timer has already been removed from the active list, + just free up the memory if the memory was dynamically + allocated. */ + if( ( pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) 0 ) + { + vPortFree( pxTimer ); + } + else + { + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + } + } + #else + { + /* If dynamic allocation is not enabled, the memory + could not have been dynamically allocated. So there is + no need to free the memory - just mark the timer as + "not active". */ + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + break; + + default : + /* Don't expect to get here. */ + break; + } + } + } +} +/*-----------------------------------------------------------*/ + +static void prvSwitchTimerLists( void ) +{ +TickType_t xNextExpireTime, xReloadTime; +List_t *pxTemp; +Timer_t *pxTimer; +BaseType_t xResult; + + /* The tick count has overflowed. The timer lists must be switched. + If there are any timers still referenced from the current timer list + then they must have expired and should be processed before the lists + are switched. */ + while( listLIST_IS_EMPTY( pxCurrentTimerList ) == pdFALSE ) + { + xNextExpireTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxCurrentTimerList ); + + /* Remove the timer from the list. */ + pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); /*lint !e9087 !e9079 void * is used as this macro is used with tasks and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + traceTIMER_EXPIRED( pxTimer ); + + /* Execute its callback, then send a command to restart the timer if + it is an auto-reload timer. It cannot be restarted here as the lists + have not yet been switched. 
*/ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); + + if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 ) + { + /* Calculate the reload value, and if the reload value results in + the timer going into the same timer list then it has already expired + and the timer should be re-inserted into the current list so it is + processed again within this loop. Otherwise a command should be sent + to restart the timer to ensure it is only inserted into a list after + the lists have been swapped. */ + xReloadTime = ( xNextExpireTime + pxTimer->xTimerPeriodInTicks ); + if( xReloadTime > xNextExpireTime ) + { + listSET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ), xReloadTime ); + listSET_LIST_ITEM_OWNER( &( pxTimer->xTimerListItem ), pxTimer ); + vListInsert( pxCurrentTimerList, &( pxTimer->xTimerListItem ) ); + } + else + { + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xNextExpireTime, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + pxTemp = pxCurrentTimerList; + pxCurrentTimerList = pxOverflowTimerList; + pxOverflowTimerList = pxTemp; +} +/*-----------------------------------------------------------*/ + +static void prvCheckForValidListAndQueue( void ) +{ + /* Check that the list from which active timers are referenced, and the + queue used to communicate with the timer service, have been + initialised. */ + taskENTER_CRITICAL(); + { + if( xTimerQueue == NULL ) + { + vListInitialise( &xActiveTimerList1 ); + vListInitialise( &xActiveTimerList2 ); + pxCurrentTimerList = &xActiveTimerList1; + pxOverflowTimerList = &xActiveTimerList2; + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* The timer queue is allocated statically in case + configSUPPORT_DYNAMIC_ALLOCATION is 0. */ + static StaticQueue_t xStaticTimerQueue; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */ + static uint8_t ucStaticTimerQueueStorage[ ( size_t ) configTIMER_QUEUE_LENGTH * sizeof( DaemonTaskMessage_t ) ]; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */ + + xTimerQueue = xQueueCreateStatic( ( UBaseType_t ) configTIMER_QUEUE_LENGTH, ( UBaseType_t ) sizeof( DaemonTaskMessage_t ), &( ucStaticTimerQueueStorage[ 0 ] ), &xStaticTimerQueue ); + } + #else + { + xTimerQueue = xQueueCreate( ( UBaseType_t ) configTIMER_QUEUE_LENGTH, sizeof( DaemonTaskMessage_t ) ); + } + #endif + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + { + if( xTimerQueue != NULL ) + { + vQueueAddToRegistry( xTimerQueue, "TmrQ" ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configQUEUE_REGISTRY_SIZE */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ) +{ +BaseType_t xReturn; +Timer_t *pxTimer = xTimer; + + configASSERT( xTimer ); + + /* Is the timer in the list of active timers? */ + taskENTER_CRITICAL(); + { + if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} /*lint !e818 Can't be pointer to const due to the typedef. 
*/ +/*-----------------------------------------------------------*/ + +void *pvTimerGetTimerID( const TimerHandle_t xTimer ) +{ +Timer_t * const pxTimer = xTimer; +void *pvReturn; + + configASSERT( xTimer ); + + taskENTER_CRITICAL(); + { + pvReturn = pxTimer->pvTimerID; + } + taskEXIT_CRITICAL(); + + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) +{ +Timer_t * const pxTimer = xTimer; + + configASSERT( xTimer ); + + taskENTER_CRITICAL(); + { + pxTimer->pvTimerID = pvNewID; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +#if( INCLUDE_xTimerPendFunctionCall == 1 ) + + BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, BaseType_t *pxHigherPriorityTaskWoken ) + { + DaemonTaskMessage_t xMessage; + BaseType_t xReturn; + + /* Complete the message with the function parameters and post it to the + daemon task. */ + xMessage.xMessageID = tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR; + xMessage.u.xCallbackParameters.pxCallbackFunction = xFunctionToPend; + xMessage.u.xCallbackParameters.pvParameter1 = pvParameter1; + xMessage.u.xCallbackParameters.ulParameter2 = ulParameter2; + + xReturn = xQueueSendFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); + + tracePEND_FUNC_CALL_FROM_ISR( xFunctionToPend, pvParameter1, ulParameter2, xReturn ); + + return xReturn; + } + +#endif /* INCLUDE_xTimerPendFunctionCall */ +/*-----------------------------------------------------------*/ + +#if( INCLUDE_xTimerPendFunctionCall == 1 ) + + BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) + { + DaemonTaskMessage_t xMessage; + BaseType_t xReturn; + + /* This function can only be called after a timer has been created or + after the scheduler has been started because, until then, the timer + queue does not exist. */ + configASSERT( xTimerQueue ); + + /* Complete the message with the function parameters and post it to the + daemon task. */ + xMessage.xMessageID = tmrCOMMAND_EXECUTE_CALLBACK; + xMessage.u.xCallbackParameters.pxCallbackFunction = xFunctionToPend; + xMessage.u.xCallbackParameters.pvParameter1 = pvParameter1; + xMessage.u.xCallbackParameters.ulParameter2 = ulParameter2; + + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); + + tracePEND_FUNC_CALL( xFunctionToPend, pvParameter1, ulParameter2, xReturn ); + + return xReturn; + } + +#endif /* INCLUDE_xTimerPendFunctionCall */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTimerGetTimerNumber( TimerHandle_t xTimer ) + { + return ( ( Timer_t * ) xTimer )->uxTimerNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vTimerSetTimerNumber( TimerHandle_t xTimer, UBaseType_t uxTimerNumber ) + { + ( ( Timer_t * ) xTimer )->uxTimerNumber = uxTimerNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +/* This entire source file will be skipped if the application is not configured +to include software timer functionality. If you want to include software timer +functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. 
*/ +#endif /* configUSE_TIMERS == 1 */ + + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jconfig_template.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jconfig_template.h new file mode 100644 index 000000000..291b3c601 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jconfig_template.h @@ -0,0 +1,188 @@ +/* + * jconfig.txt + * + * Copyright (C) 1991-1994, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file documents the configuration options that are required to + * customize the JPEG software for a particular system. + * + * The actual configuration options for a particular installation are stored + * in jconfig.h. On many machines, jconfig.h can be generated automatically + * or copied from one of the "canned" jconfig files that we supply. But if + * you need to generate a jconfig.h file by hand, this file tells you how. + * + * DO NOT EDIT THIS FILE --- IT WON'T ACCOMPLISH ANYTHING. + * EDIT A COPY NAMED JCONFIG.H. + */ + +/* + * these macros provide simple implementation of the system memory + * dependent portion of the JPEG memory manager. This implementation + * assumes that no backing-store files are needed: all required space + * can be obtained from malloc(). + * This is very portable in the sense that it'll compile on almost anything, + * but you'd better have lots of main memory (or virtual memory) if you want + * to process big images. + * Note that the max_memory_to_use option is ignored by this implementation. + */ +#include "ff.h" + + +/* + * These defines indicate the memory allocation methods. + */ + +#define JMALLOC /* ex: malloc */ +#define JFREE /* ex: free */ + +/* + * These symbols indicate the properties of your machine or compiler. + * #define the symbol if yes, #undef it if no. + */ + +#define NO_GETENV +#undef USE_MSDOS_MEMMGR +#undef USE_MAC_MEMMGR +#define USE_HEAP_MEM +#define MAX_ALLOC_CHUNK 0x10000 /* 64kB */ + +/* Does your compiler support function prototypes? + * (If not, you also need to use ansi2knr, see install.txt) + */ +#define HAVE_PROTOTYPES + +/* Does your compiler support the declaration "unsigned char" ? + * How about "unsigned short" ? + */ +#define HAVE_UNSIGNED_CHAR +#define HAVE_UNSIGNED_SHORT + +/* Define "void" as "char" if your compiler doesn't know about type void. + * NOTE: be sure to define void such that "void *" represents the most general + * pointer type, e.g., that returned by malloc(). + */ +/* #define void char */ + +/* Define "const" as empty if your compiler doesn't know the "const" keyword. + */ +/* #define const */ + +/* Define this if an ordinary "char" type is unsigned. + * If you're not sure, leaving it undefined will work at some cost in speed. + * If you defined HAVE_UNSIGNED_CHAR then the speed difference is minimal. + */ +#undef CHAR_IS_UNSIGNED + +/* Define this if your system has an ANSI-conforming <stddef.h> file. + */ +#define HAVE_STDDEF_H + +/* Define this if your system has an ANSI-conforming <stdlib.h> file. + */ +#define HAVE_STDLIB_H + +/* Define this if your system does not have an ANSI/SysV <string.h>, + * but does have a BSD-style <strings.h>. + */ +#undef NEED_BSD_STRINGS + +/* Define this if your system does not provide typedef size_t in any of the + * ANSI-standard places (stddef.h, stdlib.h, or stdio.h), but places it in + * <sys/types.h> instead.
+ */ +#undef NEED_SYS_TYPES_H + +/* For 80x86 machines, you need to define NEED_FAR_POINTERS, + * unless you are using a large-data memory model or 80386 flat-memory mode. + * On less brain-damaged CPUs this symbol must not be defined. + * (Defining this symbol causes large data structures to be referenced through + * "far" pointers and to be allocated with a special version of malloc.) + */ +#undef NEED_FAR_POINTERS + +/* Define this if your linker needs global names to be unique in less + * than the first 15 characters. + */ +#undef NEED_SHORT_EXTERNAL_NAMES + +/* Although a real ANSI C compiler can deal perfectly well with pointers to + * unspecified structures (see "incomplete types" in the spec), a few pre-ANSI + * and pseudo-ANSI compilers get confused. To keep one of these bozos happy, + * define INCOMPLETE_TYPES_BROKEN. This is not recommended unless you + * actually get "missing structure definition" warnings or errors while + * compiling the JPEG code. + */ +#undef INCOMPLETE_TYPES_BROKEN + +/* Define "boolean" as unsigned char, not int, on Windows systems. + */ +#ifdef _WIN32 +#ifndef __RPCNDR_H__ /* don't conflict if rpcndr.h already read */ +typedef unsigned char boolean; +#endif +#define HAVE_BOOLEAN /* prevent jmorecfg.h from redefining it */ +#endif + + +/* + * The following options affect code selection within the JPEG library, + * but they don't need to be visible to applications using the library. + * To minimize application namespace pollution, the symbols won't be + * defined unless JPEG_INTERNALS has been defined. + */ + +#ifdef JPEG_INTERNALS + +/* Define this if your compiler implements ">>" on signed values as a logical + * (unsigned) shift; leave it undefined if ">>" is a signed (arithmetic) shift, + * which is the normal and rational definition. + */ +#undef RIGHT_SHIFT_IS_UNSIGNED + + +#endif /* JPEG_INTERNALS */ + + +/* + * The remaining options do not affect the JPEG library proper, + * but only the sample applications cjpeg/djpeg (see cjpeg.c, djpeg.c). + * Other applications can ignore these. + */ + +#ifdef JPEG_CJPEG_DJPEG + +/* These defines indicate which image (non-JPEG) file formats are allowed. */ + +#define BMP_SUPPORTED /* BMP image file format */ +#define GIF_SUPPORTED /* GIF image file format */ +#define PPM_SUPPORTED /* PBMPLUS PPM/PGM image file format */ +#undef RLE_SUPPORTED /* Utah RLE image file format */ +#define TARGA_SUPPORTED /* Targa image file format */ + +/* Define this if you want to name both input and output files on the command + * line, rather than using stdout and optionally stdin. You MUST do this if + * your system can't cope with binary I/O to stdin/stdout. See comments at + * head of cjpeg.c or djpeg.c. + */ +#undef TWO_FILE_COMMANDLINE + +/* Define this if your system needs explicit cleanup of temporary files. + * This is crucial under MS-DOS, where the temporary "files" may be areas + * of extended memory; on most other systems it's not as important. + */ +#undef NEED_SIGNAL_CATCHER + +/* By default, we open image files with fopen(...,"rb") or fopen(...,"wb"). + * This is necessary on systems that distinguish text files from binary files, + * and is harmless on most systems that don't. If you have one of the rare + * systems that complains about the "b" spec, define this symbol. + */ +#undef DONT_USE_B_MODE + +/* Define this if you want percent-done progress reports from cjpeg/djpeg. 
+ */ +#undef PROGRESS_REPORT + +#endif /* JPEG_CJPEG_DJPEG */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jdata_conf_template.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jdata_conf_template.h new file mode 100644 index 000000000..9a59042ae --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jdata_conf_template.h @@ -0,0 +1,59 @@ +/** + ****************************************************************************** + * @file jdata_conf_template.h + * + * @author MCD Application Team + * @brief jdata_conf file template header file using FatFs API + * This file should be copied to the application "Inc" folder and modified + * as follows: + * - Rename it to 'jdata_conf.h'. + * - update the read/write functions defines (example of implementation is + * provided based on FatFs) + * + ****************************************************************************** + * + * Copyright (c) 2019 STMicroelectronics. All rights reserved. + * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** +**/ +/* Includes ------------------------------------------------------------------*/ +#include "ff.h" + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/* + Example of implementation based on FatFS + + If JFREAD/JFWRITE prototypes are complient with the standard FatFS APIs (f_read/f_write) + only APIs re-definition is needed and no need to wrapper APIs defined in "jdata_conf_template.c" file + + #define JFREAD f_read + #define JFWRITE f_write + +*/ + + +#define JFILE FIL + +#define JMALLOC malloc +#define JFREE free + +size_t read_file (JFILE *file, uint8_t *buf, uint32_t sizeofbuf); +size_t write_file (JFILE *file, uint8_t *buf, uint32_t sizeofbuf) ; + +#define JFREAD(file,buf,sizeofbuf) \ + read_file (file,buf,sizeofbuf) + +#define JFWRITE(file,buf,sizeofbuf) \ + write_file (file,buf,sizeofbuf) + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jdct.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jdct.h new file mode 100644 index 000000000..360dec80c --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jdct.h @@ -0,0 +1,393 @@ +/* + * jdct.h + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This include file contains common declarations for the forward and + * inverse DCT modules. These declarations are private to the DCT managers + * (jcdctmgr.c, jddctmgr.c) and the individual DCT algorithms. + * The individual DCT algorithms are kept in separate files to ease + * machine-dependent tuning (e.g., assembly coding). 
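The jdata_conf_template.h header above only declares the read_file()/write_file() wrappers that JFREAD/JFWRITE expand to; their definitions belong to the application (typically a jdata_conf.c copied next to the renamed header). The following is a minimal editorial sketch, not part of the committed sources, assuming the standard FatFs f_read()/f_write() calls and JFILE defined as FIL:

#include <stddef.h>
#include <stdint.h>
#include "ff.h"

/* Hypothetical jdata_conf.c-style wrappers; the names follow the prototypes
 * declared in jdata_conf_template.h. */
size_t read_file(FIL *file, uint8_t *buf, uint32_t sizeofbuf)
{
    UINT bytes = 0;
    /* FatFs reports the number of bytes actually transferred via 'bytes' */
    if (f_read(file, buf, sizeofbuf, &bytes) != FR_OK) {
        bytes = 0;
    }
    return (size_t) bytes;
}

size_t write_file(FIL *file, uint8_t *buf, uint32_t sizeofbuf)
{
    UINT bytes = 0;
    if (f_write(file, buf, sizeofbuf, &bytes) != FR_OK) {
        bytes = 0;
    }
    return (size_t) bytes;
}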
+ */ + + +/* + * A forward DCT routine is given a pointer to an input sample array and + * a pointer to a work area of type DCTELEM[]; the DCT is to be performed + * in-place in that buffer. Type DCTELEM is int for 8-bit samples, INT32 + * for 12-bit samples. (NOTE: Floating-point DCT implementations use an + * array of type FAST_FLOAT, instead.) + * The input data is to be fetched from the sample array starting at a + * specified column. (Any row offset needed will be applied to the array + * pointer before it is passed to the FDCT code.) + * Note that the number of samples fetched by the FDCT routine is + * DCT_h_scaled_size * DCT_v_scaled_size. + * The DCT outputs are returned scaled up by a factor of 8; they therefore + * have a range of +-8K for 8-bit data, +-128K for 12-bit data. This + * convention improves accuracy in integer implementations and saves some + * work in floating-point ones. + * Quantization of the output coefficients is done by jcdctmgr.c. + */ + +#if BITS_IN_JSAMPLE == 8 +typedef int DCTELEM; /* 16 or 32 bits is fine */ +#else +typedef INT32 DCTELEM; /* must have 32 bits */ +#endif + +typedef JMETHOD(void, forward_DCT_method_ptr, (DCTELEM * data, + JSAMPARRAY sample_data, + JDIMENSION start_col)); +typedef JMETHOD(void, float_DCT_method_ptr, (FAST_FLOAT * data, + JSAMPARRAY sample_data, + JDIMENSION start_col)); + + +/* + * An inverse DCT routine is given a pointer to the input JBLOCK and a pointer + * to an output sample array. The routine must dequantize the input data as + * well as perform the IDCT; for dequantization, it uses the multiplier table + * pointed to by compptr->dct_table. The output data is to be placed into the + * sample array starting at a specified column. (Any row offset needed will + * be applied to the array pointer before it is passed to the IDCT code.) + * Note that the number of samples emitted by the IDCT routine is + * DCT_h_scaled_size * DCT_v_scaled_size. + */ + +/* typedef inverse_DCT_method_ptr is declared in jpegint.h */ + +/* + * Each IDCT routine has its own ideas about the best dct_table element type. + */ + +typedef MULTIPLIER ISLOW_MULT_TYPE; /* short or int, whichever is faster */ +#if BITS_IN_JSAMPLE == 8 +typedef MULTIPLIER IFAST_MULT_TYPE; /* 16 bits is OK, use short if faster */ +#define IFAST_SCALE_BITS 2 /* fractional bits in scale factors */ +#else +typedef INT32 IFAST_MULT_TYPE; /* need 32 bits for scaled quantizers */ +#define IFAST_SCALE_BITS 13 /* fractional bits in scale factors */ +#endif +typedef FAST_FLOAT FLOAT_MULT_TYPE; /* preferred floating type */ + + +/* + * Each IDCT routine is responsible for range-limiting its results and + * converting them to unsigned form (0..MAXJSAMPLE). The raw outputs could + * be quite far out of range if the input data is corrupt, so a bulletproof + * range-limiting step is required. We use a mask-and-table-lookup method + * to do the combined operations quickly. See the comments with + * prepare_range_limit_table (in jdmaster.c) for more info. + */ + +#define IDCT_range_limit(cinfo) ((cinfo)->sample_range_limit + CENTERJSAMPLE) + +#define RANGE_MASK (MAXJSAMPLE * 4 + 3) /* 2 bits wider than legal samples */ + + +/* Short forms of external names for systems with brain-damaged linkers. 
*/ + +#ifdef NEED_SHORT_EXTERNAL_NAMES +#define jpeg_fdct_islow jFDislow +#define jpeg_fdct_ifast jFDifast +#define jpeg_fdct_float jFDfloat +#define jpeg_fdct_7x7 jFD7x7 +#define jpeg_fdct_6x6 jFD6x6 +#define jpeg_fdct_5x5 jFD5x5 +#define jpeg_fdct_4x4 jFD4x4 +#define jpeg_fdct_3x3 jFD3x3 +#define jpeg_fdct_2x2 jFD2x2 +#define jpeg_fdct_1x1 jFD1x1 +#define jpeg_fdct_9x9 jFD9x9 +#define jpeg_fdct_10x10 jFD10x10 +#define jpeg_fdct_11x11 jFD11x11 +#define jpeg_fdct_12x12 jFD12x12 +#define jpeg_fdct_13x13 jFD13x13 +#define jpeg_fdct_14x14 jFD14x14 +#define jpeg_fdct_15x15 jFD15x15 +#define jpeg_fdct_16x16 jFD16x16 +#define jpeg_fdct_16x8 jFD16x8 +#define jpeg_fdct_14x7 jFD14x7 +#define jpeg_fdct_12x6 jFD12x6 +#define jpeg_fdct_10x5 jFD10x5 +#define jpeg_fdct_8x4 jFD8x4 +#define jpeg_fdct_6x3 jFD6x3 +#define jpeg_fdct_4x2 jFD4x2 +#define jpeg_fdct_2x1 jFD2x1 +#define jpeg_fdct_8x16 jFD8x16 +#define jpeg_fdct_7x14 jFD7x14 +#define jpeg_fdct_6x12 jFD6x12 +#define jpeg_fdct_5x10 jFD5x10 +#define jpeg_fdct_4x8 jFD4x8 +#define jpeg_fdct_3x6 jFD3x6 +#define jpeg_fdct_2x4 jFD2x4 +#define jpeg_fdct_1x2 jFD1x2 +#define jpeg_idct_islow jRDislow +#define jpeg_idct_ifast jRDifast +#define jpeg_idct_float jRDfloat +#define jpeg_idct_7x7 jRD7x7 +#define jpeg_idct_6x6 jRD6x6 +#define jpeg_idct_5x5 jRD5x5 +#define jpeg_idct_4x4 jRD4x4 +#define jpeg_idct_3x3 jRD3x3 +#define jpeg_idct_2x2 jRD2x2 +#define jpeg_idct_1x1 jRD1x1 +#define jpeg_idct_9x9 jRD9x9 +#define jpeg_idct_10x10 jRD10x10 +#define jpeg_idct_11x11 jRD11x11 +#define jpeg_idct_12x12 jRD12x12 +#define jpeg_idct_13x13 jRD13x13 +#define jpeg_idct_14x14 jRD14x14 +#define jpeg_idct_15x15 jRD15x15 +#define jpeg_idct_16x16 jRD16x16 +#define jpeg_idct_16x8 jRD16x8 +#define jpeg_idct_14x7 jRD14x7 +#define jpeg_idct_12x6 jRD12x6 +#define jpeg_idct_10x5 jRD10x5 +#define jpeg_idct_8x4 jRD8x4 +#define jpeg_idct_6x3 jRD6x3 +#define jpeg_idct_4x2 jRD4x2 +#define jpeg_idct_2x1 jRD2x1 +#define jpeg_idct_8x16 jRD8x16 +#define jpeg_idct_7x14 jRD7x14 +#define jpeg_idct_6x12 jRD6x12 +#define jpeg_idct_5x10 jRD5x10 +#define jpeg_idct_4x8 jRD4x8 +#define jpeg_idct_3x6 jRD3x6 +#define jpeg_idct_2x4 jRD2x4 +#define jpeg_idct_1x2 jRD1x2 +#endif /* NEED_SHORT_EXTERNAL_NAMES */ + +/* Extern declarations for the forward and inverse DCT routines.
*/ + +EXTERN(void) jpeg_fdct_islow + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_ifast + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_float + JPP((FAST_FLOAT * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_7x7 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_6x6 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_5x5 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_4x4 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_3x3 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_2x2 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_1x1 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_9x9 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_10x10 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_11x11 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_12x12 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_13x13 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_14x14 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_15x15 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_16x16 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_16x8 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_14x7 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_12x6 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_10x5 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_8x4 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_6x3 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_4x2 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_2x1 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_8x16 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_7x14 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_6x12 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_5x10 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_4x8 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_3x6 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_2x4 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); +EXTERN(void) jpeg_fdct_1x2 + JPP((DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)); + +EXTERN(void) jpeg_idct_islow + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION 
output_col)); +EXTERN(void) jpeg_idct_ifast + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_float + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_7x7 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_6x6 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_5x5 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_4x4 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_3x3 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_2x2 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_1x1 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_9x9 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_10x10 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_11x11 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_12x12 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_13x13 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_14x14 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_15x15 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_16x16 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_16x8 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_14x7 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_12x6 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_10x5 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_8x4 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, 
JDIMENSION output_col)); +EXTERN(void) jpeg_idct_6x3 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_4x2 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_2x1 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_8x16 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_7x14 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_6x12 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_5x10 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_4x8 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_3x6 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_2x4 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); +EXTERN(void) jpeg_idct_1x2 + JPP((j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col)); + + +/* + * Macros for handling fixed-point arithmetic; these are used by many + * but not all of the DCT/IDCT modules. + * + * All values are expected to be of type INT32. + * Fractional constants are scaled left by CONST_BITS bits. + * CONST_BITS is defined within each module using these macros, + * and may differ from one module to the next. + */ + +#define ONE ((INT32) 1) +#define CONST_SCALE (ONE << CONST_BITS) + +/* Convert a positive real constant to an integer scaled by CONST_SCALE. + * Caution: some C compilers fail to reduce "FIX(constant)" at compile time, + * thus causing a lot of useless floating-point operations at run time. + */ + +#define FIX(x) ((INT32) ((x) * CONST_SCALE + 0.5)) + +/* Descale and correctly round an INT32 value that's scaled by N bits. + * We assume RIGHT_SHIFT rounds towards minus infinity, so adding + * the fudge factor is correct for either sign of X. + */ + +#define DESCALE(x,n) RIGHT_SHIFT((x) + (ONE << ((n)-1)), n) + +/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result. + * This macro is used only when the two inputs will actually be no more than + * 16 bits wide, so that a 16x16->32 bit multiply can be used instead of a + * full 32x32 multiply. This provides a useful speedup on many machines. + * Unfortunately there is no way to specify a 16x16->32 multiply portably + * in C, but some C compilers will do the right thing if you provide the + * correct combination of casts. 
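To make the fixed-point convention defined above concrete (an editorial sketch, not part of the committed sources): FIX() turns a real constant into an integer scaled by CONST_SCALE, and DESCALE() rounds the product back down. CONST_BITS = 13 and an arithmetic-shift RIGHT_SHIFT are assumptions matching the usual integer-DCT setup; in the library itself RIGHT_SHIFT comes from jpegint.h and CONST_BITS is chosen per module.

#include <stdio.h>

typedef long INT32;                       /* assumed 32-bit (or wider) signed type */
#define CONST_BITS  13                    /* per-module choice, as noted above */
#define RIGHT_SHIFT(x,shft)  ((x) >> (shft))  /* assumes ">>" is an arithmetic shift */
#define ONE          ((INT32) 1)
#define CONST_SCALE  (ONE << CONST_BITS)
#define FIX(x)       ((INT32) ((x) * CONST_SCALE + 0.5))
#define DESCALE(x,n) RIGHT_SHIFT((x) + (ONE << ((n)-1)), n)

int main(void)
{
    INT32 c = FIX(0.707106781);             /* 0.7071... * 2^13, rounded -> 5793 */
    INT32 v = DESCALE(100 * c, CONST_BITS); /* 100 * 0.7071..., rounded -> 71 */
    printf("scaled constant = %ld, descaled product = %ld\n", (long) c, (long) v);
    return 0;
}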
+ */ + +#ifdef SHORTxSHORT_32 /* may work if 'int' is 32 bits */ +#define MULTIPLY16C16(var,const) (((INT16) (var)) * ((INT16) (const))) +#endif +#ifdef SHORTxLCONST_32 /* known to work with Microsoft C 6.0 */ +#define MULTIPLY16C16(var,const) (((INT16) (var)) * ((INT32) (const))) +#endif + +#ifndef MULTIPLY16C16 /* default definition */ +#define MULTIPLY16C16(var,const) ((var) * (const)) +#endif + +/* Same except both inputs are variables. */ + +#ifdef SHORTxSHORT_32 /* may work if 'int' is 32 bits */ +#define MULTIPLY16V16(var1,var2) (((INT16) (var1)) * ((INT16) (var2))) +#endif + +#ifndef MULTIPLY16V16 /* default definition */ +#define MULTIPLY16V16(var1,var2) ((var1) * (var2)) +#endif diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jerror.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jerror.h new file mode 100644 index 000000000..1cfb2b19d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jerror.h @@ -0,0 +1,304 @@ +/* + * jerror.h + * + * Copyright (C) 1994-1997, Thomas G. Lane. + * Modified 1997-2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file defines the error and message codes for the JPEG library. + * Edit this file to add new codes, or to translate the message strings to + * some other language. + * A set of error-reporting macros are defined too. Some applications using + * the JPEG library may wish to include this file to get the error codes + * and/or the macros. + */ + +/* + * To define the enum list of message codes, include this file without + * defining macro JMESSAGE. To create a message string table, include it + * again with a suitable JMESSAGE definition (see jerror.c for an example). + */ +#ifndef JMESSAGE +#ifndef JERROR_H +/* First time through, define the enum list */ +#define JMAKE_ENUM_LIST +#else +/* Repeated inclusions of this file are no-ops unless JMESSAGE is defined */ +#define JMESSAGE(code,string) +#endif /* JERROR_H */ +#endif /* JMESSAGE */ + +#ifdef JMAKE_ENUM_LIST + +typedef enum { + +#define JMESSAGE(code,string) code , + +#endif /* JMAKE_ENUM_LIST */ + +JMESSAGE(JMSG_NOMESSAGE, "Bogus message code %d") /* Must be first entry! 
*/ + +/* For maintenance convenience, list is alphabetical by message code name */ +JMESSAGE(JERR_BAD_ALIGN_TYPE, "ALIGN_TYPE is wrong, please fix") +JMESSAGE(JERR_BAD_ALLOC_CHUNK, "MAX_ALLOC_CHUNK is wrong, please fix") +JMESSAGE(JERR_BAD_BUFFER_MODE, "Bogus buffer control mode") +JMESSAGE(JERR_BAD_COMPONENT_ID, "Invalid component ID %d in SOS") +JMESSAGE(JERR_BAD_CROP_SPEC, "Invalid crop request") +JMESSAGE(JERR_BAD_DCT_COEF, "DCT coefficient out of range") +JMESSAGE(JERR_BAD_DCTSIZE, "DCT scaled block size %dx%d not supported") +JMESSAGE(JERR_BAD_DROP_SAMPLING, + "Component index %d: mismatching sampling ratio %d:%d, %d:%d, %c") +JMESSAGE(JERR_BAD_HUFF_TABLE, "Bogus Huffman table definition") +JMESSAGE(JERR_BAD_IN_COLORSPACE, "Bogus input colorspace") +JMESSAGE(JERR_BAD_J_COLORSPACE, "Bogus JPEG colorspace") +JMESSAGE(JERR_BAD_LENGTH, "Bogus marker length") +JMESSAGE(JERR_BAD_LIB_VERSION, + "Wrong JPEG library version: library is %d, caller expects %d") +JMESSAGE(JERR_BAD_MCU_SIZE, "Sampling factors too large for interleaved scan") +JMESSAGE(JERR_BAD_POOL_ID, "Invalid memory pool code %d") +JMESSAGE(JERR_BAD_PRECISION, "Unsupported JPEG data precision %d") +JMESSAGE(JERR_BAD_PROGRESSION, + "Invalid progressive parameters Ss=%d Se=%d Ah=%d Al=%d") +JMESSAGE(JERR_BAD_PROG_SCRIPT, + "Invalid progressive parameters at scan script entry %d") +JMESSAGE(JERR_BAD_SAMPLING, "Bogus sampling factors") +JMESSAGE(JERR_BAD_SCAN_SCRIPT, "Invalid scan script at entry %d") +JMESSAGE(JERR_BAD_STATE, "Improper call to JPEG library in state %d") +JMESSAGE(JERR_BAD_STRUCT_SIZE, + "JPEG parameter struct mismatch: library thinks size is %u, caller expects %u") +JMESSAGE(JERR_BAD_VIRTUAL_ACCESS, "Bogus virtual array access") +JMESSAGE(JERR_BUFFER_SIZE, "Buffer passed to JPEG library is too small") +JMESSAGE(JERR_CANT_SUSPEND, "Suspension not allowed here") +JMESSAGE(JERR_CCIR601_NOTIMPL, "CCIR601 sampling not implemented yet") +JMESSAGE(JERR_COMPONENT_COUNT, "Too many color components: %d, max %d") +JMESSAGE(JERR_CONVERSION_NOTIMPL, "Unsupported color conversion request") +JMESSAGE(JERR_DAC_INDEX, "Bogus DAC index %d") +JMESSAGE(JERR_DAC_VALUE, "Bogus DAC value 0x%x") +JMESSAGE(JERR_DHT_INDEX, "Bogus DHT index %d") +JMESSAGE(JERR_DQT_INDEX, "Bogus DQT index %d") +JMESSAGE(JERR_EMPTY_IMAGE, "Empty JPEG image (DNL not supported)") +JMESSAGE(JERR_EMS_READ, "Read from EMS failed") +JMESSAGE(JERR_EMS_WRITE, "Write to EMS failed") +JMESSAGE(JERR_EOI_EXPECTED, "Didn't expect more than one scan") +JMESSAGE(JERR_FILE_READ, "Input file read error") +JMESSAGE(JERR_FILE_WRITE, "Output file write error --- out of disk space?") +JMESSAGE(JERR_FRACT_SAMPLE_NOTIMPL, "Fractional sampling not implemented yet") +JMESSAGE(JERR_HUFF_CLEN_OVERFLOW, "Huffman code size table overflow") +JMESSAGE(JERR_HUFF_MISSING_CODE, "Missing Huffman code table entry") +JMESSAGE(JERR_IMAGE_TOO_BIG, "Maximum supported image dimension is %u pixels") +JMESSAGE(JERR_INPUT_EMPTY, "Empty input file") +JMESSAGE(JERR_INPUT_EOF, "Premature end of input file") +JMESSAGE(JERR_MISMATCHED_QUANT_TABLE, + "Cannot transcode due to multiple use of quantization table %d") +JMESSAGE(JERR_MISSING_DATA, "Scan script does not transmit all data") +JMESSAGE(JERR_MODE_CHANGE, "Invalid color quantization mode change") +JMESSAGE(JERR_NOTIMPL, "Not implemented yet") +JMESSAGE(JERR_NOT_COMPILED, "Requested feature was omitted at compile time") +JMESSAGE(JERR_NO_ARITH_TABLE, "Arithmetic table 0x%02x was not defined") +JMESSAGE(JERR_NO_BACKING_STORE, "Backing store not 
supported") +JMESSAGE(JERR_NO_HUFF_TABLE, "Huffman table 0x%02x was not defined") +JMESSAGE(JERR_NO_IMAGE, "JPEG datastream contains no image") +JMESSAGE(JERR_NO_QUANT_TABLE, "Quantization table 0x%02x was not defined") +JMESSAGE(JERR_NO_SOI, "Not a JPEG file: starts with 0x%02x 0x%02x") +JMESSAGE(JERR_OUT_OF_MEMORY, "Insufficient memory (case %d)") +JMESSAGE(JERR_QUANT_COMPONENTS, + "Cannot quantize more than %d color components") +JMESSAGE(JERR_QUANT_FEW_COLORS, "Cannot quantize to fewer than %d colors") +JMESSAGE(JERR_QUANT_MANY_COLORS, "Cannot quantize to more than %d colors") +JMESSAGE(JERR_SOF_DUPLICATE, "Invalid JPEG file structure: two SOF markers") +JMESSAGE(JERR_SOF_NO_SOS, "Invalid JPEG file structure: missing SOS marker") +JMESSAGE(JERR_SOF_UNSUPPORTED, "Unsupported JPEG process: SOF type 0x%02x") +JMESSAGE(JERR_SOI_DUPLICATE, "Invalid JPEG file structure: two SOI markers") +JMESSAGE(JERR_SOS_NO_SOF, "Invalid JPEG file structure: SOS before SOF") +JMESSAGE(JERR_TFILE_CREATE, "Failed to create temporary file %s") +JMESSAGE(JERR_TFILE_READ, "Read failed on temporary file") +JMESSAGE(JERR_TFILE_SEEK, "Seek failed on temporary file") +JMESSAGE(JERR_TFILE_WRITE, + "Write failed on temporary file --- out of disk space?") +JMESSAGE(JERR_TOO_LITTLE_DATA, "Application transferred too few scanlines") +JMESSAGE(JERR_UNKNOWN_MARKER, "Unsupported marker type 0x%02x") +JMESSAGE(JERR_VIRTUAL_BUG, "Virtual array controller messed up") +JMESSAGE(JERR_WIDTH_OVERFLOW, "Image too wide for this implementation") +JMESSAGE(JERR_XMS_READ, "Read from XMS failed") +JMESSAGE(JERR_XMS_WRITE, "Write to XMS failed") +JMESSAGE(JMSG_COPYRIGHT, JCOPYRIGHT) +JMESSAGE(JMSG_VERSION, JVERSION) +JMESSAGE(JTRC_16BIT_TABLES, + "Caution: quantization tables are too coarse for baseline JPEG") +JMESSAGE(JTRC_ADOBE, + "Adobe APP14 marker: version %d, flags 0x%04x 0x%04x, transform %d") +JMESSAGE(JTRC_APP0, "Unknown APP0 marker (not JFIF), length %u") +JMESSAGE(JTRC_APP14, "Unknown APP14 marker (not Adobe), length %u") +JMESSAGE(JTRC_DAC, "Define Arithmetic Table 0x%02x: 0x%02x") +JMESSAGE(JTRC_DHT, "Define Huffman Table 0x%02x") +JMESSAGE(JTRC_DQT, "Define Quantization Table %d precision %d") +JMESSAGE(JTRC_DRI, "Define Restart Interval %u") +JMESSAGE(JTRC_EMS_CLOSE, "Freed EMS handle %u") +JMESSAGE(JTRC_EMS_OPEN, "Obtained EMS handle %u") +JMESSAGE(JTRC_EOI, "End Of Image") +JMESSAGE(JTRC_HUFFBITS, " %3d %3d %3d %3d %3d %3d %3d %3d") +JMESSAGE(JTRC_JFIF, "JFIF APP0 marker: version %d.%02d, density %dx%d %d") +JMESSAGE(JTRC_JFIF_BADTHUMBNAILSIZE, + "Warning: thumbnail image size does not match data length %u") +JMESSAGE(JTRC_JFIF_EXTENSION, + "JFIF extension marker: type 0x%02x, length %u") +JMESSAGE(JTRC_JFIF_THUMBNAIL, " with %d x %d thumbnail image") +JMESSAGE(JTRC_MISC_MARKER, "Miscellaneous marker 0x%02x, length %u") +JMESSAGE(JTRC_PARMLESS_MARKER, "Unexpected marker 0x%02x") +JMESSAGE(JTRC_QUANTVALS, " %4u %4u %4u %4u %4u %4u %4u %4u") +JMESSAGE(JTRC_QUANT_3_NCOLORS, "Quantizing to %d = %d*%d*%d colors") +JMESSAGE(JTRC_QUANT_NCOLORS, "Quantizing to %d colors") +JMESSAGE(JTRC_QUANT_SELECTED, "Selected %d colors for quantization") +JMESSAGE(JTRC_RECOVERY_ACTION, "At marker 0x%02x, recovery action %d") +JMESSAGE(JTRC_RST, "RST%d") +JMESSAGE(JTRC_SMOOTH_NOTIMPL, + "Smoothing not supported with nonstandard sampling ratios") +JMESSAGE(JTRC_SOF, "Start Of Frame 0x%02x: width=%u, height=%u, components=%d") +JMESSAGE(JTRC_SOF_COMPONENT, " Component %d: %dhx%dv q=%d") +JMESSAGE(JTRC_SOI, "Start of Image") +JMESSAGE(JTRC_SOS, 
"Start Of Scan: %d components") +JMESSAGE(JTRC_SOS_COMPONENT, " Component %d: dc=%d ac=%d") +JMESSAGE(JTRC_SOS_PARAMS, " Ss=%d, Se=%d, Ah=%d, Al=%d") +JMESSAGE(JTRC_TFILE_CLOSE, "Closed temporary file %s") +JMESSAGE(JTRC_TFILE_OPEN, "Opened temporary file %s") +JMESSAGE(JTRC_THUMB_JPEG, + "JFIF extension marker: JPEG-compressed thumbnail image, length %u") +JMESSAGE(JTRC_THUMB_PALETTE, + "JFIF extension marker: palette thumbnail image, length %u") +JMESSAGE(JTRC_THUMB_RGB, + "JFIF extension marker: RGB thumbnail image, length %u") +JMESSAGE(JTRC_UNKNOWN_IDS, + "Unrecognized component IDs %d %d %d, assuming YCbCr") +JMESSAGE(JTRC_XMS_CLOSE, "Freed XMS handle %u") +JMESSAGE(JTRC_XMS_OPEN, "Obtained XMS handle %u") +JMESSAGE(JWRN_ADOBE_XFORM, "Unknown Adobe color transform code %d") +JMESSAGE(JWRN_ARITH_BAD_CODE, "Corrupt JPEG data: bad arithmetic code") +JMESSAGE(JWRN_BOGUS_PROGRESSION, + "Inconsistent progression sequence for component %d coefficient %d") +JMESSAGE(JWRN_EXTRANEOUS_DATA, + "Corrupt JPEG data: %u extraneous bytes before marker 0x%02x") +JMESSAGE(JWRN_HIT_MARKER, "Corrupt JPEG data: premature end of data segment") +JMESSAGE(JWRN_HUFF_BAD_CODE, "Corrupt JPEG data: bad Huffman code") +JMESSAGE(JWRN_JFIF_MAJOR, "Warning: unknown JFIF revision number %d.%02d") +JMESSAGE(JWRN_JPEG_EOF, "Premature end of JPEG file") +JMESSAGE(JWRN_MUST_RESYNC, + "Corrupt JPEG data: found marker 0x%02x instead of RST%d") +JMESSAGE(JWRN_NOT_SEQUENTIAL, "Invalid SOS parameters for sequential JPEG") +JMESSAGE(JWRN_TOO_MUCH_DATA, "Application transferred too many scanlines") + +#ifdef JMAKE_ENUM_LIST + + JMSG_LASTMSGCODE +} J_MESSAGE_CODE; + +#undef JMAKE_ENUM_LIST +#endif /* JMAKE_ENUM_LIST */ + +/* Zap JMESSAGE macro so that future re-inclusions do nothing by default */ +#undef JMESSAGE + + +#ifndef JERROR_H +#define JERROR_H + +/* Macros to simplify using the error and trace message stuff */ +/* The first parameter is either type of cinfo pointer */ + +/* Fatal errors (print message and exit) */ +#define ERREXIT(cinfo,code) \ + ((cinfo)->err->msg_code = (code), \ + (*(cinfo)->err->error_exit) ((j_common_ptr) (cinfo))) +#define ERREXIT1(cinfo,code,p1) \ + ((cinfo)->err->msg_code = (code), \ + (cinfo)->err->msg_parm.i[0] = (p1), \ + (*(cinfo)->err->error_exit) ((j_common_ptr) (cinfo))) +#define ERREXIT2(cinfo,code,p1,p2) \ + ((cinfo)->err->msg_code = (code), \ + (cinfo)->err->msg_parm.i[0] = (p1), \ + (cinfo)->err->msg_parm.i[1] = (p2), \ + (*(cinfo)->err->error_exit) ((j_common_ptr) (cinfo))) +#define ERREXIT3(cinfo,code,p1,p2,p3) \ + ((cinfo)->err->msg_code = (code), \ + (cinfo)->err->msg_parm.i[0] = (p1), \ + (cinfo)->err->msg_parm.i[1] = (p2), \ + (cinfo)->err->msg_parm.i[2] = (p3), \ + (*(cinfo)->err->error_exit) ((j_common_ptr) (cinfo))) +#define ERREXIT4(cinfo,code,p1,p2,p3,p4) \ + ((cinfo)->err->msg_code = (code), \ + (cinfo)->err->msg_parm.i[0] = (p1), \ + (cinfo)->err->msg_parm.i[1] = (p2), \ + (cinfo)->err->msg_parm.i[2] = (p3), \ + (cinfo)->err->msg_parm.i[3] = (p4), \ + (*(cinfo)->err->error_exit) ((j_common_ptr) (cinfo))) +#define ERREXIT6(cinfo,code,p1,p2,p3,p4,p5,p6) \ + ((cinfo)->err->msg_code = (code), \ + (cinfo)->err->msg_parm.i[0] = (p1), \ + (cinfo)->err->msg_parm.i[1] = (p2), \ + (cinfo)->err->msg_parm.i[2] = (p3), \ + (cinfo)->err->msg_parm.i[3] = (p4), \ + (cinfo)->err->msg_parm.i[4] = (p5), \ + (cinfo)->err->msg_parm.i[5] = (p6), \ + (*(cinfo)->err->error_exit) ((j_common_ptr) (cinfo))) +#define ERREXITS(cinfo,code,str) \ + ((cinfo)->err->msg_code = (code), \ + 
strncpy((cinfo)->err->msg_parm.s, (str), JMSG_STR_PARM_MAX), \ + (*(cinfo)->err->error_exit) ((j_common_ptr) (cinfo))) + +#define MAKESTMT(stuff) do { stuff } while (0) + +/* Nonfatal errors (we can keep going, but the data is probably corrupt) */ +#define WARNMS(cinfo,code) \ + ((cinfo)->err->msg_code = (code), \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), -1)) +#define WARNMS1(cinfo,code,p1) \ + ((cinfo)->err->msg_code = (code), \ + (cinfo)->err->msg_parm.i[0] = (p1), \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), -1)) +#define WARNMS2(cinfo,code,p1,p2) \ + ((cinfo)->err->msg_code = (code), \ + (cinfo)->err->msg_parm.i[0] = (p1), \ + (cinfo)->err->msg_parm.i[1] = (p2), \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), -1)) + +/* Informational/debugging messages */ +#define TRACEMS(cinfo,lvl,code) \ + ((cinfo)->err->msg_code = (code), \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), (lvl))) +#define TRACEMS1(cinfo,lvl,code,p1) \ + ((cinfo)->err->msg_code = (code), \ + (cinfo)->err->msg_parm.i[0] = (p1), \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), (lvl))) +#define TRACEMS2(cinfo,lvl,code,p1,p2) \ + ((cinfo)->err->msg_code = (code), \ + (cinfo)->err->msg_parm.i[0] = (p1), \ + (cinfo)->err->msg_parm.i[1] = (p2), \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), (lvl))) +#define TRACEMS3(cinfo,lvl,code,p1,p2,p3) \ + MAKESTMT(int * _mp = (cinfo)->err->msg_parm.i; \ + _mp[0] = (p1); _mp[1] = (p2); _mp[2] = (p3); \ + (cinfo)->err->msg_code = (code); \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), (lvl)); ) +#define TRACEMS4(cinfo,lvl,code,p1,p2,p3,p4) \ + MAKESTMT(int * _mp = (cinfo)->err->msg_parm.i; \ + _mp[0] = (p1); _mp[1] = (p2); _mp[2] = (p3); _mp[3] = (p4); \ + (cinfo)->err->msg_code = (code); \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), (lvl)); ) +#define TRACEMS5(cinfo,lvl,code,p1,p2,p3,p4,p5) \ + MAKESTMT(int * _mp = (cinfo)->err->msg_parm.i; \ + _mp[0] = (p1); _mp[1] = (p2); _mp[2] = (p3); _mp[3] = (p4); \ + _mp[4] = (p5); \ + (cinfo)->err->msg_code = (code); \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), (lvl)); ) +#define TRACEMS8(cinfo,lvl,code,p1,p2,p3,p4,p5,p6,p7,p8) \ + MAKESTMT(int * _mp = (cinfo)->err->msg_parm.i; \ + _mp[0] = (p1); _mp[1] = (p2); _mp[2] = (p3); _mp[3] = (p4); \ + _mp[4] = (p5); _mp[5] = (p6); _mp[6] = (p7); _mp[7] = (p8); \ + (cinfo)->err->msg_code = (code); \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), (lvl)); ) +#define TRACEMSS(cinfo,lvl,code,str) \ + ((cinfo)->err->msg_code = (code), \ + strncpy((cinfo)->err->msg_parm.s, (str), JMSG_STR_PARM_MAX), \ + (*(cinfo)->err->emit_message) ((j_common_ptr) (cinfo), (lvl))) + +#endif /* JERROR_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jinclude.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jinclude.h new file mode 100644 index 000000000..15159807b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jinclude.h @@ -0,0 +1,82 @@ +/* + * jinclude.h + * + * Copyright (C) 1991-1994, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file exists to provide a single place to fix any problems with + * including the wrong system include files. (Common problems are taken + * care of by the standard jconfig symbols, but on really weird systems + * you may have to edit this file.) 
+ * + * NOTE: this file is NOT intended to be included by applications using the + * JPEG library. Most applications need only include jpeglib.h. + */ + + +/* Include auto-config file to find out which system include files we need. */ + +#include "jconfig.h" /* auto configuration options */ +#define JCONFIG_INCLUDED /* so that jpeglib.h doesn't do it again */ + + + +/* + * We need the NULL macro and size_t typedef. + * On an ANSI-conforming system it is sufficient to include <stddef.h>. + * Otherwise, we get them from <stdlib.h> or <stdio.h>; we may have to + * pull in <sys/types.h> as well. + * Note that the core JPEG library does not require <stdio.h>; + * only the default error handler and data source/destination modules do. + * But we must pull it in because of the references to JFILE in jpeglib.h. + * You can remove those references if you want to compile without <stdio.h>. + */ + +#ifdef HAVE_STDDEF_H +#include <stddef.h> +#endif + +#ifdef HAVE_STDLIB_H +#include <stdlib.h> +#endif + +#ifdef NEED_SYS_TYPES_H +#include <sys/types.h> +#endif + +#include <stdio.h> + +/* + * We need memory copying and zeroing functions, plus strncpy(). + * ANSI and System V implementations declare these in <string.h>. + * BSD doesn't have the mem() functions, but it does have bcopy()/bzero(). + * Some systems may declare memset and memcpy in <memory.h>. + * + * NOTE: we assume the size parameters to these functions are of type size_t. + * Change the casts in these macros if not! + */ + +#ifdef NEED_BSD_STRINGS + +#include <strings.h> +#define MEMZERO(target,size) bzero((void *)(target), (size_t)(size)) +#define MEMCOPY(dest,src,size) bcopy((const void *)(src), (void *)(dest), (size_t)(size)) + +#else /* not BSD, assume ANSI/SysV string lib */ + +#include <string.h> +#define MEMZERO(target,size) memset((void *)(target), 0, (size_t)(size)) +#define MEMCOPY(dest,src,size) memcpy((void *)(dest), (const void *)(src), (size_t)(size)) + +#endif + +/* + * In ANSI C, and indeed any rational implementation, size_t is also the + * type returned by sizeof(). However, it seems there are some irrational + * implementations out there, in which sizeof() returns an int even though + * size_t is defined as long or unsigned long. To ensure consistent results + * we always use this SIZEOF() macro in place of using sizeof() directly. + */ + +#define SIZEOF(object) ((size_t) sizeof(object)) diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jmemsys.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jmemsys.h new file mode 100644 index 000000000..10298efd7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jmemsys.h @@ -0,0 +1,201 @@ +/* + * jmemsys.h + * + * Copyright (C) 1992-1997, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This include file defines the interface between the system-independent + * and system-dependent portions of the JPEG memory manager. No other + * modules need include it. (The system-independent portion is jmemmgr.c; + * there are several different versions of the system-dependent portion.) + * + * This file works as-is for the system-dependent memory managers supplied + * in the IJG distribution. You may need to modify it if you write a + * custom memory manager. If system-dependent changes are needed in + * this file, the best method is to #ifdef them based on a configuration + * symbol supplied in jconfig.h, as we have done with USE_MSDOS_MEMMGR + * and USE_MAC_MEMMGR.
+ */ + + +/* Short forms of external names for systems with brain-damaged linkers. */ + +#ifdef NEED_SHORT_EXTERNAL_NAMES +#define jpeg_get_small jGetSmall +#define jpeg_free_small jFreeSmall +#define jpeg_get_large jGetLarge +#define jpeg_free_large jFreeLarge +#define jpeg_mem_available jMemAvail +#define jpeg_open_backing_store jOpenBackStore +#define jpeg_mem_init jMemInit +#define jpeg_mem_term jMemTerm +#endif /* NEED_SHORT_EXTERNAL_NAMES */ + + +/* + * These two functions are used to allocate and release small chunks of + * memory. (Typically the total amount requested through jpeg_get_small is + * no more than 20K or so; this will be requested in chunks of a few K each.) + * Behavior should be the same as for the standard library functions malloc + * and free; in particular, jpeg_get_small must return NULL on failure. + * On most systems, these ARE malloc and free. jpeg_free_small is passed the + * size of the object being freed, just in case it's needed. + * On an 80x86 machine using small-data memory model, these manage near heap. + */ + +EXTERN(void *) jpeg_get_small JPP((j_common_ptr cinfo, size_t sizeofobject)); +EXTERN(void) jpeg_free_small JPP((j_common_ptr cinfo, void * object, + size_t sizeofobject)); + +/* + * These two functions are used to allocate and release large chunks of + * memory (up to the total free space designated by jpeg_mem_available). + * The interface is the same as above, except that on an 80x86 machine, + * far pointers are used. On most other machines these are identical to + * the jpeg_get/free_small routines; but we keep them separate anyway, + * in case a different allocation strategy is desirable for large chunks. + */ + +EXTERN(void FAR *) jpeg_get_large JPP((j_common_ptr cinfo, + size_t sizeofobject)); +EXTERN(void) jpeg_free_large JPP((j_common_ptr cinfo, void FAR * object, + size_t sizeofobject)); + +/* + * The macro MAX_ALLOC_CHUNK designates the maximum number of bytes that may + * be requested in a single call to jpeg_get_large (and jpeg_get_small for that + * matter, but that case should never come into play). This macro is needed + * to model the 64Kb-segment-size limit of far addressing on 80x86 machines. + * On those machines, we expect that jconfig.h will provide a proper value. + * On machines with 32-bit flat address spaces, any large constant may be used. + * + * NB: jmemmgr.c expects that MAX_ALLOC_CHUNK will be representable as type + * size_t and will be a multiple of sizeof(align_type). + */ + +#ifndef MAX_ALLOC_CHUNK /* may be overridden in jconfig.h */ +#define MAX_ALLOC_CHUNK 1000000000L +#endif + +/* + * This routine computes the total space still available for allocation by + * jpeg_get_large. If more space than this is needed, backing store will be + * used. NOTE: any memory already allocated must not be counted. + * + * There is a minimum space requirement, corresponding to the minimum + * feasible buffer sizes; jmemmgr.c will request that much space even if + * jpeg_mem_available returns zero. The maximum space needed, enough to hold + * all working storage in memory, is also passed in case it is useful. + * Finally, the total space already allocated is passed. If no better + * method is available, cinfo->mem->max_memory_to_use - already_allocated + * is often a suitable calculation. + * + * It is OK for jpeg_mem_available to underestimate the space available + * (that'll just lead to more backing-store access than is really necessary). + * However, an overestimate will lead to failure. 
Hence it's wise to subtract + * a slop factor from the true available space. 5% should be enough. + * + * On machines with lots of virtual memory, any large constant may be returned. + * Conversely, zero may be returned to always use the minimum amount of memory. + */ + +EXTERN(long) jpeg_mem_available JPP((j_common_ptr cinfo, + long min_bytes_needed, + long max_bytes_needed, + long already_allocated)); + + +/* + * This structure holds whatever state is needed to access a single + * backing-store object. The read/write/close method pointers are called + * by jmemmgr.c to manipulate the backing-store object; all other fields + * are private to the system-dependent backing store routines. + */ + +#define TEMP_NAME_LENGTH 64 /* max length of a temporary file's name */ + + +#ifdef USE_MSDOS_MEMMGR /* DOS-specific junk */ + +typedef unsigned short XMSH; /* type of extended-memory handles */ +typedef unsigned short EMSH; /* type of expanded-memory handles */ + +typedef union { + short file_handle; /* DOS file handle if it's a temp file */ + XMSH xms_handle; /* handle if it's a chunk of XMS */ + EMSH ems_handle; /* handle if it's a chunk of EMS */ +} handle_union; + +#endif /* USE_MSDOS_MEMMGR */ + +#ifdef USE_MAC_MEMMGR /* Mac-specific junk */ +#include +#endif /* USE_MAC_MEMMGR */ + + +typedef struct backing_store_struct * backing_store_ptr; + +typedef struct backing_store_struct { + /* Methods for reading/writing/closing this backing-store object */ + JMETHOD(void, read_backing_store, (j_common_ptr cinfo, + backing_store_ptr info, + void FAR * buffer_address, + long file_offset, long byte_count)); + JMETHOD(void, write_backing_store, (j_common_ptr cinfo, + backing_store_ptr info, + void FAR * buffer_address, + long file_offset, long byte_count)); + JMETHOD(void, close_backing_store, (j_common_ptr cinfo, + backing_store_ptr info)); + + /* Private fields for system-dependent backing-store management */ +#ifdef USE_MSDOS_MEMMGR + /* For the MS-DOS manager (jmemdos.c), we need: */ + handle_union handle; /* reference to backing-store storage object */ + char temp_name[TEMP_NAME_LENGTH]; /* name if it's a file */ +#else +#ifdef USE_MAC_MEMMGR + /* For the Mac manager (jmemmac.c), we need: */ + short temp_file; /* file reference number to temp file */ + FSSpec tempSpec; /* the FSSpec for the temp file */ + char temp_name[TEMP_NAME_LENGTH]; /* name if it's a file */ +#else +#ifdef USE_HEAP_MEM +#else + /* For a typical implementation with temp files, we need: */ + JFILE * temp_file; /* stdio reference to temp file */ + char temp_name[TEMP_NAME_LENGTH]; /* name of temp file */ +#endif +#endif +#endif +} backing_store_info; + + +/* + * Initial opening of a backing-store object. This must fill in the + * read/write/close pointers in the object. The read/write routines + * may take an error exit if the specified maximum file size is exceeded. + * (If jpeg_mem_available always returns a large value, this routine can + * just take an error exit.) + */ + +EXTERN(void) jpeg_open_backing_store JPP((j_common_ptr cinfo, + backing_store_ptr info, + long total_bytes_needed)); + + +/* + * These routines take care of any system-dependent initialization and + * cleanup required. jpeg_mem_init will be called before anything is + * allocated (and, therefore, nothing in cinfo is of use except the error + * manager pointer). It should return a suitable default value for + * max_memory_to_use; this may subsequently be overridden by the surrounding + * application. 
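On a flat-memory target like this one, the system-dependent layer that jmemsys.h describes can be almost trivial. The sketch below is an editorial illustration, not part of the commit, in the spirit of the IJG jmemnobs.c manager: every entry point maps onto malloc()/free() and there is no backing store (jerror.h and the GLOBAL/FAR macros come in through the library headers).

#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jerror.h"
#include "jmemsys.h"

GLOBAL(void *)
jpeg_get_small (j_common_ptr cinfo, size_t sizeofobject)
{
  return (void *) malloc(sizeofobject);   /* NULL on failure, as the contract above requires */
}

GLOBAL(void)
jpeg_free_small (j_common_ptr cinfo, void * object, size_t sizeofobject)
{
  free(object);
}

GLOBAL(void FAR *)
jpeg_get_large (j_common_ptr cinfo, size_t sizeofobject)
{
  return (void FAR *) malloc(sizeofobject);
}

GLOBAL(void)
jpeg_free_large (j_common_ptr cinfo, void FAR * object, size_t sizeofobject)
{
  free(object);
}

GLOBAL(long)
jpeg_mem_available (j_common_ptr cinfo, long min_bytes_needed,
                    long max_bytes_needed, long already_allocated)
{
  return max_bytes_needed;                /* pretend memory is unlimited */
}

GLOBAL(void)
jpeg_open_backing_store (j_common_ptr cinfo, backing_store_ptr info,
                         long total_bytes_needed)
{
  ERREXIT(cinfo, JERR_NO_BACKING_STORE);  /* no temporary files on this target */
}

GLOBAL(long)
jpeg_mem_init (j_common_ptr cinfo)
{
  return 0;                               /* default max_memory_to_use */
}

GLOBAL(void)
jpeg_mem_term (j_common_ptr cinfo)
{
  /* nothing to release */
}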
(Note that max_memory_to_use is only important if + * jpeg_mem_available chooses to consult it ... no one else will.) + * jpeg_mem_term may assume that all requested memory has been freed and that + * all opened backing-store objects have been closed. + */ + +EXTERN(long) jpeg_mem_init JPP((j_common_ptr cinfo)); +EXTERN(void) jpeg_mem_term JPP((j_common_ptr cinfo)); diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jmorecfg_template.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jmorecfg_template.h new file mode 100644 index 000000000..f390b2c18 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jmorecfg_template.h @@ -0,0 +1,380 @@ +/* + * jmorecfg.h + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * Modified 1997-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains additional configuration options that customize the + * JPEG software for special applications or support machine-dependent + * optimizations. Most users will not need to touch this file. + */ + + +/* + * Define BITS_IN_JSAMPLE as either + * 8 for 8-bit sample values (the usual setting) + * 12 for 12-bit sample values + * Only 8 and 12 are legal data precisions for lossy JPEG according to the + * JPEG standard, and the IJG code does not support anything else! + * We do not support run-time selection of data precision, sorry. + */ + +#define BITS_IN_JSAMPLE 8 /* use 8 or 12 */ + + +/* + * Maximum number of components (color channels) allowed in JPEG image. + * To meet the letter of the JPEG spec, set this to 255. However, darn + * few applications need more than 4 channels (maybe 5 for CMYK + alpha + * mask). We recommend 10 as a reasonable compromise; use 4 if you are + * really short on memory. (Each allowed component costs a hundred or so + * bytes of storage, whether actually used in an image or not.) + */ + +#define MAX_COMPONENTS 10 /* maximum number of image components */ + + +/* + * Basic data types. + * You may need to change these if you have a machine with unusual data + * type sizes; for example, "char" not 8 bits, "short" not 16 bits, + * or "long" not 32 bits. We don't care whether "int" is 16 or 32 bits, + * but it had better be at least 16. + */ + +/* Representation of a single sample (pixel element value). + * We frequently allocate large arrays of these, so it's important to keep + * them small. But if you have memory to burn and access to char or short + * arrays is very slow on your hardware, you might want to change these. + */ + +#if BITS_IN_JSAMPLE == 8 +/* JSAMPLE should be the smallest type that will hold the values 0..255. + * You can use a signed char by having GETJSAMPLE mask it with 0xFF. + */ + +#ifdef HAVE_UNSIGNED_CHAR + +typedef unsigned char JSAMPLE; +#define GETJSAMPLE(value) ((int) (value)) + +#else /* not HAVE_UNSIGNED_CHAR */ + +typedef char JSAMPLE; +#ifdef CHAR_IS_UNSIGNED +#define GETJSAMPLE(value) ((int) (value)) +#else +#define GETJSAMPLE(value) ((int) (value) & 0xFF) +#endif /* CHAR_IS_UNSIGNED */ + +#endif /* HAVE_UNSIGNED_CHAR */ + +#define MAXJSAMPLE 255 +#define CENTERJSAMPLE 128 + +#endif /* BITS_IN_JSAMPLE == 8 */ + + +#if BITS_IN_JSAMPLE == 12 +/* JSAMPLE should be the smallest type that will hold the values 0..4095. + * On nearly all machines "short" will do nicely. 
+ */ + +typedef short JSAMPLE; +#define GETJSAMPLE(value) ((int) (value)) + +#define MAXJSAMPLE 4095 +#define CENTERJSAMPLE 2048 + +#endif /* BITS_IN_JSAMPLE == 12 */ + + +/* Representation of a DCT frequency coefficient. + * This should be a signed value of at least 16 bits; "short" is usually OK. + * Again, we allocate large arrays of these, but you can change to int + * if you have memory to burn and "short" is really slow. + */ + +typedef short JCOEF; + + +/* Compressed datastreams are represented as arrays of JOCTET. + * These must be EXACTLY 8 bits wide, at least once they are written to + * external storage. Note that when using the stdio data source/destination + * managers, this is also the data type passed to fread/fwrite. + */ + +#ifdef HAVE_UNSIGNED_CHAR + +typedef unsigned char JOCTET; +#define GETJOCTET(value) (value) + +#else /* not HAVE_UNSIGNED_CHAR */ + +typedef char JOCTET; +#ifdef CHAR_IS_UNSIGNED +#define GETJOCTET(value) (value) +#else +#define GETJOCTET(value) ((value) & 0xFF) +#endif /* CHAR_IS_UNSIGNED */ + +#endif /* HAVE_UNSIGNED_CHAR */ + + +/* These typedefs are used for various table entries and so forth. + * They must be at least as wide as specified; but making them too big + * won't cost a huge amount of memory, so we don't provide special + * extraction code like we did for JSAMPLE. (In other words, these + * typedefs live at a different point on the speed/space tradeoff curve.) + */ + +/* UINT8 must hold at least the values 0..255. */ + +#ifdef HAVE_UNSIGNED_CHAR +typedef unsigned char UINT8; +#else /* not HAVE_UNSIGNED_CHAR */ +#ifdef CHAR_IS_UNSIGNED +typedef char UINT8; +#else /* not CHAR_IS_UNSIGNED */ +typedef short UINT8; +#endif /* CHAR_IS_UNSIGNED */ +#endif /* HAVE_UNSIGNED_CHAR */ + +/* UINT16 must hold at least the values 0..65535. */ + +#ifdef HAVE_UNSIGNED_SHORT +typedef unsigned short UINT16; +#else /* not HAVE_UNSIGNED_SHORT */ +typedef unsigned int UINT16; +#endif /* HAVE_UNSIGNED_SHORT */ + +/* INT16 must hold at least the values -32768..32767. */ + +#ifndef XMD_H /* X11/xmd.h correctly defines INT16 */ +typedef short INT16; +#endif + +/* INT32 must hold at least signed 32-bit values. */ + +#ifndef XMD_H /* X11/xmd.h correctly defines INT32 */ +#ifndef _BASETSD_H_ /* Microsoft defines it in basetsd.h */ +#ifndef _BASETSD_H /* MinGW is slightly different */ +#ifndef QGLOBAL_H /* Qt defines it in qglobal.h */ +typedef long INT32; +#endif +#endif +#endif +#endif + +/* Datatype used for image dimensions. The JPEG standard only supports + * images up to 64K*64K due to 16-bit fields in SOF markers. Therefore + * "unsigned int" is sufficient on all machines. However, if you need to + * handle larger images and you don't mind deviating from the spec, you + * can change this datatype. + */ + +typedef unsigned int JDIMENSION; + +#define JPEG_MAX_DIMENSION 65500L /* a tad under 64K to prevent overflows */ + + +/* These macros are used in all function definitions and extern declarations. + * You could modify them if you need to change function linkage conventions; + * in particular, you'll need to do that to make the library a Windows DLL. + * Another application is to make all functions global for use with debuggers + * or code profilers that require it. 
+ */ + +/* a function called through method pointers: */ +#define METHODDEF(type) static type +/* a function used only in its module: */ +#define LOCAL(type) static type +/* a function referenced thru EXTERNs: */ +#define GLOBAL(type) type +/* a reference to a GLOBAL function: */ +#define EXTERN(type) extern type + + +/* This macro is used to declare a "method", that is, a function pointer. + * We want to supply prototype parameters if the compiler can cope. + * Note that the arglist parameter must be parenthesized! + * Again, you can customize this if you need special linkage keywords. + */ + +#ifdef HAVE_PROTOTYPES +#define JMETHOD(type,methodname,arglist) type (*methodname) arglist +#else +#define JMETHOD(type,methodname,arglist) type (*methodname) () +#endif + + +/* Here is the pseudo-keyword for declaring pointers that must be "far" + * on 80x86 machines. Most of the specialized coding for 80x86 is handled + * by just saying "FAR *" where such a pointer is needed. In a few places + * explicit coding is needed; see uses of the NEED_FAR_POINTERS symbol. + */ + +#ifndef FAR +#ifdef NEED_FAR_POINTERS +#define FAR far +#else +#define FAR +#endif +#endif + + +/* + * On a few systems, type boolean and/or its values FALSE, TRUE may appear + * in standard header files. Or you may have conflicts with application- + * specific header files that you want to include together with these files. + * Defining HAVE_BOOLEAN before including jpeglib.h should make it work. + */ + +#ifndef HAVE_BOOLEAN +typedef int boolean; +#endif +#ifndef FALSE /* in case these macros already exist */ +#define FALSE 0 /* values of boolean */ +#endif +#ifndef TRUE +#define TRUE 1 +#endif + + +/* + * The remaining options affect code selection within the JPEG library, + * but they don't need to be visible to most applications using the library. + * To minimize application namespace pollution, the symbols won't be + * defined unless JPEG_INTERNALS or JPEG_INTERNAL_OPTIONS has been defined. + */ + +#ifdef JPEG_INTERNALS +#define JPEG_INTERNAL_OPTIONS +#endif + +#ifdef JPEG_INTERNAL_OPTIONS + + +/* + * These defines indicate whether to include various optional functions. + * Undefining some of these symbols will produce a smaller but less capable + * library. Note that you can leave certain source files out of the + * compilation/linking process if you've #undef'd the corresponding symbols. + * (You may HAVE to do that if your compiler doesn't like null source files.) + */ + +/* Capability options common to encoder and decoder: */ + +#define DCT_ISLOW_SUPPORTED /* slow but accurate integer algorithm */ +#define DCT_IFAST_SUPPORTED /* faster, less accurate integer method */ +#define DCT_FLOAT_SUPPORTED /* floating-point: accurate, fast on fast HW */ + +/* Encoder capability options: */ + +#define C_ARITH_CODING_SUPPORTED /* Arithmetic coding back end? */ +#undef C_MULTISCAN_FILES_SUPPORTED /* Multiple-scan JPEG files? */ +#undef C_PROGRESSIVE_SUPPORTED /* Progressive JPEG? (Requires MULTISCAN)*/ +#undef DCT_SCALING_SUPPORTED /* Input rescaling via DCT? (Requires DCT_ISLOW)*/ +#undef ENTROPY_OPT_SUPPORTED /* Optimization of entropy coding parms? */ +/* Note: if you selected 12-bit data precision, it is dangerous to turn off + * ENTROPY_OPT_SUPPORTED. The standard Huffman tables are only good for 8-bit + * precision, so jchuff.c normally uses entropy optimization to compute + * usable tables for higher precision. If you don't want to do optimization, + * you'll have to supply different default Huffman tables. 
+ * The exact same statements apply for progressive JPEG: the default tables + * don't work for progressive mode. (This may get fixed, however.) + */ +#define INPUT_SMOOTHING_SUPPORTED /* Input image smoothing option? */ + +/* Decoder capability options: */ + +#define D_ARITH_CODING_SUPPORTED /* Arithmetic coding back end? */ +#undef D_MULTISCAN_FILES_SUPPORTED /* Multiple-scan JPEG files? */ +#undef D_PROGRESSIVE_SUPPORTED /* Progressive JPEG? (Requires MULTISCAN)*/ +#undef IDCT_SCALING_SUPPORTED /* Output rescaling via IDCT? */ +#undef SAVE_MARKERS_SUPPORTED /* jpeg_save_markers() needed? */ +#undef BLOCK_SMOOTHING_SUPPORTED /* Block smoothing? (Progressive only) */ +#undef UPSAMPLE_SCALING_SUPPORTED /* Output rescaling at upsample stage? */ +#define UPSAMPLE_MERGING_SUPPORTED /* Fast path for sloppy upsampling? */ +#define QUANT_1PASS_SUPPORTED /* 1-pass color quantization? */ +#define QUANT_2PASS_SUPPORTED /* 2-pass color quantization? */ + +/* more capability options later, no doubt */ + + +/* + * Ordering of RGB data in scanlines passed to or from the application. + * If your application wants to deal with data in the order B,G,R, just + * change these macros. You can also deal with formats such as R,G,B,X + * (one extra byte per pixel) by changing RGB_PIXELSIZE. Note that changing + * the offsets will also change the order in which colormap data is organized. + * RESTRICTIONS: + * 1. The sample applications cjpeg,djpeg do NOT support modified RGB formats. + * 2. The color quantizer modules will not behave desirably if RGB_PIXELSIZE + * is not 3 (they don't understand about dummy color components!). So you + * can't use color quantization if you change that value. + */ + +#define RGB_RED 0 /* Offset of Red in an RGB scanline element */ +#define RGB_GREEN 1 /* Offset of Green */ +#define RGB_BLUE 2 /* Offset of Blue */ +#define RGB_PIXELSIZE 3 /* JSAMPLEs per RGB scanline element */ + + +/* Definitions for speed-related optimizations. */ + + +/* If your compiler supports inline functions, define INLINE + * as the inline keyword; otherwise define it as empty. + */ + +#ifndef INLINE +#if defined ( __CC_ARM ) + #define INLINE __inline /*!< inline keyword for ARM Compiler */ + +#elif defined ( __ICCARM__ ) + #define INLINE inline /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */ + +#elif defined ( __GNUC__ ) + #define INLINE inline /*!< inline keyword for GNU Compiler */ + +#elif defined ( __TASKING__ ) + #define INLINE inline /*!< inline keyword for TASKING Compiler */ +#endif +#ifndef INLINE +#define INLINE /* default is to define it as empty */ +#endif +#endif + + + + +/* On some machines (notably 68000 series) "int" is 32 bits, but multiplying + * two 16-bit shorts is faster than multiplying two ints. Define MULTIPLIER + * as short on such a machine. MULTIPLIER must be at least 16 bits wide. + */ + +#ifndef MULTIPLIER +#define MULTIPLIER int /* type for fastest integer multiply */ +#endif + + +/* FAST_FLOAT should be either float or double, whichever is done faster + * by your compiler. (Note that this type is only used in the floating point + * DCT routines, so it only matters if you've defined DCT_FLOAT_SUPPORTED.) + * Typically, float is faster in ANSI C compilers, while double is faster in + * pre-ANSI compilers (because they insist on converting to double anyway). + * The code below therefore chooses float if we have ANSI-style prototypes. 
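Looking back at the RGB_RED/RGB_GREEN/RGB_BLUE offsets defined a little earlier in this header, the scanline layout is easiest to see in code. This is an editorial example, not part of the commit; it assumes the default RGB_PIXELSIZE of 3 and 8-bit samples (unsigned char JSAMPLE), and uses literal offsets because the RGB_* macros are only visible with JPEG_INTERNALS defined.

/* Unpack pixel i from an interleaved RGB row such as one returned by
 * jpeg_read_scanlines(). */
static void get_rgb_pixel(const unsigned char *row, unsigned int i,
                          unsigned char *r, unsigned char *g, unsigned char *b)
{
    *r = row[i * 3 + 0];    /* RGB_RED   */
    *g = row[i * 3 + 1];    /* RGB_GREEN */
    *b = row[i * 3 + 2];    /* RGB_BLUE  */
}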
+ */ + +#ifndef FAST_FLOAT +#ifdef HAVE_PROTOTYPES +#define FAST_FLOAT float +#else +#define FAST_FLOAT double +#endif +#endif + +#endif /* JPEG_INTERNAL_OPTIONS */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jpegint.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jpegint.h new file mode 100644 index 000000000..c0d5c1420 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jpegint.h @@ -0,0 +1,426 @@ +/* + * jpegint.h + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * Modified 1997-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file provides common declarations for the various JPEG modules. + * These declarations are considered internal to the JPEG library; most + * applications using the library shouldn't need to include this file. + */ + + +/* Declarations for both compression & decompression */ + +typedef enum { /* Operating modes for buffer controllers */ + JBUF_PASS_THRU, /* Plain stripwise operation */ + /* Remaining modes require a full-image buffer to have been created */ + JBUF_SAVE_SOURCE, /* Run source subobject only, save output */ + JBUF_CRANK_DEST, /* Run dest subobject only, using saved data */ + JBUF_SAVE_AND_PASS /* Run both subobjects, save output */ +} J_BUF_MODE; + +/* Values of global_state field (jdapi.c has some dependencies on ordering!) */ +#define CSTATE_START 100 /* after create_compress */ +#define CSTATE_SCANNING 101 /* start_compress done, write_scanlines OK */ +#define CSTATE_RAW_OK 102 /* start_compress done, write_raw_data OK */ +#define CSTATE_WRCOEFS 103 /* jpeg_write_coefficients done */ +#define DSTATE_START 200 /* after create_decompress */ +#define DSTATE_INHEADER 201 /* reading header markers, no SOS yet */ +#define DSTATE_READY 202 /* found SOS, ready for start_decompress */ +#define DSTATE_PRELOAD 203 /* reading multiscan file in start_decompress*/ +#define DSTATE_PRESCAN 204 /* performing dummy pass for 2-pass quant */ +#define DSTATE_SCANNING 205 /* start_decompress done, read_scanlines OK */ +#define DSTATE_RAW_OK 206 /* start_decompress done, read_raw_data OK */ +#define DSTATE_BUFIMAGE 207 /* expecting jpeg_start_output */ +#define DSTATE_BUFPOST 208 /* looking for SOS/EOI in jpeg_finish_output */ +#define DSTATE_RDCOEFS 209 /* reading file in jpeg_read_coefficients */ +#define DSTATE_STOPPING 210 /* looking for EOI in jpeg_finish_decompress */ + + +/* Declarations for compression modules */ + +/* Master control module */ +struct jpeg_comp_master { + JMETHOD(void, prepare_for_pass, (j_compress_ptr cinfo)); + JMETHOD(void, pass_startup, (j_compress_ptr cinfo)); + JMETHOD(void, finish_pass, (j_compress_ptr cinfo)); + + /* State variables made visible to other modules */ + boolean call_pass_startup; /* True if pass_startup must be called */ + boolean is_last_pass; /* True during last pass */ +}; + +/* Main buffer control (downsampled-data buffer) */ +struct jpeg_c_main_controller { + JMETHOD(void, start_pass, (j_compress_ptr cinfo, J_BUF_MODE pass_mode)); + JMETHOD(void, process_data, (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JDIMENSION *in_row_ctr, + JDIMENSION in_rows_avail)); +}; + +/* Compression preprocessing (downsampling input buffer control) */ +struct jpeg_c_prep_controller { + JMETHOD(void, start_pass, (j_compress_ptr cinfo, J_BUF_MODE pass_mode)); + JMETHOD(void, pre_process_data, (j_compress_ptr 
cinfo, + JSAMPARRAY input_buf, + JDIMENSION *in_row_ctr, + JDIMENSION in_rows_avail, + JSAMPIMAGE output_buf, + JDIMENSION *out_row_group_ctr, + JDIMENSION out_row_groups_avail)); +}; + +/* Coefficient buffer control */ +struct jpeg_c_coef_controller { + JMETHOD(void, start_pass, (j_compress_ptr cinfo, J_BUF_MODE pass_mode)); + JMETHOD(boolean, compress_data, (j_compress_ptr cinfo, + JSAMPIMAGE input_buf)); +}; + +/* Colorspace conversion */ +struct jpeg_color_converter { + JMETHOD(void, start_pass, (j_compress_ptr cinfo)); + JMETHOD(void, color_convert, (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPIMAGE output_buf, + JDIMENSION output_row, int num_rows)); +}; + +/* Downsampling */ +struct jpeg_downsampler { + JMETHOD(void, start_pass, (j_compress_ptr cinfo)); + JMETHOD(void, downsample, (j_compress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION in_row_index, + JSAMPIMAGE output_buf, + JDIMENSION out_row_group_index)); + + boolean need_context_rows; /* TRUE if need rows above & below */ +}; + +/* Forward DCT (also controls coefficient quantization) */ +typedef JMETHOD(void, forward_DCT_ptr, + (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY sample_data, JBLOCKROW coef_blocks, + JDIMENSION start_row, JDIMENSION start_col, + JDIMENSION num_blocks)); + +struct jpeg_forward_dct { + JMETHOD(void, start_pass, (j_compress_ptr cinfo)); + /* It is useful to allow each component to have a separate FDCT method. */ + forward_DCT_ptr forward_DCT[MAX_COMPONENTS]; +}; + +/* Entropy encoding */ +struct jpeg_entropy_encoder { + JMETHOD(void, start_pass, (j_compress_ptr cinfo, boolean gather_statistics)); + JMETHOD(boolean, encode_mcu, (j_compress_ptr cinfo, JBLOCKROW *MCU_data)); + JMETHOD(void, finish_pass, (j_compress_ptr cinfo)); +}; + +/* Marker writing */ +struct jpeg_marker_writer { + JMETHOD(void, write_file_header, (j_compress_ptr cinfo)); + JMETHOD(void, write_frame_header, (j_compress_ptr cinfo)); + JMETHOD(void, write_scan_header, (j_compress_ptr cinfo)); + JMETHOD(void, write_file_trailer, (j_compress_ptr cinfo)); + JMETHOD(void, write_tables_only, (j_compress_ptr cinfo)); + /* These routines are exported to allow insertion of extra markers */ + /* Probably only COM and APPn markers should be written this way */ + JMETHOD(void, write_marker_header, (j_compress_ptr cinfo, int marker, + unsigned int datalen)); + JMETHOD(void, write_marker_byte, (j_compress_ptr cinfo, int val)); +}; + + +/* Declarations for decompression modules */ + +/* Master control module */ +struct jpeg_decomp_master { + JMETHOD(void, prepare_for_output_pass, (j_decompress_ptr cinfo)); + JMETHOD(void, finish_output_pass, (j_decompress_ptr cinfo)); + + /* State variables made visible to other modules */ + boolean is_dummy_pass; /* True during 1st pass for 2-pass quant */ +}; + +/* Input control module */ +struct jpeg_input_controller { + JMETHOD(int, consume_input, (j_decompress_ptr cinfo)); + JMETHOD(void, reset_input_controller, (j_decompress_ptr cinfo)); + JMETHOD(void, start_input_pass, (j_decompress_ptr cinfo)); + JMETHOD(void, finish_input_pass, (j_decompress_ptr cinfo)); + + /* State variables made visible to other modules */ + boolean has_multiple_scans; /* True if file has multiple scans */ + boolean eoi_reached; /* True when EOI has been consumed */ +}; + +/* Main buffer control (downsampled-data buffer) */ +struct jpeg_d_main_controller { + JMETHOD(void, start_pass, (j_decompress_ptr cinfo, J_BUF_MODE pass_mode)); + JMETHOD(void, process_data, (j_decompress_ptr cinfo, + JSAMPARRAY output_buf, 
JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail)); +}; + +/* Coefficient buffer control */ +struct jpeg_d_coef_controller { + JMETHOD(void, start_input_pass, (j_decompress_ptr cinfo)); + JMETHOD(int, consume_data, (j_decompress_ptr cinfo)); + JMETHOD(void, start_output_pass, (j_decompress_ptr cinfo)); + JMETHOD(int, decompress_data, (j_decompress_ptr cinfo, + JSAMPIMAGE output_buf)); + /* Pointer to array of coefficient virtual arrays, or NULL if none */ + jvirt_barray_ptr *coef_arrays; +}; + +/* Decompression postprocessing (color quantization buffer control) */ +struct jpeg_d_post_controller { + JMETHOD(void, start_pass, (j_decompress_ptr cinfo, J_BUF_MODE pass_mode)); + JMETHOD(void, post_process_data, (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, + JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, + JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail)); +}; + +/* Marker reading & parsing */ +struct jpeg_marker_reader { + JMETHOD(void, reset_marker_reader, (j_decompress_ptr cinfo)); + /* Read markers until SOS or EOI. + * Returns same codes as are defined for jpeg_consume_input: + * JPEG_SUSPENDED, JPEG_REACHED_SOS, or JPEG_REACHED_EOI. + */ + JMETHOD(int, read_markers, (j_decompress_ptr cinfo)); + /* Read a restart marker --- exported for use by entropy decoder only */ + jpeg_marker_parser_method read_restart_marker; + + /* State of marker reader --- nominally internal, but applications + * supplying COM or APPn handlers might like to know the state. + */ + boolean saw_SOI; /* found SOI? */ + boolean saw_SOF; /* found SOF? */ + int next_restart_num; /* next restart number expected (0-7) */ + unsigned int discarded_bytes; /* # of bytes skipped looking for a marker */ +}; + +/* Entropy decoding */ +struct jpeg_entropy_decoder { + JMETHOD(void, start_pass, (j_decompress_ptr cinfo)); + JMETHOD(boolean, decode_mcu, (j_decompress_ptr cinfo, + JBLOCKROW *MCU_data)); +}; + +/* Inverse DCT (also performs dequantization) */ +typedef JMETHOD(void, inverse_DCT_method_ptr, + (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col)); + +struct jpeg_inverse_dct { + JMETHOD(void, start_pass, (j_decompress_ptr cinfo)); + /* It is useful to allow each component to have a separate IDCT method. 
*/ + inverse_DCT_method_ptr inverse_DCT[MAX_COMPONENTS]; +}; + +/* Upsampling (note that upsampler must also call color converter) */ +struct jpeg_upsampler { + JMETHOD(void, start_pass, (j_decompress_ptr cinfo)); + JMETHOD(void, upsample, (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, + JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, + JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail)); + + boolean need_context_rows; /* TRUE if need rows above & below */ +}; + +/* Colorspace conversion */ +struct jpeg_color_deconverter { + JMETHOD(void, start_pass, (j_decompress_ptr cinfo)); + JMETHOD(void, color_convert, (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION input_row, + JSAMPARRAY output_buf, int num_rows)); +}; + +/* Color quantization or color precision reduction */ +struct jpeg_color_quantizer { + JMETHOD(void, start_pass, (j_decompress_ptr cinfo, boolean is_pre_scan)); + JMETHOD(void, color_quantize, (j_decompress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPARRAY output_buf, + int num_rows)); + JMETHOD(void, finish_pass, (j_decompress_ptr cinfo)); + JMETHOD(void, new_color_map, (j_decompress_ptr cinfo)); +}; + + +/* Miscellaneous useful macros */ + +#undef MAX +#define MAX(a,b) ((a) > (b) ? (a) : (b)) +#undef MIN +#define MIN(a,b) ((a) < (b) ? (a) : (b)) + + +/* We assume that right shift corresponds to signed division by 2 with + * rounding towards minus infinity. This is correct for typical "arithmetic + * shift" instructions that shift in copies of the sign bit. But some + * C compilers implement >> with an unsigned shift. For these machines you + * must define RIGHT_SHIFT_IS_UNSIGNED. + * RIGHT_SHIFT provides a proper signed right shift of an INT32 quantity. + * It is only applied with constant shift counts. SHIFT_TEMPS must be + * included in the variables of any routine using RIGHT_SHIFT. + */ + +#ifdef RIGHT_SHIFT_IS_UNSIGNED +#define SHIFT_TEMPS INT32 shift_temp; +#define RIGHT_SHIFT(x,shft) \ + ((shift_temp = (x)) < 0 ? \ + (shift_temp >> (shft)) | ((~((INT32) 0)) << (32-(shft))) : \ + (shift_temp >> (shft))) +#else +#define SHIFT_TEMPS +#define RIGHT_SHIFT(x,shft) ((x) >> (shft)) +#endif + + +/* Short forms of external names for systems with brain-damaged linkers. 
*/ + +#ifdef NEED_SHORT_EXTERNAL_NAMES +#define jinit_compress_master jICompress +#define jinit_c_master_control jICMaster +#define jinit_c_main_controller jICMainC +#define jinit_c_prep_controller jICPrepC +#define jinit_c_coef_controller jICCoefC +#define jinit_color_converter jICColor +#define jinit_downsampler jIDownsampler +#define jinit_forward_dct jIFDCT +#define jinit_huff_encoder jIHEncoder +#define jinit_arith_encoder jIAEncoder +#define jinit_marker_writer jIMWriter +#define jinit_master_decompress jIDMaster +#define jinit_d_main_controller jIDMainC +#define jinit_d_coef_controller jIDCoefC +#define jinit_d_post_controller jIDPostC +#define jinit_input_controller jIInCtlr +#define jinit_marker_reader jIMReader +#define jinit_huff_decoder jIHDecoder +#define jinit_arith_decoder jIADecoder +#define jinit_inverse_dct jIIDCT +#define jinit_upsampler jIUpsampler +#define jinit_color_deconverter jIDColor +#define jinit_1pass_quantizer jI1Quant +#define jinit_2pass_quantizer jI2Quant +#define jinit_merged_upsampler jIMUpsampler +#define jinit_memory_mgr jIMemMgr +#define jdiv_round_up jDivRound +#define jround_up jRound +#define jzero_far jZeroFar +#define jcopy_sample_rows jCopySamples +#define jcopy_block_row jCopyBlocks +#define jpeg_zigzag_order jZIGTable +#define jpeg_natural_order jZAGTable +#define jpeg_natural_order7 jZAG7Table +#define jpeg_natural_order6 jZAG6Table +#define jpeg_natural_order5 jZAG5Table +#define jpeg_natural_order4 jZAG4Table +#define jpeg_natural_order3 jZAG3Table +#define jpeg_natural_order2 jZAG2Table +#define jpeg_aritab jAriTab +#endif /* NEED_SHORT_EXTERNAL_NAMES */ + + +/* On normal machines we can apply MEMCOPY() and MEMZERO() to sample arrays + * and coefficient-block arrays. This won't work on 80x86 because the arrays + * are FAR and we're assuming a small-pointer memory model. However, some + * DOS compilers provide far-pointer versions of memcpy() and memset() even + * in the small-model libraries. These will be used if USE_FMEM is defined. + * Otherwise, the routines in jutils.c do it the hard way. 
+ */ + +#ifndef NEED_FAR_POINTERS /* normal case, same as regular macro */ +#define FMEMZERO(target,size) MEMZERO(target,size) +#else /* 80x86 case */ +#ifdef USE_FMEM +#define FMEMZERO(target,size) _fmemset((void FAR *)(target), 0, (size_t)(size)) +#else +EXTERN(void) jzero_far JPP((void FAR * target, size_t bytestozero)); +#define FMEMZERO(target,size) jzero_far(target, size) +#endif +#endif + + +/* Compression module initialization routines */ +EXTERN(void) jinit_compress_master JPP((j_compress_ptr cinfo)); +EXTERN(void) jinit_c_master_control JPP((j_compress_ptr cinfo, + boolean transcode_only)); +EXTERN(void) jinit_c_main_controller JPP((j_compress_ptr cinfo, + boolean need_full_buffer)); +EXTERN(void) jinit_c_prep_controller JPP((j_compress_ptr cinfo, + boolean need_full_buffer)); +EXTERN(void) jinit_c_coef_controller JPP((j_compress_ptr cinfo, + boolean need_full_buffer)); +EXTERN(void) jinit_color_converter JPP((j_compress_ptr cinfo)); +EXTERN(void) jinit_downsampler JPP((j_compress_ptr cinfo)); +EXTERN(void) jinit_forward_dct JPP((j_compress_ptr cinfo)); +EXTERN(void) jinit_huff_encoder JPP((j_compress_ptr cinfo)); +EXTERN(void) jinit_arith_encoder JPP((j_compress_ptr cinfo)); +EXTERN(void) jinit_marker_writer JPP((j_compress_ptr cinfo)); +/* Decompression module initialization routines */ +EXTERN(void) jinit_master_decompress JPP((j_decompress_ptr cinfo)); +EXTERN(void) jinit_d_main_controller JPP((j_decompress_ptr cinfo, + boolean need_full_buffer)); +EXTERN(void) jinit_d_coef_controller JPP((j_decompress_ptr cinfo, + boolean need_full_buffer)); +EXTERN(void) jinit_d_post_controller JPP((j_decompress_ptr cinfo, + boolean need_full_buffer)); +EXTERN(void) jinit_input_controller JPP((j_decompress_ptr cinfo)); +EXTERN(void) jinit_marker_reader JPP((j_decompress_ptr cinfo)); +EXTERN(void) jinit_huff_decoder JPP((j_decompress_ptr cinfo)); +EXTERN(void) jinit_arith_decoder JPP((j_decompress_ptr cinfo)); +EXTERN(void) jinit_inverse_dct JPP((j_decompress_ptr cinfo)); +EXTERN(void) jinit_upsampler JPP((j_decompress_ptr cinfo)); +EXTERN(void) jinit_color_deconverter JPP((j_decompress_ptr cinfo)); +EXTERN(void) jinit_1pass_quantizer JPP((j_decompress_ptr cinfo)); +EXTERN(void) jinit_2pass_quantizer JPP((j_decompress_ptr cinfo)); +EXTERN(void) jinit_merged_upsampler JPP((j_decompress_ptr cinfo)); +/* Memory manager initialization */ +EXTERN(void) jinit_memory_mgr JPP((j_common_ptr cinfo)); + +/* Utility routines in jutils.c */ +EXTERN(long) jdiv_round_up JPP((long a, long b)); +EXTERN(long) jround_up JPP((long a, long b)); +EXTERN(void) jcopy_sample_rows JPP((JSAMPARRAY input_array, int source_row, + JSAMPARRAY output_array, int dest_row, + int num_rows, JDIMENSION num_cols)); +EXTERN(void) jcopy_block_row JPP((JBLOCKROW input_row, JBLOCKROW output_row, + JDIMENSION num_blocks)); +/* Constant tables in jutils.c */ +#if 0 /* This table is not actually needed in v6a */ +extern const int jpeg_zigzag_order[]; /* natural coef order to zigzag order */ +#endif +extern const int jpeg_natural_order[]; /* zigzag coef order to natural order */ +extern const int jpeg_natural_order7[]; /* zz to natural order for 7x7 block */ +extern const int jpeg_natural_order6[]; /* zz to natural order for 6x6 block */ +extern const int jpeg_natural_order5[]; /* zz to natural order for 5x5 block */ +extern const int jpeg_natural_order4[]; /* zz to natural order for 4x4 block */ +extern const int jpeg_natural_order3[]; /* zz to natural order for 3x3 block */ +extern const int jpeg_natural_order2[]; /* zz to natural 
order for 2x2 block */ + +/* Arithmetic coding probability estimation tables in jaricom.c */ +extern const INT32 jpeg_aritab[]; + +/* Suppress undefined-structure complaints if necessary. */ + +#ifdef INCOMPLETE_TYPES_BROKEN +#ifndef AM_MEMORY_MANAGER /* only jmemmgr.c defines these */ +struct jvirt_sarray_control { long dummy; }; +struct jvirt_barray_control { long dummy; }; +#endif +#endif /* INCOMPLETE_TYPES_BROKEN */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jpeglib.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jpeglib.h new file mode 100644 index 000000000..add940e9e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jpeglib.h @@ -0,0 +1,1162 @@ +/* + * jpeglib.h + * + * Copyright (C) 1991-1998, Thomas G. Lane. + * Modified 2002-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file defines the application interface for the JPEG library. + * Most applications using the library need only include this file, + * and perhaps jerror.h if they want to know the exact error codes. + */ + +#ifndef JPEGLIB_H +#define JPEGLIB_H + +/* + * First we include the configuration files that record how this + * installation of the JPEG library is set up. jconfig.h can be + * generated automatically for many systems. jmorecfg.h contains + * manual configuration options that most people need not worry about. + */ + +#ifndef JCONFIG_INCLUDED /* in case jinclude.h already did */ +#include "jconfig.h" /* widely used configuration options */ +#endif +#include "jmorecfg.h" /* seldom changed options */ + + +#ifdef __cplusplus +#ifndef DONT_USE_EXTERN_C +extern "C" { +#endif +#endif + +/* Version IDs for the JPEG library. + * Might be useful for tests like "#if JPEG_LIB_VERSION >= 80". + */ + +#define JPEG_LIB_VERSION 80 /* Compatibility version 8.0 */ +#define JPEG_LIB_VERSION_MAJOR 8 +#define JPEG_LIB_VERSION_MINOR 4 + + +/* Various constants determining the sizes of things. + * All of these are specified by the JPEG standard, so don't change them + * if you want to be compatible. + */ + +#define DCTSIZE 8 /* The basic DCT block is 8x8 coefficients */ +#define DCTSIZE2 64 /* DCTSIZE squared; # of elements in a block */ +#define NUM_QUANT_TBLS 4 /* Quantization tables are numbered 0..3 */ +#define NUM_HUFF_TBLS 4 /* Huffman tables are numbered 0..3 */ +#define NUM_ARITH_TBLS 16 /* Arith-coding tables are numbered 0..15 */ +#define MAX_COMPS_IN_SCAN 4 /* JPEG limit on # of components in one scan */ +#define MAX_SAMP_FACTOR 4 /* JPEG limit on sampling factors */ +/* Unfortunately, some bozo at Adobe saw no reason to be bound by the standard; + * the PostScript DCT filter can emit files with many more than 10 blocks/MCU. + * If you happen to run across such a file, you can up D_MAX_BLOCKS_IN_MCU + * to handle it. We even let you do this from the jconfig.h file. However, + * we strongly discourage changing C_MAX_BLOCKS_IN_MCU; just because Adobe + * sometimes emits noncompliant files doesn't mean you should too. + */ +#define C_MAX_BLOCKS_IN_MCU 10 /* compressor's limit on blocks per MCU */ +#ifndef D_MAX_BLOCKS_IN_MCU +#define D_MAX_BLOCKS_IN_MCU 10 /* decompressor's limit on blocks per MCU */ +#endif + + +/* Data structures for images (arrays of samples and of DCT coefficients). 
+ * On 80x86 machines, the image arrays are too big for near pointers, + * but the pointer arrays can fit in near memory. + */ + +typedef JSAMPLE FAR *JSAMPROW; /* ptr to one image row of pixel samples. */ +typedef JSAMPROW *JSAMPARRAY; /* ptr to some rows (a 2-D sample array) */ +typedef JSAMPARRAY *JSAMPIMAGE; /* a 3-D sample array: top index is color */ + +typedef JCOEF JBLOCK[DCTSIZE2]; /* one block of coefficients */ +typedef JBLOCK FAR *JBLOCKROW; /* pointer to one row of coefficient blocks */ +typedef JBLOCKROW *JBLOCKARRAY; /* a 2-D array of coefficient blocks */ +typedef JBLOCKARRAY *JBLOCKIMAGE; /* a 3-D array of coefficient blocks */ + +typedef JCOEF FAR *JCOEFPTR; /* useful in a couple of places */ + + +/* Types for JPEG compression parameters and working tables. */ + + +/* DCT coefficient quantization tables. */ + +typedef struct { + /* This array gives the coefficient quantizers in natural array order + * (not the zigzag order in which they are stored in a JPEG DQT marker). + * CAUTION: IJG versions prior to v6a kept this array in zigzag order. + */ + UINT16 quantval[DCTSIZE2]; /* quantization step for each coefficient */ + /* This field is used only during compression. It's initialized FALSE when + * the table is created, and set TRUE when it's been output to the file. + * You could suppress output of a table by setting this to TRUE. + * (See jpeg_suppress_tables for an example.) + */ + boolean sent_table; /* TRUE when table has been output */ +} JQUANT_TBL; + + +/* Huffman coding tables. */ + +typedef struct { + /* These two fields directly represent the contents of a JPEG DHT marker */ + UINT8 bits[17]; /* bits[k] = # of symbols with codes of */ + /* length k bits; bits[0] is unused */ + UINT8 huffval[256]; /* The symbols, in order of incr code length */ + /* This field is used only during compression. It's initialized FALSE when + * the table is created, and set TRUE when it's been output to the file. + * You could suppress output of a table by setting this to TRUE. + * (See jpeg_suppress_tables for an example.) + */ + boolean sent_table; /* TRUE when table has been output */ +} JHUFF_TBL; + + +/* Basic info about one component (color channel). */ + +typedef struct { + /* These values are fixed over the whole image. */ + /* For compression, they must be supplied by parameter setup; */ + /* for decompression, they are read from the SOF marker. */ + int component_id; /* identifier for this component (0..255) */ + int component_index; /* its index in SOF or cinfo->comp_info[] */ + int h_samp_factor; /* horizontal sampling factor (1..4) */ + int v_samp_factor; /* vertical sampling factor (1..4) */ + int quant_tbl_no; /* quantization table selector (0..3) */ + /* These values may vary between scans. */ + /* For compression, they must be supplied by parameter setup; */ + /* for decompression, they are read from the SOS marker. */ + /* The decompressor output side may not use these variables. */ + int dc_tbl_no; /* DC entropy table selector (0..3) */ + int ac_tbl_no; /* AC entropy table selector (0..3) */ + + /* Remaining fields should be treated as private by applications. */ + + /* These values are computed during compression or decompression startup: */ + /* Component's size in DCT blocks. + * Any dummy blocks added to complete an MCU are not counted; therefore + * these values do not depend on whether a scan is interleaved or not. 
+ */ + JDIMENSION width_in_blocks; + JDIMENSION height_in_blocks; + /* Size of a DCT block in samples, + * reflecting any scaling we choose to apply during the DCT step. + * Values from 1 to 16 are supported. + * Note that different components may receive different DCT scalings. + */ + int DCT_h_scaled_size; + int DCT_v_scaled_size; + /* The downsampled dimensions are the component's actual, unpadded number + * of samples at the main buffer (preprocessing/compression interface); + * DCT scaling is included, so + * downsampled_width = ceil(image_width * Hi/Hmax * DCT_h_scaled_size/DCTSIZE) + * and similarly for height. + */ + JDIMENSION downsampled_width; /* actual width in samples */ + JDIMENSION downsampled_height; /* actual height in samples */ + /* This flag is used only for decompression. In cases where some of the + * components will be ignored (eg grayscale output from YCbCr image), + * we can skip most computations for the unused components. + */ + boolean component_needed; /* do we need the value of this component? */ + + /* These values are computed before starting a scan of the component. */ + /* The decompressor output side may not use these variables. */ + int MCU_width; /* number of blocks per MCU, horizontally */ + int MCU_height; /* number of blocks per MCU, vertically */ + int MCU_blocks; /* MCU_width * MCU_height */ + int MCU_sample_width; /* MCU width in samples: MCU_width * DCT_h_scaled_size */ + int last_col_width; /* # of non-dummy blocks across in last MCU */ + int last_row_height; /* # of non-dummy blocks down in last MCU */ + + /* Saved quantization table for component; NULL if none yet saved. + * See jdinput.c comments about the need for this information. + * This field is currently used only for decompression. + */ + JQUANT_TBL * quant_table; + + /* Private per-component storage for DCT or IDCT subsystem. */ + void * dct_table; +} jpeg_component_info; + + +/* The script for encoding a multiple-scan file is an array of these: */ + +typedef struct { + int comps_in_scan; /* number of components encoded in this scan */ + int component_index[MAX_COMPS_IN_SCAN]; /* their SOF/comp_info[] indexes */ + int Ss, Se; /* progressive JPEG spectral selection parms */ + int Ah, Al; /* progressive JPEG successive approx. parms */ +} jpeg_scan_info; + +/* The decompressor can save APPn and COM markers in a list of these: */ + +typedef struct jpeg_marker_struct FAR * jpeg_saved_marker_ptr; + +struct jpeg_marker_struct { + jpeg_saved_marker_ptr next; /* next in list, or NULL */ + UINT8 marker; /* marker code: JPEG_COM, or JPEG_APP0+n */ + unsigned int original_length; /* # bytes of data in the file */ + unsigned int data_length; /* # bytes of data saved at data[] */ + JOCTET FAR * data; /* the data contained in the marker */ + /* the marker length word is not counted in data_length or original_length */ +}; + +/* Known color spaces. */ + +typedef enum { + JCS_UNKNOWN, /* error/unspecified */ + JCS_GRAYSCALE, /* monochrome */ + JCS_RGB, /* red/green/blue */ + JCS_YCbCr, /* Y/Cb/Cr (also known as YUV) */ + JCS_CMYK, /* C/M/Y/K */ + JCS_YCCK /* Y/Cb/Cr/K */ +} J_COLOR_SPACE; + +/* DCT/IDCT algorithm options. 
*/ + +typedef enum { + JDCT_ISLOW, /* slow but accurate integer algorithm */ + JDCT_IFAST, /* faster, less accurate integer method */ + JDCT_FLOAT /* floating-point: accurate, fast on fast HW */ +} J_DCT_METHOD; + +#ifndef JDCT_DEFAULT /* may be overridden in jconfig.h */ +#define JDCT_DEFAULT JDCT_ISLOW +#endif +#ifndef JDCT_FASTEST /* may be overridden in jconfig.h */ +#define JDCT_FASTEST JDCT_IFAST +#endif + +/* Dithering options for decompression. */ + +typedef enum { + JDITHER_NONE, /* no dithering */ + JDITHER_ORDERED, /* simple ordered dither */ + JDITHER_FS /* Floyd-Steinberg error diffusion dither */ +} J_DITHER_MODE; + + +/* Common fields between JPEG compression and decompression master structs. */ + +#define jpeg_common_fields \ + struct jpeg_error_mgr * err; /* Error handler module */\ + struct jpeg_memory_mgr * mem; /* Memory manager module */\ + struct jpeg_progress_mgr * progress; /* Progress monitor, or NULL if none */\ + void * client_data; /* Available for use by application */\ + boolean is_decompressor; /* So common code can tell which is which */\ + int global_state /* For checking call sequence validity */ + +/* Routines that are to be used by both halves of the library are declared + * to receive a pointer to this structure. There are no actual instances of + * jpeg_common_struct, only of jpeg_compress_struct and jpeg_decompress_struct. + */ +struct jpeg_common_struct { + jpeg_common_fields; /* Fields common to both master struct types */ + /* Additional fields follow in an actual jpeg_compress_struct or + * jpeg_decompress_struct. All three structs must agree on these + * initial fields! (This would be a lot cleaner in C++.) + */ +}; + +typedef struct jpeg_common_struct * j_common_ptr; +typedef struct jpeg_compress_struct * j_compress_ptr; +typedef struct jpeg_decompress_struct * j_decompress_ptr; + + +/* Master record for a compression instance */ + +struct jpeg_compress_struct { + jpeg_common_fields; /* Fields shared with jpeg_decompress_struct */ + + /* Destination for compressed data */ + struct jpeg_destination_mgr * dest; + + /* Description of source image --- these fields must be filled in by + * outer application before starting compression. in_color_space must + * be correct before you can even call jpeg_set_defaults(). + */ + + JDIMENSION image_width; /* input image width */ + JDIMENSION image_height; /* input image height */ + int input_components; /* # of color components in input image */ + J_COLOR_SPACE in_color_space; /* colorspace of input image */ + + double input_gamma; /* image gamma of input image */ + + /* Compression parameters --- these fields must be set before calling + * jpeg_start_compress(). We recommend calling jpeg_set_defaults() to + * initialize everything to reasonable defaults, then changing anything + * the application specifically wants to change. That way you won't get + * burnt when new parameters are added. Also note that there are several + * helper routines to simplify changing parameters. + */ + + unsigned int scale_num, scale_denom; /* fraction by which to scale image */ + + JDIMENSION jpeg_width; /* scaled JPEG image width */ + JDIMENSION jpeg_height; /* scaled JPEG image height */ + /* Dimensions of actual JPEG image that will be written to file, + * derived from input dimensions by scaling factors above. + * These fields are computed by jpeg_start_compress(). + * You can also use jpeg_calc_jpeg_dimensions() to determine these values + * in advance of calling jpeg_start_compress(). 
+ */ + + int data_precision; /* bits of precision in image data */ + + int num_components; /* # of color components in JPEG image */ + J_COLOR_SPACE jpeg_color_space; /* colorspace of JPEG image */ + + jpeg_component_info * comp_info; + /* comp_info[i] describes component that appears i'th in SOF */ + + JQUANT_TBL * quant_tbl_ptrs[NUM_QUANT_TBLS]; + int q_scale_factor[NUM_QUANT_TBLS]; + /* ptrs to coefficient quantization tables, or NULL if not defined, + * and corresponding scale factors (percentage, initialized 100). + */ + + JHUFF_TBL * dc_huff_tbl_ptrs[NUM_HUFF_TBLS]; + JHUFF_TBL * ac_huff_tbl_ptrs[NUM_HUFF_TBLS]; + /* ptrs to Huffman coding tables, or NULL if not defined */ + + UINT8 arith_dc_L[NUM_ARITH_TBLS]; /* L values for DC arith-coding tables */ + UINT8 arith_dc_U[NUM_ARITH_TBLS]; /* U values for DC arith-coding tables */ + UINT8 arith_ac_K[NUM_ARITH_TBLS]; /* Kx values for AC arith-coding tables */ + + int num_scans; /* # of entries in scan_info array */ + const jpeg_scan_info * scan_info; /* script for multi-scan file, or NULL */ + /* The default value of scan_info is NULL, which causes a single-scan + * sequential JPEG file to be emitted. To create a multi-scan file, + * set num_scans and scan_info to point to an array of scan definitions. + */ + + boolean raw_data_in; /* TRUE=caller supplies downsampled data */ + boolean arith_code; /* TRUE=arithmetic coding, FALSE=Huffman */ + boolean optimize_coding; /* TRUE=optimize entropy encoding parms */ + boolean CCIR601_sampling; /* TRUE=first samples are cosited */ + boolean do_fancy_downsampling; /* TRUE=apply fancy downsampling */ + int smoothing_factor; /* 1..100, or 0 for no input smoothing */ + J_DCT_METHOD dct_method; /* DCT algorithm selector */ + + /* The restart interval can be specified in absolute MCUs by setting + * restart_interval, or in MCU rows by setting restart_in_rows + * (in which case the correct restart_interval will be figured + * for each scan). + */ + unsigned int restart_interval; /* MCUs per restart, or 0 for no restart */ + int restart_in_rows; /* if > 0, MCU rows per restart interval */ + + /* Parameters controlling emission of special markers. */ + + boolean write_JFIF_header; /* should a JFIF marker be written? */ + UINT8 JFIF_major_version; /* What to write for the JFIF version number */ + UINT8 JFIF_minor_version; + /* These three values are not used by the JPEG code, merely copied */ + /* into the JFIF APP0 marker. density_unit can be 0 for unknown, */ + /* 1 for dots/inch, or 2 for dots/cm. Note that the pixel aspect */ + /* ratio is defined by X_density/Y_density even when density_unit=0. */ + UINT8 density_unit; /* JFIF code for pixel size units */ + UINT16 X_density; /* Horizontal pixel density */ + UINT16 Y_density; /* Vertical pixel density */ + boolean write_Adobe_marker; /* should an Adobe marker be written? */ + + /* State variable: index of next scanline to be written to + * jpeg_write_scanlines(). Application may use this to control its + * processing loop, e.g., "while (next_scanline < image_height)". + */ + + JDIMENSION next_scanline; /* 0 .. image_height-1 */ + + /* Remaining fields are known throughout compressor, but generally + * should not be touched by a surrounding application. 
+ */ + + /* + * These fields are computed during compression startup + */ + boolean progressive_mode; /* TRUE if scan script uses progressive mode */ + int max_h_samp_factor; /* largest h_samp_factor */ + int max_v_samp_factor; /* largest v_samp_factor */ + + int min_DCT_h_scaled_size; /* smallest DCT_h_scaled_size of any component */ + int min_DCT_v_scaled_size; /* smallest DCT_v_scaled_size of any component */ + + JDIMENSION total_iMCU_rows; /* # of iMCU rows to be input to coef ctlr */ + /* The coefficient controller receives data in units of MCU rows as defined + * for fully interleaved scans (whether the JPEG file is interleaved or not). + * There are v_samp_factor * DCTSIZE sample rows of each component in an + * "iMCU" (interleaved MCU) row. + */ + + /* + * These fields are valid during any one scan. + * They describe the components and MCUs actually appearing in the scan. + */ + int comps_in_scan; /* # of JPEG components in this scan */ + jpeg_component_info * cur_comp_info[MAX_COMPS_IN_SCAN]; + /* *cur_comp_info[i] describes component that appears i'th in SOS */ + + JDIMENSION MCUs_per_row; /* # of MCUs across the image */ + JDIMENSION MCU_rows_in_scan; /* # of MCU rows in the image */ + + int blocks_in_MCU; /* # of DCT blocks per MCU */ + int MCU_membership[C_MAX_BLOCKS_IN_MCU]; + /* MCU_membership[i] is index in cur_comp_info of component owning */ + /* i'th block in an MCU */ + + int Ss, Se, Ah, Al; /* progressive JPEG parameters for scan */ + + int block_size; /* the basic DCT block size: 1..16 */ + const int * natural_order; /* natural-order position array */ + int lim_Se; /* min( Se, DCTSIZE2-1 ) */ + + /* + * Links to compression subobjects (methods and private variables of modules) + */ + struct jpeg_comp_master * master; + struct jpeg_c_main_controller * main; + struct jpeg_c_prep_controller * prep; + struct jpeg_c_coef_controller * coef; + struct jpeg_marker_writer * marker; + struct jpeg_color_converter * cconvert; + struct jpeg_downsampler * downsample; + struct jpeg_forward_dct * fdct; + struct jpeg_entropy_encoder * entropy; + jpeg_scan_info * script_space; /* workspace for jpeg_simple_progression */ + int script_space_size; +}; + + +/* Master record for a decompression instance */ + +struct jpeg_decompress_struct { + jpeg_common_fields; /* Fields shared with jpeg_compress_struct */ + + /* Source of compressed data */ + struct jpeg_source_mgr * src; + + /* Basic description of image --- filled in by jpeg_read_header(). */ + /* Application may inspect these values to decide how to process image. */ + + JDIMENSION image_width; /* nominal image width (from SOF marker) */ + JDIMENSION image_height; /* nominal image height */ + int num_components; /* # of color components in JPEG image */ + J_COLOR_SPACE jpeg_color_space; /* colorspace of JPEG image */ + + /* Decompression processing parameters --- these fields must be set before + * calling jpeg_start_decompress(). Note that jpeg_read_header() initializes + * them to default values. 
+ */ + + J_COLOR_SPACE out_color_space; /* colorspace for output */ + + unsigned int scale_num, scale_denom; /* fraction by which to scale image */ + + double output_gamma; /* image gamma wanted in output */ + + boolean buffered_image; /* TRUE=multiple output passes */ + boolean raw_data_out; /* TRUE=downsampled data wanted */ + + J_DCT_METHOD dct_method; /* IDCT algorithm selector */ + boolean do_fancy_upsampling; /* TRUE=apply fancy upsampling */ + boolean do_block_smoothing; /* TRUE=apply interblock smoothing */ + + boolean quantize_colors; /* TRUE=colormapped output wanted */ + /* the following are ignored if not quantize_colors: */ + J_DITHER_MODE dither_mode; /* type of color dithering to use */ + boolean two_pass_quantize; /* TRUE=use two-pass color quantization */ + int desired_number_of_colors; /* max # colors to use in created colormap */ + /* these are significant only in buffered-image mode: */ + boolean enable_1pass_quant; /* enable future use of 1-pass quantizer */ + boolean enable_external_quant;/* enable future use of external colormap */ + boolean enable_2pass_quant; /* enable future use of 2-pass quantizer */ + + /* Description of actual output image that will be returned to application. + * These fields are computed by jpeg_start_decompress(). + * You can also use jpeg_calc_output_dimensions() to determine these values + * in advance of calling jpeg_start_decompress(). + */ + + JDIMENSION output_width; /* scaled image width */ + JDIMENSION output_height; /* scaled image height */ + int out_color_components; /* # of color components in out_color_space */ + int output_components; /* # of color components returned */ + /* output_components is 1 (a colormap index) when quantizing colors; + * otherwise it equals out_color_components. + */ + int rec_outbuf_height; /* min recommended height of scanline buffer */ + /* If the buffer passed to jpeg_read_scanlines() is less than this many rows + * high, space and time will be wasted due to unnecessary data copying. + * Usually rec_outbuf_height will be 1 or 2, at most 4. + */ + + /* When quantizing colors, the output colormap is described by these fields. + * The application can supply a colormap by setting colormap non-NULL before + * calling jpeg_start_decompress; otherwise a colormap is created during + * jpeg_start_decompress or jpeg_start_output. + * The map has out_color_components rows and actual_number_of_colors columns. + */ + int actual_number_of_colors; /* number of entries in use */ + JSAMPARRAY colormap; /* The color map as a 2-D pixel array */ + + /* State variables: these variables indicate the progress of decompression. + * The application may examine these but must not modify them. + */ + + /* Row index of next scanline to be read from jpeg_read_scanlines(). + * Application may use this to control its processing loop, e.g., + * "while (output_scanline < output_height)". + */ + JDIMENSION output_scanline; /* 0 .. output_height-1 */ + + /* Current input scan number and number of iMCU rows completed in scan. + * These indicate the progress of the decompressor input side. + */ + int input_scan_number; /* Number of SOS markers seen so far */ + JDIMENSION input_iMCU_row; /* Number of iMCU rows completed */ + + /* The "output scan number" is the notional scan being displayed by the + * output side. The decompressor will not allow output scan/row number + * to get ahead of input scan/row, but it can fall arbitrarily far behind. 
+ */ + int output_scan_number; /* Nominal scan number being displayed */ + JDIMENSION output_iMCU_row; /* Number of iMCU rows read */ + + /* Current progression status. coef_bits[c][i] indicates the precision + * with which component c's DCT coefficient i (in zigzag order) is known. + * It is -1 when no data has yet been received, otherwise it is the point + * transform (shift) value for the most recent scan of the coefficient + * (thus, 0 at completion of the progression). + * This pointer is NULL when reading a non-progressive file. + */ + int (*coef_bits)[DCTSIZE2]; /* -1 or current Al value for each coef */ + + /* Internal JPEG parameters --- the application usually need not look at + * these fields. Note that the decompressor output side may not use + * any parameters that can change between scans. + */ + + /* Quantization and Huffman tables are carried forward across input + * datastreams when processing abbreviated JPEG datastreams. + */ + + JQUANT_TBL * quant_tbl_ptrs[NUM_QUANT_TBLS]; + /* ptrs to coefficient quantization tables, or NULL if not defined */ + + JHUFF_TBL * dc_huff_tbl_ptrs[NUM_HUFF_TBLS]; + JHUFF_TBL * ac_huff_tbl_ptrs[NUM_HUFF_TBLS]; + /* ptrs to Huffman coding tables, or NULL if not defined */ + + /* These parameters are never carried across datastreams, since they + * are given in SOF/SOS markers or defined to be reset by SOI. + */ + + int data_precision; /* bits of precision in image data */ + + jpeg_component_info * comp_info; + /* comp_info[i] describes component that appears i'th in SOF */ + + boolean is_baseline; /* TRUE if Baseline SOF0 encountered */ + boolean progressive_mode; /* TRUE if SOFn specifies progressive mode */ + boolean arith_code; /* TRUE=arithmetic coding, FALSE=Huffman */ + + UINT8 arith_dc_L[NUM_ARITH_TBLS]; /* L values for DC arith-coding tables */ + UINT8 arith_dc_U[NUM_ARITH_TBLS]; /* U values for DC arith-coding tables */ + UINT8 arith_ac_K[NUM_ARITH_TBLS]; /* Kx values for AC arith-coding tables */ + + unsigned int restart_interval; /* MCUs per restart interval, or 0 for no restart */ + + /* These fields record data obtained from optional markers recognized by + * the JPEG library. + */ + boolean saw_JFIF_marker; /* TRUE iff a JFIF APP0 marker was found */ + /* Data copied from JFIF marker; only valid if saw_JFIF_marker is TRUE: */ + UINT8 JFIF_major_version; /* JFIF version number */ + UINT8 JFIF_minor_version; + UINT8 density_unit; /* JFIF code for pixel size units */ + UINT16 X_density; /* Horizontal pixel density */ + UINT16 Y_density; /* Vertical pixel density */ + boolean saw_Adobe_marker; /* TRUE iff an Adobe APP14 marker was found */ + UINT8 Adobe_transform; /* Color transform code from Adobe marker */ + + boolean CCIR601_sampling; /* TRUE=first samples are cosited */ + + /* Aside from the specific data retained from APPn markers known to the + * library, the uninterpreted contents of any or all APPn and COM markers + * can be saved in a list for examination by the application. + */ + jpeg_saved_marker_ptr marker_list; /* Head of list of saved markers */ + + /* Remaining fields are known throughout decompressor, but generally + * should not be touched by a surrounding application. 
+ */ + + /* + * These fields are computed during decompression startup + */ + int max_h_samp_factor; /* largest h_samp_factor */ + int max_v_samp_factor; /* largest v_samp_factor */ + + int min_DCT_h_scaled_size; /* smallest DCT_h_scaled_size of any component */ + int min_DCT_v_scaled_size; /* smallest DCT_v_scaled_size of any component */ + + JDIMENSION total_iMCU_rows; /* # of iMCU rows in image */ + /* The coefficient controller's input and output progress is measured in + * units of "iMCU" (interleaved MCU) rows. These are the same as MCU rows + * in fully interleaved JPEG scans, but are used whether the scan is + * interleaved or not. We define an iMCU row as v_samp_factor DCT block + * rows of each component. Therefore, the IDCT output contains + * v_samp_factor*DCT_v_scaled_size sample rows of a component per iMCU row. + */ + + JSAMPLE * sample_range_limit; /* table for fast range-limiting */ + + /* + * These fields are valid during any one scan. + * They describe the components and MCUs actually appearing in the scan. + * Note that the decompressor output side must not use these fields. + */ + int comps_in_scan; /* # of JPEG components in this scan */ + jpeg_component_info * cur_comp_info[MAX_COMPS_IN_SCAN]; + /* *cur_comp_info[i] describes component that appears i'th in SOS */ + + JDIMENSION MCUs_per_row; /* # of MCUs across the image */ + JDIMENSION MCU_rows_in_scan; /* # of MCU rows in the image */ + + int blocks_in_MCU; /* # of DCT blocks per MCU */ + int MCU_membership[D_MAX_BLOCKS_IN_MCU]; + /* MCU_membership[i] is index in cur_comp_info of component owning */ + /* i'th block in an MCU */ + + int Ss, Se, Ah, Al; /* progressive JPEG parameters for scan */ + + /* These fields are derived from Se of first SOS marker. + */ + int block_size; /* the basic DCT block size: 1..16 */ + const int * natural_order; /* natural-order position array for entropy decode */ + int lim_Se; /* min( Se, DCTSIZE2-1 ) for entropy decode */ + + /* This field is shared between entropy decoder and marker parser. + * It is either zero or the code of a JPEG marker that has been + * read from the data source, but has not yet been processed. + */ + int unread_marker; + + /* + * Links to decompression subobjects (methods, private variables of modules) + */ + struct jpeg_decomp_master * master; + struct jpeg_d_main_controller * main; + struct jpeg_d_coef_controller * coef; + struct jpeg_d_post_controller * post; + struct jpeg_input_controller * inputctl; + struct jpeg_marker_reader * marker; + struct jpeg_entropy_decoder * entropy; + struct jpeg_inverse_dct * idct; + struct jpeg_upsampler * upsample; + struct jpeg_color_deconverter * cconvert; + struct jpeg_color_quantizer * cquantize; +}; + + +/* "Object" declarations for JPEG modules that may be supplied or called + * directly by the surrounding application. + * As with all objects in the JPEG library, these structs only define the + * publicly visible methods and state variables of a module. Additional + * private fields may exist after the public ones. 
+ */ + + +/* Error handler object */ + +struct jpeg_error_mgr { + /* Error exit handler: does not return to caller */ + JMETHOD(void, error_exit, (j_common_ptr cinfo)); + /* Conditionally emit a trace or warning message */ + JMETHOD(void, emit_message, (j_common_ptr cinfo, int msg_level)); + /* Routine that actually outputs a trace or error message */ + JMETHOD(void, output_message, (j_common_ptr cinfo)); + /* Format a message string for the most recent JPEG error or message */ + JMETHOD(void, format_message, (j_common_ptr cinfo, char * buffer)); +#define JMSG_LENGTH_MAX 200 /* recommended size of format_message buffer */ + /* Reset error state variables at start of a new image */ + JMETHOD(void, reset_error_mgr, (j_common_ptr cinfo)); + + /* The message ID code and any parameters are saved here. + * A message can have one string parameter or up to 8 int parameters. + */ + int msg_code; +#define JMSG_STR_PARM_MAX 80 + union { + int i[8]; + char s[JMSG_STR_PARM_MAX]; + } msg_parm; + + /* Standard state variables for error facility */ + + int trace_level; /* max msg_level that will be displayed */ + + /* For recoverable corrupt-data errors, we emit a warning message, + * but keep going unless emit_message chooses to abort. emit_message + * should count warnings in num_warnings. The surrounding application + * can check for bad data by seeing if num_warnings is nonzero at the + * end of processing. + */ + long num_warnings; /* number of corrupt-data warnings */ + + /* These fields point to the table(s) of error message strings. + * An application can change the table pointer to switch to a different + * message list (typically, to change the language in which errors are + * reported). Some applications may wish to add additional error codes + * that will be handled by the JPEG library error mechanism; the second + * table pointer is used for this purpose. + * + * First table includes all errors generated by JPEG library itself. + * Error code 0 is reserved for a "no such error string" message. + */ + const char * const * jpeg_message_table; /* Library errors */ + int last_jpeg_message; /* Table contains strings 0..last_jpeg_message */ + /* Second table can be added by application (see cjpeg/djpeg for example). + * It contains strings numbered first_addon_message..last_addon_message. 
+ */ + const char * const * addon_message_table; /* Non-library errors */ + int first_addon_message; /* code for first string in addon table */ + int last_addon_message; /* code for last string in addon table */ +}; + + +/* Progress monitor object */ + +struct jpeg_progress_mgr { + JMETHOD(void, progress_monitor, (j_common_ptr cinfo)); + + long pass_counter; /* work units completed in this pass */ + long pass_limit; /* total number of work units in this pass */ + int completed_passes; /* passes completed so far */ + int total_passes; /* total number of passes expected */ +}; + + +/* Data destination object for compression */ + +struct jpeg_destination_mgr { + JOCTET * next_output_byte; /* => next byte to write in buffer */ + size_t free_in_buffer; /* # of byte spaces remaining in buffer */ + + JMETHOD(void, init_destination, (j_compress_ptr cinfo)); + JMETHOD(boolean, empty_output_buffer, (j_compress_ptr cinfo)); + JMETHOD(void, term_destination, (j_compress_ptr cinfo)); +}; + + +/* Data source object for decompression */ + +struct jpeg_source_mgr { + const JOCTET * next_input_byte; /* => next byte to read from buffer */ + size_t bytes_in_buffer; /* # of bytes remaining in buffer */ + + JMETHOD(void, init_source, (j_decompress_ptr cinfo)); + JMETHOD(boolean, fill_input_buffer, (j_decompress_ptr cinfo)); + JMETHOD(void, skip_input_data, (j_decompress_ptr cinfo, long num_bytes)); + JMETHOD(boolean, resync_to_restart, (j_decompress_ptr cinfo, int desired)); + JMETHOD(void, term_source, (j_decompress_ptr cinfo)); +}; + + +/* Memory manager object. + * Allocates "small" objects (a few K total), "large" objects (tens of K), + * and "really big" objects (virtual arrays with backing store if needed). + * The memory manager does not allow individual objects to be freed; rather, + * each created object is assigned to a pool, and whole pools can be freed + * at once. This is faster and more convenient than remembering exactly what + * to free, especially where malloc()/free() are not too speedy. + * NB: alloc routines never return NULL. They exit to error_exit if not + * successful. 
+ */ + +#define JPOOL_PERMANENT 0 /* lasts until master record is destroyed */ +#define JPOOL_IMAGE 1 /* lasts until done with image/datastream */ +#define JPOOL_NUMPOOLS 2 + +typedef struct jvirt_sarray_control * jvirt_sarray_ptr; +typedef struct jvirt_barray_control * jvirt_barray_ptr; + + +struct jpeg_memory_mgr { + /* Method pointers */ + JMETHOD(void *, alloc_small, (j_common_ptr cinfo, int pool_id, + size_t sizeofobject)); + JMETHOD(void FAR *, alloc_large, (j_common_ptr cinfo, int pool_id, + size_t sizeofobject)); + JMETHOD(JSAMPARRAY, alloc_sarray, (j_common_ptr cinfo, int pool_id, + JDIMENSION samplesperrow, + JDIMENSION numrows)); + JMETHOD(JBLOCKARRAY, alloc_barray, (j_common_ptr cinfo, int pool_id, + JDIMENSION blocksperrow, + JDIMENSION numrows)); + JMETHOD(jvirt_sarray_ptr, request_virt_sarray, (j_common_ptr cinfo, + int pool_id, + boolean pre_zero, + JDIMENSION samplesperrow, + JDIMENSION numrows, + JDIMENSION maxaccess)); + JMETHOD(jvirt_barray_ptr, request_virt_barray, (j_common_ptr cinfo, + int pool_id, + boolean pre_zero, + JDIMENSION blocksperrow, + JDIMENSION numrows, + JDIMENSION maxaccess)); + JMETHOD(void, realize_virt_arrays, (j_common_ptr cinfo)); + JMETHOD(JSAMPARRAY, access_virt_sarray, (j_common_ptr cinfo, + jvirt_sarray_ptr ptr, + JDIMENSION start_row, + JDIMENSION num_rows, + boolean writable)); + JMETHOD(JBLOCKARRAY, access_virt_barray, (j_common_ptr cinfo, + jvirt_barray_ptr ptr, + JDIMENSION start_row, + JDIMENSION num_rows, + boolean writable)); + JMETHOD(void, free_pool, (j_common_ptr cinfo, int pool_id)); + JMETHOD(void, self_destruct, (j_common_ptr cinfo)); + + /* Limit on memory allocation for this JPEG object. (Note that this is + * merely advisory, not a guaranteed maximum; it only affects the space + * used for virtual-array buffers.) May be changed by outer application + * after creating the JPEG object. + */ + long max_memory_to_use; + + /* Maximum allocation request accepted by alloc_large. */ + long max_alloc_chunk; +}; + + +/* Routine signature for application-supplied marker processing methods. + * Need not pass marker code since it is stored in cinfo->unread_marker. + */ +typedef JMETHOD(boolean, jpeg_marker_parser_method, (j_decompress_ptr cinfo)); + + +/* Declarations for routines called by application. + * The JPP macro hides prototype parameters from compilers that can't cope. + * Note JPP requires double parentheses. + */ + +#ifdef HAVE_PROTOTYPES +#define JPP(arglist) arglist +#else +#define JPP(arglist) () +#endif + + +/* Short forms of external names for systems with brain-damaged linkers. + * We shorten external names to be unique in the first six letters, which + * is good enough for all known systems. + * (If your compiler itself needs names to be unique in less than 15 + * characters, you are out of luck. Get a better compiler.) 
+ */ + +#ifdef NEED_SHORT_EXTERNAL_NAMES +#define jpeg_std_error jStdError +#define jpeg_CreateCompress jCreaCompress +#define jpeg_CreateDecompress jCreaDecompress +#define jpeg_destroy_compress jDestCompress +#define jpeg_destroy_decompress jDestDecompress +#define jpeg_stdio_dest jStdDest +#define jpeg_stdio_src jStdSrc +#define jpeg_mem_dest jMemDest +#define jpeg_mem_src jMemSrc +#define jpeg_set_defaults jSetDefaults +#define jpeg_set_colorspace jSetColorspace +#define jpeg_default_colorspace jDefColorspace +#define jpeg_set_quality jSetQuality +#define jpeg_set_linear_quality jSetLQuality +#define jpeg_default_qtables jDefQTables +#define jpeg_add_quant_table jAddQuantTable +#define jpeg_quality_scaling jQualityScaling +#define jpeg_simple_progression jSimProgress +#define jpeg_suppress_tables jSuppressTables +#define jpeg_alloc_quant_table jAlcQTable +#define jpeg_alloc_huff_table jAlcHTable +#define jpeg_start_compress jStrtCompress +#define jpeg_write_scanlines jWrtScanlines +#define jpeg_finish_compress jFinCompress +#define jpeg_calc_jpeg_dimensions jCjpegDimensions +#define jpeg_write_raw_data jWrtRawData +#define jpeg_write_marker jWrtMarker +#define jpeg_write_m_header jWrtMHeader +#define jpeg_write_m_byte jWrtMByte +#define jpeg_write_tables jWrtTables +#define jpeg_read_header jReadHeader +#define jpeg_start_decompress jStrtDecompress +#define jpeg_read_scanlines jReadScanlines +#define jpeg_finish_decompress jFinDecompress +#define jpeg_read_raw_data jReadRawData +#define jpeg_has_multiple_scans jHasMultScn +#define jpeg_start_output jStrtOutput +#define jpeg_finish_output jFinOutput +#define jpeg_input_complete jInComplete +#define jpeg_new_colormap jNewCMap +#define jpeg_consume_input jConsumeInput +#define jpeg_core_output_dimensions jCoreDimensions +#define jpeg_calc_output_dimensions jCalcDimensions +#define jpeg_save_markers jSaveMarkers +#define jpeg_set_marker_processor jSetMarker +#define jpeg_read_coefficients jReadCoefs +#define jpeg_write_coefficients jWrtCoefs +#define jpeg_copy_critical_parameters jCopyCrit +#define jpeg_abort_compress jAbrtCompress +#define jpeg_abort_decompress jAbrtDecompress +#define jpeg_abort jAbort +#define jpeg_destroy jDestroy +#define jpeg_resync_to_restart jResyncRestart +#endif /* NEED_SHORT_EXTERNAL_NAMES */ + + +/* Default error-management setup */ +EXTERN(struct jpeg_error_mgr *) jpeg_std_error + JPP((struct jpeg_error_mgr * err)); + +/* Initialization of JPEG compression objects. + * jpeg_create_compress() and jpeg_create_decompress() are the exported + * names that applications should call. These expand to calls on + * jpeg_CreateCompress and jpeg_CreateDecompress with additional information + * passed for version mismatch checking. + * NB: you must set up the error-manager BEFORE calling jpeg_create_xxx. 
+ */ +#define jpeg_create_compress(cinfo) \ + jpeg_CreateCompress((cinfo), JPEG_LIB_VERSION, \ + (size_t) sizeof(struct jpeg_compress_struct)) +#define jpeg_create_decompress(cinfo) \ + jpeg_CreateDecompress((cinfo), JPEG_LIB_VERSION, \ + (size_t) sizeof(struct jpeg_decompress_struct)) +EXTERN(void) jpeg_CreateCompress JPP((j_compress_ptr cinfo, + int version, size_t structsize)); +EXTERN(void) jpeg_CreateDecompress JPP((j_decompress_ptr cinfo, + int version, size_t structsize)); +/* Destruction of JPEG compression objects */ +EXTERN(void) jpeg_destroy_compress JPP((j_compress_ptr cinfo)); +EXTERN(void) jpeg_destroy_decompress JPP((j_decompress_ptr cinfo)); + +#ifdef JFILE +/* Standard data source and destination managers: stdio streams. */ +/* Caller is responsible for opening the file before and closing after. */ +EXTERN(void) jpeg_stdio_dest JPP((j_compress_ptr cinfo, JFILE * outfile)); +EXTERN(void) jpeg_stdio_src JPP((j_decompress_ptr cinfo, JFILE * infile)); +#endif /* JFILE */ + +/* Data source and destination managers: memory buffers. */ +EXTERN(void) jpeg_mem_dest JPP((j_compress_ptr cinfo, + unsigned char ** outbuffer, + unsigned long * outsize)); +EXTERN(void) jpeg_mem_src JPP((j_decompress_ptr cinfo, + unsigned char * inbuffer, + unsigned long insize)); + +/* Default parameter setup for compression */ +EXTERN(void) jpeg_set_defaults JPP((j_compress_ptr cinfo)); +/* Compression parameter setup aids */ +EXTERN(void) jpeg_set_colorspace JPP((j_compress_ptr cinfo, + J_COLOR_SPACE colorspace)); +EXTERN(void) jpeg_default_colorspace JPP((j_compress_ptr cinfo)); +EXTERN(void) jpeg_set_quality JPP((j_compress_ptr cinfo, int quality, + boolean force_baseline)); +EXTERN(void) jpeg_set_linear_quality JPP((j_compress_ptr cinfo, + int scale_factor, + boolean force_baseline)); +EXTERN(void) jpeg_default_qtables JPP((j_compress_ptr cinfo, + boolean force_baseline)); +EXTERN(void) jpeg_add_quant_table JPP((j_compress_ptr cinfo, int which_tbl, + const unsigned int *basic_table, + int scale_factor, + boolean force_baseline)); +EXTERN(int) jpeg_quality_scaling JPP((int quality)); +EXTERN(void) jpeg_simple_progression JPP((j_compress_ptr cinfo)); +EXTERN(void) jpeg_suppress_tables JPP((j_compress_ptr cinfo, + boolean suppress)); +EXTERN(JQUANT_TBL *) jpeg_alloc_quant_table JPP((j_common_ptr cinfo)); +EXTERN(JHUFF_TBL *) jpeg_alloc_huff_table JPP((j_common_ptr cinfo)); + +/* Main entry points for compression */ +EXTERN(void) jpeg_start_compress JPP((j_compress_ptr cinfo, + boolean write_all_tables)); +EXTERN(JDIMENSION) jpeg_write_scanlines JPP((j_compress_ptr cinfo, + JSAMPARRAY scanlines, + JDIMENSION num_lines)); +EXTERN(void) jpeg_finish_compress JPP((j_compress_ptr cinfo)); + +/* Precalculate JPEG dimensions for current compression parameters. */ +EXTERN(void) jpeg_calc_jpeg_dimensions JPP((j_compress_ptr cinfo)); + +/* Replaces jpeg_write_scanlines when writing raw downsampled data. */ +EXTERN(JDIMENSION) jpeg_write_raw_data JPP((j_compress_ptr cinfo, + JSAMPIMAGE data, + JDIMENSION num_lines)); + +/* Write a special marker. See libjpeg.txt concerning safe usage. */ +EXTERN(void) jpeg_write_marker + JPP((j_compress_ptr cinfo, int marker, + const JOCTET * dataptr, unsigned int datalen)); +/* Same, but piecemeal. 
*/ +EXTERN(void) jpeg_write_m_header + JPP((j_compress_ptr cinfo, int marker, unsigned int datalen)); +EXTERN(void) jpeg_write_m_byte + JPP((j_compress_ptr cinfo, int val)); + +/* Alternate compression function: just write an abbreviated table file */ +EXTERN(void) jpeg_write_tables JPP((j_compress_ptr cinfo)); + +/* Decompression startup: read start of JPEG datastream to see what's there */ +EXTERN(int) jpeg_read_header JPP((j_decompress_ptr cinfo, + boolean require_image)); +/* Return value is one of: */ +#define JPEG_SUSPENDED 0 /* Suspended due to lack of input data */ +#define JPEG_HEADER_OK 1 /* Found valid image datastream */ +#define JPEG_HEADER_TABLES_ONLY 2 /* Found valid table-specs-only datastream */ +/* If you pass require_image = TRUE (normal case), you need not check for + * a TABLES_ONLY return code; an abbreviated file will cause an error exit. + * JPEG_SUSPENDED is only possible if you use a data source module that can + * give a suspension return (the stdio source module doesn't). + */ + +/* Main entry points for decompression */ +EXTERN(boolean) jpeg_start_decompress JPP((j_decompress_ptr cinfo)); +EXTERN(JDIMENSION) jpeg_read_scanlines JPP((j_decompress_ptr cinfo, + JSAMPARRAY scanlines, + JDIMENSION max_lines)); +EXTERN(boolean) jpeg_finish_decompress JPP((j_decompress_ptr cinfo)); + +/* Replaces jpeg_read_scanlines when reading raw downsampled data. */ +EXTERN(JDIMENSION) jpeg_read_raw_data JPP((j_decompress_ptr cinfo, + JSAMPIMAGE data, + JDIMENSION max_lines)); + +/* Additional entry points for buffered-image mode. */ +EXTERN(boolean) jpeg_has_multiple_scans JPP((j_decompress_ptr cinfo)); +EXTERN(boolean) jpeg_start_output JPP((j_decompress_ptr cinfo, + int scan_number)); +EXTERN(boolean) jpeg_finish_output JPP((j_decompress_ptr cinfo)); +EXTERN(boolean) jpeg_input_complete JPP((j_decompress_ptr cinfo)); +EXTERN(void) jpeg_new_colormap JPP((j_decompress_ptr cinfo)); +EXTERN(int) jpeg_consume_input JPP((j_decompress_ptr cinfo)); +/* Return value is one of: */ +/* #define JPEG_SUSPENDED 0 Suspended due to lack of input data */ +#define JPEG_REACHED_SOS 1 /* Reached start of new scan */ +#define JPEG_REACHED_EOI 2 /* Reached end of image */ +#define JPEG_ROW_COMPLETED 3 /* Completed one iMCU row */ +#define JPEG_SCAN_COMPLETED 4 /* Completed last iMCU row of a scan */ + +/* Precalculate output dimensions for current decompression parameters. */ +EXTERN(void) jpeg_core_output_dimensions JPP((j_decompress_ptr cinfo)); +EXTERN(void) jpeg_calc_output_dimensions JPP((j_decompress_ptr cinfo)); + +/* Control saving of COM and APPn markers into marker_list. */ +EXTERN(void) jpeg_save_markers + JPP((j_decompress_ptr cinfo, int marker_code, + unsigned int length_limit)); + +/* Install a special processing method for COM or APPn markers. */ +EXTERN(void) jpeg_set_marker_processor + JPP((j_decompress_ptr cinfo, int marker_code, + jpeg_marker_parser_method routine)); + +/* Read or write raw DCT coefficients --- useful for lossless transcoding. */ +EXTERN(jvirt_barray_ptr *) jpeg_read_coefficients JPP((j_decompress_ptr cinfo)); +EXTERN(void) jpeg_write_coefficients JPP((j_compress_ptr cinfo, + jvirt_barray_ptr * coef_arrays)); +EXTERN(void) jpeg_copy_critical_parameters JPP((j_decompress_ptr srcinfo, + j_compress_ptr dstinfo)); + +/* If you choose to abort compression or decompression before completing + * jpeg_finish_(de)compress, then you need to clean up to release memory, + * temporary files, etc. 
You can just call jpeg_destroy_(de)compress + * if you're done with the JPEG object, but if you want to clean it up and + * reuse it, call this: + */ +EXTERN(void) jpeg_abort_compress JPP((j_compress_ptr cinfo)); +EXTERN(void) jpeg_abort_decompress JPP((j_decompress_ptr cinfo)); + +/* Generic versions of jpeg_abort and jpeg_destroy that work on either + * flavor of JPEG object. These may be more convenient in some places. + */ +EXTERN(void) jpeg_abort JPP((j_common_ptr cinfo)); +EXTERN(void) jpeg_destroy JPP((j_common_ptr cinfo)); + +/* Default restart-marker-resync procedure for use by data source modules */ +EXTERN(boolean) jpeg_resync_to_restart JPP((j_decompress_ptr cinfo, + int desired)); + + +/* These marker codes are exported since applications and data source modules + * are likely to want to use them. + */ + +#define JPEG_RST0 0xD0 /* RST0 marker code */ +#define JPEG_EOI 0xD9 /* EOI marker code */ +#define JPEG_APP0 0xE0 /* APP0 marker code */ +#define JPEG_COM 0xFE /* COM marker code */ + + +/* If we have a brain-damaged compiler that emits warnings (or worse, errors) + * for structure definitions that are never filled in, keep it quiet by + * supplying dummy definitions for the various substructures. + */ + +#ifdef INCOMPLETE_TYPES_BROKEN +#ifndef JPEG_INTERNALS /* will be defined in jpegint.h */ +struct jvirt_sarray_control { long dummy; }; +struct jvirt_barray_control { long dummy; }; +struct jpeg_comp_master { long dummy; }; +struct jpeg_c_main_controller { long dummy; }; +struct jpeg_c_prep_controller { long dummy; }; +struct jpeg_c_coef_controller { long dummy; }; +struct jpeg_marker_writer { long dummy; }; +struct jpeg_color_converter { long dummy; }; +struct jpeg_downsampler { long dummy; }; +struct jpeg_forward_dct { long dummy; }; +struct jpeg_entropy_encoder { long dummy; }; +struct jpeg_decomp_master { long dummy; }; +struct jpeg_d_main_controller { long dummy; }; +struct jpeg_d_coef_controller { long dummy; }; +struct jpeg_d_post_controller { long dummy; }; +struct jpeg_input_controller { long dummy; }; +struct jpeg_marker_reader { long dummy; }; +struct jpeg_entropy_decoder { long dummy; }; +struct jpeg_inverse_dct { long dummy; }; +struct jpeg_upsampler { long dummy; }; +struct jpeg_color_deconverter { long dummy; }; +struct jpeg_color_quantizer { long dummy; }; +#endif /* JPEG_INTERNALS */ +#endif /* INCOMPLETE_TYPES_BROKEN */ + + +/* + * The JPEG library modules define JPEG_INTERNALS before including this file. + * The internal structure declarations are read only when that is true. + * Applications using the library should not include jpegint.h, but may wish + * to include jerror.h. + */ + +#ifdef JPEG_INTERNALS +#include "jpegint.h" /* fetch private declarations */ +#include "jerror.h" /* fetch error codes too */ +#endif + +#ifdef __cplusplus +#ifndef DONT_USE_EXTERN_C +} +#endif +#endif + +#endif /* JPEGLIB_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jversion.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jversion.h new file mode 100644 index 000000000..5d4915103 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/include/jversion.h @@ -0,0 +1,14 @@ +/* + * jversion.h + * + * Copyright (C) 1991-2012, Thomas G. Lane, Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains software version identification. 
+ */ + + +#define JVERSION "8d 15-Jan-2012" + +#define JCOPYRIGHT "Copyright (C) 2012, Thomas G. Lane, Guido Vollbeding" diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jaricom.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jaricom.c new file mode 100644 index 000000000..690068861 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jaricom.c @@ -0,0 +1,153 @@ +/* + * jaricom.c + * + * Developed 1997-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains probability estimation tables for common use in + * arithmetic entropy encoding and decoding routines. + * + * This data represents Table D.3 in the JPEG spec (D.2 in the draft), + * ISO/IEC IS 10918-1 and CCITT Recommendation ITU-T T.81, and Table 24 + * in the JBIG spec, ISO/IEC IS 11544 and CCITT Recommendation ITU-T T.82. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + +/* The following #define specifies the packing of the four components + * into the compact INT32 representation. + * Note that this formula must match the actual arithmetic encoder + * and decoder implementation. The implementation has to be changed + * if this formula is changed. + * The current organization is leaned on Markus Kuhn's JBIG + * implementation (jbig_tab.c). + */ + +#define V(i,a,b,c,d) (((INT32)a << 16) | ((INT32)c << 8) | ((INT32)d << 7) | b) + +const INT32 jpeg_aritab[113+1] = { +/* + * Index, Qe_Value, Next_Index_LPS, Next_Index_MPS, Switch_MPS + */ + V( 0, 0x5a1d, 1, 1, 1 ), + V( 1, 0x2586, 14, 2, 0 ), + V( 2, 0x1114, 16, 3, 0 ), + V( 3, 0x080b, 18, 4, 0 ), + V( 4, 0x03d8, 20, 5, 0 ), + V( 5, 0x01da, 23, 6, 0 ), + V( 6, 0x00e5, 25, 7, 0 ), + V( 7, 0x006f, 28, 8, 0 ), + V( 8, 0x0036, 30, 9, 0 ), + V( 9, 0x001a, 33, 10, 0 ), + V( 10, 0x000d, 35, 11, 0 ), + V( 11, 0x0006, 9, 12, 0 ), + V( 12, 0x0003, 10, 13, 0 ), + V( 13, 0x0001, 12, 13, 0 ), + V( 14, 0x5a7f, 15, 15, 1 ), + V( 15, 0x3f25, 36, 16, 0 ), + V( 16, 0x2cf2, 38, 17, 0 ), + V( 17, 0x207c, 39, 18, 0 ), + V( 18, 0x17b9, 40, 19, 0 ), + V( 19, 0x1182, 42, 20, 0 ), + V( 20, 0x0cef, 43, 21, 0 ), + V( 21, 0x09a1, 45, 22, 0 ), + V( 22, 0x072f, 46, 23, 0 ), + V( 23, 0x055c, 48, 24, 0 ), + V( 24, 0x0406, 49, 25, 0 ), + V( 25, 0x0303, 51, 26, 0 ), + V( 26, 0x0240, 52, 27, 0 ), + V( 27, 0x01b1, 54, 28, 0 ), + V( 28, 0x0144, 56, 29, 0 ), + V( 29, 0x00f5, 57, 30, 0 ), + V( 30, 0x00b7, 59, 31, 0 ), + V( 31, 0x008a, 60, 32, 0 ), + V( 32, 0x0068, 62, 33, 0 ), + V( 33, 0x004e, 63, 34, 0 ), + V( 34, 0x003b, 32, 35, 0 ), + V( 35, 0x002c, 33, 9, 0 ), + V( 36, 0x5ae1, 37, 37, 1 ), + V( 37, 0x484c, 64, 38, 0 ), + V( 38, 0x3a0d, 65, 39, 0 ), + V( 39, 0x2ef1, 67, 40, 0 ), + V( 40, 0x261f, 68, 41, 0 ), + V( 41, 0x1f33, 69, 42, 0 ), + V( 42, 0x19a8, 70, 43, 0 ), + V( 43, 0x1518, 72, 44, 0 ), + V( 44, 0x1177, 73, 45, 0 ), + V( 45, 0x0e74, 74, 46, 0 ), + V( 46, 0x0bfb, 75, 47, 0 ), + V( 47, 0x09f8, 77, 48, 0 ), + V( 48, 0x0861, 78, 49, 0 ), + V( 49, 0x0706, 79, 50, 0 ), + V( 50, 0x05cd, 48, 51, 0 ), + V( 51, 0x04de, 50, 52, 0 ), + V( 52, 0x040f, 50, 53, 0 ), + V( 53, 0x0363, 51, 54, 0 ), + V( 54, 0x02d4, 52, 55, 0 ), + V( 55, 0x025c, 53, 56, 0 ), + V( 56, 0x01f8, 54, 57, 0 ), + V( 57, 0x01a4, 55, 58, 0 ), + V( 58, 0x0160, 56, 59, 0 ), + V( 59, 0x0125, 57, 60, 0 ), + V( 60, 0x00f6, 58, 61, 0 ), + V( 61, 0x00cb, 59, 62, 0 ), + V( 62, 0x00ab, 61, 63, 0 ), + V( 63, 
0x008f, 61, 32, 0 ), + V( 64, 0x5b12, 65, 65, 1 ), + V( 65, 0x4d04, 80, 66, 0 ), + V( 66, 0x412c, 81, 67, 0 ), + V( 67, 0x37d8, 82, 68, 0 ), + V( 68, 0x2fe8, 83, 69, 0 ), + V( 69, 0x293c, 84, 70, 0 ), + V( 70, 0x2379, 86, 71, 0 ), + V( 71, 0x1edf, 87, 72, 0 ), + V( 72, 0x1aa9, 87, 73, 0 ), + V( 73, 0x174e, 72, 74, 0 ), + V( 74, 0x1424, 72, 75, 0 ), + V( 75, 0x119c, 74, 76, 0 ), + V( 76, 0x0f6b, 74, 77, 0 ), + V( 77, 0x0d51, 75, 78, 0 ), + V( 78, 0x0bb6, 77, 79, 0 ), + V( 79, 0x0a40, 77, 48, 0 ), + V( 80, 0x5832, 80, 81, 1 ), + V( 81, 0x4d1c, 88, 82, 0 ), + V( 82, 0x438e, 89, 83, 0 ), + V( 83, 0x3bdd, 90, 84, 0 ), + V( 84, 0x34ee, 91, 85, 0 ), + V( 85, 0x2eae, 92, 86, 0 ), + V( 86, 0x299a, 93, 87, 0 ), + V( 87, 0x2516, 86, 71, 0 ), + V( 88, 0x5570, 88, 89, 1 ), + V( 89, 0x4ca9, 95, 90, 0 ), + V( 90, 0x44d9, 96, 91, 0 ), + V( 91, 0x3e22, 97, 92, 0 ), + V( 92, 0x3824, 99, 93, 0 ), + V( 93, 0x32b4, 99, 94, 0 ), + V( 94, 0x2e17, 93, 86, 0 ), + V( 95, 0x56a8, 95, 96, 1 ), + V( 96, 0x4f46, 101, 97, 0 ), + V( 97, 0x47e5, 102, 98, 0 ), + V( 98, 0x41cf, 103, 99, 0 ), + V( 99, 0x3c3d, 104, 100, 0 ), + V( 100, 0x375e, 99, 93, 0 ), + V( 101, 0x5231, 105, 102, 0 ), + V( 102, 0x4c0f, 106, 103, 0 ), + V( 103, 0x4639, 107, 104, 0 ), + V( 104, 0x415e, 103, 99, 0 ), + V( 105, 0x5627, 105, 106, 1 ), + V( 106, 0x50e7, 108, 107, 0 ), + V( 107, 0x4b85, 109, 103, 0 ), + V( 108, 0x5597, 110, 109, 0 ), + V( 109, 0x504f, 111, 107, 0 ), + V( 110, 0x5a10, 110, 111, 1 ), + V( 111, 0x5522, 112, 109, 0 ), + V( 112, 0x59eb, 112, 111, 1 ), +/* + * This last entry is used for fixed probability estimate of 0.5 + * as suggested in Section 10.3 Table 5 of ITU-T Rec. T.851. + */ + V( 113, 0x5a1d, 113, 113, 0 ) +}; diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcapimin.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcapimin.c new file mode 100644 index 000000000..639ce86f4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcapimin.c @@ -0,0 +1,288 @@ +/* + * jcapimin.c + * + * Copyright (C) 1994-1998, Thomas G. Lane. + * Modified 2003-2010 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains application interface code for the compression half + * of the JPEG library. These are the "minimum" API routines that may be + * needed in either the normal full-compression case or the transcoding-only + * case. + * + * Most of the routines intended to be called directly by an application + * are in this file or in jcapistd.c. But also see jcparam.c for + * parameter-setup helper routines, jcomapi.c for routines shared by + * compression and decompression, and jctrans.c for the transcoding case. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* + * Initialization of a JPEG compression object. + * The error manager must already be set up (in case memory manager fails). + */ + +GLOBAL(void) +jpeg_CreateCompress (j_compress_ptr cinfo, int version, size_t structsize) +{ + int i; + + /* Guard against version mismatches between library and caller. 
*/ + cinfo->mem = NULL; /* so jpeg_destroy knows mem mgr not called */ + if (version != JPEG_LIB_VERSION) + ERREXIT2(cinfo, JERR_BAD_LIB_VERSION, JPEG_LIB_VERSION, version); + if (structsize != SIZEOF(struct jpeg_compress_struct)) + ERREXIT2(cinfo, JERR_BAD_STRUCT_SIZE, + (int) SIZEOF(struct jpeg_compress_struct), (int) structsize); + + /* For debugging purposes, we zero the whole master structure. + * But the application has already set the err pointer, and may have set + * client_data, so we have to save and restore those fields. + * Note: if application hasn't set client_data, tools like Purify may + * complain here. + */ + { + struct jpeg_error_mgr * err = cinfo->err; + void * client_data = cinfo->client_data; /* ignore Purify complaint here */ + MEMZERO(cinfo, SIZEOF(struct jpeg_compress_struct)); + cinfo->err = err; + cinfo->client_data = client_data; + } + cinfo->is_decompressor = FALSE; + + /* Initialize a memory manager instance for this object */ + jinit_memory_mgr((j_common_ptr) cinfo); + + /* Zero out pointers to permanent structures. */ + cinfo->progress = NULL; + cinfo->dest = NULL; + + cinfo->comp_info = NULL; + + for (i = 0; i < NUM_QUANT_TBLS; i++) { + cinfo->quant_tbl_ptrs[i] = NULL; + cinfo->q_scale_factor[i] = 100; + } + + for (i = 0; i < NUM_HUFF_TBLS; i++) { + cinfo->dc_huff_tbl_ptrs[i] = NULL; + cinfo->ac_huff_tbl_ptrs[i] = NULL; + } + + /* Must do it here for emit_dqt in case jpeg_write_tables is used */ + cinfo->block_size = DCTSIZE; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + + cinfo->script_space = NULL; + + cinfo->input_gamma = 1.0; /* in case application forgets */ + + /* OK, I'm ready */ + cinfo->global_state = CSTATE_START; +} + + +/* + * Destruction of a JPEG compression object + */ + +GLOBAL(void) +jpeg_destroy_compress (j_compress_ptr cinfo) +{ + jpeg_destroy((j_common_ptr) cinfo); /* use common routine */ +} + + +/* + * Abort processing of a JPEG compression operation, + * but don't destroy the object itself. + */ + +GLOBAL(void) +jpeg_abort_compress (j_compress_ptr cinfo) +{ + jpeg_abort((j_common_ptr) cinfo); /* use common routine */ +} + + +/* + * Forcibly suppress or un-suppress all quantization and Huffman tables. + * Marks all currently defined tables as already written (if suppress) + * or not written (if !suppress). This will control whether they get emitted + * by a subsequent jpeg_start_compress call. + * + * This routine is exported for use by applications that want to produce + * abbreviated JPEG datastreams. It logically belongs in jcparam.c, but + * since it is called by jpeg_start_compress, we put it here --- otherwise + * jcparam.o would be linked whether the application used it or not. + */ + +GLOBAL(void) +jpeg_suppress_tables (j_compress_ptr cinfo, boolean suppress) +{ + int i; + JQUANT_TBL * qtbl; + JHUFF_TBL * htbl; + + for (i = 0; i < NUM_QUANT_TBLS; i++) { + if ((qtbl = cinfo->quant_tbl_ptrs[i]) != NULL) + qtbl->sent_table = suppress; + } + + for (i = 0; i < NUM_HUFF_TBLS; i++) { + if ((htbl = cinfo->dc_huff_tbl_ptrs[i]) != NULL) + htbl->sent_table = suppress; + if ((htbl = cinfo->ac_huff_tbl_ptrs[i]) != NULL) + htbl->sent_table = suppress; + } +} + + +/* + * Finish JPEG compression. + * + * If a multipass operating mode was selected, this may do a great deal of + * work including most of the actual output. 
+ */ + +GLOBAL(void) +jpeg_finish_compress (j_compress_ptr cinfo) +{ + JDIMENSION iMCU_row; + + if (cinfo->global_state == CSTATE_SCANNING || + cinfo->global_state == CSTATE_RAW_OK) { + /* Terminate first pass */ + if (cinfo->next_scanline < cinfo->image_height) + ERREXIT(cinfo, JERR_TOO_LITTLE_DATA); + (*cinfo->master->finish_pass) (cinfo); + } else if (cinfo->global_state != CSTATE_WRCOEFS) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + /* Perform any remaining passes */ + while (! cinfo->master->is_last_pass) { + (*cinfo->master->prepare_for_pass) (cinfo); + for (iMCU_row = 0; iMCU_row < cinfo->total_iMCU_rows; iMCU_row++) { + if (cinfo->progress != NULL) { + cinfo->progress->pass_counter = (long) iMCU_row; + cinfo->progress->pass_limit = (long) cinfo->total_iMCU_rows; + (*cinfo->progress->progress_monitor) ((j_common_ptr) cinfo); + } + /* We bypass the main controller and invoke coef controller directly; + * all work is being done from the coefficient buffer. + */ + if (! (*cinfo->coef->compress_data) (cinfo, (JSAMPIMAGE) NULL)) + ERREXIT(cinfo, JERR_CANT_SUSPEND); + } + (*cinfo->master->finish_pass) (cinfo); + } + /* Write EOI, do final cleanup */ + (*cinfo->marker->write_file_trailer) (cinfo); + (*cinfo->dest->term_destination) (cinfo); + /* We can use jpeg_abort to release memory and reset global_state */ + jpeg_abort((j_common_ptr) cinfo); +} + + +/* + * Write a special marker. + * This is only recommended for writing COM or APPn markers. + * Must be called after jpeg_start_compress() and before + * first call to jpeg_write_scanlines() or jpeg_write_raw_data(). + */ + +GLOBAL(void) +jpeg_write_marker (j_compress_ptr cinfo, int marker, + const JOCTET *dataptr, unsigned int datalen) +{ + JMETHOD(void, write_marker_byte, (j_compress_ptr info, int val)); + + if (cinfo->next_scanline != 0 || + (cinfo->global_state != CSTATE_SCANNING && + cinfo->global_state != CSTATE_RAW_OK && + cinfo->global_state != CSTATE_WRCOEFS)) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + (*cinfo->marker->write_marker_header) (cinfo, marker, datalen); + write_marker_byte = cinfo->marker->write_marker_byte; /* copy for speed */ + while (datalen--) { + (*write_marker_byte) (cinfo, *dataptr); + dataptr++; + } +} + +/* Same, but piecemeal. */ + +GLOBAL(void) +jpeg_write_m_header (j_compress_ptr cinfo, int marker, unsigned int datalen) +{ + if (cinfo->next_scanline != 0 || + (cinfo->global_state != CSTATE_SCANNING && + cinfo->global_state != CSTATE_RAW_OK && + cinfo->global_state != CSTATE_WRCOEFS)) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + (*cinfo->marker->write_marker_header) (cinfo, marker, datalen); +} + +GLOBAL(void) +jpeg_write_m_byte (j_compress_ptr cinfo, int val) +{ + (*cinfo->marker->write_marker_byte) (cinfo, val); +} + + +/* + * Alternate compression function: just write an abbreviated table file. + * Before calling this, all parameters and a data destination must be set up. + * + * To produce a pair of files containing abbreviated tables and abbreviated + * image data, one would proceed as follows: + * + * initialize JPEG object + * set JPEG parameters + * set destination to table file + * jpeg_write_tables(cinfo); + * set destination to image file + * jpeg_start_compress(cinfo, FALSE); + * write data... + * jpeg_finish_compress(cinfo); + * + * jpeg_write_tables has the side effect of marking all tables written + * (same as jpeg_suppress_tables(..., TRUE)). 
Thus a subsequent start_compress + * will not re-emit the tables unless it is passed write_all_tables=TRUE. + */ + +GLOBAL(void) +jpeg_write_tables (j_compress_ptr cinfo) +{ + if (cinfo->global_state != CSTATE_START) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + /* (Re)initialize error mgr and destination modules */ + (*cinfo->err->reset_error_mgr) ((j_common_ptr) cinfo); + (*cinfo->dest->init_destination) (cinfo); + /* Initialize the marker writer ... bit of a crock to do it here. */ + jinit_marker_writer(cinfo); + /* Write them tables! */ + (*cinfo->marker->write_tables_only) (cinfo); + /* And clean up. */ + (*cinfo->dest->term_destination) (cinfo); + /* + * In library releases up through v6a, we called jpeg_abort() here to free + * any working memory allocated by the destination manager and marker + * writer. Some applications had a problem with that: they allocated space + * of their own from the library memory manager, and didn't want it to go + * away during write_tables. So now we do nothing. This will cause a + * memory leak if an app calls write_tables repeatedly without doing a full + * compression cycle or otherwise resetting the JPEG object. However, that + * seems less bad than unexpectedly freeing memory in the normal case. + * An app that prefers the old behavior can call jpeg_abort for itself after + * each call to jpeg_write_tables(). + */ +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcapistd.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcapistd.c new file mode 100644 index 000000000..c0320b1b1 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcapistd.c @@ -0,0 +1,161 @@ +/* + * jcapistd.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains application interface code for the compression half + * of the JPEG library. These are the "standard" API routines that are + * used in the normal full-compression case. They are not used by a + * transcoding-only application. Note that if an application links in + * jpeg_start_compress, it will end up linking in the entire compressor. + * We thus must separate this file from jcapimin.c to avoid linking the + * whole compression library into a transcoder. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* + * Compression initialization. + * Before calling this, all parameters and a data destination must be set up. + * + * We require a write_all_tables parameter as a failsafe check when writing + * multiple datastreams from the same compression object. Since prior runs + * will have left all the tables marked sent_table=TRUE, a subsequent run + * would emit an abbreviated stream (no tables) by default. This may be what + * is wanted, but for safety's sake it should not be the default behavior: + * programmers should have to make a deliberate choice to emit abbreviated + * images. Therefore the documentation and examples should encourage people + * to pass write_all_tables=TRUE; then it will take active thought to do the + * wrong thing. 
+ */ + +GLOBAL(void) +jpeg_start_compress (j_compress_ptr cinfo, boolean write_all_tables) +{ + if (cinfo->global_state != CSTATE_START) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + if (write_all_tables) + jpeg_suppress_tables(cinfo, FALSE); /* mark all tables to be written */ + + /* (Re)initialize error mgr and destination modules */ + (*cinfo->err->reset_error_mgr) ((j_common_ptr) cinfo); + (*cinfo->dest->init_destination) (cinfo); + /* Perform master selection of active modules */ + jinit_compress_master(cinfo); + /* Set up for the first pass */ + (*cinfo->master->prepare_for_pass) (cinfo); + /* Ready for application to drive first pass through jpeg_write_scanlines + * or jpeg_write_raw_data. + */ + cinfo->next_scanline = 0; + cinfo->global_state = (cinfo->raw_data_in ? CSTATE_RAW_OK : CSTATE_SCANNING); +} + + +/* + * Write some scanlines of data to the JPEG compressor. + * + * The return value will be the number of lines actually written. + * This should be less than the supplied num_lines only in case that + * the data destination module has requested suspension of the compressor, + * or if more than image_height scanlines are passed in. + * + * Note: we warn about excess calls to jpeg_write_scanlines() since + * this likely signals an application programmer error. However, + * excess scanlines passed in the last valid call are *silently* ignored, + * so that the application need not adjust num_lines for end-of-image + * when using a multiple-scanline buffer. + */ + +GLOBAL(JDIMENSION) +jpeg_write_scanlines (j_compress_ptr cinfo, JSAMPARRAY scanlines, + JDIMENSION num_lines) +{ + JDIMENSION row_ctr, rows_left; + + if (cinfo->global_state != CSTATE_SCANNING) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + if (cinfo->next_scanline >= cinfo->image_height) + WARNMS(cinfo, JWRN_TOO_MUCH_DATA); + + /* Call progress monitor hook if present */ + if (cinfo->progress != NULL) { + cinfo->progress->pass_counter = (long) cinfo->next_scanline; + cinfo->progress->pass_limit = (long) cinfo->image_height; + (*cinfo->progress->progress_monitor) ((j_common_ptr) cinfo); + } + + /* Give master control module another chance if this is first call to + * jpeg_write_scanlines. This lets output of the frame/scan headers be + * delayed so that application can write COM, etc, markers between + * jpeg_start_compress and jpeg_write_scanlines. + */ + if (cinfo->master->call_pass_startup) + (*cinfo->master->pass_startup) (cinfo); + + /* Ignore any extra scanlines at bottom of image. */ + rows_left = cinfo->image_height - cinfo->next_scanline; + if (num_lines > rows_left) + num_lines = rows_left; + + row_ctr = 0; + (*cinfo->main->process_data) (cinfo, scanlines, &row_ctr, num_lines); + cinfo->next_scanline += row_ctr; + return row_ctr; +} + + +/* + * Alternate entry point to write raw data. + * Processes exactly one iMCU row per call, unless suspended. 
+ */ + +GLOBAL(JDIMENSION) +jpeg_write_raw_data (j_compress_ptr cinfo, JSAMPIMAGE data, + JDIMENSION num_lines) +{ + JDIMENSION lines_per_iMCU_row; + + if (cinfo->global_state != CSTATE_RAW_OK) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + if (cinfo->next_scanline >= cinfo->image_height) { + WARNMS(cinfo, JWRN_TOO_MUCH_DATA); + return 0; + } + + /* Call progress monitor hook if present */ + if (cinfo->progress != NULL) { + cinfo->progress->pass_counter = (long) cinfo->next_scanline; + cinfo->progress->pass_limit = (long) cinfo->image_height; + (*cinfo->progress->progress_monitor) ((j_common_ptr) cinfo); + } + + /* Give master control module another chance if this is first call to + * jpeg_write_raw_data. This lets output of the frame/scan headers be + * delayed so that application can write COM, etc, markers between + * jpeg_start_compress and jpeg_write_raw_data. + */ + if (cinfo->master->call_pass_startup) + (*cinfo->master->pass_startup) (cinfo); + + /* Verify that at least one iMCU row has been passed. */ + lines_per_iMCU_row = cinfo->max_v_samp_factor * DCTSIZE; + if (num_lines < lines_per_iMCU_row) + ERREXIT(cinfo, JERR_BUFFER_SIZE); + + /* Directly compress the row. */ + if (! (*cinfo->coef->compress_data) (cinfo, data)) { + /* If compressor did not consume the whole row, suspend processing. */ + return 0; + } + + /* OK, we processed one iMCU row. */ + cinfo->next_scanline += lines_per_iMCU_row; + return lines_per_iMCU_row; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcarith.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcarith.c new file mode 100644 index 000000000..033f67069 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcarith.c @@ -0,0 +1,937 @@ +/* + * jcarith.c + * + * Developed 1997-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains portable arithmetic entropy encoding routines for JPEG + * (implementing the ISO/IEC IS 10918-1 and CCITT Recommendation ITU-T T.81). + * + * Both sequential and progressive modes are supported in this single module. + * + * Suspension is not currently supported in this module. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Expanded entropy encoder object for arithmetic encoding. */ + +typedef struct { + struct jpeg_entropy_encoder pub; /* public fields */ + + INT32 c; /* C register, base of coding interval, layout as in sec. 
D.1.3 */ + INT32 a; /* A register, normalized size of coding interval */ + INT32 sc; /* counter for stacked 0xFF values which might overflow */ + INT32 zc; /* counter for pending 0x00 output values which might * + * be discarded at the end ("Pacman" termination) */ + int ct; /* bit shift counter, determines when next byte will be written */ + int buffer; /* buffer for most recent output byte != 0xFF */ + + int last_dc_val[MAX_COMPS_IN_SCAN]; /* last DC coef for each component */ + int dc_context[MAX_COMPS_IN_SCAN]; /* context index for DC conditioning */ + + unsigned int restarts_to_go; /* MCUs left in this restart interval */ + int next_restart_num; /* next restart number to write (0-7) */ + + /* Pointers to statistics areas (these workspaces have image lifespan) */ + unsigned char * dc_stats[NUM_ARITH_TBLS]; + unsigned char * ac_stats[NUM_ARITH_TBLS]; + + /* Statistics bin for coding with fixed probability 0.5 */ + unsigned char fixed_bin[4]; +} arith_entropy_encoder; + +typedef arith_entropy_encoder * arith_entropy_ptr; + +/* The following two definitions specify the allocation chunk size + * for the statistics area. + * According to sections F.1.4.4.1.3 and F.1.4.4.2, we need at least + * 49 statistics bins for DC, and 245 statistics bins for AC coding. + * + * We use a compact representation with 1 byte per statistics bin, + * thus the numbers directly represent byte sizes. + * This 1 byte per statistics bin contains the meaning of the MPS + * (more probable symbol) in the highest bit (mask 0x80), and the + * index into the probability estimation state machine table + * in the lower bits (mask 0x7F). + */ + +#define DC_STAT_BINS 64 +#define AC_STAT_BINS 256 + +/* NOTE: Uncomment the following #define if you want to use the + * given formula for calculating the AC conditioning parameter Kx + * for spectral selection progressive coding in section G.1.3.2 + * of the spec (Kx = Kmin + SRL (8 + Se - Kmin) 4). + * Although the spec and P&M authors claim that this "has proven + * to give good results for 8 bit precision samples", I'm not + * convinced yet that this is really beneficial. + * Early tests gave only very marginal compression enhancements + * (a few - around 5 or so - bytes even for very large files), + * which would turn out rather negative if we'd suppress the + * DAC (Define Arithmetic Conditioning) marker segments for + * the default parameters in the future. + * Note that currently the marker writing module emits 12-byte + * DAC segments for a full-component scan in a color image. + * This is not worth worrying about IMHO. However, since the + * spec defines the default values to be used if the tables + * are omitted (unlike Huffman tables, which are required + * anyway), one might optimize this behaviour in the future, + * and then it would be disadvantageous to use custom tables if + * they don't provide sufficient gain to exceed the DAC size. + * + * On the other hand, I'd consider it as a reasonable result + * that the conditioning has no significant influence on the + * compression performance. This means that the basic + * statistical model is already rather stable. + * + * Thus, at the moment, we use the default conditioning values + * anyway, and do not use the custom formula. + * +#define CALCULATE_SPECTRAL_CONDITIONING + */ + +/* IRIGHT_SHIFT is like RIGHT_SHIFT, but works on int rather than INT32. + * We assume that int right shift is unsigned if INT32 right shift is, + * which should be safe. 
+ */ + +#ifdef RIGHT_SHIFT_IS_UNSIGNED +#define ISHIFT_TEMPS int ishift_temp; +#define IRIGHT_SHIFT(x,shft) \ + ((ishift_temp = (x)) < 0 ? \ + (ishift_temp >> (shft)) | ((~0) << (16-(shft))) : \ + (ishift_temp >> (shft))) +#else +#define ISHIFT_TEMPS +#define IRIGHT_SHIFT(x,shft) ((x) >> (shft)) +#endif + + +LOCAL(void) +emit_byte (int val, j_compress_ptr cinfo) +/* Write next output byte; we do not support suspension in this module. */ +{ + struct jpeg_destination_mgr * dest = cinfo->dest; + + *dest->next_output_byte++ = (JOCTET) val; + if (--dest->free_in_buffer == 0) + if (! (*dest->empty_output_buffer) (cinfo)) + ERREXIT(cinfo, JERR_CANT_SUSPEND); +} + + +/* + * Finish up at the end of an arithmetic-compressed scan. + */ + +METHODDEF(void) +finish_pass (j_compress_ptr cinfo) +{ + arith_entropy_ptr e = (arith_entropy_ptr) cinfo->entropy; + INT32 temp; + + /* Section D.1.8: Termination of encoding */ + + /* Find the e->c in the coding interval with the largest + * number of trailing zero bits */ + if ((temp = (e->a - 1 + e->c) & 0xFFFF0000L) < e->c) + e->c = temp + 0x8000L; + else + e->c = temp; + /* Send remaining bytes to output */ + e->c <<= e->ct; + if (e->c & 0xF8000000L) { + /* One final overflow has to be handled */ + if (e->buffer >= 0) { + if (e->zc) + do emit_byte(0x00, cinfo); + while (--e->zc); + emit_byte(e->buffer + 1, cinfo); + if (e->buffer + 1 == 0xFF) + emit_byte(0x00, cinfo); + } + e->zc += e->sc; /* carry-over converts stacked 0xFF bytes to 0x00 */ + e->sc = 0; + } else { + if (e->buffer == 0) + ++e->zc; + else if (e->buffer >= 0) { + if (e->zc) + do emit_byte(0x00, cinfo); + while (--e->zc); + emit_byte(e->buffer, cinfo); + } + if (e->sc) { + if (e->zc) + do emit_byte(0x00, cinfo); + while (--e->zc); + do { + emit_byte(0xFF, cinfo); + emit_byte(0x00, cinfo); + } while (--e->sc); + } + } + /* Output final bytes only if they are not 0x00 */ + if (e->c & 0x7FFF800L) { + if (e->zc) /* output final pending zero bytes */ + do emit_byte(0x00, cinfo); + while (--e->zc); + emit_byte((e->c >> 19) & 0xFF, cinfo); + if (((e->c >> 19) & 0xFF) == 0xFF) + emit_byte(0x00, cinfo); + if (e->c & 0x7F800L) { + emit_byte((e->c >> 11) & 0xFF, cinfo); + if (((e->c >> 11) & 0xFF) == 0xFF) + emit_byte(0x00, cinfo); + } + } +} + + +/* + * The core arithmetic encoding routine (common in JPEG and JBIG). + * This needs to go as fast as possible. + * Machine-dependent optimization facilities + * are not utilized in this portable implementation. + * However, this code should be fairly efficient and + * may be a good base for further optimizations anyway. + * + * Parameter 'val' to be encoded may be 0 or 1 (binary decision). + * + * Note: I've added full "Pacman" termination support to the + * byte output routines, which is equivalent to the optional + * Discard_final_zeros procedure (Figure D.15) in the spec. + * Thus, we always produce the shortest possible output + * stream compliant to the spec (no trailing zero bytes, + * except for FF stuffing). + * + * I've also introduced a new scheme for accessing + * the probability estimation state machine table, + * derived from Markus Kuhn's JBIG implementation. 
+ */ + +LOCAL(void) +arith_encode (j_compress_ptr cinfo, unsigned char *st, int val) +{ + register arith_entropy_ptr e = (arith_entropy_ptr) cinfo->entropy; + register unsigned char nl, nm; + register INT32 qe, temp; + register int sv; + + /* Fetch values from our compact representation of Table D.3(D.2): + * Qe values and probability estimation state machine + */ + sv = *st; + qe = jpeg_aritab[sv & 0x7F]; /* => Qe_Value */ + nl = qe & 0xFF; qe >>= 8; /* Next_Index_LPS + Switch_MPS */ + nm = qe & 0xFF; qe >>= 8; /* Next_Index_MPS */ + + /* Encode & estimation procedures per sections D.1.4 & D.1.5 */ + e->a -= qe; + if (val != (sv >> 7)) { + /* Encode the less probable symbol */ + if (e->a >= qe) { + /* If the interval size (qe) for the less probable symbol (LPS) + * is larger than the interval size for the MPS, then exchange + * the two symbols for coding efficiency, otherwise code the LPS + * as usual: */ + e->c += e->a; + e->a = qe; + } + *st = (sv & 0x80) ^ nl; /* Estimate_after_LPS */ + } else { + /* Encode the more probable symbol */ + if (e->a >= 0x8000L) + return; /* A >= 0x8000 -> ready, no renormalization required */ + if (e->a < qe) { + /* If the interval size (qe) for the less probable symbol (LPS) + * is larger than the interval size for the MPS, then exchange + * the two symbols for coding efficiency: */ + e->c += e->a; + e->a = qe; + } + *st = (sv & 0x80) ^ nm; /* Estimate_after_MPS */ + } + + /* Renormalization & data output per section D.1.6 */ + do { + e->a <<= 1; + e->c <<= 1; + if (--e->ct == 0) { + /* Another byte is ready for output */ + temp = e->c >> 19; + if (temp > 0xFF) { + /* Handle overflow over all stacked 0xFF bytes */ + if (e->buffer >= 0) { + if (e->zc) + do emit_byte(0x00, cinfo); + while (--e->zc); + emit_byte(e->buffer + 1, cinfo); + if (e->buffer + 1 == 0xFF) + emit_byte(0x00, cinfo); + } + e->zc += e->sc; /* carry-over converts stacked 0xFF bytes to 0x00 */ + e->sc = 0; + /* Note: The 3 spacer bits in the C register guarantee + * that the new buffer byte can't be 0xFF here + * (see page 160 in the P&M JPEG book). */ + e->buffer = temp & 0xFF; /* new output byte, might overflow later */ + } else if (temp == 0xFF) { + ++e->sc; /* stack 0xFF byte (which might overflow later) */ + } else { + /* Output all stacked 0xFF bytes, they will not overflow any more */ + if (e->buffer == 0) + ++e->zc; + else if (e->buffer >= 0) { + if (e->zc) + do emit_byte(0x00, cinfo); + while (--e->zc); + emit_byte(e->buffer, cinfo); + } + if (e->sc) { + if (e->zc) + do emit_byte(0x00, cinfo); + while (--e->zc); + do { + emit_byte(0xFF, cinfo); + emit_byte(0x00, cinfo); + } while (--e->sc); + } + e->buffer = temp & 0xFF; /* new output byte (can still overflow) */ + } + e->c &= 0x7FFFFL; + e->ct += 8; + } + } while (e->a < 0x8000L); +} + + +/* + * Emit a restart marker & resynchronize predictions. 
+ */ + +LOCAL(void) +emit_restart (j_compress_ptr cinfo, int restart_num) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + int ci; + jpeg_component_info * compptr; + + finish_pass(cinfo); + + emit_byte(0xFF, cinfo); + emit_byte(JPEG_RST0 + restart_num, cinfo); + + /* Re-initialize statistics areas */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + /* DC needs no table for refinement scan */ + if (cinfo->Ss == 0 && cinfo->Ah == 0) { + MEMZERO(entropy->dc_stats[compptr->dc_tbl_no], DC_STAT_BINS); + /* Reset DC predictions to 0 */ + entropy->last_dc_val[ci] = 0; + entropy->dc_context[ci] = 0; + } + /* AC needs no table when not present */ + if (cinfo->Se) { + MEMZERO(entropy->ac_stats[compptr->ac_tbl_no], AC_STAT_BINS); + } + } + + /* Reset arithmetic encoding variables */ + entropy->c = 0; + entropy->a = 0x10000L; + entropy->sc = 0; + entropy->zc = 0; + entropy->ct = 11; + entropy->buffer = -1; /* empty */ +} + + +/* + * MCU encoding for DC initial scan (either spectral selection, + * or first pass of successive approximation). + */ + +METHODDEF(boolean) +encode_mcu_DC_first (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + JBLOCKROW block; + unsigned char *st; + int blkn, ci, tbl; + int v, v2, m; + ISHIFT_TEMPS + + /* Emit restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + emit_restart(cinfo, entropy->next_restart_num); + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num++; + entropy->next_restart_num &= 7; + } + entropy->restarts_to_go--; + } + + /* Encode the MCU data blocks */ + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + block = MCU_data[blkn]; + ci = cinfo->MCU_membership[blkn]; + tbl = cinfo->cur_comp_info[ci]->dc_tbl_no; + + /* Compute the DC value after the required point transform by Al. + * This is simply an arithmetic right shift. 
+ */ + m = IRIGHT_SHIFT((int) ((*block)[0]), cinfo->Al); + + /* Sections F.1.4.1 & F.1.4.4.1: Encoding of DC coefficients */ + + /* Table F.4: Point to statistics bin S0 for DC coefficient coding */ + st = entropy->dc_stats[tbl] + entropy->dc_context[ci]; + + /* Figure F.4: Encode_DC_DIFF */ + if ((v = m - entropy->last_dc_val[ci]) == 0) { + arith_encode(cinfo, st, 0); + entropy->dc_context[ci] = 0; /* zero diff category */ + } else { + entropy->last_dc_val[ci] = m; + arith_encode(cinfo, st, 1); + /* Figure F.6: Encoding nonzero value v */ + /* Figure F.7: Encoding the sign of v */ + if (v > 0) { + arith_encode(cinfo, st + 1, 0); /* Table F.4: SS = S0 + 1 */ + st += 2; /* Table F.4: SP = S0 + 2 */ + entropy->dc_context[ci] = 4; /* small positive diff category */ + } else { + v = -v; + arith_encode(cinfo, st + 1, 1); /* Table F.4: SS = S0 + 1 */ + st += 3; /* Table F.4: SN = S0 + 3 */ + entropy->dc_context[ci] = 8; /* small negative diff category */ + } + /* Figure F.8: Encoding the magnitude category of v */ + m = 0; + if (v -= 1) { + arith_encode(cinfo, st, 1); + m = 1; + v2 = v; + st = entropy->dc_stats[tbl] + 20; /* Table F.4: X1 = 20 */ + while (v2 >>= 1) { + arith_encode(cinfo, st, 1); + m <<= 1; + st += 1; + } + } + arith_encode(cinfo, st, 0); + /* Section F.1.4.4.1.2: Establish dc_context conditioning category */ + if (m < (int) ((1L << cinfo->arith_dc_L[tbl]) >> 1)) + entropy->dc_context[ci] = 0; /* zero diff category */ + else if (m > (int) ((1L << cinfo->arith_dc_U[tbl]) >> 1)) + entropy->dc_context[ci] += 8; /* large diff category */ + /* Figure F.9: Encoding the magnitude bit pattern of v */ + st += 14; + while (m >>= 1) + arith_encode(cinfo, st, (m & v) ? 1 : 0); + } + } + + return TRUE; +} + + +/* + * MCU encoding for AC initial scan (either spectral selection, + * or first pass of successive approximation). + */ + +METHODDEF(boolean) +encode_mcu_AC_first (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + JBLOCKROW block; + unsigned char *st; + int tbl, k, ke; + int v, v2, m; + const int * natural_order; + + /* Emit restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + emit_restart(cinfo, entropy->next_restart_num); + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num++; + entropy->next_restart_num &= 7; + } + entropy->restarts_to_go--; + } + + natural_order = cinfo->natural_order; + + /* Encode the MCU data block */ + block = MCU_data[0]; + tbl = cinfo->cur_comp_info[0]->ac_tbl_no; + + /* Sections F.1.4.2 & F.1.4.4.2: Encoding of AC coefficients */ + + /* Establish EOB (end-of-block) index */ + for (ke = cinfo->Se; ke > 0; ke--) + /* We must apply the point transform by Al. For AC coefficients this + * is an integer division with rounding towards 0. To do this portably + * in C, we shift after obtaining the absolute value. 
+ */ + if ((v = (*block)[natural_order[ke]]) >= 0) { + if (v >>= cinfo->Al) break; + } else { + v = -v; + if (v >>= cinfo->Al) break; + } + + /* Figure F.5: Encode_AC_Coefficients */ + for (k = cinfo->Ss; k <= ke; k++) { + st = entropy->ac_stats[tbl] + 3 * (k - 1); + arith_encode(cinfo, st, 0); /* EOB decision */ + for (;;) { + if ((v = (*block)[natural_order[k]]) >= 0) { + if (v >>= cinfo->Al) { + arith_encode(cinfo, st + 1, 1); + arith_encode(cinfo, entropy->fixed_bin, 0); + break; + } + } else { + v = -v; + if (v >>= cinfo->Al) { + arith_encode(cinfo, st + 1, 1); + arith_encode(cinfo, entropy->fixed_bin, 1); + break; + } + } + arith_encode(cinfo, st + 1, 0); st += 3; k++; + } + st += 2; + /* Figure F.8: Encoding the magnitude category of v */ + m = 0; + if (v -= 1) { + arith_encode(cinfo, st, 1); + m = 1; + v2 = v; + if (v2 >>= 1) { + arith_encode(cinfo, st, 1); + m <<= 1; + st = entropy->ac_stats[tbl] + + (k <= cinfo->arith_ac_K[tbl] ? 189 : 217); + while (v2 >>= 1) { + arith_encode(cinfo, st, 1); + m <<= 1; + st += 1; + } + } + } + arith_encode(cinfo, st, 0); + /* Figure F.9: Encoding the magnitude bit pattern of v */ + st += 14; + while (m >>= 1) + arith_encode(cinfo, st, (m & v) ? 1 : 0); + } + /* Encode EOB decision only if k <= cinfo->Se */ + if (k <= cinfo->Se) { + st = entropy->ac_stats[tbl] + 3 * (k - 1); + arith_encode(cinfo, st, 1); + } + + return TRUE; +} + + +/* + * MCU encoding for DC successive approximation refinement scan. + */ + +METHODDEF(boolean) +encode_mcu_DC_refine (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + unsigned char *st; + int Al, blkn; + + /* Emit restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + emit_restart(cinfo, entropy->next_restart_num); + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num++; + entropy->next_restart_num &= 7; + } + entropy->restarts_to_go--; + } + + st = entropy->fixed_bin; /* use fixed probability estimation */ + Al = cinfo->Al; + + /* Encode the MCU data blocks */ + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + /* We simply emit the Al'th bit of the DC coefficient value. */ + arith_encode(cinfo, st, (MCU_data[blkn][0][0] >> Al) & 1); + } + + return TRUE; +} + + +/* + * MCU encoding for AC successive approximation refinement scan. + */ + +METHODDEF(boolean) +encode_mcu_AC_refine (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + JBLOCKROW block; + unsigned char *st; + int tbl, k, ke, kex; + int v; + const int * natural_order; + + /* Emit restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + emit_restart(cinfo, entropy->next_restart_num); + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num++; + entropy->next_restart_num &= 7; + } + entropy->restarts_to_go--; + } + + natural_order = cinfo->natural_order; + + /* Encode the MCU data block */ + block = MCU_data[0]; + tbl = cinfo->cur_comp_info[0]->ac_tbl_no; + + /* Section G.1.3.3: Encoding of AC coefficients */ + + /* Establish EOB (end-of-block) index */ + for (ke = cinfo->Se; ke > 0; ke--) + /* We must apply the point transform by Al. For AC coefficients this + * is an integer division with rounding towards 0. To do this portably + * in C, we shift after obtaining the absolute value. 
+ */ + if ((v = (*block)[natural_order[ke]]) >= 0) { + if (v >>= cinfo->Al) break; + } else { + v = -v; + if (v >>= cinfo->Al) break; + } + + /* Establish EOBx (previous stage end-of-block) index */ + for (kex = ke; kex > 0; kex--) + if ((v = (*block)[natural_order[kex]]) >= 0) { + if (v >>= cinfo->Ah) break; + } else { + v = -v; + if (v >>= cinfo->Ah) break; + } + + /* Figure G.10: Encode_AC_Coefficients_SA */ + for (k = cinfo->Ss; k <= ke; k++) { + st = entropy->ac_stats[tbl] + 3 * (k - 1); + if (k > kex) + arith_encode(cinfo, st, 0); /* EOB decision */ + for (;;) { + if ((v = (*block)[natural_order[k]]) >= 0) { + if (v >>= cinfo->Al) { + if (v >> 1) /* previously nonzero coef */ + arith_encode(cinfo, st + 2, (v & 1)); + else { /* newly nonzero coef */ + arith_encode(cinfo, st + 1, 1); + arith_encode(cinfo, entropy->fixed_bin, 0); + } + break; + } + } else { + v = -v; + if (v >>= cinfo->Al) { + if (v >> 1) /* previously nonzero coef */ + arith_encode(cinfo, st + 2, (v & 1)); + else { /* newly nonzero coef */ + arith_encode(cinfo, st + 1, 1); + arith_encode(cinfo, entropy->fixed_bin, 1); + } + break; + } + } + arith_encode(cinfo, st + 1, 0); st += 3; k++; + } + } + /* Encode EOB decision only if k <= cinfo->Se */ + if (k <= cinfo->Se) { + st = entropy->ac_stats[tbl] + 3 * (k - 1); + arith_encode(cinfo, st, 1); + } + + return TRUE; +} + + +/* + * Encode and output one MCU's worth of arithmetic-compressed coefficients. + */ + +METHODDEF(boolean) +encode_mcu (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + jpeg_component_info * compptr; + JBLOCKROW block; + unsigned char *st; + int blkn, ci, tbl, k, ke; + int v, v2, m; + const int * natural_order; + + /* Emit restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + emit_restart(cinfo, entropy->next_restart_num); + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num++; + entropy->next_restart_num &= 7; + } + entropy->restarts_to_go--; + } + + natural_order = cinfo->natural_order; + + /* Encode the MCU data blocks */ + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + block = MCU_data[blkn]; + ci = cinfo->MCU_membership[blkn]; + compptr = cinfo->cur_comp_info[ci]; + + /* Sections F.1.4.1 & F.1.4.4.1: Encoding of DC coefficients */ + + tbl = compptr->dc_tbl_no; + + /* Table F.4: Point to statistics bin S0 for DC coefficient coding */ + st = entropy->dc_stats[tbl] + entropy->dc_context[ci]; + + /* Figure F.4: Encode_DC_DIFF */ + if ((v = (*block)[0] - entropy->last_dc_val[ci]) == 0) { + arith_encode(cinfo, st, 0); + entropy->dc_context[ci] = 0; /* zero diff category */ + } else { + entropy->last_dc_val[ci] = (*block)[0]; + arith_encode(cinfo, st, 1); + /* Figure F.6: Encoding nonzero value v */ + /* Figure F.7: Encoding the sign of v */ + if (v > 0) { + arith_encode(cinfo, st + 1, 0); /* Table F.4: SS = S0 + 1 */ + st += 2; /* Table F.4: SP = S0 + 2 */ + entropy->dc_context[ci] = 4; /* small positive diff category */ + } else { + v = -v; + arith_encode(cinfo, st + 1, 1); /* Table F.4: SS = S0 + 1 */ + st += 3; /* Table F.4: SN = S0 + 3 */ + entropy->dc_context[ci] = 8; /* small negative diff category */ + } + /* Figure F.8: Encoding the magnitude category of v */ + m = 0; + if (v -= 1) { + arith_encode(cinfo, st, 1); + m = 1; + v2 = v; + st = entropy->dc_stats[tbl] + 20; /* Table F.4: X1 = 20 */ + while (v2 >>= 1) { + arith_encode(cinfo, st, 1); + m <<= 1; + st += 1; + } + } + arith_encode(cinfo, st, 0); 
+ /* Section F.1.4.4.1.2: Establish dc_context conditioning category */ + if (m < (int) ((1L << cinfo->arith_dc_L[tbl]) >> 1)) + entropy->dc_context[ci] = 0; /* zero diff category */ + else if (m > (int) ((1L << cinfo->arith_dc_U[tbl]) >> 1)) + entropy->dc_context[ci] += 8; /* large diff category */ + /* Figure F.9: Encoding the magnitude bit pattern of v */ + st += 14; + while (m >>= 1) + arith_encode(cinfo, st, (m & v) ? 1 : 0); + } + + /* Sections F.1.4.2 & F.1.4.4.2: Encoding of AC coefficients */ + + if ((ke = cinfo->lim_Se) == 0) continue; + tbl = compptr->ac_tbl_no; + + /* Establish EOB (end-of-block) index */ + do { + if ((*block)[natural_order[ke]]) break; + } while (--ke); + + /* Figure F.5: Encode_AC_Coefficients */ + for (k = 0; k < ke;) { + st = entropy->ac_stats[tbl] + 3 * k; + arith_encode(cinfo, st, 0); /* EOB decision */ + while ((v = (*block)[natural_order[++k]]) == 0) { + arith_encode(cinfo, st + 1, 0); + st += 3; + } + arith_encode(cinfo, st + 1, 1); + /* Figure F.6: Encoding nonzero value v */ + /* Figure F.7: Encoding the sign of v */ + if (v > 0) { + arith_encode(cinfo, entropy->fixed_bin, 0); + } else { + v = -v; + arith_encode(cinfo, entropy->fixed_bin, 1); + } + st += 2; + /* Figure F.8: Encoding the magnitude category of v */ + m = 0; + if (v -= 1) { + arith_encode(cinfo, st, 1); + m = 1; + v2 = v; + if (v2 >>= 1) { + arith_encode(cinfo, st, 1); + m <<= 1; + st = entropy->ac_stats[tbl] + + (k <= cinfo->arith_ac_K[tbl] ? 189 : 217); + while (v2 >>= 1) { + arith_encode(cinfo, st, 1); + m <<= 1; + st += 1; + } + } + } + arith_encode(cinfo, st, 0); + /* Figure F.9: Encoding the magnitude bit pattern of v */ + st += 14; + while (m >>= 1) + arith_encode(cinfo, st, (m & v) ? 1 : 0); + } + /* Encode EOB decision only if k < cinfo->lim_Se */ + if (k < cinfo->lim_Se) { + st = entropy->ac_stats[tbl] + 3 * k; + arith_encode(cinfo, st, 1); + } + } + + return TRUE; +} + + +/* + * Initialize for an arithmetic-compressed scan. + */ + +METHODDEF(void) +start_pass (j_compress_ptr cinfo, boolean gather_statistics) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + int ci, tbl; + jpeg_component_info * compptr; + + if (gather_statistics) + /* Make sure to avoid that in the master control logic! + * We are fully adaptive here and need no extra + * statistics gathering pass! + */ + ERREXIT(cinfo, JERR_NOT_COMPILED); + + /* We assume jcmaster.c already validated the progressive scan parameters. 
*/ + + /* Select execution routines */ + if (cinfo->progressive_mode) { + if (cinfo->Ah == 0) { + if (cinfo->Ss == 0) + entropy->pub.encode_mcu = encode_mcu_DC_first; + else + entropy->pub.encode_mcu = encode_mcu_AC_first; + } else { + if (cinfo->Ss == 0) + entropy->pub.encode_mcu = encode_mcu_DC_refine; + else + entropy->pub.encode_mcu = encode_mcu_AC_refine; + } + } else + entropy->pub.encode_mcu = encode_mcu; + + /* Allocate & initialize requested statistics areas */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + /* DC needs no table for refinement scan */ + if (cinfo->Ss == 0 && cinfo->Ah == 0) { + tbl = compptr->dc_tbl_no; + if (tbl < 0 || tbl >= NUM_ARITH_TBLS) + ERREXIT1(cinfo, JERR_NO_ARITH_TABLE, tbl); + if (entropy->dc_stats[tbl] == NULL) + entropy->dc_stats[tbl] = (unsigned char *) (*cinfo->mem->alloc_small) + ((j_common_ptr) cinfo, JPOOL_IMAGE, DC_STAT_BINS); + MEMZERO(entropy->dc_stats[tbl], DC_STAT_BINS); + /* Initialize DC predictions to 0 */ + entropy->last_dc_val[ci] = 0; + entropy->dc_context[ci] = 0; + } + /* AC needs no table when not present */ + if (cinfo->Se) { + tbl = compptr->ac_tbl_no; + if (tbl < 0 || tbl >= NUM_ARITH_TBLS) + ERREXIT1(cinfo, JERR_NO_ARITH_TABLE, tbl); + if (entropy->ac_stats[tbl] == NULL) + entropy->ac_stats[tbl] = (unsigned char *) (*cinfo->mem->alloc_small) + ((j_common_ptr) cinfo, JPOOL_IMAGE, AC_STAT_BINS); + MEMZERO(entropy->ac_stats[tbl], AC_STAT_BINS); +#ifdef CALCULATE_SPECTRAL_CONDITIONING + if (cinfo->progressive_mode) + /* Section G.1.3.2: Set appropriate arithmetic conditioning value Kx */ + cinfo->arith_ac_K[tbl] = cinfo->Ss + ((8 + cinfo->Se - cinfo->Ss) >> 4); +#endif + } + } + + /* Initialize arithmetic encoding variables */ + entropy->c = 0; + entropy->a = 0x10000L; + entropy->sc = 0; + entropy->zc = 0; + entropy->ct = 11; + entropy->buffer = -1; /* empty */ + + /* Initialize restart stuff */ + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num = 0; +} + + +/* + * Module initialization routine for arithmetic entropy encoding. + */ + +GLOBAL(void) +jinit_arith_encoder (j_compress_ptr cinfo) +{ + arith_entropy_ptr entropy; + int i; + + entropy = (arith_entropy_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(arith_entropy_encoder)); + cinfo->entropy = (struct jpeg_entropy_encoder *) entropy; + entropy->pub.start_pass = start_pass; + entropy->pub.finish_pass = finish_pass; + + /* Mark tables unallocated */ + for (i = 0; i < NUM_ARITH_TBLS; i++) { + entropy->dc_stats[i] = NULL; + entropy->ac_stats[i] = NULL; + } + + /* Initialize index for fixed probability estimation */ + entropy->fixed_bin[0] = 113; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jccoefct.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jccoefct.c new file mode 100644 index 000000000..924a703dd --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jccoefct.c @@ -0,0 +1,454 @@ +/* + * jccoefct.c + * + * Copyright (C) 1994-1997, Thomas G. Lane. + * Modified 2003-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains the coefficient buffer controller for compression. + * This controller is the top level of the JPEG compressor proper. + * The coefficient buffer lies between forward-DCT and entropy encoding steps. 
+ */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* We use a full-image coefficient buffer when doing Huffman optimization, + * and also for writing multiple-scan JPEG files. In all cases, the DCT + * step is run during the first pass, and subsequent passes need only read + * the buffered coefficients. + */ +#ifdef ENTROPY_OPT_SUPPORTED +#define FULL_COEF_BUFFER_SUPPORTED +#else +#ifdef C_MULTISCAN_FILES_SUPPORTED +#define FULL_COEF_BUFFER_SUPPORTED +#endif +#endif + + +/* Private buffer controller object */ + +typedef struct { + struct jpeg_c_coef_controller pub; /* public fields */ + + JDIMENSION iMCU_row_num; /* iMCU row # within image */ + JDIMENSION mcu_ctr; /* counts MCUs processed in current row */ + int MCU_vert_offset; /* counts MCU rows within iMCU row */ + int MCU_rows_per_iMCU_row; /* number of such rows needed */ + + /* For single-pass compression, it's sufficient to buffer just one MCU + * (although this may prove a bit slow in practice). We allocate a + * workspace of C_MAX_BLOCKS_IN_MCU coefficient blocks, and reuse it for each + * MCU constructed and sent. (On 80x86, the workspace is FAR even though + * it's not really very big; this is to keep the module interfaces unchanged + * when a large coefficient buffer is necessary.) + * In multi-pass modes, this array points to the current MCU's blocks + * within the virtual arrays. + */ + JBLOCKROW MCU_buffer[C_MAX_BLOCKS_IN_MCU]; + + /* In multi-pass modes, we need a virtual block array for each component. */ + jvirt_barray_ptr whole_image[MAX_COMPONENTS]; +} my_coef_controller; + +typedef my_coef_controller * my_coef_ptr; + + +/* Forward declarations */ +METHODDEF(boolean) compress_data + JPP((j_compress_ptr cinfo, JSAMPIMAGE input_buf)); +#ifdef FULL_COEF_BUFFER_SUPPORTED +METHODDEF(boolean) compress_first_pass + JPP((j_compress_ptr cinfo, JSAMPIMAGE input_buf)); +METHODDEF(boolean) compress_output + JPP((j_compress_ptr cinfo, JSAMPIMAGE input_buf)); +#endif + + +LOCAL(void) +start_iMCU_row (j_compress_ptr cinfo) +/* Reset within-iMCU-row counters for a new row */ +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + + /* In an interleaved scan, an MCU row is the same as an iMCU row. + * In a noninterleaved scan, an iMCU row has v_samp_factor MCU rows. + * But at the bottom of the image, process only what's left. + */ + if (cinfo->comps_in_scan > 1) { + coef->MCU_rows_per_iMCU_row = 1; + } else { + if (coef->iMCU_row_num < (cinfo->total_iMCU_rows-1)) + coef->MCU_rows_per_iMCU_row = cinfo->cur_comp_info[0]->v_samp_factor; + else + coef->MCU_rows_per_iMCU_row = cinfo->cur_comp_info[0]->last_row_height; + } + + coef->mcu_ctr = 0; + coef->MCU_vert_offset = 0; +} + + +/* + * Initialize for a processing pass. 
+ */ + +METHODDEF(void) +start_pass_coef (j_compress_ptr cinfo, J_BUF_MODE pass_mode) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + + coef->iMCU_row_num = 0; + start_iMCU_row(cinfo); + + switch (pass_mode) { + case JBUF_PASS_THRU: + if (coef->whole_image[0] != NULL) + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + coef->pub.compress_data = compress_data; + break; +#ifdef FULL_COEF_BUFFER_SUPPORTED + case JBUF_SAVE_AND_PASS: + if (coef->whole_image[0] == NULL) + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + coef->pub.compress_data = compress_first_pass; + break; + case JBUF_CRANK_DEST: + if (coef->whole_image[0] == NULL) + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + coef->pub.compress_data = compress_output; + break; +#endif + default: + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + break; + } +} + + +/* + * Process some data in the single-pass case. + * We process the equivalent of one fully interleaved MCU row ("iMCU" row) + * per call, ie, v_samp_factor block rows for each component in the image. + * Returns TRUE if the iMCU row is completed, FALSE if suspended. + * + * NB: input_buf contains a plane for each component in image, + * which we index according to the component's SOF position. + */ + +METHODDEF(boolean) +compress_data (j_compress_ptr cinfo, JSAMPIMAGE input_buf) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + JDIMENSION MCU_col_num; /* index of current MCU within row */ + JDIMENSION last_MCU_col = cinfo->MCUs_per_row - 1; + JDIMENSION last_iMCU_row = cinfo->total_iMCU_rows - 1; + int blkn, bi, ci, yindex, yoffset, blockcnt; + JDIMENSION ypos, xpos; + jpeg_component_info *compptr; + forward_DCT_ptr forward_DCT; + + /* Loop to write as much as one whole iMCU row */ + for (yoffset = coef->MCU_vert_offset; yoffset < coef->MCU_rows_per_iMCU_row; + yoffset++) { + for (MCU_col_num = coef->mcu_ctr; MCU_col_num <= last_MCU_col; + MCU_col_num++) { + /* Determine where data comes from in input_buf and do the DCT thing. + * Each call on forward_DCT processes a horizontal row of DCT blocks + * as wide as an MCU; we rely on having allocated the MCU_buffer[] blocks + * sequentially. Dummy blocks at the right or bottom edge are filled in + * specially. The data in them does not matter for image reconstruction, + * so we fill them with values that will encode to the smallest amount of + * data, viz: all zeroes in the AC entries, DC entries equal to previous + * block's DC value. (Thanks to Thomas Kinsman for this idea.) + */ + blkn = 0; + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + forward_DCT = cinfo->fdct->forward_DCT[compptr->component_index]; + blockcnt = (MCU_col_num < last_MCU_col) ? compptr->MCU_width + : compptr->last_col_width; + xpos = MCU_col_num * compptr->MCU_sample_width; + ypos = yoffset * compptr->DCT_v_scaled_size; + /* ypos == (yoffset+yindex) * DCTSIZE */ + for (yindex = 0; yindex < compptr->MCU_height; yindex++) { + if (coef->iMCU_row_num < last_iMCU_row || + yoffset+yindex < compptr->last_row_height) { + (*forward_DCT) (cinfo, compptr, + input_buf[compptr->component_index], + coef->MCU_buffer[blkn], + ypos, xpos, (JDIMENSION) blockcnt); + if (blockcnt < compptr->MCU_width) { + /* Create some dummy blocks at the right edge of the image. 
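+ * Copying the previous block's DC value (done just below) lets each dummy block encode as a zero DC difference followed by an immediate end-of-block, so the padding costs almost nothing in the output.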
*/ + FMEMZERO((void FAR *) coef->MCU_buffer[blkn + blockcnt], + (compptr->MCU_width - blockcnt) * SIZEOF(JBLOCK)); + for (bi = blockcnt; bi < compptr->MCU_width; bi++) { + coef->MCU_buffer[blkn+bi][0][0] = coef->MCU_buffer[blkn+bi-1][0][0]; + } + } + } else { + /* Create a row of dummy blocks at the bottom of the image. */ + FMEMZERO((void FAR *) coef->MCU_buffer[blkn], + compptr->MCU_width * SIZEOF(JBLOCK)); + for (bi = 0; bi < compptr->MCU_width; bi++) { + coef->MCU_buffer[blkn+bi][0][0] = coef->MCU_buffer[blkn-1][0][0]; + } + } + blkn += compptr->MCU_width; + ypos += compptr->DCT_v_scaled_size; + } + } + /* Try to write the MCU. In event of a suspension failure, we will + * re-DCT the MCU on restart (a bit inefficient, could be fixed...) + */ + if (! (*cinfo->entropy->encode_mcu) (cinfo, coef->MCU_buffer)) { + /* Suspension forced; update state counters and exit */ + coef->MCU_vert_offset = yoffset; + coef->mcu_ctr = MCU_col_num; + return FALSE; + } + } + /* Completed an MCU row, but perhaps not an iMCU row */ + coef->mcu_ctr = 0; + } + /* Completed the iMCU row, advance counters for next one */ + coef->iMCU_row_num++; + start_iMCU_row(cinfo); + return TRUE; +} + + +#ifdef FULL_COEF_BUFFER_SUPPORTED + +/* + * Process some data in the first pass of a multi-pass case. + * We process the equivalent of one fully interleaved MCU row ("iMCU" row) + * per call, ie, v_samp_factor block rows for each component in the image. + * This amount of data is read from the source buffer, DCT'd and quantized, + * and saved into the virtual arrays. We also generate suitable dummy blocks + * as needed at the right and lower edges. (The dummy blocks are constructed + * in the virtual arrays, which have been padded appropriately.) This makes + * it possible for subsequent passes not to worry about real vs. dummy blocks. + * + * We must also emit the data to the entropy encoder. This is conveniently + * done by calling compress_output() after we've loaded the current strip + * of the virtual arrays. + * + * NB: input_buf contains a plane for each component in image. All + * components are DCT'd and loaded into the virtual arrays in this pass. + * However, it may be that only a subset of the components are emitted to + * the entropy encoder during this first pass; be careful about looking + * at the scan-dependent variables (MCU dimensions, etc). + */ + +METHODDEF(boolean) +compress_first_pass (j_compress_ptr cinfo, JSAMPIMAGE input_buf) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + JDIMENSION last_iMCU_row = cinfo->total_iMCU_rows - 1; + JDIMENSION blocks_across, MCUs_across, MCUindex; + int bi, ci, h_samp_factor, block_row, block_rows, ndummy; + JCOEF lastDC; + jpeg_component_info *compptr; + JBLOCKARRAY buffer; + JBLOCKROW thisblockrow, lastblockrow; + forward_DCT_ptr forward_DCT; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Align the virtual buffer for this component. */ + buffer = (*cinfo->mem->access_virt_barray) + ((j_common_ptr) cinfo, coef->whole_image[ci], + coef->iMCU_row_num * compptr->v_samp_factor, + (JDIMENSION) compptr->v_samp_factor, TRUE); + /* Count non-dummy DCT block rows in this iMCU row. */ + if (coef->iMCU_row_num < last_iMCU_row) + block_rows = compptr->v_samp_factor; + else { + /* NB: can't use last_row_height here, since may not be set! 
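+ * For example, with height_in_blocks = 13 and v_samp_factor = 2 the last iMCU row keeps 13 % 2 = 1 real block row; a remainder of 0 means the row is full and all v_samp_factor rows are real.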
*/ + block_rows = (int) (compptr->height_in_blocks % compptr->v_samp_factor); + if (block_rows == 0) block_rows = compptr->v_samp_factor; + } + blocks_across = compptr->width_in_blocks; + h_samp_factor = compptr->h_samp_factor; + /* Count number of dummy blocks to be added at the right margin. */ + ndummy = (int) (blocks_across % h_samp_factor); + if (ndummy > 0) + ndummy = h_samp_factor - ndummy; + forward_DCT = cinfo->fdct->forward_DCT[ci]; + /* Perform DCT for all non-dummy blocks in this iMCU row. Each call + * on forward_DCT processes a complete horizontal row of DCT blocks. + */ + for (block_row = 0; block_row < block_rows; block_row++) { + thisblockrow = buffer[block_row]; + (*forward_DCT) (cinfo, compptr, input_buf[ci], thisblockrow, + (JDIMENSION) (block_row * compptr->DCT_v_scaled_size), + (JDIMENSION) 0, blocks_across); + if (ndummy > 0) { + /* Create dummy blocks at the right edge of the image. */ + thisblockrow += blocks_across; /* => first dummy block */ + FMEMZERO((void FAR *) thisblockrow, ndummy * SIZEOF(JBLOCK)); + lastDC = thisblockrow[-1][0]; + for (bi = 0; bi < ndummy; bi++) { + thisblockrow[bi][0] = lastDC; + } + } + } + /* If at end of image, create dummy block rows as needed. + * The tricky part here is that within each MCU, we want the DC values + * of the dummy blocks to match the last real block's DC value. + * This squeezes a few more bytes out of the resulting file... + */ + if (coef->iMCU_row_num == last_iMCU_row) { + blocks_across += ndummy; /* include lower right corner */ + MCUs_across = blocks_across / h_samp_factor; + for (block_row = block_rows; block_row < compptr->v_samp_factor; + block_row++) { + thisblockrow = buffer[block_row]; + lastblockrow = buffer[block_row-1]; + FMEMZERO((void FAR *) thisblockrow, + (size_t) (blocks_across * SIZEOF(JBLOCK))); + for (MCUindex = 0; MCUindex < MCUs_across; MCUindex++) { + lastDC = lastblockrow[h_samp_factor-1][0]; + for (bi = 0; bi < h_samp_factor; bi++) { + thisblockrow[bi][0] = lastDC; + } + thisblockrow += h_samp_factor; /* advance to next MCU in row */ + lastblockrow += h_samp_factor; + } + } + } + } + /* NB: compress_output will increment iMCU_row_num if successful. + * A suspension return will result in redoing all the work above next time. + */ + + /* Emit data to the entropy encoder, sharing code with subsequent passes */ + return compress_output(cinfo, input_buf); +} + + +/* + * Process some data in subsequent passes of a multi-pass case. + * We process the equivalent of one fully interleaved MCU row ("iMCU" row) + * per call, ie, v_samp_factor block rows for each component in the scan. + * The data is obtained from the virtual arrays and fed to the entropy coder. + * Returns TRUE if the iMCU row is completed, FALSE if suspended. + * + * NB: input_buf is ignored; it is likely to be a NULL pointer. + */ + +METHODDEF(boolean) +compress_output (j_compress_ptr cinfo, JSAMPIMAGE input_buf) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + JDIMENSION MCU_col_num; /* index of current MCU within row */ + int blkn, ci, xindex, yindex, yoffset; + JDIMENSION start_col; + JBLOCKARRAY buffer[MAX_COMPS_IN_SCAN]; + JBLOCKROW buffer_ptr; + jpeg_component_info *compptr; + + /* Align the virtual buffers for the components used in this scan. + * NB: during first pass, this is safe only because the buffers will + * already be aligned properly, so jmemmgr.c won't need to do any I/O. 
+ */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + buffer[ci] = (*cinfo->mem->access_virt_barray) + ((j_common_ptr) cinfo, coef->whole_image[compptr->component_index], + coef->iMCU_row_num * compptr->v_samp_factor, + (JDIMENSION) compptr->v_samp_factor, FALSE); + } + + /* Loop to process one whole iMCU row */ + for (yoffset = coef->MCU_vert_offset; yoffset < coef->MCU_rows_per_iMCU_row; + yoffset++) { + for (MCU_col_num = coef->mcu_ctr; MCU_col_num < cinfo->MCUs_per_row; + MCU_col_num++) { + /* Construct list of pointers to DCT blocks belonging to this MCU */ + blkn = 0; /* index of current DCT block within MCU */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + start_col = MCU_col_num * compptr->MCU_width; + for (yindex = 0; yindex < compptr->MCU_height; yindex++) { + buffer_ptr = buffer[ci][yindex+yoffset] + start_col; + for (xindex = 0; xindex < compptr->MCU_width; xindex++) { + coef->MCU_buffer[blkn++] = buffer_ptr++; + } + } + } + /* Try to write the MCU. */ + if (! (*cinfo->entropy->encode_mcu) (cinfo, coef->MCU_buffer)) { + /* Suspension forced; update state counters and exit */ + coef->MCU_vert_offset = yoffset; + coef->mcu_ctr = MCU_col_num; + return FALSE; + } + } + /* Completed an MCU row, but perhaps not an iMCU row */ + coef->mcu_ctr = 0; + } + /* Completed the iMCU row, advance counters for next one */ + coef->iMCU_row_num++; + start_iMCU_row(cinfo); + return TRUE; +} + +#endif /* FULL_COEF_BUFFER_SUPPORTED */ + + +/* + * Initialize coefficient buffer controller. + */ + +GLOBAL(void) +jinit_c_coef_controller (j_compress_ptr cinfo, boolean need_full_buffer) +{ + my_coef_ptr coef; + + coef = (my_coef_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_coef_controller)); + cinfo->coef = (struct jpeg_c_coef_controller *) coef; + coef->pub.start_pass = start_pass_coef; + + /* Create the coefficient buffer. */ + if (need_full_buffer) { +#ifdef FULL_COEF_BUFFER_SUPPORTED + /* Allocate a full-image virtual array for each component, */ + /* padded to a multiple of samp_factor DCT blocks in each direction. */ + int ci; + jpeg_component_info *compptr; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + coef->whole_image[ci] = (*cinfo->mem->request_virt_barray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, FALSE, + (JDIMENSION) jround_up((long) compptr->width_in_blocks, + (long) compptr->h_samp_factor), + (JDIMENSION) jround_up((long) compptr->height_in_blocks, + (long) compptr->v_samp_factor), + (JDIMENSION) compptr->v_samp_factor); + } +#else + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); +#endif + } else { + /* We only need a single-MCU buffer. */ + JBLOCKROW buffer; + int i; + + buffer = (JBLOCKROW) + (*cinfo->mem->alloc_large) ((j_common_ptr) cinfo, JPOOL_IMAGE, + C_MAX_BLOCKS_IN_MCU * SIZEOF(JBLOCK)); + for (i = 0; i < C_MAX_BLOCKS_IN_MCU; i++) { + coef->MCU_buffer[i] = buffer + i; + } + coef->whole_image[0] = NULL; /* flag for no virtual arrays */ + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jccolor.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jccolor.c new file mode 100644 index 000000000..3e2d0e927 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jccolor.c @@ -0,0 +1,490 @@ +/* + * jccolor.c + * + * Copyright (C) 1991-1996, Thomas G. Lane. + * Modified 2011 by Guido Vollbeding. 
+ * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains input colorspace conversion routines. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Private subobject */ + +typedef struct { + struct jpeg_color_converter pub; /* public fields */ + + /* Private state for RGB->YCC conversion */ + INT32 * rgb_ycc_tab; /* => table for RGB to YCbCr conversion */ +} my_color_converter; + +typedef my_color_converter * my_cconvert_ptr; + + +/**************** RGB -> YCbCr conversion: most common case **************/ + +/* + * YCbCr is defined per CCIR 601-1, except that Cb and Cr are + * normalized to the range 0..MAXJSAMPLE rather than -0.5 .. 0.5. + * The conversion equations to be implemented are therefore + * Y = 0.29900 * R + 0.58700 * G + 0.11400 * B + * Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + CENTERJSAMPLE + * Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B + CENTERJSAMPLE + * (These numbers are derived from TIFF 6.0 section 21, dated 3-June-92.) + * Note: older versions of the IJG code used a zero offset of MAXJSAMPLE/2, + * rather than CENTERJSAMPLE, for Cb and Cr. This gave equal positive and + * negative swings for Cb/Cr, but meant that grayscale values (Cb=Cr=0) + * were not represented exactly. Now we sacrifice exact representation of + * maximum red and maximum blue in order to get exact grayscales. + * + * To avoid floating-point arithmetic, we represent the fractional constants + * as integers scaled up by 2^16 (about 4 digits precision); we have to divide + * the products by 2^16, with appropriate rounding, to get the correct answer. + * + * For even more speed, we avoid doing any multiplications in the inner loop + * by precalculating the constants times R,G,B for all possible values. + * For 8-bit JSAMPLEs this is very reasonable (only 256 entries per table); + * for 12-bit samples it is still acceptable. It's not very reasonable for + * 16-bit samples, but if you want lossless storage you shouldn't be changing + * colorspace anyway. + * The CENTERJSAMPLE offsets and the rounding fudge-factor of 0.5 are included + * in the tables to save adding them separately in the inner loop. + */ + +#define SCALEBITS 16 /* speediest right-shift on some machines */ +#define CBCR_OFFSET ((INT32) CENTERJSAMPLE << SCALEBITS) +#define ONE_HALF ((INT32) 1 << (SCALEBITS-1)) +#define FIX(x) ((INT32) ((x) * (1L<<SCALEBITS) + 0.5)) + +/* We allocate one big table and divide it up into eight parts, instead of + * doing eight alloc_small requests. This lets us use a single table base + * address, which can be held in a register in the inner loops on many + * machines (more than can hold all eight addresses, anyway). + */ + +#define R_Y_OFF 0 /* offset to R => Y section */ +#define G_Y_OFF (1*(MAXJSAMPLE+1)) /* offset to G => Y section */ +#define B_Y_OFF (2*(MAXJSAMPLE+1)) /* etc. */ +#define R_CB_OFF (3*(MAXJSAMPLE+1)) +#define G_CB_OFF (4*(MAXJSAMPLE+1)) +#define B_CB_OFF (5*(MAXJSAMPLE+1)) +#define R_CR_OFF B_CB_OFF /* B=>Cb, R=>Cr are the same */ +#define G_CR_OFF (6*(MAXJSAMPLE+1)) +#define B_CR_OFF (7*(MAXJSAMPLE+1)) +#define TABLE_SIZE (8*(MAXJSAMPLE+1)) + + +/* + * Initialize for RGB->YCC colorspace conversion. + */ + +METHODDEF(void) +rgb_ycc_start (j_compress_ptr cinfo) +{ + my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; + INT32 * rgb_ycc_tab; + INT32 i; + + /* Allocate and fill in the conversion tables.
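+ * As a concrete illustration of the fixed-point scheme: FIX(0.29900) evaluates to 19595 (0.299 * 65536, rounded), so rgb_ycc_tab[i+R_Y_OFF] holds 19595*i, and the convert routines below just sum three such table entries and shift right by SCALEBITS.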
*/ + cconvert->rgb_ycc_tab = rgb_ycc_tab = (INT32 *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + (TABLE_SIZE * SIZEOF(INT32))); + + for (i = 0; i <= MAXJSAMPLE; i++) { + rgb_ycc_tab[i+R_Y_OFF] = FIX(0.29900) * i; + rgb_ycc_tab[i+G_Y_OFF] = FIX(0.58700) * i; + rgb_ycc_tab[i+B_Y_OFF] = FIX(0.11400) * i + ONE_HALF; + rgb_ycc_tab[i+R_CB_OFF] = (-FIX(0.16874)) * i; + rgb_ycc_tab[i+G_CB_OFF] = (-FIX(0.33126)) * i; + /* We use a rounding fudge-factor of 0.5-epsilon for Cb and Cr. + * This ensures that the maximum output will round to MAXJSAMPLE + * not MAXJSAMPLE+1, and thus that we don't have to range-limit. + */ + rgb_ycc_tab[i+B_CB_OFF] = FIX(0.50000) * i + CBCR_OFFSET + ONE_HALF-1; +/* B=>Cb and R=>Cr tables are the same + rgb_ycc_tab[i+R_CR_OFF] = FIX(0.50000) * i + CBCR_OFFSET + ONE_HALF-1; +*/ + rgb_ycc_tab[i+G_CR_OFF] = (-FIX(0.41869)) * i; + rgb_ycc_tab[i+B_CR_OFF] = (-FIX(0.08131)) * i; + } +} + + +/* + * Convert some rows of samples to the JPEG colorspace. + * + * Note that we change from the application's interleaved-pixel format + * to our internal noninterleaved, one-plane-per-component format. + * The input buffer is therefore three times as wide as the output buffer. + * + * A starting row offset is provided only for the output buffer. The caller + * can easily adjust the passed input_buf value to accommodate any row + * offset required on that side. + */ + +METHODDEF(void) +rgb_ycc_convert (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPIMAGE output_buf, + JDIMENSION output_row, int num_rows) +{ + my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; + register int r, g, b; + register INT32 * ctab = cconvert->rgb_ycc_tab; + register JSAMPROW inptr; + register JSAMPROW outptr0, outptr1, outptr2; + register JDIMENSION col; + JDIMENSION num_cols = cinfo->image_width; + + while (--num_rows >= 0) { + inptr = *input_buf++; + outptr0 = output_buf[0][output_row]; + outptr1 = output_buf[1][output_row]; + outptr2 = output_buf[2][output_row]; + output_row++; + for (col = 0; col < num_cols; col++) { + r = GETJSAMPLE(inptr[RGB_RED]); + g = GETJSAMPLE(inptr[RGB_GREEN]); + b = GETJSAMPLE(inptr[RGB_BLUE]); + inptr += RGB_PIXELSIZE; + /* If the inputs are 0..MAXJSAMPLE, the outputs of these equations + * must be too; we do not need an explicit range-limiting operation. + * Hence the value being shifted is never negative, and we don't + * need the general RIGHT_SHIFT macro. + */ + /* Y */ + outptr0[col] = (JSAMPLE) + ((ctab[r+R_Y_OFF] + ctab[g+G_Y_OFF] + ctab[b+B_Y_OFF]) + >> SCALEBITS); + /* Cb */ + outptr1[col] = (JSAMPLE) + ((ctab[r+R_CB_OFF] + ctab[g+G_CB_OFF] + ctab[b+B_CB_OFF]) + >> SCALEBITS); + /* Cr */ + outptr2[col] = (JSAMPLE) + ((ctab[r+R_CR_OFF] + ctab[g+G_CR_OFF] + ctab[b+B_CR_OFF]) + >> SCALEBITS); + } + } +} + + +/**************** Cases other than RGB -> YCbCr **************/ + + +/* + * Convert some rows of samples to the JPEG colorspace. + * This version handles RGB->grayscale conversion, which is the same + * as the RGB->Y portion of RGB->YCbCr. + * We assume rgb_ycc_start has been called (we only use the Y tables). 
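+ * (Only the R_Y_OFF/G_Y_OFF/B_Y_OFF sections of the shared table are referenced below; the Cb and Cr sections are allocated but left unused in this path.)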
+ */ + +METHODDEF(void) +rgb_gray_convert (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPIMAGE output_buf, + JDIMENSION output_row, int num_rows) +{ + my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; + register int r, g, b; + register INT32 * ctab = cconvert->rgb_ycc_tab; + register JSAMPROW inptr; + register JSAMPROW outptr; + register JDIMENSION col; + JDIMENSION num_cols = cinfo->image_width; + + while (--num_rows >= 0) { + inptr = *input_buf++; + outptr = output_buf[0][output_row]; + output_row++; + for (col = 0; col < num_cols; col++) { + r = GETJSAMPLE(inptr[RGB_RED]); + g = GETJSAMPLE(inptr[RGB_GREEN]); + b = GETJSAMPLE(inptr[RGB_BLUE]); + inptr += RGB_PIXELSIZE; + /* Y */ + outptr[col] = (JSAMPLE) + ((ctab[r+R_Y_OFF] + ctab[g+G_Y_OFF] + ctab[b+B_Y_OFF]) + >> SCALEBITS); + } + } +} + + +/* + * Convert some rows of samples to the JPEG colorspace. + * This version handles Adobe-style CMYK->YCCK conversion, + * where we convert R=1-C, G=1-M, and B=1-Y to YCbCr using the same + * conversion as above, while passing K (black) unchanged. + * We assume rgb_ycc_start has been called. + */ + +METHODDEF(void) +cmyk_ycck_convert (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPIMAGE output_buf, + JDIMENSION output_row, int num_rows) +{ + my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; + register int r, g, b; + register INT32 * ctab = cconvert->rgb_ycc_tab; + register JSAMPROW inptr; + register JSAMPROW outptr0, outptr1, outptr2, outptr3; + register JDIMENSION col; + JDIMENSION num_cols = cinfo->image_width; + + while (--num_rows >= 0) { + inptr = *input_buf++; + outptr0 = output_buf[0][output_row]; + outptr1 = output_buf[1][output_row]; + outptr2 = output_buf[2][output_row]; + outptr3 = output_buf[3][output_row]; + output_row++; + for (col = 0; col < num_cols; col++) { + r = MAXJSAMPLE - GETJSAMPLE(inptr[0]); + g = MAXJSAMPLE - GETJSAMPLE(inptr[1]); + b = MAXJSAMPLE - GETJSAMPLE(inptr[2]); + /* K passes through as-is */ + outptr3[col] = inptr[3]; /* don't need GETJSAMPLE here */ + inptr += 4; + /* If the inputs are 0..MAXJSAMPLE, the outputs of these equations + * must be too; we do not need an explicit range-limiting operation. + * Hence the value being shifted is never negative, and we don't + * need the general RIGHT_SHIFT macro. + */ + /* Y */ + outptr0[col] = (JSAMPLE) + ((ctab[r+R_Y_OFF] + ctab[g+G_Y_OFF] + ctab[b+B_Y_OFF]) + >> SCALEBITS); + /* Cb */ + outptr1[col] = (JSAMPLE) + ((ctab[r+R_CB_OFF] + ctab[g+G_CB_OFF] + ctab[b+B_CB_OFF]) + >> SCALEBITS); + /* Cr */ + outptr2[col] = (JSAMPLE) + ((ctab[r+R_CR_OFF] + ctab[g+G_CR_OFF] + ctab[b+B_CR_OFF]) + >> SCALEBITS); + } + } +} + + +/* + * Convert some rows of samples to the JPEG colorspace. + * This version handles grayscale output with no conversion. + * The source can be either plain grayscale or YCbCr (since Y == gray). + */ + +METHODDEF(void) +grayscale_convert (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPIMAGE output_buf, + JDIMENSION output_row, int num_rows) +{ + register JSAMPROW inptr; + register JSAMPROW outptr; + register JDIMENSION col; + JDIMENSION num_cols = cinfo->image_width; + int instride = cinfo->input_components; + + while (--num_rows >= 0) { + inptr = *input_buf++; + outptr = output_buf[0][output_row]; + output_row++; + for (col = 0; col < num_cols; col++) { + outptr[col] = inptr[0]; /* don't need GETJSAMPLE() here */ + inptr += instride; + } + } +} + + +/* + * Convert some rows of samples to the JPEG colorspace. 
+ * No colorspace conversion, but change from interleaved + * to separate-planes representation. + */ + +METHODDEF(void) +rgb_convert (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPIMAGE output_buf, + JDIMENSION output_row, int num_rows) +{ + register JSAMPROW inptr; + register JSAMPROW outptr0, outptr1, outptr2; + register JDIMENSION col; + JDIMENSION num_cols = cinfo->image_width; + + while (--num_rows >= 0) { + inptr = *input_buf++; + outptr0 = output_buf[0][output_row]; + outptr1 = output_buf[1][output_row]; + outptr2 = output_buf[2][output_row]; + output_row++; + for (col = 0; col < num_cols; col++) { + /* We can dispense with GETJSAMPLE() here */ + outptr0[col] = inptr[RGB_RED]; + outptr1[col] = inptr[RGB_GREEN]; + outptr2[col] = inptr[RGB_BLUE]; + inptr += RGB_PIXELSIZE; + } + } +} + + +/* + * Convert some rows of samples to the JPEG colorspace. + * This version handles multi-component colorspaces without conversion. + * We assume input_components == num_components. + */ + +METHODDEF(void) +null_convert (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPIMAGE output_buf, + JDIMENSION output_row, int num_rows) +{ + register JSAMPROW inptr; + register JSAMPROW outptr; + register JDIMENSION col; + register int ci; + int nc = cinfo->num_components; + JDIMENSION num_cols = cinfo->image_width; + + while (--num_rows >= 0) { + /* It seems fastest to make a separate pass for each component. */ + for (ci = 0; ci < nc; ci++) { + inptr = *input_buf; + outptr = output_buf[ci][output_row]; + for (col = 0; col < num_cols; col++) { + outptr[col] = inptr[ci]; /* don't need GETJSAMPLE() here */ + inptr += nc; + } + } + input_buf++; + output_row++; + } +} + + +/* + * Empty method for start_pass. + */ + +METHODDEF(void) +null_method (j_compress_ptr cinfo) +{ + /* no work needed */ +} + + +/* + * Module initialization routine for input colorspace conversion. 
+ */ + +GLOBAL(void) +jinit_color_converter (j_compress_ptr cinfo) +{ + my_cconvert_ptr cconvert; + + cconvert = (my_cconvert_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_color_converter)); + cinfo->cconvert = (struct jpeg_color_converter *) cconvert; + /* set start_pass to null method until we find out differently */ + cconvert->pub.start_pass = null_method; + + /* Make sure input_components agrees with in_color_space */ + switch (cinfo->in_color_space) { + case JCS_GRAYSCALE: + if (cinfo->input_components != 1) + ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); + break; + + case JCS_RGB: + if (cinfo->input_components != RGB_PIXELSIZE) + ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); + break; + + case JCS_YCbCr: + if (cinfo->input_components != 3) + ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); + break; + + case JCS_CMYK: + case JCS_YCCK: + if (cinfo->input_components != 4) + ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); + break; + + default: /* JCS_UNKNOWN can be anything */ + if (cinfo->input_components < 1) + ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); + break; + } + + /* Check num_components, set conversion method based on requested space */ + switch (cinfo->jpeg_color_space) { + case JCS_GRAYSCALE: + if (cinfo->num_components != 1) + ERREXIT(cinfo, JERR_BAD_J_COLORSPACE); + if (cinfo->in_color_space == JCS_GRAYSCALE || + cinfo->in_color_space == JCS_YCbCr) + cconvert->pub.color_convert = grayscale_convert; + else if (cinfo->in_color_space == JCS_RGB) { + cconvert->pub.start_pass = rgb_ycc_start; + cconvert->pub.color_convert = rgb_gray_convert; + } else + ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL); + break; + + case JCS_RGB: + if (cinfo->num_components != 3) + ERREXIT(cinfo, JERR_BAD_J_COLORSPACE); + if (cinfo->in_color_space == JCS_RGB) + cconvert->pub.color_convert = rgb_convert; + else + ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL); + break; + + case JCS_YCbCr: + if (cinfo->num_components != 3) + ERREXIT(cinfo, JERR_BAD_J_COLORSPACE); + if (cinfo->in_color_space == JCS_RGB) { + cconvert->pub.start_pass = rgb_ycc_start; + cconvert->pub.color_convert = rgb_ycc_convert; + } else if (cinfo->in_color_space == JCS_YCbCr) + cconvert->pub.color_convert = null_convert; + else + ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL); + break; + + case JCS_CMYK: + if (cinfo->num_components != 4) + ERREXIT(cinfo, JERR_BAD_J_COLORSPACE); + if (cinfo->in_color_space == JCS_CMYK) + cconvert->pub.color_convert = null_convert; + else + ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL); + break; + + case JCS_YCCK: + if (cinfo->num_components != 4) + ERREXIT(cinfo, JERR_BAD_J_COLORSPACE); + if (cinfo->in_color_space == JCS_CMYK) { + cconvert->pub.start_pass = rgb_ycc_start; + cconvert->pub.color_convert = cmyk_ycck_convert; + } else if (cinfo->in_color_space == JCS_YCCK) + cconvert->pub.color_convert = null_convert; + else + ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL); + break; + + default: /* allow null conversion of JCS_UNKNOWN */ + if (cinfo->jpeg_color_space != cinfo->in_color_space || + cinfo->num_components != cinfo->input_components) + ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL); + cconvert->pub.color_convert = null_convert; + break; + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcdctmgr.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcdctmgr.c new file mode 100644 index 000000000..0bbdbb685 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcdctmgr.c @@ -0,0 +1,482 @@ +/* + * jcdctmgr.c + * + * Copyright (C) 1994-1996, 
Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains the forward-DCT management logic. + * This code selects a particular DCT implementation to be used, + * and it performs related housekeeping chores including coefficient + * quantization. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" +#include "jdct.h" /* Private declarations for DCT subsystem */ + + +/* Private subobject for this module */ + +typedef struct { + struct jpeg_forward_dct pub; /* public fields */ + + /* Pointer to the DCT routine actually in use */ + forward_DCT_method_ptr do_dct[MAX_COMPONENTS]; + + /* The actual post-DCT divisors --- not identical to the quant table + * entries, because of scaling (especially for an unnormalized DCT). + * Each table is given in normal array order. + */ + DCTELEM * divisors[NUM_QUANT_TBLS]; + +#ifdef DCT_FLOAT_SUPPORTED + /* Same as above for the floating-point case. */ + float_DCT_method_ptr do_float_dct[MAX_COMPONENTS]; + FAST_FLOAT * float_divisors[NUM_QUANT_TBLS]; +#endif +} my_fdct_controller; + +typedef my_fdct_controller * my_fdct_ptr; + + +/* The current scaled-DCT routines require ISLOW-style divisor tables, + * so be sure to compile that code if either ISLOW or SCALING is requested. + */ +#ifdef DCT_ISLOW_SUPPORTED +#define PROVIDE_ISLOW_TABLES +#else +#ifdef DCT_SCALING_SUPPORTED +#define PROVIDE_ISLOW_TABLES +#endif +#endif + + +/* + * Perform forward DCT on one or more blocks of a component. + * + * The input samples are taken from the sample_data[] array starting at + * position start_row/start_col, and moving to the right for any additional + * blocks. The quantized coefficients are returned in coef_blocks[]. + */ + +METHODDEF(void) +forward_DCT (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY sample_data, JBLOCKROW coef_blocks, + JDIMENSION start_row, JDIMENSION start_col, + JDIMENSION num_blocks) +/* This version is used for integer DCT implementations. */ +{ + /* This routine is heavily used, so it's worth coding it tightly. */ + my_fdct_ptr fdct = (my_fdct_ptr) cinfo->fdct; + forward_DCT_method_ptr do_dct = fdct->do_dct[compptr->component_index]; + DCTELEM * divisors = fdct->divisors[compptr->quant_tbl_no]; + DCTELEM workspace[DCTSIZE2]; /* work area for FDCT subroutine */ + JDIMENSION bi; + + sample_data += start_row; /* fold in the vertical offset once */ + + for (bi = 0; bi < num_blocks; bi++, start_col += compptr->DCT_h_scaled_size) { + /* Perform the DCT */ + (*do_dct) (workspace, sample_data, start_col); + + /* Quantize/descale the coefficients, and store into coef_blocks[] */ + { register DCTELEM temp, qval; + register int i; + register JCOEFPTR output_ptr = coef_blocks[bi]; + + for (i = 0; i < DCTSIZE2; i++) { + qval = divisors[i]; + temp = workspace[i]; + /* Divide the coefficient value by qval, ensuring proper rounding. + * Since C does not specify the direction of rounding for negative + * quotients, we have to force the dividend positive for portability. + * + * In most files, at least half of the output values will be zero + * (at default quantization settings, more like three-quarters...) + * so we should ensure that this case is fast. On many machines, + * a comparison is enough cheaper than a divide to make a special test + * a win. Since both inputs will be nonnegative, we need only test + * for a < b to discover whether a/b is 0. 
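+ * For illustration: with qval = 16, temp = 13 becomes (13 + 8) / 16 = 1, while temp = -13 is negated, rounded and divided the same way and then negated back to -1, so positive and negative values round identically.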
+ * If your machine's division is fast enough, define FAST_DIVIDE. + */ +#ifdef FAST_DIVIDE +#define DIVIDE_BY(a,b) a /= b +#else +#define DIVIDE_BY(a,b) if (a >= b) a /= b; else a = 0 +#endif + if (temp < 0) { + temp = -temp; + temp += qval>>1; /* for rounding */ + DIVIDE_BY(temp, qval); + temp = -temp; + } else { + temp += qval>>1; /* for rounding */ + DIVIDE_BY(temp, qval); + } + output_ptr[i] = (JCOEF) temp; + } + } + } +} + + +#ifdef DCT_FLOAT_SUPPORTED + +METHODDEF(void) +forward_DCT_float (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY sample_data, JBLOCKROW coef_blocks, + JDIMENSION start_row, JDIMENSION start_col, + JDIMENSION num_blocks) +/* This version is used for floating-point DCT implementations. */ +{ + /* This routine is heavily used, so it's worth coding it tightly. */ + my_fdct_ptr fdct = (my_fdct_ptr) cinfo->fdct; + float_DCT_method_ptr do_dct = fdct->do_float_dct[compptr->component_index]; + FAST_FLOAT * divisors = fdct->float_divisors[compptr->quant_tbl_no]; + FAST_FLOAT workspace[DCTSIZE2]; /* work area for FDCT subroutine */ + JDIMENSION bi; + + sample_data += start_row; /* fold in the vertical offset once */ + + for (bi = 0; bi < num_blocks; bi++, start_col += compptr->DCT_h_scaled_size) { + /* Perform the DCT */ + (*do_dct) (workspace, sample_data, start_col); + + /* Quantize/descale the coefficients, and store into coef_blocks[] */ + { register FAST_FLOAT temp; + register int i; + register JCOEFPTR output_ptr = coef_blocks[bi]; + + for (i = 0; i < DCTSIZE2; i++) { + /* Apply the quantization and scaling factor */ + temp = workspace[i] * divisors[i]; + /* Round to nearest integer. + * Since C does not specify the direction of rounding for negative + * quotients, we have to force the dividend positive for portability. + * The maximum coefficient size is +-16K (for 12-bit data), so this + * code should work for either 16-bit or 32-bit ints. + */ + output_ptr[i] = (JCOEF) ((int) (temp + (FAST_FLOAT) 16384.5) - 16384); + } + } + } +} + +#endif /* DCT_FLOAT_SUPPORTED */ + + +/* + * Initialize for a processing pass. + * Verify that all referenced Q-tables are present, and set up + * the divisor table for each one. + * In the current implementation, DCT of all components is done during + * the first pass, even if only some components will be output in the + * first scan. Hence all components should be examined here. 
+ */ + +METHODDEF(void) +start_pass_fdctmgr (j_compress_ptr cinfo) +{ + my_fdct_ptr fdct = (my_fdct_ptr) cinfo->fdct; + int ci, qtblno, i; + jpeg_component_info *compptr; + int method = 0; + JQUANT_TBL * qtbl; + DCTELEM * dtbl; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Select the proper DCT routine for this component's scaling */ + switch ((compptr->DCT_h_scaled_size << 8) + compptr->DCT_v_scaled_size) { +#ifdef DCT_SCALING_SUPPORTED + case ((1 << 8) + 1): + fdct->do_dct[ci] = jpeg_fdct_1x1; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((2 << 8) + 2): + fdct->do_dct[ci] = jpeg_fdct_2x2; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((3 << 8) + 3): + fdct->do_dct[ci] = jpeg_fdct_3x3; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((4 << 8) + 4): + fdct->do_dct[ci] = jpeg_fdct_4x4; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((5 << 8) + 5): + fdct->do_dct[ci] = jpeg_fdct_5x5; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((6 << 8) + 6): + fdct->do_dct[ci] = jpeg_fdct_6x6; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((7 << 8) + 7): + fdct->do_dct[ci] = jpeg_fdct_7x7; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((9 << 8) + 9): + fdct->do_dct[ci] = jpeg_fdct_9x9; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((10 << 8) + 10): + fdct->do_dct[ci] = jpeg_fdct_10x10; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((11 << 8) + 11): + fdct->do_dct[ci] = jpeg_fdct_11x11; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((12 << 8) + 12): + fdct->do_dct[ci] = jpeg_fdct_12x12; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((13 << 8) + 13): + fdct->do_dct[ci] = jpeg_fdct_13x13; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((14 << 8) + 14): + fdct->do_dct[ci] = jpeg_fdct_14x14; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((15 << 8) + 15): + fdct->do_dct[ci] = jpeg_fdct_15x15; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((16 << 8) + 16): + fdct->do_dct[ci] = jpeg_fdct_16x16; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((16 << 8) + 8): + fdct->do_dct[ci] = jpeg_fdct_16x8; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((14 << 8) + 7): + fdct->do_dct[ci] = jpeg_fdct_14x7; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((12 << 8) + 6): + fdct->do_dct[ci] = jpeg_fdct_12x6; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((10 << 8) + 5): + fdct->do_dct[ci] = jpeg_fdct_10x5; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((8 << 8) + 4): + fdct->do_dct[ci] = jpeg_fdct_8x4; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((6 << 8) + 3): + fdct->do_dct[ci] = jpeg_fdct_6x3; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((4 << 8) + 2): + fdct->do_dct[ci] = jpeg_fdct_4x2; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((2 << 8) + 1): + fdct->do_dct[ci] = jpeg_fdct_2x1; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((8 << 8) + 16): + fdct->do_dct[ci] = 
jpeg_fdct_8x16; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((7 << 8) + 14): + fdct->do_dct[ci] = jpeg_fdct_7x14; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((6 << 8) + 12): + fdct->do_dct[ci] = jpeg_fdct_6x12; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((5 << 8) + 10): + fdct->do_dct[ci] = jpeg_fdct_5x10; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((4 << 8) + 8): + fdct->do_dct[ci] = jpeg_fdct_4x8; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((3 << 8) + 6): + fdct->do_dct[ci] = jpeg_fdct_3x6; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((2 << 8) + 4): + fdct->do_dct[ci] = jpeg_fdct_2x4; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; + case ((1 << 8) + 2): + fdct->do_dct[ci] = jpeg_fdct_1x2; + method = JDCT_ISLOW; /* jfdctint uses islow-style table */ + break; +#endif + case ((DCTSIZE << 8) + DCTSIZE): + switch (cinfo->dct_method) { +#ifdef DCT_ISLOW_SUPPORTED + case JDCT_ISLOW: + fdct->do_dct[ci] = jpeg_fdct_islow; + method = JDCT_ISLOW; + break; +#endif +#ifdef DCT_IFAST_SUPPORTED + case JDCT_IFAST: + fdct->do_dct[ci] = jpeg_fdct_ifast; + method = JDCT_IFAST; + break; +#endif +#ifdef DCT_FLOAT_SUPPORTED + case JDCT_FLOAT: + fdct->do_float_dct[ci] = jpeg_fdct_float; + method = JDCT_FLOAT; + break; +#endif + default: + ERREXIT(cinfo, JERR_NOT_COMPILED); + break; + } + break; + default: + ERREXIT2(cinfo, JERR_BAD_DCTSIZE, + compptr->DCT_h_scaled_size, compptr->DCT_v_scaled_size); + break; + } + qtblno = compptr->quant_tbl_no; + /* Make sure specified quantization table is present */ + if (qtblno < 0 || qtblno >= NUM_QUANT_TBLS || + cinfo->quant_tbl_ptrs[qtblno] == NULL) + ERREXIT1(cinfo, JERR_NO_QUANT_TABLE, qtblno); + qtbl = cinfo->quant_tbl_ptrs[qtblno]; + /* Compute divisors for this quant table */ + /* We may do this more than once for same table, but it's not a big deal */ + switch (method) { +#ifdef PROVIDE_ISLOW_TABLES + case JDCT_ISLOW: + /* For LL&M IDCT method, divisors are equal to raw quantization + * coefficients multiplied by 8 (to counteract scaling). + */ + if (fdct->divisors[qtblno] == NULL) { + fdct->divisors[qtblno] = (DCTELEM *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + DCTSIZE2 * SIZEOF(DCTELEM)); + } + dtbl = fdct->divisors[qtblno]; + for (i = 0; i < DCTSIZE2; i++) { + dtbl[i] = ((DCTELEM) qtbl->quantval[i]) << 3; + } + fdct->pub.forward_DCT[ci] = forward_DCT; + break; +#endif +#ifdef DCT_IFAST_SUPPORTED + case JDCT_IFAST: + { + /* For AA&N IDCT method, divisors are equal to quantization + * coefficients scaled by scalefactor[row]*scalefactor[col], where + * scalefactor[0] = 1 + * scalefactor[k] = cos(k*PI/16) * sqrt(2) for k=1..7 + * We apply a further scale factor of 8. 
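+ * Concretely, aanscales[] below stores scalefactor[row]*scalefactor[col] scaled up by 14 bits, so a quantval of 16 at the DC position (aanscales[0] = 16384) gives DESCALE(16*16384, 14-3) = 128, i.e. the plain quantizer times 8.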
+ */ +#define CONST_BITS 14 + static const INT16 aanscales[DCTSIZE2] = { + /* precomputed values scaled up by 14 bits */ + 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, + 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270, + 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906, + 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315, + 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, + 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552, + 8867, 12299, 11585, 10426, 8867, 6967, 4799, 2446, + 4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247 + }; + SHIFT_TEMPS + + if (fdct->divisors[qtblno] == NULL) { + fdct->divisors[qtblno] = (DCTELEM *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + DCTSIZE2 * SIZEOF(DCTELEM)); + } + dtbl = fdct->divisors[qtblno]; + for (i = 0; i < DCTSIZE2; i++) { + dtbl[i] = (DCTELEM) + DESCALE(MULTIPLY16V16((INT32) qtbl->quantval[i], + (INT32) aanscales[i]), + CONST_BITS-3); + } + } + fdct->pub.forward_DCT[ci] = forward_DCT; + break; +#endif +#ifdef DCT_FLOAT_SUPPORTED + case JDCT_FLOAT: + { + /* For float AA&N IDCT method, divisors are equal to quantization + * coefficients scaled by scalefactor[row]*scalefactor[col], where + * scalefactor[0] = 1 + * scalefactor[k] = cos(k*PI/16) * sqrt(2) for k=1..7 + * We apply a further scale factor of 8. + * What's actually stored is 1/divisor so that the inner loop can + * use a multiplication rather than a division. + */ + FAST_FLOAT * fdtbl; + int row, col; + static const double aanscalefactor[DCTSIZE] = { + 1.0, 1.387039845, 1.306562965, 1.175875602, + 1.0, 0.785694958, 0.541196100, 0.275899379 + }; + + if (fdct->float_divisors[qtblno] == NULL) { + fdct->float_divisors[qtblno] = (FAST_FLOAT *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + DCTSIZE2 * SIZEOF(FAST_FLOAT)); + } + fdtbl = fdct->float_divisors[qtblno]; + i = 0; + for (row = 0; row < DCTSIZE; row++) { + for (col = 0; col < DCTSIZE; col++) { + fdtbl[i] = (FAST_FLOAT) + (1.0 / (((double) qtbl->quantval[i] * + aanscalefactor[row] * aanscalefactor[col] * 8.0))); + i++; + } + } + } + fdct->pub.forward_DCT[ci] = forward_DCT_float; + break; +#endif + default: + ERREXIT(cinfo, JERR_NOT_COMPILED); + break; + } + } +} + + +/* + * Initialize FDCT manager. + */ + +GLOBAL(void) +jinit_forward_dct (j_compress_ptr cinfo) +{ + my_fdct_ptr fdct; + int i; + + fdct = (my_fdct_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_fdct_controller)); + cinfo->fdct = (struct jpeg_forward_dct *) fdct; + fdct->pub.start_pass = start_pass_fdctmgr; + + /* Mark divisor tables unallocated */ + for (i = 0; i < NUM_QUANT_TBLS; i++) { + fdct->divisors[i] = NULL; +#ifdef DCT_FLOAT_SUPPORTED + fdct->float_divisors[i] = NULL; +#endif + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jchuff.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jchuff.c new file mode 100644 index 000000000..257d7aa1f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jchuff.c @@ -0,0 +1,1576 @@ +/* + * jchuff.c + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * Modified 2006-2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains Huffman entropy encoding routines. + * Both sequential and progressive modes are supported in this single module. 
+ * + * Much of the complexity here has to do with supporting output suspension. + * If the data destination module demands suspension, we want to be able to + * back up to the start of the current MCU. To do this, we copy state + * variables into local working storage, and update them back to the + * permanent JPEG objects only upon successful completion of an MCU. + * + * We do not support output suspension for the progressive JPEG mode, since + * the library currently does not allow multiple-scan files to be written + * with output suspension. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* The legal range of a DCT coefficient is + * -1024 .. +1023 for 8-bit data; + * -16384 .. +16383 for 12-bit data. + * Hence the magnitude should always fit in 10 or 14 bits respectively. + */ + +#if BITS_IN_JSAMPLE == 8 +#define MAX_COEF_BITS 10 +#else +#define MAX_COEF_BITS 14 +#endif + +/* Derived data constructed for each Huffman table */ + +typedef struct { + unsigned int ehufco[256]; /* code for each symbol */ + char ehufsi[256]; /* length of code for each symbol */ + /* If no code has been allocated for a symbol S, ehufsi[S] contains 0 */ +} c_derived_tbl; + + +/* Expanded entropy encoder object for Huffman encoding. + * + * The savable_state subrecord contains fields that change within an MCU, + * but must not be updated permanently until we complete the MCU. + */ + +typedef struct { + INT32 put_buffer; /* current bit-accumulation buffer */ + int put_bits; /* # of bits now in it */ + int last_dc_val[MAX_COMPS_IN_SCAN]; /* last DC coef for each component */ +} savable_state; + +/* This macro is to work around compilers with missing or broken + * structure assignment. You'll need to fix this code if you have + * such a compiler and you change MAX_COMPS_IN_SCAN. + */ + +#ifndef NO_STRUCT_ASSIGN +#define ASSIGN_STATE(dest,src) ((dest) = (src)) +#else +#if MAX_COMPS_IN_SCAN == 4 +#define ASSIGN_STATE(dest,src) \ + ((dest).put_buffer = (src).put_buffer, \ + (dest).put_bits = (src).put_bits, \ + (dest).last_dc_val[0] = (src).last_dc_val[0], \ + (dest).last_dc_val[1] = (src).last_dc_val[1], \ + (dest).last_dc_val[2] = (src).last_dc_val[2], \ + (dest).last_dc_val[3] = (src).last_dc_val[3]) +#endif +#endif + + +typedef struct { + struct jpeg_entropy_encoder pub; /* public fields */ + + savable_state saved; /* Bit buffer & DC state at start of MCU */ + + /* These fields are NOT loaded into local working state. */ + unsigned int restarts_to_go; /* MCUs left in this restart interval */ + int next_restart_num; /* next restart number to write (0-7) */ + + /* Pointers to derived tables (these workspaces have image lifespan) */ + c_derived_tbl * dc_derived_tbls[NUM_HUFF_TBLS]; + c_derived_tbl * ac_derived_tbls[NUM_HUFF_TBLS]; + + /* Statistics tables for optimization */ + long * dc_count_ptrs[NUM_HUFF_TBLS]; + long * ac_count_ptrs[NUM_HUFF_TBLS]; + + /* Following fields used only in progressive mode */ + + /* Mode flag: TRUE for optimization, FALSE for actual data output */ + boolean gather_statistics; + + /* next_output_byte/free_in_buffer are local copies of cinfo->dest fields. 
+ */ + JOCTET * next_output_byte; /* => next byte to write in buffer */ + size_t free_in_buffer; /* # of byte spaces remaining in buffer */ + j_compress_ptr cinfo; /* link to cinfo (needed for dump_buffer) */ + + /* Coding status for AC components */ + int ac_tbl_no; /* the table number of the single component */ + unsigned int EOBRUN; /* run length of EOBs */ + unsigned int BE; /* # of buffered correction bits before MCU */ + char * bit_buffer; /* buffer for correction bits (1 per char) */ + /* packing correction bits tightly would save some space but cost time... */ +} huff_entropy_encoder; + +typedef huff_entropy_encoder * huff_entropy_ptr; + +/* Working state while writing an MCU (sequential mode). + * This struct contains all the fields that are needed by subroutines. + */ + +typedef struct { + JOCTET * next_output_byte; /* => next byte to write in buffer */ + size_t free_in_buffer; /* # of byte spaces remaining in buffer */ + savable_state cur; /* Current bit buffer & DC state */ + j_compress_ptr cinfo; /* dump_buffer needs access to this */ +} working_state; + +/* MAX_CORR_BITS is the number of bits the AC refinement correction-bit + * buffer can hold. Larger sizes may slightly improve compression, but + * 1000 is already well into the realm of overkill. + * The minimum safe size is 64 bits. + */ + +#define MAX_CORR_BITS 1000 /* Max # of correction bits I can buffer */ + +/* IRIGHT_SHIFT is like RIGHT_SHIFT, but works on int rather than INT32. + * We assume that int right shift is unsigned if INT32 right shift is, + * which should be safe. + */ + +#ifdef RIGHT_SHIFT_IS_UNSIGNED +#define ISHIFT_TEMPS int ishift_temp; +#define IRIGHT_SHIFT(x,shft) \ + ((ishift_temp = (x)) < 0 ? \ + (ishift_temp >> (shft)) | ((~0) << (16-(shft))) : \ + (ishift_temp >> (shft))) +#else +#define ISHIFT_TEMPS +#define IRIGHT_SHIFT(x,shft) ((x) >> (shft)) +#endif + + +/* + * Compute the derived values for a Huffman table. + * This routine also performs some validation checks on the table. + */ + +LOCAL(void) +jpeg_make_c_derived_tbl (j_compress_ptr cinfo, boolean isDC, int tblno, + c_derived_tbl ** pdtbl) +{ + JHUFF_TBL *htbl; + c_derived_tbl *dtbl; + int p, i, l, lastp, si, maxsymbol; + char huffsize[257]; + unsigned int huffcode[257]; + unsigned int code; + + /* Note that huffsize[] and huffcode[] are filled in code-length order, + * paralleling the order of the symbols themselves in htbl->huffval[]. + */ + + /* Find the input Huffman table */ + if (tblno < 0 || tblno >= NUM_HUFF_TBLS) + ERREXIT1(cinfo, JERR_NO_HUFF_TABLE, tblno); + htbl = + isDC ? cinfo->dc_huff_tbl_ptrs[tblno] : cinfo->ac_huff_tbl_ptrs[tblno]; + if (htbl == NULL) + ERREXIT1(cinfo, JERR_NO_HUFF_TABLE, tblno); + + /* Allocate a workspace if we haven't already done so. */ + if (*pdtbl == NULL) + *pdtbl = (c_derived_tbl *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(c_derived_tbl)); + dtbl = *pdtbl; + + /* Figure C.1: make table of Huffman code length for each symbol */ + + p = 0; + for (l = 1; l <= 16; l++) { + i = (int) htbl->bits[l]; + if (i < 0 || p + i > 256) /* protect against table overrun */ + ERREXIT(cinfo, JERR_BAD_HUFF_TABLE); + while (i--) + huffsize[p++] = (char) l; + } + huffsize[p] = 0; + lastp = p; + + /* Figure C.2: generate the codes themselves */ + /* We also validate that the counts represent a legal Huffman code tree. 
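+ * (Worked example: if bits[] requests two codes of length 2 and one of length 3, the loop below assigns 00 and 01 at length 2, then doubles the counter to yield 100 at length 3; counts that would need more codes than fit in si bits fail the code >= (1 << si) test.)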
*/ + + code = 0; + si = huffsize[0]; + p = 0; + while (huffsize[p]) { + while (((int) huffsize[p]) == si) { + huffcode[p++] = code; + code++; + } + /* code is now 1 more than the last code used for codelength si; but + * it must still fit in si bits, since no code is allowed to be all ones. + */ + if (((INT32) code) >= (((INT32) 1) << si)) + ERREXIT(cinfo, JERR_BAD_HUFF_TABLE); + code <<= 1; + si++; + } + + /* Figure C.3: generate encoding tables */ + /* These are code and size indexed by symbol value */ + + /* Set all codeless symbols to have code length 0; + * this lets us detect duplicate VAL entries here, and later + * allows emit_bits to detect any attempt to emit such symbols. + */ + MEMZERO(dtbl->ehufsi, SIZEOF(dtbl->ehufsi)); + + /* This is also a convenient place to check for out-of-range + * and duplicated VAL entries. We allow 0..255 for AC symbols + * but only 0..15 for DC. (We could constrain them further + * based on data depth and mode, but this seems enough.) + */ + maxsymbol = isDC ? 15 : 255; + + for (p = 0; p < lastp; p++) { + i = htbl->huffval[p]; + if (i < 0 || i > maxsymbol || dtbl->ehufsi[i]) + ERREXIT(cinfo, JERR_BAD_HUFF_TABLE); + dtbl->ehufco[i] = huffcode[p]; + dtbl->ehufsi[i] = huffsize[p]; + } +} + + +/* Outputting bytes to the file. + * NB: these must be called only when actually outputting, + * that is, entropy->gather_statistics == FALSE. + */ + +/* Emit a byte, taking 'action' if must suspend. */ +#define emit_byte_s(state,val,action) \ + { *(state)->next_output_byte++ = (JOCTET) (val); \ + if (--(state)->free_in_buffer == 0) \ + if (! dump_buffer_s(state)) \ + { action; } } + +/* Emit a byte */ +#define emit_byte_e(entropy,val) \ + { *(entropy)->next_output_byte++ = (JOCTET) (val); \ + if (--(entropy)->free_in_buffer == 0) \ + dump_buffer_e(entropy); } + + +LOCAL(boolean) +dump_buffer_s (working_state * state) +/* Empty the output buffer; return TRUE if successful, FALSE if must suspend */ +{ + struct jpeg_destination_mgr * dest = state->cinfo->dest; + + if (! (*dest->empty_output_buffer) (state->cinfo)) + return FALSE; + /* After a successful buffer dump, must reset buffer pointers */ + state->next_output_byte = dest->next_output_byte; + state->free_in_buffer = dest->free_in_buffer; + return TRUE; +} + + +LOCAL(void) +dump_buffer_e (huff_entropy_ptr entropy) +/* Empty the output buffer; we do not support suspension in this case. */ +{ + struct jpeg_destination_mgr * dest = entropy->cinfo->dest; + + if (! (*dest->empty_output_buffer) (entropy->cinfo)) + ERREXIT(entropy->cinfo, JERR_CANT_SUSPEND); + /* After a successful buffer dump, must reset buffer pointers */ + entropy->next_output_byte = dest->next_output_byte; + entropy->free_in_buffer = dest->free_in_buffer; +} + + +/* Outputting bits to the file */ + +/* Only the right 24 bits of put_buffer are used; the valid bits are + * left-justified in this part. At most 16 bits can be passed to emit_bits + * in one call, and we never retain more than 7 bits in put_buffer + * between calls, so 24 bits are sufficient. + */ + +INLINE +LOCAL(boolean) +emit_bits_s (working_state * state, unsigned int code, int size) +/* Emit some bits; return TRUE if successful, FALSE if must suspend */ +{ + /* This routine is heavily used, so it's worth coding tightly. 
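+ * Note that the byte-emit loop below stuffs a zero byte after any 0xFF it writes, so the entropy-coded data can never emulate a JPEG marker.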
 */
+  register INT32 put_buffer = (INT32) code;
+  register int put_bits = state->cur.put_bits;
+
+  /* if size is 0, caller used an invalid Huffman table entry */
+  if (size == 0)
+    ERREXIT(state->cinfo, JERR_HUFF_MISSING_CODE);
+
+  put_buffer &= (((INT32) 1)<<size) - 1; /* mask off any extra bits in code */
+
+  put_bits += size;             /* new number of bits in buffer */
+
+  put_buffer <<= 24 - put_bits; /* align incoming bits */
+
+  put_buffer |= state->cur.put_buffer; /* and merge with old buffer contents */
+
+  while (put_bits >= 8) {
+    int c = (int) ((put_buffer >> 16) & 0xFF);
+
+    emit_byte_s(state, c, return FALSE);
+    if (c == 0xFF) {            /* need to stuff a zero byte? */
+      emit_byte_s(state, 0, return FALSE);
+    }
+    put_buffer <<= 8;
+    put_bits -= 8;
+  }
+
+  state->cur.put_buffer = put_buffer; /* update state variables */
+  state->cur.put_bits = put_bits;
+
+  return TRUE;
+}
+
+
+INLINE
+LOCAL(void)
+emit_bits_e (huff_entropy_ptr entropy, unsigned int code, int size)
+/* Emit some bits, unless we are in gather mode */
+{
+  /* This routine is heavily used, so it's worth coding tightly. */
+  register INT32 put_buffer = (INT32) code;
+  register int put_bits = entropy->saved.put_bits;
+
+  /* if size is 0, caller used an invalid Huffman table entry */
+  if (size == 0)
+    ERREXIT(entropy->cinfo, JERR_HUFF_MISSING_CODE);
+
+  if (entropy->gather_statistics)
+    return;                     /* do nothing if we're only getting stats */
+
+  put_buffer &= (((INT32) 1)<<size) - 1; /* mask off any extra bits in code */
+
+  put_bits += size;             /* new number of bits in buffer */
+
+  put_buffer <<= 24 - put_bits; /* align incoming bits */
+
+  put_buffer |= entropy->saved.put_buffer; /* and merge with old buffer contents */
+
+  while (put_bits >= 8) {
+    int c = (int) ((put_buffer >> 16) & 0xFF);
+
+    emit_byte_e(entropy, c);
+    if (c == 0xFF) {            /* need to stuff a zero byte? */
+      emit_byte_e(entropy, 0);
+    }
+    put_buffer <<= 8;
+    put_bits -= 8;
+  }
+
+  entropy->saved.put_buffer = put_buffer; /* update variables */
+  entropy->saved.put_bits = put_bits;
+}
+
+
+LOCAL(boolean)
+flush_bits_s (working_state * state)
+{
+  if (! emit_bits_s(state, 0x7F, 7)) /* fill any partial byte with ones */
+    return FALSE;
+  state->cur.put_buffer = 0;         /* and reset bit-buffer to empty */
+  state->cur.put_bits = 0;
+  return TRUE;
+}
+
+
+LOCAL(void)
+flush_bits_e (huff_entropy_ptr entropy)
+{
+  emit_bits_e(entropy, 0x7F, 7); /* fill any partial byte with ones */
+  entropy->saved.put_buffer = 0; /* and reset bit-buffer to empty */
+  entropy->saved.put_bits = 0;
+}
+
+
+/*
+ * Emit (or just count) a Huffman symbol.
+ */
+
+INLINE
+LOCAL(void)
+emit_dc_symbol (huff_entropy_ptr entropy, int tbl_no, int symbol)
+{
+  if (entropy->gather_statistics)
+    entropy->dc_count_ptrs[tbl_no][symbol]++;
+  else {
+    c_derived_tbl * tbl = entropy->dc_derived_tbls[tbl_no];
+    emit_bits_e(entropy, tbl->ehufco[symbol], tbl->ehufsi[symbol]);
+  }
+}
+
+
+INLINE
+LOCAL(void)
+emit_ac_symbol (huff_entropy_ptr entropy, int tbl_no, int symbol)
+{
+  if (entropy->gather_statistics)
+    entropy->ac_count_ptrs[tbl_no][symbol]++;
+  else {
+    c_derived_tbl * tbl = entropy->ac_derived_tbls[tbl_no];
+    emit_bits_e(entropy, tbl->ehufco[symbol], tbl->ehufsi[symbol]);
+  }
+}
+
+
+/*
+ * Emit bits from a correction bit buffer.
+ */
+
+LOCAL(void)
+emit_buffered_bits (huff_entropy_ptr entropy, char * bufstart,
+                    unsigned int nbits)
+{
+  if (entropy->gather_statistics)
+    return;                     /* no real work */
+
+  while (nbits > 0) {
+    emit_bits_e(entropy, (unsigned int) (*bufstart), 1);
+    bufstart++;
+    nbits--;
+  }
+}
+
+
+/*
+ * Emit any pending EOBRUN symbol.
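 */

/*
 * [Editor's illustration -- not part of jchuff.c]  A minimal standalone
 * sketch of what emit_bits_s/emit_bits_e do with the 24-bit put_buffer:
 * mask the code to 'size' bits, left-justify it under the bits already
 * buffered, flush completed bytes, and stuff a zero byte after every
 * emitted 0xFF as the JPEG format requires.  The names BitSink and
 * pack_bits_demo are hypothetical.
 */

#include <stdio.h>

typedef struct {
  unsigned long buffer;   /* left-justified bits; only the low 24 bits are used */
  int bits;               /* number of valid bits currently buffered */
} BitSink;

static void pack_bits_demo(BitSink * s, unsigned int code, int size)
{
  unsigned long buf = (unsigned long) code & ((1UL << size) - 1); /* mask extra bits */

  s->bits += size;          /* new number of buffered bits */
  buf <<= 24 - s->bits;     /* align incoming bits */
  buf |= s->buffer;         /* merge with old buffer contents */

  while (s->bits >= 8) {    /* flush completed bytes */
    int c = (int) ((buf >> 16) & 0xFF);
    printf("emit 0x%02X\n", c);
    if (c == 0xFF)
      printf("emit 0x00 (stuffed)\n");  /* byte-stuff after 0xFF */
    buf <<= 8;
    s->bits -= 8;
  }
  s->buffer = buf & 0xFFFFFFUL;         /* keep only the 24-bit window */
}

int main(void)
{
  BitSink s = { 0, 0 };
  pack_bits_demo(&s, 0x7F, 7);   /* seven 1-bits, stays buffered */
  pack_bits_demo(&s, 0x1FF, 9);  /* completes two 0xFF bytes -> both stuffed */
  return 0;
}

/*
 * emit_eobrun: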
+ */ + +LOCAL(void) +emit_eobrun (huff_entropy_ptr entropy) +{ + register int temp, nbits; + + if (entropy->EOBRUN > 0) { /* if there is any pending EOBRUN */ + temp = entropy->EOBRUN; + nbits = 0; + while ((temp >>= 1)) + nbits++; + /* safety check: shouldn't happen given limited correction-bit buffer */ + if (nbits > 14) + ERREXIT(entropy->cinfo, JERR_HUFF_MISSING_CODE); + + emit_ac_symbol(entropy, entropy->ac_tbl_no, nbits << 4); + if (nbits) + emit_bits_e(entropy, entropy->EOBRUN, nbits); + + entropy->EOBRUN = 0; + + /* Emit any buffered correction bits */ + emit_buffered_bits(entropy, entropy->bit_buffer, entropy->BE); + entropy->BE = 0; + } +} + + +/* + * Emit a restart marker & resynchronize predictions. + */ + +LOCAL(boolean) +emit_restart_s (working_state * state, int restart_num) +{ + int ci; + + if (! flush_bits_s(state)) + return FALSE; + + emit_byte_s(state, 0xFF, return FALSE); + emit_byte_s(state, JPEG_RST0 + restart_num, return FALSE); + + /* Re-initialize DC predictions to 0 */ + for (ci = 0; ci < state->cinfo->comps_in_scan; ci++) + state->cur.last_dc_val[ci] = 0; + + /* The restart counter is not updated until we successfully write the MCU. */ + + return TRUE; +} + + +LOCAL(void) +emit_restart_e (huff_entropy_ptr entropy, int restart_num) +{ + int ci; + + emit_eobrun(entropy); + + if (! entropy->gather_statistics) { + flush_bits_e(entropy); + emit_byte_e(entropy, 0xFF); + emit_byte_e(entropy, JPEG_RST0 + restart_num); + } + + if (entropy->cinfo->Ss == 0) { + /* Re-initialize DC predictions to 0 */ + for (ci = 0; ci < entropy->cinfo->comps_in_scan; ci++) + entropy->saved.last_dc_val[ci] = 0; + } else { + /* Re-initialize all AC-related fields to 0 */ + entropy->EOBRUN = 0; + entropy->BE = 0; + } +} + + +/* + * MCU encoding for DC initial scan (either spectral selection, + * or first pass of successive approximation). + */ + +METHODDEF(boolean) +encode_mcu_DC_first (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + register int temp, temp2; + register int nbits; + int blkn, ci; + int Al = cinfo->Al; + JBLOCKROW block; + jpeg_component_info * compptr; + ISHIFT_TEMPS + + entropy->next_output_byte = cinfo->dest->next_output_byte; + entropy->free_in_buffer = cinfo->dest->free_in_buffer; + + /* Emit restart marker if needed */ + if (cinfo->restart_interval) + if (entropy->restarts_to_go == 0) + emit_restart_e(entropy, entropy->next_restart_num); + + /* Encode the MCU data blocks */ + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + block = MCU_data[blkn]; + ci = cinfo->MCU_membership[blkn]; + compptr = cinfo->cur_comp_info[ci]; + + /* Compute the DC value after the required point transform by Al. + * This is simply an arithmetic right shift. + */ + temp2 = IRIGHT_SHIFT((int) ((*block)[0]), Al); + + /* DC differences are figured on the point-transformed values. */ + temp = temp2 - entropy->saved.last_dc_val[ci]; + entropy->saved.last_dc_val[ci] = temp2; + + /* Encode the DC coefficient difference per section G.1.2.1 */ + temp2 = temp; + if (temp < 0) { + temp = -temp; /* temp is abs value of input */ + /* For a negative input, want temp2 = bitwise complement of abs(input) */ + /* This code assumes we are on a two's complement machine */ + temp2--; + } + + /* Find the number of bits needed for the magnitude of the coefficient */ + nbits = 0; + while (temp) { + nbits++; + temp >>= 1; + } + /* Check for out-of-range coefficient values. + * Since we're encoding a difference, the range limit is twice as much. 
+ */ + if (nbits > MAX_COEF_BITS+1) + ERREXIT(cinfo, JERR_BAD_DCT_COEF); + + /* Count/emit the Huffman-coded symbol for the number of bits */ + emit_dc_symbol(entropy, compptr->dc_tbl_no, nbits); + + /* Emit that number of bits of the value, if positive, */ + /* or the complement of its magnitude, if negative. */ + if (nbits) /* emit_bits rejects calls with size 0 */ + emit_bits_e(entropy, (unsigned int) temp2, nbits); + } + + cinfo->dest->next_output_byte = entropy->next_output_byte; + cinfo->dest->free_in_buffer = entropy->free_in_buffer; + + /* Update restart-interval state too */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num++; + entropy->next_restart_num &= 7; + } + entropy->restarts_to_go--; + } + + return TRUE; +} + + +/* + * MCU encoding for AC initial scan (either spectral selection, + * or first pass of successive approximation). + */ + +METHODDEF(boolean) +encode_mcu_AC_first (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + register int temp, temp2; + register int nbits; + register int r, k; + int Se, Al; + const int * natural_order; + JBLOCKROW block; + + entropy->next_output_byte = cinfo->dest->next_output_byte; + entropy->free_in_buffer = cinfo->dest->free_in_buffer; + + /* Emit restart marker if needed */ + if (cinfo->restart_interval) + if (entropy->restarts_to_go == 0) + emit_restart_e(entropy, entropy->next_restart_num); + + Se = cinfo->Se; + Al = cinfo->Al; + natural_order = cinfo->natural_order; + + /* Encode the MCU data block */ + block = MCU_data[0]; + + /* Encode the AC coefficients per section G.1.2.2, fig. G.3 */ + + r = 0; /* r = run length of zeros */ + + for (k = cinfo->Ss; k <= Se; k++) { + if ((temp = (*block)[natural_order[k]]) == 0) { + r++; + continue; + } + /* We must apply the point transform by Al. For AC coefficients this + * is an integer division with rounding towards 0. To do this portably + * in C, we shift after obtaining the absolute value; so the code is + * interwoven with finding the abs value (temp) and output bits (temp2). + */ + if (temp < 0) { + temp = -temp; /* temp is abs value of input */ + temp >>= Al; /* apply the point transform */ + /* For a negative coef, want temp2 = bitwise complement of abs(coef) */ + temp2 = ~temp; + } else { + temp >>= Al; /* apply the point transform */ + temp2 = temp; + } + /* Watch out for case that nonzero coef is zero after point transform */ + if (temp == 0) { + r++; + continue; + } + + /* Emit any pending EOBRUN */ + if (entropy->EOBRUN > 0) + emit_eobrun(entropy); + /* if run length > 15, must emit special run-length-16 codes (0xF0) */ + while (r > 15) { + emit_ac_symbol(entropy, entropy->ac_tbl_no, 0xF0); + r -= 16; + } + + /* Find the number of bits needed for the magnitude of the coefficient */ + nbits = 1; /* there must be at least one 1 bit */ + while ((temp >>= 1)) + nbits++; + /* Check for out-of-range coefficient values */ + if (nbits > MAX_COEF_BITS) + ERREXIT(cinfo, JERR_BAD_DCT_COEF); + + /* Count/emit Huffman symbol for run length / number of bits */ + emit_ac_symbol(entropy, entropy->ac_tbl_no, (r << 4) + nbits); + + /* Emit that number of bits of the value, if positive, */ + /* or the complement of its magnitude, if negative. 
*/ + emit_bits_e(entropy, (unsigned int) temp2, nbits); + + r = 0; /* reset zero run length */ + } + + if (r > 0) { /* If there are trailing zeroes, */ + entropy->EOBRUN++; /* count an EOB */ + if (entropy->EOBRUN == 0x7FFF) + emit_eobrun(entropy); /* force it out to avoid overflow */ + } + + cinfo->dest->next_output_byte = entropy->next_output_byte; + cinfo->dest->free_in_buffer = entropy->free_in_buffer; + + /* Update restart-interval state too */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num++; + entropy->next_restart_num &= 7; + } + entropy->restarts_to_go--; + } + + return TRUE; +} + + +/* + * MCU encoding for DC successive approximation refinement scan. + * Note: we assume such scans can be multi-component, although the spec + * is not very clear on the point. + */ + +METHODDEF(boolean) +encode_mcu_DC_refine (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + register int temp; + int blkn; + int Al = cinfo->Al; + JBLOCKROW block; + + entropy->next_output_byte = cinfo->dest->next_output_byte; + entropy->free_in_buffer = cinfo->dest->free_in_buffer; + + /* Emit restart marker if needed */ + if (cinfo->restart_interval) + if (entropy->restarts_to_go == 0) + emit_restart_e(entropy, entropy->next_restart_num); + + /* Encode the MCU data blocks */ + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + block = MCU_data[blkn]; + + /* We simply emit the Al'th bit of the DC coefficient value. */ + temp = (*block)[0]; + emit_bits_e(entropy, (unsigned int) (temp >> Al), 1); + } + + cinfo->dest->next_output_byte = entropy->next_output_byte; + cinfo->dest->free_in_buffer = entropy->free_in_buffer; + + /* Update restart-interval state too */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num++; + entropy->next_restart_num &= 7; + } + entropy->restarts_to_go--; + } + + return TRUE; +} + + +/* + * MCU encoding for AC successive approximation refinement scan. + */ + +METHODDEF(boolean) +encode_mcu_AC_refine (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + register int temp; + register int r, k; + int EOB; + char *BR_buffer; + unsigned int BR; + int Se, Al; + const int * natural_order; + JBLOCKROW block; + int absvalues[DCTSIZE2]; + + entropy->next_output_byte = cinfo->dest->next_output_byte; + entropy->free_in_buffer = cinfo->dest->free_in_buffer; + + /* Emit restart marker if needed */ + if (cinfo->restart_interval) + if (entropy->restarts_to_go == 0) + emit_restart_e(entropy, entropy->next_restart_num); + + Se = cinfo->Se; + Al = cinfo->Al; + natural_order = cinfo->natural_order; + + /* Encode the MCU data block */ + block = MCU_data[0]; + + /* It is convenient to make a pre-pass to determine the transformed + * coefficients' absolute values and the EOB position. + */ + EOB = 0; + for (k = cinfo->Ss; k <= Se; k++) { + temp = (*block)[natural_order[k]]; + /* We must apply the point transform by Al. For AC coefficients this + * is an integer division with rounding towards 0. To do this portably + * in C, we shift after obtaining the absolute value. 
+ */ + if (temp < 0) + temp = -temp; /* temp is abs value of input */ + temp >>= Al; /* apply the point transform */ + absvalues[k] = temp; /* save abs value for main pass */ + if (temp == 1) + EOB = k; /* EOB = index of last newly-nonzero coef */ + } + + /* Encode the AC coefficients per section G.1.2.3, fig. G.7 */ + + r = 0; /* r = run length of zeros */ + BR = 0; /* BR = count of buffered bits added now */ + BR_buffer = entropy->bit_buffer + entropy->BE; /* Append bits to buffer */ + + for (k = cinfo->Ss; k <= Se; k++) { + if ((temp = absvalues[k]) == 0) { + r++; + continue; + } + + /* Emit any required ZRLs, but not if they can be folded into EOB */ + while (r > 15 && k <= EOB) { + /* emit any pending EOBRUN and the BE correction bits */ + emit_eobrun(entropy); + /* Emit ZRL */ + emit_ac_symbol(entropy, entropy->ac_tbl_no, 0xF0); + r -= 16; + /* Emit buffered correction bits that must be associated with ZRL */ + emit_buffered_bits(entropy, BR_buffer, BR); + BR_buffer = entropy->bit_buffer; /* BE bits are gone now */ + BR = 0; + } + + /* If the coef was previously nonzero, it only needs a correction bit. + * NOTE: a straight translation of the spec's figure G.7 would suggest + * that we also need to test r > 15. But if r > 15, we can only get here + * if k > EOB, which implies that this coefficient is not 1. + */ + if (temp > 1) { + /* The correction bit is the next bit of the absolute value. */ + BR_buffer[BR++] = (char) (temp & 1); + continue; + } + + /* Emit any pending EOBRUN and the BE correction bits */ + emit_eobrun(entropy); + + /* Count/emit Huffman symbol for run length / number of bits */ + emit_ac_symbol(entropy, entropy->ac_tbl_no, (r << 4) + 1); + + /* Emit output bit for newly-nonzero coef */ + temp = ((*block)[natural_order[k]] < 0) ? 0 : 1; + emit_bits_e(entropy, (unsigned int) temp, 1); + + /* Emit buffered correction bits that must be associated with this code */ + emit_buffered_bits(entropy, BR_buffer, BR); + BR_buffer = entropy->bit_buffer; /* BE bits are gone now */ + BR = 0; + r = 0; /* reset zero run length */ + } + + if (r > 0 || BR > 0) { /* If there are trailing zeroes, */ + entropy->EOBRUN++; /* count an EOB */ + entropy->BE += BR; /* concat my correction bits to older ones */ + /* We force out the EOB if we risk either: + * 1. overflow of the EOB counter; + * 2. overflow of the correction bit buffer during the next MCU. 
+ */ + if (entropy->EOBRUN == 0x7FFF || entropy->BE > (MAX_CORR_BITS-DCTSIZE2+1)) + emit_eobrun(entropy); + } + + cinfo->dest->next_output_byte = entropy->next_output_byte; + cinfo->dest->free_in_buffer = entropy->free_in_buffer; + + /* Update restart-interval state too */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num++; + entropy->next_restart_num &= 7; + } + entropy->restarts_to_go--; + } + + return TRUE; +} + + +/* Encode a single block's worth of coefficients */ + +LOCAL(boolean) +encode_one_block (working_state * state, JCOEFPTR block, int last_dc_val, + c_derived_tbl *dctbl, c_derived_tbl *actbl) +{ + register int temp, temp2; + register int nbits; + register int k, r, i; + int Se = state->cinfo->lim_Se; + const int * natural_order = state->cinfo->natural_order; + + /* Encode the DC coefficient difference per section F.1.2.1 */ + + temp = temp2 = block[0] - last_dc_val; + + if (temp < 0) { + temp = -temp; /* temp is abs value of input */ + /* For a negative input, want temp2 = bitwise complement of abs(input) */ + /* This code assumes we are on a two's complement machine */ + temp2--; + } + + /* Find the number of bits needed for the magnitude of the coefficient */ + nbits = 0; + while (temp) { + nbits++; + temp >>= 1; + } + /* Check for out-of-range coefficient values. + * Since we're encoding a difference, the range limit is twice as much. + */ + if (nbits > MAX_COEF_BITS+1) + ERREXIT(state->cinfo, JERR_BAD_DCT_COEF); + + /* Emit the Huffman-coded symbol for the number of bits */ + if (! emit_bits_s(state, dctbl->ehufco[nbits], dctbl->ehufsi[nbits])) + return FALSE; + + /* Emit that number of bits of the value, if positive, */ + /* or the complement of its magnitude, if negative. */ + if (nbits) /* emit_bits rejects calls with size 0 */ + if (! emit_bits_s(state, (unsigned int) temp2, nbits)) + return FALSE; + + /* Encode the AC coefficients per section F.1.2.2 */ + + r = 0; /* r = run length of zeros */ + + for (k = 1; k <= Se; k++) { + if ((temp = block[natural_order[k]]) == 0) { + r++; + } else { + /* if run length > 15, must emit special run-length-16 codes (0xF0) */ + while (r > 15) { + if (! emit_bits_s(state, actbl->ehufco[0xF0], actbl->ehufsi[0xF0])) + return FALSE; + r -= 16; + } + + temp2 = temp; + if (temp < 0) { + temp = -temp; /* temp is abs value of input */ + /* This code assumes we are on a two's complement machine */ + temp2--; + } + + /* Find the number of bits needed for the magnitude of the coefficient */ + nbits = 1; /* there must be at least one 1 bit */ + while ((temp >>= 1)) + nbits++; + /* Check for out-of-range coefficient values */ + if (nbits > MAX_COEF_BITS) + ERREXIT(state->cinfo, JERR_BAD_DCT_COEF); + + /* Emit Huffman symbol for run length / number of bits */ + i = (r << 4) + nbits; + if (! emit_bits_s(state, actbl->ehufco[i], actbl->ehufsi[i])) + return FALSE; + + /* Emit that number of bits of the value, if positive, */ + /* or the complement of its magnitude, if negative. */ + if (! emit_bits_s(state, (unsigned int) temp2, nbits)) + return FALSE; + + r = 0; + } + } + + /* If the last coef(s) were zero, emit an end-of-block code */ + if (r > 0) + if (! emit_bits_s(state, actbl->ehufco[0], actbl->ehufsi[0])) + return FALSE; + + return TRUE; +} + + +/* + * Encode and output one MCU's worth of Huffman-compressed coefficients. 
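 */

/*
 * [Editor's illustration -- not part of jchuff.c]  A sketch of the
 * magnitude-category representation used by encode_one_block above: a
 * coefficient (or DC difference) v is sent as a Huffman-coded category
 * (the bit length of |v|) followed by that many raw bits -- v itself when
 * positive, the one's complement of |v| (i.e. v - 1 truncated to the
 * category width) when negative.  category_and_bits is a hypothetical name.
 */

#include <stdio.h>

static void category_and_bits(int v, int * nbits, unsigned int * bits)
{
  int temp = v, temp2 = v;

  if (temp < 0) {
    temp = -temp;       /* temp = |v| */
    temp2--;            /* temp2 = ~|v| on a two's complement machine */
  }
  *nbits = 0;
  while (temp) {        /* category = number of bits needed for |v| */
    (*nbits)++;
    temp >>= 1;
  }
  /* mask to the category width; for v == 0 no extra bits are emitted */
  *bits = (unsigned int) temp2 & ((1U << *nbits) - 1);
}

int main(void)
{
  static const int samples[] = { 0, 1, -1, 5, -5, 127 };
  int i, nbits;
  unsigned int bits;

  for (i = 0; i < 6; i++) {
    category_and_bits(samples[i], &nbits, &bits);
    printf("%4d -> category %d, extra bits 0x%X\n", samples[i], nbits, bits);
  }
  return 0;
}

/*
 * encode_mcu_huff: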
+ */ + +METHODDEF(boolean) +encode_mcu_huff (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + working_state state; + int blkn, ci; + jpeg_component_info * compptr; + + /* Load up working state */ + state.next_output_byte = cinfo->dest->next_output_byte; + state.free_in_buffer = cinfo->dest->free_in_buffer; + ASSIGN_STATE(state.cur, entropy->saved); + state.cinfo = cinfo; + + /* Emit restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + if (! emit_restart_s(&state, entropy->next_restart_num)) + return FALSE; + } + + /* Encode the MCU data blocks */ + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + ci = cinfo->MCU_membership[blkn]; + compptr = cinfo->cur_comp_info[ci]; + if (! encode_one_block(&state, + MCU_data[blkn][0], state.cur.last_dc_val[ci], + entropy->dc_derived_tbls[compptr->dc_tbl_no], + entropy->ac_derived_tbls[compptr->ac_tbl_no])) + return FALSE; + /* Update last_dc_val */ + state.cur.last_dc_val[ci] = MCU_data[blkn][0][0]; + } + + /* Completed MCU, so update state */ + cinfo->dest->next_output_byte = state.next_output_byte; + cinfo->dest->free_in_buffer = state.free_in_buffer; + ASSIGN_STATE(entropy->saved, state.cur); + + /* Update restart-interval state too */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num++; + entropy->next_restart_num &= 7; + } + entropy->restarts_to_go--; + } + + return TRUE; +} + + +/* + * Finish up at the end of a Huffman-compressed scan. + */ + +METHODDEF(void) +finish_pass_huff (j_compress_ptr cinfo) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + working_state state; + + if (cinfo->progressive_mode) { + entropy->next_output_byte = cinfo->dest->next_output_byte; + entropy->free_in_buffer = cinfo->dest->free_in_buffer; + + /* Flush out any buffered data */ + emit_eobrun(entropy); + flush_bits_e(entropy); + + cinfo->dest->next_output_byte = entropy->next_output_byte; + cinfo->dest->free_in_buffer = entropy->free_in_buffer; + } else { + /* Load up working state ... flush_bits needs it */ + state.next_output_byte = cinfo->dest->next_output_byte; + state.free_in_buffer = cinfo->dest->free_in_buffer; + ASSIGN_STATE(state.cur, entropy->saved); + state.cinfo = cinfo; + + /* Flush out the last data */ + if (! flush_bits_s(&state)) + ERREXIT(cinfo, JERR_CANT_SUSPEND); + + /* Update state */ + cinfo->dest->next_output_byte = state.next_output_byte; + cinfo->dest->free_in_buffer = state.free_in_buffer; + ASSIGN_STATE(entropy->saved, state.cur); + } +} + + +/* + * Huffman coding optimization. + * + * We first scan the supplied data and count the number of uses of each symbol + * that is to be Huffman-coded. (This process MUST agree with the code above.) + * Then we build a Huffman coding tree for the observed counts. + * Symbols which are not needed at all for the particular image are not + * assigned any code, which saves space in the DHT marker as well as in + * the compressed data. 
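 */

/*
 * [Editor's illustration -- not part of jchuff.c]  The statistics-gathering
 * pass described above is what the library runs when an application sets
 * optimize_coding on the compression object.  A minimal usage sketch
 * follows; the output file name, the 64x64 size, and the gray-ramp data
 * are placeholders.
 */

#include <stdio.h>
#include <stdlib.h>
#include "jpeglib.h"

int main(void)
{
  struct jpeg_compress_struct cinfo;
  struct jpeg_error_mgr jerr;
  FILE * outfile;
  JSAMPLE row[64];
  JSAMPROW row_ptr[1];
  int i;

  if ((outfile = fopen("optimized.jpg", "wb")) == NULL)  /* placeholder name */
    return EXIT_FAILURE;

  cinfo.err = jpeg_std_error(&jerr);
  jpeg_create_compress(&cinfo);
  jpeg_stdio_dest(&cinfo, outfile);

  cinfo.image_width = 64;               /* placeholder 64x64 grayscale image */
  cinfo.image_height = 64;
  cinfo.input_components = 1;
  cinfo.in_color_space = JCS_GRAYSCALE;
  jpeg_set_defaults(&cinfo);
  cinfo.optimize_coding = TRUE;         /* request the gather-statistics pass */

  jpeg_start_compress(&cinfo, TRUE);
  for (i = 0; i < 64; i++)
    row[i] = (JSAMPLE) (i * 4);         /* simple gray ramp */
  row_ptr[0] = row;
  while (cinfo.next_scanline < cinfo.image_height)
    (void) jpeg_write_scanlines(&cinfo, row_ptr, 1);

  jpeg_finish_compress(&cinfo);
  jpeg_destroy_compress(&cinfo);
  fclose(outfile);
  return EXIT_SUCCESS;
}

/*
 * Statistics-gathering implementation: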
+ */ + + +/* Process a single block's worth of coefficients */ + +LOCAL(void) +htest_one_block (j_compress_ptr cinfo, JCOEFPTR block, int last_dc_val, + long dc_counts[], long ac_counts[]) +{ + register int temp; + register int nbits; + register int k, r; + int Se = cinfo->lim_Se; + const int * natural_order = cinfo->natural_order; + + /* Encode the DC coefficient difference per section F.1.2.1 */ + + temp = block[0] - last_dc_val; + if (temp < 0) + temp = -temp; + + /* Find the number of bits needed for the magnitude of the coefficient */ + nbits = 0; + while (temp) { + nbits++; + temp >>= 1; + } + /* Check for out-of-range coefficient values. + * Since we're encoding a difference, the range limit is twice as much. + */ + if (nbits > MAX_COEF_BITS+1) + ERREXIT(cinfo, JERR_BAD_DCT_COEF); + + /* Count the Huffman symbol for the number of bits */ + dc_counts[nbits]++; + + /* Encode the AC coefficients per section F.1.2.2 */ + + r = 0; /* r = run length of zeros */ + + for (k = 1; k <= Se; k++) { + if ((temp = block[natural_order[k]]) == 0) { + r++; + } else { + /* if run length > 15, must emit special run-length-16 codes (0xF0) */ + while (r > 15) { + ac_counts[0xF0]++; + r -= 16; + } + + /* Find the number of bits needed for the magnitude of the coefficient */ + if (temp < 0) + temp = -temp; + + /* Find the number of bits needed for the magnitude of the coefficient */ + nbits = 1; /* there must be at least one 1 bit */ + while ((temp >>= 1)) + nbits++; + /* Check for out-of-range coefficient values */ + if (nbits > MAX_COEF_BITS) + ERREXIT(cinfo, JERR_BAD_DCT_COEF); + + /* Count Huffman symbol for run length / number of bits */ + ac_counts[(r << 4) + nbits]++; + + r = 0; + } + } + + /* If the last coef(s) were zero, emit an end-of-block code */ + if (r > 0) + ac_counts[0]++; +} + + +/* + * Trial-encode one MCU's worth of Huffman-compressed coefficients. + * No data is actually output, so no suspension return is possible. + */ + +METHODDEF(boolean) +encode_mcu_gather (j_compress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + int blkn, ci; + jpeg_component_info * compptr; + + /* Take care of restart intervals if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) { + /* Re-initialize DC predictions to 0 */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) + entropy->saved.last_dc_val[ci] = 0; + /* Update restart state */ + entropy->restarts_to_go = cinfo->restart_interval; + } + entropy->restarts_to_go--; + } + + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + ci = cinfo->MCU_membership[blkn]; + compptr = cinfo->cur_comp_info[ci]; + htest_one_block(cinfo, MCU_data[blkn][0], entropy->saved.last_dc_val[ci], + entropy->dc_count_ptrs[compptr->dc_tbl_no], + entropy->ac_count_ptrs[compptr->ac_tbl_no]); + entropy->saved.last_dc_val[ci] = MCU_data[blkn][0][0]; + } + + return TRUE; +} + + +/* + * Generate the best Huffman code table for the given counts, fill htbl. + * + * The JPEG standard requires that no symbol be assigned a codeword of all + * one bits (so that padding bits added at the end of a compressed segment + * can't look like a valid code). Because of the canonical ordering of + * codewords, this just means that there must be an unused slot in the + * longest codeword length category. Section K.2 of the JPEG spec suggests + * reserving such a slot by pretending that symbol 256 is a valid symbol + * with count 1. 
In theory that's not optimal; giving it count zero but + * including it in the symbol set anyway should give a better Huffman code. + * But the theoretically better code actually seems to come out worse in + * practice, because it produces more all-ones bytes (which incur stuffed + * zero bytes in the final file). In any case the difference is tiny. + * + * The JPEG standard requires Huffman codes to be no more than 16 bits long. + * If some symbols have a very small but nonzero probability, the Huffman tree + * must be adjusted to meet the code length restriction. We currently use + * the adjustment method suggested in JPEG section K.2. This method is *not* + * optimal; it may not choose the best possible limited-length code. But + * typically only very-low-frequency symbols will be given less-than-optimal + * lengths, so the code is almost optimal. Experimental comparisons against + * an optimal limited-length-code algorithm indicate that the difference is + * microscopic --- usually less than a hundredth of a percent of total size. + * So the extra complexity of an optimal algorithm doesn't seem worthwhile. + */ + +LOCAL(void) +jpeg_gen_optimal_table (j_compress_ptr cinfo, JHUFF_TBL * htbl, long freq[]) +{ +#define MAX_CLEN 32 /* assumed maximum initial code length */ + UINT8 bits[MAX_CLEN+1]; /* bits[k] = # of symbols with code length k */ + int codesize[257]; /* codesize[k] = code length of symbol k */ + int others[257]; /* next symbol in current branch of tree */ + int c1, c2; + int p, i, j; + long v; + + /* This algorithm is explained in section K.2 of the JPEG standard */ + + MEMZERO(bits, SIZEOF(bits)); + MEMZERO(codesize, SIZEOF(codesize)); + for (i = 0; i < 257; i++) + others[i] = -1; /* init links to empty */ + + freq[256] = 1; /* make sure 256 has a nonzero count */ + /* Including the pseudo-symbol 256 in the Huffman procedure guarantees + * that no real symbol is given code-value of all ones, because 256 + * will be placed last in the largest codeword category. + */ + + /* Huffman's basic algorithm to assign optimal code lengths to symbols */ + + for (;;) { + /* Find the smallest nonzero frequency, set c1 = its symbol */ + /* In case of ties, take the larger symbol number */ + c1 = -1; + v = 1000000000L; + for (i = 0; i <= 256; i++) { + if (freq[i] && freq[i] <= v) { + v = freq[i]; + c1 = i; + } + } + + /* Find the next smallest nonzero frequency, set c2 = its symbol */ + /* In case of ties, take the larger symbol number */ + c2 = -1; + v = 1000000000L; + for (i = 0; i <= 256; i++) { + if (freq[i] && freq[i] <= v && i != c1) { + v = freq[i]; + c2 = i; + } + } + + /* Done if we've merged everything into one frequency */ + if (c2 < 0) + break; + + /* Else merge the two counts/trees */ + freq[c1] += freq[c2]; + freq[c2] = 0; + + /* Increment the codesize of everything in c1's tree branch */ + codesize[c1]++; + while (others[c1] >= 0) { + c1 = others[c1]; + codesize[c1]++; + } + + others[c1] = c2; /* chain c2 onto c1's tree branch */ + + /* Increment the codesize of everything in c2's tree branch */ + codesize[c2]++; + while (others[c2] >= 0) { + c2 = others[c2]; + codesize[c2]++; + } + } + + /* Now count the number of symbols of each code length */ + for (i = 0; i <= 256; i++) { + if (codesize[i]) { + /* The JPEG standard seems to think that this can't happen, */ + /* but I'm paranoid... 
*/ + if (codesize[i] > MAX_CLEN) + ERREXIT(cinfo, JERR_HUFF_CLEN_OVERFLOW); + + bits[codesize[i]]++; + } + } + + /* JPEG doesn't allow symbols with code lengths over 16 bits, so if the pure + * Huffman procedure assigned any such lengths, we must adjust the coding. + * Here is what the JPEG spec says about how this next bit works: + * Since symbols are paired for the longest Huffman code, the symbols are + * removed from this length category two at a time. The prefix for the pair + * (which is one bit shorter) is allocated to one of the pair; then, + * skipping the BITS entry for that prefix length, a code word from the next + * shortest nonzero BITS entry is converted into a prefix for two code words + * one bit longer. + */ + + for (i = MAX_CLEN; i > 16; i--) { + while (bits[i] > 0) { + j = i - 2; /* find length of new prefix to be used */ + while (bits[j] == 0) + j--; + + bits[i] -= 2; /* remove two symbols */ + bits[i-1]++; /* one goes in this length */ + bits[j+1] += 2; /* two new symbols in this length */ + bits[j]--; /* symbol of this length is now a prefix */ + } + } + + /* Remove the count for the pseudo-symbol 256 from the largest codelength */ + while (bits[i] == 0) /* find largest codelength still in use */ + i--; + bits[i]--; + + /* Return final symbol counts (only for lengths 0..16) */ + MEMCOPY(htbl->bits, bits, SIZEOF(htbl->bits)); + + /* Return a list of the symbols sorted by code length */ + /* It's not real clear to me why we don't need to consider the codelength + * changes made above, but the JPEG spec seems to think this works. + */ + p = 0; + for (i = 1; i <= MAX_CLEN; i++) { + for (j = 0; j <= 255; j++) { + if (codesize[j] == i) { + htbl->huffval[p] = (UINT8) j; + p++; + } + } + } + + /* Set sent_table FALSE so updated table will be written to JPEG file. */ + htbl->sent_table = FALSE; +} + + +/* + * Finish up a statistics-gathering pass and create the new Huffman tables. + */ + +METHODDEF(void) +finish_pass_gather (j_compress_ptr cinfo) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + int ci, tbl; + jpeg_component_info * compptr; + JHUFF_TBL **htblptr; + boolean did_dc[NUM_HUFF_TBLS]; + boolean did_ac[NUM_HUFF_TBLS]; + + /* It's important not to apply jpeg_gen_optimal_table more than once + * per table, because it clobbers the input frequency counts! + */ + if (cinfo->progressive_mode) + /* Flush out buffered data (all we care about is counting the EOB symbol) */ + emit_eobrun(entropy); + + MEMZERO(did_dc, SIZEOF(did_dc)); + MEMZERO(did_ac, SIZEOF(did_ac)); + + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + /* DC needs no table for refinement scan */ + if (cinfo->Ss == 0 && cinfo->Ah == 0) { + tbl = compptr->dc_tbl_no; + if (! did_dc[tbl]) { + htblptr = & cinfo->dc_huff_tbl_ptrs[tbl]; + if (*htblptr == NULL) + *htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo); + jpeg_gen_optimal_table(cinfo, *htblptr, entropy->dc_count_ptrs[tbl]); + did_dc[tbl] = TRUE; + } + } + /* AC needs no table when not present */ + if (cinfo->Se) { + tbl = compptr->ac_tbl_no; + if (! did_ac[tbl]) { + htblptr = & cinfo->ac_huff_tbl_ptrs[tbl]; + if (*htblptr == NULL) + *htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo); + jpeg_gen_optimal_table(cinfo, *htblptr, entropy->ac_count_ptrs[tbl]); + did_ac[tbl] = TRUE; + } + } + } +} + + +/* + * Initialize for a Huffman-compressed scan. 
+ * If gather_statistics is TRUE, we do not output anything during the scan, + * just count the Huffman symbols used and generate Huffman code tables. + */ + +METHODDEF(void) +start_pass_huff (j_compress_ptr cinfo, boolean gather_statistics) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + int ci, tbl; + jpeg_component_info * compptr; + + if (gather_statistics) + entropy->pub.finish_pass = finish_pass_gather; + else + entropy->pub.finish_pass = finish_pass_huff; + + if (cinfo->progressive_mode) { + entropy->cinfo = cinfo; + entropy->gather_statistics = gather_statistics; + + /* We assume jcmaster.c already validated the scan parameters. */ + + /* Select execution routine */ + if (cinfo->Ah == 0) { + if (cinfo->Ss == 0) + entropy->pub.encode_mcu = encode_mcu_DC_first; + else + entropy->pub.encode_mcu = encode_mcu_AC_first; + } else { + if (cinfo->Ss == 0) + entropy->pub.encode_mcu = encode_mcu_DC_refine; + else { + entropy->pub.encode_mcu = encode_mcu_AC_refine; + /* AC refinement needs a correction bit buffer */ + if (entropy->bit_buffer == NULL) + entropy->bit_buffer = (char *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + MAX_CORR_BITS * SIZEOF(char)); + } + } + + /* Initialize AC stuff */ + entropy->ac_tbl_no = cinfo->cur_comp_info[0]->ac_tbl_no; + entropy->EOBRUN = 0; + entropy->BE = 0; + } else { + if (gather_statistics) + entropy->pub.encode_mcu = encode_mcu_gather; + else + entropy->pub.encode_mcu = encode_mcu_huff; + } + + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + /* DC needs no table for refinement scan */ + if (cinfo->Ss == 0 && cinfo->Ah == 0) { + tbl = compptr->dc_tbl_no; + if (gather_statistics) { + /* Check for invalid table index */ + /* (make_c_derived_tbl does this in the other path) */ + if (tbl < 0 || tbl >= NUM_HUFF_TBLS) + ERREXIT1(cinfo, JERR_NO_HUFF_TABLE, tbl); + /* Allocate and zero the statistics tables */ + /* Note that jpeg_gen_optimal_table expects 257 entries in each table! */ + if (entropy->dc_count_ptrs[tbl] == NULL) + entropy->dc_count_ptrs[tbl] = (long *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + 257 * SIZEOF(long)); + MEMZERO(entropy->dc_count_ptrs[tbl], 257 * SIZEOF(long)); + } else { + /* Compute derived values for Huffman tables */ + /* We may do this more than once for a table, but it's not expensive */ + jpeg_make_c_derived_tbl(cinfo, TRUE, tbl, + & entropy->dc_derived_tbls[tbl]); + } + /* Initialize DC predictions to 0 */ + entropy->saved.last_dc_val[ci] = 0; + } + /* AC needs no table when not present */ + if (cinfo->Se) { + tbl = compptr->ac_tbl_no; + if (gather_statistics) { + if (tbl < 0 || tbl >= NUM_HUFF_TBLS) + ERREXIT1(cinfo, JERR_NO_HUFF_TABLE, tbl); + if (entropy->ac_count_ptrs[tbl] == NULL) + entropy->ac_count_ptrs[tbl] = (long *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + 257 * SIZEOF(long)); + MEMZERO(entropy->ac_count_ptrs[tbl], 257 * SIZEOF(long)); + } else { + jpeg_make_c_derived_tbl(cinfo, FALSE, tbl, + & entropy->ac_derived_tbls[tbl]); + } + } + } + + /* Initialize bit buffer to empty */ + entropy->saved.put_buffer = 0; + entropy->saved.put_bits = 0; + + /* Initialize restart stuff */ + entropy->restarts_to_go = cinfo->restart_interval; + entropy->next_restart_num = 0; +} + + +/* + * Module initialization routine for Huffman entropy encoding. 
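 */

/*
 * [Editor's illustration -- not part of jchuff.c]  start_pass_huff above
 * chooses among encode_mcu_DC_first / AC_first / DC_refine / AC_refine
 * purely from the scan header fields Ss, Se, Ah and Al.  The scan script
 * below is one example of how those fields arise on the application side,
 * for a single-component (grayscale) image; it is not the library default
 * (jpeg_simple_progression installs that).  demo_scans and
 * use_demo_scan_script are hypothetical names.
 */

#include <stdio.h>
#include "jpeglib.h"

static const jpeg_scan_info demo_scans[4] = {
  /* comps, comp idx,  Ss, Se, Ah, Al */
  {  1,     { 0 },      0,  0,  0,  0 },  /* DC scan    -> encode_mcu_DC_first  */
  {  1,     { 0 },      1,  5,  0,  1 },  /* AC 1..5    -> encode_mcu_AC_first  */
  {  1,     { 0 },      6, 63,  0,  1 },  /* AC 6..63   -> encode_mcu_AC_first  */
  {  1,     { 0 },      1, 63,  1,  0 },  /* AC refine  -> encode_mcu_AC_refine */
};

/* Attach the script to an initialized compression object; call this after
 * jpeg_set_defaults() and before jpeg_start_compress(). */
void use_demo_scan_script(j_compress_ptr cinfo)
{
  cinfo->scan_info = demo_scans;
  cinfo->num_scans = 4;
}

/*
 * jinit_huff_encoder: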
+ */ + +GLOBAL(void) +jinit_huff_encoder (j_compress_ptr cinfo) +{ + huff_entropy_ptr entropy; + int i; + + entropy = (huff_entropy_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(huff_entropy_encoder)); + cinfo->entropy = (struct jpeg_entropy_encoder *) entropy; + entropy->pub.start_pass = start_pass_huff; + + /* Mark tables unallocated */ + for (i = 0; i < NUM_HUFF_TBLS; i++) { + entropy->dc_derived_tbls[i] = entropy->ac_derived_tbls[i] = NULL; + entropy->dc_count_ptrs[i] = entropy->ac_count_ptrs[i] = NULL; + } + + if (cinfo->progressive_mode) + entropy->bit_buffer = NULL; /* needed only in AC refinement scan */ +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcinit.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcinit.c new file mode 100644 index 000000000..0ba310f21 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcinit.c @@ -0,0 +1,65 @@ +/* + * jcinit.c + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains initialization logic for the JPEG compressor. + * This routine is in charge of selecting the modules to be executed and + * making an initialization call to each one. + * + * Logically, this code belongs in jcmaster.c. It's split out because + * linking this routine implies linking the entire compression library. + * For a transcoding-only application, we want to be able to use jcmaster.c + * without linking in the whole library. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* + * Master selection of compression modules. + * This is done once at the start of processing an image. We determine + * which modules will be used and give them appropriate initialization calls. + */ + +GLOBAL(void) +jinit_compress_master (j_compress_ptr cinfo) +{ + /* Initialize master control (includes parameter checking/processing) */ + jinit_c_master_control(cinfo, FALSE /* full compression */); + + /* Preprocessing */ + if (! cinfo->raw_data_in) { + jinit_color_converter(cinfo); + jinit_downsampler(cinfo); + jinit_c_prep_controller(cinfo, FALSE /* never need full buffer here */); + } + /* Forward DCT */ + jinit_forward_dct(cinfo); + /* Entropy encoding: either Huffman or arithmetic coding. */ + if (cinfo->arith_code) + jinit_arith_encoder(cinfo); + else { + jinit_huff_encoder(cinfo); + } + + /* Need a full-image coefficient buffer in any multi-pass mode. */ + jinit_c_coef_controller(cinfo, + (boolean) (cinfo->num_scans > 1 || cinfo->optimize_coding)); + jinit_c_main_controller(cinfo, FALSE /* never need full buffer here */); + + jinit_marker_writer(cinfo); + + /* We can now tell the memory manager to allocate virtual arrays. */ + (*cinfo->mem->realize_virt_arrays) ((j_common_ptr) cinfo); + + /* Write the datastream header (SOI) immediately. + * Frame and scan headers are postponed till later. + * This lets application insert special markers after the SOI. 
+ */ + (*cinfo->marker->write_file_header) (cinfo); +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcmainct.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcmainct.c new file mode 100644 index 000000000..7de75d167 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcmainct.c @@ -0,0 +1,293 @@ +/* + * jcmainct.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains the main buffer controller for compression. + * The main buffer lies between the pre-processor and the JPEG + * compressor proper; it holds downsampled data in the JPEG colorspace. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Note: currently, there is no operating mode in which a full-image buffer + * is needed at this step. If there were, that mode could not be used with + * "raw data" input, since this module is bypassed in that case. However, + * we've left the code here for possible use in special applications. + */ +#undef FULL_MAIN_BUFFER_SUPPORTED + + +/* Private buffer controller object */ + +typedef struct { + struct jpeg_c_main_controller pub; /* public fields */ + + JDIMENSION cur_iMCU_row; /* number of current iMCU row */ + JDIMENSION rowgroup_ctr; /* counts row groups received in iMCU row */ + boolean suspended; /* remember if we suspended output */ + J_BUF_MODE pass_mode; /* current operating mode */ + + /* If using just a strip buffer, this points to the entire set of buffers + * (we allocate one for each component). In the full-image case, this + * points to the currently accessible strips of the virtual arrays. + */ + JSAMPARRAY buffer[MAX_COMPONENTS]; + +#ifdef FULL_MAIN_BUFFER_SUPPORTED + /* If using full-image storage, this array holds pointers to virtual-array + * control blocks for each component. Unused if not full-image storage. + */ + jvirt_sarray_ptr whole_image[MAX_COMPONENTS]; +#endif +} my_main_controller; + +typedef my_main_controller * my_main_ptr; + + +/* Forward declarations */ +METHODDEF(void) process_data_simple_main + JPP((j_compress_ptr cinfo, JSAMPARRAY input_buf, + JDIMENSION *in_row_ctr, JDIMENSION in_rows_avail)); +#ifdef FULL_MAIN_BUFFER_SUPPORTED +METHODDEF(void) process_data_buffer_main + JPP((j_compress_ptr cinfo, JSAMPARRAY input_buf, + JDIMENSION *in_row_ctr, JDIMENSION in_rows_avail)); +#endif + + +/* + * Initialize for a processing pass. + */ + +METHODDEF(void) +start_pass_main (j_compress_ptr cinfo, J_BUF_MODE pass_mode) +{ + my_main_ptr main = (my_main_ptr) cinfo->main; + + /* Do nothing in raw-data mode. 
*/ + if (cinfo->raw_data_in) + return; + + main->cur_iMCU_row = 0; /* initialize counters */ + main->rowgroup_ctr = 0; + main->suspended = FALSE; + main->pass_mode = pass_mode; /* save mode for use by process_data */ + + switch (pass_mode) { + case JBUF_PASS_THRU: +#ifdef FULL_MAIN_BUFFER_SUPPORTED + if (main->whole_image[0] != NULL) + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); +#endif + main->pub.process_data = process_data_simple_main; + break; +#ifdef FULL_MAIN_BUFFER_SUPPORTED + case JBUF_SAVE_SOURCE: + case JBUF_CRANK_DEST: + case JBUF_SAVE_AND_PASS: + if (main->whole_image[0] == NULL) + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + main->pub.process_data = process_data_buffer_main; + break; +#endif + default: + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + break; + } +} + + +/* + * Process some data. + * This routine handles the simple pass-through mode, + * where we have only a strip buffer. + */ + +METHODDEF(void) +process_data_simple_main (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JDIMENSION *in_row_ctr, + JDIMENSION in_rows_avail) +{ + my_main_ptr main = (my_main_ptr) cinfo->main; + + while (main->cur_iMCU_row < cinfo->total_iMCU_rows) { + /* Read input data if we haven't filled the main buffer yet */ + if (main->rowgroup_ctr < (JDIMENSION) cinfo->min_DCT_v_scaled_size) + (*cinfo->prep->pre_process_data) (cinfo, + input_buf, in_row_ctr, in_rows_avail, + main->buffer, &main->rowgroup_ctr, + (JDIMENSION) cinfo->min_DCT_v_scaled_size); + + /* If we don't have a full iMCU row buffered, return to application for + * more data. Note that preprocessor will always pad to fill the iMCU row + * at the bottom of the image. + */ + if (main->rowgroup_ctr != (JDIMENSION) cinfo->min_DCT_v_scaled_size) + return; + + /* Send the completed row to the compressor */ + if (! (*cinfo->coef->compress_data) (cinfo, main->buffer)) { + /* If compressor did not consume the whole row, then we must need to + * suspend processing and return to the application. In this situation + * we pretend we didn't yet consume the last input row; otherwise, if + * it happened to be the last row of the image, the application would + * think we were done. + */ + if (! main->suspended) { + (*in_row_ctr)--; + main->suspended = TRUE; + } + return; + } + /* We did finish the row. Undo our little suspension hack if a previous + * call suspended; then mark the main buffer empty. + */ + if (main->suspended) { + (*in_row_ctr)++; + main->suspended = FALSE; + } + main->rowgroup_ctr = 0; + main->cur_iMCU_row++; + } +} + + +#ifdef FULL_MAIN_BUFFER_SUPPORTED + +/* + * Process some data. + * This routine handles all of the modes that use a full-size buffer. + */ + +METHODDEF(void) +process_data_buffer_main (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JDIMENSION *in_row_ctr, + JDIMENSION in_rows_avail) +{ + my_main_ptr main = (my_main_ptr) cinfo->main; + int ci; + jpeg_component_info *compptr; + boolean writing = (main->pass_mode != JBUF_CRANK_DEST); + + while (main->cur_iMCU_row < cinfo->total_iMCU_rows) { + /* Realign the virtual buffers if at the start of an iMCU row. */ + if (main->rowgroup_ctr == 0) { + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + main->buffer[ci] = (*cinfo->mem->access_virt_sarray) + ((j_common_ptr) cinfo, main->whole_image[ci], + main->cur_iMCU_row * (compptr->v_samp_factor * DCTSIZE), + (JDIMENSION) (compptr->v_samp_factor * DCTSIZE), writing); + } + /* In a read pass, pretend we just read some source data. */ + if (! 
writing) { + *in_row_ctr += cinfo->max_v_samp_factor * DCTSIZE; + main->rowgroup_ctr = DCTSIZE; + } + } + + /* If a write pass, read input data until the current iMCU row is full. */ + /* Note: preprocessor will pad if necessary to fill the last iMCU row. */ + if (writing) { + (*cinfo->prep->pre_process_data) (cinfo, + input_buf, in_row_ctr, in_rows_avail, + main->buffer, &main->rowgroup_ctr, + (JDIMENSION) DCTSIZE); + /* Return to application if we need more data to fill the iMCU row. */ + if (main->rowgroup_ctr < DCTSIZE) + return; + } + + /* Emit data, unless this is a sink-only pass. */ + if (main->pass_mode != JBUF_SAVE_SOURCE) { + if (! (*cinfo->coef->compress_data) (cinfo, main->buffer)) { + /* If compressor did not consume the whole row, then we must need to + * suspend processing and return to the application. In this situation + * we pretend we didn't yet consume the last input row; otherwise, if + * it happened to be the last row of the image, the application would + * think we were done. + */ + if (! main->suspended) { + (*in_row_ctr)--; + main->suspended = TRUE; + } + return; + } + /* We did finish the row. Undo our little suspension hack if a previous + * call suspended; then mark the main buffer empty. + */ + if (main->suspended) { + (*in_row_ctr)++; + main->suspended = FALSE; + } + } + + /* If get here, we are done with this iMCU row. Mark buffer empty. */ + main->rowgroup_ctr = 0; + main->cur_iMCU_row++; + } +} + +#endif /* FULL_MAIN_BUFFER_SUPPORTED */ + + +/* + * Initialize main buffer controller. + */ + +GLOBAL(void) +jinit_c_main_controller (j_compress_ptr cinfo, boolean need_full_buffer) +{ + my_main_ptr main; + int ci; + jpeg_component_info *compptr; + + main = (my_main_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_main_controller)); + cinfo->main = (struct jpeg_c_main_controller *) main; + main->pub.start_pass = start_pass_main; + + /* We don't need to create a buffer in raw-data mode. */ + if (cinfo->raw_data_in) + return; + + /* Create the buffer. It holds downsampled data, so each component + * may be of a different size. 
+ */ + if (need_full_buffer) { +#ifdef FULL_MAIN_BUFFER_SUPPORTED + /* Allocate a full-image virtual array for each component */ + /* Note we pad the bottom to a multiple of the iMCU height */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + main->whole_image[ci] = (*cinfo->mem->request_virt_sarray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, FALSE, + compptr->width_in_blocks * compptr->DCT_h_scaled_size, + (JDIMENSION) jround_up((long) compptr->height_in_blocks, + (long) compptr->v_samp_factor) * DCTSIZE, + (JDIMENSION) (compptr->v_samp_factor * compptr->DCT_v_scaled_size)); + } +#else + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); +#endif + } else { +#ifdef FULL_MAIN_BUFFER_SUPPORTED + main->whole_image[0] = NULL; /* flag for no virtual arrays */ +#endif + /* Allocate a strip buffer for each component */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + main->buffer[ci] = (*cinfo->mem->alloc_sarray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + compptr->width_in_blocks * compptr->DCT_h_scaled_size, + (JDIMENSION) (compptr->v_samp_factor * compptr->DCT_v_scaled_size)); + } + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcmarker.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcmarker.c new file mode 100644 index 000000000..606c19af3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcmarker.c @@ -0,0 +1,682 @@ +/* + * jcmarker.c + * + * Copyright (C) 1991-1998, Thomas G. Lane. + * Modified 2003-2010 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains routines to write JPEG datastream markers. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +typedef enum { /* JPEG marker codes */ + M_SOF0 = 0xc0, + M_SOF1 = 0xc1, + M_SOF2 = 0xc2, + M_SOF3 = 0xc3, + + M_SOF5 = 0xc5, + M_SOF6 = 0xc6, + M_SOF7 = 0xc7, + + M_JPG = 0xc8, + M_SOF9 = 0xc9, + M_SOF10 = 0xca, + M_SOF11 = 0xcb, + + M_SOF13 = 0xcd, + M_SOF14 = 0xce, + M_SOF15 = 0xcf, + + M_DHT = 0xc4, + + M_DAC = 0xcc, + + M_RST0 = 0xd0, + M_RST1 = 0xd1, + M_RST2 = 0xd2, + M_RST3 = 0xd3, + M_RST4 = 0xd4, + M_RST5 = 0xd5, + M_RST6 = 0xd6, + M_RST7 = 0xd7, + + M_SOI = 0xd8, + M_EOI = 0xd9, + M_SOS = 0xda, + M_DQT = 0xdb, + M_DNL = 0xdc, + M_DRI = 0xdd, + M_DHP = 0xde, + M_EXP = 0xdf, + + M_APP0 = 0xe0, + M_APP1 = 0xe1, + M_APP2 = 0xe2, + M_APP3 = 0xe3, + M_APP4 = 0xe4, + M_APP5 = 0xe5, + M_APP6 = 0xe6, + M_APP7 = 0xe7, + M_APP8 = 0xe8, + M_APP9 = 0xe9, + M_APP10 = 0xea, + M_APP11 = 0xeb, + M_APP12 = 0xec, + M_APP13 = 0xed, + M_APP14 = 0xee, + M_APP15 = 0xef, + + M_JPG0 = 0xf0, + M_JPG13 = 0xfd, + M_COM = 0xfe, + + M_TEM = 0x01, + + M_ERROR = 0x100 +} JPEG_MARKER; + + +/* Private state */ + +typedef struct { + struct jpeg_marker_writer pub; /* public fields */ + + unsigned int last_restart_interval; /* last DRI value emitted; 0 after SOI */ +} my_marker_writer; + +typedef my_marker_writer * my_marker_ptr; + + +/* + * Basic output routines. + * + * Note that we do not support suspension while writing a marker. + * Therefore, an application using suspension must ensure that there is + * enough buffer space for the initial markers (typ. 600-700 bytes) before + * calling jpeg_start_compress, and enough space to write the trailing EOI + * (a few bytes) before calling jpeg_finish_compress. 
Multipass compression + * modes are not supported at all with suspension, so those two are the only + * points where markers will be written. + */ + +LOCAL(void) +emit_byte (j_compress_ptr cinfo, int val) +/* Emit a byte */ +{ + struct jpeg_destination_mgr * dest = cinfo->dest; + + *(dest->next_output_byte)++ = (JOCTET) val; + if (--dest->free_in_buffer == 0) { + if (! (*dest->empty_output_buffer) (cinfo)) + ERREXIT(cinfo, JERR_CANT_SUSPEND); + } +} + + +LOCAL(void) +emit_marker (j_compress_ptr cinfo, JPEG_MARKER mark) +/* Emit a marker code */ +{ + emit_byte(cinfo, 0xFF); + emit_byte(cinfo, (int) mark); +} + + +LOCAL(void) +emit_2bytes (j_compress_ptr cinfo, int value) +/* Emit a 2-byte integer; these are always MSB first in JPEG files */ +{ + emit_byte(cinfo, (value >> 8) & 0xFF); + emit_byte(cinfo, value & 0xFF); +} + + +/* + * Routines to write specific marker types. + */ + +LOCAL(int) +emit_dqt (j_compress_ptr cinfo, int index) +/* Emit a DQT marker */ +/* Returns the precision used (0 = 8bits, 1 = 16bits) for baseline checking */ +{ + JQUANT_TBL * qtbl = cinfo->quant_tbl_ptrs[index]; + int prec; + int i; + + if (qtbl == NULL) + ERREXIT1(cinfo, JERR_NO_QUANT_TABLE, index); + + prec = 0; + for (i = 0; i <= cinfo->lim_Se; i++) { + if (qtbl->quantval[cinfo->natural_order[i]] > 255) + prec = 1; + } + + if (! qtbl->sent_table) { + emit_marker(cinfo, M_DQT); + + emit_2bytes(cinfo, + prec ? cinfo->lim_Se * 2 + 2 + 1 + 2 : cinfo->lim_Se + 1 + 1 + 2); + + emit_byte(cinfo, index + (prec<<4)); + + for (i = 0; i <= cinfo->lim_Se; i++) { + /* The table entries must be emitted in zigzag order. */ + unsigned int qval = qtbl->quantval[cinfo->natural_order[i]]; + if (prec) + emit_byte(cinfo, (int) (qval >> 8)); + emit_byte(cinfo, (int) (qval & 0xFF)); + } + + qtbl->sent_table = TRUE; + } + + return prec; +} + + +LOCAL(void) +emit_dht (j_compress_ptr cinfo, int index, boolean is_ac) +/* Emit a DHT marker */ +{ + JHUFF_TBL * htbl; + int length, i; + + if (is_ac) { + htbl = cinfo->ac_huff_tbl_ptrs[index]; + index += 0x10; /* output index has AC bit set */ + } else { + htbl = cinfo->dc_huff_tbl_ptrs[index]; + } + + if (htbl == NULL) + ERREXIT1(cinfo, JERR_NO_HUFF_TABLE, index); + + if (! htbl->sent_table) { + emit_marker(cinfo, M_DHT); + + length = 0; + for (i = 1; i <= 16; i++) + length += htbl->bits[i]; + + emit_2bytes(cinfo, length + 2 + 1 + 16); + emit_byte(cinfo, index); + + for (i = 1; i <= 16; i++) + emit_byte(cinfo, htbl->bits[i]); + + for (i = 0; i < length; i++) + emit_byte(cinfo, htbl->huffval[i]); + + htbl->sent_table = TRUE; + } +} + + +LOCAL(void) +emit_dac (j_compress_ptr cinfo) +/* Emit a DAC marker */ +/* Since the useful info is so small, we want to emit all the tables in */ +/* one DAC marker. Therefore this routine does its own scan of the table. 
*/ +{ +#ifdef C_ARITH_CODING_SUPPORTED + char dc_in_use[NUM_ARITH_TBLS]; + char ac_in_use[NUM_ARITH_TBLS]; + int length, i; + jpeg_component_info *compptr; + + for (i = 0; i < NUM_ARITH_TBLS; i++) + dc_in_use[i] = ac_in_use[i] = 0; + + for (i = 0; i < cinfo->comps_in_scan; i++) { + compptr = cinfo->cur_comp_info[i]; + /* DC needs no table for refinement scan */ + if (cinfo->Ss == 0 && cinfo->Ah == 0) + dc_in_use[compptr->dc_tbl_no] = 1; + /* AC needs no table when not present */ + if (cinfo->Se) + ac_in_use[compptr->ac_tbl_no] = 1; + } + + length = 0; + for (i = 0; i < NUM_ARITH_TBLS; i++) + length += dc_in_use[i] + ac_in_use[i]; + + if (length) { + emit_marker(cinfo, M_DAC); + + emit_2bytes(cinfo, length*2 + 2); + + for (i = 0; i < NUM_ARITH_TBLS; i++) { + if (dc_in_use[i]) { + emit_byte(cinfo, i); + emit_byte(cinfo, cinfo->arith_dc_L[i] + (cinfo->arith_dc_U[i]<<4)); + } + if (ac_in_use[i]) { + emit_byte(cinfo, i + 0x10); + emit_byte(cinfo, cinfo->arith_ac_K[i]); + } + } + } +#endif /* C_ARITH_CODING_SUPPORTED */ +} + + +LOCAL(void) +emit_dri (j_compress_ptr cinfo) +/* Emit a DRI marker */ +{ + emit_marker(cinfo, M_DRI); + + emit_2bytes(cinfo, 4); /* fixed length */ + + emit_2bytes(cinfo, (int) cinfo->restart_interval); +} + + +LOCAL(void) +emit_sof (j_compress_ptr cinfo, JPEG_MARKER code) +/* Emit a SOF marker */ +{ + int ci; + jpeg_component_info *compptr; + + emit_marker(cinfo, code); + + emit_2bytes(cinfo, 3 * cinfo->num_components + 2 + 5 + 1); /* length */ + + /* Make sure image isn't bigger than SOF field can handle */ + if ((long) cinfo->jpeg_height > 65535L || + (long) cinfo->jpeg_width > 65535L) + ERREXIT1(cinfo, JERR_IMAGE_TOO_BIG, (unsigned int) 65535); + + emit_byte(cinfo, cinfo->data_precision); + emit_2bytes(cinfo, (int) cinfo->jpeg_height); + emit_2bytes(cinfo, (int) cinfo->jpeg_width); + + emit_byte(cinfo, cinfo->num_components); + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + emit_byte(cinfo, compptr->component_id); + emit_byte(cinfo, (compptr->h_samp_factor << 4) + compptr->v_samp_factor); + emit_byte(cinfo, compptr->quant_tbl_no); + } +} + + +LOCAL(void) +emit_sos (j_compress_ptr cinfo) +/* Emit a SOS marker */ +{ + int i, td, ta; + jpeg_component_info *compptr; + + emit_marker(cinfo, M_SOS); + + emit_2bytes(cinfo, 2 * cinfo->comps_in_scan + 2 + 1 + 3); /* length */ + + emit_byte(cinfo, cinfo->comps_in_scan); + + for (i = 0; i < cinfo->comps_in_scan; i++) { + compptr = cinfo->cur_comp_info[i]; + emit_byte(cinfo, compptr->component_id); + + /* We emit 0 for unused field(s); this is recommended by the P&M text + * but does not seem to be specified in the standard. + */ + + /* DC needs no table for refinement scan */ + td = cinfo->Ss == 0 && cinfo->Ah == 0 ? compptr->dc_tbl_no : 0; + /* AC needs no table when not present */ + ta = cinfo->Se ? 
compptr->ac_tbl_no : 0; + + emit_byte(cinfo, (td << 4) + ta); + } + + emit_byte(cinfo, cinfo->Ss); + emit_byte(cinfo, cinfo->Se); + emit_byte(cinfo, (cinfo->Ah << 4) + cinfo->Al); +} + + +LOCAL(void) +emit_pseudo_sos (j_compress_ptr cinfo) +/* Emit a pseudo SOS marker */ +{ + emit_marker(cinfo, M_SOS); + + emit_2bytes(cinfo, 2 + 1 + 3); /* length */ + + emit_byte(cinfo, 0); /* Ns */ + + emit_byte(cinfo, 0); /* Ss */ + emit_byte(cinfo, cinfo->block_size * cinfo->block_size - 1); /* Se */ + emit_byte(cinfo, 0); /* Ah/Al */ +} + + +LOCAL(void) +emit_jfif_app0 (j_compress_ptr cinfo) +/* Emit a JFIF-compliant APP0 marker */ +{ + /* + * Length of APP0 block (2 bytes) + * Block ID (4 bytes - ASCII "JFIF") + * Zero byte (1 byte to terminate the ID string) + * Version Major, Minor (2 bytes - major first) + * Units (1 byte - 0x00 = none, 0x01 = inch, 0x02 = cm) + * Xdpu (2 bytes - dots per unit horizontal) + * Ydpu (2 bytes - dots per unit vertical) + * Thumbnail X size (1 byte) + * Thumbnail Y size (1 byte) + */ + + emit_marker(cinfo, M_APP0); + + emit_2bytes(cinfo, 2 + 4 + 1 + 2 + 1 + 2 + 2 + 1 + 1); /* length */ + + emit_byte(cinfo, 0x4A); /* Identifier: ASCII "JFIF" */ + emit_byte(cinfo, 0x46); + emit_byte(cinfo, 0x49); + emit_byte(cinfo, 0x46); + emit_byte(cinfo, 0); + emit_byte(cinfo, cinfo->JFIF_major_version); /* Version fields */ + emit_byte(cinfo, cinfo->JFIF_minor_version); + emit_byte(cinfo, cinfo->density_unit); /* Pixel size information */ + emit_2bytes(cinfo, (int) cinfo->X_density); + emit_2bytes(cinfo, (int) cinfo->Y_density); + emit_byte(cinfo, 0); /* No thumbnail image */ + emit_byte(cinfo, 0); +} + + +LOCAL(void) +emit_adobe_app14 (j_compress_ptr cinfo) +/* Emit an Adobe APP14 marker */ +{ + /* + * Length of APP14 block (2 bytes) + * Block ID (5 bytes - ASCII "Adobe") + * Version Number (2 bytes - currently 100) + * Flags0 (2 bytes - currently 0) + * Flags1 (2 bytes - currently 0) + * Color transform (1 byte) + * + * Although Adobe TN 5116 mentions Version = 101, all the Adobe files + * now in circulation seem to use Version = 100, so that's what we write. + * + * We write the color transform byte as 1 if the JPEG color space is + * YCbCr, 2 if it's YCCK, 0 otherwise. Adobe's definition has to do with + * whether the encoder performed a transformation, which is pretty useless. + */ + + emit_marker(cinfo, M_APP14); + + emit_2bytes(cinfo, 2 + 5 + 2 + 2 + 2 + 1); /* length */ + + emit_byte(cinfo, 0x41); /* Identifier: ASCII "Adobe" */ + emit_byte(cinfo, 0x64); + emit_byte(cinfo, 0x6F); + emit_byte(cinfo, 0x62); + emit_byte(cinfo, 0x65); + emit_2bytes(cinfo, 100); /* Version */ + emit_2bytes(cinfo, 0); /* Flags0 */ + emit_2bytes(cinfo, 0); /* Flags1 */ + switch (cinfo->jpeg_color_space) { + case JCS_YCbCr: + emit_byte(cinfo, 1); /* Color transform = 1 */ + break; + case JCS_YCCK: + emit_byte(cinfo, 2); /* Color transform = 2 */ + break; + default: + emit_byte(cinfo, 0); /* Color transform = 0 */ + break; + } +} + + +/* + * These routines allow writing an arbitrary marker with parameters. + * The only intended use is to emit COM or APPn markers after calling + * write_file_header and before calling write_frame_header. + * Other uses are not guaranteed to produce desirable results. + * Counting the parameter bytes properly is the caller's responsibility. 
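 */

/*
 * [Editor's illustration -- not part of jcmarker.c]  write_marker_header and
 * write_marker_byte back the public jpeg_write_marker / jpeg_write_m_header /
 * jpeg_write_m_byte calls.  The helper below shows the intended window: call
 * it after jpeg_start_compress and before the first jpeg_write_scanlines.
 * The function name and the comment text are placeholders.
 */

#include <stdio.h>
#include <string.h>
#include "jpeglib.h"

void add_comment_marker(j_compress_ptr cinfo)
{
  const char * text = "created by example code";   /* placeholder text */

  /* JPEG_COM emits a COM marker; JPEG_APP0+n would emit an APPn marker. */
  jpeg_write_marker(cinfo, JPEG_COM,
                    (const JOCTET *) text, (unsigned int) strlen(text));
}

/*
 * write_marker_header: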
+ */ + +METHODDEF(void) +write_marker_header (j_compress_ptr cinfo, int marker, unsigned int datalen) +/* Emit an arbitrary marker header */ +{ + if (datalen > (unsigned int) 65533) /* safety check */ + ERREXIT(cinfo, JERR_BAD_LENGTH); + + emit_marker(cinfo, (JPEG_MARKER) marker); + + emit_2bytes(cinfo, (int) (datalen + 2)); /* total length */ +} + +METHODDEF(void) +write_marker_byte (j_compress_ptr cinfo, int val) +/* Emit one byte of marker parameters following write_marker_header */ +{ + emit_byte(cinfo, val); +} + + +/* + * Write datastream header. + * This consists of an SOI and optional APPn markers. + * We recommend use of the JFIF marker, but not the Adobe marker, + * when using YCbCr or grayscale data. The JFIF marker should NOT + * be used for any other JPEG colorspace. The Adobe marker is helpful + * to distinguish RGB, CMYK, and YCCK colorspaces. + * Note that an application can write additional header markers after + * jpeg_start_compress returns. + */ + +METHODDEF(void) +write_file_header (j_compress_ptr cinfo) +{ + my_marker_ptr marker = (my_marker_ptr) cinfo->marker; + + emit_marker(cinfo, M_SOI); /* first the SOI */ + + /* SOI is defined to reset restart interval to 0 */ + marker->last_restart_interval = 0; + + if (cinfo->write_JFIF_header) /* next an optional JFIF APP0 */ + emit_jfif_app0(cinfo); + if (cinfo->write_Adobe_marker) /* next an optional Adobe APP14 */ + emit_adobe_app14(cinfo); +} + + +/* + * Write frame header. + * This consists of DQT and SOFn markers, and a conditional pseudo SOS marker. + * Note that we do not emit the SOF until we have emitted the DQT(s). + * This avoids compatibility problems with incorrect implementations that + * try to error-check the quant table numbers as soon as they see the SOF. + */ + +METHODDEF(void) +write_frame_header (j_compress_ptr cinfo) +{ + int ci, prec; + boolean is_baseline; + jpeg_component_info *compptr; + + /* Emit DQT for each quantization table. + * Note that emit_dqt() suppresses any duplicate tables. + */ + prec = 0; + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + prec += emit_dqt(cinfo, compptr->quant_tbl_no); + } + /* now prec is nonzero iff there are any 16-bit quant tables. */ + + /* Check for a non-baseline specification. + * Note we assume that Huffman table numbers won't be changed later. 
+ */ + if (cinfo->arith_code || cinfo->progressive_mode || + cinfo->data_precision != 8 || cinfo->block_size != DCTSIZE) { + is_baseline = FALSE; + } else { + is_baseline = TRUE; + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + if (compptr->dc_tbl_no > 1 || compptr->ac_tbl_no > 1) + is_baseline = FALSE; + } + if (prec && is_baseline) { + is_baseline = FALSE; + /* If it's baseline except for quantizer size, warn the user */ + TRACEMS(cinfo, 0, JTRC_16BIT_TABLES); + } + } + + /* Emit the proper SOF marker */ + if (cinfo->arith_code) { + if (cinfo->progressive_mode) + emit_sof(cinfo, M_SOF10); /* SOF code for progressive arithmetic */ + else + emit_sof(cinfo, M_SOF9); /* SOF code for sequential arithmetic */ + } else { + if (cinfo->progressive_mode) + emit_sof(cinfo, M_SOF2); /* SOF code for progressive Huffman */ + else if (is_baseline) + emit_sof(cinfo, M_SOF0); /* SOF code for baseline implementation */ + else + emit_sof(cinfo, M_SOF1); /* SOF code for non-baseline Huffman file */ + } + + /* Check to emit pseudo SOS marker */ + if (cinfo->progressive_mode && cinfo->block_size != DCTSIZE) + emit_pseudo_sos(cinfo); +} + + +/* + * Write scan header. + * This consists of DHT or DAC markers, optional DRI, and SOS. + * Compressed data will be written following the SOS. + */ + +METHODDEF(void) +write_scan_header (j_compress_ptr cinfo) +{ + my_marker_ptr marker = (my_marker_ptr) cinfo->marker; + int i; + jpeg_component_info *compptr; + + if (cinfo->arith_code) { + /* Emit arith conditioning info. We may have some duplication + * if the file has multiple scans, but it's so small it's hardly + * worth worrying about. + */ + emit_dac(cinfo); + } else { + /* Emit Huffman tables. + * Note that emit_dht() suppresses any duplicate tables. + */ + for (i = 0; i < cinfo->comps_in_scan; i++) { + compptr = cinfo->cur_comp_info[i]; + /* DC needs no table for refinement scan */ + if (cinfo->Ss == 0 && cinfo->Ah == 0) + emit_dht(cinfo, compptr->dc_tbl_no, FALSE); + /* AC needs no table when not present */ + if (cinfo->Se) + emit_dht(cinfo, compptr->ac_tbl_no, TRUE); + } + } + + /* Emit DRI if required --- note that DRI value could change for each scan. + * We avoid wasting space with unnecessary DRIs, however. + */ + if (cinfo->restart_interval != marker->last_restart_interval) { + emit_dri(cinfo); + marker->last_restart_interval = cinfo->restart_interval; + } + + emit_sos(cinfo); +} + + +/* + * Write datastream trailer. + */ + +METHODDEF(void) +write_file_trailer (j_compress_ptr cinfo) +{ + emit_marker(cinfo, M_EOI); +} + + +/* + * Write an abbreviated table-specification datastream. + * This consists of SOI, DQT and DHT tables, and EOI. + * Any table that is defined and not marked sent_table = TRUE will be + * emitted. Note that all tables will be marked sent_table = TRUE at exit. + */ + +METHODDEF(void) +write_tables_only (j_compress_ptr cinfo) +{ + int i; + + emit_marker(cinfo, M_SOI); + + for (i = 0; i < NUM_QUANT_TBLS; i++) { + if (cinfo->quant_tbl_ptrs[i] != NULL) + (void) emit_dqt(cinfo, i); + } + + if (! cinfo->arith_code) { + for (i = 0; i < NUM_HUFF_TBLS; i++) { + if (cinfo->dc_huff_tbl_ptrs[i] != NULL) + emit_dht(cinfo, i, FALSE); + if (cinfo->ac_huff_tbl_ptrs[i] != NULL) + emit_dht(cinfo, i, TRUE); + } + } + + emit_marker(cinfo, M_EOI); +} + + +/* + * Initialize the marker writer module. 
+ */ + +GLOBAL(void) +jinit_marker_writer (j_compress_ptr cinfo) +{ + my_marker_ptr marker; + + /* Create the subobject */ + marker = (my_marker_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_marker_writer)); + cinfo->marker = (struct jpeg_marker_writer *) marker; + /* Initialize method pointers */ + marker->pub.write_file_header = write_file_header; + marker->pub.write_frame_header = write_frame_header; + marker->pub.write_scan_header = write_scan_header; + marker->pub.write_file_trailer = write_file_trailer; + marker->pub.write_tables_only = write_tables_only; + marker->pub.write_marker_header = write_marker_header; + marker->pub.write_marker_byte = write_marker_byte; + /* Initialize private state */ + marker->last_restart_interval = 0; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcmaster.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcmaster.c new file mode 100644 index 000000000..caf80a53b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcmaster.c @@ -0,0 +1,858 @@ +/* + * jcmaster.c + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * Modified 2003-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains master control logic for the JPEG compressor. + * These routines are concerned with parameter validation, initial setup, + * and inter-pass control (determining the number of passes and the work + * to be done in each pass). + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Private state */ + +typedef enum { + main_pass, /* input data, also do first output step */ + huff_opt_pass, /* Huffman code optimization pass */ + output_pass /* data output pass */ +} c_pass_type; + +typedef struct { + struct jpeg_comp_master pub; /* public fields */ + + c_pass_type pass_type; /* the type of the current pass */ + + int pass_number; /* # of passes completed */ + int total_passes; /* total # of passes needed */ + + int scan_number; /* current index in scan_info[] */ +} my_comp_master; + +typedef my_comp_master * my_master_ptr; + + +/* + * Support routines that do various essential calculations. + */ + +/* + * Compute JPEG image dimensions and related values. + * NOTE: this is exported for possible use by application. + * Hence it mustn't do anything that can't be done twice. + */ + +GLOBAL(void) +jpeg_calc_jpeg_dimensions (j_compress_ptr cinfo) +/* Do computations that are needed before master selection phase */ +{ +#ifdef DCT_SCALING_SUPPORTED + + /* Sanity check on input image dimensions to prevent overflow in + * following calculation. + * We do check jpeg_width and jpeg_height in initial_setup below, + * but image_width and image_height can come from arbitrary data, + * and we need some space for multiplication by block_size. + */ + if (((long) cinfo->image_width >> 24) || ((long) cinfo->image_height >> 24)) + ERREXIT1(cinfo, JERR_IMAGE_TOO_BIG, (unsigned int) JPEG_MAX_DIMENSION); + + /* Compute actual JPEG image dimensions and DCT scaling choices. 
*/ + if (cinfo->scale_num >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/1 scaling */ + cinfo->jpeg_width = cinfo->image_width * cinfo->block_size; + cinfo->jpeg_height = cinfo->image_height * cinfo->block_size; + cinfo->min_DCT_h_scaled_size = 1; + cinfo->min_DCT_v_scaled_size = 1; + } else if (cinfo->scale_num * 2 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/2 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 2L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 2L); + cinfo->min_DCT_h_scaled_size = 2; + cinfo->min_DCT_v_scaled_size = 2; + } else if (cinfo->scale_num * 3 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/3 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 3L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 3L); + cinfo->min_DCT_h_scaled_size = 3; + cinfo->min_DCT_v_scaled_size = 3; + } else if (cinfo->scale_num * 4 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/4 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 4L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 4L); + cinfo->min_DCT_h_scaled_size = 4; + cinfo->min_DCT_v_scaled_size = 4; + } else if (cinfo->scale_num * 5 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/5 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 5L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 5L); + cinfo->min_DCT_h_scaled_size = 5; + cinfo->min_DCT_v_scaled_size = 5; + } else if (cinfo->scale_num * 6 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/6 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 6L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 6L); + cinfo->min_DCT_h_scaled_size = 6; + cinfo->min_DCT_v_scaled_size = 6; + } else if (cinfo->scale_num * 7 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/7 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 7L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 7L); + cinfo->min_DCT_h_scaled_size = 7; + cinfo->min_DCT_v_scaled_size = 7; + } else if (cinfo->scale_num * 8 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/8 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 8L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 8L); + cinfo->min_DCT_h_scaled_size = 8; + cinfo->min_DCT_v_scaled_size = 8; + } else if (cinfo->scale_num * 9 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/9 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 9L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 9L); + cinfo->min_DCT_h_scaled_size = 9; + cinfo->min_DCT_v_scaled_size = 9; + } else if (cinfo->scale_num * 10 >= cinfo->scale_denom * cinfo->block_size) { 
+ /* Provide block_size/10 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 10L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 10L); + cinfo->min_DCT_h_scaled_size = 10; + cinfo->min_DCT_v_scaled_size = 10; + } else if (cinfo->scale_num * 11 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/11 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 11L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 11L); + cinfo->min_DCT_h_scaled_size = 11; + cinfo->min_DCT_v_scaled_size = 11; + } else if (cinfo->scale_num * 12 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/12 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 12L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 12L); + cinfo->min_DCT_h_scaled_size = 12; + cinfo->min_DCT_v_scaled_size = 12; + } else if (cinfo->scale_num * 13 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/13 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 13L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 13L); + cinfo->min_DCT_h_scaled_size = 13; + cinfo->min_DCT_v_scaled_size = 13; + } else if (cinfo->scale_num * 14 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/14 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 14L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 14L); + cinfo->min_DCT_h_scaled_size = 14; + cinfo->min_DCT_v_scaled_size = 14; + } else if (cinfo->scale_num * 15 >= cinfo->scale_denom * cinfo->block_size) { + /* Provide block_size/15 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 15L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 15L); + cinfo->min_DCT_h_scaled_size = 15; + cinfo->min_DCT_v_scaled_size = 15; + } else { + /* Provide block_size/16 scaling */ + cinfo->jpeg_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * cinfo->block_size, 16L); + cinfo->jpeg_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * cinfo->block_size, 16L); + cinfo->min_DCT_h_scaled_size = 16; + cinfo->min_DCT_v_scaled_size = 16; + } + +#else /* !DCT_SCALING_SUPPORTED */ + + /* Hardwire it to "no scaling" */ + cinfo->jpeg_width = cinfo->image_width; + cinfo->jpeg_height = cinfo->image_height; + cinfo->min_DCT_h_scaled_size = DCTSIZE; + cinfo->min_DCT_v_scaled_size = DCTSIZE; + +#endif /* DCT_SCALING_SUPPORTED */ +} + + +LOCAL(void) +jpeg_calc_trans_dimensions (j_compress_ptr cinfo) +{ + if (cinfo->min_DCT_h_scaled_size != cinfo->min_DCT_v_scaled_size) + ERREXIT2(cinfo, JERR_BAD_DCTSIZE, + cinfo->min_DCT_h_scaled_size, cinfo->min_DCT_v_scaled_size); + + cinfo->block_size = cinfo->min_DCT_h_scaled_size; +} + + +LOCAL(void) +initial_setup (j_compress_ptr cinfo, boolean transcode_only) +/* Do computations that are needed before master selection phase */ +{ + int ci, ssize; + jpeg_component_info *compptr; + long samplesperrow; + JDIMENSION jd_samplesperrow; + + if (transcode_only) + 
jpeg_calc_trans_dimensions(cinfo); + else + jpeg_calc_jpeg_dimensions(cinfo); + + /* Sanity check on block_size */ + if (cinfo->block_size < 1 || cinfo->block_size > 16) + ERREXIT2(cinfo, JERR_BAD_DCTSIZE, cinfo->block_size, cinfo->block_size); + + /* Derive natural_order from block_size */ + switch (cinfo->block_size) { + case 2: cinfo->natural_order = jpeg_natural_order2; break; + case 3: cinfo->natural_order = jpeg_natural_order3; break; + case 4: cinfo->natural_order = jpeg_natural_order4; break; + case 5: cinfo->natural_order = jpeg_natural_order5; break; + case 6: cinfo->natural_order = jpeg_natural_order6; break; + case 7: cinfo->natural_order = jpeg_natural_order7; break; + default: cinfo->natural_order = jpeg_natural_order; break; + } + + /* Derive lim_Se from block_size */ + cinfo->lim_Se = cinfo->block_size < DCTSIZE ? + cinfo->block_size * cinfo->block_size - 1 : DCTSIZE2-1; + + /* Sanity check on image dimensions */ + if (cinfo->jpeg_height <= 0 || cinfo->jpeg_width <= 0 || + cinfo->num_components <= 0 || cinfo->input_components <= 0) + ERREXIT(cinfo, JERR_EMPTY_IMAGE); + + /* Make sure image isn't bigger than I can handle */ + if ((long) cinfo->jpeg_height > (long) JPEG_MAX_DIMENSION || + (long) cinfo->jpeg_width > (long) JPEG_MAX_DIMENSION) + ERREXIT1(cinfo, JERR_IMAGE_TOO_BIG, (unsigned int) JPEG_MAX_DIMENSION); + + /* Width of an input scanline must be representable as JDIMENSION. */ + samplesperrow = (long) cinfo->image_width * (long) cinfo->input_components; + jd_samplesperrow = (JDIMENSION) samplesperrow; + if ((long) jd_samplesperrow != samplesperrow) + ERREXIT(cinfo, JERR_WIDTH_OVERFLOW); + + /* For now, precision must match compiled-in value... */ + if (cinfo->data_precision != BITS_IN_JSAMPLE) + ERREXIT1(cinfo, JERR_BAD_PRECISION, cinfo->data_precision); + + /* Check that number of components won't exceed internal array sizes */ + if (cinfo->num_components > MAX_COMPONENTS) + ERREXIT2(cinfo, JERR_COMPONENT_COUNT, cinfo->num_components, + MAX_COMPONENTS); + + /* Compute maximum sampling factors; check factor validity */ + cinfo->max_h_samp_factor = 1; + cinfo->max_v_samp_factor = 1; + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + if (compptr->h_samp_factor<=0 || compptr->h_samp_factor>MAX_SAMP_FACTOR || + compptr->v_samp_factor<=0 || compptr->v_samp_factor>MAX_SAMP_FACTOR) + ERREXIT(cinfo, JERR_BAD_SAMPLING); + cinfo->max_h_samp_factor = MAX(cinfo->max_h_samp_factor, + compptr->h_samp_factor); + cinfo->max_v_samp_factor = MAX(cinfo->max_v_samp_factor, + compptr->v_samp_factor); + } + + /* Compute dimensions of components */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Fill in the correct component_index value; don't rely on application */ + compptr->component_index = ci; + /* In selecting the actual DCT scaling for each component, we try to + * scale down the chroma components via DCT scaling rather than downsampling. + * This saves time if the downsampler gets to use 1:1 scaling. + * Note this code adapts subsampling ratios which are powers of 2. + */ + ssize = 1; +#ifdef DCT_SCALING_SUPPORTED + while (cinfo->min_DCT_h_scaled_size * ssize <= + (cinfo->do_fancy_downsampling ? 
DCTSIZE : DCTSIZE / 2) && + (cinfo->max_h_samp_factor % (compptr->h_samp_factor * ssize * 2)) == 0) { + ssize = ssize * 2; + } +#endif + compptr->DCT_h_scaled_size = cinfo->min_DCT_h_scaled_size * ssize; + ssize = 1; +#ifdef DCT_SCALING_SUPPORTED + while (cinfo->min_DCT_v_scaled_size * ssize <= + (cinfo->do_fancy_downsampling ? DCTSIZE : DCTSIZE / 2) && + (cinfo->max_v_samp_factor % (compptr->v_samp_factor * ssize * 2)) == 0) { + ssize = ssize * 2; + } +#endif + compptr->DCT_v_scaled_size = cinfo->min_DCT_v_scaled_size * ssize; + + /* We don't support DCT ratios larger than 2. */ + if (compptr->DCT_h_scaled_size > compptr->DCT_v_scaled_size * 2) + compptr->DCT_h_scaled_size = compptr->DCT_v_scaled_size * 2; + else if (compptr->DCT_v_scaled_size > compptr->DCT_h_scaled_size * 2) + compptr->DCT_v_scaled_size = compptr->DCT_h_scaled_size * 2; + + /* Size in DCT blocks */ + compptr->width_in_blocks = (JDIMENSION) + jdiv_round_up((long) cinfo->jpeg_width * (long) compptr->h_samp_factor, + (long) (cinfo->max_h_samp_factor * cinfo->block_size)); + compptr->height_in_blocks = (JDIMENSION) + jdiv_round_up((long) cinfo->jpeg_height * (long) compptr->v_samp_factor, + (long) (cinfo->max_v_samp_factor * cinfo->block_size)); + /* Size in samples */ + compptr->downsampled_width = (JDIMENSION) + jdiv_round_up((long) cinfo->jpeg_width * + (long) (compptr->h_samp_factor * compptr->DCT_h_scaled_size), + (long) (cinfo->max_h_samp_factor * cinfo->block_size)); + compptr->downsampled_height = (JDIMENSION) + jdiv_round_up((long) cinfo->jpeg_height * + (long) (compptr->v_samp_factor * compptr->DCT_v_scaled_size), + (long) (cinfo->max_v_samp_factor * cinfo->block_size)); + /* Mark component needed (this flag isn't actually used for compression) */ + compptr->component_needed = TRUE; + } + + /* Compute number of fully interleaved MCU rows (number of times that + * main controller will call coefficient controller). + */ + cinfo->total_iMCU_rows = (JDIMENSION) + jdiv_round_up((long) cinfo->jpeg_height, + (long) (cinfo->max_v_samp_factor * cinfo->block_size)); +} + + +#ifdef C_MULTISCAN_FILES_SUPPORTED + +LOCAL(void) +validate_script (j_compress_ptr cinfo) +/* Verify that the scan script in cinfo->scan_info[] is valid; also + * determine whether it uses progressive JPEG, and set cinfo->progressive_mode. + */ +{ + const jpeg_scan_info * scanptr; + int scanno, ncomps, ci, coefi, thisi; + int Ss, Se, Ah, Al; + boolean component_sent[MAX_COMPONENTS]; +#ifdef C_PROGRESSIVE_SUPPORTED + int * last_bitpos_ptr; + int last_bitpos[MAX_COMPONENTS][DCTSIZE2]; + /* -1 until that coefficient has been seen; then last Al for it */ +#endif + + if (cinfo->num_scans <= 0) + ERREXIT1(cinfo, JERR_BAD_SCAN_SCRIPT, 0); + + /* For sequential JPEG, all scans must have Ss=0, Se=DCTSIZE2-1; + * for progressive JPEG, no scan can have this. 
+ */ + scanptr = cinfo->scan_info; + if (scanptr->Ss != 0 || scanptr->Se != DCTSIZE2-1) { +#ifdef C_PROGRESSIVE_SUPPORTED + cinfo->progressive_mode = TRUE; + last_bitpos_ptr = & last_bitpos[0][0]; + for (ci = 0; ci < cinfo->num_components; ci++) + for (coefi = 0; coefi < DCTSIZE2; coefi++) + *last_bitpos_ptr++ = -1; +#else + ERREXIT(cinfo, JERR_NOT_COMPILED); +#endif + } else { + cinfo->progressive_mode = FALSE; + for (ci = 0; ci < cinfo->num_components; ci++) + component_sent[ci] = FALSE; + } + + for (scanno = 1; scanno <= cinfo->num_scans; scanptr++, scanno++) { + /* Validate component indexes */ + ncomps = scanptr->comps_in_scan; + if (ncomps <= 0 || ncomps > MAX_COMPS_IN_SCAN) + ERREXIT2(cinfo, JERR_COMPONENT_COUNT, ncomps, MAX_COMPS_IN_SCAN); + for (ci = 0; ci < ncomps; ci++) { + thisi = scanptr->component_index[ci]; + if (thisi < 0 || thisi >= cinfo->num_components) + ERREXIT1(cinfo, JERR_BAD_SCAN_SCRIPT, scanno); + /* Components must appear in SOF order within each scan */ + if (ci > 0 && thisi <= scanptr->component_index[ci-1]) + ERREXIT1(cinfo, JERR_BAD_SCAN_SCRIPT, scanno); + } + /* Validate progression parameters */ + Ss = scanptr->Ss; + Se = scanptr->Se; + Ah = scanptr->Ah; + Al = scanptr->Al; + if (cinfo->progressive_mode) { +#ifdef C_PROGRESSIVE_SUPPORTED + /* The JPEG spec simply gives the ranges 0..13 for Ah and Al, but that + * seems wrong: the upper bound ought to depend on data precision. + * Perhaps they really meant 0..N+1 for N-bit precision. + * Here we allow 0..10 for 8-bit data; Al larger than 10 results in + * out-of-range reconstructed DC values during the first DC scan, + * which might cause problems for some decoders. + */ +#if BITS_IN_JSAMPLE == 8 +#define MAX_AH_AL 10 +#else +#define MAX_AH_AL 13 +#endif + if (Ss < 0 || Ss >= DCTSIZE2 || Se < Ss || Se >= DCTSIZE2 || + Ah < 0 || Ah > MAX_AH_AL || Al < 0 || Al > MAX_AH_AL) + ERREXIT1(cinfo, JERR_BAD_PROG_SCRIPT, scanno); + if (Ss == 0) { + if (Se != 0) /* DC and AC together not OK */ + ERREXIT1(cinfo, JERR_BAD_PROG_SCRIPT, scanno); + } else { + if (ncomps != 1) /* AC scans must be for only one component */ + ERREXIT1(cinfo, JERR_BAD_PROG_SCRIPT, scanno); + } + for (ci = 0; ci < ncomps; ci++) { + last_bitpos_ptr = & last_bitpos[scanptr->component_index[ci]][0]; + if (Ss != 0 && last_bitpos_ptr[0] < 0) /* AC without prior DC scan */ + ERREXIT1(cinfo, JERR_BAD_PROG_SCRIPT, scanno); + for (coefi = Ss; coefi <= Se; coefi++) { + if (last_bitpos_ptr[coefi] < 0) { + /* first scan of this coefficient */ + if (Ah != 0) + ERREXIT1(cinfo, JERR_BAD_PROG_SCRIPT, scanno); + } else { + /* not first scan */ + if (Ah != last_bitpos_ptr[coefi] || Al != Ah-1) + ERREXIT1(cinfo, JERR_BAD_PROG_SCRIPT, scanno); + } + last_bitpos_ptr[coefi] = Al; + } + } +#endif + } else { + /* For sequential JPEG, all progression parameters must be these: */ + if (Ss != 0 || Se != DCTSIZE2-1 || Ah != 0 || Al != 0) + ERREXIT1(cinfo, JERR_BAD_PROG_SCRIPT, scanno); + /* Make sure components are not sent twice */ + for (ci = 0; ci < ncomps; ci++) { + thisi = scanptr->component_index[ci]; + if (component_sent[thisi]) + ERREXIT1(cinfo, JERR_BAD_SCAN_SCRIPT, scanno); + component_sent[thisi] = TRUE; + } + } + } + + /* Now verify that everything got sent. */ + if (cinfo->progressive_mode) { +#ifdef C_PROGRESSIVE_SUPPORTED + /* For progressive mode, we only check that at least some DC data + * got sent for each component; the spec does not require that all bits + * of all coefficients be transmitted. 
Would it be wiser to enforce + * transmission of all coefficient bits?? + */ + for (ci = 0; ci < cinfo->num_components; ci++) { + if (last_bitpos[ci][0] < 0) + ERREXIT(cinfo, JERR_MISSING_DATA); + } +#endif + } else { + for (ci = 0; ci < cinfo->num_components; ci++) { + if (! component_sent[ci]) + ERREXIT(cinfo, JERR_MISSING_DATA); + } + } +} + + +LOCAL(void) +reduce_script (j_compress_ptr cinfo) +/* Adapt scan script for use with reduced block size; + * assume that script has been validated before. + */ +{ + jpeg_scan_info * scanptr; + int idxout, idxin; + + /* Circumvent const declaration for this function */ + scanptr = (jpeg_scan_info *) cinfo->scan_info; + idxout = 0; + + for (idxin = 0; idxin < cinfo->num_scans; idxin++) { + /* After skipping, idxout becomes smaller than idxin */ + if (idxin != idxout) + /* Copy rest of data; + * note we stay in given chunk of allocated memory. + */ + scanptr[idxout] = scanptr[idxin]; + if (scanptr[idxout].Ss > cinfo->lim_Se) + /* Entire scan out of range - skip this entry */ + continue; + if (scanptr[idxout].Se > cinfo->lim_Se) + /* Limit scan to end of block */ + scanptr[idxout].Se = cinfo->lim_Se; + idxout++; + } + + cinfo->num_scans = idxout; +} + +#endif /* C_MULTISCAN_FILES_SUPPORTED */ + + +LOCAL(void) +select_scan_parameters (j_compress_ptr cinfo) +/* Set up the scan parameters for the current scan */ +{ + int ci; + +#ifdef C_MULTISCAN_FILES_SUPPORTED + if (cinfo->scan_info != NULL) { + /* Prepare for current scan --- the script is already validated */ + my_master_ptr master = (my_master_ptr) cinfo->master; + const jpeg_scan_info * scanptr = cinfo->scan_info + master->scan_number; + + cinfo->comps_in_scan = scanptr->comps_in_scan; + for (ci = 0; ci < scanptr->comps_in_scan; ci++) { + cinfo->cur_comp_info[ci] = + &cinfo->comp_info[scanptr->component_index[ci]]; + } + if (cinfo->progressive_mode) { + cinfo->Ss = scanptr->Ss; + cinfo->Se = scanptr->Se; + cinfo->Ah = scanptr->Ah; + cinfo->Al = scanptr->Al; + return; + } + } + else +#endif + { + /* Prepare for single sequential-JPEG scan containing all components */ + if (cinfo->num_components > MAX_COMPS_IN_SCAN) + ERREXIT2(cinfo, JERR_COMPONENT_COUNT, cinfo->num_components, + MAX_COMPS_IN_SCAN); + cinfo->comps_in_scan = cinfo->num_components; + for (ci = 0; ci < cinfo->num_components; ci++) { + cinfo->cur_comp_info[ci] = &cinfo->comp_info[ci]; + } + } + cinfo->Ss = 0; + cinfo->Se = cinfo->block_size * cinfo->block_size - 1; + cinfo->Ah = 0; + cinfo->Al = 0; +} + + +LOCAL(void) +per_scan_setup (j_compress_ptr cinfo) +/* Do computations that are needed before processing a JPEG scan */ +/* cinfo->comps_in_scan and cinfo->cur_comp_info[] are already set */ +{ + int ci, mcublks, tmp; + jpeg_component_info *compptr; + + if (cinfo->comps_in_scan == 1) { + + /* Noninterleaved (single-component) scan */ + compptr = cinfo->cur_comp_info[0]; + + /* Overall image size in MCUs */ + cinfo->MCUs_per_row = compptr->width_in_blocks; + cinfo->MCU_rows_in_scan = compptr->height_in_blocks; + + /* For noninterleaved scan, always one block per MCU */ + compptr->MCU_width = 1; + compptr->MCU_height = 1; + compptr->MCU_blocks = 1; + compptr->MCU_sample_width = compptr->DCT_h_scaled_size; + compptr->last_col_width = 1; + /* For noninterleaved scans, it is convenient to define last_row_height + * as the number of block rows present in the last iMCU row. 
+ */ + tmp = (int) (compptr->height_in_blocks % compptr->v_samp_factor); + if (tmp == 0) tmp = compptr->v_samp_factor; + compptr->last_row_height = tmp; + + /* Prepare array describing MCU composition */ + cinfo->blocks_in_MCU = 1; + cinfo->MCU_membership[0] = 0; + + } else { + + /* Interleaved (multi-component) scan */ + if (cinfo->comps_in_scan <= 0 || cinfo->comps_in_scan > MAX_COMPS_IN_SCAN) + ERREXIT2(cinfo, JERR_COMPONENT_COUNT, cinfo->comps_in_scan, + MAX_COMPS_IN_SCAN); + + /* Overall image size in MCUs */ + cinfo->MCUs_per_row = (JDIMENSION) + jdiv_round_up((long) cinfo->jpeg_width, + (long) (cinfo->max_h_samp_factor * cinfo->block_size)); + cinfo->MCU_rows_in_scan = (JDIMENSION) + jdiv_round_up((long) cinfo->jpeg_height, + (long) (cinfo->max_v_samp_factor * cinfo->block_size)); + + cinfo->blocks_in_MCU = 0; + + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + /* Sampling factors give # of blocks of component in each MCU */ + compptr->MCU_width = compptr->h_samp_factor; + compptr->MCU_height = compptr->v_samp_factor; + compptr->MCU_blocks = compptr->MCU_width * compptr->MCU_height; + compptr->MCU_sample_width = compptr->MCU_width * compptr->DCT_h_scaled_size; + /* Figure number of non-dummy blocks in last MCU column & row */ + tmp = (int) (compptr->width_in_blocks % compptr->MCU_width); + if (tmp == 0) tmp = compptr->MCU_width; + compptr->last_col_width = tmp; + tmp = (int) (compptr->height_in_blocks % compptr->MCU_height); + if (tmp == 0) tmp = compptr->MCU_height; + compptr->last_row_height = tmp; + /* Prepare array describing MCU composition */ + mcublks = compptr->MCU_blocks; + if (cinfo->blocks_in_MCU + mcublks > C_MAX_BLOCKS_IN_MCU) + ERREXIT(cinfo, JERR_BAD_MCU_SIZE); + while (mcublks-- > 0) { + cinfo->MCU_membership[cinfo->blocks_in_MCU++] = ci; + } + } + + } + + /* Convert restart specified in rows to actual MCU count. */ + /* Note that count must fit in 16 bits, so we provide limiting. */ + if (cinfo->restart_in_rows > 0) { + long nominal = (long) cinfo->restart_in_rows * (long) cinfo->MCUs_per_row; + cinfo->restart_interval = (unsigned int) MIN(nominal, 65535L); + } +} + + +/* + * Per-pass setup. + * This is called at the beginning of each pass. We determine which modules + * will be active during this pass and give them appropriate start_pass calls. + * We also set is_last_pass to indicate whether any more passes will be + * required. + */ + +METHODDEF(void) +prepare_for_pass (j_compress_ptr cinfo) +{ + my_master_ptr master = (my_master_ptr) cinfo->master; + + switch (master->pass_type) { + case main_pass: + /* Initial pass: will collect input data, and do either Huffman + * optimization or data output for the first scan. + */ + select_scan_parameters(cinfo); + per_scan_setup(cinfo); + if (! cinfo->raw_data_in) { + (*cinfo->cconvert->start_pass) (cinfo); + (*cinfo->downsample->start_pass) (cinfo); + (*cinfo->prep->start_pass) (cinfo, JBUF_PASS_THRU); + } + (*cinfo->fdct->start_pass) (cinfo); + (*cinfo->entropy->start_pass) (cinfo, cinfo->optimize_coding); + (*cinfo->coef->start_pass) (cinfo, + (master->total_passes > 1 ? 
+ JBUF_SAVE_AND_PASS : JBUF_PASS_THRU)); + (*cinfo->main->start_pass) (cinfo, JBUF_PASS_THRU); + if (cinfo->optimize_coding) { + /* No immediate data output; postpone writing frame/scan headers */ + master->pub.call_pass_startup = FALSE; + } else { + /* Will write frame/scan headers at first jpeg_write_scanlines call */ + master->pub.call_pass_startup = TRUE; + } + break; +#ifdef ENTROPY_OPT_SUPPORTED + case huff_opt_pass: + /* Do Huffman optimization for a scan after the first one. */ + select_scan_parameters(cinfo); + per_scan_setup(cinfo); + if (cinfo->Ss != 0 || cinfo->Ah == 0) { + (*cinfo->entropy->start_pass) (cinfo, TRUE); + (*cinfo->coef->start_pass) (cinfo, JBUF_CRANK_DEST); + master->pub.call_pass_startup = FALSE; + break; + } + /* Special case: Huffman DC refinement scans need no Huffman table + * and therefore we can skip the optimization pass for them. + */ + master->pass_type = output_pass; + master->pass_number++; + /*FALLTHROUGH*/ +#endif + case output_pass: + /* Do a data-output pass. */ + /* We need not repeat per-scan setup if prior optimization pass did it. */ + if (! cinfo->optimize_coding) { + select_scan_parameters(cinfo); + per_scan_setup(cinfo); + } + (*cinfo->entropy->start_pass) (cinfo, FALSE); + (*cinfo->coef->start_pass) (cinfo, JBUF_CRANK_DEST); + /* We emit frame/scan headers now */ + if (master->scan_number == 0) + (*cinfo->marker->write_frame_header) (cinfo); + (*cinfo->marker->write_scan_header) (cinfo); + master->pub.call_pass_startup = FALSE; + break; + default: + ERREXIT(cinfo, JERR_NOT_COMPILED); + } + + master->pub.is_last_pass = (master->pass_number == master->total_passes-1); + + /* Set up progress monitor's pass info if present */ + if (cinfo->progress != NULL) { + cinfo->progress->completed_passes = master->pass_number; + cinfo->progress->total_passes = master->total_passes; + } +} + + +/* + * Special start-of-pass hook. + * This is called by jpeg_write_scanlines if call_pass_startup is TRUE. + * In single-pass processing, we need this hook because we don't want to + * write frame/scan headers during jpeg_start_compress; we want to let the + * application write COM markers etc. between jpeg_start_compress and the + * jpeg_write_scanlines loop. + * In multi-pass processing, this routine is not used. + */ + +METHODDEF(void) +pass_startup (j_compress_ptr cinfo) +{ + cinfo->master->call_pass_startup = FALSE; /* reset flag so call only once */ + + (*cinfo->marker->write_frame_header) (cinfo); + (*cinfo->marker->write_scan_header) (cinfo); +} + + +/* + * Finish up at end of pass. + */ + +METHODDEF(void) +finish_pass_master (j_compress_ptr cinfo) +{ + my_master_ptr master = (my_master_ptr) cinfo->master; + + /* The entropy coder always needs an end-of-pass call, + * either to analyze statistics or to flush its output buffer. + */ + (*cinfo->entropy->finish_pass) (cinfo); + + /* Update state for next pass */ + switch (master->pass_type) { + case main_pass: + /* next pass is either output of scan 0 (after optimization) + * or output of scan 1 (if no optimization). + */ + master->pass_type = output_pass; + if (! cinfo->optimize_coding) + master->scan_number++; + break; + case huff_opt_pass: + /* next pass is always output of current scan */ + master->pass_type = output_pass; + break; + case output_pass: + /* next pass is either optimization or output of next scan */ + if (cinfo->optimize_coding) + master->pass_type = huff_opt_pass; + master->scan_number++; + break; + } + + master->pass_number++; +} + + +/* + * Initialize master compression control. 
+ */ + +GLOBAL(void) +jinit_c_master_control (j_compress_ptr cinfo, boolean transcode_only) +{ + my_master_ptr master; + + master = (my_master_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_comp_master)); + cinfo->master = (struct jpeg_comp_master *) master; + master->pub.prepare_for_pass = prepare_for_pass; + master->pub.pass_startup = pass_startup; + master->pub.finish_pass = finish_pass_master; + master->pub.is_last_pass = FALSE; + + /* Validate parameters, determine derived values */ + initial_setup(cinfo, transcode_only); + + if (cinfo->scan_info != NULL) { +#ifdef C_MULTISCAN_FILES_SUPPORTED + validate_script(cinfo); + if (cinfo->block_size < DCTSIZE) + reduce_script(cinfo); +#else + ERREXIT(cinfo, JERR_NOT_COMPILED); +#endif + } else { + cinfo->progressive_mode = FALSE; + cinfo->num_scans = 1; + } + + if ((cinfo->progressive_mode || cinfo->block_size < DCTSIZE) && + !cinfo->arith_code) /* TEMPORARY HACK ??? */ + /* assume default tables no good for progressive or downscale mode */ + cinfo->optimize_coding = TRUE; + + /* Initialize my private state */ + if (transcode_only) { + /* no main pass in transcoding */ + if (cinfo->optimize_coding) + master->pass_type = huff_opt_pass; + else + master->pass_type = output_pass; + } else { + /* for normal compression, first pass is always this type: */ + master->pass_type = main_pass; + } + master->scan_number = 0; + master->pass_number = 0; + if (cinfo->optimize_coding) + master->total_passes = cinfo->num_scans * 2; + else + master->total_passes = cinfo->num_scans; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcomapi.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcomapi.c new file mode 100644 index 000000000..9b1fa7568 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcomapi.c @@ -0,0 +1,106 @@ +/* + * jcomapi.c + * + * Copyright (C) 1994-1997, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains application interface routines that are used for both + * compression and decompression. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* + * Abort processing of a JPEG compression or decompression operation, + * but don't destroy the object itself. + * + * For this, we merely clean up all the nonpermanent memory pools. + * Note that temp files (virtual arrays) are not allowed to belong to + * the permanent pool, so we will be able to close all temp files here. + * Closing a data source or destination, if necessary, is the application's + * responsibility. + */ + +GLOBAL(void) +jpeg_abort (j_common_ptr cinfo) +{ + int pool; + + /* Do nothing if called on a not-initialized or destroyed JPEG object. */ + if (cinfo->mem == NULL) + return; + + /* Releasing pools in reverse order might help avoid fragmentation + * with some (brain-damaged) malloc libraries. + */ + for (pool = JPOOL_NUMPOOLS-1; pool > JPOOL_PERMANENT; pool--) { + (*cinfo->mem->free_pool) (cinfo, pool); + } + + /* Reset overall state for possible reuse of object */ + if (cinfo->is_decompressor) { + cinfo->global_state = DSTATE_START; + /* Try to keep application from accessing now-deleted marker list. + * A bit kludgy to do it here, but this is the most central place. 
+ */ + ((j_decompress_ptr) cinfo)->marker_list = NULL; + } else { + cinfo->global_state = CSTATE_START; + } +} + + +/* + * Destruction of a JPEG object. + * + * Everything gets deallocated except the master jpeg_compress_struct itself + * and the error manager struct. Both of these are supplied by the application + * and must be freed, if necessary, by the application. (Often they are on + * the stack and so don't need to be freed anyway.) + * Closing a data source or destination, if necessary, is the application's + * responsibility. + */ + +GLOBAL(void) +jpeg_destroy (j_common_ptr cinfo) +{ + /* We need only tell the memory manager to release everything. */ + /* NB: mem pointer is NULL if memory mgr failed to initialize. */ + if (cinfo->mem != NULL) + (*cinfo->mem->self_destruct) (cinfo); + cinfo->mem = NULL; /* be safe if jpeg_destroy is called twice */ + cinfo->global_state = 0; /* mark it destroyed */ +} + + +/* + * Convenience routines for allocating quantization and Huffman tables. + * (Would jutils.c be a more reasonable place to put these?) + */ + +GLOBAL(JQUANT_TBL *) +jpeg_alloc_quant_table (j_common_ptr cinfo) +{ + JQUANT_TBL *tbl; + + tbl = (JQUANT_TBL *) + (*cinfo->mem->alloc_small) (cinfo, JPOOL_PERMANENT, SIZEOF(JQUANT_TBL)); + tbl->sent_table = FALSE; /* make sure this is false in any new table */ + return tbl; +} + + +GLOBAL(JHUFF_TBL *) +jpeg_alloc_huff_table (j_common_ptr cinfo) +{ + JHUFF_TBL *tbl; + + tbl = (JHUFF_TBL *) + (*cinfo->mem->alloc_small) (cinfo, JPOOL_PERMANENT, SIZEOF(JHUFF_TBL)); + tbl->sent_table = FALSE; /* make sure this is false in any new table */ + return tbl; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcparam.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcparam.c new file mode 100644 index 000000000..c5e85dda5 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcparam.c @@ -0,0 +1,632 @@ +/* + * jcparam.c + * + * Copyright (C) 1991-1998, Thomas G. Lane. + * Modified 2003-2008 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains optional default-setting code for the JPEG compressor. + * Applications do not have to use this file, but those that don't use it + * must know a lot more about the innards of the JPEG code. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* + * Quantization table setup routines + */ + +GLOBAL(void) +jpeg_add_quant_table (j_compress_ptr cinfo, int which_tbl, + const unsigned int *basic_table, + int scale_factor, boolean force_baseline) +/* Define a quantization table equal to the basic_table times + * a scale factor (given as a percentage). + * If force_baseline is TRUE, the computed quantization table entries + * are limited to 1..255 for JPEG baseline compatibility. + */ +{ + JQUANT_TBL ** qtblptr; + int i; + long temp; + + /* Safety check to ensure start_compress not called yet. 
*/ + if (cinfo->global_state != CSTATE_START) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + if (which_tbl < 0 || which_tbl >= NUM_QUANT_TBLS) + ERREXIT1(cinfo, JERR_DQT_INDEX, which_tbl); + + qtblptr = & cinfo->quant_tbl_ptrs[which_tbl]; + + if (*qtblptr == NULL) + *qtblptr = jpeg_alloc_quant_table((j_common_ptr) cinfo); + + for (i = 0; i < DCTSIZE2; i++) { + temp = ((long) basic_table[i] * scale_factor + 50L) / 100L; + /* limit the values to the valid range */ + if (temp <= 0L) temp = 1L; + if (temp > 32767L) temp = 32767L; /* max quantizer needed for 12 bits */ + if (force_baseline && temp > 255L) + temp = 255L; /* limit to baseline range if requested */ + (*qtblptr)->quantval[i] = (UINT16) temp; + } + + /* Initialize sent_table FALSE so table will be written to JPEG file. */ + (*qtblptr)->sent_table = FALSE; +} + + +/* These are the sample quantization tables given in JPEG spec section K.1. + * The spec says that the values given produce "good" quality, and + * when divided by 2, "very good" quality. + */ +static const unsigned int std_luminance_quant_tbl[DCTSIZE2] = { + 16, 11, 10, 16, 24, 40, 51, 61, + 12, 12, 14, 19, 26, 58, 60, 55, + 14, 13, 16, 24, 40, 57, 69, 56, + 14, 17, 22, 29, 51, 87, 80, 62, + 18, 22, 37, 56, 68, 109, 103, 77, + 24, 35, 55, 64, 81, 104, 113, 92, + 49, 64, 78, 87, 103, 121, 120, 101, + 72, 92, 95, 98, 112, 100, 103, 99 +}; +static const unsigned int std_chrominance_quant_tbl[DCTSIZE2] = { + 17, 18, 24, 47, 99, 99, 99, 99, + 18, 21, 26, 66, 99, 99, 99, 99, + 24, 26, 56, 99, 99, 99, 99, 99, + 47, 66, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99 +}; + + +GLOBAL(void) +jpeg_default_qtables (j_compress_ptr cinfo, boolean force_baseline) +/* Set or change the 'quality' (quantization) setting, using default tables + * and straight percentage-scaling quality scales. + * This entry point allows different scalings for luminance and chrominance. + */ +{ + /* Set up two quantization tables using the specified scaling */ + jpeg_add_quant_table(cinfo, 0, std_luminance_quant_tbl, + cinfo->q_scale_factor[0], force_baseline); + jpeg_add_quant_table(cinfo, 1, std_chrominance_quant_tbl, + cinfo->q_scale_factor[1], force_baseline); +} + + +GLOBAL(void) +jpeg_set_linear_quality (j_compress_ptr cinfo, int scale_factor, + boolean force_baseline) +/* Set or change the 'quality' (quantization) setting, using default tables + * and a straight percentage-scaling quality scale. In most cases it's better + * to use jpeg_set_quality (below); this entry point is provided for + * applications that insist on a linear percentage scaling. + */ +{ + /* Set up two quantization tables using the specified scaling */ + jpeg_add_quant_table(cinfo, 0, std_luminance_quant_tbl, + scale_factor, force_baseline); + jpeg_add_quant_table(cinfo, 1, std_chrominance_quant_tbl, + scale_factor, force_baseline); +} + + +GLOBAL(int) +jpeg_quality_scaling (int quality) +/* Convert a user-specified quality rating to a percentage scaling factor + * for an underlying quantization table, using our recommended scaling curve. + * The input 'quality' factor should be 0 (terrible) to 100 (very good). + */ +{ + /* Safety limit on quality factor. Convert 0 to 1 to avoid zero divide. */ + if (quality <= 0) quality = 1; + if (quality > 100) quality = 100; + + /* The basic table is used as-is (scaling 100) for a quality of 50. 
+ * Qualities 50..100 are converted to scaling percentage 200 - 2*Q; + * note that at Q=100 the scaling is 0, which will cause jpeg_add_quant_table + * to make all the table entries 1 (hence, minimum quantization loss). + * Qualities 1..50 are converted to scaling percentage 5000/Q. + */ + if (quality < 50) + quality = 5000 / quality; + else + quality = 200 - quality*2; + + return quality; +} + + +GLOBAL(void) +jpeg_set_quality (j_compress_ptr cinfo, int quality, boolean force_baseline) +/* Set or change the 'quality' (quantization) setting, using default tables. + * This is the standard quality-adjusting entry point for typical user + * interfaces; only those who want detailed control over quantization tables + * would use the preceding three routines directly. + */ +{ + /* Convert user 0-100 rating to percentage scaling */ + quality = jpeg_quality_scaling(quality); + + /* Set up standard quality tables */ + jpeg_set_linear_quality(cinfo, quality, force_baseline); +} + + +/* + * Huffman table setup routines + */ + +LOCAL(void) +add_huff_table (j_compress_ptr cinfo, + JHUFF_TBL **htblptr, const UINT8 *bits, const UINT8 *val) +/* Define a Huffman table */ +{ + int nsymbols, len; + + if (*htblptr == NULL) + *htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo); + + /* Copy the number-of-symbols-of-each-code-length counts */ + MEMCOPY((*htblptr)->bits, bits, SIZEOF((*htblptr)->bits)); + + /* Validate the counts. We do this here mainly so we can copy the right + * number of symbols from the val[] array, without risking marching off + * the end of memory. jchuff.c will do a more thorough test later. + */ + nsymbols = 0; + for (len = 1; len <= 16; len++) + nsymbols += bits[len]; + if (nsymbols < 1 || nsymbols > 256) + ERREXIT(cinfo, JERR_BAD_HUFF_TABLE); + + MEMCOPY((*htblptr)->huffval, val, nsymbols * SIZEOF(UINT8)); + + /* Initialize sent_table FALSE so table will be written to JPEG file. */ + (*htblptr)->sent_table = FALSE; +} + + +LOCAL(void) +std_huff_tables (j_compress_ptr cinfo) +/* Set up the standard Huffman tables (cf. JPEG standard section K.3) */ +/* IMPORTANT: these are only valid for 8-bit data precision! 
*/ +{ + static const UINT8 bits_dc_luminance[17] = + { /* 0-base */ 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 }; + static const UINT8 val_dc_luminance[] = + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; + + static const UINT8 bits_dc_chrominance[17] = + { /* 0-base */ 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }; + static const UINT8 val_dc_chrominance[] = + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; + + static const UINT8 bits_ac_luminance[17] = + { /* 0-base */ 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d }; + static const UINT8 val_ac_luminance[] = + { 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, + 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, + 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08, + 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, + 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, + 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, + 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, + 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, + 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, + 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, + 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, + 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, + 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, + 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2, + 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, + 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, + 0xf9, 0xfa }; + + static const UINT8 bits_ac_chrominance[17] = + { /* 0-base */ 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 }; + static const UINT8 val_ac_chrominance[] = + { 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, + 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, + 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, + 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, + 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34, + 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26, + 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, + 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, + 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, + 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, + 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, + 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, + 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, + 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, + 0xf9, 0xfa }; + + add_huff_table(cinfo, &cinfo->dc_huff_tbl_ptrs[0], + bits_dc_luminance, val_dc_luminance); + add_huff_table(cinfo, &cinfo->ac_huff_tbl_ptrs[0], + bits_ac_luminance, val_ac_luminance); + add_huff_table(cinfo, &cinfo->dc_huff_tbl_ptrs[1], + bits_dc_chrominance, val_dc_chrominance); + add_huff_table(cinfo, &cinfo->ac_huff_tbl_ptrs[1], + bits_ac_chrominance, val_ac_chrominance); +} + + +/* + * Default parameter setup for compression. + * + * Applications that don't choose to use this routine must do their + * own setup of all these parameters. Alternately, you can call this + * to establish defaults and then alter parameters selectively. 
This + * is the recommended approach since, if we add any new parameters, + * your code will still work (they'll be set to reasonable defaults). + */ + +GLOBAL(void) +jpeg_set_defaults (j_compress_ptr cinfo) +{ + int i; + + /* Safety check to ensure start_compress not called yet. */ + if (cinfo->global_state != CSTATE_START) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + /* Allocate comp_info array large enough for maximum component count. + * Array is made permanent in case application wants to compress + * multiple images at same param settings. + */ + if (cinfo->comp_info == NULL) + cinfo->comp_info = (jpeg_component_info *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + MAX_COMPONENTS * SIZEOF(jpeg_component_info)); + + /* Initialize everything not dependent on the color space */ + + cinfo->scale_num = 1; /* 1:1 scaling */ + cinfo->scale_denom = 1; + cinfo->data_precision = BITS_IN_JSAMPLE; + /* Set up two quantization tables using default quality of 75 */ + jpeg_set_quality(cinfo, 75, TRUE); + /* Set up two Huffman tables */ + std_huff_tables(cinfo); + + /* Initialize default arithmetic coding conditioning */ + for (i = 0; i < NUM_ARITH_TBLS; i++) { + cinfo->arith_dc_L[i] = 0; + cinfo->arith_dc_U[i] = 1; + cinfo->arith_ac_K[i] = 5; + } + + /* Default is no multiple-scan output */ + cinfo->scan_info = NULL; + cinfo->num_scans = 0; + + /* Expect normal source image, not raw downsampled data */ + cinfo->raw_data_in = FALSE; + + /* Use Huffman coding, not arithmetic coding, by default */ + cinfo->arith_code = FALSE; + + /* By default, don't do extra passes to optimize entropy coding */ + cinfo->optimize_coding = FALSE; + /* The standard Huffman tables are only valid for 8-bit data precision. + * If the precision is higher, force optimization on so that usable + * tables will be computed. This test can be removed if default tables + * are supplied that are valid for the desired precision. + */ + if (cinfo->data_precision > 8) + cinfo->optimize_coding = TRUE; + + /* By default, use the simpler non-cosited sampling alignment */ + cinfo->CCIR601_sampling = FALSE; + + /* By default, apply fancy downsampling */ + cinfo->do_fancy_downsampling = TRUE; + + /* No input smoothing */ + cinfo->smoothing_factor = 0; + + /* DCT algorithm preference */ + cinfo->dct_method = JDCT_DEFAULT; + + /* No restart markers */ + cinfo->restart_interval = 0; + cinfo->restart_in_rows = 0; + + /* Fill in default JFIF marker parameters. Note that whether the marker + * will actually be written is determined by jpeg_set_colorspace. + * + * By default, the library emits JFIF version code 1.01. + * An application that wants to emit JFIF 1.02 extension markers should set + * JFIF_minor_version to 2. We could probably get away with just defaulting + * to 1.02, but there may still be some decoders in use that will complain + * about that; saying 1.01 should minimize compatibility problems. + */ + cinfo->JFIF_major_version = 1; /* Default JFIF version = 1.01 */ + cinfo->JFIF_minor_version = 1; + cinfo->density_unit = 0; /* Pixel size is unknown by default */ + cinfo->X_density = 1; /* Pixel aspect ratio is square by default */ + cinfo->Y_density = 1; + + /* Choose JPEG colorspace based on input space, set defaults accordingly */ + + jpeg_default_colorspace(cinfo); +} + + +/* + * Select an appropriate JPEG colorspace for in_color_space. 
+ */ + +GLOBAL(void) +jpeg_default_colorspace (j_compress_ptr cinfo) +{ + switch (cinfo->in_color_space) { + case JCS_GRAYSCALE: + jpeg_set_colorspace(cinfo, JCS_GRAYSCALE); + break; + case JCS_RGB: + jpeg_set_colorspace(cinfo, JCS_YCbCr); + break; + case JCS_YCbCr: + jpeg_set_colorspace(cinfo, JCS_YCbCr); + break; + case JCS_CMYK: + jpeg_set_colorspace(cinfo, JCS_CMYK); /* By default, no translation */ + break; + case JCS_YCCK: + jpeg_set_colorspace(cinfo, JCS_YCCK); + break; + case JCS_UNKNOWN: + jpeg_set_colorspace(cinfo, JCS_UNKNOWN); + break; + default: + ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); + } +} + + +/* + * Set the JPEG colorspace, and choose colorspace-dependent default values. + */ + +GLOBAL(void) +jpeg_set_colorspace (j_compress_ptr cinfo, J_COLOR_SPACE colorspace) +{ + jpeg_component_info * compptr; + int ci; + +#define SET_COMP(index,id,hsamp,vsamp,quant,dctbl,actbl) \ + (compptr = &cinfo->comp_info[index], \ + compptr->component_id = (id), \ + compptr->h_samp_factor = (hsamp), \ + compptr->v_samp_factor = (vsamp), \ + compptr->quant_tbl_no = (quant), \ + compptr->dc_tbl_no = (dctbl), \ + compptr->ac_tbl_no = (actbl) ) + + /* Safety check to ensure start_compress not called yet. */ + if (cinfo->global_state != CSTATE_START) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + /* For all colorspaces, we use Q and Huff tables 0 for luminance components, + * tables 1 for chrominance components. + */ + + cinfo->jpeg_color_space = colorspace; + + cinfo->write_JFIF_header = FALSE; /* No marker for non-JFIF colorspaces */ + cinfo->write_Adobe_marker = FALSE; /* write no Adobe marker by default */ + + switch (colorspace) { + case JCS_GRAYSCALE: + cinfo->write_JFIF_header = TRUE; /* Write a JFIF marker */ + cinfo->num_components = 1; + /* JFIF specifies component ID 1 */ + SET_COMP(0, 1, 1,1, 0, 0,0); + break; + case JCS_RGB: + cinfo->write_Adobe_marker = TRUE; /* write Adobe marker to flag RGB */ + cinfo->num_components = 3; + SET_COMP(0, 0x52 /* 'R' */, 1,1, 0, 0,0); + SET_COMP(1, 0x47 /* 'G' */, 1,1, 0, 0,0); + SET_COMP(2, 0x42 /* 'B' */, 1,1, 0, 0,0); + break; + case JCS_YCbCr: + cinfo->write_JFIF_header = TRUE; /* Write a JFIF marker */ + cinfo->num_components = 3; + /* JFIF specifies component IDs 1,2,3 */ + /* We default to 2x2 subsamples of chrominance */ + SET_COMP(0, 1, 2,2, 0, 0,0); + SET_COMP(1, 2, 1,1, 1, 1,1); + SET_COMP(2, 3, 1,1, 1, 1,1); + break; + case JCS_CMYK: + cinfo->write_Adobe_marker = TRUE; /* write Adobe marker to flag CMYK */ + cinfo->num_components = 4; + SET_COMP(0, 0x43 /* 'C' */, 1,1, 0, 0,0); + SET_COMP(1, 0x4D /* 'M' */, 1,1, 0, 0,0); + SET_COMP(2, 0x59 /* 'Y' */, 1,1, 0, 0,0); + SET_COMP(3, 0x4B /* 'K' */, 1,1, 0, 0,0); + break; + case JCS_YCCK: + cinfo->write_Adobe_marker = TRUE; /* write Adobe marker to flag YCCK */ + cinfo->num_components = 4; + SET_COMP(0, 1, 2,2, 0, 0,0); + SET_COMP(1, 2, 1,1, 1, 1,1); + SET_COMP(2, 3, 1,1, 1, 1,1); + SET_COMP(3, 4, 2,2, 0, 0,0); + break; + case JCS_UNKNOWN: + cinfo->num_components = cinfo->input_components; + if (cinfo->num_components < 1 || cinfo->num_components > MAX_COMPONENTS) + ERREXIT2(cinfo, JERR_COMPONENT_COUNT, cinfo->num_components, + MAX_COMPONENTS); + for (ci = 0; ci < cinfo->num_components; ci++) { + SET_COMP(ci, ci, 1,1, 0, 0,0); + } + break; + default: + ERREXIT(cinfo, JERR_BAD_J_COLORSPACE); + } +} + + +#ifdef C_PROGRESSIVE_SUPPORTED + +LOCAL(jpeg_scan_info *) +fill_a_scan (jpeg_scan_info * scanptr, int ci, + int Ss, int Se, int Ah, int Al) +/* Support routine: generate one scan for 
specified component */ +{ + scanptr->comps_in_scan = 1; + scanptr->component_index[0] = ci; + scanptr->Ss = Ss; + scanptr->Se = Se; + scanptr->Ah = Ah; + scanptr->Al = Al; + scanptr++; + return scanptr; +} + +LOCAL(jpeg_scan_info *) +fill_scans (jpeg_scan_info * scanptr, int ncomps, + int Ss, int Se, int Ah, int Al) +/* Support routine: generate one scan for each component */ +{ + int ci; + + for (ci = 0; ci < ncomps; ci++) { + scanptr->comps_in_scan = 1; + scanptr->component_index[0] = ci; + scanptr->Ss = Ss; + scanptr->Se = Se; + scanptr->Ah = Ah; + scanptr->Al = Al; + scanptr++; + } + return scanptr; +} + +LOCAL(jpeg_scan_info *) +fill_dc_scans (jpeg_scan_info * scanptr, int ncomps, int Ah, int Al) +/* Support routine: generate interleaved DC scan if possible, else N scans */ +{ + int ci; + + if (ncomps <= MAX_COMPS_IN_SCAN) { + /* Single interleaved DC scan */ + scanptr->comps_in_scan = ncomps; + for (ci = 0; ci < ncomps; ci++) + scanptr->component_index[ci] = ci; + scanptr->Ss = scanptr->Se = 0; + scanptr->Ah = Ah; + scanptr->Al = Al; + scanptr++; + } else { + /* Noninterleaved DC scan for each component */ + scanptr = fill_scans(scanptr, ncomps, 0, 0, Ah, Al); + } + return scanptr; +} + + +/* + * Create a recommended progressive-JPEG script. + * cinfo->num_components and cinfo->jpeg_color_space must be correct. + */ + +GLOBAL(void) +jpeg_simple_progression (j_compress_ptr cinfo) +{ + int ncomps = cinfo->num_components; + int nscans; + jpeg_scan_info * scanptr; + + /* Safety check to ensure start_compress not called yet. */ + if (cinfo->global_state != CSTATE_START) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + /* Figure space needed for script. Calculation must match code below! */ + if (ncomps == 3 && cinfo->jpeg_color_space == JCS_YCbCr) { + /* Custom script for YCbCr color images. */ + nscans = 10; + } else { + /* All-purpose script for other color spaces. */ + if (ncomps > MAX_COMPS_IN_SCAN) + nscans = 6 * ncomps; /* 2 DC + 4 AC scans per component */ + else + nscans = 2 + 4 * ncomps; /* 2 DC scans; 4 AC scans per component */ + } + + /* Allocate space for script. + * We need to put it in the permanent pool in case the application performs + * multiple compressions without changing the settings. To avoid a memory + * leak if jpeg_simple_progression is called repeatedly for the same JPEG + * object, we try to re-use previously allocated space, and we allocate + * enough space to handle YCbCr even if initially asked for grayscale. + */ + if (cinfo->script_space == NULL || cinfo->script_space_size < nscans) { + cinfo->script_space_size = MAX(nscans, 10); + cinfo->script_space = (jpeg_scan_info *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + cinfo->script_space_size * SIZEOF(jpeg_scan_info)); + } + scanptr = cinfo->script_space; + cinfo->scan_info = scanptr; + cinfo->num_scans = nscans; + + if (ncomps == 3 && cinfo->jpeg_color_space == JCS_YCbCr) { + /* Custom script for YCbCr color images. 
*/ + /* Initial DC scan */ + scanptr = fill_dc_scans(scanptr, ncomps, 0, 1); + /* Initial AC scan: get some luma data out in a hurry */ + scanptr = fill_a_scan(scanptr, 0, 1, 5, 0, 2); + /* Chroma data is too small to be worth expending many scans on */ + scanptr = fill_a_scan(scanptr, 2, 1, 63, 0, 1); + scanptr = fill_a_scan(scanptr, 1, 1, 63, 0, 1); + /* Complete spectral selection for luma AC */ + scanptr = fill_a_scan(scanptr, 0, 6, 63, 0, 2); + /* Refine next bit of luma AC */ + scanptr = fill_a_scan(scanptr, 0, 1, 63, 2, 1); + /* Finish DC successive approximation */ + scanptr = fill_dc_scans(scanptr, ncomps, 1, 0); + /* Finish AC successive approximation */ + scanptr = fill_a_scan(scanptr, 2, 1, 63, 1, 0); + scanptr = fill_a_scan(scanptr, 1, 1, 63, 1, 0); + /* Luma bottom bit comes last since it's usually largest scan */ + scanptr = fill_a_scan(scanptr, 0, 1, 63, 1, 0); + } else { + /* All-purpose script for other color spaces. */ + /* Successive approximation first pass */ + scanptr = fill_dc_scans(scanptr, ncomps, 0, 1); + scanptr = fill_scans(scanptr, ncomps, 1, 5, 0, 2); + scanptr = fill_scans(scanptr, ncomps, 6, 63, 0, 2); + /* Successive approximation second pass */ + scanptr = fill_scans(scanptr, ncomps, 1, 63, 2, 1); + /* Successive approximation final pass */ + scanptr = fill_dc_scans(scanptr, ncomps, 1, 0); + scanptr = fill_scans(scanptr, ncomps, 1, 63, 1, 0); + } +} + +#endif /* C_PROGRESSIVE_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcprepct.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcprepct.c new file mode 100644 index 000000000..be44cc4b4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcprepct.c @@ -0,0 +1,358 @@ +/* + * jcprepct.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains the compression preprocessing controller. + * This controller manages the color conversion, downsampling, + * and edge expansion steps. + * + * Most of the complexity here is associated with buffering input rows + * as required by the downsampler. See the comments at the head of + * jcsample.c for the downsampler's needs. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* At present, jcsample.c can request context rows only for smoothing. + * In the future, we might also need context rows for CCIR601 sampling + * or other more-complex downsampling procedures. The code to support + * context rows should be compiled only if needed. + */ +#ifdef INPUT_SMOOTHING_SUPPORTED +#define CONTEXT_ROWS_SUPPORTED +#endif + + +/* + * For the simple (no-context-row) case, we just need to buffer one + * row group's worth of pixels for the downsampling step. At the bottom of + * the image, we pad to a full row group by replicating the last pixel row. + * The downsampler's last output row is then replicated if needed to pad + * out to a full iMCU row. + * + * When providing context rows, we must buffer three row groups' worth of + * pixels. Three row groups are physically allocated, but the row pointer + * arrays are made five row groups high, with the extra pointers above and + * below "wrapping around" to point to the last and first real row groups. + * This allows the downsampler to access the proper context rows. 
+ * At the top and bottom of the image, we create dummy context rows by + * copying the first or last real pixel row. This copying could be avoided + * by pointer hacking as is done in jdmainct.c, but it doesn't seem worth the + * trouble on the compression side. + */ + + +/* Private buffer controller object */ + +typedef struct { + struct jpeg_c_prep_controller pub; /* public fields */ + + /* Downsampling input buffer. This buffer holds color-converted data + * until we have enough to do a downsample step. + */ + JSAMPARRAY color_buf[MAX_COMPONENTS]; + + JDIMENSION rows_to_go; /* counts rows remaining in source image */ + int next_buf_row; /* index of next row to store in color_buf */ + +#ifdef CONTEXT_ROWS_SUPPORTED /* only needed for context case */ + int this_row_group; /* starting row index of group to process */ + int next_buf_stop; /* downsample when we reach this index */ +#endif +} my_prep_controller; + +typedef my_prep_controller * my_prep_ptr; + + +/* + * Initialize for a processing pass. + */ + +METHODDEF(void) +start_pass_prep (j_compress_ptr cinfo, J_BUF_MODE pass_mode) +{ + my_prep_ptr prep = (my_prep_ptr) cinfo->prep; + + if (pass_mode != JBUF_PASS_THRU) + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + + /* Initialize total-height counter for detecting bottom of image */ + prep->rows_to_go = cinfo->image_height; + /* Mark the conversion buffer empty */ + prep->next_buf_row = 0; +#ifdef CONTEXT_ROWS_SUPPORTED + /* Preset additional state variables for context mode. + * These aren't used in non-context mode, so we needn't test which mode. + */ + prep->this_row_group = 0; + /* Set next_buf_stop to stop after two row groups have been read in. */ + prep->next_buf_stop = 2 * cinfo->max_v_samp_factor; +#endif +} + + +/* + * Expand an image vertically from height input_rows to height output_rows, + * by duplicating the bottom row. + */ + +LOCAL(void) +expand_bottom_edge (JSAMPARRAY image_data, JDIMENSION num_cols, + int input_rows, int output_rows) +{ + register int row; + + for (row = input_rows; row < output_rows; row++) { + jcopy_sample_rows(image_data, input_rows-1, image_data, row, + 1, num_cols); + } +} + + +/* + * Process some data in the simple no-context case. + * + * Preprocessor output data is counted in "row groups". A row group + * is defined to be v_samp_factor sample rows of each component. + * Downsampling will produce this much data from each max_v_samp_factor + * input rows. + */ + +METHODDEF(void) +pre_process_data (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JDIMENSION *in_row_ctr, + JDIMENSION in_rows_avail, + JSAMPIMAGE output_buf, JDIMENSION *out_row_group_ctr, + JDIMENSION out_row_groups_avail) +{ + my_prep_ptr prep = (my_prep_ptr) cinfo->prep; + int numrows, ci; + JDIMENSION inrows; + jpeg_component_info * compptr; + + while (*in_row_ctr < in_rows_avail && + *out_row_group_ctr < out_row_groups_avail) { + /* Do color conversion to fill the conversion buffer. */ + inrows = in_rows_avail - *in_row_ctr; + numrows = cinfo->max_v_samp_factor - prep->next_buf_row; + numrows = (int) MIN((JDIMENSION) numrows, inrows); + (*cinfo->cconvert->color_convert) (cinfo, input_buf + *in_row_ctr, + prep->color_buf, + (JDIMENSION) prep->next_buf_row, + numrows); + *in_row_ctr += numrows; + prep->next_buf_row += numrows; + prep->rows_to_go -= numrows; + /* If at bottom of image, pad to fill the conversion buffer. 
*/ + if (prep->rows_to_go == 0 && + prep->next_buf_row < cinfo->max_v_samp_factor) { + for (ci = 0; ci < cinfo->num_components; ci++) { + expand_bottom_edge(prep->color_buf[ci], cinfo->image_width, + prep->next_buf_row, cinfo->max_v_samp_factor); + } + prep->next_buf_row = cinfo->max_v_samp_factor; + } + /* If we've filled the conversion buffer, empty it. */ + if (prep->next_buf_row == cinfo->max_v_samp_factor) { + (*cinfo->downsample->downsample) (cinfo, + prep->color_buf, (JDIMENSION) 0, + output_buf, *out_row_group_ctr); + prep->next_buf_row = 0; + (*out_row_group_ctr)++; + } + /* If at bottom of image, pad the output to a full iMCU height. + * Note we assume the caller is providing a one-iMCU-height output buffer! + */ + if (prep->rows_to_go == 0 && + *out_row_group_ctr < out_row_groups_avail) { + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + numrows = (compptr->v_samp_factor * compptr->DCT_v_scaled_size) / + cinfo->min_DCT_v_scaled_size; + expand_bottom_edge(output_buf[ci], + compptr->width_in_blocks * compptr->DCT_h_scaled_size, + (int) (*out_row_group_ctr * numrows), + (int) (out_row_groups_avail * numrows)); + } + *out_row_group_ctr = out_row_groups_avail; + break; /* can exit outer loop without test */ + } + } +} + + +#ifdef CONTEXT_ROWS_SUPPORTED + +/* + * Process some data in the context case. + */ + +METHODDEF(void) +pre_process_context (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JDIMENSION *in_row_ctr, + JDIMENSION in_rows_avail, + JSAMPIMAGE output_buf, JDIMENSION *out_row_group_ctr, + JDIMENSION out_row_groups_avail) +{ + my_prep_ptr prep = (my_prep_ptr) cinfo->prep; + int numrows, ci; + int buf_height = cinfo->max_v_samp_factor * 3; + JDIMENSION inrows; + + while (*out_row_group_ctr < out_row_groups_avail) { + if (*in_row_ctr < in_rows_avail) { + /* Do color conversion to fill the conversion buffer. */ + inrows = in_rows_avail - *in_row_ctr; + numrows = prep->next_buf_stop - prep->next_buf_row; + numrows = (int) MIN((JDIMENSION) numrows, inrows); + (*cinfo->cconvert->color_convert) (cinfo, input_buf + *in_row_ctr, + prep->color_buf, + (JDIMENSION) prep->next_buf_row, + numrows); + /* Pad at top of image, if first time through */ + if (prep->rows_to_go == cinfo->image_height) { + for (ci = 0; ci < cinfo->num_components; ci++) { + int row; + for (row = 1; row <= cinfo->max_v_samp_factor; row++) { + jcopy_sample_rows(prep->color_buf[ci], 0, + prep->color_buf[ci], -row, + 1, cinfo->image_width); + } + } + } + *in_row_ctr += numrows; + prep->next_buf_row += numrows; + prep->rows_to_go -= numrows; + } else { + /* Return for more data, unless we are at the bottom of the image. */ + if (prep->rows_to_go != 0) + break; + /* When at bottom of image, pad to fill the conversion buffer. */ + if (prep->next_buf_row < prep->next_buf_stop) { + for (ci = 0; ci < cinfo->num_components; ci++) { + expand_bottom_edge(prep->color_buf[ci], cinfo->image_width, + prep->next_buf_row, prep->next_buf_stop); + } + prep->next_buf_row = prep->next_buf_stop; + } + } + /* If we've gotten enough data, downsample a row group. */ + if (prep->next_buf_row == prep->next_buf_stop) { + (*cinfo->downsample->downsample) (cinfo, + prep->color_buf, + (JDIMENSION) prep->this_row_group, + output_buf, *out_row_group_ctr); + (*out_row_group_ctr)++; + /* Advance pointers with wraparound as necessary. 
*/ + prep->this_row_group += cinfo->max_v_samp_factor; + if (prep->this_row_group >= buf_height) + prep->this_row_group = 0; + if (prep->next_buf_row >= buf_height) + prep->next_buf_row = 0; + prep->next_buf_stop = prep->next_buf_row + cinfo->max_v_samp_factor; + } + } +} + + +/* + * Create the wrapped-around downsampling input buffer needed for context mode. + */ + +LOCAL(void) +create_context_buffer (j_compress_ptr cinfo) +{ + my_prep_ptr prep = (my_prep_ptr) cinfo->prep; + int rgroup_height = cinfo->max_v_samp_factor; + int ci, i; + jpeg_component_info * compptr; + JSAMPARRAY true_buffer, fake_buffer; + + /* Grab enough space for fake row pointers for all the components; + * we need five row groups' worth of pointers for each component. + */ + fake_buffer = (JSAMPARRAY) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + (cinfo->num_components * 5 * rgroup_height) * + SIZEOF(JSAMPROW)); + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Allocate the actual buffer space (3 row groups) for this component. + * We make the buffer wide enough to allow the downsampler to edge-expand + * horizontally within the buffer, if it so chooses. + */ + true_buffer = (*cinfo->mem->alloc_sarray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + (JDIMENSION) (((long) compptr->width_in_blocks * + cinfo->min_DCT_h_scaled_size * + cinfo->max_h_samp_factor) / compptr->h_samp_factor), + (JDIMENSION) (3 * rgroup_height)); + /* Copy true buffer row pointers into the middle of the fake row array */ + MEMCOPY(fake_buffer + rgroup_height, true_buffer, + 3 * rgroup_height * SIZEOF(JSAMPROW)); + /* Fill in the above and below wraparound pointers */ + for (i = 0; i < rgroup_height; i++) { + fake_buffer[i] = true_buffer[2 * rgroup_height + i]; + fake_buffer[4 * rgroup_height + i] = true_buffer[i]; + } + prep->color_buf[ci] = fake_buffer + rgroup_height; + fake_buffer += 5 * rgroup_height; /* point to space for next component */ + } +} + +#endif /* CONTEXT_ROWS_SUPPORTED */ + + +/* + * Initialize preprocessing controller. + */ + +GLOBAL(void) +jinit_c_prep_controller (j_compress_ptr cinfo, boolean need_full_buffer) +{ + my_prep_ptr prep; + int ci; + jpeg_component_info * compptr; + + if (need_full_buffer) /* safety check */ + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + + prep = (my_prep_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_prep_controller)); + cinfo->prep = (struct jpeg_c_prep_controller *) prep; + prep->pub.start_pass = start_pass_prep; + + /* Allocate the color conversion buffer. + * We make the buffer wide enough to allow the downsampler to edge-expand + * horizontally within the buffer, if it so chooses. 
+ */ + if (cinfo->downsample->need_context_rows) { + /* Set up to provide context rows */ +#ifdef CONTEXT_ROWS_SUPPORTED + prep->pub.pre_process_data = pre_process_context; + create_context_buffer(cinfo); +#else + ERREXIT(cinfo, JERR_NOT_COMPILED); +#endif + } else { + /* No context, just make it tall enough for one row group */ + prep->pub.pre_process_data = pre_process_data; + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + prep->color_buf[ci] = (*cinfo->mem->alloc_sarray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + (JDIMENSION) (((long) compptr->width_in_blocks * + cinfo->min_DCT_h_scaled_size * + cinfo->max_h_samp_factor) / compptr->h_samp_factor), + (JDIMENSION) cinfo->max_v_samp_factor); + } + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcsample.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcsample.c new file mode 100644 index 000000000..4d36f85f3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jcsample.c @@ -0,0 +1,545 @@ +/* + * jcsample.c + * + * Copyright (C) 1991-1996, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains downsampling routines. + * + * Downsampling input data is counted in "row groups". A row group + * is defined to be max_v_samp_factor pixel rows of each component, + * from which the downsampler produces v_samp_factor sample rows. + * A single row group is processed in each call to the downsampler module. + * + * The downsampler is responsible for edge-expansion of its output data + * to fill an integral number of DCT blocks horizontally. The source buffer + * may be modified if it is helpful for this purpose (the source buffer is + * allocated wide enough to correspond to the desired output width). + * The caller (the prep controller) is responsible for vertical padding. + * + * The downsampler may request "context rows" by setting need_context_rows + * during startup. In this case, the input arrays will contain at least + * one row group's worth of pixels above and below the passed-in data; + * the caller will create dummy rows at image top and bottom by replicating + * the first or last real pixel row. + * + * An excellent reference for image resampling is + * Digital Image Warping, George Wolberg, 1990. + * Pub. by IEEE Computer Society Press, Los Alamitos, CA. ISBN 0-8186-8944-7. + * + * The downsampling algorithm used here is a simple average of the source + * pixels covered by the output pixel. The hi-falutin sampling literature + * refers to this as a "box filter". In general the characteristics of a box + * filter are not very good, but for the specific cases we normally use (1:1 + * and 2:1 ratios) the box is equivalent to a "triangle filter" which is not + * nearly so bad. If you intend to use other sampling ratios, you'd be well + * advised to improve this code. + * + * A simple input-smoothing capability is provided. This is mainly intended + * for cleaning up color-dithered GIF input files (if you find it inadequate, + * we suggest using an external filtering program such as pnmconvol). When + * enabled, each input pixel P is replaced by a weighted sum of itself and its + * eight neighbors. P's weight is 1-8*SF and each neighbor's weight is SF, + * where SF = (smoothing_factor / 1024). + * Currently, smoothing is only supported for 2h2v sampling factors. 
+ */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Pointer to routine to downsample a single component */ +typedef JMETHOD(void, downsample1_ptr, + (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY output_data)); + +/* Private subobject */ + +typedef struct { + struct jpeg_downsampler pub; /* public fields */ + + /* Downsampling method pointers, one per component */ + downsample1_ptr methods[MAX_COMPONENTS]; + + /* Height of an output row group for each component. */ + int rowgroup_height[MAX_COMPONENTS]; + + /* These arrays save pixel expansion factors so that int_downsample need not + * recompute them each time. They are unused for other downsampling methods. + */ + UINT8 h_expand[MAX_COMPONENTS]; + UINT8 v_expand[MAX_COMPONENTS]; +} my_downsampler; + +typedef my_downsampler * my_downsample_ptr; + + +/* + * Initialize for a downsampling pass. + */ + +METHODDEF(void) +start_pass_downsample (j_compress_ptr cinfo) +{ + /* no work for now */ +} + + +/* + * Expand a component horizontally from width input_cols to width output_cols, + * by duplicating the rightmost samples. + */ + +LOCAL(void) +expand_right_edge (JSAMPARRAY image_data, int num_rows, + JDIMENSION input_cols, JDIMENSION output_cols) +{ + register JSAMPROW ptr; + register JSAMPLE pixval; + register int count; + int row; + int numcols = (int) (output_cols - input_cols); + + if (numcols > 0) { + for (row = 0; row < num_rows; row++) { + ptr = image_data[row] + input_cols; + pixval = ptr[-1]; /* don't need GETJSAMPLE() here */ + for (count = numcols; count > 0; count--) + *ptr++ = pixval; + } + } +} + + +/* + * Do downsampling for a whole row group (all components). + * + * In this version we simply downsample each component independently. + */ + +METHODDEF(void) +sep_downsample (j_compress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION in_row_index, + JSAMPIMAGE output_buf, JDIMENSION out_row_group_index) +{ + my_downsample_ptr downsample = (my_downsample_ptr) cinfo->downsample; + int ci; + jpeg_component_info * compptr; + JSAMPARRAY in_ptr, out_ptr; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + in_ptr = input_buf[ci] + in_row_index; + out_ptr = output_buf[ci] + + (out_row_group_index * downsample->rowgroup_height[ci]); + (*downsample->methods[ci]) (cinfo, compptr, in_ptr, out_ptr); + } +} + + +/* + * Downsample pixel values of a single component. + * One row group is processed per call. + * This version handles arbitrary integral sampling ratios, without smoothing. + * Note that this version is not actually used for customary sampling ratios. + */ + +METHODDEF(void) +int_downsample (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY output_data) +{ + my_downsample_ptr downsample = (my_downsample_ptr) cinfo->downsample; + int inrow, outrow, h_expand, v_expand, numpix, numpix2, h, v; + JDIMENSION outcol, outcol_h; /* outcol_h == outcol*h_expand */ + JDIMENSION output_cols = compptr->width_in_blocks * compptr->DCT_h_scaled_size; + JSAMPROW inptr, outptr; + INT32 outvalue; + + h_expand = downsample->h_expand[compptr->component_index]; + v_expand = downsample->v_expand[compptr->component_index]; + numpix = h_expand * v_expand; + numpix2 = numpix/2; + + /* Expand input data enough to let all the output samples be generated + * by the standard loop. Special-casing padded output would be more + * efficient. 
+ */ + expand_right_edge(input_data, cinfo->max_v_samp_factor, + cinfo->image_width, output_cols * h_expand); + + inrow = outrow = 0; + while (inrow < cinfo->max_v_samp_factor) { + outptr = output_data[outrow]; + for (outcol = 0, outcol_h = 0; outcol < output_cols; + outcol++, outcol_h += h_expand) { + outvalue = 0; + for (v = 0; v < v_expand; v++) { + inptr = input_data[inrow+v] + outcol_h; + for (h = 0; h < h_expand; h++) { + outvalue += (INT32) GETJSAMPLE(*inptr++); + } + } + *outptr++ = (JSAMPLE) ((outvalue + numpix2) / numpix); + } + inrow += v_expand; + outrow++; + } +} + + +/* + * Downsample pixel values of a single component. + * This version handles the special case of a full-size component, + * without smoothing. + */ + +METHODDEF(void) +fullsize_downsample (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY output_data) +{ + /* Copy the data */ + jcopy_sample_rows(input_data, 0, output_data, 0, + cinfo->max_v_samp_factor, cinfo->image_width); + /* Edge-expand */ + expand_right_edge(output_data, cinfo->max_v_samp_factor, cinfo->image_width, + compptr->width_in_blocks * compptr->DCT_h_scaled_size); +} + + +/* + * Downsample pixel values of a single component. + * This version handles the common case of 2:1 horizontal and 1:1 vertical, + * without smoothing. + * + * A note about the "bias" calculations: when rounding fractional values to + * integer, we do not want to always round 0.5 up to the next integer. + * If we did that, we'd introduce a noticeable bias towards larger values. + * Instead, this code is arranged so that 0.5 will be rounded up or down at + * alternate pixel locations (a simple ordered dither pattern). + */ + +METHODDEF(void) +h2v1_downsample (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY output_data) +{ + int inrow; + JDIMENSION outcol; + JDIMENSION output_cols = compptr->width_in_blocks * compptr->DCT_h_scaled_size; + register JSAMPROW inptr, outptr; + register int bias; + + /* Expand input data enough to let all the output samples be generated + * by the standard loop. Special-casing padded output would be more + * efficient. + */ + expand_right_edge(input_data, cinfo->max_v_samp_factor, + cinfo->image_width, output_cols * 2); + + for (inrow = 0; inrow < cinfo->max_v_samp_factor; inrow++) { + outptr = output_data[inrow]; + inptr = input_data[inrow]; + bias = 0; /* bias = 0,1,0,1,... for successive samples */ + for (outcol = 0; outcol < output_cols; outcol++) { + *outptr++ = (JSAMPLE) ((GETJSAMPLE(*inptr) + GETJSAMPLE(inptr[1]) + + bias) >> 1); + bias ^= 1; /* 0=>1, 1=>0 */ + inptr += 2; + } + } +} + + +/* + * Downsample pixel values of a single component. + * This version handles the standard case of 2:1 horizontal and 2:1 vertical, + * without smoothing. + */ + +METHODDEF(void) +h2v2_downsample (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY output_data) +{ + int inrow, outrow; + JDIMENSION outcol; + JDIMENSION output_cols = compptr->width_in_blocks * compptr->DCT_h_scaled_size; + register JSAMPROW inptr0, inptr1, outptr; + register int bias; + + /* Expand input data enough to let all the output samples be generated + * by the standard loop. Special-casing padded output would be more + * efficient. 
+ */ + expand_right_edge(input_data, cinfo->max_v_samp_factor, + cinfo->image_width, output_cols * 2); + + inrow = outrow = 0; + while (inrow < cinfo->max_v_samp_factor) { + outptr = output_data[outrow]; + inptr0 = input_data[inrow]; + inptr1 = input_data[inrow+1]; + bias = 1; /* bias = 1,2,1,2,... for successive samples */ + for (outcol = 0; outcol < output_cols; outcol++) { + *outptr++ = (JSAMPLE) ((GETJSAMPLE(*inptr0) + GETJSAMPLE(inptr0[1]) + + GETJSAMPLE(*inptr1) + GETJSAMPLE(inptr1[1]) + + bias) >> 2); + bias ^= 3; /* 1=>2, 2=>1 */ + inptr0 += 2; inptr1 += 2; + } + inrow += 2; + outrow++; + } +} + + +#ifdef INPUT_SMOOTHING_SUPPORTED + +/* + * Downsample pixel values of a single component. + * This version handles the standard case of 2:1 horizontal and 2:1 vertical, + * with smoothing. One row of context is required. + */ + +METHODDEF(void) +h2v2_smooth_downsample (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY output_data) +{ + int inrow, outrow; + JDIMENSION colctr; + JDIMENSION output_cols = compptr->width_in_blocks * compptr->DCT_h_scaled_size; + register JSAMPROW inptr0, inptr1, above_ptr, below_ptr, outptr; + INT32 membersum, neighsum, memberscale, neighscale; + + /* Expand input data enough to let all the output samples be generated + * by the standard loop. Special-casing padded output would be more + * efficient. + */ + expand_right_edge(input_data - 1, cinfo->max_v_samp_factor + 2, + cinfo->image_width, output_cols * 2); + + /* We don't bother to form the individual "smoothed" input pixel values; + * we can directly compute the output which is the average of the four + * smoothed values. Each of the four member pixels contributes a fraction + * (1-8*SF) to its own smoothed image and a fraction SF to each of the three + * other smoothed pixels, therefore a total fraction (1-5*SF)/4 to the final + * output. The four corner-adjacent neighbor pixels contribute a fraction + * SF to just one smoothed pixel, or SF/4 to the final output; while the + * eight edge-adjacent neighbors contribute SF to each of two smoothed + * pixels, or SF/2 overall. In order to use integer arithmetic, these + * factors are scaled by 2^16 = 65536. + * Also recall that SF = smoothing_factor / 1024. 
+ */ + + memberscale = 16384 - cinfo->smoothing_factor * 80; /* scaled (1-5*SF)/4 */ + neighscale = cinfo->smoothing_factor * 16; /* scaled SF/4 */ + + inrow = outrow = 0; + while (inrow < cinfo->max_v_samp_factor) { + outptr = output_data[outrow]; + inptr0 = input_data[inrow]; + inptr1 = input_data[inrow+1]; + above_ptr = input_data[inrow-1]; + below_ptr = input_data[inrow+2]; + + /* Special case for first column: pretend column -1 is same as column 0 */ + membersum = GETJSAMPLE(*inptr0) + GETJSAMPLE(inptr0[1]) + + GETJSAMPLE(*inptr1) + GETJSAMPLE(inptr1[1]); + neighsum = GETJSAMPLE(*above_ptr) + GETJSAMPLE(above_ptr[1]) + + GETJSAMPLE(*below_ptr) + GETJSAMPLE(below_ptr[1]) + + GETJSAMPLE(*inptr0) + GETJSAMPLE(inptr0[2]) + + GETJSAMPLE(*inptr1) + GETJSAMPLE(inptr1[2]); + neighsum += neighsum; + neighsum += GETJSAMPLE(*above_ptr) + GETJSAMPLE(above_ptr[2]) + + GETJSAMPLE(*below_ptr) + GETJSAMPLE(below_ptr[2]); + membersum = membersum * memberscale + neighsum * neighscale; + *outptr++ = (JSAMPLE) ((membersum + 32768) >> 16); + inptr0 += 2; inptr1 += 2; above_ptr += 2; below_ptr += 2; + + for (colctr = output_cols - 2; colctr > 0; colctr--) { + /* sum of pixels directly mapped to this output element */ + membersum = GETJSAMPLE(*inptr0) + GETJSAMPLE(inptr0[1]) + + GETJSAMPLE(*inptr1) + GETJSAMPLE(inptr1[1]); + /* sum of edge-neighbor pixels */ + neighsum = GETJSAMPLE(*above_ptr) + GETJSAMPLE(above_ptr[1]) + + GETJSAMPLE(*below_ptr) + GETJSAMPLE(below_ptr[1]) + + GETJSAMPLE(inptr0[-1]) + GETJSAMPLE(inptr0[2]) + + GETJSAMPLE(inptr1[-1]) + GETJSAMPLE(inptr1[2]); + /* The edge-neighbors count twice as much as corner-neighbors */ + neighsum += neighsum; + /* Add in the corner-neighbors */ + neighsum += GETJSAMPLE(above_ptr[-1]) + GETJSAMPLE(above_ptr[2]) + + GETJSAMPLE(below_ptr[-1]) + GETJSAMPLE(below_ptr[2]); + /* form final output scaled up by 2^16 */ + membersum = membersum * memberscale + neighsum * neighscale; + /* round, descale and output it */ + *outptr++ = (JSAMPLE) ((membersum + 32768) >> 16); + inptr0 += 2; inptr1 += 2; above_ptr += 2; below_ptr += 2; + } + + /* Special case for last column */ + membersum = GETJSAMPLE(*inptr0) + GETJSAMPLE(inptr0[1]) + + GETJSAMPLE(*inptr1) + GETJSAMPLE(inptr1[1]); + neighsum = GETJSAMPLE(*above_ptr) + GETJSAMPLE(above_ptr[1]) + + GETJSAMPLE(*below_ptr) + GETJSAMPLE(below_ptr[1]) + + GETJSAMPLE(inptr0[-1]) + GETJSAMPLE(inptr0[1]) + + GETJSAMPLE(inptr1[-1]) + GETJSAMPLE(inptr1[1]); + neighsum += neighsum; + neighsum += GETJSAMPLE(above_ptr[-1]) + GETJSAMPLE(above_ptr[1]) + + GETJSAMPLE(below_ptr[-1]) + GETJSAMPLE(below_ptr[1]); + membersum = membersum * memberscale + neighsum * neighscale; + *outptr = (JSAMPLE) ((membersum + 32768) >> 16); + + inrow += 2; + outrow++; + } +} + + +/* + * Downsample pixel values of a single component. + * This version handles the special case of a full-size component, + * with smoothing. One row of context is required. + */ + +METHODDEF(void) +fullsize_smooth_downsample (j_compress_ptr cinfo, jpeg_component_info *compptr, + JSAMPARRAY input_data, JSAMPARRAY output_data) +{ + int inrow; + JDIMENSION colctr; + JDIMENSION output_cols = compptr->width_in_blocks * compptr->DCT_h_scaled_size; + register JSAMPROW inptr, above_ptr, below_ptr, outptr; + INT32 membersum, neighsum, memberscale, neighscale; + int colsum, lastcolsum, nextcolsum; + + /* Expand input data enough to let all the output samples be generated + * by the standard loop. Special-casing padded output would be more + * efficient. 
+ */ + expand_right_edge(input_data - 1, cinfo->max_v_samp_factor + 2, + cinfo->image_width, output_cols); + + /* Each of the eight neighbor pixels contributes a fraction SF to the + * smoothed pixel, while the main pixel contributes (1-8*SF). In order + * to use integer arithmetic, these factors are multiplied by 2^16 = 65536. + * Also recall that SF = smoothing_factor / 1024. + */ + + memberscale = 65536L - cinfo->smoothing_factor * 512L; /* scaled 1-8*SF */ + neighscale = cinfo->smoothing_factor * 64; /* scaled SF */ + + for (inrow = 0; inrow < cinfo->max_v_samp_factor; inrow++) { + outptr = output_data[inrow]; + inptr = input_data[inrow]; + above_ptr = input_data[inrow-1]; + below_ptr = input_data[inrow+1]; + + /* Special case for first column */ + colsum = GETJSAMPLE(*above_ptr++) + GETJSAMPLE(*below_ptr++) + + GETJSAMPLE(*inptr); + membersum = GETJSAMPLE(*inptr++); + nextcolsum = GETJSAMPLE(*above_ptr) + GETJSAMPLE(*below_ptr) + + GETJSAMPLE(*inptr); + neighsum = colsum + (colsum - membersum) + nextcolsum; + membersum = membersum * memberscale + neighsum * neighscale; + *outptr++ = (JSAMPLE) ((membersum + 32768) >> 16); + lastcolsum = colsum; colsum = nextcolsum; + + for (colctr = output_cols - 2; colctr > 0; colctr--) { + membersum = GETJSAMPLE(*inptr++); + above_ptr++; below_ptr++; + nextcolsum = GETJSAMPLE(*above_ptr) + GETJSAMPLE(*below_ptr) + + GETJSAMPLE(*inptr); + neighsum = lastcolsum + (colsum - membersum) + nextcolsum; + membersum = membersum * memberscale + neighsum * neighscale; + *outptr++ = (JSAMPLE) ((membersum + 32768) >> 16); + lastcolsum = colsum; colsum = nextcolsum; + } + + /* Special case for last column */ + membersum = GETJSAMPLE(*inptr); + neighsum = lastcolsum + (colsum - membersum) + colsum; + membersum = membersum * memberscale + neighsum * neighscale; + *outptr = (JSAMPLE) ((membersum + 32768) >> 16); + + } +} + +#endif /* INPUT_SMOOTHING_SUPPORTED */ + + +/* + * Module initialization routine for downsampling. + * Note that we must select a routine for each component. + */ + +GLOBAL(void) +jinit_downsampler (j_compress_ptr cinfo) +{ + my_downsample_ptr downsample; + int ci; + jpeg_component_info * compptr; + boolean smoothok = TRUE; + int h_in_group, v_in_group, h_out_group, v_out_group; + + downsample = (my_downsample_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_downsampler)); + cinfo->downsample = (struct jpeg_downsampler *) downsample; + downsample->pub.start_pass = start_pass_downsample; + downsample->pub.downsample = sep_downsample; + downsample->pub.need_context_rows = FALSE; + + if (cinfo->CCIR601_sampling) + ERREXIT(cinfo, JERR_CCIR601_NOTIMPL); + + /* Verify we can handle the sampling factors, and set up method pointers */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Compute size of an "output group" for DCT scaling. This many samples + * are to be converted from max_h_samp_factor * max_v_samp_factor pixels. 
+ */ + h_out_group = (compptr->h_samp_factor * compptr->DCT_h_scaled_size) / + cinfo->min_DCT_h_scaled_size; + v_out_group = (compptr->v_samp_factor * compptr->DCT_v_scaled_size) / + cinfo->min_DCT_v_scaled_size; + h_in_group = cinfo->max_h_samp_factor; + v_in_group = cinfo->max_v_samp_factor; + downsample->rowgroup_height[ci] = v_out_group; /* save for use later */ + if (h_in_group == h_out_group && v_in_group == v_out_group) { +#ifdef INPUT_SMOOTHING_SUPPORTED + if (cinfo->smoothing_factor) { + downsample->methods[ci] = fullsize_smooth_downsample; + downsample->pub.need_context_rows = TRUE; + } else +#endif + downsample->methods[ci] = fullsize_downsample; + } else if (h_in_group == h_out_group * 2 && + v_in_group == v_out_group) { + smoothok = FALSE; + downsample->methods[ci] = h2v1_downsample; + } else if (h_in_group == h_out_group * 2 && + v_in_group == v_out_group * 2) { +#ifdef INPUT_SMOOTHING_SUPPORTED + if (cinfo->smoothing_factor) { + downsample->methods[ci] = h2v2_smooth_downsample; + downsample->pub.need_context_rows = TRUE; + } else +#endif + downsample->methods[ci] = h2v2_downsample; + } else if ((h_in_group % h_out_group) == 0 && + (v_in_group % v_out_group) == 0) { + smoothok = FALSE; + downsample->methods[ci] = int_downsample; + downsample->h_expand[ci] = (UINT8) (h_in_group / h_out_group); + downsample->v_expand[ci] = (UINT8) (v_in_group / v_out_group); + } else + ERREXIT(cinfo, JERR_FRACT_SAMPLE_NOTIMPL); + } + +#ifdef INPUT_SMOOTHING_SUPPORTED + if (cinfo->smoothing_factor && !smoothok) + TRACEMS(cinfo, 0, JTRC_SMOOTH_NOTIMPL); +#endif +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jctrans.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jctrans.c new file mode 100644 index 000000000..f7d7b8149 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jctrans.c @@ -0,0 +1,382 @@ +/* + * jctrans.c + * + * Copyright (C) 1995-1998, Thomas G. Lane. + * Modified 2000-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains library routines for transcoding compression, + * that is, writing raw DCT coefficient arrays to an output JPEG file. + * The routines in jcapimin.c will also be needed by a transcoder. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Forward declarations */ +LOCAL(void) transencode_master_selection + JPP((j_compress_ptr cinfo, jvirt_barray_ptr * coef_arrays)); +LOCAL(void) transencode_coef_controller + JPP((j_compress_ptr cinfo, jvirt_barray_ptr * coef_arrays)); + + +/* + * Compression initialization for writing raw-coefficient data. + * Before calling this, all parameters and a data destination must be set up. + * Call jpeg_finish_compress() to actually write the data. + * + * The number of passed virtual arrays must match cinfo->num_components. + * Note that the virtual arrays need not be filled or even realized at + * the time write_coefficients is called; indeed, if the virtual arrays + * were requested from this compression object's memory manager, they + * typically will be realized during this routine and filled afterwards. 
+ */ + +GLOBAL(void) +jpeg_write_coefficients (j_compress_ptr cinfo, jvirt_barray_ptr * coef_arrays) +{ + if (cinfo->global_state != CSTATE_START) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + /* Mark all tables to be written */ + jpeg_suppress_tables(cinfo, FALSE); + /* (Re)initialize error mgr and destination modules */ + (*cinfo->err->reset_error_mgr) ((j_common_ptr) cinfo); + (*cinfo->dest->init_destination) (cinfo); + /* Perform master selection of active modules */ + transencode_master_selection(cinfo, coef_arrays); + /* Wait for jpeg_finish_compress() call */ + cinfo->next_scanline = 0; /* so jpeg_write_marker works */ + cinfo->global_state = CSTATE_WRCOEFS; +} + + +/* + * Initialize the compression object with default parameters, + * then copy from the source object all parameters needed for lossless + * transcoding. Parameters that can be varied without loss (such as + * scan script and Huffman optimization) are left in their default states. + */ + +GLOBAL(void) +jpeg_copy_critical_parameters (j_decompress_ptr srcinfo, + j_compress_ptr dstinfo) +{ + JQUANT_TBL ** qtblptr; + jpeg_component_info *incomp, *outcomp; + JQUANT_TBL *c_quant, *slot_quant; + int tblno, ci, coefi; + + /* Safety check to ensure start_compress not called yet. */ + if (dstinfo->global_state != CSTATE_START) + ERREXIT1(dstinfo, JERR_BAD_STATE, dstinfo->global_state); + /* Copy fundamental image dimensions */ + dstinfo->image_width = srcinfo->image_width; + dstinfo->image_height = srcinfo->image_height; + dstinfo->input_components = srcinfo->num_components; + dstinfo->in_color_space = srcinfo->jpeg_color_space; + dstinfo->jpeg_width = srcinfo->output_width; + dstinfo->jpeg_height = srcinfo->output_height; + dstinfo->min_DCT_h_scaled_size = srcinfo->min_DCT_h_scaled_size; + dstinfo->min_DCT_v_scaled_size = srcinfo->min_DCT_v_scaled_size; + /* Initialize all parameters to default values */ + jpeg_set_defaults(dstinfo); + /* jpeg_set_defaults may choose wrong colorspace, eg YCbCr if input is RGB. + * Fix it to get the right header markers for the image colorspace. + */ + jpeg_set_colorspace(dstinfo, srcinfo->jpeg_color_space); + dstinfo->data_precision = srcinfo->data_precision; + dstinfo->CCIR601_sampling = srcinfo->CCIR601_sampling; + /* Copy the source's quantization tables. */ + for (tblno = 0; tblno < NUM_QUANT_TBLS; tblno++) { + if (srcinfo->quant_tbl_ptrs[tblno] != NULL) { + qtblptr = & dstinfo->quant_tbl_ptrs[tblno]; + if (*qtblptr == NULL) + *qtblptr = jpeg_alloc_quant_table((j_common_ptr) dstinfo); + MEMCOPY((*qtblptr)->quantval, + srcinfo->quant_tbl_ptrs[tblno]->quantval, + SIZEOF((*qtblptr)->quantval)); + (*qtblptr)->sent_table = FALSE; + } + } + /* Copy the source's per-component info. + * Note we assume jpeg_set_defaults has allocated the dest comp_info array. + */ + dstinfo->num_components = srcinfo->num_components; + if (dstinfo->num_components < 1 || dstinfo->num_components > MAX_COMPONENTS) + ERREXIT2(dstinfo, JERR_COMPONENT_COUNT, dstinfo->num_components, + MAX_COMPONENTS); + for (ci = 0, incomp = srcinfo->comp_info, outcomp = dstinfo->comp_info; + ci < dstinfo->num_components; ci++, incomp++, outcomp++) { + outcomp->component_id = incomp->component_id; + outcomp->h_samp_factor = incomp->h_samp_factor; + outcomp->v_samp_factor = incomp->v_samp_factor; + outcomp->quant_tbl_no = incomp->quant_tbl_no; + /* Make sure saved quantization table for component matches the qtable + * slot. If not, the input file re-used this qtable slot. + * IJG encoder currently cannot duplicate this. 
+ */ + tblno = outcomp->quant_tbl_no; + if (tblno < 0 || tblno >= NUM_QUANT_TBLS || + srcinfo->quant_tbl_ptrs[tblno] == NULL) + ERREXIT1(dstinfo, JERR_NO_QUANT_TABLE, tblno); + slot_quant = srcinfo->quant_tbl_ptrs[tblno]; + c_quant = incomp->quant_table; + if (c_quant != NULL) { + for (coefi = 0; coefi < DCTSIZE2; coefi++) { + if (c_quant->quantval[coefi] != slot_quant->quantval[coefi]) + ERREXIT1(dstinfo, JERR_MISMATCHED_QUANT_TABLE, tblno); + } + } + /* Note: we do not copy the source's Huffman table assignments; + * instead we rely on jpeg_set_colorspace to have made a suitable choice. + */ + } + /* Also copy JFIF version and resolution information, if available. + * Strictly speaking this isn't "critical" info, but it's nearly + * always appropriate to copy it if available. In particular, + * if the application chooses to copy JFIF 1.02 extension markers from + * the source file, we need to copy the version to make sure we don't + * emit a file that has 1.02 extensions but a claimed version of 1.01. + * We will *not*, however, copy version info from mislabeled "2.01" files. + */ + if (srcinfo->saw_JFIF_marker) { + if (srcinfo->JFIF_major_version == 1) { + dstinfo->JFIF_major_version = srcinfo->JFIF_major_version; + dstinfo->JFIF_minor_version = srcinfo->JFIF_minor_version; + } + dstinfo->density_unit = srcinfo->density_unit; + dstinfo->X_density = srcinfo->X_density; + dstinfo->Y_density = srcinfo->Y_density; + } +} + + +/* + * Master selection of compression modules for transcoding. + * This substitutes for jcinit.c's initialization of the full compressor. + */ + +LOCAL(void) +transencode_master_selection (j_compress_ptr cinfo, + jvirt_barray_ptr * coef_arrays) +{ + /* Initialize master control (includes parameter checking/processing) */ + jinit_c_master_control(cinfo, TRUE /* transcode only */); + + /* Entropy encoding: either Huffman or arithmetic coding. */ + if (cinfo->arith_code) + jinit_arith_encoder(cinfo); + else { + jinit_huff_encoder(cinfo); + } + + /* We need a special coefficient buffer controller. */ + transencode_coef_controller(cinfo, coef_arrays); + + jinit_marker_writer(cinfo); + + /* We can now tell the memory manager to allocate virtual arrays. */ + (*cinfo->mem->realize_virt_arrays) ((j_common_ptr) cinfo); + + /* Write the datastream header (SOI, JFIF) immediately. + * Frame and scan headers are postponed till later. + * This lets application insert special markers after the SOI. + */ + (*cinfo->marker->write_file_header) (cinfo); +} + + +/* + * The rest of this file is a special implementation of the coefficient + * buffer controller. This is similar to jccoefct.c, but it handles only + * output from presupplied virtual arrays. Furthermore, we generate any + * dummy padding blocks on-the-fly rather than expecting them to be present + * in the arrays. + */ + +/* Private buffer controller object */ + +typedef struct { + struct jpeg_c_coef_controller pub; /* public fields */ + + JDIMENSION iMCU_row_num; /* iMCU row # within image */ + JDIMENSION mcu_ctr; /* counts MCUs processed in current row */ + int MCU_vert_offset; /* counts MCU rows within iMCU row */ + int MCU_rows_per_iMCU_row; /* number of such rows needed */ + + /* Virtual block array for each component. */ + jvirt_barray_ptr * whole_image; + + /* Workspace for constructing dummy blocks at right/bottom edges. 
*/ + JBLOCKROW dummy_buffer[C_MAX_BLOCKS_IN_MCU]; +} my_coef_controller; + +typedef my_coef_controller * my_coef_ptr; + + +LOCAL(void) +start_iMCU_row (j_compress_ptr cinfo) +/* Reset within-iMCU-row counters for a new row */ +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + + /* In an interleaved scan, an MCU row is the same as an iMCU row. + * In a noninterleaved scan, an iMCU row has v_samp_factor MCU rows. + * But at the bottom of the image, process only what's left. + */ + if (cinfo->comps_in_scan > 1) { + coef->MCU_rows_per_iMCU_row = 1; + } else { + if (coef->iMCU_row_num < (cinfo->total_iMCU_rows-1)) + coef->MCU_rows_per_iMCU_row = cinfo->cur_comp_info[0]->v_samp_factor; + else + coef->MCU_rows_per_iMCU_row = cinfo->cur_comp_info[0]->last_row_height; + } + + coef->mcu_ctr = 0; + coef->MCU_vert_offset = 0; +} + + +/* + * Initialize for a processing pass. + */ + +METHODDEF(void) +start_pass_coef (j_compress_ptr cinfo, J_BUF_MODE pass_mode) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + + if (pass_mode != JBUF_CRANK_DEST) + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + + coef->iMCU_row_num = 0; + start_iMCU_row(cinfo); +} + + +/* + * Process some data. + * We process the equivalent of one fully interleaved MCU row ("iMCU" row) + * per call, ie, v_samp_factor block rows for each component in the scan. + * The data is obtained from the virtual arrays and fed to the entropy coder. + * Returns TRUE if the iMCU row is completed, FALSE if suspended. + * + * NB: input_buf is ignored; it is likely to be a NULL pointer. + */ + +METHODDEF(boolean) +compress_output (j_compress_ptr cinfo, JSAMPIMAGE input_buf) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + JDIMENSION MCU_col_num; /* index of current MCU within row */ + JDIMENSION last_MCU_col = cinfo->MCUs_per_row - 1; + JDIMENSION last_iMCU_row = cinfo->total_iMCU_rows - 1; + int blkn, ci, xindex, yindex, yoffset, blockcnt; + JDIMENSION start_col; + JBLOCKARRAY buffer[MAX_COMPS_IN_SCAN]; + JBLOCKROW MCU_buffer[C_MAX_BLOCKS_IN_MCU]; + JBLOCKROW buffer_ptr; + jpeg_component_info *compptr; + + /* Align the virtual buffers for the components used in this scan. */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + buffer[ci] = (*cinfo->mem->access_virt_barray) + ((j_common_ptr) cinfo, coef->whole_image[compptr->component_index], + coef->iMCU_row_num * compptr->v_samp_factor, + (JDIMENSION) compptr->v_samp_factor, FALSE); + } + + /* Loop to process one whole iMCU row */ + for (yoffset = coef->MCU_vert_offset; yoffset < coef->MCU_rows_per_iMCU_row; + yoffset++) { + for (MCU_col_num = coef->mcu_ctr; MCU_col_num < cinfo->MCUs_per_row; + MCU_col_num++) { + /* Construct list of pointers to DCT blocks belonging to this MCU */ + blkn = 0; /* index of current DCT block within MCU */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + start_col = MCU_col_num * compptr->MCU_width; + blockcnt = (MCU_col_num < last_MCU_col) ? compptr->MCU_width + : compptr->last_col_width; + for (yindex = 0; yindex < compptr->MCU_height; yindex++) { + if (coef->iMCU_row_num < last_iMCU_row || + yindex+yoffset < compptr->last_row_height) { + /* Fill in pointers to real blocks in this row */ + buffer_ptr = buffer[ci][yindex+yoffset] + start_col; + for (xindex = 0; xindex < blockcnt; xindex++) + MCU_buffer[blkn++] = buffer_ptr++; + } else { + /* At bottom of image, need a whole row of dummy blocks */ + xindex = 0; + } + /* Fill in any dummy blocks needed in this row. 
+ * Dummy blocks are filled in the same way as in jccoefct.c: + * all zeroes in the AC entries, DC entries equal to previous + * block's DC value. The init routine has already zeroed the + * AC entries, so we need only set the DC entries correctly. + */ + for (; xindex < compptr->MCU_width; xindex++) { + MCU_buffer[blkn] = coef->dummy_buffer[blkn]; + MCU_buffer[blkn][0][0] = MCU_buffer[blkn-1][0][0]; + blkn++; + } + } + } + /* Try to write the MCU. */ + if (! (*cinfo->entropy->encode_mcu) (cinfo, MCU_buffer)) { + /* Suspension forced; update state counters and exit */ + coef->MCU_vert_offset = yoffset; + coef->mcu_ctr = MCU_col_num; + return FALSE; + } + } + /* Completed an MCU row, but perhaps not an iMCU row */ + coef->mcu_ctr = 0; + } + /* Completed the iMCU row, advance counters for next one */ + coef->iMCU_row_num++; + start_iMCU_row(cinfo); + return TRUE; +} + + +/* + * Initialize coefficient buffer controller. + * + * Each passed coefficient array must be the right size for that + * coefficient: width_in_blocks wide and height_in_blocks high, + * with unitheight at least v_samp_factor. + */ + +LOCAL(void) +transencode_coef_controller (j_compress_ptr cinfo, + jvirt_barray_ptr * coef_arrays) +{ + my_coef_ptr coef; + JBLOCKROW buffer; + int i; + + coef = (my_coef_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_coef_controller)); + cinfo->coef = (struct jpeg_c_coef_controller *) coef; + coef->pub.start_pass = start_pass_coef; + coef->pub.compress_data = compress_output; + + /* Save pointer to virtual arrays */ + coef->whole_image = coef_arrays; + + /* Allocate and pre-zero space for dummy DCT blocks. */ + buffer = (JBLOCKROW) + (*cinfo->mem->alloc_large) ((j_common_ptr) cinfo, JPOOL_IMAGE, + C_MAX_BLOCKS_IN_MCU * SIZEOF(JBLOCK)); + FMEMZERO((void FAR *) buffer, C_MAX_BLOCKS_IN_MCU * SIZEOF(JBLOCK)); + for (i = 0; i < C_MAX_BLOCKS_IN_MCU; i++) { + coef->dummy_buffer[i] = buffer + i; + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdapimin.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdapimin.c new file mode 100644 index 000000000..7f1ce4c05 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdapimin.c @@ -0,0 +1,396 @@ +/* + * jdapimin.c + * + * Copyright (C) 1994-1998, Thomas G. Lane. + * Modified 2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains application interface code for the decompression half + * of the JPEG library. These are the "minimum" API routines that may be + * needed in either the normal full-decompression case or the + * transcoding-only case. + * + * Most of the routines intended to be called directly by an application + * are in this file or in jdapistd.c. But also see jcomapi.c for routines + * shared by compression and decompression, and jdtrans.c for the transcoding + * case. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* + * Initialization of a JPEG decompression object. + * The error manager must already be set up (in case memory manager fails). + */ + +GLOBAL(void) +jpeg_CreateDecompress (j_decompress_ptr cinfo, int version, size_t structsize) +{ + int i; + + /* Guard against version mismatches between library and caller. 
*/ + cinfo->mem = NULL; /* so jpeg_destroy knows mem mgr not called */ + if (version != JPEG_LIB_VERSION) + ERREXIT2(cinfo, JERR_BAD_LIB_VERSION, JPEG_LIB_VERSION, version); + if (structsize != SIZEOF(struct jpeg_decompress_struct)) + ERREXIT2(cinfo, JERR_BAD_STRUCT_SIZE, + (int) SIZEOF(struct jpeg_decompress_struct), (int) structsize); + + /* For debugging purposes, we zero the whole master structure. + * But the application has already set the err pointer, and may have set + * client_data, so we have to save and restore those fields. + * Note: if application hasn't set client_data, tools like Purify may + * complain here. + */ + { + struct jpeg_error_mgr * err = cinfo->err; + void * client_data = cinfo->client_data; /* ignore Purify complaint here */ + MEMZERO(cinfo, SIZEOF(struct jpeg_decompress_struct)); + cinfo->err = err; + cinfo->client_data = client_data; + } + cinfo->is_decompressor = TRUE; + + /* Initialize a memory manager instance for this object */ + jinit_memory_mgr((j_common_ptr) cinfo); + + /* Zero out pointers to permanent structures. */ + cinfo->progress = NULL; + cinfo->src = NULL; + + for (i = 0; i < NUM_QUANT_TBLS; i++) + cinfo->quant_tbl_ptrs[i] = NULL; + + for (i = 0; i < NUM_HUFF_TBLS; i++) { + cinfo->dc_huff_tbl_ptrs[i] = NULL; + cinfo->ac_huff_tbl_ptrs[i] = NULL; + } + + /* Initialize marker processor so application can override methods + * for COM, APPn markers before calling jpeg_read_header. + */ + cinfo->marker_list = NULL; + jinit_marker_reader(cinfo); + + /* And initialize the overall input controller. */ + jinit_input_controller(cinfo); + + /* OK, I'm ready */ + cinfo->global_state = DSTATE_START; +} + + +/* + * Destruction of a JPEG decompression object + */ + +GLOBAL(void) +jpeg_destroy_decompress (j_decompress_ptr cinfo) +{ + jpeg_destroy((j_common_ptr) cinfo); /* use common routine */ +} + + +/* + * Abort processing of a JPEG decompression operation, + * but don't destroy the object itself. + */ + +GLOBAL(void) +jpeg_abort_decompress (j_decompress_ptr cinfo) +{ + jpeg_abort((j_common_ptr) cinfo); /* use common routine */ +} + + +/* + * Set default decompression parameters. + */ + +LOCAL(void) +default_decompress_parms (j_decompress_ptr cinfo) +{ + /* Guess the input colorspace, and set output colorspace accordingly. */ + /* (Wish JPEG committee had provided a real way to specify this...) */ + /* Note application may override our guesses. 
*/ + switch (cinfo->num_components) { + case 1: + cinfo->jpeg_color_space = JCS_GRAYSCALE; + cinfo->out_color_space = JCS_GRAYSCALE; + break; + + case 3: + if (cinfo->saw_JFIF_marker) { + cinfo->jpeg_color_space = JCS_YCbCr; /* JFIF implies YCbCr */ + } else if (cinfo->saw_Adobe_marker) { + switch (cinfo->Adobe_transform) { + case 0: + cinfo->jpeg_color_space = JCS_RGB; + break; + case 1: + cinfo->jpeg_color_space = JCS_YCbCr; + break; + default: + WARNMS1(cinfo, JWRN_ADOBE_XFORM, cinfo->Adobe_transform); + cinfo->jpeg_color_space = JCS_YCbCr; /* assume it's YCbCr */ + break; + } + } else { + /* Saw no special markers, try to guess from the component IDs */ + int cid0 = cinfo->comp_info[0].component_id; + int cid1 = cinfo->comp_info[1].component_id; + int cid2 = cinfo->comp_info[2].component_id; + + if (cid0 == 1 && cid1 == 2 && cid2 == 3) + cinfo->jpeg_color_space = JCS_YCbCr; /* assume JFIF w/out marker */ + else if (cid0 == 82 && cid1 == 71 && cid2 == 66) + cinfo->jpeg_color_space = JCS_RGB; /* ASCII 'R', 'G', 'B' */ + else { + TRACEMS3(cinfo, 1, JTRC_UNKNOWN_IDS, cid0, cid1, cid2); + cinfo->jpeg_color_space = JCS_YCbCr; /* assume it's YCbCr */ + } + } + /* Always guess RGB is proper output colorspace. */ + cinfo->out_color_space = JCS_RGB; + break; + + case 4: + if (cinfo->saw_Adobe_marker) { + switch (cinfo->Adobe_transform) { + case 0: + cinfo->jpeg_color_space = JCS_CMYK; + break; + case 2: + cinfo->jpeg_color_space = JCS_YCCK; + break; + default: + WARNMS1(cinfo, JWRN_ADOBE_XFORM, cinfo->Adobe_transform); + cinfo->jpeg_color_space = JCS_YCCK; /* assume it's YCCK */ + break; + } + } else { + /* No special markers, assume straight CMYK. */ + cinfo->jpeg_color_space = JCS_CMYK; + } + cinfo->out_color_space = JCS_CMYK; + break; + + default: + cinfo->jpeg_color_space = JCS_UNKNOWN; + cinfo->out_color_space = JCS_UNKNOWN; + break; + } + + /* Set defaults for other decompression parameters. */ + cinfo->scale_num = cinfo->block_size; /* 1:1 scaling */ + cinfo->scale_denom = cinfo->block_size; + cinfo->output_gamma = 1.0; + cinfo->buffered_image = FALSE; + cinfo->raw_data_out = FALSE; + cinfo->dct_method = JDCT_DEFAULT; + cinfo->do_fancy_upsampling = TRUE; + cinfo->do_block_smoothing = TRUE; + cinfo->quantize_colors = FALSE; + /* We set these in case application only sets quantize_colors. */ + cinfo->dither_mode = JDITHER_FS; +#ifdef QUANT_2PASS_SUPPORTED + cinfo->two_pass_quantize = TRUE; +#else + cinfo->two_pass_quantize = FALSE; +#endif + cinfo->desired_number_of_colors = 256; + cinfo->colormap = NULL; + /* Initialize for no mode change in buffered-image mode. */ + cinfo->enable_1pass_quant = FALSE; + cinfo->enable_external_quant = FALSE; + cinfo->enable_2pass_quant = FALSE; +} + + +/* + * Decompression startup: read start of JPEG datastream to see what's there. + * Need only initialize JPEG object and supply a data source before calling. + * + * This routine will read as far as the first SOS marker (ie, actual start of + * compressed data), and will save all tables and parameters in the JPEG + * object. It will also initialize the decompression parameters to default + * values, and finally return JPEG_HEADER_OK. On return, the application may + * adjust the decompression parameters and then call jpeg_start_decompress. + * (Or, if the application only wanted to determine the image parameters, + * the data need not be decompressed. In that case, call jpeg_abort or + * jpeg_destroy to release any temporary space.) 
+ * If an abbreviated (tables only) datastream is presented, the routine will + * return JPEG_HEADER_TABLES_ONLY upon reaching EOI. The application may then + * re-use the JPEG object to read the abbreviated image datastream(s). + * It is unnecessary (but OK) to call jpeg_abort in this case. + * The JPEG_SUSPENDED return code only occurs if the data source module + * requests suspension of the decompressor. In this case the application + * should load more source data and then re-call jpeg_read_header to resume + * processing. + * If a non-suspending data source is used and require_image is TRUE, then the + * return code need not be inspected since only JPEG_HEADER_OK is possible. + * + * This routine is now just a front end to jpeg_consume_input, with some + * extra error checking. + */ + +GLOBAL(int) +jpeg_read_header (j_decompress_ptr cinfo, boolean require_image) +{ + int retcode; + + if (cinfo->global_state != DSTATE_START && + cinfo->global_state != DSTATE_INHEADER) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + retcode = jpeg_consume_input(cinfo); + + switch (retcode) { + case JPEG_REACHED_SOS: + retcode = JPEG_HEADER_OK; + break; + case JPEG_REACHED_EOI: + if (require_image) /* Complain if application wanted an image */ + ERREXIT(cinfo, JERR_NO_IMAGE); + /* Reset to start state; it would be safer to require the application to + * call jpeg_abort, but we can't change it now for compatibility reasons. + * A side effect is to free any temporary memory (there shouldn't be any). + */ + jpeg_abort((j_common_ptr) cinfo); /* sets state = DSTATE_START */ + retcode = JPEG_HEADER_TABLES_ONLY; + break; + case JPEG_SUSPENDED: + /* no work */ + break; + } + + return retcode; +} + + +/* + * Consume data in advance of what the decompressor requires. + * This can be called at any time once the decompressor object has + * been created and a data source has been set up. + * + * This routine is essentially a state machine that handles a couple + * of critical state-transition actions, namely initial setup and + * transition from header scanning to ready-for-start_decompress. + * All the actual input is done via the input controller's consume_input + * method. + */ + +GLOBAL(int) +jpeg_consume_input (j_decompress_ptr cinfo) +{ + int retcode = JPEG_SUSPENDED; + + /* NB: every possible DSTATE value should be listed in this switch */ + switch (cinfo->global_state) { + case DSTATE_START: + /* Start-of-datastream actions: reset appropriate modules */ + (*cinfo->inputctl->reset_input_controller) (cinfo); + /* Initialize application's data source module */ + (*cinfo->src->init_source) (cinfo); + cinfo->global_state = DSTATE_INHEADER; + /*FALLTHROUGH*/ + case DSTATE_INHEADER: + retcode = (*cinfo->inputctl->consume_input) (cinfo); + if (retcode == JPEG_REACHED_SOS) { /* Found SOS, prepare to decompress */ + /* Set up default parameters based on header data */ + default_decompress_parms(cinfo); + /* Set global state: ready for start_decompress */ + cinfo->global_state = DSTATE_READY; + } + break; + case DSTATE_READY: + /* Can't advance past first SOS until start_decompress is called */ + retcode = JPEG_REACHED_SOS; + break; + case DSTATE_PRELOAD: + case DSTATE_PRESCAN: + case DSTATE_SCANNING: + case DSTATE_RAW_OK: + case DSTATE_BUFIMAGE: + case DSTATE_BUFPOST: + case DSTATE_STOPPING: + retcode = (*cinfo->inputctl->consume_input) (cinfo); + break; + default: + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + } + return retcode; +} + + +/* + * Have we finished reading the input file? 
+ */ + +GLOBAL(boolean) +jpeg_input_complete (j_decompress_ptr cinfo) +{ + /* Check for valid jpeg object */ + if (cinfo->global_state < DSTATE_START || + cinfo->global_state > DSTATE_STOPPING) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + return cinfo->inputctl->eoi_reached; +} + + +/* + * Is there more than one scan? + */ + +GLOBAL(boolean) +jpeg_has_multiple_scans (j_decompress_ptr cinfo) +{ + /* Only valid after jpeg_read_header completes */ + if (cinfo->global_state < DSTATE_READY || + cinfo->global_state > DSTATE_STOPPING) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + return cinfo->inputctl->has_multiple_scans; +} + + +/* + * Finish JPEG decompression. + * + * This will normally just verify the file trailer and release temp storage. + * + * Returns FALSE if suspended. The return value need be inspected only if + * a suspending data source is used. + */ + +GLOBAL(boolean) +jpeg_finish_decompress (j_decompress_ptr cinfo) +{ + if ((cinfo->global_state == DSTATE_SCANNING || + cinfo->global_state == DSTATE_RAW_OK) && ! cinfo->buffered_image) { + /* Terminate final pass of non-buffered mode */ + if (cinfo->output_scanline < cinfo->output_height) + ERREXIT(cinfo, JERR_TOO_LITTLE_DATA); + (*cinfo->master->finish_output_pass) (cinfo); + cinfo->global_state = DSTATE_STOPPING; + } else if (cinfo->global_state == DSTATE_BUFIMAGE) { + /* Finishing after a buffered-image operation */ + cinfo->global_state = DSTATE_STOPPING; + } else if (cinfo->global_state != DSTATE_STOPPING) { + /* STOPPING = repeat call after a suspension, anything else is error */ + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + } + /* Read until EOI */ + while (! cinfo->inputctl->eoi_reached) { + if ((*cinfo->inputctl->consume_input) (cinfo) == JPEG_SUSPENDED) + return FALSE; /* Suspend, come back later */ + } + /* Do final cleanup */ + (*cinfo->src->term_source) (cinfo); + /* We can use jpeg_abort to release memory and reset global_state */ + jpeg_abort((j_common_ptr) cinfo); + return TRUE; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdapistd.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdapistd.c new file mode 100644 index 000000000..9d7453777 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdapistd.c @@ -0,0 +1,275 @@ +/* + * jdapistd.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains application interface code for the decompression half + * of the JPEG library. These are the "standard" API routines that are + * used in the normal full-decompression case. They are not used by a + * transcoding-only application. Note that if an application links in + * jpeg_start_decompress, it will end up linking in the entire decompressor. + * We thus must separate this file from jdapimin.c to avoid linking the + * whole decompression library into a transcoder. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Forward declarations */ +LOCAL(boolean) output_pass_setup JPP((j_decompress_ptr cinfo)); + + +/* + * Decompression initialization. + * jpeg_read_header must be completed before calling this. + * + * If a multipass operating mode was selected, this will do all but the + * last pass, and thus may take a great deal of time. + * + * Returns FALSE if suspended. 
The return value need be inspected only if + * a suspending data source is used. + */ + +GLOBAL(boolean) +jpeg_start_decompress (j_decompress_ptr cinfo) +{ + if (cinfo->global_state == DSTATE_READY) { + /* First call: initialize master control, select active modules */ + jinit_master_decompress(cinfo); + if (cinfo->buffered_image) { + /* No more work here; expecting jpeg_start_output next */ + cinfo->global_state = DSTATE_BUFIMAGE; + return TRUE; + } + cinfo->global_state = DSTATE_PRELOAD; + } + if (cinfo->global_state == DSTATE_PRELOAD) { + /* If file has multiple scans, absorb them all into the coef buffer */ + if (cinfo->inputctl->has_multiple_scans) { +#ifdef D_MULTISCAN_FILES_SUPPORTED + for (;;) { + int retcode; + /* Call progress monitor hook if present */ + if (cinfo->progress != NULL) + (*cinfo->progress->progress_monitor) ((j_common_ptr) cinfo); + /* Absorb some more input */ + retcode = (*cinfo->inputctl->consume_input) (cinfo); + if (retcode == JPEG_SUSPENDED) + return FALSE; + if (retcode == JPEG_REACHED_EOI) + break; + /* Advance progress counter if appropriate */ + if (cinfo->progress != NULL && + (retcode == JPEG_ROW_COMPLETED || retcode == JPEG_REACHED_SOS)) { + if (++cinfo->progress->pass_counter >= cinfo->progress->pass_limit) { + /* jdmaster underestimated number of scans; ratchet up one scan */ + cinfo->progress->pass_limit += (long) cinfo->total_iMCU_rows; + } + } + } +#else + ERREXIT(cinfo, JERR_NOT_COMPILED); +#endif /* D_MULTISCAN_FILES_SUPPORTED */ + } + cinfo->output_scan_number = cinfo->input_scan_number; + } else if (cinfo->global_state != DSTATE_PRESCAN) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + /* Perform any dummy output passes, and set up for the final pass */ + return output_pass_setup(cinfo); +} + + +/* + * Set up for an output pass, and perform any dummy pass(es) needed. + * Common subroutine for jpeg_start_decompress and jpeg_start_output. + * Entry: global_state = DSTATE_PRESCAN only if previously suspended. + * Exit: If done, returns TRUE and sets global_state for proper output mode. + * If suspended, returns FALSE and sets global_state = DSTATE_PRESCAN. 
+ */ + +LOCAL(boolean) +output_pass_setup (j_decompress_ptr cinfo) +{ + if (cinfo->global_state != DSTATE_PRESCAN) { + /* First call: do pass setup */ + (*cinfo->master->prepare_for_output_pass) (cinfo); + cinfo->output_scanline = 0; + cinfo->global_state = DSTATE_PRESCAN; + } + /* Loop over any required dummy passes */ + while (cinfo->master->is_dummy_pass) { +#ifdef QUANT_2PASS_SUPPORTED + /* Crank through the dummy pass */ + while (cinfo->output_scanline < cinfo->output_height) { + JDIMENSION last_scanline; + /* Call progress monitor hook if present */ + if (cinfo->progress != NULL) { + cinfo->progress->pass_counter = (long) cinfo->output_scanline; + cinfo->progress->pass_limit = (long) cinfo->output_height; + (*cinfo->progress->progress_monitor) ((j_common_ptr) cinfo); + } + /* Process some data */ + last_scanline = cinfo->output_scanline; + (*cinfo->main->process_data) (cinfo, (JSAMPARRAY) NULL, + &cinfo->output_scanline, (JDIMENSION) 0); + if (cinfo->output_scanline == last_scanline) + return FALSE; /* No progress made, must suspend */ + } + /* Finish up dummy pass, and set up for another one */ + (*cinfo->master->finish_output_pass) (cinfo); + (*cinfo->master->prepare_for_output_pass) (cinfo); + cinfo->output_scanline = 0; +#else + ERREXIT(cinfo, JERR_NOT_COMPILED); +#endif /* QUANT_2PASS_SUPPORTED */ + } + /* Ready for application to drive output pass through + * jpeg_read_scanlines or jpeg_read_raw_data. + */ + cinfo->global_state = cinfo->raw_data_out ? DSTATE_RAW_OK : DSTATE_SCANNING; + return TRUE; +} + + +/* + * Read some scanlines of data from the JPEG decompressor. + * + * The return value will be the number of lines actually read. + * This may be less than the number requested in several cases, + * including bottom of image, data source suspension, and operating + * modes that emit multiple scanlines at a time. + * + * Note: we warn about excess calls to jpeg_read_scanlines() since + * this likely signals an application programmer error. However, + * an oversize buffer (max_lines > scanlines remaining) is not an error. + */ + +GLOBAL(JDIMENSION) +jpeg_read_scanlines (j_decompress_ptr cinfo, JSAMPARRAY scanlines, + JDIMENSION max_lines) +{ + JDIMENSION row_ctr; + + if (cinfo->global_state != DSTATE_SCANNING) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + if (cinfo->output_scanline >= cinfo->output_height) { + WARNMS(cinfo, JWRN_TOO_MUCH_DATA); + return 0; + } + + /* Call progress monitor hook if present */ + if (cinfo->progress != NULL) { + cinfo->progress->pass_counter = (long) cinfo->output_scanline; + cinfo->progress->pass_limit = (long) cinfo->output_height; + (*cinfo->progress->progress_monitor) ((j_common_ptr) cinfo); + } + + /* Process some data */ + row_ctr = 0; + (*cinfo->main->process_data) (cinfo, scanlines, &row_ctr, max_lines); + cinfo->output_scanline += row_ctr; + return row_ctr; +} + + +/* + * Alternate entry point to read raw data. + * Processes exactly one iMCU row per call, unless suspended. 
+ */ + +GLOBAL(JDIMENSION) +jpeg_read_raw_data (j_decompress_ptr cinfo, JSAMPIMAGE data, + JDIMENSION max_lines) +{ + JDIMENSION lines_per_iMCU_row; + + if (cinfo->global_state != DSTATE_RAW_OK) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + if (cinfo->output_scanline >= cinfo->output_height) { + WARNMS(cinfo, JWRN_TOO_MUCH_DATA); + return 0; + } + + /* Call progress monitor hook if present */ + if (cinfo->progress != NULL) { + cinfo->progress->pass_counter = (long) cinfo->output_scanline; + cinfo->progress->pass_limit = (long) cinfo->output_height; + (*cinfo->progress->progress_monitor) ((j_common_ptr) cinfo); + } + + /* Verify that at least one iMCU row can be returned. */ + lines_per_iMCU_row = cinfo->max_v_samp_factor * cinfo->min_DCT_v_scaled_size; + if (max_lines < lines_per_iMCU_row) + ERREXIT(cinfo, JERR_BUFFER_SIZE); + + /* Decompress directly into user's buffer. */ + if (! (*cinfo->coef->decompress_data) (cinfo, data)) + return 0; /* suspension forced, can do nothing more */ + + /* OK, we processed one iMCU row. */ + cinfo->output_scanline += lines_per_iMCU_row; + return lines_per_iMCU_row; +} + + +/* Additional entry points for buffered-image mode. */ + +#ifdef D_MULTISCAN_FILES_SUPPORTED + +/* + * Initialize for an output pass in buffered-image mode. + */ + +GLOBAL(boolean) +jpeg_start_output (j_decompress_ptr cinfo, int scan_number) +{ + if (cinfo->global_state != DSTATE_BUFIMAGE && + cinfo->global_state != DSTATE_PRESCAN) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + /* Limit scan number to valid range */ + if (scan_number <= 0) + scan_number = 1; + if (cinfo->inputctl->eoi_reached && + scan_number > cinfo->input_scan_number) + scan_number = cinfo->input_scan_number; + cinfo->output_scan_number = scan_number; + /* Perform any dummy output passes, and set up for the real pass */ + return output_pass_setup(cinfo); +} + + +/* + * Finish up after an output pass in buffered-image mode. + * + * Returns FALSE if suspended. The return value need be inspected only if + * a suspending data source is used. + */ + +GLOBAL(boolean) +jpeg_finish_output (j_decompress_ptr cinfo) +{ + if ((cinfo->global_state == DSTATE_SCANNING || + cinfo->global_state == DSTATE_RAW_OK) && cinfo->buffered_image) { + /* Terminate this pass. */ + /* We do not require the whole pass to have been completed. */ + (*cinfo->master->finish_output_pass) (cinfo); + cinfo->global_state = DSTATE_BUFPOST; + } else if (cinfo->global_state != DSTATE_BUFPOST) { + /* BUFPOST = repeat call after a suspension, anything else is error */ + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + } + /* Read markers looking for SOS or EOI */ + while (cinfo->input_scan_number <= cinfo->output_scan_number && + ! cinfo->inputctl->eoi_reached) { + if ((*cinfo->inputctl->consume_input) (cinfo) == JPEG_SUSPENDED) + return FALSE; /* Suspend, come back later */ + } + cinfo->global_state = DSTATE_BUFIMAGE; + return TRUE; +} + +#endif /* D_MULTISCAN_FILES_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdarith.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdarith.c new file mode 100644 index 000000000..092f8af5f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdarith.c @@ -0,0 +1,776 @@ +/* + * jdarith.c + * + * Developed 1997-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. 
+ * + * This file contains portable arithmetic entropy decoding routines for JPEG + * (implementing the ISO/IEC IS 10918-1 and CCITT Recommendation ITU-T T.81). + * + * Both sequential and progressive modes are supported in this single module. + * + * Suspension is not currently supported in this module. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Expanded entropy decoder object for arithmetic decoding. */ + +typedef struct { + struct jpeg_entropy_decoder pub; /* public fields */ + + INT32 c; /* C register, base of coding interval + input bit buffer */ + INT32 a; /* A register, normalized size of coding interval */ + int ct; /* bit shift counter, # of bits left in bit buffer part of C */ + /* init: ct = -16 */ + /* run: ct = 0..7 */ + /* error: ct = -1 */ + int last_dc_val[MAX_COMPS_IN_SCAN]; /* last DC coef for each component */ + int dc_context[MAX_COMPS_IN_SCAN]; /* context index for DC conditioning */ + + unsigned int restarts_to_go; /* MCUs left in this restart interval */ + + /* Pointers to statistics areas (these workspaces have image lifespan) */ + unsigned char * dc_stats[NUM_ARITH_TBLS]; + unsigned char * ac_stats[NUM_ARITH_TBLS]; + + /* Statistics bin for coding with fixed probability 0.5 */ + unsigned char fixed_bin[4]; +} arith_entropy_decoder; + +typedef arith_entropy_decoder * arith_entropy_ptr; + +/* The following two definitions specify the allocation chunk size + * for the statistics area. + * According to sections F.1.4.4.1.3 and F.1.4.4.2, we need at least + * 49 statistics bins for DC, and 245 statistics bins for AC coding. + * + * We use a compact representation with 1 byte per statistics bin, + * thus the numbers directly represent byte sizes. + * This 1 byte per statistics bin contains the meaning of the MPS + * (more probable symbol) in the highest bit (mask 0x80), and the + * index into the probability estimation state machine table + * in the lower bits (mask 0x7F). + */ + +#define DC_STAT_BINS 64 +#define AC_STAT_BINS 256 + + +LOCAL(int) +get_byte (j_decompress_ptr cinfo) +/* Read next input byte; we do not support suspension in this module. */ +{ + struct jpeg_source_mgr * src = cinfo->src; + + if (src->bytes_in_buffer == 0) + if (! (*src->fill_input_buffer) (cinfo)) + ERREXIT(cinfo, JERR_CANT_SUSPEND); + src->bytes_in_buffer--; + return GETJOCTET(*src->next_input_byte++); +} + + +/* + * The core arithmetic decoding routine (common in JPEG and JBIG). + * This needs to go as fast as possible. + * Machine-dependent optimization facilities + * are not utilized in this portable implementation. + * However, this code should be fairly efficient and + * may be a good base for further optimizations anyway. + * + * Return value is 0 or 1 (binary decision). + * + * Note: I've changed the handling of the code base & bit + * buffer register C compared to other implementations + * based on the standards layout & procedures. + * While it also contains both the actual base of the + * coding interval (16 bits) and the next-bits buffer, + * the cut-point between these two parts is floating + * (instead of fixed) with the bit shift counter CT. + * Thus, we also need only one (variable instead of + * fixed size) shift for the LPS/MPS decision, and + * we can get away with any renormalization update + * of C (except for new data insertion, of course). + * + * I've also introduced a new scheme for accessing + * the probability estimation state machine table, + * derived from Markus Kuhn's JBIG implementation. 
+ */ + +LOCAL(int) +arith_decode (j_decompress_ptr cinfo, unsigned char *st) +{ + register arith_entropy_ptr e = (arith_entropy_ptr) cinfo->entropy; + register unsigned char nl, nm; + register INT32 qe, temp; + register int sv, data; + + /* Renormalization & data input per section D.2.6 */ + while (e->a < 0x8000L) { + if (--e->ct < 0) { + /* Need to fetch next data byte */ + if (cinfo->unread_marker) + data = 0; /* stuff zero data */ + else { + data = get_byte(cinfo); /* read next input byte */ + if (data == 0xFF) { /* zero stuff or marker code */ + do data = get_byte(cinfo); + while (data == 0xFF); /* swallow extra 0xFF bytes */ + if (data == 0) + data = 0xFF; /* discard stuffed zero byte */ + else { + /* Note: Different from the Huffman decoder, hitting + * a marker while processing the compressed data + * segment is legal in arithmetic coding. + * The convention is to supply zero data + * then until decoding is complete. + */ + cinfo->unread_marker = data; + data = 0; + } + } + } + e->c = (e->c << 8) | data; /* insert data into C register */ + if ((e->ct += 8) < 0) /* update bit shift counter */ + /* Need more initial bytes */ + if (++e->ct == 0) + /* Got 2 initial bytes -> re-init A and exit loop */ + e->a = 0x8000L; /* => e->a = 0x10000L after loop exit */ + } + e->a <<= 1; + } + + /* Fetch values from our compact representation of Table D.3(D.2): + * Qe values and probability estimation state machine + */ + sv = *st; + qe = jpeg_aritab[sv & 0x7F]; /* => Qe_Value */ + nl = qe & 0xFF; qe >>= 8; /* Next_Index_LPS + Switch_MPS */ + nm = qe & 0xFF; qe >>= 8; /* Next_Index_MPS */ + + /* Decode & estimation procedures per sections D.2.4 & D.2.5 */ + temp = e->a - qe; + e->a = temp; + temp <<= e->ct; + if (e->c >= temp) { + e->c -= temp; + /* Conditional LPS (less probable symbol) exchange */ + if (e->a < qe) { + e->a = qe; + *st = (sv & 0x80) ^ nm; /* Estimate_after_MPS */ + } else { + e->a = qe; + *st = (sv & 0x80) ^ nl; /* Estimate_after_LPS */ + sv ^= 0x80; /* Exchange LPS/MPS */ + } + } else if (e->a < 0x8000L) { + /* Conditional MPS (more probable symbol) exchange */ + if (e->a < qe) { + *st = (sv & 0x80) ^ nl; /* Estimate_after_LPS */ + sv ^= 0x80; /* Exchange LPS/MPS */ + } else { + *st = (sv & 0x80) ^ nm; /* Estimate_after_MPS */ + } + } + + return sv >> 7; +} + + +/* + * Check for a restart marker & resynchronize decoder. + */ + +LOCAL(void) +process_restart (j_decompress_ptr cinfo) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + int ci; + jpeg_component_info * compptr; + + /* Advance past the RSTn marker */ + if (! (*cinfo->marker->read_restart_marker) (cinfo)) + ERREXIT(cinfo, JERR_CANT_SUSPEND); + + /* Re-initialize statistics areas */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + if (! cinfo->progressive_mode || (cinfo->Ss == 0 && cinfo->Ah == 0)) { + MEMZERO(entropy->dc_stats[compptr->dc_tbl_no], DC_STAT_BINS); + /* Reset DC predictions to 0 */ + entropy->last_dc_val[ci] = 0; + entropy->dc_context[ci] = 0; + } + if ((! cinfo->progressive_mode && cinfo->lim_Se) || + (cinfo->progressive_mode && cinfo->Ss)) { + MEMZERO(entropy->ac_stats[compptr->ac_tbl_no], AC_STAT_BINS); + } + } + + /* Reset arithmetic decoding variables */ + entropy->c = 0; + entropy->a = 0; + entropy->ct = -16; /* force reading 2 initial bytes to fill C */ + + /* Reset restart counter */ + entropy->restarts_to_go = cinfo->restart_interval; +} + + +/* + * Arithmetic MCU decoding. 
+ * Each of these routines decodes and returns one MCU's worth of + * arithmetic-compressed coefficients. + * The coefficients are reordered from zigzag order into natural array order, + * but are not dequantized. + * + * The i'th block of the MCU is stored into the block pointed to by + * MCU_data[i]. WE ASSUME THIS AREA IS INITIALLY ZEROED BY THE CALLER. + */ + +/* + * MCU decoding for DC initial scan (either spectral selection, + * or first pass of successive approximation). + */ + +METHODDEF(boolean) +decode_mcu_DC_first (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + JBLOCKROW block; + unsigned char *st; + int blkn, ci, tbl, sign; + int v, m; + + /* Process restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + process_restart(cinfo); + entropy->restarts_to_go--; + } + + if (entropy->ct == -1) return TRUE; /* if error do nothing */ + + /* Outer loop handles each block in the MCU */ + + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + block = MCU_data[blkn]; + ci = cinfo->MCU_membership[blkn]; + tbl = cinfo->cur_comp_info[ci]->dc_tbl_no; + + /* Sections F.2.4.1 & F.1.4.4.1: Decoding of DC coefficients */ + + /* Table F.4: Point to statistics bin S0 for DC coefficient coding */ + st = entropy->dc_stats[tbl] + entropy->dc_context[ci]; + + /* Figure F.19: Decode_DC_DIFF */ + if (arith_decode(cinfo, st) == 0) + entropy->dc_context[ci] = 0; + else { + /* Figure F.21: Decoding nonzero value v */ + /* Figure F.22: Decoding the sign of v */ + sign = arith_decode(cinfo, st + 1); + st += 2; st += sign; + /* Figure F.23: Decoding the magnitude category of v */ + if ((m = arith_decode(cinfo, st)) != 0) { + st = entropy->dc_stats[tbl] + 20; /* Table F.4: X1 = 20 */ + while (arith_decode(cinfo, st)) { + if ((m <<= 1) == 0x8000) { + WARNMS(cinfo, JWRN_ARITH_BAD_CODE); + entropy->ct = -1; /* magnitude overflow */ + return TRUE; + } + st += 1; + } + } + /* Section F.1.4.4.1.2: Establish dc_context conditioning category */ + if (m < (int) ((1L << cinfo->arith_dc_L[tbl]) >> 1)) + entropy->dc_context[ci] = 0; /* zero diff category */ + else if (m > (int) ((1L << cinfo->arith_dc_U[tbl]) >> 1)) + entropy->dc_context[ci] = 12 + (sign * 4); /* large diff category */ + else + entropy->dc_context[ci] = 4 + (sign * 4); /* small diff category */ + v = m; + /* Figure F.24: Decoding the magnitude bit pattern of v */ + st += 14; + while (m >>= 1) + if (arith_decode(cinfo, st)) v |= m; + v += 1; if (sign) v = -v; + entropy->last_dc_val[ci] += v; + } + + /* Scale and output the DC coefficient (assumes jpeg_natural_order[0]=0) */ + (*block)[0] = (JCOEF) (entropy->last_dc_val[ci] << cinfo->Al); + } + + return TRUE; +} + + +/* + * MCU decoding for AC initial scan (either spectral selection, + * or first pass of successive approximation). 
+ */ + +METHODDEF(boolean) +decode_mcu_AC_first (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + JBLOCKROW block; + unsigned char *st; + int tbl, sign, k; + int v, m; + const int * natural_order; + + /* Process restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + process_restart(cinfo); + entropy->restarts_to_go--; + } + + if (entropy->ct == -1) return TRUE; /* if error do nothing */ + + natural_order = cinfo->natural_order; + + /* There is always only one block per MCU */ + block = MCU_data[0]; + tbl = cinfo->cur_comp_info[0]->ac_tbl_no; + + /* Sections F.2.4.2 & F.1.4.4.2: Decoding of AC coefficients */ + + /* Figure F.20: Decode_AC_coefficients */ + for (k = cinfo->Ss; k <= cinfo->Se; k++) { + st = entropy->ac_stats[tbl] + 3 * (k - 1); + if (arith_decode(cinfo, st)) break; /* EOB flag */ + while (arith_decode(cinfo, st + 1) == 0) { + st += 3; k++; + if (k > cinfo->Se) { + WARNMS(cinfo, JWRN_ARITH_BAD_CODE); + entropy->ct = -1; /* spectral overflow */ + return TRUE; + } + } + /* Figure F.21: Decoding nonzero value v */ + /* Figure F.22: Decoding the sign of v */ + sign = arith_decode(cinfo, entropy->fixed_bin); + st += 2; + /* Figure F.23: Decoding the magnitude category of v */ + if ((m = arith_decode(cinfo, st)) != 0) { + if (arith_decode(cinfo, st)) { + m <<= 1; + st = entropy->ac_stats[tbl] + + (k <= cinfo->arith_ac_K[tbl] ? 189 : 217); + while (arith_decode(cinfo, st)) { + if ((m <<= 1) == 0x8000) { + WARNMS(cinfo, JWRN_ARITH_BAD_CODE); + entropy->ct = -1; /* magnitude overflow */ + return TRUE; + } + st += 1; + } + } + } + v = m; + /* Figure F.24: Decoding the magnitude bit pattern of v */ + st += 14; + while (m >>= 1) + if (arith_decode(cinfo, st)) v |= m; + v += 1; if (sign) v = -v; + /* Scale and output coefficient in natural (dezigzagged) order */ + (*block)[natural_order[k]] = (JCOEF) (v << cinfo->Al); + } + + return TRUE; +} + + +/* + * MCU decoding for DC successive approximation refinement scan. + */ + +METHODDEF(boolean) +decode_mcu_DC_refine (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + unsigned char *st; + int p1, blkn; + + /* Process restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + process_restart(cinfo); + entropy->restarts_to_go--; + } + + st = entropy->fixed_bin; /* use fixed probability estimation */ + p1 = 1 << cinfo->Al; /* 1 in the bit position being coded */ + + /* Outer loop handles each block in the MCU */ + + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + /* Encoded data is simply the next bit of the two's-complement DC value */ + if (arith_decode(cinfo, st)) + MCU_data[blkn][0][0] |= p1; + } + + return TRUE; +} + + +/* + * MCU decoding for AC successive approximation refinement scan. 
+ */ + +METHODDEF(boolean) +decode_mcu_AC_refine (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + JBLOCKROW block; + JCOEFPTR thiscoef; + unsigned char *st; + int tbl, k, kex; + int p1, m1; + const int * natural_order; + + /* Process restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + process_restart(cinfo); + entropy->restarts_to_go--; + } + + if (entropy->ct == -1) return TRUE; /* if error do nothing */ + + natural_order = cinfo->natural_order; + + /* There is always only one block per MCU */ + block = MCU_data[0]; + tbl = cinfo->cur_comp_info[0]->ac_tbl_no; + + p1 = 1 << cinfo->Al; /* 1 in the bit position being coded */ + m1 = (-1) << cinfo->Al; /* -1 in the bit position being coded */ + + /* Establish EOBx (previous stage end-of-block) index */ + for (kex = cinfo->Se; kex > 0; kex--) + if ((*block)[natural_order[kex]]) break; + + for (k = cinfo->Ss; k <= cinfo->Se; k++) { + st = entropy->ac_stats[tbl] + 3 * (k - 1); + if (k > kex) + if (arith_decode(cinfo, st)) break; /* EOB flag */ + for (;;) { + thiscoef = *block + natural_order[k]; + if (*thiscoef) { /* previously nonzero coef */ + if (arith_decode(cinfo, st + 2)) { + if (*thiscoef < 0) + *thiscoef += m1; + else + *thiscoef += p1; + } + break; + } + if (arith_decode(cinfo, st + 1)) { /* newly nonzero coef */ + if (arith_decode(cinfo, entropy->fixed_bin)) + *thiscoef = m1; + else + *thiscoef = p1; + break; + } + st += 3; k++; + if (k > cinfo->Se) { + WARNMS(cinfo, JWRN_ARITH_BAD_CODE); + entropy->ct = -1; /* spectral overflow */ + return TRUE; + } + } + } + + return TRUE; +} + + +/* + * Decode one MCU's worth of arithmetic-compressed coefficients. + */ + +METHODDEF(boolean) +decode_mcu (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + jpeg_component_info * compptr; + JBLOCKROW block; + unsigned char *st; + int blkn, ci, tbl, sign, k; + int v, m; + const int * natural_order; + + /* Process restart marker if needed */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + process_restart(cinfo); + entropy->restarts_to_go--; + } + + if (entropy->ct == -1) return TRUE; /* if error do nothing */ + + natural_order = cinfo->natural_order; + + /* Outer loop handles each block in the MCU */ + + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + block = MCU_data[blkn]; + ci = cinfo->MCU_membership[blkn]; + compptr = cinfo->cur_comp_info[ci]; + + /* Sections F.2.4.1 & F.1.4.4.1: Decoding of DC coefficients */ + + tbl = compptr->dc_tbl_no; + + /* Table F.4: Point to statistics bin S0 for DC coefficient coding */ + st = entropy->dc_stats[tbl] + entropy->dc_context[ci]; + + /* Figure F.19: Decode_DC_DIFF */ + if (arith_decode(cinfo, st) == 0) + entropy->dc_context[ci] = 0; + else { + /* Figure F.21: Decoding nonzero value v */ + /* Figure F.22: Decoding the sign of v */ + sign = arith_decode(cinfo, st + 1); + st += 2; st += sign; + /* Figure F.23: Decoding the magnitude category of v */ + if ((m = arith_decode(cinfo, st)) != 0) { + st = entropy->dc_stats[tbl] + 20; /* Table F.4: X1 = 20 */ + while (arith_decode(cinfo, st)) { + if ((m <<= 1) == 0x8000) { + WARNMS(cinfo, JWRN_ARITH_BAD_CODE); + entropy->ct = -1; /* magnitude overflow */ + return TRUE; + } + st += 1; + } + } + /* Section F.1.4.4.1.2: Establish dc_context conditioning category */ + if (m < (int) ((1L << cinfo->arith_dc_L[tbl]) >> 1)) + entropy->dc_context[ci] = 0; /* zero diff 
category */ + else if (m > (int) ((1L << cinfo->arith_dc_U[tbl]) >> 1)) + entropy->dc_context[ci] = 12 + (sign * 4); /* large diff category */ + else + entropy->dc_context[ci] = 4 + (sign * 4); /* small diff category */ + v = m; + /* Figure F.24: Decoding the magnitude bit pattern of v */ + st += 14; + while (m >>= 1) + if (arith_decode(cinfo, st)) v |= m; + v += 1; if (sign) v = -v; + entropy->last_dc_val[ci] += v; + } + + (*block)[0] = (JCOEF) entropy->last_dc_val[ci]; + + /* Sections F.2.4.2 & F.1.4.4.2: Decoding of AC coefficients */ + + if (cinfo->lim_Se == 0) continue; + tbl = compptr->ac_tbl_no; + k = 0; + + /* Figure F.20: Decode_AC_coefficients */ + do { + st = entropy->ac_stats[tbl] + 3 * k; + if (arith_decode(cinfo, st)) break; /* EOB flag */ + for (;;) { + k++; + if (arith_decode(cinfo, st + 1)) break; + st += 3; + if (k >= cinfo->lim_Se) { + WARNMS(cinfo, JWRN_ARITH_BAD_CODE); + entropy->ct = -1; /* spectral overflow */ + return TRUE; + } + } + /* Figure F.21: Decoding nonzero value v */ + /* Figure F.22: Decoding the sign of v */ + sign = arith_decode(cinfo, entropy->fixed_bin); + st += 2; + /* Figure F.23: Decoding the magnitude category of v */ + if ((m = arith_decode(cinfo, st)) != 0) { + if (arith_decode(cinfo, st)) { + m <<= 1; + st = entropy->ac_stats[tbl] + + (k <= cinfo->arith_ac_K[tbl] ? 189 : 217); + while (arith_decode(cinfo, st)) { + if ((m <<= 1) == 0x8000) { + WARNMS(cinfo, JWRN_ARITH_BAD_CODE); + entropy->ct = -1; /* magnitude overflow */ + return TRUE; + } + st += 1; + } + } + } + v = m; + /* Figure F.24: Decoding the magnitude bit pattern of v */ + st += 14; + while (m >>= 1) + if (arith_decode(cinfo, st)) v |= m; + v += 1; if (sign) v = -v; + (*block)[natural_order[k]] = (JCOEF) v; + } while (k < cinfo->lim_Se); + } + + return TRUE; +} + + +/* + * Initialize for an arithmetic-compressed scan. + */ + +METHODDEF(void) +start_pass (j_decompress_ptr cinfo) +{ + arith_entropy_ptr entropy = (arith_entropy_ptr) cinfo->entropy; + int ci, tbl; + jpeg_component_info * compptr; + + if (cinfo->progressive_mode) { + /* Validate progressive scan parameters */ + if (cinfo->Ss == 0) { + if (cinfo->Se != 0) + goto bad; + } else { + /* need not check Ss/Se < 0 since they came from unsigned bytes */ + if (cinfo->Se < cinfo->Ss || cinfo->Se > cinfo->lim_Se) + goto bad; + /* AC scans may have only one component */ + if (cinfo->comps_in_scan != 1) + goto bad; + } + if (cinfo->Ah != 0) { + /* Successive approximation refinement scan: must have Al = Ah-1. */ + if (cinfo->Ah-1 != cinfo->Al) + goto bad; + } + if (cinfo->Al > 13) { /* need not check for < 0 */ + bad: + ERREXIT4(cinfo, JERR_BAD_PROGRESSION, + cinfo->Ss, cinfo->Se, cinfo->Ah, cinfo->Al); + } + /* Update progression status, and verify that scan order is legal. + * Note that inter-scan inconsistencies are treated as warnings + * not fatal errors ... not clear if this is right way to behave. + */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + int coefi, cindex = cinfo->cur_comp_info[ci]->component_index; + int *coef_bit_ptr = & cinfo->coef_bits[cindex][0]; + if (cinfo->Ss && coef_bit_ptr[0] < 0) /* AC without prior DC scan */ + WARNMS2(cinfo, JWRN_BOGUS_PROGRESSION, cindex, 0); + for (coefi = cinfo->Ss; coefi <= cinfo->Se; coefi++) { + int expected = (coef_bit_ptr[coefi] < 0) ? 
0 : coef_bit_ptr[coefi]; + if (cinfo->Ah != expected) + WARNMS2(cinfo, JWRN_BOGUS_PROGRESSION, cindex, coefi); + coef_bit_ptr[coefi] = cinfo->Al; + } + } + /* Select MCU decoding routine */ + if (cinfo->Ah == 0) { + if (cinfo->Ss == 0) + entropy->pub.decode_mcu = decode_mcu_DC_first; + else + entropy->pub.decode_mcu = decode_mcu_AC_first; + } else { + if (cinfo->Ss == 0) + entropy->pub.decode_mcu = decode_mcu_DC_refine; + else + entropy->pub.decode_mcu = decode_mcu_AC_refine; + } + } else { + /* Check that the scan parameters Ss, Se, Ah/Al are OK for sequential JPEG. + * This ought to be an error condition, but we make it a warning. + */ + if (cinfo->Ss != 0 || cinfo->Ah != 0 || cinfo->Al != 0 || + (cinfo->Se < DCTSIZE2 && cinfo->Se != cinfo->lim_Se)) + WARNMS(cinfo, JWRN_NOT_SEQUENTIAL); + /* Select MCU decoding routine */ + entropy->pub.decode_mcu = decode_mcu; + } + + /* Allocate & initialize requested statistics areas */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + if (! cinfo->progressive_mode || (cinfo->Ss == 0 && cinfo->Ah == 0)) { + tbl = compptr->dc_tbl_no; + if (tbl < 0 || tbl >= NUM_ARITH_TBLS) + ERREXIT1(cinfo, JERR_NO_ARITH_TABLE, tbl); + if (entropy->dc_stats[tbl] == NULL) + entropy->dc_stats[tbl] = (unsigned char *) (*cinfo->mem->alloc_small) + ((j_common_ptr) cinfo, JPOOL_IMAGE, DC_STAT_BINS); + MEMZERO(entropy->dc_stats[tbl], DC_STAT_BINS); + /* Initialize DC predictions to 0 */ + entropy->last_dc_val[ci] = 0; + entropy->dc_context[ci] = 0; + } + if ((! cinfo->progressive_mode && cinfo->lim_Se) || + (cinfo->progressive_mode && cinfo->Ss)) { + tbl = compptr->ac_tbl_no; + if (tbl < 0 || tbl >= NUM_ARITH_TBLS) + ERREXIT1(cinfo, JERR_NO_ARITH_TABLE, tbl); + if (entropy->ac_stats[tbl] == NULL) + entropy->ac_stats[tbl] = (unsigned char *) (*cinfo->mem->alloc_small) + ((j_common_ptr) cinfo, JPOOL_IMAGE, AC_STAT_BINS); + MEMZERO(entropy->ac_stats[tbl], AC_STAT_BINS); + } + } + + /* Initialize arithmetic decoding variables */ + entropy->c = 0; + entropy->a = 0; + entropy->ct = -16; /* force reading 2 initial bytes to fill C */ + + /* Initialize restart counter */ + entropy->restarts_to_go = cinfo->restart_interval; +} + + +/* + * Module initialization routine for arithmetic entropy decoding. 
+ */ + +GLOBAL(void) +jinit_arith_decoder (j_decompress_ptr cinfo) +{ + arith_entropy_ptr entropy; + int i; + + entropy = (arith_entropy_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(arith_entropy_decoder)); + cinfo->entropy = (struct jpeg_entropy_decoder *) entropy; + entropy->pub.start_pass = start_pass; + + /* Mark tables unallocated */ + for (i = 0; i < NUM_ARITH_TBLS; i++) { + entropy->dc_stats[i] = NULL; + entropy->ac_stats[i] = NULL; + } + + /* Initialize index for fixed probability estimation */ + entropy->fixed_bin[0] = 113; + + if (cinfo->progressive_mode) { + /* Create progression status table */ + int *coef_bit_ptr, ci; + cinfo->coef_bits = (int (*)[DCTSIZE2]) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + cinfo->num_components*DCTSIZE2*SIZEOF(int)); + coef_bit_ptr = & cinfo->coef_bits[0][0]; + for (ci = 0; ci < cinfo->num_components; ci++) + for (i = 0; i < DCTSIZE2; i++) + *coef_bit_ptr++ = -1; + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdatadst.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdatadst.c new file mode 100644 index 000000000..4f29f8163 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdatadst.c @@ -0,0 +1,268 @@ +/* + * jdatadst.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * Modified 2009-2012 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains compression data destination routines for the case of + * emitting JPEG data to memory or to a file (or any stdio stream). + * While these routines are sufficient for most applications, + * some will want to use a different destination manager. + * IMPORTANT: we assume that fwrite() will correctly transcribe an array of + * JOCTETs into 8-bit-wide elements on external storage. If char is wider + * than 8 bits on your machine, you may need to do some tweaking. + */ + +/* this is not a core library module, so it doesn't define JPEG_INTERNALS */ +#include "jinclude.h" +#include "jpeglib.h" +#include "jerror.h" + +#ifndef HAVE_STDLIB_H /* should declare malloc(),free() */ +extern void * malloc JPP((size_t size)); +extern void free JPP((void *ptr)); +#endif + + +/* Expanded data destination object for stdio output */ + +typedef struct { + struct jpeg_destination_mgr pub; /* public fields */ +#ifdef JFILE + JFILE * outfile; /* target stream */ +#endif /* JFILE */ + JOCTET * buffer; /* start of buffer */ +} my_destination_mgr; + +typedef my_destination_mgr * my_dest_ptr; + + +#define OUTPUT_BUF_SIZE 4096 /* choose an efficiently fwrite'able size */ + + +/* Expanded data destination object for memory output */ + +typedef struct { + struct jpeg_destination_mgr pub; /* public fields */ + + unsigned char ** outbuffer; /* target buffer */ + unsigned long * outsize; + unsigned char * newbuffer; /* newly allocated buffer */ + JOCTET * buffer; /* start of buffer */ + size_t bufsize; +} my_mem_destination_mgr; + +typedef my_mem_destination_mgr * my_mem_dest_ptr; + + +/* + * Initialize destination --- called by jpeg_start_compress + * before any data is actually written. 
+ */ +#ifdef JFILE +METHODDEF(void) +init_destination (j_compress_ptr cinfo) +{ + my_dest_ptr dest = (my_dest_ptr) cinfo->dest; + + /* Allocate the output buffer --- it will be released when done with image */ + dest->buffer = (JOCTET *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + OUTPUT_BUF_SIZE * SIZEOF(JOCTET)); + + dest->pub.next_output_byte = dest->buffer; + dest->pub.free_in_buffer = OUTPUT_BUF_SIZE; +} +#endif /* JFILE */ + +METHODDEF(void) +init_mem_destination (j_compress_ptr cinfo) +{ + /* no work necessary here */ +} + + +/* + * Empty the output buffer --- called whenever buffer fills up. + * + * In typical applications, this should write the entire output buffer + * (ignoring the current state of next_output_byte & free_in_buffer), + * reset the pointer & count to the start of the buffer, and return TRUE + * indicating that the buffer has been dumped. + * + * In applications that need to be able to suspend compression due to output + * overrun, a FALSE return indicates that the buffer cannot be emptied now. + * In this situation, the compressor will return to its caller (possibly with + * an indication that it has not accepted all the supplied scanlines). The + * application should resume compression after it has made more room in the + * output buffer. Note that there are substantial restrictions on the use of + * suspension --- see the documentation. + * + * When suspending, the compressor will back up to a convenient restart point + * (typically the start of the current MCU). next_output_byte & free_in_buffer + * indicate where the restart point will be if the current call returns FALSE. + * Data beyond this point will be regenerated after resumption, so do not + * write it out when emptying the buffer externally. + */ +#ifdef JFILE +METHODDEF(boolean) +empty_output_buffer (j_compress_ptr cinfo) +{ + my_dest_ptr dest = (my_dest_ptr) cinfo->dest; + + if (JFWRITE(dest->outfile, dest->buffer, OUTPUT_BUF_SIZE) != + (size_t) OUTPUT_BUF_SIZE) + ERREXIT(cinfo, JERR_FILE_WRITE); + + dest->pub.next_output_byte = dest->buffer; + dest->pub.free_in_buffer = OUTPUT_BUF_SIZE; + + return TRUE; +} +#endif /* JFILE */ + +METHODDEF(boolean) +empty_mem_output_buffer (j_compress_ptr cinfo) +{ + size_t nextsize; + JOCTET * nextbuffer; + my_mem_dest_ptr dest = (my_mem_dest_ptr) cinfo->dest; + + /* Try to allocate new buffer with double size */ + nextsize = dest->bufsize * 2; + nextbuffer = (JOCTET *) malloc(nextsize); + + if (nextbuffer == NULL) + ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, 10); + + MEMCOPY(nextbuffer, dest->buffer, dest->bufsize); + + if (dest->newbuffer != NULL) + free(dest->newbuffer); + + dest->newbuffer = nextbuffer; + + dest->pub.next_output_byte = nextbuffer + dest->bufsize; + dest->pub.free_in_buffer = dest->bufsize; + + dest->buffer = nextbuffer; + dest->bufsize = nextsize; + + return TRUE; +} + + +/* + * Terminate destination --- called by jpeg_finish_compress + * after all data has been written. Usually needs to flush buffer. + * + * NB: *not* called by jpeg_abort or jpeg_destroy; surrounding + * application must deal with any cleanup that should happen even + * for error exit. 
+ */ +#ifdef JFILE +METHODDEF(void) +term_destination (j_compress_ptr cinfo) +{ + my_dest_ptr dest = (my_dest_ptr) cinfo->dest; + size_t datacount = OUTPUT_BUF_SIZE - dest->pub.free_in_buffer; + + /* Write any data remaining in the buffer */ + if (datacount > 0) { + if (JFWRITE(dest->outfile, dest->buffer, datacount) != datacount) + ERREXIT(cinfo, JERR_FILE_WRITE); + } +} +#endif /* JFILE */ + +METHODDEF(void) +term_mem_destination (j_compress_ptr cinfo) +{ + my_mem_dest_ptr dest = (my_mem_dest_ptr) cinfo->dest; + + *dest->outbuffer = dest->buffer; + *dest->outsize = dest->bufsize - dest->pub.free_in_buffer; +} + + +/* + * Prepare for output to a stdio stream. + * The caller must have already opened the stream, and is responsible + * for closing it after finishing compression. + */ +#ifdef JFILE +GLOBAL(void) +jpeg_stdio_dest (j_compress_ptr cinfo, JFILE * outfile) +{ + my_dest_ptr dest; + + /* The destination object is made permanent so that multiple JPEG images + * can be written to the same file without re-executing jpeg_stdio_dest. + * This makes it dangerous to use this manager and a different destination + * manager serially with the same JPEG object, because their private object + * sizes may be different. Caveat programmer. + */ + if (cinfo->dest == NULL) { /* first time for this JPEG object? */ + cinfo->dest = (struct jpeg_destination_mgr *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + SIZEOF(my_destination_mgr)); + } + + dest = (my_dest_ptr) cinfo->dest; + dest->pub.init_destination = init_destination; + dest->pub.empty_output_buffer = empty_output_buffer; + dest->pub.term_destination = term_destination; + dest->outfile = outfile; +} +#endif /* JFILE */ + +/* + * Prepare for output to a memory buffer. + * The caller may supply an own initial buffer with appropriate size. + * Otherwise, or when the actual data output exceeds the given size, + * the library adapts the buffer size as necessary. + * The standard library functions malloc/free are used for allocating + * larger memory, so the buffer is available to the application after + * finishing compression, and then the application is responsible for + * freeing the requested memory. + */ + +GLOBAL(void) +jpeg_mem_dest (j_compress_ptr cinfo, + unsigned char ** outbuffer, unsigned long * outsize) +{ + my_mem_dest_ptr dest; + + if (outbuffer == NULL || outsize == NULL) /* sanity check */ + ERREXIT(cinfo, JERR_BUFFER_SIZE); + + /* The destination object is made permanent so that multiple JPEG images + * can be written to the same buffer without re-executing jpeg_mem_dest. + */ + if (cinfo->dest == NULL) { /* first time for this JPEG object? 
*/ + cinfo->dest = (struct jpeg_destination_mgr *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + SIZEOF(my_mem_destination_mgr)); + } + + dest = (my_mem_dest_ptr) cinfo->dest; + dest->pub.init_destination = init_mem_destination; + dest->pub.empty_output_buffer = empty_mem_output_buffer; + dest->pub.term_destination = term_mem_destination; + dest->outbuffer = outbuffer; + dest->outsize = outsize; + dest->newbuffer = NULL; + + if (*outbuffer == NULL || *outsize == 0) { + /* Allocate initial buffer */ + dest->newbuffer = *outbuffer = (unsigned char *) malloc(OUTPUT_BUF_SIZE); + if (dest->newbuffer == NULL) + ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, 10); + *outsize = OUTPUT_BUF_SIZE; + } + + dest->pub.next_output_byte = dest->buffer = *outbuffer; + dest->pub.free_in_buffer = dest->bufsize = *outsize; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdatasrc.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdatasrc.c new file mode 100644 index 000000000..4aa06b256 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdatasrc.c @@ -0,0 +1,278 @@ +/* + * jdatasrc.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * Modified 2009-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains decompression data source routines for the case of + * reading JPEG data from memory or from a file (or any stdio stream). + * While these routines are sufficient for most applications, + * some will want to use a different source manager. + * IMPORTANT: we assume that fread() will correctly transcribe an array of + * JOCTETs from 8-bit-wide elements on external storage. If char is wider + * than 8 bits on your machine, you may need to do some tweaking. + */ + +/* this is not a core library module, so it doesn't define JPEG_INTERNALS */ +#include "jinclude.h" +#include "jpeglib.h" +#include "jerror.h" + + +/* Expanded data source object for stdio input */ +#ifdef JFILE +typedef struct { + struct jpeg_source_mgr pub; /* public fields */ + + JFILE * infile; /* source stream */ + JOCTET * buffer; /* start of buffer */ + boolean start_of_file; /* have we gotten any data yet? */ +} my_source_mgr; + +typedef my_source_mgr * my_src_ptr; +#endif /* 0 */ + +#define INPUT_BUF_SIZE 4096 /* choose an efficiently fread'able size */ + + +/* + * Initialize source --- called by jpeg_read_header + * before any data is actually read. + */ +#ifdef JFILE +METHODDEF(void) +init_source (j_decompress_ptr cinfo) +{ + my_src_ptr src = (my_src_ptr) cinfo->src; + + /* We reset the empty-input-file flag for each image, + * but we don't clear the input buffer. + * This is correct behavior for reading a series of images from one source. + */ + src->start_of_file = TRUE; +} +#endif /* 0 */ + +METHODDEF(void) +init_mem_source (j_decompress_ptr cinfo) +{ + /* no work necessary here */ +} + + +/* + * Fill the input buffer --- called whenever buffer is emptied. + * + * In typical applications, this should read fresh data into the buffer + * (ignoring the current state of next_input_byte & bytes_in_buffer), + * reset the pointer & count to the start of the buffer, and return TRUE + * indicating that the buffer has been reloaded. It is not necessary to + * fill the buffer entirely, only to obtain at least one more byte. + * + * There is no such thing as an EOF return. 
If the end of the file has been + * reached, the routine has a choice of ERREXIT() or inserting fake data into + * the buffer. In most cases, generating a warning message and inserting a + * fake EOI marker is the best course of action --- this will allow the + * decompressor to output however much of the image is there. However, + * the resulting error message is misleading if the real problem is an empty + * input file, so we handle that case specially. + * + * In applications that need to be able to suspend compression due to input + * not being available yet, a FALSE return indicates that no more data can be + * obtained right now, but more may be forthcoming later. In this situation, + * the decompressor will return to its caller (with an indication of the + * number of scanlines it has read, if any). The application should resume + * decompression after it has loaded more data into the input buffer. Note + * that there are substantial restrictions on the use of suspension --- see + * the documentation. + * + * When suspending, the decompressor will back up to a convenient restart point + * (typically the start of the current MCU). next_input_byte & bytes_in_buffer + * indicate where the restart point will be if the current call returns FALSE. + * Data beyond this point must be rescanned after resumption, so move it to + * the front of the buffer rather than discarding it. + */ +#ifdef JFILE +METHODDEF(boolean) +fill_input_buffer (j_decompress_ptr cinfo) +{ + my_src_ptr src = (my_src_ptr) cinfo->src; + size_t nbytes; + + nbytes = JFREAD(src->infile, src->buffer, INPUT_BUF_SIZE); + + if (nbytes <= 0) { + if (src->start_of_file) /* Treat empty input file as fatal error */ + ERREXIT(cinfo, JERR_INPUT_EMPTY); + WARNMS(cinfo, JWRN_JPEG_EOF); + /* Insert a fake EOI marker */ + src->buffer[0] = (JOCTET) 0xFF; + src->buffer[1] = (JOCTET) JPEG_EOI; + nbytes = 2; + } + + src->pub.next_input_byte = src->buffer; + src->pub.bytes_in_buffer = nbytes; + src->start_of_file = FALSE; + + return TRUE; +} +#endif /* JFILE */ + +METHODDEF(boolean) +fill_mem_input_buffer (j_decompress_ptr cinfo) +{ + static const JOCTET mybuffer[4] = { + (JOCTET) 0xFF, (JOCTET) JPEG_EOI, 0, 0 + }; + + /* The whole JPEG data is expected to reside in the supplied memory + * buffer, so any request for more data beyond the given buffer size + * is treated as an error. + */ + WARNMS(cinfo, JWRN_JPEG_EOF); + + /* Insert a fake EOI marker */ + + cinfo->src->next_input_byte = mybuffer; + cinfo->src->bytes_in_buffer = 2; + + return TRUE; +} + + +/* + * Skip data --- used to skip over a potentially large amount of + * uninteresting data (such as an APPn marker). + * + * Writers of suspendable-input applications must note that skip_input_data + * is not granted the right to give a suspension return. If the skip extends + * beyond the data currently in the buffer, the buffer can be marked empty so + * that the next read will cause a fill_input_buffer call that can suspend. + * Arranging for additional bytes to be discarded before reloading the input + * buffer is the application writer's problem. + */ + +METHODDEF(void) +skip_input_data (j_decompress_ptr cinfo, long num_bytes) +{ + struct jpeg_source_mgr * src = cinfo->src; + + /* Just a dumb implementation for now. Could use fseek() except + * it doesn't work on pipes. Not clear that being smart is worth + * any trouble anyway --- large skips are infrequent. 
+ */ + if (num_bytes > 0) { + while (num_bytes > (long) src->bytes_in_buffer) { + num_bytes -= (long) src->bytes_in_buffer; + (void) (*src->fill_input_buffer) (cinfo); + /* note we assume that fill_input_buffer will never return FALSE, + * so suspension need not be handled. + */ + } + src->next_input_byte += (size_t) num_bytes; + src->bytes_in_buffer -= (size_t) num_bytes; + } +} + + +/* + * An additional method that can be provided by data source modules is the + * resync_to_restart method for error recovery in the presence of RST markers. + * For the moment, this source module just uses the default resync method + * provided by the JPEG library. That method assumes that no backtracking + * is possible. + */ + + +/* + * Terminate source --- called by jpeg_finish_decompress + * after all data has been read. Often a no-op. + * + * NB: *not* called by jpeg_abort or jpeg_destroy; surrounding + * application must deal with any cleanup that should happen even + * for error exit. + */ + +METHODDEF(void) +term_source (j_decompress_ptr cinfo) +{ + /* no work necessary here */ +} + + +/* + * Prepare for input from a stdio stream. + * The caller must have already opened the stream, and is responsible + * for closing it after finishing decompression. + */ +#ifdef JFILE +GLOBAL(void) +jpeg_stdio_src (j_decompress_ptr cinfo, JFILE * infile) +{ + my_src_ptr src; + + /* The source object and input buffer are made permanent so that a series + * of JPEG images can be read from the same file by calling jpeg_stdio_src + * only before the first one. (If we discarded the buffer at the end of + * one image, we'd likely lose the start of the next one.) + * This makes it unsafe to use this manager and a different source + * manager serially with the same JPEG object. Caveat programmer. + */ + if (cinfo->src == NULL) { /* first time for this JPEG object? */ + cinfo->src = (struct jpeg_source_mgr *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + SIZEOF(my_source_mgr)); + src = (my_src_ptr) cinfo->src; + src->buffer = (JOCTET *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + INPUT_BUF_SIZE * SIZEOF(JOCTET)); + } + + src = (my_src_ptr) cinfo->src; + src->pub.init_source = init_source; + src->pub.fill_input_buffer = fill_input_buffer; + src->pub.skip_input_data = skip_input_data; + src->pub.resync_to_restart = jpeg_resync_to_restart; /* use default method */ + src->pub.term_source = term_source; + src->infile = infile; + src->pub.bytes_in_buffer = 0; /* forces fill_input_buffer on first read */ + src->pub.next_input_byte = NULL; /* until buffer loaded */ +} +#endif /* JFILE */ + +/* + * Prepare for input from a supplied memory buffer. + * The buffer must contain the whole JPEG data. + */ + +GLOBAL(void) +jpeg_mem_src (j_decompress_ptr cinfo, + unsigned char * inbuffer, unsigned long insize) +{ + struct jpeg_source_mgr * src; + + if (inbuffer == NULL || insize == 0) /* Treat empty input as fatal error */ + ERREXIT(cinfo, JERR_INPUT_EMPTY); + + /* The source object is made permanent so that a series of JPEG images + * can be read from the same buffer by calling jpeg_mem_src only before + * the first one. + */ + if (cinfo->src == NULL) { /* first time for this JPEG object? 
*/ + cinfo->src = (struct jpeg_source_mgr *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + SIZEOF(struct jpeg_source_mgr)); + } + + src = cinfo->src; + src->init_source = init_mem_source; + src->fill_input_buffer = fill_mem_input_buffer; + src->skip_input_data = skip_input_data; + src->resync_to_restart = jpeg_resync_to_restart; /* use default method */ + src->term_source = term_source; + src->bytes_in_buffer = (size_t) insize; + src->next_input_byte = (JOCTET *) inbuffer; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdcoefct.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdcoefct.c new file mode 100644 index 000000000..ed02fc378 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdcoefct.c @@ -0,0 +1,741 @@ +/* + * jdcoefct.c + * + * Copyright (C) 1994-1997, Thomas G. Lane. + * Modified 2002-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains the coefficient buffer controller for decompression. + * This controller is the top level of the JPEG decompressor proper. + * The coefficient buffer lies between entropy decoding and inverse-DCT steps. + * + * In buffered-image mode, this controller is the interface between + * input-oriented processing and output-oriented processing. + * Also, the input side (only) is used when reading a file for transcoding. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + +/* Block smoothing is only applicable for progressive JPEG, so: */ +#ifndef D_PROGRESSIVE_SUPPORTED +#undef BLOCK_SMOOTHING_SUPPORTED +#endif + +/* Private buffer controller object */ + +typedef struct { + struct jpeg_d_coef_controller pub; /* public fields */ + + /* These variables keep track of the current location of the input side. */ + /* cinfo->input_iMCU_row is also used for this. */ + JDIMENSION MCU_ctr; /* counts MCUs processed in current row */ + int MCU_vert_offset; /* counts MCU rows within iMCU row */ + int MCU_rows_per_iMCU_row; /* number of such rows needed */ + + /* The output side's location is represented by cinfo->output_iMCU_row. */ + + /* In single-pass modes, it's sufficient to buffer just one MCU. + * We allocate a workspace of D_MAX_BLOCKS_IN_MCU coefficient blocks, + * and let the entropy decoder write into that workspace each time. + * (On 80x86, the workspace is FAR even though it's not really very big; + * this is to keep the module interfaces unchanged when a large coefficient + * buffer is necessary.) + * In multi-pass modes, this array points to the current MCU's blocks + * within the virtual arrays; it is used only by the input side. + */ + JBLOCKROW MCU_buffer[D_MAX_BLOCKS_IN_MCU]; + +#ifdef D_MULTISCAN_FILES_SUPPORTED + /* In multi-pass modes, we need a virtual block array for each component. 
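+   * (For instance, a three-component progressive YCbCr image uses only
+   * whole_image[0..2] of the MAX_COMPONENTS-entry array below; the
+   * remaining slots stay unused.)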
*/ + jvirt_barray_ptr whole_image[MAX_COMPONENTS]; +#endif + +#ifdef BLOCK_SMOOTHING_SUPPORTED + /* When doing block smoothing, we latch coefficient Al values here */ + int * coef_bits_latch; +#define SAVED_COEFS 6 /* we save coef_bits[0..5] */ +#endif +} my_coef_controller; + +typedef my_coef_controller * my_coef_ptr; + +/* Forward declarations */ +METHODDEF(int) decompress_onepass + JPP((j_decompress_ptr cinfo, JSAMPIMAGE output_buf)); +#ifdef D_MULTISCAN_FILES_SUPPORTED +METHODDEF(int) decompress_data + JPP((j_decompress_ptr cinfo, JSAMPIMAGE output_buf)); +#endif +#ifdef BLOCK_SMOOTHING_SUPPORTED +LOCAL(boolean) smoothing_ok JPP((j_decompress_ptr cinfo)); +METHODDEF(int) decompress_smooth_data + JPP((j_decompress_ptr cinfo, JSAMPIMAGE output_buf)); +#endif + + +LOCAL(void) +start_iMCU_row (j_decompress_ptr cinfo) +/* Reset within-iMCU-row counters for a new row (input side) */ +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + + /* In an interleaved scan, an MCU row is the same as an iMCU row. + * In a noninterleaved scan, an iMCU row has v_samp_factor MCU rows. + * But at the bottom of the image, process only what's left. + */ + if (cinfo->comps_in_scan > 1) { + coef->MCU_rows_per_iMCU_row = 1; + } else { + if (cinfo->input_iMCU_row < (cinfo->total_iMCU_rows-1)) + coef->MCU_rows_per_iMCU_row = cinfo->cur_comp_info[0]->v_samp_factor; + else + coef->MCU_rows_per_iMCU_row = cinfo->cur_comp_info[0]->last_row_height; + } + + coef->MCU_ctr = 0; + coef->MCU_vert_offset = 0; +} + + +/* + * Initialize for an input processing pass. + */ + +METHODDEF(void) +start_input_pass (j_decompress_ptr cinfo) +{ + cinfo->input_iMCU_row = 0; + start_iMCU_row(cinfo); +} + + +/* + * Initialize for an output processing pass. + */ + +METHODDEF(void) +start_output_pass (j_decompress_ptr cinfo) +{ +#ifdef BLOCK_SMOOTHING_SUPPORTED + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + + /* If multipass, check to see whether to use block smoothing on this pass */ + if (coef->pub.coef_arrays != NULL) { + if (cinfo->do_block_smoothing && smoothing_ok(cinfo)) + coef->pub.decompress_data = decompress_smooth_data; + else + coef->pub.decompress_data = decompress_data; + } +#endif + cinfo->output_iMCU_row = 0; +} + + +/* + * Decompress and return some data in the single-pass case. + * Always attempts to emit one fully interleaved MCU row ("iMCU" row). + * Input and output must run in lockstep since we have only a one-MCU buffer. + * Return value is JPEG_ROW_COMPLETED, JPEG_SCAN_COMPLETED, or JPEG_SUSPENDED. + * + * NB: output_buf contains a plane for each component in image, + * which we index according to the component's SOF position. + */ + +METHODDEF(int) +decompress_onepass (j_decompress_ptr cinfo, JSAMPIMAGE output_buf) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + JDIMENSION MCU_col_num; /* index of current MCU within row */ + JDIMENSION last_MCU_col = cinfo->MCUs_per_row - 1; + JDIMENSION last_iMCU_row = cinfo->total_iMCU_rows - 1; + int blkn, ci, xindex, yindex, yoffset, useful_width; + JSAMPARRAY output_ptr; + JDIMENSION start_col, output_col; + jpeg_component_info *compptr; + inverse_DCT_method_ptr inverse_DCT; + + /* Loop to process as much as one whole iMCU row */ + for (yoffset = coef->MCU_vert_offset; yoffset < coef->MCU_rows_per_iMCU_row; + yoffset++) { + for (MCU_col_num = coef->MCU_ctr; MCU_col_num <= last_MCU_col; + MCU_col_num++) { + /* Try to fetch an MCU. Entropy decoder expects buffer to be zeroed. 
*/ + if (cinfo->lim_Se) /* can bypass in DC only case */ + FMEMZERO((void FAR *) coef->MCU_buffer[0], + (size_t) (cinfo->blocks_in_MCU * SIZEOF(JBLOCK))); + if (! (*cinfo->entropy->decode_mcu) (cinfo, coef->MCU_buffer)) { + /* Suspension forced; update state counters and exit */ + coef->MCU_vert_offset = yoffset; + coef->MCU_ctr = MCU_col_num; + return JPEG_SUSPENDED; + } + /* Determine where data should go in output_buf and do the IDCT thing. + * We skip dummy blocks at the right and bottom edges (but blkn gets + * incremented past them!). Note the inner loop relies on having + * allocated the MCU_buffer[] blocks sequentially. + */ + blkn = 0; /* index of current DCT block within MCU */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + /* Don't bother to IDCT an uninteresting component. */ + if (! compptr->component_needed) { + blkn += compptr->MCU_blocks; + continue; + } + inverse_DCT = cinfo->idct->inverse_DCT[compptr->component_index]; + useful_width = (MCU_col_num < last_MCU_col) ? compptr->MCU_width + : compptr->last_col_width; + output_ptr = output_buf[compptr->component_index] + + yoffset * compptr->DCT_v_scaled_size; + start_col = MCU_col_num * compptr->MCU_sample_width; + for (yindex = 0; yindex < compptr->MCU_height; yindex++) { + if (cinfo->input_iMCU_row < last_iMCU_row || + yoffset+yindex < compptr->last_row_height) { + output_col = start_col; + for (xindex = 0; xindex < useful_width; xindex++) { + (*inverse_DCT) (cinfo, compptr, + (JCOEFPTR) coef->MCU_buffer[blkn+xindex], + output_ptr, output_col); + output_col += compptr->DCT_h_scaled_size; + } + } + blkn += compptr->MCU_width; + output_ptr += compptr->DCT_v_scaled_size; + } + } + } + /* Completed an MCU row, but perhaps not an iMCU row */ + coef->MCU_ctr = 0; + } + /* Completed the iMCU row, advance counters for next one */ + cinfo->output_iMCU_row++; + if (++(cinfo->input_iMCU_row) < cinfo->total_iMCU_rows) { + start_iMCU_row(cinfo); + return JPEG_ROW_COMPLETED; + } + /* Completed the scan */ + (*cinfo->inputctl->finish_input_pass) (cinfo); + return JPEG_SCAN_COMPLETED; +} + + +/* + * Dummy consume-input routine for single-pass operation. + */ + +METHODDEF(int) +dummy_consume_data (j_decompress_ptr cinfo) +{ + return JPEG_SUSPENDED; /* Always indicate nothing was done */ +} + + +#ifdef D_MULTISCAN_FILES_SUPPORTED + +/* + * Consume input data and store it in the full-image coefficient buffer. + * We read as much as one fully interleaved MCU row ("iMCU" row) per call, + * ie, v_samp_factor block rows for each component in the scan. + * Return value is JPEG_ROW_COMPLETED, JPEG_SCAN_COMPLETED, or JPEG_SUSPENDED. + */ + +METHODDEF(int) +consume_data (j_decompress_ptr cinfo) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + JDIMENSION MCU_col_num; /* index of current MCU within row */ + int blkn, ci, xindex, yindex, yoffset; + JDIMENSION start_col; + JBLOCKARRAY buffer[MAX_COMPS_IN_SCAN]; + JBLOCKROW buffer_ptr; + jpeg_component_info *compptr; + + /* Align the virtual buffers for the components used in this scan. */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + buffer[ci] = (*cinfo->mem->access_virt_barray) + ((j_common_ptr) cinfo, coef->whole_image[compptr->component_index], + cinfo->input_iMCU_row * compptr->v_samp_factor, + (JDIMENSION) compptr->v_samp_factor, TRUE); + /* Note: entropy decoder expects buffer to be zeroed, + * but this is handled automatically by the memory manager + * because we requested a pre-zeroed array. 
+ */ + } + + /* Loop to process one whole iMCU row */ + for (yoffset = coef->MCU_vert_offset; yoffset < coef->MCU_rows_per_iMCU_row; + yoffset++) { + for (MCU_col_num = coef->MCU_ctr; MCU_col_num < cinfo->MCUs_per_row; + MCU_col_num++) { + /* Construct list of pointers to DCT blocks belonging to this MCU */ + blkn = 0; /* index of current DCT block within MCU */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + start_col = MCU_col_num * compptr->MCU_width; + for (yindex = 0; yindex < compptr->MCU_height; yindex++) { + buffer_ptr = buffer[ci][yindex+yoffset] + start_col; + for (xindex = 0; xindex < compptr->MCU_width; xindex++) { + coef->MCU_buffer[blkn++] = buffer_ptr++; + } + } + } + /* Try to fetch the MCU. */ + if (! (*cinfo->entropy->decode_mcu) (cinfo, coef->MCU_buffer)) { + /* Suspension forced; update state counters and exit */ + coef->MCU_vert_offset = yoffset; + coef->MCU_ctr = MCU_col_num; + return JPEG_SUSPENDED; + } + } + /* Completed an MCU row, but perhaps not an iMCU row */ + coef->MCU_ctr = 0; + } + /* Completed the iMCU row, advance counters for next one */ + if (++(cinfo->input_iMCU_row) < cinfo->total_iMCU_rows) { + start_iMCU_row(cinfo); + return JPEG_ROW_COMPLETED; + } + /* Completed the scan */ + (*cinfo->inputctl->finish_input_pass) (cinfo); + return JPEG_SCAN_COMPLETED; +} + + +/* + * Decompress and return some data in the multi-pass case. + * Always attempts to emit one fully interleaved MCU row ("iMCU" row). + * Return value is JPEG_ROW_COMPLETED, JPEG_SCAN_COMPLETED, or JPEG_SUSPENDED. + * + * NB: output_buf contains a plane for each component in image. + */ + +METHODDEF(int) +decompress_data (j_decompress_ptr cinfo, JSAMPIMAGE output_buf) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + JDIMENSION last_iMCU_row = cinfo->total_iMCU_rows - 1; + JDIMENSION block_num; + int ci, block_row, block_rows; + JBLOCKARRAY buffer; + JBLOCKROW buffer_ptr; + JSAMPARRAY output_ptr; + JDIMENSION output_col; + jpeg_component_info *compptr; + inverse_DCT_method_ptr inverse_DCT; + + /* Force some input to be done if we are getting ahead of the input. */ + while (cinfo->input_scan_number < cinfo->output_scan_number || + (cinfo->input_scan_number == cinfo->output_scan_number && + cinfo->input_iMCU_row <= cinfo->output_iMCU_row)) { + if ((*cinfo->inputctl->consume_input)(cinfo) == JPEG_SUSPENDED) + return JPEG_SUSPENDED; + } + + /* OK, output from the virtual arrays. */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Don't bother to IDCT an uninteresting component. */ + if (! compptr->component_needed) + continue; + /* Align the virtual buffer for this component. */ + buffer = (*cinfo->mem->access_virt_barray) + ((j_common_ptr) cinfo, coef->whole_image[ci], + cinfo->output_iMCU_row * compptr->v_samp_factor, + (JDIMENSION) compptr->v_samp_factor, FALSE); + /* Count non-dummy DCT block rows in this iMCU row. */ + if (cinfo->output_iMCU_row < last_iMCU_row) + block_rows = compptr->v_samp_factor; + else { + /* NB: can't use last_row_height here; it is input-side-dependent! */ + block_rows = (int) (compptr->height_in_blocks % compptr->v_samp_factor); + if (block_rows == 0) block_rows = compptr->v_samp_factor; + } + inverse_DCT = cinfo->idct->inverse_DCT[ci]; + output_ptr = output_buf[ci]; + /* Loop over all DCT blocks to be processed. 
*/ + for (block_row = 0; block_row < block_rows; block_row++) { + buffer_ptr = buffer[block_row]; + output_col = 0; + for (block_num = 0; block_num < compptr->width_in_blocks; block_num++) { + (*inverse_DCT) (cinfo, compptr, (JCOEFPTR) buffer_ptr, + output_ptr, output_col); + buffer_ptr++; + output_col += compptr->DCT_h_scaled_size; + } + output_ptr += compptr->DCT_v_scaled_size; + } + } + + if (++(cinfo->output_iMCU_row) < cinfo->total_iMCU_rows) + return JPEG_ROW_COMPLETED; + return JPEG_SCAN_COMPLETED; +} + +#endif /* D_MULTISCAN_FILES_SUPPORTED */ + + +#ifdef BLOCK_SMOOTHING_SUPPORTED + +/* + * This code applies interblock smoothing as described by section K.8 + * of the JPEG standard: the first 5 AC coefficients are estimated from + * the DC values of a DCT block and its 8 neighboring blocks. + * We apply smoothing only for progressive JPEG decoding, and only if + * the coefficients it can estimate are not yet known to full precision. + */ + +/* Natural-order array positions of the first 5 zigzag-order coefficients */ +#define Q01_POS 1 +#define Q10_POS 8 +#define Q20_POS 16 +#define Q11_POS 9 +#define Q02_POS 2 + +/* + * Determine whether block smoothing is applicable and safe. + * We also latch the current states of the coef_bits[] entries for the + * AC coefficients; otherwise, if the input side of the decompressor + * advances into a new scan, we might think the coefficients are known + * more accurately than they really are. + */ + +LOCAL(boolean) +smoothing_ok (j_decompress_ptr cinfo) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + boolean smoothing_useful = FALSE; + int ci, coefi; + jpeg_component_info *compptr; + JQUANT_TBL * qtable; + int * coef_bits; + int * coef_bits_latch; + + if (! cinfo->progressive_mode || cinfo->coef_bits == NULL) + return FALSE; + + /* Allocate latch area if not already done */ + if (coef->coef_bits_latch == NULL) + coef->coef_bits_latch = (int *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + cinfo->num_components * + (SAVED_COEFS * SIZEOF(int))); + coef_bits_latch = coef->coef_bits_latch; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* All components' quantization values must already be latched. */ + if ((qtable = compptr->quant_table) == NULL) + return FALSE; + /* Verify DC & first 5 AC quantizers are nonzero to avoid zero-divide. */ + if (qtable->quantval[0] == 0 || + qtable->quantval[Q01_POS] == 0 || + qtable->quantval[Q10_POS] == 0 || + qtable->quantval[Q20_POS] == 0 || + qtable->quantval[Q11_POS] == 0 || + qtable->quantval[Q02_POS] == 0) + return FALSE; + /* DC values must be at least partly known for all components. */ + coef_bits = cinfo->coef_bits[ci]; + if (coef_bits[0] < 0) + return FALSE; + /* Block smoothing is helpful if some AC coefficients remain inaccurate. */ + for (coefi = 1; coefi <= 5; coefi++) { + coef_bits_latch[coefi] = coef_bits[coefi]; + if (coef_bits[coefi] != 0) + smoothing_useful = TRUE; + } + coef_bits_latch += SAVED_COEFS; + } + + return smoothing_useful; +} + + +/* + * Variant of decompress_data for use when doing block smoothing. 
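+ * As a concrete instance of the K.8 estimates computed below: the AC01
+ * (first horizontal AC) coefficient is predicted as roughly
+ * 36*Q00*(DC4-DC6)/(256*Q01), i.e. from the DC difference between the
+ * left and right neighbor blocks, and while successive-approximation bits
+ * are still outstanding (Al > 0) the prediction is clamped below 2^Al.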
+ */ + +METHODDEF(int) +decompress_smooth_data (j_decompress_ptr cinfo, JSAMPIMAGE output_buf) +{ + my_coef_ptr coef = (my_coef_ptr) cinfo->coef; + JDIMENSION last_iMCU_row = cinfo->total_iMCU_rows - 1; + JDIMENSION block_num, last_block_column; + int ci, block_row, block_rows, access_rows; + JBLOCKARRAY buffer; + JBLOCKROW buffer_ptr, prev_block_row, next_block_row; + JSAMPARRAY output_ptr; + JDIMENSION output_col; + jpeg_component_info *compptr; + inverse_DCT_method_ptr inverse_DCT; + boolean first_row, last_row; + JBLOCK workspace; + int *coef_bits; + JQUANT_TBL *quanttbl; + INT32 Q00,Q01,Q02,Q10,Q11,Q20, num; + int DC1,DC2,DC3,DC4,DC5,DC6,DC7,DC8,DC9; + int Al, pred; + + /* Force some input to be done if we are getting ahead of the input. */ + while (cinfo->input_scan_number <= cinfo->output_scan_number && + ! cinfo->inputctl->eoi_reached) { + if (cinfo->input_scan_number == cinfo->output_scan_number) { + /* If input is working on current scan, we ordinarily want it to + * have completed the current row. But if input scan is DC, + * we want it to keep one row ahead so that next block row's DC + * values are up to date. + */ + JDIMENSION delta = (cinfo->Ss == 0) ? 1 : 0; + if (cinfo->input_iMCU_row > cinfo->output_iMCU_row+delta) + break; + } + if ((*cinfo->inputctl->consume_input)(cinfo) == JPEG_SUSPENDED) + return JPEG_SUSPENDED; + } + + /* OK, output from the virtual arrays. */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Don't bother to IDCT an uninteresting component. */ + if (! compptr->component_needed) + continue; + /* Count non-dummy DCT block rows in this iMCU row. */ + if (cinfo->output_iMCU_row < last_iMCU_row) { + block_rows = compptr->v_samp_factor; + access_rows = block_rows * 2; /* this and next iMCU row */ + last_row = FALSE; + } else { + /* NB: can't use last_row_height here; it is input-side-dependent! */ + block_rows = (int) (compptr->height_in_blocks % compptr->v_samp_factor); + if (block_rows == 0) block_rows = compptr->v_samp_factor; + access_rows = block_rows; /* this iMCU row only */ + last_row = TRUE; + } + /* Align the virtual buffer for this component. */ + if (cinfo->output_iMCU_row > 0) { + access_rows += compptr->v_samp_factor; /* prior iMCU row too */ + buffer = (*cinfo->mem->access_virt_barray) + ((j_common_ptr) cinfo, coef->whole_image[ci], + (cinfo->output_iMCU_row - 1) * compptr->v_samp_factor, + (JDIMENSION) access_rows, FALSE); + buffer += compptr->v_samp_factor; /* point to current iMCU row */ + first_row = FALSE; + } else { + buffer = (*cinfo->mem->access_virt_barray) + ((j_common_ptr) cinfo, coef->whole_image[ci], + (JDIMENSION) 0, (JDIMENSION) access_rows, FALSE); + first_row = TRUE; + } + /* Fetch component-dependent info */ + coef_bits = coef->coef_bits_latch + (ci * SAVED_COEFS); + quanttbl = compptr->quant_table; + Q00 = quanttbl->quantval[0]; + Q01 = quanttbl->quantval[Q01_POS]; + Q10 = quanttbl->quantval[Q10_POS]; + Q20 = quanttbl->quantval[Q20_POS]; + Q11 = quanttbl->quantval[Q11_POS]; + Q02 = quanttbl->quantval[Q02_POS]; + inverse_DCT = cinfo->idct->inverse_DCT[ci]; + output_ptr = output_buf[ci]; + /* Loop over all DCT blocks to be processed. 
 */
+    for (block_row = 0; block_row < block_rows; block_row++) {
+      buffer_ptr = buffer[block_row];
+      if (first_row && block_row == 0)
+	prev_block_row = buffer_ptr;
+      else
+	prev_block_row = buffer[block_row-1];
+      if (last_row && block_row == block_rows-1)
+	next_block_row = buffer_ptr;
+      else
+	next_block_row = buffer[block_row+1];
+      /* We fetch the surrounding DC values using a sliding-register approach.
+       * Initialize all nine here so as to do the right thing on narrow pics.
+       */
+      DC1 = DC2 = DC3 = (int) prev_block_row[0][0];
+      DC4 = DC5 = DC6 = (int) buffer_ptr[0][0];
+      DC7 = DC8 = DC9 = (int) next_block_row[0][0];
+      output_col = 0;
+      last_block_column = compptr->width_in_blocks - 1;
+      for (block_num = 0; block_num <= last_block_column; block_num++) {
+	/* Fetch current DCT block into workspace so we can modify it. */
+	jcopy_block_row(buffer_ptr, (JBLOCKROW) workspace, (JDIMENSION) 1);
+	/* Update DC values */
+	if (block_num < last_block_column) {
+	  DC3 = (int) prev_block_row[1][0];
+	  DC6 = (int) buffer_ptr[1][0];
+	  DC9 = (int) next_block_row[1][0];
+	}
+	/* Compute coefficient estimates per K.8.
+	 * An estimate is applied only if coefficient is still zero,
+	 * and is not known to be fully accurate.
+	 */
+	/* AC01 */
+	if ((Al=coef_bits[1]) != 0 && workspace[1] == 0) {
+	  num = 36 * Q00 * (DC4 - DC6);
+	  if (num >= 0) {
+	    pred = (int) (((Q01<<7) + num) / (Q01<<8));
+	    if (Al > 0 && pred >= (1<<Al))
+	      pred = (1<<Al)-1;
+	  } else {
+	    pred = (int) (((Q01<<7) - num) / (Q01<<8));
+	    if (Al > 0 && pred >= (1<<Al))
+	      pred = (1<<Al)-1;
+	    pred = -pred;
+	  }
+	  workspace[1] = (JCOEF) pred;
+	}
+	/* AC10 */
+	if ((Al=coef_bits[2]) != 0 && workspace[8] == 0) {
+	  num = 36 * Q00 * (DC2 - DC8);
+	  if (num >= 0) {
+	    pred = (int) (((Q10<<7) + num) / (Q10<<8));
+	    if (Al > 0 && pred >= (1<<Al))
+	      pred = (1<<Al)-1;
+	  } else {
+	    pred = (int) (((Q10<<7) - num) / (Q10<<8));
+	    if (Al > 0 && pred >= (1<<Al))
+	      pred = (1<<Al)-1;
+	    pred = -pred;
+	  }
+	  workspace[8] = (JCOEF) pred;
+	}
+	/* AC20 */
+	if ((Al=coef_bits[3]) != 0 && workspace[16] == 0) {
+	  num = 9 * Q00 * (DC2 + DC8 - 2*DC5);
+	  if (num >= 0) {
+	    pred = (int) (((Q20<<7) + num) / (Q20<<8));
+	    if (Al > 0 && pred >= (1<<Al))
+	      pred = (1<<Al)-1;
+	  } else {
+	    pred = (int) (((Q20<<7) - num) / (Q20<<8));
+	    if (Al > 0 && pred >= (1<<Al))
+	      pred = (1<<Al)-1;
+	    pred = -pred;
+	  }
+	  workspace[16] = (JCOEF) pred;
+	}
+	/* AC11 */
+	if ((Al=coef_bits[4]) != 0 && workspace[9] == 0) {
+	  num = 5 * Q00 * (DC1 - DC3 - DC7 + DC9);
+	  if (num >= 0) {
+	    pred = (int) (((Q11<<7) + num) / (Q11<<8));
+	    if (Al > 0 && pred >= (1<<Al))
+	      pred = (1<<Al)-1;
+	  } else {
+	    pred = (int) (((Q11<<7) - num) / (Q11<<8));
+	    if (Al > 0 && pred >= (1<<Al))
+	      pred = (1<<Al)-1;
+	    pred = -pred;
+	  }
+	  workspace[9] = (JCOEF) pred;
+	}
+	/* AC02 */
+	if ((Al=coef_bits[5]) != 0 && workspace[2] == 0) {
+	  num = 9 * Q00 * (DC4 + DC6 - 2*DC5);
+	  if (num >= 0) {
+	    pred = (int) (((Q02<<7) + num) / (Q02<<8));
+	    if (Al > 0 && pred >= (1<<Al))
+	      pred = (1<<Al)-1;
+	  } else {
+	    pred = (int) (((Q02<<7) - num) / (Q02<<8));
+	    if (Al > 0 && pred >= (1<<Al))
+	      pred = (1<<Al)-1;
+	    pred = -pred;
+	  }
+	  workspace[2] = (JCOEF) pred;
+	}
+	/* OK, do the IDCT */
+	(*inverse_DCT) (cinfo, compptr, (JCOEFPTR) workspace,
+			output_ptr, output_col);
+	/* Advance for next column */
+	DC1 = DC2; DC2 = DC3;
+	DC4 = DC5; DC5 = DC6;
+	DC7 = DC8; DC8 = DC9;
+	buffer_ptr++, prev_block_row++, next_block_row++;
+	output_col += compptr->DCT_h_scaled_size;
+      }
+      output_ptr += compptr->DCT_v_scaled_size;
+    }
+  }
+
+  if (++(cinfo->output_iMCU_row) < cinfo->total_iMCU_rows)
+    return JPEG_ROW_COMPLETED;
+  return JPEG_SCAN_COMPLETED;
+}
+
+#endif /* BLOCK_SMOOTHING_SUPPORTED */
+
+
+/*
+ * Initialize coefficient buffer controller.
+ */
+
+GLOBAL(void)
+jinit_d_coef_controller (j_decompress_ptr cinfo, boolean need_full_buffer)
+{
+  my_coef_ptr coef;
+
+  coef = (my_coef_ptr)
+    (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
+				SIZEOF(my_coef_controller));
+  cinfo->coef = (struct jpeg_d_coef_controller *) coef;
+  coef->pub.start_input_pass = start_input_pass;
+  coef->pub.start_output_pass = start_output_pass;
+#ifdef BLOCK_SMOOTHING_SUPPORTED
+  coef->coef_bits_latch = NULL;
+#endif
+
+  /* Create the coefficient buffer. */
+  if (need_full_buffer) {
+#ifdef D_MULTISCAN_FILES_SUPPORTED
+    /* Allocate a full-image virtual array for each component, */
+    /* padded to a multiple of samp_factor DCT blocks in each direction. */
+    /* Note we ask for a pre-zeroed array.
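+     * (Example of the padding mentioned above: a component 75 blocks wide
+     * with h_samp_factor 2 is rounded up to 76 block columns by jround_up.)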
*/ + int ci, access_rows; + jpeg_component_info *compptr; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + access_rows = compptr->v_samp_factor; +#ifdef BLOCK_SMOOTHING_SUPPORTED + /* If block smoothing could be used, need a bigger window */ + if (cinfo->progressive_mode) + access_rows *= 3; +#endif + coef->whole_image[ci] = (*cinfo->mem->request_virt_barray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, TRUE, + (JDIMENSION) jround_up((long) compptr->width_in_blocks, + (long) compptr->h_samp_factor), + (JDIMENSION) jround_up((long) compptr->height_in_blocks, + (long) compptr->v_samp_factor), + (JDIMENSION) access_rows); + } + coef->pub.consume_data = consume_data; + coef->pub.decompress_data = decompress_data; + coef->pub.coef_arrays = coef->whole_image; /* link to virtual arrays */ +#else + ERREXIT(cinfo, JERR_NOT_COMPILED); +#endif + } else { + /* We only need a single-MCU buffer. */ + JBLOCKROW buffer; + int i; + + buffer = (JBLOCKROW) + (*cinfo->mem->alloc_large) ((j_common_ptr) cinfo, JPOOL_IMAGE, + D_MAX_BLOCKS_IN_MCU * SIZEOF(JBLOCK)); + for (i = 0; i < D_MAX_BLOCKS_IN_MCU; i++) { + coef->MCU_buffer[i] = buffer + i; + } + if (cinfo->lim_Se == 0) /* DC only case: want to bypass later */ + FMEMZERO((void FAR *) buffer, + (size_t) (D_MAX_BLOCKS_IN_MCU * SIZEOF(JBLOCK))); + coef->pub.consume_data = dummy_consume_data; + coef->pub.decompress_data = decompress_onepass; + coef->pub.coef_arrays = NULL; /* flag for no virtual arrays */ + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdcolor.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdcolor.c new file mode 100644 index 000000000..83e4d069a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdcolor.c @@ -0,0 +1,512 @@ +/* + * jdcolor.c + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * Modified 2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains output colorspace conversion routines. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Private subobject */ + +typedef struct { + struct jpeg_color_deconverter pub; /* public fields */ + + /* Private state for YCC->RGB conversion */ + int * Cr_r_tab; /* => table for Cr to R conversion */ + int * Cb_b_tab; /* => table for Cb to B conversion */ + INT32 * Cr_g_tab; /* => table for Cr to G conversion */ + INT32 * Cb_g_tab; /* => table for Cb to G conversion */ + + /* Private state for RGB->Y conversion */ + INT32 * rgb_y_tab; /* => table for RGB to Y conversion */ +} my_color_deconverter; + +typedef my_color_deconverter * my_cconvert_ptr; + + +/**************** YCbCr -> RGB conversion: most common case **************/ +/**************** RGB -> Y conversion: less common case **************/ + +/* + * YCbCr is defined per CCIR 601-1, except that Cb and Cr are + * normalized to the range 0..MAXJSAMPLE rather than -0.5 .. 0.5. + * The conversion equations to be implemented are therefore + * + * R = Y + 1.40200 * Cr + * G = Y - 0.34414 * Cb - 0.71414 * Cr + * B = Y + 1.77200 * Cb + * + * Y = 0.29900 * R + 0.58700 * G + 0.11400 * B + * + * where Cb and Cr represent the incoming values less CENTERJSAMPLE. + * (These numbers are derived from TIFF 6.0 section 21, dated 3-June-92.) 
+ *
+ * To avoid floating-point arithmetic, we represent the fractional constants
+ * as integers scaled up by 2^16 (about 4 digits precision); we have to divide
+ * the products by 2^16, with appropriate rounding, to get the correct answer.
+ * Notice that Y, being an integral input, does not contribute any fraction
+ * so it need not participate in the rounding.
+ *
+ * For even more speed, we avoid doing any multiplications in the inner loop
+ * by precalculating the constants times Cb and Cr for all possible values.
+ * For 8-bit JSAMPLEs this is very reasonable (only 256 entries per table);
+ * for 12-bit samples it is still acceptable.  It's not very reasonable for
+ * 16-bit samples, but if you want lossless storage you shouldn't be changing
+ * colorspace anyway.
+ * The Cr=>R and Cb=>B values can be rounded to integers in advance; the
+ * values for the G calculation are left scaled up, since we must add them
+ * together before rounding.
+ */
+
+#define SCALEBITS	16	/* speediest right-shift on some machines */
+#define ONE_HALF	((INT32) 1 << (SCALEBITS-1))
+#define FIX(x)		((INT32) ((x) * (1L<<SCALEBITS) + 0.5))
+
+/* We allocate one big table for RGB->Y conversion and divide it up into
+ * three parts, instead of doing three alloc_small requests.  This lets us
+ * use a single table base address, which can be held in a register in the
+ * inner loops on many machines (more than can hold all three addresses,
+ * anyway).
+ */
+
+#define R_Y_OFF		0			/* offset to R => Y section */
+#define G_Y_OFF		(1*(MAXJSAMPLE+1))	/* offset to G => Y section */
+#define B_Y_OFF		(2*(MAXJSAMPLE+1))	/* etc. */
+#define TABLE_SIZE	(3*(MAXJSAMPLE+1))
+
+
+/*
+ * Initialize tables for YCC->RGB colorspace conversion.
+ */
+
+LOCAL(void)
+build_ycc_rgb_table (j_decompress_ptr cinfo)
+{
+  my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert;
+  int i;
+  INT32 x;
+  SHIFT_TEMPS
+
+  cconvert->Cr_r_tab = (int *)
+    (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
+				(MAXJSAMPLE+1) * SIZEOF(int));
+  cconvert->Cb_b_tab = (int *)
+    (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
+				(MAXJSAMPLE+1) * SIZEOF(int));
+  cconvert->Cr_g_tab = (INT32 *)
+    (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
+				(MAXJSAMPLE+1) * SIZEOF(INT32));
+  cconvert->Cb_g_tab = (INT32 *)
+    (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
+				(MAXJSAMPLE+1) * SIZEOF(INT32));
+
+  for (i = 0, x = -CENTERJSAMPLE; i <= MAXJSAMPLE; i++, x++) {
+    /* i is the actual input pixel value, in the range 0..MAXJSAMPLE */
+    /* The Cb or Cr value we are thinking of is x = i - CENTERJSAMPLE */
+    /* Cr=>R value is nearest int to 1.40200 * x */
+    cconvert->Cr_r_tab[i] = (int)
+		    RIGHT_SHIFT(FIX(1.40200) * x + ONE_HALF, SCALEBITS);
+    /* Cb=>B value is nearest int to 1.77200 * x */
+    cconvert->Cb_b_tab[i] = (int)
+		    RIGHT_SHIFT(FIX(1.77200) * x + ONE_HALF, SCALEBITS);
+    /* Cr=>G value is scaled-up -0.71414 * x */
+    cconvert->Cr_g_tab[i] = (- FIX(0.71414)) * x;
+    /* Cb=>G value is scaled-up -0.34414 * x */
+    /* We also add in ONE_HALF so that need not do it in inner loop */
+    cconvert->Cb_g_tab[i] = (- FIX(0.34414)) * x + ONE_HALF;
+  }
+}
+
+
+/*
+ * Convert some rows of samples to the output colorspace.
+ *
+ * Note that we change from noninterleaved, one-plane-per-component format
+ * to interleaved-pixel format.  The output buffer is therefore three times
+ * as wide as the input buffer.
+ * A starting row offset is provided only for the input buffer.  The caller
+ * can easily adjust the passed output_buf value to accommodate any row
+ * offset required on that side.
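+ * (So for the usual RGB_PIXELSIZE of 3, a 320-pixel-wide output row
+ * occupies 960 interleaved JSAMPLEs: R,G,B for column 0, then column 1,
+ * and so on.)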
+ */ + +METHODDEF(void) +ycc_rgb_convert (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION input_row, + JSAMPARRAY output_buf, int num_rows) +{ + my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; + register int y, cb, cr; + register JSAMPROW outptr; + register JSAMPROW inptr0, inptr1, inptr2; + register JDIMENSION col; + JDIMENSION num_cols = cinfo->output_width; + /* copy these pointers into registers if possible */ + register JSAMPLE * range_limit = cinfo->sample_range_limit; + register int * Crrtab = cconvert->Cr_r_tab; + register int * Cbbtab = cconvert->Cb_b_tab; + register INT32 * Crgtab = cconvert->Cr_g_tab; + register INT32 * Cbgtab = cconvert->Cb_g_tab; + SHIFT_TEMPS + + while (--num_rows >= 0) { + inptr0 = input_buf[0][input_row]; + inptr1 = input_buf[1][input_row]; + inptr2 = input_buf[2][input_row]; + input_row++; + outptr = *output_buf++; + for (col = 0; col < num_cols; col++) { + y = GETJSAMPLE(inptr0[col]); + cb = GETJSAMPLE(inptr1[col]); + cr = GETJSAMPLE(inptr2[col]); + /* Range-limiting is essential due to noise introduced by DCT losses. */ + outptr[RGB_RED] = range_limit[y + Crrtab[cr]]; + outptr[RGB_GREEN] = range_limit[y + + ((int) RIGHT_SHIFT(Cbgtab[cb] + Crgtab[cr], + SCALEBITS))]; + outptr[RGB_BLUE] = range_limit[y + Cbbtab[cb]]; + outptr += RGB_PIXELSIZE; + } + } +} + + +/**************** Cases other than YCbCr -> RGB **************/ + + +/* + * Initialize for RGB->grayscale colorspace conversion. + */ + +LOCAL(void) +build_rgb_y_table (j_decompress_ptr cinfo) +{ + my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; + INT32 * rgb_y_tab; + INT32 i; + + /* Allocate and fill in the conversion tables. */ + cconvert->rgb_y_tab = rgb_y_tab = (INT32 *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + (TABLE_SIZE * SIZEOF(INT32))); + + for (i = 0; i <= MAXJSAMPLE; i++) { + rgb_y_tab[i+R_Y_OFF] = FIX(0.29900) * i; + rgb_y_tab[i+G_Y_OFF] = FIX(0.58700) * i; + rgb_y_tab[i+B_Y_OFF] = FIX(0.11400) * i + ONE_HALF; + } +} + + +/* + * Convert RGB to grayscale. + */ + +METHODDEF(void) +rgb_gray_convert (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION input_row, + JSAMPARRAY output_buf, int num_rows) +{ + my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; + register int r, g, b; + register INT32 * ctab = cconvert->rgb_y_tab; + register JSAMPROW outptr; + register JSAMPROW inptr0, inptr1, inptr2; + register JDIMENSION col; + JDIMENSION num_cols = cinfo->output_width; + + while (--num_rows >= 0) { + inptr0 = input_buf[0][input_row]; + inptr1 = input_buf[1][input_row]; + inptr2 = input_buf[2][input_row]; + input_row++; + outptr = *output_buf++; + for (col = 0; col < num_cols; col++) { + r = GETJSAMPLE(inptr0[col]); + g = GETJSAMPLE(inptr1[col]); + b = GETJSAMPLE(inptr2[col]); + /* Y */ + outptr[col] = (JSAMPLE) + ((ctab[r+R_Y_OFF] + ctab[g+G_Y_OFF] + ctab[b+B_Y_OFF]) + >> SCALEBITS); + } + } +} + + +/* + * No colorspace change, but conversion from separate-planes + * to interleaved representation. 
+ */ + +METHODDEF(void) +rgb_convert (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION input_row, + JSAMPARRAY output_buf, int num_rows) +{ + register JSAMPROW outptr; + register JSAMPROW inptr0, inptr1, inptr2; + register JDIMENSION col; + JDIMENSION num_cols = cinfo->output_width; + + while (--num_rows >= 0) { + inptr0 = input_buf[0][input_row]; + inptr1 = input_buf[1][input_row]; + inptr2 = input_buf[2][input_row]; + input_row++; + outptr = *output_buf++; + for (col = 0; col < num_cols; col++) { + /* We can dispense with GETJSAMPLE() here */ + outptr[RGB_RED] = inptr0[col]; + outptr[RGB_GREEN] = inptr1[col]; + outptr[RGB_BLUE] = inptr2[col]; + outptr += RGB_PIXELSIZE; + } + } +} + + +/* + * Color conversion for no colorspace change: just copy the data, + * converting from separate-planes to interleaved representation. + */ + +METHODDEF(void) +null_convert (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION input_row, + JSAMPARRAY output_buf, int num_rows) +{ + register JSAMPROW inptr, outptr; + register JDIMENSION count; + register int num_components = cinfo->num_components; + JDIMENSION num_cols = cinfo->output_width; + int ci; + + while (--num_rows >= 0) { + for (ci = 0; ci < num_components; ci++) { + inptr = input_buf[ci][input_row]; + outptr = output_buf[0] + ci; + for (count = num_cols; count > 0; count--) { + *outptr = *inptr++; /* needn't bother with GETJSAMPLE() here */ + outptr += num_components; + } + } + input_row++; + output_buf++; + } +} + + +/* + * Color conversion for grayscale: just copy the data. + * This also works for YCbCr -> grayscale conversion, in which + * we just copy the Y (luminance) component and ignore chrominance. + */ + +METHODDEF(void) +grayscale_convert (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION input_row, + JSAMPARRAY output_buf, int num_rows) +{ + jcopy_sample_rows(input_buf[0], (int) input_row, output_buf, 0, + num_rows, cinfo->output_width); +} + + +/* + * Convert grayscale to RGB: just duplicate the graylevel three times. + * This is provided to support applications that don't want to cope + * with grayscale as a separate case. + */ + +METHODDEF(void) +gray_rgb_convert (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION input_row, + JSAMPARRAY output_buf, int num_rows) +{ + register JSAMPROW inptr, outptr; + register JDIMENSION col; + JDIMENSION num_cols = cinfo->output_width; + + while (--num_rows >= 0) { + inptr = input_buf[0][input_row++]; + outptr = *output_buf++; + for (col = 0; col < num_cols; col++) { + /* We can dispense with GETJSAMPLE() here */ + outptr[RGB_RED] = outptr[RGB_GREEN] = outptr[RGB_BLUE] = inptr[col]; + outptr += RGB_PIXELSIZE; + } + } +} + + +/* + * Adobe-style YCCK->CMYK conversion. + * We convert YCbCr to R=1-C, G=1-M, and B=1-Y using the same + * conversion as above, while passing K (black) unchanged. + * We assume build_ycc_rgb_table has been called. 
+ */ + +METHODDEF(void) +ycck_cmyk_convert (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION input_row, + JSAMPARRAY output_buf, int num_rows) +{ + my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; + register int y, cb, cr; + register JSAMPROW outptr; + register JSAMPROW inptr0, inptr1, inptr2, inptr3; + register JDIMENSION col; + JDIMENSION num_cols = cinfo->output_width; + /* copy these pointers into registers if possible */ + register JSAMPLE * range_limit = cinfo->sample_range_limit; + register int * Crrtab = cconvert->Cr_r_tab; + register int * Cbbtab = cconvert->Cb_b_tab; + register INT32 * Crgtab = cconvert->Cr_g_tab; + register INT32 * Cbgtab = cconvert->Cb_g_tab; + SHIFT_TEMPS + + while (--num_rows >= 0) { + inptr0 = input_buf[0][input_row]; + inptr1 = input_buf[1][input_row]; + inptr2 = input_buf[2][input_row]; + inptr3 = input_buf[3][input_row]; + input_row++; + outptr = *output_buf++; + for (col = 0; col < num_cols; col++) { + y = GETJSAMPLE(inptr0[col]); + cb = GETJSAMPLE(inptr1[col]); + cr = GETJSAMPLE(inptr2[col]); + /* Range-limiting is essential due to noise introduced by DCT losses. */ + outptr[0] = range_limit[MAXJSAMPLE - (y + Crrtab[cr])]; /* red */ + outptr[1] = range_limit[MAXJSAMPLE - (y + /* green */ + ((int) RIGHT_SHIFT(Cbgtab[cb] + Crgtab[cr], + SCALEBITS)))]; + outptr[2] = range_limit[MAXJSAMPLE - (y + Cbbtab[cb])]; /* blue */ + /* K passes through unchanged */ + outptr[3] = inptr3[col]; /* don't need GETJSAMPLE here */ + outptr += 4; + } + } +} + + +/* + * Empty method for start_pass. + */ + +METHODDEF(void) +start_pass_dcolor (j_decompress_ptr cinfo) +{ + /* no work needed */ +} + + +/* + * Module initialization routine for output colorspace conversion. + */ + +GLOBAL(void) +jinit_color_deconverter (j_decompress_ptr cinfo) +{ + my_cconvert_ptr cconvert; + int ci; + + cconvert = (my_cconvert_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_color_deconverter)); + cinfo->cconvert = (struct jpeg_color_deconverter *) cconvert; + cconvert->pub.start_pass = start_pass_dcolor; + + /* Make sure num_components agrees with jpeg_color_space */ + switch (cinfo->jpeg_color_space) { + case JCS_GRAYSCALE: + if (cinfo->num_components != 1) + ERREXIT(cinfo, JERR_BAD_J_COLORSPACE); + break; + + case JCS_RGB: + case JCS_YCbCr: + if (cinfo->num_components != 3) + ERREXIT(cinfo, JERR_BAD_J_COLORSPACE); + break; + + case JCS_CMYK: + case JCS_YCCK: + if (cinfo->num_components != 4) + ERREXIT(cinfo, JERR_BAD_J_COLORSPACE); + break; + + default: /* JCS_UNKNOWN can be anything */ + if (cinfo->num_components < 1) + ERREXIT(cinfo, JERR_BAD_J_COLORSPACE); + break; + } + + /* Set out_color_components and conversion method based on requested space. + * Also clear the component_needed flags for any unused components, + * so that earlier pipeline stages can avoid useless computation. 
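+   * For example, decoding a YCbCr JPEG with out_color_space = JCS_GRAYSCALE
+   * selects grayscale_convert below and marks components 1 and 2 (Cb, Cr)
+   * as not needed, so the coefficient controller never runs their IDCTs.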
+ */ + + switch (cinfo->out_color_space) { + case JCS_GRAYSCALE: + cinfo->out_color_components = 1; + if (cinfo->jpeg_color_space == JCS_GRAYSCALE || + cinfo->jpeg_color_space == JCS_YCbCr) { + cconvert->pub.color_convert = grayscale_convert; + /* For color->grayscale conversion, only the Y (0) component is needed */ + for (ci = 1; ci < cinfo->num_components; ci++) + cinfo->comp_info[ci].component_needed = FALSE; + } else if (cinfo->jpeg_color_space == JCS_RGB) { + cconvert->pub.color_convert = rgb_gray_convert; + build_rgb_y_table(cinfo); + } else + ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL); + break; + + case JCS_RGB: + cinfo->out_color_components = RGB_PIXELSIZE; + if (cinfo->jpeg_color_space == JCS_YCbCr) { + cconvert->pub.color_convert = ycc_rgb_convert; + build_ycc_rgb_table(cinfo); + } else if (cinfo->jpeg_color_space == JCS_GRAYSCALE) { + cconvert->pub.color_convert = gray_rgb_convert; + } else if (cinfo->jpeg_color_space == JCS_RGB) { + cconvert->pub.color_convert = rgb_convert; + } else + ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL); + break; + + case JCS_CMYK: + cinfo->out_color_components = 4; + if (cinfo->jpeg_color_space == JCS_YCCK) { + cconvert->pub.color_convert = ycck_cmyk_convert; + build_ycc_rgb_table(cinfo); + } else if (cinfo->jpeg_color_space == JCS_CMYK) { + cconvert->pub.color_convert = null_convert; + } else + ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL); + break; + + default: + /* Permit null conversion to same output space */ + if (cinfo->out_color_space == cinfo->jpeg_color_space) { + cinfo->out_color_components = cinfo->num_components; + cconvert->pub.color_convert = null_convert; + } else /* unsupported non-null conversion */ + ERREXIT(cinfo, JERR_CONVERSION_NOTIMPL); + break; + } + + if (cinfo->quantize_colors) + cinfo->output_components = 1; /* single colormapped output component */ + else + cinfo->output_components = cinfo->out_color_components; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jddctmgr.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jddctmgr.c new file mode 100644 index 000000000..0ded9d574 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jddctmgr.c @@ -0,0 +1,384 @@ +/* + * jddctmgr.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * Modified 2002-2010 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains the inverse-DCT management logic. + * This code selects a particular IDCT implementation to be used, + * and it performs related housekeeping chores. No code in this file + * is executed per IDCT step, only during output pass setup. + * + * Note that the IDCT routines are responsible for performing coefficient + * dequantization as well as the IDCT proper. This module sets up the + * dequantization multiplier table needed by the IDCT routine. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" +#include "jdct.h" /* Private declarations for DCT subsystem */ + + +/* + * The decompressor input side (jdinput.c) saves away the appropriate + * quantization table for each component at the start of the first scan + * involving that component. (This is necessary in order to correctly + * decode files that reuse Q-table slots.) + * When we are ready to make an output pass, the saved Q-table is converted + * to a multiplier table that will actually be used by the IDCT routine. 
+ * The multiplier table contents are IDCT-method-dependent. To support + * application changes in IDCT method between scans, we can remake the + * multiplier tables if necessary. + * In buffered-image mode, the first output pass may occur before any data + * has been seen for some components, and thus before their Q-tables have + * been saved away. To handle this case, multiplier tables are preset + * to zeroes; the result of the IDCT will be a neutral gray level. + */ + + +/* Private subobject for this module */ + +typedef struct { + struct jpeg_inverse_dct pub; /* public fields */ + + /* This array contains the IDCT method code that each multiplier table + * is currently set up for, or -1 if it's not yet set up. + * The actual multiplier tables are pointed to by dct_table in the + * per-component comp_info structures. + */ + int cur_method[MAX_COMPONENTS]; +} my_idct_controller; + +typedef my_idct_controller * my_idct_ptr; + + +/* Allocated multiplier tables: big enough for any supported variant */ + +typedef union { + ISLOW_MULT_TYPE islow_array[DCTSIZE2]; +#ifdef DCT_IFAST_SUPPORTED + IFAST_MULT_TYPE ifast_array[DCTSIZE2]; +#endif +#ifdef DCT_FLOAT_SUPPORTED + FLOAT_MULT_TYPE float_array[DCTSIZE2]; +#endif +} multiplier_table; + + +/* The current scaled-IDCT routines require ISLOW-style multiplier tables, + * so be sure to compile that code if either ISLOW or SCALING is requested. + */ +#ifdef DCT_ISLOW_SUPPORTED +#define PROVIDE_ISLOW_TABLES +#else +#ifdef IDCT_SCALING_SUPPORTED +#define PROVIDE_ISLOW_TABLES +#endif +#endif + + +/* + * Prepare for an output pass. + * Here we select the proper IDCT routine for each component and build + * a matching multiplier table. + */ + +METHODDEF(void) +start_pass (j_decompress_ptr cinfo) +{ + my_idct_ptr idct = (my_idct_ptr) cinfo->idct; + int ci, i; + jpeg_component_info *compptr; + int method = 0; + inverse_DCT_method_ptr method_ptr = NULL; + JQUANT_TBL * qtbl; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Select the proper IDCT routine for this component's scaling */ + switch ((compptr->DCT_h_scaled_size << 8) + compptr->DCT_v_scaled_size) { +#ifdef IDCT_SCALING_SUPPORTED + case ((1 << 8) + 1): + method_ptr = jpeg_idct_1x1; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((2 << 8) + 2): + method_ptr = jpeg_idct_2x2; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((3 << 8) + 3): + method_ptr = jpeg_idct_3x3; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((4 << 8) + 4): + method_ptr = jpeg_idct_4x4; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((5 << 8) + 5): + method_ptr = jpeg_idct_5x5; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((6 << 8) + 6): + method_ptr = jpeg_idct_6x6; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((7 << 8) + 7): + method_ptr = jpeg_idct_7x7; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((9 << 8) + 9): + method_ptr = jpeg_idct_9x9; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((10 << 8) + 10): + method_ptr = jpeg_idct_10x10; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((11 << 8) + 11): + method_ptr = jpeg_idct_11x11; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((12 << 8) + 12): + method_ptr = jpeg_idct_12x12; + method = JDCT_ISLOW; /* jidctint uses 
islow-style table */ + break; + case ((13 << 8) + 13): + method_ptr = jpeg_idct_13x13; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((14 << 8) + 14): + method_ptr = jpeg_idct_14x14; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((15 << 8) + 15): + method_ptr = jpeg_idct_15x15; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((16 << 8) + 16): + method_ptr = jpeg_idct_16x16; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((16 << 8) + 8): + method_ptr = jpeg_idct_16x8; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((14 << 8) + 7): + method_ptr = jpeg_idct_14x7; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((12 << 8) + 6): + method_ptr = jpeg_idct_12x6; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((10 << 8) + 5): + method_ptr = jpeg_idct_10x5; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((8 << 8) + 4): + method_ptr = jpeg_idct_8x4; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((6 << 8) + 3): + method_ptr = jpeg_idct_6x3; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((4 << 8) + 2): + method_ptr = jpeg_idct_4x2; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((2 << 8) + 1): + method_ptr = jpeg_idct_2x1; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((8 << 8) + 16): + method_ptr = jpeg_idct_8x16; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((7 << 8) + 14): + method_ptr = jpeg_idct_7x14; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((6 << 8) + 12): + method_ptr = jpeg_idct_6x12; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((5 << 8) + 10): + method_ptr = jpeg_idct_5x10; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((4 << 8) + 8): + method_ptr = jpeg_idct_4x8; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((3 << 8) + 6): + method_ptr = jpeg_idct_3x6; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((2 << 8) + 4): + method_ptr = jpeg_idct_2x4; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; + case ((1 << 8) + 2): + method_ptr = jpeg_idct_1x2; + method = JDCT_ISLOW; /* jidctint uses islow-style table */ + break; +#endif + case ((DCTSIZE << 8) + DCTSIZE): + switch (cinfo->dct_method) { +#ifdef DCT_ISLOW_SUPPORTED + case JDCT_ISLOW: + method_ptr = jpeg_idct_islow; + method = JDCT_ISLOW; + break; +#endif +#ifdef DCT_IFAST_SUPPORTED + case JDCT_IFAST: + method_ptr = jpeg_idct_ifast; + method = JDCT_IFAST; + break; +#endif +#ifdef DCT_FLOAT_SUPPORTED + case JDCT_FLOAT: + method_ptr = jpeg_idct_float; + method = JDCT_FLOAT; + break; +#endif + default: + ERREXIT(cinfo, JERR_NOT_COMPILED); + break; + } + break; + default: + ERREXIT2(cinfo, JERR_BAD_DCTSIZE, + compptr->DCT_h_scaled_size, compptr->DCT_v_scaled_size); + break; + } + idct->pub.inverse_DCT[ci] = method_ptr; + /* Create multiplier table from quant table. + * However, we can skip this if the component is uninteresting + * or if we already built the table. Also, if no quant table + * has yet been saved for the component, we leave the + * multiplier table all-zero; we'll be reading zeroes from the + * coefficient controller's buffer anyway. + */ + if (! 
compptr->component_needed || idct->cur_method[ci] == method) + continue; + qtbl = compptr->quant_table; + if (qtbl == NULL) /* happens if no data yet for component */ + continue; + idct->cur_method[ci] = method; + switch (method) { +#ifdef PROVIDE_ISLOW_TABLES + case JDCT_ISLOW: + { + /* For LL&M IDCT method, multipliers are equal to raw quantization + * coefficients, but are stored as ints to ensure access efficiency. + */ + ISLOW_MULT_TYPE * ismtbl = (ISLOW_MULT_TYPE *) compptr->dct_table; + for (i = 0; i < DCTSIZE2; i++) { + ismtbl[i] = (ISLOW_MULT_TYPE) qtbl->quantval[i]; + } + } + break; +#endif +#ifdef DCT_IFAST_SUPPORTED + case JDCT_IFAST: + { + /* For AA&N IDCT method, multipliers are equal to quantization + * coefficients scaled by scalefactor[row]*scalefactor[col], where + * scalefactor[0] = 1 + * scalefactor[k] = cos(k*PI/16) * sqrt(2) for k=1..7 + * For integer operation, the multiplier table is to be scaled by + * IFAST_SCALE_BITS. + */ + IFAST_MULT_TYPE * ifmtbl = (IFAST_MULT_TYPE *) compptr->dct_table; +#define CONST_BITS 14 + static const INT16 aanscales[DCTSIZE2] = { + /* precomputed values scaled up by 14 bits */ + 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, + 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270, + 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906, + 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315, + 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, + 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552, + 8867, 12299, 11585, 10426, 8867, 6967, 4799, 2446, + 4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247 + }; + SHIFT_TEMPS + + for (i = 0; i < DCTSIZE2; i++) { + ifmtbl[i] = (IFAST_MULT_TYPE) + DESCALE(MULTIPLY16V16((INT32) qtbl->quantval[i], + (INT32) aanscales[i]), + CONST_BITS-IFAST_SCALE_BITS); + } + } + break; +#endif +#ifdef DCT_FLOAT_SUPPORTED + case JDCT_FLOAT: + { + /* For float AA&N IDCT method, multipliers are equal to quantization + * coefficients scaled by scalefactor[row]*scalefactor[col], where + * scalefactor[0] = 1 + * scalefactor[k] = cos(k*PI/16) * sqrt(2) for k=1..7 + * We apply a further scale factor of 1/8. + */ + FLOAT_MULT_TYPE * fmtbl = (FLOAT_MULT_TYPE *) compptr->dct_table; + int row, col; + static const double aanscalefactor[DCTSIZE] = { + 1.0, 1.387039845, 1.306562965, 1.175875602, + 1.0, 0.785694958, 0.541196100, 0.275899379 + }; + + i = 0; + for (row = 0; row < DCTSIZE; row++) { + for (col = 0; col < DCTSIZE; col++) { + fmtbl[i] = (FLOAT_MULT_TYPE) + ((double) qtbl->quantval[i] * + aanscalefactor[row] * aanscalefactor[col] * 0.125); + i++; + } + } + } + break; +#endif + default: + ERREXIT(cinfo, JERR_NOT_COMPILED); + break; + } + } +} + + +/* + * Initialize IDCT manager. 
+ */ + +GLOBAL(void) +jinit_inverse_dct (j_decompress_ptr cinfo) +{ + my_idct_ptr idct; + int ci; + jpeg_component_info *compptr; + + idct = (my_idct_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_idct_controller)); + cinfo->idct = (struct jpeg_inverse_dct *) idct; + idct->pub.start_pass = start_pass; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Allocate and pre-zero a multiplier table for each component */ + compptr->dct_table = + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(multiplier_table)); + MEMZERO(compptr->dct_table, SIZEOF(multiplier_table)); + /* Mark multiplier table not yet set up for any method */ + idct->cur_method[ci] = -1; + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdhuff.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdhuff.c new file mode 100644 index 000000000..06f92fe47 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdhuff.c @@ -0,0 +1,1541 @@ +/* + * jdhuff.c + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * Modified 2006-2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains Huffman entropy decoding routines. + * Both sequential and progressive modes are supported in this single module. + * + * Much of the complexity here has to do with supporting input suspension. + * If the data source module demands suspension, we want to be able to back + * up to the start of the current MCU. To do this, we copy state variables + * into local working storage, and update them back to the permanent + * storage only upon successful completion of an MCU. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Derived data constructed for each Huffman table */ + +#define HUFF_LOOKAHEAD 8 /* # of bits of lookahead */ + +typedef struct { + /* Basic tables: (element [0] of each array is unused) */ + INT32 maxcode[18]; /* largest code of length k (-1 if none) */ + /* (maxcode[17] is a sentinel to ensure jpeg_huff_decode terminates) */ + INT32 valoffset[17]; /* huffval[] offset for codes of length k */ + /* valoffset[k] = huffval[] index of 1st symbol of code length k, less + * the smallest code of length k; so given a code of length k, the + * corresponding symbol is huffval[code + valoffset[k]] + */ + + /* Link to public Huffman table (needed only in jpeg_huff_decode) */ + JHUFF_TBL *pub; + + /* Lookahead tables: indexed by the next HUFF_LOOKAHEAD bits of + * the input data stream. If the next Huffman code is no more + * than HUFF_LOOKAHEAD bits long, we can obtain its length and + * the corresponding symbol directly from these tables. + */ + int look_nbits[1< 32 bits on your machine, and shifting/masking longs is + * reasonably fast, making bit_buf_type be long and setting BIT_BUF_SIZE + * appropriately should be a win. Unfortunately we can't define the size + * with something like #define BIT_BUF_SIZE (sizeof(bit_buf_type)*8) + * because not all machines measure sizeof in 8-bit bytes. 
+ */ + +typedef struct { /* Bitreading state saved across MCUs */ + bit_buf_type get_buffer; /* current bit-extraction buffer */ + int bits_left; /* # of unused bits in it */ +} bitread_perm_state; + +typedef struct { /* Bitreading working state within an MCU */ + /* Current data source location */ + /* We need a copy, rather than munging the original, in case of suspension */ + const JOCTET * next_input_byte; /* => next byte to read from source */ + size_t bytes_in_buffer; /* # of bytes remaining in source buffer */ + /* Bit input buffer --- note these values are kept in register variables, + * not in this struct, inside the inner loops. + */ + bit_buf_type get_buffer; /* current bit-extraction buffer */ + int bits_left; /* # of unused bits in it */ + /* Pointer needed by jpeg_fill_bit_buffer. */ + j_decompress_ptr cinfo; /* back link to decompress master record */ +} bitread_working_state; + +/* Macros to declare and load/save bitread local variables. */ +#define BITREAD_STATE_VARS \ + register bit_buf_type get_buffer; \ + register int bits_left; \ + bitread_working_state br_state + +#define BITREAD_LOAD_STATE(cinfop,permstate) \ + br_state.cinfo = cinfop; \ + br_state.next_input_byte = cinfop->src->next_input_byte; \ + br_state.bytes_in_buffer = cinfop->src->bytes_in_buffer; \ + get_buffer = permstate.get_buffer; \ + bits_left = permstate.bits_left; + +#define BITREAD_SAVE_STATE(cinfop,permstate) \ + cinfop->src->next_input_byte = br_state.next_input_byte; \ + cinfop->src->bytes_in_buffer = br_state.bytes_in_buffer; \ + permstate.get_buffer = get_buffer; \ + permstate.bits_left = bits_left + +/* + * These macros provide the in-line portion of bit fetching. + * Use CHECK_BIT_BUFFER to ensure there are N bits in get_buffer + * before using GET_BITS, PEEK_BITS, or DROP_BITS. + * The variables get_buffer and bits_left are assumed to be locals, + * but the state struct might not be (jpeg_huff_decode needs this). + * CHECK_BIT_BUFFER(state,n,action); + * Ensure there are N bits in get_buffer; if suspend, take action. + * val = GET_BITS(n); + * Fetch next N bits. + * val = PEEK_BITS(n); + * Fetch next N bits without removing them from the buffer. + * DROP_BITS(n); + * Discard next N bits. + * The value N should be a simple variable, not an expression, because it + * is evaluated multiple times. + */ + +#define CHECK_BIT_BUFFER(state,nbits,action) \ + { if (bits_left < (nbits)) { \ + if (! jpeg_fill_bit_buffer(&(state),get_buffer,bits_left,nbits)) \ + { action; } \ + get_buffer = (state).get_buffer; bits_left = (state).bits_left; } } + +#define GET_BITS(nbits) \ + (((int) (get_buffer >> (bits_left -= (nbits)))) & BIT_MASK(nbits)) + +#define PEEK_BITS(nbits) \ + (((int) (get_buffer >> (bits_left - (nbits)))) & BIT_MASK(nbits)) + +#define DROP_BITS(nbits) \ + (bits_left -= (nbits)) + + +/* + * Code for extracting next Huffman-coded symbol from input bit stream. + * Again, this is time-critical and we make the main paths be macros. + * + * We use a lookahead table to process codes of up to HUFF_LOOKAHEAD bits + * without looping. Usually, more than 95% of the Huffman codes will be 8 + * or fewer bits long. The few overlength codes are handled with a loop, + * which need not be inline code. + * + * Notes about the HUFF_DECODE macro: + * 1. Near the end of the data segment, we may fail to get enough bits + * for a lookahead. In that case, we do it the hard way. + * 2. If the lookahead table contains no entry, the next code must be + * more than HUFF_LOOKAHEAD bits long. + * 3. 
jpeg_huff_decode returns -1 if forced to suspend. + */ + +#define HUFF_DECODE(result,state,htbl,failaction,slowlabel) \ +{ register int nb, look; \ + if (bits_left < HUFF_LOOKAHEAD) { \ + if (! jpeg_fill_bit_buffer(&state,get_buffer,bits_left, 0)) {failaction;} \ + get_buffer = state.get_buffer; bits_left = state.bits_left; \ + if (bits_left < HUFF_LOOKAHEAD) { \ + nb = 1; goto slowlabel; \ + } \ + } \ + look = PEEK_BITS(HUFF_LOOKAHEAD); \ + if ((nb = htbl->look_nbits[look]) != 0) { \ + DROP_BITS(nb); \ + result = htbl->look_sym[look]; \ + } else { \ + nb = HUFF_LOOKAHEAD+1; \ +slowlabel: \ + if ((result=jpeg_huff_decode(&state,get_buffer,bits_left,htbl,nb)) < 0) \ + { failaction; } \ + get_buffer = state.get_buffer; bits_left = state.bits_left; \ + } \ +} + + +/* + * Expanded entropy decoder object for Huffman decoding. + * + * The savable_state subrecord contains fields that change within an MCU, + * but must not be updated permanently until we complete the MCU. + */ + +typedef struct { + unsigned int EOBRUN; /* remaining EOBs in EOBRUN */ + int last_dc_val[MAX_COMPS_IN_SCAN]; /* last DC coef for each component */ +} savable_state; + +/* This macro is to work around compilers with missing or broken + * structure assignment. You'll need to fix this code if you have + * such a compiler and you change MAX_COMPS_IN_SCAN. + */ + +#ifndef NO_STRUCT_ASSIGN +#define ASSIGN_STATE(dest,src) ((dest) = (src)) +#else +#if MAX_COMPS_IN_SCAN == 4 +#define ASSIGN_STATE(dest,src) \ + ((dest).EOBRUN = (src).EOBRUN, \ + (dest).last_dc_val[0] = (src).last_dc_val[0], \ + (dest).last_dc_val[1] = (src).last_dc_val[1], \ + (dest).last_dc_val[2] = (src).last_dc_val[2], \ + (dest).last_dc_val[3] = (src).last_dc_val[3]) +#endif +#endif + + +typedef struct { + struct jpeg_entropy_decoder pub; /* public fields */ + + /* These fields are loaded into local variables at start of each MCU. + * In case of suspension, we exit WITHOUT updating them. + */ + bitread_perm_state bitstate; /* Bit buffer at start of MCU */ + savable_state saved; /* Other state at start of MCU */ + + /* These fields are NOT loaded into local working state. 
*/ + boolean insufficient_data; /* set TRUE after emitting warning */ + unsigned int restarts_to_go; /* MCUs left in this restart interval */ + + /* Following two fields used only in progressive mode */ + + /* Pointers to derived tables (these workspaces have image lifespan) */ + d_derived_tbl * derived_tbls[NUM_HUFF_TBLS]; + + d_derived_tbl * ac_derived_tbl; /* active table during an AC scan */ + + /* Following fields used only in sequential mode */ + + /* Pointers to derived tables (these workspaces have image lifespan) */ + d_derived_tbl * dc_derived_tbls[NUM_HUFF_TBLS]; + d_derived_tbl * ac_derived_tbls[NUM_HUFF_TBLS]; + + /* Precalculated info set up by start_pass for use in decode_mcu: */ + + /* Pointers to derived tables to be used for each block within an MCU */ + d_derived_tbl * dc_cur_tbls[D_MAX_BLOCKS_IN_MCU]; + d_derived_tbl * ac_cur_tbls[D_MAX_BLOCKS_IN_MCU]; + /* Whether we care about the DC and AC coefficient values for each block */ + int coef_limit[D_MAX_BLOCKS_IN_MCU]; +} huff_entropy_decoder; + +typedef huff_entropy_decoder * huff_entropy_ptr; + + +static const int jpeg_zigzag_order[8][8] = { + { 0, 1, 5, 6, 14, 15, 27, 28 }, + { 2, 4, 7, 13, 16, 26, 29, 42 }, + { 3, 8, 12, 17, 25, 30, 41, 43 }, + { 9, 11, 18, 24, 31, 40, 44, 53 }, + { 10, 19, 23, 32, 39, 45, 52, 54 }, + { 20, 22, 33, 38, 46, 51, 55, 60 }, + { 21, 34, 37, 47, 50, 56, 59, 61 }, + { 35, 36, 48, 49, 57, 58, 62, 63 } +}; + +static const int jpeg_zigzag_order7[7][7] = { + { 0, 1, 5, 6, 14, 15, 27 }, + { 2, 4, 7, 13, 16, 26, 28 }, + { 3, 8, 12, 17, 25, 29, 38 }, + { 9, 11, 18, 24, 30, 37, 39 }, + { 10, 19, 23, 31, 36, 40, 45 }, + { 20, 22, 32, 35, 41, 44, 46 }, + { 21, 33, 34, 42, 43, 47, 48 } +}; + +static const int jpeg_zigzag_order6[6][6] = { + { 0, 1, 5, 6, 14, 15 }, + { 2, 4, 7, 13, 16, 25 }, + { 3, 8, 12, 17, 24, 26 }, + { 9, 11, 18, 23, 27, 32 }, + { 10, 19, 22, 28, 31, 33 }, + { 20, 21, 29, 30, 34, 35 } +}; + +static const int jpeg_zigzag_order5[5][5] = { + { 0, 1, 5, 6, 14 }, + { 2, 4, 7, 13, 15 }, + { 3, 8, 12, 16, 21 }, + { 9, 11, 17, 20, 22 }, + { 10, 18, 19, 23, 24 } +}; + +static const int jpeg_zigzag_order4[4][4] = { + { 0, 1, 5, 6 }, + { 2, 4, 7, 12 }, + { 3, 8, 11, 13 }, + { 9, 10, 14, 15 } +}; + +static const int jpeg_zigzag_order3[3][3] = { + { 0, 1, 5 }, + { 2, 4, 6 }, + { 3, 7, 8 } +}; + +static const int jpeg_zigzag_order2[2][2] = { + { 0, 1 }, + { 2, 3 } +}; + + +/* + * Compute the derived values for a Huffman table. + * This routine also performs some validation checks on the table. + */ + +LOCAL(void) +jpeg_make_d_derived_tbl (j_decompress_ptr cinfo, boolean isDC, int tblno, + d_derived_tbl ** pdtbl) +{ + JHUFF_TBL *htbl; + d_derived_tbl *dtbl; + int p, i, l, si, numsymbols; + int lookbits, ctr; + char huffsize[257]; + unsigned int huffcode[257]; + unsigned int code; + + /* Note that huffsize[] and huffcode[] are filled in code-length order, + * paralleling the order of the symbols themselves in htbl->huffval[]. + */ + + /* Find the input Huffman table */ + if (tblno < 0 || tblno >= NUM_HUFF_TBLS) + ERREXIT1(cinfo, JERR_NO_HUFF_TABLE, tblno); + htbl = + isDC ? cinfo->dc_huff_tbl_ptrs[tblno] : cinfo->ac_huff_tbl_ptrs[tblno]; + if (htbl == NULL) + ERREXIT1(cinfo, JERR_NO_HUFF_TABLE, tblno); + + /* Allocate a workspace if we haven't already done so. 
*/ + if (*pdtbl == NULL) + *pdtbl = (d_derived_tbl *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(d_derived_tbl)); + dtbl = *pdtbl; + dtbl->pub = htbl; /* fill in back link */ + + /* Figure C.1: make table of Huffman code length for each symbol */ + + p = 0; + for (l = 1; l <= 16; l++) { + i = (int) htbl->bits[l]; + if (i < 0 || p + i > 256) /* protect against table overrun */ + ERREXIT(cinfo, JERR_BAD_HUFF_TABLE); + while (i--) + huffsize[p++] = (char) l; + } + huffsize[p] = 0; + numsymbols = p; + + /* Figure C.2: generate the codes themselves */ + /* We also validate that the counts represent a legal Huffman code tree. */ + + code = 0; + si = huffsize[0]; + p = 0; + while (huffsize[p]) { + while (((int) huffsize[p]) == si) { + huffcode[p++] = code; + code++; + } + /* code is now 1 more than the last code used for codelength si; but + * it must still fit in si bits, since no code is allowed to be all ones. + */ + if (((INT32) code) >= (((INT32) 1) << si)) + ERREXIT(cinfo, JERR_BAD_HUFF_TABLE); + code <<= 1; + si++; + } + + /* Figure F.15: generate decoding tables for bit-sequential decoding */ + + p = 0; + for (l = 1; l <= 16; l++) { + if (htbl->bits[l]) { + /* valoffset[l] = huffval[] index of 1st symbol of code length l, + * minus the minimum code of length l + */ + dtbl->valoffset[l] = (INT32) p - (INT32) huffcode[p]; + p += htbl->bits[l]; + dtbl->maxcode[l] = huffcode[p-1]; /* maximum code of length l */ + } else { + dtbl->maxcode[l] = -1; /* -1 if no codes of this length */ + } + } + dtbl->maxcode[17] = 0xFFFFFL; /* ensures jpeg_huff_decode terminates */ + + /* Compute lookahead tables to speed up decoding. + * First we set all the table entries to 0, indicating "too long"; + * then we iterate through the Huffman codes that are short enough and + * fill in all the entries that correspond to bit sequences starting + * with that code. + */ + + MEMZERO(dtbl->look_nbits, SIZEOF(dtbl->look_nbits)); + + p = 0; + for (l = 1; l <= HUFF_LOOKAHEAD; l++) { + for (i = 1; i <= (int) htbl->bits[l]; i++, p++) { + /* l = current code's length, p = its index in huffcode[] & huffval[]. */ + /* Generate left-justified code followed by all possible bit sequences */ + lookbits = huffcode[p] << (HUFF_LOOKAHEAD-l); + for (ctr = 1 << (HUFF_LOOKAHEAD-l); ctr > 0; ctr--) { + dtbl->look_nbits[lookbits] = l; + dtbl->look_sym[lookbits] = htbl->huffval[p]; + lookbits++; + } + } + } + + /* Validate symbols as being reasonable. + * For AC tables, we make no check, but accept all byte values 0..255. + * For DC tables, we require the symbols to be in range 0..15. + * (Tighter bounds could be applied depending on the data depth and mode, + * but this is sufficient to ensure safe decoding.) + */ + if (isDC) { + for (i = 0; i < numsymbols; i++) { + int sym = htbl->huffval[i]; + if (sym < 0 || sym > 15) + ERREXIT(cinfo, JERR_BAD_HUFF_TABLE); + } + } +} + + +/* + * Out-of-line code for bit fetching. + * Note: current values of get_buffer and bits_left are passed as parameters, + * but are returned in the corresponding fields of the state struct. + * + * On most machines MIN_GET_BITS should be 25 to allow the full 32-bit width + * of get_buffer to be used. (On machines with wider words, an even larger + * buffer could be used.) However, on some machines 32-bit shifts are + * quite slow and take time proportional to the number of places shifted. + * (This is true with most PC compilers, for instance.) In this case it may + * be a win to set MIN_GET_BITS to the minimum value of 15. 
This reduces the + * average shift distance at the cost of more calls to jpeg_fill_bit_buffer. + */ + +#ifdef SLOW_SHIFT_32 +#define MIN_GET_BITS 15 /* minimum allowable value */ +#else +#define MIN_GET_BITS (BIT_BUF_SIZE-7) +#endif + + +LOCAL(boolean) +jpeg_fill_bit_buffer (bitread_working_state * state, + register bit_buf_type get_buffer, register int bits_left, + int nbits) +/* Load up the bit buffer to a depth of at least nbits */ +{ + /* Copy heavily used state fields into locals (hopefully registers) */ + register const JOCTET * next_input_byte = state->next_input_byte; + register size_t bytes_in_buffer = state->bytes_in_buffer; + j_decompress_ptr cinfo = state->cinfo; + + /* Attempt to load at least MIN_GET_BITS bits into get_buffer. */ + /* (It is assumed that no request will be for more than that many bits.) */ + /* We fail to do so only if we hit a marker or are forced to suspend. */ + + if (cinfo->unread_marker == 0) { /* cannot advance past a marker */ + while (bits_left < MIN_GET_BITS) { + register int c; + + /* Attempt to read a byte */ + if (bytes_in_buffer == 0) { + if (! (*cinfo->src->fill_input_buffer) (cinfo)) + return FALSE; + next_input_byte = cinfo->src->next_input_byte; + bytes_in_buffer = cinfo->src->bytes_in_buffer; + } + bytes_in_buffer--; + c = GETJOCTET(*next_input_byte++); + + /* If it's 0xFF, check and discard stuffed zero byte */ + if (c == 0xFF) { + /* Loop here to discard any padding FF's on terminating marker, + * so that we can save a valid unread_marker value. NOTE: we will + * accept multiple FF's followed by a 0 as meaning a single FF data + * byte. This data pattern is not valid according to the standard. + */ + do { + if (bytes_in_buffer == 0) { + if (! (*cinfo->src->fill_input_buffer) (cinfo)) + return FALSE; + next_input_byte = cinfo->src->next_input_byte; + bytes_in_buffer = cinfo->src->bytes_in_buffer; + } + bytes_in_buffer--; + c = GETJOCTET(*next_input_byte++); + } while (c == 0xFF); + + if (c == 0) { + /* Found FF/00, which represents an FF data byte */ + c = 0xFF; + } else { + /* Oops, it's actually a marker indicating end of compressed data. + * Save the marker code for later use. + * Fine point: it might appear that we should save the marker into + * bitread working state, not straight into permanent state. But + * once we have hit a marker, we cannot need to suspend within the + * current MCU, because we will read no more bytes from the data + * source. So it is OK to update permanent state right away. + */ + cinfo->unread_marker = c; + /* See if we need to insert some fake zero bits. */ + goto no_more_bytes; + } + } + + /* OK, load c into get_buffer */ + get_buffer = (get_buffer << 8) | c; + bits_left += 8; + } /* end while */ + } else { + no_more_bytes: + /* We get here if we've read the marker that terminates the compressed + * data segment. There should be enough bits in the buffer register + * to satisfy the request; if so, no problem. + */ + if (nbits > bits_left) { + /* Uh-oh. Report corrupted data to user and stuff zeroes into + * the data stream, so that we can produce some kind of image. + * We use a nonvolatile flag to ensure that only one warning message + * appears per data segment. + */ + if (! 
((huff_entropy_ptr) cinfo->entropy)->insufficient_data) { + WARNMS(cinfo, JWRN_HIT_MARKER); + ((huff_entropy_ptr) cinfo->entropy)->insufficient_data = TRUE; + } + /* Fill the buffer with zero bits */ + get_buffer <<= MIN_GET_BITS - bits_left; + bits_left = MIN_GET_BITS; + } + } + + /* Unload the local registers */ + state->next_input_byte = next_input_byte; + state->bytes_in_buffer = bytes_in_buffer; + state->get_buffer = get_buffer; + state->bits_left = bits_left; + + return TRUE; +} + + +/* + * Figure F.12: extend sign bit. + * On some machines, a shift and sub will be faster than a table lookup. + */ + +#ifdef AVOID_TABLES + +#define BIT_MASK(nbits) ((1<<(nbits))-1) +#define HUFF_EXTEND(x,s) ((x) < (1<<((s)-1)) ? (x) - ((1<<(s))-1) : (x)) + +#else + +#define BIT_MASK(nbits) bmask[nbits] +#define HUFF_EXTEND(x,s) ((x) <= bmask[(s) - 1] ? (x) - bmask[s] : (x)) + +static const int bmask[16] = /* bmask[n] is mask for n rightmost bits */ + { 0, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, + 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF }; + +#endif /* AVOID_TABLES */ + + +/* + * Out-of-line code for Huffman code decoding. + */ + +LOCAL(int) +jpeg_huff_decode (bitread_working_state * state, + register bit_buf_type get_buffer, register int bits_left, + d_derived_tbl * htbl, int min_bits) +{ + register int l = min_bits; + register INT32 code; + + /* HUFF_DECODE has determined that the code is at least min_bits */ + /* bits long, so fetch that many bits in one swoop. */ + + CHECK_BIT_BUFFER(*state, l, return -1); + code = GET_BITS(l); + + /* Collect the rest of the Huffman code one bit at a time. */ + /* This is per Figure F.16 in the JPEG spec. */ + + while (code > htbl->maxcode[l]) { + code <<= 1; + CHECK_BIT_BUFFER(*state, 1, return -1); + code |= GET_BITS(1); + l++; + } + + /* Unload the local registers */ + state->get_buffer = get_buffer; + state->bits_left = bits_left; + + /* With garbage input we may reach the sentinel value l = 17. */ + + if (l > 16) { + WARNMS(state->cinfo, JWRN_HUFF_BAD_CODE); + return 0; /* fake a zero as the safest result */ + } + + return htbl->pub->huffval[ (int) (code + htbl->valoffset[l]) ]; +} + + +/* + * Check for a restart marker & resynchronize decoder. + * Returns FALSE if must suspend. + */ + +LOCAL(boolean) +process_restart (j_decompress_ptr cinfo) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + int ci; + + /* Throw away any unused bits remaining in bit buffer; */ + /* include any full bytes in next_marker's count of discarded bytes */ + cinfo->marker->discarded_bytes += entropy->bitstate.bits_left / 8; + entropy->bitstate.bits_left = 0; + + /* Advance past the RSTn marker */ + if (! (*cinfo->marker->read_restart_marker) (cinfo)) + return FALSE; + + /* Re-initialize DC predictions to 0 */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) + entropy->saved.last_dc_val[ci] = 0; + /* Re-init EOB run count, too */ + entropy->saved.EOBRUN = 0; + + /* Reset restart counter */ + entropy->restarts_to_go = cinfo->restart_interval; + + /* Reset out-of-data flag, unless read_restart_marker left us smack up + * against a marker. In that case we will end up treating the next data + * segment as empty, and we can avoid producing bogus output pixels by + * leaving the flag set. + */ + if (cinfo->unread_marker == 0) + entropy->insufficient_data = FALSE; + + return TRUE; +} + + +/* + * Huffman MCU decoding. + * Each of these routines decodes and returns one MCU's worth of + * Huffman-compressed coefficients. 
+ * The coefficients are reordered from zigzag order into natural array order, + * but are not dequantized. + * + * The i'th block of the MCU is stored into the block pointed to by + * MCU_data[i]. WE ASSUME THIS AREA IS INITIALLY ZEROED BY THE CALLER. + * (Wholesale zeroing is usually a little faster than retail...) + * + * We return FALSE if data source requested suspension. In that case no + * changes have been made to permanent state. (Exception: some output + * coefficients may already have been assigned. This is harmless for + * spectral selection, since we'll just re-assign them on the next call. + * Successive approximation AC refinement has to be more careful, however.) + */ + +/* + * MCU decoding for DC initial scan (either spectral selection, + * or first pass of successive approximation). + */ + +METHODDEF(boolean) +decode_mcu_DC_first (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + int Al = cinfo->Al; + register int s, r; + int blkn, ci; + JBLOCKROW block; + BITREAD_STATE_VARS; + savable_state state; + d_derived_tbl * tbl; + jpeg_component_info * compptr; + + /* Process restart marker if needed; may have to suspend */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + if (! process_restart(cinfo)) + return FALSE; + } + + /* If we've run out of data, just leave the MCU set to zeroes. + * This way, we return uniform gray for the remainder of the segment. + */ + if (! entropy->insufficient_data) { + + /* Load up working state */ + BITREAD_LOAD_STATE(cinfo,entropy->bitstate); + ASSIGN_STATE(state, entropy->saved); + + /* Outer loop handles each block in the MCU */ + + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + block = MCU_data[blkn]; + ci = cinfo->MCU_membership[blkn]; + compptr = cinfo->cur_comp_info[ci]; + tbl = entropy->derived_tbls[compptr->dc_tbl_no]; + + /* Decode a single block's worth of coefficients */ + + /* Section F.2.2.1: decode the DC coefficient difference */ + HUFF_DECODE(s, br_state, tbl, return FALSE, label1); + if (s) { + CHECK_BIT_BUFFER(br_state, s, return FALSE); + r = GET_BITS(s); + s = HUFF_EXTEND(r, s); + } + + /* Convert DC difference to actual value, update last_dc_val */ + s += state.last_dc_val[ci]; + state.last_dc_val[ci] = s; + /* Scale and output the coefficient (assumes jpeg_natural_order[0]=0) */ + (*block)[0] = (JCOEF) (s << Al); + } + + /* Completed MCU, so update state */ + BITREAD_SAVE_STATE(cinfo,entropy->bitstate); + ASSIGN_STATE(entropy->saved, state); + } + + /* Account for restart interval (no-op if not using restarts) */ + entropy->restarts_to_go--; + + return TRUE; +} + + +/* + * MCU decoding for AC initial scan (either spectral selection, + * or first pass of successive approximation). + */ + +METHODDEF(boolean) +decode_mcu_AC_first (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + register int s, k, r; + unsigned int EOBRUN; + int Se, Al; + const int * natural_order; + JBLOCKROW block; + BITREAD_STATE_VARS; + d_derived_tbl * tbl; + + /* Process restart marker if needed; may have to suspend */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + if (! process_restart(cinfo)) + return FALSE; + } + + /* If we've run out of data, just leave the MCU set to zeroes. + * This way, we return uniform gray for the remainder of the segment. + */ + if (! 
entropy->insufficient_data) { + + Se = cinfo->Se; + Al = cinfo->Al; + natural_order = cinfo->natural_order; + + /* Load up working state. + * We can avoid loading/saving bitread state if in an EOB run. + */ + EOBRUN = entropy->saved.EOBRUN; /* only part of saved state we need */ + + /* There is always only one block per MCU */ + + if (EOBRUN > 0) /* if it's a band of zeroes... */ + EOBRUN--; /* ...process it now (we do nothing) */ + else { + BITREAD_LOAD_STATE(cinfo,entropy->bitstate); + block = MCU_data[0]; + tbl = entropy->ac_derived_tbl; + + for (k = cinfo->Ss; k <= Se; k++) { + HUFF_DECODE(s, br_state, tbl, return FALSE, label2); + r = s >> 4; + s &= 15; + if (s) { + k += r; + CHECK_BIT_BUFFER(br_state, s, return FALSE); + r = GET_BITS(s); + s = HUFF_EXTEND(r, s); + /* Scale and output coefficient in natural (dezigzagged) order */ + (*block)[natural_order[k]] = (JCOEF) (s << Al); + } else { + if (r == 15) { /* ZRL */ + k += 15; /* skip 15 zeroes in band */ + } else { /* EOBr, run length is 2^r + appended bits */ + EOBRUN = 1 << r; + if (r) { /* EOBr, r > 0 */ + CHECK_BIT_BUFFER(br_state, r, return FALSE); + r = GET_BITS(r); + EOBRUN += r; + } + EOBRUN--; /* this band is processed at this moment */ + break; /* force end-of-band */ + } + } + } + + BITREAD_SAVE_STATE(cinfo,entropy->bitstate); + } + + /* Completed MCU, so update state */ + entropy->saved.EOBRUN = EOBRUN; /* only part of saved state we need */ + } + + /* Account for restart interval (no-op if not using restarts) */ + entropy->restarts_to_go--; + + return TRUE; +} + + +/* + * MCU decoding for DC successive approximation refinement scan. + * Note: we assume such scans can be multi-component, although the spec + * is not very clear on the point. + */ + +METHODDEF(boolean) +decode_mcu_DC_refine (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + int p1 = 1 << cinfo->Al; /* 1 in the bit position being coded */ + int blkn; + JBLOCKROW block; + BITREAD_STATE_VARS; + + /* Process restart marker if needed; may have to suspend */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + if (! process_restart(cinfo)) + return FALSE; + } + + /* Not worth the cycles to check insufficient_data here, + * since we will not change the data anyway if we read zeroes. + */ + + /* Load up working state */ + BITREAD_LOAD_STATE(cinfo,entropy->bitstate); + + /* Outer loop handles each block in the MCU */ + + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + block = MCU_data[blkn]; + + /* Encoded data is simply the next bit of the two's-complement DC value */ + CHECK_BIT_BUFFER(br_state, 1, return FALSE); + if (GET_BITS(1)) + (*block)[0] |= p1; + /* Note: since we use |=, repeating the assignment later is safe */ + } + + /* Completed MCU, so update state */ + BITREAD_SAVE_STATE(cinfo,entropy->bitstate); + + /* Account for restart interval (no-op if not using restarts) */ + entropy->restarts_to_go--; + + return TRUE; +} + + +/* + * MCU decoding for AC successive approximation refinement scan. 
+ */ + +METHODDEF(boolean) +decode_mcu_AC_refine (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + register int s, k, r; + unsigned int EOBRUN; + int Se, p1, m1; + const int * natural_order; + JBLOCKROW block; + JCOEFPTR thiscoef; + BITREAD_STATE_VARS; + d_derived_tbl * tbl; + int num_newnz; + int newnz_pos[DCTSIZE2]; + + /* Process restart marker if needed; may have to suspend */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + if (! process_restart(cinfo)) + return FALSE; + } + + /* If we've run out of data, don't modify the MCU. + */ + if (! entropy->insufficient_data) { + + Se = cinfo->Se; + p1 = 1 << cinfo->Al; /* 1 in the bit position being coded */ + m1 = (-1) << cinfo->Al; /* -1 in the bit position being coded */ + natural_order = cinfo->natural_order; + + /* Load up working state */ + BITREAD_LOAD_STATE(cinfo,entropy->bitstate); + EOBRUN = entropy->saved.EOBRUN; /* only part of saved state we need */ + + /* There is always only one block per MCU */ + block = MCU_data[0]; + tbl = entropy->ac_derived_tbl; + + /* If we are forced to suspend, we must undo the assignments to any newly + * nonzero coefficients in the block, because otherwise we'd get confused + * next time about which coefficients were already nonzero. + * But we need not undo addition of bits to already-nonzero coefficients; + * instead, we can test the current bit to see if we already did it. + */ + num_newnz = 0; + + /* initialize coefficient loop counter to start of band */ + k = cinfo->Ss; + + if (EOBRUN == 0) { + for (; k <= Se; k++) { + HUFF_DECODE(s, br_state, tbl, goto undoit, label3); + r = s >> 4; + s &= 15; + if (s) { + if (s != 1) /* size of new coef should always be 1 */ + WARNMS(cinfo, JWRN_HUFF_BAD_CODE); + CHECK_BIT_BUFFER(br_state, 1, goto undoit); + if (GET_BITS(1)) + s = p1; /* newly nonzero coef is positive */ + else + s = m1; /* newly nonzero coef is negative */ + } else { + if (r != 15) { + EOBRUN = 1 << r; /* EOBr, run length is 2^r + appended bits */ + if (r) { + CHECK_BIT_BUFFER(br_state, r, goto undoit); + r = GET_BITS(r); + EOBRUN += r; + } + break; /* rest of block is handled by EOB logic */ + } + /* note s = 0 for processing ZRL */ + } + /* Advance over already-nonzero coefs and r still-zero coefs, + * appending correction bits to the nonzeroes. A correction bit is 1 + * if the absolute value of the coefficient must be increased. + */ + do { + thiscoef = *block + natural_order[k]; + if (*thiscoef != 0) { + CHECK_BIT_BUFFER(br_state, 1, goto undoit); + if (GET_BITS(1)) { + if ((*thiscoef & p1) == 0) { /* do nothing if already set it */ + if (*thiscoef >= 0) + *thiscoef += p1; + else + *thiscoef += m1; + } + } + } else { + if (--r < 0) + break; /* reached target zero coefficient */ + } + k++; + } while (k <= Se); + if (s) { + int pos = natural_order[k]; + /* Output newly nonzero coefficient */ + (*block)[pos] = (JCOEF) s; + /* Remember its position in case we have to suspend */ + newnz_pos[num_newnz++] = pos; + } + } + } + + if (EOBRUN > 0) { + /* Scan any remaining coefficient positions after the end-of-band + * (the last newly nonzero coefficient, if any). Append a correction + * bit to each already-nonzero coefficient. A correction bit is 1 + * if the absolute value of the coefficient must be increased. 
+ */ + for (; k <= Se; k++) { + thiscoef = *block + natural_order[k]; + if (*thiscoef != 0) { + CHECK_BIT_BUFFER(br_state, 1, goto undoit); + if (GET_BITS(1)) { + if ((*thiscoef & p1) == 0) { /* do nothing if already changed it */ + if (*thiscoef >= 0) + *thiscoef += p1; + else + *thiscoef += m1; + } + } + } + } + /* Count one block completed in EOB run */ + EOBRUN--; + } + + /* Completed MCU, so update state */ + BITREAD_SAVE_STATE(cinfo,entropy->bitstate); + entropy->saved.EOBRUN = EOBRUN; /* only part of saved state we need */ + } + + /* Account for restart interval (no-op if not using restarts) */ + entropy->restarts_to_go--; + + return TRUE; + +undoit: + /* Re-zero any output coefficients that we made newly nonzero */ + while (num_newnz > 0) + (*block)[newnz_pos[--num_newnz]] = 0; + + return FALSE; +} + + +/* + * Decode one MCU's worth of Huffman-compressed coefficients, + * partial blocks. + */ + +METHODDEF(boolean) +decode_mcu_sub (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + const int * natural_order; + int Se, blkn; + BITREAD_STATE_VARS; + savable_state state; + + /* Process restart marker if needed; may have to suspend */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + if (! process_restart(cinfo)) + return FALSE; + } + + /* If we've run out of data, just leave the MCU set to zeroes. + * This way, we return uniform gray for the remainder of the segment. + */ + if (! entropy->insufficient_data) { + + natural_order = cinfo->natural_order; + Se = cinfo->lim_Se; + + /* Load up working state */ + BITREAD_LOAD_STATE(cinfo,entropy->bitstate); + ASSIGN_STATE(state, entropy->saved); + + /* Outer loop handles each block in the MCU */ + + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + JBLOCKROW block = MCU_data[blkn]; + d_derived_tbl * htbl; + register int s, k, r; + int coef_limit, ci; + + /* Decode a single block's worth of coefficients */ + + /* Section F.2.2.1: decode the DC coefficient difference */ + htbl = entropy->dc_cur_tbls[blkn]; + HUFF_DECODE(s, br_state, htbl, return FALSE, label1); + + htbl = entropy->ac_cur_tbls[blkn]; + k = 1; + coef_limit = entropy->coef_limit[blkn]; + if (coef_limit) { + /* Convert DC difference to actual value, update last_dc_val */ + if (s) { + CHECK_BIT_BUFFER(br_state, s, return FALSE); + r = GET_BITS(s); + s = HUFF_EXTEND(r, s); + } + ci = cinfo->MCU_membership[blkn]; + s += state.last_dc_val[ci]; + state.last_dc_val[ci] = s; + /* Output the DC coefficient */ + (*block)[0] = (JCOEF) s; + + /* Section F.2.2.2: decode the AC coefficients */ + /* Since zeroes are skipped, output area must be cleared beforehand */ + for (; k < coef_limit; k++) { + HUFF_DECODE(s, br_state, htbl, return FALSE, label2); + + r = s >> 4; + s &= 15; + + if (s) { + k += r; + CHECK_BIT_BUFFER(br_state, s, return FALSE); + r = GET_BITS(s); + s = HUFF_EXTEND(r, s); + /* Output coefficient in natural (dezigzagged) order. + * Note: the extra entries in natural_order[] will save us + * if k > Se, which could happen if the data is corrupted. 
+ */ + (*block)[natural_order[k]] = (JCOEF) s; + } else { + if (r != 15) + goto EndOfBlock; + k += 15; + } + } + } else { + if (s) { + CHECK_BIT_BUFFER(br_state, s, return FALSE); + DROP_BITS(s); + } + } + + /* Section F.2.2.2: decode the AC coefficients */ + /* In this path we just discard the values */ + for (; k <= Se; k++) { + HUFF_DECODE(s, br_state, htbl, return FALSE, label3); + + r = s >> 4; + s &= 15; + + if (s) { + k += r; + CHECK_BIT_BUFFER(br_state, s, return FALSE); + DROP_BITS(s); + } else { + if (r != 15) + break; + k += 15; + } + } + + EndOfBlock: ; + } + + /* Completed MCU, so update state */ + BITREAD_SAVE_STATE(cinfo,entropy->bitstate); + ASSIGN_STATE(entropy->saved, state); + } + + /* Account for restart interval (no-op if not using restarts) */ + entropy->restarts_to_go--; + + return TRUE; +} + + +/* + * Decode one MCU's worth of Huffman-compressed coefficients, + * full-size blocks. + */ + +METHODDEF(boolean) +decode_mcu (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + int blkn; + BITREAD_STATE_VARS; + savable_state state; + + /* Process restart marker if needed; may have to suspend */ + if (cinfo->restart_interval) { + if (entropy->restarts_to_go == 0) + if (! process_restart(cinfo)) + return FALSE; + } + + /* If we've run out of data, just leave the MCU set to zeroes. + * This way, we return uniform gray for the remainder of the segment. + */ + if (! entropy->insufficient_data) { + + /* Load up working state */ + BITREAD_LOAD_STATE(cinfo,entropy->bitstate); + ASSIGN_STATE(state, entropy->saved); + + /* Outer loop handles each block in the MCU */ + + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + JBLOCKROW block = MCU_data[blkn]; + d_derived_tbl * htbl; + register int s, k, r; + int coef_limit, ci; + + /* Decode a single block's worth of coefficients */ + + /* Section F.2.2.1: decode the DC coefficient difference */ + htbl = entropy->dc_cur_tbls[blkn]; + HUFF_DECODE(s, br_state, htbl, return FALSE, label1); + + htbl = entropy->ac_cur_tbls[blkn]; + k = 1; + coef_limit = entropy->coef_limit[blkn]; + if (coef_limit) { + /* Convert DC difference to actual value, update last_dc_val */ + if (s) { + CHECK_BIT_BUFFER(br_state, s, return FALSE); + r = GET_BITS(s); + s = HUFF_EXTEND(r, s); + } + ci = cinfo->MCU_membership[blkn]; + s += state.last_dc_val[ci]; + state.last_dc_val[ci] = s; + /* Output the DC coefficient */ + (*block)[0] = (JCOEF) s; + + /* Section F.2.2.2: decode the AC coefficients */ + /* Since zeroes are skipped, output area must be cleared beforehand */ + for (; k < coef_limit; k++) { + HUFF_DECODE(s, br_state, htbl, return FALSE, label2); + + r = s >> 4; + s &= 15; + + if (s) { + k += r; + CHECK_BIT_BUFFER(br_state, s, return FALSE); + r = GET_BITS(s); + s = HUFF_EXTEND(r, s); + /* Output coefficient in natural (dezigzagged) order. + * Note: the extra entries in jpeg_natural_order[] will save us + * if k >= DCTSIZE2, which could happen if the data is corrupted. 
+ */ + (*block)[jpeg_natural_order[k]] = (JCOEF) s; + } else { + if (r != 15) + goto EndOfBlock; + k += 15; + } + } + } else { + if (s) { + CHECK_BIT_BUFFER(br_state, s, return FALSE); + DROP_BITS(s); + } + } + + /* Section F.2.2.2: decode the AC coefficients */ + /* In this path we just discard the values */ + for (; k < DCTSIZE2; k++) { + HUFF_DECODE(s, br_state, htbl, return FALSE, label3); + + r = s >> 4; + s &= 15; + + if (s) { + k += r; + CHECK_BIT_BUFFER(br_state, s, return FALSE); + DROP_BITS(s); + } else { + if (r != 15) + break; + k += 15; + } + } + + EndOfBlock: ; + } + + /* Completed MCU, so update state */ + BITREAD_SAVE_STATE(cinfo,entropy->bitstate); + ASSIGN_STATE(entropy->saved, state); + } + + /* Account for restart interval (no-op if not using restarts) */ + entropy->restarts_to_go--; + + return TRUE; +} + + +/* + * Initialize for a Huffman-compressed scan. + */ + +METHODDEF(void) +start_pass_huff_decoder (j_decompress_ptr cinfo) +{ + huff_entropy_ptr entropy = (huff_entropy_ptr) cinfo->entropy; + int ci, blkn, tbl, i; + jpeg_component_info * compptr; + + if (cinfo->progressive_mode) { + /* Validate progressive scan parameters */ + if (cinfo->Ss == 0) { + if (cinfo->Se != 0) + goto bad; + } else { + /* need not check Ss/Se < 0 since they came from unsigned bytes */ + if (cinfo->Se < cinfo->Ss || cinfo->Se > cinfo->lim_Se) + goto bad; + /* AC scans may have only one component */ + if (cinfo->comps_in_scan != 1) + goto bad; + } + if (cinfo->Ah != 0) { + /* Successive approximation refinement scan: must have Al = Ah-1. */ + if (cinfo->Ah-1 != cinfo->Al) + goto bad; + } + if (cinfo->Al > 13) { /* need not check for < 0 */ + /* Arguably the maximum Al value should be less than 13 for 8-bit precision, + * but the spec doesn't say so, and we try to be liberal about what we + * accept. Note: large Al values could result in out-of-range DC + * coefficients during early scans, leading to bizarre displays due to + * overflows in the IDCT math. But we won't crash. + */ + bad: + ERREXIT4(cinfo, JERR_BAD_PROGRESSION, + cinfo->Ss, cinfo->Se, cinfo->Ah, cinfo->Al); + } + /* Update progression status, and verify that scan order is legal. + * Note that inter-scan inconsistencies are treated as warnings + * not fatal errors ... not clear if this is right way to behave. + */ + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + int coefi, cindex = cinfo->cur_comp_info[ci]->component_index; + int *coef_bit_ptr = & cinfo->coef_bits[cindex][0]; + if (cinfo->Ss && coef_bit_ptr[0] < 0) /* AC without prior DC scan */ + WARNMS2(cinfo, JWRN_BOGUS_PROGRESSION, cindex, 0); + for (coefi = cinfo->Ss; coefi <= cinfo->Se; coefi++) { + int expected = (coef_bit_ptr[coefi] < 0) ? 0 : coef_bit_ptr[coefi]; + if (cinfo->Ah != expected) + WARNMS2(cinfo, JWRN_BOGUS_PROGRESSION, cindex, coefi); + coef_bit_ptr[coefi] = cinfo->Al; + } + } + + /* Select MCU decoding routine */ + if (cinfo->Ah == 0) { + if (cinfo->Ss == 0) + entropy->pub.decode_mcu = decode_mcu_DC_first; + else + entropy->pub.decode_mcu = decode_mcu_AC_first; + } else { + if (cinfo->Ss == 0) + entropy->pub.decode_mcu = decode_mcu_DC_refine; + else + entropy->pub.decode_mcu = decode_mcu_AC_refine; + } + + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + /* Make sure requested tables are present, and compute derived tables. + * We may build same derived table more than once, but it's not expensive. 
+ */ + if (cinfo->Ss == 0) { + if (cinfo->Ah == 0) { /* DC refinement needs no table */ + tbl = compptr->dc_tbl_no; + jpeg_make_d_derived_tbl(cinfo, TRUE, tbl, + & entropy->derived_tbls[tbl]); + } + } else { + tbl = compptr->ac_tbl_no; + jpeg_make_d_derived_tbl(cinfo, FALSE, tbl, + & entropy->derived_tbls[tbl]); + /* remember the single active table */ + entropy->ac_derived_tbl = entropy->derived_tbls[tbl]; + } + /* Initialize DC predictions to 0 */ + entropy->saved.last_dc_val[ci] = 0; + } + + /* Initialize private state variables */ + entropy->saved.EOBRUN = 0; + } else { + /* Check that the scan parameters Ss, Se, Ah/Al are OK for sequential JPEG. + * This ought to be an error condition, but we make it a warning because + * there are some baseline files out there with all zeroes in these bytes. + */ + if (cinfo->Ss != 0 || cinfo->Ah != 0 || cinfo->Al != 0 || + ((cinfo->is_baseline || cinfo->Se < DCTSIZE2) && + cinfo->Se != cinfo->lim_Se)) + WARNMS(cinfo, JWRN_NOT_SEQUENTIAL); + + /* Select MCU decoding routine */ + /* We retain the hard-coded case for full-size blocks. + * This is not necessary, but it appears that this version is slightly + * more performant in the given implementation. + * With an improved implementation we would prefer a single optimized + * function. + */ + if (cinfo->lim_Se != DCTSIZE2-1) + entropy->pub.decode_mcu = decode_mcu_sub; + else + entropy->pub.decode_mcu = decode_mcu; + + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + /* Compute derived values for Huffman tables */ + /* We may do this more than once for a table, but it's not expensive */ + tbl = compptr->dc_tbl_no; + jpeg_make_d_derived_tbl(cinfo, TRUE, tbl, + & entropy->dc_derived_tbls[tbl]); + if (cinfo->lim_Se) { /* AC needs no table when not present */ + tbl = compptr->ac_tbl_no; + jpeg_make_d_derived_tbl(cinfo, FALSE, tbl, + & entropy->ac_derived_tbls[tbl]); + } + /* Initialize DC predictions to 0 */ + entropy->saved.last_dc_val[ci] = 0; + } + + /* Precalculate decoding info for each block in an MCU of this scan */ + for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) { + ci = cinfo->MCU_membership[blkn]; + compptr = cinfo->cur_comp_info[ci]; + /* Precalculate which table to use for each block */ + entropy->dc_cur_tbls[blkn] = entropy->dc_derived_tbls[compptr->dc_tbl_no]; + entropy->ac_cur_tbls[blkn] = entropy->ac_derived_tbls[compptr->ac_tbl_no]; + /* Decide whether we really care about the coefficient values */ + if (compptr->component_needed) { + ci = compptr->DCT_v_scaled_size; + i = compptr->DCT_h_scaled_size; + switch (cinfo->lim_Se) { + case (1*1-1): + entropy->coef_limit[blkn] = 1; + break; + case (2*2-1): + if (ci <= 0 || ci > 2) ci = 2; + if (i <= 0 || i > 2) i = 2; + entropy->coef_limit[blkn] = 1 + jpeg_zigzag_order2[ci - 1][i - 1]; + break; + case (3*3-1): + if (ci <= 0 || ci > 3) ci = 3; + if (i <= 0 || i > 3) i = 3; + entropy->coef_limit[blkn] = 1 + jpeg_zigzag_order3[ci - 1][i - 1]; + break; + case (4*4-1): + if (ci <= 0 || ci > 4) ci = 4; + if (i <= 0 || i > 4) i = 4; + entropy->coef_limit[blkn] = 1 + jpeg_zigzag_order4[ci - 1][i - 1]; + break; + case (5*5-1): + if (ci <= 0 || ci > 5) ci = 5; + if (i <= 0 || i > 5) i = 5; + entropy->coef_limit[blkn] = 1 + jpeg_zigzag_order5[ci - 1][i - 1]; + break; + case (6*6-1): + if (ci <= 0 || ci > 6) ci = 6; + if (i <= 0 || i > 6) i = 6; + entropy->coef_limit[blkn] = 1 + jpeg_zigzag_order6[ci - 1][i - 1]; + break; + case (7*7-1): + if (ci <= 0 || ci > 7) ci = 7; + if (i <= 0 || i > 7) i = 7; + 
entropy->coef_limit[blkn] = 1 + jpeg_zigzag_order7[ci - 1][i - 1]; + break; + default: + if (ci <= 0 || ci > 8) ci = 8; + if (i <= 0 || i > 8) i = 8; + entropy->coef_limit[blkn] = 1 + jpeg_zigzag_order[ci - 1][i - 1]; + break; + } + } else { + entropy->coef_limit[blkn] = 0; + } + } + } + + /* Initialize bitread state variables */ + entropy->bitstate.bits_left = 0; + entropy->bitstate.get_buffer = 0; /* unnecessary, but keeps Purify quiet */ + entropy->insufficient_data = FALSE; + + /* Initialize restart counter */ + entropy->restarts_to_go = cinfo->restart_interval; +} + + +/* + * Module initialization routine for Huffman entropy decoding. + */ + +GLOBAL(void) +jinit_huff_decoder (j_decompress_ptr cinfo) +{ + huff_entropy_ptr entropy; + int i; + + entropy = (huff_entropy_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(huff_entropy_decoder)); + cinfo->entropy = (struct jpeg_entropy_decoder *) entropy; + entropy->pub.start_pass = start_pass_huff_decoder; + + if (cinfo->progressive_mode) { + /* Create progression status table */ + int *coef_bit_ptr, ci; + cinfo->coef_bits = (int (*)[DCTSIZE2]) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + cinfo->num_components*DCTSIZE2*SIZEOF(int)); + coef_bit_ptr = & cinfo->coef_bits[0][0]; + for (ci = 0; ci < cinfo->num_components; ci++) + for (i = 0; i < DCTSIZE2; i++) + *coef_bit_ptr++ = -1; + + /* Mark derived tables unallocated */ + for (i = 0; i < NUM_HUFF_TBLS; i++) { + entropy->derived_tbls[i] = NULL; + } + } else { + /* Mark tables unallocated */ + for (i = 0; i < NUM_HUFF_TBLS; i++) { + entropy->dc_derived_tbls[i] = entropy->ac_derived_tbls[i] = NULL; + } + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdinput.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdinput.c new file mode 100644 index 000000000..2c5c717b9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdinput.c @@ -0,0 +1,661 @@ +/* + * jdinput.c + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * Modified 2002-2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains input control logic for the JPEG decompressor. + * These routines are concerned with controlling the decompressor's input + * processing (marker reading and coefficient decoding). The actual input + * reading is done in jdmarker.c, jdhuff.c, and jdarith.c. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Private state */ + +typedef struct { + struct jpeg_input_controller pub; /* public fields */ + + int inheaders; /* Nonzero until first SOS is reached */ +} my_input_controller; + +typedef my_input_controller * my_inputctl_ptr; + + +/* Forward declarations */ +METHODDEF(int) consume_markers JPP((j_decompress_ptr cinfo)); + + +/* + * Routines to calculate various quantities related to the size of the image. + */ + + +/* + * Compute output image dimensions and related values. + * NOTE: this is exported for possible use by application. + * Hence it mustn't do anything that can't be done twice. + */ + +GLOBAL(void) +jpeg_core_output_dimensions (j_decompress_ptr cinfo) +/* Do computations that are needed before master selection phase. + * This function is used for transcoding and full decompression. 
+ */ +{ +#ifdef IDCT_SCALING_SUPPORTED + int ci; + jpeg_component_info *compptr; + + /* Compute actual output image dimensions and DCT scaling choices. */ + if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom) { + /* Provide 1/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 1; + cinfo->min_DCT_v_scaled_size = 1; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 2) { + /* Provide 2/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 2L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 2L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 2; + cinfo->min_DCT_v_scaled_size = 2; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 3) { + /* Provide 3/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 3L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 3L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 3; + cinfo->min_DCT_v_scaled_size = 3; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 4) { + /* Provide 4/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 4L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 4L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 4; + cinfo->min_DCT_v_scaled_size = 4; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 5) { + /* Provide 5/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 5L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 5L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 5; + cinfo->min_DCT_v_scaled_size = 5; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 6) { + /* Provide 6/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 6L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 6L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 6; + cinfo->min_DCT_v_scaled_size = 6; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 7) { + /* Provide 7/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 7L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 7L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 7; + cinfo->min_DCT_v_scaled_size = 7; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 8) { + /* Provide 8/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 8L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 8L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 8; + cinfo->min_DCT_v_scaled_size = 8; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 9) { + /* Provide 9/block_size 
scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 9L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 9L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 9; + cinfo->min_DCT_v_scaled_size = 9; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 10) { + /* Provide 10/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 10L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 10L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 10; + cinfo->min_DCT_v_scaled_size = 10; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 11) { + /* Provide 11/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 11L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 11L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 11; + cinfo->min_DCT_v_scaled_size = 11; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 12) { + /* Provide 12/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 12L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 12L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 12; + cinfo->min_DCT_v_scaled_size = 12; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 13) { + /* Provide 13/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 13L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 13L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 13; + cinfo->min_DCT_v_scaled_size = 13; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 14) { + /* Provide 14/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 14L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 14L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 14; + cinfo->min_DCT_v_scaled_size = 14; + } else if (cinfo->scale_num * cinfo->block_size <= cinfo->scale_denom * 15) { + /* Provide 15/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 15L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 15L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 15; + cinfo->min_DCT_v_scaled_size = 15; + } else { + /* Provide 16/block_size scaling */ + cinfo->output_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * 16L, (long) cinfo->block_size); + cinfo->output_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * 16L, (long) cinfo->block_size); + cinfo->min_DCT_h_scaled_size = 16; + cinfo->min_DCT_v_scaled_size = 16; + } + + /* Recompute dimensions of components */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + compptr->DCT_h_scaled_size = cinfo->min_DCT_h_scaled_size; + compptr->DCT_v_scaled_size = cinfo->min_DCT_v_scaled_size; + } + +#else /* !IDCT_SCALING_SUPPORTED */ + + /* Hardwire it to "no scaling" */ + cinfo->output_width = 
cinfo->image_width; + cinfo->output_height = cinfo->image_height; + /* jdinput.c has already initialized DCT_scaled_size, + * and has computed unscaled downsampled_width and downsampled_height. + */ + +#endif /* IDCT_SCALING_SUPPORTED */ +} + + +LOCAL(void) +initial_setup (j_decompress_ptr cinfo) +/* Called once, when first SOS marker is reached */ +{ + int ci; + jpeg_component_info *compptr; + + /* Make sure image isn't bigger than I can handle */ + if ((long) cinfo->image_height > (long) JPEG_MAX_DIMENSION || + (long) cinfo->image_width > (long) JPEG_MAX_DIMENSION) + ERREXIT1(cinfo, JERR_IMAGE_TOO_BIG, (unsigned int) JPEG_MAX_DIMENSION); + + /* For now, precision must match compiled-in value... */ + if (cinfo->data_precision != BITS_IN_JSAMPLE) + ERREXIT1(cinfo, JERR_BAD_PRECISION, cinfo->data_precision); + + /* Check that number of components won't exceed internal array sizes */ + if (cinfo->num_components > MAX_COMPONENTS) + ERREXIT2(cinfo, JERR_COMPONENT_COUNT, cinfo->num_components, + MAX_COMPONENTS); + + /* Compute maximum sampling factors; check factor validity */ + cinfo->max_h_samp_factor = 1; + cinfo->max_v_samp_factor = 1; + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + if (compptr->h_samp_factor<=0 || compptr->h_samp_factor>MAX_SAMP_FACTOR || + compptr->v_samp_factor<=0 || compptr->v_samp_factor>MAX_SAMP_FACTOR) + ERREXIT(cinfo, JERR_BAD_SAMPLING); + cinfo->max_h_samp_factor = MAX(cinfo->max_h_samp_factor, + compptr->h_samp_factor); + cinfo->max_v_samp_factor = MAX(cinfo->max_v_samp_factor, + compptr->v_samp_factor); + } + + /* Derive block_size, natural_order, and lim_Se */ + if (cinfo->is_baseline || (cinfo->progressive_mode && + cinfo->comps_in_scan)) { /* no pseudo SOS marker */ + cinfo->block_size = DCTSIZE; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + } else + switch (cinfo->Se) { + case (1*1-1): + cinfo->block_size = 1; + cinfo->natural_order = jpeg_natural_order; /* not needed */ + cinfo->lim_Se = cinfo->Se; + break; + case (2*2-1): + cinfo->block_size = 2; + cinfo->natural_order = jpeg_natural_order2; + cinfo->lim_Se = cinfo->Se; + break; + case (3*3-1): + cinfo->block_size = 3; + cinfo->natural_order = jpeg_natural_order3; + cinfo->lim_Se = cinfo->Se; + break; + case (4*4-1): + cinfo->block_size = 4; + cinfo->natural_order = jpeg_natural_order4; + cinfo->lim_Se = cinfo->Se; + break; + case (5*5-1): + cinfo->block_size = 5; + cinfo->natural_order = jpeg_natural_order5; + cinfo->lim_Se = cinfo->Se; + break; + case (6*6-1): + cinfo->block_size = 6; + cinfo->natural_order = jpeg_natural_order6; + cinfo->lim_Se = cinfo->Se; + break; + case (7*7-1): + cinfo->block_size = 7; + cinfo->natural_order = jpeg_natural_order7; + cinfo->lim_Se = cinfo->Se; + break; + case (8*8-1): + cinfo->block_size = 8; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + break; + case (9*9-1): + cinfo->block_size = 9; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + break; + case (10*10-1): + cinfo->block_size = 10; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + break; + case (11*11-1): + cinfo->block_size = 11; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + break; + case (12*12-1): + cinfo->block_size = 12; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + break; + case (13*13-1): + cinfo->block_size = 13; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + 
break; + case (14*14-1): + cinfo->block_size = 14; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + break; + case (15*15-1): + cinfo->block_size = 15; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + break; + case (16*16-1): + cinfo->block_size = 16; + cinfo->natural_order = jpeg_natural_order; + cinfo->lim_Se = DCTSIZE2-1; + break; + default: + ERREXIT4(cinfo, JERR_BAD_PROGRESSION, + cinfo->Ss, cinfo->Se, cinfo->Ah, cinfo->Al); + break; + } + + /* We initialize DCT_scaled_size and min_DCT_scaled_size to block_size. + * In the full decompressor, + * this will be overridden by jpeg_calc_output_dimensions in jdmaster.c; + * but in the transcoder, + * jpeg_calc_output_dimensions is not used, so we must do it here. + */ + cinfo->min_DCT_h_scaled_size = cinfo->block_size; + cinfo->min_DCT_v_scaled_size = cinfo->block_size; + + /* Compute dimensions of components */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + compptr->DCT_h_scaled_size = cinfo->block_size; + compptr->DCT_v_scaled_size = cinfo->block_size; + /* Size in DCT blocks */ + compptr->width_in_blocks = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * (long) compptr->h_samp_factor, + (long) (cinfo->max_h_samp_factor * cinfo->block_size)); + compptr->height_in_blocks = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * (long) compptr->v_samp_factor, + (long) (cinfo->max_v_samp_factor * cinfo->block_size)); + /* downsampled_width and downsampled_height will also be overridden by + * jdmaster.c if we are doing full decompression. The transcoder library + * doesn't use these values, but the calling application might. + */ + /* Size in samples */ + compptr->downsampled_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * (long) compptr->h_samp_factor, + (long) cinfo->max_h_samp_factor); + compptr->downsampled_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * (long) compptr->v_samp_factor, + (long) cinfo->max_v_samp_factor); + /* Mark component needed, until color conversion says otherwise */ + compptr->component_needed = TRUE; + /* Mark no quantization table yet saved for component */ + compptr->quant_table = NULL; + } + + /* Compute number of fully interleaved MCU rows. 
*/ + cinfo->total_iMCU_rows = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height, + (long) (cinfo->max_v_samp_factor * cinfo->block_size)); + + /* Decide whether file contains multiple scans */ + if (cinfo->comps_in_scan < cinfo->num_components || cinfo->progressive_mode) + cinfo->inputctl->has_multiple_scans = TRUE; + else + cinfo->inputctl->has_multiple_scans = FALSE; +} + + +LOCAL(void) +per_scan_setup (j_decompress_ptr cinfo) +/* Do computations that are needed before processing a JPEG scan */ +/* cinfo->comps_in_scan and cinfo->cur_comp_info[] were set from SOS marker */ +{ + int ci, mcublks, tmp; + jpeg_component_info *compptr; + + if (cinfo->comps_in_scan == 1) { + + /* Noninterleaved (single-component) scan */ + compptr = cinfo->cur_comp_info[0]; + + /* Overall image size in MCUs */ + cinfo->MCUs_per_row = compptr->width_in_blocks; + cinfo->MCU_rows_in_scan = compptr->height_in_blocks; + + /* For noninterleaved scan, always one block per MCU */ + compptr->MCU_width = 1; + compptr->MCU_height = 1; + compptr->MCU_blocks = 1; + compptr->MCU_sample_width = compptr->DCT_h_scaled_size; + compptr->last_col_width = 1; + /* For noninterleaved scans, it is convenient to define last_row_height + * as the number of block rows present in the last iMCU row. + */ + tmp = (int) (compptr->height_in_blocks % compptr->v_samp_factor); + if (tmp == 0) tmp = compptr->v_samp_factor; + compptr->last_row_height = tmp; + + /* Prepare array describing MCU composition */ + cinfo->blocks_in_MCU = 1; + cinfo->MCU_membership[0] = 0; + + } else { + + /* Interleaved (multi-component) scan */ + if (cinfo->comps_in_scan <= 0 || cinfo->comps_in_scan > MAX_COMPS_IN_SCAN) + ERREXIT2(cinfo, JERR_COMPONENT_COUNT, cinfo->comps_in_scan, + MAX_COMPS_IN_SCAN); + + /* Overall image size in MCUs */ + cinfo->MCUs_per_row = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width, + (long) (cinfo->max_h_samp_factor * cinfo->block_size)); + cinfo->MCU_rows_in_scan = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height, + (long) (cinfo->max_v_samp_factor * cinfo->block_size)); + + cinfo->blocks_in_MCU = 0; + + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + /* Sampling factors give # of blocks of component in each MCU */ + compptr->MCU_width = compptr->h_samp_factor; + compptr->MCU_height = compptr->v_samp_factor; + compptr->MCU_blocks = compptr->MCU_width * compptr->MCU_height; + compptr->MCU_sample_width = compptr->MCU_width * compptr->DCT_h_scaled_size; + /* Figure number of non-dummy blocks in last MCU column & row */ + tmp = (int) (compptr->width_in_blocks % compptr->MCU_width); + if (tmp == 0) tmp = compptr->MCU_width; + compptr->last_col_width = tmp; + tmp = (int) (compptr->height_in_blocks % compptr->MCU_height); + if (tmp == 0) tmp = compptr->MCU_height; + compptr->last_row_height = tmp; + /* Prepare array describing MCU composition */ + mcublks = compptr->MCU_blocks; + if (cinfo->blocks_in_MCU + mcublks > D_MAX_BLOCKS_IN_MCU) + ERREXIT(cinfo, JERR_BAD_MCU_SIZE); + while (mcublks-- > 0) { + cinfo->MCU_membership[cinfo->blocks_in_MCU++] = ci; + } + } + + } +} + + +/* + * Save away a copy of the Q-table referenced by each component present + * in the current scan, unless already saved during a prior scan. + * + * In a multiple-scan JPEG file, the encoder could assign different components + * the same Q-table slot number, but change table definitions between scans + * so that each component uses a different Q-table. 
(The IJG encoder is not + * currently capable of doing this, but other encoders might.) Since we want + * to be able to dequantize all the components at the end of the file, this + * means that we have to save away the table actually used for each component. + * We do this by copying the table at the start of the first scan containing + * the component. + * The JPEG spec prohibits the encoder from changing the contents of a Q-table + * slot between scans of a component using that slot. If the encoder does so + * anyway, this decoder will simply use the Q-table values that were current + * at the start of the first scan for the component. + * + * The decompressor output side looks only at the saved quant tables, + * not at the current Q-table slots. + */ + +LOCAL(void) +latch_quant_tables (j_decompress_ptr cinfo) +{ + int ci, qtblno; + jpeg_component_info *compptr; + JQUANT_TBL * qtbl; + + for (ci = 0; ci < cinfo->comps_in_scan; ci++) { + compptr = cinfo->cur_comp_info[ci]; + /* No work if we already saved Q-table for this component */ + if (compptr->quant_table != NULL) + continue; + /* Make sure specified quantization table is present */ + qtblno = compptr->quant_tbl_no; + if (qtblno < 0 || qtblno >= NUM_QUANT_TBLS || + cinfo->quant_tbl_ptrs[qtblno] == NULL) + ERREXIT1(cinfo, JERR_NO_QUANT_TABLE, qtblno); + /* OK, save away the quantization table */ + qtbl = (JQUANT_TBL *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(JQUANT_TBL)); + MEMCOPY(qtbl, cinfo->quant_tbl_ptrs[qtblno], SIZEOF(JQUANT_TBL)); + compptr->quant_table = qtbl; + } +} + + +/* + * Initialize the input modules to read a scan of compressed data. + * The first call to this is done by jdmaster.c after initializing + * the entire decompressor (during jpeg_start_decompress). + * Subsequent calls come from consume_markers, below. + */ + +METHODDEF(void) +start_input_pass (j_decompress_ptr cinfo) +{ + per_scan_setup(cinfo); + latch_quant_tables(cinfo); + (*cinfo->entropy->start_pass) (cinfo); + (*cinfo->coef->start_input_pass) (cinfo); + cinfo->inputctl->consume_input = cinfo->coef->consume_data; +} + + +/* + * Finish up after inputting a compressed-data scan. + * This is called by the coefficient controller after it's read all + * the expected data of the scan. + */ + +METHODDEF(void) +finish_input_pass (j_decompress_ptr cinfo) +{ + cinfo->inputctl->consume_input = consume_markers; +} + + +/* + * Read JPEG markers before, between, or after compressed-data scans. + * Change state as necessary when a new scan is reached. + * Return value is JPEG_SUSPENDED, JPEG_REACHED_SOS, or JPEG_REACHED_EOI. + * + * The consume_input method pointer points either here or to the + * coefficient controller's consume_data routine, depending on whether + * we are reading a compressed data segment or inter-segment markers. + * + * Note: This function should NOT return a pseudo SOS marker (with zero + * component number) to the caller. A pseudo marker received by + * read_markers is processed and then skipped for other markers. 
+ */ + +METHODDEF(int) +consume_markers (j_decompress_ptr cinfo) +{ + my_inputctl_ptr inputctl = (my_inputctl_ptr) cinfo->inputctl; + int val; + + if (inputctl->pub.eoi_reached) /* After hitting EOI, read no further */ + return JPEG_REACHED_EOI; + + for (;;) { /* Loop to pass pseudo SOS marker */ + val = (*cinfo->marker->read_markers) (cinfo); + + switch (val) { + case JPEG_REACHED_SOS: /* Found SOS */ + if (inputctl->inheaders) { /* 1st SOS */ + if (inputctl->inheaders == 1) + initial_setup(cinfo); + if (cinfo->comps_in_scan == 0) { /* pseudo SOS marker */ + inputctl->inheaders = 2; + break; + } + inputctl->inheaders = 0; + /* Note: start_input_pass must be called by jdmaster.c + * before any more input can be consumed. jdapimin.c is + * responsible for enforcing this sequencing. + */ + } else { /* 2nd or later SOS marker */ + if (! inputctl->pub.has_multiple_scans) + ERREXIT(cinfo, JERR_EOI_EXPECTED); /* Oops, I wasn't expecting this! */ + if (cinfo->comps_in_scan == 0) /* unexpected pseudo SOS marker */ + break; + start_input_pass(cinfo); + } + return val; + case JPEG_REACHED_EOI: /* Found EOI */ + inputctl->pub.eoi_reached = TRUE; + if (inputctl->inheaders) { /* Tables-only datastream, apparently */ + if (cinfo->marker->saw_SOF) + ERREXIT(cinfo, JERR_SOF_NO_SOS); + } else { + /* Prevent infinite loop in coef ctlr's decompress_data routine + * if user set output_scan_number larger than number of scans. + */ + if (cinfo->output_scan_number > cinfo->input_scan_number) + cinfo->output_scan_number = cinfo->input_scan_number; + } + return val; + case JPEG_SUSPENDED: + return val; + default: + return val; + } + } +} + + +/* + * Reset state to begin a fresh datastream. + */ + +METHODDEF(void) +reset_input_controller (j_decompress_ptr cinfo) +{ + my_inputctl_ptr inputctl = (my_inputctl_ptr) cinfo->inputctl; + + inputctl->pub.consume_input = consume_markers; + inputctl->pub.has_multiple_scans = FALSE; /* "unknown" would be better */ + inputctl->pub.eoi_reached = FALSE; + inputctl->inheaders = 1; + /* Reset other modules */ + (*cinfo->err->reset_error_mgr) ((j_common_ptr) cinfo); + (*cinfo->marker->reset_marker_reader) (cinfo); + /* Reset progression state -- would be cleaner if entropy decoder did this */ + cinfo->coef_bits = NULL; +} + + +/* + * Initialize the input controller module. + * This is called only once, when the decompression object is created. + */ + +GLOBAL(void) +jinit_input_controller (j_decompress_ptr cinfo) +{ + my_inputctl_ptr inputctl; + + /* Create subobject in permanent pool */ + inputctl = (my_inputctl_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + SIZEOF(my_input_controller)); + cinfo->inputctl = (struct jpeg_input_controller *) inputctl; + /* Initialize method pointers */ + inputctl->pub.consume_input = consume_markers; + inputctl->pub.reset_input_controller = reset_input_controller; + inputctl->pub.start_input_pass = start_input_pass; + inputctl->pub.finish_input_pass = finish_input_pass; + /* Initialize state: can't use reset_input_controller since we don't + * want to try to reset other modules yet. 
+ */ + inputctl->pub.has_multiple_scans = FALSE; /* "unknown" would be better */ + inputctl->pub.eoi_reached = FALSE; + inputctl->inheaders = 1; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmainct.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmainct.c new file mode 100644 index 000000000..02723ca73 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmainct.c @@ -0,0 +1,512 @@ +/* + * jdmainct.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains the main buffer controller for decompression. + * The main buffer lies between the JPEG decompressor proper and the + * post-processor; it holds downsampled data in the JPEG colorspace. + * + * Note that this code is bypassed in raw-data mode, since the application + * supplies the equivalent of the main buffer in that case. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* + * In the current system design, the main buffer need never be a full-image + * buffer; any full-height buffers will be found inside the coefficient or + * postprocessing controllers. Nonetheless, the main controller is not + * trivial. Its responsibility is to provide context rows for upsampling/ + * rescaling, and doing this in an efficient fashion is a bit tricky. + * + * Postprocessor input data is counted in "row groups". A row group + * is defined to be (v_samp_factor * DCT_scaled_size / min_DCT_scaled_size) + * sample rows of each component. (We require DCT_scaled_size values to be + * chosen such that these numbers are integers. In practice DCT_scaled_size + * values will likely be powers of two, so we actually have the stronger + * condition that DCT_scaled_size / min_DCT_scaled_size is an integer.) + * Upsampling will typically produce max_v_samp_factor pixel rows from each + * row group (times any additional scale factor that the upsampler is + * applying). + * + * The coefficient controller will deliver data to us one iMCU row at a time; + * each iMCU row contains v_samp_factor * DCT_scaled_size sample rows, or + * exactly min_DCT_scaled_size row groups. (This amount of data corresponds + * to one row of MCUs when the image is fully interleaved.) Note that the + * number of sample rows varies across components, but the number of row + * groups does not. Some garbage sample rows may be included in the last iMCU + * row at the bottom of the image. + * + * Depending on the vertical scaling algorithm used, the upsampler may need + * access to the sample row(s) above and below its current input row group. + * The upsampler is required to set need_context_rows TRUE at global selection + * time if so. When need_context_rows is FALSE, this controller can simply + * obtain one iMCU row at a time from the coefficient controller and dole it + * out as row groups to the postprocessor. + * + * When need_context_rows is TRUE, this controller guarantees that the buffer + * passed to postprocessing contains at least one row group's worth of samples + * above and below the row group(s) being processed. Note that the context + * rows "above" the first passed row group appear at negative row offsets in + * the passed buffer. 
At the top and bottom of the image, the required + * context rows are manufactured by duplicating the first or last real sample + * row; this avoids having special cases in the upsampling inner loops. + * + * The amount of context is fixed at one row group just because that's a + * convenient number for this controller to work with. The existing + * upsamplers really only need one sample row of context. An upsampler + * supporting arbitrary output rescaling might wish for more than one row + * group of context when shrinking the image; tough, we don't handle that. + * (This is justified by the assumption that downsizing will be handled mostly + * by adjusting the DCT_scaled_size values, so that the actual scale factor at + * the upsample step needn't be much less than one.) + * + * To provide the desired context, we have to retain the last two row groups + * of one iMCU row while reading in the next iMCU row. (The last row group + * can't be processed until we have another row group for its below-context, + * and so we have to save the next-to-last group too for its above-context.) + * We could do this most simply by copying data around in our buffer, but + * that'd be very slow. We can avoid copying any data by creating a rather + * strange pointer structure. Here's how it works. We allocate a workspace + * consisting of M+2 row groups (where M = min_DCT_scaled_size is the number + * of row groups per iMCU row). We create two sets of redundant pointers to + * the workspace. Labeling the physical row groups 0 to M+1, the synthesized + * pointer lists look like this: + * M+1 M-1 + * master pointer --> 0 master pointer --> 0 + * 1 1 + * ... ... + * M-3 M-3 + * M-2 M + * M-1 M+1 + * M M-2 + * M+1 M-1 + * 0 0 + * We read alternate iMCU rows using each master pointer; thus the last two + * row groups of the previous iMCU row remain un-overwritten in the workspace. + * The pointer lists are set up so that the required context rows appear to + * be adjacent to the proper places when we pass the pointer lists to the + * upsampler. + * + * The above pictures describe the normal state of the pointer lists. + * At top and bottom of the image, we diddle the pointer lists to duplicate + * the first or last sample row as necessary (this is cheaper than copying + * sample rows around). + * + * This scheme breaks down if M < 2, ie, min_DCT_scaled_size is 1. In that + * situation each iMCU row provides only one row group so the buffering logic + * must be different (eg, we must read two iMCU rows before we can emit the + * first row group). For now, we simply do not support providing context + * rows when min_DCT_scaled_size is 1. That combination seems unlikely to + * be worth providing --- if someone wants a 1/8th-size preview, they probably + * want it quick and dirty, so a context-free upsampler is sufficient. + */ + + +/* Private buffer controller object */ + +typedef struct { + struct jpeg_d_main_controller pub; /* public fields */ + + /* Pointer to allocated workspace (M or M+2 row groups). */ + JSAMPARRAY buffer[MAX_COMPONENTS]; + + boolean buffer_full; /* Have we gotten an iMCU row from decoder? */ + JDIMENSION rowgroup_ctr; /* counts row groups output to postprocessor */ + + /* Remaining fields are only used in the context case. */ + + /* These are the master pointers to the funny-order pointer lists. 
*/ + JSAMPIMAGE xbuffer[2]; /* pointers to weird pointer lists */ + + int whichptr; /* indicates which pointer set is now in use */ + int context_state; /* process_data state machine status */ + JDIMENSION rowgroups_avail; /* row groups available to postprocessor */ + JDIMENSION iMCU_row_ctr; /* counts iMCU rows to detect image top/bot */ +} my_main_controller; + +typedef my_main_controller * my_main_ptr; + +/* context_state values: */ +#define CTX_PREPARE_FOR_IMCU 0 /* need to prepare for MCU row */ +#define CTX_PROCESS_IMCU 1 /* feeding iMCU to postprocessor */ +#define CTX_POSTPONED_ROW 2 /* feeding postponed row group */ + + +/* Forward declarations */ +METHODDEF(void) process_data_simple_main + JPP((j_decompress_ptr cinfo, JSAMPARRAY output_buf, + JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)); +METHODDEF(void) process_data_context_main + JPP((j_decompress_ptr cinfo, JSAMPARRAY output_buf, + JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)); +#ifdef QUANT_2PASS_SUPPORTED +METHODDEF(void) process_data_crank_post + JPP((j_decompress_ptr cinfo, JSAMPARRAY output_buf, + JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)); +#endif + + +LOCAL(void) +alloc_funny_pointers (j_decompress_ptr cinfo) +/* Allocate space for the funny pointer lists. + * This is done only once, not once per pass. + */ +{ + my_main_ptr main = (my_main_ptr) cinfo->main; + int ci, rgroup; + int M = cinfo->min_DCT_v_scaled_size; + jpeg_component_info *compptr; + JSAMPARRAY xbuf; + + /* Get top-level space for component array pointers. + * We alloc both arrays with one call to save a few cycles. + */ + main->xbuffer[0] = (JSAMPIMAGE) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + cinfo->num_components * 2 * SIZEOF(JSAMPARRAY)); + main->xbuffer[1] = main->xbuffer[0] + cinfo->num_components; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + rgroup = (compptr->v_samp_factor * compptr->DCT_v_scaled_size) / + cinfo->min_DCT_v_scaled_size; /* height of a row group of component */ + /* Get space for pointer lists --- M+4 row groups in each list. + * We alloc both pointer lists with one call to save a few cycles. + */ + xbuf = (JSAMPARRAY) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + 2 * (rgroup * (M + 4)) * SIZEOF(JSAMPROW)); + xbuf += rgroup; /* want one row group at negative offsets */ + main->xbuffer[0][ci] = xbuf; + xbuf += rgroup * (M + 4); + main->xbuffer[1][ci] = xbuf; + } +} + + +LOCAL(void) +make_funny_pointers (j_decompress_ptr cinfo) +/* Create the funny pointer lists discussed in the comments above. + * The actual workspace is already allocated (in main->buffer), + * and the space for the pointer lists is allocated too. + * This routine just fills in the curiously ordered lists. + * This will be repeated at the beginning of each pass. 
+ */ +{ + my_main_ptr main = (my_main_ptr) cinfo->main; + int ci, i, rgroup; + int M = cinfo->min_DCT_v_scaled_size; + jpeg_component_info *compptr; + JSAMPARRAY buf, xbuf0, xbuf1; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + rgroup = (compptr->v_samp_factor * compptr->DCT_v_scaled_size) / + cinfo->min_DCT_v_scaled_size; /* height of a row group of component */ + xbuf0 = main->xbuffer[0][ci]; + xbuf1 = main->xbuffer[1][ci]; + /* First copy the workspace pointers as-is */ + buf = main->buffer[ci]; + for (i = 0; i < rgroup * (M + 2); i++) { + xbuf0[i] = xbuf1[i] = buf[i]; + } + /* In the second list, put the last four row groups in swapped order */ + for (i = 0; i < rgroup * 2; i++) { + xbuf1[rgroup*(M-2) + i] = buf[rgroup*M + i]; + xbuf1[rgroup*M + i] = buf[rgroup*(M-2) + i]; + } + /* The wraparound pointers at top and bottom will be filled later + * (see set_wraparound_pointers, below). Initially we want the "above" + * pointers to duplicate the first actual data line. This only needs + * to happen in xbuffer[0]. + */ + for (i = 0; i < rgroup; i++) { + xbuf0[i - rgroup] = xbuf0[0]; + } + } +} + + +LOCAL(void) +set_wraparound_pointers (j_decompress_ptr cinfo) +/* Set up the "wraparound" pointers at top and bottom of the pointer lists. + * This changes the pointer list state from top-of-image to the normal state. + */ +{ + my_main_ptr main = (my_main_ptr) cinfo->main; + int ci, i, rgroup; + int M = cinfo->min_DCT_v_scaled_size; + jpeg_component_info *compptr; + JSAMPARRAY xbuf0, xbuf1; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + rgroup = (compptr->v_samp_factor * compptr->DCT_v_scaled_size) / + cinfo->min_DCT_v_scaled_size; /* height of a row group of component */ + xbuf0 = main->xbuffer[0][ci]; + xbuf1 = main->xbuffer[1][ci]; + for (i = 0; i < rgroup; i++) { + xbuf0[i - rgroup] = xbuf0[rgroup*(M+1) + i]; + xbuf1[i - rgroup] = xbuf1[rgroup*(M+1) + i]; + xbuf0[rgroup*(M+2) + i] = xbuf0[i]; + xbuf1[rgroup*(M+2) + i] = xbuf1[i]; + } + } +} + + +LOCAL(void) +set_bottom_pointers (j_decompress_ptr cinfo) +/* Change the pointer lists to duplicate the last sample row at the bottom + * of the image. whichptr indicates which xbuffer holds the final iMCU row. + * Also sets rowgroups_avail to indicate number of nondummy row groups in row. + */ +{ + my_main_ptr main = (my_main_ptr) cinfo->main; + int ci, i, rgroup, iMCUheight, rows_left; + jpeg_component_info *compptr; + JSAMPARRAY xbuf; + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Count sample rows in one iMCU row and in one row group */ + iMCUheight = compptr->v_samp_factor * compptr->DCT_v_scaled_size; + rgroup = iMCUheight / cinfo->min_DCT_v_scaled_size; + /* Count nondummy sample rows remaining for this component */ + rows_left = (int) (compptr->downsampled_height % (JDIMENSION) iMCUheight); + if (rows_left == 0) rows_left = iMCUheight; + /* Count nondummy row groups. Should get same answer for each component, + * so we need only do it once. + */ + if (ci == 0) { + main->rowgroups_avail = (JDIMENSION) ((rows_left-1) / rgroup + 1); + } + /* Duplicate the last real sample row rgroup*2 times; this pads out the + * last partial rowgroup and ensures at least one full rowgroup of context. + */ + xbuf = main->xbuffer[main->whichptr][ci]; + for (i = 0; i < rgroup * 2; i++) { + xbuf[rows_left + i] = xbuf[rows_left-1]; + } + } +} + + +/* + * Initialize for a processing pass. 
+ */ + +METHODDEF(void) +start_pass_main (j_decompress_ptr cinfo, J_BUF_MODE pass_mode) +{ + my_main_ptr main = (my_main_ptr) cinfo->main; + + switch (pass_mode) { + case JBUF_PASS_THRU: + if (cinfo->upsample->need_context_rows) { + main->pub.process_data = process_data_context_main; + make_funny_pointers(cinfo); /* Create the xbuffer[] lists */ + main->whichptr = 0; /* Read first iMCU row into xbuffer[0] */ + main->context_state = CTX_PREPARE_FOR_IMCU; + main->iMCU_row_ctr = 0; + } else { + /* Simple case with no context needed */ + main->pub.process_data = process_data_simple_main; + } + main->buffer_full = FALSE; /* Mark buffer empty */ + main->rowgroup_ctr = 0; + break; +#ifdef QUANT_2PASS_SUPPORTED + case JBUF_CRANK_DEST: + /* For last pass of 2-pass quantization, just crank the postprocessor */ + main->pub.process_data = process_data_crank_post; + break; +#endif + default: + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + break; + } +} + + +/* + * Process some data. + * This handles the simple case where no context is required. + */ + +METHODDEF(void) +process_data_simple_main (j_decompress_ptr cinfo, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail) +{ + my_main_ptr main = (my_main_ptr) cinfo->main; + JDIMENSION rowgroups_avail; + + /* Read input data if we haven't filled the main buffer yet */ + if (! main->buffer_full) { + if (! (*cinfo->coef->decompress_data) (cinfo, main->buffer)) + return; /* suspension forced, can do nothing more */ + main->buffer_full = TRUE; /* OK, we have an iMCU row to work with */ + } + + /* There are always min_DCT_scaled_size row groups in an iMCU row. */ + rowgroups_avail = (JDIMENSION) cinfo->min_DCT_v_scaled_size; + /* Note: at the bottom of the image, we may pass extra garbage row groups + * to the postprocessor. The postprocessor has to check for bottom + * of image anyway (at row resolution), so no point in us doing it too. + */ + + /* Feed the postprocessor */ + (*cinfo->post->post_process_data) (cinfo, main->buffer, + &main->rowgroup_ctr, rowgroups_avail, + output_buf, out_row_ctr, out_rows_avail); + + /* Has postprocessor consumed all the data yet? If so, mark buffer empty */ + if (main->rowgroup_ctr >= rowgroups_avail) { + main->buffer_full = FALSE; + main->rowgroup_ctr = 0; + } +} + + +/* + * Process some data. + * This handles the case where context rows must be provided. + */ + +METHODDEF(void) +process_data_context_main (j_decompress_ptr cinfo, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail) +{ + my_main_ptr main = (my_main_ptr) cinfo->main; + + /* Read input data if we haven't filled the main buffer yet */ + if (! main->buffer_full) { + if (! (*cinfo->coef->decompress_data) (cinfo, + main->xbuffer[main->whichptr])) + return; /* suspension forced, can do nothing more */ + main->buffer_full = TRUE; /* OK, we have an iMCU row to work with */ + main->iMCU_row_ctr++; /* count rows received */ + } + + /* Postprocessor typically will not swallow all the input data it is handed + * in one call (due to filling the output buffer first). Must be prepared + * to exit and restart. This switch lets us keep track of how far we got. + * Note that each case falls through to the next on successful completion. 
+ */ + switch (main->context_state) { + case CTX_POSTPONED_ROW: + /* Call postprocessor using previously set pointers for postponed row */ + (*cinfo->post->post_process_data) (cinfo, main->xbuffer[main->whichptr], + &main->rowgroup_ctr, main->rowgroups_avail, + output_buf, out_row_ctr, out_rows_avail); + if (main->rowgroup_ctr < main->rowgroups_avail) + return; /* Need to suspend */ + main->context_state = CTX_PREPARE_FOR_IMCU; + if (*out_row_ctr >= out_rows_avail) + return; /* Postprocessor exactly filled output buf */ + /*FALLTHROUGH*/ + case CTX_PREPARE_FOR_IMCU: + /* Prepare to process first M-1 row groups of this iMCU row */ + main->rowgroup_ctr = 0; + main->rowgroups_avail = (JDIMENSION) (cinfo->min_DCT_v_scaled_size - 1); + /* Check for bottom of image: if so, tweak pointers to "duplicate" + * the last sample row, and adjust rowgroups_avail to ignore padding rows. + */ + if (main->iMCU_row_ctr == cinfo->total_iMCU_rows) + set_bottom_pointers(cinfo); + main->context_state = CTX_PROCESS_IMCU; + /*FALLTHROUGH*/ + case CTX_PROCESS_IMCU: + /* Call postprocessor using previously set pointers */ + (*cinfo->post->post_process_data) (cinfo, main->xbuffer[main->whichptr], + &main->rowgroup_ctr, main->rowgroups_avail, + output_buf, out_row_ctr, out_rows_avail); + if (main->rowgroup_ctr < main->rowgroups_avail) + return; /* Need to suspend */ + /* After the first iMCU, change wraparound pointers to normal state */ + if (main->iMCU_row_ctr == 1) + set_wraparound_pointers(cinfo); + /* Prepare to load new iMCU row using other xbuffer list */ + main->whichptr ^= 1; /* 0=>1 or 1=>0 */ + main->buffer_full = FALSE; + /* Still need to process last row group of this iMCU row, */ + /* which is saved at index M+1 of the other xbuffer */ + main->rowgroup_ctr = (JDIMENSION) (cinfo->min_DCT_v_scaled_size + 1); + main->rowgroups_avail = (JDIMENSION) (cinfo->min_DCT_v_scaled_size + 2); + main->context_state = CTX_POSTPONED_ROW; + } +} + + +/* + * Process some data. + * Final pass of two-pass quantization: just call the postprocessor. + * Source data will be the postprocessor controller's internal buffer. + */ + +#ifdef QUANT_2PASS_SUPPORTED + +METHODDEF(void) +process_data_crank_post (j_decompress_ptr cinfo, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail) +{ + (*cinfo->post->post_process_data) (cinfo, (JSAMPIMAGE) NULL, + (JDIMENSION *) NULL, (JDIMENSION) 0, + output_buf, out_row_ctr, out_rows_avail); +} + +#endif /* QUANT_2PASS_SUPPORTED */ + + +/* + * Initialize main buffer controller. + */ + +GLOBAL(void) +jinit_d_main_controller (j_decompress_ptr cinfo, boolean need_full_buffer) +{ + my_main_ptr main; + int ci, rgroup, ngroups; + jpeg_component_info *compptr; + + main = (my_main_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_main_controller)); + cinfo->main = (struct jpeg_d_main_controller *) main; + main->pub.start_pass = start_pass_main; + + if (need_full_buffer) /* shouldn't happen */ + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + + /* Allocate the workspace. + * ngroups is the number of row groups we need. 
+ */ + if (cinfo->upsample->need_context_rows) { + if (cinfo->min_DCT_v_scaled_size < 2) /* unsupported, see comments above */ + ERREXIT(cinfo, JERR_NOTIMPL); + alloc_funny_pointers(cinfo); /* Alloc space for xbuffer[] lists */ + ngroups = cinfo->min_DCT_v_scaled_size + 2; + } else { + ngroups = cinfo->min_DCT_v_scaled_size; + } + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + rgroup = (compptr->v_samp_factor * compptr->DCT_v_scaled_size) / + cinfo->min_DCT_v_scaled_size; /* height of a row group of component */ + main->buffer[ci] = (*cinfo->mem->alloc_sarray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + compptr->width_in_blocks * compptr->DCT_h_scaled_size, + (JDIMENSION) (rgroup * ngroups)); + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmarker.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmarker.c new file mode 100644 index 000000000..f2a9cc429 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmarker.c @@ -0,0 +1,1406 @@ +/* + * jdmarker.c + * + * Copyright (C) 1991-1998, Thomas G. Lane. + * Modified 2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains routines to decode JPEG datastream markers. + * Most of the complexity arises from our desire to support input + * suspension: if not all of the data for a marker is available, + * we must exit back to the application. On resumption, we reprocess + * the marker. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +typedef enum { /* JPEG marker codes */ + M_SOF0 = 0xc0, + M_SOF1 = 0xc1, + M_SOF2 = 0xc2, + M_SOF3 = 0xc3, + + M_SOF5 = 0xc5, + M_SOF6 = 0xc6, + M_SOF7 = 0xc7, + + M_JPG = 0xc8, + M_SOF9 = 0xc9, + M_SOF10 = 0xca, + M_SOF11 = 0xcb, + + M_SOF13 = 0xcd, + M_SOF14 = 0xce, + M_SOF15 = 0xcf, + + M_DHT = 0xc4, + + M_DAC = 0xcc, + + M_RST0 = 0xd0, + M_RST1 = 0xd1, + M_RST2 = 0xd2, + M_RST3 = 0xd3, + M_RST4 = 0xd4, + M_RST5 = 0xd5, + M_RST6 = 0xd6, + M_RST7 = 0xd7, + + M_SOI = 0xd8, + M_EOI = 0xd9, + M_SOS = 0xda, + M_DQT = 0xdb, + M_DNL = 0xdc, + M_DRI = 0xdd, + M_DHP = 0xde, + M_EXP = 0xdf, + + M_APP0 = 0xe0, + M_APP1 = 0xe1, + M_APP2 = 0xe2, + M_APP3 = 0xe3, + M_APP4 = 0xe4, + M_APP5 = 0xe5, + M_APP6 = 0xe6, + M_APP7 = 0xe7, + M_APP8 = 0xe8, + M_APP9 = 0xe9, + M_APP10 = 0xea, + M_APP11 = 0xeb, + M_APP12 = 0xec, + M_APP13 = 0xed, + M_APP14 = 0xee, + M_APP15 = 0xef, + + M_JPG0 = 0xf0, + M_JPG13 = 0xfd, + M_COM = 0xfe, + + M_TEM = 0x01, + + M_ERROR = 0x100 +} JPEG_MARKER; + + +/* Private state */ + +typedef struct { + struct jpeg_marker_reader pub; /* public fields */ + + /* Application-overridable marker processing methods */ + jpeg_marker_parser_method process_COM; + jpeg_marker_parser_method process_APPn[16]; + + /* Limit on marker data length to save for each marker type */ + unsigned int length_limit_COM; + unsigned int length_limit_APPn[16]; + + /* Status of COM/APPn marker saving */ + jpeg_saved_marker_ptr cur_marker; /* NULL if not processing a marker */ + unsigned int bytes_read; /* data bytes read so far in marker */ + /* Note: cur_marker is not linked into marker_list until it's all read. */ +} my_marker_reader; + +typedef my_marker_reader * my_marker_ptr; + + +/* + * Macros for fetching data from the data source module. 
+ * + * At all times, cinfo->src->next_input_byte and ->bytes_in_buffer reflect + * the current restart point; we update them only when we have reached a + * suitable place to restart if a suspension occurs. + */ + +/* Declare and initialize local copies of input pointer/count */ +#define INPUT_VARS(cinfo) \ + struct jpeg_source_mgr * datasrc = (cinfo)->src; \ + const JOCTET * next_input_byte = datasrc->next_input_byte; \ + size_t bytes_in_buffer = datasrc->bytes_in_buffer + +/* Unload the local copies --- do this only at a restart boundary */ +#define INPUT_SYNC(cinfo) \ + ( datasrc->next_input_byte = next_input_byte, \ + datasrc->bytes_in_buffer = bytes_in_buffer ) + +/* Reload the local copies --- used only in MAKE_BYTE_AVAIL */ +#define INPUT_RELOAD(cinfo) \ + ( next_input_byte = datasrc->next_input_byte, \ + bytes_in_buffer = datasrc->bytes_in_buffer ) + +/* Internal macro for INPUT_BYTE and INPUT_2BYTES: make a byte available. + * Note we do *not* do INPUT_SYNC before calling fill_input_buffer, + * but we must reload the local copies after a successful fill. + */ +#define MAKE_BYTE_AVAIL(cinfo,action) \ + if (bytes_in_buffer == 0) { \ + if (! (*datasrc->fill_input_buffer) (cinfo)) \ + { action; } \ + INPUT_RELOAD(cinfo); \ + } + +/* Read a byte into variable V. + * If must suspend, take the specified action (typically "return FALSE"). + */ +#define INPUT_BYTE(cinfo,V,action) \ + MAKESTMT( MAKE_BYTE_AVAIL(cinfo,action); \ + bytes_in_buffer--; \ + V = GETJOCTET(*next_input_byte++); ) + +/* As above, but read two bytes interpreted as an unsigned 16-bit integer. + * V should be declared unsigned int or perhaps INT32. + */ +#define INPUT_2BYTES(cinfo,V,action) \ + MAKESTMT( MAKE_BYTE_AVAIL(cinfo,action); \ + bytes_in_buffer--; \ + V = ((unsigned int) GETJOCTET(*next_input_byte++)) << 8; \ + MAKE_BYTE_AVAIL(cinfo,action); \ + bytes_in_buffer--; \ + V += GETJOCTET(*next_input_byte++); ) + + +/* + * Routines to process JPEG markers. + * + * Entry condition: JPEG marker itself has been read and its code saved + * in cinfo->unread_marker; input restart point is just after the marker. + * + * Exit: if return TRUE, have read and processed any parameters, and have + * updated the restart point to point after the parameters. + * If return FALSE, was forced to suspend before reaching end of + * marker parameters; restart point has not been moved. Same routine + * will be called again after application supplies more input data. + * + * This approach to suspension assumes that all of a marker's parameters + * can fit into a single input bufferload. This should hold for "normal" + * markers. Some COM/APPn markers might have large parameter segments + * that might not fit. If we are simply dropping such a marker, we use + * skip_input_data to get past it, and thereby put the problem on the + * source manager's shoulders. If we are saving the marker's contents + * into memory, we use a slightly different convention: when forced to + * suspend, the marker processor updates the restart point to the end of + * what it's consumed (ie, the end of the buffer) before returning FALSE. + * On resumption, cinfo->unread_marker still contains the marker code, + * but the data source will point to the next chunk of marker data. + * The marker processor must retain internal state to deal with this. + * + * Note that we don't bother to avoid duplicate trace messages if a + * suspension occurs within marker parameters. Other side effects + * require more care. 
+ */ + + +LOCAL(boolean) +get_soi (j_decompress_ptr cinfo) +/* Process an SOI marker */ +{ + int i; + + TRACEMS(cinfo, 1, JTRC_SOI); + + if (cinfo->marker->saw_SOI) + ERREXIT(cinfo, JERR_SOI_DUPLICATE); + + /* Reset all parameters that are defined to be reset by SOI */ + + for (i = 0; i < NUM_ARITH_TBLS; i++) { + cinfo->arith_dc_L[i] = 0; + cinfo->arith_dc_U[i] = 1; + cinfo->arith_ac_K[i] = 5; + } + cinfo->restart_interval = 0; + + /* Set initial assumptions for colorspace etc */ + + cinfo->jpeg_color_space = JCS_UNKNOWN; + cinfo->CCIR601_sampling = FALSE; /* Assume non-CCIR sampling??? */ + + cinfo->saw_JFIF_marker = FALSE; + cinfo->JFIF_major_version = 1; /* set default JFIF APP0 values */ + cinfo->JFIF_minor_version = 1; + cinfo->density_unit = 0; + cinfo->X_density = 1; + cinfo->Y_density = 1; + cinfo->saw_Adobe_marker = FALSE; + cinfo->Adobe_transform = 0; + + cinfo->marker->saw_SOI = TRUE; + + return TRUE; +} + + +LOCAL(boolean) +get_sof (j_decompress_ptr cinfo, boolean is_baseline, boolean is_prog, + boolean is_arith) +/* Process a SOFn marker */ +{ + INT32 length; + int c, ci; + jpeg_component_info * compptr; + INPUT_VARS(cinfo); + + cinfo->is_baseline = is_baseline; + cinfo->progressive_mode = is_prog; + cinfo->arith_code = is_arith; + + INPUT_2BYTES(cinfo, length, return FALSE); + + INPUT_BYTE(cinfo, cinfo->data_precision, return FALSE); + INPUT_2BYTES(cinfo, cinfo->image_height, return FALSE); + INPUT_2BYTES(cinfo, cinfo->image_width, return FALSE); + INPUT_BYTE(cinfo, cinfo->num_components, return FALSE); + + length -= 8; + + TRACEMS4(cinfo, 1, JTRC_SOF, cinfo->unread_marker, + (int) cinfo->image_width, (int) cinfo->image_height, + cinfo->num_components); + + if (cinfo->marker->saw_SOF) + ERREXIT(cinfo, JERR_SOF_DUPLICATE); + + /* We don't support files in which the image height is initially specified */ + /* as 0 and is later redefined by DNL. As long as we have to check that, */ + /* might as well have a general sanity check. */ + if (cinfo->image_height <= 0 || cinfo->image_width <= 0 + || cinfo->num_components <= 0) + ERREXIT(cinfo, JERR_EMPTY_IMAGE); + + if (length != (cinfo->num_components * 3)) + ERREXIT(cinfo, JERR_BAD_LENGTH); + + if (cinfo->comp_info == NULL) /* do only once, even if suspend */ + cinfo->comp_info = (jpeg_component_info *) (*cinfo->mem->alloc_small) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + cinfo->num_components * SIZEOF(jpeg_component_info)); + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + compptr->component_index = ci; + INPUT_BYTE(cinfo, compptr->component_id, return FALSE); + INPUT_BYTE(cinfo, c, return FALSE); + compptr->h_samp_factor = (c >> 4) & 15; + compptr->v_samp_factor = (c ) & 15; + INPUT_BYTE(cinfo, compptr->quant_tbl_no, return FALSE); + + TRACEMS4(cinfo, 1, JTRC_SOF_COMPONENT, + compptr->component_id, compptr->h_samp_factor, + compptr->v_samp_factor, compptr->quant_tbl_no); + } + + cinfo->marker->saw_SOF = TRUE; + + INPUT_SYNC(cinfo); + return TRUE; +} + + +LOCAL(boolean) +get_sos (j_decompress_ptr cinfo) +/* Process a SOS marker */ +{ + INT32 length; + int i, ci, n, c, cc; + jpeg_component_info * compptr; + INPUT_VARS(cinfo); + + if (! 
cinfo->marker->saw_SOF) + ERREXIT(cinfo, JERR_SOS_NO_SOF); + + INPUT_2BYTES(cinfo, length, return FALSE); + + INPUT_BYTE(cinfo, n, return FALSE); /* Number of components */ + + TRACEMS1(cinfo, 1, JTRC_SOS, n); + + if (length != (n * 2 + 6) || n > MAX_COMPS_IN_SCAN || + (n == 0 && !cinfo->progressive_mode)) + /* pseudo SOS marker only allowed in progressive mode */ + ERREXIT(cinfo, JERR_BAD_LENGTH); + + cinfo->comps_in_scan = n; + + /* Collect the component-spec parameters */ + + for (i = 0; i < n; i++) { + INPUT_BYTE(cinfo, cc, return FALSE); + INPUT_BYTE(cinfo, c, return FALSE); + + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + if (cc == compptr->component_id) + goto id_found; + } + + ERREXIT1(cinfo, JERR_BAD_COMPONENT_ID, cc); + + id_found: + + cinfo->cur_comp_info[i] = compptr; + compptr->dc_tbl_no = (c >> 4) & 15; + compptr->ac_tbl_no = (c ) & 15; + + TRACEMS3(cinfo, 1, JTRC_SOS_COMPONENT, cc, + compptr->dc_tbl_no, compptr->ac_tbl_no); + } + + /* Collect the additional scan parameters Ss, Se, Ah/Al. */ + INPUT_BYTE(cinfo, c, return FALSE); + cinfo->Ss = c; + INPUT_BYTE(cinfo, c, return FALSE); + cinfo->Se = c; + INPUT_BYTE(cinfo, c, return FALSE); + cinfo->Ah = (c >> 4) & 15; + cinfo->Al = (c ) & 15; + + TRACEMS4(cinfo, 1, JTRC_SOS_PARAMS, cinfo->Ss, cinfo->Se, + cinfo->Ah, cinfo->Al); + + /* Prepare to scan data & restart markers */ + cinfo->marker->next_restart_num = 0; + + /* Count another (non-pseudo) SOS marker */ + if (n) cinfo->input_scan_number++; + + INPUT_SYNC(cinfo); + return TRUE; +} + + +#ifdef D_ARITH_CODING_SUPPORTED + +LOCAL(boolean) +get_dac (j_decompress_ptr cinfo) +/* Process a DAC marker */ +{ + INT32 length; + int index, val; + INPUT_VARS(cinfo); + + INPUT_2BYTES(cinfo, length, return FALSE); + length -= 2; + + while (length > 0) { + INPUT_BYTE(cinfo, index, return FALSE); + INPUT_BYTE(cinfo, val, return FALSE); + + length -= 2; + + TRACEMS2(cinfo, 1, JTRC_DAC, index, val); + + if (index < 0 || index >= (2*NUM_ARITH_TBLS)) + ERREXIT1(cinfo, JERR_DAC_INDEX, index); + + if (index >= NUM_ARITH_TBLS) { /* define AC table */ + cinfo->arith_ac_K[index-NUM_ARITH_TBLS] = (UINT8) val; + } else { /* define DC table */ + cinfo->arith_dc_L[index] = (UINT8) (val & 0x0F); + cinfo->arith_dc_U[index] = (UINT8) (val >> 4); + if (cinfo->arith_dc_L[index] > cinfo->arith_dc_U[index]) + ERREXIT1(cinfo, JERR_DAC_VALUE, val); + } + } + + if (length != 0) + ERREXIT(cinfo, JERR_BAD_LENGTH); + + INPUT_SYNC(cinfo); + return TRUE; +} + +#else /* ! D_ARITH_CODING_SUPPORTED */ + +#define get_dac(cinfo) skip_variable(cinfo) + +#endif /* D_ARITH_CODING_SUPPORTED */ + + +LOCAL(boolean) +get_dht (j_decompress_ptr cinfo) +/* Process a DHT marker */ +{ + INT32 length; + UINT8 bits[17]; + UINT8 huffval[256]; + int i, index, count; + JHUFF_TBL **htblptr; + INPUT_VARS(cinfo); + + INPUT_2BYTES(cinfo, length, return FALSE); + length -= 2; + + while (length > 16) { + INPUT_BYTE(cinfo, index, return FALSE); + + TRACEMS1(cinfo, 1, JTRC_DHT, index); + + bits[0] = 0; + count = 0; + for (i = 1; i <= 16; i++) { + INPUT_BYTE(cinfo, bits[i], return FALSE); + count += bits[i]; + } + + length -= 1 + 16; + + TRACEMS8(cinfo, 2, JTRC_HUFFBITS, + bits[1], bits[2], bits[3], bits[4], + bits[5], bits[6], bits[7], bits[8]); + TRACEMS8(cinfo, 2, JTRC_HUFFBITS, + bits[9], bits[10], bits[11], bits[12], + bits[13], bits[14], bits[15], bits[16]); + + /* Here we just do minimal validation of the counts to avoid walking + * off the end of our table space. 
jdhuff.c will check more carefully. + */ + if (count > 256 || ((INT32) count) > length) + ERREXIT(cinfo, JERR_BAD_HUFF_TABLE); + + for (i = 0; i < count; i++) + INPUT_BYTE(cinfo, huffval[i], return FALSE); + + length -= count; + + if (index & 0x10) { /* AC table definition */ + index -= 0x10; + htblptr = &cinfo->ac_huff_tbl_ptrs[index]; + } else { /* DC table definition */ + htblptr = &cinfo->dc_huff_tbl_ptrs[index]; + } + + if (index < 0 || index >= NUM_HUFF_TBLS) + ERREXIT1(cinfo, JERR_DHT_INDEX, index); + + if (*htblptr == NULL) + *htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo); + + MEMCOPY((*htblptr)->bits, bits, SIZEOF((*htblptr)->bits)); + MEMCOPY((*htblptr)->huffval, huffval, SIZEOF((*htblptr)->huffval)); + } + + if (length != 0) + ERREXIT(cinfo, JERR_BAD_LENGTH); + + INPUT_SYNC(cinfo); + return TRUE; +} + + +LOCAL(boolean) +get_dqt (j_decompress_ptr cinfo) +/* Process a DQT marker */ +{ + INT32 length, count, i; + int n, prec; + unsigned int tmp; + JQUANT_TBL *quant_ptr; + const int *natural_order; + INPUT_VARS(cinfo); + + INPUT_2BYTES(cinfo, length, return FALSE); + length -= 2; + + while (length > 0) { + length--; + INPUT_BYTE(cinfo, n, return FALSE); + prec = n >> 4; + n &= 0x0F; + + TRACEMS2(cinfo, 1, JTRC_DQT, n, prec); + + if (n >= NUM_QUANT_TBLS) + ERREXIT1(cinfo, JERR_DQT_INDEX, n); + + if (cinfo->quant_tbl_ptrs[n] == NULL) + cinfo->quant_tbl_ptrs[n] = jpeg_alloc_quant_table((j_common_ptr) cinfo); + quant_ptr = cinfo->quant_tbl_ptrs[n]; + + if (prec) { + if (length < DCTSIZE2 * 2) { + /* Initialize full table for safety. */ + for (i = 0; i < DCTSIZE2; i++) { + quant_ptr->quantval[i] = 1; + } + count = length >> 1; + } else + count = DCTSIZE2; + } else { + if (length < DCTSIZE2) { + /* Initialize full table for safety. */ + for (i = 0; i < DCTSIZE2; i++) { + quant_ptr->quantval[i] = 1; + } + count = length; + } else + count = DCTSIZE2; + } + + switch (count) { + case (2*2): natural_order = jpeg_natural_order2; break; + case (3*3): natural_order = jpeg_natural_order3; break; + case (4*4): natural_order = jpeg_natural_order4; break; + case (5*5): natural_order = jpeg_natural_order5; break; + case (6*6): natural_order = jpeg_natural_order6; break; + case (7*7): natural_order = jpeg_natural_order7; break; + default: natural_order = jpeg_natural_order; break; + } + + for (i = 0; i < count; i++) { + if (prec) + INPUT_2BYTES(cinfo, tmp, return FALSE); + else + INPUT_BYTE(cinfo, tmp, return FALSE); + /* We convert the zigzag-order table to natural array order. */ + quant_ptr->quantval[natural_order[i]] = (UINT16) tmp; + } + + if (cinfo->err->trace_level >= 2) { + for (i = 0; i < DCTSIZE2; i += 8) { + TRACEMS8(cinfo, 2, JTRC_QUANTVALS, + quant_ptr->quantval[i], quant_ptr->quantval[i+1], + quant_ptr->quantval[i+2], quant_ptr->quantval[i+3], + quant_ptr->quantval[i+4], quant_ptr->quantval[i+5], + quant_ptr->quantval[i+6], quant_ptr->quantval[i+7]); + } + } + + length -= count; + if (prec) length -= count; + } + + if (length != 0) + ERREXIT(cinfo, JERR_BAD_LENGTH); + + INPUT_SYNC(cinfo); + return TRUE; +} + + +LOCAL(boolean) +get_dri (j_decompress_ptr cinfo) +/* Process a DRI marker */ +{ + INT32 length; + unsigned int tmp; + INPUT_VARS(cinfo); + + INPUT_2BYTES(cinfo, length, return FALSE); + + if (length != 4) + ERREXIT(cinfo, JERR_BAD_LENGTH); + + INPUT_2BYTES(cinfo, tmp, return FALSE); + + TRACEMS1(cinfo, 1, JTRC_DRI, tmp); + + cinfo->restart_interval = tmp; + + INPUT_SYNC(cinfo); + return TRUE; +} + + +/* + * Routines for processing APPn and COM markers. 
+ * These are either saved in memory or discarded, per application request. + * APP0 and APP14 are specially checked to see if they are + * JFIF and Adobe markers, respectively. + */ + +#define APP0_DATA_LEN 14 /* Length of interesting data in APP0 */ +#define APP14_DATA_LEN 12 /* Length of interesting data in APP14 */ +#define APPN_DATA_LEN 14 /* Must be the largest of the above!! */ + + +LOCAL(void) +examine_app0 (j_decompress_ptr cinfo, JOCTET FAR * data, + unsigned int datalen, INT32 remaining) +/* Examine first few bytes from an APP0. + * Take appropriate action if it is a JFIF marker. + * datalen is # of bytes at data[], remaining is length of rest of marker data. + */ +{ + INT32 totallen = (INT32) datalen + remaining; + + if (datalen >= APP0_DATA_LEN && + GETJOCTET(data[0]) == 0x4A && + GETJOCTET(data[1]) == 0x46 && + GETJOCTET(data[2]) == 0x49 && + GETJOCTET(data[3]) == 0x46 && + GETJOCTET(data[4]) == 0) { + /* Found JFIF APP0 marker: save info */ + cinfo->saw_JFIF_marker = TRUE; + cinfo->JFIF_major_version = GETJOCTET(data[5]); + cinfo->JFIF_minor_version = GETJOCTET(data[6]); + cinfo->density_unit = GETJOCTET(data[7]); + cinfo->X_density = (GETJOCTET(data[8]) << 8) + GETJOCTET(data[9]); + cinfo->Y_density = (GETJOCTET(data[10]) << 8) + GETJOCTET(data[11]); + /* Check version. + * Major version must be 1, anything else signals an incompatible change. + * (We used to treat this as an error, but now it's a nonfatal warning, + * because some bozo at Hijaak couldn't read the spec.) + * Minor version should be 0..2, but process anyway if newer. + */ + if (cinfo->JFIF_major_version != 1) + WARNMS2(cinfo, JWRN_JFIF_MAJOR, + cinfo->JFIF_major_version, cinfo->JFIF_minor_version); + /* Generate trace messages */ + TRACEMS5(cinfo, 1, JTRC_JFIF, + cinfo->JFIF_major_version, cinfo->JFIF_minor_version, + cinfo->X_density, cinfo->Y_density, cinfo->density_unit); + /* Validate thumbnail dimensions and issue appropriate messages */ + if (GETJOCTET(data[12]) | GETJOCTET(data[13])) + TRACEMS2(cinfo, 1, JTRC_JFIF_THUMBNAIL, + GETJOCTET(data[12]), GETJOCTET(data[13])); + totallen -= APP0_DATA_LEN; + if (totallen != + ((INT32)GETJOCTET(data[12]) * (INT32)GETJOCTET(data[13]) * (INT32) 3)) + TRACEMS1(cinfo, 1, JTRC_JFIF_BADTHUMBNAILSIZE, (int) totallen); + } else if (datalen >= 6 && + GETJOCTET(data[0]) == 0x4A && + GETJOCTET(data[1]) == 0x46 && + GETJOCTET(data[2]) == 0x58 && + GETJOCTET(data[3]) == 0x58 && + GETJOCTET(data[4]) == 0) { + /* Found JFIF "JFXX" extension APP0 marker */ + /* The library doesn't actually do anything with these, + * but we try to produce a helpful trace message. + */ + switch (GETJOCTET(data[5])) { + case 0x10: + TRACEMS1(cinfo, 1, JTRC_THUMB_JPEG, (int) totallen); + break; + case 0x11: + TRACEMS1(cinfo, 1, JTRC_THUMB_PALETTE, (int) totallen); + break; + case 0x13: + TRACEMS1(cinfo, 1, JTRC_THUMB_RGB, (int) totallen); + break; + default: + TRACEMS2(cinfo, 1, JTRC_JFIF_EXTENSION, + GETJOCTET(data[5]), (int) totallen); + break; + } + } else { + /* Start of APP0 does not match "JFIF" or "JFXX", or too short */ + TRACEMS1(cinfo, 1, JTRC_APP0, (int) totallen); + } +} + + +LOCAL(void) +examine_app14 (j_decompress_ptr cinfo, JOCTET FAR * data, + unsigned int datalen, INT32 remaining) +/* Examine first few bytes from an APP14. + * Take appropriate action if it is an Adobe marker. + * datalen is # of bytes at data[], remaining is length of rest of marker data. 
+ */ +{ + unsigned int version, flags0, flags1, transform; + + if (datalen >= APP14_DATA_LEN && + GETJOCTET(data[0]) == 0x41 && + GETJOCTET(data[1]) == 0x64 && + GETJOCTET(data[2]) == 0x6F && + GETJOCTET(data[3]) == 0x62 && + GETJOCTET(data[4]) == 0x65) { + /* Found Adobe APP14 marker */ + version = (GETJOCTET(data[5]) << 8) + GETJOCTET(data[6]); + flags0 = (GETJOCTET(data[7]) << 8) + GETJOCTET(data[8]); + flags1 = (GETJOCTET(data[9]) << 8) + GETJOCTET(data[10]); + transform = GETJOCTET(data[11]); + TRACEMS4(cinfo, 1, JTRC_ADOBE, version, flags0, flags1, transform); + cinfo->saw_Adobe_marker = TRUE; + cinfo->Adobe_transform = (UINT8) transform; + } else { + /* Start of APP14 does not match "Adobe", or too short */ + TRACEMS1(cinfo, 1, JTRC_APP14, (int) (datalen + remaining)); + } +} + + +METHODDEF(boolean) +get_interesting_appn (j_decompress_ptr cinfo) +/* Process an APP0 or APP14 marker without saving it */ +{ + INT32 length; + JOCTET b[APPN_DATA_LEN]; + unsigned int i, numtoread; + INPUT_VARS(cinfo); + + INPUT_2BYTES(cinfo, length, return FALSE); + length -= 2; + + /* get the interesting part of the marker data */ + if (length >= APPN_DATA_LEN) + numtoread = APPN_DATA_LEN; + else if (length > 0) + numtoread = (unsigned int) length; + else + numtoread = 0; + for (i = 0; i < numtoread; i++) + INPUT_BYTE(cinfo, b[i], return FALSE); + length -= numtoread; + + /* process it */ + switch (cinfo->unread_marker) { + case M_APP0: + examine_app0(cinfo, (JOCTET FAR *) b, numtoread, length); + break; + case M_APP14: + examine_app14(cinfo, (JOCTET FAR *) b, numtoread, length); + break; + default: + /* can't get here unless jpeg_save_markers chooses wrong processor */ + ERREXIT1(cinfo, JERR_UNKNOWN_MARKER, cinfo->unread_marker); + break; + } + + /* skip any remaining data -- could be lots */ + INPUT_SYNC(cinfo); + if (length > 0) + (*cinfo->src->skip_input_data) (cinfo, (long) length); + + return TRUE; +} + + +#ifdef SAVE_MARKERS_SUPPORTED + +METHODDEF(boolean) +save_marker (j_decompress_ptr cinfo) +/* Save an APPn or COM marker into the marker list */ +{ + my_marker_ptr marker = (my_marker_ptr) cinfo->marker; + jpeg_saved_marker_ptr cur_marker = marker->cur_marker; + unsigned int bytes_read, data_length; + JOCTET FAR * data; + INT32 length = 0; + INPUT_VARS(cinfo); + + if (cur_marker == NULL) { + /* begin reading a marker */ + INPUT_2BYTES(cinfo, length, return FALSE); + length -= 2; + if (length >= 0) { /* watch out for bogus length word */ + /* figure out how much we want to save */ + unsigned int limit; + if (cinfo->unread_marker == (int) M_COM) + limit = marker->length_limit_COM; + else + limit = marker->length_limit_APPn[cinfo->unread_marker - (int) M_APP0]; + if ((unsigned int) length < limit) + limit = (unsigned int) length; + /* allocate and initialize the marker item */ + cur_marker = (jpeg_saved_marker_ptr) + (*cinfo->mem->alloc_large) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(struct jpeg_marker_struct) + limit); + cur_marker->next = NULL; + cur_marker->marker = (UINT8) cinfo->unread_marker; + cur_marker->original_length = (unsigned int) length; + cur_marker->data_length = limit; + /* data area is just beyond the jpeg_marker_struct */ + data = cur_marker->data = (JOCTET FAR *) (cur_marker + 1); + marker->cur_marker = cur_marker; + marker->bytes_read = 0; + bytes_read = 0; + data_length = limit; + } else { + /* deal with bogus length word */ + bytes_read = data_length = 0; + data = NULL; + } + } else { + /* resume reading a marker */ + bytes_read = marker->bytes_read; + data_length = 
cur_marker->data_length; + data = cur_marker->data + bytes_read; + } + + while (bytes_read < data_length) { + INPUT_SYNC(cinfo); /* move the restart point to here */ + marker->bytes_read = bytes_read; + /* If there's not at least one byte in buffer, suspend */ + MAKE_BYTE_AVAIL(cinfo, return FALSE); + /* Copy bytes with reasonable rapidity */ + while (bytes_read < data_length && bytes_in_buffer > 0) { + *data++ = *next_input_byte++; + bytes_in_buffer--; + bytes_read++; + } + } + + /* Done reading what we want to read */ + if (cur_marker != NULL) { /* will be NULL if bogus length word */ + /* Add new marker to end of list */ + if (cinfo->marker_list == NULL) { + cinfo->marker_list = cur_marker; + } else { + jpeg_saved_marker_ptr prev = cinfo->marker_list; + while (prev->next != NULL) + prev = prev->next; + prev->next = cur_marker; + } + /* Reset pointer & calc remaining data length */ + data = cur_marker->data; + length = cur_marker->original_length - data_length; + } + /* Reset to initial state for next marker */ + marker->cur_marker = NULL; + + /* Process the marker if interesting; else just make a generic trace msg */ + switch (cinfo->unread_marker) { + case M_APP0: + examine_app0(cinfo, data, data_length, length); + break; + case M_APP14: + examine_app14(cinfo, data, data_length, length); + break; + default: + TRACEMS2(cinfo, 1, JTRC_MISC_MARKER, cinfo->unread_marker, + (int) (data_length + length)); + break; + } + + /* skip any remaining data -- could be lots */ + INPUT_SYNC(cinfo); /* do before skip_input_data */ + if (length > 0) + (*cinfo->src->skip_input_data) (cinfo, (long) length); + + return TRUE; +} + +#endif /* SAVE_MARKERS_SUPPORTED */ + + +METHODDEF(boolean) +skip_variable (j_decompress_ptr cinfo) +/* Skip over an unknown or uninteresting variable-length marker */ +{ + INT32 length; + INPUT_VARS(cinfo); + + INPUT_2BYTES(cinfo, length, return FALSE); + length -= 2; + + TRACEMS2(cinfo, 1, JTRC_MISC_MARKER, cinfo->unread_marker, (int) length); + + INPUT_SYNC(cinfo); /* do before skip_input_data */ + if (length > 0) + (*cinfo->src->skip_input_data) (cinfo, (long) length); + + return TRUE; +} + + +/* + * Find the next JPEG marker, save it in cinfo->unread_marker. + * Returns FALSE if had to suspend before reaching a marker; + * in that case cinfo->unread_marker is unchanged. + * + * Note that the result might not be a valid marker code, + * but it will never be 0 or FF. + */ + +LOCAL(boolean) +next_marker (j_decompress_ptr cinfo) +{ + int c; + INPUT_VARS(cinfo); + + for (;;) { + INPUT_BYTE(cinfo, c, return FALSE); + /* Skip any non-FF bytes. + * This may look a bit inefficient, but it will not occur in a valid file. + * We sync after each discarded byte so that a suspending data source + * can discard the byte from its buffer. + */ + while (c != 0xFF) { + cinfo->marker->discarded_bytes++; + INPUT_SYNC(cinfo); + INPUT_BYTE(cinfo, c, return FALSE); + } + /* This loop swallows any duplicate FF bytes. Extra FFs are legal as + * pad bytes, so don't count them in discarded_bytes. We assume there + * will not be so many consecutive FF bytes as to overflow a suspending + * data source's input buffer. + */ + do { + INPUT_BYTE(cinfo, c, return FALSE); + } while (c == 0xFF); + if (c != 0) + break; /* found a valid marker, exit loop */ + /* Reach here if we found a stuffed-zero data sequence (FF/00). + * Discard it and loop back to try again. 
+ */ + cinfo->marker->discarded_bytes += 2; + INPUT_SYNC(cinfo); + } + + if (cinfo->marker->discarded_bytes != 0) { + WARNMS2(cinfo, JWRN_EXTRANEOUS_DATA, cinfo->marker->discarded_bytes, c); + cinfo->marker->discarded_bytes = 0; + } + + cinfo->unread_marker = c; + + INPUT_SYNC(cinfo); + return TRUE; +} + + +LOCAL(boolean) +first_marker (j_decompress_ptr cinfo) +/* Like next_marker, but used to obtain the initial SOI marker. */ +/* For this marker, we do not allow preceding garbage or fill; otherwise, + * we might well scan an entire input file before realizing it ain't JPEG. + * If an application wants to process non-JFIF files, it must seek to the + * SOI before calling the JPEG library. + */ +{ + int c, c2; + INPUT_VARS(cinfo); + + INPUT_BYTE(cinfo, c, return FALSE); + INPUT_BYTE(cinfo, c2, return FALSE); + if (c != 0xFF || c2 != (int) M_SOI) + ERREXIT2(cinfo, JERR_NO_SOI, c, c2); + + cinfo->unread_marker = c2; + + INPUT_SYNC(cinfo); + return TRUE; +} + + +/* + * Read markers until SOS or EOI. + * + * Returns same codes as are defined for jpeg_consume_input: + * JPEG_SUSPENDED, JPEG_REACHED_SOS, or JPEG_REACHED_EOI. + * + * Note: This function may return a pseudo SOS marker (with zero + * component number) for treat by input controller's consume_input. + * consume_input itself should filter out (skip) the pseudo marker + * after processing for the caller. + */ + +METHODDEF(int) +read_markers (j_decompress_ptr cinfo) +{ + /* Outer loop repeats once for each marker. */ + for (;;) { + /* Collect the marker proper, unless we already did. */ + /* NB: first_marker() enforces the requirement that SOI appear first. */ + if (cinfo->unread_marker == 0) { + if (! cinfo->marker->saw_SOI) { + if (! first_marker(cinfo)) + return JPEG_SUSPENDED; + } else { + if (! next_marker(cinfo)) + return JPEG_SUSPENDED; + } + } + /* At this point cinfo->unread_marker contains the marker code and the + * input point is just past the marker proper, but before any parameters. + * A suspension will cause us to return with this state still true. + */ + switch (cinfo->unread_marker) { + case M_SOI: + if (! get_soi(cinfo)) + return JPEG_SUSPENDED; + break; + + case M_SOF0: /* Baseline */ + if (! get_sof(cinfo, TRUE, FALSE, FALSE)) + return JPEG_SUSPENDED; + break; + + case M_SOF1: /* Extended sequential, Huffman */ + if (! get_sof(cinfo, FALSE, FALSE, FALSE)) + return JPEG_SUSPENDED; + break; + + case M_SOF2: /* Progressive, Huffman */ + if (! get_sof(cinfo, FALSE, TRUE, FALSE)) + return JPEG_SUSPENDED; + break; + + case M_SOF9: /* Extended sequential, arithmetic */ + if (! get_sof(cinfo, FALSE, FALSE, TRUE)) + return JPEG_SUSPENDED; + break; + + case M_SOF10: /* Progressive, arithmetic */ + if (! get_sof(cinfo, FALSE, TRUE, TRUE)) + return JPEG_SUSPENDED; + break; + + /* Currently unsupported SOFn types */ + case M_SOF3: /* Lossless, Huffman */ + case M_SOF5: /* Differential sequential, Huffman */ + case M_SOF6: /* Differential progressive, Huffman */ + case M_SOF7: /* Differential lossless, Huffman */ + case M_JPG: /* Reserved for JPEG extensions */ + case M_SOF11: /* Lossless, arithmetic */ + case M_SOF13: /* Differential sequential, arithmetic */ + case M_SOF14: /* Differential progressive, arithmetic */ + case M_SOF15: /* Differential lossless, arithmetic */ + ERREXIT1(cinfo, JERR_SOF_UNSUPPORTED, cinfo->unread_marker); + break; + + case M_SOS: + if (! 
get_sos(cinfo)) + return JPEG_SUSPENDED; + cinfo->unread_marker = 0; /* processed the marker */ + return JPEG_REACHED_SOS; + + case M_EOI: + TRACEMS(cinfo, 1, JTRC_EOI); + cinfo->unread_marker = 0; /* processed the marker */ + return JPEG_REACHED_EOI; + + case M_DAC: + if (! get_dac(cinfo)) + return JPEG_SUSPENDED; + break; + + case M_DHT: + if (! get_dht(cinfo)) + return JPEG_SUSPENDED; + break; + + case M_DQT: + if (! get_dqt(cinfo)) + return JPEG_SUSPENDED; + break; + + case M_DRI: + if (! get_dri(cinfo)) + return JPEG_SUSPENDED; + break; + + case M_APP0: + case M_APP1: + case M_APP2: + case M_APP3: + case M_APP4: + case M_APP5: + case M_APP6: + case M_APP7: + case M_APP8: + case M_APP9: + case M_APP10: + case M_APP11: + case M_APP12: + case M_APP13: + case M_APP14: + case M_APP15: + if (! (*((my_marker_ptr) cinfo->marker)->process_APPn[ + cinfo->unread_marker - (int) M_APP0]) (cinfo)) + return JPEG_SUSPENDED; + break; + + case M_COM: + if (! (*((my_marker_ptr) cinfo->marker)->process_COM) (cinfo)) + return JPEG_SUSPENDED; + break; + + case M_RST0: /* these are all parameterless */ + case M_RST1: + case M_RST2: + case M_RST3: + case M_RST4: + case M_RST5: + case M_RST6: + case M_RST7: + case M_TEM: + TRACEMS1(cinfo, 1, JTRC_PARMLESS_MARKER, cinfo->unread_marker); + break; + + case M_DNL: /* Ignore DNL ... perhaps the wrong thing */ + if (! skip_variable(cinfo)) + return JPEG_SUSPENDED; + break; + + default: /* must be DHP, EXP, JPGn, or RESn */ + /* For now, we treat the reserved markers as fatal errors since they are + * likely to be used to signal incompatible JPEG Part 3 extensions. + * Once the JPEG 3 version-number marker is well defined, this code + * ought to change! + */ + ERREXIT1(cinfo, JERR_UNKNOWN_MARKER, cinfo->unread_marker); + break; + } + /* Successfully processed marker, so reset state variable */ + cinfo->unread_marker = 0; + } /* end loop */ +} + + +/* + * Read a restart marker, which is expected to appear next in the datastream; + * if the marker is not there, take appropriate recovery action. + * Returns FALSE if suspension is required. + * + * This is called by the entropy decoder after it has read an appropriate + * number of MCUs. cinfo->unread_marker may be nonzero if the entropy decoder + * has already read a marker from the data source. Under normal conditions + * cinfo->unread_marker will be reset to 0 before returning; if not reset, + * it holds a marker which the decoder will be unable to read past. + */ + +METHODDEF(boolean) +read_restart_marker (j_decompress_ptr cinfo) +{ + /* Obtain a marker unless we already did. */ + /* Note that next_marker will complain if it skips any data. */ + if (cinfo->unread_marker == 0) { + if (! next_marker(cinfo)) + return FALSE; + } + + if (cinfo->unread_marker == + ((int) M_RST0 + cinfo->marker->next_restart_num)) { + /* Normal case --- swallow the marker and let entropy decoder continue */ + TRACEMS1(cinfo, 3, JTRC_RST, cinfo->marker->next_restart_num); + cinfo->unread_marker = 0; + } else { + /* Uh-oh, the restart markers have been messed up. */ + /* Let the data source manager determine how to resync. */ + if (! (*cinfo->src->resync_to_restart) (cinfo, + cinfo->marker->next_restart_num)) + return FALSE; + } + + /* Update next-restart state */ + cinfo->marker->next_restart_num = (cinfo->marker->next_restart_num + 1) & 7; + + return TRUE; +} + + +/* + * This is the default resync_to_restart method for data source managers + * to use if they don't have any better approach. 
Some data source managers + * may be able to back up, or may have additional knowledge about the data + * which permits a more intelligent recovery strategy; such managers would + * presumably supply their own resync method. + * + * read_restart_marker calls resync_to_restart if it finds a marker other than + * the restart marker it was expecting. (This code is *not* used unless + * a nonzero restart interval has been declared.) cinfo->unread_marker is + * the marker code actually found (might be anything, except 0 or FF). + * The desired restart marker number (0..7) is passed as a parameter. + * This routine is supposed to apply whatever error recovery strategy seems + * appropriate in order to position the input stream to the next data segment. + * Note that cinfo->unread_marker is treated as a marker appearing before + * the current data-source input point; usually it should be reset to zero + * before returning. + * Returns FALSE if suspension is required. + * + * This implementation is substantially constrained by wanting to treat the + * input as a data stream; this means we can't back up. Therefore, we have + * only the following actions to work with: + * 1. Simply discard the marker and let the entropy decoder resume at next + * byte of file. + * 2. Read forward until we find another marker, discarding intervening + * data. (In theory we could look ahead within the current bufferload, + * without having to discard data if we don't find the desired marker. + * This idea is not implemented here, in part because it makes behavior + * dependent on buffer size and chance buffer-boundary positions.) + * 3. Leave the marker unread (by failing to zero cinfo->unread_marker). + * This will cause the entropy decoder to process an empty data segment, + * inserting dummy zeroes, and then we will reprocess the marker. + * + * #2 is appropriate if we think the desired marker lies ahead, while #3 is + * appropriate if the found marker is a future restart marker (indicating + * that we have missed the desired restart marker, probably because it got + * corrupted). + * We apply #2 or #3 if the found marker is a restart marker no more than + * two counts behind or ahead of the expected one. We also apply #2 if the + * found marker is not a legal JPEG marker code (it's certainly bogus data). + * If the found marker is a restart marker more than 2 counts away, we do #1 + * (too much risk that the marker is erroneous; with luck we will be able to + * resync at some future point). + * For any valid non-restart JPEG marker, we apply #3. This keeps us from + * overrunning the end of a scan. An implementation limited to single-scan + * files might find it better to apply #2 for markers other than EOI, since + * any other marker would have to be bogus data in that case. + */ + +GLOBAL(boolean) +jpeg_resync_to_restart (j_decompress_ptr cinfo, int desired) +{ + int marker = cinfo->unread_marker; + int action = 1; + + /* Always put up a warning. */ + WARNMS2(cinfo, JWRN_MUST_RESYNC, marker, desired); + + /* Outer loop handles repeated decision after scanning forward. 
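/*
 * [Editorial illustration, not part of the library source or of this patch]
 * A minimal standalone sketch of the action-1/2/3 recovery policy described
 * in the comment above, written with plain ints so it compiles in isolation.
 * classify_marker() and the RST0 constant are hypothetical names, not libjpeg
 * APIs; the authoritative logic is jpeg_resync_to_restart() just below.
 */
#include <stdio.h>

#define RST0 0xD0                       /* marker codes RST0..RST7 are 0xD0..0xD7 */

/* Returns 1 = discard marker, 2 = scan ahead, 3 = reprocess the marker later. */
static int classify_marker (int marker, int desired)
{
  if (marker < 0xC0)                        /* below SOF0: not a legal marker code */
    return 2;
  if (marker < RST0 || marker > RST0 + 7)   /* valid non-restart marker */
    return 3;
  if (marker == RST0 + ((desired + 1) & 7) ||
      marker == RST0 + ((desired + 2) & 7))
    return 3;                               /* one of the next two expected restarts */
  if (marker == RST0 + ((desired - 1) & 7) ||
      marker == RST0 + ((desired - 2) & 7))
    return 2;                               /* a restart we already passed */
  return 1;                                 /* desired one, or too far away */
}

int main (void)
{
  printf("%d\n", classify_marker(0xD5, 3)); /* expecting RST3, found RST5 -> 3 */
  printf("%d\n", classify_marker(0xD1, 3)); /* expecting RST3, found RST1 -> 2 */
  return 0;
}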
*/ + for (;;) { + if (marker < (int) M_SOF0) + action = 2; /* invalid marker */ + else if (marker < (int) M_RST0 || marker > (int) M_RST7) + action = 3; /* valid non-restart marker */ + else { + if (marker == ((int) M_RST0 + ((desired+1) & 7)) || + marker == ((int) M_RST0 + ((desired+2) & 7))) + action = 3; /* one of the next two expected restarts */ + else if (marker == ((int) M_RST0 + ((desired-1) & 7)) || + marker == ((int) M_RST0 + ((desired-2) & 7))) + action = 2; /* a prior restart, so advance */ + else + action = 1; /* desired restart or too far away */ + } + TRACEMS2(cinfo, 4, JTRC_RECOVERY_ACTION, marker, action); + switch (action) { + case 1: + /* Discard marker and let entropy decoder resume processing. */ + cinfo->unread_marker = 0; + return TRUE; + case 2: + /* Scan to the next marker, and repeat the decision loop. */ + if (! next_marker(cinfo)) + return FALSE; + marker = cinfo->unread_marker; + break; + case 3: + /* Return without advancing past this marker. */ + /* Entropy decoder will be forced to process an empty segment. */ + return TRUE; + } + } /* end loop */ +} + + +/* + * Reset marker processing state to begin a fresh datastream. + */ + +METHODDEF(void) +reset_marker_reader (j_decompress_ptr cinfo) +{ + my_marker_ptr marker = (my_marker_ptr) cinfo->marker; + + cinfo->comp_info = NULL; /* until allocated by get_sof */ + cinfo->input_scan_number = 0; /* no SOS seen yet */ + cinfo->unread_marker = 0; /* no pending marker */ + marker->pub.saw_SOI = FALSE; /* set internal state too */ + marker->pub.saw_SOF = FALSE; + marker->pub.discarded_bytes = 0; + marker->cur_marker = NULL; +} + + +/* + * Initialize the marker reader module. + * This is called only once, when the decompression object is created. + */ + +GLOBAL(void) +jinit_marker_reader (j_decompress_ptr cinfo) +{ + my_marker_ptr marker; + int i; + + /* Create subobject in permanent pool */ + marker = (my_marker_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + SIZEOF(my_marker_reader)); + cinfo->marker = (struct jpeg_marker_reader *) marker; + /* Initialize public method pointers */ + marker->pub.reset_marker_reader = reset_marker_reader; + marker->pub.read_markers = read_markers; + marker->pub.read_restart_marker = read_restart_marker; + /* Initialize COM/APPn processing. + * By default, we examine and then discard APP0 and APP14, + * but simply discard COM and all other APPn. + */ + marker->process_COM = skip_variable; + marker->length_limit_COM = 0; + for (i = 0; i < 16; i++) { + marker->process_APPn[i] = skip_variable; + marker->length_limit_APPn[i] = 0; + } + marker->process_APPn[0] = get_interesting_appn; + marker->process_APPn[14] = get_interesting_appn; + /* Reset marker processing state */ + reset_marker_reader(cinfo); +} + + +/* + * Control saving of COM and APPn markers into marker_list. + */ + +#ifdef SAVE_MARKERS_SUPPORTED + +GLOBAL(void) +jpeg_save_markers (j_decompress_ptr cinfo, int marker_code, + unsigned int length_limit) +{ + my_marker_ptr marker = (my_marker_ptr) cinfo->marker; + long maxlength; + jpeg_marker_parser_method processor; + + /* Length limit mustn't be larger than what we can allocate + * (should only be a concern in a 16-bit environment). + */ + maxlength = cinfo->mem->max_alloc_chunk - SIZEOF(struct jpeg_marker_struct); + if (((long) length_limit) > maxlength) + length_limit = (unsigned int) maxlength; + + /* Choose processor routine to use. + * APP0/APP14 have special requirements. 
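/*
 * [Editorial illustration, not part of the library source or of this patch]
 * A hedged usage sketch of the marker-saving hooks configured by
 * jpeg_save_markers() here.  jpeg_save_markers(), JPEG_COM, JPEG_APP0 and the
 * marker_list fields are the public libjpeg API; the surrounding application
 * code (error handler, data source, jpeg_read_header call) is assumed to
 * exist elsewhere.
 */
#include <stdio.h>
#include "jpeglib.h"

/* Call before jpeg_read_header(): ask the library to keep COM and APP1 data. */
static void enable_marker_saving (struct jpeg_decompress_struct *cinfo)
{
  jpeg_save_markers(cinfo, JPEG_COM, 0xFFFF);
  jpeg_save_markers(cinfo, JPEG_APP0 + 1, 0xFFFF);
}

/* After jpeg_read_header(): walk the saved markers (APP1 carries Exif/XMP). */
static void list_app1_markers (struct jpeg_decompress_struct *cinfo)
{
  jpeg_saved_marker_ptr m;

  for (m = cinfo->marker_list; m != NULL; m = m->next)
    if (m->marker == JPEG_APP0 + 1)
      printf("APP1 marker, %u of %u bytes saved\n",
             m->data_length, m->original_length);
}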
+ */ + if (length_limit) { + processor = save_marker; + /* If saving APP0/APP14, save at least enough for our internal use. */ + if (marker_code == (int) M_APP0 && length_limit < APP0_DATA_LEN) + length_limit = APP0_DATA_LEN; + else if (marker_code == (int) M_APP14 && length_limit < APP14_DATA_LEN) + length_limit = APP14_DATA_LEN; + } else { + processor = skip_variable; + /* If discarding APP0/APP14, use our regular on-the-fly processor. */ + if (marker_code == (int) M_APP0 || marker_code == (int) M_APP14) + processor = get_interesting_appn; + } + + if (marker_code == (int) M_COM) { + marker->process_COM = processor; + marker->length_limit_COM = length_limit; + } else if (marker_code >= (int) M_APP0 && marker_code <= (int) M_APP15) { + marker->process_APPn[marker_code - (int) M_APP0] = processor; + marker->length_limit_APPn[marker_code - (int) M_APP0] = length_limit; + } else + ERREXIT1(cinfo, JERR_UNKNOWN_MARKER, marker_code); +} + +#endif /* SAVE_MARKERS_SUPPORTED */ + + +/* + * Install a special processing method for COM or APPn markers. + */ + +GLOBAL(void) +jpeg_set_marker_processor (j_decompress_ptr cinfo, int marker_code, + jpeg_marker_parser_method routine) +{ + my_marker_ptr marker = (my_marker_ptr) cinfo->marker; + + if (marker_code == (int) M_COM) + marker->process_COM = routine; + else if (marker_code >= (int) M_APP0 && marker_code <= (int) M_APP15) + marker->process_APPn[marker_code - (int) M_APP0] = routine; + else + ERREXIT1(cinfo, JERR_UNKNOWN_MARKER, marker_code); +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmaster.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmaster.c new file mode 100644 index 000000000..fef72a21b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmaster.c @@ -0,0 +1,531 @@ +/* + * jdmaster.c + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * Modified 2002-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains master control logic for the JPEG decompressor. + * These routines are concerned with selecting the modules to be executed + * and with determining the number of passes and the work to be done in each + * pass. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Private state */ + +typedef struct { + struct jpeg_decomp_master pub; /* public fields */ + + int pass_number; /* # of passes completed */ + + boolean using_merged_upsample; /* TRUE if using merged upsample/cconvert */ + + /* Saved references to initialized quantizer modules, + * in case we need to switch modes. + */ + struct jpeg_color_quantizer * quantizer_1pass; + struct jpeg_color_quantizer * quantizer_2pass; +} my_decomp_master; + +typedef my_decomp_master * my_master_ptr; + + +/* + * Determine whether merged upsample/color conversion should be used. + * CRUCIAL: this must match the actual capabilities of jdmerge.c! 
+ */ + +LOCAL(boolean) +use_merged_upsample (j_decompress_ptr cinfo) +{ +#ifdef UPSAMPLE_MERGING_SUPPORTED + /* Merging is the equivalent of plain box-filter upsampling */ + if (cinfo->do_fancy_upsampling || cinfo->CCIR601_sampling) + return FALSE; + /* jdmerge.c only supports YCC=>RGB color conversion */ + if (cinfo->jpeg_color_space != JCS_YCbCr || cinfo->num_components != 3 || + cinfo->out_color_space != JCS_RGB || + cinfo->out_color_components != RGB_PIXELSIZE) + return FALSE; + /* and it only handles 2h1v or 2h2v sampling ratios */ + if (cinfo->comp_info[0].h_samp_factor != 2 || + cinfo->comp_info[1].h_samp_factor != 1 || + cinfo->comp_info[2].h_samp_factor != 1 || + cinfo->comp_info[0].v_samp_factor > 2 || + cinfo->comp_info[1].v_samp_factor != 1 || + cinfo->comp_info[2].v_samp_factor != 1) + return FALSE; + /* furthermore, it doesn't work if we've scaled the IDCTs differently */ + if (cinfo->comp_info[0].DCT_h_scaled_size != cinfo->min_DCT_h_scaled_size || + cinfo->comp_info[1].DCT_h_scaled_size != cinfo->min_DCT_h_scaled_size || + cinfo->comp_info[2].DCT_h_scaled_size != cinfo->min_DCT_h_scaled_size || + cinfo->comp_info[0].DCT_v_scaled_size != cinfo->min_DCT_v_scaled_size || + cinfo->comp_info[1].DCT_v_scaled_size != cinfo->min_DCT_v_scaled_size || + cinfo->comp_info[2].DCT_v_scaled_size != cinfo->min_DCT_v_scaled_size) + return FALSE; + /* ??? also need to test for upsample-time rescaling, when & if supported */ + return TRUE; /* by golly, it'll work... */ +#else + return FALSE; +#endif +} + + +/* + * Compute output image dimensions and related values. + * NOTE: this is exported for possible use by application. + * Hence it mustn't do anything that can't be done twice. + * Also note that it may be called before the master module is initialized! + */ + +GLOBAL(void) +jpeg_calc_output_dimensions (j_decompress_ptr cinfo) +/* Do computations that are needed before master selection phase. + * This function is used for full decompression. + */ +{ +#ifdef IDCT_SCALING_SUPPORTED + int ci; + jpeg_component_info *compptr; +#endif + + /* Prevent application from calling me at wrong times */ + if (cinfo->global_state != DSTATE_READY) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + /* Compute core output image dimensions and DCT scaling choices. */ + jpeg_core_output_dimensions(cinfo); + +#ifdef IDCT_SCALING_SUPPORTED + + /* In selecting the actual DCT scaling for each component, we try to + * scale up the chroma components via IDCT scaling rather than upsampling. + * This saves time if the upsampler gets to use 1:1 scaling. + * Note this code adapts subsampling ratios which are powers of 2. + */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + int ssize = 1; + while (cinfo->min_DCT_h_scaled_size * ssize <= + (cinfo->do_fancy_upsampling ? DCTSIZE : DCTSIZE / 2) && + (cinfo->max_h_samp_factor % (compptr->h_samp_factor * ssize * 2)) == 0) { + ssize = ssize * 2; + } + compptr->DCT_h_scaled_size = cinfo->min_DCT_h_scaled_size * ssize; + ssize = 1; + while (cinfo->min_DCT_v_scaled_size * ssize <= + (cinfo->do_fancy_upsampling ? DCTSIZE : DCTSIZE / 2) && + (cinfo->max_v_samp_factor % (compptr->v_samp_factor * ssize * 2)) == 0) { + ssize = ssize * 2; + } + compptr->DCT_v_scaled_size = cinfo->min_DCT_v_scaled_size * ssize; + + /* We don't support IDCT ratios larger than 2. 
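/*
 * [Editorial illustration, not part of the library source or of this patch]
 * A small self-contained sketch of the scaled-size selection loop above,
 * using plain ints.  For the common 2h2v YCbCr case decoded at full size
 * (min_DCT_h_scaled_size == 8, DCTSIZE == 8), fancy upsampling lets the
 * chroma IDCT emit 16 samples per block (ssize == 2), so the upsampler sees
 * full-size chroma; with fancy upsampling off, ssize stays 1 and the box
 * upsampler does the 2x instead.  pick_ssize() is a hypothetical helper, not
 * a libjpeg function.
 */
static int pick_ssize (int min_scaled, int dctsize, int fancy,
                       int max_samp, int samp)
{
  int ssize = 1;
  while (min_scaled * ssize <= (fancy ? dctsize : dctsize / 2) &&
         (max_samp % (samp * ssize * 2)) == 0)
    ssize *= 2;
  return ssize;
}
/* pick_ssize(8, 8, 1, 2, 1) == 2 (chroma, fancy upsampling on);
 * pick_ssize(8, 8, 1, 2, 2) == 1 (luma);
 * pick_ssize(8, 8, 0, 2, 1) == 1 (chroma, fancy upsampling off). */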
*/ + if (compptr->DCT_h_scaled_size > compptr->DCT_v_scaled_size * 2) + compptr->DCT_h_scaled_size = compptr->DCT_v_scaled_size * 2; + else if (compptr->DCT_v_scaled_size > compptr->DCT_h_scaled_size * 2) + compptr->DCT_v_scaled_size = compptr->DCT_h_scaled_size * 2; + } + + /* Recompute downsampled dimensions of components; + * application needs to know these if using raw downsampled data. + */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Size in samples, after IDCT scaling */ + compptr->downsampled_width = (JDIMENSION) + jdiv_round_up((long) cinfo->image_width * + (long) (compptr->h_samp_factor * compptr->DCT_h_scaled_size), + (long) (cinfo->max_h_samp_factor * cinfo->block_size)); + compptr->downsampled_height = (JDIMENSION) + jdiv_round_up((long) cinfo->image_height * + (long) (compptr->v_samp_factor * compptr->DCT_v_scaled_size), + (long) (cinfo->max_v_samp_factor * cinfo->block_size)); + } + +#endif /* IDCT_SCALING_SUPPORTED */ + + /* Report number of components in selected colorspace. */ + /* Probably this should be in the color conversion module... */ + switch (cinfo->out_color_space) { + case JCS_GRAYSCALE: + cinfo->out_color_components = 1; + break; + case JCS_RGB: + cinfo->out_color_components = RGB_PIXELSIZE; + break; + case JCS_YCbCr: + cinfo->out_color_components = 3; + break; + case JCS_CMYK: + case JCS_YCCK: + cinfo->out_color_components = 4; + break; + default: /* else must be same colorspace as in file */ + cinfo->out_color_components = cinfo->num_components; + break; + } + cinfo->output_components = (cinfo->quantize_colors ? 1 : + cinfo->out_color_components); + + /* See if upsampler will want to emit more than one row at a time */ + if (use_merged_upsample(cinfo)) + cinfo->rec_outbuf_height = cinfo->max_v_samp_factor; + else + cinfo->rec_outbuf_height = 1; +} + + +/* + * Several decompression processes need to range-limit values to the range + * 0..MAXJSAMPLE; the input value may fall somewhat outside this range + * due to noise introduced by quantization, roundoff error, etc. These + * processes are inner loops and need to be as fast as possible. On most + * machines, particularly CPUs with pipelines or instruction prefetch, + * a (subscript-check-less) C table lookup + * x = sample_range_limit[x]; + * is faster than explicit tests + * if (x < 0) x = 0; + * else if (x > MAXJSAMPLE) x = MAXJSAMPLE; + * These processes all use a common table prepared by the routine below. + * + * For most steps we can mathematically guarantee that the initial value + * of x is within MAXJSAMPLE+1 of the legal range, so a table running from + * -(MAXJSAMPLE+1) to 2*MAXJSAMPLE+1 is sufficient. But for the initial + * limiting step (just after the IDCT), a wildly out-of-range value is + * possible if the input data is corrupt. To avoid any chance of indexing + * off the end of memory and getting a bad-pointer trap, we perform the + * post-IDCT limiting thus: + * x = range_limit[x & MASK]; + * where MASK is 2 bits wider than legal sample data, ie 10 bits for 8-bit + * samples. Under normal circumstances this is more than enough range and + * a correct output will be generated; with bogus input data the mask will + * cause wraparound, and we will safely generate a bogus-but-in-range output. + * For the post-IDCT step, we want to convert the data from signed to unsigned + * representation by adding CENTERJSAMPLE at the same time that we limit it. 
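/*
 * [Editorial illustration, not part of the library source or of this patch]
 * A minimal sketch of the table-lookup clamping idea described above, shown
 * for 8-bit samples with plain C types so it compiles on its own.  Only the
 * "simple" part of the table is built here; the real
 * prepare_range_limit_table() below also appends the wrap-around post-IDCT
 * segment and offsets the pointer by CENTERJSAMPLE.
 */
#include <string.h>

#define MAXJ 255

static unsigned char limit_storage[3 * (MAXJ + 1)];
static unsigned char *range_limit;    /* points at the zero-index position */

static void build_simple_limit_table (void)
{
  int i;
  range_limit = limit_storage + (MAXJ + 1);        /* allow negative subscripts */
  memset(limit_storage, 0, MAXJ + 1);              /* x < 0     -> 0            */
  for (i = 0; i <= MAXJ; i++)
    range_limit[i] = (unsigned char) i;            /* 0..MAXJ   -> x            */
  memset(range_limit + MAXJ + 1, MAXJ, MAXJ + 1);  /* x > MAXJ  -> MAXJ         */
}

/* range_limit[x] now equals (x < 0 ? 0 : x > MAXJ ? MAXJ : x) for any x in
 * -(MAXJ+1) .. 2*MAXJ+1, replacing two compare-and-branch tests per sample. */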
+ * So the post-IDCT limiting table ends up looking like this: + * CENTERJSAMPLE,CENTERJSAMPLE+1,...,MAXJSAMPLE, + * MAXJSAMPLE (repeat 2*(MAXJSAMPLE+1)-CENTERJSAMPLE times), + * 0 (repeat 2*(MAXJSAMPLE+1)-CENTERJSAMPLE times), + * 0,1,...,CENTERJSAMPLE-1 + * Negative inputs select values from the upper half of the table after + * masking. + * + * We can save some space by overlapping the start of the post-IDCT table + * with the simpler range limiting table. The post-IDCT table begins at + * sample_range_limit + CENTERJSAMPLE. + * + * Note that the table is allocated in near data space on PCs; it's small + * enough and used often enough to justify this. + */ + +LOCAL(void) +prepare_range_limit_table (j_decompress_ptr cinfo) +/* Allocate and fill in the sample_range_limit table */ +{ + JSAMPLE * table; + int i; + + table = (JSAMPLE *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + (5 * (MAXJSAMPLE+1) + CENTERJSAMPLE) * SIZEOF(JSAMPLE)); + table += (MAXJSAMPLE+1); /* allow negative subscripts of simple table */ + cinfo->sample_range_limit = table; + /* First segment of "simple" table: limit[x] = 0 for x < 0 */ + MEMZERO(table - (MAXJSAMPLE+1), (MAXJSAMPLE+1) * SIZEOF(JSAMPLE)); + /* Main part of "simple" table: limit[x] = x */ + for (i = 0; i <= MAXJSAMPLE; i++) + table[i] = (JSAMPLE) i; + table += CENTERJSAMPLE; /* Point to where post-IDCT table starts */ + /* End of simple table, rest of first half of post-IDCT table */ + for (i = CENTERJSAMPLE; i < 2*(MAXJSAMPLE+1); i++) + table[i] = MAXJSAMPLE; + /* Second half of post-IDCT table */ + MEMZERO(table + (2 * (MAXJSAMPLE+1)), + (2 * (MAXJSAMPLE+1) - CENTERJSAMPLE) * SIZEOF(JSAMPLE)); + MEMCOPY(table + (4 * (MAXJSAMPLE+1) - CENTERJSAMPLE), + cinfo->sample_range_limit, CENTERJSAMPLE * SIZEOF(JSAMPLE)); +} + + +/* + * Master selection of decompression modules. + * This is done once at jpeg_start_decompress time. We determine + * which modules will be used and give them appropriate initialization calls. + * We also initialize the decompressor input side to begin consuming data. + * + * Since jpeg_read_header has finished, we know what is in the SOF + * and (first) SOS markers. We also have all the application parameter + * settings. + */ + +LOCAL(void) +master_selection (j_decompress_ptr cinfo) +{ + my_master_ptr master = (my_master_ptr) cinfo->master; + boolean use_c_buffer; + long samplesperrow; + JDIMENSION jd_samplesperrow; + + /* Initialize dimensions and other stuff */ + jpeg_calc_output_dimensions(cinfo); + prepare_range_limit_table(cinfo); + + /* Width of an output scanline must be representable as JDIMENSION. */ + samplesperrow = (long) cinfo->output_width * (long) cinfo->out_color_components; + jd_samplesperrow = (JDIMENSION) samplesperrow; + if ((long) jd_samplesperrow != samplesperrow) + ERREXIT(cinfo, JERR_WIDTH_OVERFLOW); + + /* Initialize my private state */ + master->pass_number = 0; + master->using_merged_upsample = use_merged_upsample(cinfo); + + /* Color quantizer selection */ + master->quantizer_1pass = NULL; + master->quantizer_2pass = NULL; + /* No mode changes if not using buffered-image mode. */ + if (! cinfo->quantize_colors || ! cinfo->buffered_image) { + cinfo->enable_1pass_quant = FALSE; + cinfo->enable_external_quant = FALSE; + cinfo->enable_2pass_quant = FALSE; + } + if (cinfo->quantize_colors) { + if (cinfo->raw_data_out) + ERREXIT(cinfo, JERR_NOTIMPL); + /* 2-pass quantizer only works in 3-component color space. 
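/*
 * [Editorial illustration, not part of the library source or of this patch]
 * A hedged sketch of the application-side parameters that drive the quantizer
 * selection logic here, using the public decompression fields.  Set these
 * after jpeg_read_header() and before jpeg_start_decompress(); error handling
 * and the data source are assumed to be set up elsewhere.
 */
#include <stdio.h>
#include "jpeglib.h"

static void request_256_color_output (j_decompress_ptr cinfo)
{
  cinfo->quantize_colors = TRUE;             /* emit colormapped output */
  cinfo->two_pass_quantize = TRUE;           /* better quality; needs the
                                              * full-image buffer discussed here */
  cinfo->desired_number_of_colors = 256;
  cinfo->dither_mode = JDITHER_FS;           /* Floyd-Steinberg dithering */
  /* Alternatively, supply cinfo->colormap yourself to map to a fixed palette;
   * master_selection() then routes the job through the 2-pass code. */
}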
*/ + if (cinfo->out_color_components != 3) { + cinfo->enable_1pass_quant = TRUE; + cinfo->enable_external_quant = FALSE; + cinfo->enable_2pass_quant = FALSE; + cinfo->colormap = NULL; + } else if (cinfo->colormap != NULL) { + cinfo->enable_external_quant = TRUE; + } else if (cinfo->two_pass_quantize) { + cinfo->enable_2pass_quant = TRUE; + } else { + cinfo->enable_1pass_quant = TRUE; + } + + if (cinfo->enable_1pass_quant) { +#ifdef QUANT_1PASS_SUPPORTED + jinit_1pass_quantizer(cinfo); + master->quantizer_1pass = cinfo->cquantize; +#else + ERREXIT(cinfo, JERR_NOT_COMPILED); +#endif + } + + /* We use the 2-pass code to map to external colormaps. */ + if (cinfo->enable_2pass_quant || cinfo->enable_external_quant) { +#ifdef QUANT_2PASS_SUPPORTED + jinit_2pass_quantizer(cinfo); + master->quantizer_2pass = cinfo->cquantize; +#else + ERREXIT(cinfo, JERR_NOT_COMPILED); +#endif + } + /* If both quantizers are initialized, the 2-pass one is left active; + * this is necessary for starting with quantization to an external map. + */ + } + + /* Post-processing: in particular, color conversion first */ + if (! cinfo->raw_data_out) { + if (master->using_merged_upsample) { +#ifdef UPSAMPLE_MERGING_SUPPORTED + jinit_merged_upsampler(cinfo); /* does color conversion too */ +#else + ERREXIT(cinfo, JERR_NOT_COMPILED); +#endif + } else { + jinit_color_deconverter(cinfo); + jinit_upsampler(cinfo); + } + jinit_d_post_controller(cinfo, cinfo->enable_2pass_quant); + } + /* Inverse DCT */ + jinit_inverse_dct(cinfo); + /* Entropy decoding: either Huffman or arithmetic coding. */ + if (cinfo->arith_code) + jinit_arith_decoder(cinfo); + else { + jinit_huff_decoder(cinfo); + } + + /* Initialize principal buffer controllers. */ + use_c_buffer = cinfo->inputctl->has_multiple_scans || cinfo->buffered_image; + jinit_d_coef_controller(cinfo, use_c_buffer); + + if (! cinfo->raw_data_out) + jinit_d_main_controller(cinfo, FALSE /* never need full buffer here */); + + /* We can now tell the memory manager to allocate virtual arrays. */ + (*cinfo->mem->realize_virt_arrays) ((j_common_ptr) cinfo); + + /* Initialize input side of decompressor to consume first scan. */ + (*cinfo->inputctl->start_input_pass) (cinfo); + +#ifdef D_MULTISCAN_FILES_SUPPORTED + /* If jpeg_start_decompress will read the whole file, initialize + * progress monitoring appropriately. The input step is counted + * as one pass. + */ + if (cinfo->progress != NULL && ! cinfo->buffered_image && + cinfo->inputctl->has_multiple_scans) { + int nscans; + /* Estimate number of scans to set pass_limit. */ + if (cinfo->progressive_mode) { + /* Arbitrarily estimate 2 interleaved DC scans + 3 AC scans/component. */ + nscans = 2 + 3 * cinfo->num_components; + } else { + /* For a nonprogressive multiscan file, estimate 1 scan per component. */ + nscans = cinfo->num_components; + } + cinfo->progress->pass_counter = 0L; + cinfo->progress->pass_limit = (long) cinfo->total_iMCU_rows * nscans; + cinfo->progress->completed_passes = 0; + cinfo->progress->total_passes = (cinfo->enable_2pass_quant ? 3 : 2); + /* Count the input pass as done */ + master->pass_number++; + } +#endif /* D_MULTISCAN_FILES_SUPPORTED */ +} + + +/* + * Per-pass setup. + * This is called at the beginning of each output pass. We determine which + * modules will be active during this pass and give them appropriate + * start_pass calls. We also set is_dummy_pass to indicate whether this + * is a "real" output pass or a dummy pass for color quantization. 
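/*
 * [Editorial illustration, not part of the library source or of this patch]
 * A hedged sketch of how an application consumes the pass_counter /
 * pass_limit bookkeeping that master_selection() initializes above, via the
 * public jpeg_progress_mgr hook.  The percentage formula follows the scheme
 * in libjpeg's documentation; my_progress_monitor is an application-chosen
 * name.
 */
#include <stdio.h>
#include "jpeglib.h"

static void my_progress_monitor (j_common_ptr cinfo)
{
  struct jpeg_progress_mgr *p = cinfo->progress;
  long percent = 100L * (p->completed_passes * p->pass_limit + p->pass_counter)
                 / (p->total_passes * p->pass_limit);
  fprintf(stderr, "\rdecoding: %ld%%", percent);
}

/* Install before jpeg_start_decompress():
 *   struct jpeg_progress_mgr progress;
 *   progress.progress_monitor = my_progress_monitor;
 *   cinfo.progress = &progress;
 */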
+ * (In the latter case, jdapistd.c will crank the pass to completion.) + */ + +METHODDEF(void) +prepare_for_output_pass (j_decompress_ptr cinfo) +{ + my_master_ptr master = (my_master_ptr) cinfo->master; + + if (master->pub.is_dummy_pass) { +#ifdef QUANT_2PASS_SUPPORTED + /* Final pass of 2-pass quantization */ + master->pub.is_dummy_pass = FALSE; + (*cinfo->cquantize->start_pass) (cinfo, FALSE); + (*cinfo->post->start_pass) (cinfo, JBUF_CRANK_DEST); + (*cinfo->main->start_pass) (cinfo, JBUF_CRANK_DEST); +#else + ERREXIT(cinfo, JERR_NOT_COMPILED); +#endif /* QUANT_2PASS_SUPPORTED */ + } else { + if (cinfo->quantize_colors && cinfo->colormap == NULL) { + /* Select new quantization method */ + if (cinfo->two_pass_quantize && cinfo->enable_2pass_quant) { + cinfo->cquantize = master->quantizer_2pass; + master->pub.is_dummy_pass = TRUE; + } else if (cinfo->enable_1pass_quant) { + cinfo->cquantize = master->quantizer_1pass; + } else { + ERREXIT(cinfo, JERR_MODE_CHANGE); + } + } + (*cinfo->idct->start_pass) (cinfo); + (*cinfo->coef->start_output_pass) (cinfo); + if (! cinfo->raw_data_out) { + if (! master->using_merged_upsample) + (*cinfo->cconvert->start_pass) (cinfo); + (*cinfo->upsample->start_pass) (cinfo); + if (cinfo->quantize_colors) + (*cinfo->cquantize->start_pass) (cinfo, master->pub.is_dummy_pass); + (*cinfo->post->start_pass) (cinfo, + (master->pub.is_dummy_pass ? JBUF_SAVE_AND_PASS : JBUF_PASS_THRU)); + (*cinfo->main->start_pass) (cinfo, JBUF_PASS_THRU); + } + } + + /* Set up progress monitor's pass info if present */ + if (cinfo->progress != NULL) { + cinfo->progress->completed_passes = master->pass_number; + cinfo->progress->total_passes = master->pass_number + + (master->pub.is_dummy_pass ? 2 : 1); + /* In buffered-image mode, we assume one more output pass if EOI not + * yet reached, but no more passes if EOI has been reached. + */ + if (cinfo->buffered_image && ! cinfo->inputctl->eoi_reached) { + cinfo->progress->total_passes += (cinfo->enable_2pass_quant ? 2 : 1); + } + } +} + + +/* + * Finish up at end of an output pass. + */ + +METHODDEF(void) +finish_output_pass (j_decompress_ptr cinfo) +{ + my_master_ptr master = (my_master_ptr) cinfo->master; + + if (cinfo->quantize_colors) + (*cinfo->cquantize->finish_pass) (cinfo); + master->pass_number++; +} + + +#ifdef D_MULTISCAN_FILES_SUPPORTED + +/* + * Switch to a new external colormap between output passes. + */ + +GLOBAL(void) +jpeg_new_colormap (j_decompress_ptr cinfo) +{ + my_master_ptr master = (my_master_ptr) cinfo->master; + + /* Prevent application from calling me at wrong times */ + if (cinfo->global_state != DSTATE_BUFIMAGE) + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + + if (cinfo->quantize_colors && cinfo->enable_external_quant && + cinfo->colormap != NULL) { + /* Select 2-pass quantizer for external colormap use */ + cinfo->cquantize = master->quantizer_2pass; + /* Notify quantizer of colormap change */ + (*cinfo->cquantize->new_color_map) (cinfo); + master->pub.is_dummy_pass = FALSE; /* just in case */ + } else + ERREXIT(cinfo, JERR_MODE_CHANGE); +} + +#endif /* D_MULTISCAN_FILES_SUPPORTED */ + + +/* + * Initialize master decompression control and select active modules. + * This is performed at the start of jpeg_start_decompress. 
+ */ + +GLOBAL(void) +jinit_master_decompress (j_decompress_ptr cinfo) +{ + my_master_ptr master; + + master = (my_master_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_decomp_master)); + cinfo->master = (struct jpeg_decomp_master *) master; + master->pub.prepare_for_output_pass = prepare_for_output_pass; + master->pub.finish_output_pass = finish_output_pass; + + master->pub.is_dummy_pass = FALSE; + + master_selection(cinfo); +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmerge.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmerge.c new file mode 100644 index 000000000..37444468c --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdmerge.c @@ -0,0 +1,400 @@ +/* + * jdmerge.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains code for merged upsampling/color conversion. + * + * This file combines functions from jdsample.c and jdcolor.c; + * read those files first to understand what's going on. + * + * When the chroma components are to be upsampled by simple replication + * (ie, box filtering), we can save some work in color conversion by + * calculating all the output pixels corresponding to a pair of chroma + * samples at one time. In the conversion equations + * R = Y + K1 * Cr + * G = Y + K2 * Cb + K3 * Cr + * B = Y + K4 * Cb + * only the Y term varies among the group of pixels corresponding to a pair + * of chroma samples, so the rest of the terms can be calculated just once. + * At typical sampling ratios, this eliminates half or three-quarters of the + * multiplications needed for color conversion. + * + * This file currently provides implementations for the following cases: + * YCbCr => RGB color conversion only. + * Sampling ratios of 2h1v or 2h2v. + * No scaling needed at upsample time. + * Corner-aligned (non-CCIR601) sampling alignment. + * Other special cases could be added, but in most applications these are + * the only common cases. (For uncommon cases we fall back on the more + * general code in jdsample.c and jdcolor.c.) + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + +#ifdef UPSAMPLE_MERGING_SUPPORTED + + +/* Private subobject */ + +typedef struct { + struct jpeg_upsampler pub; /* public fields */ + + /* Pointer to routine to do actual upsampling/conversion of one row group */ + JMETHOD(void, upmethod, (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION in_row_group_ctr, + JSAMPARRAY output_buf)); + + /* Private state for YCC->RGB conversion */ + int * Cr_r_tab; /* => table for Cr to R conversion */ + int * Cb_b_tab; /* => table for Cb to B conversion */ + INT32 * Cr_g_tab; /* => table for Cr to G conversion */ + INT32 * Cb_g_tab; /* => table for Cb to G conversion */ + + /* For 2:1 vertical sampling, we produce two output rows at a time. + * We need a "spare" row buffer to hold the second output row if the + * application provides just a one-row buffer; we also use the spare + * to discard the dummy last row if the image height is odd. 
+ */ + JSAMPROW spare_row; + boolean spare_full; /* T if spare buffer is occupied */ + + JDIMENSION out_row_width; /* samples per output row */ + JDIMENSION rows_to_go; /* counts rows remaining in image */ +} my_upsampler; + +typedef my_upsampler * my_upsample_ptr; + +#define SCALEBITS 16 /* speediest right-shift on some machines */ +#define ONE_HALF ((INT32) 1 << (SCALEBITS-1)) +#define FIX(x) ((INT32) ((x) * (1L<<SCALEBITS) + 0.5)) + + +/* + * Initialize tables for YCC->RGB colorspace conversion. + * This is taken directly from jdcolor.c; see that file for more info. + */ + +LOCAL(void) +build_ycc_rgb_table (j_decompress_ptr cinfo) +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + int i; + INT32 x; + SHIFT_TEMPS + + upsample->Cr_r_tab = (int *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + (MAXJSAMPLE+1) * SIZEOF(int)); + upsample->Cb_b_tab = (int *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + (MAXJSAMPLE+1) * SIZEOF(int)); + upsample->Cr_g_tab = (INT32 *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + (MAXJSAMPLE+1) * SIZEOF(INT32)); + upsample->Cb_g_tab = (INT32 *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + (MAXJSAMPLE+1) * SIZEOF(INT32)); + + for (i = 0, x = -CENTERJSAMPLE; i <= MAXJSAMPLE; i++, x++) { + /* i is the actual input pixel value, in the range 0..MAXJSAMPLE */ + /* The Cb or Cr value we are thinking of is x = i - CENTERJSAMPLE */ + /* Cr=>R value is nearest int to 1.40200 * x */ + upsample->Cr_r_tab[i] = (int) + RIGHT_SHIFT(FIX(1.40200) * x + ONE_HALF, SCALEBITS); + /* Cb=>B value is nearest int to 1.77200 * x */ + upsample->Cb_b_tab[i] = (int) + RIGHT_SHIFT(FIX(1.77200) * x + ONE_HALF, SCALEBITS); + /* Cr=>G value is scaled-up -0.71414 * x */ + upsample->Cr_g_tab[i] = (- FIX(0.71414)) * x; + /* Cb=>G value is scaled-up -0.34414 * x */ + /* We also add in ONE_HALF so that need not do it in inner loop */ + upsample->Cb_g_tab[i] = (- FIX(0.34414)) * x + ONE_HALF; + } +} + + +/* + * Initialize for an upsampling pass. + */ + +METHODDEF(void) +start_pass_merged_upsample (j_decompress_ptr cinfo) +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + + /* Mark the spare buffer empty */ + upsample->spare_full = FALSE; + /* Initialize total-height counter for detecting bottom of image */ + upsample->rows_to_go = cinfo->output_height; +} + + +/* + * Control routine to do upsampling (and color conversion). + * + * The control routine just handles the row buffering considerations. + */ + +METHODDEF(void) +merged_2v_upsample (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail) +/* 2:1 vertical sampling case: may need a spare row. */ +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + JSAMPROW work_ptrs[2]; + JDIMENSION num_rows; /* number of rows returned to caller */ + + if (upsample->spare_full) { + /* If we have a spare row saved from a previous cycle, just return it. */ + jcopy_sample_rows(& upsample->spare_row, 0, output_buf + *out_row_ctr, 0, + 1, upsample->out_row_width); + num_rows = 1; + upsample->spare_full = FALSE; + } else { + /* Figure number of rows to return to caller. */ + num_rows = 2; + /* Not more than the distance to the end of the image.
*/ + if (num_rows > upsample->rows_to_go) + num_rows = upsample->rows_to_go; + /* And not more than what the client can accept: */ + out_rows_avail -= *out_row_ctr; + if (num_rows > out_rows_avail) + num_rows = out_rows_avail; + /* Create output pointer array for upsampler. */ + work_ptrs[0] = output_buf[*out_row_ctr]; + if (num_rows > 1) { + work_ptrs[1] = output_buf[*out_row_ctr + 1]; + } else { + work_ptrs[1] = upsample->spare_row; + upsample->spare_full = TRUE; + } + /* Now do the upsampling. */ + (*upsample->upmethod) (cinfo, input_buf, *in_row_group_ctr, work_ptrs); + } + + /* Adjust counts */ + *out_row_ctr += num_rows; + upsample->rows_to_go -= num_rows; + /* When the buffer is emptied, declare this input row group consumed */ + if (! upsample->spare_full) + (*in_row_group_ctr)++; +} + + +METHODDEF(void) +merged_1v_upsample (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail) +/* 1:1 vertical sampling case: much easier, never need a spare row. */ +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + + /* Just do the upsampling. */ + (*upsample->upmethod) (cinfo, input_buf, *in_row_group_ctr, + output_buf + *out_row_ctr); + /* Adjust counts */ + (*out_row_ctr)++; + (*in_row_group_ctr)++; +} + + +/* + * These are the routines invoked by the control routines to do + * the actual upsampling/conversion. One row group is processed per call. + * + * Note: since we may be writing directly into application-supplied buffers, + * we have to be honest about the output width; we can't assume the buffer + * has been rounded up to an even width. + */ + + +/* + * Upsample and color convert for the case of 2:1 horizontal and 1:1 vertical. 
+ */ + +METHODDEF(void) +h2v1_merged_upsample (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION in_row_group_ctr, + JSAMPARRAY output_buf) +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + register int y, cred, cgreen, cblue; + int cb, cr; + register JSAMPROW outptr; + JSAMPROW inptr0, inptr1, inptr2; + JDIMENSION col; + /* copy these pointers into registers if possible */ + register JSAMPLE * range_limit = cinfo->sample_range_limit; + int * Crrtab = upsample->Cr_r_tab; + int * Cbbtab = upsample->Cb_b_tab; + INT32 * Crgtab = upsample->Cr_g_tab; + INT32 * Cbgtab = upsample->Cb_g_tab; + SHIFT_TEMPS + + inptr0 = input_buf[0][in_row_group_ctr]; + inptr1 = input_buf[1][in_row_group_ctr]; + inptr2 = input_buf[2][in_row_group_ctr]; + outptr = output_buf[0]; + /* Loop for each pair of output pixels */ + for (col = cinfo->output_width >> 1; col > 0; col--) { + /* Do the chroma part of the calculation */ + cb = GETJSAMPLE(*inptr1++); + cr = GETJSAMPLE(*inptr2++); + cred = Crrtab[cr]; + cgreen = (int) RIGHT_SHIFT(Cbgtab[cb] + Crgtab[cr], SCALEBITS); + cblue = Cbbtab[cb]; + /* Fetch 2 Y values and emit 2 pixels */ + y = GETJSAMPLE(*inptr0++); + outptr[RGB_RED] = range_limit[y + cred]; + outptr[RGB_GREEN] = range_limit[y + cgreen]; + outptr[RGB_BLUE] = range_limit[y + cblue]; + outptr += RGB_PIXELSIZE; + y = GETJSAMPLE(*inptr0++); + outptr[RGB_RED] = range_limit[y + cred]; + outptr[RGB_GREEN] = range_limit[y + cgreen]; + outptr[RGB_BLUE] = range_limit[y + cblue]; + outptr += RGB_PIXELSIZE; + } + /* If image width is odd, do the last output column separately */ + if (cinfo->output_width & 1) { + cb = GETJSAMPLE(*inptr1); + cr = GETJSAMPLE(*inptr2); + cred = Crrtab[cr]; + cgreen = (int) RIGHT_SHIFT(Cbgtab[cb] + Crgtab[cr], SCALEBITS); + cblue = Cbbtab[cb]; + y = GETJSAMPLE(*inptr0); + outptr[RGB_RED] = range_limit[y + cred]; + outptr[RGB_GREEN] = range_limit[y + cgreen]; + outptr[RGB_BLUE] = range_limit[y + cblue]; + } +} + + +/* + * Upsample and color convert for the case of 2:1 horizontal and 2:1 vertical. 
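/*
 * [Editorial illustration, not part of the library source or of this patch]
 * A worked example of the SCALEBITS fixed-point scheme used by
 * build_ycc_rgb_table() and the merged upsamplers above, written with plain C
 * types.  FIX(1.40200) stores the constant as round(1.402 * 2^16);
 * multiplying by the signed chroma value and shifting back recovers the
 * nearest integer, so Cr = 255 (x = 127) gives round(1.402 * 127) = 178 as
 * the red offset.  The library wraps the shift in its RIGHT_SHIFT() macro so
 * that negative products are handled portably; most compilers perform an
 * arithmetic shift here.
 */
static int cr_to_red_offset (int cr)       /* cr in 0..255 */
{
  long fix_1_402 = (long) (1.40200 * (1L << 16) + 0.5);
  long one_half  = 1L << 15;
  int  x = cr - 128;                       /* CENTERJSAMPLE for 8-bit data */
  return (int) ((fix_1_402 * x + one_half) >> 16);
}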
+ */ + +METHODDEF(void) +h2v2_merged_upsample (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION in_row_group_ctr, + JSAMPARRAY output_buf) +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + register int y, cred, cgreen, cblue; + int cb, cr; + register JSAMPROW outptr0, outptr1; + JSAMPROW inptr00, inptr01, inptr1, inptr2; + JDIMENSION col; + /* copy these pointers into registers if possible */ + register JSAMPLE * range_limit = cinfo->sample_range_limit; + int * Crrtab = upsample->Cr_r_tab; + int * Cbbtab = upsample->Cb_b_tab; + INT32 * Crgtab = upsample->Cr_g_tab; + INT32 * Cbgtab = upsample->Cb_g_tab; + SHIFT_TEMPS + + inptr00 = input_buf[0][in_row_group_ctr*2]; + inptr01 = input_buf[0][in_row_group_ctr*2 + 1]; + inptr1 = input_buf[1][in_row_group_ctr]; + inptr2 = input_buf[2][in_row_group_ctr]; + outptr0 = output_buf[0]; + outptr1 = output_buf[1]; + /* Loop for each group of output pixels */ + for (col = cinfo->output_width >> 1; col > 0; col--) { + /* Do the chroma part of the calculation */ + cb = GETJSAMPLE(*inptr1++); + cr = GETJSAMPLE(*inptr2++); + cred = Crrtab[cr]; + cgreen = (int) RIGHT_SHIFT(Cbgtab[cb] + Crgtab[cr], SCALEBITS); + cblue = Cbbtab[cb]; + /* Fetch 4 Y values and emit 4 pixels */ + y = GETJSAMPLE(*inptr00++); + outptr0[RGB_RED] = range_limit[y + cred]; + outptr0[RGB_GREEN] = range_limit[y + cgreen]; + outptr0[RGB_BLUE] = range_limit[y + cblue]; + outptr0 += RGB_PIXELSIZE; + y = GETJSAMPLE(*inptr00++); + outptr0[RGB_RED] = range_limit[y + cred]; + outptr0[RGB_GREEN] = range_limit[y + cgreen]; + outptr0[RGB_BLUE] = range_limit[y + cblue]; + outptr0 += RGB_PIXELSIZE; + y = GETJSAMPLE(*inptr01++); + outptr1[RGB_RED] = range_limit[y + cred]; + outptr1[RGB_GREEN] = range_limit[y + cgreen]; + outptr1[RGB_BLUE] = range_limit[y + cblue]; + outptr1 += RGB_PIXELSIZE; + y = GETJSAMPLE(*inptr01++); + outptr1[RGB_RED] = range_limit[y + cred]; + outptr1[RGB_GREEN] = range_limit[y + cgreen]; + outptr1[RGB_BLUE] = range_limit[y + cblue]; + outptr1 += RGB_PIXELSIZE; + } + /* If image width is odd, do the last output column separately */ + if (cinfo->output_width & 1) { + cb = GETJSAMPLE(*inptr1); + cr = GETJSAMPLE(*inptr2); + cred = Crrtab[cr]; + cgreen = (int) RIGHT_SHIFT(Cbgtab[cb] + Crgtab[cr], SCALEBITS); + cblue = Cbbtab[cb]; + y = GETJSAMPLE(*inptr00); + outptr0[RGB_RED] = range_limit[y + cred]; + outptr0[RGB_GREEN] = range_limit[y + cgreen]; + outptr0[RGB_BLUE] = range_limit[y + cblue]; + y = GETJSAMPLE(*inptr01); + outptr1[RGB_RED] = range_limit[y + cred]; + outptr1[RGB_GREEN] = range_limit[y + cgreen]; + outptr1[RGB_BLUE] = range_limit[y + cblue]; + } +} + + +/* + * Module initialization routine for merged upsampling/color conversion. + * + * NB: this is called under the conditions determined by use_merged_upsample() + * in jdmaster.c. That routine MUST correspond to the actual capabilities + * of this module; no safety checks are made here. 
+ */ + +GLOBAL(void) +jinit_merged_upsampler (j_decompress_ptr cinfo) +{ + my_upsample_ptr upsample; + + upsample = (my_upsample_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_upsampler)); + cinfo->upsample = (struct jpeg_upsampler *) upsample; + upsample->pub.start_pass = start_pass_merged_upsample; + upsample->pub.need_context_rows = FALSE; + + upsample->out_row_width = cinfo->output_width * cinfo->out_color_components; + + if (cinfo->max_v_samp_factor == 2) { + upsample->pub.upsample = merged_2v_upsample; + upsample->upmethod = h2v2_merged_upsample; + /* Allocate a spare row buffer */ + upsample->spare_row = (JSAMPROW) + (*cinfo->mem->alloc_large) ((j_common_ptr) cinfo, JPOOL_IMAGE, + (size_t) (upsample->out_row_width * SIZEOF(JSAMPLE))); + } else { + upsample->pub.upsample = merged_1v_upsample; + upsample->upmethod = h2v1_merged_upsample; + /* No spare row needed */ + upsample->spare_row = NULL; + } + + build_ycc_rgb_table(cinfo); +} + +#endif /* UPSAMPLE_MERGING_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdpostct.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdpostct.c new file mode 100644 index 000000000..571563d72 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdpostct.c @@ -0,0 +1,290 @@ +/* + * jdpostct.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains the decompression postprocessing controller. + * This controller manages the upsampling, color conversion, and color + * quantization/reduction steps; specifically, it controls the buffering + * between upsample/color conversion and color quantization/reduction. + * + * If no color quantization/reduction is required, then this module has no + * work to do, and it just hands off to the upsample/color conversion code. + * An integrated upsample/convert/quantize process would replace this module + * entirely. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Private buffer controller object */ + +typedef struct { + struct jpeg_d_post_controller pub; /* public fields */ + + /* Color quantization source buffer: this holds output data from + * the upsample/color conversion step to be passed to the quantizer. + * For two-pass color quantization, we need a full-image buffer; + * for one-pass operation, a strip buffer is sufficient. 
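/*
 * [Editorial illustration, not part of the library source or of this patch]
 * A rough size comparison for the two buffering modes described above,
 * assuming an 800x600 RGB output with max_v_samp_factor == 2 and 8-bit
 * samples.  The figures are only indicative; the real allocations are made
 * through the jpeg memory manager in jinit_d_post_controller() below.
 *
 *   one-pass strip buffer:  800 * 3 * 2   =     4,800 bytes
 *   two-pass whole image:   800 * 3 * 600 = 1,440,000 bytes (a virtual array,
 *                           which the memory manager may spill to backing
 *                           store on memory-constrained targets)
 */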
+ */ + jvirt_sarray_ptr whole_image; /* virtual array, or NULL if one-pass */ + JSAMPARRAY buffer; /* strip buffer, or current strip of virtual */ + JDIMENSION strip_height; /* buffer size in rows */ + /* for two-pass mode only: */ + JDIMENSION starting_row; /* row # of first row in current strip */ + JDIMENSION next_row; /* index of next row to fill/empty in strip */ +} my_post_controller; + +typedef my_post_controller * my_post_ptr; + + +/* Forward declarations */ +METHODDEF(void) post_process_1pass + JPP((j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail)); +#ifdef QUANT_2PASS_SUPPORTED +METHODDEF(void) post_process_prepass + JPP((j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail)); +METHODDEF(void) post_process_2pass + JPP((j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail)); +#endif + + +/* + * Initialize for a processing pass. + */ + +METHODDEF(void) +start_pass_dpost (j_decompress_ptr cinfo, J_BUF_MODE pass_mode) +{ + my_post_ptr post = (my_post_ptr) cinfo->post; + + switch (pass_mode) { + case JBUF_PASS_THRU: + if (cinfo->quantize_colors) { + /* Single-pass processing with color quantization. */ + post->pub.post_process_data = post_process_1pass; + /* We could be doing buffered-image output before starting a 2-pass + * color quantization; in that case, jinit_d_post_controller did not + * allocate a strip buffer. Use the virtual-array buffer as workspace. + */ + if (post->buffer == NULL) { + post->buffer = (*cinfo->mem->access_virt_sarray) + ((j_common_ptr) cinfo, post->whole_image, + (JDIMENSION) 0, post->strip_height, TRUE); + } + } else { + /* For single-pass processing without color quantization, + * I have no work to do; just call the upsampler directly. + */ + post->pub.post_process_data = cinfo->upsample->upsample; + } + break; +#ifdef QUANT_2PASS_SUPPORTED + case JBUF_SAVE_AND_PASS: + /* First pass of 2-pass quantization */ + if (post->whole_image == NULL) + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + post->pub.post_process_data = post_process_prepass; + break; + case JBUF_CRANK_DEST: + /* Second pass of 2-pass quantization */ + if (post->whole_image == NULL) + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + post->pub.post_process_data = post_process_2pass; + break; +#endif /* QUANT_2PASS_SUPPORTED */ + default: + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); + break; + } + post->starting_row = post->next_row = 0; +} + + +/* + * Process some data in the one-pass (strip buffer) case. + * This is used for color precision reduction as well as one-pass quantization. + */ + +METHODDEF(void) +post_process_1pass (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail) +{ + my_post_ptr post = (my_post_ptr) cinfo->post; + JDIMENSION num_rows, max_rows; + + /* Fill the buffer, but not more than what we can dump out in one go. */ + /* Note we rely on the upsampler to detect bottom of image. 
*/ + max_rows = out_rows_avail - *out_row_ctr; + if (max_rows > post->strip_height) + max_rows = post->strip_height; + num_rows = 0; + (*cinfo->upsample->upsample) (cinfo, + input_buf, in_row_group_ctr, in_row_groups_avail, + post->buffer, &num_rows, max_rows); + /* Quantize and emit data. */ + (*cinfo->cquantize->color_quantize) (cinfo, + post->buffer, output_buf + *out_row_ctr, (int) num_rows); + *out_row_ctr += num_rows; +} + + +#ifdef QUANT_2PASS_SUPPORTED + +/* + * Process some data in the first pass of 2-pass quantization. + */ + +METHODDEF(void) +post_process_prepass (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail) +{ + my_post_ptr post = (my_post_ptr) cinfo->post; + JDIMENSION old_next_row, num_rows; + + /* Reposition virtual buffer if at start of strip. */ + if (post->next_row == 0) { + post->buffer = (*cinfo->mem->access_virt_sarray) + ((j_common_ptr) cinfo, post->whole_image, + post->starting_row, post->strip_height, TRUE); + } + + /* Upsample some data (up to a strip height's worth). */ + old_next_row = post->next_row; + (*cinfo->upsample->upsample) (cinfo, + input_buf, in_row_group_ctr, in_row_groups_avail, + post->buffer, &post->next_row, post->strip_height); + + /* Allow quantizer to scan new data. No data is emitted, */ + /* but we advance out_row_ctr so outer loop can tell when we're done. */ + if (post->next_row > old_next_row) { + num_rows = post->next_row - old_next_row; + (*cinfo->cquantize->color_quantize) (cinfo, post->buffer + old_next_row, + (JSAMPARRAY) NULL, (int) num_rows); + *out_row_ctr += num_rows; + } + + /* Advance if we filled the strip. */ + if (post->next_row >= post->strip_height) { + post->starting_row += post->strip_height; + post->next_row = 0; + } +} + + +/* + * Process some data in the second pass of 2-pass quantization. + */ + +METHODDEF(void) +post_process_2pass (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail) +{ + my_post_ptr post = (my_post_ptr) cinfo->post; + JDIMENSION num_rows, max_rows; + + /* Reposition virtual buffer if at start of strip. */ + if (post->next_row == 0) { + post->buffer = (*cinfo->mem->access_virt_sarray) + ((j_common_ptr) cinfo, post->whole_image, + post->starting_row, post->strip_height, FALSE); + } + + /* Determine number of rows to emit. */ + num_rows = post->strip_height - post->next_row; /* available in strip */ + max_rows = out_rows_avail - *out_row_ctr; /* available in output area */ + if (num_rows > max_rows) + num_rows = max_rows; + /* We have to check bottom of image here, can't depend on upsampler. */ + max_rows = cinfo->output_height - post->starting_row; + if (num_rows > max_rows) + num_rows = max_rows; + + /* Quantize and emit data. */ + (*cinfo->cquantize->color_quantize) (cinfo, + post->buffer + post->next_row, output_buf + *out_row_ctr, + (int) num_rows); + *out_row_ctr += num_rows; + + /* Advance if we filled the strip. */ + post->next_row += num_rows; + if (post->next_row >= post->strip_height) { + post->starting_row += post->strip_height; + post->next_row = 0; + } +} + +#endif /* QUANT_2PASS_SUPPORTED */ + + +/* + * Initialize postprocessing controller. 
+ */ + +GLOBAL(void) +jinit_d_post_controller (j_decompress_ptr cinfo, boolean need_full_buffer) +{ + my_post_ptr post; + + post = (my_post_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_post_controller)); + cinfo->post = (struct jpeg_d_post_controller *) post; + post->pub.start_pass = start_pass_dpost; + post->whole_image = NULL; /* flag for no virtual arrays */ + post->buffer = NULL; /* flag for no strip buffer */ + + /* Create the quantization buffer, if needed */ + if (cinfo->quantize_colors) { + /* The buffer strip height is max_v_samp_factor, which is typically + * an efficient number of rows for upsampling to return. + * (In the presence of output rescaling, we might want to be smarter?) + */ + post->strip_height = (JDIMENSION) cinfo->max_v_samp_factor; + if (need_full_buffer) { + /* Two-pass color quantization: need full-image storage. */ + /* We round up the number of rows to a multiple of the strip height. */ +#ifdef QUANT_2PASS_SUPPORTED + post->whole_image = (*cinfo->mem->request_virt_sarray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, FALSE, + cinfo->output_width * cinfo->out_color_components, + (JDIMENSION) jround_up((long) cinfo->output_height, + (long) post->strip_height), + post->strip_height); +#else + ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); +#endif /* QUANT_2PASS_SUPPORTED */ + } else { + /* One-pass color quantization: just make a strip buffer. */ + post->buffer = (*cinfo->mem->alloc_sarray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + cinfo->output_width * cinfo->out_color_components, + post->strip_height); + } + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdsample.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdsample.c new file mode 100644 index 000000000..7bc8885b0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdsample.c @@ -0,0 +1,361 @@ +/* + * jdsample.c + * + * Copyright (C) 1991-1996, Thomas G. Lane. + * Modified 2002-2008 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains upsampling routines. + * + * Upsampling input data is counted in "row groups". A row group + * is defined to be (v_samp_factor * DCT_v_scaled_size / min_DCT_v_scaled_size) + * sample rows of each component. Upsampling will normally produce + * max_v_samp_factor pixel rows from each row group (but this could vary + * if the upsampler is applying a scale factor of its own). + * + * An excellent reference for image resampling is + * Digital Image Warping, George Wolberg, 1990. + * Pub. by IEEE Computer Society Press, Los Alamitos, CA. ISBN 0-8186-8944-7. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Pointer to routine to upsample a single component */ +typedef JMETHOD(void, upsample1_ptr, + (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr)); + +/* Private subobject */ + +typedef struct { + struct jpeg_upsampler pub; /* public fields */ + + /* Color conversion buffer. When using separate upsampling and color + * conversion steps, this buffer holds one upsampled row group until it + * has been color converted and output. + * Note: we do not allocate any storage for component(s) which are full-size, + * ie do not need rescaling. 
The corresponding entry of color_buf[] is + * simply set to point to the input data array, thereby avoiding copying. + */ + JSAMPARRAY color_buf[MAX_COMPONENTS]; + + /* Per-component upsampling method pointers */ + upsample1_ptr methods[MAX_COMPONENTS]; + + int next_row_out; /* counts rows emitted from color_buf */ + JDIMENSION rows_to_go; /* counts rows remaining in image */ + + /* Height of an input row group for each component. */ + int rowgroup_height[MAX_COMPONENTS]; + + /* These arrays save pixel expansion factors so that int_expand need not + * recompute them each time. They are unused for other upsampling methods. + */ + UINT8 h_expand[MAX_COMPONENTS]; + UINT8 v_expand[MAX_COMPONENTS]; +} my_upsampler; + +typedef my_upsampler * my_upsample_ptr; + + +/* + * Initialize for an upsampling pass. + */ + +METHODDEF(void) +start_pass_upsample (j_decompress_ptr cinfo) +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + + /* Mark the conversion buffer empty */ + upsample->next_row_out = cinfo->max_v_samp_factor; + /* Initialize total-height counter for detecting bottom of image */ + upsample->rows_to_go = cinfo->output_height; +} + + +/* + * Control routine to do upsampling (and color conversion). + * + * In this version we upsample each component independently. + * We upsample one row group into the conversion buffer, then apply + * color conversion a row at a time. + */ + +METHODDEF(void) +sep_upsample (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION *in_row_group_ctr, + JDIMENSION in_row_groups_avail, + JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, + JDIMENSION out_rows_avail) +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + int ci; + jpeg_component_info * compptr; + JDIMENSION num_rows; + + /* Fill the conversion buffer, if it's empty */ + if (upsample->next_row_out >= cinfo->max_v_samp_factor) { + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Invoke per-component upsample method. Notice we pass a POINTER + * to color_buf[ci], so that fullsize_upsample can change it. + */ + (*upsample->methods[ci]) (cinfo, compptr, + input_buf[ci] + (*in_row_group_ctr * upsample->rowgroup_height[ci]), + upsample->color_buf + ci); + } + upsample->next_row_out = 0; + } + + /* Color-convert and emit rows */ + + /* How many we have in the buffer: */ + num_rows = (JDIMENSION) (cinfo->max_v_samp_factor - upsample->next_row_out); + /* Not more than the distance to the end of the image. Need this test + * in case the image height is not a multiple of max_v_samp_factor: + */ + if (num_rows > upsample->rows_to_go) + num_rows = upsample->rows_to_go; + /* And not more than what the client can accept: */ + out_rows_avail -= *out_row_ctr; + if (num_rows > out_rows_avail) + num_rows = out_rows_avail; + + (*cinfo->cconvert->color_convert) (cinfo, upsample->color_buf, + (JDIMENSION) upsample->next_row_out, + output_buf + *out_row_ctr, + (int) num_rows); + + /* Adjust counts */ + *out_row_ctr += num_rows; + upsample->rows_to_go -= num_rows; + upsample->next_row_out += num_rows; + /* When the buffer is emptied, declare this input row group consumed */ + if (upsample->next_row_out >= cinfo->max_v_samp_factor) + (*in_row_group_ctr)++; +} + + +/* + * These are the routines invoked by sep_upsample to upsample pixel values + * of a single component. One row group is processed per call. + */ + + +/* + * For full-size components, we just make color_buf[ci] point at the + * input buffer, and thus avoid copying any data. 
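/*
 * [Editorial illustration, not part of the library source or of this patch]
 * A concrete reading of the "row group" bookkeeping above for the common
 * 2h2v YCbCr case with no extra IDCT scaling (every DCT_v_scaled_size equal
 * to min_DCT_v_scaled_size): one row group is 2 sample rows of Y plus 1 row
 * each of Cb and Cr, and sep_upsample turns it into max_v_samp_factor == 2
 * output pixel rows.  rows_per_group() is a hypothetical helper, not a
 * libjpeg function.
 */
static int rows_per_group (int v_samp_factor, int dct_v_scaled_size,
                           int min_dct_v_scaled_size)
{
  return v_samp_factor * dct_v_scaled_size / min_dct_v_scaled_size;
}
/* rows_per_group(2, 8, 8) == 2 for Y; rows_per_group(1, 8, 8) == 1 for Cb/Cr. */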
Note that this is + * safe only because sep_upsample doesn't declare the input row group + * "consumed" until we are done color converting and emitting it. + */ + +METHODDEF(void) +fullsize_upsample (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr) +{ + *output_data_ptr = input_data; +} + + +/* + * This is a no-op version used for "uninteresting" components. + * These components will not be referenced by color conversion. + */ + +METHODDEF(void) +noop_upsample (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr) +{ + *output_data_ptr = NULL; /* safety check */ +} + + +/* + * This version handles any integral sampling ratios. + * This is not used for typical JPEG files, so it need not be fast. + * Nor, for that matter, is it particularly accurate: the algorithm is + * simple replication of the input pixel onto the corresponding output + * pixels. The hi-falutin sampling literature refers to this as a + * "box filter". A box filter tends to introduce visible artifacts, + * so if you are actually going to use 3:1 or 4:1 sampling ratios + * you would be well advised to improve this code. + */ + +METHODDEF(void) +int_upsample (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr) +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + JSAMPARRAY output_data = *output_data_ptr; + register JSAMPROW inptr, outptr; + register JSAMPLE invalue; + register int h; + JSAMPROW outend; + int h_expand, v_expand; + int inrow, outrow; + + h_expand = upsample->h_expand[compptr->component_index]; + v_expand = upsample->v_expand[compptr->component_index]; + + inrow = outrow = 0; + while (outrow < cinfo->max_v_samp_factor) { + /* Generate one output row with proper horizontal expansion */ + inptr = input_data[inrow]; + outptr = output_data[outrow]; + outend = outptr + cinfo->output_width; + while (outptr < outend) { + invalue = *inptr++; /* don't need GETJSAMPLE() here */ + for (h = h_expand; h > 0; h--) { + *outptr++ = invalue; + } + } + /* Generate any additional output rows by duplicating the first one */ + if (v_expand > 1) { + jcopy_sample_rows(output_data, outrow, output_data, outrow+1, + v_expand-1, cinfo->output_width); + } + inrow++; + outrow += v_expand; + } +} + + +/* + * Fast processing for the common case of 2:1 horizontal and 1:1 vertical. + * It's still a box filter. + */ + +METHODDEF(void) +h2v1_upsample (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr) +{ + JSAMPARRAY output_data = *output_data_ptr; + register JSAMPROW inptr, outptr; + register JSAMPLE invalue; + JSAMPROW outend; + int outrow; + + for (outrow = 0; outrow < cinfo->max_v_samp_factor; outrow++) { + inptr = input_data[outrow]; + outptr = output_data[outrow]; + outend = outptr + cinfo->output_width; + while (outptr < outend) { + invalue = *inptr++; /* don't need GETJSAMPLE() here */ + *outptr++ = invalue; + *outptr++ = invalue; + } + } +} + + +/* + * Fast processing for the common case of 2:1 horizontal and 2:1 vertical. + * It's still a box filter. 
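/*
 * [Editorial illustration, not part of the library source or of this patch]
 * A minimal standalone version of the box-filter replication performed by
 * h2v1_upsample() above: each input sample is simply written twice, so the
 * input row {10, 20, 30} becomes {10, 10, 20, 20, 30, 30}; h2v2_upsample()
 * below additionally duplicates the whole row vertically.  Like the library's
 * color_buf, the output buffer is assumed to be padded to an even width.
 */
static void h2v1_box_row (const unsigned char *in, unsigned char *out,
                          int out_width)
{
  unsigned char *outend = out + out_width;
  while (out < outend) {
    *out++ = *in;
    *out++ = *in++;
  }
}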
+ */ + +METHODDEF(void) +h2v2_upsample (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr) +{ + JSAMPARRAY output_data = *output_data_ptr; + register JSAMPROW inptr, outptr; + register JSAMPLE invalue; + JSAMPROW outend; + int inrow, outrow; + + inrow = outrow = 0; + while (outrow < cinfo->max_v_samp_factor) { + inptr = input_data[inrow]; + outptr = output_data[outrow]; + outend = outptr + cinfo->output_width; + while (outptr < outend) { + invalue = *inptr++; /* don't need GETJSAMPLE() here */ + *outptr++ = invalue; + *outptr++ = invalue; + } + jcopy_sample_rows(output_data, outrow, output_data, outrow+1, + 1, cinfo->output_width); + inrow++; + outrow += 2; + } +} + + +/* + * Module initialization routine for upsampling. + */ + +GLOBAL(void) +jinit_upsampler (j_decompress_ptr cinfo) +{ + my_upsample_ptr upsample; + int ci; + jpeg_component_info * compptr; + boolean need_buffer; + int h_in_group, v_in_group, h_out_group, v_out_group; + + upsample = (my_upsample_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_upsampler)); + cinfo->upsample = (struct jpeg_upsampler *) upsample; + upsample->pub.start_pass = start_pass_upsample; + upsample->pub.upsample = sep_upsample; + upsample->pub.need_context_rows = FALSE; /* until we find out differently */ + + if (cinfo->CCIR601_sampling) /* this isn't supported */ + ERREXIT(cinfo, JERR_CCIR601_NOTIMPL); + + /* Verify we can handle the sampling factors, select per-component methods, + * and create storage as needed. + */ + for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; + ci++, compptr++) { + /* Compute size of an "input group" after IDCT scaling. This many samples + * are to be converted to max_h_samp_factor * max_v_samp_factor pixels. + */ + h_in_group = (compptr->h_samp_factor * compptr->DCT_h_scaled_size) / + cinfo->min_DCT_h_scaled_size; + v_in_group = (compptr->v_samp_factor * compptr->DCT_v_scaled_size) / + cinfo->min_DCT_v_scaled_size; + h_out_group = cinfo->max_h_samp_factor; + v_out_group = cinfo->max_v_samp_factor; + upsample->rowgroup_height[ci] = v_in_group; /* save for use later */ + need_buffer = TRUE; + if (! compptr->component_needed) { + /* Don't bother to upsample an uninteresting component. */ + upsample->methods[ci] = noop_upsample; + need_buffer = FALSE; + } else if (h_in_group == h_out_group && v_in_group == v_out_group) { + /* Fullsize components can be processed without any work. 
*/ + upsample->methods[ci] = fullsize_upsample; + need_buffer = FALSE; + } else if (h_in_group * 2 == h_out_group && + v_in_group == v_out_group) { + /* Special case for 2h1v upsampling */ + upsample->methods[ci] = h2v1_upsample; + } else if (h_in_group * 2 == h_out_group && + v_in_group * 2 == v_out_group) { + /* Special case for 2h2v upsampling */ + upsample->methods[ci] = h2v2_upsample; + } else if ((h_out_group % h_in_group) == 0 && + (v_out_group % v_in_group) == 0) { + /* Generic integral-factors upsampling method */ + upsample->methods[ci] = int_upsample; + upsample->h_expand[ci] = (UINT8) (h_out_group / h_in_group); + upsample->v_expand[ci] = (UINT8) (v_out_group / v_in_group); + } else + ERREXIT(cinfo, JERR_FRACT_SAMPLE_NOTIMPL); + if (need_buffer) { + upsample->color_buf[ci] = (*cinfo->mem->alloc_sarray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + (JDIMENSION) jround_up((long) cinfo->output_width, + (long) cinfo->max_h_samp_factor), + (JDIMENSION) cinfo->max_v_samp_factor); + } + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdtrans.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdtrans.c new file mode 100644 index 000000000..22dd47fb5 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jdtrans.c @@ -0,0 +1,140 @@ +/* + * jdtrans.c + * + * Copyright (C) 1995-1997, Thomas G. Lane. + * Modified 2000-2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains library routines for transcoding decompression, + * that is, reading raw DCT coefficient arrays from an input JPEG file. + * The routines in jdapimin.c will also be needed by a transcoder. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* Forward declarations */ +LOCAL(void) transdecode_master_selection JPP((j_decompress_ptr cinfo)); + + +/* + * Read the coefficient arrays from a JPEG file. + * jpeg_read_header must be completed before calling this. + * + * The entire image is read into a set of virtual coefficient-block arrays, + * one per component. The return value is a pointer to the array of + * virtual-array descriptors. These can be manipulated directly via the + * JPEG memory manager, or handed off to jpeg_write_coefficients(). + * To release the memory occupied by the virtual arrays, call + * jpeg_finish_decompress() when done with the data. + * + * An alternative usage is to simply obtain access to the coefficient arrays + * during a buffered-image-mode decompression operation. This is allowed + * after any jpeg_finish_output() call. The arrays can be accessed until + * jpeg_finish_decompress() is called. (Note that any call to the library + * may reposition the arrays, so don't rely on access_virt_barray() results + * to stay valid across library calls.) + * + * Returns NULL if suspended. This case need be checked only if + * a suspending data source is used. 
+ */ + +GLOBAL(jvirt_barray_ptr *) +jpeg_read_coefficients (j_decompress_ptr cinfo) +{ + if (cinfo->global_state == DSTATE_READY) { + /* First call: initialize active modules */ + transdecode_master_selection(cinfo); + cinfo->global_state = DSTATE_RDCOEFS; + } + if (cinfo->global_state == DSTATE_RDCOEFS) { + /* Absorb whole file into the coef buffer */ + for (;;) { + int retcode; + /* Call progress monitor hook if present */ + if (cinfo->progress != NULL) + (*cinfo->progress->progress_monitor) ((j_common_ptr) cinfo); + /* Absorb some more input */ + retcode = (*cinfo->inputctl->consume_input) (cinfo); + if (retcode == JPEG_SUSPENDED) + return NULL; + if (retcode == JPEG_REACHED_EOI) + break; + /* Advance progress counter if appropriate */ + if (cinfo->progress != NULL && + (retcode == JPEG_ROW_COMPLETED || retcode == JPEG_REACHED_SOS)) { + if (++cinfo->progress->pass_counter >= cinfo->progress->pass_limit) { + /* startup underestimated number of scans; ratchet up one scan */ + cinfo->progress->pass_limit += (long) cinfo->total_iMCU_rows; + } + } + } + /* Set state so that jpeg_finish_decompress does the right thing */ + cinfo->global_state = DSTATE_STOPPING; + } + /* At this point we should be in state DSTATE_STOPPING if being used + * standalone, or in state DSTATE_BUFIMAGE if being invoked to get access + * to the coefficients during a full buffered-image-mode decompression. + */ + if ((cinfo->global_state == DSTATE_STOPPING || + cinfo->global_state == DSTATE_BUFIMAGE) && cinfo->buffered_image) { + return cinfo->coef->coef_arrays; + } + /* Oops, improper usage */ + ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); + return NULL; /* keep compiler happy */ +} + + +/* + * Master selection of decompression modules for transcoding. + * This substitutes for jdmaster.c's initialization of the full decompressor. + */ + +LOCAL(void) +transdecode_master_selection (j_decompress_ptr cinfo) +{ + /* This is effectively a buffered-image operation. */ + cinfo->buffered_image = TRUE; + + /* Compute output image dimensions and related values. */ + jpeg_core_output_dimensions(cinfo); + + /* Entropy decoding: either Huffman or arithmetic coding. */ + if (cinfo->arith_code) + jinit_arith_decoder(cinfo); + else { + jinit_huff_decoder(cinfo); + } + + /* Always get a full-image coefficient buffer. */ + jinit_d_coef_controller(cinfo, TRUE); + + /* We can now tell the memory manager to allocate virtual arrays. */ + (*cinfo->mem->realize_virt_arrays) ((j_common_ptr) cinfo); + + /* Initialize input side of decompressor to consume first scan. */ + (*cinfo->inputctl->start_input_pass) (cinfo); + + /* Initialize progress monitoring. */ + if (cinfo->progress != NULL) { + int nscans; + /* Estimate number of scans to set pass_limit. */ + if (cinfo->progressive_mode) { + /* Arbitrarily estimate 2 interleaved DC scans + 3 AC scans/component. */ + nscans = 2 + 3 * cinfo->num_components; + } else if (cinfo->inputctl->has_multiple_scans) { + /* For a nonprogressive multiscan file, estimate 1 scan per component. 
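To make the calling sequence described in the jpeg_read_coefficients comment concrete, here is a minimal sketch, not a definitive recipe: it assumes jpeg_mem_src is compiled into this build, takes the JPEG data from a caller-supplied buffer, and omits error handling.

    #include "jpeglib.h"

    /* Sketch only: absorb an in-memory JPEG and peek at one coefficient. */
    static void
    read_first_dc (unsigned char *buf, unsigned long len)
    {
      struct jpeg_decompress_struct cinfo;
      struct jpeg_error_mgr jerr;
      jvirt_barray_ptr *coef_arrays;
      JBLOCKARRAY rows;
      JCOEF dc;

      cinfo.err = jpeg_std_error(&jerr);
      jpeg_create_decompress(&cinfo);
      jpeg_mem_src(&cinfo, buf, len);                 /* assumed available in this build */
      (void) jpeg_read_header(&cinfo, TRUE);

      coef_arrays = jpeg_read_coefficients(&cinfo);   /* NULL only with a suspending source */
      rows = (*cinfo.mem->access_virt_barray)
               ((j_common_ptr) &cinfo, coef_arrays[0], 0, 1, FALSE);
      dc = rows[0][0][0];         /* DC coefficient of the first block of component 0 */
      (void) dc;                  /* later library calls may reposition the arrays */

      jpeg_finish_decompress(&cinfo);                 /* releases the virtual arrays */
      jpeg_destroy_decompress(&cinfo);
    }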
*/ + nscans = cinfo->num_components; + } else { + nscans = 1; + } + cinfo->progress->pass_counter = 0L; + cinfo->progress->pass_limit = (long) cinfo->total_iMCU_rows * nscans; + cinfo->progress->completed_passes = 0; + cinfo->progress->total_passes = 1; + } +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jerror.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jerror.c new file mode 100644 index 000000000..cf9c73793 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jerror.c @@ -0,0 +1,253 @@ +/* + * jerror.c + * + * Copyright (C) 1991-1998, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains simple error-reporting and trace-message routines. + * These are suitable for Unix-like systems and others where writing to + * stderr is the right thing to do. Many applications will want to replace + * some or all of these routines. + * + * If you define USE_WINDOWS_MESSAGEBOX in jconfig.h or in the makefile, + * you get a Windows-specific hack to display error messages in a dialog box. + * It ain't much, but it beats dropping error messages into the bit bucket, + * which is what happens to output to stderr under most Windows C compilers. + * + * These routines are used by both the compression and decompression code. + */ + +/* this is not a core library module, so it doesn't define JPEG_INTERNALS */ +#include "jinclude.h" +#include "jpeglib.h" +#include "jversion.h" +#include "jerror.h" + +#ifdef USE_WINDOWS_MESSAGEBOX +#include +#endif + +#ifndef EXIT_FAILURE /* define exit() codes if not provided */ +#define EXIT_FAILURE 1 +#endif + + +/* + * Create the message string table. + * We do this from the master message list in jerror.h by re-reading + * jerror.h with a suitable definition for macro JMESSAGE. + * The message table is made an external symbol just in case any applications + * want to refer to it directly. + */ + +#ifdef NEED_SHORT_EXTERNAL_NAMES +#define jpeg_std_message_table jMsgTable +#endif + +#define JMESSAGE(code,string) string , + +const char * const jpeg_std_message_table[] = { +#include "jerror.h" + NULL +}; + + +/* + * Error exit handler: must not return to caller. + * + * Applications may override this if they want to get control back after + * an error. Typically one would longjmp somewhere instead of exiting. + * The setjmp buffer can be made a private field within an expanded error + * handler object. Note that the info needed to generate an error message + * is stored in the error object, so you can generate the message now or + * later, at your convenience. + * You should make sure that the JPEG object is cleaned up (with jpeg_abort + * or jpeg_destroy) at some point. + */ + +METHODDEF(void) +error_exit (j_common_ptr cinfo) +{ + /* Always display the message */ + (*cinfo->err->output_message) (cinfo); + + /* Let the memory manager delete any temp files before we die */ + jpeg_destroy(cinfo); + + //exit(EXIT_FAILURE); +} + + +/* + * Actual output of an error or trace message. + * Applications may override this method to send JPEG messages somewhere + * other than stderr. + * + * On Windows, printing to stderr is generally completely useless, + * so we provide optional code to produce an error-dialog popup. + * Most Windows applications will still prefer to override this routine, + * but if they don't, it'll do something at least marginally useful. 
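The recovery path hinted at in the error_exit comment above (longjmp back to the caller, with the setjmp buffer kept in an expanded error object) usually looks like the following sketch; my_error_mgr and my_error_exit are illustrative names, not part of the library.

    #include <setjmp.h>
    #include "jpeglib.h"

    struct my_error_mgr {
      struct jpeg_error_mgr pub;     /* "public" fields, must come first */
      jmp_buf setjmp_buffer;         /* where to return to on error */
    };

    static void
    my_error_exit (j_common_ptr cinfo)
    {
      struct my_error_mgr *myerr = (struct my_error_mgr *) cinfo->err;
      (*cinfo->err->output_message) (cinfo);   /* optionally report before unwinding */
      longjmp(myerr->setjmp_buffer, 1);        /* never returns, as error_exit requires */
    }

    /* In the decoding routine: */
    struct my_error_mgr jerr;
    cinfo.err = jpeg_std_error(&jerr.pub);
    jerr.pub.error_exit = my_error_exit;
    if (setjmp(jerr.setjmp_buffer)) {
      jpeg_destroy_decompress(&cinfo);   /* clean up, as the comment above requires */
      return;                            /* or report the failure to the caller */
    }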
+ * + * NOTE: to use the library in an environment that doesn't support the + * C stdio library, you may have to delete the call to fprintf() entirely, + * not just not use this routine. + */ + +METHODDEF(void) +output_message (j_common_ptr cinfo) +{ + char buffer[JMSG_LENGTH_MAX]; + + /* Create the message */ + (*cinfo->err->format_message) (cinfo, buffer); + +#ifdef USE_WINDOWS_MESSAGEBOX + /* Display it in a message dialog box */ + MessageBox(GetActiveWindow(), buffer, "JPEG Library Error", + MB_OK | MB_ICONERROR); +#else + /* Send it to stderr, adding a newline */ +// fprintf(stderr, "%s\n", buffer); +// printf( "%s\n", buffer); +#endif +} + + +/* + * Decide whether to emit a trace or warning message. + * msg_level is one of: + * -1: recoverable corrupt-data warning, may want to abort. + * 0: important advisory messages (always display to user). + * 1: first level of tracing detail. + * 2,3,...: successively more detailed tracing messages. + * An application might override this method if it wanted to abort on warnings + * or change the policy about which messages to display. + */ + +METHODDEF(void) +emit_message (j_common_ptr cinfo, int msg_level) +{ + struct jpeg_error_mgr * err = cinfo->err; + + if (msg_level < 0) { + /* It's a warning message. Since corrupt files may generate many warnings, + * the policy implemented here is to show only the first warning, + * unless trace_level >= 3. + */ + if (err->num_warnings == 0 || err->trace_level >= 3) + (*err->output_message) (cinfo); + /* Always count warnings in num_warnings. */ + err->num_warnings++; + } else { + /* It's a trace message. Show it if trace_level >= msg_level. */ + if (err->trace_level >= msg_level) + (*err->output_message) (cinfo); + } +} + + +/* + * Format a message string for the most recent JPEG error or message. + * The message is stored into buffer, which should be at least JMSG_LENGTH_MAX + * characters. Note that no '\n' character is added to the string. + * Few applications should need to override this method. + */ + +METHODDEF(void) +format_message (j_common_ptr cinfo, char * buffer) +{ + struct jpeg_error_mgr * err = cinfo->err; + int msg_code = err->msg_code; + const char * msgtext = NULL; + const char * msgptr; + char ch; + boolean isstring; + + /* Look up message string in proper table */ + if (msg_code > 0 && msg_code <= err->last_jpeg_message) { + msgtext = err->jpeg_message_table[msg_code]; + } else if (err->addon_message_table != NULL && + msg_code >= err->first_addon_message && + msg_code <= err->last_addon_message) { + msgtext = err->addon_message_table[msg_code - err->first_addon_message]; + } + + /* Defend against bogus message number */ + if (msgtext == NULL) { + err->msg_parm.i[0] = msg_code; + msgtext = err->jpeg_message_table[0]; + } + + /* Check for string parameter, as indicated by %s in the message text */ + isstring = FALSE; + msgptr = msgtext; + while ((ch = *msgptr++) != '\0') { + if (ch == '%') { + if (*msgptr == 's') isstring = TRUE; + break; + } + } + + /* Format the message into the passed buffer */ + if (isstring) + sprintf(buffer, msgtext, err->msg_parm.s); + else + sprintf(buffer, msgtext, + err->msg_parm.i[0], err->msg_parm.i[1], + err->msg_parm.i[2], err->msg_parm.i[3], + err->msg_parm.i[4], err->msg_parm.i[5], + err->msg_parm.i[6], err->msg_parm.i[7]); +} + + +/* + * Reset error state variables at start of a new image. + * This is called during compression startup to reset trace/error + * processing to default state, without losing any application-specific + * method pointers. 
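Since the stderr write in output_message is commented out in this port, an application that still wants to see the text can install its own handler after jpeg_std_error(); a small sketch, in which uart_puts is a hypothetical board-specific sink and everything else is standard libjpeg plumbing:

    static void
    my_output_message (j_common_ptr cinfo)
    {
      char buffer[JMSG_LENGTH_MAX];

      (*cinfo->err->format_message) (cinfo, buffer);   /* reuse format_message above */
      uart_puts(buffer);                               /* hypothetical logging hook */
    }

    /* after cinfo.err = jpeg_std_error(&jerr): */
    jerr.output_message = my_output_message;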
An application might possibly want to override + * this method if it has additional error processing state. + */ + +METHODDEF(void) +reset_error_mgr (j_common_ptr cinfo) +{ + cinfo->err->num_warnings = 0; + /* trace_level is not reset since it is an application-supplied parameter */ + cinfo->err->msg_code = 0; /* may be useful as a flag for "no error" */ +} + + +/* + * Fill in the standard error-handling methods in a jpeg_error_mgr object. + * Typical call is: + * struct jpeg_compress_struct cinfo; + * struct jpeg_error_mgr err; + * + * cinfo.err = jpeg_std_error(&err); + * after which the application may override some of the methods. + */ + +GLOBAL(struct jpeg_error_mgr *) +jpeg_std_error (struct jpeg_error_mgr * err) +{ + err->error_exit = error_exit; + err->emit_message = emit_message; + err->output_message = output_message; + err->format_message = format_message; + err->reset_error_mgr = reset_error_mgr; + + err->trace_level = 0; /* default = no tracing */ + err->num_warnings = 0; /* no warnings emitted yet */ + err->msg_code = 0; /* may be useful as a flag for "no error" */ + + /* Initialize message table pointers */ + err->jpeg_message_table = jpeg_std_message_table; + err->last_jpeg_message = (int) JMSG_LASTMSGCODE - 1; + + err->addon_message_table = NULL; + err->first_addon_message = 0; /* for safety */ + err->last_addon_message = 0; + + return err; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jfdctflt.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jfdctflt.c new file mode 100644 index 000000000..74d0d862d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jfdctflt.c @@ -0,0 +1,174 @@ +/* + * jfdctflt.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * Modified 2003-2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains a floating-point implementation of the + * forward DCT (Discrete Cosine Transform). + * + * This implementation should be more accurate than either of the integer + * DCT implementations. However, it may not give the same results on all + * machines because of differences in roundoff behavior. Speed will depend + * on the hardware's floating point capacity. + * + * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT + * on each column. Direct algorithms are also available, but they are + * much more complex and seem not to be any faster when reduced to code. + * + * This implementation is based on Arai, Agui, and Nakajima's algorithm for + * scaled DCT. Their original paper (Trans. IEICE E-71(11):1095) is in + * Japanese, but the algorithm is described in the Pennebaker & Mitchell + * JPEG textbook (see REFERENCES section in file README). The following code + * is based directly on figure 4-8 in P&M. + * While an 8-point DCT cannot be done in less than 11 multiplies, it is + * possible to arrange the computation so that many of the multiplies are + * simple scalings of the final outputs. These multiplies can then be + * folded into the multiplications or divisions by the JPEG quantization + * table entries. The AA&N method leaves only 5 multiplies and 29 adds + * to be done in the DCT itself. + * The primary disadvantage of this method is that with a fixed-point + * implementation, accuracy is lost due to imprecise representation of the + * scaled quantization values. 
However, that problem does not arise if + * we use floating point arithmetic. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" +#include "jdct.h" /* Private declarations for DCT subsystem */ + +#ifdef DCT_FLOAT_SUPPORTED + + +/* + * This module is specialized to the case DCTSIZE = 8. + */ + +#if DCTSIZE != 8 + Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */ +#endif + + +/* + * Perform the forward DCT on one block of samples. + */ + +GLOBAL(void) +jpeg_fdct_float (FAST_FLOAT * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + FAST_FLOAT tmp10, tmp11, tmp12, tmp13; + FAST_FLOAT z1, z2, z3, z4, z5, z11, z13; + FAST_FLOAT *dataptr; + JSAMPROW elemptr; + int ctr; + + /* Pass 1: process rows. */ + + dataptr = data; + for (ctr = 0; ctr < DCTSIZE; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Load data into workspace */ + tmp0 = (FAST_FLOAT) (GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[7])); + tmp7 = (FAST_FLOAT) (GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[7])); + tmp1 = (FAST_FLOAT) (GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[6])); + tmp6 = (FAST_FLOAT) (GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[6])); + tmp2 = (FAST_FLOAT) (GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[5])); + tmp5 = (FAST_FLOAT) (GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[5])); + tmp3 = (FAST_FLOAT) (GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[4])); + tmp4 = (FAST_FLOAT) (GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[4])); + + /* Even part */ + + tmp10 = tmp0 + tmp3; /* phase 2 */ + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + /* Apply unsigned->signed conversion */ + dataptr[0] = tmp10 + tmp11 - 8 * CENTERJSAMPLE; /* phase 3 */ + dataptr[4] = tmp10 - tmp11; + + z1 = (tmp12 + tmp13) * ((FAST_FLOAT) 0.707106781); /* c4 */ + dataptr[2] = tmp13 + z1; /* phase 5 */ + dataptr[6] = tmp13 - z1; + + /* Odd part */ + + tmp10 = tmp4 + tmp5; /* phase 2 */ + tmp11 = tmp5 + tmp6; + tmp12 = tmp6 + tmp7; + + /* The rotator is modified from fig 4-8 to avoid extra negations. */ + z5 = (tmp10 - tmp12) * ((FAST_FLOAT) 0.382683433); /* c6 */ + z2 = ((FAST_FLOAT) 0.541196100) * tmp10 + z5; /* c2-c6 */ + z4 = ((FAST_FLOAT) 1.306562965) * tmp12 + z5; /* c2+c6 */ + z3 = tmp11 * ((FAST_FLOAT) 0.707106781); /* c4 */ + + z11 = tmp7 + z3; /* phase 5 */ + z13 = tmp7 - z3; + + dataptr[5] = z13 + z2; /* phase 6 */ + dataptr[3] = z13 - z2; + dataptr[1] = z11 + z4; + dataptr[7] = z11 - z4; + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. 
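For reference, the floating-point constants used in the rows pass above are plain cosines of the 8-point DCT angles (writing cK for cos(K*pi/16)):

    c4      = cos(4*pi/16)              = 0.707106781
    c6      = cos(6*pi/16)              = 0.382683433
    c2 - c6 = 0.923879533 - 0.382683433 = 0.541196100
    c2 + c6 = 0.923879533 + 0.382683433 = 1.306562965

The same four values reappear, in fixed point, as the FIX_* constants of jfdctfst.c further down.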
*/ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; + tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; + tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; + tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; + tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; + + /* Even part */ + + tmp10 = tmp0 + tmp3; /* phase 2 */ + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + dataptr[DCTSIZE*0] = tmp10 + tmp11; /* phase 3 */ + dataptr[DCTSIZE*4] = tmp10 - tmp11; + + z1 = (tmp12 + tmp13) * ((FAST_FLOAT) 0.707106781); /* c4 */ + dataptr[DCTSIZE*2] = tmp13 + z1; /* phase 5 */ + dataptr[DCTSIZE*6] = tmp13 - z1; + + /* Odd part */ + + tmp10 = tmp4 + tmp5; /* phase 2 */ + tmp11 = tmp5 + tmp6; + tmp12 = tmp6 + tmp7; + + /* The rotator is modified from fig 4-8 to avoid extra negations. */ + z5 = (tmp10 - tmp12) * ((FAST_FLOAT) 0.382683433); /* c6 */ + z2 = ((FAST_FLOAT) 0.541196100) * tmp10 + z5; /* c2-c6 */ + z4 = ((FAST_FLOAT) 1.306562965) * tmp12 + z5; /* c2+c6 */ + z3 = tmp11 * ((FAST_FLOAT) 0.707106781); /* c4 */ + + z11 = tmp7 + z3; /* phase 5 */ + z13 = tmp7 - z3; + + dataptr[DCTSIZE*5] = z13 + z2; /* phase 6 */ + dataptr[DCTSIZE*3] = z13 - z2; + dataptr[DCTSIZE*1] = z11 + z4; + dataptr[DCTSIZE*7] = z11 - z4; + + dataptr++; /* advance pointer to next column */ + } +} + +#endif /* DCT_FLOAT_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jfdctfst.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jfdctfst.c new file mode 100644 index 000000000..8cad5f229 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jfdctfst.c @@ -0,0 +1,230 @@ +/* + * jfdctfst.c + * + * Copyright (C) 1994-1996, Thomas G. Lane. + * Modified 2003-2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains a fast, not so accurate integer implementation of the + * forward DCT (Discrete Cosine Transform). + * + * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT + * on each column. Direct algorithms are also available, but they are + * much more complex and seem not to be any faster when reduced to code. + * + * This implementation is based on Arai, Agui, and Nakajima's algorithm for + * scaled DCT. Their original paper (Trans. IEICE E-71(11):1095) is in + * Japanese, but the algorithm is described in the Pennebaker & Mitchell + * JPEG textbook (see REFERENCES section in file README). The following code + * is based directly on figure 4-8 in P&M. + * While an 8-point DCT cannot be done in less than 11 multiplies, it is + * possible to arrange the computation so that many of the multiplies are + * simple scalings of the final outputs. These multiplies can then be + * folded into the multiplications or divisions by the JPEG quantization + * table entries. The AA&N method leaves only 5 multiplies and 29 adds + * to be done in the DCT itself. + * The primary disadvantage of this method is that with fixed-point math, + * accuracy is lost due to imprecise representation of the scaled + * quantization values. 
The smaller the quantization table entry, the less + * precise the scaled value, so this implementation does worse with high- + * quality-setting files than with low-quality ones. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" +#include "jdct.h" /* Private declarations for DCT subsystem */ + +#ifdef DCT_IFAST_SUPPORTED + + +/* + * This module is specialized to the case DCTSIZE = 8. + */ + +#if DCTSIZE != 8 + Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */ +#endif + + +/* Scaling decisions are generally the same as in the LL&M algorithm; + * see jfdctint.c for more details. However, we choose to descale + * (right shift) multiplication products as soon as they are formed, + * rather than carrying additional fractional bits into subsequent additions. + * This compromises accuracy slightly, but it lets us save a few shifts. + * More importantly, 16-bit arithmetic is then adequate (for 8-bit samples) + * everywhere except in the multiplications proper; this saves a good deal + * of work on 16-bit-int machines. + * + * Again to save a few shifts, the intermediate results between pass 1 and + * pass 2 are not upscaled, but are represented only to integral precision. + * + * A final compromise is to represent the multiplicative constants to only + * 8 fractional bits, rather than 13. This saves some shifting work on some + * machines, and may also reduce the cost of multiplication (since there + * are fewer one-bits in the constants). + */ + +#define CONST_BITS 8 + + +/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus + * causing a lot of useless floating-point operations at run time. + * To get around this we use the following pre-calculated constants. + * If you change CONST_BITS you may want to add appropriate values. + * (With a reasonable C compiler, you can just rely on the FIX() macro...) + */ + +#if CONST_BITS == 8 +#define FIX_0_382683433 ((INT32) 98) /* FIX(0.382683433) */ +#define FIX_0_541196100 ((INT32) 139) /* FIX(0.541196100) */ +#define FIX_0_707106781 ((INT32) 181) /* FIX(0.707106781) */ +#define FIX_1_306562965 ((INT32) 334) /* FIX(1.306562965) */ +#else +#define FIX_0_382683433 FIX(0.382683433) +#define FIX_0_541196100 FIX(0.541196100) +#define FIX_0_707106781 FIX(0.707106781) +#define FIX_1_306562965 FIX(1.306562965) +#endif + + +/* We can gain a little more speed, with a further compromise in accuracy, + * by omitting the addition in a descaling shift. This yields an incorrectly + * rounded result half the time... + */ + +#ifndef USE_ACCURATE_ROUNDING +#undef DESCALE +#define DESCALE(x,n) RIGHT_SHIFT(x, n) +#endif + + +/* Multiply a DCTELEM variable by an INT32 constant, and immediately + * descale to yield a DCTELEM result. + */ + +#define MULTIPLY(var,const) ((DCTELEM) DESCALE((var) * (const), CONST_BITS)) + + +/* + * Perform the forward DCT on one block of samples. + */ + +GLOBAL(void) +jpeg_fdct_ifast (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + DCTELEM tmp10, tmp11, tmp12, tmp13; + DCTELEM z1, z2, z3, z4, z5, z11, z13; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. 
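A quick sanity check of the pre-computed table above: FIX(x) effectively evaluates to (INT32)(x * (1 << CONST_BITS) + 0.5) (its usual definition in jdct.h), so with CONST_BITS == 8:

    FIX(0.382683433) = round(0.382683433 * 256) = round( 97.97) =  98
    FIX(0.541196100) = round(0.541196100 * 256) = round(138.55) = 139
    FIX(0.707106781) = round(0.707106781 * 256) = round(181.02) = 181
    FIX(1.306562965) = round(1.306562965 * 256) = round(334.48) = 334

which are exactly the values hard-coded for this fast path.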
*/ + + dataptr = data; + for (ctr = 0; ctr < DCTSIZE; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Load data into workspace */ + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[7]); + tmp7 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[7]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[6]); + tmp6 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[6]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[5]); + tmp5 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[5]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[4]); + tmp4 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[4]); + + /* Even part */ + + tmp10 = tmp0 + tmp3; /* phase 2 */ + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + /* Apply unsigned->signed conversion */ + dataptr[0] = tmp10 + tmp11 - 8 * CENTERJSAMPLE; /* phase 3 */ + dataptr[4] = tmp10 - tmp11; + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781); /* c4 */ + dataptr[2] = tmp13 + z1; /* phase 5 */ + dataptr[6] = tmp13 - z1; + + /* Odd part */ + + tmp10 = tmp4 + tmp5; /* phase 2 */ + tmp11 = tmp5 + tmp6; + tmp12 = tmp6 + tmp7; + + /* The rotator is modified from fig 4-8 to avoid extra negations. */ + z5 = MULTIPLY(tmp10 - tmp12, FIX_0_382683433); /* c6 */ + z2 = MULTIPLY(tmp10, FIX_0_541196100) + z5; /* c2-c6 */ + z4 = MULTIPLY(tmp12, FIX_1_306562965) + z5; /* c2+c6 */ + z3 = MULTIPLY(tmp11, FIX_0_707106781); /* c4 */ + + z11 = tmp7 + z3; /* phase 5 */ + z13 = tmp7 - z3; + + dataptr[5] = z13 + z2; /* phase 6 */ + dataptr[3] = z13 - z2; + dataptr[1] = z11 + z4; + dataptr[7] = z11 - z4; + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; + tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; + tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; + tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; + tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; + + /* Even part */ + + tmp10 = tmp0 + tmp3; /* phase 2 */ + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + dataptr[DCTSIZE*0] = tmp10 + tmp11; /* phase 3 */ + dataptr[DCTSIZE*4] = tmp10 - tmp11; + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781); /* c4 */ + dataptr[DCTSIZE*2] = tmp13 + z1; /* phase 5 */ + dataptr[DCTSIZE*6] = tmp13 - z1; + + /* Odd part */ + + tmp10 = tmp4 + tmp5; /* phase 2 */ + tmp11 = tmp5 + tmp6; + tmp12 = tmp6 + tmp7; + + /* The rotator is modified from fig 4-8 to avoid extra negations. 
*/ + z5 = MULTIPLY(tmp10 - tmp12, FIX_0_382683433); /* c6 */ + z2 = MULTIPLY(tmp10, FIX_0_541196100) + z5; /* c2-c6 */ + z4 = MULTIPLY(tmp12, FIX_1_306562965) + z5; /* c2+c6 */ + z3 = MULTIPLY(tmp11, FIX_0_707106781); /* c4 */ + + z11 = tmp7 + z3; /* phase 5 */ + z13 = tmp7 - z3; + + dataptr[DCTSIZE*5] = z13 + z2; /* phase 6 */ + dataptr[DCTSIZE*3] = z13 - z2; + dataptr[DCTSIZE*1] = z11 + z4; + dataptr[DCTSIZE*7] = z11 - z4; + + dataptr++; /* advance pointer to next column */ + } +} + +#endif /* DCT_IFAST_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jfdctint.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jfdctint.c new file mode 100644 index 000000000..1dde58c49 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jfdctint.c @@ -0,0 +1,4348 @@ +/* + * jfdctint.c + * + * Copyright (C) 1991-1996, Thomas G. Lane. + * Modification developed 2003-2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains a slow-but-accurate integer implementation of the + * forward DCT (Discrete Cosine Transform). + * + * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT + * on each column. Direct algorithms are also available, but they are + * much more complex and seem not to be any faster when reduced to code. + * + * This implementation is based on an algorithm described in + * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT + * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics, + * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991. + * The primary algorithm described there uses 11 multiplies and 29 adds. + * We use their alternate method with 12 multiplies and 32 adds. + * The advantage of this method is that no data path contains more than one + * multiplication; this allows a very simple and accurate implementation in + * scaled fixed-point arithmetic, with a minimal number of shifts. + * + * We also provide FDCT routines with various input sample block sizes for + * direct resolution reduction or enlargement and for direct resolving the + * common 2x1 and 1x2 subsampling cases without additional resampling: NxN + * (N=1...16), 2NxN, and Nx2N (N=1...8) pixels for one 8x8 output DCT block. + * + * For N<8 we fill the remaining block coefficients with zero. + * For N>8 we apply a partial N-point FDCT on the input samples, computing + * just the lower 8 frequency coefficients and discarding the rest. + * + * We must scale the output coefficients of the N-point FDCT appropriately + * to the standard 8-point FDCT level by 8/N per 1-D pass. This scaling + * is folded into the constant multipliers (pass 2) and/or final/initial + * shifting. + * + * CAUTION: We rely on the FIX() macro except for the N=1,2,4,8 cases + * since there would be too many additional constants to pre-calculate. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" +#include "jdct.h" /* Private declarations for DCT subsystem */ + +#ifdef DCT_ISLOW_SUPPORTED + + +/* + * This module is specialized to the case DCTSIZE = 8. + */ + +#if DCTSIZE != 8 + Sorry, this code only copes with 8x8 DCT blocks. /* deliberate syntax err */ +#endif + + +/* + * The poop on this scaling stuff is as follows: + * + * Each 1-D DCT step produces outputs which are a factor of sqrt(N) + * larger than the true DCT outputs. 
The final outputs are therefore + * a factor of N larger than desired; since N=8 this can be cured by + * a simple right shift at the end of the algorithm. The advantage of + * this arrangement is that we save two multiplications per 1-D DCT, + * because the y0 and y4 outputs need not be divided by sqrt(N). + * In the IJG code, this factor of 8 is removed by the quantization step + * (in jcdctmgr.c), NOT in this module. + * + * We have to do addition and subtraction of the integer inputs, which + * is no problem, and multiplication by fractional constants, which is + * a problem to do in integer arithmetic. We multiply all the constants + * by CONST_SCALE and convert them to integer constants (thus retaining + * CONST_BITS bits of precision in the constants). After doing a + * multiplication we have to divide the product by CONST_SCALE, with proper + * rounding, to produce the correct output. This division can be done + * cheaply as a right shift of CONST_BITS bits. We postpone shifting + * as long as possible so that partial sums can be added together with + * full fractional precision. + * + * The outputs of the first pass are scaled up by PASS1_BITS bits so that + * they are represented to better-than-integral precision. These outputs + * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word + * with the recommended scaling. (For 12-bit sample data, the intermediate + * array is INT32 anyway.) + * + * To avoid overflow of the 32-bit intermediate results in pass 2, we must + * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis + * shows that the values given below are the most effective. + */ + +#if BITS_IN_JSAMPLE == 8 +#define CONST_BITS 13 +#define PASS1_BITS 2 +#else +#define CONST_BITS 13 +#define PASS1_BITS 1 /* lose a little precision to avoid overflow */ +#endif + +/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus + * causing a lot of useless floating-point operations at run time. + * To get around this we use the following pre-calculated constants. + * If you change CONST_BITS you may want to add appropriate values. + * (With a reasonable C compiler, you can just rely on the FIX() macro...) 
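Plugging in the numbers for the usual 8-bit-sample build makes the two claims above concrete:

    pass-1 output width : BITS_IN_JSAMPLE + PASS1_BITS + 3          = 8 + 2 + 3  = 13 bits   (fits a 16-bit DCTELEM)
    pass-2 headroom     : BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS = 8 + 13 + 2 = 23 <= 26  (INT32 products cannot overflow)

Each 1-D pass also scales the data by sqrt(8), so the finished coefficients come out a factor of 8 too large; as noted, that factor is removed later by the quantization divisors in jcdctmgr.c rather than by shifting here.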
+ */ + +#if CONST_BITS == 13 +#define FIX_0_298631336 ((INT32) 2446) /* FIX(0.298631336) */ +#define FIX_0_390180644 ((INT32) 3196) /* FIX(0.390180644) */ +#define FIX_0_541196100 ((INT32) 4433) /* FIX(0.541196100) */ +#define FIX_0_765366865 ((INT32) 6270) /* FIX(0.765366865) */ +#define FIX_0_899976223 ((INT32) 7373) /* FIX(0.899976223) */ +#define FIX_1_175875602 ((INT32) 9633) /* FIX(1.175875602) */ +#define FIX_1_501321110 ((INT32) 12299) /* FIX(1.501321110) */ +#define FIX_1_847759065 ((INT32) 15137) /* FIX(1.847759065) */ +#define FIX_1_961570560 ((INT32) 16069) /* FIX(1.961570560) */ +#define FIX_2_053119869 ((INT32) 16819) /* FIX(2.053119869) */ +#define FIX_2_562915447 ((INT32) 20995) /* FIX(2.562915447) */ +#define FIX_3_072711026 ((INT32) 25172) /* FIX(3.072711026) */ +#else +#define FIX_0_298631336 FIX(0.298631336) +#define FIX_0_390180644 FIX(0.390180644) +#define FIX_0_541196100 FIX(0.541196100) +#define FIX_0_765366865 FIX(0.765366865) +#define FIX_0_899976223 FIX(0.899976223) +#define FIX_1_175875602 FIX(1.175875602) +#define FIX_1_501321110 FIX(1.501321110) +#define FIX_1_847759065 FIX(1.847759065) +#define FIX_1_961570560 FIX(1.961570560) +#define FIX_2_053119869 FIX(2.053119869) +#define FIX_2_562915447 FIX(2.562915447) +#define FIX_3_072711026 FIX(3.072711026) +#endif + + +/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result. + * For 8-bit samples with the recommended scaling, all the variable + * and constant values involved are no more than 16 bits wide, so a + * 16x16->32 bit multiply can be used instead of a full 32x32 multiply. + * For 12-bit samples, a full 32-bit multiplication will be needed. + */ + +#if BITS_IN_JSAMPLE == 8 +#define MULTIPLY(var,const) MULTIPLY16C16(var,const) +#else +#define MULTIPLY(var,const) ((var) * (const)) +#endif + + +/* + * Perform the forward DCT on one block of samples. + */ + +GLOBAL(void) +jpeg_fdct_islow (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3; + INT32 tmp10, tmp11, tmp12, tmp13; + INT32 z1; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + + dataptr = data; + for (ctr = 0; ctr < DCTSIZE; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". + */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[7]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[6]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[5]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[4]); + + tmp10 = tmp0 + tmp3; + tmp12 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp13 = tmp1 - tmp2; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[7]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[6]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[5]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[4]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) ((tmp10 + tmp11 - 8 * CENTERJSAMPLE) << PASS1_BITS); + dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + /* Add fudge factor here for final descale. 
*/ + z1 += ONE << (CONST_BITS-PASS1_BITS-1); + dataptr[2] = (DCTELEM) RIGHT_SHIFT(z1 + MULTIPLY(tmp12, FIX_0_765366865), + CONST_BITS-PASS1_BITS); + dataptr[6] = (DCTELEM) RIGHT_SHIFT(z1 - MULTIPLY(tmp13, FIX_1_847759065), + CONST_BITS-PASS1_BITS); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * cK represents sqrt(2) * cos(K*pi/16). + * i0..i3 in the paper are tmp0..tmp3 here. + */ + + tmp10 = tmp0 + tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp0 + tmp2; + tmp13 = tmp1 + tmp3; + z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */ + /* Add fudge factor here for final descale. */ + z1 += ONE << (CONST_BITS-PASS1_BITS-1); + + tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */ + tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */ + tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */ + tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */ + tmp10 = MULTIPLY(tmp10, - FIX_0_899976223); /* c7-c3 */ + tmp11 = MULTIPLY(tmp11, - FIX_2_562915447); /* -c1-c3 */ + tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* c5-c3 */ + tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */ + + tmp12 += z1; + tmp13 += z1; + + dataptr[1] = (DCTELEM) + RIGHT_SHIFT(tmp0 + tmp10 + tmp12, CONST_BITS-PASS1_BITS); + dataptr[3] = (DCTELEM) + RIGHT_SHIFT(tmp1 + tmp11 + tmp13, CONST_BITS-PASS1_BITS); + dataptr[5] = (DCTELEM) + RIGHT_SHIFT(tmp2 + tmp11 + tmp12, CONST_BITS-PASS1_BITS); + dataptr[7] = (DCTELEM) + RIGHT_SHIFT(tmp3 + tmp10 + tmp13, CONST_BITS-PASS1_BITS); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". + */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; + + /* Add fudge factor here for final descale. */ + tmp10 = tmp0 + tmp3 + (ONE << (PASS1_BITS-1)); + tmp12 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp13 = tmp1 - tmp2; + + tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; + + dataptr[DCTSIZE*0] = (DCTELEM) RIGHT_SHIFT(tmp10 + tmp11, PASS1_BITS); + dataptr[DCTSIZE*4] = (DCTELEM) RIGHT_SHIFT(tmp10 - tmp11, PASS1_BITS); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + /* Add fudge factor here for final descale. */ + z1 += ONE << (CONST_BITS+PASS1_BITS-1); + dataptr[DCTSIZE*2] = (DCTELEM) + RIGHT_SHIFT(z1 + MULTIPLY(tmp12, FIX_0_765366865), CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*6] = (DCTELEM) + RIGHT_SHIFT(z1 - MULTIPLY(tmp13, FIX_1_847759065), CONST_BITS+PASS1_BITS); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * cK represents sqrt(2) * cos(K*pi/16). + * i0..i3 in the paper are tmp0..tmp3 here. + */ + + tmp10 = tmp0 + tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp0 + tmp2; + tmp13 = tmp1 + tmp3; + z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */ + /* Add fudge factor here for final descale. 
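The repeated "add fudge factor" trick above is round-to-nearest folded into the eventual right shift: DESCALE(x,n) amounts to RIGHT_SHIFT(x + (ONE << (n-1)), n), so pre-adding ONE << (shift-1) to z1 and then shifting by that same amount (CONST_BITS-PASS1_BITS in pass 1, CONST_BITS+PASS1_BITS in pass 2) rounds rather than truncates. A small worked case with a shift of 3:

    truncating:  300 >> 3       = 37
    rounding:   (300 + 4) >> 3  = 38    (300 / 8 = 37.5, rounded to nearest)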
*/ + z1 += ONE << (CONST_BITS+PASS1_BITS-1); + + tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */ + tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */ + tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */ + tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */ + tmp10 = MULTIPLY(tmp10, - FIX_0_899976223); /* c7-c3 */ + tmp11 = MULTIPLY(tmp11, - FIX_2_562915447); /* -c1-c3 */ + tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* c5-c3 */ + tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */ + + tmp12 += z1; + tmp13 += z1; + + dataptr[DCTSIZE*1] = (DCTELEM) + RIGHT_SHIFT(tmp0 + tmp10 + tmp12, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) + RIGHT_SHIFT(tmp1 + tmp11 + tmp13, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*5] = (DCTELEM) + RIGHT_SHIFT(tmp2 + tmp11 + tmp12, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*7] = (DCTELEM) + RIGHT_SHIFT(tmp3 + tmp10 + tmp13, CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + +#ifdef DCT_SCALING_SUPPORTED + + +/* + * Perform the forward DCT on a 7x7 sample block. + */ + +GLOBAL(void) +jpeg_fdct_7x7 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3; + INT32 tmp10, tmp11, tmp12; + INT32 z1, z2, z3; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* cK represents sqrt(2) * cos(K*pi/14). */ + + dataptr = data; + for (ctr = 0; ctr < 7; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[6]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[5]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[4]); + tmp3 = GETJSAMPLE(elemptr[3]); + + tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[6]); + tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[5]); + tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[4]); + + z1 = tmp0 + tmp2; + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((z1 + tmp1 + tmp3 - 7 * CENTERJSAMPLE) << PASS1_BITS); + tmp3 += tmp3; + z1 -= tmp3; + z1 -= tmp3; + z1 = MULTIPLY(z1, FIX(0.353553391)); /* (c2+c6-c4)/2 */ + z2 = MULTIPLY(tmp0 - tmp2, FIX(0.920609002)); /* (c2+c4-c6)/2 */ + z3 = MULTIPLY(tmp1 - tmp2, FIX(0.314692123)); /* c6 */ + dataptr[2] = (DCTELEM) DESCALE(z1 + z2 + z3, CONST_BITS-PASS1_BITS); + z1 -= z2; + z2 = MULTIPLY(tmp0 - tmp1, FIX(0.881747734)); /* c4 */ + dataptr[4] = (DCTELEM) + DESCALE(z2 + z3 - MULTIPLY(tmp1 - tmp3, FIX(0.707106781)), /* c2+c6-c4 */ + CONST_BITS-PASS1_BITS); + dataptr[6] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS-PASS1_BITS); + + /* Odd part */ + + tmp1 = MULTIPLY(tmp10 + tmp11, FIX(0.935414347)); /* (c3+c1-c5)/2 */ + tmp2 = MULTIPLY(tmp10 - tmp11, FIX(0.170262339)); /* (c3+c5-c1)/2 */ + tmp0 = tmp1 - tmp2; + tmp1 += tmp2; + tmp2 = MULTIPLY(tmp11 + tmp12, - FIX(1.378756276)); /* -c1 */ + tmp1 += tmp2; + tmp3 = MULTIPLY(tmp10 + tmp12, FIX(0.613604268)); /* c5 */ + tmp0 += tmp3; + tmp2 += tmp3 + MULTIPLY(tmp12, FIX(1.870828693)); /* c3+c1-c5 */ + + dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS-PASS1_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS-PASS1_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS-PASS1_BITS); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. 
+ * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/7)**2 = 64/49, which we fold + * into the constant multipliers: + * cK now represents sqrt(2) * cos(K*pi/14) * 64/49. + */ + + dataptr = data; + for (ctr = 0; ctr < 7; ctr++) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*6]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*5]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*4]; + tmp3 = dataptr[DCTSIZE*3]; + + tmp10 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*6]; + tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*5]; + tmp12 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*4]; + + z1 = tmp0 + tmp2; + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(z1 + tmp1 + tmp3, FIX(1.306122449)), /* 64/49 */ + CONST_BITS+PASS1_BITS); + tmp3 += tmp3; + z1 -= tmp3; + z1 -= tmp3; + z1 = MULTIPLY(z1, FIX(0.461784020)); /* (c2+c6-c4)/2 */ + z2 = MULTIPLY(tmp0 - tmp2, FIX(1.202428084)); /* (c2+c4-c6)/2 */ + z3 = MULTIPLY(tmp1 - tmp2, FIX(0.411026446)); /* c6 */ + dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + z2 + z3, CONST_BITS+PASS1_BITS); + z1 -= z2; + z2 = MULTIPLY(tmp0 - tmp1, FIX(1.151670509)); /* c4 */ + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(z2 + z3 - MULTIPLY(tmp1 - tmp3, FIX(0.923568041)), /* c2+c6-c4 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS+PASS1_BITS); + + /* Odd part */ + + tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.221765677)); /* (c3+c1-c5)/2 */ + tmp2 = MULTIPLY(tmp10 - tmp11, FIX(0.222383464)); /* (c3+c5-c1)/2 */ + tmp0 = tmp1 - tmp2; + tmp1 += tmp2; + tmp2 = MULTIPLY(tmp11 + tmp12, - FIX(1.800824523)); /* -c1 */ + tmp1 += tmp2; + tmp3 = MULTIPLY(tmp10 + tmp12, FIX(0.801442310)); /* c5 */ + tmp0 += tmp3; + tmp2 += tmp3 + MULTIPLY(tmp12, FIX(2.443531355)); /* c3+c1-c5 */ + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 6x6 sample block. + */ + +GLOBAL(void) +jpeg_fdct_6x6 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2; + INT32 tmp10, tmp11, tmp12; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* cK represents sqrt(2) * cos(K*pi/12). 
*/ + + dataptr = data; + for (ctr = 0; ctr < 6; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[5]); + tmp11 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[4]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[3]); + + tmp10 = tmp0 + tmp2; + tmp12 = tmp0 - tmp2; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[5]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[4]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[3]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp11 - 6 * CENTERJSAMPLE) << PASS1_BITS); + dataptr[2] = (DCTELEM) + DESCALE(MULTIPLY(tmp12, FIX(1.224744871)), /* c2 */ + CONST_BITS-PASS1_BITS); + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(0.707106781)), /* c4 */ + CONST_BITS-PASS1_BITS); + + /* Odd part */ + + tmp10 = DESCALE(MULTIPLY(tmp0 + tmp2, FIX(0.366025404)), /* c5 */ + CONST_BITS-PASS1_BITS); + + dataptr[1] = (DCTELEM) (tmp10 + ((tmp0 + tmp1) << PASS1_BITS)); + dataptr[3] = (DCTELEM) ((tmp0 - tmp1 - tmp2) << PASS1_BITS); + dataptr[5] = (DCTELEM) (tmp10 + ((tmp2 - tmp1) << PASS1_BITS)); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/6)**2 = 16/9, which we fold + * into the constant multipliers: + * cK now represents sqrt(2) * cos(K*pi/12) * 16/9. + */ + + dataptr = data; + for (ctr = 0; ctr < 6; ctr++) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*5]; + tmp11 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*4]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3]; + + tmp10 = tmp0 + tmp2; + tmp12 = tmp0 - tmp2; + + tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*5]; + tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*4]; + tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp11, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(MULTIPLY(tmp12, FIX(2.177324216)), /* c2 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(1.257078722)), /* c4 */ + CONST_BITS+PASS1_BITS); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp0 + tmp2, FIX(0.650711829)); /* c5 */ + + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp0 + tmp1, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 - tmp1 - tmp2, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*5] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp2 - tmp1, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 5x5 sample block. + */ + +GLOBAL(void) +jpeg_fdct_5x5 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2; + INT32 tmp10, tmp11; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* We scale the results further by 2 as part of output adaption */ + /* scaling for different DCT size. */ + /* cK represents sqrt(2) * cos(K*pi/10). 
*/ + + dataptr = data; + for (ctr = 0; ctr < 5; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[4]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[3]); + tmp2 = GETJSAMPLE(elemptr[2]); + + tmp10 = tmp0 + tmp1; + tmp11 = tmp0 - tmp1; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[4]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[3]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp2 - 5 * CENTERJSAMPLE) << (PASS1_BITS+1)); + tmp11 = MULTIPLY(tmp11, FIX(0.790569415)); /* (c2+c4)/2 */ + tmp10 -= tmp2 << 2; + tmp10 = MULTIPLY(tmp10, FIX(0.353553391)); /* (c2-c4)/2 */ + dataptr[2] = (DCTELEM) DESCALE(tmp11 + tmp10, CONST_BITS-PASS1_BITS-1); + dataptr[4] = (DCTELEM) DESCALE(tmp11 - tmp10, CONST_BITS-PASS1_BITS-1); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp0 + tmp1, FIX(0.831253876)); /* c3 */ + + dataptr[1] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp0, FIX(0.513743148)), /* c1-c3 */ + CONST_BITS-PASS1_BITS-1); + dataptr[3] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp1, FIX(2.176250899)), /* c1+c3 */ + CONST_BITS-PASS1_BITS-1); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/5)**2 = 64/25, which we partially + * fold into the constant multipliers (other part was done in pass 1): + * cK now represents sqrt(2) * cos(K*pi/10) * 32/25. + */ + + dataptr = data; + for (ctr = 0; ctr < 5; ctr++) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*4]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*3]; + tmp2 = dataptr[DCTSIZE*2]; + + tmp10 = tmp0 + tmp1; + tmp11 = tmp0 - tmp1; + + tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*4]; + tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*3]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp2, FIX(1.28)), /* 32/25 */ + CONST_BITS+PASS1_BITS); + tmp11 = MULTIPLY(tmp11, FIX(1.011928851)); /* (c2+c4)/2 */ + tmp10 -= tmp2 << 2; + tmp10 = MULTIPLY(tmp10, FIX(0.452548340)); /* (c2-c4)/2 */ + dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(tmp11 + tmp10, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp11 - tmp10, CONST_BITS+PASS1_BITS); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp0 + tmp1, FIX(1.064004961)); /* c3 */ + + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp0, FIX(0.657591230)), /* c1-c3 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp1, FIX(2.785601151)), /* c1+c3 */ + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 4x4 sample block. + */ + +GLOBAL(void) +jpeg_fdct_4x4 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1; + INT32 tmp10, tmp11; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* We must also scale the output by (8/4)**2 = 2**2, which we add here. */ + /* cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point FDCT]. 
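The per-size output scaling quoted in the pass-2 comments of the reduced-size FDCTs above is simply (8/N)^2, split between the two passes in whichever way is cheapest:

    7x7: (8/7)^2 = 64/49 = 1.306122449   (folded entirely into the pass-2 constants)
    6x6: (8/6)^2 = 16/9  = 1.777777778   (folded entirely into the pass-2 constants)
    5x5: (8/5)^2 = 64/25 = 2.56          (a factor of 2 taken by the extra left shift in pass 1,
                                          the remaining 32/25 = 1.28 folded into pass 2; 2 * 1.28 = 2.56)

which is why those particular FIX() multipliers appear in the code above.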
*/ + + dataptr = data; + for (ctr = 0; ctr < 4; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[3]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[2]); + + tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[3]); + tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[2]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp0 + tmp1 - 4 * CENTERJSAMPLE) << (PASS1_BITS+2)); + dataptr[2] = (DCTELEM) ((tmp0 - tmp1) << (PASS1_BITS+2)); + + /* Odd part */ + + tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100); /* c6 */ + /* Add fudge factor here for final descale. */ + tmp0 += ONE << (CONST_BITS-PASS1_BITS-3); + + dataptr[1] = (DCTELEM) + RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */ + CONST_BITS-PASS1_BITS-2); + dataptr[3] = (DCTELEM) + RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */ + CONST_BITS-PASS1_BITS-2); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + */ + + dataptr = data; + for (ctr = 0; ctr < 4; ctr++) { + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*3] + (ONE << (PASS1_BITS-1)); + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*2]; + + tmp10 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*3]; + tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*2]; + + dataptr[DCTSIZE*0] = (DCTELEM) RIGHT_SHIFT(tmp0 + tmp1, PASS1_BITS); + dataptr[DCTSIZE*2] = (DCTELEM) RIGHT_SHIFT(tmp0 - tmp1, PASS1_BITS); + + /* Odd part */ + + tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100); /* c6 */ + /* Add fudge factor here for final descale. */ + tmp0 += ONE << (CONST_BITS+PASS1_BITS-1); + + dataptr[DCTSIZE*1] = (DCTELEM) + RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) + RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */ + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 3x3 sample block. + */ + +GLOBAL(void) +jpeg_fdct_3x3 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* We scale the results further by 2**2 as part of output adaption */ + /* scaling for different DCT size. */ + /* cK represents sqrt(2) * cos(K*pi/6). */ + + dataptr = data; + for (ctr = 0; ctr < 3; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[2]); + tmp1 = GETJSAMPLE(elemptr[1]); + + tmp2 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[2]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp0 + tmp1 - 3 * CENTERJSAMPLE) << (PASS1_BITS+2)); + dataptr[2] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 - tmp1 - tmp1, FIX(0.707106781)), /* c2 */ + CONST_BITS-PASS1_BITS-2); + + /* Odd part */ + + dataptr[1] = (DCTELEM) + DESCALE(MULTIPLY(tmp2, FIX(1.224744871)), /* c1 */ + CONST_BITS-PASS1_BITS-2); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. 
+ * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/3)**2 = 64/9, which we partially + * fold into the constant multipliers (other part was done in pass 1): + * cK now represents sqrt(2) * cos(K*pi/6) * 16/9. + */ + + dataptr = data; + for (ctr = 0; ctr < 3; ctr++) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*2]; + tmp1 = dataptr[DCTSIZE*1]; + + tmp2 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*2]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 + tmp1, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 - tmp1 - tmp1, FIX(1.257078722)), /* c2 */ + CONST_BITS+PASS1_BITS); + + /* Odd part */ + + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(MULTIPLY(tmp2, FIX(2.177324216)), /* c1 */ + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 2x2 sample block. + */ + +GLOBAL(void) +jpeg_fdct_2x2 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3; + JSAMPROW elemptr; + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT. */ + + /* Row 0 */ + elemptr = sample_data[0] + start_col; + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[1]); + tmp1 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[1]); + + /* Row 1 */ + elemptr = sample_data[1] + start_col; + + tmp2 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[1]); + tmp3 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[1]); + + /* Pass 2: process columns. + * We leave the results scaled up by an overall factor of 8. + * We must also scale the output by (8/2)**2 = 2**4. + */ + + /* Column 0 */ + /* Apply unsigned->signed conversion */ + data[DCTSIZE*0] = (DCTELEM) ((tmp0 + tmp2 - 4 * CENTERJSAMPLE) << 4); + data[DCTSIZE*1] = (DCTELEM) ((tmp0 - tmp2) << 4); + + /* Column 1 */ + data[DCTSIZE*0+1] = (DCTELEM) ((tmp1 + tmp3) << 4); + data[DCTSIZE*1+1] = (DCTELEM) ((tmp1 - tmp3) << 4); +} + + +/* + * Perform the forward DCT on a 1x1 sample block. + */ + +GLOBAL(void) +jpeg_fdct_1x1 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* We leave the result scaled up by an overall factor of 8. */ + /* We must also scale the output by (8/1)**2 = 2**6. */ + /* Apply unsigned->signed conversion */ + data[0] = (DCTELEM) + ((GETJSAMPLE(sample_data[0][start_col]) - CENTERJSAMPLE) << 6); +} + + +/* + * Perform the forward DCT on a 9x9 sample block. + */ + +GLOBAL(void) +jpeg_fdct_9x9 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4; + INT32 tmp10, tmp11, tmp12, tmp13; + INT32 z1, z2; + DCTELEM workspace[8]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* we scale the results further by 2 as part of output adaption */ + /* scaling for different DCT size. */ + /* cK represents sqrt(2) * cos(K*pi/18). 
*/ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[8]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[7]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[6]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[5]); + tmp4 = GETJSAMPLE(elemptr[4]); + + tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[8]); + tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[7]); + tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[6]); + tmp13 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[5]); + + z1 = tmp0 + tmp2 + tmp3; + z2 = tmp1 + tmp4; + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) ((z1 + z2 - 9 * CENTERJSAMPLE) << 1); + dataptr[6] = (DCTELEM) + DESCALE(MULTIPLY(z1 - z2 - z2, FIX(0.707106781)), /* c6 */ + CONST_BITS-1); + z1 = MULTIPLY(tmp0 - tmp2, FIX(1.328926049)); /* c2 */ + z2 = MULTIPLY(tmp1 - tmp4 - tmp4, FIX(0.707106781)); /* c6 */ + dataptr[2] = (DCTELEM) + DESCALE(MULTIPLY(tmp2 - tmp3, FIX(1.083350441)) /* c4 */ + + z1 + z2, CONST_BITS-1); + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp3 - tmp0, FIX(0.245575608)) /* c8 */ + + z1 - z2, CONST_BITS-1); + + /* Odd part */ + + dataptr[3] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp12 - tmp13, FIX(1.224744871)), /* c3 */ + CONST_BITS-1); + + tmp11 = MULTIPLY(tmp11, FIX(1.224744871)); /* c3 */ + tmp0 = MULTIPLY(tmp10 + tmp12, FIX(0.909038955)); /* c5 */ + tmp1 = MULTIPLY(tmp10 + tmp13, FIX(0.483689525)); /* c7 */ + + dataptr[1] = (DCTELEM) DESCALE(tmp11 + tmp0 + tmp1, CONST_BITS-1); + + tmp2 = MULTIPLY(tmp12 - tmp13, FIX(1.392728481)); /* c1 */ + + dataptr[5] = (DCTELEM) DESCALE(tmp0 - tmp11 - tmp2, CONST_BITS-1); + dataptr[7] = (DCTELEM) DESCALE(tmp1 - tmp11 + tmp2, CONST_BITS-1); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == 9) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We leave the results scaled up by an overall factor of 8. + * We must also scale the output by (8/9)**2 = 64/81, which we partially + * fold into the constant multipliers and final/initial shifting: + * cK now represents sqrt(2) * cos(K*pi/18) * 128/81. 
+ */ + + dataptr = data; + wsptr = workspace; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*0]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*7]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*6]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*5]; + tmp4 = dataptr[DCTSIZE*4]; + + tmp10 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*0]; + tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*7]; + tmp12 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*6]; + tmp13 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*5]; + + z1 = tmp0 + tmp2 + tmp3; + z2 = tmp1 + tmp4; + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(z1 + z2, FIX(1.580246914)), /* 128/81 */ + CONST_BITS+2); + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(MULTIPLY(z1 - z2 - z2, FIX(1.117403309)), /* c6 */ + CONST_BITS+2); + z1 = MULTIPLY(tmp0 - tmp2, FIX(2.100031287)); /* c2 */ + z2 = MULTIPLY(tmp1 - tmp4 - tmp4, FIX(1.117403309)); /* c6 */ + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(MULTIPLY(tmp2 - tmp3, FIX(1.711961190)) /* c4 */ + + z1 + z2, CONST_BITS+2); + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp3 - tmp0, FIX(0.388070096)) /* c8 */ + + z1 - z2, CONST_BITS+2); + + /* Odd part */ + + dataptr[DCTSIZE*3] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp12 - tmp13, FIX(1.935399303)), /* c3 */ + CONST_BITS+2); + + tmp11 = MULTIPLY(tmp11, FIX(1.935399303)); /* c3 */ + tmp0 = MULTIPLY(tmp10 + tmp12, FIX(1.436506004)); /* c5 */ + tmp1 = MULTIPLY(tmp10 + tmp13, FIX(0.764348879)); /* c7 */ + + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(tmp11 + tmp0 + tmp1, CONST_BITS+2); + + tmp2 = MULTIPLY(tmp12 - tmp13, FIX(2.200854883)); /* c1 */ + + dataptr[DCTSIZE*5] = (DCTELEM) + DESCALE(tmp0 - tmp11 - tmp2, CONST_BITS+2); + dataptr[DCTSIZE*7] = (DCTELEM) + DESCALE(tmp1 - tmp11 + tmp2, CONST_BITS+2); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 10x10 sample block. + */ + +GLOBAL(void) +jpeg_fdct_10x10 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14; + DCTELEM workspace[8*2]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* we scale the results further by 2 as part of output adaption */ + /* scaling for different DCT size. */ + /* cK represents sqrt(2) * cos(K*pi/20). 
*/ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[9]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[8]); + tmp12 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[7]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[6]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[5]); + + tmp10 = tmp0 + tmp4; + tmp13 = tmp0 - tmp4; + tmp11 = tmp1 + tmp3; + tmp14 = tmp1 - tmp3; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[9]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[8]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[7]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[6]); + tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[5]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp11 + tmp12 - 10 * CENTERJSAMPLE) << 1); + tmp12 += tmp12; + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.144122806)) - /* c4 */ + MULTIPLY(tmp11 - tmp12, FIX(0.437016024)), /* c8 */ + CONST_BITS-1); + tmp10 = MULTIPLY(tmp13 + tmp14, FIX(0.831253876)); /* c6 */ + dataptr[2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp13, FIX(0.513743148)), /* c2-c6 */ + CONST_BITS-1); + dataptr[6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp14, FIX(2.176250899)), /* c2+c6 */ + CONST_BITS-1); + + /* Odd part */ + + tmp10 = tmp0 + tmp4; + tmp11 = tmp1 - tmp3; + dataptr[5] = (DCTELEM) ((tmp10 - tmp11 - tmp2) << 1); + tmp2 <<= CONST_BITS; + dataptr[1] = (DCTELEM) + DESCALE(MULTIPLY(tmp0, FIX(1.396802247)) + /* c1 */ + MULTIPLY(tmp1, FIX(1.260073511)) + tmp2 + /* c3 */ + MULTIPLY(tmp3, FIX(0.642039522)) + /* c7 */ + MULTIPLY(tmp4, FIX(0.221231742)), /* c9 */ + CONST_BITS-1); + tmp12 = MULTIPLY(tmp0 - tmp4, FIX(0.951056516)) - /* (c3+c7)/2 */ + MULTIPLY(tmp1 + tmp3, FIX(0.587785252)); /* (c1-c9)/2 */ + tmp13 = MULTIPLY(tmp10 + tmp11, FIX(0.309016994)) + /* (c3-c7)/2 */ + (tmp11 << (CONST_BITS - 1)) - tmp2; + dataptr[3] = (DCTELEM) DESCALE(tmp12 + tmp13, CONST_BITS-1); + dataptr[7] = (DCTELEM) DESCALE(tmp12 - tmp13, CONST_BITS-1); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == 10) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We leave the results scaled up by an overall factor of 8. + * We must also scale the output by (8/10)**2 = 16/25, which we partially + * fold into the constant multipliers and final/initial shifting: + * cK now represents sqrt(2) * cos(K*pi/20) * 32/25. 
+ */ + + dataptr = data; + wsptr = workspace; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*1]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*0]; + tmp12 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*7]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*6]; + tmp4 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5]; + + tmp10 = tmp0 + tmp4; + tmp13 = tmp0 - tmp4; + tmp11 = tmp1 + tmp3; + tmp14 = tmp1 - tmp3; + + tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*1]; + tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*0]; + tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*7]; + tmp3 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*6]; + tmp4 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12, FIX(1.28)), /* 32/25 */ + CONST_BITS+2); + tmp12 += tmp12; + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.464477191)) - /* c4 */ + MULTIPLY(tmp11 - tmp12, FIX(0.559380511)), /* c8 */ + CONST_BITS+2); + tmp10 = MULTIPLY(tmp13 + tmp14, FIX(1.064004961)); /* c6 */ + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp13, FIX(0.657591230)), /* c2-c6 */ + CONST_BITS+2); + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp14, FIX(2.785601151)), /* c2+c6 */ + CONST_BITS+2); + + /* Odd part */ + + tmp10 = tmp0 + tmp4; + tmp11 = tmp1 - tmp3; + dataptr[DCTSIZE*5] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp11 - tmp2, FIX(1.28)), /* 32/25 */ + CONST_BITS+2); + tmp2 = MULTIPLY(tmp2, FIX(1.28)); /* 32/25 */ + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(MULTIPLY(tmp0, FIX(1.787906876)) + /* c1 */ + MULTIPLY(tmp1, FIX(1.612894094)) + tmp2 + /* c3 */ + MULTIPLY(tmp3, FIX(0.821810588)) + /* c7 */ + MULTIPLY(tmp4, FIX(0.283176630)), /* c9 */ + CONST_BITS+2); + tmp12 = MULTIPLY(tmp0 - tmp4, FIX(1.217352341)) - /* (c3+c7)/2 */ + MULTIPLY(tmp1 + tmp3, FIX(0.752365123)); /* (c1-c9)/2 */ + tmp13 = MULTIPLY(tmp10 + tmp11, FIX(0.395541753)) + /* (c3-c7)/2 */ + MULTIPLY(tmp11, FIX(0.64)) - tmp2; /* 16/25 */ + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp12 + tmp13, CONST_BITS+2); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp12 - tmp13, CONST_BITS+2); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on an 11x11 sample block. + */ + +GLOBAL(void) +jpeg_fdct_11x11 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14; + INT32 z1, z2, z3; + DCTELEM workspace[8*3]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* we scale the results further by 2 as part of output adaption */ + /* scaling for different DCT size. */ + /* cK represents sqrt(2) * cos(K*pi/22). 
*/ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[10]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[9]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[8]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[7]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[6]); + tmp5 = GETJSAMPLE(elemptr[5]); + + tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[10]); + tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[9]); + tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[8]); + tmp13 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[7]); + tmp14 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[6]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp0 + tmp1 + tmp2 + tmp3 + tmp4 + tmp5 - 11 * CENTERJSAMPLE) << 1); + tmp5 += tmp5; + tmp0 -= tmp5; + tmp1 -= tmp5; + tmp2 -= tmp5; + tmp3 -= tmp5; + tmp4 -= tmp5; + z1 = MULTIPLY(tmp0 + tmp3, FIX(1.356927976)) + /* c2 */ + MULTIPLY(tmp2 + tmp4, FIX(0.201263574)); /* c10 */ + z2 = MULTIPLY(tmp1 - tmp3, FIX(0.926112931)); /* c6 */ + z3 = MULTIPLY(tmp0 - tmp1, FIX(1.189712156)); /* c4 */ + dataptr[2] = (DCTELEM) + DESCALE(z1 + z2 - MULTIPLY(tmp3, FIX(1.018300590)) /* c2+c8-c6 */ + - MULTIPLY(tmp4, FIX(1.390975730)), /* c4+c10 */ + CONST_BITS-1); + dataptr[4] = (DCTELEM) + DESCALE(z2 + z3 + MULTIPLY(tmp1, FIX(0.062335650)) /* c4-c6-c10 */ + - MULTIPLY(tmp2, FIX(1.356927976)) /* c2 */ + + MULTIPLY(tmp4, FIX(0.587485545)), /* c8 */ + CONST_BITS-1); + dataptr[6] = (DCTELEM) + DESCALE(z1 + z3 - MULTIPLY(tmp0, FIX(1.620527200)) /* c2+c4-c6 */ + - MULTIPLY(tmp2, FIX(0.788749120)), /* c8+c10 */ + CONST_BITS-1); + + /* Odd part */ + + tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.286413905)); /* c3 */ + tmp2 = MULTIPLY(tmp10 + tmp12, FIX(1.068791298)); /* c5 */ + tmp3 = MULTIPLY(tmp10 + tmp13, FIX(0.764581576)); /* c7 */ + tmp0 = tmp1 + tmp2 + tmp3 - MULTIPLY(tmp10, FIX(1.719967871)) /* c7+c5+c3-c1 */ + + MULTIPLY(tmp14, FIX(0.398430003)); /* c9 */ + tmp4 = MULTIPLY(tmp11 + tmp12, - FIX(0.764581576)); /* -c7 */ + tmp5 = MULTIPLY(tmp11 + tmp13, - FIX(1.399818907)); /* -c1 */ + tmp1 += tmp4 + tmp5 + MULTIPLY(tmp11, FIX(1.276416582)) /* c9+c7+c1-c3 */ + - MULTIPLY(tmp14, FIX(1.068791298)); /* c5 */ + tmp10 = MULTIPLY(tmp12 + tmp13, FIX(0.398430003)); /* c9 */ + tmp2 += tmp4 + tmp10 - MULTIPLY(tmp12, FIX(1.989053629)) /* c9+c5+c3-c7 */ + + MULTIPLY(tmp14, FIX(1.399818907)); /* c1 */ + tmp3 += tmp5 + tmp10 + MULTIPLY(tmp13, FIX(1.305598626)) /* c1+c5-c9-c7 */ + - MULTIPLY(tmp14, FIX(1.286413905)); /* c3 */ + + dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS-1); + dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS-1); + dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS-1); + dataptr[7] = (DCTELEM) DESCALE(tmp3, CONST_BITS-1); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == 11) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We leave the results scaled up by an overall factor of 8. + * We must also scale the output by (8/11)**2 = 64/121, which we partially + * fold into the constant multipliers and final/initial shifting: + * cK now represents sqrt(2) * cos(K*pi/22) * 128/121. 
+ */ + + dataptr = data; + wsptr = workspace; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*2]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*1]; + tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*0]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*7]; + tmp4 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*6]; + tmp5 = dataptr[DCTSIZE*5]; + + tmp10 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*2]; + tmp11 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*1]; + tmp12 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*0]; + tmp13 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*7]; + tmp14 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*6]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 + tmp1 + tmp2 + tmp3 + tmp4 + tmp5, + FIX(1.057851240)), /* 128/121 */ + CONST_BITS+2); + tmp5 += tmp5; + tmp0 -= tmp5; + tmp1 -= tmp5; + tmp2 -= tmp5; + tmp3 -= tmp5; + tmp4 -= tmp5; + z1 = MULTIPLY(tmp0 + tmp3, FIX(1.435427942)) + /* c2 */ + MULTIPLY(tmp2 + tmp4, FIX(0.212906922)); /* c10 */ + z2 = MULTIPLY(tmp1 - tmp3, FIX(0.979689713)); /* c6 */ + z3 = MULTIPLY(tmp0 - tmp1, FIX(1.258538479)); /* c4 */ + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(z1 + z2 - MULTIPLY(tmp3, FIX(1.077210542)) /* c2+c8-c6 */ + - MULTIPLY(tmp4, FIX(1.471445400)), /* c4+c10 */ + CONST_BITS+2); + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(z2 + z3 + MULTIPLY(tmp1, FIX(0.065941844)) /* c4-c6-c10 */ + - MULTIPLY(tmp2, FIX(1.435427942)) /* c2 */ + + MULTIPLY(tmp4, FIX(0.621472312)), /* c8 */ + CONST_BITS+2); + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(z1 + z3 - MULTIPLY(tmp0, FIX(1.714276708)) /* c2+c4-c6 */ + - MULTIPLY(tmp2, FIX(0.834379234)), /* c8+c10 */ + CONST_BITS+2); + + /* Odd part */ + + tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.360834544)); /* c3 */ + tmp2 = MULTIPLY(tmp10 + tmp12, FIX(1.130622199)); /* c5 */ + tmp3 = MULTIPLY(tmp10 + tmp13, FIX(0.808813568)); /* c7 */ + tmp0 = tmp1 + tmp2 + tmp3 - MULTIPLY(tmp10, FIX(1.819470145)) /* c7+c5+c3-c1 */ + + MULTIPLY(tmp14, FIX(0.421479672)); /* c9 */ + tmp4 = MULTIPLY(tmp11 + tmp12, - FIX(0.808813568)); /* -c7 */ + tmp5 = MULTIPLY(tmp11 + tmp13, - FIX(1.480800167)); /* -c1 */ + tmp1 += tmp4 + tmp5 + MULTIPLY(tmp11, FIX(1.350258864)) /* c9+c7+c1-c3 */ + - MULTIPLY(tmp14, FIX(1.130622199)); /* c5 */ + tmp10 = MULTIPLY(tmp12 + tmp13, FIX(0.421479672)); /* c9 */ + tmp2 += tmp4 + tmp10 - MULTIPLY(tmp12, FIX(2.104122847)) /* c9+c5+c3-c7 */ + + MULTIPLY(tmp14, FIX(1.480800167)); /* c1 */ + tmp3 += tmp5 + tmp10 + MULTIPLY(tmp13, FIX(1.381129125)) /* c1+c5-c9-c7 */ + - MULTIPLY(tmp14, FIX(1.360834544)); /* c3 */ + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+2); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+2); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+2); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp3, CONST_BITS+2); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 12x12 sample block. + */ + +GLOBAL(void) +jpeg_fdct_12x12 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15; + DCTELEM workspace[8*4]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT. */ + /* cK represents sqrt(2) * cos(K*pi/24). 
*/ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[11]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[10]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[9]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[8]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[7]); + tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[6]); + + tmp10 = tmp0 + tmp5; + tmp13 = tmp0 - tmp5; + tmp11 = tmp1 + tmp4; + tmp14 = tmp1 - tmp4; + tmp12 = tmp2 + tmp3; + tmp15 = tmp2 - tmp3; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[11]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[10]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[9]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[8]); + tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[7]); + tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[6]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) (tmp10 + tmp11 + tmp12 - 12 * CENTERJSAMPLE); + dataptr[6] = (DCTELEM) (tmp13 - tmp14 - tmp15); + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.224744871)), /* c4 */ + CONST_BITS); + dataptr[2] = (DCTELEM) + DESCALE(tmp14 - tmp15 + MULTIPLY(tmp13 + tmp15, FIX(1.366025404)), /* c2 */ + CONST_BITS); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp1 + tmp4, FIX_0_541196100); /* c9 */ + tmp14 = tmp10 + MULTIPLY(tmp1, FIX_0_765366865); /* c3-c9 */ + tmp15 = tmp10 - MULTIPLY(tmp4, FIX_1_847759065); /* c3+c9 */ + tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.121971054)); /* c5 */ + tmp13 = MULTIPLY(tmp0 + tmp3, FIX(0.860918669)); /* c7 */ + tmp10 = tmp12 + tmp13 + tmp14 - MULTIPLY(tmp0, FIX(0.580774953)) /* c5+c7-c1 */ + + MULTIPLY(tmp5, FIX(0.184591911)); /* c11 */ + tmp11 = MULTIPLY(tmp2 + tmp3, - FIX(0.184591911)); /* -c11 */ + tmp12 += tmp11 - tmp15 - MULTIPLY(tmp2, FIX(2.339493912)) /* c1+c5-c11 */ + + MULTIPLY(tmp5, FIX(0.860918669)); /* c7 */ + tmp13 += tmp11 - tmp14 + MULTIPLY(tmp3, FIX(0.725788011)) /* c1+c11-c7 */ + - MULTIPLY(tmp5, FIX(1.121971054)); /* c5 */ + tmp11 = tmp15 + MULTIPLY(tmp0 - tmp3, FIX(1.306562965)) /* c3 */ + - MULTIPLY(tmp2 + tmp5, FIX_0_541196100); /* c9 */ + + dataptr[1] = (DCTELEM) DESCALE(tmp10, CONST_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp11, CONST_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp12, CONST_BITS); + dataptr[7] = (DCTELEM) DESCALE(tmp13, CONST_BITS); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == 12) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We leave the results scaled up by an overall factor of 8. + * We must also scale the output by (8/12)**2 = 4/9, which we partially + * fold into the constant multipliers and final shifting: + * cK now represents sqrt(2) * cos(K*pi/24) * 8/9. 
+ */ + + dataptr = data; + wsptr = workspace; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*3]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*2]; + tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*1]; + tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*0]; + tmp4 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*7]; + tmp5 = dataptr[DCTSIZE*5] + dataptr[DCTSIZE*6]; + + tmp10 = tmp0 + tmp5; + tmp13 = tmp0 - tmp5; + tmp11 = tmp1 + tmp4; + tmp14 = tmp1 - tmp4; + tmp12 = tmp2 + tmp3; + tmp15 = tmp2 - tmp3; + + tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*3]; + tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*2]; + tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*1]; + tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*0]; + tmp4 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*7]; + tmp5 = dataptr[DCTSIZE*5] - dataptr[DCTSIZE*6]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12, FIX(0.888888889)), /* 8/9 */ + CONST_BITS+1); + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(MULTIPLY(tmp13 - tmp14 - tmp15, FIX(0.888888889)), /* 8/9 */ + CONST_BITS+1); + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.088662108)), /* c4 */ + CONST_BITS+1); + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(MULTIPLY(tmp14 - tmp15, FIX(0.888888889)) + /* 8/9 */ + MULTIPLY(tmp13 + tmp15, FIX(1.214244803)), /* c2 */ + CONST_BITS+1); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp1 + tmp4, FIX(0.481063200)); /* c9 */ + tmp14 = tmp10 + MULTIPLY(tmp1, FIX(0.680326102)); /* c3-c9 */ + tmp15 = tmp10 - MULTIPLY(tmp4, FIX(1.642452502)); /* c3+c9 */ + tmp12 = MULTIPLY(tmp0 + tmp2, FIX(0.997307603)); /* c5 */ + tmp13 = MULTIPLY(tmp0 + tmp3, FIX(0.765261039)); /* c7 */ + tmp10 = tmp12 + tmp13 + tmp14 - MULTIPLY(tmp0, FIX(0.516244403)) /* c5+c7-c1 */ + + MULTIPLY(tmp5, FIX(0.164081699)); /* c11 */ + tmp11 = MULTIPLY(tmp2 + tmp3, - FIX(0.164081699)); /* -c11 */ + tmp12 += tmp11 - tmp15 - MULTIPLY(tmp2, FIX(2.079550144)) /* c1+c5-c11 */ + + MULTIPLY(tmp5, FIX(0.765261039)); /* c7 */ + tmp13 += tmp11 - tmp14 + MULTIPLY(tmp3, FIX(0.645144899)) /* c1+c11-c7 */ + - MULTIPLY(tmp5, FIX(0.997307603)); /* c5 */ + tmp11 = tmp15 + MULTIPLY(tmp0 - tmp3, FIX(1.161389302)) /* c3 */ + - MULTIPLY(tmp2 + tmp5, FIX(0.481063200)); /* c9 */ + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10, CONST_BITS+1); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp11, CONST_BITS+1); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp12, CONST_BITS+1); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp13, CONST_BITS+1); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 13x13 sample block. + */ + +GLOBAL(void) +jpeg_fdct_13x13 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15; + INT32 z1, z2; + DCTELEM workspace[8*5]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT. */ + /* cK represents sqrt(2) * cos(K*pi/26). 
*/ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[12]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[11]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[10]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[9]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[8]); + tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[7]); + tmp6 = GETJSAMPLE(elemptr[6]); + + tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[12]); + tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[11]); + tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[10]); + tmp13 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[9]); + tmp14 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[8]); + tmp15 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[7]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + (tmp0 + tmp1 + tmp2 + tmp3 + tmp4 + tmp5 + tmp6 - 13 * CENTERJSAMPLE); + tmp6 += tmp6; + tmp0 -= tmp6; + tmp1 -= tmp6; + tmp2 -= tmp6; + tmp3 -= tmp6; + tmp4 -= tmp6; + tmp5 -= tmp6; + dataptr[2] = (DCTELEM) + DESCALE(MULTIPLY(tmp0, FIX(1.373119086)) + /* c2 */ + MULTIPLY(tmp1, FIX(1.058554052)) + /* c6 */ + MULTIPLY(tmp2, FIX(0.501487041)) - /* c10 */ + MULTIPLY(tmp3, FIX(0.170464608)) - /* c12 */ + MULTIPLY(tmp4, FIX(0.803364869)) - /* c8 */ + MULTIPLY(tmp5, FIX(1.252223920)), /* c4 */ + CONST_BITS); + z1 = MULTIPLY(tmp0 - tmp2, FIX(1.155388986)) - /* (c4+c6)/2 */ + MULTIPLY(tmp3 - tmp4, FIX(0.435816023)) - /* (c2-c10)/2 */ + MULTIPLY(tmp1 - tmp5, FIX(0.316450131)); /* (c8-c12)/2 */ + z2 = MULTIPLY(tmp0 + tmp2, FIX(0.096834934)) - /* (c4-c6)/2 */ + MULTIPLY(tmp3 + tmp4, FIX(0.937303064)) + /* (c2+c10)/2 */ + MULTIPLY(tmp1 + tmp5, FIX(0.486914739)); /* (c8+c12)/2 */ + + dataptr[4] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS); + dataptr[6] = (DCTELEM) DESCALE(z1 - z2, CONST_BITS); + + /* Odd part */ + + tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.322312651)); /* c3 */ + tmp2 = MULTIPLY(tmp10 + tmp12, FIX(1.163874945)); /* c5 */ + tmp3 = MULTIPLY(tmp10 + tmp13, FIX(0.937797057)) + /* c7 */ + MULTIPLY(tmp14 + tmp15, FIX(0.338443458)); /* c11 */ + tmp0 = tmp1 + tmp2 + tmp3 - + MULTIPLY(tmp10, FIX(2.020082300)) + /* c3+c5+c7-c1 */ + MULTIPLY(tmp14, FIX(0.318774355)); /* c9-c11 */ + tmp4 = MULTIPLY(tmp14 - tmp15, FIX(0.937797057)) - /* c7 */ + MULTIPLY(tmp11 + tmp12, FIX(0.338443458)); /* c11 */ + tmp5 = MULTIPLY(tmp11 + tmp13, - FIX(1.163874945)); /* -c5 */ + tmp1 += tmp4 + tmp5 + + MULTIPLY(tmp11, FIX(0.837223564)) - /* c5+c9+c11-c3 */ + MULTIPLY(tmp14, FIX(2.341699410)); /* c1+c7 */ + tmp6 = MULTIPLY(tmp12 + tmp13, - FIX(0.657217813)); /* -c9 */ + tmp2 += tmp4 + tmp6 - + MULTIPLY(tmp12, FIX(1.572116027)) + /* c1+c5-c9-c11 */ + MULTIPLY(tmp15, FIX(2.260109708)); /* c3+c7 */ + tmp3 += tmp5 + tmp6 + + MULTIPLY(tmp13, FIX(2.205608352)) - /* c3+c5+c9-c7 */ + MULTIPLY(tmp15, FIX(1.742345811)); /* c1+c11 */ + + dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS); + dataptr[7] = (DCTELEM) DESCALE(tmp3, CONST_BITS); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == 13) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We leave the results scaled up by an overall factor of 8. 
+ * We must also scale the output by (8/13)**2 = 64/169, which we partially + * fold into the constant multipliers and final shifting: + * cK now represents sqrt(2) * cos(K*pi/26) * 128/169. + */ + + dataptr = data; + wsptr = workspace; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*4]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*3]; + tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*2]; + tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*1]; + tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*0]; + tmp5 = dataptr[DCTSIZE*5] + dataptr[DCTSIZE*7]; + tmp6 = dataptr[DCTSIZE*6]; + + tmp10 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*4]; + tmp11 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*3]; + tmp12 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*2]; + tmp13 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*1]; + tmp14 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*0]; + tmp15 = dataptr[DCTSIZE*5] - dataptr[DCTSIZE*7]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 + tmp1 + tmp2 + tmp3 + tmp4 + tmp5 + tmp6, + FIX(0.757396450)), /* 128/169 */ + CONST_BITS+1); + tmp6 += tmp6; + tmp0 -= tmp6; + tmp1 -= tmp6; + tmp2 -= tmp6; + tmp3 -= tmp6; + tmp4 -= tmp6; + tmp5 -= tmp6; + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(MULTIPLY(tmp0, FIX(1.039995521)) + /* c2 */ + MULTIPLY(tmp1, FIX(0.801745081)) + /* c6 */ + MULTIPLY(tmp2, FIX(0.379824504)) - /* c10 */ + MULTIPLY(tmp3, FIX(0.129109289)) - /* c12 */ + MULTIPLY(tmp4, FIX(0.608465700)) - /* c8 */ + MULTIPLY(tmp5, FIX(0.948429952)), /* c4 */ + CONST_BITS+1); + z1 = MULTIPLY(tmp0 - tmp2, FIX(0.875087516)) - /* (c4+c6)/2 */ + MULTIPLY(tmp3 - tmp4, FIX(0.330085509)) - /* (c2-c10)/2 */ + MULTIPLY(tmp1 - tmp5, FIX(0.239678205)); /* (c8-c12)/2 */ + z2 = MULTIPLY(tmp0 + tmp2, FIX(0.073342435)) - /* (c4-c6)/2 */ + MULTIPLY(tmp3 + tmp4, FIX(0.709910013)) + /* (c2+c10)/2 */ + MULTIPLY(tmp1 + tmp5, FIX(0.368787494)); /* (c8+c12)/2 */ + + dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS+1); + dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 - z2, CONST_BITS+1); + + /* Odd part */ + + tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.001514908)); /* c3 */ + tmp2 = MULTIPLY(tmp10 + tmp12, FIX(0.881514751)); /* c5 */ + tmp3 = MULTIPLY(tmp10 + tmp13, FIX(0.710284161)) + /* c7 */ + MULTIPLY(tmp14 + tmp15, FIX(0.256335874)); /* c11 */ + tmp0 = tmp1 + tmp2 + tmp3 - + MULTIPLY(tmp10, FIX(1.530003162)) + /* c3+c5+c7-c1 */ + MULTIPLY(tmp14, FIX(0.241438564)); /* c9-c11 */ + tmp4 = MULTIPLY(tmp14 - tmp15, FIX(0.710284161)) - /* c7 */ + MULTIPLY(tmp11 + tmp12, FIX(0.256335874)); /* c11 */ + tmp5 = MULTIPLY(tmp11 + tmp13, - FIX(0.881514751)); /* -c5 */ + tmp1 += tmp4 + tmp5 + + MULTIPLY(tmp11, FIX(0.634110155)) - /* c5+c9+c11-c3 */ + MULTIPLY(tmp14, FIX(1.773594819)); /* c1+c7 */ + tmp6 = MULTIPLY(tmp12 + tmp13, - FIX(0.497774438)); /* -c9 */ + tmp2 += tmp4 + tmp6 - + MULTIPLY(tmp12, FIX(1.190715098)) + /* c1+c5-c9-c11 */ + MULTIPLY(tmp15, FIX(1.711799069)); /* c3+c7 */ + tmp3 += tmp5 + tmp6 + + MULTIPLY(tmp13, FIX(1.670519935)) - /* c3+c5+c9-c7 */ + MULTIPLY(tmp15, FIX(1.319646532)); /* c1+c11 */ + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+1); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+1); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+1); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp3, CONST_BITS+1); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 14x14 sample block. 
+ */ + +GLOBAL(void) +jpeg_fdct_14x14 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; + DCTELEM workspace[8*6]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT. */ + /* cK represents sqrt(2) * cos(K*pi/28). */ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[13]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[12]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[11]); + tmp13 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[10]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[9]); + tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[8]); + tmp6 = GETJSAMPLE(elemptr[6]) + GETJSAMPLE(elemptr[7]); + + tmp10 = tmp0 + tmp6; + tmp14 = tmp0 - tmp6; + tmp11 = tmp1 + tmp5; + tmp15 = tmp1 - tmp5; + tmp12 = tmp2 + tmp4; + tmp16 = tmp2 - tmp4; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[13]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[12]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[11]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[10]); + tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[9]); + tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[8]); + tmp6 = GETJSAMPLE(elemptr[6]) - GETJSAMPLE(elemptr[7]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + (tmp10 + tmp11 + tmp12 + tmp13 - 14 * CENTERJSAMPLE); + tmp13 += tmp13; + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.274162392)) + /* c4 */ + MULTIPLY(tmp11 - tmp13, FIX(0.314692123)) - /* c12 */ + MULTIPLY(tmp12 - tmp13, FIX(0.881747734)), /* c8 */ + CONST_BITS); + + tmp10 = MULTIPLY(tmp14 + tmp15, FIX(1.105676686)); /* c6 */ + + dataptr[2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp14, FIX(0.273079590)) /* c2-c6 */ + + MULTIPLY(tmp16, FIX(0.613604268)), /* c10 */ + CONST_BITS); + dataptr[6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp15, FIX(1.719280954)) /* c6+c10 */ + - MULTIPLY(tmp16, FIX(1.378756276)), /* c2 */ + CONST_BITS); + + /* Odd part */ + + tmp10 = tmp1 + tmp2; + tmp11 = tmp5 - tmp4; + dataptr[7] = (DCTELEM) (tmp0 - tmp10 + tmp3 - tmp11 - tmp6); + tmp3 <<= CONST_BITS; + tmp10 = MULTIPLY(tmp10, - FIX(0.158341681)); /* -c13 */ + tmp11 = MULTIPLY(tmp11, FIX(1.405321284)); /* c1 */ + tmp10 += tmp11 - tmp3; + tmp11 = MULTIPLY(tmp0 + tmp2, FIX(1.197448846)) + /* c5 */ + MULTIPLY(tmp4 + tmp6, FIX(0.752406978)); /* c9 */ + dataptr[5] = (DCTELEM) + DESCALE(tmp10 + tmp11 - MULTIPLY(tmp2, FIX(2.373959773)) /* c3+c5-c13 */ + + MULTIPLY(tmp4, FIX(1.119999435)), /* c1+c11-c9 */ + CONST_BITS); + tmp12 = MULTIPLY(tmp0 + tmp1, FIX(1.334852607)) + /* c3 */ + MULTIPLY(tmp5 - tmp6, FIX(0.467085129)); /* c11 */ + dataptr[3] = (DCTELEM) + DESCALE(tmp10 + tmp12 - MULTIPLY(tmp1, FIX(0.424103948)) /* c3-c9-c13 */ + - MULTIPLY(tmp5, FIX(3.069855259)), /* c1+c5+c11 */ + CONST_BITS); + dataptr[1] = (DCTELEM) + DESCALE(tmp11 + tmp12 + tmp3 + tmp6 - + MULTIPLY(tmp0 + tmp6, FIX(1.126980169)), /* c3+c5-c1 */ + CONST_BITS); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == 14) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. 
+ * We leave the results scaled up by an overall factor of 8. + * We must also scale the output by (8/14)**2 = 16/49, which we partially + * fold into the constant multipliers and final shifting: + * cK now represents sqrt(2) * cos(K*pi/28) * 32/49. + */ + + dataptr = data; + wsptr = workspace; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*5]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*4]; + tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*3]; + tmp13 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*2]; + tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*1]; + tmp5 = dataptr[DCTSIZE*5] + wsptr[DCTSIZE*0]; + tmp6 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7]; + + tmp10 = tmp0 + tmp6; + tmp14 = tmp0 - tmp6; + tmp11 = tmp1 + tmp5; + tmp15 = tmp1 - tmp5; + tmp12 = tmp2 + tmp4; + tmp16 = tmp2 - tmp4; + + tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*5]; + tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*4]; + tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*3]; + tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*2]; + tmp4 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*1]; + tmp5 = dataptr[DCTSIZE*5] - wsptr[DCTSIZE*0]; + tmp6 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12 + tmp13, + FIX(0.653061224)), /* 32/49 */ + CONST_BITS+1); + tmp13 += tmp13; + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp13, FIX(0.832106052)) + /* c4 */ + MULTIPLY(tmp11 - tmp13, FIX(0.205513223)) - /* c12 */ + MULTIPLY(tmp12 - tmp13, FIX(0.575835255)), /* c8 */ + CONST_BITS+1); + + tmp10 = MULTIPLY(tmp14 + tmp15, FIX(0.722074570)); /* c6 */ + + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp14, FIX(0.178337691)) /* c2-c6 */ + + MULTIPLY(tmp16, FIX(0.400721155)), /* c10 */ + CONST_BITS+1); + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp15, FIX(1.122795725)) /* c6+c10 */ + - MULTIPLY(tmp16, FIX(0.900412262)), /* c2 */ + CONST_BITS+1); + + /* Odd part */ + + tmp10 = tmp1 + tmp2; + tmp11 = tmp5 - tmp4; + dataptr[DCTSIZE*7] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 - tmp10 + tmp3 - tmp11 - tmp6, + FIX(0.653061224)), /* 32/49 */ + CONST_BITS+1); + tmp3 = MULTIPLY(tmp3 , FIX(0.653061224)); /* 32/49 */ + tmp10 = MULTIPLY(tmp10, - FIX(0.103406812)); /* -c13 */ + tmp11 = MULTIPLY(tmp11, FIX(0.917760839)); /* c1 */ + tmp10 += tmp11 - tmp3; + tmp11 = MULTIPLY(tmp0 + tmp2, FIX(0.782007410)) + /* c5 */ + MULTIPLY(tmp4 + tmp6, FIX(0.491367823)); /* c9 */ + dataptr[DCTSIZE*5] = (DCTELEM) + DESCALE(tmp10 + tmp11 - MULTIPLY(tmp2, FIX(1.550341076)) /* c3+c5-c13 */ + + MULTIPLY(tmp4, FIX(0.731428202)), /* c1+c11-c9 */ + CONST_BITS+1); + tmp12 = MULTIPLY(tmp0 + tmp1, FIX(0.871740478)) + /* c3 */ + MULTIPLY(tmp5 - tmp6, FIX(0.305035186)); /* c11 */ + dataptr[DCTSIZE*3] = (DCTELEM) + DESCALE(tmp10 + tmp12 - MULTIPLY(tmp1, FIX(0.276965844)) /* c3-c9-c13 */ + - MULTIPLY(tmp5, FIX(2.004803435)), /* c1+c5+c11 */ + CONST_BITS+1); + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(tmp11 + tmp12 + tmp3 + - MULTIPLY(tmp0, FIX(0.735987049)) /* c3+c5-c1 */ + - MULTIPLY(tmp6, FIX(0.082925825)), /* c9-c11-c13 */ + CONST_BITS+1); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 15x15 sample block. 
+ */ + +GLOBAL(void) +jpeg_fdct_15x15 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; + INT32 z1, z2, z3; + DCTELEM workspace[8*7]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT. */ + /* cK represents sqrt(2) * cos(K*pi/30). */ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[14]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[13]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[12]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[11]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[10]); + tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[9]); + tmp6 = GETJSAMPLE(elemptr[6]) + GETJSAMPLE(elemptr[8]); + tmp7 = GETJSAMPLE(elemptr[7]); + + tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[14]); + tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[13]); + tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[12]); + tmp13 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[11]); + tmp14 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[10]); + tmp15 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[9]); + tmp16 = GETJSAMPLE(elemptr[6]) - GETJSAMPLE(elemptr[8]); + + z1 = tmp0 + tmp4 + tmp5; + z2 = tmp1 + tmp3 + tmp6; + z3 = tmp2 + tmp7; + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) (z1 + z2 + z3 - 15 * CENTERJSAMPLE); + z3 += z3; + dataptr[6] = (DCTELEM) + DESCALE(MULTIPLY(z1 - z3, FIX(1.144122806)) - /* c6 */ + MULTIPLY(z2 - z3, FIX(0.437016024)), /* c12 */ + CONST_BITS); + tmp2 += ((tmp1 + tmp4) >> 1) - tmp7 - tmp7; + z1 = MULTIPLY(tmp3 - tmp2, FIX(1.531135173)) - /* c2+c14 */ + MULTIPLY(tmp6 - tmp2, FIX(2.238241955)); /* c4+c8 */ + z2 = MULTIPLY(tmp5 - tmp2, FIX(0.798468008)) - /* c8-c14 */ + MULTIPLY(tmp0 - tmp2, FIX(0.091361227)); /* c2-c4 */ + z3 = MULTIPLY(tmp0 - tmp3, FIX(1.383309603)) + /* c2 */ + MULTIPLY(tmp6 - tmp5, FIX(0.946293579)) + /* c8 */ + MULTIPLY(tmp1 - tmp4, FIX(0.790569415)); /* (c6+c12)/2 */ + + dataptr[2] = (DCTELEM) DESCALE(z1 + z3, CONST_BITS); + dataptr[4] = (DCTELEM) DESCALE(z2 + z3, CONST_BITS); + + /* Odd part */ + + tmp2 = MULTIPLY(tmp10 - tmp12 - tmp13 + tmp15 + tmp16, + FIX(1.224744871)); /* c5 */ + tmp1 = MULTIPLY(tmp10 - tmp14 - tmp15, FIX(1.344997024)) + /* c3 */ + MULTIPLY(tmp11 - tmp13 - tmp16, FIX(0.831253876)); /* c9 */ + tmp12 = MULTIPLY(tmp12, FIX(1.224744871)); /* c5 */ + tmp4 = MULTIPLY(tmp10 - tmp16, FIX(1.406466353)) + /* c1 */ + MULTIPLY(tmp11 + tmp14, FIX(1.344997024)) + /* c3 */ + MULTIPLY(tmp13 + tmp15, FIX(0.575212477)); /* c11 */ + tmp0 = MULTIPLY(tmp13, FIX(0.475753014)) - /* c7-c11 */ + MULTIPLY(tmp14, FIX(0.513743148)) + /* c3-c9 */ + MULTIPLY(tmp16, FIX(1.700497885)) + tmp4 + tmp12; /* c1+c13 */ + tmp3 = MULTIPLY(tmp10, - FIX(0.355500862)) - /* -(c1-c7) */ + MULTIPLY(tmp11, FIX(2.176250899)) - /* c3+c9 */ + MULTIPLY(tmp15, FIX(0.869244010)) + tmp4 - tmp12; /* c11+c13 */ + + dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS); + dataptr[7] = (DCTELEM) DESCALE(tmp3, CONST_BITS); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == 15) + break; /* Done. 
*/ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We leave the results scaled up by an overall factor of 8. + * We must also scale the output by (8/15)**2 = 64/225, which we partially + * fold into the constant multipliers and final shifting: + * cK now represents sqrt(2) * cos(K*pi/30) * 256/225. + */ + + dataptr = data; + wsptr = workspace; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*6]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*5]; + tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*4]; + tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*3]; + tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*2]; + tmp5 = dataptr[DCTSIZE*5] + wsptr[DCTSIZE*1]; + tmp6 = dataptr[DCTSIZE*6] + wsptr[DCTSIZE*0]; + tmp7 = dataptr[DCTSIZE*7]; + + tmp10 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*6]; + tmp11 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*5]; + tmp12 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*4]; + tmp13 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*3]; + tmp14 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*2]; + tmp15 = dataptr[DCTSIZE*5] - wsptr[DCTSIZE*1]; + tmp16 = dataptr[DCTSIZE*6] - wsptr[DCTSIZE*0]; + + z1 = tmp0 + tmp4 + tmp5; + z2 = tmp1 + tmp3 + tmp6; + z3 = tmp2 + tmp7; + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(z1 + z2 + z3, FIX(1.137777778)), /* 256/225 */ + CONST_BITS+2); + z3 += z3; + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(MULTIPLY(z1 - z3, FIX(1.301757503)) - /* c6 */ + MULTIPLY(z2 - z3, FIX(0.497227121)), /* c12 */ + CONST_BITS+2); + tmp2 += ((tmp1 + tmp4) >> 1) - tmp7 - tmp7; + z1 = MULTIPLY(tmp3 - tmp2, FIX(1.742091575)) - /* c2+c14 */ + MULTIPLY(tmp6 - tmp2, FIX(2.546621957)); /* c4+c8 */ + z2 = MULTIPLY(tmp5 - tmp2, FIX(0.908479156)) - /* c8-c14 */ + MULTIPLY(tmp0 - tmp2, FIX(0.103948774)); /* c2-c4 */ + z3 = MULTIPLY(tmp0 - tmp3, FIX(1.573898926)) + /* c2 */ + MULTIPLY(tmp6 - tmp5, FIX(1.076671805)) + /* c8 */ + MULTIPLY(tmp1 - tmp4, FIX(0.899492312)); /* (c6+c12)/2 */ + + dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + z3, CONST_BITS+2); + dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(z2 + z3, CONST_BITS+2); + + /* Odd part */ + + tmp2 = MULTIPLY(tmp10 - tmp12 - tmp13 + tmp15 + tmp16, + FIX(1.393487498)); /* c5 */ + tmp1 = MULTIPLY(tmp10 - tmp14 - tmp15, FIX(1.530307725)) + /* c3 */ + MULTIPLY(tmp11 - tmp13 - tmp16, FIX(0.945782187)); /* c9 */ + tmp12 = MULTIPLY(tmp12, FIX(1.393487498)); /* c5 */ + tmp4 = MULTIPLY(tmp10 - tmp16, FIX(1.600246161)) + /* c1 */ + MULTIPLY(tmp11 + tmp14, FIX(1.530307725)) + /* c3 */ + MULTIPLY(tmp13 + tmp15, FIX(0.654463974)); /* c11 */ + tmp0 = MULTIPLY(tmp13, FIX(0.541301207)) - /* c7-c11 */ + MULTIPLY(tmp14, FIX(0.584525538)) + /* c3-c9 */ + MULTIPLY(tmp16, FIX(1.934788705)) + tmp4 + tmp12; /* c1+c13 */ + tmp3 = MULTIPLY(tmp10, - FIX(0.404480980)) - /* -(c1-c7) */ + MULTIPLY(tmp11, FIX(2.476089912)) - /* c3+c9 */ + MULTIPLY(tmp15, FIX(0.989006518)) + tmp4 - tmp12; /* c11+c13 */ + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+2); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+2); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+2); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp3, CONST_BITS+2); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 16x16 sample block. 
+ */ + +GLOBAL(void) +jpeg_fdct_16x16 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16, tmp17; + DCTELEM workspace[DCTSIZE2]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* cK represents sqrt(2) * cos(K*pi/32). */ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[15]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[14]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[13]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[12]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[11]); + tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[10]); + tmp6 = GETJSAMPLE(elemptr[6]) + GETJSAMPLE(elemptr[9]); + tmp7 = GETJSAMPLE(elemptr[7]) + GETJSAMPLE(elemptr[8]); + + tmp10 = tmp0 + tmp7; + tmp14 = tmp0 - tmp7; + tmp11 = tmp1 + tmp6; + tmp15 = tmp1 - tmp6; + tmp12 = tmp2 + tmp5; + tmp16 = tmp2 - tmp5; + tmp13 = tmp3 + tmp4; + tmp17 = tmp3 - tmp4; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[15]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[14]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[13]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[12]); + tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[11]); + tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[10]); + tmp6 = GETJSAMPLE(elemptr[6]) - GETJSAMPLE(elemptr[9]); + tmp7 = GETJSAMPLE(elemptr[7]) - GETJSAMPLE(elemptr[8]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp11 + tmp12 + tmp13 - 16 * CENTERJSAMPLE) << PASS1_BITS); + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.306562965)) + /* c4[16] = c2[8] */ + MULTIPLY(tmp11 - tmp12, FIX_0_541196100), /* c12[16] = c6[8] */ + CONST_BITS-PASS1_BITS); + + tmp10 = MULTIPLY(tmp17 - tmp15, FIX(0.275899379)) + /* c14[16] = c7[8] */ + MULTIPLY(tmp14 - tmp16, FIX(1.387039845)); /* c2[16] = c1[8] */ + + dataptr[2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp15, FIX(1.451774982)) /* c6+c14 */ + + MULTIPLY(tmp16, FIX(2.172734804)), /* c2+c10 */ + CONST_BITS-PASS1_BITS); + dataptr[6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp14, FIX(0.211164243)) /* c2-c6 */ + - MULTIPLY(tmp17, FIX(1.061594338)), /* c10+c14 */ + CONST_BITS-PASS1_BITS); + + /* Odd part */ + + tmp11 = MULTIPLY(tmp0 + tmp1, FIX(1.353318001)) + /* c3 */ + MULTIPLY(tmp6 - tmp7, FIX(0.410524528)); /* c13 */ + tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.247225013)) + /* c5 */ + MULTIPLY(tmp5 + tmp7, FIX(0.666655658)); /* c11 */ + tmp13 = MULTIPLY(tmp0 + tmp3, FIX(1.093201867)) + /* c7 */ + MULTIPLY(tmp4 - tmp7, FIX(0.897167586)); /* c9 */ + tmp14 = MULTIPLY(tmp1 + tmp2, FIX(0.138617169)) + /* c15 */ + MULTIPLY(tmp6 - tmp5, FIX(1.407403738)); /* c1 */ + tmp15 = MULTIPLY(tmp1 + tmp3, - FIX(0.666655658)) + /* -c11 */ + MULTIPLY(tmp4 + tmp6, - FIX(1.247225013)); /* -c5 */ + tmp16 = MULTIPLY(tmp2 + tmp3, - FIX(1.353318001)) + /* -c3 */ + MULTIPLY(tmp5 - tmp4, FIX(0.410524528)); /* c13 */ + tmp10 = tmp11 + tmp12 + tmp13 - + MULTIPLY(tmp0, FIX(2.286341144)) + /* c7+c5+c3-c1 */ + MULTIPLY(tmp7, FIX(0.779653625)); /* c15+c13-c11+c9 */ + tmp11 += tmp14 + tmp15 + MULTIPLY(tmp1, FIX(0.071888074)) /* c9-c3-c15+c11 */ + - MULTIPLY(tmp6, 
FIX(1.663905119)); /* c7+c13+c1-c5 */ + tmp12 += tmp14 + tmp16 - MULTIPLY(tmp2, FIX(1.125726048)) /* c7+c5+c15-c3 */ + + MULTIPLY(tmp5, FIX(1.227391138)); /* c9-c11+c1-c13 */ + tmp13 += tmp15 + tmp16 + MULTIPLY(tmp3, FIX(1.065388962)) /* c15+c3+c11-c7 */ + + MULTIPLY(tmp4, FIX(2.167985692)); /* c1+c13+c5-c9 */ + + dataptr[1] = (DCTELEM) DESCALE(tmp10, CONST_BITS-PASS1_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp11, CONST_BITS-PASS1_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp12, CONST_BITS-PASS1_BITS); + dataptr[7] = (DCTELEM) DESCALE(tmp13, CONST_BITS-PASS1_BITS); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == DCTSIZE * 2) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/16)**2 = 1/2**2. + */ + + dataptr = data; + wsptr = workspace; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*4]; + tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*3]; + tmp5 = dataptr[DCTSIZE*5] + wsptr[DCTSIZE*2]; + tmp6 = dataptr[DCTSIZE*6] + wsptr[DCTSIZE*1]; + tmp7 = dataptr[DCTSIZE*7] + wsptr[DCTSIZE*0]; + + tmp10 = tmp0 + tmp7; + tmp14 = tmp0 - tmp7; + tmp11 = tmp1 + tmp6; + tmp15 = tmp1 - tmp6; + tmp12 = tmp2 + tmp5; + tmp16 = tmp2 - tmp5; + tmp13 = tmp3 + tmp4; + tmp17 = tmp3 - tmp4; + + tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*4]; + tmp4 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*3]; + tmp5 = dataptr[DCTSIZE*5] - wsptr[DCTSIZE*2]; + tmp6 = dataptr[DCTSIZE*6] - wsptr[DCTSIZE*1]; + tmp7 = dataptr[DCTSIZE*7] - wsptr[DCTSIZE*0]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(tmp10 + tmp11 + tmp12 + tmp13, PASS1_BITS+2); + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.306562965)) + /* c4[16] = c2[8] */ + MULTIPLY(tmp11 - tmp12, FIX_0_541196100), /* c12[16] = c6[8] */ + CONST_BITS+PASS1_BITS+2); + + tmp10 = MULTIPLY(tmp17 - tmp15, FIX(0.275899379)) + /* c14[16] = c7[8] */ + MULTIPLY(tmp14 - tmp16, FIX(1.387039845)); /* c2[16] = c1[8] */ + + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp15, FIX(1.451774982)) /* c6+c14 */ + + MULTIPLY(tmp16, FIX(2.172734804)), /* c2+10 */ + CONST_BITS+PASS1_BITS+2); + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp14, FIX(0.211164243)) /* c2-c6 */ + - MULTIPLY(tmp17, FIX(1.061594338)), /* c10+c14 */ + CONST_BITS+PASS1_BITS+2); + + /* Odd part */ + + tmp11 = MULTIPLY(tmp0 + tmp1, FIX(1.353318001)) + /* c3 */ + MULTIPLY(tmp6 - tmp7, FIX(0.410524528)); /* c13 */ + tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.247225013)) + /* c5 */ + MULTIPLY(tmp5 + tmp7, FIX(0.666655658)); /* c11 */ + tmp13 = MULTIPLY(tmp0 + tmp3, FIX(1.093201867)) + /* c7 */ + MULTIPLY(tmp4 - tmp7, FIX(0.897167586)); /* c9 */ + tmp14 = MULTIPLY(tmp1 + tmp2, FIX(0.138617169)) + /* c15 */ + MULTIPLY(tmp6 - tmp5, FIX(1.407403738)); /* c1 */ + tmp15 = MULTIPLY(tmp1 + tmp3, - FIX(0.666655658)) + /* -c11 */ + MULTIPLY(tmp4 + tmp6, - FIX(1.247225013)); /* -c5 */ + tmp16 = MULTIPLY(tmp2 + tmp3, - FIX(1.353318001)) + /* -c3 */ + MULTIPLY(tmp5 - tmp4, FIX(0.410524528)); /* c13 */ + 
tmp10 = tmp11 + tmp12 + tmp13 - + MULTIPLY(tmp0, FIX(2.286341144)) + /* c7+c5+c3-c1 */ + MULTIPLY(tmp7, FIX(0.779653625)); /* c15+c13-c11+c9 */ + tmp11 += tmp14 + tmp15 + MULTIPLY(tmp1, FIX(0.071888074)) /* c9-c3-c15+c11 */ + - MULTIPLY(tmp6, FIX(1.663905119)); /* c7+c13+c1-c5 */ + tmp12 += tmp14 + tmp16 - MULTIPLY(tmp2, FIX(1.125726048)) /* c7+c5+c15-c3 */ + + MULTIPLY(tmp5, FIX(1.227391138)); /* c9-c11+c1-c13 */ + tmp13 += tmp15 + tmp16 + MULTIPLY(tmp3, FIX(1.065388962)) /* c15+c3+c11-c7 */ + + MULTIPLY(tmp4, FIX(2.167985692)); /* c1+c13+c5-c9 */ + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10, CONST_BITS+PASS1_BITS+2); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp11, CONST_BITS+PASS1_BITS+2); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp12, CONST_BITS+PASS1_BITS+2); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp13, CONST_BITS+PASS1_BITS+2); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 16x8 sample block. + * + * 16-point FDCT in pass 1 (rows), 8-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_16x8 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16, tmp17; + INT32 z1; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* 16-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/32). */ + + dataptr = data; + ctr = 0; + for (ctr = 0; ctr < DCTSIZE; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[15]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[14]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[13]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[12]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[11]); + tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[10]); + tmp6 = GETJSAMPLE(elemptr[6]) + GETJSAMPLE(elemptr[9]); + tmp7 = GETJSAMPLE(elemptr[7]) + GETJSAMPLE(elemptr[8]); + + tmp10 = tmp0 + tmp7; + tmp14 = tmp0 - tmp7; + tmp11 = tmp1 + tmp6; + tmp15 = tmp1 - tmp6; + tmp12 = tmp2 + tmp5; + tmp16 = tmp2 - tmp5; + tmp13 = tmp3 + tmp4; + tmp17 = tmp3 - tmp4; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[15]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[14]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[13]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[12]); + tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[11]); + tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[10]); + tmp6 = GETJSAMPLE(elemptr[6]) - GETJSAMPLE(elemptr[9]); + tmp7 = GETJSAMPLE(elemptr[7]) - GETJSAMPLE(elemptr[8]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp11 + tmp12 + tmp13 - 16 * CENTERJSAMPLE) << PASS1_BITS); + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.306562965)) + /* c4[16] = c2[8] */ + MULTIPLY(tmp11 - tmp12, FIX_0_541196100), /* c12[16] = c6[8] */ + CONST_BITS-PASS1_BITS); + + tmp10 = MULTIPLY(tmp17 - tmp15, FIX(0.275899379)) + /* c14[16] = c7[8] */ + MULTIPLY(tmp14 - tmp16, FIX(1.387039845)); /* c2[16] = c1[8] */ + + dataptr[2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp15, FIX(1.451774982)) /* c6+c14 */ + + MULTIPLY(tmp16, FIX(2.172734804)), /* c2+c10 */ + CONST_BITS-PASS1_BITS); + dataptr[6] = (DCTELEM) 
+ DESCALE(tmp10 - MULTIPLY(tmp14, FIX(0.211164243)) /* c2-c6 */ + - MULTIPLY(tmp17, FIX(1.061594338)), /* c10+c14 */ + CONST_BITS-PASS1_BITS); + + /* Odd part */ + + tmp11 = MULTIPLY(tmp0 + tmp1, FIX(1.353318001)) + /* c3 */ + MULTIPLY(tmp6 - tmp7, FIX(0.410524528)); /* c13 */ + tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.247225013)) + /* c5 */ + MULTIPLY(tmp5 + tmp7, FIX(0.666655658)); /* c11 */ + tmp13 = MULTIPLY(tmp0 + tmp3, FIX(1.093201867)) + /* c7 */ + MULTIPLY(tmp4 - tmp7, FIX(0.897167586)); /* c9 */ + tmp14 = MULTIPLY(tmp1 + tmp2, FIX(0.138617169)) + /* c15 */ + MULTIPLY(tmp6 - tmp5, FIX(1.407403738)); /* c1 */ + tmp15 = MULTIPLY(tmp1 + tmp3, - FIX(0.666655658)) + /* -c11 */ + MULTIPLY(tmp4 + tmp6, - FIX(1.247225013)); /* -c5 */ + tmp16 = MULTIPLY(tmp2 + tmp3, - FIX(1.353318001)) + /* -c3 */ + MULTIPLY(tmp5 - tmp4, FIX(0.410524528)); /* c13 */ + tmp10 = tmp11 + tmp12 + tmp13 - + MULTIPLY(tmp0, FIX(2.286341144)) + /* c7+c5+c3-c1 */ + MULTIPLY(tmp7, FIX(0.779653625)); /* c15+c13-c11+c9 */ + tmp11 += tmp14 + tmp15 + MULTIPLY(tmp1, FIX(0.071888074)) /* c9-c3-c15+c11 */ + - MULTIPLY(tmp6, FIX(1.663905119)); /* c7+c13+c1-c5 */ + tmp12 += tmp14 + tmp16 - MULTIPLY(tmp2, FIX(1.125726048)) /* c7+c5+c15-c3 */ + + MULTIPLY(tmp5, FIX(1.227391138)); /* c9-c11+c1-c13 */ + tmp13 += tmp15 + tmp16 + MULTIPLY(tmp3, FIX(1.065388962)) /* c15+c3+c11-c7 */ + + MULTIPLY(tmp4, FIX(2.167985692)); /* c1+c13+c5-c9 */ + + dataptr[1] = (DCTELEM) DESCALE(tmp10, CONST_BITS-PASS1_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp11, CONST_BITS-PASS1_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp12, CONST_BITS-PASS1_BITS); + dataptr[7] = (DCTELEM) DESCALE(tmp13, CONST_BITS-PASS1_BITS); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by 8/16 = 1/2. + */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". + */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; + + tmp10 = tmp0 + tmp3; + tmp12 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp13 = tmp1 - tmp2; + + tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; + + dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS+1); + dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS+1); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, FIX_0_765366865), + CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 - MULTIPLY(tmp13, FIX_1_847759065), + CONST_BITS+PASS1_BITS+1); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * 8-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/16). + * i0..i3 in the paper are tmp0..tmp3 here. 
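+ * (The extra "+1" in the shift counts used for the descales throughout
+ * this pass is what applies the 8/16 = 1/2 scale factor noted above.)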
+ */ + + tmp10 = tmp0 + tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp0 + tmp2; + tmp13 = tmp1 + tmp3; + z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */ + + tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */ + tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */ + tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */ + tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */ + tmp10 = MULTIPLY(tmp10, - FIX_0_899976223); /* c7-c3 */ + tmp11 = MULTIPLY(tmp11, - FIX_2_562915447); /* -c1-c3 */ + tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* c5-c3 */ + tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */ + + tmp12 += z1; + tmp13 += z1; + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0 + tmp10 + tmp12, + CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1 + tmp11 + tmp13, + CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2 + tmp11 + tmp12, + CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp3 + tmp10 + tmp13, + CONST_BITS+PASS1_BITS+1); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 14x7 sample block. + * + * 14-point FDCT in pass 1 (rows), 7-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_14x7 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; + INT32 z1, z2, z3; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Zero bottom row of output coefficient block. */ + MEMZERO(&data[DCTSIZE*7], SIZEOF(DCTELEM) * DCTSIZE); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* 14-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/28). 
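+ * For example, c4 = sqrt(2) * cos(4*pi/28) = 1.274162392, which appears
+ * below as FIX(1.274162392).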
*/ + + dataptr = data; + for (ctr = 0; ctr < 7; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[13]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[12]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[11]); + tmp13 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[10]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[9]); + tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[8]); + tmp6 = GETJSAMPLE(elemptr[6]) + GETJSAMPLE(elemptr[7]); + + tmp10 = tmp0 + tmp6; + tmp14 = tmp0 - tmp6; + tmp11 = tmp1 + tmp5; + tmp15 = tmp1 - tmp5; + tmp12 = tmp2 + tmp4; + tmp16 = tmp2 - tmp4; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[13]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[12]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[11]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[10]); + tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[9]); + tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[8]); + tmp6 = GETJSAMPLE(elemptr[6]) - GETJSAMPLE(elemptr[7]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp11 + tmp12 + tmp13 - 14 * CENTERJSAMPLE) << PASS1_BITS); + tmp13 += tmp13; + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.274162392)) + /* c4 */ + MULTIPLY(tmp11 - tmp13, FIX(0.314692123)) - /* c12 */ + MULTIPLY(tmp12 - tmp13, FIX(0.881747734)), /* c8 */ + CONST_BITS-PASS1_BITS); + + tmp10 = MULTIPLY(tmp14 + tmp15, FIX(1.105676686)); /* c6 */ + + dataptr[2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp14, FIX(0.273079590)) /* c2-c6 */ + + MULTIPLY(tmp16, FIX(0.613604268)), /* c10 */ + CONST_BITS-PASS1_BITS); + dataptr[6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp15, FIX(1.719280954)) /* c6+c10 */ + - MULTIPLY(tmp16, FIX(1.378756276)), /* c2 */ + CONST_BITS-PASS1_BITS); + + /* Odd part */ + + tmp10 = tmp1 + tmp2; + tmp11 = tmp5 - tmp4; + dataptr[7] = (DCTELEM) ((tmp0 - tmp10 + tmp3 - tmp11 - tmp6) << PASS1_BITS); + tmp3 <<= CONST_BITS; + tmp10 = MULTIPLY(tmp10, - FIX(0.158341681)); /* -c13 */ + tmp11 = MULTIPLY(tmp11, FIX(1.405321284)); /* c1 */ + tmp10 += tmp11 - tmp3; + tmp11 = MULTIPLY(tmp0 + tmp2, FIX(1.197448846)) + /* c5 */ + MULTIPLY(tmp4 + tmp6, FIX(0.752406978)); /* c9 */ + dataptr[5] = (DCTELEM) + DESCALE(tmp10 + tmp11 - MULTIPLY(tmp2, FIX(2.373959773)) /* c3+c5-c13 */ + + MULTIPLY(tmp4, FIX(1.119999435)), /* c1+c11-c9 */ + CONST_BITS-PASS1_BITS); + tmp12 = MULTIPLY(tmp0 + tmp1, FIX(1.334852607)) + /* c3 */ + MULTIPLY(tmp5 - tmp6, FIX(0.467085129)); /* c11 */ + dataptr[3] = (DCTELEM) + DESCALE(tmp10 + tmp12 - MULTIPLY(tmp1, FIX(0.424103948)) /* c3-c9-c13 */ + - MULTIPLY(tmp5, FIX(3.069855259)), /* c1+c5+c11 */ + CONST_BITS-PASS1_BITS); + dataptr[1] = (DCTELEM) + DESCALE(tmp11 + tmp12 + tmp3 + tmp6 - + MULTIPLY(tmp0 + tmp6, FIX(1.126980169)), /* c3+c5-c1 */ + CONST_BITS-PASS1_BITS); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/14)*(8/7) = 32/49, which we + * partially fold into the constant multipliers and final shifting: + * 7-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/14) * 64/49. 
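+ * The constants below carry the factor 64/49, and the extra "+1" in the
+ * final shift counts supplies the remaining 1/2, giving 32/49 overall.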
+ */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*6]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*5]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*4]; + tmp3 = dataptr[DCTSIZE*3]; + + tmp10 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*6]; + tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*5]; + tmp12 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*4]; + + z1 = tmp0 + tmp2; + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(z1 + tmp1 + tmp3, FIX(1.306122449)), /* 64/49 */ + CONST_BITS+PASS1_BITS+1); + tmp3 += tmp3; + z1 -= tmp3; + z1 -= tmp3; + z1 = MULTIPLY(z1, FIX(0.461784020)); /* (c2+c6-c4)/2 */ + z2 = MULTIPLY(tmp0 - tmp2, FIX(1.202428084)); /* (c2+c4-c6)/2 */ + z3 = MULTIPLY(tmp1 - tmp2, FIX(0.411026446)); /* c6 */ + dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + z2 + z3, CONST_BITS+PASS1_BITS+1); + z1 -= z2; + z2 = MULTIPLY(tmp0 - tmp1, FIX(1.151670509)); /* c4 */ + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(z2 + z3 - MULTIPLY(tmp1 - tmp3, FIX(0.923568041)), /* c2+c6-c4 */ + CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS+PASS1_BITS+1); + + /* Odd part */ + + tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.221765677)); /* (c3+c1-c5)/2 */ + tmp2 = MULTIPLY(tmp10 - tmp11, FIX(0.222383464)); /* (c3+c5-c1)/2 */ + tmp0 = tmp1 - tmp2; + tmp1 += tmp2; + tmp2 = MULTIPLY(tmp11 + tmp12, - FIX(1.800824523)); /* -c1 */ + tmp1 += tmp2; + tmp3 = MULTIPLY(tmp10 + tmp12, FIX(0.801442310)); /* c5 */ + tmp0 += tmp3; + tmp2 += tmp3 + MULTIPLY(tmp12, FIX(2.443531355)); /* c3+c1-c5 */ + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+PASS1_BITS+1); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 12x6 sample block. + * + * 12-point FDCT in pass 1 (rows), 6-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_12x6 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Zero 2 bottom rows of output coefficient block. */ + MEMZERO(&data[DCTSIZE*6], SIZEOF(DCTELEM) * DCTSIZE * 2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* 12-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/24). 
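+ * For example, c4 = sqrt(2) * cos(4*pi/24) = sqrt(2) * cos(pi/6) =
+ * 1.224744871, which appears below as FIX(1.224744871).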
*/ + + dataptr = data; + for (ctr = 0; ctr < 6; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[11]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[10]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[9]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[8]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[7]); + tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[6]); + + tmp10 = tmp0 + tmp5; + tmp13 = tmp0 - tmp5; + tmp11 = tmp1 + tmp4; + tmp14 = tmp1 - tmp4; + tmp12 = tmp2 + tmp3; + tmp15 = tmp2 - tmp3; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[11]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[10]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[9]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[8]); + tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[7]); + tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[6]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp11 + tmp12 - 12 * CENTERJSAMPLE) << PASS1_BITS); + dataptr[6] = (DCTELEM) ((tmp13 - tmp14 - tmp15) << PASS1_BITS); + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.224744871)), /* c4 */ + CONST_BITS-PASS1_BITS); + dataptr[2] = (DCTELEM) + DESCALE(tmp14 - tmp15 + MULTIPLY(tmp13 + tmp15, FIX(1.366025404)), /* c2 */ + CONST_BITS-PASS1_BITS); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp1 + tmp4, FIX_0_541196100); /* c9 */ + tmp14 = tmp10 + MULTIPLY(tmp1, FIX_0_765366865); /* c3-c9 */ + tmp15 = tmp10 - MULTIPLY(tmp4, FIX_1_847759065); /* c3+c9 */ + tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.121971054)); /* c5 */ + tmp13 = MULTIPLY(tmp0 + tmp3, FIX(0.860918669)); /* c7 */ + tmp10 = tmp12 + tmp13 + tmp14 - MULTIPLY(tmp0, FIX(0.580774953)) /* c5+c7-c1 */ + + MULTIPLY(tmp5, FIX(0.184591911)); /* c11 */ + tmp11 = MULTIPLY(tmp2 + tmp3, - FIX(0.184591911)); /* -c11 */ + tmp12 += tmp11 - tmp15 - MULTIPLY(tmp2, FIX(2.339493912)) /* c1+c5-c11 */ + + MULTIPLY(tmp5, FIX(0.860918669)); /* c7 */ + tmp13 += tmp11 - tmp14 + MULTIPLY(tmp3, FIX(0.725788011)) /* c1+c11-c7 */ + - MULTIPLY(tmp5, FIX(1.121971054)); /* c5 */ + tmp11 = tmp15 + MULTIPLY(tmp0 - tmp3, FIX(1.306562965)) /* c3 */ + - MULTIPLY(tmp2 + tmp5, FIX_0_541196100); /* c9 */ + + dataptr[1] = (DCTELEM) DESCALE(tmp10, CONST_BITS-PASS1_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp11, CONST_BITS-PASS1_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp12, CONST_BITS-PASS1_BITS); + dataptr[7] = (DCTELEM) DESCALE(tmp13, CONST_BITS-PASS1_BITS); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/12)*(8/6) = 8/9, which we + * partially fold into the constant multipliers and final shifting: + * 6-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/12) * 16/9. 
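+ * Here 16/9 = 1.777777778, and e.g. c2 = sqrt(2) * cos(2*pi/12) * 16/9 =
+ * 2.177324216, matching the FIX() constants below.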
+ */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*5]; + tmp11 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*4]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3]; + + tmp10 = tmp0 + tmp2; + tmp12 = tmp0 - tmp2; + + tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*5]; + tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*4]; + tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp11, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(MULTIPLY(tmp12, FIX(2.177324216)), /* c2 */ + CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(1.257078722)), /* c4 */ + CONST_BITS+PASS1_BITS+1); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp0 + tmp2, FIX(0.650711829)); /* c5 */ + + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp0 + tmp1, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*3] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 - tmp1 - tmp2, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*5] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp2 - tmp1, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS+1); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 10x5 sample block. + * + * 10-point FDCT in pass 1 (rows), 5-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_10x5 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Zero 3 bottom rows of output coefficient block. */ + MEMZERO(&data[DCTSIZE*5], SIZEOF(DCTELEM) * DCTSIZE * 3); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* 10-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/20). 
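+ * The unsigned->signed level shift is folded into the DC term only (the
+ * "- 10 * CENTERJSAMPLE" below); it cancels in every other coefficient,
+ * since those are formed from differences.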
*/ + + dataptr = data; + for (ctr = 0; ctr < 5; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[9]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[8]); + tmp12 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[7]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[6]); + tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[5]); + + tmp10 = tmp0 + tmp4; + tmp13 = tmp0 - tmp4; + tmp11 = tmp1 + tmp3; + tmp14 = tmp1 - tmp3; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[9]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[8]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[7]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[6]); + tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[5]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp11 + tmp12 - 10 * CENTERJSAMPLE) << PASS1_BITS); + tmp12 += tmp12; + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.144122806)) - /* c4 */ + MULTIPLY(tmp11 - tmp12, FIX(0.437016024)), /* c8 */ + CONST_BITS-PASS1_BITS); + tmp10 = MULTIPLY(tmp13 + tmp14, FIX(0.831253876)); /* c6 */ + dataptr[2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp13, FIX(0.513743148)), /* c2-c6 */ + CONST_BITS-PASS1_BITS); + dataptr[6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp14, FIX(2.176250899)), /* c2+c6 */ + CONST_BITS-PASS1_BITS); + + /* Odd part */ + + tmp10 = tmp0 + tmp4; + tmp11 = tmp1 - tmp3; + dataptr[5] = (DCTELEM) ((tmp10 - tmp11 - tmp2) << PASS1_BITS); + tmp2 <<= CONST_BITS; + dataptr[1] = (DCTELEM) + DESCALE(MULTIPLY(tmp0, FIX(1.396802247)) + /* c1 */ + MULTIPLY(tmp1, FIX(1.260073511)) + tmp2 + /* c3 */ + MULTIPLY(tmp3, FIX(0.642039522)) + /* c7 */ + MULTIPLY(tmp4, FIX(0.221231742)), /* c9 */ + CONST_BITS-PASS1_BITS); + tmp12 = MULTIPLY(tmp0 - tmp4, FIX(0.951056516)) - /* (c3+c7)/2 */ + MULTIPLY(tmp1 + tmp3, FIX(0.587785252)); /* (c1-c9)/2 */ + tmp13 = MULTIPLY(tmp10 + tmp11, FIX(0.309016994)) + /* (c3-c7)/2 */ + (tmp11 << (CONST_BITS - 1)) - tmp2; + dataptr[3] = (DCTELEM) DESCALE(tmp12 + tmp13, CONST_BITS-PASS1_BITS); + dataptr[7] = (DCTELEM) DESCALE(tmp12 - tmp13, CONST_BITS-PASS1_BITS); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/10)*(8/5) = 32/25, which we + * fold into the constant multipliers: + * 5-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/10) * 32/25. 
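+ * Here 32/25 = 1.28, the constant used for the DC term below; since the
+ * whole factor is in the multipliers, the descales need no extra shift
+ * beyond CONST_BITS+PASS1_BITS.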
+ */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*4]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*3]; + tmp2 = dataptr[DCTSIZE*2]; + + tmp10 = tmp0 + tmp1; + tmp11 = tmp0 - tmp1; + + tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*4]; + tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*3]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp2, FIX(1.28)), /* 32/25 */ + CONST_BITS+PASS1_BITS); + tmp11 = MULTIPLY(tmp11, FIX(1.011928851)); /* (c2+c4)/2 */ + tmp10 -= tmp2 << 2; + tmp10 = MULTIPLY(tmp10, FIX(0.452548340)); /* (c2-c4)/2 */ + dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(tmp11 + tmp10, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp11 - tmp10, CONST_BITS+PASS1_BITS); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp0 + tmp1, FIX(1.064004961)); /* c3 */ + + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp0, FIX(0.657591230)), /* c1-c3 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp1, FIX(2.785601151)), /* c1+c3 */ + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on an 8x4 sample block. + * + * 8-point FDCT in pass 1 (rows), 4-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_8x4 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3; + INT32 tmp10, tmp11, tmp12, tmp13; + INT32 z1; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Zero 4 bottom rows of output coefficient block. */ + MEMZERO(&data[DCTSIZE*4], SIZEOF(DCTELEM) * DCTSIZE * 4); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* We must also scale the output by 8/4 = 2, which we add here. */ + + dataptr = data; + for (ctr = 0; ctr < 4; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". + */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[7]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[6]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[5]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[4]); + + tmp10 = tmp0 + tmp3; + tmp12 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp13 = tmp1 - tmp2; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[7]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[6]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[5]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[4]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp11 - 8 * CENTERJSAMPLE) << (PASS1_BITS+1)); + dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << (PASS1_BITS+1)); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + /* Add fudge factor here for final descale. */ + z1 += ONE << (CONST_BITS-PASS1_BITS-2); + dataptr[2] = (DCTELEM) RIGHT_SHIFT(z1 + MULTIPLY(tmp12, FIX_0_765366865), + CONST_BITS-PASS1_BITS-1); + dataptr[6] = (DCTELEM) RIGHT_SHIFT(z1 - MULTIPLY(tmp13, FIX_1_847759065), + CONST_BITS-PASS1_BITS-1); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * 8-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/16). + * i0..i3 in the paper are tmp0..tmp3 here. 
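+ * The fudge term ONE << (CONST_BITS-PASS1_BITS-2) added to z1 pre-biases
+ * the sums so that the plain RIGHT_SHIFT by CONST_BITS-PASS1_BITS-1 below
+ * rounds the same way DESCALE would, i.e.
+ *   DESCALE(x, n) == RIGHT_SHIFT(x + (ONE << (n-1)), n).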
+ */ + + tmp10 = tmp0 + tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp0 + tmp2; + tmp13 = tmp1 + tmp3; + z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */ + /* Add fudge factor here for final descale. */ + z1 += ONE << (CONST_BITS-PASS1_BITS-2); + + tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */ + tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */ + tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */ + tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */ + tmp10 = MULTIPLY(tmp10, - FIX_0_899976223); /* c7-c3 */ + tmp11 = MULTIPLY(tmp11, - FIX_2_562915447); /* -c1-c3 */ + tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* c5-c3 */ + tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */ + + tmp12 += z1; + tmp13 += z1; + + dataptr[1] = (DCTELEM) + RIGHT_SHIFT(tmp0 + tmp10 + tmp12, CONST_BITS-PASS1_BITS-1); + dataptr[3] = (DCTELEM) + RIGHT_SHIFT(tmp1 + tmp11 + tmp13, CONST_BITS-PASS1_BITS-1); + dataptr[5] = (DCTELEM) + RIGHT_SHIFT(tmp2 + tmp11 + tmp12, CONST_BITS-PASS1_BITS-1); + dataptr[7] = (DCTELEM) + RIGHT_SHIFT(tmp3 + tmp10 + tmp13, CONST_BITS-PASS1_BITS-1); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * 4-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/16). + */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*3] + (ONE << (PASS1_BITS-1)); + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*2]; + + tmp10 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*3]; + tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*2]; + + dataptr[DCTSIZE*0] = (DCTELEM) RIGHT_SHIFT(tmp0 + tmp1, PASS1_BITS); + dataptr[DCTSIZE*2] = (DCTELEM) RIGHT_SHIFT(tmp0 - tmp1, PASS1_BITS); + + /* Odd part */ + + tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100); /* c6 */ + /* Add fudge factor here for final descale. */ + tmp0 += ONE << (CONST_BITS+PASS1_BITS-1); + + dataptr[DCTSIZE*1] = (DCTELEM) + RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) + RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */ + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 6x3 sample block. + * + * 6-point FDCT in pass 1 (rows), 3-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_6x3 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2; + INT32 tmp10, tmp11, tmp12; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* We scale the results further by 2 as part of output adaption */ + /* scaling for different DCT size. */ + /* 6-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/12). 
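+ * That extra factor of 2, together with the 16/9 folded into the pass 2
+ * constants, makes up the full (8/6)*(8/3) = 32/9 correction.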
*/ + + dataptr = data; + for (ctr = 0; ctr < 3; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[5]); + tmp11 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[4]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[3]); + + tmp10 = tmp0 + tmp2; + tmp12 = tmp0 - tmp2; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[5]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[4]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[3]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp11 - 6 * CENTERJSAMPLE) << (PASS1_BITS+1)); + dataptr[2] = (DCTELEM) + DESCALE(MULTIPLY(tmp12, FIX(1.224744871)), /* c2 */ + CONST_BITS-PASS1_BITS-1); + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(0.707106781)), /* c4 */ + CONST_BITS-PASS1_BITS-1); + + /* Odd part */ + + tmp10 = DESCALE(MULTIPLY(tmp0 + tmp2, FIX(0.366025404)), /* c5 */ + CONST_BITS-PASS1_BITS-1); + + dataptr[1] = (DCTELEM) (tmp10 + ((tmp0 + tmp1) << (PASS1_BITS+1))); + dataptr[3] = (DCTELEM) ((tmp0 - tmp1 - tmp2) << (PASS1_BITS+1)); + dataptr[5] = (DCTELEM) (tmp10 + ((tmp2 - tmp1) << (PASS1_BITS+1))); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/6)*(8/3) = 32/9, which we partially + * fold into the constant multipliers (other part was done in pass 1): + * 3-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/6) * 16/9. + */ + + dataptr = data; + for (ctr = 0; ctr < 6; ctr++) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*2]; + tmp1 = dataptr[DCTSIZE*1]; + + tmp2 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*2]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 + tmp1, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 - tmp1 - tmp1, FIX(1.257078722)), /* c2 */ + CONST_BITS+PASS1_BITS); + + /* Odd part */ + + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(MULTIPLY(tmp2, FIX(2.177324216)), /* c1 */ + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 4x2 sample block. + * + * 4-point FDCT in pass 1 (rows), 2-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_4x2 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1; + INT32 tmp10, tmp11; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* We must also scale the output by (8/4)*(8/2) = 2**3, which we add here. */ + /* 4-point FDCT kernel, */ + /* cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point FDCT]. 
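+ * The whole 2**3 factor is folded into this pass: the even terms are
+ * shifted left by PASS1_BITS+3 and the odd-term shifts drop 3 fewer bits,
+ * so pass 2 only has to remove the PASS1_BITS scaling.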
*/ + + dataptr = data; + for (ctr = 0; ctr < 2; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[3]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[2]); + + tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[3]); + tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[2]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp0 + tmp1 - 4 * CENTERJSAMPLE) << (PASS1_BITS+3)); + dataptr[2] = (DCTELEM) ((tmp0 - tmp1) << (PASS1_BITS+3)); + + /* Odd part */ + + tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100); /* c6 */ + /* Add fudge factor here for final descale. */ + tmp0 += ONE << (CONST_BITS-PASS1_BITS-4); + + dataptr[1] = (DCTELEM) + RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */ + CONST_BITS-PASS1_BITS-3); + dataptr[3] = (DCTELEM) + RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */ + CONST_BITS-PASS1_BITS-3); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + */ + + dataptr = data; + for (ctr = 0; ctr < 4; ctr++) { + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp0 = dataptr[DCTSIZE*0] + (ONE << (PASS1_BITS-1)); + tmp1 = dataptr[DCTSIZE*1]; + + dataptr[DCTSIZE*0] = (DCTELEM) RIGHT_SHIFT(tmp0 + tmp1, PASS1_BITS); + + /* Odd part */ + + dataptr[DCTSIZE*1] = (DCTELEM) RIGHT_SHIFT(tmp0 - tmp1, PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 2x1 sample block. + * + * 2-point FDCT in pass 1 (rows), 1-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_2x1 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1; + JSAMPROW elemptr; + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + elemptr = sample_data[0] + start_col; + + tmp0 = GETJSAMPLE(elemptr[0]); + tmp1 = GETJSAMPLE(elemptr[1]); + + /* We leave the results scaled up by an overall factor of 8. + * We must also scale the output by (8/2)*(8/1) = 2**5. + */ + + /* Even part */ + /* Apply unsigned->signed conversion */ + data[0] = (DCTELEM) ((tmp0 + tmp1 - 2 * CENTERJSAMPLE) << 5); + + /* Odd part */ + data[1] = (DCTELEM) ((tmp0 - tmp1) << 5); +} + + +/* + * Perform the forward DCT on an 8x16 sample block. + * + * 8-point FDCT in pass 1 (rows), 16-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_8x16 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16, tmp17; + INT32 z1; + DCTELEM workspace[DCTSIZE2]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". 
+ */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[7]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[6]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[5]); + tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[4]); + + tmp10 = tmp0 + tmp3; + tmp12 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp13 = tmp1 - tmp2; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[7]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[6]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[5]); + tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[4]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) ((tmp10 + tmp11 - 8 * CENTERJSAMPLE) << PASS1_BITS); + dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, FIX_0_765366865), + CONST_BITS-PASS1_BITS); + dataptr[6] = (DCTELEM) DESCALE(z1 - MULTIPLY(tmp13, FIX_1_847759065), + CONST_BITS-PASS1_BITS); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * 8-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/16). + * i0..i3 in the paper are tmp0..tmp3 here. + */ + + tmp10 = tmp0 + tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp0 + tmp2; + tmp13 = tmp1 + tmp3; + z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */ + + tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */ + tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */ + tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */ + tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */ + tmp10 = MULTIPLY(tmp10, - FIX_0_899976223); /* c7-c3 */ + tmp11 = MULTIPLY(tmp11, - FIX_2_562915447); /* -c1-c3 */ + tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* c5-c3 */ + tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */ + + tmp12 += z1; + tmp13 += z1; + + dataptr[1] = (DCTELEM) DESCALE(tmp0 + tmp10 + tmp12, CONST_BITS-PASS1_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp1 + tmp11 + tmp13, CONST_BITS-PASS1_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp2 + tmp11 + tmp12, CONST_BITS-PASS1_BITS); + dataptr[7] = (DCTELEM) DESCALE(tmp3 + tmp10 + tmp13, CONST_BITS-PASS1_BITS); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == DCTSIZE * 2) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by 8/16 = 1/2. + * 16-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/32). 
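+ * Pass 1 left rows 0-7 in data[] and rows 8-15 in workspace[], so each
+ * column below is gathered from dataptr[] and wsptr[] together.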
+ */ + + dataptr = data; + wsptr = workspace; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*4]; + tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*3]; + tmp5 = dataptr[DCTSIZE*5] + wsptr[DCTSIZE*2]; + tmp6 = dataptr[DCTSIZE*6] + wsptr[DCTSIZE*1]; + tmp7 = dataptr[DCTSIZE*7] + wsptr[DCTSIZE*0]; + + tmp10 = tmp0 + tmp7; + tmp14 = tmp0 - tmp7; + tmp11 = tmp1 + tmp6; + tmp15 = tmp1 - tmp6; + tmp12 = tmp2 + tmp5; + tmp16 = tmp2 - tmp5; + tmp13 = tmp3 + tmp4; + tmp17 = tmp3 - tmp4; + + tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*4]; + tmp4 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*3]; + tmp5 = dataptr[DCTSIZE*5] - wsptr[DCTSIZE*2]; + tmp6 = dataptr[DCTSIZE*6] - wsptr[DCTSIZE*1]; + tmp7 = dataptr[DCTSIZE*7] - wsptr[DCTSIZE*0]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(tmp10 + tmp11 + tmp12 + tmp13, PASS1_BITS+1); + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.306562965)) + /* c4[16] = c2[8] */ + MULTIPLY(tmp11 - tmp12, FIX_0_541196100), /* c12[16] = c6[8] */ + CONST_BITS+PASS1_BITS+1); + + tmp10 = MULTIPLY(tmp17 - tmp15, FIX(0.275899379)) + /* c14[16] = c7[8] */ + MULTIPLY(tmp14 - tmp16, FIX(1.387039845)); /* c2[16] = c1[8] */ + + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp15, FIX(1.451774982)) /* c6+c14 */ + + MULTIPLY(tmp16, FIX(2.172734804)), /* c2+c10 */ + CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp14, FIX(0.211164243)) /* c2-c6 */ + - MULTIPLY(tmp17, FIX(1.061594338)), /* c10+c14 */ + CONST_BITS+PASS1_BITS+1); + + /* Odd part */ + + tmp11 = MULTIPLY(tmp0 + tmp1, FIX(1.353318001)) + /* c3 */ + MULTIPLY(tmp6 - tmp7, FIX(0.410524528)); /* c13 */ + tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.247225013)) + /* c5 */ + MULTIPLY(tmp5 + tmp7, FIX(0.666655658)); /* c11 */ + tmp13 = MULTIPLY(tmp0 + tmp3, FIX(1.093201867)) + /* c7 */ + MULTIPLY(tmp4 - tmp7, FIX(0.897167586)); /* c9 */ + tmp14 = MULTIPLY(tmp1 + tmp2, FIX(0.138617169)) + /* c15 */ + MULTIPLY(tmp6 - tmp5, FIX(1.407403738)); /* c1 */ + tmp15 = MULTIPLY(tmp1 + tmp3, - FIX(0.666655658)) + /* -c11 */ + MULTIPLY(tmp4 + tmp6, - FIX(1.247225013)); /* -c5 */ + tmp16 = MULTIPLY(tmp2 + tmp3, - FIX(1.353318001)) + /* -c3 */ + MULTIPLY(tmp5 - tmp4, FIX(0.410524528)); /* c13 */ + tmp10 = tmp11 + tmp12 + tmp13 - + MULTIPLY(tmp0, FIX(2.286341144)) + /* c7+c5+c3-c1 */ + MULTIPLY(tmp7, FIX(0.779653625)); /* c15+c13-c11+c9 */ + tmp11 += tmp14 + tmp15 + MULTIPLY(tmp1, FIX(0.071888074)) /* c9-c3-c15+c11 */ + - MULTIPLY(tmp6, FIX(1.663905119)); /* c7+c13+c1-c5 */ + tmp12 += tmp14 + tmp16 - MULTIPLY(tmp2, FIX(1.125726048)) /* c7+c5+c15-c3 */ + + MULTIPLY(tmp5, FIX(1.227391138)); /* c9-c11+c1-c13 */ + tmp13 += tmp15 + tmp16 + MULTIPLY(tmp3, FIX(1.065388962)) /* c15+c3+c11-c7 */ + + MULTIPLY(tmp4, FIX(2.167985692)); /* c1+c13+c5-c9 */ + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10, CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp11, CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp12, CONST_BITS+PASS1_BITS+1); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp13, CONST_BITS+PASS1_BITS+1); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform 
the forward DCT on a 7x14 sample block. + * + * 7-point FDCT in pass 1 (rows), 14-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_7x14 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; + INT32 z1, z2, z3; + DCTELEM workspace[8*6]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* 7-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/14). */ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[6]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[5]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[4]); + tmp3 = GETJSAMPLE(elemptr[3]); + + tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[6]); + tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[5]); + tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[4]); + + z1 = tmp0 + tmp2; + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((z1 + tmp1 + tmp3 - 7 * CENTERJSAMPLE) << PASS1_BITS); + tmp3 += tmp3; + z1 -= tmp3; + z1 -= tmp3; + z1 = MULTIPLY(z1, FIX(0.353553391)); /* (c2+c6-c4)/2 */ + z2 = MULTIPLY(tmp0 - tmp2, FIX(0.920609002)); /* (c2+c4-c6)/2 */ + z3 = MULTIPLY(tmp1 - tmp2, FIX(0.314692123)); /* c6 */ + dataptr[2] = (DCTELEM) DESCALE(z1 + z2 + z3, CONST_BITS-PASS1_BITS); + z1 -= z2; + z2 = MULTIPLY(tmp0 - tmp1, FIX(0.881747734)); /* c4 */ + dataptr[4] = (DCTELEM) + DESCALE(z2 + z3 - MULTIPLY(tmp1 - tmp3, FIX(0.707106781)), /* c2+c6-c4 */ + CONST_BITS-PASS1_BITS); + dataptr[6] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS-PASS1_BITS); + + /* Odd part */ + + tmp1 = MULTIPLY(tmp10 + tmp11, FIX(0.935414347)); /* (c3+c1-c5)/2 */ + tmp2 = MULTIPLY(tmp10 - tmp11, FIX(0.170262339)); /* (c3+c5-c1)/2 */ + tmp0 = tmp1 - tmp2; + tmp1 += tmp2; + tmp2 = MULTIPLY(tmp11 + tmp12, - FIX(1.378756276)); /* -c1 */ + tmp1 += tmp2; + tmp3 = MULTIPLY(tmp10 + tmp12, FIX(0.613604268)); /* c5 */ + tmp0 += tmp3; + tmp2 += tmp3 + MULTIPLY(tmp12, FIX(1.870828693)); /* c3+c1-c5 */ + + dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS-PASS1_BITS); + dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS-PASS1_BITS); + dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS-PASS1_BITS); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == 14) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/7)*(8/14) = 32/49, which we + * fold into the constant multipliers: + * 14-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/28) * 32/49. 
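+ * Here 32/49 = 0.653061224, the constant used for rows 0 and 7 below.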
+ */ + + dataptr = data; + wsptr = workspace; + for (ctr = 0; ctr < 7; ctr++) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*5]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*4]; + tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*3]; + tmp13 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*2]; + tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*1]; + tmp5 = dataptr[DCTSIZE*5] + wsptr[DCTSIZE*0]; + tmp6 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7]; + + tmp10 = tmp0 + tmp6; + tmp14 = tmp0 - tmp6; + tmp11 = tmp1 + tmp5; + tmp15 = tmp1 - tmp5; + tmp12 = tmp2 + tmp4; + tmp16 = tmp2 - tmp4; + + tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*5]; + tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*4]; + tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*3]; + tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*2]; + tmp4 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*1]; + tmp5 = dataptr[DCTSIZE*5] - wsptr[DCTSIZE*0]; + tmp6 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12 + tmp13, + FIX(0.653061224)), /* 32/49 */ + CONST_BITS+PASS1_BITS); + tmp13 += tmp13; + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp13, FIX(0.832106052)) + /* c4 */ + MULTIPLY(tmp11 - tmp13, FIX(0.205513223)) - /* c12 */ + MULTIPLY(tmp12 - tmp13, FIX(0.575835255)), /* c8 */ + CONST_BITS+PASS1_BITS); + + tmp10 = MULTIPLY(tmp14 + tmp15, FIX(0.722074570)); /* c6 */ + + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp14, FIX(0.178337691)) /* c2-c6 */ + + MULTIPLY(tmp16, FIX(0.400721155)), /* c10 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp15, FIX(1.122795725)) /* c6+c10 */ + - MULTIPLY(tmp16, FIX(0.900412262)), /* c2 */ + CONST_BITS+PASS1_BITS); + + /* Odd part */ + + tmp10 = tmp1 + tmp2; + tmp11 = tmp5 - tmp4; + dataptr[DCTSIZE*7] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 - tmp10 + tmp3 - tmp11 - tmp6, + FIX(0.653061224)), /* 32/49 */ + CONST_BITS+PASS1_BITS); + tmp3 = MULTIPLY(tmp3 , FIX(0.653061224)); /* 32/49 */ + tmp10 = MULTIPLY(tmp10, - FIX(0.103406812)); /* -c13 */ + tmp11 = MULTIPLY(tmp11, FIX(0.917760839)); /* c1 */ + tmp10 += tmp11 - tmp3; + tmp11 = MULTIPLY(tmp0 + tmp2, FIX(0.782007410)) + /* c5 */ + MULTIPLY(tmp4 + tmp6, FIX(0.491367823)); /* c9 */ + dataptr[DCTSIZE*5] = (DCTELEM) + DESCALE(tmp10 + tmp11 - MULTIPLY(tmp2, FIX(1.550341076)) /* c3+c5-c13 */ + + MULTIPLY(tmp4, FIX(0.731428202)), /* c1+c11-c9 */ + CONST_BITS+PASS1_BITS); + tmp12 = MULTIPLY(tmp0 + tmp1, FIX(0.871740478)) + /* c3 */ + MULTIPLY(tmp5 - tmp6, FIX(0.305035186)); /* c11 */ + dataptr[DCTSIZE*3] = (DCTELEM) + DESCALE(tmp10 + tmp12 - MULTIPLY(tmp1, FIX(0.276965844)) /* c3-c9-c13 */ + - MULTIPLY(tmp5, FIX(2.004803435)), /* c1+c5+c11 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(tmp11 + tmp12 + tmp3 + - MULTIPLY(tmp0, FIX(0.735987049)) /* c3+c5-c1 */ + - MULTIPLY(tmp6, FIX(0.082925825)), /* c9-c11-c13 */ + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 6x12 sample block. + * + * 6-point FDCT in pass 1 (rows), 12-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_6x12 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15; + DCTELEM workspace[8*4]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. 
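+ * (Pass 1 writes only the first 6 entries of each row, so the block is
+ * cleared up front to leave the remaining coefficients zero.)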
*/ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* 6-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/12). */ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[5]); + tmp11 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[4]); + tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[3]); + + tmp10 = tmp0 + tmp2; + tmp12 = tmp0 - tmp2; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[5]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[4]); + tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[3]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp11 - 6 * CENTERJSAMPLE) << PASS1_BITS); + dataptr[2] = (DCTELEM) + DESCALE(MULTIPLY(tmp12, FIX(1.224744871)), /* c2 */ + CONST_BITS-PASS1_BITS); + dataptr[4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(0.707106781)), /* c4 */ + CONST_BITS-PASS1_BITS); + + /* Odd part */ + + tmp10 = DESCALE(MULTIPLY(tmp0 + tmp2, FIX(0.366025404)), /* c5 */ + CONST_BITS-PASS1_BITS); + + dataptr[1] = (DCTELEM) (tmp10 + ((tmp0 + tmp1) << PASS1_BITS)); + dataptr[3] = (DCTELEM) ((tmp0 - tmp1 - tmp2) << PASS1_BITS); + dataptr[5] = (DCTELEM) (tmp10 + ((tmp2 - tmp1) << PASS1_BITS)); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == 12) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/6)*(8/12) = 8/9, which we + * fold into the constant multipliers: + * 12-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/24) * 8/9. 
+ */ + + dataptr = data; + wsptr = workspace; + for (ctr = 0; ctr < 6; ctr++) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*3]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*2]; + tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*1]; + tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*0]; + tmp4 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*7]; + tmp5 = dataptr[DCTSIZE*5] + dataptr[DCTSIZE*6]; + + tmp10 = tmp0 + tmp5; + tmp13 = tmp0 - tmp5; + tmp11 = tmp1 + tmp4; + tmp14 = tmp1 - tmp4; + tmp12 = tmp2 + tmp3; + tmp15 = tmp2 - tmp3; + + tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*3]; + tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*2]; + tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*1]; + tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*0]; + tmp4 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*7]; + tmp5 = dataptr[DCTSIZE*5] - dataptr[DCTSIZE*6]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12, FIX(0.888888889)), /* 8/9 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(MULTIPLY(tmp13 - tmp14 - tmp15, FIX(0.888888889)), /* 8/9 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.088662108)), /* c4 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(MULTIPLY(tmp14 - tmp15, FIX(0.888888889)) + /* 8/9 */ + MULTIPLY(tmp13 + tmp15, FIX(1.214244803)), /* c2 */ + CONST_BITS+PASS1_BITS); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp1 + tmp4, FIX(0.481063200)); /* c9 */ + tmp14 = tmp10 + MULTIPLY(tmp1, FIX(0.680326102)); /* c3-c9 */ + tmp15 = tmp10 - MULTIPLY(tmp4, FIX(1.642452502)); /* c3+c9 */ + tmp12 = MULTIPLY(tmp0 + tmp2, FIX(0.997307603)); /* c5 */ + tmp13 = MULTIPLY(tmp0 + tmp3, FIX(0.765261039)); /* c7 */ + tmp10 = tmp12 + tmp13 + tmp14 - MULTIPLY(tmp0, FIX(0.516244403)) /* c5+c7-c1 */ + + MULTIPLY(tmp5, FIX(0.164081699)); /* c11 */ + tmp11 = MULTIPLY(tmp2 + tmp3, - FIX(0.164081699)); /* -c11 */ + tmp12 += tmp11 - tmp15 - MULTIPLY(tmp2, FIX(2.079550144)) /* c1+c5-c11 */ + + MULTIPLY(tmp5, FIX(0.765261039)); /* c7 */ + tmp13 += tmp11 - tmp14 + MULTIPLY(tmp3, FIX(0.645144899)) /* c1+c11-c7 */ + - MULTIPLY(tmp5, FIX(0.997307603)); /* c5 */ + tmp11 = tmp15 + MULTIPLY(tmp0 - tmp3, FIX(1.161389302)) /* c3 */ + - MULTIPLY(tmp2 + tmp5, FIX(0.481063200)); /* c9 */ + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp11, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp12, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp13, CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 5x10 sample block. + * + * 5-point FDCT in pass 1 (rows), 10-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_5x10 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4; + INT32 tmp10, tmp11, tmp12, tmp13, tmp14; + DCTELEM workspace[8*2]; + DCTELEM *dataptr; + DCTELEM *wsptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* 5-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/10). 
*/ + + dataptr = data; + ctr = 0; + for (;;) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[4]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[3]); + tmp2 = GETJSAMPLE(elemptr[2]); + + tmp10 = tmp0 + tmp1; + tmp11 = tmp0 - tmp1; + + tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[4]); + tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[3]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp10 + tmp2 - 5 * CENTERJSAMPLE) << PASS1_BITS); + tmp11 = MULTIPLY(tmp11, FIX(0.790569415)); /* (c2+c4)/2 */ + tmp10 -= tmp2 << 2; + tmp10 = MULTIPLY(tmp10, FIX(0.353553391)); /* (c2-c4)/2 */ + dataptr[2] = (DCTELEM) DESCALE(tmp11 + tmp10, CONST_BITS-PASS1_BITS); + dataptr[4] = (DCTELEM) DESCALE(tmp11 - tmp10, CONST_BITS-PASS1_BITS); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp0 + tmp1, FIX(0.831253876)); /* c3 */ + + dataptr[1] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp0, FIX(0.513743148)), /* c1-c3 */ + CONST_BITS-PASS1_BITS); + dataptr[3] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp1, FIX(2.176250899)), /* c1+c3 */ + CONST_BITS-PASS1_BITS); + + ctr++; + + if (ctr != DCTSIZE) { + if (ctr == 10) + break; /* Done. */ + dataptr += DCTSIZE; /* advance pointer to next row */ + } else + dataptr = workspace; /* switch pointer to extended workspace */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/5)*(8/10) = 32/25, which we + * fold into the constant multipliers: + * 10-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/20) * 32/25. + */ + + dataptr = data; + wsptr = workspace; + for (ctr = 0; ctr < 5; ctr++) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*1]; + tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*0]; + tmp12 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*7]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*6]; + tmp4 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5]; + + tmp10 = tmp0 + tmp4; + tmp13 = tmp0 - tmp4; + tmp11 = tmp1 + tmp3; + tmp14 = tmp1 - tmp3; + + tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*1]; + tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*0]; + tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*7]; + tmp3 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*6]; + tmp4 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12, FIX(1.28)), /* 32/25 */ + CONST_BITS+PASS1_BITS); + tmp12 += tmp12; + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.464477191)) - /* c4 */ + MULTIPLY(tmp11 - tmp12, FIX(0.559380511)), /* c8 */ + CONST_BITS+PASS1_BITS); + tmp10 = MULTIPLY(tmp13 + tmp14, FIX(1.064004961)); /* c6 */ + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp13, FIX(0.657591230)), /* c2-c6 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*6] = (DCTELEM) + DESCALE(tmp10 - MULTIPLY(tmp14, FIX(2.785601151)), /* c2+c6 */ + CONST_BITS+PASS1_BITS); + + /* Odd part */ + + tmp10 = tmp0 + tmp4; + tmp11 = tmp1 - tmp3; + dataptr[DCTSIZE*5] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp11 - tmp2, FIX(1.28)), /* 32/25 */ + CONST_BITS+PASS1_BITS); + tmp2 = MULTIPLY(tmp2, FIX(1.28)); /* 32/25 */ + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(MULTIPLY(tmp0, FIX(1.787906876)) + /* c1 */ + MULTIPLY(tmp1, FIX(1.612894094)) + tmp2 + /* c3 */ + MULTIPLY(tmp3, FIX(0.821810588)) + /* c7 */ + MULTIPLY(tmp4, FIX(0.283176630)), /* c9 */ + CONST_BITS+PASS1_BITS); + tmp12 = MULTIPLY(tmp0 - tmp4, FIX(1.217352341)) - /* (c3+c7)/2 
*/ + MULTIPLY(tmp1 + tmp3, FIX(0.752365123)); /* (c1-c9)/2 */ + tmp13 = MULTIPLY(tmp10 + tmp11, FIX(0.395541753)) + /* (c3-c7)/2 */ + MULTIPLY(tmp11, FIX(0.64)) - tmp2; /* 16/25 */ + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp12 + tmp13, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp12 - tmp13, CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + wsptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 4x8 sample block. + * + * 4-point FDCT in pass 1 (rows), 8-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_4x8 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3; + INT32 tmp10, tmp11, tmp12, tmp13; + INT32 z1; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* We must also scale the output by 8/4 = 2, which we add here. */ + /* 4-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/16). */ + + dataptr = data; + for (ctr = 0; ctr < DCTSIZE; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[3]); + tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[2]); + + tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[3]); + tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[2]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp0 + tmp1 - 4 * CENTERJSAMPLE) << (PASS1_BITS+1)); + dataptr[2] = (DCTELEM) ((tmp0 - tmp1) << (PASS1_BITS+1)); + + /* Odd part */ + + tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100); /* c6 */ + /* Add fudge factor here for final descale. */ + tmp0 += ONE << (CONST_BITS-PASS1_BITS-2); + + dataptr[1] = (DCTELEM) + RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */ + CONST_BITS-PASS1_BITS-1); + dataptr[3] = (DCTELEM) + RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */ + CONST_BITS-PASS1_BITS-1); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + */ + + dataptr = data; + for (ctr = 0; ctr < 4; ctr++) { + /* Even part per LL&M figure 1 --- note that published figure is faulty; + * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". + */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; + + /* Add fudge factor here for final descale. */ + tmp10 = tmp0 + tmp3 + (ONE << (PASS1_BITS-1)); + tmp12 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp13 = tmp1 - tmp2; + + tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; + tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; + tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; + + dataptr[DCTSIZE*0] = (DCTELEM) RIGHT_SHIFT(tmp10 + tmp11, PASS1_BITS); + dataptr[DCTSIZE*4] = (DCTELEM) RIGHT_SHIFT(tmp10 - tmp11, PASS1_BITS); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + /* Add fudge factor here for final descale. 
*/ + z1 += ONE << (CONST_BITS+PASS1_BITS-1); + dataptr[DCTSIZE*2] = (DCTELEM) + RIGHT_SHIFT(z1 + MULTIPLY(tmp12, FIX_0_765366865), CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*6] = (DCTELEM) + RIGHT_SHIFT(z1 - MULTIPLY(tmp13, FIX_1_847759065), CONST_BITS+PASS1_BITS); + + /* Odd part per figure 8 --- note paper omits factor of sqrt(2). + * 8-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/16). + * i0..i3 in the paper are tmp0..tmp3 here. + */ + + tmp10 = tmp0 + tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp0 + tmp2; + tmp13 = tmp1 + tmp3; + z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */ + /* Add fudge factor here for final descale. */ + z1 += ONE << (CONST_BITS+PASS1_BITS-1); + + tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */ + tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */ + tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */ + tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */ + tmp10 = MULTIPLY(tmp10, - FIX_0_899976223); /* c7-c3 */ + tmp11 = MULTIPLY(tmp11, - FIX_2_562915447); /* -c1-c3 */ + tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* c5-c3 */ + tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */ + + tmp12 += z1; + tmp13 += z1; + + dataptr[DCTSIZE*1] = (DCTELEM) + RIGHT_SHIFT(tmp0 + tmp10 + tmp12, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) + RIGHT_SHIFT(tmp1 + tmp11 + tmp13, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*5] = (DCTELEM) + RIGHT_SHIFT(tmp2 + tmp11 + tmp12, CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*7] = (DCTELEM) + RIGHT_SHIFT(tmp3 + tmp10 + tmp13, CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 3x6 sample block. + * + * 3-point FDCT in pass 1 (rows), 6-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_3x6 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1, tmp2; + INT32 tmp10, tmp11, tmp12; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + /* We scale the results further by 2 as part of output adaption */ + /* scaling for different DCT size. */ + /* 3-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/6). */ + + dataptr = data; + for (ctr = 0; ctr < 6; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[2]); + tmp1 = GETJSAMPLE(elemptr[1]); + + tmp2 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[2]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) + ((tmp0 + tmp1 - 3 * CENTERJSAMPLE) << (PASS1_BITS+1)); + dataptr[2] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 - tmp1 - tmp1, FIX(0.707106781)), /* c2 */ + CONST_BITS-PASS1_BITS-1); + + /* Odd part */ + + dataptr[1] = (DCTELEM) + DESCALE(MULTIPLY(tmp2, FIX(1.224744871)), /* c1 */ + CONST_BITS-PASS1_BITS-1); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + * We must also scale the output by (8/6)*(8/3) = 32/9, which we partially + * fold into the constant multipliers (other part was done in pass 1): + * 6-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/12) * 16/9. 
+ */ + + dataptr = data; + for (ctr = 0; ctr < 3; ctr++) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*5]; + tmp11 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*4]; + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3]; + + tmp10 = tmp0 + tmp2; + tmp12 = tmp0 - tmp2; + + tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*5]; + tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*4]; + tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3]; + + dataptr[DCTSIZE*0] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 + tmp11, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*2] = (DCTELEM) + DESCALE(MULTIPLY(tmp12, FIX(2.177324216)), /* c2 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*4] = (DCTELEM) + DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(1.257078722)), /* c4 */ + CONST_BITS+PASS1_BITS); + + /* Odd part */ + + tmp10 = MULTIPLY(tmp0 + tmp2, FIX(0.650711829)); /* c5 */ + + dataptr[DCTSIZE*1] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp0 + tmp1, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) + DESCALE(MULTIPLY(tmp0 - tmp1 - tmp2, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*5] = (DCTELEM) + DESCALE(tmp10 + MULTIPLY(tmp2 - tmp1, FIX(1.777777778)), /* 16/9 */ + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 2x4 sample block. + * + * 2-point FDCT in pass 1 (rows), 4-point in pass 2 (columns). + */ + +GLOBAL(void) +jpeg_fdct_2x4 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1; + INT32 tmp10, tmp11; + DCTELEM *dataptr; + JSAMPROW elemptr; + int ctr; + SHIFT_TEMPS + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + /* Pass 1: process rows. */ + /* Note results are scaled up by sqrt(8) compared to a true DCT. */ + /* We must also scale the output by (8/2)*(8/4) = 2**3, which we add here. */ + + dataptr = data; + for (ctr = 0; ctr < 4; ctr++) { + elemptr = sample_data[ctr] + start_col; + + /* Even part */ + + tmp0 = GETJSAMPLE(elemptr[0]); + tmp1 = GETJSAMPLE(elemptr[1]); + + /* Apply unsigned->signed conversion */ + dataptr[0] = (DCTELEM) ((tmp0 + tmp1 - 2 * CENTERJSAMPLE) << 3); + + /* Odd part */ + + dataptr[1] = (DCTELEM) ((tmp0 - tmp1) << 3); + + dataptr += DCTSIZE; /* advance pointer to next row */ + } + + /* Pass 2: process columns. + * We leave the results scaled up by an overall factor of 8. + * 4-point FDCT kernel, + * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point FDCT]. + */ + + dataptr = data; + for (ctr = 0; ctr < 2; ctr++) { + /* Even part */ + + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*3]; + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*2]; + + tmp10 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*3]; + tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*2]; + + dataptr[DCTSIZE*0] = (DCTELEM) (tmp0 + tmp1); + dataptr[DCTSIZE*2] = (DCTELEM) (tmp0 - tmp1); + + /* Odd part */ + + tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100); /* c6 */ + /* Add fudge factor here for final descale. */ + tmp0 += ONE << (CONST_BITS-1); + + dataptr[DCTSIZE*1] = (DCTELEM) + RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */ + CONST_BITS); + dataptr[DCTSIZE*3] = (DCTELEM) + RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */ + CONST_BITS); + + dataptr++; /* advance pointer to next column */ + } +} + + +/* + * Perform the forward DCT on a 1x2 sample block. + * + * 1-point FDCT in pass 1 (rows), 2-point in pass 2 (columns). 
+ */ + +GLOBAL(void) +jpeg_fdct_1x2 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col) +{ + INT32 tmp0, tmp1; + + /* Pre-zero output coefficient block. */ + MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2); + + tmp0 = GETJSAMPLE(sample_data[0][start_col]); + tmp1 = GETJSAMPLE(sample_data[1][start_col]); + + /* We leave the results scaled up by an overall factor of 8. + * We must also scale the output by (8/1)*(8/2) = 2**5. + */ + + /* Even part */ + /* Apply unsigned->signed conversion */ + data[DCTSIZE*0] = (DCTELEM) ((tmp0 + tmp1 - 2 * CENTERJSAMPLE) << 5); + + /* Odd part */ + data[DCTSIZE*1] = (DCTELEM) ((tmp0 - tmp1) << 5); +} + +#endif /* DCT_SCALING_SUPPORTED */ +#endif /* DCT_ISLOW_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jidctflt.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jidctflt.c new file mode 100644 index 000000000..23ae9d333 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jidctflt.c @@ -0,0 +1,235 @@ +/* + * jidctflt.c + * + * Copyright (C) 1994-1998, Thomas G. Lane. + * Modified 2010 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains a floating-point implementation of the + * inverse DCT (Discrete Cosine Transform). In the IJG code, this routine + * must also perform dequantization of the input coefficients. + * + * This implementation should be more accurate than either of the integer + * IDCT implementations. However, it may not give the same results on all + * machines because of differences in roundoff behavior. Speed will depend + * on the hardware's floating point capacity. + * + * A 2-D IDCT can be done by 1-D IDCT on each column followed by 1-D IDCT + * on each row (or vice versa, but it's more convenient to emit a row at + * a time). Direct algorithms are also available, but they are much more + * complex and seem not to be any faster when reduced to code. + * + * This implementation is based on Arai, Agui, and Nakajima's algorithm for + * scaled DCT. Their original paper (Trans. IEICE E-71(11):1095) is in + * Japanese, but the algorithm is described in the Pennebaker & Mitchell + * JPEG textbook (see REFERENCES section in file README). The following code + * is based directly on figure 4-8 in P&M. + * While an 8-point DCT cannot be done in less than 11 multiplies, it is + * possible to arrange the computation so that many of the multiplies are + * simple scalings of the final outputs. These multiplies can then be + * folded into the multiplications or divisions by the JPEG quantization + * table entries. The AA&N method leaves only 5 multiplies and 29 adds + * to be done in the DCT itself. + * The primary disadvantage of this method is that with a fixed-point + * implementation, accuracy is lost due to imprecise representation of the + * scaled quantization values. However, that problem does not arise if + * we use floating point arithmetic. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" +#include "jdct.h" /* Private declarations for DCT subsystem */ + +#ifdef DCT_FLOAT_SUPPORTED + + +/* + * This module is specialized to the case DCTSIZE = 8. + */ + +#if DCTSIZE != 8 + Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */ +#endif + + +/* Dequantize a coefficient by multiplying it by the multiplier-table + * entry; produce a float result. 
+ */ + +#define DEQUANTIZE(coef,quantval) (((FAST_FLOAT) (coef)) * (quantval)) + + +/* + * Perform dequantization and inverse DCT on one block of coefficients. + */ + +GLOBAL(void) +jpeg_idct_float (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + FAST_FLOAT tmp10, tmp11, tmp12, tmp13; + FAST_FLOAT z5, z10, z11, z12, z13; + JCOEFPTR inptr; + FLOAT_MULT_TYPE * quantptr; + FAST_FLOAT * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = cinfo->sample_range_limit; + int ctr; + FAST_FLOAT workspace[DCTSIZE2]; /* buffers data between passes */ + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (FLOAT_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = DCTSIZE; ctr > 0; ctr--) { + /* Due to quantization, we will usually find that many of the input + * coefficients are zero, especially the AC terms. We can exploit this + * by short-circuiting the IDCT calculation for any column in which all + * the AC terms are zero. In that case each output is equal to the + * DC coefficient (with scale factor as needed). + * With typical images and quantization tables, half or more of the + * column DCT calculations can be simplified this way. + */ + + if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 && + inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 && + inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 && + inptr[DCTSIZE*7] == 0) { + /* AC terms all zero */ + FAST_FLOAT dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + + wsptr[DCTSIZE*0] = dcval; + wsptr[DCTSIZE*1] = dcval; + wsptr[DCTSIZE*2] = dcval; + wsptr[DCTSIZE*3] = dcval; + wsptr[DCTSIZE*4] = dcval; + wsptr[DCTSIZE*5] = dcval; + wsptr[DCTSIZE*6] = dcval; + wsptr[DCTSIZE*7] = dcval; + + inptr++; /* advance pointers to next column */ + quantptr++; + wsptr++; + continue; + } + + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + tmp3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + tmp10 = tmp0 + tmp2; /* phase 3 */ + tmp11 = tmp0 - tmp2; + + tmp13 = tmp1 + tmp3; /* phases 5-3 */ + tmp12 = (tmp1 - tmp3) * ((FAST_FLOAT) 1.414213562) - tmp13; /* 2*c4 */ + + tmp0 = tmp10 + tmp13; /* phase 2 */ + tmp3 = tmp10 - tmp13; + tmp1 = tmp11 + tmp12; + tmp2 = tmp11 - tmp12; + + /* Odd part */ + + tmp4 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + tmp5 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + tmp6 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + tmp7 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + z13 = tmp6 + tmp5; /* phase 6 */ + z10 = tmp6 - tmp5; + z11 = tmp4 + tmp7; + z12 = tmp4 - tmp7; + + tmp7 = z11 + z13; /* phase 5 */ + tmp11 = (z11 - z13) * ((FAST_FLOAT) 1.414213562); /* 2*c4 */ + + z5 = (z10 + z12) * ((FAST_FLOAT) 1.847759065); /* 2*c2 */ + tmp10 = z5 - z12 * ((FAST_FLOAT) 1.082392200); /* 2*(c2-c6) */ + tmp12 = z5 - z10 * ((FAST_FLOAT) 2.613125930); /* 2*(c2+c6) */ + + tmp6 = tmp12 - tmp7; /* phase 2 */ + tmp5 = tmp11 - tmp6; + tmp4 = tmp10 - tmp5; + + wsptr[DCTSIZE*0] = tmp0 + tmp7; + wsptr[DCTSIZE*7] = tmp0 - tmp7; + wsptr[DCTSIZE*1] = tmp1 + tmp6; + wsptr[DCTSIZE*6] = tmp1 - tmp6; + wsptr[DCTSIZE*2] = tmp2 + tmp5; + wsptr[DCTSIZE*5] = tmp2 - tmp5; + wsptr[DCTSIZE*3] = tmp3 + tmp4; + wsptr[DCTSIZE*4] = tmp3 - tmp4; + + inptr++; /* advance 
pointers to next column */ + quantptr++; + wsptr++; + } + + /* Pass 2: process rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < DCTSIZE; ctr++) { + outptr = output_buf[ctr] + output_col; + /* Rows of zeroes can be exploited in the same way as we did with columns. + * However, the column calculation has created many nonzero AC terms, so + * the simplification applies less often (typically 5% to 10% of the time). + * And testing floats for zero is relatively expensive, so we don't bother. + */ + + /* Even part */ + + /* Apply signed->unsigned and prepare float->int conversion */ + z5 = wsptr[0] + ((FAST_FLOAT) CENTERJSAMPLE + (FAST_FLOAT) 0.5); + tmp10 = z5 + wsptr[4]; + tmp11 = z5 - wsptr[4]; + + tmp13 = wsptr[2] + wsptr[6]; + tmp12 = (wsptr[2] - wsptr[6]) * ((FAST_FLOAT) 1.414213562) - tmp13; + + tmp0 = tmp10 + tmp13; + tmp3 = tmp10 - tmp13; + tmp1 = tmp11 + tmp12; + tmp2 = tmp11 - tmp12; + + /* Odd part */ + + z13 = wsptr[5] + wsptr[3]; + z10 = wsptr[5] - wsptr[3]; + z11 = wsptr[1] + wsptr[7]; + z12 = wsptr[1] - wsptr[7]; + + tmp7 = z11 + z13; + tmp11 = (z11 - z13) * ((FAST_FLOAT) 1.414213562); + + z5 = (z10 + z12) * ((FAST_FLOAT) 1.847759065); /* 2*c2 */ + tmp10 = z5 - z12 * ((FAST_FLOAT) 1.082392200); /* 2*(c2-c6) */ + tmp12 = z5 - z10 * ((FAST_FLOAT) 2.613125930); /* 2*(c2+c6) */ + + tmp6 = tmp12 - tmp7; + tmp5 = tmp11 - tmp6; + tmp4 = tmp10 - tmp5; + + /* Final output stage: float->int conversion and range-limit */ + + outptr[0] = range_limit[((int) (tmp0 + tmp7)) & RANGE_MASK]; + outptr[7] = range_limit[((int) (tmp0 - tmp7)) & RANGE_MASK]; + outptr[1] = range_limit[((int) (tmp1 + tmp6)) & RANGE_MASK]; + outptr[6] = range_limit[((int) (tmp1 - tmp6)) & RANGE_MASK]; + outptr[2] = range_limit[((int) (tmp2 + tmp5)) & RANGE_MASK]; + outptr[5] = range_limit[((int) (tmp2 - tmp5)) & RANGE_MASK]; + outptr[3] = range_limit[((int) (tmp3 + tmp4)) & RANGE_MASK]; + outptr[4] = range_limit[((int) (tmp3 - tmp4)) & RANGE_MASK]; + + wsptr += DCTSIZE; /* advance pointer to next row */ + } +} + +#endif /* DCT_FLOAT_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jidctfst.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jidctfst.c new file mode 100644 index 000000000..dba4216fb --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jidctfst.c @@ -0,0 +1,368 @@ +/* + * jidctfst.c + * + * Copyright (C) 1994-1998, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains a fast, not so accurate integer implementation of the + * inverse DCT (Discrete Cosine Transform). In the IJG code, this routine + * must also perform dequantization of the input coefficients. + * + * A 2-D IDCT can be done by 1-D IDCT on each column followed by 1-D IDCT + * on each row (or vice versa, but it's more convenient to emit a row at + * a time). Direct algorithms are also available, but they are much more + * complex and seem not to be any faster when reduced to code. + * + * This implementation is based on Arai, Agui, and Nakajima's algorithm for + * scaled DCT. Their original paper (Trans. IEICE E-71(11):1095) is in + * Japanese, but the algorithm is described in the Pennebaker & Mitchell + * JPEG textbook (see REFERENCES section in file README). The following code + * is based directly on figure 4-8 in P&M. 
+ * While an 8-point DCT cannot be done in less than 11 multiplies, it is + * possible to arrange the computation so that many of the multiplies are + * simple scalings of the final outputs. These multiplies can then be + * folded into the multiplications or divisions by the JPEG quantization + * table entries. The AA&N method leaves only 5 multiplies and 29 adds + * to be done in the DCT itself. + * The primary disadvantage of this method is that with fixed-point math, + * accuracy is lost due to imprecise representation of the scaled + * quantization values. The smaller the quantization table entry, the less + * precise the scaled value, so this implementation does worse with high- + * quality-setting files than with low-quality ones. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" +#include "jdct.h" /* Private declarations for DCT subsystem */ + +#ifdef DCT_IFAST_SUPPORTED + + +/* + * This module is specialized to the case DCTSIZE = 8. + */ + +#if DCTSIZE != 8 + Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */ +#endif + + +/* Scaling decisions are generally the same as in the LL&M algorithm; + * see jidctint.c for more details. However, we choose to descale + * (right shift) multiplication products as soon as they are formed, + * rather than carrying additional fractional bits into subsequent additions. + * This compromises accuracy slightly, but it lets us save a few shifts. + * More importantly, 16-bit arithmetic is then adequate (for 8-bit samples) + * everywhere except in the multiplications proper; this saves a good deal + * of work on 16-bit-int machines. + * + * The dequantized coefficients are not integers because the AA&N scaling + * factors have been incorporated. We represent them scaled up by PASS1_BITS, + * so that the first and second IDCT rounds have the same input scaling. + * For 8-bit JSAMPLEs, we choose IFAST_SCALE_BITS = PASS1_BITS so as to + * avoid a descaling shift; this compromises accuracy rather drastically + * for small quantization table entries, but it saves a lot of shifts. + * For 12-bit JSAMPLEs, there's no hope of using 16x16 multiplies anyway, + * so we use a much larger scaling factor to preserve accuracy. + * + * A final compromise is to represent the multiplicative constants to only + * 8 fractional bits, rather than 13. This saves some shifting work on some + * machines, and may also reduce the cost of multiplication (since there + * are fewer one-bits in the constants). + */ + +#if BITS_IN_JSAMPLE == 8 +#define CONST_BITS 8 +#define PASS1_BITS 2 +#else +#define CONST_BITS 8 +#define PASS1_BITS 1 /* lose a little precision to avoid overflow */ +#endif + +/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus + * causing a lot of useless floating-point operations at run time. + * To get around this we use the following pre-calculated constants. + * If you change CONST_BITS you may want to add appropriate values. + * (With a reasonable C compiler, you can just rely on the FIX() macro...) 
+ */ + +#if CONST_BITS == 8 +#define FIX_1_082392200 ((INT32) 277) /* FIX(1.082392200) */ +#define FIX_1_414213562 ((INT32) 362) /* FIX(1.414213562) */ +#define FIX_1_847759065 ((INT32) 473) /* FIX(1.847759065) */ +#define FIX_2_613125930 ((INT32) 669) /* FIX(2.613125930) */ +#else +#define FIX_1_082392200 FIX(1.082392200) +#define FIX_1_414213562 FIX(1.414213562) +#define FIX_1_847759065 FIX(1.847759065) +#define FIX_2_613125930 FIX(2.613125930) +#endif + + +/* We can gain a little more speed, with a further compromise in accuracy, + * by omitting the addition in a descaling shift. This yields an incorrectly + * rounded result half the time... + */ + +#ifndef USE_ACCURATE_ROUNDING +#undef DESCALE +#define DESCALE(x,n) RIGHT_SHIFT(x, n) +#endif + + +/* Multiply a DCTELEM variable by an INT32 constant, and immediately + * descale to yield a DCTELEM result. + */ + +#define MULTIPLY(var,const) ((DCTELEM) DESCALE((var) * (const), CONST_BITS)) + + +/* Dequantize a coefficient by multiplying it by the multiplier-table + * entry; produce a DCTELEM result. For 8-bit data a 16x16->16 + * multiplication will do. For 12-bit data, the multiplier table is + * declared INT32, so a 32-bit multiply will be used. + */ + +#if BITS_IN_JSAMPLE == 8 +#define DEQUANTIZE(coef,quantval) (((IFAST_MULT_TYPE) (coef)) * (quantval)) +#else +#define DEQUANTIZE(coef,quantval) \ + DESCALE((coef)*(quantval), IFAST_SCALE_BITS-PASS1_BITS) +#endif + + +/* Like DESCALE, but applies to a DCTELEM and produces an int. + * We assume that int right shift is unsigned if INT32 right shift is. + */ + +#ifdef RIGHT_SHIFT_IS_UNSIGNED +#define ISHIFT_TEMPS DCTELEM ishift_temp; +#if BITS_IN_JSAMPLE == 8 +#define DCTELEMBITS 16 /* DCTELEM may be 16 or 32 bits */ +#else +#define DCTELEMBITS 32 /* DCTELEM must be 32 bits */ +#endif +#define IRIGHT_SHIFT(x,shft) \ + ((ishift_temp = (x)) < 0 ? \ + (ishift_temp >> (shft)) | ((~((DCTELEM) 0)) << (DCTELEMBITS-(shft))) : \ + (ishift_temp >> (shft))) +#else +#define ISHIFT_TEMPS +#define IRIGHT_SHIFT(x,shft) ((x) >> (shft)) +#endif + +#ifdef USE_ACCURATE_ROUNDING +#define IDESCALE(x,n) ((int) IRIGHT_SHIFT((x) + (1 << ((n)-1)), n)) +#else +#define IDESCALE(x,n) ((int) IRIGHT_SHIFT(x, n)) +#endif + + +/* + * Perform dequantization and inverse DCT on one block of coefficients. + */ + +GLOBAL(void) +jpeg_idct_ifast (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + DCTELEM tmp10, tmp11, tmp12, tmp13; + DCTELEM z5, z10, z11, z12, z13; + JCOEFPTR inptr; + IFAST_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[DCTSIZE2]; /* buffers data between passes */ + SHIFT_TEMPS /* for DESCALE */ + ISHIFT_TEMPS /* for IDESCALE */ + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (IFAST_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = DCTSIZE; ctr > 0; ctr--) { + /* Due to quantization, we will usually find that many of the input + * coefficients are zero, especially the AC terms. We can exploit this + * by short-circuiting the IDCT calculation for any column in which all + * the AC terms are zero. In that case each output is equal to the + * DC coefficient (with scale factor as needed). + * With typical images and quantization tables, half or more of the + * column DCT calculations can be simplified this way. 
+ */ + + if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 && + inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 && + inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 && + inptr[DCTSIZE*7] == 0) { + /* AC terms all zero */ + int dcval = (int) DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + + wsptr[DCTSIZE*0] = dcval; + wsptr[DCTSIZE*1] = dcval; + wsptr[DCTSIZE*2] = dcval; + wsptr[DCTSIZE*3] = dcval; + wsptr[DCTSIZE*4] = dcval; + wsptr[DCTSIZE*5] = dcval; + wsptr[DCTSIZE*6] = dcval; + wsptr[DCTSIZE*7] = dcval; + + inptr++; /* advance pointers to next column */ + quantptr++; + wsptr++; + continue; + } + + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + tmp3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + tmp10 = tmp0 + tmp2; /* phase 3 */ + tmp11 = tmp0 - tmp2; + + tmp13 = tmp1 + tmp3; /* phases 5-3 */ + tmp12 = MULTIPLY(tmp1 - tmp3, FIX_1_414213562) - tmp13; /* 2*c4 */ + + tmp0 = tmp10 + tmp13; /* phase 2 */ + tmp3 = tmp10 - tmp13; + tmp1 = tmp11 + tmp12; + tmp2 = tmp11 - tmp12; + + /* Odd part */ + + tmp4 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + tmp5 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + tmp6 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + tmp7 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + z13 = tmp6 + tmp5; /* phase 6 */ + z10 = tmp6 - tmp5; + z11 = tmp4 + tmp7; + z12 = tmp4 - tmp7; + + tmp7 = z11 + z13; /* phase 5 */ + tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); /* 2*c4 */ + + z5 = MULTIPLY(z10 + z12, FIX_1_847759065); /* 2*c2 */ + tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; /* 2*(c2-c6) */ + tmp12 = MULTIPLY(z10, - FIX_2_613125930) + z5; /* -2*(c2+c6) */ + + tmp6 = tmp12 - tmp7; /* phase 2 */ + tmp5 = tmp11 - tmp6; + tmp4 = tmp10 + tmp5; + + wsptr[DCTSIZE*0] = (int) (tmp0 + tmp7); + wsptr[DCTSIZE*7] = (int) (tmp0 - tmp7); + wsptr[DCTSIZE*1] = (int) (tmp1 + tmp6); + wsptr[DCTSIZE*6] = (int) (tmp1 - tmp6); + wsptr[DCTSIZE*2] = (int) (tmp2 + tmp5); + wsptr[DCTSIZE*5] = (int) (tmp2 - tmp5); + wsptr[DCTSIZE*4] = (int) (tmp3 + tmp4); + wsptr[DCTSIZE*3] = (int) (tmp3 - tmp4); + + inptr++; /* advance pointers to next column */ + quantptr++; + wsptr++; + } + + /* Pass 2: process rows from work array, store into output array. */ + /* Note that we must descale the results by a factor of 8 == 2**3, */ + /* and also undo the PASS1_BITS scaling. */ + + wsptr = workspace; + for (ctr = 0; ctr < DCTSIZE; ctr++) { + outptr = output_buf[ctr] + output_col; + /* Rows of zeroes can be exploited in the same way as we did with columns. + * However, the column calculation has created many nonzero AC terms, so + * the simplification applies less often (typically 5% to 10% of the time). + * On machines with very fast multiplication, it's possible that the + * test takes more time than it's worth. In that case this section + * may be commented out. 
+ */ + +#ifndef NO_ZERO_ROW_TEST + if (wsptr[1] == 0 && wsptr[2] == 0 && wsptr[3] == 0 && wsptr[4] == 0 && + wsptr[5] == 0 && wsptr[6] == 0 && wsptr[7] == 0) { + /* AC terms all zero */ + JSAMPLE dcval = range_limit[IDESCALE(wsptr[0], PASS1_BITS+3) + & RANGE_MASK]; + + outptr[0] = dcval; + outptr[1] = dcval; + outptr[2] = dcval; + outptr[3] = dcval; + outptr[4] = dcval; + outptr[5] = dcval; + outptr[6] = dcval; + outptr[7] = dcval; + + wsptr += DCTSIZE; /* advance pointer to next row */ + continue; + } +#endif + + /* Even part */ + + tmp10 = ((DCTELEM) wsptr[0] + (DCTELEM) wsptr[4]); + tmp11 = ((DCTELEM) wsptr[0] - (DCTELEM) wsptr[4]); + + tmp13 = ((DCTELEM) wsptr[2] + (DCTELEM) wsptr[6]); + tmp12 = MULTIPLY((DCTELEM) wsptr[2] - (DCTELEM) wsptr[6], FIX_1_414213562) + - tmp13; + + tmp0 = tmp10 + tmp13; + tmp3 = tmp10 - tmp13; + tmp1 = tmp11 + tmp12; + tmp2 = tmp11 - tmp12; + + /* Odd part */ + + z13 = (DCTELEM) wsptr[5] + (DCTELEM) wsptr[3]; + z10 = (DCTELEM) wsptr[5] - (DCTELEM) wsptr[3]; + z11 = (DCTELEM) wsptr[1] + (DCTELEM) wsptr[7]; + z12 = (DCTELEM) wsptr[1] - (DCTELEM) wsptr[7]; + + tmp7 = z11 + z13; /* phase 5 */ + tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); /* 2*c4 */ + + z5 = MULTIPLY(z10 + z12, FIX_1_847759065); /* 2*c2 */ + tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; /* 2*(c2-c6) */ + tmp12 = MULTIPLY(z10, - FIX_2_613125930) + z5; /* -2*(c2+c6) */ + + tmp6 = tmp12 - tmp7; /* phase 2 */ + tmp5 = tmp11 - tmp6; + tmp4 = tmp10 + tmp5; + + /* Final output stage: scale down by a factor of 8 and range-limit */ + + outptr[0] = range_limit[IDESCALE(tmp0 + tmp7, PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[IDESCALE(tmp0 - tmp7, PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[IDESCALE(tmp1 + tmp6, PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[IDESCALE(tmp1 - tmp6, PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[IDESCALE(tmp2 + tmp5, PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[IDESCALE(tmp2 - tmp5, PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[IDESCALE(tmp3 + tmp4, PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[IDESCALE(tmp3 - tmp4, PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += DCTSIZE; /* advance pointer to next row */ + } +} + +#endif /* DCT_IFAST_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jidctint.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jidctint.c new file mode 100644 index 000000000..dcdf7ce45 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jidctint.c @@ -0,0 +1,5137 @@ +/* + * jidctint.c + * + * Copyright (C) 1991-1998, Thomas G. Lane. + * Modification developed 2002-2009 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains a slow-but-accurate integer implementation of the + * inverse DCT (Discrete Cosine Transform). In the IJG code, this routine + * must also perform dequantization of the input coefficients. + * + * A 2-D IDCT can be done by 1-D IDCT on each column followed by 1-D IDCT + * on each row (or vice versa, but it's more convenient to emit a row at + * a time). Direct algorithms are also available, but they are much more + * complex and seem not to be any faster when reduced to code. + * + * This implementation is based on an algorithm described in + * C. Loeffler, A. Ligtenberg and G. 
Moschytz, "Practical Fast 1-D DCT + * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics, + * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991. + * The primary algorithm described there uses 11 multiplies and 29 adds. + * We use their alternate method with 12 multiplies and 32 adds. + * The advantage of this method is that no data path contains more than one + * multiplication; this allows a very simple and accurate implementation in + * scaled fixed-point arithmetic, with a minimal number of shifts. + * + * We also provide IDCT routines with various output sample block sizes for + * direct resolution reduction or enlargement and for direct resolving the + * common 2x1 and 1x2 subsampling cases without additional resampling: NxN + * (N=1...16), 2NxN, and Nx2N (N=1...8) pixels for one 8x8 input DCT block. + * + * For N<8 we simply take the corresponding low-frequency coefficients of + * the 8x8 input DCT block and apply an NxN point IDCT on the sub-block + * to yield the downscaled outputs. + * This can be seen as direct low-pass downsampling from the DCT domain + * point of view rather than the usual spatial domain point of view, + * yielding significant computational savings and results at least + * as good as common bilinear (averaging) spatial downsampling. + * + * For N>8 we apply a partial NxN IDCT on the 8 input coefficients as + * lower frequencies and higher frequencies assumed to be zero. + * It turns out that the computational effort is similar to the 8x8 IDCT + * regarding the output size. + * Furthermore, the scaling and descaling is the same for all IDCT sizes. + * + * CAUTION: We rely on the FIX() macro except for the N=1,2,4,8 cases + * since there would be too many additional constants to pre-calculate. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" +#include "jdct.h" /* Private declarations for DCT subsystem */ + +#ifdef DCT_ISLOW_SUPPORTED + + +/* + * This module is specialized to the case DCTSIZE = 8. + */ + +#if DCTSIZE != 8 + Sorry, this code only copes with 8x8 DCT blocks. /* deliberate syntax err */ +#endif + + +/* + * The poop on this scaling stuff is as follows: + * + * Each 1-D IDCT step produces outputs which are a factor of sqrt(N) + * larger than the true IDCT outputs. The final outputs are therefore + * a factor of N larger than desired; since N=8 this can be cured by + * a simple right shift at the end of the algorithm. The advantage of + * this arrangement is that we save two multiplications per 1-D IDCT, + * because the y0 and y4 inputs need not be divided by sqrt(N). + * + * We have to do addition and subtraction of the integer inputs, which + * is no problem, and multiplication by fractional constants, which is + * a problem to do in integer arithmetic. We multiply all the constants + * by CONST_SCALE and convert them to integer constants (thus retaining + * CONST_BITS bits of precision in the constants). After doing a + * multiplication we have to divide the product by CONST_SCALE, with proper + * rounding, to produce the correct output. This division can be done + * cheaply as a right shift of CONST_BITS bits. We postpone shifting + * as long as possible so that partial sums can be added together with + * full fractional precision. + * + * The outputs of the first pass are scaled up by PASS1_BITS bits so that + * they are represented to better-than-integral precision. These outputs + * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word + * with the recommended scaling. 
(To scale up 12-bit sample data further, an + * intermediate INT32 array would be needed.) + * + * To avoid overflow of the 32-bit intermediate results in pass 2, we must + * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis + * shows that the values given below are the most effective. + */ + +#if BITS_IN_JSAMPLE == 8 +#define CONST_BITS 13 +#define PASS1_BITS 2 +#else +#define CONST_BITS 13 +#define PASS1_BITS 1 /* lose a little precision to avoid overflow */ +#endif + +/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus + * causing a lot of useless floating-point operations at run time. + * To get around this we use the following pre-calculated constants. + * If you change CONST_BITS you may want to add appropriate values. + * (With a reasonable C compiler, you can just rely on the FIX() macro...) + */ + +#if CONST_BITS == 13 +#define FIX_0_298631336 ((INT32) 2446) /* FIX(0.298631336) */ +#define FIX_0_390180644 ((INT32) 3196) /* FIX(0.390180644) */ +#define FIX_0_541196100 ((INT32) 4433) /* FIX(0.541196100) */ +#define FIX_0_765366865 ((INT32) 6270) /* FIX(0.765366865) */ +#define FIX_0_899976223 ((INT32) 7373) /* FIX(0.899976223) */ +#define FIX_1_175875602 ((INT32) 9633) /* FIX(1.175875602) */ +#define FIX_1_501321110 ((INT32) 12299) /* FIX(1.501321110) */ +#define FIX_1_847759065 ((INT32) 15137) /* FIX(1.847759065) */ +#define FIX_1_961570560 ((INT32) 16069) /* FIX(1.961570560) */ +#define FIX_2_053119869 ((INT32) 16819) /* FIX(2.053119869) */ +#define FIX_2_562915447 ((INT32) 20995) /* FIX(2.562915447) */ +#define FIX_3_072711026 ((INT32) 25172) /* FIX(3.072711026) */ +#else +#define FIX_0_298631336 FIX(0.298631336) +#define FIX_0_390180644 FIX(0.390180644) +#define FIX_0_541196100 FIX(0.541196100) +#define FIX_0_765366865 FIX(0.765366865) +#define FIX_0_899976223 FIX(0.899976223) +#define FIX_1_175875602 FIX(1.175875602) +#define FIX_1_501321110 FIX(1.501321110) +#define FIX_1_847759065 FIX(1.847759065) +#define FIX_1_961570560 FIX(1.961570560) +#define FIX_2_053119869 FIX(2.053119869) +#define FIX_2_562915447 FIX(2.562915447) +#define FIX_3_072711026 FIX(3.072711026) +#endif + + +/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result. + * For 8-bit samples with the recommended scaling, all the variable + * and constant values involved are no more than 16 bits wide, so a + * 16x16->32 bit multiply can be used instead of a full 32x32 multiply. + * For 12-bit samples, a full 32-bit multiplication will be needed. + */ + +#if BITS_IN_JSAMPLE == 8 +#define MULTIPLY(var,const) MULTIPLY16C16(var,const) +#else +#define MULTIPLY(var,const) ((var) * (const)) +#endif + + +/* Dequantize a coefficient by multiplying it by the multiplier-table + * entry; produce an int result. In this module, both inputs and result + * are 16 bits or less, so either int or short multiply will work. + */ + +#define DEQUANTIZE(coef,quantval) (((ISLOW_MULT_TYPE) (coef)) * (quantval)) + + +/* + * Perform dequantization and inverse DCT on one block of coefficients. 
+ */ + +GLOBAL(void) +jpeg_idct_islow (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3; + INT32 tmp10, tmp11, tmp12, tmp13; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[DCTSIZE2]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + /* Note results are scaled up by sqrt(8) compared to a true IDCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = DCTSIZE; ctr > 0; ctr--) { + /* Due to quantization, we will usually find that many of the input + * coefficients are zero, especially the AC terms. We can exploit this + * by short-circuiting the IDCT calculation for any column in which all + * the AC terms are zero. In that case each output is equal to the + * DC coefficient (with scale factor as needed). + * With typical images and quantization tables, half or more of the + * column DCT calculations can be simplified this way. + */ + + if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 && + inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 && + inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 && + inptr[DCTSIZE*7] == 0) { + /* AC terms all zero */ + int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS; + + wsptr[DCTSIZE*0] = dcval; + wsptr[DCTSIZE*1] = dcval; + wsptr[DCTSIZE*2] = dcval; + wsptr[DCTSIZE*3] = dcval; + wsptr[DCTSIZE*4] = dcval; + wsptr[DCTSIZE*5] = dcval; + wsptr[DCTSIZE*6] = dcval; + wsptr[DCTSIZE*7] = dcval; + + inptr++; /* advance pointers to next column */ + quantptr++; + wsptr++; + continue; + } + + /* Even part: reverse the even part of the forward DCT. */ + /* The rotator is sqrt(2)*c(-6). */ + + z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); + tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865); + tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065); + + z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z2 <<= CONST_BITS; + z3 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + z2 += ONE << (CONST_BITS-PASS1_BITS-1); + + tmp0 = z2 + z3; + tmp1 = z2 - z3; + + tmp10 = tmp0 + tmp2; + tmp13 = tmp0 - tmp2; + tmp11 = tmp1 + tmp3; + tmp12 = tmp1 - tmp3; + + /* Odd part per figure 8; the matrix is unitary and hence its + * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively. 
+ */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + + z2 = tmp0 + tmp2; + z3 = tmp1 + tmp3; + + z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */ + z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ + z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ + z2 += z1; + z3 += z1; + + z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ + tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ + tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ + tmp0 += z1 + z2; + tmp3 += z1 + z3; + + z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ + tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ + tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ + tmp1 += z1 + z3; + tmp2 += z1 + z2; + + /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */ + + wsptr[DCTSIZE*0] = (int) RIGHT_SHIFT(tmp10 + tmp3, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*7] = (int) RIGHT_SHIFT(tmp10 - tmp3, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*1] = (int) RIGHT_SHIFT(tmp11 + tmp2, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*6] = (int) RIGHT_SHIFT(tmp11 - tmp2, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*2] = (int) RIGHT_SHIFT(tmp12 + tmp1, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*5] = (int) RIGHT_SHIFT(tmp12 - tmp1, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*3] = (int) RIGHT_SHIFT(tmp13 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*4] = (int) RIGHT_SHIFT(tmp13 - tmp0, CONST_BITS-PASS1_BITS); + + inptr++; /* advance pointers to next column */ + quantptr++; + wsptr++; + } + + /* Pass 2: process rows from work array, store into output array. */ + /* Note that we must descale the results by a factor of 8 == 2**3, */ + /* and also undo the PASS1_BITS scaling. */ + + wsptr = workspace; + for (ctr = 0; ctr < DCTSIZE; ctr++) { + outptr = output_buf[ctr] + output_col; + /* Rows of zeroes can be exploited in the same way as we did with columns. + * However, the column calculation has created many nonzero AC terms, so + * the simplification applies less often (typically 5% to 10% of the time). + * On machines with very fast multiplication, it's possible that the + * test takes more time than it's worth. In that case this section + * may be commented out. + */ + +#ifndef NO_ZERO_ROW_TEST + if (wsptr[1] == 0 && wsptr[2] == 0 && wsptr[3] == 0 && wsptr[4] == 0 && + wsptr[5] == 0 && wsptr[6] == 0 && wsptr[7] == 0) { + /* AC terms all zero */ + JSAMPLE dcval = range_limit[(int) DESCALE((INT32) wsptr[0], PASS1_BITS+3) + & RANGE_MASK]; + + outptr[0] = dcval; + outptr[1] = dcval; + outptr[2] = dcval; + outptr[3] = dcval; + outptr[4] = dcval; + outptr[5] = dcval; + outptr[6] = dcval; + outptr[7] = dcval; + + wsptr += DCTSIZE; /* advance pointer to next row */ + continue; + } +#endif + + /* Even part: reverse the even part of the forward DCT. */ + /* The rotator is sqrt(2)*c(-6). */ + + z2 = (INT32) wsptr[2]; + z3 = (INT32) wsptr[6]; + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); + tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865); + tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065); + + /* Add fudge factor here for final descale. 
*/ + z2 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z3 = (INT32) wsptr[4]; + + tmp0 = (z2 + z3) << CONST_BITS; + tmp1 = (z2 - z3) << CONST_BITS; + + tmp10 = tmp0 + tmp2; + tmp13 = tmp0 - tmp2; + tmp11 = tmp1 + tmp3; + tmp12 = tmp1 - tmp3; + + /* Odd part per figure 8; the matrix is unitary and hence its + * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively. + */ + + tmp0 = (INT32) wsptr[7]; + tmp1 = (INT32) wsptr[5]; + tmp2 = (INT32) wsptr[3]; + tmp3 = (INT32) wsptr[1]; + + z2 = tmp0 + tmp2; + z3 = tmp1 + tmp3; + + z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */ + z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ + z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ + z2 += z1; + z3 += z1; + + z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ + tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ + tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ + tmp0 += z1 + z2; + tmp3 += z1 + z3; + + z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ + tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ + tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ + tmp1 += z1 + z3; + tmp2 += z1 + z2; + + /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += DCTSIZE; /* advance pointer to next row */ + } +} + +#ifdef IDCT_SCALING_SUPPORTED + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 7x7 output block. + * + * Optimized algorithm with 12 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/14). + */ + +GLOBAL(void) +jpeg_idct_7x7 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[7*7]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 7; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp13 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp13 <<= CONST_BITS; + /* Add fudge factor here for final descale. 
*/ + tmp13 += ONE << (CONST_BITS-PASS1_BITS-1); + + z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */ + tmp12 = MULTIPLY(z1 - z2, FIX(0.314692123)); /* c6 */ + tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */ + tmp0 = z1 + z3; + z2 -= tmp0; + tmp0 = MULTIPLY(tmp0, FIX(1.274162392)) + tmp13; /* c2 */ + tmp10 += tmp0 - MULTIPLY(z3, FIX(0.077722536)); /* c2-c4-c6 */ + tmp12 += tmp0 - MULTIPLY(z1, FIX(2.470602249)); /* c2+c4+c6 */ + tmp13 += MULTIPLY(z2, FIX(1.414213562)); /* c0 */ + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + + tmp1 = MULTIPLY(z1 + z2, FIX(0.935414347)); /* (c3+c1-c5)/2 */ + tmp2 = MULTIPLY(z1 - z2, FIX(0.170262339)); /* (c3+c5-c1)/2 */ + tmp0 = tmp1 - tmp2; + tmp1 += tmp2; + tmp2 = MULTIPLY(z2 + z3, - FIX(1.378756276)); /* -c1 */ + tmp1 += tmp2; + z2 = MULTIPLY(z1 + z3, FIX(0.613604268)); /* c5 */ + tmp0 += z2; + tmp2 += z2 + MULTIPLY(z3, FIX(1.870828693)); /* c3+c1-c5 */ + + /* Final output stage */ + + wsptr[7*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[7*6] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS); + wsptr[7*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS); + wsptr[7*5] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS); + wsptr[7*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS); + wsptr[7*4] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS); + wsptr[7*3] = (int) RIGHT_SHIFT(tmp13, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 7 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 7; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + tmp13 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp13 <<= CONST_BITS; + + z1 = (INT32) wsptr[2]; + z2 = (INT32) wsptr[4]; + z3 = (INT32) wsptr[6]; + + tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */ + tmp12 = MULTIPLY(z1 - z2, FIX(0.314692123)); /* c6 */ + tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */ + tmp0 = z1 + z3; + z2 -= tmp0; + tmp0 = MULTIPLY(tmp0, FIX(1.274162392)) + tmp13; /* c2 */ + tmp10 += tmp0 - MULTIPLY(z3, FIX(0.077722536)); /* c2-c4-c6 */ + tmp12 += tmp0 - MULTIPLY(z1, FIX(2.470602249)); /* c2+c4+c6 */ + tmp13 += MULTIPLY(z2, FIX(1.414213562)); /* c0 */ + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + + tmp1 = MULTIPLY(z1 + z2, FIX(0.935414347)); /* (c3+c1-c5)/2 */ + tmp2 = MULTIPLY(z1 - z2, FIX(0.170262339)); /* (c3+c5-c1)/2 */ + tmp0 = tmp1 - tmp2; + tmp1 += tmp2; + tmp2 = MULTIPLY(z2 + z3, - FIX(1.378756276)); /* -c1 */ + tmp1 += tmp2; + z2 = MULTIPLY(z1 + z3, FIX(0.613604268)); /* c5 */ + tmp0 += z2; + tmp2 += z2 + MULTIPLY(z3, FIX(1.870828693)); /* c3+c1-c5 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 7; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a reduced-size 6x6 output block. + * + * Optimized algorithm with 3 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/12). + */ + +GLOBAL(void) +jpeg_idct_6x6 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[6*6]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp0 <<= CONST_BITS; + /* Add fudge factor here for final descale. 
*/ + tmp0 += ONE << (CONST_BITS-PASS1_BITS-1); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + tmp10 = MULTIPLY(tmp2, FIX(0.707106781)); /* c4 */ + tmp1 = tmp0 + tmp10; + tmp11 = RIGHT_SHIFT(tmp0 - tmp10 - tmp10, CONST_BITS-PASS1_BITS); + tmp10 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + tmp0 = MULTIPLY(tmp10, FIX(1.224744871)); /* c2 */ + tmp10 = tmp1 + tmp0; + tmp12 = tmp1 - tmp0; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */ + tmp0 = tmp1 + ((z1 + z2) << CONST_BITS); + tmp2 = tmp1 + ((z3 - z2) << CONST_BITS); + tmp1 = (z1 - z2 - z3) << PASS1_BITS; + + /* Final output stage */ + + wsptr[6*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[6*5] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS); + wsptr[6*1] = (int) (tmp11 + tmp1); + wsptr[6*4] = (int) (tmp11 - tmp1); + wsptr[6*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS); + wsptr[6*3] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 6 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 6; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp0 <<= CONST_BITS; + tmp2 = (INT32) wsptr[4]; + tmp10 = MULTIPLY(tmp2, FIX(0.707106781)); /* c4 */ + tmp1 = tmp0 + tmp10; + tmp11 = tmp0 - tmp10 - tmp10; + tmp10 = (INT32) wsptr[2]; + tmp0 = MULTIPLY(tmp10, FIX(1.224744871)); /* c2 */ + tmp10 = tmp1 + tmp0; + tmp12 = tmp1 - tmp0; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */ + tmp0 = tmp1 + ((z1 + z2) << CONST_BITS); + tmp2 = tmp1 + ((z3 - z2) << CONST_BITS); + tmp1 = (z1 - z2 - z3) << CONST_BITS; + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 6; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a reduced-size 5x5 output block. + * + * Optimized algorithm with 5 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/10). + */ + +GLOBAL(void) +jpeg_idct_5x5 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp10, tmp11, tmp12; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[5*5]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. 
*/ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 5; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp12 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp12 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + tmp12 += ONE << (CONST_BITS-PASS1_BITS-1); + tmp0 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + tmp1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z1 = MULTIPLY(tmp0 + tmp1, FIX(0.790569415)); /* (c2+c4)/2 */ + z2 = MULTIPLY(tmp0 - tmp1, FIX(0.353553391)); /* (c2-c4)/2 */ + z3 = tmp12 + z2; + tmp10 = z3 + z1; + tmp11 = z3 - z1; + tmp12 -= z2 << 2; + + /* Odd part */ + + z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + + z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c3 */ + tmp0 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */ + tmp1 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */ + + /* Final output stage */ + + wsptr[5*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[5*4] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS); + wsptr[5*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS); + wsptr[5*3] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS); + wsptr[5*2] = (int) RIGHT_SHIFT(tmp12, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 5 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 5; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp12 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp12 <<= CONST_BITS; + tmp0 = (INT32) wsptr[2]; + tmp1 = (INT32) wsptr[4]; + z1 = MULTIPLY(tmp0 + tmp1, FIX(0.790569415)); /* (c2+c4)/2 */ + z2 = MULTIPLY(tmp0 - tmp1, FIX(0.353553391)); /* (c2-c4)/2 */ + z3 = tmp12 + z2; + tmp10 = z3 + z1; + tmp11 = z3 - z1; + tmp12 -= z2 << 2; + + /* Odd part */ + + z2 = (INT32) wsptr[1]; + z3 = (INT32) wsptr[3]; + + z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c3 */ + tmp0 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */ + tmp1 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 5; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a reduced-size 4x4 output block. + * + * Optimized algorithm with 3 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT]. 
+ */ + +GLOBAL(void) +jpeg_idct_4x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp2, tmp10, tmp12; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[4*4]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 4; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + + tmp10 = (tmp0 + tmp2) << PASS1_BITS; + tmp12 = (tmp0 - tmp2) << PASS1_BITS; + + /* Odd part */ + /* Same rotation as in the even part of the 8x8 LL&M IDCT */ + + z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */ + /* Add fudge factor here for final descale. */ + z1 += ONE << (CONST_BITS-PASS1_BITS-1); + tmp0 = RIGHT_SHIFT(z1 + MULTIPLY(z2, FIX_0_765366865), /* c2-c6 */ + CONST_BITS-PASS1_BITS); + tmp2 = RIGHT_SHIFT(z1 - MULTIPLY(z3, FIX_1_847759065), /* c2+c6 */ + CONST_BITS-PASS1_BITS); + + /* Final output stage */ + + wsptr[4*0] = (int) (tmp10 + tmp0); + wsptr[4*3] = (int) (tmp10 - tmp0); + wsptr[4*1] = (int) (tmp12 + tmp2); + wsptr[4*2] = (int) (tmp12 - tmp2); + } + + /* Pass 2: process 4 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 4; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp2 = (INT32) wsptr[2]; + + tmp10 = (tmp0 + tmp2) << CONST_BITS; + tmp12 = (tmp0 - tmp2) << CONST_BITS; + + /* Odd part */ + /* Same rotation as in the even part of the 8x8 LL&M IDCT */ + + z2 = (INT32) wsptr[1]; + z3 = (INT32) wsptr[3]; + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */ + tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */ + tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 4; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a reduced-size 3x3 output block. + * + * Optimized algorithm with 2 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/6). + */ + +GLOBAL(void) +jpeg_idct_3x3 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp2, tmp10, tmp12; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[3*3]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. 
*/ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 3; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp0 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + tmp0 += ONE << (CONST_BITS-PASS1_BITS-1); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */ + tmp10 = tmp0 + tmp12; + tmp2 = tmp0 - tmp12 - tmp12; + + /* Odd part */ + + tmp12 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */ + + /* Final output stage */ + + wsptr[3*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[3*2] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS); + wsptr[3*1] = (int) RIGHT_SHIFT(tmp2, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 3 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 3; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp0 <<= CONST_BITS; + tmp2 = (INT32) wsptr[2]; + tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */ + tmp10 = tmp0 + tmp12; + tmp2 = tmp0 - tmp12 - tmp12; + + /* Odd part */ + + tmp12 = (INT32) wsptr[1]; + tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 3; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a reduced-size 2x2 output block. + * + * Multiplication-less algorithm. + */ + +GLOBAL(void) +jpeg_idct_2x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + ISLOW_MULT_TYPE * quantptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + SHIFT_TEMPS + + /* Pass 1: process columns from input. */ + + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + + /* Column 0 */ + tmp4 = DEQUANTIZE(coef_block[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp5 = DEQUANTIZE(coef_block[DCTSIZE*1], quantptr[DCTSIZE*1]); + /* Add fudge factor here for final descale. */ + tmp4 += ONE << 2; + + tmp0 = tmp4 + tmp5; + tmp2 = tmp4 - tmp5; + + /* Column 1 */ + tmp4 = DEQUANTIZE(coef_block[DCTSIZE*0+1], quantptr[DCTSIZE*0+1]); + tmp5 = DEQUANTIZE(coef_block[DCTSIZE*1+1], quantptr[DCTSIZE*1+1]); + + tmp1 = tmp4 + tmp5; + tmp3 = tmp4 - tmp5; + + /* Pass 2: process 2 rows, store into output array. */ + + /* Row 0 */ + outptr = output_buf[0] + output_col; + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp0 + tmp1, 3) & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp0 - tmp1, 3) & RANGE_MASK]; + + /* Row 1 */ + outptr = output_buf[1] + output_col; + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp2 + tmp3, 3) & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp2 - tmp3, 3) & RANGE_MASK]; +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a reduced-size 1x1 output block. 
+ * + * We hardly need an inverse DCT routine for this: just take the + * average pixel value, which is one-eighth of the DC coefficient. + */ + +GLOBAL(void) +jpeg_idct_1x1 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + int dcval; + ISLOW_MULT_TYPE * quantptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + SHIFT_TEMPS + + /* 1x1 is trivial: just take the DC coefficient divided by 8. */ + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + dcval = DEQUANTIZE(coef_block[0], quantptr[0]); + dcval = (int) DESCALE((INT32) dcval, 3); + + output_buf[0][output_col] = range_limit[dcval & RANGE_MASK]; +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 9x9 output block. + * + * Optimized algorithm with 10 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/18). + */ + +GLOBAL(void) +jpeg_idct_9x9 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13, tmp14; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*9]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp0 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + tmp0 += ONE << (CONST_BITS-PASS1_BITS-1); + + z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + tmp3 = MULTIPLY(z3, FIX(0.707106781)); /* c6 */ + tmp1 = tmp0 + tmp3; + tmp2 = tmp0 - tmp3 - tmp3; + + tmp0 = MULTIPLY(z1 - z2, FIX(0.707106781)); /* c6 */ + tmp11 = tmp2 + tmp0; + tmp14 = tmp2 - tmp0 - tmp0; + + tmp0 = MULTIPLY(z1 + z2, FIX(1.328926049)); /* c2 */ + tmp2 = MULTIPLY(z1, FIX(1.083350441)); /* c4 */ + tmp3 = MULTIPLY(z2, FIX(0.245575608)); /* c8 */ + + tmp10 = tmp1 + tmp0 - tmp3; + tmp12 = tmp1 - tmp0 + tmp2; + tmp13 = tmp1 - tmp2 + tmp3; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + z2 = MULTIPLY(z2, - FIX(1.224744871)); /* -c3 */ + + tmp2 = MULTIPLY(z1 + z3, FIX(0.909038955)); /* c5 */ + tmp3 = MULTIPLY(z1 + z4, FIX(0.483689525)); /* c7 */ + tmp0 = tmp2 + tmp3 - z2; + tmp1 = MULTIPLY(z3 - z4, FIX(1.392728481)); /* c1 */ + tmp2 += z2 - tmp1; + tmp3 += z2 + tmp1; + tmp1 = MULTIPLY(z1 - z3 - z4, FIX(1.224744871)); /* c3 */ + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[8*8] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS); + wsptr[8*7] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS); + wsptr[8*6] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp13 
+ tmp3, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp13 - tmp3, CONST_BITS-PASS1_BITS); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp14, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 9 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 9; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp0 <<= CONST_BITS; + + z1 = (INT32) wsptr[2]; + z2 = (INT32) wsptr[4]; + z3 = (INT32) wsptr[6]; + + tmp3 = MULTIPLY(z3, FIX(0.707106781)); /* c6 */ + tmp1 = tmp0 + tmp3; + tmp2 = tmp0 - tmp3 - tmp3; + + tmp0 = MULTIPLY(z1 - z2, FIX(0.707106781)); /* c6 */ + tmp11 = tmp2 + tmp0; + tmp14 = tmp2 - tmp0 - tmp0; + + tmp0 = MULTIPLY(z1 + z2, FIX(1.328926049)); /* c2 */ + tmp2 = MULTIPLY(z1, FIX(1.083350441)); /* c4 */ + tmp3 = MULTIPLY(z2, FIX(0.245575608)); /* c8 */ + + tmp10 = tmp1 + tmp0 - tmp3; + tmp12 = tmp1 - tmp0 + tmp2; + tmp13 = tmp1 - tmp2 + tmp3; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z4 = (INT32) wsptr[7]; + + z2 = MULTIPLY(z2, - FIX(1.224744871)); /* -c3 */ + + tmp2 = MULTIPLY(z1 + z3, FIX(0.909038955)); /* c5 */ + tmp3 = MULTIPLY(z1 + z4, FIX(0.483689525)); /* c7 */ + tmp0 = tmp2 + tmp3 - z2; + tmp1 = MULTIPLY(z3 - z4, FIX(1.392728481)); /* c1 */ + tmp2 += z2 - tmp1; + tmp3 += z2 + tmp1; + tmp1 = MULTIPLY(z1 - z3 - z4, FIX(1.224744871)); /* c3 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 10x10 output block. + * + * Optimized algorithm with 12 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/20). + */ + +GLOBAL(void) +jpeg_idct_10x10 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24; + INT32 z1, z2, z3, z4, z5; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*10]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. 
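+ * (The coefficient block is always DCTSIZE x DCTSIZE = 8x8, so this pass
+ * performs eight column transforms, one per input column, each expanding
+ * that column's 8 coefficients into 10 workspace values; pass 2 below
+ * then performs ten row transforms, one per output row.)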
*/ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z3 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + z3 += ONE << (CONST_BITS-PASS1_BITS-1); + z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z1 = MULTIPLY(z4, FIX(1.144122806)); /* c4 */ + z2 = MULTIPLY(z4, FIX(0.437016024)); /* c8 */ + tmp10 = z3 + z1; + tmp11 = z3 - z2; + + tmp22 = RIGHT_SHIFT(z3 - ((z1 - z2) << 1), /* c0 = (c4-c8)*2 */ + CONST_BITS-PASS1_BITS); + + z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c6 */ + tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */ + tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */ + + tmp20 = tmp10 + tmp12; + tmp24 = tmp10 - tmp12; + tmp21 = tmp11 + tmp13; + tmp23 = tmp11 - tmp13; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + tmp11 = z2 + z4; + tmp13 = z2 - z4; + + tmp12 = MULTIPLY(tmp13, FIX(0.309016994)); /* (c3-c7)/2 */ + z5 = z3 << CONST_BITS; + + z2 = MULTIPLY(tmp11, FIX(0.951056516)); /* (c3+c7)/2 */ + z4 = z5 + tmp12; + + tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */ + tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */ + + z2 = MULTIPLY(tmp11, FIX(0.587785252)); /* (c1-c9)/2 */ + z4 = z5 - tmp12 - (tmp13 << (CONST_BITS - 1)); + + tmp12 = (z1 - tmp13 - z3) << PASS1_BITS; + + tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */ + tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */ + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*9] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*8] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) (tmp22 + tmp12); + wsptr[8*7] = (int) (tmp22 - tmp12); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*6] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 10 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 10; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
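+ * (The fudge factor ONE << (PASS1_BITS+2), once z3 is shifted left by
+ * CONST_BITS just below, equals half of the final divisor
+ * 2^(CONST_BITS+PASS1_BITS+3), so the concluding RIGHT_SHIFTs round to
+ * nearest rather than truncate.  The pass-1 descales use the same trick
+ * with ONE << (CONST_BITS-PASS1_BITS-1).)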
*/ + z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z3 <<= CONST_BITS; + z4 = (INT32) wsptr[4]; + z1 = MULTIPLY(z4, FIX(1.144122806)); /* c4 */ + z2 = MULTIPLY(z4, FIX(0.437016024)); /* c8 */ + tmp10 = z3 + z1; + tmp11 = z3 - z2; + + tmp22 = z3 - ((z1 - z2) << 1); /* c0 = (c4-c8)*2 */ + + z2 = (INT32) wsptr[2]; + z3 = (INT32) wsptr[6]; + + z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c6 */ + tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */ + tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */ + + tmp20 = tmp10 + tmp12; + tmp24 = tmp10 - tmp12; + tmp21 = tmp11 + tmp13; + tmp23 = tmp11 - tmp13; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z3 <<= CONST_BITS; + z4 = (INT32) wsptr[7]; + + tmp11 = z2 + z4; + tmp13 = z2 - z4; + + tmp12 = MULTIPLY(tmp13, FIX(0.309016994)); /* (c3-c7)/2 */ + + z2 = MULTIPLY(tmp11, FIX(0.951056516)); /* (c3+c7)/2 */ + z4 = z3 + tmp12; + + tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */ + tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */ + + z2 = MULTIPLY(tmp11, FIX(0.587785252)); /* (c1-c9)/2 */ + z4 = z3 - tmp12 - (tmp13 << (CONST_BITS - 1)); + + tmp12 = ((z1 - tmp13) << CONST_BITS) - z3; + + tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */ + tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 11x11 output block. + * + * Optimized algorithm with 24 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/22). + */ + +GLOBAL(void) +jpeg_idct_11x11 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*11]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. 
*/ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp10 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp10 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + tmp10 += ONE << (CONST_BITS-PASS1_BITS-1); + + z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132)); /* c2+c4 */ + tmp23 = MULTIPLY(z2 - z1, FIX(0.430815045)); /* c2-c6 */ + z4 = z1 + z3; + tmp24 = MULTIPLY(z4, - FIX(1.155664402)); /* -(c2-c10) */ + z4 -= z2; + tmp25 = tmp10 + MULTIPLY(z4, FIX(1.356927976)); /* c2 */ + tmp21 = tmp20 + tmp23 + tmp25 - + MULTIPLY(z2, FIX(1.821790775)); /* c2+c4+c10-c6 */ + tmp20 += tmp25 + MULTIPLY(z3, FIX(2.115825087)); /* c4+c6 */ + tmp23 += tmp25 - MULTIPLY(z1, FIX(1.513598477)); /* c6+c8 */ + tmp24 += tmp25; + tmp22 = tmp24 - MULTIPLY(z3, FIX(0.788749120)); /* c8+c10 */ + tmp24 += MULTIPLY(z2, FIX(1.944413522)) - /* c2+c8 */ + MULTIPLY(z1, FIX(1.390975730)); /* c4+c10 */ + tmp25 = tmp10 - MULTIPLY(z4, FIX(1.414213562)); /* c0 */ + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + tmp11 = z1 + z2; + tmp14 = MULTIPLY(tmp11 + z3 + z4, FIX(0.398430003)); /* c9 */ + tmp11 = MULTIPLY(tmp11, FIX(0.887983902)); /* c3-c9 */ + tmp12 = MULTIPLY(z1 + z3, FIX(0.670361295)); /* c5-c9 */ + tmp13 = tmp14 + MULTIPLY(z1 + z4, FIX(0.366151574)); /* c7-c9 */ + tmp10 = tmp11 + tmp12 + tmp13 - + MULTIPLY(z1, FIX(0.923107866)); /* c7+c5+c3-c1-2*c9 */ + z1 = tmp14 - MULTIPLY(z2 + z3, FIX(1.163011579)); /* c7+c9 */ + tmp11 += z1 + MULTIPLY(z2, FIX(2.073276588)); /* c1+c7+3*c9-c3 */ + tmp12 += z1 - MULTIPLY(z3, FIX(1.192193623)); /* c3+c5-c7-c9 */ + z1 = MULTIPLY(z2 + z4, - FIX(1.798248910)); /* -(c1+c9) */ + tmp11 += z1; + tmp13 += z1 + MULTIPLY(z4, FIX(2.102458632)); /* c1+c5+c9-c7 */ + tmp14 += MULTIPLY(z2, - FIX(1.467221301)) + /* -(c5+c9) */ + MULTIPLY(z3, FIX(1.001388905)) - /* c1-c9 */ + MULTIPLY(z4, FIX(1.684843907)); /* c3+c9 */ + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*10] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*9] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*8] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*7] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*6] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp25, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 11 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 11; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + tmp10 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp10 <<= CONST_BITS; + + z1 = (INT32) wsptr[2]; + z2 = (INT32) wsptr[4]; + z3 = (INT32) wsptr[6]; + + tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132)); /* c2+c4 */ + tmp23 = MULTIPLY(z2 - z1, FIX(0.430815045)); /* c2-c6 */ + z4 = z1 + z3; + tmp24 = MULTIPLY(z4, - FIX(1.155664402)); /* -(c2-c10) */ + z4 -= z2; + tmp25 = tmp10 + MULTIPLY(z4, FIX(1.356927976)); /* c2 */ + tmp21 = tmp20 + tmp23 + tmp25 - + MULTIPLY(z2, FIX(1.821790775)); /* c2+c4+c10-c6 */ + tmp20 += tmp25 + MULTIPLY(z3, FIX(2.115825087)); /* c4+c6 */ + tmp23 += tmp25 - MULTIPLY(z1, FIX(1.513598477)); /* c6+c8 */ + tmp24 += tmp25; + tmp22 = tmp24 - MULTIPLY(z3, FIX(0.788749120)); /* c8+c10 */ + tmp24 += MULTIPLY(z2, FIX(1.944413522)) - /* c2+c8 */ + MULTIPLY(z1, FIX(1.390975730)); /* c4+c10 */ + tmp25 = tmp10 - MULTIPLY(z4, FIX(1.414213562)); /* c0 */ + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z4 = (INT32) wsptr[7]; + + tmp11 = z1 + z2; + tmp14 = MULTIPLY(tmp11 + z3 + z4, FIX(0.398430003)); /* c9 */ + tmp11 = MULTIPLY(tmp11, FIX(0.887983902)); /* c3-c9 */ + tmp12 = MULTIPLY(z1 + z3, FIX(0.670361295)); /* c5-c9 */ + tmp13 = tmp14 + MULTIPLY(z1 + z4, FIX(0.366151574)); /* c7-c9 */ + tmp10 = tmp11 + tmp12 + tmp13 - + MULTIPLY(z1, FIX(0.923107866)); /* c7+c5+c3-c1-2*c9 */ + z1 = tmp14 - MULTIPLY(z2 + z3, FIX(1.163011579)); /* c7+c9 */ + tmp11 += z1 + MULTIPLY(z2, FIX(2.073276588)); /* c1+c7+3*c9-c3 */ + tmp12 += z1 - MULTIPLY(z3, FIX(1.192193623)); /* c3+c5-c7-c9 */ + z1 = MULTIPLY(z2 + z4, - FIX(1.798248910)); /* -(c1+c9) */ + tmp11 += z1; + tmp13 += z1 + MULTIPLY(z4, FIX(2.102458632)); /* c1+c5+c9-c7 */ + tmp14 += MULTIPLY(z2, - FIX(1.467221301)) + /* -(c5+c9) */ + MULTIPLY(z3, FIX(1.001388905)) - /* c1-c9 */ + MULTIPLY(z4, FIX(1.684843907)); /* c3+c9 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 12x12 output block. + * + * Optimized algorithm with 15 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/24). 
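+ *
+ * As a check on the naming: c4 = sqrt(2) * cos(4*pi/24) = sqrt(2) * cos(pi/6)
+ * = sqrt(3/2) ~= 1.224744871, which is exactly the FIX(1.224744871) factor
+ * applied to the row-4 coefficient below.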
+ */ + +GLOBAL(void) +jpeg_idct_12x12 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*12]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z3 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + z3 += ONE << (CONST_BITS-PASS1_BITS-1); + + z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z4 = MULTIPLY(z4, FIX(1.224744871)); /* c4 */ + + tmp10 = z3 + z4; + tmp11 = z3 - z4; + + z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z4 = MULTIPLY(z1, FIX(1.366025404)); /* c2 */ + z1 <<= CONST_BITS; + z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + z2 <<= CONST_BITS; + + tmp12 = z1 - z2; + + tmp21 = z3 + tmp12; + tmp24 = z3 - tmp12; + + tmp12 = z4 + z2; + + tmp20 = tmp10 + tmp12; + tmp25 = tmp10 - tmp12; + + tmp12 = z4 - z1 - z2; + + tmp22 = tmp11 + tmp12; + tmp23 = tmp11 - tmp12; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + tmp11 = MULTIPLY(z2, FIX(1.306562965)); /* c3 */ + tmp14 = MULTIPLY(z2, - FIX_0_541196100); /* -c9 */ + + tmp10 = z1 + z3; + tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */ + tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */ + tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */ + tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */ + tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */ + tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */ + tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */ + MULTIPLY(z4, FIX(1.982889723)); /* c5+c7 */ + + z1 -= z4; + z2 -= z3; + z3 = MULTIPLY(z1 + z2, FIX_0_541196100); /* c9 */ + tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */ + tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */ + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*11] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*10] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*9] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*8] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*7] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS); + wsptr[8*6] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 12 rows from work array, store into 
output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 12; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z3 <<= CONST_BITS; + + z4 = (INT32) wsptr[4]; + z4 = MULTIPLY(z4, FIX(1.224744871)); /* c4 */ + + tmp10 = z3 + z4; + tmp11 = z3 - z4; + + z1 = (INT32) wsptr[2]; + z4 = MULTIPLY(z1, FIX(1.366025404)); /* c2 */ + z1 <<= CONST_BITS; + z2 = (INT32) wsptr[6]; + z2 <<= CONST_BITS; + + tmp12 = z1 - z2; + + tmp21 = z3 + tmp12; + tmp24 = z3 - tmp12; + + tmp12 = z4 + z2; + + tmp20 = tmp10 + tmp12; + tmp25 = tmp10 - tmp12; + + tmp12 = z4 - z1 - z2; + + tmp22 = tmp11 + tmp12; + tmp23 = tmp11 - tmp12; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z4 = (INT32) wsptr[7]; + + tmp11 = MULTIPLY(z2, FIX(1.306562965)); /* c3 */ + tmp14 = MULTIPLY(z2, - FIX_0_541196100); /* -c9 */ + + tmp10 = z1 + z3; + tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */ + tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */ + tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */ + tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */ + tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */ + tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */ + tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */ + MULTIPLY(z4, FIX(1.982889723)); /* c5+c7 */ + + z1 -= z4; + z2 -= z3; + z3 = MULTIPLY(z1 + z2, FIX_0_541196100); /* c9 */ + tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */ + tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 13x13 output block. + * + * Optimized algorithm with 29 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/26). 
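+ *
+ * (Under this convention c0 = sqrt(2) ~= 1.414213562, which is why the
+ * even part below labels its FIX(1.414213562) multiplier c0.)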
+ */ + +GLOBAL(void) +jpeg_idct_13x13 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*13]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z1 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + z1 += ONE << (CONST_BITS-PASS1_BITS-1); + + z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z4 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + tmp10 = z3 + z4; + tmp11 = z3 - z4; + + tmp12 = MULTIPLY(tmp10, FIX(1.155388986)); /* (c4+c6)/2 */ + tmp13 = MULTIPLY(tmp11, FIX(0.096834934)) + z1; /* (c4-c6)/2 */ + + tmp20 = MULTIPLY(z2, FIX(1.373119086)) + tmp12 + tmp13; /* c2 */ + tmp22 = MULTIPLY(z2, FIX(0.501487041)) - tmp12 + tmp13; /* c10 */ + + tmp12 = MULTIPLY(tmp10, FIX(0.316450131)); /* (c8-c12)/2 */ + tmp13 = MULTIPLY(tmp11, FIX(0.486914739)) + z1; /* (c8+c12)/2 */ + + tmp21 = MULTIPLY(z2, FIX(1.058554052)) - tmp12 + tmp13; /* c6 */ + tmp25 = MULTIPLY(z2, - FIX(1.252223920)) + tmp12 + tmp13; /* c4 */ + + tmp12 = MULTIPLY(tmp10, FIX(0.435816023)); /* (c2-c10)/2 */ + tmp13 = MULTIPLY(tmp11, FIX(0.937303064)) - z1; /* (c2+c10)/2 */ + + tmp23 = MULTIPLY(z2, - FIX(0.170464608)) - tmp12 - tmp13; /* c12 */ + tmp24 = MULTIPLY(z2, - FIX(0.803364869)) + tmp12 - tmp13; /* c8 */ + + tmp26 = MULTIPLY(tmp11 - z2, FIX(1.414213562)) + z1; /* c0 */ + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + tmp11 = MULTIPLY(z1 + z2, FIX(1.322312651)); /* c3 */ + tmp12 = MULTIPLY(z1 + z3, FIX(1.163874945)); /* c5 */ + tmp15 = z1 + z4; + tmp13 = MULTIPLY(tmp15, FIX(0.937797057)); /* c7 */ + tmp10 = tmp11 + tmp12 + tmp13 - + MULTIPLY(z1, FIX(2.020082300)); /* c7+c5+c3-c1 */ + tmp14 = MULTIPLY(z2 + z3, - FIX(0.338443458)); /* -c11 */ + tmp11 += tmp14 + MULTIPLY(z2, FIX(0.837223564)); /* c5+c9+c11-c3 */ + tmp12 += tmp14 - MULTIPLY(z3, FIX(1.572116027)); /* c1+c5-c9-c11 */ + tmp14 = MULTIPLY(z2 + z4, - FIX(1.163874945)); /* -c5 */ + tmp11 += tmp14; + tmp13 += tmp14 + MULTIPLY(z4, FIX(2.205608352)); /* c3+c5+c9-c7 */ + tmp14 = MULTIPLY(z3 + z4, - FIX(0.657217813)); /* -c9 */ + tmp12 += tmp14; + tmp13 += tmp14; + tmp15 = MULTIPLY(tmp15, FIX(0.338443458)); /* c11 */ + tmp14 = tmp15 + MULTIPLY(z1, FIX(0.318774355)) - /* c9-c11 */ + MULTIPLY(z2, FIX(0.466105296)); /* c1-c7 */ + z1 = MULTIPLY(z3 - z2, FIX(0.937797057)); /* c7 */ + tmp14 += z1; + tmp15 += z1 + MULTIPLY(z3, FIX(0.384515595)) - /* c3-c7 */ + MULTIPLY(z4, FIX(1.742345811)); /* c1+c11 */ + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*12] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, 
CONST_BITS-PASS1_BITS); + wsptr[8*11] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*10] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*9] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*8] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS); + wsptr[8*7] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS); + wsptr[8*6] = (int) RIGHT_SHIFT(tmp26, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 13 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 13; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z1 <<= CONST_BITS; + + z2 = (INT32) wsptr[2]; + z3 = (INT32) wsptr[4]; + z4 = (INT32) wsptr[6]; + + tmp10 = z3 + z4; + tmp11 = z3 - z4; + + tmp12 = MULTIPLY(tmp10, FIX(1.155388986)); /* (c4+c6)/2 */ + tmp13 = MULTIPLY(tmp11, FIX(0.096834934)) + z1; /* (c4-c6)/2 */ + + tmp20 = MULTIPLY(z2, FIX(1.373119086)) + tmp12 + tmp13; /* c2 */ + tmp22 = MULTIPLY(z2, FIX(0.501487041)) - tmp12 + tmp13; /* c10 */ + + tmp12 = MULTIPLY(tmp10, FIX(0.316450131)); /* (c8-c12)/2 */ + tmp13 = MULTIPLY(tmp11, FIX(0.486914739)) + z1; /* (c8+c12)/2 */ + + tmp21 = MULTIPLY(z2, FIX(1.058554052)) - tmp12 + tmp13; /* c6 */ + tmp25 = MULTIPLY(z2, - FIX(1.252223920)) + tmp12 + tmp13; /* c4 */ + + tmp12 = MULTIPLY(tmp10, FIX(0.435816023)); /* (c2-c10)/2 */ + tmp13 = MULTIPLY(tmp11, FIX(0.937303064)) - z1; /* (c2+c10)/2 */ + + tmp23 = MULTIPLY(z2, - FIX(0.170464608)) - tmp12 - tmp13; /* c12 */ + tmp24 = MULTIPLY(z2, - FIX(0.803364869)) + tmp12 - tmp13; /* c8 */ + + tmp26 = MULTIPLY(tmp11 - z2, FIX(1.414213562)) + z1; /* c0 */ + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z4 = (INT32) wsptr[7]; + + tmp11 = MULTIPLY(z1 + z2, FIX(1.322312651)); /* c3 */ + tmp12 = MULTIPLY(z1 + z3, FIX(1.163874945)); /* c5 */ + tmp15 = z1 + z4; + tmp13 = MULTIPLY(tmp15, FIX(0.937797057)); /* c7 */ + tmp10 = tmp11 + tmp12 + tmp13 - + MULTIPLY(z1, FIX(2.020082300)); /* c7+c5+c3-c1 */ + tmp14 = MULTIPLY(z2 + z3, - FIX(0.338443458)); /* -c11 */ + tmp11 += tmp14 + MULTIPLY(z2, FIX(0.837223564)); /* c5+c9+c11-c3 */ + tmp12 += tmp14 - MULTIPLY(z3, FIX(1.572116027)); /* c1+c5-c9-c11 */ + tmp14 = MULTIPLY(z2 + z4, - FIX(1.163874945)); /* -c5 */ + tmp11 += tmp14; + tmp13 += tmp14 + MULTIPLY(z4, FIX(2.205608352)); /* c3+c5+c9-c7 */ + tmp14 = MULTIPLY(z3 + z4, - FIX(0.657217813)); /* -c9 */ + tmp12 += tmp14; + tmp13 += tmp14; + tmp15 = MULTIPLY(tmp15, FIX(0.338443458)); /* c11 */ + tmp14 = tmp15 + MULTIPLY(z1, FIX(0.318774355)) - /* c9-c11 */ + MULTIPLY(z2, FIX(0.466105296)); /* c1-c7 */ + z1 = MULTIPLY(z3 - z2, FIX(0.937797057)); /* c7 */ + tmp14 += z1; + tmp15 += z1 + MULTIPLY(z3, FIX(0.384515595)) - /* c3-c7 */ + MULTIPLY(z4, FIX(1.742345811)); /* c1+c11 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & 
RANGE_MASK]; + outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 14x14 output block. + * + * Optimized algorithm with 20 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/28). + */ + +GLOBAL(void) +jpeg_idct_14x14 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*14]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z1 <<= CONST_BITS; + /* Add fudge factor here for final descale. 
*/ + z1 += ONE << (CONST_BITS-PASS1_BITS-1); + z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z2 = MULTIPLY(z4, FIX(1.274162392)); /* c4 */ + z3 = MULTIPLY(z4, FIX(0.314692123)); /* c12 */ + z4 = MULTIPLY(z4, FIX(0.881747734)); /* c8 */ + + tmp10 = z1 + z2; + tmp11 = z1 + z3; + tmp12 = z1 - z4; + + tmp23 = RIGHT_SHIFT(z1 - ((z2 + z3 - z4) << 1), /* c0 = (c4+c12-c8)*2 */ + CONST_BITS-PASS1_BITS); + + z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + z3 = MULTIPLY(z1 + z2, FIX(1.105676686)); /* c6 */ + + tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */ + tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */ + tmp15 = MULTIPLY(z1, FIX(0.613604268)) - /* c10 */ + MULTIPLY(z2, FIX(1.378756276)); /* c2 */ + + tmp20 = tmp10 + tmp13; + tmp26 = tmp10 - tmp13; + tmp21 = tmp11 + tmp14; + tmp25 = tmp11 - tmp14; + tmp22 = tmp12 + tmp15; + tmp24 = tmp12 - tmp15; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + tmp13 = z4 << CONST_BITS; + + tmp14 = z1 + z3; + tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */ + tmp12 = MULTIPLY(tmp14, FIX(1.197448846)); /* c5 */ + tmp10 = tmp11 + tmp12 + tmp13 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */ + tmp14 = MULTIPLY(tmp14, FIX(0.752406978)); /* c9 */ + tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */ + z1 -= z2; + tmp15 = MULTIPLY(z1, FIX(0.467085129)) - tmp13; /* c11 */ + tmp16 += tmp15; + z1 += z4; + z4 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - tmp13; /* -c13 */ + tmp11 += z4 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */ + tmp12 += z4 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */ + z4 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */ + tmp14 += z4 + tmp13 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */ + tmp15 += z4 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */ + + tmp13 = (z1 - z3) << PASS1_BITS; + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*13] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*12] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*11] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) (tmp23 + tmp13); + wsptr[8*10] = (int) (tmp23 - tmp13); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*9] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS); + wsptr[8*8] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS); + wsptr[8*6] = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS); + wsptr[8*7] = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 14 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 14; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z1 <<= CONST_BITS; + z4 = (INT32) wsptr[4]; + z2 = MULTIPLY(z4, FIX(1.274162392)); /* c4 */ + z3 = MULTIPLY(z4, FIX(0.314692123)); /* c12 */ + z4 = MULTIPLY(z4, FIX(0.881747734)); /* c8 */ + + tmp10 = z1 + z2; + tmp11 = z1 + z3; + tmp12 = z1 - z4; + + tmp23 = z1 - ((z2 + z3 - z4) << 1); /* c0 = (c4+c12-c8)*2 */ + + z1 = (INT32) wsptr[2]; + z2 = (INT32) wsptr[6]; + + z3 = MULTIPLY(z1 + z2, FIX(1.105676686)); /* c6 */ + + tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */ + tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */ + tmp15 = MULTIPLY(z1, FIX(0.613604268)) - /* c10 */ + MULTIPLY(z2, FIX(1.378756276)); /* c2 */ + + tmp20 = tmp10 + tmp13; + tmp26 = tmp10 - tmp13; + tmp21 = tmp11 + tmp14; + tmp25 = tmp11 - tmp14; + tmp22 = tmp12 + tmp15; + tmp24 = tmp12 - tmp15; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z4 = (INT32) wsptr[7]; + z4 <<= CONST_BITS; + + tmp14 = z1 + z3; + tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */ + tmp12 = MULTIPLY(tmp14, FIX(1.197448846)); /* c5 */ + tmp10 = tmp11 + tmp12 + z4 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */ + tmp14 = MULTIPLY(tmp14, FIX(0.752406978)); /* c9 */ + tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */ + z1 -= z2; + tmp15 = MULTIPLY(z1, FIX(0.467085129)) - z4; /* c11 */ + tmp16 += tmp15; + tmp13 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - z4; /* -c13 */ + tmp11 += tmp13 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */ + tmp12 += tmp13 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */ + tmp13 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */ + tmp14 += tmp13 + z4 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */ + tmp15 += tmp13 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */ + + tmp13 = ((z1 - z3) << CONST_BITS) + z4; + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 15x15 output block. 
+ * + * Optimized algorithm with 22 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/30). + */ + +GLOBAL(void) +jpeg_idct_15x15 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*15]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z1 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + z1 += ONE << (CONST_BITS-PASS1_BITS-1); + + z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z4 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + tmp10 = MULTIPLY(z4, FIX(0.437016024)); /* c12 */ + tmp11 = MULTIPLY(z4, FIX(1.144122806)); /* c6 */ + + tmp12 = z1 - tmp10; + tmp13 = z1 + tmp11; + z1 -= (tmp11 - tmp10) << 1; /* c0 = (c6-c12)*2 */ + + z4 = z2 - z3; + z3 += z2; + tmp10 = MULTIPLY(z3, FIX(1.337628990)); /* (c2+c4)/2 */ + tmp11 = MULTIPLY(z4, FIX(0.045680613)); /* (c2-c4)/2 */ + z2 = MULTIPLY(z2, FIX(1.439773946)); /* c4+c14 */ + + tmp20 = tmp13 + tmp10 + tmp11; + tmp23 = tmp12 - tmp10 + tmp11 + z2; + + tmp10 = MULTIPLY(z3, FIX(0.547059574)); /* (c8+c14)/2 */ + tmp11 = MULTIPLY(z4, FIX(0.399234004)); /* (c8-c14)/2 */ + + tmp25 = tmp13 - tmp10 - tmp11; + tmp26 = tmp12 + tmp10 - tmp11 - z2; + + tmp10 = MULTIPLY(z3, FIX(0.790569415)); /* (c6+c12)/2 */ + tmp11 = MULTIPLY(z4, FIX(0.353553391)); /* (c6-c12)/2 */ + + tmp21 = tmp12 + tmp10 + tmp11; + tmp24 = tmp13 - tmp10 + tmp11; + tmp11 += tmp11; + tmp22 = z1 + tmp11; /* c10 = c6-c12 */ + tmp27 = z1 - tmp11 - tmp11; /* c0 = (c6-c12)*2 */ + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z4 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z3 = MULTIPLY(z4, FIX(1.224744871)); /* c5 */ + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + tmp13 = z2 - z4; + tmp15 = MULTIPLY(z1 + tmp13, FIX(0.831253876)); /* c9 */ + tmp11 = tmp15 + MULTIPLY(z1, FIX(0.513743148)); /* c3-c9 */ + tmp14 = tmp15 - MULTIPLY(tmp13, FIX(2.176250899)); /* c3+c9 */ + + tmp13 = MULTIPLY(z2, - FIX(0.831253876)); /* -c9 */ + tmp15 = MULTIPLY(z2, - FIX(1.344997024)); /* -c3 */ + z2 = z1 - z4; + tmp12 = z3 + MULTIPLY(z2, FIX(1.406466353)); /* c1 */ + + tmp10 = tmp12 + MULTIPLY(z4, FIX(2.457431844)) - tmp15; /* c1+c7 */ + tmp16 = tmp12 - MULTIPLY(z1, FIX(1.112434820)) + tmp13; /* c1-c13 */ + tmp12 = MULTIPLY(z2, FIX(1.224744871)) - z3; /* c5 */ + z2 = MULTIPLY(z1 + z4, FIX(0.575212477)); /* c11 */ + tmp13 += z2 + MULTIPLY(z1, FIX(0.475753014)) - z3; /* c7-c11 */ + tmp15 += z2 - MULTIPLY(z4, FIX(0.869244010)) + z3; /* c11+c13 */ + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*14] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*13] = (int) 
RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*12] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*11] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*10] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS); + wsptr[8*9] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS); + wsptr[8*6] = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS); + wsptr[8*8] = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS); + wsptr[8*7] = (int) RIGHT_SHIFT(tmp27, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 15 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 15; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z1 <<= CONST_BITS; + + z2 = (INT32) wsptr[2]; + z3 = (INT32) wsptr[4]; + z4 = (INT32) wsptr[6]; + + tmp10 = MULTIPLY(z4, FIX(0.437016024)); /* c12 */ + tmp11 = MULTIPLY(z4, FIX(1.144122806)); /* c6 */ + + tmp12 = z1 - tmp10; + tmp13 = z1 + tmp11; + z1 -= (tmp11 - tmp10) << 1; /* c0 = (c6-c12)*2 */ + + z4 = z2 - z3; + z3 += z2; + tmp10 = MULTIPLY(z3, FIX(1.337628990)); /* (c2+c4)/2 */ + tmp11 = MULTIPLY(z4, FIX(0.045680613)); /* (c2-c4)/2 */ + z2 = MULTIPLY(z2, FIX(1.439773946)); /* c4+c14 */ + + tmp20 = tmp13 + tmp10 + tmp11; + tmp23 = tmp12 - tmp10 + tmp11 + z2; + + tmp10 = MULTIPLY(z3, FIX(0.547059574)); /* (c8+c14)/2 */ + tmp11 = MULTIPLY(z4, FIX(0.399234004)); /* (c8-c14)/2 */ + + tmp25 = tmp13 - tmp10 - tmp11; + tmp26 = tmp12 + tmp10 - tmp11 - z2; + + tmp10 = MULTIPLY(z3, FIX(0.790569415)); /* (c6+c12)/2 */ + tmp11 = MULTIPLY(z4, FIX(0.353553391)); /* (c6-c12)/2 */ + + tmp21 = tmp12 + tmp10 + tmp11; + tmp24 = tmp13 - tmp10 + tmp11; + tmp11 += tmp11; + tmp22 = z1 + tmp11; /* c10 = c6-c12 */ + tmp27 = z1 - tmp11 - tmp11; /* c0 = (c6-c12)*2 */ + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z4 = (INT32) wsptr[5]; + z3 = MULTIPLY(z4, FIX(1.224744871)); /* c5 */ + z4 = (INT32) wsptr[7]; + + tmp13 = z2 - z4; + tmp15 = MULTIPLY(z1 + tmp13, FIX(0.831253876)); /* c9 */ + tmp11 = tmp15 + MULTIPLY(z1, FIX(0.513743148)); /* c3-c9 */ + tmp14 = tmp15 - MULTIPLY(tmp13, FIX(2.176250899)); /* c3+c9 */ + + tmp13 = MULTIPLY(z2, - FIX(0.831253876)); /* -c9 */ + tmp15 = MULTIPLY(z2, - FIX(1.344997024)); /* -c3 */ + z2 = z1 - z4; + tmp12 = z3 + MULTIPLY(z2, FIX(1.406466353)); /* c1 */ + + tmp10 = tmp12 + MULTIPLY(z4, FIX(2.457431844)) - tmp15; /* c1+c7 */ + tmp16 = tmp12 - MULTIPLY(z1, FIX(1.112434820)) + tmp13; /* c1-c13 */ + tmp12 = MULTIPLY(z2, FIX(1.224744871)) - z3; /* c5 */ + z2 = MULTIPLY(z1 + z4, FIX(0.575212477)); /* c11 */ + tmp13 += z2 + MULTIPLY(z1, FIX(0.475753014)) - z3; /* c7-c11 */ + tmp15 += z2 - MULTIPLY(z4, FIX(0.869244010)) + z3; /* c11+c13 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + 
CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp27, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 16x16 output block. + * + * Optimized algorithm with 28 multiplications in the 1-D kernel. + * cK represents sqrt(2) * cos(K*pi/32). + */ + +GLOBAL(void) +jpeg_idct_16x16 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*16]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp0 <<= CONST_BITS; + /* Add fudge factor here for final descale. 
*/ + tmp0 += 1 << (CONST_BITS-PASS1_BITS-1); + + z1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + tmp1 = MULTIPLY(z1, FIX(1.306562965)); /* c4[16] = c2[8] */ + tmp2 = MULTIPLY(z1, FIX_0_541196100); /* c12[16] = c6[8] */ + + tmp10 = tmp0 + tmp1; + tmp11 = tmp0 - tmp1; + tmp12 = tmp0 + tmp2; + tmp13 = tmp0 - tmp2; + + z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + z3 = z1 - z2; + z4 = MULTIPLY(z3, FIX(0.275899379)); /* c14[16] = c7[8] */ + z3 = MULTIPLY(z3, FIX(1.387039845)); /* c2[16] = c1[8] */ + + tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447); /* (c6+c2)[16] = (c3+c1)[8] */ + tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223); /* (c6-c14)[16] = (c3-c7)[8] */ + tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */ + tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */ + + tmp20 = tmp10 + tmp0; + tmp27 = tmp10 - tmp0; + tmp21 = tmp12 + tmp1; + tmp26 = tmp12 - tmp1; + tmp22 = tmp13 + tmp2; + tmp25 = tmp13 - tmp2; + tmp23 = tmp11 + tmp3; + tmp24 = tmp11 - tmp3; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + tmp11 = z1 + z3; + + tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001)); /* c3 */ + tmp2 = MULTIPLY(tmp11, FIX(1.247225013)); /* c5 */ + tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867)); /* c7 */ + tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586)); /* c9 */ + tmp11 = MULTIPLY(tmp11, FIX(0.666655658)); /* c11 */ + tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528)); /* c13 */ + tmp0 = tmp1 + tmp2 + tmp3 - + MULTIPLY(z1, FIX(2.286341144)); /* c7+c5+c3-c1 */ + tmp13 = tmp10 + tmp11 + tmp12 - + MULTIPLY(z1, FIX(1.835730603)); /* c9+c11+c13-c15 */ + z1 = MULTIPLY(z2 + z3, FIX(0.138617169)); /* c15 */ + tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074)); /* c9+c11-c3-c15 */ + tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048)); /* c5+c7+c15-c3 */ + z1 = MULTIPLY(z3 - z2, FIX(1.407403738)); /* c1 */ + tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */ + tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */ + z2 += z4; + z1 = MULTIPLY(z2, - FIX(0.666655658)); /* -c11 */ + tmp1 += z1; + tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962)); /* c3+c11+c15-c7 */ + z2 = MULTIPLY(z2, - FIX(1.247225013)); /* -c5 */ + tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */ + tmp12 += z2; + z2 = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */ + tmp2 += z2; + tmp3 += z2; + z2 = MULTIPLY(z4 - z3, FIX(0.410524528)); /* c13 */ + tmp10 += z2; + tmp11 += z2; + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[8*15] = (int) RIGHT_SHIFT(tmp20 - tmp0, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp1, CONST_BITS-PASS1_BITS); + wsptr[8*14] = (int) RIGHT_SHIFT(tmp21 - tmp1, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp2, CONST_BITS-PASS1_BITS); + wsptr[8*13] = (int) RIGHT_SHIFT(tmp22 - tmp2, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp3, CONST_BITS-PASS1_BITS); + wsptr[8*12] = (int) RIGHT_SHIFT(tmp23 - tmp3, CONST_BITS-PASS1_BITS); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*11] = (int) RIGHT_SHIFT(tmp24 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*10] = (int) RIGHT_SHIFT(tmp25 - tmp11, 
CONST_BITS-PASS1_BITS); + wsptr[8*6] = (int) RIGHT_SHIFT(tmp26 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*9] = (int) RIGHT_SHIFT(tmp26 - tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*7] = (int) RIGHT_SHIFT(tmp27 + tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*8] = (int) RIGHT_SHIFT(tmp27 - tmp13, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 16 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 16; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp0 <<= CONST_BITS; + + z1 = (INT32) wsptr[4]; + tmp1 = MULTIPLY(z1, FIX(1.306562965)); /* c4[16] = c2[8] */ + tmp2 = MULTIPLY(z1, FIX_0_541196100); /* c12[16] = c6[8] */ + + tmp10 = tmp0 + tmp1; + tmp11 = tmp0 - tmp1; + tmp12 = tmp0 + tmp2; + tmp13 = tmp0 - tmp2; + + z1 = (INT32) wsptr[2]; + z2 = (INT32) wsptr[6]; + z3 = z1 - z2; + z4 = MULTIPLY(z3, FIX(0.275899379)); /* c14[16] = c7[8] */ + z3 = MULTIPLY(z3, FIX(1.387039845)); /* c2[16] = c1[8] */ + + tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447); /* (c6+c2)[16] = (c3+c1)[8] */ + tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223); /* (c6-c14)[16] = (c3-c7)[8] */ + tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */ + tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */ + + tmp20 = tmp10 + tmp0; + tmp27 = tmp10 - tmp0; + tmp21 = tmp12 + tmp1; + tmp26 = tmp12 - tmp1; + tmp22 = tmp13 + tmp2; + tmp25 = tmp13 - tmp2; + tmp23 = tmp11 + tmp3; + tmp24 = tmp11 - tmp3; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z4 = (INT32) wsptr[7]; + + tmp11 = z1 + z3; + + tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001)); /* c3 */ + tmp2 = MULTIPLY(tmp11, FIX(1.247225013)); /* c5 */ + tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867)); /* c7 */ + tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586)); /* c9 */ + tmp11 = MULTIPLY(tmp11, FIX(0.666655658)); /* c11 */ + tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528)); /* c13 */ + tmp0 = tmp1 + tmp2 + tmp3 - + MULTIPLY(z1, FIX(2.286341144)); /* c7+c5+c3-c1 */ + tmp13 = tmp10 + tmp11 + tmp12 - + MULTIPLY(z1, FIX(1.835730603)); /* c9+c11+c13-c15 */ + z1 = MULTIPLY(z2 + z3, FIX(0.138617169)); /* c15 */ + tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074)); /* c9+c11-c3-c15 */ + tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048)); /* c5+c7+c15-c3 */ + z1 = MULTIPLY(z3 - z2, FIX(1.407403738)); /* c1 */ + tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */ + tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */ + z2 += z4; + z1 = MULTIPLY(z2, - FIX(0.666655658)); /* -c11 */ + tmp1 += z1; + tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962)); /* c3+c11+c15-c7 */ + z2 = MULTIPLY(z2, - FIX(1.247225013)); /* -c5 */ + tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */ + tmp12 += z2; + z2 = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */ + tmp2 += z2; + tmp3 += z2; + z2 = MULTIPLY(z4 - z3, FIX(0.410524528)); /* c13 */ + tmp10 += z2; + tmp11 += z2; + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[15] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp2, + 
CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp27 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp27 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 16x8 output block. + * + * 8-point IDCT in pass 1 (columns), 16-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_16x8 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*8]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + /* Note results are scaled up by sqrt(8) compared to a true IDCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = DCTSIZE; ctr > 0; ctr--) { + /* Due to quantization, we will usually find that many of the input + * coefficients are zero, especially the AC terms. We can exploit this + * by short-circuiting the IDCT calculation for any column in which all + * the AC terms are zero. In that case each output is equal to the + * DC coefficient (with scale factor as needed). + * With typical images and quantization tables, half or more of the + * column DCT calculations can be simplified this way. + */ + + if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 && + inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 && + inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 && + inptr[DCTSIZE*7] == 0) { + /* AC terms all zero */ + int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS; + + wsptr[DCTSIZE*0] = dcval; + wsptr[DCTSIZE*1] = dcval; + wsptr[DCTSIZE*2] = dcval; + wsptr[DCTSIZE*3] = dcval; + wsptr[DCTSIZE*4] = dcval; + wsptr[DCTSIZE*5] = dcval; + wsptr[DCTSIZE*6] = dcval; + wsptr[DCTSIZE*7] = dcval; + + inptr++; /* advance pointers to next column */ + quantptr++; + wsptr++; + continue; + } + + /* Even part: reverse the even part of the forward DCT. */ + /* The rotator is sqrt(2)*c(-6). 
*/ + + z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); + tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865); + tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065); + + z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z2 <<= CONST_BITS; + z3 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + z2 += ONE << (CONST_BITS-PASS1_BITS-1); + + tmp0 = z2 + z3; + tmp1 = z2 - z3; + + tmp10 = tmp0 + tmp2; + tmp13 = tmp0 - tmp2; + tmp11 = tmp1 + tmp3; + tmp12 = tmp1 - tmp3; + + /* Odd part per figure 8; the matrix is unitary and hence its + * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively. + */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + + z2 = tmp0 + tmp2; + z3 = tmp1 + tmp3; + + z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */ + z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ + z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ + z2 += z1; + z3 += z1; + + z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ + tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ + tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ + tmp0 += z1 + z2; + tmp3 += z1 + z3; + + z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ + tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ + tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ + tmp1 += z1 + z3; + tmp2 += z1 + z2; + + /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */ + + wsptr[DCTSIZE*0] = (int) RIGHT_SHIFT(tmp10 + tmp3, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*7] = (int) RIGHT_SHIFT(tmp10 - tmp3, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*1] = (int) RIGHT_SHIFT(tmp11 + tmp2, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*6] = (int) RIGHT_SHIFT(tmp11 - tmp2, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*2] = (int) RIGHT_SHIFT(tmp12 + tmp1, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*5] = (int) RIGHT_SHIFT(tmp12 - tmp1, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*3] = (int) RIGHT_SHIFT(tmp13 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[DCTSIZE*4] = (int) RIGHT_SHIFT(tmp13 - tmp0, CONST_BITS-PASS1_BITS); + + inptr++; /* advance pointers to next column */ + quantptr++; + wsptr++; + } + + /* Pass 2: process 8 rows from work array, store into output array. + * 16-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/32). + */ + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp0 <<= CONST_BITS; + + z1 = (INT32) wsptr[4]; + tmp1 = MULTIPLY(z1, FIX(1.306562965)); /* c4[16] = c2[8] */ + tmp2 = MULTIPLY(z1, FIX_0_541196100); /* c12[16] = c6[8] */ + + tmp10 = tmp0 + tmp1; + tmp11 = tmp0 - tmp1; + tmp12 = tmp0 + tmp2; + tmp13 = tmp0 - tmp2; + + z1 = (INT32) wsptr[2]; + z2 = (INT32) wsptr[6]; + z3 = z1 - z2; + z4 = MULTIPLY(z3, FIX(0.275899379)); /* c14[16] = c7[8] */ + z3 = MULTIPLY(z3, FIX(1.387039845)); /* c2[16] = c1[8] */ + + tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447); /* (c6+c2)[16] = (c3+c1)[8] */ + tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223); /* (c6-c14)[16] = (c3-c7)[8] */ + tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */ + tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */ + + tmp20 = tmp10 + tmp0; + tmp27 = tmp10 - tmp0; + tmp21 = tmp12 + tmp1; + tmp26 = tmp12 - tmp1; + tmp22 = tmp13 + tmp2; + tmp25 = tmp13 - tmp2; + tmp23 = tmp11 + tmp3; + tmp24 = tmp11 - tmp3; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z4 = (INT32) wsptr[7]; + + tmp11 = z1 + z3; + + tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001)); /* c3 */ + tmp2 = MULTIPLY(tmp11, FIX(1.247225013)); /* c5 */ + tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867)); /* c7 */ + tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586)); /* c9 */ + tmp11 = MULTIPLY(tmp11, FIX(0.666655658)); /* c11 */ + tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528)); /* c13 */ + tmp0 = tmp1 + tmp2 + tmp3 - + MULTIPLY(z1, FIX(2.286341144)); /* c7+c5+c3-c1 */ + tmp13 = tmp10 + tmp11 + tmp12 - + MULTIPLY(z1, FIX(1.835730603)); /* c9+c11+c13-c15 */ + z1 = MULTIPLY(z2 + z3, FIX(0.138617169)); /* c15 */ + tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074)); /* c9+c11-c3-c15 */ + tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048)); /* c5+c7+c15-c3 */ + z1 = MULTIPLY(z3 - z2, FIX(1.407403738)); /* c1 */ + tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */ + tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */ + z2 += z4; + z1 = MULTIPLY(z2, - FIX(0.666655658)); /* -c11 */ + tmp1 += z1; + tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962)); /* c3+c11+c15-c7 */ + z2 = MULTIPLY(z2, - FIX(1.247225013)); /* -c5 */ + tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */ + tmp12 += z2; + z2 = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */ + tmp2 += z2; + tmp3 += z2; + z2 = MULTIPLY(z4 - z3, FIX(0.410524528)); /* c13 */ + tmp10 += z2; + tmp11 += z2; + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[15] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + 
outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp27 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp27 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 14x7 output block. + * + * 7-point IDCT in pass 1 (columns), 14-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_14x7 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*7]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 7-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/14). + */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp23 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp23 <<= CONST_BITS; + /* Add fudge factor here for final descale. 
*/ + tmp23 += ONE << (CONST_BITS-PASS1_BITS-1); + + z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + tmp20 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */ + tmp22 = MULTIPLY(z1 - z2, FIX(0.314692123)); /* c6 */ + tmp21 = tmp20 + tmp22 + tmp23 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */ + tmp10 = z1 + z3; + z2 -= tmp10; + tmp10 = MULTIPLY(tmp10, FIX(1.274162392)) + tmp23; /* c2 */ + tmp20 += tmp10 - MULTIPLY(z3, FIX(0.077722536)); /* c2-c4-c6 */ + tmp22 += tmp10 - MULTIPLY(z1, FIX(2.470602249)); /* c2+c4+c6 */ + tmp23 += MULTIPLY(z2, FIX(1.414213562)); /* c0 */ + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + + tmp11 = MULTIPLY(z1 + z2, FIX(0.935414347)); /* (c3+c1-c5)/2 */ + tmp12 = MULTIPLY(z1 - z2, FIX(0.170262339)); /* (c3+c5-c1)/2 */ + tmp10 = tmp11 - tmp12; + tmp11 += tmp12; + tmp12 = MULTIPLY(z2 + z3, - FIX(1.378756276)); /* -c1 */ + tmp11 += tmp12; + z2 = MULTIPLY(z1 + z3, FIX(0.613604268)); /* c5 */ + tmp10 += z2; + tmp12 += z2 + MULTIPLY(z3, FIX(1.870828693)); /* c3+c1-c5 */ + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*6] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp23, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 7 rows from work array, store into output array. + * 14-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/28). + */ + wsptr = workspace; + for (ctr = 0; ctr < 7; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z1 <<= CONST_BITS; + z4 = (INT32) wsptr[4]; + z2 = MULTIPLY(z4, FIX(1.274162392)); /* c4 */ + z3 = MULTIPLY(z4, FIX(0.314692123)); /* c12 */ + z4 = MULTIPLY(z4, FIX(0.881747734)); /* c8 */ + + tmp10 = z1 + z2; + tmp11 = z1 + z3; + tmp12 = z1 - z4; + + tmp23 = z1 - ((z2 + z3 - z4) << 1); /* c0 = (c4+c12-c8)*2 */ + + z1 = (INT32) wsptr[2]; + z2 = (INT32) wsptr[6]; + + z3 = MULTIPLY(z1 + z2, FIX(1.105676686)); /* c6 */ + + tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */ + tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */ + tmp15 = MULTIPLY(z1, FIX(0.613604268)) - /* c10 */ + MULTIPLY(z2, FIX(1.378756276)); /* c2 */ + + tmp20 = tmp10 + tmp13; + tmp26 = tmp10 - tmp13; + tmp21 = tmp11 + tmp14; + tmp25 = tmp11 - tmp14; + tmp22 = tmp12 + tmp15; + tmp24 = tmp12 - tmp15; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z4 = (INT32) wsptr[7]; + z4 <<= CONST_BITS; + + tmp14 = z1 + z3; + tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */ + tmp12 = MULTIPLY(tmp14, FIX(1.197448846)); /* c5 */ + tmp10 = tmp11 + tmp12 + z4 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */ + tmp14 = MULTIPLY(tmp14, FIX(0.752406978)); /* c9 */ + tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */ + z1 -= z2; + tmp15 = MULTIPLY(z1, FIX(0.467085129)) - z4; /* c11 */ + tmp16 += tmp15; + tmp13 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - z4; /* -c13 */ + tmp11 += tmp13 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */ + tmp12 += tmp13 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */ + tmp13 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */ + tmp14 += tmp13 + z4 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */ + tmp15 += tmp13 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */ + + tmp13 = ((z1 - z3) << CONST_BITS) + z4; + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 12x6 output block. 
+ * + * 6-point IDCT in pass 1 (columns), 12-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_12x6 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*6]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12). + */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp10 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp10 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + tmp10 += ONE << (CONST_BITS-PASS1_BITS-1); + tmp12 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + tmp20 = MULTIPLY(tmp12, FIX(0.707106781)); /* c4 */ + tmp11 = tmp10 + tmp20; + tmp21 = RIGHT_SHIFT(tmp10 - tmp20 - tmp20, CONST_BITS-PASS1_BITS); + tmp20 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + tmp10 = MULTIPLY(tmp20, FIX(1.224744871)); /* c2 */ + tmp20 = tmp11 + tmp10; + tmp22 = tmp11 - tmp10; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + tmp11 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */ + tmp10 = tmp11 + ((z1 + z2) << CONST_BITS); + tmp12 = tmp11 + ((z3 - z2) << CONST_BITS); + tmp11 = (z1 - z2 - z3) << PASS1_BITS; + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) (tmp21 + tmp11); + wsptr[8*4] = (int) (tmp21 - tmp11); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 6 rows from work array, store into output array. + * 12-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/24). + */ + wsptr = workspace; + for (ctr = 0; ctr < 6; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z3 <<= CONST_BITS; + + z4 = (INT32) wsptr[4]; + z4 = MULTIPLY(z4, FIX(1.224744871)); /* c4 */ + + tmp10 = z3 + z4; + tmp11 = z3 - z4; + + z1 = (INT32) wsptr[2]; + z4 = MULTIPLY(z1, FIX(1.366025404)); /* c2 */ + z1 <<= CONST_BITS; + z2 = (INT32) wsptr[6]; + z2 <<= CONST_BITS; + + tmp12 = z1 - z2; + + tmp21 = z3 + tmp12; + tmp24 = z3 - tmp12; + + tmp12 = z4 + z2; + + tmp20 = tmp10 + tmp12; + tmp25 = tmp10 - tmp12; + + tmp12 = z4 - z1 - z2; + + tmp22 = tmp11 + tmp12; + tmp23 = tmp11 - tmp12; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z4 = (INT32) wsptr[7]; + + tmp11 = MULTIPLY(z2, FIX(1.306562965)); /* c3 */ + tmp14 = MULTIPLY(z2, - FIX_0_541196100); /* -c9 */ + + tmp10 = z1 + z3; + tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */ + tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */ + tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */ + tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */ + tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */ + tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */ + tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */ + MULTIPLY(z4, FIX(1.982889723)); /* c5+c7 */ + + z1 -= z4; + z2 -= z3; + z3 = MULTIPLY(z1 + z2, FIX_0_541196100); /* c9 */ + tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */ + tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 10x5 output block. + * + * 5-point IDCT in pass 1 (columns), 10-point in pass 2 (rows). 
+ */ + +GLOBAL(void) +jpeg_idct_10x5 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*5]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 5-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/10). + */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp12 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp12 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + tmp12 += ONE << (CONST_BITS-PASS1_BITS-1); + tmp13 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + tmp14 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z1 = MULTIPLY(tmp13 + tmp14, FIX(0.790569415)); /* (c2+c4)/2 */ + z2 = MULTIPLY(tmp13 - tmp14, FIX(0.353553391)); /* (c2-c4)/2 */ + z3 = tmp12 + z2; + tmp10 = z3 + z1; + tmp11 = z3 - z1; + tmp12 -= z2 << 2; + + /* Odd part */ + + z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + + z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c3 */ + tmp13 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */ + tmp14 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */ + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp10 + tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp10 - tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp11 + tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp11 - tmp14, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp12, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 5 rows from work array, store into output array. + * 10-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/20). + */ + wsptr = workspace; + for (ctr = 0; ctr < 5; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z3 <<= CONST_BITS; + z4 = (INT32) wsptr[4]; + z1 = MULTIPLY(z4, FIX(1.144122806)); /* c4 */ + z2 = MULTIPLY(z4, FIX(0.437016024)); /* c8 */ + tmp10 = z3 + z1; + tmp11 = z3 - z2; + + tmp22 = z3 - ((z1 - z2) << 1); /* c0 = (c4-c8)*2 */ + + z2 = (INT32) wsptr[2]; + z3 = (INT32) wsptr[6]; + + z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c6 */ + tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */ + tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */ + + tmp20 = tmp10 + tmp12; + tmp24 = tmp10 - tmp12; + tmp21 = tmp11 + tmp13; + tmp23 = tmp11 - tmp13; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + z3 <<= CONST_BITS; + z4 = (INT32) wsptr[7]; + + tmp11 = z2 + z4; + tmp13 = z2 - z4; + + tmp12 = MULTIPLY(tmp13, FIX(0.309016994)); /* (c3-c7)/2 */ + + z2 = MULTIPLY(tmp11, FIX(0.951056516)); /* (c3+c7)/2 */ + z4 = z3 + tmp12; + + tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */ + tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */ + + z2 = MULTIPLY(tmp11, FIX(0.587785252)); /* (c1-c9)/2 */ + z4 = z3 - tmp12 - (tmp13 << (CONST_BITS - 1)); + + tmp12 = ((z1 - tmp13) << CONST_BITS) - z3; + + tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */ + tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 8; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 8x4 output block. + * + * 4-point IDCT in pass 1 (columns), 8-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_8x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3; + INT32 tmp10, tmp11, tmp12, tmp13; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*4]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 4-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/16). 
+ */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + + tmp10 = (tmp0 + tmp2) << PASS1_BITS; + tmp12 = (tmp0 - tmp2) << PASS1_BITS; + + /* Odd part */ + /* Same rotation as in the even part of the 8x8 LL&M IDCT */ + + z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */ + /* Add fudge factor here for final descale. */ + z1 += ONE << (CONST_BITS-PASS1_BITS-1); + tmp0 = RIGHT_SHIFT(z1 + MULTIPLY(z2, FIX_0_765366865), /* c2-c6 */ + CONST_BITS-PASS1_BITS); + tmp2 = RIGHT_SHIFT(z1 - MULTIPLY(z3, FIX_1_847759065), /* c2+c6 */ + CONST_BITS-PASS1_BITS); + + /* Final output stage */ + + wsptr[8*0] = (int) (tmp10 + tmp0); + wsptr[8*3] = (int) (tmp10 - tmp0); + wsptr[8*1] = (int) (tmp12 + tmp2); + wsptr[8*2] = (int) (tmp12 - tmp2); + } + + /* Pass 2: process rows from work array, store into output array. */ + /* Note that we must descale the results by a factor of 8 == 2**3, */ + /* and also undo the PASS1_BITS scaling. */ + + wsptr = workspace; + for (ctr = 0; ctr < 4; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part: reverse the even part of the forward DCT. */ + /* The rotator is sqrt(2)*c(-6). */ + + z2 = (INT32) wsptr[2]; + z3 = (INT32) wsptr[6]; + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); + tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865); + tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065); + + /* Add fudge factor here for final descale. */ + z2 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z3 = (INT32) wsptr[4]; + + tmp0 = (z2 + z3) << CONST_BITS; + tmp1 = (z2 - z3) << CONST_BITS; + + tmp10 = tmp0 + tmp2; + tmp13 = tmp0 - tmp2; + tmp11 = tmp1 + tmp3; + tmp12 = tmp1 - tmp3; + + /* Odd part per figure 8; the matrix is unitary and hence its + * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively. 
+ */ + + tmp0 = (INT32) wsptr[7]; + tmp1 = (INT32) wsptr[5]; + tmp2 = (INT32) wsptr[3]; + tmp3 = (INT32) wsptr[1]; + + z2 = tmp0 + tmp2; + z3 = tmp1 + tmp3; + + z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */ + z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ + z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ + z2 += z1; + z3 += z1; + + z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ + tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ + tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ + tmp0 += z1 + z2; + tmp3 += z1 + z3; + + z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ + tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ + tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ + tmp1 += z1 + z3; + tmp2 += z1 + z2; + + /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += DCTSIZE; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a reduced-size 6x3 output block. + * + * 3-point IDCT in pass 1 (columns), 6-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_6x3 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[6*3]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 3-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/6). + */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp0 <<= CONST_BITS; + /* Add fudge factor here for final descale. 
*/ + tmp0 += ONE << (CONST_BITS-PASS1_BITS-1); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */ + tmp10 = tmp0 + tmp12; + tmp2 = tmp0 - tmp12 - tmp12; + + /* Odd part */ + + tmp12 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */ + + /* Final output stage */ + + wsptr[6*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[6*2] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS); + wsptr[6*1] = (int) RIGHT_SHIFT(tmp2, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 3 rows from work array, store into output array. + * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12). + */ + wsptr = workspace; + for (ctr = 0; ctr < 3; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp0 <<= CONST_BITS; + tmp2 = (INT32) wsptr[4]; + tmp10 = MULTIPLY(tmp2, FIX(0.707106781)); /* c4 */ + tmp1 = tmp0 + tmp10; + tmp11 = tmp0 - tmp10 - tmp10; + tmp10 = (INT32) wsptr[2]; + tmp0 = MULTIPLY(tmp10, FIX(1.224744871)); /* c2 */ + tmp10 = tmp1 + tmp0; + tmp12 = tmp1 - tmp0; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */ + tmp0 = tmp1 + ((z1 + z2) << CONST_BITS); + tmp2 = tmp1 + ((z3 - z2) << CONST_BITS); + tmp1 = (z1 - z2 - z3) << CONST_BITS; + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 6; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 4x2 output block. + * + * 2-point IDCT in pass 1 (columns), 4-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_4x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp2, tmp10, tmp12; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + INT32 * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + INT32 workspace[4*2]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 4; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp10 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + + /* Odd part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + + /* Final output stage */ + + wsptr[4*0] = tmp10 + tmp0; + wsptr[4*1] = tmp10 - tmp0; + } + + /* Pass 2: process 2 rows from work array, store into output array. + * 4-point IDCT kernel, + * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT]. 
+ */ + wsptr = workspace; + for (ctr = 0; ctr < 2; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp0 = wsptr[0] + (ONE << 2); + tmp2 = wsptr[2]; + + tmp10 = (tmp0 + tmp2) << CONST_BITS; + tmp12 = (tmp0 - tmp2) << CONST_BITS; + + /* Odd part */ + /* Same rotation as in the even part of the 8x8 LL&M IDCT */ + + z2 = wsptr[1]; + z3 = wsptr[3]; + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */ + tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */ + tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, + CONST_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, + CONST_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2, + CONST_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2, + CONST_BITS+3) + & RANGE_MASK]; + + wsptr += 4; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 2x1 output block. + * + * 1-point IDCT in pass 1 (columns), 2-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_2x1 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp10; + ISLOW_MULT_TYPE * quantptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + SHIFT_TEMPS + + /* Pass 1: empty. */ + + /* Pass 2: process 1 row from input, store into output array. */ + + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + outptr = output_buf[0] + output_col; + + /* Even part */ + + tmp10 = DEQUANTIZE(coef_block[0], quantptr[0]); + /* Add fudge factor here for final descale. */ + tmp10 += ONE << 2; + + /* Odd part */ + + tmp0 = DEQUANTIZE(coef_block[1], quantptr[1]); + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, 3) & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, 3) & RANGE_MASK]; +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 8x16 output block. + * + * 16-point IDCT in pass 1 (columns), 8-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_8x16 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[8*16]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 16-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/32). + */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp0 <<= CONST_BITS; + /* Add fudge factor here for final descale. 
*/ + tmp0 += ONE << (CONST_BITS-PASS1_BITS-1); + + z1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + tmp1 = MULTIPLY(z1, FIX(1.306562965)); /* c4[16] = c2[8] */ + tmp2 = MULTIPLY(z1, FIX_0_541196100); /* c12[16] = c6[8] */ + + tmp10 = tmp0 + tmp1; + tmp11 = tmp0 - tmp1; + tmp12 = tmp0 + tmp2; + tmp13 = tmp0 - tmp2; + + z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + z3 = z1 - z2; + z4 = MULTIPLY(z3, FIX(0.275899379)); /* c14[16] = c7[8] */ + z3 = MULTIPLY(z3, FIX(1.387039845)); /* c2[16] = c1[8] */ + + tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447); /* (c6+c2)[16] = (c3+c1)[8] */ + tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223); /* (c6-c14)[16] = (c3-c7)[8] */ + tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */ + tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */ + + tmp20 = tmp10 + tmp0; + tmp27 = tmp10 - tmp0; + tmp21 = tmp12 + tmp1; + tmp26 = tmp12 - tmp1; + tmp22 = tmp13 + tmp2; + tmp25 = tmp13 - tmp2; + tmp23 = tmp11 + tmp3; + tmp24 = tmp11 - tmp3; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + tmp11 = z1 + z3; + + tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001)); /* c3 */ + tmp2 = MULTIPLY(tmp11, FIX(1.247225013)); /* c5 */ + tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867)); /* c7 */ + tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586)); /* c9 */ + tmp11 = MULTIPLY(tmp11, FIX(0.666655658)); /* c11 */ + tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528)); /* c13 */ + tmp0 = tmp1 + tmp2 + tmp3 - + MULTIPLY(z1, FIX(2.286341144)); /* c7+c5+c3-c1 */ + tmp13 = tmp10 + tmp11 + tmp12 - + MULTIPLY(z1, FIX(1.835730603)); /* c9+c11+c13-c15 */ + z1 = MULTIPLY(z2 + z3, FIX(0.138617169)); /* c15 */ + tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074)); /* c9+c11-c3-c15 */ + tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048)); /* c5+c7+c15-c3 */ + z1 = MULTIPLY(z3 - z2, FIX(1.407403738)); /* c1 */ + tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */ + tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */ + z2 += z4; + z1 = MULTIPLY(z2, - FIX(0.666655658)); /* -c11 */ + tmp1 += z1; + tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962)); /* c3+c11+c15-c7 */ + z2 = MULTIPLY(z2, - FIX(1.247225013)); /* -c5 */ + tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */ + tmp12 += z2; + z2 = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */ + tmp2 += z2; + tmp3 += z2; + z2 = MULTIPLY(z4 - z3, FIX(0.410524528)); /* c13 */ + tmp10 += z2; + tmp11 += z2; + + /* Final output stage */ + + wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[8*15] = (int) RIGHT_SHIFT(tmp20 - tmp0, CONST_BITS-PASS1_BITS); + wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp1, CONST_BITS-PASS1_BITS); + wsptr[8*14] = (int) RIGHT_SHIFT(tmp21 - tmp1, CONST_BITS-PASS1_BITS); + wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp2, CONST_BITS-PASS1_BITS); + wsptr[8*13] = (int) RIGHT_SHIFT(tmp22 - tmp2, CONST_BITS-PASS1_BITS); + wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp3, CONST_BITS-PASS1_BITS); + wsptr[8*12] = (int) RIGHT_SHIFT(tmp23 - tmp3, CONST_BITS-PASS1_BITS); + wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*11] = (int) RIGHT_SHIFT(tmp24 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[8*10] = (int) RIGHT_SHIFT(tmp25 - tmp11, 
CONST_BITS-PASS1_BITS); + wsptr[8*6] = (int) RIGHT_SHIFT(tmp26 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*9] = (int) RIGHT_SHIFT(tmp26 - tmp12, CONST_BITS-PASS1_BITS); + wsptr[8*7] = (int) RIGHT_SHIFT(tmp27 + tmp13, CONST_BITS-PASS1_BITS); + wsptr[8*8] = (int) RIGHT_SHIFT(tmp27 - tmp13, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process rows from work array, store into output array. */ + /* Note that we must descale the results by a factor of 8 == 2**3, */ + /* and also undo the PASS1_BITS scaling. */ + + wsptr = workspace; + for (ctr = 0; ctr < 16; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part: reverse the even part of the forward DCT. */ + /* The rotator is sqrt(2)*c(-6). */ + + z2 = (INT32) wsptr[2]; + z3 = (INT32) wsptr[6]; + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); + tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865); + tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065); + + /* Add fudge factor here for final descale. */ + z2 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + z3 = (INT32) wsptr[4]; + + tmp0 = (z2 + z3) << CONST_BITS; + tmp1 = (z2 - z3) << CONST_BITS; + + tmp10 = tmp0 + tmp2; + tmp13 = tmp0 - tmp2; + tmp11 = tmp1 + tmp3; + tmp12 = tmp1 - tmp3; + + /* Odd part per figure 8; the matrix is unitary and hence its + * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively. + */ + + tmp0 = (INT32) wsptr[7]; + tmp1 = (INT32) wsptr[5]; + tmp2 = (INT32) wsptr[3]; + tmp3 = (INT32) wsptr[1]; + + z2 = tmp0 + tmp2; + z3 = tmp1 + tmp3; + + z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */ + z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ + z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ + z2 += z1; + z3 += z1; + + z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ + tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ + tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ + tmp0 += z1 + z2; + tmp3 += z1 + z3; + + z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ + tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ + tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ + tmp1 += z1 + z3; + tmp2 += z1 + z2; + + /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp3, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp1, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += DCTSIZE; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 7x14 output block. + * + * 14-point IDCT in pass 1 (columns), 7-point in pass 2 (rows). 
+ */ + +GLOBAL(void) +jpeg_idct_7x14 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[7*14]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 14-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/28). + */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 7; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z1 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + z1 += ONE << (CONST_BITS-PASS1_BITS-1); + z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z2 = MULTIPLY(z4, FIX(1.274162392)); /* c4 */ + z3 = MULTIPLY(z4, FIX(0.314692123)); /* c12 */ + z4 = MULTIPLY(z4, FIX(0.881747734)); /* c8 */ + + tmp10 = z1 + z2; + tmp11 = z1 + z3; + tmp12 = z1 - z4; + + tmp23 = RIGHT_SHIFT(z1 - ((z2 + z3 - z4) << 1), /* c0 = (c4+c12-c8)*2 */ + CONST_BITS-PASS1_BITS); + + z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + z3 = MULTIPLY(z1 + z2, FIX(1.105676686)); /* c6 */ + + tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */ + tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */ + tmp15 = MULTIPLY(z1, FIX(0.613604268)) - /* c10 */ + MULTIPLY(z2, FIX(1.378756276)); /* c2 */ + + tmp20 = tmp10 + tmp13; + tmp26 = tmp10 - tmp13; + tmp21 = tmp11 + tmp14; + tmp25 = tmp11 - tmp14; + tmp22 = tmp12 + tmp15; + tmp24 = tmp12 - tmp15; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + tmp13 = z4 << CONST_BITS; + + tmp14 = z1 + z3; + tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */ + tmp12 = MULTIPLY(tmp14, FIX(1.197448846)); /* c5 */ + tmp10 = tmp11 + tmp12 + tmp13 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */ + tmp14 = MULTIPLY(tmp14, FIX(0.752406978)); /* c9 */ + tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */ + z1 -= z2; + tmp15 = MULTIPLY(z1, FIX(0.467085129)) - tmp13; /* c11 */ + tmp16 += tmp15; + z1 += z4; + z4 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - tmp13; /* -c13 */ + tmp11 += z4 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */ + tmp12 += z4 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */ + z4 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */ + tmp14 += z4 + tmp13 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */ + tmp15 += z4 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */ + + tmp13 = (z1 - z3) << PASS1_BITS; + + /* Final output stage */ + + wsptr[7*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[7*13] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[7*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[7*12] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS); + wsptr[7*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[7*11] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS); + wsptr[7*3] = (int) (tmp23 + tmp13); + 
wsptr[7*10] = (int) (tmp23 - tmp13); + wsptr[7*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS); + wsptr[7*9] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS); + wsptr[7*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS); + wsptr[7*8] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS); + wsptr[7*6] = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS); + wsptr[7*7] = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 14 rows from work array, store into output array. + * 7-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/14). + */ + wsptr = workspace; + for (ctr = 0; ctr < 14; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp23 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp23 <<= CONST_BITS; + + z1 = (INT32) wsptr[2]; + z2 = (INT32) wsptr[4]; + z3 = (INT32) wsptr[6]; + + tmp20 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */ + tmp22 = MULTIPLY(z1 - z2, FIX(0.314692123)); /* c6 */ + tmp21 = tmp20 + tmp22 + tmp23 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */ + tmp10 = z1 + z3; + z2 -= tmp10; + tmp10 = MULTIPLY(tmp10, FIX(1.274162392)) + tmp23; /* c2 */ + tmp20 += tmp10 - MULTIPLY(z3, FIX(0.077722536)); /* c2-c4-c6 */ + tmp22 += tmp10 - MULTIPLY(z1, FIX(2.470602249)); /* c2+c4+c6 */ + tmp23 += MULTIPLY(z2, FIX(1.414213562)); /* c0 */ + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + + tmp11 = MULTIPLY(z1 + z2, FIX(0.935414347)); /* (c3+c1-c5)/2 */ + tmp12 = MULTIPLY(z1 - z2, FIX(0.170262339)); /* (c3+c5-c1)/2 */ + tmp10 = tmp11 - tmp12; + tmp11 += tmp12; + tmp12 = MULTIPLY(z2 + z3, - FIX(1.378756276)); /* -c1 */ + tmp11 += tmp12; + z2 = MULTIPLY(z1 + z3, FIX(0.613604268)); /* c5 */ + tmp10 += z2; + tmp12 += z2 + MULTIPLY(z3, FIX(1.870828693)); /* c3+c1-c5 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 7; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 6x12 output block. + * + * 12-point IDCT in pass 1 (columns), 6-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_6x12 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25; + INT32 z1, z2, z3, z4; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[6*12]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 12-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/24). 
+ */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z3 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + z3 += ONE << (CONST_BITS-PASS1_BITS-1); + + z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z4 = MULTIPLY(z4, FIX(1.224744871)); /* c4 */ + + tmp10 = z3 + z4; + tmp11 = z3 - z4; + + z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z4 = MULTIPLY(z1, FIX(1.366025404)); /* c2 */ + z1 <<= CONST_BITS; + z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + z2 <<= CONST_BITS; + + tmp12 = z1 - z2; + + tmp21 = z3 + tmp12; + tmp24 = z3 - tmp12; + + tmp12 = z4 + z2; + + tmp20 = tmp10 + tmp12; + tmp25 = tmp10 - tmp12; + + tmp12 = z4 - z1 - z2; + + tmp22 = tmp11 + tmp12; + tmp23 = tmp11 - tmp12; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + tmp11 = MULTIPLY(z2, FIX(1.306562965)); /* c3 */ + tmp14 = MULTIPLY(z2, - FIX_0_541196100); /* -c9 */ + + tmp10 = z1 + z3; + tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */ + tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */ + tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */ + tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */ + tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */ + tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */ + tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */ + MULTIPLY(z4, FIX(1.982889723)); /* c5+c7 */ + + z1 -= z4; + z2 -= z3; + z3 = MULTIPLY(z1 + z2, FIX_0_541196100); /* c9 */ + tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */ + tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */ + + /* Final output stage */ + + wsptr[6*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[6*11] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[6*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[6*10] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS); + wsptr[6*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS); + wsptr[6*9] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS); + wsptr[6*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS); + wsptr[6*8] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS); + wsptr[6*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS); + wsptr[6*7] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS); + wsptr[6*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS); + wsptr[6*6] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 12 rows from work array, store into output array. + * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12). + */ + wsptr = workspace; + for (ctr = 0; ctr < 12; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + tmp10 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp10 <<= CONST_BITS; + tmp12 = (INT32) wsptr[4]; + tmp20 = MULTIPLY(tmp12, FIX(0.707106781)); /* c4 */ + tmp11 = tmp10 + tmp20; + tmp21 = tmp10 - tmp20 - tmp20; + tmp20 = (INT32) wsptr[2]; + tmp10 = MULTIPLY(tmp20, FIX(1.224744871)); /* c2 */ + tmp20 = tmp11 + tmp10; + tmp22 = tmp11 - tmp10; + + /* Odd part */ + + z1 = (INT32) wsptr[1]; + z2 = (INT32) wsptr[3]; + z3 = (INT32) wsptr[5]; + tmp11 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */ + tmp10 = tmp11 + ((z1 + z2) << CONST_BITS); + tmp12 = tmp11 + ((z3 - z2) << CONST_BITS); + tmp11 = (z1 - z2 - z3) << CONST_BITS; + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 6; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 5x10 output block. + * + * 10-point IDCT in pass 1 (columns), 5-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_5x10 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp10, tmp11, tmp12, tmp13, tmp14; + INT32 tmp20, tmp21, tmp22, tmp23, tmp24; + INT32 z1, z2, z3, z4, z5; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[5*10]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 10-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/20). + */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 5; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z3 <<= CONST_BITS; + /* Add fudge factor here for final descale. 
*/ + z3 += ONE << (CONST_BITS-PASS1_BITS-1); + z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z1 = MULTIPLY(z4, FIX(1.144122806)); /* c4 */ + z2 = MULTIPLY(z4, FIX(0.437016024)); /* c8 */ + tmp10 = z3 + z1; + tmp11 = z3 - z2; + + tmp22 = RIGHT_SHIFT(z3 - ((z1 - z2) << 1), /* c0 = (c4-c8)*2 */ + CONST_BITS-PASS1_BITS); + + z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c6 */ + tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */ + tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */ + + tmp20 = tmp10 + tmp12; + tmp24 = tmp10 - tmp12; + tmp21 = tmp11 + tmp13; + tmp23 = tmp11 - tmp13; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + + tmp11 = z2 + z4; + tmp13 = z2 - z4; + + tmp12 = MULTIPLY(tmp13, FIX(0.309016994)); /* (c3-c7)/2 */ + z5 = z3 << CONST_BITS; + + z2 = MULTIPLY(tmp11, FIX(0.951056516)); /* (c3+c7)/2 */ + z4 = z5 + tmp12; + + tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */ + tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */ + + z2 = MULTIPLY(tmp11, FIX(0.587785252)); /* (c1-c9)/2 */ + z4 = z5 - tmp12 - (tmp13 << (CONST_BITS - 1)); + + tmp12 = (z1 - tmp13 - z3) << PASS1_BITS; + + tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */ + tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */ + + /* Final output stage */ + + wsptr[5*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS); + wsptr[5*9] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS); + wsptr[5*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS); + wsptr[5*8] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS); + wsptr[5*2] = (int) (tmp22 + tmp12); + wsptr[5*7] = (int) (tmp22 - tmp12); + wsptr[5*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS); + wsptr[5*6] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS); + wsptr[5*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS); + wsptr[5*5] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 10 rows from work array, store into output array. + * 5-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/10). + */ + wsptr = workspace; + for (ctr = 0; ctr < 10; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + tmp12 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp12 <<= CONST_BITS; + tmp13 = (INT32) wsptr[2]; + tmp14 = (INT32) wsptr[4]; + z1 = MULTIPLY(tmp13 + tmp14, FIX(0.790569415)); /* (c2+c4)/2 */ + z2 = MULTIPLY(tmp13 - tmp14, FIX(0.353553391)); /* (c2-c4)/2 */ + z3 = tmp12 + z2; + tmp10 = z3 + z1; + tmp11 = z3 - z1; + tmp12 -= z2 << 2; + + /* Odd part */ + + z2 = (INT32) wsptr[1]; + z3 = (INT32) wsptr[3]; + + z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c3 */ + tmp13 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */ + tmp14 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp13, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp14, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 5; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 4x8 output block. + * + * 8-point IDCT in pass 1 (columns), 4-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_4x8 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp3; + INT32 tmp10, tmp11, tmp12, tmp13; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[4*8]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. */ + /* Note results are scaled up by sqrt(8) compared to a true IDCT; */ + /* furthermore, we scale the results by 2**PASS1_BITS. */ + + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 4; ctr > 0; ctr--) { + /* Due to quantization, we will usually find that many of the input + * coefficients are zero, especially the AC terms. We can exploit this + * by short-circuiting the IDCT calculation for any column in which all + * the AC terms are zero. In that case each output is equal to the + * DC coefficient (with scale factor as needed). + * With typical images and quantization tables, half or more of the + * column DCT calculations can be simplified this way. + */ + + if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 && + inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 && + inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 && + inptr[DCTSIZE*7] == 0) { + /* AC terms all zero */ + int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS; + + wsptr[4*0] = dcval; + wsptr[4*1] = dcval; + wsptr[4*2] = dcval; + wsptr[4*3] = dcval; + wsptr[4*4] = dcval; + wsptr[4*5] = dcval; + wsptr[4*6] = dcval; + wsptr[4*7] = dcval; + + inptr++; /* advance pointers to next column */ + quantptr++; + wsptr++; + continue; + } + + /* Even part: reverse the even part of the forward DCT. */ + /* The rotator is sqrt(2)*c(-6). 
*/ + + z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]); + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); + tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865); + tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065); + + z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + z2 <<= CONST_BITS; + z3 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + z2 += ONE << (CONST_BITS-PASS1_BITS-1); + + tmp0 = z2 + z3; + tmp1 = z2 - z3; + + tmp10 = tmp0 + tmp2; + tmp13 = tmp0 - tmp2; + tmp11 = tmp1 + tmp3; + tmp12 = tmp1 - tmp3; + + /* Odd part per figure 8; the matrix is unitary and hence its + * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively. + */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]); + tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + + z2 = tmp0 + tmp2; + z3 = tmp1 + tmp3; + + z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */ + z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ + z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ + z2 += z1; + z3 += z1; + + z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ + tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ + tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ + tmp0 += z1 + z2; + tmp3 += z1 + z3; + + z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ + tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ + tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ + tmp1 += z1 + z3; + tmp2 += z1 + z2; + + /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */ + + wsptr[4*0] = (int) RIGHT_SHIFT(tmp10 + tmp3, CONST_BITS-PASS1_BITS); + wsptr[4*7] = (int) RIGHT_SHIFT(tmp10 - tmp3, CONST_BITS-PASS1_BITS); + wsptr[4*1] = (int) RIGHT_SHIFT(tmp11 + tmp2, CONST_BITS-PASS1_BITS); + wsptr[4*6] = (int) RIGHT_SHIFT(tmp11 - tmp2, CONST_BITS-PASS1_BITS); + wsptr[4*2] = (int) RIGHT_SHIFT(tmp12 + tmp1, CONST_BITS-PASS1_BITS); + wsptr[4*5] = (int) RIGHT_SHIFT(tmp12 - tmp1, CONST_BITS-PASS1_BITS); + wsptr[4*3] = (int) RIGHT_SHIFT(tmp13 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[4*4] = (int) RIGHT_SHIFT(tmp13 - tmp0, CONST_BITS-PASS1_BITS); + + inptr++; /* advance pointers to next column */ + quantptr++; + wsptr++; + } + + /* Pass 2: process 8 rows from work array, store into output array. + * 4-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/16). + */ + wsptr = workspace; + for (ctr = 0; ctr < 8; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp2 = (INT32) wsptr[2]; + + tmp10 = (tmp0 + tmp2) << CONST_BITS; + tmp12 = (tmp0 - tmp2) << CONST_BITS; + + /* Odd part */ + /* Same rotation as in the even part of the 8x8 LL&M IDCT */ + + z2 = (INT32) wsptr[1]; + z3 = (INT32) wsptr[3]; + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */ + tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */ + tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 4; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a reduced-size 3x6 output block. + * + * 6-point IDCT in pass 1 (columns), 3-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_3x6 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + int * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + int workspace[3*6]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12). + */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 3; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp0 <<= CONST_BITS; + /* Add fudge factor here for final descale. */ + tmp0 += ONE << (CONST_BITS-PASS1_BITS-1); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]); + tmp10 = MULTIPLY(tmp2, FIX(0.707106781)); /* c4 */ + tmp1 = tmp0 + tmp10; + tmp11 = RIGHT_SHIFT(tmp0 - tmp10 - tmp10, CONST_BITS-PASS1_BITS); + tmp10 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + tmp0 = MULTIPLY(tmp10, FIX(1.224744871)); /* c2 */ + tmp10 = tmp1 + tmp0; + tmp12 = tmp1 - tmp0; + + /* Odd part */ + + z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]); + tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */ + tmp0 = tmp1 + ((z1 + z2) << CONST_BITS); + tmp2 = tmp1 + ((z3 - z2) << CONST_BITS); + tmp1 = (z1 - z2 - z3) << PASS1_BITS; + + /* Final output stage */ + + wsptr[3*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS); + wsptr[3*5] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS); + wsptr[3*1] = (int) (tmp11 + tmp1); + wsptr[3*4] = (int) (tmp11 - tmp1); + wsptr[3*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS); + wsptr[3*3] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS); + } + + /* Pass 2: process 6 rows from work array, store into output array. + * 3-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/6). + */ + wsptr = workspace; + for (ctr = 0; ctr < 6; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. 
*/ + tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2)); + tmp0 <<= CONST_BITS; + tmp2 = (INT32) wsptr[2]; + tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */ + tmp10 = tmp0 + tmp12; + tmp2 = tmp0 - tmp12 - tmp12; + + /* Odd part */ + + tmp12 = (INT32) wsptr[1]; + tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */ + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp2, + CONST_BITS+PASS1_BITS+3) + & RANGE_MASK]; + + wsptr += 3; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 2x4 output block. + * + * 4-point IDCT in pass 1 (columns), 2-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_2x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp2, tmp10, tmp12; + INT32 z1, z2, z3; + JCOEFPTR inptr; + ISLOW_MULT_TYPE * quantptr; + INT32 * wsptr; + JSAMPROW outptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + int ctr; + INT32 workspace[2*4]; /* buffers data between passes */ + SHIFT_TEMPS + + /* Pass 1: process columns from input, store into work array. + * 4-point IDCT kernel, + * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT]. + */ + inptr = coef_block; + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + wsptr = workspace; + for (ctr = 0; ctr < 2; ctr++, inptr++, quantptr++, wsptr++) { + /* Even part */ + + tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]); + tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]); + + tmp10 = (tmp0 + tmp2) << CONST_BITS; + tmp12 = (tmp0 - tmp2) << CONST_BITS; + + /* Odd part */ + /* Same rotation as in the even part of the 8x8 LL&M IDCT */ + + z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]); + z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]); + + z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */ + tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */ + tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */ + + /* Final output stage */ + + wsptr[2*0] = tmp10 + tmp0; + wsptr[2*3] = tmp10 - tmp0; + wsptr[2*1] = tmp12 + tmp2; + wsptr[2*2] = tmp12 - tmp2; + } + + /* Pass 2: process 4 rows from work array, store into output array. */ + + wsptr = workspace; + for (ctr = 0; ctr < 4; ctr++) { + outptr = output_buf[ctr] + output_col; + + /* Even part */ + + /* Add fudge factor here for final descale. */ + tmp10 = wsptr[0] + (ONE << (CONST_BITS+2)); + + /* Odd part */ + + tmp0 = wsptr[1]; + + /* Final output stage */ + + outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS+3) + & RANGE_MASK]; + outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS+3) + & RANGE_MASK]; + + wsptr += 2; /* advance pointer to next row */ + } +} + + +/* + * Perform dequantization and inverse DCT on one block of coefficients, + * producing a 1x2 output block. + * + * 2-point IDCT in pass 1 (columns), 1-point in pass 2 (rows). + */ + +GLOBAL(void) +jpeg_idct_1x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, + JSAMPARRAY output_buf, JDIMENSION output_col) +{ + INT32 tmp0, tmp10; + ISLOW_MULT_TYPE * quantptr; + JSAMPLE *range_limit = IDCT_range_limit(cinfo); + SHIFT_TEMPS + + /* Process 1 column from input, store into output array. 
*/ + + quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table; + + /* Even part */ + + tmp10 = DEQUANTIZE(coef_block[DCTSIZE*0], quantptr[DCTSIZE*0]); + /* Add fudge factor here for final descale. */ + tmp10 += ONE << 2; + + /* Odd part */ + + tmp0 = DEQUANTIZE(coef_block[DCTSIZE*1], quantptr[DCTSIZE*1]); + + /* Final output stage */ + + output_buf[0][output_col] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, 3) + & RANGE_MASK]; + output_buf[1][output_col] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, 3) + & RANGE_MASK]; +} + +#endif /* IDCT_SCALING_SUPPORTED */ +#endif /* DCT_ISLOW_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jmemmgr.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jmemmgr.c new file mode 100644 index 000000000..f0e83fb95 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jmemmgr.c @@ -0,0 +1,1119 @@ +/* + * jmemmgr.c + * + * Copyright (C) 1991-1997, Thomas G. Lane. + * Modified 2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains the JPEG system-independent memory management + * routines. This code is usable across a wide variety of machines; most + * of the system dependencies have been isolated in a separate file. + * The major functions provided here are: + * * pool-based allocation and freeing of memory; + * * policy decisions about how to divide available memory among the + * virtual arrays; + * * control logic for swapping virtual arrays between main memory and + * backing storage. + * The separate system-dependent file provides the actual backing-storage + * access code, and it contains the policy decision about how much total + * main memory to use. + * This file is system-dependent in the sense that some of its functions + * are unnecessary in some systems. For example, if there is enough virtual + * memory so that backing storage will never be used, much of the virtual + * array control logic could be removed. (Of course, if you have that much + * memory then you shouldn't care about a little bit of unused code...) + */ + +#define JPEG_INTERNALS +#define AM_MEMORY_MANAGER /* we define jvirt_Xarray_control structs */ +#include "jinclude.h" +#include "jpeglib.h" +#include "jmemsys.h" /* import the system-dependent declarations */ + +#ifndef NO_GETENV +#ifndef HAVE_STDLIB_H /* should declare getenv() */ +extern char * getenv JPP((const char * name)); +#endif +#endif + + +/* + * Some important notes: + * The allocation routines provided here must never return NULL. + * They should exit to error_exit if unsuccessful. + * + * It's not a good idea to try to merge the sarray and barray routines, + * even though they are textually almost the same, because samples are + * usually stored as bytes while coefficients are shorts or ints. Thus, + * in machines where byte pointers have a different representation from + * word pointers, the resulting machine code could not be the same. + */ + + +/* + * Many machines require storage alignment: longs must start on 4-byte + * boundaries, doubles on 8-byte boundaries, etc. On such machines, malloc() + * always returns pointers that are multiples of the worst-case alignment + * requirement, and we had better do so too. + * There isn't any really portable way to determine the worst-case alignment + * requirement. 
This module assumes that the alignment requirement is + * multiples of sizeof(ALIGN_TYPE). + * By default, we define ALIGN_TYPE as double. This is necessary on some + * workstations (where doubles really do need 8-byte alignment) and will work + * fine on nearly everything. If your machine has lesser alignment needs, + * you can save a few bytes by making ALIGN_TYPE smaller. + * The only place I know of where this will NOT work is certain Macintosh + * 680x0 compilers that define double as a 10-byte IEEE extended float. + * Doing 10-byte alignment is counterproductive because longwords won't be + * aligned well. Put "#define ALIGN_TYPE long" in jconfig.h if you have + * such a compiler. + */ + +#ifndef ALIGN_TYPE /* so can override from jconfig.h */ +#define ALIGN_TYPE double +#endif + + +/* + * We allocate objects from "pools", where each pool is gotten with a single + * request to jpeg_get_small() or jpeg_get_large(). There is no per-object + * overhead within a pool, except for alignment padding. Each pool has a + * header with a link to the next pool of the same class. + * Small and large pool headers are identical except that the latter's + * link pointer must be FAR on 80x86 machines. + * Notice that the "real" header fields are union'ed with a dummy ALIGN_TYPE + * field. This forces the compiler to make SIZEOF(small_pool_hdr) a multiple + * of the alignment requirement of ALIGN_TYPE. + */ + +typedef union small_pool_struct * small_pool_ptr; + +typedef union small_pool_struct { + struct { + small_pool_ptr next; /* next in list of pools */ + size_t bytes_used; /* how many bytes already used within pool */ + size_t bytes_left; /* bytes still available in this pool */ + } hdr; + ALIGN_TYPE dummy; /* included in union to ensure alignment */ +} small_pool_hdr; + +typedef union large_pool_struct FAR * large_pool_ptr; + +typedef union large_pool_struct { + struct { + large_pool_ptr next; /* next in list of pools */ + size_t bytes_used; /* how many bytes already used within pool */ + size_t bytes_left; /* bytes still available in this pool */ + } hdr; + ALIGN_TYPE dummy; /* included in union to ensure alignment */ +} large_pool_hdr; + + +/* + * Here is the full definition of a memory manager object. + */ + +typedef struct { + struct jpeg_memory_mgr pub; /* public fields */ + + /* Each pool identifier (lifetime class) names a linked list of pools. */ + small_pool_ptr small_list[JPOOL_NUMPOOLS]; + large_pool_ptr large_list[JPOOL_NUMPOOLS]; + + /* Since we only have one lifetime class of virtual arrays, only one + * linked list is necessary (for each datatype). Note that the virtual + * array control blocks being linked together are actually stored somewhere + * in the small-pool list. + */ + jvirt_sarray_ptr virt_sarray_list; + jvirt_barray_ptr virt_barray_list; + + /* This counts total space obtained from jpeg_get_small/large */ + long total_space_allocated; + + /* alloc_sarray and alloc_barray set this value for use by virtual + * array routines. + */ + JDIMENSION last_rowsperchunk; /* from most recent alloc_sarray/barray */ +} my_memory_mgr; + +typedef my_memory_mgr * my_mem_ptr; + + +/* + * The control blocks for virtual arrays. + * Note that these blocks are allocated in the "small" pool area. + * System-dependent info for the associated backing store (if any) is hidden + * inside the backing_store_info struct. 
+ */ + +struct jvirt_sarray_control { + JSAMPARRAY mem_buffer; /* => the in-memory buffer */ + JDIMENSION rows_in_array; /* total virtual array height */ + JDIMENSION samplesperrow; /* width of array (and of memory buffer) */ + JDIMENSION maxaccess; /* max rows accessed by access_virt_sarray */ + JDIMENSION rows_in_mem; /* height of memory buffer */ + JDIMENSION rowsperchunk; /* allocation chunk size in mem_buffer */ + JDIMENSION cur_start_row; /* first logical row # in the buffer */ + JDIMENSION first_undef_row; /* row # of first uninitialized row */ + boolean pre_zero; /* pre-zero mode requested? */ + boolean dirty; /* do current buffer contents need written? */ + boolean b_s_open; /* is backing-store data valid? */ + jvirt_sarray_ptr next; /* link to next virtual sarray control block */ + backing_store_info b_s_info; /* System-dependent control info */ +}; + +struct jvirt_barray_control { + JBLOCKARRAY mem_buffer; /* => the in-memory buffer */ + JDIMENSION rows_in_array; /* total virtual array height */ + JDIMENSION blocksperrow; /* width of array (and of memory buffer) */ + JDIMENSION maxaccess; /* max rows accessed by access_virt_barray */ + JDIMENSION rows_in_mem; /* height of memory buffer */ + JDIMENSION rowsperchunk; /* allocation chunk size in mem_buffer */ + JDIMENSION cur_start_row; /* first logical row # in the buffer */ + JDIMENSION first_undef_row; /* row # of first uninitialized row */ + boolean pre_zero; /* pre-zero mode requested? */ + boolean dirty; /* do current buffer contents need written? */ + boolean b_s_open; /* is backing-store data valid? */ + jvirt_barray_ptr next; /* link to next virtual barray control block */ + backing_store_info b_s_info; /* System-dependent control info */ +}; + + +#ifdef MEM_STATS /* optional extra stuff for statistics */ + +LOCAL(void) +print_mem_stats (j_common_ptr cinfo, int pool_id) +{ + my_mem_ptr mem = (my_mem_ptr) cinfo->mem; + small_pool_ptr shdr_ptr; + large_pool_ptr lhdr_ptr; + + /* Since this is only a debugging stub, we can cheat a little by using + * fprintf directly rather than going through the trace message code. + * This is helpful because message parm array can't handle longs. + */ + fprintf(stderr, "Freeing pool %d, total space = %ld\n", + pool_id, mem->total_space_allocated); + + for (lhdr_ptr = mem->large_list[pool_id]; lhdr_ptr != NULL; + lhdr_ptr = lhdr_ptr->hdr.next) { + fprintf(stderr, " Large chunk used %ld\n", + (long) lhdr_ptr->hdr.bytes_used); + } + + for (shdr_ptr = mem->small_list[pool_id]; shdr_ptr != NULL; + shdr_ptr = shdr_ptr->hdr.next) { + fprintf(stderr, " Small chunk used %ld free %ld\n", + (long) shdr_ptr->hdr.bytes_used, + (long) shdr_ptr->hdr.bytes_left); + } +} + +#endif /* MEM_STATS */ + + +LOCAL(void) +out_of_memory (j_common_ptr cinfo, int which) +/* Report an out-of-memory error and stop execution */ +/* If we compiled MEM_STATS support, report alloc requests before dying */ +{ +#ifdef MEM_STATS + cinfo->err->trace_level = 2; /* force self_destruct to report stats */ +#endif + ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, which); +} + + +/* + * Allocation of "small" objects. + * + * For these, we use pooled storage. When a new pool must be created, + * we try to get enough space for the current request plus a "slop" factor, + * where the slop will be the amount of leftover space in the new pool. + * The speed vs. space tradeoff is largely determined by the slop values. 
+ * A different slop value is provided for each pool class (lifetime), + * and we also distinguish the first pool of a class from later ones. + * NOTE: the values given work fairly well on both 16- and 32-bit-int + * machines, but may be too small if longs are 64 bits or more. + */ + +static const size_t first_pool_slop[JPOOL_NUMPOOLS] = +{ + 1600, /* first PERMANENT pool */ + 16000 /* first IMAGE pool */ +}; + +static const size_t extra_pool_slop[JPOOL_NUMPOOLS] = +{ + 0, /* additional PERMANENT pools */ + 5000 /* additional IMAGE pools */ +}; + +#define MIN_SLOP 50 /* greater than 0 to avoid futile looping */ + + +METHODDEF(void *) +alloc_small (j_common_ptr cinfo, int pool_id, size_t sizeofobject) +/* Allocate a "small" object */ +{ + my_mem_ptr mem = (my_mem_ptr) cinfo->mem; + small_pool_ptr hdr_ptr, prev_hdr_ptr; + char * data_ptr; + size_t odd_bytes, min_request, slop; + + /* Check for unsatisfiable request (do now to ensure no overflow below) */ + if (sizeofobject > (size_t) (MAX_ALLOC_CHUNK-SIZEOF(small_pool_hdr))) + out_of_memory(cinfo, 1); /* request exceeds malloc's ability */ + + /* Round up the requested size to a multiple of SIZEOF(ALIGN_TYPE) */ + odd_bytes = sizeofobject % SIZEOF(ALIGN_TYPE); + if (odd_bytes > 0) + sizeofobject += SIZEOF(ALIGN_TYPE) - odd_bytes; + + /* See if space is available in any existing pool */ + if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS) + ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ + prev_hdr_ptr = NULL; + hdr_ptr = mem->small_list[pool_id]; + while (hdr_ptr != NULL) { + if (hdr_ptr->hdr.bytes_left >= sizeofobject) + break; /* found pool with enough space */ + prev_hdr_ptr = hdr_ptr; + hdr_ptr = hdr_ptr->hdr.next; + } + + /* Time to make a new pool? */ + if (hdr_ptr == NULL) { + /* min_request is what we need now, slop is what will be leftover */ + min_request = sizeofobject + SIZEOF(small_pool_hdr); + if (prev_hdr_ptr == NULL) /* first pool in class? */ + slop = first_pool_slop[pool_id]; + else + slop = extra_pool_slop[pool_id]; + /* Don't ask for more than MAX_ALLOC_CHUNK */ + if (slop > (size_t) (MAX_ALLOC_CHUNK-min_request)) + slop = (size_t) (MAX_ALLOC_CHUNK-min_request); + /* Try to get space, if fail reduce slop and try again */ + for (;;) { + hdr_ptr = (small_pool_ptr) jpeg_get_small(cinfo, min_request + slop); + if (hdr_ptr != NULL) + break; + slop /= 2; + if (slop < MIN_SLOP) /* give up when it gets real small */ + out_of_memory(cinfo, 2); /* jpeg_get_small failed */ + } + mem->total_space_allocated += min_request + slop; + /* Success, initialize the new pool header and add to end of list */ + hdr_ptr->hdr.next = NULL; + hdr_ptr->hdr.bytes_used = 0; + hdr_ptr->hdr.bytes_left = sizeofobject + slop; + if (prev_hdr_ptr == NULL) /* first pool in class? */ + mem->small_list[pool_id] = hdr_ptr; + else + prev_hdr_ptr->hdr.next = hdr_ptr; + } + + /* OK, allocate the object from the current pool */ + data_ptr = (char *) (hdr_ptr + 1); /* point to first data byte in pool */ + data_ptr += hdr_ptr->hdr.bytes_used; /* point to place for object */ + hdr_ptr->hdr.bytes_used += sizeofobject; + hdr_ptr->hdr.bytes_left -= sizeofobject; + + return (void *) data_ptr; +} + + +/* + * Allocation of "large" objects. + * + * The external semantics of these are the same as "small" objects, + * except that FAR pointers are used on 80x86. However the pool + * management heuristics are quite different. 
We assume that each + * request is large enough that it may as well be passed directly to + * jpeg_get_large; the pool management just links everything together + * so that we can free it all on demand. + * Note: the major use of "large" objects is in JSAMPARRAY and JBLOCKARRAY + * structures. The routines that create these structures (see below) + * deliberately bunch rows together to ensure a large request size. + */ + +METHODDEF(void FAR *) +alloc_large (j_common_ptr cinfo, int pool_id, size_t sizeofobject) +/* Allocate a "large" object */ +{ + my_mem_ptr mem = (my_mem_ptr) cinfo->mem; + large_pool_ptr hdr_ptr; + size_t odd_bytes; + + /* Check for unsatisfiable request (do now to ensure no overflow below) */ + if (sizeofobject > (size_t) (MAX_ALLOC_CHUNK-SIZEOF(large_pool_hdr))) + out_of_memory(cinfo, 3); /* request exceeds malloc's ability */ + + /* Round up the requested size to a multiple of SIZEOF(ALIGN_TYPE) */ + odd_bytes = sizeofobject % SIZEOF(ALIGN_TYPE); + if (odd_bytes > 0) + sizeofobject += SIZEOF(ALIGN_TYPE) - odd_bytes; + + /* Always make a new pool */ + if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS) + ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ + + hdr_ptr = (large_pool_ptr) jpeg_get_large(cinfo, sizeofobject + + SIZEOF(large_pool_hdr)); + if (hdr_ptr == NULL) + out_of_memory(cinfo, 4); /* jpeg_get_large failed */ + mem->total_space_allocated += sizeofobject + SIZEOF(large_pool_hdr); + + /* Success, initialize the new pool header and add to list */ + hdr_ptr->hdr.next = mem->large_list[pool_id]; + /* We maintain space counts in each pool header for statistical purposes, + * even though they are not needed for allocation. + */ + hdr_ptr->hdr.bytes_used = sizeofobject; + hdr_ptr->hdr.bytes_left = 0; + mem->large_list[pool_id] = hdr_ptr; + + return (void FAR *) (hdr_ptr + 1); /* point to first data byte in pool */ +} + + +/* + * Creation of 2-D sample arrays. + * The pointers are in near heap, the samples themselves in FAR heap. + * + * To minimize allocation overhead and to allow I/O of large contiguous + * blocks, we allocate the sample rows in groups of as many rows as possible + * without exceeding MAX_ALLOC_CHUNK total bytes per allocation request. + * NB: the virtual array control routines, later in this file, know about + * this chunking of rows. The rowsperchunk value is left in the mem manager + * object so that it can be saved away if this sarray is the workspace for + * a virtual array. 
+ */ + +METHODDEF(JSAMPARRAY) +alloc_sarray (j_common_ptr cinfo, int pool_id, + JDIMENSION samplesperrow, JDIMENSION numrows) +/* Allocate a 2-D sample array */ +{ + my_mem_ptr mem = (my_mem_ptr) cinfo->mem; + JSAMPARRAY result; + JSAMPROW workspace; + JDIMENSION rowsperchunk, currow, i; + long ltemp; + + /* Calculate max # of rows allowed in one allocation chunk */ + ltemp = (MAX_ALLOC_CHUNK-SIZEOF(large_pool_hdr)) / + ((long) samplesperrow * SIZEOF(JSAMPLE)); + if (ltemp <= 0) + ERREXIT(cinfo, JERR_WIDTH_OVERFLOW); + if (ltemp < (long) numrows) + rowsperchunk = (JDIMENSION) ltemp; + else + rowsperchunk = numrows; + mem->last_rowsperchunk = rowsperchunk; + + /* Get space for row pointers (small object) */ + result = (JSAMPARRAY) alloc_small(cinfo, pool_id, + (size_t) (numrows * SIZEOF(JSAMPROW))); + + /* Get the rows themselves (large objects) */ + currow = 0; + while (currow < numrows) { + rowsperchunk = MIN(rowsperchunk, numrows - currow); + workspace = (JSAMPROW) alloc_large(cinfo, pool_id, + (size_t) ((size_t) rowsperchunk * (size_t) samplesperrow + * SIZEOF(JSAMPLE))); + for (i = rowsperchunk; i > 0; i--) { + result[currow++] = workspace; + workspace += samplesperrow; + } + } + + return result; +} + + +/* + * Creation of 2-D coefficient-block arrays. + * This is essentially the same as the code for sample arrays, above. + */ + +METHODDEF(JBLOCKARRAY) +alloc_barray (j_common_ptr cinfo, int pool_id, + JDIMENSION blocksperrow, JDIMENSION numrows) +/* Allocate a 2-D coefficient-block array */ +{ + my_mem_ptr mem = (my_mem_ptr) cinfo->mem; + JBLOCKARRAY result; + JBLOCKROW workspace; + JDIMENSION rowsperchunk, currow, i; + long ltemp; + + /* Calculate max # of rows allowed in one allocation chunk */ + ltemp = (MAX_ALLOC_CHUNK-SIZEOF(large_pool_hdr)) / + ((long) blocksperrow * SIZEOF(JBLOCK)); + if (ltemp <= 0) + ERREXIT(cinfo, JERR_WIDTH_OVERFLOW); + if (ltemp < (long) numrows) + rowsperchunk = (JDIMENSION) ltemp; + else + rowsperchunk = numrows; + mem->last_rowsperchunk = rowsperchunk; + + /* Get space for row pointers (small object) */ + result = (JBLOCKARRAY) alloc_small(cinfo, pool_id, + (size_t) (numrows * SIZEOF(JBLOCKROW))); + + /* Get the rows themselves (large objects) */ + currow = 0; + while (currow < numrows) { + rowsperchunk = MIN(rowsperchunk, numrows - currow); + workspace = (JBLOCKROW) alloc_large(cinfo, pool_id, + (size_t) ((size_t) rowsperchunk * (size_t) blocksperrow + * SIZEOF(JBLOCK))); + for (i = rowsperchunk; i > 0; i--) { + result[currow++] = workspace; + workspace += blocksperrow; + } + } + + return result; +} + + +/* + * About virtual array management: + * + * The above "normal" array routines are only used to allocate strip buffers + * (as wide as the image, but just a few rows high). Full-image-sized buffers + * are handled as "virtual" arrays. The array is still accessed a strip at a + * time, but the memory manager must save the whole array for repeated + * accesses. The intended implementation is that there is a strip buffer in + * memory (as high as is possible given the desired memory limit), plus a + * backing file that holds the rest of the array. + * + * The request_virt_array routines are told the total size of the image and + * the maximum number of rows that will be accessed at once. The in-memory + * buffer must be at least as large as the maxaccess value. + * + * The request routines create control blocks but not the in-memory buffers. + * That is postponed until realize_virt_arrays is called. 
At that time the + * total amount of space needed is known (approximately, anyway), so free + * memory can be divided up fairly. + * + * The access_virt_array routines are responsible for making a specific strip + * area accessible (after reading or writing the backing file, if necessary). + * Note that the access routines are told whether the caller intends to modify + * the accessed strip; during a read-only pass this saves having to rewrite + * data to disk. The access routines are also responsible for pre-zeroing + * any newly accessed rows, if pre-zeroing was requested. + * + * In current usage, the access requests are usually for nonoverlapping + * strips; that is, successive access start_row numbers differ by exactly + * num_rows = maxaccess. This means we can get good performance with simple + * buffer dump/reload logic, by making the in-memory buffer be a multiple + * of the access height; then there will never be accesses across bufferload + * boundaries. The code will still work with overlapping access requests, + * but it doesn't handle bufferload overlaps very efficiently. + */ + + +METHODDEF(jvirt_sarray_ptr) +request_virt_sarray (j_common_ptr cinfo, int pool_id, boolean pre_zero, + JDIMENSION samplesperrow, JDIMENSION numrows, + JDIMENSION maxaccess) +/* Request a virtual 2-D sample array */ +{ + my_mem_ptr mem = (my_mem_ptr) cinfo->mem; + jvirt_sarray_ptr result; + + /* Only IMAGE-lifetime virtual arrays are currently supported */ + if (pool_id != JPOOL_IMAGE) + ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ + + /* get control block */ + result = (jvirt_sarray_ptr) alloc_small(cinfo, pool_id, + SIZEOF(struct jvirt_sarray_control)); + + result->mem_buffer = NULL; /* marks array not yet realized */ + result->rows_in_array = numrows; + result->samplesperrow = samplesperrow; + result->maxaccess = maxaccess; + result->pre_zero = pre_zero; + result->b_s_open = FALSE; /* no associated backing-store object */ + result->next = mem->virt_sarray_list; /* add to list of virtual arrays */ + mem->virt_sarray_list = result; + + return result; +} + + +METHODDEF(jvirt_barray_ptr) +request_virt_barray (j_common_ptr cinfo, int pool_id, boolean pre_zero, + JDIMENSION blocksperrow, JDIMENSION numrows, + JDIMENSION maxaccess) +/* Request a virtual 2-D coefficient-block array */ +{ + my_mem_ptr mem = (my_mem_ptr) cinfo->mem; + jvirt_barray_ptr result; + + /* Only IMAGE-lifetime virtual arrays are currently supported */ + if (pool_id != JPOOL_IMAGE) + ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ + + /* get control block */ + result = (jvirt_barray_ptr) alloc_small(cinfo, pool_id, + SIZEOF(struct jvirt_barray_control)); + + result->mem_buffer = NULL; /* marks array not yet realized */ + result->rows_in_array = numrows; + result->blocksperrow = blocksperrow; + result->maxaccess = maxaccess; + result->pre_zero = pre_zero; + result->b_s_open = FALSE; /* no associated backing-store object */ + result->next = mem->virt_barray_list; /* add to list of virtual arrays */ + mem->virt_barray_list = result; + + return result; +} + + +METHODDEF(void) +realize_virt_arrays (j_common_ptr cinfo) +/* Allocate the in-memory buffers for any unrealized virtual arrays */ +{ + my_mem_ptr mem = (my_mem_ptr) cinfo->mem; + long space_per_minheight, maximum_space, avail_mem; + long minheights, max_minheights; + jvirt_sarray_ptr sptr; + jvirt_barray_ptr bptr; + + /* Compute the minimum space needed (maxaccess rows in each buffer) + * and the maximum space needed (full image height in each 
buffer). + * These may be of use to the system-dependent jpeg_mem_available routine. + */ + space_per_minheight = 0; + maximum_space = 0; + for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) { + if (sptr->mem_buffer == NULL) { /* if not realized yet */ + space_per_minheight += (long) sptr->maxaccess * + (long) sptr->samplesperrow * SIZEOF(JSAMPLE); + maximum_space += (long) sptr->rows_in_array * + (long) sptr->samplesperrow * SIZEOF(JSAMPLE); + } + } + for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) { + if (bptr->mem_buffer == NULL) { /* if not realized yet */ + space_per_minheight += (long) bptr->maxaccess * + (long) bptr->blocksperrow * SIZEOF(JBLOCK); + maximum_space += (long) bptr->rows_in_array * + (long) bptr->blocksperrow * SIZEOF(JBLOCK); + } + } + + if (space_per_minheight <= 0) + return; /* no unrealized arrays, no work */ + + /* Determine amount of memory to actually use; this is system-dependent. */ + avail_mem = jpeg_mem_available(cinfo, space_per_minheight, maximum_space, + mem->total_space_allocated); + + /* If the maximum space needed is available, make all the buffers full + * height; otherwise parcel it out with the same number of minheights + * in each buffer. + */ + if (avail_mem >= maximum_space) + max_minheights = 1000000000L; + else { + max_minheights = avail_mem / space_per_minheight; + /* If there doesn't seem to be enough space, try to get the minimum + * anyway. This allows a "stub" implementation of jpeg_mem_available(). + */ + if (max_minheights <= 0) + max_minheights = 1; + } + + /* Allocate the in-memory buffers and initialize backing store as needed. */ + + for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) { + if (sptr->mem_buffer == NULL) { /* if not realized yet */ + minheights = ((long) sptr->rows_in_array - 1L) / sptr->maxaccess + 1L; + if (minheights <= max_minheights) { + /* This buffer fits in memory */ + sptr->rows_in_mem = sptr->rows_in_array; + } else { + /* It doesn't fit in memory, create backing store. */ + sptr->rows_in_mem = (JDIMENSION) (max_minheights * sptr->maxaccess); + jpeg_open_backing_store(cinfo, & sptr->b_s_info, + (long) sptr->rows_in_array * + (long) sptr->samplesperrow * + (long) SIZEOF(JSAMPLE)); + sptr->b_s_open = TRUE; + } + sptr->mem_buffer = alloc_sarray(cinfo, JPOOL_IMAGE, + sptr->samplesperrow, sptr->rows_in_mem); + sptr->rowsperchunk = mem->last_rowsperchunk; + sptr->cur_start_row = 0; + sptr->first_undef_row = 0; + sptr->dirty = FALSE; + } + } + + for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) { + if (bptr->mem_buffer == NULL) { /* if not realized yet */ + minheights = ((long) bptr->rows_in_array - 1L) / bptr->maxaccess + 1L; + if (minheights <= max_minheights) { + /* This buffer fits in memory */ + bptr->rows_in_mem = bptr->rows_in_array; + } else { + /* It doesn't fit in memory, create backing store. 
*/ + bptr->rows_in_mem = (JDIMENSION) (max_minheights * bptr->maxaccess); + jpeg_open_backing_store(cinfo, & bptr->b_s_info, + (long) bptr->rows_in_array * + (long) bptr->blocksperrow * + (long) SIZEOF(JBLOCK)); + bptr->b_s_open = TRUE; + } + bptr->mem_buffer = alloc_barray(cinfo, JPOOL_IMAGE, + bptr->blocksperrow, bptr->rows_in_mem); + bptr->rowsperchunk = mem->last_rowsperchunk; + bptr->cur_start_row = 0; + bptr->first_undef_row = 0; + bptr->dirty = FALSE; + } + } +} + + +LOCAL(void) +do_sarray_io (j_common_ptr cinfo, jvirt_sarray_ptr ptr, boolean writing) +/* Do backing store read or write of a virtual sample array */ +{ + long bytesperrow, file_offset, byte_count, rows, thisrow, i; + + bytesperrow = (long) ptr->samplesperrow * SIZEOF(JSAMPLE); + file_offset = ptr->cur_start_row * bytesperrow; + /* Loop to read or write each allocation chunk in mem_buffer */ + for (i = 0; i < (long) ptr->rows_in_mem; i += ptr->rowsperchunk) { + /* One chunk, but check for short chunk at end of buffer */ + rows = MIN((long) ptr->rowsperchunk, (long) ptr->rows_in_mem - i); + /* Transfer no more than is currently defined */ + thisrow = (long) ptr->cur_start_row + i; + rows = MIN(rows, (long) ptr->first_undef_row - thisrow); + /* Transfer no more than fits in file */ + rows = MIN(rows, (long) ptr->rows_in_array - thisrow); + if (rows <= 0) /* this chunk might be past end of file! */ + break; + byte_count = rows * bytesperrow; + if (writing) + (*ptr->b_s_info.write_backing_store) (cinfo, & ptr->b_s_info, + (void FAR *) ptr->mem_buffer[i], + file_offset, byte_count); + else + (*ptr->b_s_info.read_backing_store) (cinfo, & ptr->b_s_info, + (void FAR *) ptr->mem_buffer[i], + file_offset, byte_count); + file_offset += byte_count; + } +} + + +LOCAL(void) +do_barray_io (j_common_ptr cinfo, jvirt_barray_ptr ptr, boolean writing) +/* Do backing store read or write of a virtual coefficient-block array */ +{ + long bytesperrow, file_offset, byte_count, rows, thisrow, i; + + bytesperrow = (long) ptr->blocksperrow * SIZEOF(JBLOCK); + file_offset = ptr->cur_start_row * bytesperrow; + /* Loop to read or write each allocation chunk in mem_buffer */ + for (i = 0; i < (long) ptr->rows_in_mem; i += ptr->rowsperchunk) { + /* One chunk, but check for short chunk at end of buffer */ + rows = MIN((long) ptr->rowsperchunk, (long) ptr->rows_in_mem - i); + /* Transfer no more than is currently defined */ + thisrow = (long) ptr->cur_start_row + i; + rows = MIN(rows, (long) ptr->first_undef_row - thisrow); + /* Transfer no more than fits in file */ + rows = MIN(rows, (long) ptr->rows_in_array - thisrow); + if (rows <= 0) /* this chunk might be past end of file! */ + break; + byte_count = rows * bytesperrow; + if (writing) + (*ptr->b_s_info.write_backing_store) (cinfo, & ptr->b_s_info, + (void FAR *) ptr->mem_buffer[i], + file_offset, byte_count); + else + (*ptr->b_s_info.read_backing_store) (cinfo, & ptr->b_s_info, + (void FAR *) ptr->mem_buffer[i], + file_offset, byte_count); + file_offset += byte_count; + } +} + + +METHODDEF(JSAMPARRAY) +access_virt_sarray (j_common_ptr cinfo, jvirt_sarray_ptr ptr, + JDIMENSION start_row, JDIMENSION num_rows, + boolean writable) +/* Access the part of a virtual sample array starting at start_row */ +/* and extending for num_rows rows. writable is true if */ +/* caller intends to modify the accessed area. 
*/ +{ + JDIMENSION end_row = start_row + num_rows; + JDIMENSION undef_row; + + /* debugging check */ + if (end_row > ptr->rows_in_array || num_rows > ptr->maxaccess || + ptr->mem_buffer == NULL) + ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS); + + /* Make the desired part of the virtual array accessible */ + if (start_row < ptr->cur_start_row || + end_row > ptr->cur_start_row+ptr->rows_in_mem) { + if (! ptr->b_s_open) + ERREXIT(cinfo, JERR_VIRTUAL_BUG); + /* Flush old buffer contents if necessary */ + if (ptr->dirty) { + do_sarray_io(cinfo, ptr, TRUE); + ptr->dirty = FALSE; + } + /* Decide what part of virtual array to access. + * Algorithm: if target address > current window, assume forward scan, + * load starting at target address. If target address < current window, + * assume backward scan, load so that target area is top of window. + * Note that when switching from forward write to forward read, will have + * start_row = 0, so the limiting case applies and we load from 0 anyway. + */ + if (start_row > ptr->cur_start_row) { + ptr->cur_start_row = start_row; + } else { + /* use long arithmetic here to avoid overflow & unsigned problems */ + long ltemp; + + ltemp = (long) end_row - (long) ptr->rows_in_mem; + if (ltemp < 0) + ltemp = 0; /* don't fall off front end of file */ + ptr->cur_start_row = (JDIMENSION) ltemp; + } + /* Read in the selected part of the array. + * During the initial write pass, we will do no actual read + * because the selected part is all undefined. + */ + do_sarray_io(cinfo, ptr, FALSE); + } + /* Ensure the accessed part of the array is defined; prezero if needed. + * To improve locality of access, we only prezero the part of the array + * that the caller is about to access, not the entire in-memory array. + */ + if (ptr->first_undef_row < end_row) { + if (ptr->first_undef_row < start_row) { + if (writable) /* writer skipped over a section of array */ + ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS); + undef_row = start_row; /* but reader is allowed to read ahead */ + } else { + undef_row = ptr->first_undef_row; + } + if (writable) + ptr->first_undef_row = end_row; + if (ptr->pre_zero) { + size_t bytesperrow = (size_t) ptr->samplesperrow * SIZEOF(JSAMPLE); + undef_row -= ptr->cur_start_row; /* make indexes relative to buffer */ + end_row -= ptr->cur_start_row; + while (undef_row < end_row) { + FMEMZERO((void FAR *) ptr->mem_buffer[undef_row], bytesperrow); + undef_row++; + } + } else { + if (! writable) /* reader looking at undefined data */ + ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS); + } + } + /* Flag the buffer dirty if caller will write in it */ + if (writable) + ptr->dirty = TRUE; + /* Return address of proper part of the buffer */ + return ptr->mem_buffer + (start_row - ptr->cur_start_row); +} + + +METHODDEF(JBLOCKARRAY) +access_virt_barray (j_common_ptr cinfo, jvirt_barray_ptr ptr, + JDIMENSION start_row, JDIMENSION num_rows, + boolean writable) +/* Access the part of a virtual block array starting at start_row */ +/* and extending for num_rows rows. writable is true if */ +/* caller intends to modify the accessed area. */ +{ + JDIMENSION end_row = start_row + num_rows; + JDIMENSION undef_row; + + /* debugging check */ + if (end_row > ptr->rows_in_array || num_rows > ptr->maxaccess || + ptr->mem_buffer == NULL) + ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS); + + /* Make the desired part of the virtual array accessible */ + if (start_row < ptr->cur_start_row || + end_row > ptr->cur_start_row+ptr->rows_in_mem) { + if (! 
ptr->b_s_open) + ERREXIT(cinfo, JERR_VIRTUAL_BUG); + /* Flush old buffer contents if necessary */ + if (ptr->dirty) { + do_barray_io(cinfo, ptr, TRUE); + ptr->dirty = FALSE; + } + /* Decide what part of virtual array to access. + * Algorithm: if target address > current window, assume forward scan, + * load starting at target address. If target address < current window, + * assume backward scan, load so that target area is top of window. + * Note that when switching from forward write to forward read, will have + * start_row = 0, so the limiting case applies and we load from 0 anyway. + */ + if (start_row > ptr->cur_start_row) { + ptr->cur_start_row = start_row; + } else { + /* use long arithmetic here to avoid overflow & unsigned problems */ + long ltemp; + + ltemp = (long) end_row - (long) ptr->rows_in_mem; + if (ltemp < 0) + ltemp = 0; /* don't fall off front end of file */ + ptr->cur_start_row = (JDIMENSION) ltemp; + } + /* Read in the selected part of the array. + * During the initial write pass, we will do no actual read + * because the selected part is all undefined. + */ + do_barray_io(cinfo, ptr, FALSE); + } + /* Ensure the accessed part of the array is defined; prezero if needed. + * To improve locality of access, we only prezero the part of the array + * that the caller is about to access, not the entire in-memory array. + */ + if (ptr->first_undef_row < end_row) { + if (ptr->first_undef_row < start_row) { + if (writable) /* writer skipped over a section of array */ + ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS); + undef_row = start_row; /* but reader is allowed to read ahead */ + } else { + undef_row = ptr->first_undef_row; + } + if (writable) + ptr->first_undef_row = end_row; + if (ptr->pre_zero) { + size_t bytesperrow = (size_t) ptr->blocksperrow * SIZEOF(JBLOCK); + undef_row -= ptr->cur_start_row; /* make indexes relative to buffer */ + end_row -= ptr->cur_start_row; + while (undef_row < end_row) { + FMEMZERO((void FAR *) ptr->mem_buffer[undef_row], bytesperrow); + undef_row++; + } + } else { + if (! writable) /* reader looking at undefined data */ + ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS); + } + } + /* Flag the buffer dirty if caller will write in it */ + if (writable) + ptr->dirty = TRUE; + /* Return address of proper part of the buffer */ + return ptr->mem_buffer + (start_row - ptr->cur_start_row); +} + + +/* + * Release all objects belonging to a specified pool. 
+ */ + +METHODDEF(void) +free_pool (j_common_ptr cinfo, int pool_id) +{ + my_mem_ptr mem = (my_mem_ptr) cinfo->mem; + small_pool_ptr shdr_ptr; + large_pool_ptr lhdr_ptr; + size_t space_freed; + + if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS) + ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ + +#ifdef MEM_STATS + if (cinfo->err->trace_level > 1) + print_mem_stats(cinfo, pool_id); /* print pool's memory usage statistics */ +#endif + + /* If freeing IMAGE pool, close any virtual arrays first */ + if (pool_id == JPOOL_IMAGE) { + jvirt_sarray_ptr sptr; + jvirt_barray_ptr bptr; + + for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) { + if (sptr->b_s_open) { /* there may be no backing store */ + sptr->b_s_open = FALSE; /* prevent recursive close if error */ + (*sptr->b_s_info.close_backing_store) (cinfo, & sptr->b_s_info); + } + } + mem->virt_sarray_list = NULL; + for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) { + if (bptr->b_s_open) { /* there may be no backing store */ + bptr->b_s_open = FALSE; /* prevent recursive close if error */ + (*bptr->b_s_info.close_backing_store) (cinfo, & bptr->b_s_info); + } + } + mem->virt_barray_list = NULL; + } + + /* Release large objects */ + lhdr_ptr = mem->large_list[pool_id]; + mem->large_list[pool_id] = NULL; + + while (lhdr_ptr != NULL) { + large_pool_ptr next_lhdr_ptr = lhdr_ptr->hdr.next; + space_freed = lhdr_ptr->hdr.bytes_used + + lhdr_ptr->hdr.bytes_left + + SIZEOF(large_pool_hdr); + jpeg_free_large(cinfo, (void FAR *) lhdr_ptr, space_freed); + mem->total_space_allocated -= space_freed; + lhdr_ptr = next_lhdr_ptr; + } + + /* Release small objects */ + shdr_ptr = mem->small_list[pool_id]; + mem->small_list[pool_id] = NULL; + + while (shdr_ptr != NULL) { + small_pool_ptr next_shdr_ptr = shdr_ptr->hdr.next; + space_freed = shdr_ptr->hdr.bytes_used + + shdr_ptr->hdr.bytes_left + + SIZEOF(small_pool_hdr); + jpeg_free_small(cinfo, (void *) shdr_ptr, space_freed); + mem->total_space_allocated -= space_freed; + shdr_ptr = next_shdr_ptr; + } +} + + +/* + * Close up shop entirely. + * Note that this cannot be called unless cinfo->mem is non-NULL. + */ + +METHODDEF(void) +self_destruct (j_common_ptr cinfo) +{ + int pool; + + /* Close all backing store, release all memory. + * Releasing pools in reverse order might help avoid fragmentation + * with some (brain-damaged) malloc libraries. + */ + for (pool = JPOOL_NUMPOOLS-1; pool >= JPOOL_PERMANENT; pool--) { + free_pool(cinfo, pool); + } + + /* Release the memory manager control block too. */ + jpeg_free_small(cinfo, (void *) cinfo->mem, SIZEOF(my_memory_mgr)); + cinfo->mem = NULL; /* ensures I will be called only once */ + + jpeg_mem_term(cinfo); /* system-dependent cleanup */ +} + + +/* + * Memory manager initialization. + * When this is called, only the error manager pointer is valid in cinfo! + */ + +GLOBAL(void) +jinit_memory_mgr (j_common_ptr cinfo) +{ + my_mem_ptr mem; + long max_to_use; + int pool; + size_t test_mac; + + cinfo->mem = NULL; /* for safety if init fails */ + + /* Check for configuration errors. + * SIZEOF(ALIGN_TYPE) should be a power of 2; otherwise, it probably + * doesn't reflect any real hardware alignment requirement. + * The test is a little tricky: for X>0, X and X-1 have no one-bits + * in common if and only if X is a power of 2, ie has only one one-bit. + * Some compilers may give an "unreachable code" warning here; ignore it. 
+ */ + if ((SIZEOF(ALIGN_TYPE) & (SIZEOF(ALIGN_TYPE)-1)) != 0) + ERREXIT(cinfo, JERR_BAD_ALIGN_TYPE); + /* MAX_ALLOC_CHUNK must be representable as type size_t, and must be + * a multiple of SIZEOF(ALIGN_TYPE). + * Again, an "unreachable code" warning may be ignored here. + * But a "constant too large" warning means you need to fix MAX_ALLOC_CHUNK. + */ + test_mac = (size_t) MAX_ALLOC_CHUNK; + if ((long) test_mac != MAX_ALLOC_CHUNK || + (MAX_ALLOC_CHUNK % SIZEOF(ALIGN_TYPE)) != 0) + ERREXIT(cinfo, JERR_BAD_ALLOC_CHUNK); + + max_to_use = jpeg_mem_init(cinfo); /* system-dependent initialization */ + + /* Attempt to allocate memory manager's control block */ + mem = (my_mem_ptr) jpeg_get_small(cinfo, SIZEOF(my_memory_mgr)); + + if (mem == NULL) { + jpeg_mem_term(cinfo); /* system-dependent cleanup */ + ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, 0); + } + + /* OK, fill in the method pointers */ + mem->pub.alloc_small = alloc_small; + mem->pub.alloc_large = alloc_large; + mem->pub.alloc_sarray = alloc_sarray; + mem->pub.alloc_barray = alloc_barray; + mem->pub.request_virt_sarray = request_virt_sarray; + mem->pub.request_virt_barray = request_virt_barray; + mem->pub.realize_virt_arrays = realize_virt_arrays; + mem->pub.access_virt_sarray = access_virt_sarray; + mem->pub.access_virt_barray = access_virt_barray; + mem->pub.free_pool = free_pool; + mem->pub.self_destruct = self_destruct; + + /* Make MAX_ALLOC_CHUNK accessible to other modules */ + mem->pub.max_alloc_chunk = MAX_ALLOC_CHUNK; + + /* Initialize working state */ + mem->pub.max_memory_to_use = max_to_use; + + for (pool = JPOOL_NUMPOOLS-1; pool >= JPOOL_PERMANENT; pool--) { + mem->small_list[pool] = NULL; + mem->large_list[pool] = NULL; + } + mem->virt_sarray_list = NULL; + mem->virt_barray_list = NULL; + + mem->total_space_allocated = SIZEOF(my_memory_mgr); + + /* Declare ourselves open for business */ + cinfo->mem = & mem->pub; + + /* Check for an environment variable JPEGMEM; if found, override the + * default max_memory setting from jpeg_mem_init. Note that the + * surrounding application may again override this value. + * If your system doesn't support getenv(), define NO_GETENV to disable + * this feature. + */ +#ifndef NO_GETENV + { char * memenv; + + if ((memenv = getenv("JPEGMEM")) != NULL) { + char ch = 'x'; + + if (sscanf(memenv, "%ld%c", &max_to_use, &ch) > 0) { + if (ch == 'm' || ch == 'M') + max_to_use *= 1000L; + mem->pub.max_memory_to_use = max_to_use * 1000L; + } + } + } +#endif + +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jmemnobs.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jmemnobs.c new file mode 100644 index 000000000..ffbbad315 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jmemnobs.c @@ -0,0 +1,102 @@ +/* + * jmemnobs.c + * + * Copyright (C) 1992-1996, Thomas G. Lane. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file provides a really simple implementation of the system- + * dependent portion of the JPEG memory manager. This implementation + * assumes that no backing-store files are needed: all required space + * can be obtained from malloc(). + * This is very portable in the sense that it'll compile on almost anything, + * but you'd better have lots of main memory (or virtual memory) if you want + * to process big images. + * Note that the max_memory_to_use option is ignored by this implementation. 
+ */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" +#include "jmemsys.h" /* import the system-dependent declarations */ + +/* + * Memory allocation and freeing are controlled by the regular library + * routines malloc() and free(). + */ + +GLOBAL(void *) +jpeg_get_small (j_common_ptr cinfo, size_t sizeofobject) +{ + return (void *) JMALLOC(sizeofobject); +} + +GLOBAL(void) +jpeg_free_small (j_common_ptr cinfo, void * object, size_t sizeofobject) +{ + JFREE(object); +} + + +/* + * "Large" objects are treated the same as "small" ones. + * NB: although we include FAR keywords in the routine declarations, + * this file won't actually work in 80x86 small/medium model; at least, + * you probably won't be able to process useful-size images in only 64KB. + */ + +GLOBAL(void FAR *) +jpeg_get_large (j_common_ptr cinfo, size_t sizeofobject) +{ + return (void FAR *) JMALLOC(sizeofobject); +} + +GLOBAL(void) +jpeg_free_large (j_common_ptr cinfo, void FAR * object, size_t sizeofobject) +{ + JFREE(object); +} + + +/* + * This routine computes the total memory space available for allocation. + * Here we always say, "we got all you want bud!" + */ + +GLOBAL(long) +jpeg_mem_available (j_common_ptr cinfo, long min_bytes_needed, + long max_bytes_needed, long already_allocated) +{ + return max_bytes_needed; +} + + +/* + * Backing store (temporary file) management. + * Since jpeg_mem_available always promised the moon, + * this should never be called and we can just error out. + */ + +GLOBAL(void) +jpeg_open_backing_store (j_common_ptr cinfo, backing_store_ptr info, + long total_bytes_needed) +{ + ERREXIT(cinfo, JERR_NO_BACKING_STORE); +} + + +/* + * These routines take care of any system-dependent initialization and + * cleanup required. Here, there isn't any. + */ + +GLOBAL(long) +jpeg_mem_init (j_common_ptr cinfo) +{ + return 0; /* just set max_memory_to_use to 0 */ +} + +GLOBAL(void) +jpeg_mem_term (j_common_ptr cinfo) +{ +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jquant1.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jquant1.c new file mode 100644 index 000000000..9d11f7066 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jquant1.c @@ -0,0 +1,857 @@ +/* + * jquant1.c + * + * Copyright (C) 1991-1996, Thomas G. Lane. + * Modified 2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains 1-pass color quantization (color mapping) routines. + * These routines provide mapping to a fixed color map using equally spaced + * color values. Optional Floyd-Steinberg or ordered dithering is available. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + +#ifdef QUANT_1PASS_SUPPORTED + + +/* + * The main purpose of 1-pass quantization is to provide a fast, if not very + * high quality, colormapped output capability. A 2-pass quantizer usually + * gives better visual quality; however, for quantized grayscale output this + * quantizer is perfectly adequate. Dithering is highly recommended with this + * quantizer, though you can turn it off if you really want to. + * + * In 1-pass quantization the colormap must be chosen in advance of seeing the + * image. We use a map consisting of all combinations of Ncolors[i] color + * values for the i'th component. 
The Ncolors[] values are chosen so that + * their product, the total number of colors, is no more than that requested. + * (In most cases, the product will be somewhat less.) + * + * Since the colormap is orthogonal, the representative value for each color + * component can be determined without considering the other components; + * then these indexes can be combined into a colormap index by a standard + * N-dimensional-array-subscript calculation. Most of the arithmetic involved + * can be precalculated and stored in the lookup table colorindex[]. + * colorindex[i][j] maps pixel value j in component i to the nearest + * representative value (grid plane) for that component; this index is + * multiplied by the array stride for component i, so that the + * index of the colormap entry closest to a given pixel value is just + * sum( colorindex[component-number][pixel-component-value] ) + * Aside from being fast, this scheme allows for variable spacing between + * representative values with no additional lookup cost. + * + * If gamma correction has been applied in color conversion, it might be wise + * to adjust the color grid spacing so that the representative colors are + * equidistant in linear space. At this writing, gamma correction is not + * implemented by jdcolor, so nothing is done here. + */ + + +/* Declarations for ordered dithering. + * + * We use a standard 16x16 ordered dither array. The basic concept of ordered + * dithering is described in many references, for instance Dale Schumacher's + * chapter II.2 of Graphics Gems II (James Arvo, ed. Academic Press, 1991). + * In place of Schumacher's comparisons against a "threshold" value, we add a + * "dither" value to the input pixel and then round the result to the nearest + * output value. The dither value is equivalent to (0.5 - threshold) times + * the distance between output values. For ordered dithering, we assume that + * the output colors are equally spaced; if not, results will probably be + * worse, since the dither may be too much or too little at a given point. + * + * The normal calculation would be to form pixel value + dither, range-limit + * this to 0..MAXJSAMPLE, and then index into the colorindex table as usual. + * We can skip the separate range-limiting step by extending the colorindex + * table in both directions. + */ + +#define ODITHER_SIZE 16 /* dimension of dither matrix */ +/* NB: if ODITHER_SIZE is not a power of 2, ODITHER_MASK uses will break */ +#define ODITHER_CELLS (ODITHER_SIZE*ODITHER_SIZE) /* # cells in matrix */ +#define ODITHER_MASK (ODITHER_SIZE-1) /* mask for wrapping around counters */ + +typedef int ODITHER_MATRIX[ODITHER_SIZE][ODITHER_SIZE]; +typedef int (*ODITHER_MATRIX_PTR)[ODITHER_SIZE]; + +static const UINT8 base_dither_matrix[ODITHER_SIZE][ODITHER_SIZE] = { + /* Bayer's order-4 dither array. Generated by the code given in + * Stephen Hawley's article "Ordered Dithering" in Graphics Gems I. + * The values in this array must range from 0 to ODITHER_CELLS-1. 
+ */ + { 0,192, 48,240, 12,204, 60,252, 3,195, 51,243, 15,207, 63,255 }, + { 128, 64,176,112,140, 76,188,124,131, 67,179,115,143, 79,191,127 }, + { 32,224, 16,208, 44,236, 28,220, 35,227, 19,211, 47,239, 31,223 }, + { 160, 96,144, 80,172,108,156, 92,163, 99,147, 83,175,111,159, 95 }, + { 8,200, 56,248, 4,196, 52,244, 11,203, 59,251, 7,199, 55,247 }, + { 136, 72,184,120,132, 68,180,116,139, 75,187,123,135, 71,183,119 }, + { 40,232, 24,216, 36,228, 20,212, 43,235, 27,219, 39,231, 23,215 }, + { 168,104,152, 88,164,100,148, 84,171,107,155, 91,167,103,151, 87 }, + { 2,194, 50,242, 14,206, 62,254, 1,193, 49,241, 13,205, 61,253 }, + { 130, 66,178,114,142, 78,190,126,129, 65,177,113,141, 77,189,125 }, + { 34,226, 18,210, 46,238, 30,222, 33,225, 17,209, 45,237, 29,221 }, + { 162, 98,146, 82,174,110,158, 94,161, 97,145, 81,173,109,157, 93 }, + { 10,202, 58,250, 6,198, 54,246, 9,201, 57,249, 5,197, 53,245 }, + { 138, 74,186,122,134, 70,182,118,137, 73,185,121,133, 69,181,117 }, + { 42,234, 26,218, 38,230, 22,214, 41,233, 25,217, 37,229, 21,213 }, + { 170,106,154, 90,166,102,150, 86,169,105,153, 89,165,101,149, 85 } +}; + + +/* Declarations for Floyd-Steinberg dithering. + * + * Errors are accumulated into the array fserrors[], at a resolution of + * 1/16th of a pixel count. The error at a given pixel is propagated + * to its not-yet-processed neighbors using the standard F-S fractions, + * ... (here) 7/16 + * 3/16 5/16 1/16 + * We work left-to-right on even rows, right-to-left on odd rows. + * + * We can get away with a single array (holding one row's worth of errors) + * by using it to store the current row's errors at pixel columns not yet + * processed, but the next row's errors at columns already processed. We + * need only a few extra variables to hold the errors immediately around the + * current column. (If we are lucky, those variables are in registers, but + * even if not, they're probably cheaper to access than array elements are.) + * + * The fserrors[] array is indexed [component#][position]. + * We provide (#columns + 2) entries per component; the extra entry at each + * end saves us from special-casing the first and last pixels. + * + * Note: on a wide image, we might not have enough room in a PC's near data + * segment to hold the error array; so it is allocated with alloc_large. + */ + +#if BITS_IN_JSAMPLE == 8 +typedef INT16 FSERROR; /* 16 bits should be enough */ +typedef int LOCFSERROR; /* use 'int' for calculation temps */ +#else +typedef INT32 FSERROR; /* may need more than 16 bits */ +typedef INT32 LOCFSERROR; /* be sure calculation temps are big enough */ +#endif + +typedef FSERROR FAR *FSERRPTR; /* pointer to error array (in FAR storage!) */ + + +/* Private subobject */ + +#define MAX_Q_COMPS 4 /* max components I can handle */ + +typedef struct { + struct jpeg_color_quantizer pub; /* public fields */ + + /* Initially allocated colormap is saved here */ + JSAMPARRAY sv_colormap; /* The color map as a 2-D pixel array */ + int sv_actual; /* number of entries in use */ + + JSAMPARRAY colorindex; /* Precomputed mapping for speed */ + /* colorindex[i][j] = index of color closest to pixel value j in component i, + * premultiplied as described above. Since colormap indexes must fit into + * JSAMPLEs, the entries of this array will too. + */ + boolean is_padded; /* is the colorindex padded for odither? 
*/ + + int Ncolors[MAX_Q_COMPS]; /* # of values alloced to each component */ + + /* Variables for ordered dithering */ + int row_index; /* cur row's vertical index in dither matrix */ + ODITHER_MATRIX_PTR odither[MAX_Q_COMPS]; /* one dither array per component */ + + /* Variables for Floyd-Steinberg dithering */ + FSERRPTR fserrors[MAX_Q_COMPS]; /* accumulated errors */ + boolean on_odd_row; /* flag to remember which row we are on */ +} my_cquantizer; + +typedef my_cquantizer * my_cquantize_ptr; + + +/* + * Policy-making subroutines for create_colormap and create_colorindex. + * These routines determine the colormap to be used. The rest of the module + * only assumes that the colormap is orthogonal. + * + * * select_ncolors decides how to divvy up the available colors + * among the components. + * * output_value defines the set of representative values for a component. + * * largest_input_value defines the mapping from input values to + * representative values for a component. + * Note that the latter two routines may impose different policies for + * different components, though this is not currently done. + */ + + +LOCAL(int) +select_ncolors (j_decompress_ptr cinfo, int Ncolors[]) +/* Determine allocation of desired colors to components, */ +/* and fill in Ncolors[] array to indicate choice. */ +/* Return value is total number of colors (product of Ncolors[] values). */ +{ + int nc = cinfo->out_color_components; /* number of color components */ + int max_colors = cinfo->desired_number_of_colors; + int total_colors, iroot, i, j; + boolean changed; + long temp; + static const int RGB_order[3] = { RGB_GREEN, RGB_RED, RGB_BLUE }; + + /* We can allocate at least the nc'th root of max_colors per component. */ + /* Compute floor(nc'th root of max_colors). */ + iroot = 1; + do { + iroot++; + temp = iroot; /* set temp = iroot ** nc */ + for (i = 1; i < nc; i++) + temp *= iroot; + } while (temp <= (long) max_colors); /* repeat till iroot exceeds root */ + iroot--; /* now iroot = floor(root) */ + + /* Must have at least 2 color values per component */ + if (iroot < 2) + ERREXIT1(cinfo, JERR_QUANT_FEW_COLORS, (int) temp); + + /* Initialize to iroot color values for each component */ + total_colors = 1; + for (i = 0; i < nc; i++) { + Ncolors[i] = iroot; + total_colors *= iroot; + } + /* We may be able to increment the count for one or more components without + * exceeding max_colors, though we know not all can be incremented. + * Sometimes, the first component can be incremented more than once! + * (Example: for 16 colors, we start at 2*2*2, go to 3*2*2, then 4*2*2.) + * In RGB colorspace, try to increment G first, then R, then B. + */ + do { + changed = FALSE; + for (i = 0; i < nc; i++) { + j = (cinfo->out_color_space == JCS_RGB ? RGB_order[i] : i); + /* calculate new total_colors if Ncolors[j] is incremented */ + temp = total_colors / Ncolors[j]; + temp *= Ncolors[j]+1; /* done in long arith to avoid oflo */ + if (temp > (long) max_colors) + break; /* won't fit, done with this pass */ + Ncolors[j]++; /* OK, apply the increment */ + total_colors = (int) temp; + changed = TRUE; + } + } while (changed); + + return total_colors; +} + + +LOCAL(int) +output_value (j_decompress_ptr cinfo, int ci, int j, int maxj) +/* Return j'th output value, where j will range from 0 to maxj */ +/* The output values must fall in 0..MAXJSAMPLE in increasing order */ +{ + /* We always provide values 0 and MAXJSAMPLE for each component; + * any additional values are equally spaced between these limits. 
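+ * (Worked example, assuming 8-bit samples so MAXJSAMPLE = 255: for
+ * maxj = 3, i.e. four levels for this component, the formula below yields
+ * 0, 85, 170 and 255, and largest_input_value places the breakpoints at
+ * 43, 128 and 213.)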
+ * (Forcing the upper and lower values to the limits ensures that + * dithering can't produce a color outside the selected gamut.) + */ + return (int) (((INT32) j * MAXJSAMPLE + maxj/2) / maxj); +} + + +LOCAL(int) +largest_input_value (j_decompress_ptr cinfo, int ci, int j, int maxj) +/* Return largest input value that should map to j'th output value */ +/* Must have largest(j=0) >= 0, and largest(j=maxj) >= MAXJSAMPLE */ +{ + /* Breakpoints are halfway between values returned by output_value */ + return (int) (((INT32) (2*j + 1) * MAXJSAMPLE + maxj) / (2*maxj)); +} + + +/* + * Create the colormap. + */ + +LOCAL(void) +create_colormap (j_decompress_ptr cinfo) +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + JSAMPARRAY colormap; /* Created colormap */ + int total_colors; /* Number of distinct output colors */ + int i,j,k, nci, blksize, blkdist, ptr, val; + + /* Select number of colors for each component */ + total_colors = select_ncolors(cinfo, cquantize->Ncolors); + + /* Report selected color counts */ + if (cinfo->out_color_components == 3) + TRACEMS4(cinfo, 1, JTRC_QUANT_3_NCOLORS, + total_colors, cquantize->Ncolors[0], + cquantize->Ncolors[1], cquantize->Ncolors[2]); + else + TRACEMS1(cinfo, 1, JTRC_QUANT_NCOLORS, total_colors); + + /* Allocate and fill in the colormap. */ + /* The colors are ordered in the map in standard row-major order, */ + /* i.e. rightmost (highest-indexed) color changes most rapidly. */ + + colormap = (*cinfo->mem->alloc_sarray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + (JDIMENSION) total_colors, (JDIMENSION) cinfo->out_color_components); + + /* blksize is number of adjacent repeated entries for a component */ + /* blkdist is distance between groups of identical entries for a component */ + blkdist = total_colors; + + for (i = 0; i < cinfo->out_color_components; i++) { + /* fill in colormap entries for i'th color component */ + nci = cquantize->Ncolors[i]; /* # of distinct values for this color */ + blksize = blkdist / nci; + for (j = 0; j < nci; j++) { + /* Compute j'th output value (out of nci) for component */ + val = output_value(cinfo, i, j, nci-1); + /* Fill in all colormap entries that have this value of this component */ + for (ptr = j * blksize; ptr < total_colors; ptr += blkdist) { + /* fill in blksize entries beginning at ptr */ + for (k = 0; k < blksize; k++) + colormap[i][ptr+k] = (JSAMPLE) val; + } + } + blkdist = blksize; /* blksize of this color is blkdist of next */ + } + + /* Save the colormap in private storage, + * where it will survive color quantization mode changes. + */ + cquantize->sv_colormap = colormap; + cquantize->sv_actual = total_colors; +} + + +/* + * Create the color index table. + */ + +LOCAL(void) +create_colorindex (j_decompress_ptr cinfo) +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + JSAMPROW indexptr; + int i,j,k, nci, blksize, val, pad; + + /* For ordered dither, we pad the color index tables by MAXJSAMPLE in + * each direction (input index values can be -MAXJSAMPLE .. 2*MAXJSAMPLE). + * This is not necessary in the other dithering modes. However, we + * flag whether it was done in case user changes dithering mode. 
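+ * (Illustrative example of why the padding is needed: quantize_ord_dither
+ * below indexes this table with pixel value + dither, so an 8-bit pixel of
+ * 250 plus a worst-case dither near +255 would index entry ~505; the
+ * padding simply replicates the 0 and MAXJSAMPLE entries so such indexes
+ * still return a sane premultiplied value.)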
+ */ + if (cinfo->dither_mode == JDITHER_ORDERED) { + pad = MAXJSAMPLE*2; + cquantize->is_padded = TRUE; + } else { + pad = 0; + cquantize->is_padded = FALSE; + } + + cquantize->colorindex = (*cinfo->mem->alloc_sarray) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + (JDIMENSION) (MAXJSAMPLE+1 + pad), + (JDIMENSION) cinfo->out_color_components); + + /* blksize is number of adjacent repeated entries for a component */ + blksize = cquantize->sv_actual; + + for (i = 0; i < cinfo->out_color_components; i++) { + /* fill in colorindex entries for i'th color component */ + nci = cquantize->Ncolors[i]; /* # of distinct values for this color */ + blksize = blksize / nci; + + /* adjust colorindex pointers to provide padding at negative indexes. */ + if (pad) + cquantize->colorindex[i] += MAXJSAMPLE; + + /* in loop, val = index of current output value, */ + /* and k = largest j that maps to current val */ + indexptr = cquantize->colorindex[i]; + val = 0; + k = largest_input_value(cinfo, i, 0, nci-1); + for (j = 0; j <= MAXJSAMPLE; j++) { + while (j > k) /* advance val if past boundary */ + k = largest_input_value(cinfo, i, ++val, nci-1); + /* premultiply so that no multiplication needed in main processing */ + indexptr[j] = (JSAMPLE) (val * blksize); + } + /* Pad at both ends if necessary */ + if (pad) + for (j = 1; j <= MAXJSAMPLE; j++) { + indexptr[-j] = indexptr[0]; + indexptr[MAXJSAMPLE+j] = indexptr[MAXJSAMPLE]; + } + } +} + + +/* + * Create an ordered-dither array for a component having ncolors + * distinct output values. + */ + +LOCAL(ODITHER_MATRIX_PTR) +make_odither_array (j_decompress_ptr cinfo, int ncolors) +{ + ODITHER_MATRIX_PTR odither; + int j,k; + INT32 num,den; + + odither = (ODITHER_MATRIX_PTR) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(ODITHER_MATRIX)); + /* The inter-value distance for this color is MAXJSAMPLE/(ncolors-1). + * Hence the dither value for the matrix cell with fill order f + * (f=0..N-1) should be (N-1-2*f)/(2*N) * MAXJSAMPLE/(ncolors-1). + * On 16-bit-int machine, be careful to avoid overflow. + */ + den = 2 * ODITHER_CELLS * ((INT32) (ncolors - 1)); + for (j = 0; j < ODITHER_SIZE; j++) { + for (k = 0; k < ODITHER_SIZE; k++) { + num = ((INT32) (ODITHER_CELLS-1 - 2*((int)base_dither_matrix[j][k]))) + * MAXJSAMPLE; + /* Ensure round towards zero despite C's lack of consistency + * about rounding negative values in integer division... + */ + odither[j][k] = (int) (num<0 ? -((-num)/den) : num/den); + } + } + return odither; +} + + +/* + * Create the ordered-dither tables. + * Components having the same number of representative colors may + * share a dither table. + */ + +LOCAL(void) +create_odither_tables (j_decompress_ptr cinfo) +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + ODITHER_MATRIX_PTR odither; + int i, j, nci; + + for (i = 0; i < cinfo->out_color_components; i++) { + nci = cquantize->Ncolors[i]; /* # of distinct values for this color */ + odither = NULL; /* search for matching prior component */ + for (j = 0; j < i; j++) { + if (nci == cquantize->Ncolors[j]) { + odither = cquantize->odither[j]; + break; + } + } + if (odither == NULL) /* need a new table? */ + odither = make_odither_array(cinfo, nci); + cquantize->odither[i] = odither; + } +} + + +/* + * Map some rows of pixels to the output colormapped representation. 
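+ * (Illustrative sketch, assuming an RGB image and the default request for
+ * 256 colors: select_ncolors typically settles on Ncolors = {6,7,6}, i.e.
+ * 252 colors, so the premultiplied strides are 42, 6 and 1 and each pixel
+ * reduces to
+ *
+ *   index = colorindex[0][R] + colorindex[1][G] + colorindex[2][B];
+ *
+ * which is exactly the sum the routines below compute, one component at a
+ * time or all three in the 3-component fast path.)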
+ */ + +METHODDEF(void) +color_quantize (j_decompress_ptr cinfo, JSAMPARRAY input_buf, + JSAMPARRAY output_buf, int num_rows) +/* General case, no dithering */ +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + JSAMPARRAY colorindex = cquantize->colorindex; + register int pixcode, ci; + register JSAMPROW ptrin, ptrout; + int row; + JDIMENSION col; + JDIMENSION width = cinfo->output_width; + register int nc = cinfo->out_color_components; + + for (row = 0; row < num_rows; row++) { + ptrin = input_buf[row]; + ptrout = output_buf[row]; + for (col = width; col > 0; col--) { + pixcode = 0; + for (ci = 0; ci < nc; ci++) { + pixcode += GETJSAMPLE(colorindex[ci][GETJSAMPLE(*ptrin++)]); + } + *ptrout++ = (JSAMPLE) pixcode; + } + } +} + + +METHODDEF(void) +color_quantize3 (j_decompress_ptr cinfo, JSAMPARRAY input_buf, + JSAMPARRAY output_buf, int num_rows) +/* Fast path for out_color_components==3, no dithering */ +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + register int pixcode; + register JSAMPROW ptrin, ptrout; + JSAMPROW colorindex0 = cquantize->colorindex[0]; + JSAMPROW colorindex1 = cquantize->colorindex[1]; + JSAMPROW colorindex2 = cquantize->colorindex[2]; + int row; + JDIMENSION col; + JDIMENSION width = cinfo->output_width; + + for (row = 0; row < num_rows; row++) { + ptrin = input_buf[row]; + ptrout = output_buf[row]; + for (col = width; col > 0; col--) { + pixcode = GETJSAMPLE(colorindex0[GETJSAMPLE(*ptrin++)]); + pixcode += GETJSAMPLE(colorindex1[GETJSAMPLE(*ptrin++)]); + pixcode += GETJSAMPLE(colorindex2[GETJSAMPLE(*ptrin++)]); + *ptrout++ = (JSAMPLE) pixcode; + } + } +} + + +METHODDEF(void) +quantize_ord_dither (j_decompress_ptr cinfo, JSAMPARRAY input_buf, + JSAMPARRAY output_buf, int num_rows) +/* General case, with ordered dithering */ +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + register JSAMPROW input_ptr; + register JSAMPROW output_ptr; + JSAMPROW colorindex_ci; + int * dither; /* points to active row of dither matrix */ + int row_index, col_index; /* current indexes into dither matrix */ + int nc = cinfo->out_color_components; + int ci; + int row; + JDIMENSION col; + JDIMENSION width = cinfo->output_width; + + for (row = 0; row < num_rows; row++) { + /* Initialize output values to 0 so can process components separately */ + FMEMZERO((void FAR *) output_buf[row], + (size_t) (width * SIZEOF(JSAMPLE))); + row_index = cquantize->row_index; + for (ci = 0; ci < nc; ci++) { + input_ptr = input_buf[row] + ci; + output_ptr = output_buf[row]; + colorindex_ci = cquantize->colorindex[ci]; + dither = cquantize->odither[ci][row_index]; + col_index = 0; + + for (col = width; col > 0; col--) { + /* Form pixel value + dither, range-limit to 0..MAXJSAMPLE, + * select output value, accumulate into output code for this pixel. + * Range-limiting need not be done explicitly, as we have extended + * the colorindex table to produce the right answers for out-of-range + * inputs. The maximum dither is +- MAXJSAMPLE; this sets the + * required amount of padding. 
+ */ + *output_ptr += colorindex_ci[GETJSAMPLE(*input_ptr)+dither[col_index]]; + input_ptr += nc; + output_ptr++; + col_index = (col_index + 1) & ODITHER_MASK; + } + } + /* Advance row index for next row */ + row_index = (row_index + 1) & ODITHER_MASK; + cquantize->row_index = row_index; + } +} + + +METHODDEF(void) +quantize3_ord_dither (j_decompress_ptr cinfo, JSAMPARRAY input_buf, + JSAMPARRAY output_buf, int num_rows) +/* Fast path for out_color_components==3, with ordered dithering */ +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + register int pixcode; + register JSAMPROW input_ptr; + register JSAMPROW output_ptr; + JSAMPROW colorindex0 = cquantize->colorindex[0]; + JSAMPROW colorindex1 = cquantize->colorindex[1]; + JSAMPROW colorindex2 = cquantize->colorindex[2]; + int * dither0; /* points to active row of dither matrix */ + int * dither1; + int * dither2; + int row_index, col_index; /* current indexes into dither matrix */ + int row; + JDIMENSION col; + JDIMENSION width = cinfo->output_width; + + for (row = 0; row < num_rows; row++) { + row_index = cquantize->row_index; + input_ptr = input_buf[row]; + output_ptr = output_buf[row]; + dither0 = cquantize->odither[0][row_index]; + dither1 = cquantize->odither[1][row_index]; + dither2 = cquantize->odither[2][row_index]; + col_index = 0; + + for (col = width; col > 0; col--) { + pixcode = GETJSAMPLE(colorindex0[GETJSAMPLE(*input_ptr++) + + dither0[col_index]]); + pixcode += GETJSAMPLE(colorindex1[GETJSAMPLE(*input_ptr++) + + dither1[col_index]]); + pixcode += GETJSAMPLE(colorindex2[GETJSAMPLE(*input_ptr++) + + dither2[col_index]]); + *output_ptr++ = (JSAMPLE) pixcode; + col_index = (col_index + 1) & ODITHER_MASK; + } + row_index = (row_index + 1) & ODITHER_MASK; + cquantize->row_index = row_index; + } +} + + +METHODDEF(void) +quantize_fs_dither (j_decompress_ptr cinfo, JSAMPARRAY input_buf, + JSAMPARRAY output_buf, int num_rows) +/* General case, with Floyd-Steinberg dithering */ +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + register LOCFSERROR cur; /* current error or pixel value */ + LOCFSERROR belowerr; /* error for pixel below cur */ + LOCFSERROR bpreverr; /* error for below/prev col */ + LOCFSERROR bnexterr; /* error for below/next col */ + LOCFSERROR delta; + register FSERRPTR errorptr; /* => fserrors[] at column before current */ + register JSAMPROW input_ptr; + register JSAMPROW output_ptr; + JSAMPROW colorindex_ci; + JSAMPROW colormap_ci; + int pixcode; + int nc = cinfo->out_color_components; + int dir; /* 1 for left-to-right, -1 for right-to-left */ + int dirnc; /* dir * nc */ + int ci; + int row; + JDIMENSION col; + JDIMENSION width = cinfo->output_width; + JSAMPLE *range_limit = cinfo->sample_range_limit; + SHIFT_TEMPS + + for (row = 0; row < num_rows; row++) { + /* Initialize output values to 0 so can process components separately */ + FMEMZERO((void FAR *) output_buf[row], + (size_t) (width * SIZEOF(JSAMPLE))); + for (ci = 0; ci < nc; ci++) { + input_ptr = input_buf[row] + ci; + output_ptr = output_buf[row]; + if (cquantize->on_odd_row) { + /* work right to left in this row */ + input_ptr += (width-1) * nc; /* so point to rightmost pixel */ + output_ptr += width-1; + dir = -1; + dirnc = -nc; + errorptr = cquantize->fserrors[ci] + (width+1); /* => entry after last column */ + } else { + /* work left to right in this row */ + dir = 1; + dirnc = nc; + errorptr = cquantize->fserrors[ci]; /* => entry before first column */ + } + colorindex_ci = cquantize->colorindex[ci]; + 
colormap_ci = cquantize->sv_colormap[ci]; + /* Preset error values: no error propagated to first pixel from left */ + cur = 0; + /* and no error propagated to row below yet */ + belowerr = bpreverr = 0; + + for (col = width; col > 0; col--) { + /* cur holds the error propagated from the previous pixel on the + * current line. Add the error propagated from the previous line + * to form the complete error correction term for this pixel, and + * round the error term (which is expressed * 16) to an integer. + * RIGHT_SHIFT rounds towards minus infinity, so adding 8 is correct + * for either sign of the error value. + * Note: errorptr points to *previous* column's array entry. + */ + cur = RIGHT_SHIFT(cur + errorptr[dir] + 8, 4); + /* Form pixel value + error, and range-limit to 0..MAXJSAMPLE. + * The maximum error is +- MAXJSAMPLE; this sets the required size + * of the range_limit array. + */ + cur += GETJSAMPLE(*input_ptr); + cur = GETJSAMPLE(range_limit[cur]); + /* Select output value, accumulate into output code for this pixel */ + pixcode = GETJSAMPLE(colorindex_ci[cur]); + *output_ptr += (JSAMPLE) pixcode; + /* Compute actual representation error at this pixel */ + /* Note: we can do this even though we don't have the final */ + /* pixel code, because the colormap is orthogonal. */ + cur -= GETJSAMPLE(colormap_ci[pixcode]); + /* Compute error fractions to be propagated to adjacent pixels. + * Add these into the running sums, and simultaneously shift the + * next-line error sums left by 1 column. + */ + bnexterr = cur; + delta = cur * 2; + cur += delta; /* form error * 3 */ + errorptr[0] = (FSERROR) (bpreverr + cur); + cur += delta; /* form error * 5 */ + bpreverr = belowerr + cur; + belowerr = bnexterr; + cur += delta; /* form error * 7 */ + /* At this point cur contains the 7/16 error value to be propagated + * to the next pixel on the current line, and all the errors for the + * next line have been shifted over. We are therefore ready to move on. + */ + input_ptr += dirnc; /* advance input ptr to next column */ + output_ptr += dir; /* advance output ptr to next column */ + errorptr += dir; /* advance errorptr to current column */ + } + /* Post-loop cleanup: we must unload the final error value into the + * final fserrors[] entry. Note we need not unload belowerr because + * it is for the dummy column before or after the actual array. + */ + errorptr[0] = (FSERROR) bpreverr; /* unload prev err into array */ + } + cquantize->on_odd_row = (cquantize->on_odd_row ? FALSE : TRUE); + } +} + + +/* + * Allocate workspace for Floyd-Steinberg errors. + */ + +LOCAL(void) +alloc_fs_workspace (j_decompress_ptr cinfo) +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + size_t arraysize; + int i; + + arraysize = (size_t) ((cinfo->output_width + 2) * SIZEOF(FSERROR)); + for (i = 0; i < cinfo->out_color_components; i++) { + cquantize->fserrors[i] = (FSERRPTR) + (*cinfo->mem->alloc_large)((j_common_ptr) cinfo, JPOOL_IMAGE, arraysize); + } +} + + +/* + * Initialize for one-pass color quantization. + */ + +METHODDEF(void) +start_pass_1_quant (j_decompress_ptr cinfo, boolean is_pre_scan) +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + size_t arraysize; + int i; + + /* Install my colormap. */ + cinfo->colormap = cquantize->sv_colormap; + cinfo->actual_number_of_colors = cquantize->sv_actual; + + /* Initialize for desired dithering mode. 
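+ * (Usage sketch, assuming the normal libjpeg decompression API: this
+ * module is reached when the application requests colormapped output with
+ * the 1-pass quantizer, roughly
+ *
+ *   cinfo.quantize_colors = TRUE;
+ *   cinfo.two_pass_quantize = FALSE;
+ *   cinfo.desired_number_of_colors = 256;
+ *   cinfo.dither_mode = JDITHER_FS;       (or JDITHER_ORDERED / JDITHER_NONE)
+ *
+ * set between jpeg_read_header() and jpeg_start_decompress(); the switch
+ * below then installs the matching color_quantize method.)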
*/ + switch (cinfo->dither_mode) { + case JDITHER_NONE: + if (cinfo->out_color_components == 3) + cquantize->pub.color_quantize = color_quantize3; + else + cquantize->pub.color_quantize = color_quantize; + break; + case JDITHER_ORDERED: + if (cinfo->out_color_components == 3) + cquantize->pub.color_quantize = quantize3_ord_dither; + else + cquantize->pub.color_quantize = quantize_ord_dither; + cquantize->row_index = 0; /* initialize state for ordered dither */ + /* If user changed to ordered dither from another mode, + * we must recreate the color index table with padding. + * This will cost extra space, but probably isn't very likely. + */ + if (! cquantize->is_padded) + create_colorindex(cinfo); + /* Create ordered-dither tables if we didn't already. */ + if (cquantize->odither[0] == NULL) + create_odither_tables(cinfo); + break; + case JDITHER_FS: + cquantize->pub.color_quantize = quantize_fs_dither; + cquantize->on_odd_row = FALSE; /* initialize state for F-S dither */ + /* Allocate Floyd-Steinberg workspace if didn't already. */ + if (cquantize->fserrors[0] == NULL) + alloc_fs_workspace(cinfo); + /* Initialize the propagated errors to zero. */ + arraysize = (size_t) ((cinfo->output_width + 2) * SIZEOF(FSERROR)); + for (i = 0; i < cinfo->out_color_components; i++) + FMEMZERO((void FAR *) cquantize->fserrors[i], arraysize); + break; + default: + ERREXIT(cinfo, JERR_NOT_COMPILED); + break; + } +} + + +/* + * Finish up at the end of the pass. + */ + +METHODDEF(void) +finish_pass_1_quant (j_decompress_ptr cinfo) +{ + /* no work in 1-pass case */ +} + + +/* + * Switch to a new external colormap between output passes. + * Shouldn't get to this module! + */ + +METHODDEF(void) +new_color_map_1_quant (j_decompress_ptr cinfo) +{ + ERREXIT(cinfo, JERR_MODE_CHANGE); +} + + +/* + * Module initialization routine for 1-pass color quantization. + */ + +GLOBAL(void) +jinit_1pass_quantizer (j_decompress_ptr cinfo) +{ + my_cquantize_ptr cquantize; + + cquantize = (my_cquantize_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_cquantizer)); + cinfo->cquantize = (struct jpeg_color_quantizer *) cquantize; + cquantize->pub.start_pass = start_pass_1_quant; + cquantize->pub.finish_pass = finish_pass_1_quant; + cquantize->pub.new_color_map = new_color_map_1_quant; + cquantize->fserrors[0] = NULL; /* Flag FS workspace not allocated */ + cquantize->odither[0] = NULL; /* Also flag odither arrays not allocated */ + + /* Make sure my internal arrays won't overflow */ + if (cinfo->out_color_components > MAX_Q_COMPS) + ERREXIT1(cinfo, JERR_QUANT_COMPONENTS, MAX_Q_COMPS); + /* Make sure colormap indexes can be represented by JSAMPLEs */ + if (cinfo->desired_number_of_colors > (MAXJSAMPLE+1)) + ERREXIT1(cinfo, JERR_QUANT_MANY_COLORS, MAXJSAMPLE+1); + + /* Create the colormap and color index table. */ + create_colormap(cinfo); + create_colorindex(cinfo); + + /* Allocate Floyd-Steinberg workspace now if requested. + * We do this now since it is FAR storage and may affect the memory + * manager's space calculations. If the user changes to FS dither + * mode in a later pass, we will allocate the space then, and will + * possibly overrun the max_memory_to_use setting. 
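+ * (Size example for that workspace, assuming 8-bit samples: each component
+ * gets (output_width + 2) 16-bit error cells here, so e.g. a 480-pixel-wide
+ * RGB output needs about 3 * 482 * 2 = 2892 bytes, allocated with
+ * alloc_large as FAR storage.)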
+ */ + if (cinfo->dither_mode == JDITHER_FS) + alloc_fs_workspace(cinfo); +} + +#endif /* QUANT_1PASS_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jquant2.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jquant2.c new file mode 100644 index 000000000..38fc2af7a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jquant2.c @@ -0,0 +1,1311 @@ +/* + * jquant2.c + * + * Copyright (C) 1991-1996, Thomas G. Lane. + * Modified 2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains 2-pass color quantization (color mapping) routines. + * These routines provide selection of a custom color map for an image, + * followed by mapping of the image to that color map, with optional + * Floyd-Steinberg dithering. + * It is also possible to use just the second pass to map to an arbitrary + * externally-given color map. + * + * Note: ordered dithering is not supported, since there isn't any fast + * way to compute intercolor distances; it's unclear that ordered dither's + * fundamental assumptions even hold with an irregularly spaced color map. + */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + +#ifdef QUANT_2PASS_SUPPORTED + + +/* + * This module implements the well-known Heckbert paradigm for color + * quantization. Most of the ideas used here can be traced back to + * Heckbert's seminal paper + * Heckbert, Paul. "Color Image Quantization for Frame Buffer Display", + * Proc. SIGGRAPH '82, Computer Graphics v.16 #3 (July 1982), pp 297-304. + * + * In the first pass over the image, we accumulate a histogram showing the + * usage count of each possible color. To keep the histogram to a reasonable + * size, we reduce the precision of the input; typical practice is to retain + * 5 or 6 bits per color, so that 8 or 4 different input values are counted + * in the same histogram cell. + * + * Next, the color-selection step begins with a box representing the whole + * color space, and repeatedly splits the "largest" remaining box until we + * have as many boxes as desired colors. Then the mean color in each + * remaining box becomes one of the possible output colors. + * + * The second pass over the image maps each input pixel to the closest output + * color (optionally after applying a Floyd-Steinberg dithering correction). + * This mapping is logically trivial, but making it go fast enough requires + * considerable care. + * + * Heckbert-style quantizers vary a good deal in their policies for choosing + * the "largest" box and deciding where to cut it. The particular policies + * used here have proved out well in experimental comparisons, but better ones + * may yet be found. + * + * In earlier versions of the IJG code, this module quantized in YCbCr color + * space, processing the raw upsampled data without a color conversion step. + * This allowed the color conversion math to be done only once per colormap + * entry, not once per pixel. However, that optimization precluded other + * useful optimizations (such as merging color conversion with upsampling) + * and it also interfered with desired capabilities such as quantizing to an + * externally-supplied colormap. We have therefore abandoned that approach. + * The present code works in the post-conversion color space, typically RGB. 
+ * + * To improve the visual quality of the results, we actually work in scaled + * RGB space, giving G distances more weight than R, and R in turn more than + * B. To do everything in integer math, we must use integer scale factors. + * The 2/3/1 scale factors used here correspond loosely to the relative + * weights of the colors in the NTSC grayscale equation. + * If you want to use this code to quantize a non-RGB color space, you'll + * probably need to change these scale factors. + */ + +#define R_SCALE 2 /* scale R distances by this much */ +#define G_SCALE 3 /* scale G distances by this much */ +#define B_SCALE 1 /* and B by this much */ + +/* Relabel R/G/B as components 0/1/2, respecting the RGB ordering defined + * in jmorecfg.h. As the code stands, it will do the right thing for R,G,B + * and B,G,R orders. If you define some other weird order in jmorecfg.h, + * you'll get compile errors until you extend this logic. In that case + * you'll probably want to tweak the histogram sizes too. + */ + +#if RGB_RED == 0 +#define C0_SCALE R_SCALE +#endif +#if RGB_BLUE == 0 +#define C0_SCALE B_SCALE +#endif +#if RGB_GREEN == 1 +#define C1_SCALE G_SCALE +#endif +#if RGB_RED == 2 +#define C2_SCALE R_SCALE +#endif +#if RGB_BLUE == 2 +#define C2_SCALE B_SCALE +#endif + + +/* + * First we have the histogram data structure and routines for creating it. + * + * The number of bits of precision can be adjusted by changing these symbols. + * We recommend keeping 6 bits for G and 5 each for R and B. + * If you have plenty of memory and cycles, 6 bits all around gives marginally + * better results; if you are short of memory, 5 bits all around will save + * some space but degrade the results. + * To maintain a fully accurate histogram, we'd need to allocate a "long" + * (preferably unsigned long) for each cell. In practice this is overkill; + * we can get by with 16 bits per cell. Few of the cell counts will overflow, + * and clamping those that do overflow to the maximum value will give close- + * enough results. This reduces the recommended histogram size from 256Kb + * to 128Kb, which is a useful savings on PC-class machines. + * (In the second pass the histogram space is re-used for pixel mapping data; + * in that capacity, each cell must be able to store zero to the number of + * desired colors. 16 bits/cell is plenty for that too.) + * Since the JPEG code is intended to run in small memory model on 80x86 + * machines, we can't just allocate the histogram in one chunk. Instead + * of a true 3-D array, we use a row of pointers to 2-D arrays. Each + * pointer corresponds to a C0 value (typically 2^5 = 32 pointers) and + * each 2-D array has 2^6*2^5 = 2048 or 2^6*2^6 = 4096 entries. Note that + * on 80x86 machines, the pointer row is in near memory but the actual + * arrays are in far memory (same arrangement as we use for image arrays). + */ + +#define MAXNUMCOLORS (MAXJSAMPLE+1) /* maximum size of colormap */ + +/* These will do the right thing for either R,G,B or B,G,R color order, + * but you may not like the results for other color orders. + */ +#define HIST_C0_BITS 5 /* bits of precision in R/B histogram */ +#define HIST_C1_BITS 6 /* bits of precision in G histogram */ +#define HIST_C2_BITS 5 /* bits of precision in B/R histogram */ + +/* Number of elements along histogram axes. 
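+ * (Worked example, assuming 8-bit samples and the recommended 5/6/5 split:
+ * a pixel is indexed by its top bits, e.g. RGB (200,100,50) lands in cell
+ * [200>>3][100>>2][50>>3] = [25][25][6]; that gives 32*64*32 = 65536 cells
+ * of 16 bits each, i.e. the 128Kb figure quoted above.)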
*/ +#define HIST_C0_ELEMS (1<cquantize; + register JSAMPROW ptr; + register histptr histp; + register hist3d histogram = cquantize->histogram; + int row; + JDIMENSION col; + JDIMENSION width = cinfo->output_width; + + for (row = 0; row < num_rows; row++) { + ptr = input_buf[row]; + for (col = width; col > 0; col--) { + /* get pixel value and index into the histogram */ + histp = & histogram[GETJSAMPLE(ptr[0]) >> C0_SHIFT] + [GETJSAMPLE(ptr[1]) >> C1_SHIFT] + [GETJSAMPLE(ptr[2]) >> C2_SHIFT]; + /* increment, check for overflow and undo increment if so. */ + if (++(*histp) <= 0) + (*histp)--; + ptr += 3; + } + } +} + + +/* + * Next we have the really interesting routines: selection of a colormap + * given the completed histogram. + * These routines work with a list of "boxes", each representing a rectangular + * subset of the input color space (to histogram precision). + */ + +typedef struct { + /* The bounds of the box (inclusive); expressed as histogram indexes */ + int c0min, c0max; + int c1min, c1max; + int c2min, c2max; + /* The volume (actually 2-norm) of the box */ + INT32 volume; + /* The number of nonzero histogram cells within this box */ + long colorcount; +} box; + +typedef box * boxptr; + + +LOCAL(boxptr) +find_biggest_color_pop (boxptr boxlist, int numboxes) +/* Find the splittable box with the largest color population */ +/* Returns NULL if no splittable boxes remain */ +{ + register boxptr boxp; + register int i; + register long maxc = 0; + boxptr which = NULL; + + for (i = 0, boxp = boxlist; i < numboxes; i++, boxp++) { + if (boxp->colorcount > maxc && boxp->volume > 0) { + which = boxp; + maxc = boxp->colorcount; + } + } + return which; +} + + +LOCAL(boxptr) +find_biggest_volume (boxptr boxlist, int numboxes) +/* Find the splittable box with the largest (scaled) volume */ +/* Returns NULL if no splittable boxes remain */ +{ + register boxptr boxp; + register int i; + register INT32 maxv = 0; + boxptr which = NULL; + + for (i = 0, boxp = boxlist; i < numboxes; i++, boxp++) { + if (boxp->volume > maxv) { + which = boxp; + maxv = boxp->volume; + } + } + return which; +} + + +LOCAL(void) +update_box (j_decompress_ptr cinfo, boxptr boxp) +/* Shrink the min/max bounds of a box to enclose only nonzero elements, */ +/* and recompute its volume and population */ +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + hist3d histogram = cquantize->histogram; + histptr histp; + int c0,c1,c2; + int c0min,c0max,c1min,c1max,c2min,c2max; + INT32 dist0,dist1,dist2; + long ccount; + + c0min = boxp->c0min; c0max = boxp->c0max; + c1min = boxp->c1min; c1max = boxp->c1max; + c2min = boxp->c2min; c2max = boxp->c2max; + + if (c0max > c0min) + for (c0 = c0min; c0 <= c0max; c0++) + for (c1 = c1min; c1 <= c1max; c1++) { + histp = & histogram[c0][c1][c2min]; + for (c2 = c2min; c2 <= c2max; c2++) + if (*histp++ != 0) { + boxp->c0min = c0min = c0; + goto have_c0min; + } + } + have_c0min: + if (c0max > c0min) + for (c0 = c0max; c0 >= c0min; c0--) + for (c1 = c1min; c1 <= c1max; c1++) { + histp = & histogram[c0][c1][c2min]; + for (c2 = c2min; c2 <= c2max; c2++) + if (*histp++ != 0) { + boxp->c0max = c0max = c0; + goto have_c0max; + } + } + have_c0max: + if (c1max > c1min) + for (c1 = c1min; c1 <= c1max; c1++) + for (c0 = c0min; c0 <= c0max; c0++) { + histp = & histogram[c0][c1][c2min]; + for (c2 = c2min; c2 <= c2max; c2++) + if (*histp++ != 0) { + boxp->c1min = c1min = c1; + goto have_c1min; + } + } + have_c1min: + if (c1max > c1min) + for (c1 = c1max; c1 >= c1min; c1--) + for (c0 = 
c0min; c0 <= c0max; c0++) { + histp = & histogram[c0][c1][c2min]; + for (c2 = c2min; c2 <= c2max; c2++) + if (*histp++ != 0) { + boxp->c1max = c1max = c1; + goto have_c1max; + } + } + have_c1max: + if (c2max > c2min) + for (c2 = c2min; c2 <= c2max; c2++) + for (c0 = c0min; c0 <= c0max; c0++) { + histp = & histogram[c0][c1min][c2]; + for (c1 = c1min; c1 <= c1max; c1++, histp += HIST_C2_ELEMS) + if (*histp != 0) { + boxp->c2min = c2min = c2; + goto have_c2min; + } + } + have_c2min: + if (c2max > c2min) + for (c2 = c2max; c2 >= c2min; c2--) + for (c0 = c0min; c0 <= c0max; c0++) { + histp = & histogram[c0][c1min][c2]; + for (c1 = c1min; c1 <= c1max; c1++, histp += HIST_C2_ELEMS) + if (*histp != 0) { + boxp->c2max = c2max = c2; + goto have_c2max; + } + } + have_c2max: + + /* Update box volume. + * We use 2-norm rather than real volume here; this biases the method + * against making long narrow boxes, and it has the side benefit that + * a box is splittable iff norm > 0. + * Since the differences are expressed in histogram-cell units, + * we have to shift back to JSAMPLE units to get consistent distances; + * after which, we scale according to the selected distance scale factors. + */ + dist0 = ((c0max - c0min) << C0_SHIFT) * C0_SCALE; + dist1 = ((c1max - c1min) << C1_SHIFT) * C1_SCALE; + dist2 = ((c2max - c2min) << C2_SHIFT) * C2_SCALE; + boxp->volume = dist0*dist0 + dist1*dist1 + dist2*dist2; + + /* Now scan remaining volume of box and compute population */ + ccount = 0; + for (c0 = c0min; c0 <= c0max; c0++) + for (c1 = c1min; c1 <= c1max; c1++) { + histp = & histogram[c0][c1][c2min]; + for (c2 = c2min; c2 <= c2max; c2++, histp++) + if (*histp != 0) { + ccount++; + } + } + boxp->colorcount = ccount; +} + + +LOCAL(int) +median_cut (j_decompress_ptr cinfo, boxptr boxlist, int numboxes, + int desired_colors) +/* Repeatedly select and split the largest box until we have enough boxes */ +{ + int n,lb; + int c0,c1,c2,cmax; + register boxptr b1,b2; + + while (numboxes < desired_colors) { + /* Select box to split. + * Current algorithm: by population for first half, then by volume. + */ + if (numboxes*2 <= desired_colors) { + b1 = find_biggest_color_pop(boxlist, numboxes); + } else { + b1 = find_biggest_volume(boxlist, numboxes); + } + if (b1 == NULL) /* no splittable boxes left! */ + break; + b2 = &boxlist[numboxes]; /* where new box will go */ + /* Copy the color bounds to the new box. */ + b2->c0max = b1->c0max; b2->c1max = b1->c1max; b2->c2max = b1->c2max; + b2->c0min = b1->c0min; b2->c1min = b1->c1min; b2->c2min = b1->c2min; + /* Choose which axis to split the box on. + * Current algorithm: longest scaled axis. + * See notes in update_box about scaling distances. + */ + c0 = ((b1->c0max - b1->c0min) << C0_SHIFT) * C0_SCALE; + c1 = ((b1->c1max - b1->c1min) << C1_SHIFT) * C1_SCALE; + c2 = ((b1->c2max - b1->c2min) << C2_SHIFT) * C2_SCALE; + /* We want to break any ties in favor of green, then red, blue last. + * This code does the right thing for R,G,B or B,G,R color orders only. + */ +#if RGB_RED == 0 + cmax = c1; n = 1; + if (c0 > cmax) { cmax = c0; n = 0; } + if (c2 > cmax) { n = 2; } +#else + cmax = c1; n = 1; + if (c2 > cmax) { cmax = c2; n = 2; } + if (c0 > cmax) { n = 0; } +#endif + /* Choose split point along selected axis, and update box bounds. + * Current algorithm: split at halfway point. + * (Since the box has been shrunk to minimum volume, + * any split will produce two nonempty subboxes.) + * Note that lb value is max for lower box, so must be < old max. 
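+ * (Worked example: if the chosen axis currently spans, say, 4..19 in
+ * histogram cells, the code below sets lb = (19+4)/2 = 11, so the lower
+ * box keeps 4..11 and the new box gets 12..19.)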
+ */ + switch (n) { + case 0: + lb = (b1->c0max + b1->c0min) / 2; + b1->c0max = lb; + b2->c0min = lb+1; + break; + case 1: + lb = (b1->c1max + b1->c1min) / 2; + b1->c1max = lb; + b2->c1min = lb+1; + break; + case 2: + lb = (b1->c2max + b1->c2min) / 2; + b1->c2max = lb; + b2->c2min = lb+1; + break; + } + /* Update stats for boxes */ + update_box(cinfo, b1); + update_box(cinfo, b2); + numboxes++; + } + return numboxes; +} + + +LOCAL(void) +compute_color (j_decompress_ptr cinfo, boxptr boxp, int icolor) +/* Compute representative color for a box, put it in colormap[icolor] */ +{ + /* Current algorithm: mean weighted by pixels (not colors) */ + /* Note it is important to get the rounding correct! */ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + hist3d histogram = cquantize->histogram; + histptr histp; + int c0,c1,c2; + int c0min,c0max,c1min,c1max,c2min,c2max; + long count; + long total = 0; + long c0total = 0; + long c1total = 0; + long c2total = 0; + + c0min = boxp->c0min; c0max = boxp->c0max; + c1min = boxp->c1min; c1max = boxp->c1max; + c2min = boxp->c2min; c2max = boxp->c2max; + + for (c0 = c0min; c0 <= c0max; c0++) + for (c1 = c1min; c1 <= c1max; c1++) { + histp = & histogram[c0][c1][c2min]; + for (c2 = c2min; c2 <= c2max; c2++) { + if ((count = *histp++) != 0) { + total += count; + c0total += ((c0 << C0_SHIFT) + ((1<>1)) * count; + c1total += ((c1 << C1_SHIFT) + ((1<>1)) * count; + c2total += ((c2 << C2_SHIFT) + ((1<>1)) * count; + } + } + } + + cinfo->colormap[0][icolor] = (JSAMPLE) ((c0total + (total>>1)) / total); + cinfo->colormap[1][icolor] = (JSAMPLE) ((c1total + (total>>1)) / total); + cinfo->colormap[2][icolor] = (JSAMPLE) ((c2total + (total>>1)) / total); +} + + +LOCAL(void) +select_colors (j_decompress_ptr cinfo, int desired_colors) +/* Master routine for color selection */ +{ + boxptr boxlist; + int numboxes; + int i; + + /* Allocate workspace for box list */ + boxlist = (boxptr) (*cinfo->mem->alloc_small) + ((j_common_ptr) cinfo, JPOOL_IMAGE, desired_colors * SIZEOF(box)); + /* Initialize one box containing whole space */ + numboxes = 1; + boxlist[0].c0min = 0; + boxlist[0].c0max = MAXJSAMPLE >> C0_SHIFT; + boxlist[0].c1min = 0; + boxlist[0].c1max = MAXJSAMPLE >> C1_SHIFT; + boxlist[0].c2min = 0; + boxlist[0].c2max = MAXJSAMPLE >> C2_SHIFT; + /* Shrink it to actually-used volume and set its statistics */ + update_box(cinfo, & boxlist[0]); + /* Perform median-cut to produce final box list */ + numboxes = median_cut(cinfo, boxlist, numboxes, desired_colors); + /* Compute the representative color for each box, fill colormap */ + for (i = 0; i < numboxes; i++) + compute_color(cinfo, & boxlist[i], i); + cinfo->actual_number_of_colors = numboxes; + TRACEMS1(cinfo, 1, JTRC_QUANT_SELECTED, numboxes); +} + + +/* + * These routines are concerned with the time-critical task of mapping input + * colors to the nearest color in the selected colormap. + * + * We re-use the histogram space as an "inverse color map", essentially a + * cache for the results of nearest-color searches. All colors within a + * histogram cell will be mapped to the same colormap entry, namely the one + * closest to the cell's center. This may not be quite the closest entry to + * the actual input color, but it's almost as good. A zero in the cache + * indicates we haven't found the nearest color for that cell yet; the array + * is cleared to zeroes before starting the mapping pass. 
When we find the + * nearest color for a cell, its colormap index plus one is recorded in the + * cache for future use. The pass2 scanning routines call fill_inverse_cmap + * when they need to use an unfilled entry in the cache. + * + * Our method of efficiently finding nearest colors is based on the "locally + * sorted search" idea described by Heckbert and on the incremental distance + * calculation described by Spencer W. Thomas in chapter III.1 of Graphics + * Gems II (James Arvo, ed. Academic Press, 1991). Thomas points out that + * the distances from a given colormap entry to each cell of the histogram can + * be computed quickly using an incremental method: the differences between + * distances to adjacent cells themselves differ by a constant. This allows a + * fairly fast implementation of the "brute force" approach of computing the + * distance from every colormap entry to every histogram cell. Unfortunately, + * it needs a work array to hold the best-distance-so-far for each histogram + * cell (because the inner loop has to be over cells, not colormap entries). + * The work array elements have to be INT32s, so the work array would need + * 256Kb at our recommended precision. This is not feasible in DOS machines. + * + * To get around these problems, we apply Thomas' method to compute the + * nearest colors for only the cells within a small subbox of the histogram. + * The work array need be only as big as the subbox, so the memory usage + * problem is solved. Furthermore, we need not fill subboxes that are never + * referenced in pass2; many images use only part of the color gamut, so a + * fair amount of work is saved. An additional advantage of this + * approach is that we can apply Heckbert's locality criterion to quickly + * eliminate colormap entries that are far away from the subbox; typically + * three-fourths of the colormap entries are rejected by Heckbert's criterion, + * and we need not compute their distances to individual cells in the subbox. + * The speed of this approach is heavily influenced by the subbox size: too + * small means too much overhead, too big loses because Heckbert's criterion + * can't eliminate as many colormap entries. Empirically the best subbox + * size seems to be about 1/512th of the histogram (1/8th in each direction). + * + * Thomas' article also describes a refined method which is asymptotically + * faster than the brute-force method, but it is also far more complex and + * cannot efficiently be applied to small subboxes. It is therefore not + * useful for programs intended to be portable to DOS machines. On machines + * with plenty of memory, filling the whole histogram in one shot with Thomas' + * refined method might be faster than the present code --- but then again, + * it might not be any faster, and it's certainly more complicated. + */ + + +/* log2(histogram cells in update box) for each axis; this can be adjusted */ +#define BOX_C0_LOG (HIST_C0_BITS-3) +#define BOX_C1_LOG (HIST_C1_BITS-3) +#define BOX_C2_LOG (HIST_C2_BITS-3) + +#define BOX_C0_ELEMS (1<actual_number_of_colors; + int maxc0, maxc1, maxc2; + int centerc0, centerc1, centerc2; + int i, x, ncolors; + INT32 minmaxdist, min_dist, max_dist, tdist; + INT32 mindist[MAXNUMCOLORS]; /* min distance to colormap entry i */ + + /* Compute true coordinates of update box's upper corner and center. + * Actually we compute the coordinates of the center of the upper-corner + * histogram cell, which are the upper bounds of the volume we care about. 
+ * Note that since ">>" rounds down, the "center" values may be closer to + * min than to max; hence comparisons to them must be "<=", not "<". + */ + maxc0 = minc0 + ((1 << BOX_C0_SHIFT) - (1 << C0_SHIFT)); + centerc0 = (minc0 + maxc0) >> 1; + maxc1 = minc1 + ((1 << BOX_C1_SHIFT) - (1 << C1_SHIFT)); + centerc1 = (minc1 + maxc1) >> 1; + maxc2 = minc2 + ((1 << BOX_C2_SHIFT) - (1 << C2_SHIFT)); + centerc2 = (minc2 + maxc2) >> 1; + + /* For each color in colormap, find: + * 1. its minimum squared-distance to any point in the update box + * (zero if color is within update box); + * 2. its maximum squared-distance to any point in the update box. + * Both of these can be found by considering only the corners of the box. + * We save the minimum distance for each color in mindist[]; + * only the smallest maximum distance is of interest. + */ + minmaxdist = 0x7FFFFFFFL; + + for (i = 0; i < numcolors; i++) { + /* We compute the squared-c0-distance term, then add in the other two. */ + x = GETJSAMPLE(cinfo->colormap[0][i]); + if (x < minc0) { + tdist = (x - minc0) * C0_SCALE; + min_dist = tdist*tdist; + tdist = (x - maxc0) * C0_SCALE; + max_dist = tdist*tdist; + } else if (x > maxc0) { + tdist = (x - maxc0) * C0_SCALE; + min_dist = tdist*tdist; + tdist = (x - minc0) * C0_SCALE; + max_dist = tdist*tdist; + } else { + /* within cell range so no contribution to min_dist */ + min_dist = 0; + if (x <= centerc0) { + tdist = (x - maxc0) * C0_SCALE; + max_dist = tdist*tdist; + } else { + tdist = (x - minc0) * C0_SCALE; + max_dist = tdist*tdist; + } + } + + x = GETJSAMPLE(cinfo->colormap[1][i]); + if (x < minc1) { + tdist = (x - minc1) * C1_SCALE; + min_dist += tdist*tdist; + tdist = (x - maxc1) * C1_SCALE; + max_dist += tdist*tdist; + } else if (x > maxc1) { + tdist = (x - maxc1) * C1_SCALE; + min_dist += tdist*tdist; + tdist = (x - minc1) * C1_SCALE; + max_dist += tdist*tdist; + } else { + /* within cell range so no contribution to min_dist */ + if (x <= centerc1) { + tdist = (x - maxc1) * C1_SCALE; + max_dist += tdist*tdist; + } else { + tdist = (x - minc1) * C1_SCALE; + max_dist += tdist*tdist; + } + } + + x = GETJSAMPLE(cinfo->colormap[2][i]); + if (x < minc2) { + tdist = (x - minc2) * C2_SCALE; + min_dist += tdist*tdist; + tdist = (x - maxc2) * C2_SCALE; + max_dist += tdist*tdist; + } else if (x > maxc2) { + tdist = (x - maxc2) * C2_SCALE; + min_dist += tdist*tdist; + tdist = (x - minc2) * C2_SCALE; + max_dist += tdist*tdist; + } else { + /* within cell range so no contribution to min_dist */ + if (x <= centerc2) { + tdist = (x - maxc2) * C2_SCALE; + max_dist += tdist*tdist; + } else { + tdist = (x - minc2) * C2_SCALE; + max_dist += tdist*tdist; + } + } + + mindist[i] = min_dist; /* save away the results */ + if (max_dist < minmaxdist) + minmaxdist = max_dist; + } + + /* Now we know that no cell in the update box is more than minmaxdist + * away from some colormap entry. Therefore, only colors that are + * within minmaxdist of some part of the box need be considered. + */ + ncolors = 0; + for (i = 0; i < numcolors; i++) { + if (mindist[i] <= minmaxdist) + colorlist[ncolors++] = (JSAMPLE) i; + } + return ncolors; +} + + +LOCAL(void) +find_best_colors (j_decompress_ptr cinfo, int minc0, int minc1, int minc2, + int numcolors, JSAMPLE colorlist[], JSAMPLE bestcolor[]) +/* Find the closest colormap entry for each cell in the update box, + * given the list of candidate colors prepared by find_nearby_colors. + * Return the indexes of the closest entries in the bestcolor[] array. 
+ * This routine uses Thomas' incremental distance calculation method to + * find the distance from a colormap entry to successive cells in the box. + */ +{ + int ic0, ic1, ic2; + int i, icolor; + register INT32 * bptr; /* pointer into bestdist[] array */ + JSAMPLE * cptr; /* pointer into bestcolor[] array */ + INT32 dist0, dist1; /* initial distance values */ + register INT32 dist2; /* current distance in inner loop */ + INT32 xx0, xx1; /* distance increments */ + register INT32 xx2; + INT32 inc0, inc1, inc2; /* initial values for increments */ + /* This array holds the distance to the nearest-so-far color for each cell */ + INT32 bestdist[BOX_C0_ELEMS * BOX_C1_ELEMS * BOX_C2_ELEMS]; + + /* Initialize best-distance for each cell of the update box */ + bptr = bestdist; + for (i = BOX_C0_ELEMS*BOX_C1_ELEMS*BOX_C2_ELEMS-1; i >= 0; i--) + *bptr++ = 0x7FFFFFFFL; + + /* For each color selected by find_nearby_colors, + * compute its distance to the center of each cell in the box. + * If that's less than best-so-far, update best distance and color number. + */ + + /* Nominal steps between cell centers ("x" in Thomas article) */ +#define STEP_C0 ((1 << C0_SHIFT) * C0_SCALE) +#define STEP_C1 ((1 << C1_SHIFT) * C1_SCALE) +#define STEP_C2 ((1 << C2_SHIFT) * C2_SCALE) + + for (i = 0; i < numcolors; i++) { + icolor = GETJSAMPLE(colorlist[i]); + /* Compute (square of) distance from minc0/c1/c2 to this color */ + inc0 = (minc0 - GETJSAMPLE(cinfo->colormap[0][icolor])) * C0_SCALE; + dist0 = inc0*inc0; + inc1 = (minc1 - GETJSAMPLE(cinfo->colormap[1][icolor])) * C1_SCALE; + dist0 += inc1*inc1; + inc2 = (minc2 - GETJSAMPLE(cinfo->colormap[2][icolor])) * C2_SCALE; + dist0 += inc2*inc2; + /* Form the initial difference increments */ + inc0 = inc0 * (2 * STEP_C0) + STEP_C0 * STEP_C0; + inc1 = inc1 * (2 * STEP_C1) + STEP_C1 * STEP_C1; + inc2 = inc2 * (2 * STEP_C2) + STEP_C2 * STEP_C2; + /* Now loop over all cells in box, updating distance per Thomas method */ + bptr = bestdist; + cptr = bestcolor; + xx0 = inc0; + for (ic0 = BOX_C0_ELEMS-1; ic0 >= 0; ic0--) { + dist1 = dist0; + xx1 = inc1; + for (ic1 = BOX_C1_ELEMS-1; ic1 >= 0; ic1--) { + dist2 = dist1; + xx2 = inc2; + for (ic2 = BOX_C2_ELEMS-1; ic2 >= 0; ic2--) { + if (dist2 < *bptr) { + *bptr = dist2; + *cptr = (JSAMPLE) icolor; + } + dist2 += xx2; + xx2 += 2 * STEP_C2 * STEP_C2; + bptr++; + cptr++; + } + dist1 += xx1; + xx1 += 2 * STEP_C1 * STEP_C1; + } + dist0 += xx0; + xx0 += 2 * STEP_C0 * STEP_C0; + } + } +} + + +LOCAL(void) +fill_inverse_cmap (j_decompress_ptr cinfo, int c0, int c1, int c2) +/* Fill the inverse-colormap entries in the update box that contains */ +/* histogram cell c0/c1/c2. (Only that one cell MUST be filled, but */ +/* we can fill as many others as we wish.) */ +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + hist3d histogram = cquantize->histogram; + int minc0, minc1, minc2; /* lower left corner of update box */ + int ic0, ic1, ic2; + register JSAMPLE * cptr; /* pointer into bestcolor[] array */ + register histptr cachep; /* pointer into main cache array */ + /* This array lists the candidate colormap indexes. */ + JSAMPLE colorlist[MAXNUMCOLORS]; + int numcolors; /* number of candidate colors */ + /* This array holds the actually closest colormap index for each cell. */ + JSAMPLE bestcolor[BOX_C0_ELEMS * BOX_C1_ELEMS * BOX_C2_ELEMS]; + + /* Convert cell coordinates to update box ID */ + c0 >>= BOX_C0_LOG; + c1 >>= BOX_C1_LOG; + c2 >>= BOX_C2_LOG; + + /* Compute true coordinates of update box's origin corner. 
+ * Actually we compute the coordinates of the center of the corner + * histogram cell, which are the lower bounds of the volume we care about. + */ + minc0 = (c0 << BOX_C0_SHIFT) + ((1 << C0_SHIFT) >> 1); + minc1 = (c1 << BOX_C1_SHIFT) + ((1 << C1_SHIFT) >> 1); + minc2 = (c2 << BOX_C2_SHIFT) + ((1 << C2_SHIFT) >> 1); + + /* Determine which colormap entries are close enough to be candidates + * for the nearest entry to some cell in the update box. + */ + numcolors = find_nearby_colors(cinfo, minc0, minc1, minc2, colorlist); + + /* Determine the actually nearest colors. */ + find_best_colors(cinfo, minc0, minc1, minc2, numcolors, colorlist, + bestcolor); + + /* Save the best color numbers (plus 1) in the main cache array */ + c0 <<= BOX_C0_LOG; /* convert ID back to base cell indexes */ + c1 <<= BOX_C1_LOG; + c2 <<= BOX_C2_LOG; + cptr = bestcolor; + for (ic0 = 0; ic0 < BOX_C0_ELEMS; ic0++) { + for (ic1 = 0; ic1 < BOX_C1_ELEMS; ic1++) { + cachep = & histogram[c0+ic0][c1+ic1][c2]; + for (ic2 = 0; ic2 < BOX_C2_ELEMS; ic2++) { + *cachep++ = (histcell) (GETJSAMPLE(*cptr++) + 1); + } + } + } +} + + +/* + * Map some rows of pixels to the output colormapped representation. + */ + +METHODDEF(void) +pass2_no_dither (j_decompress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPARRAY output_buf, int num_rows) +/* This version performs no dithering */ +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + hist3d histogram = cquantize->histogram; + register JSAMPROW inptr, outptr; + register histptr cachep; + register int c0, c1, c2; + int row; + JDIMENSION col; + JDIMENSION width = cinfo->output_width; + + for (row = 0; row < num_rows; row++) { + inptr = input_buf[row]; + outptr = output_buf[row]; + for (col = width; col > 0; col--) { + /* get pixel value and index into the cache */ + c0 = GETJSAMPLE(*inptr++) >> C0_SHIFT; + c1 = GETJSAMPLE(*inptr++) >> C1_SHIFT; + c2 = GETJSAMPLE(*inptr++) >> C2_SHIFT; + cachep = & histogram[c0][c1][c2]; + /* If we have not seen this color before, find nearest colormap entry */ + /* and update the cache */ + if (*cachep == 0) + fill_inverse_cmap(cinfo, c0,c1,c2); + /* Now emit the colormap index for this cell */ + *outptr++ = (JSAMPLE) (*cachep - 1); + } + } +} + + +METHODDEF(void) +pass2_fs_dither (j_decompress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPARRAY output_buf, int num_rows) +/* This version performs Floyd-Steinberg dithering */ +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + hist3d histogram = cquantize->histogram; + register LOCFSERROR cur0, cur1, cur2; /* current error or pixel value */ + LOCFSERROR belowerr0, belowerr1, belowerr2; /* error for pixel below cur */ + LOCFSERROR bpreverr0, bpreverr1, bpreverr2; /* error for below/prev col */ + register FSERRPTR errorptr; /* => fserrors[] at column before current */ + JSAMPROW inptr; /* => current input pixel */ + JSAMPROW outptr; /* => current output pixel */ + histptr cachep; + int dir; /* +1 or -1 depending on direction */ + int dir3; /* 3*dir, for advancing inptr & errorptr */ + int row; + JDIMENSION col; + JDIMENSION width = cinfo->output_width; + JSAMPLE *range_limit = cinfo->sample_range_limit; + int *error_limit = cquantize->error_limiter; + JSAMPROW colormap0 = cinfo->colormap[0]; + JSAMPROW colormap1 = cinfo->colormap[1]; + JSAMPROW colormap2 = cinfo->colormap[2]; + SHIFT_TEMPS + + for (row = 0; row < num_rows; row++) { + inptr = input_buf[row]; + outptr = output_buf[row]; + if (cquantize->on_odd_row) { + /* work right to left in this row */ + inptr += (width-1) * 3; 
/* so point to rightmost pixel */ + outptr += width-1; + dir = -1; + dir3 = -3; + errorptr = cquantize->fserrors + (width+1)*3; /* => entry after last column */ + cquantize->on_odd_row = FALSE; /* flip for next time */ + } else { + /* work left to right in this row */ + dir = 1; + dir3 = 3; + errorptr = cquantize->fserrors; /* => entry before first real column */ + cquantize->on_odd_row = TRUE; /* flip for next time */ + } + /* Preset error values: no error propagated to first pixel from left */ + cur0 = cur1 = cur2 = 0; + /* and no error propagated to row below yet */ + belowerr0 = belowerr1 = belowerr2 = 0; + bpreverr0 = bpreverr1 = bpreverr2 = 0; + + for (col = width; col > 0; col--) { + /* curN holds the error propagated from the previous pixel on the + * current line. Add the error propagated from the previous line + * to form the complete error correction term for this pixel, and + * round the error term (which is expressed * 16) to an integer. + * RIGHT_SHIFT rounds towards minus infinity, so adding 8 is correct + * for either sign of the error value. + * Note: errorptr points to *previous* column's array entry. + */ + cur0 = RIGHT_SHIFT(cur0 + errorptr[dir3+0] + 8, 4); + cur1 = RIGHT_SHIFT(cur1 + errorptr[dir3+1] + 8, 4); + cur2 = RIGHT_SHIFT(cur2 + errorptr[dir3+2] + 8, 4); + /* Limit the error using transfer function set by init_error_limit. + * See comments with init_error_limit for rationale. + */ + cur0 = error_limit[cur0]; + cur1 = error_limit[cur1]; + cur2 = error_limit[cur2]; + /* Form pixel value + error, and range-limit to 0..MAXJSAMPLE. + * The maximum error is +- MAXJSAMPLE (or less with error limiting); + * this sets the required size of the range_limit array. + */ + cur0 += GETJSAMPLE(inptr[0]); + cur1 += GETJSAMPLE(inptr[1]); + cur2 += GETJSAMPLE(inptr[2]); + cur0 = GETJSAMPLE(range_limit[cur0]); + cur1 = GETJSAMPLE(range_limit[cur1]); + cur2 = GETJSAMPLE(range_limit[cur2]); + /* Index into the cache with adjusted pixel value */ + cachep = & histogram[cur0>>C0_SHIFT][cur1>>C1_SHIFT][cur2>>C2_SHIFT]; + /* If we have not seen this color before, find nearest colormap */ + /* entry and update the cache */ + if (*cachep == 0) + fill_inverse_cmap(cinfo, cur0>>C0_SHIFT,cur1>>C1_SHIFT,cur2>>C2_SHIFT); + /* Now emit the colormap index for this cell */ + { register int pixcode = *cachep - 1; + *outptr = (JSAMPLE) pixcode; + /* Compute representation error for this pixel */ + cur0 -= GETJSAMPLE(colormap0[pixcode]); + cur1 -= GETJSAMPLE(colormap1[pixcode]); + cur2 -= GETJSAMPLE(colormap2[pixcode]); + } + /* Compute error fractions to be propagated to adjacent pixels. + * Add these into the running sums, and simultaneously shift the + * next-line error sums left by 1 column. 
+ */ + { register LOCFSERROR bnexterr, delta; + + bnexterr = cur0; /* Process component 0 */ + delta = cur0 * 2; + cur0 += delta; /* form error * 3 */ + errorptr[0] = (FSERROR) (bpreverr0 + cur0); + cur0 += delta; /* form error * 5 */ + bpreverr0 = belowerr0 + cur0; + belowerr0 = bnexterr; + cur0 += delta; /* form error * 7 */ + bnexterr = cur1; /* Process component 1 */ + delta = cur1 * 2; + cur1 += delta; /* form error * 3 */ + errorptr[1] = (FSERROR) (bpreverr1 + cur1); + cur1 += delta; /* form error * 5 */ + bpreverr1 = belowerr1 + cur1; + belowerr1 = bnexterr; + cur1 += delta; /* form error * 7 */ + bnexterr = cur2; /* Process component 2 */ + delta = cur2 * 2; + cur2 += delta; /* form error * 3 */ + errorptr[2] = (FSERROR) (bpreverr2 + cur2); + cur2 += delta; /* form error * 5 */ + bpreverr2 = belowerr2 + cur2; + belowerr2 = bnexterr; + cur2 += delta; /* form error * 7 */ + } + /* At this point curN contains the 7/16 error value to be propagated + * to the next pixel on the current line, and all the errors for the + * next line have been shifted over. We are therefore ready to move on. + */ + inptr += dir3; /* Advance pixel pointers to next column */ + outptr += dir; + errorptr += dir3; /* advance errorptr to current column */ + } + /* Post-loop cleanup: we must unload the final error values into the + * final fserrors[] entry. Note we need not unload belowerrN because + * it is for the dummy column before or after the actual array. + */ + errorptr[0] = (FSERROR) bpreverr0; /* unload prev errs into array */ + errorptr[1] = (FSERROR) bpreverr1; + errorptr[2] = (FSERROR) bpreverr2; + } +} + + +/* + * Initialize the error-limiting transfer function (lookup table). + * The raw F-S error computation can potentially compute error values of up to + * +- MAXJSAMPLE. But we want the maximum correction applied to a pixel to be + * much less, otherwise obviously wrong pixels will be created. (Typical + * effects include weird fringes at color-area boundaries, isolated bright + * pixels in a dark area, etc.) The standard advice for avoiding this problem + * is to ensure that the "corners" of the color cube are allocated as output + * colors; then repeated errors in the same direction cannot cause cascading + * error buildup. However, that only prevents the error from getting + * completely out of hand; Aaron Giles reports that error limiting improves + * the results even with corner colors allocated. + * A simple clamping of the error values to about +- MAXJSAMPLE/8 works pretty + * well, but the smoother transfer function used below is even better. Thanks + * to Aaron Giles for this idea. + */ + +LOCAL(void) +init_error_limit (j_decompress_ptr cinfo) +/* Allocate and fill in the error_limiter table */ +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + int * table; + int in, out; + + table = (int *) (*cinfo->mem->alloc_small) + ((j_common_ptr) cinfo, JPOOL_IMAGE, (MAXJSAMPLE*2+1) * SIZEOF(int)); + table += MAXJSAMPLE; /* so can index -MAXJSAMPLE .. +MAXJSAMPLE */ + cquantize->error_limiter = table; + +#define STEPSIZE ((MAXJSAMPLE+1)/16) + /* Map errors 1:1 up to +- MAXJSAMPLE/16 */ + out = 0; + for (in = 0; in < STEPSIZE; in++, out++) { + table[in] = out; table[-in] = -out; + } + /* Map errors 1:2 up to +- 3*MAXJSAMPLE/16 */ + for (; in < STEPSIZE*3; in++, out += (in&1) ? 
0 : 1) { + table[in] = out; table[-in] = -out; + } + /* Clamp the rest to final out value (which is (MAXJSAMPLE+1)/8) */ + for (; in <= MAXJSAMPLE; in++) { + table[in] = out; table[-in] = -out; + } +#undef STEPSIZE +} + + +/* + * Finish up at the end of each pass. + */ + +METHODDEF(void) +finish_pass1 (j_decompress_ptr cinfo) +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + + /* Select the representative colors and fill in cinfo->colormap */ + cinfo->colormap = cquantize->sv_colormap; + select_colors(cinfo, cquantize->desired); + /* Force next pass to zero the color index table */ + cquantize->needs_zeroed = TRUE; +} + + +METHODDEF(void) +finish_pass2 (j_decompress_ptr cinfo) +{ + /* no work */ +} + + +/* + * Initialize for each processing pass. + */ + +METHODDEF(void) +start_pass_2_quant (j_decompress_ptr cinfo, boolean is_pre_scan) +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + hist3d histogram = cquantize->histogram; + int i; + + /* Only F-S dithering or no dithering is supported. */ + /* If user asks for ordered dither, give him F-S. */ + if (cinfo->dither_mode != JDITHER_NONE) + cinfo->dither_mode = JDITHER_FS; + + if (is_pre_scan) { + /* Set up method pointers */ + cquantize->pub.color_quantize = prescan_quantize; + cquantize->pub.finish_pass = finish_pass1; + cquantize->needs_zeroed = TRUE; /* Always zero histogram */ + } else { + /* Set up method pointers */ + if (cinfo->dither_mode == JDITHER_FS) + cquantize->pub.color_quantize = pass2_fs_dither; + else + cquantize->pub.color_quantize = pass2_no_dither; + cquantize->pub.finish_pass = finish_pass2; + + /* Make sure color count is acceptable */ + i = cinfo->actual_number_of_colors; + if (i < 1) + ERREXIT1(cinfo, JERR_QUANT_FEW_COLORS, 1); + if (i > MAXNUMCOLORS) + ERREXIT1(cinfo, JERR_QUANT_MANY_COLORS, MAXNUMCOLORS); + + if (cinfo->dither_mode == JDITHER_FS) { + size_t arraysize = (size_t) ((cinfo->output_width + 2) * + (3 * SIZEOF(FSERROR))); + /* Allocate Floyd-Steinberg workspace if we didn't already. */ + if (cquantize->fserrors == NULL) + cquantize->fserrors = (FSERRPTR) (*cinfo->mem->alloc_large) + ((j_common_ptr) cinfo, JPOOL_IMAGE, arraysize); + /* Initialize the propagated errors to zero. */ + FMEMZERO((void FAR *) cquantize->fserrors, arraysize); + /* Make the error-limit table if we didn't already. */ + if (cquantize->error_limiter == NULL) + init_error_limit(cinfo); + cquantize->on_odd_row = FALSE; + } + + } + /* Zero the histogram or inverse color map, if necessary */ + if (cquantize->needs_zeroed) { + for (i = 0; i < HIST_C0_ELEMS; i++) { + FMEMZERO((void FAR *) histogram[i], + HIST_C1_ELEMS*HIST_C2_ELEMS * SIZEOF(histcell)); + } + cquantize->needs_zeroed = FALSE; + } +} + + +/* + * Switch to a new external colormap between output passes. + */ + +METHODDEF(void) +new_color_map_2_quant (j_decompress_ptr cinfo) +{ + my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize; + + /* Reset the inverse color map */ + cquantize->needs_zeroed = TRUE; +} + + +/* + * Module initialization routine for 2-pass color quantization. 
+ */ + +GLOBAL(void) +jinit_2pass_quantizer (j_decompress_ptr cinfo) +{ + my_cquantize_ptr cquantize; + int i; + + cquantize = (my_cquantize_ptr) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, + SIZEOF(my_cquantizer)); + cinfo->cquantize = (struct jpeg_color_quantizer *) cquantize; + cquantize->pub.start_pass = start_pass_2_quant; + cquantize->pub.new_color_map = new_color_map_2_quant; + cquantize->fserrors = NULL; /* flag optional arrays not allocated */ + cquantize->error_limiter = NULL; + + /* Make sure jdmaster didn't give me a case I can't handle */ + if (cinfo->out_color_components != 3) + ERREXIT(cinfo, JERR_NOTIMPL); + + /* Allocate the histogram/inverse colormap storage */ + cquantize->histogram = (hist3d) (*cinfo->mem->alloc_small) + ((j_common_ptr) cinfo, JPOOL_IMAGE, HIST_C0_ELEMS * SIZEOF(hist2d)); + for (i = 0; i < HIST_C0_ELEMS; i++) { + cquantize->histogram[i] = (hist2d) (*cinfo->mem->alloc_large) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + HIST_C1_ELEMS*HIST_C2_ELEMS * SIZEOF(histcell)); + } + cquantize->needs_zeroed = TRUE; /* histogram is garbage now */ + + /* Allocate storage for the completed colormap, if required. + * We do this now since it is FAR storage and may affect + * the memory manager's space calculations. + */ + if (cinfo->enable_2pass_quant) { + /* Make sure color count is acceptable */ + int desired = cinfo->desired_number_of_colors; + /* Lower bound on # of colors ... somewhat arbitrary as long as > 0 */ + if (desired < 8) + ERREXIT1(cinfo, JERR_QUANT_FEW_COLORS, 8); + /* Make sure colormap indexes can be represented by JSAMPLEs */ + if (desired > MAXNUMCOLORS) + ERREXIT1(cinfo, JERR_QUANT_MANY_COLORS, MAXNUMCOLORS); + cquantize->sv_colormap = (*cinfo->mem->alloc_sarray) + ((j_common_ptr) cinfo,JPOOL_IMAGE, (JDIMENSION) desired, (JDIMENSION) 3); + cquantize->desired = desired; + } else + cquantize->sv_colormap = NULL; + + /* Only F-S dithering or no dithering is supported. */ + /* If user asks for ordered dither, give him F-S. */ + if (cinfo->dither_mode != JDITHER_NONE) + cinfo->dither_mode = JDITHER_FS; + + /* Allocate Floyd-Steinberg workspace if necessary. + * This isn't really needed until pass 2, but again it is FAR storage. + * Although we will cope with a later change in dither_mode, + * we do not promise to honor max_memory_to_use if dither_mode changes. + */ + if (cinfo->dither_mode == JDITHER_FS) { + cquantize->fserrors = (FSERRPTR) (*cinfo->mem->alloc_large) + ((j_common_ptr) cinfo, JPOOL_IMAGE, + (size_t) ((cinfo->output_width + 2) * (3 * SIZEOF(FSERROR)))); + /* Might as well create the error-limiting table too. */ + init_error_limit(cinfo); + } +} + +#endif /* QUANT_2PASS_SUPPORTED */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jutils.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jutils.c new file mode 100644 index 000000000..5b16b6d03 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LibJPEG/source/jutils.c @@ -0,0 +1,227 @@ +/* + * jutils.c + * + * Copyright (C) 1991-1996, Thomas G. Lane. + * Modified 2009-2011 by Guido Vollbeding. + * This file is part of the Independent JPEG Group's software. + * For conditions of distribution and use, see the accompanying README file. + * + * This file contains tables and miscellaneous utility routines needed + * for both compression and decompression. + * Note we prefix all global names with "j" to minimize conflicts with + * a surrounding application. 
+ */ + +#define JPEG_INTERNALS +#include "jinclude.h" +#include "jpeglib.h" + + +/* + * jpeg_zigzag_order[i] is the zigzag-order position of the i'th element + * of a DCT block read in natural order (left to right, top to bottom). + */ + +#if 0 /* This table is not actually needed in v6a */ + +const int jpeg_zigzag_order[DCTSIZE2] = { + 0, 1, 5, 6, 14, 15, 27, 28, + 2, 4, 7, 13, 16, 26, 29, 42, + 3, 8, 12, 17, 25, 30, 41, 43, + 9, 11, 18, 24, 31, 40, 44, 53, + 10, 19, 23, 32, 39, 45, 52, 54, + 20, 22, 33, 38, 46, 51, 55, 60, + 21, 34, 37, 47, 50, 56, 59, 61, + 35, 36, 48, 49, 57, 58, 62, 63 +}; + +#endif + +/* + * jpeg_natural_order[i] is the natural-order position of the i'th element + * of zigzag order. + * + * When reading corrupted data, the Huffman decoders could attempt + * to reference an entry beyond the end of this array (if the decoded + * zero run length reaches past the end of the block). To prevent + * wild stores without adding an inner-loop test, we put some extra + * "63"s after the real entries. This will cause the extra coefficient + * to be stored in location 63 of the block, not somewhere random. + * The worst case would be a run-length of 15, which means we need 16 + * fake entries. + */ + +const int jpeg_natural_order[DCTSIZE2+16] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + 63, 63, 63, 63, 63, 63, 63, 63, /* extra entries for safety in decoder */ + 63, 63, 63, 63, 63, 63, 63, 63 +}; + +const int jpeg_natural_order7[7*7+16] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 14, 21, 28, 35, + 42, 49, 50, 43, 36, 29, 22, 30, + 37, 44, 51, 52, 45, 38, 46, 53, + 54, + 63, 63, 63, 63, 63, 63, 63, 63, /* extra entries for safety in decoder */ + 63, 63, 63, 63, 63, 63, 63, 63 +}; + +const int jpeg_natural_order6[6*6+16] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 41, 34, 27, + 20, 13, 21, 28, 35, 42, 43, 36, + 29, 37, 44, 45, + 63, 63, 63, 63, 63, 63, 63, 63, /* extra entries for safety in decoder */ + 63, 63, 63, 63, 63, 63, 63, 63 +}; + +const int jpeg_natural_order5[5*5+16] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 12, + 19, 26, 33, 34, 27, 20, 28, 35, + 36, + 63, 63, 63, 63, 63, 63, 63, 63, /* extra entries for safety in decoder */ + 63, 63, 63, 63, 63, 63, 63, 63 +}; + +const int jpeg_natural_order4[4*4+16] = { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 25, 18, 11, 19, 26, 27, + 63, 63, 63, 63, 63, 63, 63, 63, /* extra entries for safety in decoder */ + 63, 63, 63, 63, 63, 63, 63, 63 +}; + +const int jpeg_natural_order3[3*3+16] = { + 0, 1, 8, 16, 9, 2, 10, 17, + 18, + 63, 63, 63, 63, 63, 63, 63, 63, /* extra entries for safety in decoder */ + 63, 63, 63, 63, 63, 63, 63, 63 +}; + +const int jpeg_natural_order2[2*2+16] = { + 0, 1, 8, 9, + 63, 63, 63, 63, 63, 63, 63, 63, /* extra entries for safety in decoder */ + 63, 63, 63, 63, 63, 63, 63, 63 +}; + + +/* + * Arithmetic utilities + */ + +GLOBAL(long) +jdiv_round_up (long a, long b) +/* Compute a/b rounded up to next integer, ie, ceil(a/b) */ +/* Assumes a >= 0, b > 0 */ +{ + return (a + b - 1L) / b; +} + + +GLOBAL(long) +jround_up (long a, long b) +/* Compute a rounded up to next multiple of b, ie, ceil(a/b)*b */ +/* Assumes a >= 0, b > 0 */ +{ + a += b - 1L; + return a - (a % b); +} + 
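The two arithmetic helpers above are small but heavily relied on for layout math elsewhere in libjpeg (for example, ceiling division is how block and MCU counts are derived from image dimensions). A minimal standalone sketch, not part of the vendored file, restating the same arithmetic with hypothetical values:

/* Illustrative sketch only: mirrors jdiv_round_up() and jround_up() above.
 * Both assume a >= 0 and b > 0, as the original comments state. */
#include <assert.h>

static long div_round_up(long a, long b) /* ceil(a/b) */
{
  return (a + b - 1L) / b;
}

static long round_up(long a, long b) /* a rounded up to next multiple of b */
{
  a += b - 1L;
  return a - (a % b);
}

int main(void)
{
  /* Hypothetical example: a 323-sample-wide component with 16-sample MCUs
   * spans 21 MCU columns and is padded out to 336 samples. */
  assert(div_round_up(323L, 16L) == 21L);
  assert(round_up(323L, 16L) == 336L);
  assert(round_up(336L, 16L) == 336L); /* already a multiple: unchanged */
  return 0;
}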
+ +/* On normal machines we can apply MEMCOPY() and MEMZERO() to sample arrays + * and coefficient-block arrays. This won't work on 80x86 because the arrays + * are FAR and we're assuming a small-pointer memory model. However, some + * DOS compilers provide far-pointer versions of memcpy() and memset() even + * in the small-model libraries. These will be used if USE_FMEM is defined. + * Otherwise, the routines below do it the hard way. (The performance cost + * is not all that great, because these routines aren't very heavily used.) + */ + +#ifndef NEED_FAR_POINTERS /* normal case, same as regular macro */ +#define FMEMCOPY(dest,src,size) MEMCOPY(dest,src,size) +#else /* 80x86 case, define if we can */ +#ifdef USE_FMEM +#define FMEMCOPY(dest,src,size) _fmemcpy((void FAR *)(dest), (const void FAR *)(src), (size_t)(size)) +#else +/* This function is for use by the FMEMZERO macro defined in jpegint.h. + * Do not call this function directly, use the FMEMZERO macro instead. + */ +GLOBAL(void) +jzero_far (void FAR * target, size_t bytestozero) +/* Zero out a chunk of FAR memory. */ +/* This might be sample-array data, block-array data, or alloc_large data. */ +{ + register char FAR * ptr = (char FAR *) target; + register size_t count; + + for (count = bytestozero; count > 0; count--) { + *ptr++ = 0; + } +} +#endif +#endif + + +GLOBAL(void) +jcopy_sample_rows (JSAMPARRAY input_array, int source_row, + JSAMPARRAY output_array, int dest_row, + int num_rows, JDIMENSION num_cols) +/* Copy some rows of samples from one place to another. + * num_rows rows are copied from input_array[source_row++] + * to output_array[dest_row++]; these areas may overlap for duplication. + * The source and destination arrays must be at least as wide as num_cols. + */ +{ + register JSAMPROW inptr, outptr; +#ifdef FMEMCOPY + register size_t count = (size_t) (num_cols * SIZEOF(JSAMPLE)); +#else + register JDIMENSION count; +#endif + register int row; + + input_array += source_row; + output_array += dest_row; + + for (row = num_rows; row > 0; row--) { + inptr = *input_array++; + outptr = *output_array++; +#ifdef FMEMCOPY + FMEMCOPY(outptr, inptr, count); +#else + for (count = num_cols; count > 0; count--) + *outptr++ = *inptr++; /* needn't bother with GETJSAMPLE() here */ +#endif + } +} + + +GLOBAL(void) +jcopy_block_row (JBLOCKROW input_row, JBLOCKROW output_row, + JDIMENSION num_blocks) +/* Copy a row of coefficient blocks from one place to another. */ +{ +#ifdef FMEMCOPY + FMEMCOPY(output_row, input_row, num_blocks * (DCTSIZE2 * SIZEOF(JCOEF))); +#else + register JCOEFPTR inptr, outptr; + register long count; + + inptr = (JCOEFPTR) input_row; + outptr = (JCOEFPTR) output_row; + for (count = (long) num_blocks * DCTSIZE2; count > 0; count--) { + *outptr++ = *inptr++; + } +#endif +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/api_lib.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/api_lib.c new file mode 100644 index 000000000..e03b8b745 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/api_lib.c @@ -0,0 +1,1367 @@ +/** + * @file + * Sequential API External module + * + * @defgroup netconn Netconn API + * @ingroup sequential_api + * Thread-safe, to be called from non-TCPIP threads only. + * TX/RX handling based on @ref netbuf (containing @ref pbuf) + * to avoid copying data around. 
+ * + * @defgroup netconn_common Common functions + * @ingroup netconn + * For use with TCP and UDP + * + * @defgroup netconn_tcp TCP only + * @ingroup netconn + * TCP only functions + * + * @defgroup netconn_udp UDP only + * @ingroup netconn + * UDP only functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ */
+
+/* This is the part of the API that is linked with
+ the application */
+
+#include "lwip/opt.h"
+
+#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/api.h"
+#include "lwip/memp.h"
+
+#include "lwip/ip.h"
+#include "lwip/raw.h"
+#include "lwip/udp.h"
+#include "lwip/priv/api_msg.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/priv/tcpip_priv.h"
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+#include <string.h>
+
+#define API_MSG_VAR_REF(name) API_VAR_REF(name)
+#define API_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct api_msg, name)
+#define API_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, ERR_MEM)
+#define API_MSG_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, NULL)
+#define API_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_API_MSG, name)
+
+#if TCP_LISTEN_BACKLOG
+/* need to allocate API message for accept so empty message pool does not result in event loss
+ * see bug #47512: MPU_COMPATIBLE may fail on empty pool */
+#define API_MSG_VAR_ALLOC_ACCEPT(msg) API_MSG_VAR_ALLOC(msg)
+#define API_MSG_VAR_FREE_ACCEPT(msg) API_MSG_VAR_FREE(msg)
+#else /* TCP_LISTEN_BACKLOG */
+#define API_MSG_VAR_ALLOC_ACCEPT(msg)
+#define API_MSG_VAR_FREE_ACCEPT(msg)
+#endif /* TCP_LISTEN_BACKLOG */
+
+#if LWIP_NETCONN_FULLDUPLEX
+#define NETCONN_RECVMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->recvmbox) && (((conn)->flags & NETCONN_FLAG_MBOXINVALID) == 0))
+#define NETCONN_ACCEPTMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->acceptmbox) && (((conn)->flags & (NETCONN_FLAG_MBOXCLOSED|NETCONN_FLAG_MBOXINVALID)) == 0))
+#define NETCONN_MBOX_WAITING_INC(conn) SYS_ARCH_INC(conn->mbox_threads_waiting, 1)
+#define NETCONN_MBOX_WAITING_DEC(conn) SYS_ARCH_DEC(conn->mbox_threads_waiting, 1)
+#else /* LWIP_NETCONN_FULLDUPLEX */
+#define NETCONN_RECVMBOX_WAITABLE(conn) sys_mbox_valid(&(conn)->recvmbox)
+#define NETCONN_ACCEPTMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->acceptmbox) && (((conn)->flags & NETCONN_FLAG_MBOXCLOSED) == 0))
+#define NETCONN_MBOX_WAITING_INC(conn)
+#define NETCONN_MBOX_WAITING_DEC(conn)
+#endif /* LWIP_NETCONN_FULLDUPLEX */
+
+static err_t netconn_close_shutdown(struct netconn *conn, u8_t how);
+
+/**
+ * Call the lower part of a netconn_* function
+ * This function is then running in the thread context
+ * of tcpip_thread and has exclusive access to lwIP core code.
+ *
+ * @param fn function to call
+ * @param apimsg a struct containing the function to call and its parameters
+ * @return ERR_OK if the function was called, another err_t if not
+ */
+static err_t
+netconn_apimsg(tcpip_callback_fn fn, struct api_msg *apimsg)
+{
+ err_t err;
+
+#ifdef LWIP_DEBUG
+ /* catch functions that don't set err */
+ apimsg->err = ERR_VAL;
+#endif /* LWIP_DEBUG */
+
+#if LWIP_NETCONN_SEM_PER_THREAD
+ apimsg->op_completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
+#endif /* LWIP_NETCONN_SEM_PER_THREAD */
+
+ err = tcpip_send_msg_wait_sem(fn, apimsg, LWIP_API_MSG_SEM(apimsg));
+ if (err == ERR_OK) {
+ return apimsg->err;
+ }
+ return err;
+}
+
+/**
+ * Create a new netconn (of a specific type) that has a callback function.
+ * The corresponding pcb is also created.
+ * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param proto the IP protocol for RAW IP pcbs + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn * +netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, netconn_callback callback) +{ + struct netconn *conn; + API_MSG_VAR_DECLARE(msg); + API_MSG_VAR_ALLOC_RETURN_NULL(msg); + + conn = netconn_alloc(t, callback); + if (conn != NULL) { + err_t err; + + API_MSG_VAR_REF(msg).msg.n.proto = proto; + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_newconn, &API_MSG_VAR_REF(msg)); + if (err != ERR_OK) { + LWIP_ASSERT("freeing conn without freeing pcb", conn->pcb.tcp == NULL); + LWIP_ASSERT("conn has no recvmbox", sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("conn->acceptmbox shouldn't exist", !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ +#if !LWIP_NETCONN_SEM_PER_THREAD + LWIP_ASSERT("conn has no op_completed", sys_sem_valid(&conn->op_completed)); + sys_sem_free(&conn->op_completed); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_free(&conn->recvmbox); + memp_free(MEMP_NETCONN, conn); + API_MSG_VAR_FREE(msg); + return NULL; + } + } + API_MSG_VAR_FREE(msg); + return conn; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free all its resources but not the netconn itself. + * UDP and RAW connection are completely closed, TCP pcbs might still be in a waitstate + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_prepare_delete(struct netconn *conn) +{ + err_t err; + API_MSG_VAR_DECLARE(msg); + + /* No ASSERT here because possible to get a (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#if LWIP_TCP + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_TCP */ +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + err = netconn_apimsg(lwip_netconn_do_delconn, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + if (err != ERR_OK) { + return err; + } + return ERR_OK; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free its resources. + * UDP and RAW connection are completely closed, TCP pcbs might still be in a waitstate + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_delete(struct netconn *conn) +{ + err_t err; + + /* No ASSERT here because possible to get a (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + /* Already called netconn_prepare_delete() before */ + err = ERR_OK; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + err = netconn_prepare_delete(conn); + } + if (err == ERR_OK) { + netconn_free(conn); + } + return err; +} + +/** + * Get the local or remote IP address and port of a netconn. + * For RAW netconns, this returns the protocol instead of a port! 
+ * + * @param conn the netconn to query + * @param addr a pointer to which to save the IP address + * @param port a pointer to which to save the port (or protocol for RAW) + * @param local 1 to get the local IP address, 0 to get the remote one + * @return ERR_CONN for invalid connections + * ERR_OK if the information was retrieved + */ +err_t +netconn_getaddr(struct netconn *conn, ip_addr_t *addr, u16_t *port, u8_t local) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_getaddr: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid addr", (addr != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid port", (port != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.ad.local = local; +#if LWIP_MPU_COMPATIBLE + err = netconn_apimsg(lwip_netconn_do_getaddr, &API_MSG_VAR_REF(msg)); + *addr = msg->msg.ad.ipaddr; + *port = msg->msg.ad.port; +#else /* LWIP_MPU_COMPATIBLE */ + msg.msg.ad.ipaddr = addr; + msg.msg.ad.port = port; + err = netconn_apimsg(lwip_netconn_do_getaddr, &msg); +#endif /* LWIP_MPU_COMPATIBLE */ + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific local IP address and port. + * Binding one netconn twice might not always be checked correctly! + * + * @param conn the netconn to bind + * @param addr the local IP address to bind the netconn to + * (use IP4_ADDR_ANY/IP6_ADDR_ANY to bind to all addresses) + * @param port the local port to bind the netconn to (not used for RAW) + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to bind to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is 0, use IP_ANY_TYPE to bind + */ + if ((netconn_get_ipv6only(conn) == 0) && + ip_addr_cmp(addr, IP6_ADDR_ANY)) { + addr = IP_ANY_TYPE; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_bind, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific interface and port. + * Binding one netconn twice might not always be checked correctly! + * + * @param conn the netconn to bind + * @param if_idx the local interface index to bind the netconn to + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind_if(struct netconn *conn, u8_t if_idx) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind_if: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.if_idx = if_idx; + err = netconn_apimsg(lwip_netconn_do_bind_if, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Connect a netconn to a specific remote IP address and port. 
+ * + * @param conn the netconn to connect + * @param addr the remote IP address to connect to + * @param port the remote port to connect to (no used for RAW) + * @return ERR_OK if connected, return value of tcp_/udp_/raw_connect otherwise + */ +err_t +netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_connect: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_connect, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_udp + * Disconnect a netconn from its current peer (only valid for UDP netconns). + * + * @param conn the netconn to disconnect + * @return See @ref err_t + */ +err_t +netconn_disconnect(struct netconn *conn) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_disconnect: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_disconnect, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Set a TCP netconn into listen mode + * + * @param conn the tcp netconn to set to listen mode + * @param backlog the listen backlog, only used if TCP_LISTEN_BACKLOG==1 + * @return ERR_OK if the netconn was set to listen (UDP and RAW netconns + * don't return any error (yet?)) + */ +err_t +netconn_listen_with_backlog(struct netconn *conn, u8_t backlog) +{ +#if LWIP_TCP + API_MSG_VAR_DECLARE(msg); + err_t err; + + /* This does no harm. If TCP_LISTEN_BACKLOG is off, backlog is unused. */ + LWIP_UNUSED_ARG(backlog); + + LWIP_ERROR("netconn_listen: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_REF(msg).msg.lb.backlog = backlog; +#endif /* TCP_LISTEN_BACKLOG */ + err = netconn_apimsg(lwip_netconn_do_listen, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(conn); + LWIP_UNUSED_ARG(backlog); + return ERR_ARG; +#endif /* LWIP_TCP */ +} + +/** + * @ingroup netconn_tcp + * Accept a new connection on a TCP listening netconn. 
+ * + * @param conn the TCP listen netconn + * @param new_conn pointer where the new connection is stored + * @return ERR_OK if a new connection has been received or an error + * code otherwise + */ +err_t +netconn_accept(struct netconn *conn, struct netconn **new_conn) +{ +#if LWIP_TCP + err_t err; + void *accept_ptr; + struct netconn *newconn; +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_DECLARE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + + LWIP_ERROR("netconn_accept: invalid pointer", (new_conn != NULL), return ERR_ARG;); + *new_conn = NULL; + LWIP_ERROR("netconn_accept: invalid conn", (conn != NULL), return ERR_ARG;); + + /* NOTE: Although the opengroup spec says a pending error shall be returned to + send/recv/getsockopt(SO_ERROR) only, we return it for listening + connections also, to handle embedded-system errors */ + err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + if (!NETCONN_ACCEPTMBOX_WAITABLE(conn)) { + /* don't accept if closed: this might block the application task + waiting on acceptmbox forever! */ + return ERR_CLSD; + } + + API_MSG_VAR_ALLOC_ACCEPT(msg); + + NETCONN_MBOX_WAITING_INC(conn); + if (netconn_is_nonblocking(conn)) { + if (sys_arch_mbox_tryfetch(&conn->acceptmbox, &accept_ptr) == SYS_ARCH_TIMEOUT) { + API_MSG_VAR_FREE_ACCEPT(msg); + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_WOULDBLOCK; + } + } else { +#if LWIP_SO_RCVTIMEO + if (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, conn->recv_timeout) == SYS_ARCH_TIMEOUT) { + API_MSG_VAR_FREE_ACCEPT(msg); + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_TIMEOUT; + } +#else + sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); +#endif /* LWIP_SO_RCVTIMEO*/ + } + NETCONN_MBOX_WAITING_DEC(conn); +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + if (lwip_netconn_is_deallocated_msg(accept_ptr)) { + /* the netconn has been closed from another thread */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CONN; + } + } +#endif + + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); + + if (lwip_netconn_is_err_msg(accept_ptr, &err)) { + /* a connection has been aborted: e.g. out of pcbs or out of netconns during accept */ + API_MSG_VAR_FREE_ACCEPT(msg); + return err; + } + if (accept_ptr == NULL) { + /* connection has been aborted */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CLSD; + } + newconn = (struct netconn *)accept_ptr; +#if TCP_LISTEN_BACKLOG + /* Let the stack know that we have accepted the connection. */ + API_MSG_VAR_REF(msg).conn = newconn; + /* don't care for the return value of lwip_netconn_do_recv */ + netconn_apimsg(lwip_netconn_do_accepted, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + + *new_conn = newconn; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(conn); + LWIP_UNUSED_ARG(new_conn); + return ERR_ARG; +#endif /* LWIP_TCP */ +} + +/** + * @ingroup netconn_common + * Receive data: actual implementation that doesn't care whether pbuf or netbuf + * is received (this is internal, it's just here for describing common errors) + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf/netbuf is stored when received data + * @param apiflags flags that control function behaviour. 
For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_CONN if not connected + * ERR_CLSD if TCP connection has been closed + * ERR_WOULDBLOCK if the netconn is nonblocking but would block to wait for data + * ERR_TIMEOUT if the netconn has a receive timeout and no data was received + */ +static err_t +netconn_recv_data(struct netconn *conn, void **new_buf, u8_t apiflags) +{ + void *buf = NULL; + u16_t len; + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); + + if (!NETCONN_RECVMBOX_WAITABLE(conn)) { + err_t err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + return ERR_CONN; + } + + NETCONN_MBOX_WAITING_INC(conn); + if (netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK) || + (conn->flags & NETCONN_FLAG_MBOXCLOSED) || (conn->pending_err != ERR_OK)) { + if (sys_arch_mbox_tryfetch(&conn->recvmbox, &buf) == SYS_ARCH_TIMEOUT) { + err_t err; + NETCONN_MBOX_WAITING_DEC(conn); + err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + if (conn->flags & NETCONN_FLAG_MBOXCLOSED) { + return ERR_CONN; + } + return ERR_WOULDBLOCK; + } + } else { +#if LWIP_SO_RCVTIMEO + if (sys_arch_mbox_fetch(&conn->recvmbox, &buf, conn->recv_timeout) == SYS_ARCH_TIMEOUT) { + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_TIMEOUT; + } +#else + sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); +#endif /* LWIP_SO_RCVTIMEO*/ + } + NETCONN_MBOX_WAITING_DEC(conn); +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + if (lwip_netconn_is_deallocated_msg(buf)) { + /* the netconn has been closed from another thread */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CONN; + } + } +#endif + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + err_t err; + /* Check if this is an error message or a pbuf */ + if (lwip_netconn_is_err_msg(buf, &err)) { + /* new_buf has been zeroed above already */ + if (err == ERR_CLSD) { + /* connection closed translates to ERR_OK with *new_buf == NULL */ + return ERR_OK; + } + return err; + } + len = ((struct pbuf *)buf)->tot_len; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ +#if (LWIP_UDP || LWIP_RAW) + { + LWIP_ASSERT("buf != NULL", buf != NULL); + len = netbuf_len((struct netbuf *)buf); + } +#endif /* (LWIP_UDP || LWIP_RAW) */ + +#if LWIP_SO_RCVBUF + SYS_ARCH_DEC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVMINUS, len); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_recv_data: received %p, len=%"U16_F"\n", buf, len)); + + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; +} + +#if LWIP_TCP +static err_t +netconn_tcp_recvd_msg(struct netconn *conn, size_t len, struct api_msg *msg) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + msg->conn = conn; + msg->msg.r.len = len; + + return netconn_apimsg(lwip_netconn_do_recv, msg); +} + +err_t +netconn_tcp_recvd(struct netconn *conn, size_t len) +{ + err_t err; + 
API_MSG_VAR_DECLARE(msg); + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + err = netconn_tcp_recvd_msg(conn, len, &API_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + return err; +} + +static err_t +netconn_recv_data_tcp(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags) +{ + err_t err; + struct pbuf *buf; + API_MSG_VAR_DECLARE(msg); +#if LWIP_MPU_COMPATIBLE + msg = NULL; +#endif + + if (!NETCONN_RECVMBOX_WAITABLE(conn)) { + /* This only happens when calling this function more than once *after* receiving FIN */ + return ERR_CONN; + } + if (netconn_is_flag_set(conn, NETCONN_FIN_RX_PENDING)) { + netconn_clear_flags(conn, NETCONN_FIN_RX_PENDING); + goto handle_fin; + } + + if (!(apiflags & NETCONN_NOAUTORCVD)) { + /* need to allocate API message here so empty message pool does not result in event loss + * see bug #47512: MPU_COMPATIBLE may fail on empty pool */ + API_MSG_VAR_ALLOC(msg); + } + + err = netconn_recv_data(conn, (void **)new_buf, apiflags); + if (err != ERR_OK) { + if (!(apiflags & NETCONN_NOAUTORCVD)) { + API_MSG_VAR_FREE(msg); + } + return err; + } + buf = *new_buf; + if (!(apiflags & NETCONN_NOAUTORCVD)) { + /* Let the stack know that we have taken the data. */ + u16_t len = buf ? buf->tot_len : 1; + /* don't care for the return value of lwip_netconn_do_recv */ + /* @todo: this should really be fixed, e.g. by retrying in poll on error */ + netconn_tcp_recvd_msg(conn, len, &API_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + } + + /* If we are closed, we indicate that we no longer wish to use the socket */ + if (buf == NULL) { + if (apiflags & NETCONN_NOFIN) { + /* received a FIN but the caller cannot handle it right now: + re-enqueue it and return "no data" */ + netconn_set_flags(conn, NETCONN_FIN_RX_PENDING); + return ERR_WOULDBLOCK; + } else { +handle_fin: + API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); + if (conn->pcb.ip == NULL) { + /* race condition: RST during recv */ + err = netconn_err(conn); + if (err != ERR_OK) { + return err; + } + return ERR_RST; + } + /* RX side is closed, so deallocate the recvmbox */ + netconn_close_shutdown(conn, NETCONN_SHUT_RD); + /* Don' store ERR_CLSD as conn->err since we are only half-closed */ + return ERR_CLSD; + } + } + return err; +} + +/** + * @ingroup netconn_tcp + * Receive data (in form of a pbuf) from a TCP netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error, @see netconn_recv_data) + * ERR_ARG if conn is not a TCP netconn + */ +err_t +netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data_tcp(conn, new_buf, 0); +} + +/** + * @ingroup netconn_tcp + * Receive data (in form of a pbuf) from a TCP netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf is stored when received data + * @param apiflags flags that control function behaviour. 
For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error, @see netconn_recv_data) + * ERR_ARG if conn is not a TCP netconn + */ +err_t +netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data_tcp(conn, new_buf, apiflags); +} +#endif /* LWIP_TCP */ + +/** + * Receive data (in form of a netbuf) from a UDP or RAW netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a UDP/RAW netconn + */ +err_t +netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf) +{ + LWIP_ERROR("netconn_recv_udp_raw_netbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) != NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf, 0); +} + +/** + * Receive data (in form of a netbuf) from a UDP or RAW netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @param apiflags flags that control function behaviour. For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a UDP/RAW netconn + */ +err_t +netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags) +{ + LWIP_ERROR("netconn_recv_udp_raw_netbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) != NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf, apiflags); +} + +/** + * @ingroup netconn_common + * Receive data (in form of a netbuf containing a packet buffer) from a netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + */ +err_t +netconn_recv(struct netconn *conn, struct netbuf **new_buf) +{ +#if LWIP_TCP + struct netbuf *buf = NULL; + err_t err; +#endif /* LWIP_TCP */ + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + struct pbuf *p = NULL; + /* This is not a listening netconn, since recvmbox is set */ + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + return ERR_MEM; + } + + err = netconn_recv_data_tcp(conn, &p, 0); + if (err != ERR_OK) { + memp_free(MEMP_NETBUF, buf); + return err; + } + LWIP_ASSERT("p != NULL", p != NULL); + + buf->p = p; + buf->ptr = p; + buf->port = 0; + ip_addr_set_zero(&buf->addr); + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || 
LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ + { +#if (LWIP_UDP || LWIP_RAW) + return netconn_recv_data(conn, (void **)new_buf, 0); +#endif /* (LWIP_UDP || LWIP_RAW) */ + } +} + +/** + * @ingroup netconn_udp + * Send data (in form of a netbuf) to a specific remote IP address and port. + * Only to be used for UDP and RAW netconns (not TCP). + * + * @param conn the netconn over which to send data + * @param buf a netbuf containing the data to send + * @param addr the remote IP address to which to send the data + * @param port the remote port to which to send the data + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_sendto(struct netconn *conn, struct netbuf *buf, const ip_addr_t *addr, u16_t port) +{ + if (buf != NULL) { + ip_addr_set(&buf->addr, addr); + buf->port = port; + return netconn_send(conn, buf); + } + return ERR_VAL; +} + +/** + * @ingroup netconn_udp + * Send data over a UDP or RAW netconn (that is already connected). + * + * @param conn the UDP or RAW netconn over which to send data + * @param buf a netbuf containing the data to send + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_send(struct netconn *conn, struct netbuf *buf) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_send: invalid conn", (conn != NULL), return ERR_ARG;); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_send: sending %"U16_F" bytes\n", buf->p->tot_len)); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.b = buf; + err = netconn_apimsg(lwip_netconn_do_send, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Send data over a TCP netconn. + * + * @param conn the TCP netconn over which to send data + * @param dataptr pointer to the application buffer that contains the data to send + * @param size size of the application data to send + * @param apiflags combination of following flags : + * - NETCONN_COPY: data will be copied into memory belonging to the stack + * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent + * - NETCONN_DONTBLOCK: only write the data if all data can be written at once + * @param bytes_written pointer to a location that receives the number of written bytes + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written) +{ + struct netvector vector; + vector.ptr = dataptr; + vector.len = size; + return netconn_write_vectors_partly(conn, &vector, 1, apiflags, bytes_written); +} + +/** + * Send vectorized data atomically over a TCP netconn. 
+ * + * @param conn the TCP netconn over which to send data + * @param vectors array of vectors containing data to send + * @param vectorcnt number of vectors in the array + * @param apiflags combination of following flags : + * - NETCONN_COPY: data will be copied into memory belonging to the stack + * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent + * - NETCONN_DONTBLOCK: only write the data if all data can be written at once + * @param bytes_written pointer to a location that receives the number of written bytes + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt, + u8_t apiflags, size_t *bytes_written) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + u8_t dontblock; + size_t size; + int i; + + LWIP_ERROR("netconn_write: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_write: invalid conn->type", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP), return ERR_VAL;); + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout != 0) { + dontblock = 1; + } +#endif /* LWIP_SO_SNDTIMEO */ + if (dontblock && !bytes_written) { + /* This implies netconn_write() cannot be used for non-blocking send, since + it has no way to return the number of bytes written. */ + return ERR_VAL; + } + + /* sum up the total size */ + size = 0; + for (i = 0; i < vectorcnt; i++) { + size += vectors[i].len; + if (size < vectors[i].len) { + /* overflow */ + return ERR_VAL; + } + } + if (size == 0) { + return ERR_OK; + } else if (size > SSIZE_MAX) { + ssize_t limited; + /* this is required by the socket layer (cannot send full size_t range) */ + if (!bytes_written) { + return ERR_VAL; + } + /* limit the amount of data to send */ + limited = SSIZE_MAX; + size = (size_t)limited; + } + + API_MSG_VAR_ALLOC(msg); + /* non-blocking write sends as much */ + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.w.vector = vectors; + API_MSG_VAR_REF(msg).msg.w.vector_cnt = vectorcnt; + API_MSG_VAR_REF(msg).msg.w.vector_off = 0; + API_MSG_VAR_REF(msg).msg.w.apiflags = apiflags; + API_MSG_VAR_REF(msg).msg.w.len = size; + API_MSG_VAR_REF(msg).msg.w.offset = 0; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout != 0) { + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.w.time_started = sys_now(); + } else { + API_MSG_VAR_REF(msg).msg.w.time_started = 0; + } +#endif /* LWIP_SO_SNDTIMEO */ + + /* For locking the core: this _can_ be delayed on low memory/low send buffer, + but if it is, this is done inside api_msg.c:do_write(), so we can use the + non-blocking version here. */ + err = netconn_apimsg(lwip_netconn_do_write, &API_MSG_VAR_REF(msg)); + if (err == ERR_OK) { + if (bytes_written != NULL) { + *bytes_written = API_MSG_VAR_REF(msg).msg.w.offset; + } + /* for blocking, check all requested bytes were written, NOTE: send_timeout is + treated as dontblock (see dontblock assignment above) */ + if (!dontblock) { + LWIP_ASSERT("do_write failed to write all bytes", API_MSG_VAR_REF(msg).msg.w.offset == size); + } + } + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Close or shutdown a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to close or shutdown + * @param how fully close or only shutdown one side? 
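+ *            (NETCONN_SHUT_RD, NETCONN_SHUT_WR or NETCONN_SHUT_RDWR; as noted
+ *            below, shutting down both sides is equivalent to a full close)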
+ * @return ERR_OK if the netconn was closed, any other err_t on error + */ +static err_t +netconn_close_shutdown(struct netconn *conn, u8_t how) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + LWIP_UNUSED_ARG(how); + + LWIP_ERROR("netconn_close: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_TCP + /* shutting down both ends is the same as closing */ + API_MSG_VAR_REF(msg).msg.sd.shut = how; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#endif /* LWIP_TCP */ + err = netconn_apimsg(lwip_netconn_do_close, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Close a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to close + * @return ERR_OK if the netconn was closed, any other err_t on error + */ +err_t +netconn_close(struct netconn *conn) +{ + /* shutting down both ends is the same as closing */ + return netconn_close_shutdown(conn, NETCONN_SHUT_RDWR); +} + +/** + * @ingroup netconn_common + * Get and reset pending error on a netconn + * + * @param conn the netconn to get the error from + * @return and pending error or ERR_OK if no error was pending + */ +err_t +netconn_err(struct netconn *conn) +{ + err_t err; + SYS_ARCH_DECL_PROTECT(lev); + if (conn == NULL) { + return ERR_OK; + } + SYS_ARCH_PROTECT(lev); + err = conn->pending_err; + conn->pending_err = ERR_OK; + SYS_ARCH_UNPROTECT(lev); + return err; +} + +/** + * @ingroup netconn_tcp + * Shut down one or both sides of a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to shut down + * @param shut_rx shut down the RX side (no more read possible after this) + * @param shut_tx shut down the TX side (no more write possible after this) + * @return ERR_OK if the netconn was closed, any other err_t on error + */ +err_t +netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx) +{ + return netconn_close_shutdown(conn, (u8_t)((shut_rx ? NETCONN_SHUT_RD : 0) | (shut_tx ? NETCONN_SHUT_WR : 0))); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * @ingroup netconn_udp + * Join multicast groups for UDP netconns. 
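+ * (the join_or_leave argument selects whether a join- or a leave-message is sent)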
+ * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param netif_addr the IP address of the network interface on which to send + * the igmp message + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group(struct netconn *conn, + const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (netif_addr == NULL) { + netif_addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.netif_addr = API_MSG_VAR_REF(netif_addr); + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +/** + * @ingroup netconn_udp + * Join multicast groups for UDP netconns. + * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param if_idx the index of the netif + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group_netif(struct netconn *conn, + const ip_addr_t *multiaddr, + u8_t if_idx, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (if_idx == NETIF_NO_INDEX) { + return ERR_IF; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.if_idx = if_idx; + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group_netif, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * @ingroup netconn_common + * Execute a DNS query, only one IP address is returned + * + * @param name a string representation of the DNS host name to query + * @param addr a preallocated ip_addr_t where to store the resolved IP address + * @param dns_addrtype IP address type (IPv4 / IPv6) + * @return ERR_OK: resolving succeeded + * ERR_MEM: memory error, try again later + * ERR_ARG: dns client not initialized or invalid hostname + * ERR_VAL: dns server response was invalid + */ +#if LWIP_IPV4 && LWIP_IPV6 +err_t +netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype) +#else +err_t +netconn_gethostbyname(const char *name, ip_addr_t *addr) +#endif +{ + API_VAR_DECLARE(struct dns_api_msg, msg); +#if !LWIP_MPU_COMPATIBLE + sys_sem_t sem; +#endif /* LWIP_MPU_COMPATIBLE */ + err_t err; + err_t cberr; + + LWIP_ERROR("netconn_gethostbyname: 
invalid name", (name != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_gethostbyname: invalid addr", (addr != NULL), return ERR_ARG;); +#if LWIP_MPU_COMPATIBLE + if (strlen(name) >= DNS_MAX_NAME_LENGTH) { + return ERR_ARG; + } +#endif + +#ifdef LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE +#if LWIP_IPV4 && LWIP_IPV6 + if (LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, dns_addrtype, &err)) { +#else + if (LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, NETCONN_DNS_DEFAULT, &err)) { +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return err; + } +#endif /* LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE */ + + API_VAR_ALLOC(struct dns_api_msg, MEMP_DNS_API_MSG, msg, ERR_MEM); +#if LWIP_MPU_COMPATIBLE + strncpy(API_VAR_REF(msg).name, name, DNS_MAX_NAME_LENGTH - 1); + API_VAR_REF(msg).name[DNS_MAX_NAME_LENGTH - 1] = 0; +#else /* LWIP_MPU_COMPATIBLE */ + msg.err = &err; + msg.sem = &sem; + API_VAR_REF(msg).addr = API_VAR_REF(addr); + API_VAR_REF(msg).name = name; +#endif /* LWIP_MPU_COMPATIBLE */ +#if LWIP_IPV4 && LWIP_IPV6 + API_VAR_REF(msg).dns_addrtype = dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_NETCONN_SEM_PER_THREAD + API_VAR_REF(msg).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD*/ + err = sys_sem_new(API_EXPR_REF(API_VAR_REF(msg).sem), 0); + if (err != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + cberr = tcpip_send_msg_wait_sem(lwip_netconn_do_gethostbyname, &API_VAR_REF(msg), API_EXPR_REF(API_VAR_REF(msg).sem)); +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(API_EXPR_REF(API_VAR_REF(msg).sem)); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + if (cberr != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return cberr; + } + +#if LWIP_MPU_COMPATIBLE + *addr = msg->addr; + err = msg->err; +#endif /* LWIP_MPU_COMPATIBLE */ + + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; +} +#endif /* LWIP_DNS*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void +netconn_thread_init(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem == NULL) || !sys_sem_valid(sem)) { + /* call alloc only once */ + LWIP_NETCONN_THREAD_SEM_ALLOC(); + LWIP_ASSERT("LWIP_NETCONN_THREAD_SEM_ALLOC() failed", sys_sem_valid(LWIP_NETCONN_THREAD_SEM_GET())); + } +} + +void +netconn_thread_cleanup(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem != NULL) && sys_sem_valid(sem)) { + /* call free only once */ + LWIP_NETCONN_THREAD_SEM_FREE(); + } +} +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#endif /* LWIP_NETCONN */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/api_msg.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/api_msg.c new file mode 100644 index 000000000..39531024e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/api_msg.c @@ -0,0 +1,2173 @@ +/** + * @file + * Sequential API Internal module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/api_msg.h" + +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/udp.h" +#include "lwip/tcp.h" +#include "lwip/raw.h" + +#include "lwip/memp.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/mld6.h" +#include "lwip/priv/tcpip_priv.h" + +#include + +/* netconns are polled once per second (e.g. continue write on memory error) */ +#define NETCONN_TCP_POLL_INTERVAL 2 + +#define SET_NONBLOCKING_CONNECT(conn, val) do { if (val) { \ + netconn_set_flags(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT); \ +} else { \ + netconn_clear_flags(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT); }} while(0) +#define IN_NONBLOCKING_CONNECT(conn) netconn_is_flag_set(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT) + +#if LWIP_NETCONN_FULLDUPLEX +#define NETCONN_MBOX_VALID(conn, mbox) (sys_mbox_valid(mbox) && ((conn->flags & NETCONN_FLAG_MBOXINVALID) == 0)) +#else +#define NETCONN_MBOX_VALID(conn, mbox) sys_mbox_valid(mbox) +#endif + +/* forward declarations */ +#if LWIP_TCP +#if LWIP_TCPIP_CORE_LOCKING +#define WRITE_DELAYED , 1 +#define WRITE_DELAYED_PARAM , u8_t delayed +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define WRITE_DELAYED +#define WRITE_DELAYED_PARAM +#endif /* LWIP_TCPIP_CORE_LOCKING */ +static err_t lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM); +static err_t lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM); +#endif + +static void netconn_drain(struct netconn *conn); + +#if LWIP_TCPIP_CORE_LOCKING +#define TCPIP_APIMSG_ACK(m) +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define TCPIP_APIMSG_ACK(m) do { sys_sem_signal(LWIP_API_MSG_SEM(m)); } while(0) +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_NETCONN_FULLDUPLEX +const u8_t netconn_deleted = 0; + +int +lwip_netconn_is_deallocated_msg(void *msg) +{ + if (msg == &netconn_deleted) { + return 1; + } + return 0; +} +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +#if LWIP_TCP +const u8_t netconn_aborted = 0; +const u8_t netconn_reset = 0; +const u8_t netconn_closed = 0; + +/** Translate an error to a unique void* passed via an mbox */ +static void * +lwip_netconn_err_to_msg(err_t err) +{ + switch (err) { + case ERR_ABRT: + return LWIP_CONST_CAST(void *, &netconn_aborted); + case ERR_RST: + return LWIP_CONST_CAST(void *, &netconn_reset); + case ERR_CLSD: + return LWIP_CONST_CAST(void *, &netconn_closed); + default: + LWIP_ASSERT("unhandled error", err == ERR_OK); + return NULL; + } +} + +int +lwip_netconn_is_err_msg(void 
*msg, err_t *err) +{ + LWIP_ASSERT("err != NULL", err != NULL); + + if (msg == &netconn_aborted) { + *err = ERR_ABRT; + return 1; + } else if (msg == &netconn_reset) { + *err = ERR_RST; + return 1; + } else if (msg == &netconn_closed) { + *err = ERR_CLSD; + return 1; + } + return 0; +} +#endif /* LWIP_TCP */ + + +#if LWIP_RAW +/** + * Receive callback function for RAW netconns. + * Doesn't 'eat' the packet, only copies it and sends it to + * conn->recvmbox + * + * @see raw.h (struct raw_pcb.recv) for parameters and return value + */ +static u8_t +recv_raw(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr) +{ + struct pbuf *q; + struct netbuf *buf; + struct netconn *conn; + + LWIP_UNUSED_ARG(addr); + conn = (struct netconn *)arg; + + if ((conn != NULL) && NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { +#if LWIP_SO_RCVBUF + int recv_avail; + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize) { + return 0; + } +#endif /* LWIP_SO_RCVBUF */ + /* copy the whole packet into new pbufs */ + q = pbuf_clone(PBUF_RAW, PBUF_RAM, p); + if (q != NULL) { + u16_t len; + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(q); + return 0; + } + + buf->p = q; + buf->ptr = q; + ip_addr_copy(buf->addr, *ip_current_src_addr()); + buf->port = pcb->protocol; + + len = q->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return 0; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + } + } + + return 0; /* do not eat the packet */ +} +#endif /* LWIP_RAW*/ + +#if LWIP_UDP +/** + * Receive callback function for UDP netconns. + * Posts the packet to conn->recvmbox or deletes it on memory error. + * + * @see udp.h (struct udp_pcb.recv) for parameters + */ +static void +recv_udp(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port) +{ + struct netbuf *buf; + struct netconn *conn; + u16_t len; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + LWIP_UNUSED_ARG(pcb); /* only used for asserts... 
*/ + LWIP_ASSERT("recv_udp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_udp must have an argument", arg != NULL); + conn = (struct netconn *)arg; + + if (conn == NULL) { + pbuf_free(p); + return; + } + + LWIP_ASSERT("recv_udp: recv for wrong pcb!", conn->pcb.udp == pcb); + +#if LWIP_SO_RCVBUF + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox) || + ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize)) { +#else /* LWIP_SO_RCVBUF */ + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { +#endif /* LWIP_SO_RCVBUF */ + pbuf_free(p); + return; + } + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(p); + return; + } else { + buf->p = p; + buf->ptr = p; + ip_addr_set(&buf->addr, addr); + buf->port = port; +#if LWIP_NETBUF_RECVINFO + if (conn->flags & NETCONN_FLAG_PKTINFO) { + /* get the UDP header - always in the first pbuf, ensured by udp_input */ + const struct udp_hdr *udphdr = (const struct udp_hdr *)ip_next_header_ptr(); + buf->flags = NETBUF_FLAG_DESTADDR; + ip_addr_set(&buf->toaddr, ip_current_dest_addr()); + buf->toport_chksum = udphdr->dest; + } +#endif /* LWIP_NETBUF_RECVINFO */ + } + + len = p->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } +} +#endif /* LWIP_UDP */ + +#if LWIP_TCP +/** + * Receive callback function for TCP netconns. + * Posts the packet to conn->recvmbox, but doesn't delete it on errors. + * + * @see tcp.h (struct tcp_pcb.recv) for parameters and return value + */ +static err_t +recv_tcp(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + struct netconn *conn; + u16_t len; + void *msg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("recv_tcp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_tcp must have an argument", arg != NULL); + LWIP_ASSERT("err != ERR_OK unhandled", err == ERR_OK); + LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */ + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + LWIP_ASSERT("recv_tcp: recv for wrong pcb!", conn->pcb.tcp == pcb); + + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { + /* recvmbox already deleted */ + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } + return ERR_OK; + } + /* Unlike for UDP or RAW pcbs, don't check for available space + using recv_avail since that could break the connection + (data is already ACKed) */ + + if (p != NULL) { + msg = p; + len = p->tot_len; + } else { + msg = LWIP_CONST_CAST(void *, &netconn_closed); + len = 0; + } + + if (sys_mbox_trypost(&conn->recvmbox, msg) != ERR_OK) { + /* don't deallocate p: it is presented to us later again from tcp_fasttmr! */ + return ERR_MEM; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + + return ERR_OK; +} + +/** + * Poll callback function for TCP netconns. + * Wakes up an application thread that waits for a connection to close + * or data to be sent. The application thread then takes the + * appropriate action to go on. + * + * Signals the conn->sem. + * netconn_close waits for conn->sem if closing failed. 
+ * + * @see tcp.h (struct tcp_pcb.poll) for parameters and return value + */ +static err_t +poll_tcp(void *arg, struct tcp_pcb *pcb) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { +#if !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER + if (conn->current_msg && conn->current_msg->msg.sd.polls_left) { + conn->current_msg->msg.sd.polls_left--; + } +#endif /* !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER */ + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + /* @todo: implement connect timeout here? */ + + /* Did a nonblocking write fail before? Then check available write-space. */ + if (conn->flags & NETCONN_FLAG_CHECK_WRITESPACE) { + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + netconn_clear_flags(conn, NETCONN_FLAG_CHECK_WRITESPACE); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } + + return ERR_OK; +} + +/** + * Sent callback function for TCP netconns. + * Signals the conn->sem and calls API_EVENT. + * netconn_write waits for conn->sem if send buffer is low. + * + * @see tcp.h (struct tcp_pcb.sent) for parameters and return value + */ +static err_t +sent_tcp(void *arg, struct tcp_pcb *pcb, u16_t len) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn) { + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + netconn_clear_flags(conn, NETCONN_FLAG_CHECK_WRITESPACE); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, len); + } + } + + return ERR_OK; +} + +/** + * Error callback function for TCP netconns. + * Signals conn->sem, posts to all conn mboxes and calls API_EVENT. + * The application thread has then to decide what to do. + * + * @see tcp.h (struct tcp_pcb.err) for parameters + */ +static void +err_tcp(void *arg, err_t err) +{ + struct netconn *conn; + enum netconn_state old_state; + void *mbox_msg; + SYS_ARCH_DECL_PROTECT(lev); + + conn = (struct netconn *)arg; + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + SYS_ARCH_PROTECT(lev); + + /* when err is called, the pcb is deallocated, so delete the reference */ + conn->pcb.tcp = NULL; + /* store pending error */ + conn->pending_err = err; + /* prevent application threads from blocking on 'recvmbox'/'acceptmbox' */ + conn->flags |= NETCONN_FLAG_MBOXCLOSED; + + /* reset conn->state now before waking up other threads */ + old_state = conn->state; + conn->state = NETCONN_NONE; + + SYS_ARCH_UNPROTECT(lev); + + /* Notify the user layer about a connection error. Used to signal select. */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + /* Try to release selects pending on 'read' or 'write', too. + They will get an error if they actually try to read or write. 
*/ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + mbox_msg = lwip_netconn_err_to_msg(err); + /* pass error message to recvmbox to wake up pending recv */ + if (NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { + /* use trypost to prevent deadlock */ + sys_mbox_trypost(&conn->recvmbox, mbox_msg); + } + /* pass error message to acceptmbox to wake up pending accept */ + if (NETCONN_MBOX_VALID(conn, &conn->acceptmbox)) { + /* use trypost to preven deadlock */ + sys_mbox_trypost(&conn->acceptmbox, mbox_msg); + } + + if ((old_state == NETCONN_WRITE) || (old_state == NETCONN_CLOSE) || + (old_state == NETCONN_CONNECT)) { + /* calling lwip_netconn_do_writemore/lwip_netconn_do_close_internal is not necessary + since the pcb has already been deleted! */ + int was_nonblocking_connect = IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + + if (!was_nonblocking_connect) { + sys_sem_t *op_completed_sem; + /* set error return code */ + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + if (old_state == NETCONN_CLOSE) { + /* let close succeed: the connection is closed after all... */ + conn->current_msg->err = ERR_OK; + } else { + /* Write and connect fail */ + conn->current_msg->err = err; + } + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + LWIP_ASSERT("inavlid op_completed_sem", sys_sem_valid(op_completed_sem)); + conn->current_msg = NULL; + /* wake up the waiting task */ + sys_sem_signal(op_completed_sem); + } else { + /* @todo: test what happens for error on nonblocking connect */ + } + } else { + LWIP_ASSERT("conn->current_msg == NULL", conn->current_msg == NULL); + } +} + +/** + * Setup a tcp_pcb with the correct callback function pointers + * and their arguments. + * + * @param conn the TCP netconn to setup + */ +static void +setup_tcp(struct netconn *conn) +{ + struct tcp_pcb *pcb; + + pcb = conn->pcb.tcp; + tcp_arg(pcb, conn); + tcp_recv(pcb, recv_tcp); + tcp_sent(pcb, sent_tcp); + tcp_poll(pcb, poll_tcp, NETCONN_TCP_POLL_INTERVAL); + tcp_err(pcb, err_tcp); +} + +/** + * Accept callback function for TCP netconns. + * Allocates a new netconn and posts that to conn->acceptmbox. + * + * @see tcp.h (struct tcp_pcb_listen.accept) for parameters and return value + */ +static err_t +accept_function(void *arg, struct tcp_pcb *newpcb, err_t err) +{ + struct netconn *newconn; + struct netconn *conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + if (!NETCONN_MBOX_VALID(conn, &conn->acceptmbox)) { + LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: acceptmbox already deleted\n")); + return ERR_VAL; + } + + if (newpcb == NULL) { + /* out-of-pcbs during connect: pass on this error to the application */ + if (sys_mbox_trypost(&conn->acceptmbox, lwip_netconn_err_to_msg(ERR_ABRT)) == ERR_OK) { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + return ERR_VAL; + } + LWIP_ASSERT("expect newpcb == NULL or err == ERR_OK", err == ERR_OK); + LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */ + + LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: newpcb->state: %s\n", tcp_debug_state_str(newpcb->state))); + + /* We have to set the callback here even though + * the new socket is unknown. newconn->socket is marked as -1. 
*/ + newconn = netconn_alloc(conn->type, conn->callback); + if (newconn == NULL) { + /* outof netconns: pass on this error to the application */ + if (sys_mbox_trypost(&conn->acceptmbox, lwip_netconn_err_to_msg(ERR_ABRT)) == ERR_OK) { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + return ERR_MEM; + } + newconn->pcb.tcp = newpcb; + setup_tcp(newconn); + + /* handle backlog counter */ + tcp_backlog_delayed(newpcb); + + if (sys_mbox_trypost(&conn->acceptmbox, newconn) != ERR_OK) { + /* When returning != ERR_OK, the pcb is aborted in tcp_process(), + so do nothing here! */ + /* remove all references to this netconn from the pcb */ + struct tcp_pcb *pcb = newconn->pcb.tcp; + tcp_arg(pcb, NULL); + tcp_recv(pcb, NULL); + tcp_sent(pcb, NULL); + tcp_poll(pcb, NULL, 0); + tcp_err(pcb, NULL); + /* remove reference from to the pcb from this netconn */ + newconn->pcb.tcp = NULL; + /* no need to drain since we know the recvmbox is empty. */ + sys_mbox_free(&newconn->recvmbox); + sys_mbox_set_invalid(&newconn->recvmbox); + netconn_free(newconn); + return ERR_MEM; + } else { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Create a new pcb of a specific type. + * Called from lwip_netconn_do_newconn(). + * + * @param msg the api_msg describing the connection type + */ +static void +pcb_new(struct api_msg *msg) +{ + enum lwip_ip_addr_type iptype = IPADDR_TYPE_V4; + + LWIP_ASSERT("pcb_new: pcb already allocated", msg->conn->pcb.tcp == NULL); + +#if LWIP_IPV6 && LWIP_IPV4 + /* IPv6: Dual-stack by default, unless netconn_set_ipv6only() is called */ + if (NETCONNTYPE_ISIPV6(netconn_type(msg->conn))) { + iptype = IPADDR_TYPE_ANY; + } +#endif + + /* Allocate a PCB for this connection */ + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + msg->conn->pcb.raw = raw_new_ip_type(iptype, msg->msg.n.proto); + if (msg->conn->pcb.raw != NULL) { +#if LWIP_IPV6 + /* ICMPv6 packets should always have checksum calculated by the stack as per RFC 3542 chapter 3.1 */ + if (NETCONNTYPE_ISIPV6(msg->conn->type) && msg->conn->pcb.raw->protocol == IP6_NEXTH_ICMP6) { + msg->conn->pcb.raw->chksum_reqd = 1; + msg->conn->pcb.raw->chksum_offset = 2; + } +#endif /* LWIP_IPV6 */ + raw_recv(msg->conn->pcb.raw, recv_raw, msg->conn); + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp = udp_new_ip_type(iptype); + if (msg->conn->pcb.udp != NULL) { +#if LWIP_UDPLITE + if (NETCONNTYPE_ISUDPLITE(msg->conn->type)) { + udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_UDPLITE); + } +#endif /* LWIP_UDPLITE */ + if (NETCONNTYPE_ISUDPNOCHKSUM(msg->conn->type)) { + udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } + udp_recv(msg->conn->pcb.udp, recv_udp, msg->conn); + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + msg->conn->pcb.tcp = tcp_new_ip_type(iptype); + if (msg->conn->pcb.tcp != NULL) { + setup_tcp(msg->conn); + } + break; +#endif /* LWIP_TCP */ + default: + /* Unsupported netconn type, e.g. protocol disabled */ + msg->err = ERR_VAL; + return; + } + if (msg->conn->pcb.ip == NULL) { + msg->err = ERR_MEM; + } +} + +/** + * Create a new pcb of a specific type inside a netconn. + * Called from netconn_new_with_proto_and_callback. 
+ * + * @param m the api_msg describing the connection type + */ +void +lwip_netconn_do_newconn(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp == NULL) { + pcb_new(msg); + } + /* Else? This "new" connection already has a PCB allocated. */ + /* Is this an error condition? Should it be deleted? */ + /* We currently just are happy and return. */ + + TCPIP_APIMSG_ACK(msg); +} + +/** + * Create a new netconn (of a specific type) that has a callback function. + * The corresponding pcb is NOT created! + * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn * +netconn_alloc(enum netconn_type t, netconn_callback callback) +{ + struct netconn *conn; + int size; + u8_t init_flags = 0; + + conn = (struct netconn *)memp_malloc(MEMP_NETCONN); + if (conn == NULL) { + return NULL; + } + + conn->pending_err = ERR_OK; + conn->type = t; + conn->pcb.tcp = NULL; + + /* If all sizes are the same, every compiler should optimize this switch to nothing */ + switch (NETCONNTYPE_GROUP(t)) { +#if LWIP_RAW + case NETCONN_RAW: + size = DEFAULT_RAW_RECVMBOX_SIZE; + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + size = DEFAULT_UDP_RECVMBOX_SIZE; +#if LWIP_NETBUF_RECVINFO + init_flags |= NETCONN_FLAG_PKTINFO; +#endif /* LWIP_NETBUF_RECVINFO */ + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + size = DEFAULT_TCP_RECVMBOX_SIZE; + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("netconn_alloc: undefined netconn_type", 0); + goto free_and_return; + } + + if (sys_mbox_new(&conn->recvmbox, size) != ERR_OK) { + goto free_and_return; + } +#if !LWIP_NETCONN_SEM_PER_THREAD + if (sys_sem_new(&conn->op_completed, 0) != ERR_OK) { + sys_mbox_free(&conn->recvmbox); + goto free_and_return; + } +#endif + +#if LWIP_TCP + sys_mbox_set_invalid(&conn->acceptmbox); +#endif + conn->state = NETCONN_NONE; +#if LWIP_SOCKET + /* initialize socket to -1 since 0 is a valid socket */ + conn->socket = -1; +#endif /* LWIP_SOCKET */ + conn->callback = callback; +#if LWIP_TCP + conn->current_msg = NULL; +#endif /* LWIP_TCP */ +#if LWIP_SO_SNDTIMEO + conn->send_timeout = 0; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + conn->recv_timeout = 0; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + conn->recv_bufsize = RECV_BUFSIZE_DEFAULT; + conn->recv_avail = 0; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + conn->linger = -1; +#endif /* LWIP_SO_LINGER */ + conn->flags = init_flags; + return conn; +free_and_return: + memp_free(MEMP_NETCONN, conn); + return NULL; +} + +/** + * Delete a netconn and all its resources. + * The pcb is NOT freed (since we might not be in the right thread context do this). 
+ * + * @param conn the netconn to free + */ +void +netconn_free(struct netconn *conn) +{ + LWIP_ASSERT("PCB must be deallocated outside this function", conn->pcb.tcp == NULL); + +#if LWIP_NETCONN_FULLDUPLEX + /* in fullduplex, netconn is drained here */ + netconn_drain(conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + LWIP_ASSERT("recvmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("acceptmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&conn->op_completed); + sys_sem_set_invalid(&conn->op_completed); +#endif + + memp_free(MEMP_NETCONN, conn); +} + +/** + * Delete rcvmbox and acceptmbox of a netconn and free the left-over data in + * these mboxes + * + * @param conn the netconn to free + * @bytes_drained bytes drained from recvmbox + * @accepts_drained pending connections drained from acceptmbox + */ +static void +netconn_drain(struct netconn *conn) +{ + void *mem; + + /* This runs when mbox and netconn are marked as closed, + so we don't need to lock against rx packets */ +#if LWIP_NETCONN_FULLDUPLEX + LWIP_ASSERT("netconn marked closed", conn->flags & NETCONN_FLAG_MBOXINVALID); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + /* Delete and drain the recvmbox. */ + if (sys_mbox_valid(&conn->recvmbox)) { + while (sys_mbox_tryfetch(&conn->recvmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_NETCONN_FULLDUPLEX + if (!lwip_netconn_is_deallocated_msg(mem)) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { +#if LWIP_TCP + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) { + err_t err; + if (!lwip_netconn_is_err_msg(mem, &err)) { + pbuf_free((struct pbuf *)mem); + } + } else +#endif /* LWIP_TCP */ + { + netbuf_delete((struct netbuf *)mem); + } + } + } + sys_mbox_free(&conn->recvmbox); + sys_mbox_set_invalid(&conn->recvmbox); + } + + /* Delete and drain the acceptmbox. */ +#if LWIP_TCP + if (sys_mbox_valid(&conn->acceptmbox)) { + while (sys_mbox_tryfetch(&conn->acceptmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_NETCONN_FULLDUPLEX + if (!lwip_netconn_is_deallocated_msg(mem)) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + err_t err; + if (!lwip_netconn_is_err_msg(mem, &err)) { + struct netconn *newconn = (struct netconn *)mem; + /* Only tcp pcbs have an acceptmbox, so no need to check conn->type */ + /* pcb might be set to NULL already by err_tcp() */ + /* drain recvmbox */ + netconn_drain(newconn); + if (newconn->pcb.tcp != NULL) { + tcp_abort(newconn->pcb.tcp); + newconn->pcb.tcp = NULL; + } + netconn_free(newconn); + } + } + } + sys_mbox_free(&conn->acceptmbox); + sys_mbox_set_invalid(&conn->acceptmbox); + } +#endif /* LWIP_TCP */ +} + +#if LWIP_NETCONN_FULLDUPLEX +static void +netconn_mark_mbox_invalid(struct netconn *conn) +{ + int i, num_waiting; + void *msg = LWIP_CONST_CAST(void *, &netconn_deleted); + + /* Prevent new calls/threads from reading from the mbox */ + conn->flags |= NETCONN_FLAG_MBOXINVALID; + + SYS_ARCH_LOCKED(num_waiting = conn->mbox_threads_waiting); + for (i = 0; i < num_waiting; i++) { + if (sys_mbox_valid_val(conn->recvmbox)) { + sys_mbox_trypost(&conn->recvmbox, msg); + } else { + sys_mbox_trypost(&conn->acceptmbox, msg); + } + } +} +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +#if LWIP_TCP +/** + * Internal helper function to close a TCP netconn: since this sometimes + * doesn't work at the first attempt, this function is called from multiple + * places. 
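+ * (initially from the close/delete api_msg handlers, and again from poll_tcp()
+ * or sent_tcp() while a close is still pending)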
+ * + * @param conn the TCP netconn to close + */ +static err_t +lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + u8_t shut, shut_rx, shut_tx, shut_close; + u8_t close_finished = 0; + struct tcp_pcb *tpcb; +#if LWIP_SO_LINGER + u8_t linger_wait_required = 0; +#endif /* LWIP_SO_LINGER */ + + LWIP_ASSERT("invalid conn", (conn != NULL)); + LWIP_ASSERT("this is for tcp netconns only", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)); + LWIP_ASSERT("conn must be in state NETCONN_CLOSE", (conn->state == NETCONN_CLOSE)); + LWIP_ASSERT("pcb already closed", (conn->pcb.tcp != NULL)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + + tpcb = conn->pcb.tcp; + shut = conn->current_msg->msg.sd.shut; + shut_rx = shut & NETCONN_SHUT_RD; + shut_tx = shut & NETCONN_SHUT_WR; + /* shutting down both ends is the same as closing + (also if RD or WR side was shut down before already) */ + if (shut == NETCONN_SHUT_RDWR) { + shut_close = 1; + } else if (shut_rx && + ((tpcb->state == FIN_WAIT_1) || + (tpcb->state == FIN_WAIT_2) || + (tpcb->state == CLOSING))) { + shut_close = 1; + } else if (shut_tx && ((tpcb->flags & TF_RXCLOSED) != 0)) { + shut_close = 1; + } else { + shut_close = 0; + } + + /* Set back some callback pointers */ + if (shut_close) { + tcp_arg(tpcb, NULL); + } + if (tpcb->state == LISTEN) { + tcp_accept(tpcb, NULL); + } else { + /* some callbacks have to be reset if tcp_close is not successful */ + if (shut_rx) { + tcp_recv(tpcb, NULL); + tcp_accept(tpcb, NULL); + } + if (shut_tx) { + tcp_sent(tpcb, NULL); + } + if (shut_close) { + tcp_poll(tpcb, NULL, 0); + tcp_err(tpcb, NULL); + } + } + /* Try to close the connection */ + if (shut_close) { +#if LWIP_SO_LINGER + /* check linger possibilites before calling tcp_close */ + err = ERR_OK; + /* linger enabled/required at all? (i.e. is there untransmitted data left?) */ + if ((conn->linger >= 0) && (conn->pcb.tcp->unsent || conn->pcb.tcp->unacked)) { + if ((conn->linger == 0)) { + /* data left but linger prevents waiting */ + tcp_abort(tpcb); + tpcb = NULL; + } else if (conn->linger > 0) { + /* data left and linger says we should wait */ + if (netconn_is_nonblocking(conn)) { + /* data left on a nonblocking netconn -> cannot linger */ + err = ERR_WOULDBLOCK; + } else if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= + (conn->linger * 1000)) { + /* data left but linger timeout has expired (this happens on further + calls to this function through poll_tcp */ + tcp_abort(tpcb); + tpcb = NULL; + } else { + /* data left -> need to wait for ACK after successful close */ + linger_wait_required = 1; + } + } + } + if ((err == ERR_OK) && (tpcb != NULL)) +#endif /* LWIP_SO_LINGER */ + { + err = tcp_close(tpcb); + } + } else { + err = tcp_shutdown(tpcb, shut_rx, shut_tx); + } + if (err == ERR_OK) { + close_finished = 1; +#if LWIP_SO_LINGER + if (linger_wait_required) { + /* wait for ACK of all unsent/unacked data by just getting called again */ + close_finished = 0; + err = ERR_INPROGRESS; + } +#endif /* LWIP_SO_LINGER */ + } else { + if (err == ERR_MEM) { + /* Closing failed because of memory shortage, try again later. Even for + nonblocking netconns, we have to wait since no standard socket application + is prepared for close failing because of resource shortage. 
+ Check the timeout: this is kind of an lwip addition to the standard sockets: + we wait for some time when failing to allocate a segment for the FIN */ +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + s32_t close_timeout = LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout > 0) { + close_timeout = conn->send_timeout; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_LINGER + if (conn->linger >= 0) { + /* use linger timeout (seconds) */ + close_timeout = conn->linger * 1000U; + } +#endif + if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= close_timeout) { +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + if (conn->current_msg->msg.sd.polls_left == 0) { +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + close_finished = 1; + if (shut_close) { + /* in this case, we want to RST the connection */ + tcp_abort(tpcb); + err = ERR_OK; + } + } + } else { + /* Closing failed for a non-memory error: give up */ + close_finished = 1; + } + } + if (close_finished) { + /* Closing done (succeeded, non-memory error, nonblocking error or timeout) */ + sys_sem_t *op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + if (err == ERR_OK) { + if (shut_close) { + /* Set back some callback pointers as conn is going away */ + conn->pcb.tcp = NULL; + /* Trigger select() in socket layer. Make sure everybody notices activity + on the connection, error first! */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + } + if (shut_rx) { + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + if (shut_tx) { + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + /* wake up the application task */ + sys_sem_signal(op_completed_sem); + } + return ERR_OK; + } + if (!close_finished) { + /* Closing failed and we want to wait: restore some of the callbacks */ + /* Closing of listen pcb will never fail! */ + LWIP_ASSERT("Closing a listen pcb may not fail!", (tpcb->state != LISTEN)); + if (shut_tx) { + tcp_sent(tpcb, sent_tcp); + } + /* when waiting for close, set up poll interval to 500ms */ + tcp_poll(tpcb, poll_tcp, 1); + tcp_err(tpcb, err_tcp); + tcp_arg(tpcb, conn); + /* don't restore recv callback: we don't want to receive any more data */ + } + /* If closing didn't succeed, we get called again either + from poll_tcp or from sent_tcp */ + LWIP_ASSERT("err != ERR_OK", err != ERR_OK); + return err; +} +#endif /* LWIP_TCP */ + +/** + * Delete the pcb inside a netconn. + * Called from netconn_delete. 
+ * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_delconn(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + enum netconn_state state = msg->conn->state; + LWIP_ASSERT("netconn state error", /* this only happens for TCP netconns */ + (state == NETCONN_NONE) || (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP)); +#if LWIP_NETCONN_FULLDUPLEX + /* In full duplex mode, blocking write/connect is aborted with ERR_CLSD */ + if (state != NETCONN_NONE) { + if ((state == NETCONN_WRITE) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* close requested, abort running write/connect */ + sys_sem_t *op_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + op_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->state = NETCONN_NONE; + sys_sem_signal(op_completed_sem); + } + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + if (((state != NETCONN_NONE) && + (state != NETCONN_LISTEN) && + (state != NETCONN_CONNECT)) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* This means either a blocking write or blocking connect is running + (nonblocking write returns and sets state to NONE) */ + msg->err = ERR_INPROGRESS; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + LWIP_ASSERT("blocking connect in progress", + (state != NETCONN_CONNECT) || IN_NONBLOCKING_CONNECT(msg->conn)); + msg->err = ERR_OK; +#if LWIP_NETCONN_FULLDUPLEX + /* Mark mboxes invalid */ + netconn_mark_mbox_invalid(msg->conn); +#else /* LWIP_NETCONN_FULLDUPLEX */ + netconn_drain(msg->conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + if (msg->conn->pcb.tcp != NULL) { + + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_remove(msg->conn->pcb.raw); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp->recv_arg = NULL; + udp_remove(msg->conn->pcb.udp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + msg->conn->state = NETCONN_CLOSE; + msg->msg.sd.shut = NETCONN_SHUT_RDWR; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* API_EVENT is called inside lwip_netconn_do_close_internal, before releasing + the application thread, so we can return at this point! */ + return; +#endif /* LWIP_TCP */ + default: + break; + } + msg->conn->pcb.tcp = NULL; + } + /* tcp netconns don't come here! */ + + /* @todo: this lets select make the socket readable and writable, + which is wrong! errfd instead? */ + API_EVENT(msg->conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(msg->conn, NETCONN_EVT_SENDPLUS, 0); + } + if (sys_sem_valid(LWIP_API_MSG_SEM(msg))) { + TCPIP_APIMSG_ACK(msg); + } +} + +/** + * Bind a pcb contained in a netconn + * Called from netconn_bind. 
+ * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + err = raw_bind(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + err = udp_bind(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + err = tcp_bind(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_TCP */ + default: + err = ERR_VAL; + break; + } + } else { + err = ERR_VAL; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} +/** + * Bind a pcb contained in a netconn to an interface + * Called from netconn_bind_if. + * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind_if(void *m) +{ + struct netif *netif; + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + netif = netif_get_by_index(msg->msg.bc.if_idx); + + if ((netif != NULL) && (msg->conn->pcb.tcp != NULL)) { + err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_bind_netif(msg->conn->pcb.raw, netif); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + udp_bind_netif(msg->conn->pcb.udp, netif); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + tcp_bind_netif(msg->conn->pcb.tcp, netif); + break; +#endif /* LWIP_TCP */ + default: + err = ERR_VAL; + break; + } + } else { + err = ERR_VAL; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * TCP callback function if a connection (opened by tcp_connect/lwip_netconn_do_connect) has + * been established (or reset by the remote host). + * + * @see tcp.h (struct tcp_pcb.connected) for parameters and return values + */ +static err_t +lwip_netconn_do_connected(void *arg, struct tcp_pcb *pcb, err_t err) +{ + struct netconn *conn; + int was_blocking; + sys_sem_t *op_completed_sem = NULL; + + LWIP_UNUSED_ARG(pcb); + + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + + LWIP_ASSERT("conn->state == NETCONN_CONNECT", conn->state == NETCONN_CONNECT); + LWIP_ASSERT("(conn->current_msg != NULL) || conn->in_non_blocking_connect", + (conn->current_msg != NULL) || IN_NONBLOCKING_CONNECT(conn)); + + if (conn->current_msg != NULL) { + conn->current_msg->err = err; + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + } + if ((NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) && (err == ERR_OK)) { + setup_tcp(conn); + } + was_blocking = !IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + LWIP_ASSERT("blocking connect state error", + (was_blocking && op_completed_sem != NULL) || + (!was_blocking && op_completed_sem == NULL)); + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + if (was_blocking) { + sys_sem_signal(op_completed_sem); + } + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Connect a pcb contained inside a netconn + * Called from netconn_connect. 
+ * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to connect to + */ +void +lwip_netconn_do_connect(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp == NULL) { + /* This may happen when calling netconn_connect() a second time */ + err = ERR_CLSD; + } else { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + err = raw_connect(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + err = udp_connect(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + /* Prevent connect while doing any other action. */ + if (msg->conn->state == NETCONN_CONNECT) { + err = ERR_ALREADY; + } else if (msg->conn->state != NETCONN_NONE) { + err = ERR_ISCONN; + } else { + setup_tcp(msg->conn); + err = tcp_connect(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), + msg->msg.bc.port, lwip_netconn_do_connected); + if (err == ERR_OK) { + u8_t non_blocking = netconn_is_nonblocking(msg->conn); + msg->conn->state = NETCONN_CONNECT; + SET_NONBLOCKING_CONNECT(msg->conn, non_blocking); + if (non_blocking) { + err = ERR_INPROGRESS; + } else { + msg->conn->current_msg = msg; + /* sys_sem_signal() is called from lwip_netconn_do_connected (or err_tcp()), + when the connection is established! */ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CONNECT); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_CONNECT); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + return; + } + } + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ERROR("Invalid netconn type", 0, do { + err = ERR_VAL; + } while (0)); + break; + } + } + msg->err = err; + /* For all other protocols, netconn_connect() calls netconn_apimsg(), + so use TCPIP_APIMSG_ACK() here. */ + TCPIP_APIMSG_ACK(msg); +} + +/** + * Disconnect a pcb contained inside a netconn + * Only used for UDP netconns. + * Called from netconn_disconnect. + * + * @param m the api_msg pointing to the connection to disconnect + */ +void +lwip_netconn_do_disconnect(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + +#if LWIP_UDP + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { + udp_disconnect(msg->conn->pcb.udp); + msg->err = ERR_OK; + } else +#endif /* LWIP_UDP */ + { + msg->err = ERR_VAL; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Set a TCP pcb contained in a netconn into listen mode + * Called from netconn_listen. 
+ * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_listen(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + if (msg->conn->state == NETCONN_NONE) { + struct tcp_pcb *lpcb; + if (msg->conn->pcb.tcp->state != CLOSED) { + /* connection is not closed, cannot listen */ + err = ERR_VAL; + } else { + u8_t backlog; +#if TCP_LISTEN_BACKLOG + backlog = msg->msg.lb.backlog; +#else /* TCP_LISTEN_BACKLOG */ + backlog = TCP_DEFAULT_LISTEN_BACKLOG; +#endif /* TCP_LISTEN_BACKLOG */ +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to listen to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is NOT set, use IP_ANY_TYPE to listen + */ + if (ip_addr_cmp(&msg->conn->pcb.ip->local_ip, IP6_ADDR_ANY) && + (netconn_get_ipv6only(msg->conn) == 0)) { + /* change PCB type to IPADDR_TYPE_ANY */ + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->local_ip, IPADDR_TYPE_ANY); + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->remote_ip, IPADDR_TYPE_ANY); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + lpcb = tcp_listen_with_backlog_and_err(msg->conn->pcb.tcp, backlog, &err); + + if (lpcb == NULL) { + /* in this case, the old pcb is still allocated */ + } else { + /* delete the recvmbox and allocate the acceptmbox */ + if (sys_mbox_valid(&msg->conn->recvmbox)) { + /** @todo: should we drain the recvmbox here? */ + sys_mbox_free(&msg->conn->recvmbox); + sys_mbox_set_invalid(&msg->conn->recvmbox); + } + err = ERR_OK; + if (!sys_mbox_valid(&msg->conn->acceptmbox)) { + err = sys_mbox_new(&msg->conn->acceptmbox, DEFAULT_ACCEPTMBOX_SIZE); + } + if (err == ERR_OK) { + msg->conn->state = NETCONN_LISTEN; + msg->conn->pcb.tcp = lpcb; + tcp_arg(msg->conn->pcb.tcp, msg->conn); + tcp_accept(msg->conn->pcb.tcp, accept_function); + } else { + /* since the old pcb is already deallocated, free lpcb now */ + tcp_close(lpcb); + msg->conn->pcb.tcp = NULL; + } + } + } + } else if (msg->conn->state == NETCONN_LISTEN) { + /* already listening, allow updating of the backlog */ + err = ERR_OK; + tcp_backlog_set(msg->conn->pcb.tcp, msg->msg.lb.backlog); + } else { + err = ERR_CONN; + } + } else { + err = ERR_ARG; + } + } else { + err = ERR_CONN; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a RAW or UDP pcb contained in a netconn + * Called from netconn_send + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_send(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + err_t err = netconn_err(msg->conn); + if (err == ERR_OK) { + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = raw_send(msg->conn->pcb.raw, msg->msg.b->p); + } else { + err = raw_sendto(msg->conn->pcb.raw, msg->msg.b->p, &msg->msg.b->addr); + } + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: +#if LWIP_CHECKSUM_ON_COPY + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = udp_send_chksum(msg->conn->pcb.udp, msg->msg.b->p, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } else { + err = udp_sendto_chksum(msg->conn->pcb.udp, msg->msg.b->p, + &msg->msg.b->addr, msg->msg.b->port, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } +#else /* LWIP_CHECKSUM_ON_COPY */ + if (ip_addr_isany_val(msg->msg.b->addr) || 
IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = udp_send(msg->conn->pcb.udp, msg->msg.b->p); + } else { + err = udp_sendto(msg->conn->pcb.udp, msg->msg.b->p, &msg->msg.b->addr, msg->msg.b->port); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + break; +#endif /* LWIP_UDP */ + default: + err = ERR_CONN; + break; + } + } else { + err = ERR_CONN; + } + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Indicate data has been received from a TCP pcb contained in a netconn + * Called from netconn_recv + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_recv(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + size_t remaining = msg->msg.r.len; + do { + u16_t recved = (u16_t)((remaining > 0xffff) ? 0xffff : remaining); + tcp_recved(msg->conn->pcb.tcp, recved); + remaining -= recved; + } while (remaining != 0); + } + } + TCPIP_APIMSG_ACK(msg); +} + +#if TCP_LISTEN_BACKLOG +/** Indicate that a TCP pcb has been accepted + * Called from netconn_accept + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_accepted(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + tcp_backlog_accepted(msg->conn->pcb.tcp); + } + } + TCPIP_APIMSG_ACK(msg); +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * See if more data needs to be written from a previous call to netconn_write. + * Called initially from lwip_netconn_do_write. If the first call can't send all data + * (because of low memory or empty send-buffer), this function is called again + * from sent_tcp() or poll_tcp() to send more data. If all data is sent, the + * blocking application thread (waiting in netconn_write) is released. 
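/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * A minimal TCP server shows how netconn_listen(), netconn_accept() and
 * netconn_recv() end up in lwip_netconn_do_listen(), lwip_netconn_do_accepted()
 * and lwip_netconn_do_recv() above. Port 7 and the fixed "ok\n" reply are
 * example choices only.
 */
#include "lwip/api.h"

static void tcp_server_example(void)
{
  struct netconn *listener = netconn_new(NETCONN_TCP);
  if (listener == NULL) {
    return;
  }
  if ((netconn_bind(listener, IP_ADDR_ANY, 7) == ERR_OK) &&
      (netconn_listen(listener) == ERR_OK)) {             /* -> lwip_netconn_do_listen */
    struct netconn *client;
    while (netconn_accept(listener, &client) == ERR_OK) { /* -> lwip_netconn_do_accepted */
      struct netbuf *buf;
      while (netconn_recv(client, &buf) == ERR_OK) {      /* -> lwip_netconn_do_recv */
        netconn_write(client, "ok\n", 3, NETCONN_COPY);
        netbuf_delete(buf);
      }
      netconn_close(client);
      netconn_delete(client);
    }
  }
  netconn_delete(listener);
}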
+ * + * @param conn netconn (that is currently in state NETCONN_WRITE) to process + * @return ERR_OK + * ERR_MEM if LWIP_TCPIP_CORE_LOCKING=1 and sending hasn't yet finished + */ +static err_t +lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + const void *dataptr; + u16_t len, available; + u8_t write_finished = 0; + size_t diff; + u8_t dontblock; + u8_t apiflags; + u8_t write_more; + + LWIP_ASSERT("conn != NULL", conn != NULL); + LWIP_ASSERT("conn->state == NETCONN_WRITE", (conn->state == NETCONN_WRITE)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + LWIP_ASSERT("conn->pcb.tcp != NULL", conn->pcb.tcp != NULL); + LWIP_ASSERT("conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len", + conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len); + LWIP_ASSERT("conn->current_msg->msg.w.vector_cnt > 0", conn->current_msg->msg.w.vector_cnt > 0); + + apiflags = conn->current_msg->msg.w.apiflags; + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); + +#if LWIP_SO_SNDTIMEO + if ((conn->send_timeout != 0) && + ((s32_t)(sys_now() - conn->current_msg->msg.w.time_started) >= conn->send_timeout)) { + write_finished = 1; + if (conn->current_msg->msg.w.offset == 0) { + /* nothing has been written */ + err = ERR_WOULDBLOCK; + } else { + /* partial write */ + err = ERR_OK; + } + } else +#endif /* LWIP_SO_SNDTIMEO */ + { + do { + dataptr = (const u8_t *)conn->current_msg->msg.w.vector->ptr + conn->current_msg->msg.w.vector_off; + diff = conn->current_msg->msg.w.vector->len - conn->current_msg->msg.w.vector_off; + if (diff > 0xffffUL) { /* max_u16_t */ + len = 0xffff; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + len = (u16_t)diff; + } + available = tcp_sndbuf(conn->pcb.tcp); + if (available < len) { + /* don't try to write more than sendbuf */ + len = available; + if (dontblock) { + if (!len) { + /* set error according to partial write or not */ + err = (conn->current_msg->msg.w.offset == 0) ? ERR_WOULDBLOCK : ERR_OK; + goto err_mem; + } + } else { + apiflags |= TCP_WRITE_FLAG_MORE; + } + } + LWIP_ASSERT("lwip_netconn_do_writemore: invalid length!", + ((conn->current_msg->msg.w.vector_off + len) <= conn->current_msg->msg.w.vector->len)); + /* we should loop around for more sending in the following cases: + 1) We couldn't finish the current vector because of 16-bit size limitations. 
+ tcp_write() and tcp_sndbuf() both are limited to 16-bit sizes + 2) We are sending the remainder of the current vector and have more */ + if ((len == 0xffff && diff > 0xffffUL) || + (len == (u16_t)diff && conn->current_msg->msg.w.vector_cnt > 1)) { + write_more = 1; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + write_more = 0; + } + err = tcp_write(conn->pcb.tcp, dataptr, len, apiflags); + if (err == ERR_OK) { + conn->current_msg->msg.w.offset += len; + conn->current_msg->msg.w.vector_off += len; + /* check if current vector is finished */ + if (conn->current_msg->msg.w.vector_off == conn->current_msg->msg.w.vector->len) { + conn->current_msg->msg.w.vector_cnt--; + /* if we have additional vectors, move on to them */ + if (conn->current_msg->msg.w.vector_cnt > 0) { + conn->current_msg->msg.w.vector++; + conn->current_msg->msg.w.vector_off = 0; + } + } + } + } while (write_more && err == ERR_OK); + /* if OK or memory error, check available space */ + if ((err == ERR_OK) || (err == ERR_MEM)) { +err_mem: + if (dontblock && (conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len)) { + /* non-blocking write did not write everything: mark the pcb non-writable + and let poll_tcp check writable space to mark the pcb writable again */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, 0); + conn->flags |= NETCONN_FLAG_CHECK_WRITESPACE; + } else if ((tcp_sndbuf(conn->pcb.tcp) <= TCP_SNDLOWAT) || + (tcp_sndqueuelen(conn->pcb.tcp) >= TCP_SNDQUEUELOWAT)) { + /* The queued byte- or pbuf-count exceeds the configured low-water limit, + let select mark this pcb as non-writable. */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, 0); + } + } + + if (err == ERR_OK) { + err_t out_err; + if ((conn->current_msg->msg.w.offset == conn->current_msg->msg.w.len) || dontblock) { + /* return sent length (caller reads length from msg.w.offset) */ + write_finished = 1; + } + out_err = tcp_output(conn->pcb.tcp); + if (out_err == ERR_RTE) { + /* If tcp_output fails because no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + } + } else if (err == ERR_MEM) { + /* If ERR_MEM, we wait for sent_tcp or poll_tcp to be called. + For blocking sockets, we do NOT return to the application + thread, since ERR_MEM is only a temporary error! Non-blocking + will remain non-writable until sent_tcp/poll_tcp is called */ + + /* tcp_write returned ERR_MEM, try tcp_output anyway */ + err_t out_err = tcp_output(conn->pcb.tcp); + if (out_err == ERR_RTE) { + /* If tcp_output fails because no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + } else if (dontblock) { + /* non-blocking write is done on ERR_MEM, set error according + to partial write or not */ + err = (conn->current_msg->msg.w.offset == 0) ? ERR_WOULDBLOCK : ERR_OK; + write_finished = 1; + } + } else { + /* On errors != ERR_MEM, we don't try writing any more but return + the error to the application thread. 
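/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * The chunking rules used by lwip_netconn_do_writemore() above, pulled out into
 * a stand-alone helper for readability: one call to tcp_write() never exceeds
 * 0xffff bytes (tcp_write()/tcp_sndbuf() are 16-bit) nor the free send-buffer
 * space, and TCP_WRITE_FLAG_MORE is set whenever more data is still to come.
 * The helper name and struct are hypothetical; the real function also handles
 * vectors, non-blocking sockets and timeouts.
 */
#include "lwip/opt.h"

struct write_chunk {
  u16_t len;   /* number of bytes to pass to tcp_write() now */
  u8_t  more;  /* nonzero if TCP_WRITE_FLAG_MORE should be set */
};

static struct write_chunk plan_write_chunk(size_t remaining_in_vector, u16_t sndbuf_space)
{
  struct write_chunk c;
  c.more = 0;
  if (remaining_in_vector > 0xffffUL) {
    c.len = 0xffff;          /* 16-bit API limit, the rest of this vector follows later */
    c.more = 1;
  } else {
    c.len = (u16_t)remaining_in_vector;
  }
  if (sndbuf_space < c.len) {
    c.len = sndbuf_space;    /* never queue more than tcp_sndbuf() allows */
    c.more = 1;              /* a blocking write continues once space frees up */
  }
  return c;
}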
*/ + write_finished = 1; + } + } + if (write_finished) { + /* everything was written: set back connection state + and back to application task */ + sys_sem_t *op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + sys_sem_signal(op_completed_sem); + } + } +#if LWIP_TCPIP_CORE_LOCKING + else { + return ERR_MEM; + } +#endif + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a TCP pcb contained in a netconn + * Called from netconn_write + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_write(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + err_t err = netconn_err(msg->conn); + if (err == ERR_OK) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { +#if LWIP_TCP + if (msg->conn->state != NETCONN_NONE) { + /* netconn is connecting, closing or in blocking write */ + err = ERR_INPROGRESS; + } else if (msg->conn->pcb.tcp != NULL) { + msg->conn->state = NETCONN_WRITE; + /* set all the variables used by lwip_netconn_do_writemore */ + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + LWIP_ASSERT("msg->msg.w.len != 0", msg->msg.w.len != 0); + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_writemore(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_WRITE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_WRITE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_writemore(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for both cases: if lwip_netconn_do_writemore was called, don't ACK the APIMSG + since lwip_netconn_do_writemore ACKs it! 
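/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * From the application side the whole machinery above hides behind
 * netconn_write(): on a blocking netconn the calling thread is parked until
 * lwip_netconn_do_writemore() has queued everything (or failed), while a
 * non-blocking netconn returns ERR_WOULDBLOCK when nothing could be queued.
 * The connection handle and payload are assumed to exist already.
 */
#include "lwip/api.h"

static err_t send_all_example(struct netconn *conn, const void *data, size_t len)
{
  /* NETCONN_COPY copies the payload into the stack's own buffers, so the
     caller may reuse 'data' as soon as the call returns. */
  return netconn_write(conn, data, len, NETCONN_COPY);
}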
*/ + return; + } else { + err = ERR_CONN; + } +#else /* LWIP_TCP */ + err = ERR_VAL; +#endif /* LWIP_TCP */ +#if (LWIP_UDP || LWIP_RAW) + } else { + err = ERR_VAL; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +/** + * Return a connection's local or remote address + * Called from netconn_getaddr + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_getaddr(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + if (msg->conn->pcb.ip != NULL) { + if (msg->msg.ad.local) { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->local_ip); + } else { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->remote_ip); + } + + msg->err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.raw->protocol; + } else { + /* return an error as connecting is only a helper for upper layers */ + msg->err = ERR_CONN; + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->local_port; + } else { + if ((msg->conn->pcb.udp->flags & UDP_FLAGS_CONNECTED) == 0) { + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->remote_port; + } + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + if ((msg->msg.ad.local == 0) && + ((msg->conn->pcb.tcp->state == CLOSED) || (msg->conn->pcb.tcp->state == LISTEN))) { + /* pcb is not connected and remote name is requested */ + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = (msg->msg.ad.local ? msg->conn->pcb.tcp->local_port : msg->conn->pcb.tcp->remote_port); + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("invalid netconn_type", 0); + break; + } + } else { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +/** + * Close or half-shutdown a TCP pcb contained in a netconn + * Called from netconn_close + * In contrast to closing sockets, the netconn is not deallocated. + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_close(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + +#if LWIP_TCP + enum netconn_state state = msg->conn->state; + /* First check if this is a TCP netconn and if it is in a correct state + (LISTEN doesn't support half shutdown) */ + if ((msg->conn->pcb.tcp != NULL) && + (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) && + ((msg->msg.sd.shut == NETCONN_SHUT_RDWR) || (state != NETCONN_LISTEN))) { + /* Check if we are in a connected state */ + if (state == NETCONN_CONNECT) { + /* TCP connect in progress: cannot shutdown */ + msg->err = ERR_CONN; + } else if (state == NETCONN_WRITE) { +#if LWIP_NETCONN_FULLDUPLEX + if (msg->msg.sd.shut & NETCONN_SHUT_WR) { + /* close requested, abort running write */ + sys_sem_t *write_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + write_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->state = NETCONN_NONE; + state = NETCONN_NONE; + sys_sem_signal(write_completed_sem); + } else { + LWIP_ASSERT("msg->msg.sd.shut == NETCONN_SHUT_RD", msg->msg.sd.shut == NETCONN_SHUT_RD); + /* In this case, let the write continue and do not interfere with + conn->current_msg or conn->state! 
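/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * lwip_netconn_do_getaddr() above backs the netconn_addr()/netconn_peer()
 * convenience macros; a debug dump of both endpoints might look like this
 * (printf is only an example sink).
 */
#include <stdio.h>
#include "lwip/api.h"

static void print_endpoints_example(struct netconn *conn)
{
  ip_addr_t addr;
  u16_t port;

  if (netconn_addr(conn, &addr, &port) == ERR_OK) {  /* local end */
    printf("local  %s:%u\n", ipaddr_ntoa(&addr), (unsigned)port);
  }
  if (netconn_peer(conn, &addr, &port) == ERR_OK) {  /* remote end, ERR_CONN if not connected */
    printf("remote %s:%u\n", ipaddr_ntoa(&addr), (unsigned)port);
  }
}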
*/ + msg->err = tcp_shutdown(msg->conn->pcb.tcp, 1, 0); + } + } + if (state == NETCONN_NONE) { +#else /* LWIP_NETCONN_FULLDUPLEX */ + msg->err = ERR_INPROGRESS; + } else { +#endif /* LWIP_NETCONN_FULLDUPLEX */ + if (msg->msg.sd.shut & NETCONN_SHUT_RD) { +#if LWIP_NETCONN_FULLDUPLEX + /* Mark mboxes invalid */ + netconn_mark_mbox_invalid(msg->conn); +#else /* LWIP_NETCONN_FULLDUPLEX */ + netconn_drain(msg->conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + } + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + msg->conn->state = NETCONN_CLOSE; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for tcp netconns, lwip_netconn_do_close_internal ACKs the message */ + return; + } + } else +#endif /* LWIP_TCP */ + { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * Join multicast groups for UDP netconns. + * Called from netconn_join_leave_group + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } + TCPIP_APIMSG_ACK(msg); +} +/** + * Join multicast groups for UDP netconns. 
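/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * lwip_netconn_do_join_leave_group() above is where a UDP netconn ends up when
 * the application joins an IGMP group; the group address 239.0.0.1 is just an
 * example and LWIP_IGMP must be enabled for the call to succeed.
 */
#include "lwip/api.h"

static void igmp_join_example(struct netconn *udp_conn)
{
  ip_addr_t group;

  IP_ADDR4(&group, 239, 0, 0, 1);
  /* IP_ADDR_ANY lets the stack pick the interface for the membership. */
  if (netconn_join_leave_group(udp_conn, &group, IP_ADDR_ANY, NETCONN_JOIN) != ERR_OK) {
    /* joining failed, e.g. not a UDP netconn or IGMP disabled */
  }
}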
+ * Called from netconn_join_leave_group_netif + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group_netif(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + struct netif *netif; + + netif = netif_get_by_index(msg->msg.jl.if_idx); + if (netif == NULL) { + msg->err = ERR_IF; + goto done; + } + + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup_netif(netif, + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup_netif(netif, + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup_netif(netif, + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup_netif(netif, + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } + +done: + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * Callback function that is called when DNS name is resolved + * (or on timeout). A waiting application thread is waked up by + * signaling the semaphore. + */ +static void +lwip_netconn_do_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg *)arg; + + /* we trust the internal implementation to be correct :-) */ + LWIP_UNUSED_ARG(name); + + if (ipaddr == NULL) { + /* timeout or memory error */ + API_EXPR_DEREF(msg->err) = ERR_VAL; + } else { + /* address was resolved */ + API_EXPR_DEREF(msg->err) = ERR_OK; + API_EXPR_DEREF(msg->addr) = *ipaddr; + } + /* wake up the application task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); +} + +/** + * Execute a DNS query + * Called from netconn_gethostbyname + * + * @param arg the dns_api_msg pointing to the query + */ +void +lwip_netconn_do_gethostbyname(void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg *)arg; + u8_t addrtype = +#if LWIP_IPV4 && LWIP_IPV6 + msg->dns_addrtype; +#else + LWIP_DNS_ADDRTYPE_DEFAULT; +#endif + + API_EXPR_DEREF(msg->err) = dns_gethostbyname_addrtype(msg->name, + API_EXPR_REF(msg->addr), lwip_netconn_do_dns_found, msg, addrtype); +#if LWIP_TCPIP_CORE_LOCKING + /* For core locking, only block if we need to wait for answer/timeout */ + if (API_EXPR_DEREF(msg->err) == ERR_INPROGRESS) { + UNLOCK_TCPIP_CORE(); + sys_sem_wait(API_EXPR_REF_SEM(msg->sem)); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("do_gethostbyname still in progress!!", API_EXPR_DEREF(msg->err) != ERR_INPROGRESS); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + if (API_EXPR_DEREF(msg->err) != ERR_INPROGRESS) { + /* on error or immediate success, wake up the application + * task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} +#endif /* LWIP_DNS */ + +#endif /* LWIP_NETCONN */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/err.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/err.c new file mode 100644 index 000000000..dd2b62dab --- /dev/null +++ 
b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/err.c @@ -0,0 +1,115 @@ +/** + * @file + * Error Management module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/err.h" +#include "lwip/def.h" +#include "lwip/sys.h" + +#include "lwip/errno.h" + +#if !NO_SYS +/** Table to quickly map an lwIP error (err_t) to a socket error + * by using -err as an index */ +static const int err_to_errno_table[] = { + 0, /* ERR_OK 0 No error, everything OK. */ + ENOMEM, /* ERR_MEM -1 Out of memory error. */ + ENOBUFS, /* ERR_BUF -2 Buffer error. */ + EWOULDBLOCK, /* ERR_TIMEOUT -3 Timeout */ + EHOSTUNREACH, /* ERR_RTE -4 Routing problem. */ + EINPROGRESS, /* ERR_INPROGRESS -5 Operation in progress */ + EINVAL, /* ERR_VAL -6 Illegal value. */ + EWOULDBLOCK, /* ERR_WOULDBLOCK -7 Operation would block. */ + EADDRINUSE, /* ERR_USE -8 Address in use. */ + EALREADY, /* ERR_ALREADY -9 Already connecting. */ + EISCONN, /* ERR_ISCONN -10 Conn already established.*/ + ENOTCONN, /* ERR_CONN -11 Not connected. */ + -1, /* ERR_IF -12 Low-level netif error */ + ECONNABORTED, /* ERR_ABRT -13 Connection aborted. */ + ECONNRESET, /* ERR_RST -14 Connection reset. */ + ENOTCONN, /* ERR_CLSD -15 Connection closed. */ + EIO /* ERR_ARG -16 Illegal argument. 
*/ +}; + +int +err_to_errno(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_to_errno_table))) { + return EIO; + } + return err_to_errno_table[-err]; +} +#endif /* !NO_SYS */ + +#ifdef LWIP_DEBUG + +static const char *err_strerr[] = { + "Ok.", /* ERR_OK 0 */ + "Out of memory error.", /* ERR_MEM -1 */ + "Buffer error.", /* ERR_BUF -2 */ + "Timeout.", /* ERR_TIMEOUT -3 */ + "Routing problem.", /* ERR_RTE -4 */ + "Operation in progress.", /* ERR_INPROGRESS -5 */ + "Illegal value.", /* ERR_VAL -6 */ + "Operation would block.", /* ERR_WOULDBLOCK -7 */ + "Address in use.", /* ERR_USE -8 */ + "Already connecting.", /* ERR_ALREADY -9 */ + "Already connected.", /* ERR_ISCONN -10 */ + "Not connected.", /* ERR_CONN -11 */ + "Low-level netif error.", /* ERR_IF -12 */ + "Connection aborted.", /* ERR_ABRT -13 */ + "Connection reset.", /* ERR_RST -14 */ + "Connection closed.", /* ERR_CLSD -15 */ + "Illegal argument." /* ERR_ARG -16 */ +}; + +/** + * Convert an lwip internal error to a string representation. + * + * @param err an lwip internal err_t + * @return a string representation for err + */ +const char * +lwip_strerr(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_strerr))) { + return "Unknown error."; + } + return err_strerr[-err]; +} + +#endif /* LWIP_DEBUG */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/if_api.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/if_api.c new file mode 100644 index 000000000..8e094d090 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/if_api.c @@ -0,0 +1,102 @@ +/** + * @file + * Interface Identification APIs from: + * RFC 3493: Basic Socket Interface Extensions for IPv6 + * Section 4: Interface Identification + * + * @defgroup if_api Interface Identification API + * @ingroup socket + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
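/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * err_to_errno() above is how the socket layer maps an err_t onto a BSD errno
 * value; lwip_strerr() only yields a real string when LWIP_DEBUG is defined.
 * The logging helper below is hypothetical.
 */
#include <stdio.h>
#include "lwip/err.h"

static void report_lwip_error(err_t err)
{
#ifdef LWIP_DEBUG
  printf("lwIP error %d (%s), errno %d\n", (int)err, lwip_strerr(err), err_to_errno(err));
#else
  printf("lwIP error %d, errno %d\n", (int)err, err_to_errno(err));
#endif
}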
+ * + * Author: Joel Cunningham + * + */ +#include "lwip/opt.h" + +#if LWIP_SOCKET + +#include "lwip/errno.h" +#include "lwip/if_api.h" +#include "lwip/netifapi.h" +#include "lwip/priv/sockets_priv.h" + +/** + * @ingroup if_api + * Maps an interface index to its corresponding name. + * @param ifindex interface index + * @param ifname shall point to a buffer of at least {IF_NAMESIZE} bytes + * @return If ifindex is an interface index, then the function shall return the + * value supplied in ifname, which points to a buffer now containing the interface name. + * Otherwise, the function shall return a NULL pointer. + */ +char * +lwip_if_indextoname(unsigned int ifindex, char *ifname) +{ +#if LWIP_NETIF_API + if (ifindex <= 0xff) { + err_t err = netifapi_netif_index_to_name((u8_t)ifindex, ifname); + if (!err && ifname[0] != '\0') { + return ifname; + } + } +#else /* LWIP_NETIF_API */ + LWIP_UNUSED_ARG(ifindex); + LWIP_UNUSED_ARG(ifname); +#endif /* LWIP_NETIF_API */ + set_errno(ENXIO); + return NULL; +} + +/** + * @ingroup if_api + * Returs the interface index corresponding to name ifname. + * @param ifname Interface name + * @return The corresponding index if ifname is the name of an interface; + * otherwise, zero. + */ +unsigned int +lwip_if_nametoindex(const char *ifname) +{ +#if LWIP_NETIF_API + err_t err; + u8_t idx; + + err = netifapi_netif_name_to_index(ifname, &idx); + if (!err) { + return idx; + } +#else /* LWIP_NETIF_API */ + LWIP_UNUSED_ARG(ifname); +#endif /* LWIP_NETIF_API */ + return 0; /* invalid index */ +} + +#endif /* LWIP_SOCKET */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/netbuf.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/netbuf.c new file mode 100644 index 000000000..3b910de10 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/netbuf.c @@ -0,0 +1,250 @@ +/** + * @file + * Network buffer management + * + * @defgroup netbuf Network buffers + * @ingroup netconn + * Network buffer descriptor for @ref netconn. Based on @ref pbuf internally + * to avoid copying data around.\n + * Buffers must not be shared accross multiple threads, all functions except + * netbuf_new() and netbuf_delete() are not thread-safe. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
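/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * The two RFC 3493 helpers above translate between a netif name (two-letter
 * prefix plus number; the exact name such as "st1" depends on the port's
 * ethernetif) and its index. They require LWIP_NETIF_API; otherwise they fail
 * with ENXIO or 0.
 */
#include <stdio.h>
#include "lwip/if_api.h"

static void ifname_roundtrip_example(void)
{
  char name[IF_NAMESIZE];
  unsigned int idx = lwip_if_nametoindex("st1");   /* example interface name */

  if ((idx != 0) && (lwip_if_indextoname(idx, name) != NULL)) {
    printf("index %u -> name %s\n", idx, name);
  }
}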
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netbuf.h" +#include "lwip/memp.h" + +#include + +/** + * @ingroup netbuf + * Create (allocate) and initialize a new netbuf. + * The netbuf doesn't yet contain a packet buffer! + * + * @return a pointer to a new netbuf + * NULL on lack of memory + */ +struct +netbuf *netbuf_new(void) +{ + struct netbuf *buf; + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf != NULL) { + memset(buf, 0, sizeof(struct netbuf)); + } + return buf; +} + +/** + * @ingroup netbuf + * Deallocate a netbuf allocated by netbuf_new(). + * + * @param buf pointer to a netbuf allocated by netbuf_new() + */ +void +netbuf_delete(struct netbuf *buf) +{ + if (buf != NULL) { + if (buf->p != NULL) { + pbuf_free(buf->p); + buf->p = buf->ptr = NULL; + } + memp_free(MEMP_NETBUF, buf); + } +} + +/** + * @ingroup netbuf + * Allocate memory for a packet buffer for a given netbuf. + * + * @param buf the netbuf for which to allocate a packet buffer + * @param size the size of the packet buffer to allocate + * @return pointer to the allocated memory + * NULL if no memory could be allocated + */ +void * +netbuf_alloc(struct netbuf *buf, u16_t size) +{ + LWIP_ERROR("netbuf_alloc: invalid buf", (buf != NULL), return NULL;); + + /* Deallocate any previously allocated memory. */ + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = pbuf_alloc(PBUF_TRANSPORT, size, PBUF_RAM); + if (buf->p == NULL) { + return NULL; + } + LWIP_ASSERT("check that first pbuf can hold size", + (buf->p->len >= size)); + buf->ptr = buf->p; + return buf->p->payload; +} + +/** + * @ingroup netbuf + * Free the packet buffer included in a netbuf + * + * @param buf pointer to the netbuf which contains the packet buffer to free + */ +void +netbuf_free(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_free: invalid buf", (buf != NULL), return;); + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = buf->ptr = NULL; +#if LWIP_CHECKSUM_ON_COPY + buf->flags = 0; + buf->toport_chksum = 0; +#endif /* LWIP_CHECKSUM_ON_COPY */ +} + +/** + * @ingroup netbuf + * Let a netbuf reference existing (non-volatile) data. 
+ * + * @param buf netbuf which should reference the data + * @param dataptr pointer to the data to reference + * @param size size of the data + * @return ERR_OK if data is referenced + * ERR_MEM if data couldn't be referenced due to lack of memory + */ +err_t +netbuf_ref(struct netbuf *buf, const void *dataptr, u16_t size) +{ + LWIP_ERROR("netbuf_ref: invalid buf", (buf != NULL), return ERR_ARG;); + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (buf->p == NULL) { + buf->ptr = NULL; + return ERR_MEM; + } + ((struct pbuf_rom *)buf->p)->payload = dataptr; + buf->p->len = buf->p->tot_len = size; + buf->ptr = buf->p; + return ERR_OK; +} + +/** + * @ingroup netbuf + * Chain one netbuf to another (@see pbuf_chain) + * + * @param head the first netbuf + * @param tail netbuf to chain after head, freed by this function, may not be reference after returning + */ +void +netbuf_chain(struct netbuf *head, struct netbuf *tail) +{ + LWIP_ERROR("netbuf_chain: invalid head", (head != NULL), return;); + LWIP_ERROR("netbuf_chain: invalid tail", (tail != NULL), return;); + pbuf_cat(head->p, tail->p); + head->ptr = head->p; + memp_free(MEMP_NETBUF, tail); +} + +/** + * @ingroup netbuf + * Get the data pointer and length of the data inside a netbuf. + * + * @param buf netbuf to get the data from + * @param dataptr pointer to a void pointer where to store the data pointer + * @param len pointer to an u16_t where the length of the data is stored + * @return ERR_OK if the information was retrieved, + * ERR_BUF on error. + */ +err_t +netbuf_data(struct netbuf *buf, void **dataptr, u16_t *len) +{ + LWIP_ERROR("netbuf_data: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("netbuf_data: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("netbuf_data: invalid len", (len != NULL), return ERR_ARG;); + + if (buf->ptr == NULL) { + return ERR_BUF; + } + *dataptr = buf->ptr->payload; + *len = buf->ptr->len; + return ERR_OK; +} + +/** + * @ingroup netbuf + * Move the current data pointer of a packet buffer contained in a netbuf + * to the next part. + * The packet buffer itself is not modified. + * + * @param buf the netbuf to modify + * @return -1 if there is no next part + * 1 if moved to the next part but now there is no next part + * 0 if moved to the next part and there are still more parts + */ +s8_t +netbuf_next(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_next: invalid buf", (buf != NULL), return -1;); + if (buf->ptr->next == NULL) { + return -1; + } + buf->ptr = buf->ptr->next; + if (buf->ptr->next == NULL) { + return 1; + } + return 0; +} + +/** + * @ingroup netbuf + * Move the current data pointer of a packet buffer contained in a netbuf + * to the beginning of the packet. + * The packet buffer itself is not modified. 
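/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * netbuf_data()/netbuf_next() above walk the pbuf chain inside a netbuf one
 * fragment at a time; a typical consumer (e.g. after netconn_recv()) looks like
 * this. process_fragment() is a hypothetical application callback.
 */
#include "lwip/netbuf.h"

static void consume_netbuf_example(struct netbuf *buf,
                                   void (*process_fragment)(const void *data, u16_t len))
{
  void *data;
  u16_t len;

  netbuf_first(buf);                        /* start at the first pbuf */
  do {
    if (netbuf_data(buf, &data, &len) == ERR_OK) {
      process_fragment(data, len);
    }
  } while (netbuf_next(buf) >= 0);          /* -1 means there was no next fragment */
  netbuf_delete(buf);
}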
+ * + * @param buf the netbuf to modify + */ +void +netbuf_first(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_first: invalid buf", (buf != NULL), return;); + buf->ptr = buf->p; +} + +#endif /* LWIP_NETCONN */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/netdb.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/netdb.c new file mode 100644 index 000000000..87714259d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/netdb.c @@ -0,0 +1,414 @@ +/** + * @file + * API functions for name resolving + * + * @defgroup netdbapi NETDB API + * @ingroup socket + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/netdb.h" + +#if LWIP_DNS && LWIP_SOCKET + +#include "lwip/err.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/api.h" +#include "lwip/dns.h" + +#include /* memset */ +#include /* atoi */ + +/** helper struct for gethostbyname_r to access the char* buffer */ +struct gethostbyname_r_helper { + ip_addr_t *addr_list[2]; + ip_addr_t addr; + char *aliases; +}; + +/** h_errno is exported in netdb.h for access by applications. */ +#if LWIP_DNS_API_DECLARE_H_ERRNO +int h_errno; +#endif /* LWIP_DNS_API_DECLARE_H_ERRNO */ + +/** define "hostent" variables storage: 0 if we use a static (but unprotected) + * set of variables for lwip_gethostbyname, 1 if we use a local storage */ +#ifndef LWIP_DNS_API_HOSTENT_STORAGE +#define LWIP_DNS_API_HOSTENT_STORAGE 0 +#endif + +/** define "hostent" variables storage */ +#if LWIP_DNS_API_HOSTENT_STORAGE +#define HOSTENT_STORAGE +#else +#define HOSTENT_STORAGE static +#endif /* LWIP_DNS_API_STATIC_HOSTENT */ + +/** + * Returns an entry containing addresses of address family AF_INET + * for the host with name name. + * Due to dns_gethostbyname limitations, only one address is returned. 
+ * + * @param name the hostname to resolve + * @return an entry containing addresses of address family AF_INET + * for the host with name name + */ +struct hostent * +lwip_gethostbyname(const char *name) +{ + err_t err; + ip_addr_t addr; + + /* buffer variables for lwip_gethostbyname() */ + HOSTENT_STORAGE struct hostent s_hostent; + HOSTENT_STORAGE char *s_aliases; + HOSTENT_STORAGE ip_addr_t s_hostent_addr; + HOSTENT_STORAGE ip_addr_t *s_phostent_addr[2]; + HOSTENT_STORAGE char s_hostname[DNS_MAX_NAME_LENGTH + 1]; + + /* query host IP address */ + err = netconn_gethostbyname(name, &addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + h_errno = HOST_NOT_FOUND; + return NULL; + } + + /* fill hostent */ + s_hostent_addr = addr; + s_phostent_addr[0] = &s_hostent_addr; + s_phostent_addr[1] = NULL; + strncpy(s_hostname, name, DNS_MAX_NAME_LENGTH); + s_hostname[DNS_MAX_NAME_LENGTH] = 0; + s_hostent.h_name = s_hostname; + s_aliases = NULL; + s_hostent.h_aliases = &s_aliases; + s_hostent.h_addrtype = AF_INET; + s_hostent.h_length = sizeof(ip_addr_t); + s_hostent.h_addr_list = (char **)&s_phostent_addr; + +#if DNS_DEBUG + /* dump hostent */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_name == %s\n", s_hostent.h_name)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_aliases == %p\n", (void *)s_hostent.h_aliases)); + /* h_aliases are always empty */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addrtype == %d\n", s_hostent.h_addrtype)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_length == %d\n", s_hostent.h_length)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list == %p\n", (void *)s_hostent.h_addr_list)); + if (s_hostent.h_addr_list != NULL) { + u8_t idx; + for (idx = 0; s_hostent.h_addr_list[idx]; idx++) { + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i] == %p\n", idx, s_hostent.h_addr_list[idx])); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i]-> == %s\n", idx, ipaddr_ntoa((ip_addr_t *)s_hostent.h_addr_list[idx]))); + } + } +#endif /* DNS_DEBUG */ + +#if LWIP_DNS_API_HOSTENT_STORAGE + /* this function should return the "per-thread" hostent after copy from s_hostent */ + return sys_thread_hostent(&s_hostent); +#else + return &s_hostent; +#endif /* LWIP_DNS_API_HOSTENT_STORAGE */ +} + +/** + * Thread-safe variant of lwip_gethostbyname: instead of using a static + * buffer, this function takes buffer and errno pointers as arguments + * and uses these for the result. 
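/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * lwip_gethostbyname() above returns (by default) a pointer to static storage
 * holding a single address and no aliases, so the result must be consumed
 * before the next call. The host name is an example.
 */
#include <stdio.h>
#include "lwip/netdb.h"
#include "lwip/ip_addr.h"

static void gethostbyname_example(void)
{
  struct hostent *he = lwip_gethostbyname("example.com");

  if ((he != NULL) && (he->h_addr_list[0] != NULL)) {
    printf("%s -> %s\n", he->h_name, ipaddr_ntoa((ip_addr_t *)he->h_addr_list[0]));
  }
}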
+ * + * @param name the hostname to resolve + * @param ret pre-allocated struct where to store the result + * @param buf pre-allocated buffer where to store additional data + * @param buflen the size of buf + * @param result pointer to a hostent pointer that is set to ret on success + * and set to zero on error + * @param h_errnop pointer to an int where to store errors (instead of modifying + * the global h_errno) + * @return 0 on success, non-zero on error, additional error information + * is stored in *h_errnop instead of h_errno to be thread-safe + */ +int +lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop) +{ + err_t err; + struct gethostbyname_r_helper *h; + char *hostname; + size_t namelen; + int lh_errno; + + if (h_errnop == NULL) { + /* ensure h_errnop is never NULL */ + h_errnop = &lh_errno; + } + + if (result == NULL) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + /* first thing to do: set *result to nothing */ + *result = NULL; + if ((name == NULL) || (ret == NULL) || (buf == NULL)) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + + namelen = strlen(name); + if (buflen < (sizeof(struct gethostbyname_r_helper) + LWIP_MEM_ALIGN_BUFFER(namelen + 1))) { + /* buf can't hold the data needed + a copy of name */ + *h_errnop = ERANGE; + return -1; + } + + h = (struct gethostbyname_r_helper *)LWIP_MEM_ALIGN(buf); + hostname = ((char *)h) + sizeof(struct gethostbyname_r_helper); + + /* query host IP address */ + err = netconn_gethostbyname(name, &h->addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + *h_errnop = HOST_NOT_FOUND; + return -1; + } + + /* copy the hostname into buf */ + MEMCPY(hostname, name, namelen); + hostname[namelen] = 0; + + /* fill hostent */ + h->addr_list[0] = &h->addr; + h->addr_list[1] = NULL; + h->aliases = NULL; + ret->h_name = hostname; + ret->h_aliases = &h->aliases; + ret->h_addrtype = AF_INET; + ret->h_length = sizeof(ip_addr_t); + ret->h_addr_list = (char **)&h->addr_list; + + /* set result != NULL */ + *result = ret; + + /* return success */ + return 0; +} + +/** + * Frees one or more addrinfo structures returned by getaddrinfo(), along with + * any additional storage associated with those structures. If the ai_next field + * of the structure is not null, the entire list of structures is freed. + * + * @param ai struct addrinfo to free + */ +void +lwip_freeaddrinfo(struct addrinfo *ai) +{ + struct addrinfo *next; + + while (ai != NULL) { + next = ai->ai_next; + memp_free(MEMP_NETDB, ai); + ai = next; + } +} + +/** + * Translates the name of a service location (for example, a host name) and/or + * a service name and returns a set of socket addresses and associated + * information to be used in creating a socket with which to address the + * specified service. + * Memory for the result is allocated internally and must be freed by calling + * lwip_freeaddrinfo()! + * + * Due to a limitation in dns_gethostbyname, only the first address of a + * host is returned. + * Also, service names are not supported (only port numbers)! 
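/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * The reentrant variant above needs a caller-supplied buffer large enough for
 * the helper struct plus a copy of the name; 256 bytes is a comfortable example
 * size for short host names.
 */
#include <stdio.h>
#include "lwip/netdb.h"

static void gethostbyname_r_example(void)
{
  struct hostent he;
  struct hostent *result;
  char buf[256];
  int local_h_errno;

  if ((lwip_gethostbyname_r("example.com", &he, buf, sizeof(buf),
                            &result, &local_h_errno) == 0) && (result != NULL)) {
    printf("resolved %s\n", result->h_name);
  } else {
    printf("lookup failed, h_errno=%d\n", local_h_errno);
  }
}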
+ * + * @param nodename descriptive name or address string of the host + * (may be NULL -> local address) + * @param servname port number as string of NULL + * @param hints structure containing input values that set socktype and protocol + * @param res pointer to a pointer where to store the result (set to NULL on failure) + * @return 0 on success, non-zero on failure + * + * @todo: implement AI_V4MAPPED, AI_ADDRCONFIG + */ +int +lwip_getaddrinfo(const char *nodename, const char *servname, + const struct addrinfo *hints, struct addrinfo **res) +{ + err_t err; + ip_addr_t addr; + struct addrinfo *ai; + struct sockaddr_storage *sa = NULL; + int port_nr = 0; + size_t total_size; + size_t namelen = 0; + int ai_family; + + if (res == NULL) { + return EAI_FAIL; + } + *res = NULL; + if ((nodename == NULL) && (servname == NULL)) { + return EAI_NONAME; + } + + if (hints != NULL) { + ai_family = hints->ai_family; + if ((ai_family != AF_UNSPEC) +#if LWIP_IPV4 + && (ai_family != AF_INET) +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + && (ai_family != AF_INET6) +#endif /* LWIP_IPV6 */ + ) { + return EAI_FAMILY; + } + } else { + ai_family = AF_UNSPEC; + } + + if (servname != NULL) { + /* service name specified: convert to port number + * @todo?: currently, only ASCII integers (port numbers) are supported (AI_NUMERICSERV)! */ + port_nr = atoi(servname); + if ((port_nr <= 0) || (port_nr > 0xffff)) { + return EAI_SERVICE; + } + } + + if (nodename != NULL) { + /* service location specified, try to resolve */ + if ((hints != NULL) && (hints->ai_flags & AI_NUMERICHOST)) { + /* no DNS lookup, just parse for an address string */ + if (!ipaddr_aton(nodename, &addr)) { + return EAI_NONAME; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6_VAL(addr) && ai_family == AF_INET) || + (IP_IS_V4_VAL(addr) && ai_family == AF_INET6)) { + return EAI_NONAME; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + } else { +#if LWIP_IPV4 && LWIP_IPV6 + /* AF_UNSPEC: prefer IPv4 */ + u8_t type = NETCONN_DNS_IPV4_IPV6; + if (ai_family == AF_INET) { + type = NETCONN_DNS_IPV4; + } else if (ai_family == AF_INET6) { + type = NETCONN_DNS_IPV6; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + err = netconn_gethostbyname_addrtype(nodename, &addr, type); + if (err != ERR_OK) { + return EAI_FAIL; + } + } + } else { + /* service location specified, use loopback address */ + if ((hints != NULL) && (hints->ai_flags & AI_PASSIVE)) { + ip_addr_set_any_val(ai_family == AF_INET6, addr); + } else { + ip_addr_set_loopback_val(ai_family == AF_INET6, addr); + } + } + + total_size = sizeof(struct addrinfo) + sizeof(struct sockaddr_storage); + if (nodename != NULL) { + namelen = strlen(nodename); + if (namelen > DNS_MAX_NAME_LENGTH) { + /* invalid name length */ + return EAI_FAIL; + } + LWIP_ASSERT("namelen is too long", total_size + namelen + 1 > total_size); + total_size += namelen + 1; + } + /* If this fails, please report to lwip-devel! 
:-) */ + LWIP_ASSERT("total_size <= NETDB_ELEM_SIZE: please report this!", + total_size <= NETDB_ELEM_SIZE); + ai = (struct addrinfo *)memp_malloc(MEMP_NETDB); + if (ai == NULL) { + return EAI_MEMORY; + } + memset(ai, 0, total_size); + /* cast through void* to get rid of alignment warnings */ + sa = (struct sockaddr_storage *)(void *)((u8_t *)ai + sizeof(struct addrinfo)); + if (IP_IS_V6_VAL(addr)) { +#if LWIP_IPV6 + struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa; + /* set up sockaddr */ + inet6_addr_from_ip6addr(&sa6->sin6_addr, ip_2_ip6(&addr)); + sa6->sin6_family = AF_INET6; + sa6->sin6_len = sizeof(struct sockaddr_in6); + sa6->sin6_port = lwip_htons((u16_t)port_nr); + sa6->sin6_scope_id = ip6_addr_zone(ip_2_ip6(&addr)); + ai->ai_family = AF_INET6; +#endif /* LWIP_IPV6 */ + } else { +#if LWIP_IPV4 + struct sockaddr_in *sa4 = (struct sockaddr_in *)sa; + /* set up sockaddr */ + inet_addr_from_ip4addr(&sa4->sin_addr, ip_2_ip4(&addr)); + sa4->sin_family = AF_INET; + sa4->sin_len = sizeof(struct sockaddr_in); + sa4->sin_port = lwip_htons((u16_t)port_nr); + ai->ai_family = AF_INET; +#endif /* LWIP_IPV4 */ + } + + /* set up addrinfo */ + if (hints != NULL) { + /* copy socktype & protocol from hints if specified */ + ai->ai_socktype = hints->ai_socktype; + ai->ai_protocol = hints->ai_protocol; + } + if (nodename != NULL) { + /* copy nodename to canonname if specified */ + ai->ai_canonname = ((char *)ai + sizeof(struct addrinfo) + sizeof(struct sockaddr_storage)); + MEMCPY(ai->ai_canonname, nodename, namelen); + ai->ai_canonname[namelen] = 0; + } + ai->ai_addrlen = sizeof(struct sockaddr_storage); + ai->ai_addr = (struct sockaddr *)sa; + + *res = ai; + + return 0; +} + +#endif /* LWIP_DNS && LWIP_SOCKET */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/netifapi.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/netifapi.c new file mode 100644 index 000000000..25957cd5c --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/netifapi.c @@ -0,0 +1,380 @@ +/** + * @file + * Network Interface Sequential API module + * + * @defgroup netifapi NETIF API + * @ingroup sequential_api + * Thread-safe functions to be called from non-TCPIP threads + * + * @defgroup netifapi_netif NETIF related + * @ingroup netifapi + * To be called from non-TCPIP threads + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
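/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * lwip_getaddrinfo() above resolves at most one address and only numeric
 * service strings; the usual pattern is to feed the result straight into the
 * socket API and release it with lwip_freeaddrinfo(). Host and port below are
 * example values.
 */
#include <string.h>
#include "lwip/netdb.h"
#include "lwip/sockets.h"

static int connect_by_name_example(void)
{
  struct addrinfo hints;
  struct addrinfo *res = NULL;
  int fd = -1;

  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_INET;
  hints.ai_socktype = SOCK_STREAM;

  if (lwip_getaddrinfo("example.com", "80", &hints, &res) == 0) {
    fd = lwip_socket(res->ai_family, res->ai_socktype, res->ai_protocol);
    if ((fd >= 0) && (lwip_connect(fd, res->ai_addr, res->ai_addrlen) != 0)) {
      lwip_close(fd);
      fd = -1;
    }
    lwip_freeaddrinfo(res);   /* result was allocated from MEMP_NETDB */
  }
  return fd;                  /* connected socket, or -1 on failure */
}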
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/etharp.h" +#include "lwip/netifapi.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#include /* strncpy */ + +#define NETIFAPI_VAR_REF(name) API_VAR_REF(name) +#define NETIFAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct netifapi_msg, name) +#define NETIFAPI_VAR_ALLOC(name) API_VAR_ALLOC(struct netifapi_msg, MEMP_NETIFAPI_MSG, name, ERR_MEM) +#define NETIFAPI_VAR_FREE(name) API_VAR_FREE(MEMP_NETIFAPI_MSG, name) + +/** + * Call netif_add() inside the tcpip_thread context. + */ +static err_t +netifapi_do_netif_add(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (!netif_add( msg->netif, +#if LWIP_IPV4 + API_EXPR_REF(msg->msg.add.ipaddr), + API_EXPR_REF(msg->msg.add.netmask), + API_EXPR_REF(msg->msg.add.gw), +#endif /* LWIP_IPV4 */ + msg->msg.add.state, + msg->msg.add.init, + msg->msg.add.input)) { + return ERR_IF; + } else { + return ERR_OK; + } +} + +#if LWIP_IPV4 +/** + * Call netif_set_addr() inside the tcpip_thread context. + */ +static err_t +netifapi_do_netif_set_addr(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + netif_set_addr( msg->netif, + API_EXPR_REF(msg->msg.add.ipaddr), + API_EXPR_REF(msg->msg.add.netmask), + API_EXPR_REF(msg->msg.add.gw)); + return ERR_OK; +} +#endif /* LWIP_IPV4 */ + +/** +* Call netif_name_to_index() inside the tcpip_thread context. +*/ +static err_t +netifapi_do_name_to_index(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + msg->msg.ifs.index = netif_name_to_index(msg->msg.ifs.name); + return ERR_OK; +} + +/** +* Call netif_index_to_name() inside the tcpip_thread context. +*/ +static err_t +netifapi_do_index_to_name(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (!netif_index_to_name(msg->msg.ifs.index, msg->msg.ifs.name)) { + /* return failure via empty name */ + msg->msg.ifs.name[0] = '\0'; + } + return ERR_OK; +} + +/** + * Call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) inside the + * tcpip_thread context. + */ +static err_t +netifapi_do_netif_common(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (msg->msg.common.errtfunc != NULL) { + return msg->msg.common.errtfunc(msg->netif); + } else { + msg->msg.common.voidfunc(msg->netif); + return ERR_OK; + } +} + +#if LWIP_ARP && LWIP_IPV4 +/** + * @ingroup netifapi_arp + * Add or update an entry in the ARP cache. + * For an update, ipaddr is used to find the cache entry. + * + * @param ipaddr IPv4 address of cache entry + * @param ethaddr hardware address mapped to ipaddr + * @param type type of ARP cache entry + * @return ERR_OK: entry added/updated, else error from err_t + */ +err_t +netifapi_arp_add(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, enum netifapi_arp_entry type) +{ + err_t err; + + /* We only support permanent entries currently */ + LWIP_UNUSED_ARG(type); + +#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); + err = etharp_add_static_entry(ipaddr, ethaddr); + UNLOCK_TCPIP_CORE(); +#else + /* @todo add new vars to struct netifapi_msg and create a 'do' func */ + LWIP_UNUSED_ARG(ipaddr); + LWIP_UNUSED_ARG(ethaddr); + err = ERR_VAL; +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */ + + return err; +} + +/** + * @ingroup netifapi_arp + * Remove an entry in the ARP cache identified by ipaddr + * + * @param ipaddr IPv4 address of cache entry + * @param type type of ARP cache entry + * @return ERR_OK: entry removed, else error from err_t + */ +err_t +netifapi_arp_remove(const ip4_addr_t *ipaddr, enum netifapi_arp_entry type) +{ + err_t err; + + /* We only support permanent entries currently */ + LWIP_UNUSED_ARG(type); + +#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); + err = etharp_remove_static_entry(ipaddr); + UNLOCK_TCPIP_CORE(); +#else + /* @todo add new vars to struct netifapi_msg and create a 'do' func */ + LWIP_UNUSED_ARG(ipaddr); + err = ERR_VAL; +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */ + + return err; +} +#endif /* LWIP_ARP && LWIP_IPV4 */ + +/** + * @ingroup netifapi_netif + * Call netif_add() in a thread-safe way by running that function inside the + * tcpip_thread context. + * + * @note for params @see netif_add() + */ +err_t +netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + +#if LWIP_IPV4 + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } +#endif /* LWIP_IPV4 */ + + NETIFAPI_VAR_REF(msg).netif = netif; +#if LWIP_IPV4 + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); +#endif /* LWIP_IPV4 */ + NETIFAPI_VAR_REF(msg).msg.add.state = state; + NETIFAPI_VAR_REF(msg).msg.add.init = init; + NETIFAPI_VAR_REF(msg).msg.add.input = input; + err = tcpip_api_call(netifapi_do_netif_add, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +#if LWIP_IPV4 +/** + * @ingroup netifapi_netif + * Call netif_set_addr() in a thread-safe way by running that function inside the + * tcpip_thread context. 
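/*
 * Editor's note: illustrative sketch, not part of the vendored lwIP sources.
 * netifapi_netif_add() lets a non-TCPIP thread register an interface safely.
 * ethernetif_init is assumed to be the port's low-level init function (declared
 * in the project's ethernetif header elsewhere in this tree) and the static
 * addresses below are only examples.
 */
#include "lwip/netifapi.h"
#include "lwip/tcpip.h"

extern err_t ethernetif_init(struct netif *netif);  /* port-provided init function */

static struct netif example_netif;

static void add_interface_example(void)
{
  ip4_addr_t ip, mask, gw;

  IP4_ADDR(&ip,   192, 168, 1, 50);
  IP4_ADDR(&mask, 255, 255, 255, 0);
  IP4_ADDR(&gw,   192, 168, 1, 1);

  if (netifapi_netif_add(&example_netif, &ip, &mask, &gw,
                         NULL, ethernetif_init, tcpip_input) == ERR_OK) {
    netifapi_netif_set_default(&example_netif);
    netifapi_netif_set_up(&example_netif);  /* wraps netif_set_up via netifapi_netif_common */
  }
}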
+ * + * @note for params @see netif_set_addr() + */ +err_t +netifapi_netif_set_addr(struct netif *netif, + const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); + err = tcpip_api_call(netifapi_do_netif_set_addr, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} +#endif /* LWIP_IPV4 */ + +/** + * call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) in a thread-safe + * way by running that function inside the tcpip_thread context. + * + * @note use only for functions where there is only "netif" parameter. + */ +err_t +netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.common.voidfunc = voidfunc; + NETIFAPI_VAR_REF(msg).msg.common.errtfunc = errtfunc; + err = tcpip_api_call(netifapi_do_netif_common, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +/** +* @ingroup netifapi_netif +* Call netif_name_to_index() in a thread-safe way by running that function inside the +* tcpip_thread context. +* +* @param name the interface name of the netif +* @param idx output index of the found netif +*/ +err_t +netifapi_netif_name_to_index(const char *name, u8_t *idx) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + *idx = 0; + +#if LWIP_MPU_COMPATIBLE + strncpy(NETIFAPI_VAR_REF(msg).msg.ifs.name, name, NETIF_NAMESIZE - 1); + NETIFAPI_VAR_REF(msg).msg.ifs.name[NETIF_NAMESIZE - 1] = '\0'; +#else + NETIFAPI_VAR_REF(msg).msg.ifs.name = LWIP_CONST_CAST(char *, name); +#endif /* LWIP_MPU_COMPATIBLE */ + err = tcpip_api_call(netifapi_do_name_to_index, &API_VAR_REF(msg).call); + if (!err) { + *idx = NETIFAPI_VAR_REF(msg).msg.ifs.index; + } + NETIFAPI_VAR_FREE(msg); + return err; +} + +/** +* @ingroup netifapi_netif +* Call netif_index_to_name() in a thread-safe way by running that function inside the +* tcpip_thread context. +* +* @param idx the interface index of the netif +* @param name output name of the found netif, empty '\0' string if netif not found. 
+* name should be of at least NETIF_NAMESIZE bytes +*/ +err_t +netifapi_netif_index_to_name(u8_t idx, char *name) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).msg.ifs.index = idx; +#if !LWIP_MPU_COMPATIBLE + NETIFAPI_VAR_REF(msg).msg.ifs.name = name; +#endif /* LWIP_MPU_COMPATIBLE */ + err = tcpip_api_call(netifapi_do_index_to_name, &API_VAR_REF(msg).call); +#if LWIP_MPU_COMPATIBLE + if (!err) { + strncpy(name, NETIFAPI_VAR_REF(msg).msg.ifs.name, NETIF_NAMESIZE - 1); + name[NETIF_NAMESIZE - 1] = '\0'; + } +#endif /* LWIP_MPU_COMPATIBLE */ + NETIFAPI_VAR_FREE(msg); + return err; +} + +#endif /* LWIP_NETIF_API */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/sockets.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/sockets.c new file mode 100644 index 000000000..cb7df914f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/sockets.c @@ -0,0 +1,4160 @@ +/** + * @file + * Sockets BSD-Like API module + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + * Improved by Marc Boucher and David Haas + * + */ + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sockets.h" +#include "lwip/priv/sockets_priv.h" +#include "lwip/api.h" +#include "lwip/igmp.h" +#include "lwip/inet.h" +#include "lwip/tcp.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/mld6.h" +#if LWIP_CHECKSUM_ON_COPY +#include "lwip/inet_chksum.h" +#endif + +#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES +#include <stdarg.h> +#endif + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/* If the netconn API is not required publicly, then we include the necessary + files here to get the implementation */ +#if !LWIP_NETCONN +#undef LWIP_NETCONN +#define LWIP_NETCONN 1 +#include "api_msg.c" +#include "api_lib.c" +#include "netbuf.c" +#undef LWIP_NETCONN +#define LWIP_NETCONN 0 +#endif + +#define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name) +#define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name) +#define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock) +#define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name) + +#if LWIP_IPV4 +#define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \ + (sin)->sin_len = sizeof(struct sockaddr_in); \ + (sin)->sin_family = AF_INET; \ + (sin)->sin_port = lwip_htons((port)); \ + inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \ + memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0) +#define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \ + inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \ + (port) = lwip_ntohs((sin)->sin_port); }while(0) +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \ + (sin6)->sin6_len = sizeof(struct sockaddr_in6); \ + (sin6)->sin6_family = AF_INET6; \ + (sin6)->sin6_port = lwip_htons((port)); \ + (sin6)->sin6_flowinfo = 0; \ + inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \ + (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0) +#define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \ + inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \ + if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \ + ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \ + } \ + (port) = lwip_ntohs((sin6)->sin6_port); }while(0) +#endif /* LWIP_IPV6 */ + +#if LWIP_IPV4 && LWIP_IPV6 +static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port); + +#define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \ + ((namelen) == sizeof(struct sockaddr_in6))) +#define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \ + ((name)->sa_family == AF_INET6)) +#define SOCK_ADDR_TYPE_MATCH(name, sock) \ + ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \ + (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type)))) +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \ + if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \ + } else { \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \ + } } while(0) +#define
SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port)) +#define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \ + (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6)) +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#endif /* LWIP_IPV6 */ + +#define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \ + IS_SOCK_ADDR_TYPE_VALID(name)) +#define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \ + SOCK_ADDR_TYPE_MATCH(name, sock)) +#define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0) + + +#define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \ + if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \ + if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \ + if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0) + + +#define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name) +#if LWIP_MPU_COMPATIBLE +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \ + name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \ + if (name == NULL) { \ + sock_set_errno(sock, ENOMEM); \ + done_socket(sock); \ + return -1; \ + } }while(0) +#else /* LWIP_MPU_COMPATIBLE */ +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) +#endif /* LWIP_MPU_COMPATIBLE */ + +#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE int +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val)) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) 
((long)*(const int*)(optval)) +#else +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \ + u32_t loc = (val); \ + ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \ + ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000)) +#endif + + +/** A struct sockaddr replacement that has the same alignment as sockaddr_in/ + * sockaddr_in6 if instantiated. + */ +union sockaddr_aligned { + struct sockaddr sa; +#if LWIP_IPV6 + struct sockaddr_in6 sin6; +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + struct sockaddr_in sin; +#endif /* LWIP_IPV4 */ +}; + +/* Define the number of IPv4 multicast memberships, default is one per socket */ +#ifndef LWIP_SOCKET_MAX_MEMBERSHIPS +#define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS +#endif + +#if LWIP_IGMP +/* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_pair { + /** the socket */ + struct lwip_sock *sock; + /** the interface address */ + ip4_addr_t if_addr; + /** the group address */ + ip4_addr_t multi_addr; +}; + +static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_drop_registered_memberships(int s); +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6_MLD +/* This is to keep track of IP_JOIN_GROUP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_mld6_pair { + /** the socket */ + struct lwip_sock *sock; + /** the interface index */ + u8_t if_idx; + /** the group address */ + ip6_addr_t multi_addr; +}; + +static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr); +static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr); +static void lwip_socket_drop_registered_mld6_memberships(int s); +#endif /* LWIP_IPV6_MLD */ + +/** The global array of available sockets */ +static struct lwip_sock sockets[NUM_SOCKETS]; + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +#if LWIP_TCPIP_CORE_LOCKING +/* protect the select_cb_list using core lock */ +#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) +#define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE() +#define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE() +#else /* LWIP_TCPIP_CORE_LOCKING */ +/* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */ +#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +/** This counter is increased from lwip_select when the list is changed + and checked in select_check_waiters to see if it has changed. 
*/ +static volatile int select_cb_ctr; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +/** The global list of tasks waiting for select */ +static struct lwip_select_cb *select_cb_list; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#define sock_set_errno(sk, e) do { \ + const int sockerr = (e); \ + set_errno(sockerr); \ +} while (0) + +/* Forward declaration of some functions */ +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len); +#define DEFAULT_SOCKET_EVENTCB event_callback +static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent); +#else +#define DEFAULT_SOCKET_EVENTCB NULL +#endif +#if !LWIP_TCPIP_CORE_LOCKING +static void lwip_getsockopt_callback(void *arg); +static void lwip_setsockopt_callback(void *arg); +#endif +static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen); +static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen); +static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn, + union lwip_sock_lastdata *lastdata); +static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata); + +#if LWIP_IPV4 && LWIP_IPV6 +static void +sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port) +{ + if ((sockaddr->sa_family) == AF_INET6) { + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V6; + } else { + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V4; + } +} +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +/** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void +lwip_socket_thread_init(void) +{ + netconn_thread_init(); +} + +/** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ +void +lwip_socket_thread_cleanup(void) +{ + netconn_thread_cleanup(); +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Thread-safe increment of sock->fd_used, with overflow check */ +static int +sock_inc_used(struct lwip_sock *sock) +{ + int ret; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("sock != NULL", sock != NULL); + + SYS_ARCH_PROTECT(lev); + if (sock->fd_free_pending) { + /* prevent new usage of this socket if free is pending */ + ret = 0; + } else { + ++sock->fd_used; + ret = 1; + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + } + SYS_ARCH_UNPROTECT(lev); + return ret; +} + +/* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */ +static int +sock_inc_used_locked(struct lwip_sock *sock) +{ + LWIP_ASSERT("sock != NULL", sock != NULL); + + if (sock->fd_free_pending) { + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + return 0; + } + + ++sock->fd_used; + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + return 1; +} + +/* In full-duplex mode,sock->fd_used != 0 prevents a socket descriptor from being + * released (and possibly reused) when used from more than one thread + * (e.g. read-while-write or close-while-write, etc) + * This function is called at the end of functions using (try)get_socket*(). 
+ */ +static void +done_socket(struct lwip_sock *sock) +{ + int freed = 0; + int is_tcp = 0; + struct netconn *conn = NULL; + union lwip_sock_lastdata lastdata; + SYS_ARCH_DECL_PROTECT(lev); + LWIP_ASSERT("sock != NULL", sock != NULL); + + SYS_ARCH_PROTECT(lev); + LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0); + if (--sock->fd_used == 0) { + if (sock->fd_free_pending) { + /* free the socket */ + sock->fd_used = 1; + is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP; + freed = free_socket_locked(sock, is_tcp, &conn, &lastdata); + } + } + SYS_ARCH_UNPROTECT(lev); + + if (freed) { + free_socket_free_elements(is_tcp, conn, &lastdata); + } +} + +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define sock_inc_used(sock) 1 +#define sock_inc_used_locked(sock) 1 +#define done_socket(sock) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */ +static struct lwip_sock * +tryget_socket_unconn_nouse(int fd) +{ + int s = fd - LWIP_SOCKET_OFFSET; + if ((s < 0) || (s >= NUM_SOCKETS)) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd)); + return NULL; + } + return &sockets[s]; +} + +struct lwip_sock * +lwip_socket_dbg_get_socket(int fd) +{ + return tryget_socket_unconn_nouse(fd); +} + +/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */ +static struct lwip_sock * +tryget_socket_unconn(int fd) +{ + struct lwip_sock *ret = tryget_socket_unconn_nouse(fd); + if (ret != NULL) { + if (!sock_inc_used(ret)) { + return NULL; + } + } + return ret; +} + +/* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */ +static struct lwip_sock * +tryget_socket_unconn_locked(int fd) +{ + struct lwip_sock *ret = tryget_socket_unconn_nouse(fd); + if (ret != NULL) { + if (!sock_inc_used_locked(ret)) { + return NULL; + } + } + return ret; +} + +/** + * Same as get_socket but doesn't set errno + * + * @param fd externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +tryget_socket(int fd) +{ + struct lwip_sock *sock = tryget_socket_unconn(fd); + if (sock != NULL) { + if (sock->conn) { + return sock; + } + done_socket(sock); + } + return NULL; +} + +/** + * Map a externally used socket index to the internal socket representation. + * + * @param fd externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +get_socket(int fd) +{ + struct lwip_sock *sock = tryget_socket(fd); + if (!sock) { + if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd)); + } + set_errno(EBADF); + return NULL; + } + return sock; +} + +/** + * Allocate a new socket for a given netconn. 
+ * + * @param newconn the netconn for which to allocate a socket + * @param accepted 1 if socket has been created by accept(), + * 0 if socket has been created by socket() + * @return the index of the new socket; -1 on error + */ +static int +alloc_socket(struct netconn *newconn, int accepted) +{ + int i; + SYS_ARCH_DECL_PROTECT(lev); + LWIP_UNUSED_ARG(accepted); + + /* allocate a new socket identifier */ + for (i = 0; i < NUM_SOCKETS; ++i) { + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + if (!sockets[i].conn) { +#if LWIP_NETCONN_FULLDUPLEX + if (sockets[i].fd_used) { + SYS_ARCH_UNPROTECT(lev); + continue; + } + sockets[i].fd_used = 1; + sockets[i].fd_free_pending = 0; +#endif + sockets[i].conn = newconn; + /* The socket is not yet known to anyone, so no need to protect + after having marked it as used. */ + SYS_ARCH_UNPROTECT(lev); + sockets[i].lastdata.pbuf = NULL; +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0); + sockets[i].rcvevent = 0; + /* TCP sendbuf is empty, but the socket is not yet writable until connected + * (unless it has been created by accept()). */ + sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1); + sockets[i].errevent = 0; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + return i + LWIP_SOCKET_OFFSET; + } + SYS_ARCH_UNPROTECT(lev); + } + return -1; +} + +/** Free a socket (under lock) + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + * @param conn the socekt's netconn is stored here, must be freed externally + * @param lastdata lastdata is stored here, must be freed externally + */ +static int +free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn, + union lwip_sock_lastdata *lastdata) +{ +#if LWIP_NETCONN_FULLDUPLEX + LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0); + sock->fd_used--; + if (sock->fd_used > 0) { + sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0); + return 0; + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + LWIP_UNUSED_ARG(is_tcp); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + *lastdata = sock->lastdata; + sock->lastdata.pbuf = NULL; + *conn = sock->conn; + sock->conn = NULL; + return 1; +} + +/** Free a socket's leftover members. + */ +static void +free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata) +{ + if (lastdata->pbuf != NULL) { + if (is_tcp) { + pbuf_free(lastdata->pbuf); + } else { + netbuf_delete(lastdata->netbuf); + } + } + if (conn != NULL) { + /* netconn_prepare_delete() has already been called, here we only free the conn */ + netconn_delete(conn); + } +} + +/** Free a socket. The socket's netconn must have been + * delete before! + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + */ +static void +free_socket(struct lwip_sock *sock, int is_tcp) +{ + int freed; + struct netconn *conn; + union lwip_sock_lastdata lastdata; + SYS_ARCH_DECL_PROTECT(lev); + + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + + freed = free_socket_locked(sock, is_tcp, &conn, &lastdata); + SYS_ARCH_UNPROTECT(lev); + /* don't use 'sock' after this line, as another task might have allocated it */ + + if (freed) { + free_socket_free_elements(is_tcp, conn, &lastdata); + } +} + +/* Below this, the well-known socket functions are implemented. 
+ * Use google.com or opengroup.org to get a good description :-) + * + * Exceptions are documented! + */ + +int +lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen) +{ + struct lwip_sock *sock, *nsock; + struct netconn *newconn; + ip_addr_t naddr; + u16_t port = 0; + int newsock; + err_t err; + int recvevent; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s)); + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* wait for a new connection */ + err = netconn_accept(sock->conn, &newconn); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_acept failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + } else if (err == ERR_CLSD) { + sock_set_errno(sock, EINVAL); + } else { + sock_set_errno(sock, err_to_errno(err)); + } + done_socket(sock); + return -1; + } + LWIP_ASSERT("newconn != NULL", newconn != NULL); + + newsock = alloc_socket(newconn, 1); + if (newsock == -1) { + netconn_delete(newconn); + sock_set_errno(sock, ENFILE); + done_socket(sock); + return -1; + } + LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET)); + nsock = &sockets[newsock - LWIP_SOCKET_OFFSET]; + + /* See event_callback: If data comes in right away after an accept, even + * though the server task might not have created a new socket yet. + * In that case, newconn->socket is counted down (newconn->socket--), + * so nsock->rcvevent is >= 1 here! + */ + SYS_ARCH_PROTECT(lev); + recvevent = (s16_t)(-1 - newconn->socket); + newconn->socket = newsock; + SYS_ARCH_UNPROTECT(lev); + + if (newconn->callback) { + LOCK_TCPIP_CORE(); + while (recvevent > 0) { + recvevent--; + newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0); + } + UNLOCK_TCPIP_CORE(); + } + + /* Note that POSIX only requires us to check addr is non-NULL. addrlen must + * not be NULL if addr is valid. 
+ */ + if ((addr != NULL) && (addrlen != NULL)) { + union sockaddr_aligned tempaddr; + /* get the IP address and port of the remote host */ + err = netconn_peer(newconn, &naddr, &port); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err)); + netconn_delete(newconn); + free_socket(nsock, 1); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port); + if (*addrlen > tempaddr.sa.sa_len) { + *addrlen = tempaddr.sa.sa_len; + } + MEMCPY(addr, &tempaddr, *addrlen); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port)); + } else { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock)); + } + + sock_set_errno(sock, 0); + done_socket(sock); + done_socket(nsock); + return newsock; +} + +int +lwip_bind(int s, const struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + ip_addr_t local_addr; + u16_t local_port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_UNUSED_ARG(namelen); + + SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr)); + IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_bind(sock->conn, &local_addr, local_port); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +int +lwip_close(int s) +{ + struct lwip_sock *sock; + int is_tcp = 0; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP; + } else { + LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL); + } + +#if LWIP_IGMP + /* drop all possibly joined IGMP memberships */ + lwip_socket_drop_registered_memberships(s); +#endif /* LWIP_IGMP */ +#if LWIP_IPV6_MLD + /* drop all possibly joined MLD6 memberships */ + lwip_socket_drop_registered_mld6_memberships(s); +#endif /* LWIP_IPV6_MLD */ + + err = netconn_prepare_delete(sock->conn); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + free_socket(sock, is_tcp); + set_errno(0); + return 0; +} + +int +lwip_connect(int s, const 
struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + + LWIP_UNUSED_ARG(namelen); + if (name->sa_family == AF_UNSPEC) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s)); + err = netconn_disconnect(sock->conn); + } else { + ip_addr_t remote_addr; + u16_t remote_port; + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + + SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr)); + IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_connect(sock->conn, &remote_addr, remote_port); + } + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +/** + * Set a socket into listen mode. + * The socket may not have been used for another connection previously. + * + * @param s the socket to set to listening mode + * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1) + * @return 0 on success, non-zero on failure + */ +int +lwip_listen(int s, int backlog) +{ + struct lwip_sock *sock; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* limit the "backlog" parameter to fit in an u8_t */ + backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff); + + err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + } else { + sock_set_errno(sock, err_to_errno(err)); + } + done_socket(sock); + return -1; + } + + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +#if LWIP_TCP +/* Helper function to loop over receiving pbufs from netconn + * until "len" bytes are received or we're otherwise done. + * Keeps sock->lastdata for peeking or partly copying. + */ +static ssize_t +lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags) +{ + u8_t apiflags = NETCONN_NOAUTORCVD; + ssize_t recvd = 0; + ssize_t recv_left = (len <= SSIZE_MAX) ? 
(ssize_t)len : SSIZE_MAX; + + LWIP_ASSERT("no socket given", sock != NULL); + LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP); + + if (flags & MSG_DONTWAIT) { + apiflags |= NETCONN_DONTBLOCK; + } + + do { + struct pbuf *p; + err_t err; + u16_t copylen; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf)); + /* Check if there is data left from the last recv operation. */ + if (sock->lastdata.pbuf) { + p = sock->lastdata.pbuf; + } else { + /* No data was left from the previous operation, so we try to get + some from the network. */ + err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n", + err, (void *)p)); + + if (err != ERR_OK) { + if (recvd > 0) { + /* already received data, return that (this trusts in getting the same error from + netconn layer again next time netconn_recv is called) */ + goto lwip_recv_tcp_done; + } + /* We should really do some error checking here. */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n", + lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + if (err == ERR_CLSD) { + return 0; + } else { + return -1; + } + } + LWIP_ASSERT("p != NULL", p != NULL); + sock->lastdata.pbuf = p; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n", + p->tot_len, (int)recv_left, (int)recvd)); + + if (recv_left > p->tot_len) { + copylen = p->tot_len; + } else { + copylen = (u16_t)recv_left; + } + if (recvd + copylen < recvd) { + /* overflow */ + copylen = (u16_t)(SSIZE_MAX - recvd); + } + + /* copy the contents of the received buffer into + the supplied memory pointer mem */ + pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0); + + recvd += copylen; + + /* TCP combines multiple pbufs for one recv */ + LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen); + recv_left -= copylen; + + /* Unless we peek the incoming message... */ + if ((flags & MSG_PEEK) == 0) { + /* ... check if there is data left in the pbuf */ + LWIP_ASSERT("invalid copylen", p->tot_len >= copylen); + if (p->tot_len - copylen > 0) { + /* If so, it should be saved in the sock structure for the next recv call. + We store the pbuf but hide/free the consumed data: */ + sock->lastdata.pbuf = pbuf_free_header(p, copylen); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf)); + } else { + sock->lastdata.pbuf = NULL; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p)); + pbuf_free(p); + } + } + /* once we have some data to return, only add more if we don't need to wait */ + apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN; + /* @todo: do we need to support peeking more than one pbuf? 
*/ + } while ((recv_left > 0) && !(flags & MSG_PEEK)); +lwip_recv_tcp_done: + if ((recvd > 0) && !(flags & MSG_PEEK)) { + /* ensure window update after copying all data */ + netconn_tcp_recvd(sock->conn, (size_t)recvd); + } + sock_set_errno(sock, 0); + return recvd; +} +#endif + +/* Convert a netbuf's address data to struct sockaddr */ +static int +lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port, + struct sockaddr *from, socklen_t *fromlen) +{ + int truncated = 0; + union sockaddr_aligned saddr; + + LWIP_UNUSED_ARG(conn); + + LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL); + LWIP_ASSERT("from != NULL", from != NULL); + LWIP_ASSERT("fromlen != NULL", fromlen != NULL); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr)); + IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port); + if (*fromlen < saddr.sa.sa_len) { + truncated = 1; + } else if (*fromlen > saddr.sa.sa_len) { + *fromlen = saddr.sa.sa_len; + } + MEMCPY(from, &saddr, *fromlen); + return truncated; +} + +#if LWIP_TCP +/* Helper function to get a tcp socket's remote address info */ +static int +lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret) +{ + if (sock == NULL) { + return 0; + } + LWIP_UNUSED_ARG(dbg_fn); + LWIP_UNUSED_ARG(dbg_s); + LWIP_UNUSED_ARG(dbg_ret); + +#if !SOCKETS_DEBUG + if (from && fromlen) +#endif /* !SOCKETS_DEBUG */ + { + /* get remote addr/port from tcp_pcb */ + u16_t port; + ip_addr_t tmpaddr; + netconn_getaddr(sock->conn, &tmpaddr, &port, 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret)); + if (from && fromlen) { + return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen); + } + } + return 0; +} +#endif + +/* Helper function to receive a netbuf from a udp or raw netconn. + * Keeps sock->lastdata for peeking. + */ +static err_t +lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s) +{ + struct netbuf *buf; + u8_t apiflags; + err_t err; + u16_t buflen, copylen, copied; + int i; + + LWIP_UNUSED_ARG(dbg_s); + LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;); + + if (flags & MSG_DONTWAIT) { + apiflags = NETCONN_DONTBLOCK; + } else { + apiflags = 0; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf)); + /* Check if there is data left from the last recv operation. */ + buf = sock->lastdata.netbuf; + if (buf == NULL) { + /* No data was left from the previous operation, so we try to get + some from the network. 
*/ + err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n", + err, (void *)buf)); + + if (err != ERR_OK) { + return err; + } + LWIP_ASSERT("buf != NULL", buf != NULL); + sock->lastdata.netbuf = buf; + } + buflen = buf->p->tot_len; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen)); + + copied = 0; + /* copy the pbuf payload into the iovs */ + for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) { + u16_t len_left = (u16_t)(buflen - copied); + if (msg->msg_iov[i].iov_len > len_left) { + copylen = len_left; + } else { + copylen = (u16_t)msg->msg_iov[i].iov_len; + } + + /* copy the contents of the received buffer into + the supplied memory buffer */ + pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied); + copied = (u16_t)(copied + copylen); + } + + /* Check to see from where the data was.*/ +#if !SOCKETS_DEBUG + if (msg->msg_name && msg->msg_namelen) +#endif /* !SOCKETS_DEBUG */ + { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf)); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied)); + if (msg->msg_name && msg->msg_namelen) { + lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf), + (struct sockaddr *)msg->msg_name, &msg->msg_namelen); + } + } + + /* Initialize flag output */ + msg->msg_flags = 0; + + if (msg->msg_control) { + u8_t wrote_msg = 0; +#if LWIP_NETBUF_RECVINFO + /* Check if packet info was recorded */ + if (buf->flags & NETBUF_FLAG_DESTADDR) { + if (IP_IS_V4(&buf->toaddr)) { +#if LWIP_IPV4 + if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) { + struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! 
*/ + struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr); + chdr->cmsg_level = IPPROTO_IP; + chdr->cmsg_type = IP_PKTINFO; + chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo)); + pkti->ipi_ifindex = buf->p->if_idx; + inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf))); + msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo)); + wrote_msg = 1; + } else { + msg->msg_flags |= MSG_CTRUNC; + } +#endif /* LWIP_IPV4 */ + } + } +#endif /* LWIP_NETBUF_RECVINFO */ + + if (!wrote_msg) { + msg->msg_controllen = 0; + } + } + + /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */ + if ((flags & MSG_PEEK) == 0) { + sock->lastdata.netbuf = NULL; + netbuf_delete(buf); + } + if (datagram_len) { + *datagram_len = buflen; + } + return ERR_OK; +} + +ssize_t +lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen) +{ + struct lwip_sock *sock; + ssize_t ret; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags)); + sock = get_socket(s); + if (!sock) { + return -1; + } +#if LWIP_TCP + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + ret = lwip_recv_tcp(sock, mem, len, flags); + lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret); + done_socket(sock); + return ret; + } else +#endif + { + u16_t datagram_len = 0; + struct iovec vec; + struct msghdr msg; + err_t err; + vec.iov_base = mem; + vec.iov_len = len; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_name = from; + msg.msg_namelen = (fromlen ? *fromlen : 0); + err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX); + if (fromlen) { + *fromlen = msg.msg_namelen; + } + } + + sock_set_errno(sock, 0); + done_socket(sock); + return ret; +} + +ssize_t +lwip_read(int s, void *mem, size_t len) +{ + return lwip_recvfrom(s, mem, len, 0, NULL, NULL); +} + +ssize_t +lwip_readv(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. 
*/ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_recvmsg(s, &msg, 0); +} + +ssize_t +lwip_recv(int s, void *mem, size_t len, int flags) +{ + return lwip_recvfrom(s, mem, len, flags, NULL, NULL); +} + +ssize_t +lwip_recvmsg(int s, struct msghdr *message, int flags) +{ + struct lwip_sock *sock; + int i; + ssize_t buflen; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags)); + LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;); + LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0, + set_errno(EOPNOTSUPP); return -1;); + + if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) { + set_errno(EMSGSIZE); + return -1; + } + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* check for valid vectors */ + buflen = 0; + for (i = 0; i < message->msg_iovlen; i++) { + if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) || + ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) || + ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) { + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len); + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + int recv_flags = flags; + message->msg_flags = 0; + /* recv the data */ + buflen = 0; + for (i = 0; i < message->msg_iovlen; i++) { + /* try to receive into this vector's buffer */ + ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags); + if (recvd_local > 0) { + /* sum up received bytes */ + buflen += recvd_local; + } + if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) || + (flags & MSG_PEEK)) { + /* returned prematurely (or peeking, which might actually be limitated to the first iov) */ + if (buflen <= 0) { + /* nothing received at all, propagate the error */ + buflen = recvd_local; + } + break; + } + /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */ + recv_flags |= MSG_DONTWAIT; + } + if (buflen > 0) { + /* reset socket error since we have received something */ + sock_set_errno(sock, 0); + } + /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." 
*/ + done_socket(sock); + return buflen; +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + u16_t datagram_len = 0; + err_t err; + err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + if (datagram_len > buflen) { + message->msg_flags |= MSG_TRUNC; + } + + sock_set_errno(sock, 0); + done_socket(sock); + return (int)datagram_len; + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +ssize_t +lwip_send(int s, const void *data, size_t size, int flags) +{ + struct lwip_sock *sock; + err_t err; + u8_t write_flags; + size_t written; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n", + s, data, size, flags)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { +#if (LWIP_UDP || LWIP_RAW) + done_socket(sock); + return lwip_sendto(s, data, size, flags, NULL, 0); +#else /* (LWIP_UDP || LWIP_RAW) */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + + write_flags = (u8_t)(NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0)); + written = 0; + err = netconn_write_partly(sock->conn, data, size, write_flags, &written); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */ + return (err == ERR_OK ? (ssize_t)written : -1); +} + +ssize_t +lwip_sendmsg(int s, const struct msghdr *msg, int flags) +{ + struct lwip_sock *sock; +#if LWIP_TCP + u8_t write_flags; + size_t written; +#endif + err_t err = ERR_OK; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX), + sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0, + sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;); + + LWIP_UNUSED_ARG(msg->msg_control); + LWIP_UNUSED_ARG(msg->msg_controllen); + LWIP_UNUSED_ARG(msg->msg_flags); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + write_flags = (u8_t)(NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? 
NETCONN_DONTBLOCK : 0)); + + written = 0; + err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */ + return (err == ERR_OK ? (ssize_t)written : -1); +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + struct netbuf chain_buf; + int i; + ssize_t size = 0; + + LWIP_UNUSED_ARG(flags); + LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) || + IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + + /* initialize chain buffer with destination */ + memset(&chain_buf, 0, sizeof(struct netbuf)); + if (msg->msg_name) { + u16_t remote_port; + SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port); + netbuf_fromport(&chain_buf) = remote_port; + } +#if LWIP_NETIF_TX_SINGLE_PBUF + for (i = 0; i < msg->msg_iovlen; i++) { + size += msg->msg_iov[i].iov_len; + if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) { + /* overflow */ + goto sendmsg_emsgsize; + } + } + if (size > 0xFFFF) { + /* overflow */ + goto sendmsg_emsgsize; + } + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) { + err = ERR_MEM; + } else { + /* flatten the IO vectors */ + size_t offset = 0; + for (i = 0; i < msg->msg_iovlen; i++) { + MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); + offset += msg->msg_iov[i].iov_len; + } +#if LWIP_CHECKSUM_ON_COPY + { + /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */ + u16_t chksum = ~inet_chksum_pbuf(chain_buf.p); + netbuf_set_chksum(&chain_buf, chksum); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* create a chained netbuf from the IO vectors. 
NOTE: we assemble a pbuf chain + manually to avoid having to allocate, chain, and delete a netbuf for each iov */ + for (i = 0; i < msg->msg_iovlen; i++) { + struct pbuf *p; + if (msg->msg_iov[i].iov_len > 0xFFFF) { + /* overflow */ + goto sendmsg_emsgsize; + } + p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (p == NULL) { + err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */ + break; + } + p->payload = msg->msg_iov[i].iov_base; + p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len; + /* netbuf empty, add new pbuf */ + if (chain_buf.p == NULL) { + chain_buf.p = chain_buf.ptr = p; + /* add pbuf to existing pbuf chain */ + } else { + if (chain_buf.p->tot_len + p->len > 0xffff) { + /* overflow */ + pbuf_free(p); + goto sendmsg_emsgsize; + } + pbuf_cat(chain_buf.p, p); + } + } + /* save size of total chain */ + if (err == ERR_OK) { + size = netbuf_len(&chain_buf); + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr)); + IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &chain_buf); + } + + /* deallocated the buffer */ + netbuf_free(&chain_buf); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? size : -1); +sendmsg_emsgsize: + sock_set_errno(sock, EMSGSIZE); + netbuf_free(&chain_buf); + done_socket(sock); + return -1; + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +ssize_t +lwip_sendto(int s, const void *data, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen) +{ + struct lwip_sock *sock; + err_t err; + u16_t short_size; + u16_t remote_port; + struct netbuf buf; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + done_socket(sock); + return lwip_send(s, data, size, flags); +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(flags); + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + + if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) { + /* cannot fit into one datagram (at least for us) */ + sock_set_errno(sock, EMSGSIZE); + done_socket(sock); + return -1; + } + short_size = (u16_t)size; + LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) || + (IS_SOCK_ADDR_LEN_VALID(tolen) && + ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_UNUSED_ARG(tolen); + + /* initialize a buffer */ + buf.p = buf.ptr = NULL; +#if LWIP_CHECKSUM_ON_COPY + buf.flags = 0; +#endif /* LWIP_CHECKSUM_ON_COPY */ + if (to) { + SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port); + } else { + remote_port = 0; + ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr); + } + netbuf_fromport(&buf) = remote_port; + + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=", + s, data, short_size, flags)); + ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port)); + + /* make the buffer point to the data that should be 
sent */ +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(&buf, short_size) == NULL) { + err = ERR_MEM; + } else { +#if LWIP_CHECKSUM_ON_COPY + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) { + u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size); + netbuf_set_chksum(&buf, chksum); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + MEMCPY(buf.p->payload, data, short_size); + } + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + err = netbuf_ref(&buf, data, short_size); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr)); + IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &buf); + } + + /* deallocated the buffer */ + netbuf_free(&buf); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? short_size : -1); +} + +int +lwip_socket(int domain, int type, int protocol) +{ + struct netconn *conn; + int i; + + LWIP_UNUSED_ARG(domain); /* @todo: check this */ + + /* create a netconn */ + switch (type) { + case SOCK_RAW: + conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW), + (u8_t)protocol, DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + case SOCK_DGRAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, + ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)), + DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); +#if LWIP_NETBUF_RECVINFO + if (conn) { + /* netconn layer enables pktinfo by default, sockets default to off */ + conn->flags &= ~NETCONN_FLAG_PKTINFO; + } +#endif /* LWIP_NETBUF_RECVINFO */ + break; + case SOCK_STREAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n", + domain, type, protocol)); + set_errno(EINVAL); + return -1; + } + + if (!conn) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n")); + set_errno(ENOBUFS); + return -1; + } + + i = alloc_socket(conn, 0); + + if (i == -1) { + netconn_delete(conn); + set_errno(ENFILE); + return -1; + } + conn->socket = i; + done_socket(&sockets[i - LWIP_SOCKET_OFFSET]); + LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i)); + set_errno(0); + return i; +} + +ssize_t +lwip_write(int s, const void *data, size_t size) +{ + return lwip_send(s, data, size, 0); +} + +ssize_t +lwip_writev(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. 
*/ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_sendmsg(s, &msg, 0); +} + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +/* Add select_cb to select_cb_list. */ +static void +lwip_link_select_cb(struct lwip_select_cb *select_cb) +{ + LWIP_SOCKET_SELECT_DECL_PROTECT(lev); + + /* Protect the select_cb_list */ + LWIP_SOCKET_SELECT_PROTECT(lev); + + /* Put this select_cb on top of list */ + select_cb->next = select_cb_list; + if (select_cb_list != NULL) { + select_cb_list->prev = select_cb; + } + select_cb_list = select_cb; +#if !LWIP_TCPIP_CORE_LOCKING + /* Increasing this counter tells select_check_waiters that the list has changed. */ + select_cb_ctr++; +#endif + + /* Now we can safely unprotect */ + LWIP_SOCKET_SELECT_UNPROTECT(lev); +} + +/* Remove select_cb from select_cb_list. */ +static void +lwip_unlink_select_cb(struct lwip_select_cb *select_cb) +{ + LWIP_SOCKET_SELECT_DECL_PROTECT(lev); + + /* Take us off the list */ + LWIP_SOCKET_SELECT_PROTECT(lev); + if (select_cb->next != NULL) { + select_cb->next->prev = select_cb->prev; + } + if (select_cb_list == select_cb) { + LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL); + select_cb_list = select_cb->next; + } else { + LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL); + select_cb->prev->next = select_cb->next; + } +#if !LWIP_TCPIP_CORE_LOCKING + /* Increasing this counter tells select_check_waiters that the list has changed. */ + select_cb_ctr++; +#endif + LWIP_SOCKET_SELECT_UNPROTECT(lev); +} +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#if LWIP_SOCKET_SELECT +/** + * Go through the readset and writeset lists and see which socket of the sockets + * set in the sets has events. On return, readset, writeset and exceptset have + * the sockets enabled that had events. + * + * @param maxfdp1 the highest socket index in the sets + * @param readset_in set of sockets to check for read events + * @param writeset_in set of sockets to check for write events + * @param exceptset_in set of sockets to check for error events + * @param readset_out set of sockets that had read events + * @param writeset_out set of sockets that had write events + * @param exceptset_out set os sockets that had error events + * @return number of sockets that had events (read/write/exception) (>= 0) + */ +static int +lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in, + fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out) +{ + int i, nready = 0; + fd_set lreadset, lwriteset, lexceptset; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + FD_ZERO(&lreadset); + FD_ZERO(&lwriteset); + FD_ZERO(&lexceptset); + + /* Go through each socket in each list to count number of sockets which + currently match */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + /* if this FD is not in the set, continue */ + if (!(readset_in && FD_ISSET(i, readset_in)) && + !(writeset_in && FD_ISSET(i, writeset_in)) && + !(exceptset_in && FD_ISSET(i, exceptset_in))) { + continue; + } + /* First get the socket's status (protected)... */ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + void *lastdata = sock->lastdata.pbuf; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + SYS_ARCH_UNPROTECT(lev); + + /* ... 
then examine it: */ + /* See if netconn of this socket is ready for read */ + if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) { + FD_SET(i, &lreadset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i)); + nready++; + } + /* See if netconn of this socket is ready for write */ + if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) { + FD_SET(i, &lwriteset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i)); + nready++; + } + /* See if netconn of this socket had an error */ + if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) { + FD_SET(i, &lexceptset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i)); + nready++; + } + done_socket(sock); + } else { + SYS_ARCH_UNPROTECT(lev); + /* no a valid open socket */ + return -1; + } + } + /* copy local sets to the ones provided as arguments */ + *readset_out = lreadset; + *writeset_out = lwriteset; + *exceptset_out = lexceptset; + + LWIP_ASSERT("nready >= 0", nready >= 0); + return nready; +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Mark all of the set sockets in one of the three fdsets passed to select as used. + * All sockets are marked (and later unmarked), whether they are open or not. + * This is OK as lwip_selscan aborts select when non-open sockets are found. + */ +static void +lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets) +{ + SYS_ARCH_DECL_PROTECT(lev); + if (fdset) { + int i; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) { + /* if this FD is in the set, lock it (unless already done) */ + if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + /* leave the socket used until released by lwip_select_dec_sockets_used */ + FD_SET(i, used_sockets); + } + SYS_ARCH_UNPROTECT(lev); + } + } + } +} + +/* Mark all sockets passed to select as used to prevent them from being freed + * from other threads while select is running. + * Marked sockets are added to 'used_sockets' to mark them only once an be able + * to unmark them correctly. 
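+ * A socket that appears in more than one of the three fd_sets is therefore reference-counted only once here and released exactly once by lwip_select_dec_sockets_used().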
+ */ +static void +lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets) +{ + FD_ZERO(used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets); +} + +/* Let go all sockets that were marked as used when starting select */ +static void +lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets) +{ + int i; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) { + /* if this FD is not in the set, continue */ + if (FD_ISSET(i, used_sockets)) { + struct lwip_sock *sock = tryget_socket_unconn_nouse(i); + LWIP_ASSERT("socket gone at the end of select", sock != NULL); + if (sock != NULL) { + done_socket(sock); + } + } + } +} +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets) +#define lwip_select_dec_sockets_used(maxfdp1, used_sockets) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +int +lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout) +{ + u32_t waitres = 0; + int nready; + fd_set lreadset, lwriteset, lexceptset; + u32_t msectimeout; + int i; + int maxfdp2; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif +#if LWIP_NETCONN_FULLDUPLEX + fd_set used_sockets; +#endif + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n", + maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset, + timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1, + timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1)); + + if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) { + set_errno(EINVAL); + return -1; + } + + lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets); + + /* Go through each socket in each list to count number of sockets which + currently match */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + + if (nready < 0) { + /* one of the sockets in one of the fd_sets was invalid */ + set_errno(EBADF); + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + return -1; + } else if (nready > 0) { + /* one or more sockets are set, no need to wait */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); + } else { + /* If we don't have any current events, then suspend if we are supposed to */ + if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + } else { + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables (unless we're running in MPU compatible + mode). 
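         In MPU-compatible builds API_SELECT_CB_VAR_ALLOC() allocates the select_cb dynamically instead, which is why it is given an error action (setting ENOMEM) below.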
*/ + API_SELECT_CB_VAR_DECLARE(select_cb); + API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1); + memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb)); + + API_SELECT_CB_VAR_REF(select_cb).readset = readset; + API_SELECT_CB_VAR_REF(select_cb).writeset = writeset; + API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset; +#if LWIP_NETCONN_SEM_PER_THREAD + API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(ENOMEM); + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + API_SELECT_CB_VAR_FREE(select_cb); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + + /* Increase select_waiting for each socket we are interested in */ + maxfdp2 = maxfdp1; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + sock->select_waiting++; + if (sock->select_waiting == 0) { + /* overflow - too many threads waiting */ + sock->select_waiting--; + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + set_errno(EBUSY); + break; + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + } else { + /* Not a valid socket */ + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + set_errno(EBADF); + break; + } + } + } + + if (nready >= 0) { + /* Call lwip_selscan again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout == 0) { + /* Wait forever */ + msectimeout = 0; + } else { + long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000)); + if (msecs_long <= 0) { + /* Wait 1ms at least (0 means wait forever) */ + msectimeout = 1; + } else { + msectimeout = (u32_t)msecs_long; + } + } + + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + } + + /* Decrease select_waiting for each socket we are interested in */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + /* for now, handle select_waiting==0... 
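         gracefully, even though it should not occur after the successful increment above; the assert that follows documents the expected invariant.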
*/ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + } else { + SYS_ARCH_UNPROTECT(lev); + /* Not a valid socket */ + nready = -1; + set_errno(EBADF); + } + } + } + + lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + API_SELECT_CB_VAR_FREE(select_cb); + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + } else { + /* See what's set now after waiting */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); + } + } + } + + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + set_errno(0); + if (readset) { + *readset = lreadset; + } + if (writeset) { + *writeset = lwriteset; + } + if (exceptset) { + *exceptset = lexceptset; + } + return nready; +} +#endif /* LWIP_SOCKET_SELECT */ + +#if LWIP_SOCKET_POLL +/** Options for the lwip_pollscan function. */ +enum lwip_pollscan_opts +{ + /** Clear revents in each struct pollfd. */ + LWIP_POLLSCAN_CLEAR = 1, + + /** Increment select_waiting in each struct lwip_sock. */ + LWIP_POLLSCAN_INC_WAIT = 2, + + /** Decrement select_waiting in each struct lwip_sock. */ + LWIP_POLLSCAN_DEC_WAIT = 4 +}; + +/** + * Update revents in each struct pollfd. + * Optionally update select_waiting in struct lwip_sock. + * + * @param fds array of structures to update + * @param nfds number of structures in fds + * @param opts what to update and how + * @return number of structures that have revents != 0 + */ +static int +lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts) +{ + int nready = 0; + nfds_t fdi; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + /* Go through each struct pollfd in the array. */ + for (fdi = 0; fdi < nfds; fdi++) { + if ((opts & LWIP_POLLSCAN_CLEAR) != 0) { + fds[fdi].revents = 0; + } + + /* Negative fd means the caller wants us to ignore this struct. + POLLNVAL means we already detected that the fd is invalid; + if another thread has since opened a new socket with that fd, + we must not use that socket. */ + if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) { + /* First get the socket's status (protected)... 
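         i.e. take a consistent snapshot of lastdata, rcvevent, sendevent and errevent under SYS_ARCH_PROTECT so they can be evaluated after the protection is dropped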
*/ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(fds[fdi].fd); + if (sock != NULL) { + void* lastdata = sock->lastdata.pbuf; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + + if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) { + sock->select_waiting++; + if (sock->select_waiting == 0) { + /* overflow - too many threads waiting */ + sock->select_waiting--; + nready = -1; + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + break; + } + } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) { + /* for now, handle select_waiting==0... */ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + + /* ... then examine it: */ + /* See if netconn of this socket is ready for read */ + if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) { + fds[fdi].revents |= POLLIN; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd)); + } + /* See if netconn of this socket is ready for write */ + if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) { + fds[fdi].revents |= POLLOUT; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd)); + } + /* See if netconn of this socket had an error */ + if (errevent != 0) { + /* POLLERR is output only. */ + fds[fdi].revents |= POLLERR; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd)); + } + } else { + /* Not a valid socket */ + SYS_ARCH_UNPROTECT(lev); + /* POLLNVAL is output only. */ + fds[fdi].revents |= POLLNVAL; + return -1; + } + } + + /* Will return the number of structures that have events, + not the number of events. */ + if (fds[fdi].revents != 0) { + nready++; + } + } + + LWIP_ASSERT("nready >= 0", nready >= 0); + return nready; +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Mark all sockets as used. + * + * All sockets are marked (and later unmarked), whether they are open or not. + * This is OK as lwip_pollscan aborts select when non-open sockets are found. + */ +static void +lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds) +{ + nfds_t fdi; + + if(fds) { + /* Go through each struct pollfd in the array. */ + for (fdi = 0; fdi < nfds; fdi++) { + /* Increase the reference counter */ + tryget_socket_unconn(fds[fdi].fd); + } + } +} + +/* Let go all sockets that were marked as used when starting poll */ +static void +lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds) +{ + nfds_t fdi; + + if(fds) { + /* Go through each struct pollfd in the array. 
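         tryget_socket_unconn_nouse() looks each socket up without taking another reference, so the done_socket() below releases exactly the reference taken by lwip_poll_inc_sockets_used().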
*/ + for (fdi = 0; fdi < nfds; fdi++) { + struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd); + if (sock != NULL) { + done_socket(sock); + } + } + } +} +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define lwip_poll_inc_sockets_used(fds, nfds) +#define lwip_poll_dec_sockets_used(fds, nfds) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +int +lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout) +{ + u32_t waitres = 0; + int nready; + u32_t msectimeout; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n", + (void*)fds, (int)nfds, timeout)); + LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)), + set_errno(EINVAL); return -1;); + + lwip_poll_inc_sockets_used(fds, nfds); + + /* Go through each struct pollfd to count number of structures + which currently match */ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR); + + if (nready < 0) { + lwip_poll_dec_sockets_used(fds, nfds); + return -1; + } + + /* If we don't have any current events, then suspend if we are supposed to */ + if (!nready) { + API_SELECT_CB_VAR_DECLARE(select_cb); + + if (timeout == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n")); + goto return_success; + } + API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1); + memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb)); + + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables. */ + + API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds; + API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds; +#if LWIP_NETCONN_SEM_PER_THREAD + API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(EAGAIN); + lwip_poll_dec_sockets_used(fds, nfds); + API_SELECT_CB_VAR_FREE(select_cb); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + + /* Increase select_waiting for each socket we are interested in. + Also, check for events again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT); + + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout < 0) { + /* Wait forever */ + msectimeout = 0; + } else { + /* timeout == 0 would have been handled earlier. */ + LWIP_ASSERT("timeout > 0", timeout > 0); + msectimeout = timeout; + } + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + + /* Decrease select_waiting for each socket we are interested in, + and check which events occurred while we waited. 
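       This scan also updates revents; its result is what lwip_poll ultimately returns, and a negative result here means a socket was closed while we waited.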
*/ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT); + + lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (select_cb.sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + API_SELECT_CB_VAR_FREE(select_cb); + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + lwip_poll_dec_sockets_used(fds, nfds); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n")); + goto return_success; + } + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready)); +return_success: + lwip_poll_dec_sockets_used(fds, nfds); + set_errno(0); + return nready; +} + +/** + * Check whether event_callback should wake up a thread waiting in + * lwip_poll. + */ +static int +lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent) +{ + nfds_t fdi; + for (fdi = 0; fdi < scb->poll_nfds; fdi++) { + const struct pollfd *pollfd = &scb->poll_fds[fdi]; + if (pollfd->fd == fd) { + /* Do not update pollfd->revents right here; + that would be a data race because lwip_pollscan + accesses revents without protecting. */ + if (has_recvevent && (pollfd->events & POLLIN) != 0) { + return 1; + } + if (has_sendevent && (pollfd->events & POLLOUT) != 0) { + return 1; + } + if (has_errevent) { + /* POLLERR is output only. */ + return 1; + } + } + } + return 0; +} +#endif /* LWIP_SOCKET_POLL */ + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +/** + * Callback registered in the netconn layer for each socket-netconn. + * Processes recvevent (data available) and wakes up tasks waiting for select. + * + * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function + * must have the core lock held when signaling the following events + * as they might cause select_list_cb to be checked: + * NETCONN_EVT_RCVPLUS + * NETCONN_EVT_SENDPLUS + * NETCONN_EVT_ERROR + * This requirement will be asserted in select_check_waiters() + */ +static void +event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len) +{ + int s, check_waiters; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_UNUSED_ARG(len); + + /* Get socket */ + if (conn) { + s = conn->socket; + if (s < 0) { + /* Data comes in right away after an accept, even though + * the server task might not have created a new socket yet. + * Just count down (or up) if that's the case and we + * will use the data later. Note that only receive events + * can happen before the new socket is set up. 
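       For example, after two early receive events conn->socket is -3, and lwip_accept() later converts that back into a pending receive count of -1 - (-3) = 2 for the new socket.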
*/ + SYS_ARCH_PROTECT(lev); + if (conn->socket < 0) { + if (evt == NETCONN_EVT_RCVPLUS) { + /* conn->socket is -1 on initialization + lwip_accept adjusts sock->recvevent if conn->socket < -1 */ + conn->socket--; + } + SYS_ARCH_UNPROTECT(lev); + return; + } + s = conn->socket; + SYS_ARCH_UNPROTECT(lev); + } + + sock = get_socket(s); + if (!sock) { + return; + } + } else { + return; + } + + check_waiters = 1; + SYS_ARCH_PROTECT(lev); + /* Set event as required */ + switch (evt) { + case NETCONN_EVT_RCVPLUS: + sock->rcvevent++; + if (sock->rcvevent > 1) { + check_waiters = 0; + } + break; + case NETCONN_EVT_RCVMINUS: + sock->rcvevent--; + check_waiters = 0; + break; + case NETCONN_EVT_SENDPLUS: + if (sock->sendevent) { + check_waiters = 0; + } + sock->sendevent = 1; + break; + case NETCONN_EVT_SENDMINUS: + sock->sendevent = 0; + check_waiters = 0; + break; + case NETCONN_EVT_ERROR: + sock->errevent = 1; + break; + default: + LWIP_ASSERT("unknown event", 0); + break; + } + + if (sock->select_waiting && check_waiters) { + /* Save which events are active */ + int has_recvevent, has_sendevent, has_errevent; + has_recvevent = sock->rcvevent > 0; + has_sendevent = sock->sendevent != 0; + has_errevent = sock->errevent != 0; + SYS_ARCH_UNPROTECT(lev); + /* Check any select calls waiting on this socket */ + select_check_waiters(s, has_recvevent, has_sendevent, has_errevent); + } else { + SYS_ARCH_UNPROTECT(lev); + } + done_socket(sock); +} + +/** + * Check if any select waiters are waiting on this socket and its events + * + * @note on synchronization of select_cb_list: + * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding + * the core lock. We do a single pass through the list and signal any waiters. + * Core lock should already be held when calling here!!!! + + * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration + * of the loop, thus creating a possibility where a thread could modify the + * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to + * detect this change and restart the list walk. 
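(select_cb_ctr is incremented by lwip_link_select_cb() and lwip_unlink_select_cb() whenever the list changes.)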
The list is expected to be small + */ +static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent) +{ + struct lwip_select_cb *scb; +#if !LWIP_TCPIP_CORE_LOCKING + int last_select_cb_ctr; + SYS_ARCH_DECL_PROTECT(lev); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if !LWIP_TCPIP_CORE_LOCKING + SYS_ARCH_PROTECT(lev); +again: + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + for (scb = select_cb_list; scb != NULL; scb = scb->next) { + if (scb->sem_signalled == 0) { + /* semaphore not signalled yet */ + int do_signal = 0; +#if LWIP_SOCKET_POLL + if (scb->poll_fds != NULL) { + do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent); + } +#endif /* LWIP_SOCKET_POLL */ +#if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL + else +#endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */ +#if LWIP_SOCKET_SELECT + { + /* Test this select call for our socket */ + if (has_recvevent) { + if (scb->readset && FD_ISSET(s, scb->readset)) { + do_signal = 1; + } + } + if (has_sendevent) { + if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) { + do_signal = 1; + } + } + if (has_errevent) { + if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) { + do_signal = 1; + } + } + } +#endif /* LWIP_SOCKET_SELECT */ + if (do_signal) { + scb->sem_signalled = 1; + /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling + the semaphore, as this might lead to the select thread taking itself off the list, + invalidating the semaphore. */ + sys_sem_signal(SELECT_SEM_PTR(scb->sem)); + } + } +#if LWIP_TCPIP_CORE_LOCKING + } +#else + /* unlock interrupts with each step */ + SYS_ARCH_UNPROTECT(lev); + /* this makes sure interrupt protection time is short */ + SYS_ARCH_PROTECT(lev); + if (last_select_cb_ctr != select_cb_ctr) { + /* someone has changed select_cb_list, restart at the beginning */ + goto again; + } + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; + } + SYS_ARCH_UNPROTECT(lev); +#endif +} +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +/** + * Close one end of a full-duplex connection. + */ +int +lwip_shutdown(int s, int how) +{ + struct lwip_sock *sock; + err_t err; + u8_t shut_rx = 0, shut_tx = 0; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + done_socket(sock); + return -1; + } + } else { + sock_set_errno(sock, ENOTCONN); + done_socket(sock); + return -1; + } + + if (how == SHUT_RD) { + shut_rx = 1; + } else if (how == SHUT_WR) { + shut_tx = 1; + } else if (how == SHUT_RDWR) { + shut_rx = 1; + shut_tx = 1; + } else { + sock_set_errno(sock, EINVAL); + done_socket(sock); + return -1; + } + err = netconn_shutdown(sock->conn, shut_rx, shut_tx); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? 
0 : -1); +} + +static int +lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local) +{ + struct lwip_sock *sock; + union sockaddr_aligned saddr; + ip_addr_t naddr; + u16_t port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* get the IP address and port */ + err = netconn_getaddr(sock->conn, &naddr, &port, local); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) && + IP_IS_V4_VAL(naddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr)); + IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port)); + + if (*namelen > saddr.sa.sa_len) { + *namelen = saddr.sa.sa_len; + } + MEMCPY(name, &saddr, *namelen); + + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +int +lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 0); +} + +int +lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 1); +} + +int +lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + int err; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + err_t cberr; + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if ((NULL == optval) || (NULL == optlen)) { + sock_set_errno(sock, EFAULT); + done_socket(sock); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_getsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + done_socket(sock); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen; +#if !LWIP_MPU_COMPATIBLE + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval; +#endif /* !LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (cberr != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(cberr)); + done_socket(sock); + return -1; + } + sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* write back optlen and optval */ + *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, + 
LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen); +#endif /* LWIP_MPU_COMPATIBLE */ + + /* maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + done_socket(sock); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_getsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_getsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data *)arg; + + data->err = lwip_getsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.p, +#endif /* LWIP_MPU_COMPATIBLE */ + &data->optlen); + + sys_sem_signal((sys_sem_t *)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +static int +lwip_sockopt_to_ipopt(int optname) +{ + /* Map SO_* values to our internal SOF_* values + * We should not rely on #defines in socket.h + * being in sync with ip.h. + */ + switch (optname) { + case SO_BROADCAST: + return SOF_BROADCAST; + case SO_KEEPALIVE: + return SOF_KEEPALIVE; + case SO_REUSEADDR: + return SOF_REUSEADDR; + default: + LWIP_ASSERT("Unknown socket option", 0); + return 0; + } +} + +/** lwip_getsockopt_impl: the actual implementation of getsockopt: + * same argument as lwip_getsockopt, either called directly or through callback + */ +static int +lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + int err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + +#ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT + if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) { + return err; + } +#endif + + switch (level) { + + /* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + +#if LWIP_TCP + case SO_ACCEPTCONN: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) { + done_socket(sock); + return ENOPROTOOPT; + } + if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) { + *(int *)optval = 1; + } else { + *(int *)optval = 0; + } + break; +#endif /* LWIP_TCP */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + if ((optname == SO_BROADCAST) && + (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) { + done_socket(sock); + return ENOPROTOOPT; + } + + optname = lwip_sockopt_to_ipopt(optname); + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n", + s, optname, (*(int *)optval ? 
"on" : "off"))); + break; + + case SO_TYPE: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { + case NETCONN_RAW: + *(int *)optval = SOCK_RAW; + break; + case NETCONN_TCP: + *(int *)optval = SOCK_STREAM; + break; + case NETCONN_UDP: + *(int *)optval = SOCK_DGRAM; + break; + default: /* unrecognized socket type */ + *(int *)optval = netconn_type(sock->conn); + LWIP_DEBUGF(SOCKETS_DEBUG, + ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n", + s, *(int *)optval)); + } /* switch (netconn_type(sock->conn)) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n", + s, *(int *)optval)); + break; + + case SO_ERROR: + LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int); + *(int *)optval = err_to_errno(netconn_err(sock->conn)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn)); + break; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn)); + break; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = netconn_get_recvbufsize(sock->conn); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: { + s16_t conn_linger; + struct linger *linger = (struct linger *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger); + conn_linger = sock->conn->linger; + if (conn_linger >= 0) { + linger->l_onoff = 1; + linger->l_linger = (int)conn_linger; + } else { + linger->l_onoff = 0; + linger->l_linger = 0; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) { + /* this flag is only available for UDP, not for UDP lite */ + done_socket(sock); + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 
1 : 0; + break; +#endif /* LWIP_UDP*/ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + + /* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = sock->conn->pcb.ip->ttl; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = sock->conn->pcb.ip->tos; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n", + s, *(int *)optval)); + break; +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + done_socket(sock); + return ENOPROTOOPT; + } + *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_MULTICAST_IF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + done_socket(sock); + return ENOPROTOOPT; + } + inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n", + s, *(u32_t *)optval)); + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) { + *(u8_t *)optval = 1; + } else { + *(u8_t *)optval = 0; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP + /* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + done_socket(sock); + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n", + s, (*(int *)optval) ? 
"on" : "off") ); + break; + case TCP_KEEPALIVE: + *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPINTVL: + *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPCNT: + *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 + /* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n", + s, *(int *)optval)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + /* If this is no UDP lite socket, ignore any options. 
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + done_socket(sock); + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + *(int *)optval = sock->conn->pcb.udp->chksum_len_tx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n", + s, (*(int *)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + *(int *)optval = sock->conn->pcb.udp->chksum_len_rx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n", + s, (*(int *)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW); + if (sock->conn->pcb.raw->chksum_reqd == 0) { + *(int *)optval = -1; + } else { + *(int *)optval = sock->conn->pcb.raw->chksum_offset; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n", + s, (*(int *)optval)) ); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + done_socket(sock); + return err; +} + +int +lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + int err = 0; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + err_t cberr; + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if (NULL == optval) { + sock_set_errno(sock, EFAULT); + done_socket(sock); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_setsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + done_socket(sock); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen); +#else /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval; +#endif /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (cberr != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(cberr)); + 
done_socket(sock); + return -1; + } + sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + done_socket(sock); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_setsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_setsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data *)arg; + + data->err = lwip_setsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.pc, +#endif /* LWIP_MPU_COMPATIBLE */ + data->optlen); + + sys_sem_signal((sys_sem_t *)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +/** lwip_setsockopt_impl: the actual implementation of setsockopt: + * same argument as lwip_setsockopt, either called directly or through callback + */ +static int +lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + int err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + +#ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT + if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) { + return err; + } +#endif + + switch (level) { + + /* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + + /* SO_ACCEPTCONN is get-only */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + if ((optname == SO_BROADCAST) && + (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) { + done_socket(sock); + return ENOPROTOOPT; + } + + optname = lwip_sockopt_to_ipopt(optname); + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int *)optval) { + ip_set_option(sock->conn->pcb.ip, optname); + } else { + ip_reset_option(sock->conn->pcb.ip, optname); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n", + s, optname, (*(const int *)optval ? 
"on" : "off"))); + break; + + /* SO_TYPE is get-only */ + /* SO_ERROR is get-only */ + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: { + long ms_long; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval); + if (ms_long < 0) { + done_socket(sock); + return EINVAL; + } + netconn_set_sendtimeout(sock->conn, ms_long); + break; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: { + long ms_long; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval); + if (ms_long < 0) { + done_socket(sock); + return EINVAL; + } + netconn_set_recvtimeout(sock->conn, (u32_t)ms_long); + break; + } +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int); + netconn_set_recvbufsize(sock->conn, *(const int *)optval); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: { + const struct linger *linger = (const struct linger *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger); + if (linger->l_onoff) { + int lingersec = linger->l_linger; + if (lingersec < 0) { + done_socket(sock); + return EINVAL; + } + if (lingersec > 0xFFFF) { + lingersec = 0xFFFF; + } + sock->conn->linger = (s16_t)lingersec; + } else { + sock->conn->linger = -1; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) { + /* this flag is only available for UDP, not for UDP lite */ + done_socket(sock); + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + if (*(const int *)optval) { + udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } else { + udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } + break; +#endif /* LWIP_UDP */ + case SO_BINDTODEVICE: { + const struct ifreq *iface; + struct netif *n = NULL; + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq); + + iface = (const struct ifreq *)optval; + if (iface->ifr_name[0] != 0) { + n = netif_find(iface->ifr_name); + if (n == NULL) { + done_socket(sock); + return ENODEV; + } + } + + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { +#if LWIP_TCP + case NETCONN_TCP: + tcp_bind_netif(sock->conn->pcb.tcp, n); + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: + udp_bind_netif(sock->conn->pcb.udp, n); + break; +#endif +#if LWIP_RAW + case NETCONN_RAW: + raw_bind_netif(sock->conn->pcb.raw, n); + break; +#endif + default: + LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0); + break; + } + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + + /* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) 
-> %d\n", + s, sock->conn->pcb.ip->ttl)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n", + s, sock->conn->pcb.ip->tos)); + break; +#if LWIP_NETBUF_RECVINFO + case IP_PKTINFO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); + if (*(const int *)optval) { + sock->conn->flags |= NETCONN_FLAG_PKTINFO; + } else { + sock->conn->flags &= ~NETCONN_FLAG_PKTINFO; + } + break; +#endif /* LWIP_NETBUF_RECVINFO */ +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval)); + break; + case IP_MULTICAST_IF: { + ip4_addr_t if_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval); + udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr); + } + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + if (*(const u8_t *)optval) { + udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP); + } else { + udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP); + } + break; +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */ +#if LWIP_IGMP + case IP_ADD_MEMBERSHIP: + case IP_DROP_MEMBERSHIP: { + /* If this is a TCP or a RAW socket, ignore these options. */ + err_t igmp_err; + const struct ip_mreq *imr = (const struct ip_mreq *)optval; + ip4_addr_t if_addr; + ip4_addr_t multi_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, &imr->imr_interface); + inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr); + if (optname == IP_ADD_MEMBERSHIP) { + if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + igmp_err = ERR_OK; + } else { + igmp_err = igmp_joingroup(&if_addr, &multi_addr); + } + } else { + igmp_err = igmp_leavegroup(&if_addr, &multi_addr); + lwip_socket_unregister_membership(s, &if_addr, &multi_addr); + } + if (igmp_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IGMP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP + /* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + done_socket(sock); + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + if (*(const int *)optval) { + tcp_nagle_disable(sock->conn->pcb.tcp); + } else { + tcp_nagle_enable(sock->conn->pcb.tcp); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n", + s, (*(const int *)optval) ? 
"on" : "off") ); + break; + case TCP_KEEPALIVE: + sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + case TCP_KEEPINTVL: + sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_intvl)); + break; + case TCP_KEEPCNT: + sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_cnt)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP*/ + +#if LWIP_IPV6 + /* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int *)optval) { + netconn_set_ipv6only(sock->conn, 1); + } else { + netconn_set_ipv6only(sock->conn, 0); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n", + s, (netconn_get_ipv6only(sock->conn) ? 1 : 0))); + break; +#if LWIP_IPV6_MLD + case IPV6_JOIN_GROUP: + case IPV6_LEAVE_GROUP: { + /* If this is a TCP or a RAW socket, ignore these options. */ + err_t mld6_err; + struct netif *netif; + ip6_addr_t multi_addr; + const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP); + inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr); + LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu); + netif = netif_get_by_index((u8_t)imr->ipv6mr_interface); + if (netif == NULL) { + err = EADDRNOTAVAIL; + break; + } + + if (optname == IPV6_JOIN_GROUP) { + if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + mld6_err = ERR_OK; + } else { + mld6_err = mld6_joingroup_netif(netif, &multi_addr); + } + } else { + mld6_err = mld6_leavegroup_netif(netif, &multi_addr); + lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr); + } + if (mld6_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IPV6_MLD */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + /* If this is no UDP lite socket, ignore any options. 
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + done_socket(sock); + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_tx = 8; + } else { + sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n", + s, (*(const int *)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_rx = 8; + } else { + sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n", + s, (*(const int *)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + /* It should not be possible to disable the checksum generation with ICMPv6 + * as per RFC 3542 chapter 3.1 */ + if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) { + done_socket(sock); + return EINVAL; + } + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW); + if (*(const int *)optval < 0) { + sock->conn->pcb.raw->chksum_reqd = 0; + } else if (*(const int *)optval & 1) { + /* Per RFC3542, odd offsets are not allowed */ + done_socket(sock); + return EINVAL; + } else { + sock->conn->pcb.raw->chksum_reqd = 1; + sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) 
-> %d\n", + s, sock->conn->pcb.raw->chksum_reqd)); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + done_socket(sock); + return err; +} + +int +lwip_ioctl(int s, long cmd, void *argp) +{ + struct lwip_sock *sock = get_socket(s); + u8_t val; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + if (!sock) { + return -1; + } + + switch (cmd) { +#if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE + case FIONREAD: + if (!argp) { + sock_set_errno(sock, EINVAL); + done_socket(sock); + return -1; + } +#if LWIP_FIONREAD_LINUXMODE + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + struct netbuf *nb; + if (sock->lastdata.netbuf) { + nb = sock->lastdata.netbuf; + *((int *)argp) = nb->p->tot_len; + } else { + struct netbuf *rxbuf; + err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK); + if (err != ERR_OK) { + *((int *)argp) = 0; + } else { + sock->lastdata.netbuf = rxbuf; + *((int *)argp) = rxbuf->p->tot_len; + } + } + done_socket(sock); + return 0; + } +#endif /* LWIP_FIONREAD_LINUXMODE */ + +#if LWIP_SO_RCVBUF + /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */ + SYS_ARCH_GET(sock->conn->recv_avail, recv_avail); + if (recv_avail < 0) { + recv_avail = 0; + } + + /* Check if there is data left from the last recv operation. /maq 041215 */ + if (sock->lastdata.netbuf) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + recv_avail += sock->lastdata.pbuf->tot_len; + } else { + recv_avail += sock->lastdata.netbuf->p->tot_len; + } + } + *((int *)argp) = recv_avail; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp))); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +#else /* LWIP_SO_RCVBUF */ + break; +#endif /* LWIP_SO_RCVBUF */ +#endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */ + + case (long)FIONBIO: + val = 0; + if (argp && *(int *)argp) { + val = 1; + } + netconn_set_nonblocking(sock->conn, val); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; + + default: + break; + } /* switch (cmd) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + done_socket(sock); + return -1; +} + +/** A minimal implementation of fcntl. + * Currently only the commands F_GETFL and F_SETFL are implemented. + * The flag O_NONBLOCK and access modes are supported for F_GETFL, only + * the flag O_NONBLOCK is implemented for F_SETFL. + */ +int +lwip_fcntl(int s, int cmd, int val) +{ + struct lwip_sock *sock = get_socket(s); + int ret = -1; + int op_mode = 0; + + if (!sock) { + return -1; + } + + switch (cmd) { + case F_GETFL: + ret = netconn_is_nonblocking(sock->conn) ? 
O_NONBLOCK : 0; + sock_set_errno(sock, 0); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); +#else + SYS_ARCH_DECL_PROTECT(lev); + /* the proper thing to do here would be to get into the tcpip_thread, + but locking should be OK as well since we only *read* some flags */ + SYS_ARCH_PROTECT(lev); +#endif +#if LWIP_TCP + if (sock->conn->pcb.tcp) { + if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) { + op_mode |= O_RDONLY; + } + if (!(sock->conn->pcb.tcp->flags & TF_FIN)) { + op_mode |= O_WRONLY; + } + } +#endif +#if LWIP_TCPIP_CORE_LOCKING + UNLOCK_TCPIP_CORE(); +#else + SYS_ARCH_UNPROTECT(lev); +#endif + } else { + op_mode |= O_RDWR; + } + + /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */ + ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode; + + break; + case F_SETFL: + /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */ + val &= ~(O_RDONLY | O_WRONLY | O_RDWR); + if ((val & ~O_NONBLOCK) == 0) { + /* only O_NONBLOCK, all other bits are zero */ + netconn_set_nonblocking(sock->conn, val & O_NONBLOCK); + ret = 0; + sock_set_errno(sock, 0); + } else { + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + break; + } + done_socket(sock); + return ret; +} + +#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES +int +fcntl(int s, int cmd, ...) +{ + va_list ap; + int val; + + va_start(ap, cmd); + val = va_arg(ap, int); + va_end(ap); + return lwip_fcntl(s, cmd, val); +} +#endif + +const char * +lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size) +{ + const char *ret = NULL; + int size_int = (int)size; + if (size_int < 0) { + set_errno(ENOSPC); + return NULL; + } + switch (af) { +#if LWIP_IPV4 + case AF_INET: + ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int); + if (ret == NULL) { + set_errno(ENOSPC); + } + break; +#endif +#if LWIP_IPV6 + case AF_INET6: + ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int); + if (ret == NULL) { + set_errno(ENOSPC); + } + break; +#endif + default: + set_errno(EAFNOSUPPORT); + break; + } + return ret; +} + +int +lwip_inet_pton(int af, const char *src, void *dst) +{ + int err; + switch (af) { +#if LWIP_IPV4 + case AF_INET: + err = ip4addr_aton(src, (ip4_addr_t *)dst); + break; +#endif +#if LWIP_IPV6 + case AF_INET6: { + /* convert into temporary variable since ip6_addr_t might be larger + than in6_addr when scopes are enabled */ + ip6_addr_t addr; + err = ip6addr_aton(src, &addr); + if (err) { + memcpy(dst, &addr.addr, sizeof(addr.addr)); + } + break; + } +#endif + default: + err = -1; + set_errno(EAFNOSUPPORT); + break; + } + return err; +} + +#if LWIP_IGMP +/** Register a new IGMP membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). 
+ * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == NULL) { + socket_ipv4_multicast_memberships[i].sock = sock; + ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr); + ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr); + done_socket(sock); + return 1; + } + } + done_socket(sock); + return 0; +} + +/** Unregister a previously registered membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv4_multicast_memberships[i].sock == sock) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + break; + } + } + done_socket(sock); +} + +/** Drop all memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_drop_registered_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr, if_addr; + ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr); + ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr); + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + + netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE); + } + } + done_socket(sock); +} +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6_MLD +/** Register a new MLD6 membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv6_multicast_memberships[i].sock == NULL) { + socket_ipv6_multicast_memberships[i].sock = sock; + socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx; + ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr); + done_socket(sock); + return 1; + } + } + done_socket(sock); + return 0; +} + +/** Unregister a previously registered MLD6 membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). 
+ */ +static void +lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv6_multicast_memberships[i].sock == sock) && + (socket_ipv6_multicast_memberships[i].if_idx == if_idx) && + ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv6_multicast_memberships[i].sock = NULL; + socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX; + ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr); + break; + } + } + done_socket(sock); +} + +/** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_drop_registered_mld6_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv6_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr; + u8_t if_idx; + + ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr); + if_idx = socket_ipv6_multicast_memberships[i].if_idx; + + socket_ipv6_multicast_memberships[i].sock = NULL; + socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX; + ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr); + + netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE); + } + } + done_socket(sock); +} +#endif /* LWIP_IPV6_MLD */ + +#endif /* LWIP_SOCKET */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/tcpip.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/tcpip.c new file mode 100644 index 000000000..743553a58 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/api/tcpip.c @@ -0,0 +1,658 @@ +/** + * @file + * Sequential API Main thread module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcpip_priv.h" +#include "lwip/sys.h" +#include "lwip/memp.h" +#include "lwip/mem.h" +#include "lwip/init.h" +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/etharp.h" +#include "netif/ethernet.h" + +#define TCPIP_MSG_VAR_REF(name) API_VAR_REF(name) +#define TCPIP_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct tcpip_msg, name) +#define TCPIP_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct tcpip_msg, MEMP_TCPIP_MSG_API, name, ERR_MEM) +#define TCPIP_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_TCPIP_MSG_API, name) + +/* global variables */ +static tcpip_init_done_fn tcpip_init_done; +static void *tcpip_init_done_arg; +static sys_mbox_t tcpip_mbox; + +#if LWIP_TCPIP_CORE_LOCKING +/** The global semaphore to lock the stack. */ +sys_mutex_t lock_tcpip_core; +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +static void tcpip_thread_handle_msg(struct tcpip_msg *msg); + +#if !LWIP_TIMERS +/* wait for a message with timers disabled (e.g. pass a timer-check trigger into tcpip_thread) */ +#define TCPIP_MBOX_FETCH(mbox, msg) sys_mbox_fetch(mbox, msg) +#else /* !LWIP_TIMERS */ +/* wait for a message, timeouts are processed while waiting */ +#define TCPIP_MBOX_FETCH(mbox, msg) tcpip_timeouts_mbox_fetch(mbox, msg) +/** + * Wait (forever) for a message to arrive in an mbox. + * While waiting, timeouts are processed. + * + * @param mbox the mbox to fetch the message from + * @param msg the place to store the message + */ +static void +tcpip_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg) +{ + u32_t sleeptime, res; + +again: + LWIP_ASSERT_CORE_LOCKED(); + + sleeptime = sys_timeouts_sleeptime(); + if (sleeptime == SYS_TIMEOUTS_SLEEPTIME_INFINITE) { + UNLOCK_TCPIP_CORE(); + sys_arch_mbox_fetch(mbox, msg, 0); + LOCK_TCPIP_CORE(); + return; + } else if (sleeptime == 0) { + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. */ + goto again; + } + + UNLOCK_TCPIP_CORE(); + res = sys_arch_mbox_fetch(mbox, msg, sleeptime); + LOCK_TCPIP_CORE(); + if (res == SYS_ARCH_TIMEOUT) { + /* If a SYS_ARCH_TIMEOUT value is returned, a timeout occurred + before a message could be fetched. */ + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. */ + goto again; + } +} +#endif /* !LWIP_TIMERS */ + +/** + * The main lwIP thread. This thread has exclusive access to lwIP core functions + * (unless access to them is not locked). Other threads communicate with this + * thread using message boxes. + * + * It also starts all the timers to make sure they are running in the right + * thread context. 
+ * + * @param arg unused argument + */ +static void +tcpip_thread(void *arg) +{ + struct tcpip_msg *msg; + LWIP_UNUSED_ARG(arg); + + LWIP_MARK_TCPIP_THREAD(); + + LOCK_TCPIP_CORE(); + if (tcpip_init_done != NULL) { + tcpip_init_done(tcpip_init_done_arg); + } + + while (1) { /* MAIN Loop */ + LWIP_TCPIP_THREAD_ALIVE(); + /* wait for a message, timeouts are processed while waiting */ + TCPIP_MBOX_FETCH(&tcpip_mbox, (void **)&msg); + if (msg == NULL) { + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: NULL\n")); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + continue; + } + tcpip_thread_handle_msg(msg); + } +} + +/* Handle a single tcpip_msg + * This is in its own function for access by tests only. + */ +static void +tcpip_thread_handle_msg(struct tcpip_msg *msg) +{ + switch (msg->type) { +#if !LWIP_TCPIP_CORE_LOCKING + case TCPIP_MSG_API: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg)); + msg->msg.api_msg.function(msg->msg.api_msg.msg); + break; + case TCPIP_MSG_API_CALL: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API CALL message %p\n", (void *)msg)); + msg->msg.api_call.arg->err = msg->msg.api_call.function(msg->msg.api_call.arg); + sys_sem_signal(msg->msg.api_call.sem); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + case TCPIP_MSG_INPKT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg)); + if (msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif) != ERR_OK) { + pbuf_free(msg->msg.inp.p); + } + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + case TCPIP_MSG_TIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg)); + sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + case TCPIP_MSG_UNTIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg)); + sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + case TCPIP_MSG_CALLBACK: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + + case TCPIP_MSG_CALLBACK_STATIC: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + break; + + default: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type)); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + break; + } +} + +#ifdef TCPIP_THREAD_TEST +/** Work on queued items in single-threaded test mode */ +int +tcpip_thread_poll_one(void) +{ + int ret = 0; + struct tcpip_msg *msg; + + if (sys_arch_mbox_tryfetch(&tcpip_mbox, (void **)&msg) != SYS_ARCH_TIMEOUT) { + LOCK_TCPIP_CORE(); + if (msg != NULL) { + tcpip_thread_handle_msg(msg); + ret = 1; + } + UNLOCK_TCPIP_CORE(); + } + return ret; +} +#endif + +/** + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet + * @param inp the network interface on which the packet was received + * @param input_fn input function to call + */ +err_t +tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn) +{ +#if LWIP_TCPIP_CORE_LOCKING_INPUT + err_t ret; + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_inpkt: PACKET %p/%p\n", (void *)p, (void *)inp)); + LOCK_TCPIP_CORE(); + ret = input_fn(p, inp); + UNLOCK_TCPIP_CORE(); + return 
ret; +#else /* LWIP_TCPIP_CORE_LOCKING_INPUT */ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_INPKT); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_INPKT; + msg->msg.inp.p = p; + msg->msg.inp.netif = inp; + msg->msg.inp.input_fn = input_fn; + if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + return ERR_MEM; + } + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING_INPUT */ +} + +/** + * @ingroup lwip_os + * Pass a received packet to tcpip_thread for input processing with + * ethernet_input or ip_input. Don't call directly, pass to netif_add() + * and call netif->input(). + * + * @param p the received packet, p->payload pointing to the Ethernet header or + * to an IP header (if inp doesn't have NETIF_FLAG_ETHARP or + * NETIF_FLAG_ETHERNET flags) + * @param inp the network interface on which the packet was received + */ +err_t +tcpip_input(struct pbuf *p, struct netif *inp) +{ +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return tcpip_inpkt(p, inp, ethernet_input); + } else +#endif /* LWIP_ETHERNET */ + return tcpip_inpkt(p, inp, ip_input); +} + +/** + * @ingroup lwip_os + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. + * Blocks until the request is posted. + * Must not be called from interrupt context! + * + * @param function the function to call + * @param ctx parameter passed to f + * @return ERR_OK if the function was called, another err_t if not + * + * @see tcpip_try_callback + */ +err_t +tcpip_callback(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} + +/** + * @ingroup lwip_os + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. + * Does NOT block when the request cannot be posted because the + * tcpip_mbox is full, but returns ERR_MEM instead. + * Can be called from interrupt context. 
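+ *
+ * Usage sketch (illustrative only; the handler name and the error handling
+ * below are hypothetical, not part of the lwIP API):
+ *
+ *   static void deferred_work(void *ctx)
+ *   {
+ *     // runs later in tcpip_thread context, so it may call lwIP core code
+ *   }
+ *
+ *   // e.g. from an ISR or from a non-lwIP thread:
+ *   if (tcpip_try_callback(deferred_work, NULL) != ERR_OK) {
+ *     // tcpip_mbox full or message allocation failed; retry later or drop
+ *   }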
+ * + * @param function the function to call + * @param ctx parameter passed to f + * @return ERR_OK if the function was called, another err_t if not + * + * @see tcpip_callback + */ +err_t +tcpip_try_callback(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + + if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_API, msg); + return ERR_MEM; + } + return ERR_OK; +} + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +/** + * call sys_timeout in tcpip_thread + * + * @param msecs time in milliseconds for timeout + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_TIMEOUT; + msg->msg.tmo.msecs = msecs; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} + +/** + * call sys_untimeout in tcpip_thread + * + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_untimeout(sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_UNTIMEOUT; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + +/** + * Sends a message to TCPIP thread to call a function. Caller thread blocks on + * on a provided semaphore, which ist NOT automatically signalled by TCPIP thread, + * this has to be done by the user. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING since this is the way + * with least runtime overhead. + * + * @param fn function to be called from TCPIP thread + * @param apimsg argument to API function + * @param sem semaphore to wait on + * @return ERR_OK if the function was called, another err_t if not + */ +err_t +tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t *sem) +{ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_UNUSED_ARG(sem); + LOCK_TCPIP_CORE(); + fn(apimsg); + UNLOCK_TCPIP_CORE(); + return ERR_OK; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + + LWIP_ASSERT("semaphore not initialized", sys_sem_valid(sem)); + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.function = fn; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.msg = apimsg; + sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(sem, 0); + TCPIP_MSG_VAR_FREE(msg); + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * Synchronously calls function in TCPIP thread and waits for its completion. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING (preferred) or + * LWIP_NETCONN_SEM_PER_THREAD. 
+ * If not, a semaphore is created and destroyed on every call which is usually + * an expensive/slow operation. + * @param fn Function to call + * @param call Call parameters + * @return Return value from tcpip_api_call_fn + */ +err_t +tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call) +{ +#if LWIP_TCPIP_CORE_LOCKING + err_t err; + LOCK_TCPIP_CORE(); + err = fn(call); + UNLOCK_TCPIP_CORE(); + return err; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + err_t err = sys_sem_new(&call->sem, 0); + if (err != ERR_OK) { + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API_CALL; + TCPIP_MSG_VAR_REF(msg).msg.api_call.arg = call; + TCPIP_MSG_VAR_REF(msg).msg.api_call.function = fn; +#if LWIP_NETCONN_SEM_PER_THREAD + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = &call->sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(TCPIP_MSG_VAR_REF(msg).msg.api_call.sem, 0); + TCPIP_MSG_VAR_FREE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&call->sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + return call->err; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * @ingroup lwip_os + * Allocate a structure for a static callback message and initialize it. + * The message has a special type such that lwIP never frees it. + * This is intended to be used to send "static" messages from interrupt context, + * e.g. the message is allocated once and posted several times from an IRQ + * using tcpip_callbackmsg_trycallback(). + * Example usage: Trigger execution of an ethernet IRQ DPC routine in lwIP thread context. + * + * @param function the function to call + * @param ctx parameter passed to function + * @return a struct pointer to pass to tcpip_callbackmsg_trycallback(). + * + * @see tcpip_callbackmsg_trycallback() + * @see tcpip_callbackmsg_delete() + */ +struct tcpip_callback_msg * +tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return NULL; + } + msg->type = TCPIP_MSG_CALLBACK_STATIC; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + return (struct tcpip_callback_msg *)msg; +} + +/** + * @ingroup lwip_os + * Free a callback message allocated by tcpip_callbackmsg_new(). + * + * @param msg the message to free + * + * @see tcpip_callbackmsg_new() + */ +void +tcpip_callbackmsg_delete(struct tcpip_callback_msg *msg) +{ + memp_free(MEMP_TCPIP_MSG_API, msg); +} + +/** + * @ingroup lwip_os + * Try to post a callback-message to the tcpip_thread tcpip_mbox. + * + * @param msg pointer to the message to post + * @return sys_mbox_trypost() return code + * + * @see tcpip_callbackmsg_new() + */ +err_t +tcpip_callbackmsg_trycallback(struct tcpip_callback_msg *msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + return sys_mbox_trypost(&tcpip_mbox, msg); +} + +/** + * @ingroup lwip_os + * Try to post a callback-message to the tcpip_thread mbox. + * Same as @ref tcpip_callbackmsg_trycallback but calls sys_mbox_trypost_fromisr(), + * mainly to help FreeRTOS, where calls differ between task level and ISR level. 
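+ *
+ * Sketch of the intended pattern (illustrative; dpc_msg and eth_dpc are
+ * hypothetical names): allocate the static message once in task context,
+ * then post it from the interrupt handler without any allocation:
+ *
+ *   static struct tcpip_callback_msg *dpc_msg;
+ *
+ *   // init (task context):
+ *   dpc_msg = tcpip_callbackmsg_new(eth_dpc, NULL);
+ *
+ *   // interrupt handler:
+ *   err_t post_err = tcpip_callbackmsg_trycallback_fromisr(dpc_msg);
+ *   // post_err is the sys_mbox_trypost_fromisr() result; a port may use it
+ *   // to decide whether a context switch is required on ISR exit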
+ * + * @param msg pointer to the message to post + * @return sys_mbox_trypost_fromisr() return code (without change, so this + * knowledge can be used to e.g. propagate "bool needs_scheduling") + * + * @see tcpip_callbackmsg_new() + */ +err_t +tcpip_callbackmsg_trycallback_fromisr(struct tcpip_callback_msg *msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + return sys_mbox_trypost_fromisr(&tcpip_mbox, msg); +} + +/** + * @ingroup lwip_os + * Initialize this module: + * - initialize all sub modules + * - start the tcpip_thread + * + * @param initfunc a function to call when tcpip_thread is running and finished initializing + * @param arg argument to pass to initfunc + */ +void +tcpip_init(tcpip_init_done_fn initfunc, void *arg) +{ + lwip_init(); + + tcpip_init_done = initfunc; + tcpip_init_done_arg = arg; + if (sys_mbox_new(&tcpip_mbox, TCPIP_MBOX_SIZE) != ERR_OK) { + LWIP_ASSERT("failed to create tcpip_thread mbox", 0); + } +#if LWIP_TCPIP_CORE_LOCKING + if (sys_mutex_new(&lock_tcpip_core) != ERR_OK) { + LWIP_ASSERT("failed to create lock_tcpip_core", 0); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sys_thread_new(TCPIP_THREAD_NAME, tcpip_thread, NULL, TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO); +} + +/** + * Simple callback function used with tcpip_callback to free a pbuf + * (pbuf_free has a wrong signature for tcpip_callback) + * + * @param p The pbuf (chain) to be dereferenced. + */ +static void +pbuf_free_int(void *p) +{ + struct pbuf *q = (struct pbuf *)p; + pbuf_free(q); +} + +/** + * A simple wrapper function that allows you to free a pbuf from interrupt context. + * + * @param p The pbuf (chain) to be dereferenced. + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +pbuf_free_callback(struct pbuf *p) +{ + return tcpip_try_callback(pbuf_free_int, p); +} + +/** + * A simple wrapper function that allows you to free heap memory from + * interrupt context. + * + * @param m the heap memory to free + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +mem_free_callback(void *m) +{ + return tcpip_try_callback(mem_free, m); +} + +#endif /* !NO_SYS */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c new file mode 100644 index 000000000..269f4a490 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c @@ -0,0 +1,1463 @@ +/** + * @file + * MQTT client + * + * @defgroup mqtt MQTT client + * @ingroup apps + * @verbinclude mqtt_client.txt + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack + * + * Author: Erik Andersson + * + * + * @todo: + * - Handle large outgoing payloads for PUBLISH messages + * - Fix restriction of a single topic in each (UN)SUBSCRIBE message (protocol has support for multiple topics) + * - Add support for legacy MQTT protocol version + * + * Please coordinate changes and requests with Erik Andersson + * Erik Andersson + * + */ +#include "lwip/apps/mqtt.h" +#include "lwip/apps/mqtt_priv.h" +#include "lwip/timeouts.h" +#include "lwip/ip_addr.h" +#include "lwip/mem.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/altcp_tls.h" +#include + +#if LWIP_TCP && LWIP_CALLBACK_API + +/** + * MQTT_DEBUG: Default is off. + */ +#if !defined MQTT_DEBUG || defined __DOXYGEN__ +#define MQTT_DEBUG LWIP_DBG_OFF +#endif + +#define MQTT_DEBUG_TRACE (MQTT_DEBUG | LWIP_DBG_TRACE) +#define MQTT_DEBUG_STATE (MQTT_DEBUG | LWIP_DBG_STATE) +#define MQTT_DEBUG_WARN (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define MQTT_DEBUG_WARN_STATE (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING | LWIP_DBG_STATE) +#define MQTT_DEBUG_SERIOUS (MQTT_DEBUG | LWIP_DBG_LEVEL_SERIOUS) + + + +/** + * MQTT client connection states + */ +enum { + TCP_DISCONNECTED, + TCP_CONNECTING, + MQTT_CONNECTING, + MQTT_CONNECTED +}; + +/** + * MQTT control message types + */ +enum mqtt_message_type { + MQTT_MSG_TYPE_CONNECT = 1, + MQTT_MSG_TYPE_CONNACK = 2, + MQTT_MSG_TYPE_PUBLISH = 3, + MQTT_MSG_TYPE_PUBACK = 4, + MQTT_MSG_TYPE_PUBREC = 5, + MQTT_MSG_TYPE_PUBREL = 6, + MQTT_MSG_TYPE_PUBCOMP = 7, + MQTT_MSG_TYPE_SUBSCRIBE = 8, + MQTT_MSG_TYPE_SUBACK = 9, + MQTT_MSG_TYPE_UNSUBSCRIBE = 10, + MQTT_MSG_TYPE_UNSUBACK = 11, + MQTT_MSG_TYPE_PINGREQ = 12, + MQTT_MSG_TYPE_PINGRESP = 13, + MQTT_MSG_TYPE_DISCONNECT = 14 +}; + +/** Helpers to extract control packet type and qos from first byte in fixed header */ +#define MQTT_CTL_PACKET_TYPE(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0xf0) >> 4) +#define MQTT_CTL_PACKET_QOS(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0x6) >> 1) + +/** + * MQTT connect flags, only used in CONNECT message + */ +enum mqtt_connect_flag { + MQTT_CONNECT_FLAG_USERNAME = 1 << 7, + MQTT_CONNECT_FLAG_PASSWORD = 1 << 6, + MQTT_CONNECT_FLAG_WILL_RETAIN = 1 << 5, + MQTT_CONNECT_FLAG_WILL = 1 << 2, + MQTT_CONNECT_FLAG_CLEAN_SESSION = 1 << 1 +}; + + +static void mqtt_cyclic_timer(void *arg); + +#if defined(LWIP_DEBUG) +static const char *const mqtt_message_type_str[15] = { + "UNDEFINED", + "CONNECT", + "CONNACK", + "PUBLISH", + "PUBACK", + "PUBREC", + "PUBREL", + "PUBCOMP", + "SUBSCRIBE", + "SUBACK", + "UNSUBSCRIBE", + "UNSUBACK", + "PINGREQ", + "PINGRESP", + "DISCONNECT" +}; + +/** + * Message type value to string + * @param msg_type see enum mqtt_message_type + * + * @return 
Control message type text string + */ +static const char * +mqtt_msg_type_to_str(u8_t msg_type) +{ + if (msg_type >= LWIP_ARRAYSIZE(mqtt_message_type_str)) { + msg_type = 0; + } + return mqtt_message_type_str[msg_type]; +} + +#endif + + +/** + * Generate MQTT packet identifier + * @param client MQTT client + * @return New packet identifier, range 1 to 65535 + */ +static u16_t +msg_generate_packet_id(mqtt_client_t *client) +{ + client->pkt_id_seq++; + if (client->pkt_id_seq == 0) { + client->pkt_id_seq++; + } + return client->pkt_id_seq; +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output ring buffer */ + +/** Add single item to ring buffer */ +static void +mqtt_ringbuf_put(struct mqtt_ringbuf_t *rb, u8_t item) +{ + rb->buf[rb->put] = item; + rb->put++; + if (rb->put >= MQTT_OUTPUT_RINGBUF_SIZE) { + rb->put = 0; + } +} + +/** Return pointer to ring buffer get position */ +static u8_t * +mqtt_ringbuf_get_ptr(struct mqtt_ringbuf_t *rb) +{ + return &rb->buf[rb->get]; +} + +static void +mqtt_ringbuf_advance_get_idx(struct mqtt_ringbuf_t *rb, u16_t len) +{ + LWIP_ASSERT("mqtt_ringbuf_advance_get_idx: len < MQTT_OUTPUT_RINGBUF_SIZE", len < MQTT_OUTPUT_RINGBUF_SIZE); + + rb->get += len; + if (rb->get >= MQTT_OUTPUT_RINGBUF_SIZE) { + rb->get = rb->get - MQTT_OUTPUT_RINGBUF_SIZE; + } +} + +/** Return number of bytes in ring buffer */ +static u16_t +mqtt_ringbuf_len(struct mqtt_ringbuf_t *rb) +{ + u32_t len = rb->put - rb->get; + if (len > 0xFFFF) { + len += MQTT_OUTPUT_RINGBUF_SIZE; + } + return (u16_t)len; +} + +/** Return number of bytes free in ring buffer */ +#define mqtt_ringbuf_free(rb) (MQTT_OUTPUT_RINGBUF_SIZE - mqtt_ringbuf_len(rb)) + +/** Return number of bytes possible to read without wrapping around */ +#define mqtt_ringbuf_linear_read_length(rb) LWIP_MIN(mqtt_ringbuf_len(rb), (MQTT_OUTPUT_RINGBUF_SIZE - (rb)->get)) + +/** + * Try send as many bytes as possible from output ring buffer + * @param rb Output ring buffer + * @param tpcb TCP connection handle + */ +static void +mqtt_output_send(struct mqtt_ringbuf_t *rb, struct altcp_pcb *tpcb) +{ + err_t err; + u8_t wrap = 0; + u16_t ringbuf_lin_len = mqtt_ringbuf_linear_read_length(rb); + u16_t send_len = altcp_sndbuf(tpcb); + LWIP_ASSERT("mqtt_output_send: tpcb != NULL", tpcb != NULL); + + if (send_len == 0 || ringbuf_lin_len == 0) { + return; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_output_send: tcp_sndbuf: %d bytes, ringbuf_linear_available: %d, get %d, put %d\n", + send_len, ringbuf_lin_len, rb->get, rb->put)); + + if (send_len > ringbuf_lin_len) { + /* Space in TCP output buffer is larger than available in ring buffer linear portion */ + send_len = ringbuf_lin_len; + /* Wrap around if more data in ring buffer after linear portion */ + wrap = (mqtt_ringbuf_len(rb) > ringbuf_lin_len); + } + err = altcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY | (wrap ? 
TCP_WRITE_FLAG_MORE : 0)); + if ((err == ERR_OK) && wrap) { + mqtt_ringbuf_advance_get_idx(rb, send_len); + /* Use the lesser one of ring buffer linear length and TCP send buffer size */ + send_len = LWIP_MIN(altcp_sndbuf(tpcb), mqtt_ringbuf_linear_read_length(rb)); + err = altcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY); + } + + if (err == ERR_OK) { + mqtt_ringbuf_advance_get_idx(rb, send_len); + /* Flush */ + altcp_output(tpcb); + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_output_send: Send failed with err %d (\"%s\")\n", err, lwip_strerr(err))); + } +} + + + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Request queue */ + +/** + * Create request item + * @param r_objs Pointer to request objects + * @param r_objs_len Number of array entries + * @param pkt_id Packet identifier of request + * @param cb Packet callback to call when requests lifetime ends + * @param arg Parameter following callback + * @return Request or NULL if failed to create + */ +static struct mqtt_request_t * +mqtt_create_request(struct mqtt_request_t *r_objs, size_t r_objs_len, u16_t pkt_id, mqtt_request_cb_t cb, void *arg) +{ + struct mqtt_request_t *r = NULL; + u8_t n; + LWIP_ASSERT("mqtt_create_request: r_objs != NULL", r_objs != NULL); + for (n = 0; n < r_objs_len; n++) { + /* Item point to itself if not in use */ + if (r_objs[n].next == &r_objs[n]) { + r = &r_objs[n]; + r->next = NULL; + r->cb = cb; + r->arg = arg; + r->pkt_id = pkt_id; + break; + } + } + return r; +} + + +/** + * Append request to pending request queue + * @param tail Pointer to request queue tail pointer + * @param r Request to append + */ +static void +mqtt_append_request(struct mqtt_request_t **tail, struct mqtt_request_t *r) +{ + struct mqtt_request_t *head = NULL; + s16_t time_before = 0; + struct mqtt_request_t *iter; + + LWIP_ASSERT("mqtt_append_request: tail != NULL", tail != NULL); + + /* Iterate trough queue to find head, and count total timeout time */ + for (iter = *tail; iter != NULL; iter = iter->next) { + time_before += iter->timeout_diff; + head = iter; + } + + LWIP_ASSERT("mqtt_append_request: time_before <= MQTT_REQ_TIMEOUT", time_before <= MQTT_REQ_TIMEOUT); + r->timeout_diff = MQTT_REQ_TIMEOUT - time_before; + if (head == NULL) { + *tail = r; + } else { + head->next = r; + } +} + + +/** + * Delete request item + * @param r Request item to delete + */ +static void +mqtt_delete_request(struct mqtt_request_t *r) +{ + if (r != NULL) { + r->next = r; + } +} + +/** + * Remove a request item with a specific packet identifier from request queue + * @param tail Pointer to request queue tail pointer + * @param pkt_id Packet identifier of request to take + * @return Request item if found, NULL if not + */ +static struct mqtt_request_t * +mqtt_take_request(struct mqtt_request_t **tail, u16_t pkt_id) +{ + struct mqtt_request_t *iter = NULL, *prev = NULL; + LWIP_ASSERT("mqtt_take_request: tail != NULL", tail != NULL); + /* Search all request for pkt_id */ + for (iter = *tail; iter != NULL; iter = iter->next) { + if (iter->pkt_id == pkt_id) { + break; + } + prev = iter; + } + + /* If request was found */ + if (iter != NULL) { + /* unchain */ + if (prev == NULL) { + *tail = iter->next; + } else { + prev->next = iter->next; + } + /* If exists, add remaining timeout time for the request to next */ + if (iter->next != NULL) { + iter->next->timeout_diff += iter->timeout_diff; + } + iter->next = NULL; + } + return iter; +} + +/** + * 
Handle requests timeout + * @param tail Pointer to request queue tail pointer + * @param t Time since last call in seconds + */ +static void +mqtt_request_time_elapsed(struct mqtt_request_t **tail, u8_t t) +{ + struct mqtt_request_t *r; + LWIP_ASSERT("mqtt_request_time_elapsed: tail != NULL", tail != NULL); + r = *tail; + while (t > 0 && r != NULL) { + if (t >= r->timeout_diff) { + t -= (u8_t)r->timeout_diff; + /* Unchain */ + *tail = r->next; + /* Notify upper layer about timeout */ + if (r->cb != NULL) { + r->cb(r->arg, ERR_TIMEOUT); + } + mqtt_delete_request(r); + /* Tail might be be modified in callback, so re-read it in every iteration */ + r = *(struct mqtt_request_t *const volatile *)tail; + } else { + r->timeout_diff -= t; + t = 0; + } + } +} + +/** + * Free all request items + * @param tail Pointer to request queue tail pointer + */ +static void +mqtt_clear_requests(struct mqtt_request_t **tail) +{ + struct mqtt_request_t *iter, *next; + LWIP_ASSERT("mqtt_clear_requests: tail != NULL", tail != NULL); + for (iter = *tail; iter != NULL; iter = next) { + next = iter->next; + mqtt_delete_request(iter); + } + *tail = NULL; +} +/** + * Initialize all request items + * @param r_objs Pointer to request objects + * @param r_objs_len Number of array entries + */ +static void +mqtt_init_requests(struct mqtt_request_t *r_objs, size_t r_objs_len) +{ + u8_t n; + LWIP_ASSERT("mqtt_init_requests: r_objs != NULL", r_objs != NULL); + for (n = 0; n < r_objs_len; n++) { + /* Item pointing to itself indicates unused */ + r_objs[n].next = &r_objs[n]; + } +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output message build helpers */ + + +static void +mqtt_output_append_u8(struct mqtt_ringbuf_t *rb, u8_t value) +{ + mqtt_ringbuf_put(rb, value); +} + +static +void mqtt_output_append_u16(struct mqtt_ringbuf_t *rb, u16_t value) +{ + mqtt_ringbuf_put(rb, value >> 8); + mqtt_ringbuf_put(rb, value & 0xff); +} + +static void +mqtt_output_append_buf(struct mqtt_ringbuf_t *rb, const void *data, u16_t length) +{ + u16_t n; + for (n = 0; n < length; n++) { + mqtt_ringbuf_put(rb, ((const u8_t *)data)[n]); + } +} + +static void +mqtt_output_append_string(struct mqtt_ringbuf_t *rb, const char *str, u16_t length) +{ + u16_t n; + mqtt_ringbuf_put(rb, length >> 8); + mqtt_ringbuf_put(rb, length & 0xff); + for (n = 0; n < length; n++) { + mqtt_ringbuf_put(rb, str[n]); + } +} + +/** + * Append fixed header + * @param rb Output ring buffer + * @param msg_type see enum mqtt_message_type + * @param fdup MQTT DUP flag + * @param fqos MQTT QoS field + * @param fretain MQTT retain flag + * @param r_length Remaining length after fixed header + */ + +static void +mqtt_output_append_fixed_header(struct mqtt_ringbuf_t *rb, u8_t msg_type, u8_t fdup, + u8_t fqos, u8_t fretain, u16_t r_length) +{ + /* Start with control byte */ + mqtt_output_append_u8(rb, (((msg_type & 0x0f) << 4) | ((fdup & 1) << 3) | ((fqos & 3) << 1) | (fretain & 1))); + /* Encode remaining length field */ + do { + mqtt_output_append_u8(rb, (r_length & 0x7f) | (r_length >= 128 ? 
0x80 : 0)); + r_length >>= 7; + } while (r_length > 0); +} + + +/** + * Check output buffer space + * @param rb Output ring buffer + * @param r_length Remaining length after fixed header + * @return 1 if message will fit, 0 if not enough buffer space + */ +static u8_t +mqtt_output_check_space(struct mqtt_ringbuf_t *rb, u16_t r_length) +{ + /* Start with length of type byte + remaining length */ + u16_t total_len = 1 + r_length; + + LWIP_ASSERT("mqtt_output_check_space: rb != NULL", rb != NULL); + + /* Calculate number of required bytes to contain the remaining bytes field and add to total*/ + do { + total_len++; + r_length >>= 7; + } while (r_length > 0); + + return (total_len <= mqtt_ringbuf_free(rb)); +} + + +/** + * Close connection to server + * @param client MQTT client + * @param reason Reason for disconnection + */ +static void +mqtt_close(mqtt_client_t *client, mqtt_connection_status_t reason) +{ + LWIP_ASSERT("mqtt_close: client != NULL", client != NULL); + + /* Bring down TCP connection if not already done */ + if (client->conn != NULL) { + err_t res; + altcp_recv(client->conn, NULL); + altcp_err(client->conn, NULL); + altcp_sent(client->conn, NULL); + res = altcp_close(client->conn); + if (res != ERR_OK) { + altcp_abort(client->conn); + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_close: Close err=%s\n", lwip_strerr(res))); + } + client->conn = NULL; + } + + /* Remove all pending requests */ + mqtt_clear_requests(&client->pend_req_queue); + /* Stop cyclic timer */ + sys_untimeout(mqtt_cyclic_timer, client); + + /* Notify upper layer of disconnection if changed state */ + if (client->conn_state != TCP_DISCONNECTED) { + + client->conn_state = TCP_DISCONNECTED; + if (client->connect_cb != NULL) { + client->connect_cb(client, client->connect_arg, reason); + } + } +} + + +/** + * Interval timer, called every MQTT_CYCLIC_TIMER_INTERVAL seconds in MQTT_CONNECTING and MQTT_CONNECTED states + * @param arg MQTT client + */ +static void +mqtt_cyclic_timer(void *arg) +{ + u8_t restart_timer = 1; + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_cyclic_timer: client != NULL", client != NULL); + + if (client->conn_state == MQTT_CONNECTING) { + client->cyclic_tick++; + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= MQTT_CONNECT_TIMOUT) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_cyclic_timer: CONNECT attempt to server timed out\n")); + /* Disconnect TCP */ + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + } else if (client->conn_state == MQTT_CONNECTED) { + /* Handle timeout for pending requests */ + mqtt_request_time_elapsed(&client->pend_req_queue, MQTT_CYCLIC_TIMER_INTERVAL); + + /* keep_alive > 0 means keep alive functionality shall be used */ + if (client->keep_alive > 0) { + + client->server_watchdog++; + /* If reception from server has been idle for 1.5*keep_alive time, server is considered unresponsive */ + if ((client->server_watchdog * MQTT_CYCLIC_TIMER_INTERVAL) > (client->keep_alive + client->keep_alive / 2)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_cyclic_timer: Server incoming keep-alive timeout\n")); + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + + /* If time for a keep alive message to be sent, transmission has been idle for keep_alive time */ + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= client->keep_alive) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_cyclic_timer: Sending keep-alive message to server\n")); + if (mqtt_output_check_space(&client->output, 0) != 0) { + 
mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PINGREQ, 0, 0, 0, 0); + client->cyclic_tick = 0; + } + } else { + client->cyclic_tick++; + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_cyclic_timer: Timer should not be running in state %d\n", client->conn_state)); + restart_timer = 0; + } + if (restart_timer) { + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL * 1000, mqtt_cyclic_timer, arg); + } +} + + +/** + * Send PUBACK, PUBREC or PUBREL response message + * @param client MQTT client + * @param msg PUBACK, PUBREC or PUBREL + * @param pkt_id Packet identifier + * @param qos QoS value + * @return ERR_OK if successful, ERR_MEM if out of memory + */ +static err_t +pub_ack_rec_rel_response(mqtt_client_t *client, u8_t msg, u16_t pkt_id, u8_t qos) +{ + err_t err = ERR_OK; + if (mqtt_output_check_space(&client->output, 2)) { + mqtt_output_append_fixed_header(&client->output, msg, 0, qos, 0, 2); + mqtt_output_append_u16(&client->output, pkt_id); + mqtt_output_send(&client->output, client->conn); + } else { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("pub_ack_rec_rel_response: OOM creating response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(msg), pkt_id)); + err = ERR_MEM; + } + return err; +} + +/** + * Subscribe response from server + * @param r Matching request + * @param result Result code from server + */ +static void +mqtt_incomming_suback(struct mqtt_request_t *r, u8_t result) +{ + if (r->cb != NULL) { + r->cb(r->arg, result < 3 ? ERR_OK : ERR_ABRT); + } +} + + +/** + * Complete MQTT message received or buffer full + * @param client MQTT client + * @param fixed_hdr_idx header index + * @param length length received part + * @param remaining_length Remaining length of complete message + */ +static mqtt_connection_status_t +mqtt_message_received(mqtt_client_t *client, u8_t fixed_hdr_idx, u16_t length, u32_t remaining_length) +{ + mqtt_connection_status_t res = MQTT_CONNECT_ACCEPTED; + + u8_t *var_hdr_payload = client->rx_buffer + fixed_hdr_idx; + size_t var_hdr_payload_bufsize = sizeof(client->rx_buffer) - fixed_hdr_idx; + + /* Control packet type */ + u8_t pkt_type = MQTT_CTL_PACKET_TYPE(client->rx_buffer[0]); + u16_t pkt_id = 0; + + LWIP_ASSERT("client->msg_idx < MQTT_VAR_HEADER_BUFFER_LEN", client->msg_idx < MQTT_VAR_HEADER_BUFFER_LEN); + LWIP_ASSERT("fixed_hdr_idx <= client->msg_idx", fixed_hdr_idx <= client->msg_idx); + LWIP_ERROR("buffer length mismatch", fixed_hdr_idx + length <= MQTT_VAR_HEADER_BUFFER_LEN, + return MQTT_CONNECT_DISCONNECTED); + + if (pkt_type == MQTT_MSG_TYPE_CONNACK) { + if (client->conn_state == MQTT_CONNECTING) { + if (length < 2) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short CONNACK message\n")); + goto out_disconnect; + } + /* Get result code from CONNACK */ + res = (mqtt_connection_status_t)var_hdr_payload[1]; + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: Connect response code %d\n", res)); + if (res == MQTT_CONNECT_ACCEPTED) { + /* Reset cyclic_tick when changing to connected state */ + client->cyclic_tick = 0; + client->conn_state = MQTT_CONNECTED; + /* Notify upper layer */ + if (client->connect_cb != 0) { + client->connect_cb(client, client->connect_arg, res); + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Received CONNACK in connected state\n")); + } + } else if (pkt_type == MQTT_MSG_TYPE_PINGRESP) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ( "mqtt_message_received: Received PINGRESP from server\n")); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBLISH) { + u16_t payload_offset = 0; + u16_t 
payload_length = length; + u8_t qos = MQTT_CTL_PACKET_QOS(client->rx_buffer[0]); + + if (client->msg_idx <= MQTT_VAR_HEADER_BUFFER_LEN) { + /* Should have topic and pkt id*/ + u8_t *topic; + u16_t after_topic; + u8_t bkp; + u16_t topic_len; + u16_t qos_len = (qos ? 2U : 0U); + if (length < 2 + qos_len) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet\n")); + goto out_disconnect; + } + topic_len = var_hdr_payload[0]; + topic_len = (topic_len << 8) + (u16_t)(var_hdr_payload[1]); + if ((topic_len > length - (2 + qos_len)) || + (topic_len > var_hdr_payload_bufsize - (2 + qos_len))) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet (topic)\n")); + goto out_disconnect; + } + + topic = var_hdr_payload + 2; + after_topic = 2 + topic_len; + /* Check buffer length, add one byte even for QoS 0 so that zero termination will fit */ + if ((after_topic + (qos ? 2U : 1U)) > var_hdr_payload_bufsize) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Receive buffer can not fit topic + pkt_id\n")); + goto out_disconnect; + } + + /* id for QoS 1 and 2 */ + if (qos > 0) { + if (length < after_topic + 2U) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet (after_topic)\n")); + goto out_disconnect; + } + client->inpub_pkt_id = ((u16_t)var_hdr_payload[after_topic] << 8) + (u16_t)var_hdr_payload[after_topic + 1]; + after_topic += 2; + } else { + client->inpub_pkt_id = 0; + } + /* Take backup of byte after topic */ + bkp = topic[topic_len]; + /* Zero terminate string */ + topic[topic_len] = 0; + /* Payload data remaining in receive buffer */ + payload_length = length - after_topic; + payload_offset = after_topic; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_incomming_publish: Received message with QoS %d at topic: %s, payload length %"U32_F"\n", + qos, topic, remaining_length + payload_length)); + if (client->pub_cb != NULL) { + client->pub_cb(client->inpub_arg, (const char *)topic, remaining_length + payload_length); + } + /* Restore byte after topic */ + topic[topic_len] = bkp; + } + if (payload_length > 0 || remaining_length == 0) { + if (length < (size_t)(payload_offset + payload_length)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short packet (payload)\n")); + goto out_disconnect; + } + client->data_cb(client->inpub_arg, var_hdr_payload + payload_offset, payload_length, remaining_length == 0 ? MQTT_DATA_FLAG_LAST : 0); + /* Reply if QoS > 0 */ + if (remaining_length == 0 && qos > 0) { + /* Send PUBACK for QoS 1 or PUBREC for QoS 2 */ + u8_t resp_msg = (qos == 1) ? 
MQTT_MSG_TYPE_PUBACK : MQTT_MSG_TYPE_PUBREC; + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_incomming_publish: Sending publish response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(resp_msg), client->inpub_pkt_id)); + pub_ack_rec_rel_response(client, resp_msg, client->inpub_pkt_id, 0); + } + } + } else { + /* Get packet identifier */ + pkt_id = (u16_t)var_hdr_payload[0] << 8; + pkt_id |= (u16_t)var_hdr_payload[1]; + if (pkt_id == 0) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Got message with illegal packet identifier: 0\n")); + goto out_disconnect; + } + if (pkt_type == MQTT_MSG_TYPE_PUBREC) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: PUBREC, sending PUBREL with pkt_id: %d\n", pkt_id)); + pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBREL, pkt_id, 1); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBREL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: PUBREL, sending PUBCOMP response with pkt_id: %d\n", pkt_id)); + pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBCOMP, pkt_id, 0); + + } else if (pkt_type == MQTT_MSG_TYPE_SUBACK || pkt_type == MQTT_MSG_TYPE_UNSUBACK || + pkt_type == MQTT_MSG_TYPE_PUBCOMP || pkt_type == MQTT_MSG_TYPE_PUBACK) { + struct mqtt_request_t *r = mqtt_take_request(&client->pend_req_queue, pkt_id); + if (r != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: %s response with id %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); + if (pkt_type == MQTT_MSG_TYPE_SUBACK) { + if (length < 3) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: To small SUBACK packet\n")); + goto out_disconnect; + } else { + mqtt_incomming_suback(r, var_hdr_payload[2]); + } + } else if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ( "mqtt_message_received: Received %s reply, with wrong pkt_id: %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ( "mqtt_message_received: Received unknown message type: %d\n", pkt_type)); + goto out_disconnect; + } + } + return res; +out_disconnect: + return MQTT_CONNECT_DISCONNECTED; +} + + +/** + * MQTT incoming message parser + * @param client MQTT client + * @param p PBUF chain of received data + * @return Connection status + */ +static mqtt_connection_status_t +mqtt_parse_incoming(mqtt_client_t *client, struct pbuf *p) +{ + u16_t in_offset = 0; + u32_t msg_rem_len = 0; + u8_t fixed_hdr_idx = 0; + u8_t b = 0; + + while (p->tot_len > in_offset) { + /* We ALWAYS parse the header here first. Even if the header was not + included in this segment, we re-parse it here by buffering it in + client->rx_buffer. client->msg_idx keeps track of this. */ + if ((fixed_hdr_idx < 2) || ((b & 0x80) != 0)) { + + if (fixed_hdr_idx < client->msg_idx) { + /* parse header from old pbuf (buffered in client->rx_buffer) */ + b = client->rx_buffer[fixed_hdr_idx]; + } else { + /* parse header from this pbuf and save it in client->rx_buffer in case + it comes in segmented */ + b = pbuf_get_at(p, in_offset++); + client->rx_buffer[client->msg_idx++] = b; + } + fixed_hdr_idx++; + + if (fixed_hdr_idx >= 2) { + /* fixed header contains at least 2 bytes but can contain more, depending on + 'remaining length'. All bytes but the last of this have 0x80 set to + indicate more bytes are coming. 
*/ + msg_rem_len |= (u32_t)(b & 0x7f) << ((fixed_hdr_idx - 2) * 7); + if ((b & 0x80) == 0) { + /* fixed header is done */ + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_parse_incoming: Remaining length after fixed header: %"U32_F"\n", msg_rem_len)); + if (msg_rem_len == 0) { + /* Complete message with no extra headers of payload received */ + mqtt_message_received(client, fixed_hdr_idx, 0, 0); + client->msg_idx = 0; + fixed_hdr_idx = 0; + } else { + /* Bytes remaining in message (changes remaining length if this is + not the first segment of this message) */ + msg_rem_len = (msg_rem_len + fixed_hdr_idx) - client->msg_idx; + } + } + } + } else { + /* Fixed header has been parsed, parse variable header */ + u16_t cpy_len, cpy_start, buffer_space; + + cpy_start = (client->msg_idx - fixed_hdr_idx) % (MQTT_VAR_HEADER_BUFFER_LEN - fixed_hdr_idx) + fixed_hdr_idx; + + /* Allow to copy the lesser one of available length in input data or bytes remaining in message */ + cpy_len = (u16_t)LWIP_MIN((u16_t)(p->tot_len - in_offset), msg_rem_len); + + /* Limit to available space in buffer */ + buffer_space = MQTT_VAR_HEADER_BUFFER_LEN - cpy_start; + if (cpy_len > buffer_space) { + cpy_len = buffer_space; + } + pbuf_copy_partial(p, client->rx_buffer + cpy_start, cpy_len, in_offset); + + /* Advance get and put indexes */ + client->msg_idx += cpy_len; + in_offset += cpy_len; + msg_rem_len -= cpy_len; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_parse_incoming: msg_idx: %"U32_F", cpy_len: %"U16_F", remaining %"U32_F"\n", client->msg_idx, cpy_len, msg_rem_len)); + if ((msg_rem_len == 0) || (cpy_len == buffer_space)) { + /* Whole message received or buffer is full */ + mqtt_connection_status_t res = mqtt_message_received(client, fixed_hdr_idx, (cpy_start + cpy_len) - fixed_hdr_idx, msg_rem_len); + if (res != MQTT_CONNECT_ACCEPTED) { + return res; + } + if (msg_rem_len == 0) { + /* Reset parser state */ + client->msg_idx = 0; + /* msg_tot_len = 0; */ + fixed_hdr_idx = 0; + } + } + } + } + return MQTT_CONNECT_ACCEPTED; +} + + +/** + * TCP received callback function. @see tcp_recv_fn + * @param arg MQTT client + * @param p PBUF chain of received data + * @param err Passed as return value if not ERR_OK + * @return ERR_OK or err passed into callback + */ +static err_t +mqtt_tcp_recv_cb(void *arg, struct altcp_pcb *pcb, struct pbuf *p, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_tcp_recv_cb: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_tcp_recv_cb: client->conn == pcb", client->conn == pcb); + + if (p == NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_recv_cb: Recv pbuf=NULL, remote has closed connection\n")); + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); + } else { + mqtt_connection_status_t res; + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_tcp_recv_cb: Recv err=%d\n", err)); + pbuf_free(p); + return err; + } + + /* Tell remote that data has been received */ + altcp_recved(pcb, p->tot_len); + res = mqtt_parse_incoming(client, p); + pbuf_free(p); + + if (res != MQTT_CONNECT_ACCEPTED) { + mqtt_close(client, res); + } + /* If keep alive functionality is used */ + if (client->keep_alive != 0) { + /* Reset server alive watchdog */ + client->server_watchdog = 0; + } + + } + return ERR_OK; +} + + +/** + * TCP data sent callback function. 
@see tcp_sent_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @param len Number of bytes sent + * @return ERR_OK + */ +static err_t +mqtt_tcp_sent_cb(void *arg, struct altcp_pcb *tpcb, u16_t len) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + LWIP_UNUSED_ARG(tpcb); + LWIP_UNUSED_ARG(len); + + if (client->conn_state == MQTT_CONNECTED) { + struct mqtt_request_t *r; + + /* Reset keep-alive send timer and server watchdog */ + client->cyclic_tick = 0; + client->server_watchdog = 0; + /* QoS 0 publish has no response from server, so call its callbacks here */ + while ((r = mqtt_take_request(&client->pend_req_queue, 0)) != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_sent_cb: Calling QoS 0 publish complete callback\n")); + if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, client->conn); + } + return ERR_OK; +} + +/** + * TCP error callback function. @see tcp_err_fn + * @param arg MQTT client + * @param err Error encountered + */ +static void +mqtt_tcp_err_cb(void *arg, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_UNUSED_ARG(err); /* only used for debug output */ + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_err_cb: TCP error callback: error %d, arg: %p\n", err, arg)); + LWIP_ASSERT("mqtt_tcp_err_cb: client != NULL", client != NULL); + /* Set conn to null before calling close as pcb is already deallocated*/ + client->conn = 0; + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); +} + +/** + * TCP poll callback function. @see tcp_poll_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @return err ERR_OK + */ +static err_t +mqtt_tcp_poll_cb(void *arg, struct altcp_pcb *tpcb) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + if (client->conn_state == MQTT_CONNECTED) { + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, tpcb); + } + return ERR_OK; +} + +/** + * TCP connect callback function. @see tcp_connected_fn + * @param arg MQTT client + * @param err Always ERR_OK, mqtt_tcp_err_cb is called in case of error + * @return ERR_OK + */ +static err_t +mqtt_tcp_connect_cb(void *arg, struct altcp_pcb *tpcb, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_tcp_connect_cb: TCP connect error %d\n", err)); + return err; + } + + /* Initiate receiver state */ + client->msg_idx = 0; + + /* Setup TCP callbacks */ + altcp_recv(tpcb, mqtt_tcp_recv_cb); + altcp_sent(tpcb, mqtt_tcp_sent_cb); + altcp_poll(tpcb, mqtt_tcp_poll_cb, 2); + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_connect_cb: TCP connection established to server\n")); + /* Enter MQTT connect state */ + client->conn_state = MQTT_CONNECTING; + + /* Start cyclic timer */ + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL * 1000, mqtt_cyclic_timer, client); + client->cyclic_tick = 0; + + /* Start transmission from output queue, connect message is the first one out*/ + mqtt_output_send(&client->output, client->conn); + + return ERR_OK; +} + + + +/*---------------------------------------------------------------------------------------------------- */ +/* Public API */ + + +/** + * @ingroup mqtt + * MQTT publish function. 
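+ *
+ * Minimal usage sketch (illustrative only: the callback name, topic and
+ * payload below are placeholders, and 'client' is assumed to be connected):
+ * \code{.c}
+ * static void my_pub_cb(void *arg, err_t err)
+ * {
+ *   LWIP_UNUSED_ARG(arg);
+ *   if (err != ERR_OK) {
+ *     LWIP_DEBUGF(MQTT_DEBUG_WARN, ("publish failed or timed out: %d\n", (int)err));
+ *   }
+ * }
+ *
+ * err = mqtt_publish(client, "status/psu", "on", 2, 1, 0, my_pub_cb, NULL);
+ * \endcode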
+ * @param client MQTT client + * @param topic Publish topic string + * @param payload Data to publish (NULL is allowed) + * @param payload_length Length of payload (0 is allowed) + * @param qos Quality of service, 0 1 or 2 + * @param retain MQTT retain flag + * @param cb Callback to call when publish is complete or has timed out + * @param arg User supplied argument to publish callback + * @return ERR_OK if successful + * ERR_CONN if client is disconnected + * ERR_MEM if short on memory + */ +err_t +mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg) +{ + struct mqtt_request_t *r; + u16_t pkt_id; + size_t topic_strlen; + size_t total_len; + u16_t topic_len; + u16_t remaining_length; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_publish: client != NULL", client); + LWIP_ASSERT("mqtt_publish: topic != NULL", topic); + LWIP_ERROR("mqtt_publish: TCP disconnected", (client->conn_state != TCP_DISCONNECTED), return ERR_CONN); + + topic_strlen = strlen(topic); + LWIP_ERROR("mqtt_publish: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); + topic_len = (u16_t)topic_strlen; + total_len = 2 + topic_len + payload_length; + + if (qos > 0) { + total_len += 2; + /* Generate pkt_id id for QoS1 and 2 */ + pkt_id = msg_generate_packet_id(client); + } else { + /* Use reserved value pkt_id 0 for QoS 0 in request handle */ + pkt_id = 0; + } + LWIP_ERROR("mqtt_publish: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); + remaining_length = (u16_t)total_len; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_publish: Publish with payload length %d to topic \"%s\"\n", payload_length, topic)); + + r = mqtt_create_request(client->req_list, LWIP_ARRAYSIZE(client->req_list), pkt_id, cb, arg); + if (r == NULL) { + return ERR_MEM; + } + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + mqtt_delete_request(r); + return ERR_MEM; + } + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PUBLISH, 0, qos, retain, remaining_length); + + /* Append Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + + /* Append packet if for QoS 1 and 2*/ + if (qos > 0) { + mqtt_output_append_u16(&client->output, pkt_id); + } + + /* Append optional publish payload */ + if ((payload != NULL) && (payload_length > 0)) { + mqtt_output_append_buf(&client->output, payload, payload_length); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * MQTT subscribe/unsubscribe function. 
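+ *
+ * Minimal usage sketch (illustrative only: callback name and topic are
+ * placeholders, and 'client' is assumed to be connected):
+ * \code{.c}
+ * static void my_sub_cb(void *arg, err_t err)
+ * {
+ *   LWIP_UNUSED_ARG(arg);
+ *   LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("subscribe result: %d\n", (int)err));
+ * }
+ *
+ * err = mqtt_sub_unsub(client, "status/#", 1, my_sub_cb, NULL, 1);
+ * \endcode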
+ * @param client MQTT client + * @param topic topic to subscribe to + * @param qos Quality of service, 0 1 or 2 (only used for subscribe) + * @param cb Callback to call when subscribe/unsubscribe reponse is received + * @param arg User supplied argument to publish callback + * @param sub 1 for subscribe, 0 for unsubscribe + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub) +{ + size_t topic_strlen; + size_t total_len; + u16_t topic_len; + u16_t remaining_length; + u16_t pkt_id; + struct mqtt_request_t *r; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_sub_unsub: client != NULL", client); + LWIP_ASSERT("mqtt_sub_unsub: topic != NULL", topic); + + topic_strlen = strlen(topic); + LWIP_ERROR("mqtt_sub_unsub: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); + topic_len = (u16_t)topic_strlen; + /* Topic string, pkt_id, qos for subscribe */ + total_len = topic_len + 2 + 2 + (sub != 0); + LWIP_ERROR("mqtt_sub_unsub: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); + remaining_length = (u16_t)total_len; + + LWIP_ASSERT("mqtt_sub_unsub: qos < 3", qos < 3); + if (client->conn_state == TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_sub_unsub: Can not (un)subscribe in disconnected state\n")); + return ERR_CONN; + } + + pkt_id = msg_generate_packet_id(client); + r = mqtt_create_request(client->req_list, LWIP_ARRAYSIZE(client->req_list), pkt_id, cb, arg); + if (r == NULL) { + return ERR_MEM; + } + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + mqtt_delete_request(r); + return ERR_MEM; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_sub_unsub: Client (un)subscribe to topic \"%s\", id: %d\n", topic, pkt_id)); + + mqtt_output_append_fixed_header(&client->output, sub ? 
MQTT_MSG_TYPE_SUBSCRIBE : MQTT_MSG_TYPE_UNSUBSCRIBE, 0, 1, 0, remaining_length); + /* Packet id */ + mqtt_output_append_u16(&client->output, pkt_id); + /* Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + /* QoS */ + if (sub != 0) { + mqtt_output_append_u8(&client->output, LWIP_MIN(qos, 2)); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * Set callback to handle incoming publish requests from server + * @param client MQTT client + * @param pub_cb Callback invoked when publish starts, contain topic and total length of payload + * @param data_cb Callback for each fragment of payload that arrives + * @param arg User supplied argument to both callbacks + */ +void +mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t pub_cb, + mqtt_incoming_data_cb_t data_cb, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_set_inpub_callback: client != NULL", client != NULL); + client->data_cb = data_cb; + client->pub_cb = pub_cb; + client->inpub_arg = arg; +} + +/** + * @ingroup mqtt + * Create a new MQTT client instance + * @return Pointer to instance on success, NULL otherwise + */ +mqtt_client_t * +mqtt_client_new(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + return (mqtt_client_t *)mem_calloc(1, sizeof(mqtt_client_t)); +} + +/** + * @ingroup mqtt + * Free MQTT client instance + * @param client Pointer to instance to be freed + */ +void +mqtt_client_free(mqtt_client_t *client) +{ + mem_free(client); +} + +/** + * @ingroup mqtt + * Connect to MQTT server + * @param client MQTT client + * @param ip_addr Server IP + * @param port Server port + * @param cb Connection state change callback + * @param arg User supplied argument to connection callback + * @param client_info Client identification and connection options + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ip_addr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info) +{ + err_t err; + size_t len; + u16_t client_id_length; + /* Length is the sum of 2+"MQTT", protocol level, flags and keep alive */ + u16_t remaining_length = 2 + 4 + 1 + 1 + 2; + u8_t flags = 0, will_topic_len = 0, will_msg_len = 0; + u16_t client_user_len = 0, client_pass_len = 0; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_client_connect: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_client_connect: ip_addr != NULL", ip_addr != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info != NULL", client_info != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info->client_id != NULL", client_info->client_id != NULL); + + if (client->conn_state != TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_client_connect: Already connected\n")); + return ERR_ISCONN; + } + + /* Wipe clean */ + memset(client, 0, sizeof(mqtt_client_t)); + client->connect_arg = arg; + client->connect_cb = cb; + client->keep_alive = client_info->keep_alive; + mqtt_init_requests(client->req_list, LWIP_ARRAYSIZE(client->req_list)); + + /* Build connect message */ + if (client_info->will_topic != NULL && client_info->will_msg != NULL) { + flags |= MQTT_CONNECT_FLAG_WILL; + flags |= (client_info->will_qos & 3) << 3; + if (client_info->will_retain) { + flags |= MQTT_CONNECT_FLAG_WILL_RETAIN; + } + len = strlen(client_info->will_topic); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length 
overflow", len <= 0xFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length must be > 0", len > 0, return ERR_VAL); + will_topic_len = (u8_t)len; + len = strlen(client_info->will_msg); + LWIP_ERROR("mqtt_client_connect: client_info->will_msg length overflow", len <= 0xFF, return ERR_VAL); + will_msg_len = (u8_t)len; + len = remaining_length + 2 + will_topic_len + 2 + will_msg_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + if (client_info->client_user != NULL) { + flags |= MQTT_CONNECT_FLAG_USERNAME; + len = strlen(client_info->client_user); + LWIP_ERROR("mqtt_client_connect: client_info->client_user length overflow", len <= 0xFFFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->client_user length must be > 0", len > 0, return ERR_VAL); + client_user_len = (u16_t)len; + len = remaining_length + 2 + client_user_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + if (client_info->client_pass != NULL) { + flags |= MQTT_CONNECT_FLAG_PASSWORD; + len = strlen(client_info->client_pass); + LWIP_ERROR("mqtt_client_connect: client_info->client_pass length overflow", len <= 0xFFFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->client_pass length must be > 0", len > 0, return ERR_VAL); + client_pass_len = (u16_t)len; + len = remaining_length + 2 + client_pass_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + + /* Don't complicate things, always connect using clean session */ + flags |= MQTT_CONNECT_FLAG_CLEAN_SESSION; + + len = strlen(client_info->client_id); + LWIP_ERROR("mqtt_client_connect: client_info->client_id length overflow", len <= 0xFFFF, return ERR_VAL); + client_id_length = (u16_t)len; + len = remaining_length + 2 + client_id_length; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + return ERR_MEM; + } + +#if LWIP_ALTCP && LWIP_ALTCP_TLS + if (client_info->tls_config) { + client->conn = altcp_tls_new(client_info->tls_config, IP_GET_TYPE(ip_addr)); + } else +#endif + { + client->conn = altcp_tcp_new_ip_type(IP_GET_TYPE(ip_addr)); + } + if (client->conn == NULL) { + return ERR_MEM; + } + + /* Set arg pointer for callbacks */ + altcp_arg(client->conn, client); + /* Any local address, pick random local port number */ + err = altcp_bind(client->conn, IP_ADDR_ANY, 0); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_client_connect: Error binding to local ip/port, %d\n", err)); + goto tcp_fail; + } + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_client_connect: Connecting to host: %s at port:%"U16_F"\n", ipaddr_ntoa(ip_addr), port)); + + /* Connect to server */ + err = altcp_connect(client->conn, ip_addr, port, mqtt_tcp_connect_cb); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_client_connect: Error connecting to remote ip/port, %d\n", err)); + goto tcp_fail; + } + /* Set error callback */ + altcp_err(client->conn, mqtt_tcp_err_cb); + client->conn_state = TCP_CONNECTING; + + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_CONNECT, 0, 0, 0, remaining_length); + /* Append Protocol string */ + mqtt_output_append_string(&client->output, "MQTT", 4); + /* 
Append Protocol level */ + mqtt_output_append_u8(&client->output, 4); + /* Append connect flags */ + mqtt_output_append_u8(&client->output, flags); + /* Append keep-alive */ + mqtt_output_append_u16(&client->output, client_info->keep_alive); + /* Append client id */ + mqtt_output_append_string(&client->output, client_info->client_id, client_id_length); + /* Append will message if used */ + if ((flags & MQTT_CONNECT_FLAG_WILL) != 0) { + mqtt_output_append_string(&client->output, client_info->will_topic, will_topic_len); + mqtt_output_append_string(&client->output, client_info->will_msg, will_msg_len); + } + /* Append user name if given */ + if ((flags & MQTT_CONNECT_FLAG_USERNAME) != 0) { + mqtt_output_append_string(&client->output, client_info->client_user, client_user_len); + } + /* Append password if given */ + if ((flags & MQTT_CONNECT_FLAG_PASSWORD) != 0) { + mqtt_output_append_string(&client->output, client_info->client_pass, client_pass_len); + } + return ERR_OK; + +tcp_fail: + altcp_abort(client->conn); + client->conn = NULL; + return err; +} + + +/** + * @ingroup mqtt + * Disconnect from MQTT server + * @param client MQTT client + */ +void +mqtt_disconnect(mqtt_client_t *client) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_disconnect: client != NULL", client); + /* If connection in not already closed */ + if (client->conn_state != TCP_DISCONNECTED) { + /* Set conn_state before calling mqtt_close to prevent callback from being called */ + client->conn_state = TCP_DISCONNECTED; + mqtt_close(client, (mqtt_connection_status_t)0); + } +} + +/** + * @ingroup mqtt + * Check connection with server + * @param client MQTT client + * @return 1 if connected to server, 0 otherwise + */ +u8_t +mqtt_client_is_connected(mqtt_client_t *client) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_client_is_connected: client != NULL", client); + return client->conn_state == MQTT_CONNECTED; +} + +#endif /* LWIP_TCP && LWIP_CALLBACK_API */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/apps/sntp/sntp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/apps/sntp/sntp.c new file mode 100644 index 000000000..b7ff56adc --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/apps/sntp/sntp.c @@ -0,0 +1,869 @@ +/** + * @file + * SNTP client module + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + */ + + +/** + * @defgroup sntp SNTP + * @ingroup apps + * + * This is simple "SNTP" client for the lwIP raw API. + * It is a minimal implementation of SNTPv4 as specified in RFC 4330. + * + * For a list of some public NTP servers, see this link: + * http://support.ntp.org/bin/view/Servers/NTPPoolServers + * + * @todo: + * - complete SNTP_CHECK_RESPONSE checks 3 and 4 + */ + +#include "lwip/apps/sntp.h" + +#include "lwip/opt.h" +#include "lwip/timeouts.h" +#include "lwip/udp.h" +#include "lwip/dns.h" +#include "lwip/ip_addr.h" +#include "lwip/pbuf.h" +#include "lwip/dhcp.h" + +#include +#include + +#if LWIP_UDP + +/* Handle support for more than one server via SNTP_MAX_SERVERS */ +#if SNTP_MAX_SERVERS > 1 +#define SNTP_SUPPORT_MULTIPLE_SERVERS 1 +#else /* NTP_MAX_SERVERS > 1 */ +#define SNTP_SUPPORT_MULTIPLE_SERVERS 0 +#endif /* NTP_MAX_SERVERS > 1 */ + +#ifndef SNTP_SUPPRESS_DELAY_CHECK +#if SNTP_UPDATE_DELAY < 15000 +#error "SNTPv4 RFC 4330 enforces a minimum update time of 15 seconds (define SNTP_SUPPRESS_DELAY_CHECK to disable this error)!" +#endif +#endif + +/* the various debug levels for this file */ +#define SNTP_DEBUG_TRACE (SNTP_DEBUG | LWIP_DBG_TRACE) +#define SNTP_DEBUG_STATE (SNTP_DEBUG | LWIP_DBG_STATE) +#define SNTP_DEBUG_WARN (SNTP_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define SNTP_DEBUG_WARN_STATE (SNTP_DEBUG | LWIP_DBG_LEVEL_WARNING | LWIP_DBG_STATE) +#define SNTP_DEBUG_SERIOUS (SNTP_DEBUG | LWIP_DBG_LEVEL_SERIOUS) + +#define SNTP_ERR_KOD 1 + +/* SNTP protocol defines */ +#define SNTP_MSG_LEN 48 + +#define SNTP_OFFSET_LI_VN_MODE 0 +#define SNTP_LI_MASK 0xC0 +#define SNTP_LI_NO_WARNING (0x00 << 6) +#define SNTP_LI_LAST_MINUTE_61_SEC (0x01 << 6) +#define SNTP_LI_LAST_MINUTE_59_SEC (0x02 << 6) +#define SNTP_LI_ALARM_CONDITION (0x03 << 6) /* (clock not synchronized) */ + +#define SNTP_VERSION_MASK 0x38 +#define SNTP_VERSION (4/* NTP Version 4*/<<3) + +#define SNTP_MODE_MASK 0x07 +#define SNTP_MODE_CLIENT 0x03 +#define SNTP_MODE_SERVER 0x04 +#define SNTP_MODE_BROADCAST 0x05 + +#define SNTP_OFFSET_STRATUM 1 +#define SNTP_STRATUM_KOD 0x00 + +#define SNTP_OFFSET_ORIGINATE_TIME 24 +#define SNTP_OFFSET_RECEIVE_TIME 32 +#define SNTP_OFFSET_TRANSMIT_TIME 40 + +/* Number of seconds between 1970 and Feb 7, 2036 06:28:16 UTC (epoch 1) */ +#define DIFF_SEC_1970_2036 ((u32_t)2085978496L) + +/** Convert NTP timestamp fraction to microseconds. + */ +#ifndef SNTP_FRAC_TO_US +# if LWIP_HAVE_INT64 +# define SNTP_FRAC_TO_US(f) ((u32_t)(((u64_t)(f) * 1000000UL) >> 32)) +# else +# define SNTP_FRAC_TO_US(f) ((u32_t)(f) / 4295) +# endif +#endif /* !SNTP_FRAC_TO_US */ + +/* Configure behaviour depending on native, microsecond or second precision. + * Treat NTP timestamps as signed two's-complement integers. This way, + * timestamps that have the MSB set simply become negative offsets from + * the epoch (Feb 7, 2036 06:28:16 UTC). Representable dates range from + * 1968 to 2104. 
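+ * For example, the NTP seconds value 0x83AA7E80 (1 Jan 1970 00:00:00 UTC)
+ * reads as the signed value -2085978496, i.e. exactly DIFF_SEC_1970_2036
+ * seconds before the epoch-1 start, so adding DIFF_SEC_1970_2036 recovers
+ * the Unix epoch.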
+ */ +#ifndef SNTP_SET_SYSTEM_TIME_NTP +# ifdef SNTP_SET_SYSTEM_TIME_US +# define SNTP_SET_SYSTEM_TIME_NTP(s, f) \ + SNTP_SET_SYSTEM_TIME_US((u32_t)((s) + DIFF_SEC_1970_2036), SNTP_FRAC_TO_US(f)) +# else +# define SNTP_SET_SYSTEM_TIME_NTP(s, f) \ + SNTP_SET_SYSTEM_TIME((u32_t)((s) + DIFF_SEC_1970_2036)) +# endif +#endif /* !SNTP_SET_SYSTEM_TIME_NTP */ + +/* Get the system time either natively as NTP timestamp or convert from + * Unix time in seconds and microseconds. Take care to avoid overflow if the + * microsecond value is at the maximum of 999999. Also add 0.5 us fudge to + * avoid special values like 0, and to mask round-off errors that would + * otherwise break round-trip conversion identity. + */ +#ifndef SNTP_GET_SYSTEM_TIME_NTP +# define SNTP_GET_SYSTEM_TIME_NTP(s, f) do { \ + u32_t sec_, usec_; \ + SNTP_GET_SYSTEM_TIME(sec_, usec_); \ + (s) = (s32_t)(sec_ - DIFF_SEC_1970_2036); \ + (f) = usec_ * 4295 - ((usec_ * 2143) >> 16) + 2147; \ + } while (0) +#endif /* !SNTP_GET_SYSTEM_TIME_NTP */ + +/* Start offset of the timestamps to extract from the SNTP packet */ +#define SNTP_OFFSET_TIMESTAMPS \ + (SNTP_OFFSET_TRANSMIT_TIME + 8 - sizeof(struct sntp_timestamps)) + +/* Round-trip delay arithmetic helpers */ +#if SNTP_COMP_ROUNDTRIP +# if !LWIP_HAVE_INT64 +# error "SNTP round-trip delay compensation requires 64-bit arithmetic" +# endif +# define SNTP_SEC_FRAC_TO_S64(s, f) \ + ((s64_t)(((u64_t)(s) << 32) | (u32_t)(f))) +# define SNTP_TIMESTAMP_TO_S64(t) \ + SNTP_SEC_FRAC_TO_S64(lwip_ntohl((t).sec), lwip_ntohl((t).frac)) +#endif /* SNTP_COMP_ROUNDTRIP */ + +/** + * 64-bit NTP timestamp, in network byte order. + */ +struct sntp_time { + u32_t sec; + u32_t frac; +}; + +/** + * Timestamps to be extracted from the NTP header. + */ +struct sntp_timestamps { +#if SNTP_COMP_ROUNDTRIP || SNTP_CHECK_RESPONSE >= 2 + struct sntp_time orig; + struct sntp_time recv; +#endif + struct sntp_time xmit; +}; + +/** + * SNTP packet format (without optional fields) + * Timestamps are coded as 64 bits: + * - signed 32 bits seconds since Feb 07, 2036, 06:28:16 UTC (epoch 1) + * - unsigned 32 bits seconds fraction (2^32 = 1 second) + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct sntp_msg { + PACK_STRUCT_FLD_8(u8_t li_vn_mode); + PACK_STRUCT_FLD_8(u8_t stratum); + PACK_STRUCT_FLD_8(u8_t poll); + PACK_STRUCT_FLD_8(u8_t precision); + PACK_STRUCT_FIELD(u32_t root_delay); + PACK_STRUCT_FIELD(u32_t root_dispersion); + PACK_STRUCT_FIELD(u32_t reference_identifier); + PACK_STRUCT_FIELD(u32_t reference_timestamp[2]); + PACK_STRUCT_FIELD(u32_t originate_timestamp[2]); + PACK_STRUCT_FIELD(u32_t receive_timestamp[2]); + PACK_STRUCT_FIELD(u32_t transmit_timestamp[2]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* function prototypes */ +static void sntp_request(void *arg); + +/** The operating mode */ +static u8_t sntp_opmode; + +/** The UDP pcb used by the SNTP client */ +static struct udp_pcb *sntp_pcb; +/** Names/Addresses of servers */ +struct sntp_server { +#if SNTP_SERVER_DNS + const char *name; +#endif /* SNTP_SERVER_DNS */ + ip_addr_t addr; +#if SNTP_MONITOR_SERVER_REACHABILITY + /** Reachability shift register as described in RFC 5905 */ + u8_t reachability; +#endif /* SNTP_MONITOR_SERVER_REACHABILITY */ +}; +static struct sntp_server sntp_servers[SNTP_MAX_SERVERS]; + +#if SNTP_GET_SERVERS_FROM_DHCP +static u8_t sntp_set_servers_from_dhcp; +#endif /* SNTP_GET_SERVERS_FROM_DHCP */ +#if 
SNTP_SUPPORT_MULTIPLE_SERVERS +/** The currently used server (initialized to 0) */ +static u8_t sntp_current_server; +#else /* SNTP_SUPPORT_MULTIPLE_SERVERS */ +#define sntp_current_server 0 +#endif /* SNTP_SUPPORT_MULTIPLE_SERVERS */ + +#if SNTP_RETRY_TIMEOUT_EXP +#define SNTP_RESET_RETRY_TIMEOUT() sntp_retry_timeout = SNTP_RETRY_TIMEOUT +/** Retry time, initialized with SNTP_RETRY_TIMEOUT and doubled with each retry. */ +static u32_t sntp_retry_timeout; +#else /* SNTP_RETRY_TIMEOUT_EXP */ +#define SNTP_RESET_RETRY_TIMEOUT() +#define sntp_retry_timeout SNTP_RETRY_TIMEOUT +#endif /* SNTP_RETRY_TIMEOUT_EXP */ + +#if SNTP_CHECK_RESPONSE >= 1 +/** Saves the last server address to compare with response */ +static ip_addr_t sntp_last_server_address; +#endif /* SNTP_CHECK_RESPONSE >= 1 */ + +#if SNTP_CHECK_RESPONSE >= 2 +/** Saves the last timestamp sent (which is sent back by the server) + * to compare against in response. Stored in network byte order. */ +static struct sntp_time sntp_last_timestamp_sent; +#endif /* SNTP_CHECK_RESPONSE >= 2 */ + +#if defined(LWIP_DEBUG) && !defined(sntp_format_time) +/* Debug print helper. */ +static const char * +sntp_format_time(s32_t sec) +{ + time_t ut; + ut = (u32_t)((u32_t)sec + DIFF_SEC_1970_2036); + return ctime(&ut); +} +#endif /* LWIP_DEBUG && !sntp_format_time */ + +/** + * SNTP processing of received timestamp + */ +static void +sntp_process(const struct sntp_timestamps *timestamps) +{ + s32_t sec; + u32_t frac; + + sec = (s32_t)lwip_ntohl(timestamps->xmit.sec); + frac = lwip_ntohl(timestamps->xmit.frac); + +#if SNTP_COMP_ROUNDTRIP +# if SNTP_CHECK_RESPONSE >= 2 + if (timestamps->recv.sec != 0 || timestamps->recv.frac != 0) +# endif + { + s32_t dest_sec; + u32_t dest_frac; + u32_t step_sec; + + /* Get the destination time stamp, i.e. the current system time */ + SNTP_GET_SYSTEM_TIME_NTP(dest_sec, dest_frac); + + step_sec = (dest_sec < sec) ? ((u32_t)sec - (u32_t)dest_sec) + : ((u32_t)dest_sec - (u32_t)sec); + /* In order to avoid overflows, skip the compensation if the clock step + * is larger than about 34 years. */ + if ((step_sec >> 30) == 0) { + s64_t t1, t2, t3, t4; + + t4 = SNTP_SEC_FRAC_TO_S64(dest_sec, dest_frac); + t3 = SNTP_SEC_FRAC_TO_S64(sec, frac); + t1 = SNTP_TIMESTAMP_TO_S64(timestamps->orig); + t2 = SNTP_TIMESTAMP_TO_S64(timestamps->recv); + /* Clock offset calculation according to RFC 4330 */ + t4 += ((t2 - t1) + (t3 - t4)) / 2; + + sec = (s32_t)((u64_t)t4 >> 32); + frac = (u32_t)((u64_t)t4); + } + } +#endif /* SNTP_COMP_ROUNDTRIP */ + + SNTP_SET_SYSTEM_TIME_NTP(sec, frac); + LWIP_UNUSED_ARG(frac); /* might be unused if only seconds are set */ + LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp_process: %s, %" U32_F " us\n", + sntp_format_time(sec), SNTP_FRAC_TO_US(frac))); +} + +/** + * Initialize request struct to be sent to server. 
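+ * With the defines above, the first header byte becomes
+ * SNTP_LI_NO_WARNING | SNTP_VERSION | SNTP_MODE_CLIENT = 0x00 | 0x20 | 0x03
+ * = 0x23, i.e. leap indicator 0, NTP version 4, client mode.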
+ */ +static void +sntp_initialize_request(struct sntp_msg *req) +{ + memset(req, 0, SNTP_MSG_LEN); + req->li_vn_mode = SNTP_LI_NO_WARNING | SNTP_VERSION | SNTP_MODE_CLIENT; + +#if SNTP_CHECK_RESPONSE >= 2 || SNTP_COMP_ROUNDTRIP + { + s32_t secs; + u32_t sec, frac; + /* Get the transmit timestamp */ + SNTP_GET_SYSTEM_TIME_NTP(secs, frac); + sec = lwip_htonl((u32_t)secs); + frac = lwip_htonl(frac); + +# if SNTP_CHECK_RESPONSE >= 2 + sntp_last_timestamp_sent.sec = sec; + sntp_last_timestamp_sent.frac = frac; +# endif + req->transmit_timestamp[0] = sec; + req->transmit_timestamp[1] = frac; + } +#endif /* SNTP_CHECK_RESPONSE >= 2 || SNTP_COMP_ROUNDTRIP */ +} + +/** + * Retry: send a new request (and increase retry timeout). + * + * @param arg is unused (only necessary to conform to sys_timeout) + */ +static void +sntp_retry(void *arg) +{ + LWIP_UNUSED_ARG(arg); + + LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_retry: Next request will be sent in %"U32_F" ms\n", + sntp_retry_timeout)); + + /* set up a timer to send a retry and increase the retry delay */ + sys_timeout(sntp_retry_timeout, sntp_request, NULL); + +#if SNTP_RETRY_TIMEOUT_EXP + { + u32_t new_retry_timeout; + /* increase the timeout for next retry */ + new_retry_timeout = sntp_retry_timeout << 1; + /* limit to maximum timeout and prevent overflow */ + if ((new_retry_timeout <= SNTP_RETRY_TIMEOUT_MAX) && + (new_retry_timeout > sntp_retry_timeout)) { + sntp_retry_timeout = new_retry_timeout; + } + } +#endif /* SNTP_RETRY_TIMEOUT_EXP */ +} + +#if SNTP_SUPPORT_MULTIPLE_SERVERS +/** + * If Kiss-of-Death is received (or another packet parsing error), + * try the next server or retry the current server and increase the retry + * timeout if only one server is available. + * (implicitly, SNTP_MAX_SERVERS > 1) + * + * @param arg is unused (only necessary to conform to sys_timeout) + */ +static void +sntp_try_next_server(void *arg) +{ + u8_t old_server, i; + LWIP_UNUSED_ARG(arg); + + old_server = sntp_current_server; + for (i = 0; i < SNTP_MAX_SERVERS - 1; i++) { + sntp_current_server++; + if (sntp_current_server >= SNTP_MAX_SERVERS) { + sntp_current_server = 0; + } + if (!ip_addr_isany(&sntp_servers[sntp_current_server].addr) +#if SNTP_SERVER_DNS + || (sntp_servers[sntp_current_server].name != NULL) +#endif + ) { + LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_try_next_server: Sending request to server %"U16_F"\n", + (u16_t)sntp_current_server)); + /* new server: reset retry timeout */ + SNTP_RESET_RETRY_TIMEOUT(); + /* instantly send a request to the next server */ + sntp_request(NULL); + return; + } + } + /* no other valid server found */ + sntp_current_server = old_server; + sntp_retry(NULL); +} +#else /* SNTP_SUPPORT_MULTIPLE_SERVERS */ +/* Always retry on error if only one server is supported */ +#define sntp_try_next_server sntp_retry +#endif /* SNTP_SUPPORT_MULTIPLE_SERVERS */ + +/** UDP recv callback for the sntp pcb */ +static void +sntp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct sntp_timestamps timestamps; + u8_t mode; + u8_t stratum; + err_t err; + + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(pcb); + + err = ERR_ARG; +#if SNTP_CHECK_RESPONSE >= 1 + /* check server address and port */ + if (((sntp_opmode != SNTP_OPMODE_POLL) || ip_addr_cmp(addr, &sntp_last_server_address)) && + (port == SNTP_PORT)) +#else /* SNTP_CHECK_RESPONSE >= 1 */ + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); +#endif /* SNTP_CHECK_RESPONSE >= 1 */ + { + /* process the response */ + if (p->tot_len == SNTP_MSG_LEN) { + mode 
= pbuf_get_at(p, SNTP_OFFSET_LI_VN_MODE) & SNTP_MODE_MASK; + /* if this is a SNTP response... */ + if (((sntp_opmode == SNTP_OPMODE_POLL) && (mode == SNTP_MODE_SERVER)) || + ((sntp_opmode == SNTP_OPMODE_LISTENONLY) && (mode == SNTP_MODE_BROADCAST))) { + stratum = pbuf_get_at(p, SNTP_OFFSET_STRATUM); + + if (stratum == SNTP_STRATUM_KOD) { + /* Kiss-of-death packet. Use another server or increase UPDATE_DELAY. */ + err = SNTP_ERR_KOD; + LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_recv: Received Kiss-of-Death\n")); + } else { + pbuf_copy_partial(p, ×tamps, sizeof(timestamps), SNTP_OFFSET_TIMESTAMPS); +#if SNTP_CHECK_RESPONSE >= 2 + /* check originate_timetamp against sntp_last_timestamp_sent */ + if (timestamps.orig.sec != sntp_last_timestamp_sent.sec || + timestamps.orig.frac != sntp_last_timestamp_sent.frac) { + LWIP_DEBUGF(SNTP_DEBUG_WARN, + ("sntp_recv: Invalid originate timestamp in response\n")); + } else +#endif /* SNTP_CHECK_RESPONSE >= 2 */ + /* @todo: add code for SNTP_CHECK_RESPONSE >= 3 and >= 4 here */ + { + /* correct answer */ + err = ERR_OK; + } + } + } else { + LWIP_DEBUGF(SNTP_DEBUG_WARN, ("sntp_recv: Invalid mode in response: %"U16_F"\n", (u16_t)mode)); + /* wait for correct response */ + err = ERR_TIMEOUT; + } + } else { + LWIP_DEBUGF(SNTP_DEBUG_WARN, ("sntp_recv: Invalid packet length: %"U16_F"\n", p->tot_len)); + } + } +#if SNTP_CHECK_RESPONSE >= 1 + else { + /* packet from wrong remote address or port, wait for correct response */ + err = ERR_TIMEOUT; + } +#endif /* SNTP_CHECK_RESPONSE >= 1 */ + + pbuf_free(p); + + if (err == ERR_OK) { + /* correct packet received: process it it */ + sntp_process(×tamps); + +#if SNTP_MONITOR_SERVER_REACHABILITY + /* indicate that server responded */ + sntp_servers[sntp_current_server].reachability |= 1; +#endif /* SNTP_MONITOR_SERVER_REACHABILITY */ + /* Set up timeout for next request (only if poll response was received)*/ + if (sntp_opmode == SNTP_OPMODE_POLL) { + u32_t sntp_update_delay; + sys_untimeout(sntp_try_next_server, NULL); + sys_untimeout(sntp_request, NULL); + + /* Correct response, reset retry timeout */ + SNTP_RESET_RETRY_TIMEOUT(); + + sntp_update_delay = (u32_t)SNTP_UPDATE_DELAY; + sys_timeout(sntp_update_delay, sntp_request, NULL); + LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_recv: Scheduled next time request: %"U32_F" ms\n", + sntp_update_delay)); + } + } else if (err == SNTP_ERR_KOD) { + /* KOD errors are only processed in case of an explicit poll response */ + if (sntp_opmode == SNTP_OPMODE_POLL) { + /* Kiss-of-death packet. Use another server or increase UPDATE_DELAY. */ + sntp_try_next_server(NULL); + } + } else { + /* ignore any broken packet, poll mode: retry after timeout to avoid flooding */ + } +} + +/** Actually send an sntp request to a server. 
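+ * (The request is built in a freshly allocated PBUF_RAM pbuf of SNTP_MSG_LEN
+ * bytes; the PBUF_TRANSPORT layer leaves room for the UDP/IP headers added by
+ * udp_sendto(), and the pbuf is freed again right after sending.)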
+ * + * @param server_addr resolved IP address of the SNTP server + */ +static void +sntp_send_request(const ip_addr_t *server_addr) +{ + struct pbuf *p; + + LWIP_ASSERT("server_addr != NULL", server_addr != NULL); + + p = pbuf_alloc(PBUF_TRANSPORT, SNTP_MSG_LEN, PBUF_RAM); + if (p != NULL) { + struct sntp_msg *sntpmsg = (struct sntp_msg *)p->payload; + LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_send_request: Sending request to server\n")); + /* initialize request message */ + sntp_initialize_request(sntpmsg); + /* send request */ + udp_sendto(sntp_pcb, p, server_addr, SNTP_PORT); + /* free the pbuf after sending it */ + pbuf_free(p); +#if SNTP_MONITOR_SERVER_REACHABILITY + /* indicate new packet has been sent */ + sntp_servers[sntp_current_server].reachability <<= 1; +#endif /* SNTP_MONITOR_SERVER_REACHABILITY */ + /* set up receive timeout: try next server or retry on timeout */ + sys_timeout((u32_t)SNTP_RECV_TIMEOUT, sntp_try_next_server, NULL); +#if SNTP_CHECK_RESPONSE >= 1 + /* save server address to verify it in sntp_recv */ + ip_addr_copy(sntp_last_server_address, *server_addr); +#endif /* SNTP_CHECK_RESPONSE >= 1 */ + } else { + LWIP_DEBUGF(SNTP_DEBUG_SERIOUS, ("sntp_send_request: Out of memory, trying again in %"U32_F" ms\n", + (u32_t)SNTP_RETRY_TIMEOUT)); + /* out of memory: set up a timer to send a retry */ + sys_timeout((u32_t)SNTP_RETRY_TIMEOUT, sntp_request, NULL); + } +} + +#if SNTP_SERVER_DNS +/** + * DNS found callback when using DNS names as server address. + */ +static void +sntp_dns_found(const char *hostname, const ip_addr_t *ipaddr, void *arg) +{ + LWIP_UNUSED_ARG(hostname); + LWIP_UNUSED_ARG(arg); + + if (ipaddr != NULL) { + /* Address resolved, send request */ + LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_dns_found: Server address resolved, sending request\n")); + sntp_servers[sntp_current_server].addr = *ipaddr; + sntp_send_request(ipaddr); + } else { + /* DNS resolving failed -> try another server */ + LWIP_DEBUGF(SNTP_DEBUG_WARN_STATE, ("sntp_dns_found: Failed to resolve server address resolved, trying next server\n")); + sntp_try_next_server(NULL); + } +} +#endif /* SNTP_SERVER_DNS */ + +/** + * Send out an sntp request. + * + * @param arg is unused (only necessary to conform to sys_timeout) + */ +static void +sntp_request(void *arg) +{ + ip_addr_t sntp_server_address; + err_t err; + + LWIP_UNUSED_ARG(arg); + + /* initialize SNTP server address */ +#if SNTP_SERVER_DNS + if (sntp_servers[sntp_current_server].name) { + /* always resolve the name and rely on dns-internal caching & timeout */ + ip_addr_set_zero(&sntp_servers[sntp_current_server].addr); + err = dns_gethostbyname(sntp_servers[sntp_current_server].name, &sntp_server_address, + sntp_dns_found, NULL); + if (err == ERR_INPROGRESS) { + /* DNS request sent, wait for sntp_dns_found being called */ + LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_request: Waiting for server address to be resolved.\n")); + return; + } else if (err == ERR_OK) { + sntp_servers[sntp_current_server].addr = sntp_server_address; + } + } else +#endif /* SNTP_SERVER_DNS */ + { + sntp_server_address = sntp_servers[sntp_current_server].addr; + err = (ip_addr_isany_val(sntp_server_address)) ? 
ERR_ARG : ERR_OK; + } + + if (err == ERR_OK) { + LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp_request: current server address is %s\n", + ipaddr_ntoa(&sntp_server_address))); + sntp_send_request(&sntp_server_address); + } else { + /* address conversion failed, try another server */ + LWIP_DEBUGF(SNTP_DEBUG_WARN_STATE, ("sntp_request: Invalid server address, trying next server.\n")); + sys_timeout((u32_t)SNTP_RETRY_TIMEOUT, sntp_try_next_server, NULL); + } +} + +/** + * @ingroup sntp + * Initialize this module. + * Send out request instantly or after SNTP_STARTUP_DELAY(_FUNC). + */ +void +sntp_init(void) +{ + /* LWIP_ASSERT_CORE_LOCKED(); is checked by udp_new() */ + +#ifdef SNTP_SERVER_ADDRESS +#if SNTP_SERVER_DNS + sntp_setservername(0, SNTP_SERVER_ADDRESS); +#else +#error SNTP_SERVER_ADDRESS string not supported SNTP_SERVER_DNS==0 +#endif +#endif /* SNTP_SERVER_ADDRESS */ + + if (sntp_pcb == NULL) { + sntp_pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + LWIP_ASSERT("Failed to allocate udp pcb for sntp client", sntp_pcb != NULL); + if (sntp_pcb != NULL) { + udp_recv(sntp_pcb, sntp_recv, NULL); + + if (sntp_opmode == SNTP_OPMODE_POLL) { + SNTP_RESET_RETRY_TIMEOUT(); +#if SNTP_STARTUP_DELAY + sys_timeout((u32_t)SNTP_STARTUP_DELAY_FUNC, sntp_request, NULL); +#else + sntp_request(NULL); +#endif + } else if (sntp_opmode == SNTP_OPMODE_LISTENONLY) { + ip_set_option(sntp_pcb, SOF_BROADCAST); + udp_bind(sntp_pcb, IP_ANY_TYPE, SNTP_PORT); + } + } + } +} + +/** + * @ingroup sntp + * Stop this module. + */ +void +sntp_stop(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (sntp_pcb != NULL) { +#if SNTP_MONITOR_SERVER_REACHABILITY + u8_t i; + for (i = 0; i < SNTP_MAX_SERVERS; i++) { + sntp_servers[i].reachability = 0; + } +#endif /* SNTP_MONITOR_SERVER_REACHABILITY */ + sys_untimeout(sntp_request, NULL); + sys_untimeout(sntp_try_next_server, NULL); + udp_remove(sntp_pcb); + sntp_pcb = NULL; + } +} + +/** + * @ingroup sntp + * Get enabled state. + */ +u8_t sntp_enabled(void) +{ + return (sntp_pcb != NULL) ? 1 : 0; +} + +/** + * @ingroup sntp + * Sets the operating mode. + * @param operating_mode one of the available operating modes + */ +void +sntp_setoperatingmode(u8_t operating_mode) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("Invalid operating mode", operating_mode <= SNTP_OPMODE_LISTENONLY); + LWIP_ASSERT("Operating mode must not be set while SNTP client is running", sntp_pcb == NULL); + sntp_opmode = operating_mode; +} + +/** + * @ingroup sntp + * Gets the operating mode. + */ +u8_t +sntp_getoperatingmode(void) +{ + return sntp_opmode; +} + +#if SNTP_MONITOR_SERVER_REACHABILITY +/** + * @ingroup sntp + * Gets the server reachability shift register as described in RFC 5905. + * + * @param idx the index of the NTP server + */ +u8_t +sntp_getreachability(u8_t idx) +{ + if (idx < SNTP_MAX_SERVERS) { + return sntp_servers[idx].reachability; + } + return 0; +} +#endif /* SNTP_MONITOR_SERVER_REACHABILITY */ + +#if SNTP_GET_SERVERS_FROM_DHCP +/** + * Config SNTP server handling by IP address, name, or DHCP; clear table + * @param set_servers_from_dhcp enable or disable getting server addresses from dhcp + */ +void +sntp_servermode_dhcp(int set_servers_from_dhcp) +{ + u8_t new_mode = set_servers_from_dhcp ? 
1 : 0; + LWIP_ASSERT_CORE_LOCKED(); + if (sntp_set_servers_from_dhcp != new_mode) { + sntp_set_servers_from_dhcp = new_mode; + } +} +#endif /* SNTP_GET_SERVERS_FROM_DHCP */ + +/** + * @ingroup sntp + * Initialize one of the NTP servers by IP address + * + * @param idx the index of the NTP server to set must be < SNTP_MAX_SERVERS + * @param server IP address of the NTP server to set + */ +void +sntp_setserver(u8_t idx, const ip_addr_t *server) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (idx < SNTP_MAX_SERVERS) { + if (server != NULL) { + sntp_servers[idx].addr = (*server); + } else { + ip_addr_set_zero(&sntp_servers[idx].addr); + } +#if SNTP_SERVER_DNS + sntp_servers[idx].name = NULL; +#endif + } +} + +#if LWIP_DHCP && SNTP_GET_SERVERS_FROM_DHCP +/** + * Initialize one of the NTP servers by IP address, required by DHCP + * + * @param num the index of the NTP server to set must be < SNTP_MAX_SERVERS + * @param server IP address of the NTP server to set + */ +void +dhcp_set_ntp_servers(u8_t num, const ip4_addr_t *server) +{ + LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp: %s %u.%u.%u.%u as NTP server #%u via DHCP\n", + (sntp_set_servers_from_dhcp ? "Got" : "Rejected"), + ip4_addr1(server), ip4_addr2(server), ip4_addr3(server), ip4_addr4(server), num)); + if (sntp_set_servers_from_dhcp && num) { + u8_t i; + for (i = 0; (i < num) && (i < SNTP_MAX_SERVERS); i++) { + ip_addr_t addr; + ip_addr_copy_from_ip4(addr, server[i]); + sntp_setserver(i, &addr); + } + for (i = num; i < SNTP_MAX_SERVERS; i++) { + sntp_setserver(i, NULL); + } + } +} +#endif /* LWIP_DHCP && SNTP_GET_SERVERS_FROM_DHCP */ + +/** + * @ingroup sntp + * Obtain one of the currently configured by IP address (or DHCP) NTP servers + * + * @param idx the index of the NTP server + * @return IP address of the indexed NTP server or "ip_addr_any" if the NTP + * server has not been configured by address (or at all). + */ +const ip_addr_t * +sntp_getserver(u8_t idx) +{ + if (idx < SNTP_MAX_SERVERS) { + return &sntp_servers[idx].addr; + } + return IP_ADDR_ANY; +} + +#if SNTP_SERVER_DNS +/** + * Initialize one of the NTP servers by name + * + * @param idx the index of the NTP server to set must be < SNTP_MAX_SERVERS + * @param server DNS name of the NTP server to set, to be resolved at contact time + */ +void +sntp_setservername(u8_t idx, const char *server) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (idx < SNTP_MAX_SERVERS) { + sntp_servers[idx].name = server; + } +} + +/** + * Obtain one of the currently configured by name NTP servers. + * + * @param idx the index of the NTP server + * @return IP address of the indexed NTP server or NULL if the NTP + * server has not been configured by name (or at all) + */ +const char * +sntp_getservername(u8_t idx) +{ + if (idx < SNTP_MAX_SERVERS) { + return sntp_servers[idx].name; + } + return NULL; +} +#endif /* SNTP_SERVER_DNS */ + +#endif /* LWIP_UDP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/altcp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/altcp.c new file mode 100644 index 000000000..d46d6cdb1 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/altcp.c @@ -0,0 +1,681 @@ +/** + * @file + * @defgroup altcp Application layered TCP Functions + * @ingroup altcp_api + * + * This file contains the common functions for altcp to work. + * For more details see @ref altcp_api. 
+ */ + +/** + * @defgroup altcp_api Application layered TCP Introduction + * @ingroup callbackstyle_api + * + * Overview + * -------- + * altcp (application layered TCP connection API; to be used from TCPIP thread) + * is an abstraction layer that prevents applications linking hard against the + * @ref tcp.h functions while providing the same functionality. It is used to + * e.g. add SSL/TLS (see LWIP_ALTCP_TLS) or proxy-connect support to an application + * written for the tcp callback API without that application knowing the + * protocol details. + * + * * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * * This is achieved by simply including "lwip/altcp.h" instead of "lwip/tcp.h", + * replacing "struct tcp_pcb" with "struct altcp_pcb" and prefixing all functions + * with "altcp_" instead of "tcp_". + * + * With altcp support disabled (LWIP_ALTCP==0), applications written against the + * altcp API can still be compiled but are directly linked against the tcp.h + * callback API and then cannot use layered protocols. To minimize code changes + * in this case, the use of altcp_allocators is strongly suggested. + * + * Usage + * ----- + * To make use of this API from an existing tcp raw API application: + * * Include "lwip/altcp.h" instead of "lwip/tcp.h" + * * Replace "struct tcp_pcb" with "struct altcp_pcb" + * * Prefix all called tcp API functions with "altcp_" instead of "tcp_" to link + * against the altcp functions + * * @ref altcp_new (and @ref altcp_new_ip_type/@ref altcp_new_ip6) take + * an @ref altcp_allocator_t as an argument, whereas the original tcp API + * functions take no arguments. + * * An @ref altcp_allocator_t allocator is an object that holds a pointer to an + * allocator object and a corresponding state (e.g. for TLS, the corresponding + * state may hold certificates or keys). This way, the application does not + * even need to know if it uses TLS or pure TCP, this is handled at runtime + * by passing a specific allocator. + * * An application can alternatively bind hard to the altcp_tls API by calling + * @ref altcp_tls_new or @ref altcp_tls_wrap. + * * The TLS layer is not directly implemented by lwIP, but a port to mbedTLS is + * provided. + * * Another altcp layer is proxy-connect to use TLS behind a HTTP proxy (see + * @ref altcp_proxyconnect.h) + * + * altcp_allocator_t + * ----------------- + * An altcp allocator is created by the application by combining an allocator + * callback function and a corresponding state, e.g.:\code{.c} + * static const unsigned char cert[] = {0x2D, ... (see mbedTLS doc for how to create this)}; + * struct altcp_tls_config * conf = altcp_tls_create_config_client(cert, sizeof(cert)); + * altcp_allocator_t tls_allocator = { + * altcp_tls_alloc, conf + * }; + * \endcode + * + * + * struct altcp_tls_config + * ----------------------- + * The struct altcp_tls_config holds state that is needed to create new TLS client + * or server connections (e.g. certificates and private keys). + * + * It is not defined by lwIP itself but by the TLS port (e.g. altcp_tls to mbedTLS + * adaption). 
However, the parameters used to create it are defined in @ref + * altcp_tls.h (see @ref altcp_tls_create_config_server_privkey_cert for servers + * and @ref altcp_tls_create_config_client/@ref altcp_tls_create_config_client_2wayauth + * for clients). + * + * For mbedTLS, ensure that certificates can be parsed by 'mbedtls_x509_crt_parse()' and + * private keys can be parsed by 'mbedtls_pk_parse_key()'. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/altcp_tcp.h" +#include "lwip/tcp.h" +#include "lwip/mem.h" + +#include + +extern const struct altcp_functions altcp_tcp_functions; + +/** + * For altcp layer implementations only: allocate a new struct altcp_pcb from the pool + * and zero the memory + */ +struct altcp_pcb * +altcp_alloc(void) +{ + struct altcp_pcb *ret = (struct altcp_pcb *)memp_malloc(MEMP_ALTCP_PCB); + if (ret != NULL) { + memset(ret, 0, sizeof(struct altcp_pcb)); + } + return ret; +} + +/** + * For altcp layer implementations only: return a struct altcp_pcb to the pool + */ +void +altcp_free(struct altcp_pcb *conn) +{ + if (conn) { + if (conn->fns && conn->fns->dealloc) { + conn->fns->dealloc(conn); + } + memp_free(MEMP_ALTCP_PCB, conn); + } +} + +/** + * @ingroup altcp + * altcp_new_ip6: @ref altcp_new for IPv6 + */ +struct altcp_pcb * +altcp_new_ip6(altcp_allocator_t *allocator) +{ + return altcp_new_ip_type(allocator, IPADDR_TYPE_V6); +} + +/** + * @ingroup altcp + * altcp_new: @ref altcp_new for IPv4 + */ +struct altcp_pcb * +altcp_new(altcp_allocator_t *allocator) +{ + return altcp_new_ip_type(allocator, IPADDR_TYPE_V4); +} + +/** + * @ingroup altcp + * altcp_new_ip_type: called by applications to allocate a new pcb with the help of an + * allocator function. 
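+ *
+ * Usage sketch, mirroring the allocator example from the introduction above
+ * ('conf' is assumed to be a valid struct altcp_tls_config and LWIP_ALTCP_TLS
+ * to be enabled):
+ * \code{.c}
+ * altcp_allocator_t tls_allocator = { altcp_tls_alloc, conf };
+ * struct altcp_pcb *pcb = altcp_new_ip_type(&tls_allocator, IPADDR_TYPE_V4);
+ * \endcode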
+ * + * @param allocator allocator function and argument + * @param ip_type IP version of the pcb (@ref lwip_ip_addr_type) + * @return a new altcp_pcb or NULL on error + */ +struct altcp_pcb * +altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type) +{ + struct altcp_pcb *conn; + if (allocator == NULL) { + /* no allocator given, create a simple TCP connection */ + return altcp_tcp_new_ip_type(ip_type); + } + if (allocator->alloc == NULL) { + /* illegal allocator */ + return NULL; + } + conn = allocator->alloc(allocator->arg, ip_type); + if (conn == NULL) { + /* allocation failed */ + return NULL; + } + return conn; +} + +/** + * @ingroup altcp + * @see tcp_arg() + */ +void +altcp_arg(struct altcp_pcb *conn, void *arg) +{ + if (conn) { + conn->arg = arg; + } +} + +/** + * @ingroup altcp + * @see tcp_accept() + */ +void +altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept) +{ + if (conn != NULL) { + conn->accept = accept; + } +} + +/** + * @ingroup altcp + * @see tcp_recv() + */ +void +altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv) +{ + if (conn) { + conn->recv = recv; + } +} + +/** + * @ingroup altcp + * @see tcp_sent() + */ +void +altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent) +{ + if (conn) { + conn->sent = sent; + } +} + +/** + * @ingroup altcp + * @see tcp_poll() + */ +void +altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval) +{ + if (conn) { + conn->poll = poll; + conn->pollinterval = interval; + if (conn->fns && conn->fns->set_poll) { + conn->fns->set_poll(conn, interval); + } + } +} + +/** + * @ingroup altcp + * @see tcp_err() + */ +void +altcp_err(struct altcp_pcb *conn, altcp_err_fn err) +{ + if (conn) { + conn->err = err; + } +} + +/* Generic functions calling the "virtual" ones */ + +/** + * @ingroup altcp + * @see tcp_recved() + */ +void +altcp_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn && conn->fns && conn->fns->recved) { + conn->fns->recved(conn, len); + } +} + +/** + * @ingroup altcp + * @see tcp_bind() + */ +err_t +altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + if (conn && conn->fns && conn->fns->bind) { + return conn->fns->bind(conn, ipaddr, port); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_connect() + */ +err_t +altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + if (conn && conn->fns && conn->fns->connect) { + return conn->fns->connect(conn, ipaddr, port, connected); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_listen_with_backlog_and_err() + */ +struct altcp_pcb * +altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + if (conn && conn->fns && conn->fns->listen) { + return conn->fns->listen(conn, backlog, err); + } + return NULL; +} + +/** + * @ingroup altcp + * @see tcp_abort() + */ +void +altcp_abort(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->abort) { + conn->fns->abort(conn); + } +} + +/** + * @ingroup altcp + * @see tcp_close() + */ +err_t +altcp_close(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->close) { + return conn->fns->close(conn); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_shutdown() + */ +err_t +altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + if (conn && conn->fns && conn->fns->shutdown) { + return conn->fns->shutdown(conn, shut_rx, shut_tx); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_write() + */ +err_t +altcp_write(struct altcp_pcb 
*conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + if (conn && conn->fns && conn->fns->write) { + return conn->fns->write(conn, dataptr, len, apiflags); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_output() + */ +err_t +altcp_output(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->output) { + return conn->fns->output(conn); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_mss() + */ +u16_t +altcp_mss(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->mss) { + return conn->fns->mss(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_sndbuf() + */ +u16_t +altcp_sndbuf(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->sndbuf) { + return conn->fns->sndbuf(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_sndqueuelen() + */ +u16_t +altcp_sndqueuelen(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->sndqueuelen) { + return conn->fns->sndqueuelen(conn); + } + return 0; +} + +void +altcp_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_disable) { + conn->fns->nagle_disable(conn); + } +} + +void +altcp_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_enable) { + conn->fns->nagle_enable(conn); + } +} + +int +altcp_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_disabled) { + return conn->fns->nagle_disabled(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_setprio() + */ +void +altcp_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn && conn->fns && conn->fns->setprio) { + conn->fns->setprio(conn, prio); + } +} + +err_t +altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn && conn->fns && conn->fns->addrinfo) { + return conn->fns->addrinfo(conn, local, addr, port); + } + return ERR_VAL; +} + +ip_addr_t * +altcp_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn && conn->fns && conn->fns->getip) { + return conn->fns->getip(conn, local); + } + return NULL; +} + +u16_t +altcp_get_port(struct altcp_pcb *conn, int local) +{ + if (conn && conn->fns && conn->fns->getport) { + return conn->fns->getport(conn, local); + } + return 0; +} + +#ifdef LWIP_DEBUG +enum tcp_state +altcp_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->dbg_get_tcp_state) { + return conn->fns->dbg_get_tcp_state(conn); + } + return CLOSED; +} +#endif + +/* Default implementations for the "virtual" functions */ + +void +altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn && conn->inner_conn) { + altcp_poll(conn->inner_conn, conn->poll, interval); + } +} + +void +altcp_default_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn && conn->inner_conn) { + altcp_recved(conn->inner_conn, len); + } +} + +err_t +altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + if (conn && conn->inner_conn) { + return altcp_bind(conn->inner_conn, ipaddr, port); + } + return ERR_VAL; +} + +err_t +altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + if (conn) { + if (shut_rx && shut_tx && conn->fns && conn->fns->close) { + /* default shutdown for both sides is close */ + return conn->fns->close(conn); + } + if (conn->inner_conn) { + return altcp_shutdown(conn->inner_conn, shut_rx, shut_tx); + } + } + return ERR_VAL; +} + +err_t +altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + if (conn && 
conn->inner_conn) { + return altcp_write(conn->inner_conn, dataptr, len, apiflags); + } + return ERR_VAL; +} + +err_t +altcp_default_output(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_output(conn->inner_conn); + } + return ERR_VAL; +} + +u16_t +altcp_default_mss(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_mss(conn->inner_conn); + } + return 0; +} + +u16_t +altcp_default_sndbuf(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_sndbuf(conn->inner_conn); + } + return 0; +} + +u16_t +altcp_default_sndqueuelen(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_sndqueuelen(conn->inner_conn); + } + return 0; +} + +void +altcp_default_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + altcp_nagle_disable(conn->inner_conn); + } +} + +void +altcp_default_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + altcp_nagle_enable(conn->inner_conn); + } +} + +int +altcp_default_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_nagle_disabled(conn->inner_conn); + } + return 0; +} + +void +altcp_default_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn && conn->inner_conn) { + altcp_setprio(conn->inner_conn, prio); + } +} + +void +altcp_default_dealloc(struct altcp_pcb *conn) +{ + LWIP_UNUSED_ARG(conn); + /* nothing to do */ +} + +err_t +altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn && conn->inner_conn) { + return altcp_get_tcp_addrinfo(conn->inner_conn, local, addr, port); + } + return ERR_VAL; +} + +ip_addr_t * +altcp_default_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn && conn->inner_conn) { + return altcp_get_ip(conn->inner_conn, local); + } + return NULL; +} + +u16_t +altcp_default_get_port(struct altcp_pcb *conn, int local) +{ + if (conn && conn->inner_conn) { + return altcp_get_port(conn->inner_conn, local); + } + return 0; +} + +#ifdef LWIP_DEBUG +enum tcp_state +altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_dbg_get_tcp_state(conn->inner_conn); + } + return CLOSED; +} +#endif + + +#endif /* LWIP_ALTCP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c new file mode 100644 index 000000000..cd619bc1b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c @@ -0,0 +1,87 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains allocation implementation that combine several layers. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/altcp_tls.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/mem.h" + +#include + +#if LWIP_ALTCP_TLS + +/** This standard allocator function creates an altcp pcb for + * TLS over TCP */ +struct altcp_pcb * +altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type) +{ + struct altcp_pcb *inner_conn, *ret; + LWIP_UNUSED_ARG(ip_type); + + inner_conn = altcp_tcp_new_ip_type(ip_type); + if (inner_conn == NULL) { + return NULL; + } + ret = altcp_tls_wrap(config, inner_conn); + if (ret == NULL) { + altcp_close(inner_conn); + } + return ret; +} + +/** This standard allocator function creates an altcp pcb for + * TLS over TCP */ +struct altcp_pcb * +altcp_tls_alloc(void *arg, u8_t ip_type) +{ + return altcp_tls_new((struct altcp_tls_config *)arg, ip_type); +} + +#endif /* LWIP_ALTCP_TLS */ + +#endif /* LWIP_ALTCP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c new file mode 100644 index 000000000..b715f0457 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c @@ -0,0 +1,543 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains the base implementation calling into tcp. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/tcp.h" +#include "lwip/mem.h" + +#include + +#define ALTCP_TCP_ASSERT_CONN(conn) do { \ + LWIP_ASSERT("conn->inner_conn == NULL", (conn)->inner_conn == NULL); \ + LWIP_UNUSED_ARG(conn); /* for LWIP_NOASSERT */ } while(0) +#define ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb) do { \ + LWIP_ASSERT("pcb mismatch", (conn)->state == tpcb); \ + LWIP_UNUSED_ARG(tpcb); /* for LWIP_NOASSERT */ \ + ALTCP_TCP_ASSERT_CONN(conn); } while(0) + + +/* Variable prototype, the actual declaration is at the end of this file + since it contains pointers to static functions declared here */ +extern const struct altcp_functions altcp_tcp_functions; + +static void altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb); + +/* callback functions for TCP */ +static err_t +altcp_tcp_accept(void *arg, struct tcp_pcb *new_tpcb, err_t err) +{ + struct altcp_pcb *listen_conn = (struct altcp_pcb *)arg; + if (listen_conn && listen_conn->accept) { + /* create a new altcp_conn to pass to the next 'accept' callback */ + struct altcp_pcb *new_conn = altcp_alloc(); + if (new_conn == NULL) { + return ERR_MEM; + } + altcp_tcp_setup(new_conn, new_tpcb); + return listen_conn->accept(listen_conn->arg, new_conn, err); + } + return ERR_ARG; +} + +static err_t +altcp_tcp_connected(void *arg, struct tcp_pcb *tpcb, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->connected) { + return conn->connected(conn->arg, conn, err); + } + } + return ERR_OK; +} + +static err_t +altcp_tcp_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->recv) { + return conn->recv(conn->arg, conn, p, err); + } + } + if (p != NULL) { + /* prevent memory leaks */ + pbuf_free(p); + } + return ERR_OK; +} + +static err_t +altcp_tcp_sent(void *arg, struct tcp_pcb *tpcb, u16_t len) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->sent) { + return conn->sent(conn->arg, conn, len); + } + } + return 
ERR_OK; +} + +static err_t +altcp_tcp_poll(void *arg, struct tcp_pcb *tpcb) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->poll) { + return conn->poll(conn->arg, conn); + } + } + return ERR_OK; +} + +static void +altcp_tcp_err(void *arg, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + conn->state = NULL; /* already freed */ + if (conn->err) { + conn->err(conn->arg, err); + } + altcp_free(conn); + } +} + +/* setup functions */ + +static void +altcp_tcp_remove_callbacks(struct tcp_pcb *tpcb) +{ + tcp_arg(tpcb, NULL); + tcp_recv(tpcb, NULL); + tcp_sent(tpcb, NULL); + tcp_err(tpcb, NULL); + tcp_poll(tpcb, NULL, tpcb->pollinterval); +} + +static void +altcp_tcp_setup_callbacks(struct altcp_pcb *conn, struct tcp_pcb *tpcb) +{ + tcp_arg(tpcb, conn); + tcp_recv(tpcb, altcp_tcp_recv); + tcp_sent(tpcb, altcp_tcp_sent); + tcp_err(tpcb, altcp_tcp_err); + /* tcp_poll is set when interval is set by application */ + /* listen is set totally different :-) */ +} + +static void +altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb) +{ + altcp_tcp_setup_callbacks(conn, tpcb); + conn->state = tpcb; + conn->fns = &altcp_tcp_functions; +} + +struct altcp_pcb * +altcp_tcp_new_ip_type(u8_t ip_type) +{ + /* Allocate the tcp pcb first to invoke the priority handling code + if we're out of pcbs */ + struct tcp_pcb *tpcb = tcp_new_ip_type(ip_type); + if (tpcb != NULL) { + struct altcp_pcb *ret = altcp_alloc(); + if (ret != NULL) { + altcp_tcp_setup(ret, tpcb); + return ret; + } else { + /* altcp_pcb allocation failed -> free the tcp_pcb too */ + tcp_close(tpcb); + } + } + return NULL; +} + +/** altcp_tcp allocator function fitting to @ref altcp_allocator_t / @ref altcp_new. +* +* arg pointer is not used for TCP. 
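A minimal sketch of how an application might plug this allocator into the altcp layer; it assumes the altcp_allocator_t layout from lwip/altcp.h and uses plain TCP (no TLS), so the arg member stays NULL. The function name is illustrative only.

#include "lwip/altcp.h"
#include "lwip/altcp_tcp.h"

/* Illustrative only: create an application-layer pcb backed by plain TCP.
   altcp_tcp_alloc() ignores its arg, so NULL is passed here. */
static struct altcp_pcb *
example_open_plain_tcp(void)
{
  altcp_allocator_t tcp_allocator = { altcp_tcp_alloc, NULL };
  return altcp_new_ip_type(&tcp_allocator, IPADDR_TYPE_ANY);
}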
+*/ +struct altcp_pcb * +altcp_tcp_alloc(void *arg, u8_t ip_type) +{ + LWIP_UNUSED_ARG(arg); + return altcp_tcp_new_ip_type(ip_type); +} + +struct altcp_pcb * +altcp_tcp_wrap(struct tcp_pcb *tpcb) +{ + if (tpcb != NULL) { + struct altcp_pcb *ret = altcp_alloc(); + if (ret != NULL) { + altcp_tcp_setup(ret, tpcb); + return ret; + } + } + return NULL; +} + + +/* "virtual" functions calling into tcp */ +static void +altcp_tcp_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_poll(pcb, altcp_tcp_poll, interval); + } +} + +static void +altcp_tcp_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_recved(pcb, len); + } +} + +static err_t +altcp_tcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_bind(pcb, ipaddr, port); +} + +static err_t +altcp_tcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + conn->connected = connected; + pcb = (struct tcp_pcb *)conn->state; + return tcp_connect(pcb, ipaddr, port, altcp_tcp_connected); +} + +static struct altcp_pcb * +altcp_tcp_listen(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + struct tcp_pcb *pcb; + struct tcp_pcb *lpcb; + if (conn == NULL) { + return NULL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + lpcb = tcp_listen_with_backlog_and_err(pcb, backlog, err); + if (lpcb != NULL) { + conn->state = lpcb; + tcp_accept(lpcb, altcp_tcp_accept); + return conn; + } + return NULL; +} + +static void +altcp_tcp_abort(struct altcp_pcb *conn) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + tcp_abort(pcb); + } + } +} + +static err_t +altcp_tcp_close(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + if (pcb) { + err_t err; + tcp_poll_fn oldpoll = pcb->poll; + altcp_tcp_remove_callbacks(pcb); + err = tcp_close(pcb); + if (err != ERR_OK) { + /* not closed, set up all callbacks again */ + altcp_tcp_setup_callbacks(conn, pcb); + /* poll callback is not included in the above */ + tcp_poll(pcb, oldpoll, pcb->pollinterval); + return err; + } + conn->state = NULL; /* unsafe to reference pcb after tcp_close(). 
*/ + } + altcp_free(conn); + return ERR_OK; +} + +static err_t +altcp_tcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_shutdown(pcb, shut_rx, shut_tx); +} + +static err_t +altcp_tcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_write(pcb, dataptr, len, apiflags); +} + +static err_t +altcp_tcp_output(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_output(pcb); +} + +static u16_t +altcp_tcp_mss(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_mss(pcb); +} + +static u16_t +altcp_tcp_sndbuf(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_sndbuf(pcb); +} + +static u16_t +altcp_tcp_sndqueuelen(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_sndqueuelen(pcb); +} + +static void +altcp_tcp_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_nagle_disable(pcb); + } +} + +static void +altcp_tcp_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_nagle_enable(pcb); + } +} + +static int +altcp_tcp_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + return tcp_nagle_disabled(pcb); + } + return 0; +} + +static void +altcp_tcp_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_setprio(pcb, prio); + } +} + +static void +altcp_tcp_dealloc(struct altcp_pcb *conn) +{ + LWIP_UNUSED_ARG(conn); + ALTCP_TCP_ASSERT_CONN(conn); + /* no private state to clean up */ +} + +static err_t +altcp_tcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + return tcp_tcp_get_tcp_addrinfo(pcb, local, addr, port); + } + return ERR_VAL; +} + +static ip_addr_t * +altcp_tcp_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + if (local) { + return &pcb->local_ip; + } else { + return &pcb->remote_ip; + } + } + } + return NULL; +} + +static u16_t +altcp_tcp_get_port(struct altcp_pcb *conn, int local) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + if (local) { + return pcb->local_port; + } else { + return pcb->remote_port; + } + } + } + return 0; +} + +#ifdef LWIP_DEBUG +static enum tcp_state +altcp_tcp_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn) { + struct 
tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + return pcb->state; + } + } + return CLOSED; +} +#endif +const struct altcp_functions altcp_tcp_functions = { + altcp_tcp_set_poll, + altcp_tcp_recved, + altcp_tcp_bind, + altcp_tcp_connect, + altcp_tcp_listen, + altcp_tcp_abort, + altcp_tcp_close, + altcp_tcp_shutdown, + altcp_tcp_write, + altcp_tcp_output, + altcp_tcp_mss, + altcp_tcp_sndbuf, + altcp_tcp_sndqueuelen, + altcp_tcp_nagle_disable, + altcp_tcp_nagle_enable, + altcp_tcp_nagle_disabled, + altcp_tcp_setprio, + altcp_tcp_dealloc, + altcp_tcp_get_tcp_addrinfo, + altcp_tcp_get_ip, + altcp_tcp_get_port +#ifdef LWIP_DEBUG + , altcp_tcp_dbg_get_tcp_state +#endif +}; + +#endif /* LWIP_ALTCP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/def.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/def.c new file mode 100644 index 000000000..9da36fee8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/def.c @@ -0,0 +1,240 @@ +/** + * @file + * Common functions used throughout the stack. + * + * These are reference implementations of the byte swapping functions. + * Again with the aim of being simple, correct and fully portable. + * Byte swapping is the second thing you would want to optimize. You will + * need to port it to your architecture and in your cc.h: + * + * \#define lwip_htons(x) your_htons + * \#define lwip_htonl(x) your_htonl + * + * Note lwip_ntohs() and lwip_ntohl() are merely references to the htonx counterparts. + * + * If you \#define them to htons() and htonl(), you should + * \#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS to prevent lwIP from + * defining htonx/ntohx compatibility macros. + + * @defgroup sys_nonstandard Non-standard functions + * @ingroup sys_layer + * lwIP provides default implementations for non-standard functions. + * These can be mapped to OS functions to reduce code footprint if desired. + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/cc.h! + * These options cannot be \#defined in lwipopts.h since they are not options + * of lwIP itself, but options of the lwIP port to your system. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" +#include "lwip/def.h" + +#include + +#if BYTE_ORDER == LITTLE_ENDIAN + +#if !defined(lwip_htons) +/** + * Convert an u16_t from host- to network byte order. + * + * @param n u16_t in host byte order + * @return n in network byte order + */ +u16_t +lwip_htons(u16_t n) +{ + return PP_HTONS(n); +} +#endif /* lwip_htons */ + +#if !defined(lwip_htonl) +/** + * Convert an u32_t from host- to network byte order. + * + * @param n u32_t in host byte order + * @return n in network byte order + */ +u32_t +lwip_htonl(u32_t n) +{ + return PP_HTONL(n); +} +#endif /* lwip_htonl */ + +#endif /* BYTE_ORDER == LITTLE_ENDIAN */ + +#ifndef lwip_strnstr +/** + * @ingroup sys_nonstandard + * lwIP default implementation for strnstr() non-standard function. + * This can be \#defined to strnstr() depending on your platform port. + */ +char * +lwip_strnstr(const char *buffer, const char *token, size_t n) +{ + const char *p; + size_t tokenlen = strlen(token); + if (tokenlen == 0) { + return LWIP_CONST_CAST(char *, buffer); + } + for (p = buffer; *p && (p + tokenlen <= buffer + n); p++) { + if ((*p == *token) && (strncmp(p, token, tokenlen) == 0)) { + return LWIP_CONST_CAST(char *, p); + } + } + return NULL; +} +#endif + +#ifndef lwip_stricmp +/** + * @ingroup sys_nonstandard + * lwIP default implementation for stricmp() non-standard function. + * This can be \#defined to stricmp() depending on your platform port. + */ +int +lwip_stricmp(const char *str1, const char *str2) +{ + char c1, c2; + + do { + c1 = *str1++; + c2 = *str2++; + if (c1 != c2) { + char c1_upc = c1 | 0x20; + if ((c1_upc >= 'a') && (c1_upc <= 'z')) { + /* characters are not equal an one is in the alphabet range: + downcase both chars and check again */ + char c2_upc = c2 | 0x20; + if (c1_upc != c2_upc) { + /* still not equal */ + /* don't care for < or > */ + return 1; + } + } else { + /* characters are not equal but none is in the alphabet range */ + return 1; + } + } + } while (c1 != 0); + return 0; +} +#endif + +#ifndef lwip_strnicmp +/** + * @ingroup sys_nonstandard + * lwIP default implementation for strnicmp() non-standard function. + * This can be \#defined to strnicmp() depending on your platform port. 
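As the notes above suggest, a port can map these helpers onto its own C library in arch/cc.h instead of compiling the defaults; a sketch assuming POSIX-style case-insensitive compares are available on the toolchain (they may not be on every embedded libc):

/* arch/cc.h (sketch): use the platform's case-insensitive compares, if present. */
#include <strings.h>
#define lwip_stricmp(str1, str2)        strcasecmp(str1, str2)
#define lwip_strnicmp(str1, str2, len)  strncasecmp(str1, str2, len)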
+ */ +int +lwip_strnicmp(const char *str1, const char *str2, size_t len) +{ + char c1, c2; + + do { + c1 = *str1++; + c2 = *str2++; + if (c1 != c2) { + char c1_upc = c1 | 0x20; + if ((c1_upc >= 'a') && (c1_upc <= 'z')) { + /* characters are not equal an one is in the alphabet range: + downcase both chars and check again */ + char c2_upc = c2 | 0x20; + if (c1_upc != c2_upc) { + /* still not equal */ + /* don't care for < or > */ + return 1; + } + } else { + /* characters are not equal but none is in the alphabet range */ + return 1; + } + } + len--; + } while ((len != 0) && (c1 != 0)); + return 0; +} +#endif + +#ifndef lwip_itoa +/** + * @ingroup sys_nonstandard + * lwIP default implementation for itoa() non-standard function. + * This can be \#defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform port. + */ +void +lwip_itoa(char *result, size_t bufsize, int number) +{ + char *res = result; + char *tmp = result + bufsize - 1; + int n = (number >= 0) ? number : -number; + + /* handle invalid bufsize */ + if (bufsize < 2) { + if (bufsize == 1) { + *result = 0; + } + return; + } + + /* First, add sign */ + if (number < 0) { + *res++ = '-'; + } + /* Then create the string from the end and stop if buffer full, + and ensure output string is zero terminated */ + *tmp = 0; + while ((n != 0) && (tmp > res)) { + char val = (char)('0' + (n % 10)); + tmp--; + *tmp = val; + n = n / 10; + } + if (n) { + /* buffer is too small */ + *result = 0; + return; + } + if (*tmp == 0) { + /* Nothing added? */ + *res++ = '0'; + *res++ = 0; + return; + } + /* move from temporary buffer to output buffer (sign is not moved) */ + memmove(res, tmp, (size_t)((result + bufsize) - tmp)); +} +#endif diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/dns.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/dns.c new file mode 100644 index 000000000..9d2f61ed8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/dns.c @@ -0,0 +1,1631 @@ +/** + * @file + * DNS - host name to IP address resolver. + * + * @defgroup dns DNS + * @ingroup callbackstyle_api + * + * Implements a DNS host name to IP address resolver. + * + * The lwIP DNS resolver functions are used to lookup a host name and + * map it to a numerical IP address. It maintains a list of resolved + * hostnames that can be queried with the dns_lookup() function. + * New hostnames can be resolved using the dns_query() function. + * + * The lwIP version of the resolver also adds a non-blocking version of + * gethostbyname() that will work with a raw API application. This function + * checks for an IP address string first and converts it if it is valid. + * gethostbyname() then does a dns_lookup() to see if the name is + * already in the table. If so, the IP is returned. If not, a query is + * issued and the function returns with a ERR_INPROGRESS status. The app + * using the dns client must then go into a waiting state. + * + * Once a hostname has been resolved (or found to be non-existent), + * the resolver code calls a specified callback function (which + * must be implemented by the module that uses the resolver). + * + * Multicast DNS queries are supported for names ending on ".local". + * However, only "One-Shot Multicast DNS Queries" are supported (RFC 6762 + * chapter 5.1), this is not a fully compliant implementation of continuous + * mDNS querying! + * + * All functions must be called from TCPIP thread. 
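A sketch of the raw-API usage pattern described above; the callback and function names are illustrative, and the calls must be made from the TCPIP thread as noted.

#include "lwip/dns.h"
#include "lwip/ip_addr.h"

/* Called later if the name was not already cached in the DNS table. */
static void
example_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(name);
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    /* resolved: connect, store the address, ... */
  } else {
    /* resolution failed or timed out */
  }
}

static void
example_resolve(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.org", &addr, example_dns_found, NULL);
  if (err == ERR_OK) {
    /* name was an IP string or already cached: addr is valid right away */
  } else if (err == ERR_INPROGRESS) {
    /* query sent: wait for example_dns_found() to be called */
  } /* any other error: no free table entry, name too long, ... */
}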
+ * + * @see DNS_MAX_SERVERS + * @see LWIP_DHCP_MAX_DNS_SERVERS + * @see @ref netconn_common for thread-safe access. + */ + +/* + * Port to lwIP from uIP + * by Jim Pettinato April 2007 + * + * security fixes and more by Simon Goldschmidt + * + * uIP version Copyright (c) 2002-2003, Adam Dunkels. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*----------------------------------------------------------------------------- + * RFC 1035 - Domain names - implementation and specification + * RFC 2181 - Clarifications to the DNS Specification + *----------------------------------------------------------------------------*/ + +/** @todo: define good default values (rfc compliance) */ +/** @todo: improve answer parsing, more checkings... */ +/** @todo: check RFC1035 - 7.3. Processing responses */ +/** @todo: one-shot mDNS: dual-stack fallback to another IP version */ + +/*----------------------------------------------------------------------------- + * Includes + *----------------------------------------------------------------------------*/ + +#include "lwip/opt.h" + +#if LWIP_DNS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/udp.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/dns.h" +#include "lwip/prot/dns.h" + +#include + +/** Random generator function to create random TXIDs and source ports for queries */ +#ifndef DNS_RAND_TXID +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_XID) != 0) +#define DNS_RAND_TXID LWIP_RAND +#else +static u16_t dns_txid; +#define DNS_RAND_TXID() (++dns_txid) +#endif +#endif + +/** Limits the source port to be >= 1024 by default */ +#ifndef DNS_PORT_ALLOWED +#define DNS_PORT_ALLOWED(port) ((port) >= 1024) +#endif + +/** DNS resource record max. TTL (one week as default) */ +#ifndef DNS_MAX_TTL +#define DNS_MAX_TTL 604800 +#elif DNS_MAX_TTL > 0x7FFFFFFF +#error DNS_MAX_TTL must be a positive 32-bit value +#endif + +#if DNS_TABLE_SIZE > 255 +#error DNS_TABLE_SIZE must fit into an u8_t +#endif +#if DNS_MAX_SERVERS > 255 +#error DNS_MAX_SERVERS must fit into an u8_t +#endif + +/* The number of parallel requests (i.e. 
calls to dns_gethostbyname + * that cannot be answered from the DNS table. + * This is set to the table size by default. + */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) +#ifndef DNS_MAX_REQUESTS +#define DNS_MAX_REQUESTS DNS_TABLE_SIZE +#else +#if DNS_MAX_REQUESTS > 255 +#error DNS_MAX_REQUESTS must fit into an u8_t +#endif +#endif +#else +/* In this configuration, both arrays have to have the same size and are used + * like one entry (used/free) */ +#define DNS_MAX_REQUESTS DNS_TABLE_SIZE +#endif + +/* The number of UDP source ports used in parallel */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +#ifndef DNS_MAX_SOURCE_PORTS +#define DNS_MAX_SOURCE_PORTS DNS_MAX_REQUESTS +#else +#if DNS_MAX_SOURCE_PORTS > 255 +#error DNS_MAX_SOURCE_PORTS must fit into an u8_t +#endif +#endif +#else +#ifdef DNS_MAX_SOURCE_PORTS +#undef DNS_MAX_SOURCE_PORTS +#endif +#define DNS_MAX_SOURCE_PORTS 1 +#endif + +#if LWIP_IPV4 && LWIP_IPV6 +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) (((t) == LWIP_DNS_ADDRTYPE_IPV6_IPV4) || ((t) == LWIP_DNS_ADDRTYPE_IPV6)) +#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) (IP_IS_V6_VAL(ip) ? LWIP_DNS_ADDRTYPE_IS_IPV6(t) : (!LWIP_DNS_ADDRTYPE_IS_IPV6(t))) +#define LWIP_DNS_ADDRTYPE_ARG(x) , x +#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) x +#define LWIP_DNS_SET_ADDRTYPE(x, y) do { x = y; } while(0) +#else +#if LWIP_IPV6 +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 1 +#else +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 0 +#endif +#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) 1 +#define LWIP_DNS_ADDRTYPE_ARG(x) +#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) 0 +#define LWIP_DNS_SET_ADDRTYPE(x, y) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES +#define LWIP_DNS_ISMDNS_ARG(x) , x +#else +#define LWIP_DNS_ISMDNS_ARG(x) +#endif + +/** DNS query message structure. + No packing needed: only used locally on the stack. */ +struct dns_query { + /* DNS query record starts with either a domain name or a pointer + to a name already present somewhere in the packet. */ + u16_t type; + u16_t cls; +}; +#define SIZEOF_DNS_QUERY 4 + +/** DNS answer message structure. + No packing needed: only used locally on the stack. */ +struct dns_answer { + /* DNS answer record starts with either a domain name or a pointer + to a name already present somewhere in the packet. 
*/ + u16_t type; + u16_t cls; + u32_t ttl; + u16_t len; +}; +#define SIZEOF_DNS_ANSWER 10 +/* maximum allowed size for the struct due to non-packed */ +#define SIZEOF_DNS_ANSWER_ASSERT 12 + +/* DNS table entry states */ +typedef enum { + DNS_STATE_UNUSED = 0, + DNS_STATE_NEW = 1, + DNS_STATE_ASKING = 2, + DNS_STATE_DONE = 3 +} dns_state_enum_t; + +/** DNS table entry */ +struct dns_table_entry { + u32_t ttl; + ip_addr_t ipaddr; + u16_t txid; + u8_t state; + u8_t server_idx; + u8_t tmr; + u8_t retries; + u8_t seqno; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + u8_t pcb_idx; +#endif + char name[DNS_MAX_NAME_LENGTH]; +#if LWIP_IPV4 && LWIP_IPV6 + u8_t reqaddrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + u8_t is_mdns; +#endif +}; + +/** DNS request table entry: used when dns_gehostbyname cannot answer the + * request from the DNS table */ +struct dns_req_entry { + /* pointer to callback on DNS query done */ + dns_found_callback found; + /* argument passed to the callback function */ + void *arg; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + u8_t dns_table_idx; +#endif +#if LWIP_IPV4 && LWIP_IPV6 + u8_t reqaddrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +}; + +#if DNS_LOCAL_HOSTLIST + +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +/** Local host-list. For hostnames in this list, no + * external name resolution is performed */ +static struct local_hostlist_entry *local_hostlist_dynamic; +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +/** Defining this allows the local_hostlist_static to be placed in a different + * linker section (e.g. FLASH) */ +#ifndef DNS_LOCAL_HOSTLIST_STORAGE_PRE +#define DNS_LOCAL_HOSTLIST_STORAGE_PRE static +#endif /* DNS_LOCAL_HOSTLIST_STORAGE_PRE */ +/** Defining this allows the local_hostlist_static to be placed in a different + * linker section (e.g. FLASH) */ +#ifndef DNS_LOCAL_HOSTLIST_STORAGE_POST +#define DNS_LOCAL_HOSTLIST_STORAGE_POST +#endif /* DNS_LOCAL_HOSTLIST_STORAGE_POST */ +DNS_LOCAL_HOSTLIST_STORAGE_PRE struct local_hostlist_entry local_hostlist_static[] + DNS_LOCAL_HOSTLIST_STORAGE_POST = DNS_LOCAL_HOSTLIST_INIT; + +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +static void dns_init_local(void); +static err_t dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)); +#endif /* DNS_LOCAL_HOSTLIST */ + + +/* forward declarations */ +static void dns_recv(void *s, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); +static void dns_check_entries(void); +static void dns_call_found(u8_t idx, ip_addr_t *addr); + +/*----------------------------------------------------------------------------- + * Globals + *----------------------------------------------------------------------------*/ + +/* DNS variables */ +static struct udp_pcb *dns_pcbs[DNS_MAX_SOURCE_PORTS]; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static u8_t dns_last_pcb_idx; +#endif +static u8_t dns_seqno; +static struct dns_table_entry dns_table[DNS_TABLE_SIZE]; +static struct dns_req_entry dns_requests[DNS_MAX_REQUESTS]; +static ip_addr_t dns_servers[DNS_MAX_SERVERS]; + +#if LWIP_IPV4 +const ip_addr_t dns_mquery_v4group = DNS_MQUERY_IPV4_GROUP_INIT; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +const ip_addr_t dns_mquery_v6group = DNS_MQUERY_IPV6_GROUP_INIT; +#endif /* LWIP_IPV6 */ + +/** + * Initialize the resolver: set up the UDP pcb and configure the default server + * (if DNS_SERVER_ADDRESS is set). 
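Besides the build-time DNS_SERVER_ADDRESS hook mentioned above, a server can also be configured at runtime with dns_setserver(); a minimal sketch (the server address and function name are examples only):

#include "lwip/dns.h"
#include "lwip/ip_addr.h"

/* Sketch: set the primary DNS server (index 0) at runtime. */
static void
example_set_dns_server(void)
{
  ip_addr_t dnsserver;
  IP_ADDR4(&dnsserver, 8, 8, 8, 8);
  dns_setserver(0, &dnsserver);
}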
+ */ +void +dns_init(void) +{ +#ifdef DNS_SERVER_ADDRESS + /* initialize default DNS server address */ + ip_addr_t dnsserver; + DNS_SERVER_ADDRESS(&dnsserver); + dns_setserver(0, &dnsserver); +#endif /* DNS_SERVER_ADDRESS */ + + LWIP_ASSERT("sanity check SIZEOF_DNS_QUERY", + sizeof(struct dns_query) == SIZEOF_DNS_QUERY); + LWIP_ASSERT("sanity check SIZEOF_DNS_ANSWER", + sizeof(struct dns_answer) <= SIZEOF_DNS_ANSWER_ASSERT); + + LWIP_DEBUGF(DNS_DEBUG, ("dns_init: initializing\n")); + + /* if dns client not yet initialized... */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) + if (dns_pcbs[0] == NULL) { + dns_pcbs[0] = udp_new_ip_type(IPADDR_TYPE_ANY); + LWIP_ASSERT("dns_pcbs[0] != NULL", dns_pcbs[0] != NULL); + + /* initialize DNS table not needed (initialized to zero since it is a + * global variable) */ + LWIP_ASSERT("For implicit initialization to work, DNS_STATE_UNUSED needs to be 0", + DNS_STATE_UNUSED == 0); + + /* initialize DNS client */ + udp_bind(dns_pcbs[0], IP_ANY_TYPE, 0); + udp_recv(dns_pcbs[0], dns_recv, NULL); + } +#endif + +#if DNS_LOCAL_HOSTLIST + dns_init_local(); +#endif +} + +/** + * @ingroup dns + * Initialize one of the DNS servers. + * + * @param numdns the index of the DNS server to set must be < DNS_MAX_SERVERS + * @param dnsserver IP address of the DNS server to set + */ +void +dns_setserver(u8_t numdns, const ip_addr_t *dnsserver) +{ + if (numdns < DNS_MAX_SERVERS) { + if (dnsserver != NULL) { + dns_servers[numdns] = (*dnsserver); + } else { + dns_servers[numdns] = *IP_ADDR_ANY; + } + } +} + +/** + * @ingroup dns + * Obtain one of the currently configured DNS server. + * + * @param numdns the index of the DNS server + * @return IP address of the indexed DNS server or "ip_addr_any" if the DNS + * server has not been configured. + */ +const ip_addr_t * +dns_getserver(u8_t numdns) +{ + if (numdns < DNS_MAX_SERVERS) { + return &dns_servers[numdns]; + } else { + return IP_ADDR_ANY; + } +} + +/** + * The DNS resolver client timer - handle retries and timeouts and should + * be called every DNS_TMR_INTERVAL milliseconds (every second by default). + */ +void +dns_tmr(void) +{ + LWIP_DEBUGF(DNS_DEBUG, ("dns_tmr: dns_check_entries\n")); + dns_check_entries(); +} + +#if DNS_LOCAL_HOSTLIST +static void +dns_init_local(void) +{ +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) + size_t i; + struct local_hostlist_entry *entry; + /* Dynamic: copy entries from DNS_LOCAL_HOSTLIST_INIT to list */ + struct local_hostlist_entry local_hostlist_init[] = DNS_LOCAL_HOSTLIST_INIT; + size_t namelen; + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_init); i++) { + struct local_hostlist_entry *init_entry = &local_hostlist_init[i]; + LWIP_ASSERT("invalid host name (NULL)", init_entry->name != NULL); + namelen = strlen(init_entry->name); + LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); + entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); + LWIP_ASSERT("mem-error in dns_init_local", entry != NULL); + if (entry != NULL) { + char *entry_name = (char *)entry + sizeof(struct local_hostlist_entry); + MEMCPY(entry_name, init_entry->name, namelen); + entry_name[namelen] = 0; + entry->name = entry_name; + entry->addr = init_entry->addr; + entry->next = local_hostlist_dynamic; + local_hostlist_dynamic = entry; + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) */ +} + +/** + * @ingroup dns + * Iterate the local host-list for a hostname. 
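A sketch of an iterator callback for dns_local_iterate() (available when DNS_LOCAL_HOSTLIST is enabled); the function names and printf output are illustrative:

#include <stdio.h>
#include "lwip/dns.h"

/* Illustrative iterator: print every entry of the local host-list. */
static void
example_print_local_host(const char *name, const ip_addr_t *addr, void *arg)
{
  LWIP_UNUSED_ARG(arg);
  printf("local host-list: %s -> %s\n", name, ipaddr_ntoa(addr));
}

/* dns_local_iterate() returns the number of entries visited. */
static size_t
example_dump_local_hostlist(void)
{
  return dns_local_iterate(example_print_local_host, NULL);
}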
+ * + * @param iterator_fn a function that is called for every entry in the local host-list + * @param iterator_arg 3rd argument passed to iterator_fn + * @return the number of entries in the local host-list + */ +size_t +dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg) +{ + size_t i; +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC + struct local_hostlist_entry *entry = local_hostlist_dynamic; + i = 0; + while (entry != NULL) { + if (iterator_fn != NULL) { + iterator_fn(entry->name, &entry->addr, iterator_arg); + } + i++; + entry = entry->next; + } +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { + if (iterator_fn != NULL) { + iterator_fn(local_hostlist_static[i].name, &local_hostlist_static[i].addr, iterator_arg); + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + return i; +} + +/** + * @ingroup dns + * Scans the local host-list for a hostname. + * + * @param hostname Hostname to look for in the local host-list + * @param addr the first IP address for the hostname in the local host-list or + * IPADDR_NONE if not found. + * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 (ATTENTION: no fallback here!) + * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 (ATTENTION: no fallback here!) + * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only + * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only + * @return ERR_OK if found, ERR_ARG if not found + */ +err_t +dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype) +{ + LWIP_UNUSED_ARG(dns_addrtype); + return dns_lookup_local(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)); +} + +/* Internal implementation for dns_local_lookup and dns_lookup */ +static err_t +dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) +{ +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC + struct local_hostlist_entry *entry = local_hostlist_dynamic; + while (entry != NULL) { + if ((lwip_stricmp(entry->name, hostname) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, entry->addr)) { + if (addr) { + ip_addr_copy(*addr, entry->addr); + } + return ERR_OK; + } + entry = entry->next; + } +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + size_t i; + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { + if ((lwip_stricmp(local_hostlist_static[i].name, hostname) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, local_hostlist_static[i].addr)) { + if (addr) { + ip_addr_copy(*addr, local_hostlist_static[i].addr); + } + return ERR_OK; + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + return ERR_ARG; +} + +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +/** + * @ingroup dns + * Remove all entries from the local host-list for a specific hostname + * and/or IP address + * + * @param hostname hostname for which entries shall be removed from the local + * host-list + * @param addr address for which entries shall be removed from the local host-list + * @return the number of removed entries + */ +int +dns_local_removehost(const char *hostname, const ip_addr_t *addr) +{ + int removed = 0; + struct local_hostlist_entry *entry = local_hostlist_dynamic; + struct local_hostlist_entry *last_entry = NULL; + while (entry != NULL) { + if (((hostname == NULL) || !lwip_stricmp(entry->name, hostname)) && + ((addr == NULL) || ip_addr_cmp(&entry->addr, addr))) { + struct local_hostlist_entry *free_entry; + if (last_entry != NULL) { + last_entry->next = entry->next; + } else { + local_hostlist_dynamic = entry->next; + } + free_entry = entry; + entry = 
entry->next; + memp_free(MEMP_LOCALHOSTLIST, free_entry); + removed++; + } else { + last_entry = entry; + entry = entry->next; + } + } + return removed; +} + +/** + * @ingroup dns + * Add a hostname/IP address pair to the local host-list. + * Duplicates are not checked. + * + * @param hostname hostname of the new entry + * @param addr IP address of the new entry + * @return ERR_OK if succeeded or ERR_MEM on memory error + */ +err_t +dns_local_addhost(const char *hostname, const ip_addr_t *addr) +{ + struct local_hostlist_entry *entry; + size_t namelen; + char *entry_name; + LWIP_ASSERT("invalid host name (NULL)", hostname != NULL); + namelen = strlen(hostname); + LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); + entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); + if (entry == NULL) { + return ERR_MEM; + } + entry_name = (char *)entry + sizeof(struct local_hostlist_entry); + MEMCPY(entry_name, hostname, namelen); + entry_name[namelen] = 0; + entry->name = entry_name; + ip_addr_copy(entry->addr, *addr); + entry->next = local_hostlist_dynamic; + local_hostlist_dynamic = entry; + return ERR_OK; +} +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC*/ +#endif /* DNS_LOCAL_HOSTLIST */ + +/** + * @ingroup dns + * Look up a hostname in the array of known hostnames. + * + * @note This function only looks in the internal array of known + * hostnames, it does not send out a query for the hostname if none + * was found. The function dns_enqueue() can be used to send a query + * for a hostname. + * + * @param name the hostname to look up + * @param addr the hostname's IP address, as u32_t (instead of ip_addr_t to + * better check for failure: != IPADDR_NONE) or IPADDR_NONE if the hostname + * was not found in the cached dns_table. + * @return ERR_OK if found, ERR_ARG if not found + */ +static err_t +dns_lookup(const char *name, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) +{ + u8_t i; +#if DNS_LOCAL_HOSTLIST + if (dns_lookup_local(name, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#endif /* DNS_LOCAL_HOSTLIST */ +#ifdef DNS_LOOKUP_LOCAL_EXTERN + if (DNS_LOOKUP_LOCAL_EXTERN(name, addr, LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#endif /* DNS_LOOKUP_LOCAL_EXTERN */ + + /* Walk through name list, return entry if found. If not, return NULL. */ + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + if ((dns_table[i].state == DNS_STATE_DONE) && + (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, dns_table[i].ipaddr)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_lookup: \"%s\": found = ", name)); + ip_addr_debug_print_val(DNS_DEBUG, dns_table[i].ipaddr); + LWIP_DEBUGF(DNS_DEBUG, ("\n")); + if (addr) { + ip_addr_copy(*addr, dns_table[i].ipaddr); + } + return ERR_OK; + } + } + + return ERR_ARG; +} + +/** + * Compare the "dotted" name "query" with the encoded name "response" + * to make sure an answer from the DNS server matches the current dns_table + * entry (otherwise, answers might arrive late for hostname not on the list + * any more). + * + * For now, this function compares case-insensitive to cope with all kinds of + * servers. This also means that "dns 0x20 bit encoding" must be checked + * externally, if we want to implement it. + * Currently, the request is sent exactly as passed in by he user request. 
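For reference, the encoded form that dns_compare_name() walks (and that dns_send() later in this file produces) replaces each dot-separated label with a length byte and ends with a zero byte; a sketch for "www.example.com":

#include "lwip/arch.h"

/* DNS wire encoding of "www.example.com": length-prefixed labels, 0-terminated. */
static const u8_t example_encoded_name[] = {
  3, 'w', 'w', 'w',
  7, 'e', 'x', 'a', 'm', 'p', 'l', 'e',
  3, 'c', 'o', 'm',
  0
};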
+ * + * @param query hostname (not encoded) from the dns_table + * @param p pbuf containing the encoded hostname in the DNS response + * @param start_offset offset into p where the name starts + * @return 0xFFFF: names differ, other: names equal -> offset behind name + */ +static u16_t +dns_compare_name(const char *query, struct pbuf *p, u16_t start_offset) +{ + int n; + u16_t response_offset = start_offset; + + do { + n = pbuf_try_get_at(p, response_offset); + if ((n < 0) || (response_offset == 0xFFFF)) { + /* error or overflow */ + return 0xFFFF; + } + response_offset++; + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: cannot be equal since we don't send them */ + return 0xFFFF; + } else { + /* Not compressed name */ + while (n > 0) { + int c = pbuf_try_get_at(p, response_offset); + if (c < 0) { + return 0xFFFF; + } + if (lwip_tolower((*query)) != lwip_tolower((u8_t)c)) { + return 0xFFFF; + } + if (response_offset == 0xFFFF) { + /* would overflow */ + return 0xFFFF; + } + response_offset++; + ++query; + --n; + } + ++query; + } + n = pbuf_try_get_at(p, response_offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + if (response_offset == 0xFFFF) { + /* would overflow */ + return 0xFFFF; + } + return (u16_t)(response_offset + 1); +} + +/** + * Walk through a compact encoded DNS name and return the end of the name. + * + * @param p pbuf containing the name + * @param query_idx start index into p pointing to encoded DNS name in the DNS server response + * @return index to end of the name + */ +static u16_t +dns_skip_name(struct pbuf *p, u16_t query_idx) +{ + int n; + u16_t offset = query_idx; + + do { + n = pbuf_try_get_at(p, offset++); + if ((n < 0) || (offset == 0)) { + return 0xFFFF; + } + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: since we only want to skip it (not check it), stop here */ + break; + } else { + /* Not compressed name */ + if (offset + n >= p->tot_len) { + return 0xFFFF; + } + offset = (u16_t)(offset + n); + } + n = pbuf_try_get_at(p, offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + if (offset == 0xFFFF) { + return 0xFFFF; + } + return (u16_t)(offset + 1); +} + +/** + * Send a DNS query packet. + * + * @param idx the DNS table entry index for which to send a request + * @return ERR_OK if packet is sent; an err_t indicating the problem otherwise + */ +static err_t +dns_send(u8_t idx) +{ + err_t err; + struct dns_hdr hdr; + struct dns_query qry; + struct pbuf *p; + u16_t query_idx, copy_len; + const char *hostname, *hostname_part; + u8_t n; + u8_t pcb_idx; + struct dns_table_entry *entry = &dns_table[idx]; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_send: dns_servers[%"U16_F"] \"%s\": request\n", + (u16_t)(entry->server_idx), entry->name)); + LWIP_ASSERT("dns server out of array", entry->server_idx < DNS_MAX_SERVERS); + if (ip_addr_isany_val(dns_servers[entry->server_idx]) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif + ) { + /* DNS server not valid anymore, e.g. 
PPP netif has been shut down */ + /* call specified callback function if provided */ + dns_call_found(idx, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + return ERR_OK; + } + + /* if here, we have either a new query or a retry on a previous query to process */ + p = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(SIZEOF_DNS_HDR + strlen(entry->name) + 2 + + SIZEOF_DNS_QUERY), PBUF_RAM); + if (p != NULL) { + const ip_addr_t *dst; + u16_t dst_port; + /* fill dns header */ + memset(&hdr, 0, SIZEOF_DNS_HDR); + hdr.id = lwip_htons(entry->txid); + hdr.flags1 = DNS_FLAG1_RD; + hdr.numquestions = PP_HTONS(1); + pbuf_take(p, &hdr, SIZEOF_DNS_HDR); + hostname = entry->name; + --hostname; + + /* convert hostname into suitable query format. */ + query_idx = SIZEOF_DNS_HDR; + do { + ++hostname; + hostname_part = hostname; + for (n = 0; *hostname != '.' && *hostname != 0; ++hostname) { + ++n; + } + copy_len = (u16_t)(hostname - hostname_part); + if (query_idx + n + 1 > 0xFFFF) { + /* u16_t overflow */ + goto overflow_return; + } + pbuf_put_at(p, query_idx, n); + pbuf_take_at(p, hostname_part, copy_len, (u16_t)(query_idx + 1)); + query_idx = (u16_t)(query_idx + n + 1); + } while (*hostname != 0); + pbuf_put_at(p, query_idx, 0); + query_idx++; + + /* fill dns query */ + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + qry.type = PP_HTONS(DNS_RRTYPE_AAAA); + } else { + qry.type = PP_HTONS(DNS_RRTYPE_A); + } + qry.cls = PP_HTONS(DNS_RRCLASS_IN); + pbuf_take_at(p, &qry, SIZEOF_DNS_QUERY, query_idx); + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + pcb_idx = entry->pcb_idx; +#else + pcb_idx = 0; +#endif + /* send dns packet */ + LWIP_DEBUGF(DNS_DEBUG, ("sending DNS request ID %d for name \"%s\" to server %d\r\n", + entry->txid, entry->name, entry->server_idx)); +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (entry->is_mdns) { + dst_port = DNS_MQUERY_PORT; +#if LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + dst = &dns_mquery_v6group; + } +#endif +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif +#if LWIP_IPV4 + { + dst = &dns_mquery_v4group; + } +#endif + } else +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + dst_port = DNS_SERVER_PORT; + dst = &dns_servers[entry->server_idx]; + } + err = udp_sendto(dns_pcbs[pcb_idx], p, dst, dst_port); + + /* free pbuf */ + pbuf_free(p); + } else { + err = ERR_MEM; + } + + return err; +overflow_return: + pbuf_free(p); + return ERR_VAL; +} + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static struct udp_pcb * +dns_alloc_random_port(void) +{ + err_t err; + struct udp_pcb *pcb; + + pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + if (pcb == NULL) { + /* out of memory, have to reuse an existing pcb */ + return NULL; + } + do { + u16_t port = (u16_t)DNS_RAND_TXID(); + if (DNS_PORT_ALLOWED(port)) { + err = udp_bind(pcb, IP_ANY_TYPE, port); + } else { + /* this port is not allowed, try again */ + err = ERR_USE; + } + } while (err == ERR_USE); + if (err != ERR_OK) { + udp_remove(pcb); + return NULL; + } + udp_recv(pcb, dns_recv, NULL); + return pcb; +} + +/** + * dns_alloc_pcb() - allocates a new pcb (or reuses an existing one) to be used + * for sending a request + * + * @return an index into dns_pcbs + */ +static u8_t +dns_alloc_pcb(void) +{ + u8_t i; + u8_t idx; + + for (i = 0; i < DNS_MAX_SOURCE_PORTS; i++) { + if (dns_pcbs[i] == NULL) { + break; + } + } + if (i < DNS_MAX_SOURCE_PORTS) { + dns_pcbs[i] = dns_alloc_random_port(); + if (dns_pcbs[i] != NULL) { + /* succeeded */ + dns_last_pcb_idx = i; + return i; + } + } + /* if we 
come here, creating a new UDP pcb failed, so we have to use + an already existing one (so overflow is no issue) */ + for (i = 0, idx = (u8_t)(dns_last_pcb_idx + 1); i < DNS_MAX_SOURCE_PORTS; i++, idx++) { + if (idx >= DNS_MAX_SOURCE_PORTS) { + idx = 0; + } + if (dns_pcbs[idx] != NULL) { + dns_last_pcb_idx = idx; + return idx; + } + } + return DNS_MAX_SOURCE_PORTS; +} +#endif /* ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) */ + +/** + * dns_call_found() - call the found callback and check if there are duplicate + * entries for the given hostname. If there are any, their found callback will + * be called and they will be removed. + * + * @param idx dns table index of the entry that is resolved or removed + * @param addr IP address for the hostname (or NULL on error or memory shortage) + */ +static void +dns_call_found(u8_t idx, ip_addr_t *addr) +{ +#if ((LWIP_DNS_SECURE & (LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT)) != 0) + u8_t i; +#endif + +#if LWIP_IPV4 && LWIP_IPV6 + if (addr != NULL) { + /* check that address type matches the request and adapt the table entry */ + if (IP_IS_V6_VAL(*addr)) { + LWIP_ASSERT("invalid response", LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); + dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; + } else { + LWIP_ASSERT("invalid response", !LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); + dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; + } + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + for (i = 0; i < DNS_MAX_REQUESTS; i++) { + if (dns_requests[i].found && (dns_requests[i].dns_table_idx == idx)) { + (*dns_requests[i].found)(dns_table[idx].name, addr, dns_requests[i].arg); + /* flush this entry */ + dns_requests[i].found = NULL; + } + } +#else + if (dns_requests[idx].found) { + (*dns_requests[idx].found)(dns_table[idx].name, addr, dns_requests[idx].arg); + } + dns_requests[idx].found = NULL; +#endif +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + /* close the pcb used unless other request are using it */ + for (i = 0; i < DNS_MAX_REQUESTS; i++) { + if (i == idx) { + continue; /* only check other requests */ + } + if (dns_table[i].state == DNS_STATE_ASKING) { + if (dns_table[i].pcb_idx == dns_table[idx].pcb_idx) { + /* another request is still using the same pcb */ + dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; + break; + } + } + } + if (dns_table[idx].pcb_idx < DNS_MAX_SOURCE_PORTS) { + /* if we come here, the pcb is not used any more and can be removed */ + udp_remove(dns_pcbs[dns_table[idx].pcb_idx]); + dns_pcbs[dns_table[idx].pcb_idx] = NULL; + dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; + } +#endif +} + +/* Create a query transmission ID that is unique for all outstanding queries */ +static u16_t +dns_create_txid(void) +{ + u16_t txid; + u8_t i; + +again: + txid = (u16_t)DNS_RAND_TXID(); + + /* check whether the ID is unique */ + for (i = 0; i < DNS_TABLE_SIZE; i++) { + if ((dns_table[i].state == DNS_STATE_ASKING) && + (dns_table[i].txid == txid)) { + /* ID already used by another pending query */ + goto again; + } + } + + return txid; +} + +/** + * Check whether there are other backup DNS servers available to try + */ +static u8_t +dns_backupserver_available(struct dns_table_entry *pentry) +{ + u8_t ret = 0; + + if (pentry) { + if ((pentry->server_idx + 1 < DNS_MAX_SERVERS) && !ip_addr_isany_val(dns_servers[pentry->server_idx + 1])) { + ret = 1; + } + } + + return ret; +} + +/** + * dns_check_entry() 
- see if entry has not yet been queried and, if so, sends out a query. + * Check an entry in the dns_table: + * - send out query for new entries + * - retry old pending entries on timeout (also with different servers) + * - remove completed entries from the table if their TTL has expired + * + * @param i index of the dns_table entry to check + */ +static void +dns_check_entry(u8_t i) +{ + err_t err; + struct dns_table_entry *entry = &dns_table[i]; + + LWIP_ASSERT("array index out of bounds", i < DNS_TABLE_SIZE); + + switch (entry->state) { + case DNS_STATE_NEW: + /* initialize new entry */ + entry->txid = dns_create_txid(); + entry->state = DNS_STATE_ASKING; + entry->server_idx = 0; + entry->tmr = 1; + entry->retries = 0; + + /* send DNS packet for this entry */ + err = dns_send(i); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, + ("dns_send returned error: %s\n", lwip_strerr(err))); + } + break; + case DNS_STATE_ASKING: + if (--entry->tmr == 0) { + if (++entry->retries == DNS_MAX_RETRIES) { + if (dns_backupserver_available(entry) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + ) { + /* change of server */ + entry->server_idx++; + entry->tmr = 1; + entry->retries = 0; + } else { + LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": timeout\n", entry->name)); + /* call specified callback function if provided */ + dns_call_found(i, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + break; + } + } else { + /* wait longer for the next retry */ + entry->tmr = entry->retries; + } + + /* send DNS packet for this entry */ + err = dns_send(i); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, + ("dns_send returned error: %s\n", lwip_strerr(err))); + } + } + break; + case DNS_STATE_DONE: + /* if the time to live is nul */ + if ((entry->ttl == 0) || (--entry->ttl == 0)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": flush\n", entry->name)); + /* flush this entry, there cannot be any related pending entries in this state */ + entry->state = DNS_STATE_UNUSED; + } + break; + case DNS_STATE_UNUSED: + /* nothing to do */ + break; + default: + LWIP_ASSERT("unknown dns_table entry state:", 0); + break; + } +} + +/** + * Call dns_check_entry for each entry in dns_table - check all entries. + */ +static void +dns_check_entries(void) +{ + u8_t i; + + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + dns_check_entry(i); + } +} + +/** + * Save TTL and call dns_call_found for correct response. + */ +static void +dns_correct_response(u8_t idx, u32_t ttl) +{ + struct dns_table_entry *entry = &dns_table[idx]; + + entry->state = DNS_STATE_DONE; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response = ", entry->name)); + ip_addr_debug_print_val(DNS_DEBUG, entry->ipaddr); + LWIP_DEBUGF(DNS_DEBUG, ("\n")); + + /* read the answer resource record's TTL, and maximize it if needed */ + entry->ttl = ttl; + if (entry->ttl > DNS_MAX_TTL) { + entry->ttl = DNS_MAX_TTL; + } + dns_call_found(idx, &entry->ipaddr); + + if (entry->ttl == 0) { + /* RFC 883, page 29: "Zero values are + interpreted to mean that the RR can only be used for the + transaction in progress, and should not be cached." + -> flush this entry now */ + /* entry reused during callback? */ + if (entry->state == DNS_STATE_DONE) { + entry->state = DNS_STATE_UNUSED; + } + } +} + +/** + * Receive input function for DNS response packets arriving for the dns UDP pcb. 
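The retry cadence implemented above is driven by dns_tmr(), which the port calls once per DNS_TMR_INTERVAL (1000 ms by default); each retry then waits one tick longer than the previous one before the entry falls back to the next configured server. A small lwipopts.h sizing sketch for this module is shown below; the option names are the standard lwIP ones referenced by this file, but the values are illustrative, not this project's actual configuration.

/* Illustrative DNS sizing for lwipopts.h (example values only). */
#define LWIP_DNS              1    /* enable the resolver (requires LWIP_UDP)  */
#define DNS_TABLE_SIZE        4    /* concurrent/cached name entries           */
#define DNS_MAX_NAME_LENGTH   256  /* longest hostname kept in dns_table       */
#define DNS_MAX_SERVERS       2    /* primary server plus one backup           */
#define DNS_MAX_RETRIES       4    /* per-server retries before giving up      */
/* Optional hardening bits evaluated by the code above: */
#define LWIP_DNS_SECURE (LWIP_DNS_SECURE_RAND_XID | \
                         LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | \
                         LWIP_DNS_SECURE_RAND_SRC_PORT)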
+ */ +static void +dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + u8_t i; + u16_t txid; + u16_t res_idx; + struct dns_hdr hdr; + struct dns_answer ans; + struct dns_query qry; + u16_t nquestions, nanswers; + + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(port); + + /* is the dns message big enough ? */ + if (p->tot_len < (SIZEOF_DNS_HDR + SIZEOF_DNS_QUERY)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: pbuf too small\n")); + /* free pbuf and return */ + goto ignore_packet; + } + + /* copy dns payload inside static buffer for processing */ + if (pbuf_copy_partial(p, &hdr, SIZEOF_DNS_HDR, 0) == SIZEOF_DNS_HDR) { + /* Match the ID in the DNS header with the name table. */ + txid = lwip_htons(hdr.id); + for (i = 0; i < DNS_TABLE_SIZE; i++) { + struct dns_table_entry *entry = &dns_table[i]; + if ((entry->state == DNS_STATE_ASKING) && + (entry->txid == txid)) { + + /* We only care about the question(s) and the answers. The authrr + and the extrarr are simply discarded. */ + nquestions = lwip_htons(hdr.numquestions); + nanswers = lwip_htons(hdr.numanswers); + + /* Check for correct response. */ + if ((hdr.flags1 & DNS_FLAG1_RESPONSE) == 0) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": not a response\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + if (nquestions != 1) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (!entry->is_mdns) +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + /* Check whether response comes from the same network address to which the + question was sent. (RFC 5452) */ + if (!ip_addr_cmp(addr, &dns_servers[entry->server_idx])) { + goto ignore_packet; /* ignore this packet */ + } + } + + /* Check if the name in the "question" part match with the name in the entry and + skip it if equal. */ + res_idx = dns_compare_name(entry->name, p, SIZEOF_DNS_HDR); + if (res_idx == 0xFFFF) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + + /* check if "question" part matches the request */ + if (pbuf_copy_partial(p, &qry, SIZEOF_DNS_QUERY, res_idx) != SIZEOF_DNS_QUERY) { + goto ignore_packet; /* ignore this packet */ + } + if ((qry.cls != PP_HTONS(DNS_RRCLASS_IN)) || + (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_AAAA))) || + (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_A)))) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + /* skip the rest of the "question" part */ + if (res_idx + SIZEOF_DNS_QUERY > 0xFFFF) { + goto ignore_packet; + } + res_idx = (u16_t)(res_idx + SIZEOF_DNS_QUERY); + + /* Check for error. If so, call callback to inform. 
*/ + if (hdr.flags2 & DNS_FLAG2_ERR_MASK) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in flags\n", entry->name)); + + /* if there is another backup DNS server to try + * then don't stop the DNS request + */ + if (dns_backupserver_available(entry)) { + /* avoid retrying the same server */ + entry->retries = DNS_MAX_RETRIES-1; + entry->tmr = 1; + + /* contact next available server for this entry */ + dns_check_entry(i); + + goto ignore_packet; + } + } else { + while ((nanswers > 0) && (res_idx < p->tot_len)) { + /* skip answer resource record's host name */ + res_idx = dns_skip_name(p, res_idx); + if (res_idx == 0xFFFF) { + goto ignore_packet; /* ignore this packet */ + } + + /* Check for IP address type and Internet class. Others are discarded. */ + if (pbuf_copy_partial(p, &ans, SIZEOF_DNS_ANSWER, res_idx) != SIZEOF_DNS_ANSWER) { + goto ignore_packet; /* ignore this packet */ + } + if (res_idx + SIZEOF_DNS_ANSWER > 0xFFFF) { + goto ignore_packet; + } + res_idx = (u16_t)(res_idx + SIZEOF_DNS_ANSWER); + + if (ans.cls == PP_HTONS(DNS_RRCLASS_IN)) { +#if LWIP_IPV4 + if ((ans.type == PP_HTONS(DNS_RRTYPE_A)) && (ans.len == PP_HTONS(sizeof(ip4_addr_t)))) { +#if LWIP_IPV4 && LWIP_IPV6 + if (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + ip4_addr_t ip4addr; + /* read the IP address after answer resource record's header */ + if (pbuf_copy_partial(p, &ip4addr, sizeof(ip4_addr_t), res_idx) != sizeof(ip4_addr_t)) { + goto ignore_packet; /* ignore this packet */ + } + ip_addr_copy_from_ip4(dns_table[i].ipaddr, ip4addr); + pbuf_free(p); + /* handle correct response */ + dns_correct_response(i, lwip_ntohl(ans.ttl)); + return; + } + } +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + if ((ans.type == PP_HTONS(DNS_RRTYPE_AAAA)) && (ans.len == PP_HTONS(sizeof(ip6_addr_p_t)))) { +#if LWIP_IPV4 && LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + ip6_addr_p_t ip6addr; + /* read the IP address after answer resource record's header */ + if (pbuf_copy_partial(p, &ip6addr, sizeof(ip6_addr_p_t), res_idx) != sizeof(ip6_addr_p_t)) { + goto ignore_packet; /* ignore this packet */ + } + /* @todo: scope ip6addr? Might be required for link-local addresses at least? 
*/ + ip_addr_copy_from_ip6_packed(dns_table[i].ipaddr, ip6addr); + pbuf_free(p); + /* handle correct response */ + dns_correct_response(i, lwip_ntohl(ans.ttl)); + return; + } + } +#endif /* LWIP_IPV6 */ + } + /* skip this answer */ + if ((int)(res_idx + lwip_htons(ans.len)) > 0xFFFF) { + goto ignore_packet; /* ignore this packet */ + } + res_idx = (u16_t)(res_idx + lwip_htons(ans.len)); + --nanswers; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || + (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { + if (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { + /* IPv4 failed, try IPv6 */ + dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; + } else { + /* IPv6 failed, try IPv4 */ + dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; + } + pbuf_free(p); + dns_table[i].state = DNS_STATE_NEW; + dns_check_entry(i); + return; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in response\n", entry->name)); + } + /* call callback to indicate error, clean up memory and return */ + pbuf_free(p); + dns_call_found(i, NULL); + dns_table[i].state = DNS_STATE_UNUSED; + return; + } + } + } + +ignore_packet: + /* deallocate memory and return */ + pbuf_free(p); + return; +} + +/** + * Queues a new hostname to resolve and sends out a DNS query for that hostname + * + * @param name the hostname that is to be queried + * @param hostnamelen length of the hostname + * @param found a callback function to be called on success, failure or timeout + * @param callback_arg argument to pass to the callback function + * @return err_t return code. + */ +static err_t +dns_enqueue(const char *name, size_t hostnamelen, dns_found_callback found, + void *callback_arg LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype) LWIP_DNS_ISMDNS_ARG(u8_t is_mdns)) +{ + u8_t i; + u8_t lseq, lseqi; + struct dns_table_entry *entry = NULL; + size_t namelen; + struct dns_req_entry *req; + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + u8_t r; + /* check for duplicate entries */ + for (i = 0; i < DNS_TABLE_SIZE; i++) { + if ((dns_table[i].state == DNS_STATE_ASKING) && + (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0)) { +#if LWIP_IPV4 && LWIP_IPV6 + if (dns_table[i].reqaddrtype != dns_addrtype) { + /* requested address types don't match + this can lead to 2 concurrent requests, but mixing the address types + for the same host should not be that common */ + continue; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + /* this is a duplicate entry, find a free request entry */ + for (r = 0; r < DNS_MAX_REQUESTS; r++) { + if (dns_requests[r].found == 0) { + dns_requests[r].found = found; + dns_requests[r].arg = callback_arg; + dns_requests[r].dns_table_idx = i; + LWIP_DNS_SET_ADDRTYPE(dns_requests[r].reqaddrtype, dns_addrtype); + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": duplicate request\n", name)); + return ERR_INPROGRESS; + } + } + } + } + /* no duplicate entries found */ +#endif + + /* search an unused entry, or the oldest one */ + lseq = 0; + lseqi = DNS_TABLE_SIZE; + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + entry = &dns_table[i]; + /* is it an unused entry ? 
*/ + if (entry->state == DNS_STATE_UNUSED) { + break; + } + /* check if this is the oldest completed entry */ + if (entry->state == DNS_STATE_DONE) { + u8_t age = (u8_t)(dns_seqno - entry->seqno); + if (age > lseq) { + lseq = age; + lseqi = i; + } + } + } + + /* if we don't have found an unused entry, use the oldest completed one */ + if (i == DNS_TABLE_SIZE) { + if ((lseqi >= DNS_TABLE_SIZE) || (dns_table[lseqi].state != DNS_STATE_DONE)) { + /* no entry can be used now, table is full */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS entries table is full\n", name)); + return ERR_MEM; + } else { + /* use the oldest completed one */ + i = lseqi; + entry = &dns_table[i]; + } + } + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + /* find a free request entry */ + req = NULL; + for (r = 0; r < DNS_MAX_REQUESTS; r++) { + if (dns_requests[r].found == NULL) { + req = &dns_requests[r]; + break; + } + } + if (req == NULL) { + /* no request entry can be used now, table is full */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS request entries table is full\n", name)); + return ERR_MEM; + } + req->dns_table_idx = i; +#else + /* in this configuration, the entry index is the same as the request index */ + req = &dns_requests[i]; +#endif + + /* use this entry */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS entry %"U16_F"\n", name, (u16_t)(i))); + + /* fill the entry */ + entry->state = DNS_STATE_NEW; + entry->seqno = dns_seqno; + LWIP_DNS_SET_ADDRTYPE(entry->reqaddrtype, dns_addrtype); + LWIP_DNS_SET_ADDRTYPE(req->reqaddrtype, dns_addrtype); + req->found = found; + req->arg = callback_arg; + namelen = LWIP_MIN(hostnamelen, DNS_MAX_NAME_LENGTH - 1); + MEMCPY(entry->name, name, namelen); + entry->name[namelen] = 0; + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + entry->pcb_idx = dns_alloc_pcb(); + if (entry->pcb_idx >= DNS_MAX_SOURCE_PORTS) { + /* failed to get a UDP pcb */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": failed to allocate a pcb\n", name)); + entry->state = DNS_STATE_UNUSED; + req->found = NULL; + return ERR_MEM; + } + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS pcb %"U16_F"\n", name, (u16_t)(entry->pcb_idx))); +#endif + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + entry->is_mdns = is_mdns; +#endif + + dns_seqno++; + + /* force to send query without waiting timer */ + dns_check_entry(i); + + /* dns query is enqueued */ + return ERR_INPROGRESS; +} + +/** + * @ingroup dns + * Resolve a hostname (string) into an IP address. + * NON-BLOCKING callback version for use with raw API!!! + * + * Returns immediately with one of err_t return codes: + * - ERR_OK if hostname is a valid IP address string or the host + * name is already in the local names table. + * - ERR_INPROGRESS enqueue a request to be sent to the DNS server + * for resolution if no errors are present. + * - ERR_ARG: dns client not initialized or invalid hostname + * + * @param hostname the hostname that is to be queried + * @param addr pointer to a ip_addr_t where to store the address if it is already + * cached in the dns_table (only valid if ERR_OK is returned!) + * @param found a callback function to be called on success, failure or timeout (only if + * ERR_INPROGRESS is returned!) + * @param callback_arg argument to pass to the callback function + * @return a err_t return code. 
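As a usage illustration for the enqueue path above, a raw-API caller normally handles all three outcomes of dns_gethostbyname(): an immediate ERR_OK (numeric address or cache hit), ERR_INPROGRESS (the callback fires later), or an error code. The hostname, function names and argument below are placeholders, and the call is assumed to run in lwIP's core context (tcpip thread, or the main loop in NO_SYS builds).

/* Hypothetical caller sketch; "example.org" and my_dns_found are placeholders. */
static void
my_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    LWIP_DEBUGF(DNS_DEBUG, ("%s resolved to %s\n", name, ipaddr_ntoa(ipaddr)));
  } else {
    LWIP_DEBUGF(DNS_DEBUG, ("%s: lookup failed or timed out\n", name));
  }
}

static void
start_lookup(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.org", &addr, my_dns_found, NULL);
  if (err == ERR_OK) {
    /* numeric address or cache hit: addr is already valid, no callback follows */
  } else if (err == ERR_INPROGRESS) {
    /* query enqueued: my_dns_found() is called on success, failure or timeout */
  } else {
    /* ERR_ARG, ERR_VAL or ERR_MEM: nothing was enqueued */
  }
}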
+ */ +err_t +dns_gethostbyname(const char *hostname, ip_addr_t *addr, dns_found_callback found, + void *callback_arg) +{ + return dns_gethostbyname_addrtype(hostname, addr, found, callback_arg, LWIP_DNS_ADDRTYPE_DEFAULT); +} + +/** + * @ingroup dns + * Like dns_gethostbyname, but returned address type can be controlled: + * @param hostname the hostname that is to be queried + * @param addr pointer to a ip_addr_t where to store the address if it is already + * cached in the dns_table (only valid if ERR_OK is returned!) + * @param found a callback function to be called on success, failure or timeout (only if + * ERR_INPROGRESS is returned!) + * @param callback_arg argument to pass to the callback function + * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 first, try IPv6 if IPv4 fails only + * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 first, try IPv4 if IPv6 fails only + * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only + * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only + */ +err_t +dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, dns_found_callback found, + void *callback_arg, u8_t dns_addrtype) +{ + size_t hostnamelen; +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + u8_t is_mdns; +#endif + /* not initialized or no valid server yet, or invalid addr pointer + * or invalid hostname or invalid hostname length */ + if ((addr == NULL) || + (!hostname) || (!hostname[0])) { + return ERR_ARG; + } +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) + if (dns_pcbs[0] == NULL) { + return ERR_ARG; + } +#endif + hostnamelen = strlen(hostname); + if (hostnamelen >= DNS_MAX_NAME_LENGTH) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_gethostbyname: name too long to resolve")); + return ERR_ARG; + } + + +#if LWIP_HAVE_LOOPIF + if (strcmp(hostname, "localhost") == 0) { + ip_addr_set_loopback(LWIP_DNS_ADDRTYPE_IS_IPV6(dns_addrtype), addr); + return ERR_OK; + } +#endif /* LWIP_HAVE_LOOPIF */ + + /* host name already in octet notation? set ip addr and return ERR_OK */ + if (ipaddr_aton(hostname, addr)) { +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV4)) || + (IP_IS_V4(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV6))) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + return ERR_OK; + } + } + /* already have this address cached? 
*/ + if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { + /* fallback to 2nd IP type and try again to lookup */ + u8_t fallback; + if (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { + fallback = LWIP_DNS_ADDRTYPE_IPV6; + } else { + fallback = LWIP_DNS_ADDRTYPE_IPV4; + } + if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(fallback)) == ERR_OK) { + return ERR_OK; + } + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(dns_addrtype); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (strstr(hostname, ".local") == &hostname[hostnamelen] - 6) { + is_mdns = 1; + } else { + is_mdns = 0; + } + + if (!is_mdns) +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + /* prevent calling found callback if no server is set, return error instead */ + if (ip_addr_isany_val(dns_servers[0])) { + return ERR_VAL; + } + } + + /* queue query with specified callback */ + return dns_enqueue(hostname, hostnamelen, found, callback_arg LWIP_DNS_ADDRTYPE_ARG(dns_addrtype) + LWIP_DNS_ISMDNS_ARG(is_mdns)); +} + +#endif /* LWIP_DNS */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c new file mode 100644 index 000000000..818c68f45 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c @@ -0,0 +1,608 @@ +/** + * @file + * Internet checksum functions.\n + * + * These are some reference implementations of the checksum algorithm, with the + * aim of being simple, correct and fully portable. Checksumming is the + * first thing you would want to optimize for your platform. If you create + * your own version, link it in and in your cc.h put: + * + * \#define LWIP_CHKSUM your_checksum_routine + * + * Or you can select from the implementations below by defining + * LWIP_CHKSUM_ALGORITHM to 1, 2 or 3. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/inet_chksum.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" + +#include + +#ifndef LWIP_CHKSUM +# define LWIP_CHKSUM lwip_standard_chksum +# ifndef LWIP_CHKSUM_ALGORITHM +# define LWIP_CHKSUM_ALGORITHM 2 +# endif +u16_t lwip_standard_chksum(const void *dataptr, int len); +#endif +/* If none set: */ +#ifndef LWIP_CHKSUM_ALGORITHM +# define LWIP_CHKSUM_ALGORITHM 0 +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */ +/** + * lwip checksum + * + * @param dataptr points to start of data to be summed at any boundary + * @param len length of data to be summed + * @return host order (!) lwip checksum (non-inverted Internet sum) + * + * @note accumulator size limits summable length to 64k + * @note host endianess is irrelevant (p3 RFC1071) + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + u32_t acc; + u16_t src; + const u8_t *octetptr; + + acc = 0; + /* dataptr may be at odd or even addresses */ + octetptr = (const u8_t *)dataptr; + while (len > 1) { + /* declare first octet as most significant + thus assume network order, ignoring host order */ + src = (*octetptr) << 8; + octetptr++; + /* declare second octet as least significant */ + src |= (*octetptr); + octetptr++; + acc += src; + len -= 2; + } + if (len > 0) { + /* accumulate remaining octet */ + src = (*octetptr) << 8; + acc += src; + } + /* add deferred carry bits */ + acc = (acc >> 16) + (acc & 0x0000ffffUL); + if ((acc & 0xffff0000UL) != 0) { + acc = (acc >> 16) + (acc & 0x0000ffffUL); + } + /* This maybe a little confusing: reorder sum using lwip_htons() + instead of lwip_ntohs() since it has a little less call overhead. + The caller must invert bits for Internet sum ! */ + return lwip_htons((u16_t)acc); +} +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 2) /* Alternative version #2 */ +/* + * Curt McDowell + * Broadcom Corp. + * csm@broadcom.com + * + * IP checksum two bytes at a time with support for + * unaligned buffer. + * Works for len up to and including 0x20000. + * by Curt McDowell, Broadcom Corp. 12/08/2005 + * + * @param dataptr points to start of data to be summed at any boundary + * @param len length of data to be summed + * @return host order (!) lwip checksum (non-inverted Internet sum) + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + const u8_t *pb = (const u8_t *)dataptr; + const u16_t *ps; + u16_t t = 0; + u32_t sum = 0; + int odd = ((mem_ptr_t)pb & 1); + + /* Get aligned to u16_t */ + if (odd && len > 0) { + ((u8_t *)&t)[1] = *pb++; + len--; + } + + /* Add the bulk of the data */ + ps = (const u16_t *)(const void *)pb; + while (len > 1) { + sum += *ps++; + len -= 2; + } + + /* Consume left-over byte, if any */ + if (len > 0) { + ((u8_t *)&t)[0] = *(const u8_t *)ps; + } + + /* Add end bytes */ + sum += t; + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... 
*/ + sum = FOLD_U32T(sum); + sum = FOLD_U32T(sum); + + /* Swap if alignment was odd */ + if (odd) { + sum = SWAP_BYTES_IN_WORD(sum); + } + + return (u16_t)sum; +} +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 3) /* Alternative version #3 */ +/** + * An optimized checksum routine. Basically, it uses loop-unrolling on + * the checksum loop, treating the head and tail bytes specially, whereas + * the inner loop acts on 8 bytes at a time. + * + * @arg start of buffer to be checksummed. May be an odd byte address. + * @len number of bytes in the buffer to be checksummed. + * @return host order (!) lwip checksum (non-inverted Internet sum) + * + * by Curt McDowell, Broadcom Corp. December 8th, 2005 + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + const u8_t *pb = (const u8_t *)dataptr; + const u16_t *ps; + u16_t t = 0; + const u32_t *pl; + u32_t sum = 0, tmp; + /* starts at odd byte address? */ + int odd = ((mem_ptr_t)pb & 1); + + if (odd && len > 0) { + ((u8_t *)&t)[1] = *pb++; + len--; + } + + ps = (const u16_t *)(const void *)pb; + + if (((mem_ptr_t)ps & 3) && len > 1) { + sum += *ps++; + len -= 2; + } + + pl = (const u32_t *)(const void *)ps; + + while (len > 7) { + tmp = sum + *pl++; /* ping */ + if (tmp < sum) { + tmp++; /* add back carry */ + } + + sum = tmp + *pl++; /* pong */ + if (sum < tmp) { + sum++; /* add back carry */ + } + + len -= 8; + } + + /* make room in upper bits */ + sum = FOLD_U32T(sum); + + ps = (const u16_t *)pl; + + /* 16-bit aligned word remaining? */ + while (len > 1) { + sum += *ps++; + len -= 2; + } + + /* dangling tail byte remaining? */ + if (len > 0) { /* include odd byte */ + ((u8_t *)&t)[0] = *(const u8_t *)ps; + } + + sum += t; /* add end bytes */ + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + sum = FOLD_U32T(sum); + sum = FOLD_U32T(sum); + + if (odd) { + sum = SWAP_BYTES_IN_WORD(sum); + } + + return (u16_t)sum; +} +#endif + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_base(struct pbuf *p, u8_t proto, u16_t proto_len, u32_t acc) +{ + struct pbuf *q; + int swapped = 0; + + /* iterate through all pbuf in chain */ + for (q = p; q != NULL; q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + acc += LWIP_CHKSUM(q->payload, q->len); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* just executing this next line is probably faster that the if statement needed + to check whether we really need to execute it, and does no harm */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
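The header of this file (above) points out that a port can bypass all three reference algorithms by naming its own routine in cc.h. The fragment below is a minimal sketch of that hook; my_port_chksum is a placeholder name, and its body simply mirrors algorithm #1's convention of returning the non-inverted sum with the same byte ordering as the reference code.

/* In cc.h (illustrative):
 *   #define LWIP_CHKSUM my_port_chksum
 */
u16_t
my_port_chksum(const void *dataptr, int len)
{
  const u8_t *p = (const u8_t *)dataptr;
  u32_t acc = 0;

  while (len > 1) {                       /* sum 16-bit words in network order */
    acc += (u32_t)((p[0] << 8) | p[1]);
    p += 2;
    len -= 2;
  }
  if (len > 0) {                          /* odd trailing byte */
    acc += (u32_t)(p[0] << 8);
  }
  acc = (acc >> 16) + (acc & 0xffffUL);   /* two folds always reduce to 16 bits */
  acc = (acc >> 16) + (acc & 0xffffUL);
  return lwip_htons((u16_t)acc);          /* caller inverts to get the checksum */
}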
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = ip4_addr_get_u32(dest); + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. + * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = dest->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo(p, proto, proto_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo(p, proto, proto_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_partial_base(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, u32_t acc) +{ + struct pbuf *q; + int swapped = 0; + u16_t chklen; + + /* iterate through all pbuf in chain */ + for (q = p; (q != NULL) && (chksum_len > 0); q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + chklen = q->len; + if (chklen > chksum_len) { + chklen = chksum_len; + } + acc += LWIP_CHKSUM(q->payload, chklen); + chksum_len = (u16_t)(chksum_len - chklen); + LWIP_ASSERT("delete me", chksum_len < 0x7fff); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* fold the upper bit down */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo_partial: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
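To show how the ip_chksum_pseudo() dispatcher above is typically consumed, the sketch below follows the pattern used by lwIP's UDP output path: zero the checksum field, sum the whole datagram together with the pseudo header, and transmit 0xFFFF when the result is 0x0000, since UDP reserves zero for "no checksum" (RFC 768). The helper name is made up for the example, struct udp_hdr is assumed to come from lwip/prot/udp.h, and p->payload is assumed to point at the UDP header.

/* Sketch only; udp_fill_chksum_sketch is not an lwIP function. */
static void
udp_fill_chksum_sketch(struct pbuf *p, const ip_addr_t *src, const ip_addr_t *dst)
{
  struct udp_hdr *udphdr = (struct udp_hdr *)p->payload;
  u16_t chksum;

  udphdr->chksum = 0;                        /* field must be zero while summing  */
  chksum = ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len, src, dst);
  if (chksum == 0x0000) {
    chksum = 0xffff;                         /* RFC 768: zero means "no checksum" */
  }
  udphdr->chksum = chksum;
}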
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = ip4_addr_get_u32(dest); + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. Will only compute for a + * portion of the payload. + * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param chksum_len number of payload bytes used to compute chksum + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = dest->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo_partial: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. 
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/* inet_chksum: + * + * Calculates the Internet checksum over a portion of memory. Used primarily for IP + * and ICMP. + * + * @param dataptr start of the buffer to calculate the checksum (no alignment needed) + * @param len length of the buffer to calculate the checksum + * @return checksum (as u16_t) to be saved directly in the protocol header + */ + +u16_t +inet_chksum(const void *dataptr, u16_t len) +{ + return (u16_t)~(unsigned int)LWIP_CHKSUM(dataptr, len); +} + +/** + * Calculate a checksum over a chain of pbufs (without pseudo-header, much like + * inet_chksum only pbufs are used). + * + * @param p pbuf chain over that the checksum should be calculated + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pbuf(struct pbuf *p) +{ + u32_t acc; + struct pbuf *q; + int swapped = 0; + + acc = 0; + for (q = p; q != NULL; q = q->next) { + acc += LWIP_CHKSUM(q->payload, q->len); + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + return (u16_t)~(acc & 0xffffUL); +} + +/* These are some implementations for LWIP_CHKSUM_COPY, which copies data + * like MEMCPY but generates a checksum at the same time. Since this is a + * performance-sensitive function, you might want to create your own version + * in assembly targeted at your hardware by defining it in lwipopts.h: + * #define LWIP_CHKSUM_COPY(dst, src, len) your_chksum_copy(dst, src, len) + */ + +#if (LWIP_CHKSUM_COPY_ALGORITHM == 1) /* Version #1 */ +/** Safe but slow: first call MEMCPY, then call LWIP_CHKSUM. + * For architectures with big caches, data might still be in cache when + * generating the checksum after copying. + */ +u16_t +lwip_chksum_copy(void *dst, const void *src, u16_t len) +{ + MEMCPY(dst, src, len); + return LWIP_CHKSUM(dst, len); +} +#endif /* (LWIP_CHKSUM_COPY_ALGORITHM == 1) */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/init.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/init.c new file mode 100644 index 000000000..b3737a356 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/init.c @@ -0,0 +1,380 @@ +/** + * @file + * Modules initialization + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include "lwip/init.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/sockets.h" +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/timeouts.h" +#include "lwip/etharp.h" +#include "lwip/ip6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/api.h" + +#include "netif/ppp/ppp_opts.h" +#include "netif/ppp/ppp_impl.h" + +#ifndef LWIP_SKIP_PACKING_CHECK + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct packed_struct_test { + PACK_STRUCT_FLD_8(u8_t dummy1); + PACK_STRUCT_FIELD(u32_t dummy2); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define PACKED_STRUCT_TEST_EXPECTED_SIZE 5 + +#endif + +/* Compile-time sanity checks for configuration errors. + * These can be done independently of LWIP_DEBUG, without penalty. 
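The packed_struct_test above only reaches the expected 5-byte size if the port's cc.h maps the PACK_STRUCT_* macros onto real packing attributes. For a GCC-based toolchain (such as the arm-none-eabi-gcc used by STM32CubeIDE) the conventional definitions look roughly like the sketch below; this is the usual lwIP convention, not necessarily this port's exact cc.h.

/* Typical GCC-style packing macros in arch/cc.h (illustrative): */
#define PACK_STRUCT_BEGIN
#define PACK_STRUCT_END
#define PACK_STRUCT_STRUCT    __attribute__((packed))
#define PACK_STRUCT_FIELD(x)  x
#define PACK_STRUCT_FLD_8(x)  x    /* u8_t fields need no extra attribute */
#define PACK_STRUCT_FLD_S(x)  x    /* already-packed sub-structs likewise */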
+ */ +#ifndef BYTE_ORDER +#error "BYTE_ORDER is not defined, you have to define it in your cc.h" +#endif +#if (!IP_SOF_BROADCAST && IP_SOF_BROADCAST_RECV) +#error "If you want to use broadcast filter per pcb on recv operations, you have to define IP_SOF_BROADCAST=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_UDPLITE) +#error "If you want to use UDP Lite, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DHCP) +#error "If you want to use DHCP, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && !LWIP_RAW && LWIP_MULTICAST_TX_OPTIONS) +#error "If you want to use LWIP_MULTICAST_TX_OPTIONS, you have to define LWIP_UDP=1 and/or LWIP_RAW=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DNS) +#error "If you want to use DNS, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if !MEMP_MEM_MALLOC /* MEMP_NUM_* checks are disabled when not using the pool allocator */ +#if (LWIP_ARP && ARP_QUEUEING && (MEMP_NUM_ARP_QUEUE<=0)) +#error "If you want to use ARP Queueing, you have to define MEMP_NUM_ARP_QUEUE>=1 in your lwipopts.h" +#endif +#if (LWIP_RAW && (MEMP_NUM_RAW_PCB<=0)) +#error "If you want to use RAW, you have to define MEMP_NUM_RAW_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_UDP && (MEMP_NUM_UDP_PCB<=0)) +#error "If you want to use UDP, you have to define MEMP_NUM_UDP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && (MEMP_NUM_TCP_PCB<=0)) +#error "If you want to use TCP, you have to define MEMP_NUM_TCP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && (MEMP_NUM_IGMP_GROUP<=1)) +#error "If you want to use IGMP, you have to define MEMP_NUM_IGMP_GROUP>1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_MULTICAST_TX_OPTIONS) +#error "If you want to use IGMP, you have to define LWIP_MULTICAST_TX_OPTIONS==1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_IPV4) +#error "IGMP needs LWIP_IPV4 enabled in your lwipopts.h" +#endif +#if ((LWIP_NETCONN || LWIP_SOCKET) && (MEMP_NUM_TCPIP_MSG_API<=0)) +#error "If you want to use Sequential API, you have to define MEMP_NUM_TCPIP_MSG_API>=1 in your lwipopts.h" +#endif +/* There must be sufficient timeouts, taking into account requirements of the subsystems. */ +#if LWIP_TIMERS && (MEMP_NUM_SYS_TIMEOUT < LWIP_NUM_SYS_TIMEOUT_INTERNAL) +#error "MEMP_NUM_SYS_TIMEOUT is too low to accomodate all required timeouts" +#endif +#if (IP_REASSEMBLY && (MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS)) +#error "MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS doesn't make sense since each struct ip_reassdata must hold 2 pbufs at least!" +#endif +#endif /* !MEMP_MEM_MALLOC */ +#if LWIP_WND_SCALE +#if (LWIP_TCP && (TCP_WND > 0xffffffff)) +#error "If you want to use TCP, TCP_WND must fit in an u32_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_RCV_SCALE > 14)) +#error "The maximum valid window scale value is 14!" +#endif +#if (LWIP_TCP && (TCP_WND > (0xFFFFU << TCP_RCV_SCALE))) +#error "TCP_WND is bigger than the configured LWIP_WND_SCALE allows!" +#endif +#if (LWIP_TCP && ((TCP_WND >> TCP_RCV_SCALE) == 0)) +#error "TCP_WND is too small for the configured LWIP_WND_SCALE (results in zero window)!" 
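Read together, the window-scaling checks above require TCP_RCV_SCALE to stay at 14 or below and TCP_WND to stay within 0xFFFF << TCP_RCV_SCALE while remaining non-zero after the shift. One combination that satisfies all of them (example values only, not this project's settings):

/* Example only: window scaling enabled, roughly a 64 KB receive window. */
#define TCP_MSS          1460
#define LWIP_WND_SCALE   1
#define TCP_RCV_SCALE    2                /* must be <= 14                          */
#define TCP_WND          (44 * TCP_MSS)   /* 64240 <= (0xFFFF << 2), and nonzero
                                             after shifting right by TCP_RCV_SCALE */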
+#endif +#else /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_WND > 0xffff)) +#error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)" +#endif +#endif /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff)) +#error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_SND_QUEUELEN < 2)) +#error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work" +#endif +#if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12))) +#error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must less or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h" +#endif +#if (LWIP_TCP && TCP_LISTEN_BACKLOG && ((TCP_DEFAULT_LISTEN_BACKLOG < 0) || (TCP_DEFAULT_LISTEN_BACKLOG > 0xff))) +#error "If you want to use TCP backlog, TCP_DEFAULT_LISTEN_BACKLOG must fit into an u8_t" +#endif +#if (LWIP_TCP && LWIP_TCP_SACK_OUT && !TCP_QUEUE_OOSEQ) +#error "To use LWIP_TCP_SACK_OUT, TCP_QUEUE_OOSEQ needs to be enabled" +#endif +#if (LWIP_TCP && LWIP_TCP_SACK_OUT && (LWIP_TCP_MAX_SACK_NUM < 1)) +#error "LWIP_TCP_MAX_SACK_NUM must be greater than 0" +#endif +#if (LWIP_NETIF_API && (NO_SYS==1)) +#error "If you want to use NETIF API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if ((LWIP_SOCKET || LWIP_NETCONN) && (NO_SYS==1)) +#error "If you want to use Sequential API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (NO_SYS==1)) +#error "If you want to use PPP API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (PPP_SUPPORT==0)) +#error "If you want to use PPP API, you have to enable PPP_SUPPORT in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_AUTOIP)) && LWIP_DHCP_AUTOIP_COOP) +#error "If you want to use DHCP/AUTOIP cooperation mode, you have to define LWIP_DHCP=1 and LWIP_AUTOIP=1 in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_ARP)) && DHCP_DOES_ARP_CHECK) +#error "If you want to use DHCP ARP checking, you have to define LWIP_DHCP=1 and LWIP_ARP=1 in your lwipopts.h" +#endif +#if (!LWIP_ARP && LWIP_AUTOIP) +#error "If you want to use AUTOIP, you have to define LWIP_ARP=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && ((LWIP_EVENT_API && LWIP_CALLBACK_API) || (!LWIP_EVENT_API && !LWIP_CALLBACK_API))) +#error "One and exactly one of LWIP_EVENT_API and LWIP_CALLBACK_API has to be enabled in your lwipopts.h" +#endif +#if (LWIP_ALTCP && LWIP_EVENT_API) +#error "The application layered tcp API does not work with LWIP_EVENT_API" +#endif +#if (MEM_LIBC_MALLOC && MEM_USE_POOLS) +#error "MEM_LIBC_MALLOC and MEM_USE_POOLS may not both be simultaneously enabled in your lwipopts.h" +#endif +#if (MEM_USE_POOLS && !MEMP_USE_CUSTOM_POOLS) +#error "MEM_USE_POOLS requires custom pools (MEMP_USE_CUSTOM_POOLS) to be enabled in your lwipopts.h" +#endif +#if (PBUF_POOL_BUFSIZE <= MEM_ALIGNMENT) +#error "PBUF_POOL_BUFSIZE must be greater than MEM_ALIGNMENT or the offset may take the full first pbuf" +#endif +#if (DNS_LOCAL_HOSTLIST && !DNS_LOCAL_HOSTLIST_IS_DYNAMIC && !(defined(DNS_LOCAL_HOSTLIST_INIT))) +#error "you have to define define DNS_LOCAL_HOSTLIST_INIT {{'host1', 0x123}, {'host2', 0x234}} to initialize DNS_LOCAL_HOSTLIST" +#endif +#if PPP_SUPPORT && !PPPOS_SUPPORT && !PPPOE_SUPPORT && !PPPOL2TP_SUPPORT +#error "PPP_SUPPORT needs at least one of PPPOS_SUPPORT, PPPOE_SUPPORT or PPPOL2TP_SUPPORT turned on" +#endif +#if 
PPP_SUPPORT && !PPP_IPV4_SUPPORT && !PPP_IPV6_SUPPORT +#error "PPP_SUPPORT needs PPP_IPV4_SUPPORT and/or PPP_IPV6_SUPPORT turned on" +#endif +#if PPP_SUPPORT && PPP_IPV4_SUPPORT && !LWIP_IPV4 +#error "PPP_IPV4_SUPPORT needs LWIP_IPV4 turned on" +#endif +#if PPP_SUPPORT && PPP_IPV6_SUPPORT && !LWIP_IPV6 +#error "PPP_IPV6_SUPPORT needs LWIP_IPV6 turned on" +#endif +#if !LWIP_ETHERNET && (LWIP_ARP || PPPOE_SUPPORT) +#error "LWIP_ETHERNET needs to be turned on for LWIP_ARP or PPPOE_SUPPORT" +#endif +#if LWIP_TCPIP_CORE_LOCKING_INPUT && !LWIP_TCPIP_CORE_LOCKING +#error "When using LWIP_TCPIP_CORE_LOCKING_INPUT, LWIP_TCPIP_CORE_LOCKING must be enabled, too" +#endif +#if LWIP_TCP && LWIP_NETIF_TX_SINGLE_PBUF && !TCP_OVERSIZE +#error "LWIP_NETIF_TX_SINGLE_PBUF needs TCP_OVERSIZE enabled to create single-pbuf TCP packets" +#endif +#if LWIP_NETCONN && LWIP_TCP +#if NETCONN_COPY != TCP_WRITE_FLAG_COPY +#error "NETCONN_COPY != TCP_WRITE_FLAG_COPY" +#endif +#if NETCONN_MORE != TCP_WRITE_FLAG_MORE +#error "NETCONN_MORE != TCP_WRITE_FLAG_MORE" +#endif +#endif /* LWIP_NETCONN && LWIP_TCP */ +#if LWIP_SOCKET +#endif /* LWIP_SOCKET */ + + +/* Compile-time checks for deprecated options. + */ +#ifdef MEMP_NUM_TCPIP_MSG +#error "MEMP_NUM_TCPIP_MSG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef TCP_REXMIT_DEBUG +#error "TCP_REXMIT_DEBUG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef RAW_STATS +#error "RAW_STATS option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_QUEUE_FIRST +#error "ETHARP_QUEUE_FIRST option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_ALWAYS_INSERT +#error "ETHARP_ALWAYS_INSERT option is deprecated. Remove it from your lwipopts.h." +#endif +#if !NO_SYS && LWIP_TCPIP_CORE_LOCKING && LWIP_COMPAT_MUTEX && !defined(LWIP_COMPAT_MUTEX_ALLOWED) +#error "LWIP_COMPAT_MUTEX cannot prevent priority inversion. It is recommended to implement priority-aware mutexes. (Define LWIP_COMPAT_MUTEX_ALLOWED to disable this error.)" +#endif + +#ifndef LWIP_DISABLE_TCP_SANITY_CHECKS +#define LWIP_DISABLE_TCP_SANITY_CHECKS 0 +#endif +#ifndef LWIP_DISABLE_MEMP_SANITY_CHECKS +#define LWIP_DISABLE_MEMP_SANITY_CHECKS 0 +#endif + +/* MEMP sanity checks */ +#if MEMP_MEM_MALLOC +#if !LWIP_DISABLE_MEMP_SANITY_CHECKS +#if LWIP_NETCONN || LWIP_SOCKET +#if !MEMP_NUM_NETCONN && LWIP_SOCKET +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN cannot be 0 when using sockets!" +#endif +#else /* MEMP_MEM_MALLOC */ +#if MEMP_NUM_NETCONN > (MEMP_NUM_TCP_PCB+MEMP_NUM_TCP_PCB_LISTEN+MEMP_NUM_UDP_PCB+MEMP_NUM_RAW_PCB) +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN should be less than the sum of MEMP_NUM_{TCP,RAW,UDP}_PCB+MEMP_NUM_TCP_PCB_LISTEN. If you know what you are doing, define LWIP_DISABLE_MEMP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_NETCONN || LWIP_SOCKET */ +#endif /* !LWIP_DISABLE_MEMP_SANITY_CHECKS */ +#if MEM_USE_POOLS +#error "MEMP_MEM_MALLOC and MEM_USE_POOLS cannot be enabled at the same time" +#endif +#ifdef LWIP_HOOK_MEMP_AVAILABLE +#error "LWIP_HOOK_MEMP_AVAILABLE doesn't make sense with MEMP_MEM_MALLOC" +#endif +#endif /* MEMP_MEM_MALLOC */ + +/* TCP sanity checks */ +#if !LWIP_DISABLE_TCP_SANITY_CHECKS +#if LWIP_TCP +#if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN) +#error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." 
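The send-side sizing rules in this sanity block, both the MEMP_NUM_TCP_SEG relation above and the TCP_SND_BUF / TCP_SND_QUEUELEN relations that follow, are easiest to satisfy by deriving everything from TCP_MSS, which is also how the lwIP defaults are expressed. A self-consistent illustration (example values, not this project's settings):

/* Example only: send-path sizing that passes the checks in this block. */
#define TCP_MSS           1460
#define TCP_SND_BUF       (4 * TCP_MSS)                /* >= 2 * TCP_MSS               */
#define TCP_SND_QUEUELEN  (4 * TCP_SND_BUF / TCP_MSS)  /* >= 2 * TCP_SND_BUF / TCP_MSS */
#define MEMP_NUM_TCP_SEG  TCP_SND_QUEUELEN             /* at least TCP_SND_QUEUELEN    */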
+#endif +#if TCP_SND_BUF < (2 * TCP_MSS) +#error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SND_QUEUELEN < (2 * (TCP_SND_BUF / TCP_MSS)) +#error "lwip_sanity_check: WARNING: TCP_SND_QUEUELEN must be at least as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= TCP_SND_BUF +#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must be less than TCP_SND_BUF. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= (0xFFFF - (4 * TCP_MSS)) +#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must at least be 4*MSS below u16_t overflow!" +#endif +#if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN +#error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)) +#error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)))) +#error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_WND < TCP_MSS +#error "lwip_sanity_check: WARNING: TCP_WND is smaller than MSS. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_TCP */ +#endif /* !LWIP_DISABLE_TCP_SANITY_CHECKS */ + +/** + * @ingroup lwip_nosys + * Initialize all modules. + * Use this in NO_SYS mode. Use tcpip_init() otherwise. + */ +void +lwip_init(void) +{ +#ifndef LWIP_SKIP_CONST_CHECK + int a = 0; + LWIP_UNUSED_ARG(a); + LWIP_ASSERT("LWIP_CONST_CAST not implemented correctly. Check your lwIP port.", LWIP_CONST_CAST(void *, &a) == &a); +#endif +#ifndef LWIP_SKIP_PACKING_CHECK + LWIP_ASSERT("Struct packing not implemented correctly. 
Check your lwIP port.", sizeof(struct packed_struct_test) == PACKED_STRUCT_TEST_EXPECTED_SIZE); +#endif + + /* Modules initialization */ + stats_init(); +#if !NO_SYS + sys_init(); +#endif /* !NO_SYS */ + mem_init(); + memp_init(); + pbuf_init(); + netif_init(); +#if LWIP_IPV4 + ip_init(); +#if LWIP_ARP + etharp_init(); +#endif /* LWIP_ARP */ +#endif /* LWIP_IPV4 */ +#if LWIP_RAW + raw_init(); +#endif /* LWIP_RAW */ +#if LWIP_UDP + udp_init(); +#endif /* LWIP_UDP */ +#if LWIP_TCP + tcp_init(); +#endif /* LWIP_TCP */ +#if LWIP_IGMP + igmp_init(); +#endif /* LWIP_IGMP */ +#if LWIP_DNS + dns_init(); +#endif /* LWIP_DNS */ +#if PPP_SUPPORT + ppp_init(); +#endif + +#if LWIP_TIMERS + sys_timeouts_init(); +#endif /* LWIP_TIMERS */ +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ip.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ip.c new file mode 100644 index 000000000..18514cf32 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ip.c @@ -0,0 +1,167 @@ +/** + * @file + * Common IPv4 and IPv6 code + * + * @defgroup ip IP + * @ingroup callbackstyle_api + * + * @defgroup ip4 IPv4 + * @ingroup ip + * + * @defgroup ip6 IPv6 + * @ingroup ip + * + * @defgroup ipaddr IP address handling + * @ingroup infrastructure + * + * @defgroup ip4addr IPv4 only + * @ingroup ipaddr + * + * @defgroup ip6addr IPv6 only + * @ingroup ipaddr + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 || LWIP_IPV6 + +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +/** Global data for both IPv4 and IPv6 */ +struct ip_globals ip_data; + +#if LWIP_IPV4 && LWIP_IPV6 + +const ip_addr_t ip_addr_any_type = IPADDR_ANY_TYPE_INIT; + +/** + * @ingroup ipaddr + * Convert numeric IP address (both versions) into ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) 
buffer that holds the ASCII + * representation of addr + */ +char *ipaddr_ntoa(const ip_addr_t *addr) +{ + if (addr == NULL) { + return NULL; + } + if (IP_IS_V6(addr)) { + return ip6addr_ntoa(ip_2_ip6(addr)); + } else { + return ip4addr_ntoa(ip_2_ip4(addr)); + } +} + +/** + * @ingroup ipaddr + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen) +{ + if (addr == NULL) { + return NULL; + } + if (IP_IS_V6(addr)) { + return ip6addr_ntoa_r(ip_2_ip6(addr), buf, buflen); + } else { + return ip4addr_ntoa_r(ip_2_ip4(addr), buf, buflen); + } +} + +/** + * @ingroup ipaddr + * Convert IP address string (both versions) to numeric. + * The version is auto-detected from the string. + * + * @param cp IP address string to convert + * @param addr conversion result is stored here + * @return 1 on success, 0 on error + */ +int +ipaddr_aton(const char *cp, ip_addr_t *addr) +{ + if (cp != NULL) { + const char *c; + for (c = cp; *c != 0; c++) { + if (*c == ':') { + /* contains a colon: IPv6 address */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V6); + } + return ip6addr_aton(cp, ip_2_ip6(addr)); + } else if (*c == '.') { + /* contains a dot: IPv4 address */ + break; + } + } + /* call ip4addr_aton as fallback or if IPv4 was found */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V4); + } + return ip4addr_aton(cp, ip_2_ip4(addr)); + } + return 0; +} + +/** + * @ingroup lwip_nosys + * If both IP versions are enabled, this function can dispatch packets to the correct one. + * Don't call directly, pass to netif_add() and call netif->input(). + */ +err_t +ip_input(struct pbuf *p, struct netif *inp) +{ + if (p != NULL) { + if (IP_HDR_GET_VERSION(p->payload) == 6) { + return ip6_input(p, inp); + } + return ip4_input(p, inp); + } + return ERR_VAL; +} + +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#endif /* LWIP_IPV4 || LWIP_IPV6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c new file mode 100644 index 000000000..9f7139bc6 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c @@ -0,0 +1,527 @@ +/** + * @file + * AutoIP Automatic LinkLocal IP Configuration + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + * @defgroup autoip AUTOIP + * @ingroup ip4 + * AUTOIP related functions + * USAGE: + * + * define @ref LWIP_AUTOIP 1 in your lwipopts.h + * Options: + * AUTOIP_TMR_INTERVAL msecs, + * I recommend a value of 100. The value must divide 1000 with a remainder almost 0. + * Possible values are 1000, 500, 333, 250, 200, 166, 142, 125, 111, 100 .... + * + * Without DHCP: + * - Call autoip_start() after netif_add(). + * + * With DHCP: + * - define @ref LWIP_DHCP_AUTOIP_COOP 1 in your lwipopts.h. + * - Configure your DHCP Client. + * + * @see netifapi_autoip + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mem.h" +/* #include "lwip/udp.h" */ +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/autoip.h" +#include "lwip/etharp.h" +#include "lwip/prot/autoip.h" + +#include + +/** Pseudo random macro based on netif informations. + * You could use "rand()" from the C Library if you define LWIP_AUTOIP_RAND in lwipopts.h */ +#ifndef LWIP_AUTOIP_RAND +#define LWIP_AUTOIP_RAND(netif) ( (((u32_t)((netif->hwaddr[5]) & 0xff) << 24) | \ + ((u32_t)((netif->hwaddr[3]) & 0xff) << 16) | \ + ((u32_t)((netif->hwaddr[2]) & 0xff) << 8) | \ + ((u32_t)((netif->hwaddr[4]) & 0xff))) + \ + (netif_autoip_data(netif)? netif_autoip_data(netif)->tried_llipaddr : 0)) +#endif /* LWIP_AUTOIP_RAND */ + +/** + * Macro that generates the initial IP address to be tried by AUTOIP. + * If you want to override this, define it to something else in lwipopts.h. + */ +#ifndef LWIP_AUTOIP_CREATE_SEED_ADDR +#define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \ + lwip_htonl(AUTOIP_RANGE_START + ((u32_t)(((u8_t)(netif->hwaddr[4])) | \ + ((u32_t)((u8_t)(netif->hwaddr[5]))) << 8))) +#endif /* LWIP_AUTOIP_CREATE_SEED_ADDR */ + +/* static functions */ +static err_t autoip_arp_announce(struct netif *netif); +static void autoip_start_probing(struct netif *netif); + +/** + * @ingroup autoip + * Set a statically allocated struct autoip to work with. + * Using this prevents autoip_start to allocate it using mem_malloc. 
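+ *
+ * A minimal sketch, for illustration only (not part of upstream lwIP):
+ * "my_netif" is an application-owned netif that has already been added
+ * with netif_add().
+ *
+ *   static struct autoip autoip_data;            /* statically allocated */
+ *   autoip_set_struct(&my_netif, &autoip_data);
+ *   autoip_start(&my_netif);                     /* no mem_malloc() needed */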
+ * + * @param netif the netif for which to set the struct autoip + * @param autoip (uninitialised) autoip struct allocated by the application + */ +void +autoip_set_struct(struct netif *netif, struct autoip *autoip) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("autoip != NULL", autoip != NULL); + LWIP_ASSERT("netif already has a struct autoip set", + netif_autoip_data(netif) == NULL); + + /* clear data structure */ + memset(autoip, 0, sizeof(struct autoip)); + /* autoip->state = AUTOIP_STATE_OFF; */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); +} + +/** Restart AutoIP client and check the next address (conflict detected) + * + * @param netif The netif under AutoIP control + */ +static void +autoip_restart(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + autoip->tried_llipaddr++; + autoip_start(netif); +} + +/** + * Handle a IP address conflict after an ARP conflict detection + */ +static void +autoip_handle_arp_conflict(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + /* RFC3927, 2.5 "Conflict Detection and Defense" allows two options where + a) means retreat on the first conflict and + b) allows to keep an already configured address when having only one + conflict in 10 seconds + We use option b) since it helps to improve the chance that one of the two + conflicting hosts may be able to retain its address. */ + + if (autoip->lastconflict > 0) { + /* retreat, there was a conflicting ARP in the last DEFEND_INTERVAL seconds */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_handle_arp_conflict(): we are defending, but in DEFEND_INTERVAL, retreating\n")); + + /* Active TCP sessions are aborted when removing the ip addresss */ + autoip_restart(netif); + } else { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_handle_arp_conflict(): we are defend, send ARP Announce\n")); + autoip_arp_announce(netif); + autoip->lastconflict = DEFEND_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Create an IP-Address out of range 169.254.1.0 to 169.254.254.255 + * + * @param netif network interface on which create the IP-Address + * @param ipaddr ip address to initialize + */ +static void +autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr) +{ + struct autoip *autoip = netif_autoip_data(netif); + + /* Here we create an IP-Address out of range 169.254.1.0 to 169.254.254.255 + * compliant to RFC 3927 Section 2.1 + * We have 254 * 256 possibilities */ + + u32_t addr = lwip_ntohl(LWIP_AUTOIP_CREATE_SEED_ADDR(netif)); + addr += autoip->tried_llipaddr; + addr = AUTOIP_NET | (addr & 0xffff); + /* Now, 169.254.0.0 <= addr <= 169.254.255.255 */ + + if (addr < AUTOIP_RANGE_START) { + addr += AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; + } + if (addr > AUTOIP_RANGE_END) { + addr -= AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; + } + LWIP_ASSERT("AUTOIP address not in range", (addr >= AUTOIP_RANGE_START) && + (addr <= AUTOIP_RANGE_END)); + ip4_addr_set_u32(ipaddr, lwip_htonl(addr)); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_create_addr(): tried_llipaddr=%"U16_F", %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + (u16_t)(autoip->tried_llipaddr), ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), + ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); +} + +/** + * Sends an ARP probe from a network interface + * + * @param netif network interface used to send the probe + */ +static err_t +autoip_arp_probe(struct netif 
*netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + /* this works because netif->ip_addr is ANY */ + return etharp_request(netif, &autoip->llipaddr); +} + +/** + * Sends an ARP announce from a network interface + * + * @param netif network interface used to send the announce + */ +static err_t +autoip_arp_announce(struct netif *netif) +{ + return etharp_gratuitous(netif); +} + +/** + * Configure interface for use with current LL IP-Address + * + * @param netif network interface to configure with current LL IP-Address + */ +static err_t +autoip_bind(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + ip4_addr_t sn_mask, gw_addr; + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_bind(netif=%p) %c%c%"U16_F" %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num, + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + IP4_ADDR(&sn_mask, 255, 255, 0, 0); + IP4_ADDR(&gw_addr, 0, 0, 0, 0); + + netif_set_addr(netif, &autoip->llipaddr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ + + return ERR_OK; +} + +/** + * @ingroup autoip + * Start AutoIP client + * + * @param netif network interface on which start the AutoIP client + */ +err_t +autoip_start(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + err_t result = ERR_OK; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + + /* Set IP-Address, Netmask and Gateway to 0 to make sure that + * ARP Packets are formed correctly + */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], + netif->name[1], (u16_t)netif->num)); + if (autoip == NULL) { + /* no AutoIP client attached yet? */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): starting new AUTOIP client\n")); + autoip = (struct autoip *)mem_calloc(1, sizeof(struct autoip)); + if (autoip == NULL) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): could not allocate autoip\n")); + return ERR_MEM; + } + /* store this AutoIP client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): allocated autoip")); + } else { + autoip->state = AUTOIP_STATE_OFF; + autoip->ttw = 0; + autoip->sent_num = 0; + ip4_addr_set_zero(&autoip->llipaddr); + autoip->lastconflict = 0; + } + + autoip_create_addr(netif, &(autoip->llipaddr)); + autoip_start_probing(netif); + + return result; +} + +static void +autoip_start_probing(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + autoip->state = AUTOIP_STATE_PROBING; + autoip->sent_num = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start_probing(): changing state to PROBING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + /* time to wait to first probe, this is randomly + * chosen out of 0 to PROBE_WAIT seconds. 
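+   * (Worked example, assuming the recommended AUTOIP_TMR_INTERVAL of 100 ms:
+   * AUTOIP_TICKS_PER_SECOND is then 10, so with PROBE_WAIT = 1 s the
+   * assignment below reduces to LWIP_AUTOIP_RAND(netif) % 10, i.e. a wait of
+   * 0..9 timer ticks, or 0..900 ms, before the first probe is sent.)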
+ * compliant to RFC 3927 Section 2.2.1 + */ + autoip->ttw = (u16_t)(LWIP_AUTOIP_RAND(netif) % (PROBE_WAIT * AUTOIP_TICKS_PER_SECOND)); + + /* + * if we tried more then MAX_CONFLICTS we must limit our rate for + * acquiring and probing address + * compliant to RFC 3927 Section 2.2.1 + */ + if (autoip->tried_llipaddr > MAX_CONFLICTS) { + autoip->ttw = RATE_LIMIT_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Handle a possible change in the network configuration. + * + * If there is an AutoIP address configured, take the interface down + * and begin probing with the same address. + */ +void +autoip_network_changed(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + if (autoip && (autoip->state != AUTOIP_STATE_OFF)) { + autoip_start_probing(netif); + } +} + +/** + * @ingroup autoip + * Stop AutoIP client + * + * @param netif network interface on which stop the AutoIP client + */ +err_t +autoip_stop(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + LWIP_ASSERT_CORE_LOCKED(); + if (autoip != NULL) { + autoip->state = AUTOIP_STATE_OFF; + if (ip4_addr_islinklocal(netif_ip4_addr(netif))) { + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + } + } + return ERR_OK; +} + +/** + * Has to be called in loop every AUTOIP_TMR_INTERVAL milliseconds + */ +void +autoip_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct autoip *autoip = netif_autoip_data(netif); + /* only act on AutoIP configured interfaces */ + if (autoip != NULL) { + if (autoip->lastconflict > 0) { + autoip->lastconflict--; + } + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_tmr() AutoIP-State: %"U16_F", ttw=%"U16_F"\n", + (u16_t)(autoip->state), autoip->ttw)); + + if (autoip->ttw > 0) { + autoip->ttw--; + } + + switch (autoip->state) { + case AUTOIP_STATE_PROBING: + if (autoip->ttw == 0) { + if (autoip->sent_num >= PROBE_NUM) { + /* Switch to ANNOUNCING: now we can bind to an IP address and use it */ + autoip->state = AUTOIP_STATE_ANNOUNCING; + autoip_bind(netif); + /* autoip_bind() calls netif_set_addr(): this triggers a gratuitous ARP + which counts as an announcement */ + autoip->sent_num = 1; + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): changing state to ANNOUNCING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } else { + autoip_arp_probe(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() PROBING Sent Probe\n")); + autoip->sent_num++; + if (autoip->sent_num == PROBE_NUM) { + /* calculate time to wait to for announce */ + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + } else { + /* calculate time to wait to next probe */ + autoip->ttw = (u16_t)((LWIP_AUTOIP_RAND(netif) % + ((PROBE_MAX - PROBE_MIN) * AUTOIP_TICKS_PER_SECOND) ) + + PROBE_MIN * AUTOIP_TICKS_PER_SECOND); + } + } + } + break; + + case AUTOIP_STATE_ANNOUNCING: + if (autoip->ttw == 0) { + autoip_arp_announce(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() ANNOUNCING Sent Announce\n")); + autoip->ttw = ANNOUNCE_INTERVAL * AUTOIP_TICKS_PER_SECOND; + autoip->sent_num++; + + if (autoip->sent_num >= ANNOUNCE_NUM) { + autoip->state = AUTOIP_STATE_BOUND; + autoip->sent_num = 0; + autoip->ttw = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): 
changing state to BOUND: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } + } + break; + + default: + /* nothing to do in other states */ + break; + } + } + } +} + +/** + * Handles every incoming ARP Packet, called by etharp_input(). + * + * @param netif network interface to use for autoip processing + * @param hdr Incoming ARP packet + */ +void +autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr) +{ + struct autoip *autoip = netif_autoip_data(netif); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_arp_reply()\n")); + if ((autoip != NULL) && (autoip->state != AUTOIP_STATE_OFF)) { + /* when ip.src == llipaddr && hw.src != netif->hwaddr + * + * when probing ip.dst == llipaddr && hw.src != netif->hwaddr + * we have a conflict and must solve it + */ + ip4_addr_t sipaddr, dipaddr; + struct eth_addr netifaddr; + SMEMCPY(netifaddr.addr, netif->hwaddr, ETH_HWADDR_LEN); + + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). + */ + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr); + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr); + + if (autoip->state == AUTOIP_STATE_PROBING) { + /* RFC 3927 Section 2.2.1: + * from beginning to after ANNOUNCE_WAIT + * seconds we have a conflict if + * ip.src == llipaddr OR + * ip.dst == llipaddr && hw.src != own hwaddr + */ + if ((ip4_addr_cmp(&sipaddr, &autoip->llipaddr)) || + (ip4_addr_isany_val(sipaddr) && + ip4_addr_cmp(&dipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr))) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Probe Conflict detected\n")); + autoip_restart(netif); + } + } else { + /* RFC 3927 Section 2.5: + * in any state we have a conflict if + * ip.src == llipaddr && hw.src != own hwaddr + */ + if (ip4_addr_cmp(&sipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr)) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Conflicting ARP-Packet detected\n")); + autoip_handle_arp_conflict(netif); + } + } + } +} + +/** check if AutoIP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if AutoIP supplied netif->ip_addr (state BOUND or ANNOUNCING), + * 0 otherwise + */ +u8_t +autoip_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_autoip_data(netif) != NULL)) { + struct autoip *autoip = netif_autoip_data(netif); + return (autoip->state == AUTOIP_STATE_BOUND) || (autoip->state == AUTOIP_STATE_ANNOUNCING); + } + return 0; +} + +u8_t +autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr) +{ + struct autoip *autoip = netif_autoip_data(netif); + return (autoip != NULL) && ip4_addr_cmp(addr, &(autoip->llipaddr)); +} + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c new file mode 100644 index 000000000..534574fea --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c @@ -0,0 +1,1990 @@ +/** + * @file + * Dynamic Host Configuration Protocol client + * + * @defgroup dhcp4 DHCPv4 + * @ingroup ip4 + * DHCP (IPv4) related functions + * 
This is a DHCP client for the lwIP TCP/IP stack. It aims to conform + * with RFC 2131 and RFC 2132. + * + * @todo: + * - Support for interfaces other than Ethernet (SLIP, PPP, ...) + * + * Options: + * @ref DHCP_COARSE_TIMER_SECS (recommended 60 which is a minute) + * @ref DHCP_FINE_TIMER_MSECS (recommended 500 which equals TCP coarse timer) + * + * dhcp_start() starts a DHCP client instance which + * configures the interface by obtaining an IP address lease and maintaining it. + * + * Use dhcp_release() to end the lease and use dhcp_stop() + * to remove the DHCP client. + * + * @see LWIP_HOOK_DHCP_APPEND_OPTIONS + * @see LWIP_HOOK_DHCP_PARSE_OPTION + * + * @see netifapi_dhcp4 + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. 
+ * + * Author: Leon Woestenberg + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/def.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/dns.h" +#include "lwip/etharp.h" +#include "lwip/prot/dhcp.h" +#include "lwip/prot/iana.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif +#ifndef LWIP_HOOK_DHCP_APPEND_OPTIONS +#define LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr) +#endif +#ifndef LWIP_HOOK_DHCP_PARSE_OPTION +#define LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0) +#endif + +/** DHCP_CREATE_RAND_XID: if this is set to 1, the xid is created using + * LWIP_RAND() (this overrides DHCP_GLOBAL_XID) + */ +#ifndef DHCP_CREATE_RAND_XID +#define DHCP_CREATE_RAND_XID 1 +#endif + +/** Default for DHCP_GLOBAL_XID is 0xABCD0000 + * This can be changed by defining DHCP_GLOBAL_XID and DHCP_GLOBAL_XID_HEADER, e.g. + * \#define DHCP_GLOBAL_XID_HEADER "stdlib.h" + * \#define DHCP_GLOBAL_XID rand() + */ +#ifdef DHCP_GLOBAL_XID_HEADER +#include DHCP_GLOBAL_XID_HEADER /* include optional starting XID generation prototypes */ +#endif + +/** DHCP_OPTION_MAX_MSG_SIZE is set to the MTU + * MTU is checked to be big enough in dhcp_start */ +#define DHCP_MAX_MSG_LEN(netif) (netif->mtu) +#define DHCP_MAX_MSG_LEN_MIN_REQUIRED 576 +/** Minimum length for reply before packet is parsed */ +#define DHCP_MIN_REPLY_LEN 44 + +#define REBOOT_TRIES 2 + +#if LWIP_DNS && LWIP_DHCP_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP_MAX_DNS_SERVERS +#define LWIP_DHCP_PROVIDE_DNS_SERVERS LWIP_DHCP_MAX_DNS_SERVERS +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS 0 +#endif + +/** Option handling: options are parsed in dhcp_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp_option_idx { + DHCP_OPTION_IDX_OVERLOAD = 0, + DHCP_OPTION_IDX_MSG_TYPE, + DHCP_OPTION_IDX_SERVER_ID, + DHCP_OPTION_IDX_LEASE_TIME, + DHCP_OPTION_IDX_T1, + DHCP_OPTION_IDX_T2, + DHCP_OPTION_IDX_SUBNET_MASK, + DHCP_OPTION_IDX_ROUTER, +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + DHCP_OPTION_IDX_DNS_SERVER, + DHCP_OPTION_IDX_DNS_SERVER_LAST = DHCP_OPTION_IDX_DNS_SERVER + LWIP_DHCP_PROVIDE_DNS_SERVERS - 1, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + DHCP_OPTION_IDX_NTP_SERVER, + DHCP_OPTION_IDX_NTP_SERVER_LAST = DHCP_OPTION_IDX_NTP_SERVER + LWIP_DHCP_MAX_NTP_SERVERS - 1, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP_OPTION_IDX_MAX +}; + +/** Holds the decoded option values, only valid while in dhcp_recv. + @todo: move this into struct dhcp? */ +u32_t dhcp_rx_options_val[DHCP_OPTION_IDX_MAX]; +/** Holds a flag which option was received and is contained in dhcp_rx_options_val, + only valid while in dhcp_recv. + @todo: move this into struct dhcp? 
*/ +u8_t dhcp_rx_options_given[DHCP_OPTION_IDX_MAX]; + +static u8_t dhcp_discover_request_options[] = { + DHCP_OPTION_SUBNET_MASK, + DHCP_OPTION_ROUTER, + DHCP_OPTION_BROADCAST +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + , DHCP_OPTION_DNS_SERVER +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + , DHCP_OPTION_NTP +#endif /* LWIP_DHCP_GET_NTP_SRV */ +}; + +#ifdef DHCP_GLOBAL_XID +static u32_t xid; +static u8_t xid_initialised; +#endif /* DHCP_GLOBAL_XID */ + +#define dhcp_option_given(dhcp, idx) (dhcp_rx_options_given[idx] != 0) +#define dhcp_got_option(dhcp, idx) (dhcp_rx_options_given[idx] = 1) +#define dhcp_clear_option(dhcp, idx) (dhcp_rx_options_given[idx] = 0) +#define dhcp_clear_all_options(dhcp) (memset(dhcp_rx_options_given, 0, sizeof(dhcp_rx_options_given))) +#define dhcp_get_option_value(dhcp, idx) (dhcp_rx_options_val[idx]) +#define dhcp_set_option_value(dhcp, idx, val) (dhcp_rx_options_val[idx] = (val)) + +static struct udp_pcb *dhcp_pcb; +static u8_t dhcp_pcb_refcount; + +/* DHCP client state machine functions */ +static err_t dhcp_discover(struct netif *netif); +static err_t dhcp_select(struct netif *netif); +static void dhcp_bind(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +static err_t dhcp_decline(struct netif *netif); +#endif /* DHCP_DOES_ARP_CHECK */ +static err_t dhcp_rebind(struct netif *netif); +static err_t dhcp_reboot(struct netif *netif); +static void dhcp_set_state(struct dhcp *dhcp, u8_t new_state); + +/* receive, unfold, parse and free incoming messages */ +static void dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/* set the DHCP timers */ +static void dhcp_timeout(struct netif *netif); +static void dhcp_t1_timeout(struct netif *netif); +static void dhcp_t2_timeout(struct netif *netif); + +/* build outgoing messages */ +/* create a DHCP message, fill in common headers */ +static struct pbuf *dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len); +/* add a DHCP option (type, then length in bytes) */ +static u16_t dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len); +/* add option values */ +static u16_t dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value); +static u16_t dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value); +static u16_t dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value); +#if LWIP_NETIF_HOSTNAME +static u16_t dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif); +#endif /* LWIP_NETIF_HOSTNAME */ +/* always add the DHCP options trailer to end and pad */ +static void dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp_inc_pcb_refcount(void) +{ + if (dhcp_pcb_refcount == 0) { + LWIP_ASSERT("dhcp_inc_pcb_refcount(): memory leak", dhcp_pcb == NULL); + + /* allocate UDP PCB */ + dhcp_pcb = udp_new(); + + if (dhcp_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_CLIENT); + udp_connect(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_SERVER); + udp_recv(dhcp_pcb, dhcp_recv, NULL); + } + + dhcp_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp_pcb_refcount(): 
refcount error", (dhcp_pcb_refcount > 0)); + dhcp_pcb_refcount--; + + if (dhcp_pcb_refcount == 0) { + udp_remove(dhcp_pcb); + dhcp_pcb = NULL; + } +} + +/** + * Back-off the DHCP client (because of a received NAK response). + * + * Back-off the DHCP client because of a received NAK. Receiving a + * NAK means the client asked for something non-sensible, for + * example when it tries to renew a lease obtained on another network. + * + * We clear any existing set IP address and restart DHCP negotiation + * afresh (as per RFC2131 3.2.3). + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_nak(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_nak(netif=%p) %c%c%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* Change to a defined state - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* remove IP address from interface (must no longer be used, as per RFC2131) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + /* We can immediately restart discovery */ + dhcp_discover(netif); +} + +#if DHCP_DOES_ARP_CHECK +/** + * Checks if the offered IP address is already in use. + * + * It does so by sending an ARP request for the offered address and + * entering CHECKING state. If no ARP reply is received within a small + * interval, the address is assumed to be free for use by us. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_check(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_check(netif=%p) %c%c\n", (void *)netif, (s16_t)netif->name[0], + (s16_t)netif->name[1])); + dhcp_set_state(dhcp, DHCP_STATE_CHECKING); + /* create an ARP query for the offered IP address, expecting that no host + responds, as the IP address should not be in use. */ + result = etharp_query(netif, &dhcp->offered_ip_addr, NULL); + if (result != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_check: could not perform ARP query\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 500; + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_check(): set request timeout %"U16_F" msecs\n", msecs)); +} +#endif /* DHCP_DOES_ARP_CHECK */ + +/** + * Remember the configuration offered by a DHCP server. 
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_offer(struct netif *netif, struct dhcp_msg *msg_in) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_offer(netif=%p) %c%c%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* obtain the server address */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SERVER_ID)) { + dhcp->request_timeout = 0; /* stop timer */ + + ip_addr_set_ip4_u32(&dhcp->server_ip_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SERVER_ID))); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): server 0x%08"X32_F"\n", + ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + /* remember offered address */ + ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): offer for 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_handle_offer(netif=%p) did not get server ID!\n", (void *)netif)); + } +} + +/** + * Select a DHCP server offer out of all offers. + * + * Simply select the first offer received. + * + * @param netif the netif under DHCP control + * @return lwIP specific error (see error.h) + */ +static err_t +dhcp_select(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ERROR("dhcp_select: netif != NULL", (netif != NULL), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_select: dhcp != NULL", (dhcp != NULL), return ERR_VAL;); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_select(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + dhcp_set_state(dhcp, DHCP_STATE_REQUESTING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + /* MUST request the offered IP address */ + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REQUESTING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* send broadcast to any DHCP server 
*/ + result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_select: REQUESTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_select: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_select(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * The DHCP timer that checks for lease renewal/rebind timeouts. + * Must be called once a minute (see @ref DHCP_COARSE_TIMER_SECS). + */ +void +dhcp_coarse_tmr(void) +{ + struct netif *netif; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_coarse_tmr()\n")); + /* iterate through all network interfaces */ + NETIF_FOREACH(netif) { + /* only act on DHCP configured interfaces */ + struct dhcp *dhcp = netif_dhcp_data(netif); + if ((dhcp != NULL) && (dhcp->state != DHCP_STATE_OFF)) { + /* compare lease time to expire timeout */ + if (dhcp->t0_timeout && (++dhcp->lease_used == dhcp->t0_timeout)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t0 timeout\n")); + /* this clients' lease time has expired */ + dhcp_release_and_stop(netif); + dhcp_start(netif); + /* timer is active (non zero), and triggers (zeroes) now? */ + } else if (dhcp->t2_rebind_time && (dhcp->t2_rebind_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t2 timeout\n")); + /* this clients' rebind timeout triggered */ + dhcp_t2_timeout(netif); + /* timer is active (non zero), and triggers (zeroes) now */ + } else if (dhcp->t1_renew_time && (dhcp->t1_renew_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t1 timeout\n")); + /* this clients' renewal timeout triggered */ + dhcp_t1_timeout(netif); + } + } + } +} + +/** + * DHCP transaction timeout handling (this function must be called every 500ms, + * see @ref DHCP_FINE_TIMER_MSECS). + * + * A DHCP server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCP request is timed out. + */ +void +dhcp_fine_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct dhcp *dhcp = netif_dhcp_data(netif); + /* only act on DHCP configured interfaces */ + if (dhcp != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp->request_timeout > 1) { + dhcp->request_timeout--; + } else if (dhcp->request_timeout == 1) { + dhcp->request_timeout--; + /* { dhcp->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_fine_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp_timeout(netif); + } + } + } +} + +/** + * A DHCP negotiation transaction, or ARP request, has timed out. + * + * The timer that was started with the DHCP or ARP request has + * timed out, indicating no response was received in time. 
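+ *
+ * Worked example, for illustration only (not upstream lwIP text): request
+ * timeouts are stored as ticks of the fine timer, so with the recommended
+ * DHCP_FINE_TIMER_MSECS of 500 an 8 s retransmission window is programmed as
+ *
+ *   request_timeout = (8000 + 500 - 1) / 500 = 16;   // sixteen 500 ms ticks
+ *
+ * and this function runs once dhcp_fine_tmr() has counted those ticks down
+ * to zero.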
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout()\n")); + /* back-off period has passed, or server selection timed out */ + if ((dhcp->state == DHCP_STATE_BACKING_OFF) || (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout(): restarting discovery\n")); + dhcp_discover(netif); + /* receiving the requested lease timed out */ + } else if (dhcp->state == DHCP_STATE_REQUESTING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, DHCP request timed out\n")); + if (dhcp->tries <= 5) { + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, releasing, restarting\n")); + dhcp_release_and_stop(netif); + dhcp_start(netif); + } +#if DHCP_DOES_ARP_CHECK + /* received no ARP reply for the offered address (which is good) */ + } else if (dhcp->state == DHCP_STATE_CHECKING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): CHECKING, ARP request timed out\n")); + if (dhcp->tries <= 1) { + dhcp_check(netif); + /* no ARP replies on the offered address, + looks like the IP address is indeed free */ + } else { + /* bind the interface to the offered address */ + dhcp_bind(netif); + } +#endif /* DHCP_DOES_ARP_CHECK */ + } else if (dhcp->state == DHCP_STATE_REBOOTING) { + if (dhcp->tries < REBOOT_TRIES) { + dhcp_reboot(netif); + } else { + dhcp_discover(netif); + } + } +} + +/** + * The renewal period has timed out. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_t1_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_t1_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING)) { + /* just retry to renew - note that the rebind timer (t2) will + * eventually time-out if renew tries fail. */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t1_timeout(): must renew\n")); + /* This slightly different to RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_RENEWING, not DHCP_STATE_BOUND */ + dhcp_renew(netif); + /* Calculate next timeout */ + if (((dhcp->t2_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { + dhcp->t1_renew_time = (u16_t)((dhcp->t2_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * The rebind period has timed out. 
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_t2_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_t2_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING) || (dhcp->state == DHCP_STATE_REBINDING)) { + /* just retry to rebind */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t2_timeout(): must rebind\n")); + /* This slightly different to RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_REBINDING, not DHCP_STATE_BOUND */ + dhcp_rebind(netif); + /* Calculate next timeout */ + if (((dhcp->t0_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { + dhcp->t2_rebind_time = (u16_t)((dhcp->t0_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * Handle a DHCP ACK packet + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_ack(struct netif *netif, struct dhcp_msg *msg_in) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV + u8_t n; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV */ +#if LWIP_DHCP_GET_NTP_SRV + ip4_addr_t ntp_server_addrs[LWIP_DHCP_MAX_NTP_SERVERS]; +#endif + + /* clear options we might not get from the ACK */ + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* lease time given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_LEASE_TIME)) { + /* remember offered lease time */ + dhcp->offered_t0_lease = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_LEASE_TIME); + } + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T1)) { + /* remember given renewal period */ + dhcp->offered_t1_renew = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T1); + } else { + /* calculate safe periods for renewal */ + dhcp->offered_t1_renew = dhcp->offered_t0_lease / 2; + } + + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T2)) { + /* remember given rebind period */ + dhcp->offered_t2_rebind = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T2); + } else { + /* calculate safe periods for rebinding (offered_t0_lease * 0.875 -> 87.5%)*/ + dhcp->offered_t2_rebind = (dhcp->offered_t0_lease * 7U) / 8U; + } + + /* (y)our internet address */ + ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr); + +#if LWIP_DHCP_BOOTP_FILE + /* copy boot server address, + boot file name copied in dhcp_parse_reply if not overloaded */ + ip4_addr_copy(dhcp->offered_si_addr, msg_in->siaddr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* subnet mask given? 
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SUBNET_MASK)) { + /* remember given subnet mask */ + ip4_addr_set_u32(&dhcp->offered_sn_mask, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SUBNET_MASK))); + dhcp->subnet_mask_given = 1; + } else { + dhcp->subnet_mask_given = 0; + } + + /* gateway router */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_ROUTER)) { + ip4_addr_set_u32(&dhcp->offered_gw_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_ROUTER))); + } + +#if LWIP_DHCP_GET_NTP_SRV + /* NTP servers */ + for (n = 0; (n < LWIP_DHCP_MAX_NTP_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n); n++) { + ip4_addr_set_u32(&ntp_server_addrs[n], lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n))); + } + dhcp_set_ntp_servers(n, ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + /* DNS servers */ + for (n = 0; (n < LWIP_DHCP_PROVIDE_DNS_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n); n++) { + ip_addr_t dns_addr; + ip_addr_set_ip4_u32_val(dns_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n))); + dns_setserver(n, &dns_addr); + } +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +} + +/** + * @ingroup dhcp4 + * Set a statically allocated struct dhcp to work with. + * Using this prevents dhcp_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp (uninitialised) dhcp struct allocated by the application + */ +void +dhcp_set_struct(struct netif *netif, struct dhcp *dhcp) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp != NULL", dhcp != NULL); + LWIP_ASSERT("netif already has a struct dhcp set", netif_dhcp_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); +} + +/** + * @ingroup dhcp4 + * Removes a struct dhcp from a netif. + * + * ATTENTION: Only use this when not using dhcp_set_struct() to allocate the + * struct dhcp since the memory is passed back to the heap. + * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp_cleanup(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp_data(netif) != NULL) { + mem_free(netif_dhcp_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL); + } +} + +/** + * @ingroup dhcp4 + * Start DHCP negotiation for a network interface. + * + * If no DHCP client instance was attached to this interface, + * a new client is created first. If a DHCP client instance + * was already present, it restarts negotiation. 
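+ *
+ * A minimal usage sketch, for illustration only (not part of upstream lwIP).
+ * It assumes a NO_SYS=1 build; "ethernetif_init" stands for the port's
+ * link-layer init function and the surrounding loop is application code:
+ *
+ *   struct netif netif;
+ *   netif_add(&netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4,
+ *             NULL, ethernetif_init, netif_input);
+ *   netif_set_default(&netif);
+ *   netif_set_up(&netif);
+ *   dhcp_start(&netif);                        /* returns ERR_OK or ERR_MEM */
+ *   while (!dhcp_supplied_address(&netif)) {
+ *     /* poll the driver here, then service lwIP timers */
+ *     sys_check_timeouts();
+ *   }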
+ * + * @param netif The lwIP network interface + * @return lwIP error code + * - ERR_OK - No error + * - ERR_MEM - Out of memory + */ +err_t +dhcp_start(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif != NULL", (netif != NULL), return ERR_ARG;); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* check MTU of the netif */ + if (netif->mtu < DHCP_MAX_MSG_LEN_MIN_REQUIRED) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): Cannot use this netif with DHCP: MTU is too small\n")); + return ERR_MEM; + } + + /* no DHCP client attached yet? */ + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): mallocing new DHCP client\n")); + dhcp = (struct dhcp *)mem_malloc(sizeof(struct dhcp)); + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): could not allocate dhcp\n")); + return ERR_MEM; + } + + /* store this dhcp client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): allocated dhcp")); + /* already has DHCP client attached */ + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(): restarting DHCP configuration\n")); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + } + /* dhcp is cleared below, no need to reset flag*/ + } + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): starting DHCP configuration\n")); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return ERR_MEM; + } + dhcp->pcb_allocated = 1; + + if (!netif_is_link_up(netif)) { + /* set state INIT and wait for dhcp_network_changed() to call dhcp_discover() */ + dhcp_set_state(dhcp, DHCP_STATE_INIT); + return ERR_OK; + } + + /* (re)start the DHCP negotiation */ + result = dhcp_discover(netif); + if (result != ERR_OK) { + /* free resources allocated above */ + dhcp_release_and_stop(netif); + return ERR_MEM; + } + return result; +} + +/** + * @ingroup dhcp4 + * Inform a DHCP server of our manual configuration. + * + * This informs DHCP servers of our fixed IP address configuration + * by sending an INFORM message. It does not involve DHCP address + * configuration, it is just here to be nice to the network. 
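+ *
+ * Illustrative sketch only (not part of upstream lwIP): a statically
+ * configured interface announcing itself. Addresses are example values.
+ *
+ *   ip4_addr_t ip, mask, gw;
+ *   IP4_ADDR(&ip,   192, 168,   1,  50);
+ *   IP4_ADDR(&mask, 255, 255, 255,   0);
+ *   IP4_ADDR(&gw,   192, 168,   1,   1);
+ *   netif_set_addr(&netif, &ip, &mask, &gw);
+ *   dhcp_inform(&netif);   /* one-shot DHCPINFORM, no lease is maintained */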
+ * + * @param netif The lwIP network interface + */ +void +dhcp_inform(struct netif *netif) +{ + struct dhcp dhcp; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return; + } + + memset(&dhcp, 0, sizeof(struct dhcp)); + dhcp_set_state(&dhcp, DHCP_STATE_INFORMING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, &dhcp, DHCP_INFORM, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, &dhcp, DHCP_STATE_INFORMING, msg_out, DHCP_INFORM, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_inform: INFORMING\n")); + + udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + + pbuf_free(p_out); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_inform: could not allocate DHCP request\n")); + } + + dhcp_dec_pcb_refcount(); /* delete DHCP PCB if not needed any more */ +} + +/** Handle a possible change in the network configuration. + * + * This enters the REBOOTING state to verify that the currently bound + * address is still valid. + */ +void +dhcp_network_changed(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + if (!dhcp) { + return; + } + switch (dhcp->state) { + case DHCP_STATE_REBINDING: + case DHCP_STATE_RENEWING: + case DHCP_STATE_BOUND: + case DHCP_STATE_REBOOTING: + dhcp->tries = 0; + dhcp_reboot(netif); + break; + case DHCP_STATE_OFF: + /* stay off */ + break; + default: + LWIP_ASSERT("invalid dhcp->state", dhcp->state <= DHCP_STATE_BACKING_OFF); + /* INIT/REQUESTING/CHECKING/BACKING_OFF restart with new 'rid' because the + state changes, SELECTING: continue with current 'rid' as we stay in the + same state */ +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + /* ensure we start with short timeouts, even if already discovering */ + dhcp->tries = 0; + dhcp_discover(netif); + break; + } +} + +#if DHCP_DOES_ARP_CHECK +/** + * Match an ARP reply with the offered IP address: + * check whether the offered IP address is not in use using ARP + * + * @param netif the network interface on which the reply was received + * @param addr The IP address we received a reply from + */ +void +dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr) +{ + struct dhcp *dhcp; + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_arp_reply()\n")); + /* is a DHCP client doing an ARP check? */ + if ((dhcp != NULL) && (dhcp->state == DHCP_STATE_CHECKING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_arp_reply(): CHECKING, arp reply for 0x%08"X32_F"\n", + ip4_addr_get_u32(addr))); + /* did a host respond with the address we + were offered by the DHCP server? 
*/ + if (ip4_addr_cmp(addr, &dhcp->offered_ip_addr)) { + /* we will not accept the offered address */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("dhcp_arp_reply(): arp reply matched with offered address, declining\n")); + dhcp_decline(netif); + } + } +} + +/** + * Decline an offered lease. + * + * Tell the DHCP server we do not accept the offered address. + * One reason to decline the lease is when we find out the address + * is already in use by another host (through ARP). + * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_decline(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline()\n")); + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_DECLINE, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_BACKING_OFF, msg_out, DHCP_DECLINE, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* per section 4.4.4, broadcast DECLINE messages */ + result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_decline: BACKING OFF\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_decline: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 10 * 1000; + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} +#endif /* DHCP_DOES_ARP_CHECK */ + + +/** + * Start the DHCP process, discover a DHCP server. 
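+ *
+ * For illustration only (not upstream lwIP text): retransmissions back off
+ * exponentially. tries is incremented before the delay is computed, so the
+ * DISCOVER is repeated after
+ *
+ *   (1 << tries) seconds  ->  2 s, 4 s, 8 s, 16 s, 32 s,
+ *
+ * capped at 60 s once tries reaches 6 (see the msecs calculation at the end
+ * of this function).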
+ * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_discover(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result = ERR_OK; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover()\n")); + + ip4_addr_set_any(&dhcp->offered_ip_addr); + dhcp_set_state(dhcp, DHCP_STATE_SELECTING); + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_DISCOVER, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: making request\n")); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_SELECTING, msg_out, DHCP_DISCOVER, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: sendto(DISCOVER, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER)\n")); + udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: deleting()ing\n")); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover: SELECTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_discover: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->tries >= LWIP_DHCP_AUTOIP_COOP_TRIES && dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_OFF) { + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_ON; + autoip_start(netif); + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + + +/** + * Bind the interface to the offered IP address. 
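+ *
+ * Worked example, for illustration only (not upstream lwIP text): with the
+ * recommended DHCP_COARSE_TIMER_SECS of 60, an offered lease of 86400 s (24 h)
+ * is converted to coarse-timer ticks as
+ *
+ *   t0_timeout = (86400 + 30) / 60 = 1440;   // lease expiry after 1440 ticks
+ *
+ * and, unless the server supplied options 58/59, t1 (renew) and t2 (rebind)
+ * default to 1/2 and 7/8 of the lease, i.e. 720 and 1260 ticks here.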
+ * + * @param netif network interface to bind to the offered address + */ +static void +dhcp_bind(struct netif *netif) +{ + u32_t timeout; + struct dhcp *dhcp; + ip4_addr_t sn_mask, gw_addr; + LWIP_ERROR("dhcp_bind: netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_bind: dhcp != NULL", (dhcp != NULL), return;); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* reset time used of lease */ + dhcp->lease_used = 0; + + if (dhcp->offered_t0_lease != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t0 renewal timer %"U32_F" secs\n", dhcp->offered_t0_lease)); + timeout = (dhcp->offered_t0_lease + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t0_timeout = (u16_t)timeout; + if (dhcp->t0_timeout == 0) { + dhcp->t0_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t0_lease * 1000)); + } + + /* temporary DHCP lease? */ + if (dhcp->offered_t1_renew != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t1 renewal timer %"U32_F" secs\n", dhcp->offered_t1_renew)); + timeout = (dhcp->offered_t1_renew + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t1_timeout = (u16_t)timeout; + if (dhcp->t1_timeout == 0) { + dhcp->t1_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t1_renew * 1000)); + dhcp->t1_renew_time = dhcp->t1_timeout; + } + /* set renewal period timer */ + if (dhcp->offered_t2_rebind != 0xffffffffUL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t2 rebind timer %"U32_F" secs\n", dhcp->offered_t2_rebind)); + timeout = (dhcp->offered_t2_rebind + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t2_timeout = (u16_t)timeout; + if (dhcp->t2_timeout == 0) { + dhcp->t2_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t2_rebind * 1000)); + dhcp->t2_rebind_time = dhcp->t2_timeout; + } + + /* If we have sub 1 minute lease, t2 and t1 will kick in at the same time. */ + if ((dhcp->t1_timeout >= dhcp->t2_timeout) && (dhcp->t2_timeout > 0)) { + dhcp->t1_timeout = 0; + } + + if (dhcp->subnet_mask_given) { + /* copy offered network mask */ + ip4_addr_copy(sn_mask, dhcp->offered_sn_mask); + } else { + /* subnet mask not given, choose a safe subnet mask given the network class */ + u8_t first_octet = ip4_addr1(&dhcp->offered_ip_addr); + if (first_octet <= 127) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xff000000UL)); + } else if (first_octet >= 192) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffffff00UL)); + } else { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffff0000UL)); + } + } + + ip4_addr_copy(gw_addr, dhcp->offered_gw_addr); + /* gateway address not given? 
*/ + if (ip4_addr_isany_val(gw_addr)) { + /* copy network address */ + ip4_addr_get_network(&gw_addr, &dhcp->offered_ip_addr, &sn_mask); + /* use first host address on network as gateway */ + ip4_addr_set_u32(&gw_addr, ip4_addr_get_u32(&gw_addr) | PP_HTONL(0x00000001UL)); + } + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_bind(): IP: 0x%08"X32_F" SN: 0x%08"X32_F" GW: 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr), ip4_addr_get_u32(&sn_mask), ip4_addr_get_u32(&gw_addr))); + /* netif is now bound to DHCP leased address - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BOUND); + + netif_set_addr(netif, &dhcp->offered_ip_addr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ +} + +/** + * @ingroup dhcp4 + * Renew an existing DHCP lease at the involved DHCP server. + * + * @param netif network interface which must renew its lease + */ +err_t +dhcp_renew(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_renew()\n")); + dhcp_set_state(dhcp, DHCP_STATE_RENEWING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_RENEWING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + result = udp_sendto_if(dhcp_pcb, p_out, &dhcp->server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew: RENEWING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_renew: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + /* back-off on retries, but to a maximum of 20 seconds */ + msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 2000 : 20 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Rebind with a DHCP server for an existing DHCP lease. 
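When the server omits the subnet-mask option, dhcp_bind() above falls back to a classful mask derived from the first octet of the offered address, and when no router option was given it guesses the first host address of that network as the gateway. A small sketch of the same rules in host byte order (the real code works on network-order values; illustration only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

static uint32_t classful_mask(uint8_t first_octet) {
  if (first_octet <= 127) return 0xff000000u;   /* class A -> /8  */
  if (first_octet >= 192) return 0xffffff00u;   /* class C -> /24 */
  return 0xffff0000u;                           /* class B -> /16 */
}

int main(void) {
  uint32_t ip = (192u << 24) | (168u << 16) | (1u << 8) | 50u;   /* offered 192.168.1.50 */
  uint32_t mask = classful_mask((uint8_t)(ip >> 24));
  uint32_t gw = (ip & mask) | 1u;               /* first host address on the network */
  printf("mask=0x%08lx gw=0x%08lx\n", (unsigned long)mask, (unsigned long)gw);
  /* prints: mask=0xffffff00 gw=0xc0a80101 (192.168.1.1) */
  return 0;
}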
+ * + * @param netif network interface which must rebind with a DHCP server + */ +static err_t +dhcp_rebind(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBINDING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBINDING, msg_out, DHCP_DISCOVER, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* broadcast to server */ + result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind: REBINDING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_rebind: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)(dhcp->tries < 10 ? 
dhcp->tries * 1000 : 10 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Enter REBOOTING state to verify an existing lease + * + * @param netif network interface which must reboot + */ +static err_t +dhcp_reboot(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBOOTING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN_MIN_REQUIRED); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBOOTING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* broadcast to server */ + result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot: REBOOTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_reboot: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 1000 : 10 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * @ingroup dhcp4 + * Release a DHCP lease and stop DHCP statemachine (and AUTOIP if LWIP_DHCP_AUTOIP_COOP). + * + * @param netif network interface + */ +void +dhcp_release_and_stop(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + ip_addr_t server_ip_addr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_release_and_stop()\n")); + if (dhcp == NULL) { + return; + } + + /* already off? 
-> nothing to do */ + if (dhcp->state == DHCP_STATE_OFF) { + return; + } + + ip_addr_copy(server_ip_addr, dhcp->server_ip_addr); + + /* clean old DHCP offer */ + ip_addr_set_zero_ip4(&dhcp->server_ip_addr); + ip4_addr_set_zero(&dhcp->offered_ip_addr); + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + dhcp->offered_t0_lease = dhcp->offered_t1_renew = dhcp->offered_t2_rebind = 0; + dhcp->t1_renew_time = dhcp->t2_rebind_time = dhcp->lease_used = dhcp->t0_timeout = 0; + + /* send release message when current IP was assigned via DHCP */ + if (dhcp_supplied_address(netif)) { + /* create and initialize the DHCP message header */ + struct pbuf *p_out; + u16_t options_out_len; + p_out = dhcp_create_msg(netif, dhcp, DHCP_RELEASE, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&server_ip_addr)))); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, dhcp->state, msg_out, DHCP_RELEASE, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + udp_sendto_if(dhcp_pcb, p_out, &server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_release: RELEASED, DHCP_STATE_OFF\n")); + } else { + /* sending release failed, but that's not a problem since the correct behaviour of dhcp does not rely on release */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_release: could not allocate DHCP request\n")); + } + } + + /* remove IP address from interface (prevents routing from selecting this interface) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + dhcp_set_state(dhcp, DHCP_STATE_OFF); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + dhcp->pcb_allocated = 0; + } +} + +/** + * @ingroup dhcp4 + * This function calls dhcp_release_and_stop() internally. + * @deprecated Use dhcp_release_and_stop() instead. + */ +err_t +dhcp_release(struct netif *netif) +{ + dhcp_release_and_stop(netif); + return ERR_OK; +} + +/** + * @ingroup dhcp4 + * This function calls dhcp_release_and_stop() internally. + * @deprecated Use dhcp_release_and_stop() instead. + */ +void +dhcp_stop(struct netif *netif) +{ + dhcp_release_and_stop(netif); +} + +/* + * Set the DHCP state of a DHCP client. + * + * If the state changed, reset the number of tries. + */ +static void +dhcp_set_state(struct dhcp *dhcp, u8_t new_state) +{ + if (new_state != dhcp->state) { + dhcp->state = new_state; + dhcp->tries = 0; + dhcp->request_timeout = 0; + } +} + +/* + * Concatenate an option type and length field to the outgoing + * DHCP message. 
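For context on the public entry points collected above: an application normally brackets the client with dhcp_start() and dhcp_release_and_stop(); dhcp_release() and dhcp_stop() remain only as deprecated shims around the same call. A minimal application-side sketch, assuming it runs in the tcpip thread (LWIP_ASSERT_CORE_LOCKED applies); not part of the patch:

#include "lwip/dhcp.h"
#include "lwip/netif.h"

void example_dhcp_session(struct netif *netif) {
  dhcp_start(netif);                    /* begin the DISCOVER/REQUEST state machine */

  /* ... later, once dhcp_supplied_address(netif) reports 1, the lease is usable ... */

  dhcp_release_and_stop(netif);         /* send RELEASE (if bound) and stop the client */
  /* dhcp_release(netif) / dhcp_stop(netif) are deprecated wrappers for the same call */
}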
+ * + */ +static u16_t +dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len) +{ + LWIP_ASSERT("dhcp_option: options_out_len + 2 + option_len <= DHCP_OPTIONS_LEN", options_out_len + 2U + option_len <= DHCP_OPTIONS_LEN); + options[options_out_len++] = option_type; + options[options_out_len++] = option_len; + return options_out_len; +} +/* + * Concatenate a single byte to the outgoing DHCP message. + * + */ +static u16_t +dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value) +{ + LWIP_ASSERT("dhcp_option_byte: options_out_len < DHCP_OPTIONS_LEN", options_out_len < DHCP_OPTIONS_LEN); + options[options_out_len++] = value; + return options_out_len; +} + +static u16_t +dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value) +{ + LWIP_ASSERT("dhcp_option_short: options_out_len + 2 <= DHCP_OPTIONS_LEN", options_out_len + 2U <= DHCP_OPTIONS_LEN); + options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + options[options_out_len++] = (u8_t) (value & 0x00ffU); + return options_out_len; +} + +static u16_t +dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value) +{ + LWIP_ASSERT("dhcp_option_long: options_out_len + 4 <= DHCP_OPTIONS_LEN", options_out_len + 4U <= DHCP_OPTIONS_LEN); + options[options_out_len++] = (u8_t)((value & 0xff000000UL) >> 24); + options[options_out_len++] = (u8_t)((value & 0x00ff0000UL) >> 16); + options[options_out_len++] = (u8_t)((value & 0x0000ff00UL) >> 8); + options[options_out_len++] = (u8_t)((value & 0x000000ffUL)); + return options_out_len; +} + +#if LWIP_NETIF_HOSTNAME +static u16_t +dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif) +{ + if (netif->hostname != NULL) { + size_t namelen = strlen(netif->hostname); + if (namelen > 0) { + size_t len; + const char *p = netif->hostname; + /* Shrink len to available bytes (need 2 bytes for OPTION_HOSTNAME + and 1 byte for trailer) */ + size_t available = DHCP_OPTIONS_LEN - options_out_len - 3; + LWIP_ASSERT("DHCP: hostname is too long!", namelen <= available); + len = LWIP_MIN(namelen, available); + LWIP_ASSERT("DHCP: hostname is too long!", len <= 0xFF); + options_out_len = dhcp_option(options_out_len, options, DHCP_OPTION_HOSTNAME, (u8_t)len); + while (len--) { + options_out_len = dhcp_option_byte(options_out_len, options, *p++); + } + } + } + return options_out_len; +} +#endif /* LWIP_NETIF_HOSTNAME */ + +/** + * Extract the DHCP message and the DHCP options. + * + * Extract the DHCP message and the DHCP options, each into a contiguous + * piece of memory. As a DHCP message is variable sized by its options, + * and also allows overriding some fields for options, the easy approach + * is to first unfold the options into a contiguous piece of memory, and + * use that further on. 
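To make the byte layout produced by the helpers above concrete: each option is appended as a code byte, a length byte, then the value in network byte order. Inside this file, a "requested IP address" option (code 50) carrying 192.168.1.100 is built roughly like this (fragment for illustration, with a local buffer standing in for msg_out->options):

u8_t options[DHCP_OPTIONS_LEN];
u16_t len = 0;

len = dhcp_option(len, options, DHCP_OPTION_REQUESTED_IP, 4);           /* bytes: 0x32 0x04          */
len = dhcp_option_long(len, options, 0xC0A80164UL /* 192.168.1.100 */); /* bytes: 0xC0 0xA8 0x01 0x64 */
/* options[0..5] now holds: 32 04 C0 A8 01 64 */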
+ * + */ +static err_t +dhcp_parse_reply(struct pbuf *p, struct dhcp *dhcp) +{ + u8_t *options; + u16_t offset; + u16_t offset_max; + u16_t options_idx; + u16_t options_idx_max; + struct pbuf *q; + int parse_file_as_options = 0; + int parse_sname_as_options = 0; + struct dhcp_msg *msg_in; +#if LWIP_DHCP_BOOTP_FILE + int file_overloaded = 0; +#endif + + LWIP_UNUSED_ARG(dhcp); + + /* clear received options */ + dhcp_clear_all_options(dhcp); + /* check that beginning of dhcp_msg (up to and including chaddr) is in first pbuf */ + if (p->len < DHCP_SNAME_OFS) { + return ERR_BUF; + } + msg_in = (struct dhcp_msg *)p->payload; +#if LWIP_DHCP_BOOTP_FILE + /* clear boot file name */ + dhcp->boot_file_name[0] = 0; +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* parse options */ + + /* start with options field */ + options_idx = DHCP_OPTIONS_OFS; + /* parse options to the end of the received packet */ + options_idx_max = p->tot_len; +again: + q = p; + while ((q != NULL) && (options_idx >= q->len)) { + options_idx = (u16_t)(options_idx - q->len); + options_idx_max = (u16_t)(options_idx_max - q->len); + q = q->next; + } + if (q == NULL) { + return ERR_BUF; + } + offset = options_idx; + offset_max = options_idx_max; + options = (u8_t *)q->payload; + /* at least 1 byte to read and no end marker, then at least 3 bytes to read? */ + while ((q != NULL) && (offset < offset_max) && (options[offset] != DHCP_OPTION_END)) { + u8_t op = options[offset]; + u8_t len; + u8_t decode_len = 0; + int decode_idx = -1; + u16_t val_offset = (u16_t)(offset + 2); + if (val_offset < offset) { + /* overflow */ + return ERR_BUF; + } + /* len byte might be in the next pbuf */ + if ((offset + 1) < q->len) { + len = options[offset + 1]; + } else { + len = (q->next != NULL ? ((u8_t *)q->next->payload)[0] : 0); + } + /* LWIP_DEBUGF(DHCP_DEBUG, ("msg_offset=%"U16_F", q->len=%"U16_F, msg_offset, q->len)); */ + decode_len = len; + switch (op) { + /* case(DHCP_OPTION_END): handled above */ + case (DHCP_OPTION_PAD): + /* special option: no len encoded */ + decode_len = len = 0; + /* will be increased below */ + break; + case (DHCP_OPTION_SUBNET_MASK): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SUBNET_MASK; + break; + case (DHCP_OPTION_ROUTER): + decode_len = 4; /* only copy the first given router */ + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_ROUTER; + break; +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + case (DHCP_OPTION_DNS_SERVER): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of DNS servers */ + decode_len = LWIP_MIN(len, 4 * DNS_MAX_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_DNS_SERVER; + break; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ + case (DHCP_OPTION_LEASE_TIME): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_LEASE_TIME; + break; +#if LWIP_DHCP_GET_NTP_SRV + case (DHCP_OPTION_NTP): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of NTP servers */ + decode_len = LWIP_MIN(len, 4 * LWIP_DHCP_MAX_NTP_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_NTP_SERVER; + break; +#endif /* LWIP_DHCP_GET_NTP_SRV*/ + case (DHCP_OPTION_OVERLOAD): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + /* decode 
overload only in options, not in file/sname: invalid packet */ + LWIP_ERROR("overload in file/sname", options_idx == DHCP_OPTIONS_OFS, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_OVERLOAD; + break; + case (DHCP_OPTION_MESSAGE_TYPE): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_MSG_TYPE; + break; + case (DHCP_OPTION_SERVER_ID): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SERVER_ID; + break; + case (DHCP_OPTION_T1): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T1; + break; + case (DHCP_OPTION_T2): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T2; + break; + default: + decode_len = 0; + LWIP_DEBUGF(DHCP_DEBUG, ("skipping option %"U16_F" in options\n", (u16_t)op)); + LWIP_HOOK_DHCP_PARSE_OPTION(ip_current_netif(), dhcp, dhcp->state, msg_in, + dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) ? (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) : 0, + op, len, q, val_offset); + break; + } + if (op == DHCP_OPTION_PAD) { + offset++; + } else { + if (offset + len + 2 > 0xFFFF) { + /* overflow */ + return ERR_BUF; + } + offset = (u16_t)(offset + len + 2); + if (decode_len > 0) { + u32_t value = 0; + u16_t copy_len; +decode_next: + LWIP_ASSERT("check decode_idx", decode_idx >= 0 && decode_idx < DHCP_OPTION_IDX_MAX); + if (!dhcp_option_given(dhcp, decode_idx)) { + copy_len = LWIP_MIN(decode_len, 4); + if (pbuf_copy_partial(q, &value, copy_len, val_offset) != copy_len) { + return ERR_BUF; + } + if (decode_len > 4) { + /* decode more than one u32_t */ + u16_t next_val_offset; + LWIP_ERROR("decode_len %% 4 == 0", decode_len % 4 == 0, return ERR_VAL;); + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, lwip_htonl(value)); + decode_len = (u8_t)(decode_len - 4); + next_val_offset = (u16_t)(val_offset + 4); + if (next_val_offset < val_offset) { + /* overflow */ + return ERR_BUF; + } + val_offset = next_val_offset; + decode_idx++; + goto decode_next; + } else if (decode_len == 4) { + value = lwip_ntohl(value); + } else { + LWIP_ERROR("invalid decode_len", decode_len == 1, return ERR_VAL;); + value = ((u8_t *)&value)[0]; + } + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, value); + } + } + } + if (offset >= q->len) { + offset = (u16_t)(offset - q->len); + offset_max = (u16_t)(offset_max - q->len); + if (offset < offset_max) { + q = q->next; + LWIP_ERROR("next pbuf was null", q != NULL, return ERR_VAL;); + options = (u8_t *)q->payload; + } else { + /* We've run out of bytes, probably no end marker. Don't proceed. */ + return ERR_BUF; + } + } + } + /* is this an overloaded message? 
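One detail of the decode loop above worth spelling out: options whose payload is longer than 4 bytes (the DNS and NTP server lists) are split into consecutive u32 slots, one per DHCP_OPTION_IDX_* index. A sketch of the result for a DNS option carrying two servers (example addresses; values end up in host byte order after the lwip_htonl/ntohl conversions in the loop):

/* received option 6 (DNS servers), len = 8: 06 08  08 08 08 08  08 08 04 04
 * after dhcp_parse_reply():
 *   dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER)     == 0x08080808  (8.8.8.8)
 *   dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER + 1) == 0x08080404  (8.8.4.4)
 */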
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_OVERLOAD)) { + u32_t overload = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_OVERLOAD); + dhcp_clear_option(dhcp, DHCP_OPTION_IDX_OVERLOAD); + if (overload == DHCP_OVERLOAD_FILE) { + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded file field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME) { + parse_sname_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME_FILE) { + parse_sname_as_options = 1; + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname and file field\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("invalid overload option: %d\n", (int)overload)); + } + } + if (parse_file_as_options) { + /* if both are overloaded, parse file first and then sname (RFC 2131 ch. 4.1) */ + parse_file_as_options = 0; + options_idx = DHCP_FILE_OFS; + options_idx_max = DHCP_FILE_OFS + DHCP_FILE_LEN; +#if LWIP_DHCP_BOOTP_FILE + file_overloaded = 1; +#endif + goto again; + } else if (parse_sname_as_options) { + parse_sname_as_options = 0; + options_idx = DHCP_SNAME_OFS; + options_idx_max = DHCP_SNAME_OFS + DHCP_SNAME_LEN; + goto again; + } +#if LWIP_DHCP_BOOTP_FILE + if (!file_overloaded) { + /* only do this for ACK messages */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) && + (dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) == DHCP_ACK)) + /* copy bootp file name, don't care for sname (server hostname) */ + if (pbuf_copy_partial(p, dhcp->boot_file_name, DHCP_FILE_LEN-1, DHCP_FILE_OFS) != (DHCP_FILE_LEN-1)) { + return ERR_BUF; + } + /* make sure the string is really NULL-terminated */ + dhcp->boot_file_name[DHCP_FILE_LEN-1] = 0; + } +#endif /* LWIP_DHCP_BOOTP_FILE */ + return ERR_OK; +} + +/** + * If an incoming DHCP message is in response to us, then trigger the state machine + */ +static void +dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp *dhcp = netif_dhcp_data(netif); + struct dhcp_msg *reply_msg = (struct dhcp_msg *)p->payload; + u8_t msg_type; + u8_t i; + struct dhcp_msg *msg_in; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCP message from netif that does not have DHCP enabled? 
-> not interested */ + if ((dhcp == NULL) || (dhcp->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ASSERT("invalid server address type", IP_IS_V4(addr)); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_recv(pbuf = %p) from DHCP server %"U16_F".%"U16_F".%"U16_F".%"U16_F" port %"U16_F"\n", (void *)p, + ip4_addr1_16(ip_2_ip4(addr)), ip4_addr2_16(ip_2_ip4(addr)), ip4_addr3_16(ip_2_ip4(addr)), ip4_addr4_16(ip_2_ip4(addr)), port)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + if (p->len < DHCP_MIN_REPLY_LEN) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + if (reply_msg->op != DHCP_BOOTREPLY) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("not a DHCP reply message, but type %"U16_F"\n", (u16_t)reply_msg->op)); + goto free_pbuf_and_return; + } + /* iterate through hardware address and match against DHCP message */ + for (i = 0; i < netif->hwaddr_len && i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) { + if (netif->hwaddr[i] != reply_msg->chaddr[i]) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("netif->hwaddr[%"U16_F"]==%02"X16_F" != reply_msg->chaddr[%"U16_F"]==%02"X16_F"\n", + (u16_t)i, (u16_t)netif->hwaddr[i], (u16_t)i, (u16_t)reply_msg->chaddr[i])); + goto free_pbuf_and_return; + } + } + /* match transaction ID against what we expected */ + if (lwip_ntohl(reply_msg->xid) != dhcp->xid) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!=dhcp->xid(%"X32_F")\n", lwip_ntohl(reply_msg->xid), dhcp->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? */ + if (dhcp_parse_reply(p, dhcp) != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCP message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("searching DHCP_OPTION_MESSAGE_TYPE\n")); + /* obtain pointer to DHCP message type */ + if (!dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP_OPTION_MESSAGE_TYPE option not found\n")); + goto free_pbuf_and_return; + } + + msg_in = (struct dhcp_msg *)p->payload; + /* read DHCP message type */ + msg_type = (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE); + /* message type is DHCP ACK? */ + if (msg_type == DHCP_ACK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_ACK received\n")); + /* in requesting state? */ + if (dhcp->state == DHCP_STATE_REQUESTING) { + dhcp_handle_ack(netif, msg_in); +#if DHCP_DOES_ARP_CHECK + if ((netif->flags & NETIF_FLAG_ETHARP) != 0) { + /* check if the acknowledged lease address is already in use */ + dhcp_check(netif); + } else { + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); + } +#else + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); +#endif + } + /* already bound to the given lease address? 
*/ + else if ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REBINDING) || + (dhcp->state == DHCP_STATE_RENEWING)) { + dhcp_handle_ack(netif, msg_in); + dhcp_bind(netif); + } + } + /* received a DHCP_NAK in appropriate state? */ + else if ((msg_type == DHCP_NAK) && + ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REQUESTING) || + (dhcp->state == DHCP_STATE_REBINDING) || (dhcp->state == DHCP_STATE_RENEWING ))) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_NAK received\n")); + dhcp_handle_nak(netif); + } + /* received a DHCP_OFFER in DHCP_STATE_SELECTING state? */ + else if ((msg_type == DHCP_OFFER) && (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_OFFER received in DHCP_STATE_SELECTING state\n")); + /* remember offered lease */ + dhcp_handle_offer(netif, msg_in); + } + +free_pbuf_and_return: + pbuf_free(p); +} + +/** + * Create a DHCP request, fill in common headers + * + * @param netif the netif under DHCP control + * @param dhcp dhcp control struct + * @param message_type message type of the request + */ +static struct pbuf * +dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len) +{ + u16_t i; + struct pbuf *p_out; + struct dhcp_msg *msg_out; + u16_t options_out_len_loc; + +#ifndef DHCP_GLOBAL_XID + /** default global transaction identifier starting value (easy to match + * with a packet analyser). We simply increment for each new request. + * Predefine DHCP_GLOBAL_XID to a better value or a function call to generate one + * at runtime, any supporting function prototypes can be defined in DHCP_GLOBAL_XID_HEADER */ +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + static u32_t xid; +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + static u32_t xid = 0xABCD0000; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ +#else + if (!xid_initialised) { + xid = DHCP_GLOBAL_XID; + xid_initialised = !xid_initialised; + } +#endif + LWIP_ERROR("dhcp_create_msg: netif != NULL", (netif != NULL), return NULL;); + LWIP_ERROR("dhcp_create_msg: dhcp != NULL", (dhcp != NULL), return NULL;); + p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp_msg), PBUF_RAM); + if (p_out == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_create_msg(): could not allocate pbuf\n")); + return NULL; + } + LWIP_ASSERT("dhcp_create_msg: check that first pbuf can hold struct dhcp_msg", + (p_out->len >= sizeof(struct dhcp_msg))); + + /* DHCP_REQUEST should reuse 'xid' from DHCPOFFER */ + if ((message_type != DHCP_REQUEST) || (dhcp->state == DHCP_STATE_REBOOTING)) { + /* reuse transaction identifier in retransmissions */ + if (dhcp->tries == 0) { +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + xid = LWIP_RAND(); +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + xid++; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + } + dhcp->xid = xid; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", xid)); + + msg_out = (struct dhcp_msg *)p_out->payload; + memset(msg_out, 0, sizeof(struct dhcp_msg)); + + msg_out->op = DHCP_BOOTREQUEST; + /* @todo: make link layer independent */ + msg_out->htype = LWIP_IANA_HWTYPE_ETHERNET; + msg_out->hlen = netif->hwaddr_len; + msg_out->xid = lwip_htonl(dhcp->xid); + /* we don't need the broadcast flag since we can receive unicast traffic + before being fully configured! 
*/ + /* set ciaddr to netif->ip_addr based on message_type and state */ + if ((message_type == DHCP_INFORM) || (message_type == DHCP_DECLINE) || (message_type == DHCP_RELEASE) || + ((message_type == DHCP_REQUEST) && /* DHCP_STATE_BOUND not used for sending! */ + ((dhcp->state == DHCP_STATE_RENEWING) || dhcp->state == DHCP_STATE_REBINDING))) { + ip4_addr_copy(msg_out->ciaddr, *netif_ip4_addr(netif)); + } + for (i = 0; i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) { + /* copy netif hardware address (padded with zeroes through memset already) */ + msg_out->chaddr[i] = netif->hwaddr[i]; + } + msg_out->cookie = PP_HTONL(DHCP_MAGIC_COOKIE); + /* Add option MESSAGE_TYPE */ + options_out_len_loc = dhcp_option(0, msg_out->options, DHCP_OPTION_MESSAGE_TYPE, DHCP_OPTION_MESSAGE_TYPE_LEN); + options_out_len_loc = dhcp_option_byte(options_out_len_loc, msg_out->options, message_type); + if (options_out_len) { + *options_out_len = options_out_len_loc; + } + return p_out; +} + +/** + * Add a DHCP message trailer + * + * Adds the END option to the DHCP message, and if + * necessary, up to three padding bytes. + */ +static void +dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out) +{ + options[options_out_len++] = DHCP_OPTION_END; + /* packet is too small, or not 4 byte aligned? */ + while (((options_out_len < DHCP_MIN_OPTIONS_LEN) || (options_out_len & 3)) && + (options_out_len < DHCP_OPTIONS_LEN)) { + /* add a fill/padding byte */ + options[options_out_len++] = 0; + } + /* shrink the pbuf to the actual content length */ + pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + options_out_len)); +} + +/** check if DHCP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if DHCP supplied netif->ip_addr (states BOUND or RENEWING), + * 0 otherwise + */ +u8_t +dhcp_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_dhcp_data(netif) != NULL)) { + struct dhcp *dhcp = netif_dhcp_data(netif); + return (dhcp->state == DHCP_STATE_BOUND) || (dhcp->state == DHCP_STATE_RENEWING) || + (dhcp->state == DHCP_STATE_REBINDING); + } + return 0; +} + +#endif /* LWIP_IPV4 && LWIP_DHCP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c new file mode 100644 index 000000000..442aac08a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c @@ -0,0 +1,1204 @@ +/** + * @file + * Address Resolution Protocol module for IP over Ethernet + * + * Functionally, ARP is divided into two parts. The first maps an IP address + * to a physical address when sending a packet, and the second part answers + * requests from other machines for our physical address. + * + * This implementation complies with RFC 826 (Ethernet ARP). It supports + * Gratuitious ARP from RFC3220 (IP Mobility Support for IPv4) section 4.6 + * if an interface calls etharp_gratuitous(our_netif) upon address change. + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/etharp.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/prot/iana.h" +#include "netif/ethernet.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Re-request a used ARP entry 1 minute before it would expire to prevent + * breaking a steadily used connection because the ARP entry timed out. */ +#define ARP_AGE_REREQUEST_USED_UNICAST (ARP_MAXAGE - 30) +#define ARP_AGE_REREQUEST_USED_BROADCAST (ARP_MAXAGE - 15) + +/** the time an ARP entry stays pending after first request, + * for ARP_TMR_INTERVAL = 1000, this is + * 10 seconds. + * + * @internal Keep this number at least 2, otherwise it might + * run out instantly if the timeout occurs directly after a request. + */ +#define ARP_MAXPENDING 5 + +/** ARP states */ +enum etharp_state { + ETHARP_STATE_EMPTY = 0, + ETHARP_STATE_PENDING, + ETHARP_STATE_STABLE, + ETHARP_STATE_STABLE_REREQUESTING_1, + ETHARP_STATE_STABLE_REREQUESTING_2 +#if ETHARP_SUPPORT_STATIC_ENTRIES + , ETHARP_STATE_STATIC +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ +}; + +struct etharp_entry { +#if ARP_QUEUEING + /** Pointer to queue of pending outgoing packets on this ARP entry. */ + struct etharp_q_entry *q; +#else /* ARP_QUEUEING */ + /** Pointer to a single pending outgoing packet on this ARP entry. */ + struct pbuf *q; +#endif /* ARP_QUEUEING */ + ip4_addr_t ipaddr; + struct netif *netif; + struct eth_addr ethaddr; + u16_t ctime; + u8_t state; +}; + +static struct etharp_entry arp_table[ARP_TABLE_SIZE]; + +#if !LWIP_NETIF_HWADDRHINT +static netif_addr_idx_t etharp_cached_entry; +#endif /* !LWIP_NETIF_HWADDRHINT */ + +/** Try hard to create a new entry - we want the IP address to appear in + the cache (even if this means removing an active entry or so). 
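With etharp_tmr() called once per second, and assuming the default ARP_MAXAGE of 300 ticks (5 minutes; the actual value comes from opt.h/lwipopts.h), the re-request thresholds defined above mean a steadily used entry is refreshed well before it would be dropped. Worked numbers for those assumed defaults (illustration only, not part of the patch):

#define EXAMPLE_ARP_MAXAGE            300                          /* ticks == seconds */
#define EXAMPLE_REREQUEST_UNICAST     (EXAMPLE_ARP_MAXAGE - 30)    /* 270 s: unicast re-request */
#define EXAMPLE_REREQUEST_BROADCAST   (EXAMPLE_ARP_MAXAGE - 15)    /* 285 s: broadcast re-request */
/* ctime >= 270 -> unicast refresh, ctime >= 285 -> broadcast refresh,
   ctime >= 300 -> entry expired and freed by etharp_tmr() */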
*/ +#define ETHARP_FLAG_TRY_HARD 1 +#define ETHARP_FLAG_FIND_ONLY 2 +#if ETHARP_SUPPORT_STATIC_ENTRIES +#define ETHARP_FLAG_STATIC_ENTRY 4 +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +#if LWIP_NETIF_HWADDRHINT +#define ETHARP_SET_ADDRHINT(netif, addrhint) do { if (((netif) != NULL) && ((netif)->hints != NULL)) { \ + (netif)->hints->addr_hint = (addrhint); }} while(0) +#else /* LWIP_NETIF_HWADDRHINT */ +#define ETHARP_SET_ADDRHINT(netif, addrhint) (etharp_cached_entry = (addrhint)) +#endif /* LWIP_NETIF_HWADDRHINT */ + + +/* Check for maximum ARP_TABLE_SIZE */ +#if (ARP_TABLE_SIZE > NETIF_ADDR_IDX_MAX) +#error "ARP_TABLE_SIZE must fit in an s16_t, you have to reduce it in your lwipopts.h" +#endif + + +static err_t etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr); +static err_t etharp_raw(struct netif *netif, + const struct eth_addr *ethsrc_addr, const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode); + +#if ARP_QUEUEING +/** + * Free a complete queue of etharp entries + * + * @param q a qeueue of etharp_q_entry's to free + */ +static void +free_etharp_q(struct etharp_q_entry *q) +{ + struct etharp_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ARP_QUEUE, r); + } +} +#else /* ARP_QUEUEING */ + +/** Compatibility define: free the queued pbuf */ +#define free_etharp_q(q) pbuf_free(q) + +#endif /* ARP_QUEUEING */ + +/** Clean up ARP table entries */ +static void +etharp_free_entry(int i) +{ + /* remove from SNMP ARP index tree */ + mib2_remove_arp_entry(arp_table[i].netif, &arp_table[i].ipaddr); + /* and empty packet queue */ + if (arp_table[i].q != NULL) { + /* remove all queued packets */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_free_entry: freeing entry %"U16_F", packet queue %p.\n", (u16_t)i, (void *)(arp_table[i].q))); + free_etharp_q(arp_table[i].q); + arp_table[i].q = NULL; + } + /* recycle entry for re-use */ + arp_table[i].state = ETHARP_STATE_EMPTY; +#ifdef LWIP_DEBUG + /* for debugging, clean out the complete entry */ + arp_table[i].ctime = 0; + arp_table[i].netif = NULL; + ip4_addr_set_zero(&arp_table[i].ipaddr); + arp_table[i].ethaddr = ethzero; +#endif /* LWIP_DEBUG */ +} + +/** + * Clears expired entries in the ARP table. + * + * This function should be called every ARP_TMR_INTERVAL milliseconds (1 second), + * in order to expire entries in the ARP table. + */ +void +etharp_tmr(void) +{ + int i; + + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer\n")); + /* remove expired entries from the ARP table */ + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if (state != ETHARP_STATE_EMPTY +#if ETHARP_SUPPORT_STATIC_ENTRIES + && (state != ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + ) { + arp_table[i].ctime++; + if ((arp_table[i].ctime >= ARP_MAXAGE) || + ((arp_table[i].state == ETHARP_STATE_PENDING) && + (arp_table[i].ctime >= ARP_MAXPENDING))) { + /* pending or stable entry has become old! */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer: expired %s entry %d.\n", + arp_table[i].state >= ETHARP_STATE_STABLE ? "stable" : "pending", i)); + /* clean up entries that have just been expired */ + etharp_free_entry(i); + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_1) { + /* Don't send more than one request every 2 seconds. 
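The two REREQUESTING states driven by etharp_tmr() above implement the "at most one refresh every ~2 s" rule; for a busy, ageing entry the transitions look like this (sketch, not part of the patch):

/* t = n   : outgoing packet, ctime >= re-request threshold
 *           -> etharp_request(), state = ETHARP_STATE_STABLE_REREQUESTING_1
 * t = n+1 : etharp_tmr() -> state = ETHARP_STATE_STABLE_REREQUESTING_2 (no new request sent)
 * t = n+2 : etharp_tmr() -> state = ETHARP_STATE_STABLE (next packet may trigger a request again)
 */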
*/ + arp_table[i].state = ETHARP_STATE_STABLE_REREQUESTING_2; + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_2) { + /* Reset state to stable, so that the next transmitted packet will + re-send an ARP request. */ + arp_table[i].state = ETHARP_STATE_STABLE; + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* still pending, resend an ARP query */ + etharp_request(arp_table[i].netif, &arp_table[i].ipaddr); + } + } + } +} + +/** + * Search the ARP table for a matching or new entry. + * + * If an IP address is given, return a pending or stable ARP entry that matches + * the address. If no match is found, create a new entry with this address set, + * but in state ETHARP_EMPTY. The caller must check and possibly change the + * state of the returned entry. + * + * If ipaddr is NULL, return a initialized new entry in state ETHARP_EMPTY. + * + * In all cases, attempt to create new entries from an empty entry. If no + * empty entries are available and ETHARP_FLAG_TRY_HARD flag is set, recycle + * old entries. Heuristic choose the least important entry for recycling. + * + * @param ipaddr IP address to find in ARP cache, or to add if not found. + * @param flags See @ref etharp_state + * @param netif netif related to this address (used for NETIF_HWADDRHINT) + * + * @return The ARP entry index that matched or is created, ERR_MEM if no + * entry is found or could be recycled. + */ +static s16_t +etharp_find_entry(const ip4_addr_t *ipaddr, u8_t flags, struct netif *netif) +{ + s16_t old_pending = ARP_TABLE_SIZE, old_stable = ARP_TABLE_SIZE; + s16_t empty = ARP_TABLE_SIZE; + s16_t i = 0; + /* oldest entry with packets on queue */ + s16_t old_queue = ARP_TABLE_SIZE; + /* its age */ + u16_t age_queue = 0, age_pending = 0, age_stable = 0; + + LWIP_UNUSED_ARG(netif); + + /** + * a) do a search through the cache, remember candidates + * b) select candidate entry + * c) create new entry + */ + + /* a) in a single search sweep, do all of this + * 1) remember the first empty entry (if any) + * 2) remember the oldest stable entry (if any) + * 3) remember the oldest pending entry without queued packets (if any) + * 4) remember the oldest pending entry with queued packets (if any) + * 5) search for a matching IP entry, either pending or stable + * until 5 matches, or all entries are searched for. + */ + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + /* no empty entry found yet and now we do find one? */ + if ((empty == ARP_TABLE_SIZE) && (state == ETHARP_STATE_EMPTY)) { + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_find_entry: found empty entry %d\n", (int)i)); + /* remember first empty entry */ + empty = i; + } else if (state != ETHARP_STATE_EMPTY) { + LWIP_ASSERT("state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE", + state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE); + /* if given, does IP address match IP address in ARP entry? */ + if (ipaddr && ip4_addr_cmp(ipaddr, &arp_table[i].ipaddr) +#if ETHARP_TABLE_MATCH_NETIF + && ((netif == NULL) || (netif == arp_table[i].netif)) +#endif /* ETHARP_TABLE_MATCH_NETIF */ + ) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: found matching entry %d\n", (int)i)); + /* found exact IP address match, simply bail out */ + return i; + } + /* pending entry? */ + if (state == ETHARP_STATE_PENDING) { + /* pending with queued packets? 
*/ + if (arp_table[i].q != NULL) { + if (arp_table[i].ctime >= age_queue) { + old_queue = i; + age_queue = arp_table[i].ctime; + } + } else + /* pending without queued packets? */ + { + if (arp_table[i].ctime >= age_pending) { + old_pending = i; + age_pending = arp_table[i].ctime; + } + } + /* stable entry? */ + } else if (state >= ETHARP_STATE_STABLE) { +#if ETHARP_SUPPORT_STATIC_ENTRIES + /* don't record old_stable for static entries since they never expire */ + if (state < ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* remember entry with oldest stable entry in oldest, its age in maxtime */ + if (arp_table[i].ctime >= age_stable) { + old_stable = i; + age_stable = arp_table[i].ctime; + } + } + } + } + } + /* { we have no match } => try to create a new entry */ + + /* don't create new entry, only search? */ + if (((flags & ETHARP_FLAG_FIND_ONLY) != 0) || + /* or no empty entry found and not allowed to recycle? */ + ((empty == ARP_TABLE_SIZE) && ((flags & ETHARP_FLAG_TRY_HARD) == 0))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty entry found and not allowed to recycle\n")); + return (s16_t)ERR_MEM; + } + + /* b) choose the least destructive entry to recycle: + * 1) empty entry + * 2) oldest stable entry + * 3) oldest pending entry without queued packets + * 4) oldest pending entry with queued packets + * + * { ETHARP_FLAG_TRY_HARD is set at this point } + */ + + /* 1) empty entry available? */ + if (empty < ARP_TABLE_SIZE) { + i = empty; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting empty entry %d\n", (int)i)); + } else { + /* 2) found recyclable stable entry? */ + if (old_stable < ARP_TABLE_SIZE) { + /* recycle oldest stable*/ + i = old_stable; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest stable entry %d\n", (int)i)); + /* no queued packets should exist on stable entries */ + LWIP_ASSERT("arp_table[i].q == NULL", arp_table[i].q == NULL); + /* 3) found recyclable pending entry without queued packets? */ + } else if (old_pending < ARP_TABLE_SIZE) { + /* recycle oldest pending */ + i = old_pending; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d (without queue)\n", (int)i)); + /* 4) found recyclable pending entry with queued packets? */ + } else if (old_queue < ARP_TABLE_SIZE) { + /* recycle oldest pending (queued packets are free in etharp_free_entry) */ + i = old_queue; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d, freeing packet queue %p\n", (int)i, (void *)(arp_table[i].q))); + /* no empty or recyclable entries found */ + } else { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty or recyclable entries found\n")); + return (s16_t)ERR_MEM; + } + + /* { empty or recyclable entry found } */ + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + etharp_free_entry(i); + } + + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + LWIP_ASSERT("arp_table[i].state == ETHARP_STATE_EMPTY", + arp_table[i].state == ETHARP_STATE_EMPTY); + + /* IP address given? */ + if (ipaddr != NULL) { + /* set IP address */ + ip4_addr_copy(arp_table[i].ipaddr, *ipaddr); + } + arp_table[i].ctime = 0; +#if ETHARP_TABLE_MATCH_NETIF + arp_table[i].netif = netif; +#endif /* ETHARP_TABLE_MATCH_NETIF */ + return (s16_t)i; +} + +/** + * Update (or insert) a IP/MAC address pair in the ARP cache. 
+ * + * If a pending entry is resolved, any queued packets will be sent + * at this point. + * + * @param netif netif related to this entry (used for NETIF_ADDRHINT) + * @param ipaddr IP address of the inserted ARP entry. + * @param ethaddr Ethernet address of the inserted ARP entry. + * @param flags See @ref etharp_state + * + * @return + * - ERR_OK Successfully updated ARP cache. + * - ERR_MEM If we could not add a new ARP entry when ETHARP_FLAG_TRY_HARD was set. + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + * @see pbuf_free() + */ +static err_t +etharp_update_arp_entry(struct netif *netif, const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, u8_t flags) +{ + s16_t i; + LWIP_ASSERT("netif->hwaddr_len == ETH_HWADDR_LEN", netif->hwaddr_len == ETH_HWADDR_LEN); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + /* non-unicast address? */ + if (ip4_addr_isany(ipaddr) || + ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, flags, netif); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + +#if ETHARP_SUPPORT_STATIC_ENTRIES + if (flags & ETHARP_FLAG_STATIC_ENTRY) { + /* record static type */ + arp_table[i].state = ETHARP_STATE_STATIC; + } else if (arp_table[i].state == ETHARP_STATE_STATIC) { + /* found entry is a static type, don't overwrite it */ + return ERR_VAL; + } else +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* mark it stable */ + arp_table[i].state = ETHARP_STATE_STABLE; + } + + /* record network interface */ + arp_table[i].netif = netif; + /* insert in SNMP ARP index tree */ + mib2_add_arp_entry(netif, &arp_table[i].ipaddr); + + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: updating stable entry %"S16_F"\n", i)); + /* update address */ + SMEMCPY(&arp_table[i].ethaddr, ethaddr, ETH_HWADDR_LEN); + /* reset time stamp */ + arp_table[i].ctime = 0; + /* this is where we will send out queued packets! */ +#if ARP_QUEUEING + while (arp_table[i].q != NULL) { + struct pbuf *p; + /* remember remainder of queue */ + struct etharp_q_entry *q = arp_table[i].q; + /* pop first item off the queue */ + arp_table[i].q = q->next; + /* get the packet pointer */ + p = q->p; + /* now queue entry can be freed */ + memp_free(MEMP_ARP_QUEUE, q); +#else /* ARP_QUEUEING */ + if (arp_table[i].q != NULL) { + struct pbuf *p = arp_table[i].q; + arp_table[i].q = NULL; +#endif /* ARP_QUEUEING */ + /* send the queued IP packet */ + ethernet_output(netif, p, (struct eth_addr *)(netif->hwaddr), ethaddr, ETHTYPE_IP); + /* free the queued IP packet */ + pbuf_free(p); + } + return ERR_OK; +} + +#if ETHARP_SUPPORT_STATIC_ENTRIES +/** Add a new static entry to the ARP table. If an entry exists for the + * specified IP address, this entry is overwritten. + * If packets are queued for the specified IP address, they are sent out. 
+ * + * @param ipaddr IP address for the new static entry + * @param ethaddr ethernet address for the new static entry + * @return See return values of etharp_add_static_entry + */ +err_t +etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr) +{ + struct netif *netif; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_add_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + + netif = ip4_route(ipaddr); + if (netif == NULL) { + return ERR_RTE; + } + + return etharp_update_arp_entry(netif, ipaddr, ethaddr, ETHARP_FLAG_TRY_HARD | ETHARP_FLAG_STATIC_ENTRY); +} + +/** Remove a static entry from the ARP table previously added with a call to + * etharp_add_static_entry. + * + * @param ipaddr IP address of the static entry to remove + * @return ERR_OK: entry removed + * ERR_MEM: entry wasn't found + * ERR_ARG: entry wasn't a static entry but a dynamic one + */ +err_t +etharp_remove_static_entry(const ip4_addr_t *ipaddr) +{ + s16_t i; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_remove_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); + + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, NULL); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + + if (arp_table[i].state != ETHARP_STATE_STATIC) { + /* entry wasn't a static entry, cannot remove it */ + return ERR_ARG; + } + /* entry found, free it */ + etharp_free_entry(i); + return ERR_OK; +} +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +/** + * Remove all ARP table entries of the specified netif. + * + * @param netif points to a network interface + */ +void +etharp_cleanup_netif(struct netif *netif) +{ + int i; + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if ((state != ETHARP_STATE_EMPTY) && (arp_table[i].netif == netif)) { + etharp_free_entry(i); + } + } +} + +/** + * Finds (stable) ethernet/IP address pair from ARP table + * using interface and IP address index. + * @note the addresses in the ARP table are in network order! 
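A short usage sketch for the two public helpers above (only compiled when ETHARP_SUPPORT_STATIC_ENTRIES is enabled; the addresses are made up for the example, and the calls are assumed to run in the tcpip thread since LWIP_ASSERT_CORE_LOCKED applies):

#include "lwip/etharp.h"
#include "lwip/ip4_addr.h"

void example_static_arp(void) {
  ip4_addr_t ip;
  struct eth_addr mac = {{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}};   /* example MAC */

  IP4_ADDR(&ip, 192, 168, 1, 1);
  /* pin 192.168.1.1 to the given MAC; a static entry never ages out */
  if (etharp_add_static_entry(&ip, &mac) == ERR_OK) {
    /* ... entry installed ... */
  }
  /* later: remove it again */
  etharp_remove_static_entry(&ip);
}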
+ * + * @param netif points to interface index + * @param ipaddr points to the (network order) IP address index + * @param eth_ret points to return pointer + * @param ip_ret points to return pointer + * @return table index if found, -1 otherwise + */ +ssize_t +etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret) +{ + s16_t i; + + LWIP_ASSERT("eth_ret != NULL && ip_ret != NULL", + eth_ret != NULL && ip_ret != NULL); + + LWIP_UNUSED_ARG(netif); + + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, netif); + if ((i >= 0) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *eth_ret = &arp_table[i].ethaddr; + *ip_ret = &arp_table[i].ipaddr; + return i; + } + return -1; +} + +/** + * Possibility to iterate over stable ARP table entries + * + * @param i entry number, 0 to ARP_TABLE_SIZE + * @param ipaddr return value: IP address + * @param netif return value: points to interface + * @param eth_ret return value: ETH address + * @return 1 on valid index, 0 otherwise + */ +int +etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret) +{ + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("eth_ret != NULL", eth_ret != NULL); + + if ((i < ARP_TABLE_SIZE) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *ipaddr = &arp_table[i].ipaddr; + *netif = arp_table[i].netif; + *eth_ret = &arp_table[i].ethaddr; + return 1; + } else { + return 0; + } +} + +/** + * Responds to ARP requests to us. Upon ARP replies to us, add entry to cache + * send out queued IP packets. Updates cache with snooped address pairs. + * + * Should be called for incoming ARP packets. The pbuf in the argument + * is freed by this function. + * + * @param p The ARP packet that arrived on netif. Is freed by this function. + * @param netif The lwIP network interface on which the ARP packet pbuf arrived. + * + * @see pbuf_free() + */ +void +etharp_input(struct pbuf *p, struct netif *netif) +{ + struct etharp_hdr *hdr; + /* these are aligned properly, whereas the ARP header fields might not be */ + ip4_addr_t sipaddr, dipaddr; + u8_t for_us; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + hdr = (struct etharp_hdr *)p->payload; + + /* RFC 826 "Packet Reception": */ + if ((hdr->hwtype != PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET)) || + (hdr->hwlen != ETH_HWADDR_LEN) || + (hdr->protolen != sizeof(ip4_addr_t)) || + (hdr->proto != PP_HTONS(ETHTYPE_IP))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("etharp_input: packet dropped, wrong hw type, hwlen, proto, protolen or ethernet type (%"U16_F"/%"U16_F"/%"U16_F"/%"U16_F")\n", + hdr->hwtype, (u16_t)hdr->hwlen, hdr->proto, (u16_t)hdr->protolen)); + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + pbuf_free(p); + return; + } + ETHARP_STATS_INC(etharp.recv); + +#if LWIP_AUTOIP + /* We have to check if a host already has configured our random + * created link local address and continuously check if there is + * a host with this IP-address so we can detect collisions */ + autoip_arp_reply(netif, hdr); +#endif /* LWIP_AUTOIP */ + + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). 
*/ + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr); + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr); + + /* this interface is not configured? */ + if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + for_us = 0; + } else { + /* ARP packet directed to us? */ + for_us = (u8_t)ip4_addr_cmp(&dipaddr, netif_ip4_addr(netif)); + } + + /* ARP message directed to us? + -> add IP address in ARP cache; assume requester wants to talk to us, + can result in directly sending the queued packets for this host. + ARP message not directed to us? + -> update the source IP address in the cache, if present */ + etharp_update_arp_entry(netif, &sipaddr, &(hdr->shwaddr), + for_us ? ETHARP_FLAG_TRY_HARD : ETHARP_FLAG_FIND_ONLY); + + /* now act on the message itself */ + switch (hdr->opcode) { + /* ARP request? */ + case PP_HTONS(ARP_REQUEST): + /* ARP request. If it asked for our address, we send out a + * reply. In any case, we time-stamp any existing ARP entry, + * and possibly send out an IP packet that was queued on it. */ + + LWIP_DEBUGF (ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP request\n")); + /* ARP request for our address? */ + if (for_us) { + /* send ARP response */ + etharp_raw(netif, + (struct eth_addr *)netif->hwaddr, &hdr->shwaddr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), + &hdr->shwaddr, &sipaddr, + ARP_REPLY); + /* we are not configured? */ + } else if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* { for_us == 0 and netif->ip_addr.addr == 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: we are unconfigured, ARP request ignored.\n")); + /* request was not directed to us */ + } else { + /* { for_us == 0 and netif->ip_addr.addr != 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP request was not for us.\n")); + } + break; + case PP_HTONS(ARP_REPLY): + /* ARP reply. We already updated the ARP cache earlier. */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP reply\n")); +#if (LWIP_DHCP && DHCP_DOES_ARP_CHECK) + /* DHCP wants to know about ARP replies from any host with an + * IP address also offered to us by the DHCP server. We do not + * want to take a duplicate IP address on a single network. + * @todo How should we handle redundant (fail-over) interfaces? */ + dhcp_arp_reply(netif, &sipaddr); +#endif /* (LWIP_DHCP && DHCP_DOES_ARP_CHECK) */ + break; + default: + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP unknown opcode type %"S16_F"\n", lwip_htons(hdr->opcode))); + ETHARP_STATS_INC(etharp.err); + break; + } + /* free ARP packet */ + pbuf_free(p); +} + +/** Just a small helper function that sends a pbuf to an ethernet address + * in the arp_table specified by the index 'arp_idx'. + */ +static err_t +etharp_output_to_arp_index(struct netif *netif, struct pbuf *q, netif_addr_idx_t arp_idx) +{ + LWIP_ASSERT("arp_table[arp_idx].state >= ETHARP_STATE_STABLE", + arp_table[arp_idx].state >= ETHARP_STATE_STABLE); + /* if arp table entry is about to expire: re-request it, + but only if its state is ETHARP_STATE_STABLE to prevent flooding the + network with ARP requests if this address is used frequently. 
*/ + if (arp_table[arp_idx].state == ETHARP_STATE_STABLE) { + if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_BROADCAST) { + /* issue a standard request using broadcast */ + if (etharp_request(netif, &arp_table[arp_idx].ipaddr) == ERR_OK) { + arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; + } + } else if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_UNICAST) { + /* issue a unicast request (for 15 seconds) to prevent unnecessary broadcast */ + if (etharp_request_dst(netif, &arp_table[arp_idx].ipaddr, &arp_table[arp_idx].ethaddr) == ERR_OK) { + arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; + } + } + } + + return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), &arp_table[arp_idx].ethaddr, ETHTYPE_IP); +} + +/** + * Resolve and fill-in Ethernet address header for outgoing IP packet. + * + * For IP multicast and broadcast, corresponding Ethernet addresses + * are selected and the packet is transmitted on the link. + * + * For unicast addresses, the packet is submitted to etharp_query(). In + * case the IP address is outside the local network, the IP address of + * the gateway is used. + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ipaddr The IP address of the packet destination. + * + * @return + * - ERR_RTE No route to destination (no gateway to external networks), + * or the return type of either etharp_query() or ethernet_output(). + */ +err_t +etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) +{ + const struct eth_addr *dest; + struct eth_addr mcastaddr; + const ip4_addr_t *dst_addr = ipaddr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + + /* Determine on destination hardware address. Broadcasts and multicasts + * are special, other IP addresses are looked up in the ARP table. */ + + /* broadcast destination IP address? */ + if (ip4_addr_isbroadcast(ipaddr, netif)) { + /* broadcast on Ethernet also */ + dest = (const struct eth_addr *)ðbroadcast; + /* multicast destination IP address? */ + } else if (ip4_addr_ismulticast(ipaddr)) { + /* Hash IP multicast address to MAC address.*/ + mcastaddr.addr[0] = LL_IP4_MULTICAST_ADDR_0; + mcastaddr.addr[1] = LL_IP4_MULTICAST_ADDR_1; + mcastaddr.addr[2] = LL_IP4_MULTICAST_ADDR_2; + mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f; + mcastaddr.addr[4] = ip4_addr3(ipaddr); + mcastaddr.addr[5] = ip4_addr4(ipaddr); + /* destination Ethernet address is multicast */ + dest = &mcastaddr; + /* unicast destination IP address? */ + } else { + netif_addr_idx_t i; + /* outside local network? if so, this can neither be a global broadcast nor + a subnet broadcast. */ + if (!ip4_addr_netcmp(ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) && + !ip4_addr_islinklocal(ipaddr)) { +#if LWIP_AUTOIP + struct ip_hdr *iphdr = LWIP_ALIGNMENT_CAST(struct ip_hdr *, q->payload); + /* According to RFC 3297, chapter 2.6.2 (Forwarding Rules), a packet with + a link-local source address must always be "directly to its destination + on the same physical link. The host MUST NOT send the packet to any + router for forwarding". 
*/ + if (!ip4_addr_islinklocal(&iphdr->src)) +#endif /* LWIP_AUTOIP */ + { +#ifdef LWIP_HOOK_ETHARP_GET_GW + /* For advanced routing, a single default gateway might not be enough, so get + the IP address of the gateway to handle the current destination address. */ + dst_addr = LWIP_HOOK_ETHARP_GET_GW(netif, ipaddr); + if (dst_addr == NULL) +#endif /* LWIP_HOOK_ETHARP_GET_GW */ + { + /* interface has default gateway? */ + if (!ip4_addr_isany_val(*netif_ip4_gw(netif))) { + /* send to hardware address of default gateway IP address */ + dst_addr = netif_ip4_gw(netif); + /* no default gateway available */ + } else { + /* no route to destination error (default gateway missing) */ + return ERR_RTE; + } + } + } + } +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif_addr_idx_t etharp_cached_entry = netif->hints->addr_hint; + if (etharp_cached_entry < ARP_TABLE_SIZE) { +#endif /* LWIP_NETIF_HWADDRHINT */ + if ((arp_table[etharp_cached_entry].state >= ETHARP_STATE_STABLE) && +#if ETHARP_TABLE_MATCH_NETIF + (arp_table[etharp_cached_entry].netif == netif) && +#endif + (ip4_addr_cmp(dst_addr, &arp_table[etharp_cached_entry].ipaddr))) { + /* the per-pcb-cached entry is stable and the right one! */ + ETHARP_STATS_INC(etharp.cachehit); + return etharp_output_to_arp_index(netif, q, etharp_cached_entry); + } +#if LWIP_NETIF_HWADDRHINT + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* find stable entry: do this here since this is a critical path for + throughput and etharp_find_entry() is kind of slow */ + for (i = 0; i < ARP_TABLE_SIZE; i++) { + if ((arp_table[i].state >= ETHARP_STATE_STABLE) && +#if ETHARP_TABLE_MATCH_NETIF + (arp_table[i].netif == netif) && +#endif + (ip4_addr_cmp(dst_addr, &arp_table[i].ipaddr))) { + /* found an existing, stable entry */ + ETHARP_SET_ADDRHINT(netif, i); + return etharp_output_to_arp_index(netif, q, i); + } + } + /* no stable entry found, use the (slower) query function: + queue on destination Ethernet address belonging to ipaddr */ + return etharp_query(netif, dst_addr, q); + } + + /* continuation for multicast/broadcast destinations */ + /* obtain source Ethernet address of the given interface */ + /* send packet directly on the link */ + return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), dest, ETHTYPE_IP); +} + +/** + * Send an ARP request for the given IP address and/or queue a packet. + * + * If the IP address was not yet in the cache, a pending ARP cache entry + * is added and an ARP request is sent for the given address. The packet + * is queued on this entry. + * + * If the IP address was already pending in the cache, a new ARP request + * is sent for the given address. The packet is queued on this entry. + * + * If the IP address was already stable in the cache, and a packet is + * given, it is directly sent and no ARP request is sent out. + * + * If the IP address was already stable in the cache, and no packet is + * given, an ARP request is sent out. + * + * @param netif The lwIP network interface on which ipaddr + * must be queried for. + * @param ipaddr The IP address to be resolved. + * @param q If non-NULL, a pbuf that must be delivered to the IP address. + * q is not freed by this function. + * + * @note q must only be ONE packet, not a packet queue! + * + * @return + * - ERR_BUF Could not make room for Ethernet header. + * - ERR_MEM Hardware address unknown, and no more ARP entries available + * to query for address or queue the packet. 
+ * - ERR_MEM Could not queue packet due to memory shortage. + * - ERR_RTE No route to destination (no gateway to external networks). + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + */ +err_t +etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q) +{ + struct eth_addr *srcaddr = (struct eth_addr *)netif->hwaddr; + err_t result = ERR_MEM; + int is_new_entry = 0; + s16_t i_err; + netif_addr_idx_t i; + + /* non-unicast address? */ + if (ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr) || + ip4_addr_isany(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + + /* find entry in ARP cache, ask to create entry if queueing packet */ + i_err = etharp_find_entry(ipaddr, ETHARP_FLAG_TRY_HARD, netif); + + /* could not find or create entry? */ + if (i_err < 0) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not create ARP entry\n")); + if (q) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: packet dropped\n")); + ETHARP_STATS_INC(etharp.memerr); + } + return (err_t)i_err; + } + LWIP_ASSERT("type overflow", (size_t)i_err < NETIF_ADDR_IDX_MAX); + i = (netif_addr_idx_t)i_err; + + /* mark a fresh entry as pending (we just sent a request) */ + if (arp_table[i].state == ETHARP_STATE_EMPTY) { + is_new_entry = 1; + arp_table[i].state = ETHARP_STATE_PENDING; + /* record network interface for re-sending arp request in etharp_tmr */ + arp_table[i].netif = netif; + } + + /* { i is either a STABLE or (new or existing) PENDING entry } */ + LWIP_ASSERT("arp_table[i].state == PENDING or STABLE", + ((arp_table[i].state == ETHARP_STATE_PENDING) || + (arp_table[i].state >= ETHARP_STATE_STABLE))); + + /* do we have a new entry? or an implicit query request? */ + if (is_new_entry || (q == NULL)) { + /* try to resolve it; send out ARP request */ + result = etharp_request(netif, ipaddr); + if (result != ERR_OK) { + /* ARP request couldn't be sent */ + /* We don't re-send arp request in etharp_tmr, but we still queue packets, + since this failure could be temporary, and the next packet calling + etharp_query again could lead to sending the queued packets. */ + } + if (q == NULL) { + return result; + } + } + + /* packet given? */ + LWIP_ASSERT("q != NULL", q != NULL); + /* stable entry? */ + if (arp_table[i].state >= ETHARP_STATE_STABLE) { + /* we have a valid IP->Ethernet address mapping */ + ETHARP_SET_ADDRHINT(netif, i); + /* send the packet */ + result = ethernet_output(netif, q, srcaddr, &(arp_table[i].ethaddr), ETHTYPE_IP); + /* pending entry? (either just created or already pending */ + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* entry is still pending, queue the given packet 'q' */ + struct pbuf *p; + int copy_needed = 0; + /* IF q includes a pbuf that must be copied, copy the whole chain into a + * new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */ + p = q; + while (p) { + LWIP_ASSERT("no packet queues allowed!", (p->len != p->tot_len) || (p->next == 0)); + if (PBUF_NEEDS_COPY(p)) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet could be taken over? */ + if (p != NULL) { + /* queue packet ... 
*/ +#if ARP_QUEUEING + struct etharp_q_entry *new_entry; + /* allocate a new arp queue entry */ + new_entry = (struct etharp_q_entry *)memp_malloc(MEMP_ARP_QUEUE); + if (new_entry != NULL) { + unsigned int qlen = 0; + new_entry->next = 0; + new_entry->p = p; + if (arp_table[i].q != NULL) { + /* queue was already existent, append the new entry to the end */ + struct etharp_q_entry *r; + r = arp_table[i].q; + qlen++; + while (r->next != NULL) { + r = r->next; + qlen++; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + arp_table[i].q = new_entry; + } +#if ARP_QUEUE_LEN + if (qlen >= ARP_QUEUE_LEN) { + struct etharp_q_entry *old; + old = arp_table[i].q; + arp_table[i].q = arp_table[i].q->next; + pbuf_free(old->p); + memp_free(MEMP_ARP_QUEUE, old); + } +#endif + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, i)); + result = ERR_OK; + } else { + /* the pool MEMP_ARP_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } +#else /* ARP_QUEUEING */ + /* always queue one packet per ARP request only, freeing a previously queued packet */ + if (arp_table[i].q != NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: dropped previously queued packet %p for ARP entry %"U16_F"\n", (void *)q, (u16_t)i)); + pbuf_free(arp_table[i].q); + } + arp_table[i].q = p; + result = ERR_OK; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, (u16_t)i)); +#endif /* ARP_QUEUEING */ + } else { + ETHARP_STATS_INC(etharp.memerr); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } + } + return result; +} + +/** + * Send a raw ARP packet (opcode and all addresses can be modified) + * + * @param netif the lwip network interface on which to send the ARP packet + * @param ethsrc_addr the source MAC address for the ethernet header + * @param ethdst_addr the destination MAC address for the ethernet header + * @param hwsrc_addr the source MAC address for the ARP protocol header + * @param ipsrc_addr the source IP address for the ARP protocol header + * @param hwdst_addr the destination MAC address for the ARP protocol header + * @param ipdst_addr the destination IP address for the ARP protocol header + * @param opcode the type of the ARP packet + * @return ERR_OK if the ARP packet has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_raw(struct netif *netif, const struct eth_addr *ethsrc_addr, + const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode) +{ + struct pbuf *p; + err_t result = ERR_OK; + struct etharp_hdr *hdr; + + LWIP_ASSERT("netif != NULL", netif != NULL); + + /* allocate a pbuf for the outgoing ARP request packet */ + p = pbuf_alloc(PBUF_LINK, SIZEOF_ETHARP_HDR, PBUF_RAM); + /* could allocate a pbuf for an ARP request? 
*/ + if (p == NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("etharp_raw: could not allocate pbuf for ARP request.\n")); + ETHARP_STATS_INC(etharp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("check that first pbuf can hold struct etharp_hdr", + (p->len >= SIZEOF_ETHARP_HDR)); + + hdr = (struct etharp_hdr *)p->payload; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_raw: sending raw ARP packet.\n")); + hdr->opcode = lwip_htons(opcode); + + LWIP_ASSERT("netif->hwaddr_len must be the same as ETH_HWADDR_LEN for etharp!", + (netif->hwaddr_len == ETH_HWADDR_LEN)); + + /* Write the ARP MAC-Addresses */ + SMEMCPY(&hdr->shwaddr, hwsrc_addr, ETH_HWADDR_LEN); + SMEMCPY(&hdr->dhwaddr, hwdst_addr, ETH_HWADDR_LEN); + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing. */ + IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->sipaddr, ipsrc_addr); + IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->dipaddr, ipdst_addr); + + hdr->hwtype = PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET); + hdr->proto = PP_HTONS(ETHTYPE_IP); + /* set hwlen and protolen */ + hdr->hwlen = ETH_HWADDR_LEN; + hdr->protolen = sizeof(ip4_addr_t); + + /* send ARP query */ +#if LWIP_AUTOIP + /* If we are using Link-Local, all ARP packets that contain a Link-Local + * 'sender IP address' MUST be sent using link-layer broadcast instead of + * link-layer unicast. (See RFC3927 Section 2.5, last paragraph) */ + if (ip4_addr_islinklocal(ipsrc_addr)) { + ethernet_output(netif, p, ethsrc_addr, ðbroadcast, ETHTYPE_ARP); + } else +#endif /* LWIP_AUTOIP */ + { + ethernet_output(netif, p, ethsrc_addr, ethdst_addr, ETHTYPE_ARP); + } + + ETHARP_STATS_INC(etharp.xmit); + /* free ARP query packet */ + pbuf_free(p); + p = NULL; + /* could not allocate pbuf for ARP request */ + + return result; +} + +/** + * Send an ARP request packet asking for ipaddr to a specific eth address. + * Used to send unicast request to refresh the ARP table just before an entry + * times out + * + * @param netif the lwip network interface on which to send the request + * @param ipaddr the IP address for which to ask + * @param hw_dst_addr the ethernet address to send this packet to + * @return ERR_OK if the request has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr) +{ + return etharp_raw(netif, (struct eth_addr *)netif->hwaddr, hw_dst_addr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), ðzero, + ipaddr, ARP_REQUEST); +} + +/** + * Send an ARP request packet asking for ipaddr. 
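As a hedged sketch (not code from this file), the request helper documented here and etharp_find_addr() defined earlier can be combined to resolve a neighbour's hardware address; it assumes it runs in the tcpip thread and that the peer eventually answers the broadcast:

#include "lwip/etharp.h"
#include "lwip/netif.h"

static void example_resolve(struct netif *netif, const ip4_addr_t *target)
{
  struct eth_addr *mac;
  const ip4_addr_t *entry_ip;

  etharp_request(netif, target);                 /* broadcast "who has <target>?" */

  /* ... once the reply has been handled by etharp_input() ... */
  if (etharp_find_addr(netif, target, &mac, &entry_ip) >= 0) {
    /* mac->addr[0..5] now holds the resolved hardware address */
  }
}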
+ * + * @param netif the lwip network interface on which to send the request + * @param ipaddr the IP address for which to ask + * @return ERR_OK if the request has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +err_t +etharp_request(struct netif *netif, const ip4_addr_t *ipaddr) +{ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_request: sending ARP request.\n")); + return etharp_request_dst(netif, ipaddr, ðbroadcast); +} + +#endif /* LWIP_IPV4 && LWIP_ARP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c new file mode 100644 index 000000000..a462ccd34 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c @@ -0,0 +1,404 @@ +/** + * @file + * ICMP - Internet Control Message Protocol + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* Some ICMP messages should be passed to the transport protocols. This + is not implemented. */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/icmp.h" +#include "lwip/inet_chksum.h" +#include "lwip/ip.h" +#include "lwip/def.h" +#include "lwip/stats.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Small optimization: set to 0 if incoming PBUF_POOL pbuf always can be + * used to modify and send a response packet (and to 1 if this is not the case, + * e.g. 
when link header is stripped off when receiving) */ +#ifndef LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN +#define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 1 +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + +/* The amount of data from the original packet to return in a dest-unreachable */ +#define ICMP_DEST_UNREACH_DATASIZE 8 + +static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code); + +/** + * Processes ICMP input packets, called from ip_input(). + * + * Currently only processes icmp echo requests and sends + * out the echo response. + * + * @param p the icmp echo request packet, p->payload pointing to the icmp header + * @param inp the netif on which this packet was received + */ +void +icmp_input(struct pbuf *p, struct netif *inp) +{ + u8_t type; +#ifdef LWIP_DEBUG + u8_t code; +#endif /* LWIP_DEBUG */ + struct icmp_echo_hdr *iecho; + const struct ip_hdr *iphdr_in; + u16_t hlen; + const ip4_addr_t *src; + + ICMP_STATS_INC(icmp.recv); + MIB2_STATS_INC(mib2.icmpinmsgs); + + iphdr_in = ip4_current_header(); + hlen = IPH_HL_BYTES(iphdr_in); + if (hlen < IP_HLEN) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short IP header (%"S16_F" bytes) received\n", hlen)); + goto lenerr; + } + if (p->len < sizeof(u16_t) * 2) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len)); + goto lenerr; + } + + type = *((u8_t *)p->payload); +#ifdef LWIP_DEBUG + code = *(((u8_t *)p->payload) + 1); + /* if debug is enabled but debug statement below is somehow disabled: */ + LWIP_UNUSED_ARG(code); +#endif /* LWIP_DEBUG */ + switch (type) { + case ICMP_ER: + /* This is OK, echo reply might have been parsed by a raw PCB + (as obviously, an echo request has been sent, too). */ + MIB2_STATS_INC(mib2.icmpinechoreps); + break; + case ICMP_ECHO: + MIB2_STATS_INC(mib2.icmpinechos); + src = ip4_current_dest_addr(); + /* multicast destination address? */ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_MULTICAST_PING + /* For multicast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_MULTICAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n")); + goto icmperr; +#endif /* LWIP_MULTICAST_PING */ + } + /* broadcast destination address? 
*/ + if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) { +#if LWIP_BROADCAST_PING + /* For broadcast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_BROADCAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n")); + goto icmperr; +#endif /* LWIP_BROADCAST_PING */ + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n")); + if (p->tot_len < sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n")); + goto lenerr; + } +#if CHECKSUM_CHECK_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) { + if (inet_chksum_pbuf(p) != 0) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n")); + pbuf_free(p); + ICMP_STATS_INC(icmp.chkerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; + } + } +#endif +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN + if (pbuf_add_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) { + /* p is not big enough to contain link headers + * allocate a new one and copy p into it + */ + struct pbuf *r; + u16_t alloc_len = (u16_t)(p->tot_len + hlen); + if (alloc_len < p->tot_len) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed (tot_len overflow)\n")); + goto icmperr; + } + /* allocate new packet buffer with space for link headers */ + r = pbuf_alloc(PBUF_LINK, alloc_len, PBUF_RAM); + if (r == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n")); + goto icmperr; + } + if (r->len < hlen + sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("first pbuf cannot hold the ICMP header")); + pbuf_free(r); + goto icmperr; + } + /* copy the ip header */ + MEMCPY(r->payload, iphdr_in, hlen); + /* switch r->payload back to icmp header (cannot fail) */ + if (pbuf_remove_header(r, hlen)) { + LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0); + pbuf_free(r); + goto icmperr; + } + /* copy the rest of the packet without ip header */ + if (pbuf_copy(r, p) != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("icmp_input: copying to new pbuf failed")); + pbuf_free(r); + goto icmperr; + } + /* free the original p */ + pbuf_free(p); + /* we now have an identical copy of p that has room for link headers */ + p = r; + } else { + /* restore p->payload to point to icmp header (cannot fail) */ + if (pbuf_remove_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) { + LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0); + goto icmperr; + } + } +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + /* At this point, all checks are OK. */ + /* We generate an answer by switching the dest and src ip addresses, + * setting the icmp type to ECHO_RESPONSE and updating the checksum. 
*/ + iecho = (struct icmp_echo_hdr *)p->payload; + if (pbuf_add_header(p, hlen)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Can't move over header in packet")); + } else { + err_t ret; + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(iphdr->src, *src); + ip4_addr_copy(iphdr->dest, *ip4_current_src_addr()); + ICMPH_TYPE_SET(iecho, ICMP_ER); +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) { + /* adjust the checksum */ + if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) { + iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS((u16_t)(ICMP_ECHO << 8)) + 1); + } else { + iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS(ICMP_ECHO << 8)); + } + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + iecho->chksum = 0; + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#else /* CHECKSUM_GEN_ICMP */ + iecho->chksum = 0; +#endif /* CHECKSUM_GEN_ICMP */ + + /* Set the correct TTL and recalculate the header checksum. */ + IPH_TTL_SET(iphdr, ICMP_TTL); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen)); + } +#endif /* CHECKSUM_GEN_IP */ + + ICMP_STATS_INC(icmp.xmit); + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + /* increase number of echo replies attempted to send */ + MIB2_STATS_INC(mib2.icmpoutechoreps); + + /* send an ICMP packet */ + ret = ip4_output_if(p, src, LWIP_IP_HDRINCL, + ICMP_TTL, 0, IP_PROTO_ICMP, inp); + if (ret != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret))); + } + } + break; + default: + if (type == ICMP_DUR) { + MIB2_STATS_INC(mib2.icmpindestunreachs); + } else if (type == ICMP_TE) { + MIB2_STATS_INC(mib2.icmpintimeexcds); + } else if (type == ICMP_PP) { + MIB2_STATS_INC(mib2.icmpinparmprobs); + } else if (type == ICMP_SQ) { + MIB2_STATS_INC(mib2.icmpinsrcquenchs); + } else if (type == ICMP_RD) { + MIB2_STATS_INC(mib2.icmpinredirects); + } else if (type == ICMP_TS) { + MIB2_STATS_INC(mib2.icmpintimestamps); + } else if (type == ICMP_TSR) { + MIB2_STATS_INC(mib2.icmpintimestampreps); + } else if (type == ICMP_AM) { + MIB2_STATS_INC(mib2.icmpinaddrmasks); + } else if (type == ICMP_AMR) { + MIB2_STATS_INC(mib2.icmpinaddrmaskreps); + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n", + (s16_t)type, (s16_t)code)); + ICMP_STATS_INC(icmp.proterr); + ICMP_STATS_INC(icmp.drop); + } + pbuf_free(p); + return; +lenerr: + pbuf_free(p); + ICMP_STATS_INC(icmp.lenerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING +icmperr: + pbuf_free(p); + ICMP_STATS_INC(icmp.err); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */ +} + +/** + * Send an icmp 'destination unreachable' packet, called from ip_input() if + * the transport layer protocol is unknown and from udp_input() if the local + * port is not bound. 
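A small sketch of how the 'destination unreachable' helper described above (and defined just below) is used by a caller inside the stack; this mirrors what udp_input() does for an unbound port and is not code from this file:

#include "lwip/icmp.h"
#include "lwip/pbuf.h"

static void example_reject_datagram(struct pbuf *p)
{
  /* p->payload must point at the offending IP header */
  icmp_dest_unreach(p, ICMP_DUR_PORT);  /* "destination port unreachable" back to the sender */
  pbuf_free(p);                         /* icmp_send_response() copied what it needed, caller still frees p */
}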
+ * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IP header + * @param t type of the 'unreachable' packet + */ +void +icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t) +{ + MIB2_STATS_INC(mib2.icmpoutdestunreachs); + icmp_send_response(p, ICMP_DUR, t); +} + +#if IP_FORWARD || IP_REASSEMBLY +/** + * Send a 'time exceeded' packet, called from ip_forward() if TTL is 0. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IP header + * @param t type of the 'time exceeded' packet + */ +void +icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t) +{ + MIB2_STATS_INC(mib2.icmpouttimeexcds); + icmp_send_response(p, ICMP_TE, t); +} + +#endif /* IP_FORWARD || IP_REASSEMBLY */ + +/** + * Send an icmp packet in response to an incoming packet. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IP header + * @param type Type of the ICMP header + * @param code Code of the ICMP header + */ +static void +icmp_send_response(struct pbuf *p, u8_t type, u8_t code) +{ + struct pbuf *q; + struct ip_hdr *iphdr; + /* we can use the echo header here */ + struct icmp_echo_hdr *icmphdr; + ip4_addr_t iphdr_src; + struct netif *netif; + + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + + /* ICMP header + IP header + 8 bytes of data */ + q = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE, + PBUF_RAM); + if (q == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMP packet.\n")); + MIB2_STATS_INC(mib2.icmpouterrors); + return; + } + LWIP_ASSERT("check that first pbuf can hold icmp message", + (q->len >= (sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE))); + + iphdr = (struct ip_hdr *)p->payload; + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded from ")); + ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->src); + LWIP_DEBUGF(ICMP_DEBUG, (" to ")); + ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->dest); + LWIP_DEBUGF(ICMP_DEBUG, ("\n")); + + icmphdr = (struct icmp_echo_hdr *)q->payload; + icmphdr->type = type; + icmphdr->code = code; + icmphdr->id = 0; + icmphdr->seqno = 0; + + /* copy fields from original packet */ + SMEMCPY((u8_t *)q->payload + sizeof(struct icmp_echo_hdr), (u8_t *)p->payload, + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE); + + ip4_addr_copy(iphdr_src, iphdr->src); +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + { + ip4_addr_t iphdr_dst; + ip4_addr_copy(iphdr_dst, iphdr->dest); + netif = ip4_route_src(&iphdr_dst, &iphdr_src); + } +#else + netif = ip4_route(&iphdr_src); +#endif + if (netif != NULL) { + /* calculate checksum */ + icmphdr->chksum = 0; +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP) { + icmphdr->chksum = inet_chksum(icmphdr, q->len); + } +#endif + ICMP_STATS_INC(icmp.xmit); + ip4_output_if(q, NULL, &iphdr_src, ICMP_TTL, 0, IP_PROTO_ICMP, netif); + } + pbuf_free(q); +} + +#endif /* LWIP_IPV4 && LWIP_ICMP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c new file mode 100644 index 000000000..b655aa3a4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c @@ -0,0 +1,801 @@ +/** + * @file + * IGMP - Internet Group Management Protocol + * + * @defgroup igmp IGMP + * @ingroup ip4 + * To be called from TCPIP 
thread + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. +*/ + +/*------------------------------------------------------------- +Note 1) +Although the rfc requires V1 AND V2 capability +we will only support v2 since now V1 is very old (August 1989) +V1 can be added if required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 2) +A query for a specific group address (as opposed to ALLHOSTS) +has now been implemented as I am unsure if it is required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 3) +The router alert rfc 2113 is implemented in outgoing packets +but not checked rigorously incoming +------------------------------------------------------------- +Steve Reynolds +------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------- + * RFC 988 - Host extensions for IP multicasting - V0 + * RFC 1054 - Host extensions for IP multicasting - + * RFC 1112 - Host extensions for IP multicasting - V1 + * RFC 2236 - Internet Group Management Protocol, Version 2 - V2 <- this code is based on this RFC (it's the "de facto" standard) + * RFC 3376 - Internet Group Management Protocol, Version 3 - V3 + * RFC 4604 - Using Internet Group Management Protocol Version 3... 
- V3+ + * RFC 2113 - IP Router Alert Option - + *----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------- + * Includes + *----------------------------------------------------------------------------*/ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/igmp.h" +#include "lwip/debug.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/ip.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/stats.h" +#include "lwip/prot/igmp.h" + +#include + +static struct igmp_group *igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr); +static err_t igmp_remove_group(struct netif *netif, struct igmp_group *group); +static void igmp_timeout(struct netif *netif, struct igmp_group *group); +static void igmp_start_timer(struct igmp_group *group, u8_t max_time); +static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp); +static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif); +static void igmp_send(struct netif *netif, struct igmp_group *group, u8_t type); + +static ip4_addr_t allsystems; +static ip4_addr_t allrouters; + +/** + * Initialize the IGMP module + */ +void +igmp_init(void) +{ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_init: initializing\n")); + + IP4_ADDR(&allsystems, 224, 0, 0, 1); + IP4_ADDR(&allrouters, 224, 0, 0, 2); +} + +/** + * Start IGMP processing on interface + * + * @param netif network interface on which start IGMP processing + */ +err_t +igmp_start(struct netif *netif) +{ + struct igmp_group *group; + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: starting IGMP processing on if %p\n", (void *)netif)); + + group = igmp_lookup_group(netif, &allsystems); + + if (group != NULL) { + group->group_state = IGMP_GROUP_IDLE_MEMBER; + group->use++; + + /* Allow the igmp messages at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: igmp_mac_filter(ADD ")); + ip4_addr_debug_print_val(IGMP_DEBUG, allsystems); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, &allsystems, NETIF_ADD_MAC_FILTER); + } + + return ERR_OK; + } + + return ERR_MEM; +} + +/** + * Stop IGMP processing on interface + * + * @param netif network interface on which stop IGMP processing + */ +err_t +igmp_stop(struct netif *netif) +{ + struct igmp_group *group = netif_igmp_data(netif); + + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, NULL); + + while (group != NULL) { + struct igmp_group *next = group->next; /* avoid use-after-free below */ + + /* disable the group at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_stop: igmp_mac_filter(DEL ")); + ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); + } + + /* free group */ + memp_free(MEMP_IGMP_GROUP, group); + + /* move to "next" */ + group = next; + } + return ERR_OK; +} + +/** + * Report IGMP memberships for this interface + * + * @param netif network interface on which report IGMP memberships + */ +void +igmp_report_groups(struct netif *netif) +{ + struct igmp_group *group = netif_igmp_data(netif); + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_report_groups: sending IGMP reports on if %p\n", (void *)netif)); + + /* Skip 
the first group in the list, it is always the allsystems group added in igmp_start() */ + if (group != NULL) { + group = group->next; + } + + while (group != NULL) { + igmp_delaying_member(group, IGMP_JOIN_DELAYING_MEMBER_TMR); + group = group->next; + } +} + +/** + * Search for a group in the netif's igmp group list + * + * @param ifp the network interface for which to look + * @param addr the group ip address to search for + * @return a struct igmp_group* if the group has been found, + * NULL if the group wasn't found. + */ +struct igmp_group * +igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr) +{ + struct igmp_group *group = netif_igmp_data(ifp); + + while (group != NULL) { + if (ip4_addr_cmp(&(group->group_address), addr)) { + return group; + } + group = group->next; + } + + /* to be clearer, we return NULL here instead of + * 'group' (which is also NULL at this point). + */ + return NULL; +} + +/** + * Search for a specific igmp group and create a new one if not found- + * + * @param ifp the network interface for which to look + * @param addr the group ip address to search + * @return a struct igmp_group*, + * NULL on memory error. + */ +static struct igmp_group * +igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr) +{ + struct igmp_group *group; + struct igmp_group *list_head = netif_igmp_data(ifp); + + /* Search if the group already exists */ + group = igmp_lookfor_group(ifp, addr); + if (group != NULL) { + /* Group already exists. */ + return group; + } + + /* Group doesn't exist yet, create a new one */ + group = (struct igmp_group *)memp_malloc(MEMP_IGMP_GROUP); + if (group != NULL) { + ip4_addr_set(&(group->group_address), addr); + group->timer = 0; /* Not running */ + group->group_state = IGMP_GROUP_NON_MEMBER; + group->last_reporter_flag = 0; + group->use = 0; + + /* Ensure allsystems group is always first in list */ + if (list_head == NULL) { + /* this is the first entry in linked list */ + LWIP_ASSERT("igmp_lookup_group: first group must be allsystems", + (ip4_addr_cmp(addr, &allsystems) != 0)); + group->next = NULL; + netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, group); + } else { + /* append _after_ first entry */ + LWIP_ASSERT("igmp_lookup_group: all except first group must not be allsystems", + (ip4_addr_cmp(addr, &allsystems) == 0)); + group->next = list_head->next; + list_head->next = group; + } + } + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_lookup_group: %sallocated a new group with address ", (group ? "" : "impossible to "))); + ip4_addr_debug_print(IGMP_DEBUG, addr); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)ifp)); + + return group; +} + +/** + * Remove a group from netif's igmp group list, but don't free it yet + * + * @param group the group to remove from the netif's igmp group list + * @return ERR_OK if group was removed from the list, an err_t otherwise + */ +static err_t +igmp_remove_group(struct netif *netif, struct igmp_group *group) +{ + err_t err = ERR_OK; + struct igmp_group *tmp_group; + + /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + for (tmp_group = netif_igmp_data(netif); tmp_group != NULL; tmp_group = tmp_group->next) { + if (tmp_group->next == group) { + tmp_group->next = group->next; + break; + } + } + /* Group not found in netif's igmp group list */ + if (tmp_group == NULL) { + err = ERR_ARG; + } + + return err; +} + +/** + * Called from ip_input() if a new IGMP packet is received. 
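For illustration only (not part of this file), the lookup helper above lets application code test whether a netif already holds a membership for a given group:

#include "lwip/igmp.h"
#include "lwip/netif.h"

static int example_is_member(struct netif *netif, const ip4_addr_t *group)
{
  /* igmp_lookfor_group() returns NULL when no matching entry is on the netif's list */
  return igmp_lookfor_group(netif, group) != NULL;
}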
+ * + * @param p received igmp packet, p->payload pointing to the igmp header + * @param inp network interface on which the packet was received + * @param dest destination ip address of the igmp packet + */ +void +igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest) +{ + struct igmp_msg *igmp; + struct igmp_group *group; + struct igmp_group *groupref; + + IGMP_STATS_INC(igmp.recv); + + /* Note that the length CAN be greater than 8 but only 8 are used - All are included in the checksum */ + if (p->len < IGMP_MINLEN) { + pbuf_free(p); + IGMP_STATS_INC(igmp.lenerr); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: length error\n")); + return; + } + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: message from ")); + ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->src); + LWIP_DEBUGF(IGMP_DEBUG, (" to address ")); + ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->dest); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)inp)); + + /* Now calculate and check the checksum */ + igmp = (struct igmp_msg *)p->payload; + if (inet_chksum(igmp, p->len)) { + pbuf_free(p); + IGMP_STATS_INC(igmp.chkerr); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: checksum error\n")); + return; + } + + /* Packet is ok so find an existing group */ + group = igmp_lookfor_group(inp, dest); /* use the destination IP address of incoming packet */ + + /* If group can be found or create... */ + if (!group) { + pbuf_free(p); + IGMP_STATS_INC(igmp.drop); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP frame not for us\n")); + return; + } + + /* NOW ACT ON THE INCOMING MESSAGE TYPE... */ + switch (igmp->igmp_msgtype) { + case IGMP_MEMB_QUERY: + /* IGMP_MEMB_QUERY to the "all systems" address ? */ + if ((ip4_addr_cmp(dest, &allsystems)) && ip4_addr_isany(&igmp->igmp_group_address)) { + /* THIS IS THE GENERAL QUERY */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: General IGMP_MEMB_QUERY on \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + + if (igmp->igmp_maxresp == 0) { + IGMP_STATS_INC(igmp.rx_v1); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: got an all hosts query with time== 0 - this is V1 and not implemented - treat as v2\n")); + igmp->igmp_maxresp = IGMP_V1_DELAYING_MEMBER_TMR; + } else { + IGMP_STATS_INC(igmp.rx_general); + } + + groupref = netif_igmp_data(inp); + + /* Do not send messages on the all systems group address! */ + /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + if (groupref != NULL) { + groupref = groupref->next; + } + + while (groupref) { + igmp_delaying_member(groupref, igmp->igmp_maxresp); + groupref = groupref->next; + } + } else { + /* IGMP_MEMB_QUERY to a specific group ? 
*/ + if (!ip4_addr_isany(&igmp->igmp_group_address)) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_MEMB_QUERY to a specific group ")); + ip4_addr_debug_print_val(IGMP_DEBUG, igmp->igmp_group_address); + if (ip4_addr_cmp(dest, &allsystems)) { + ip4_addr_t groupaddr; + LWIP_DEBUGF(IGMP_DEBUG, (" using \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + /* we first need to re-look for the group since we used dest last time */ + ip4_addr_copy(groupaddr, igmp->igmp_group_address); + group = igmp_lookfor_group(inp, &groupaddr); + } else { + LWIP_DEBUGF(IGMP_DEBUG, (" with the group address as destination [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + } + + if (group != NULL) { + IGMP_STATS_INC(igmp.rx_group); + igmp_delaying_member(group, igmp->igmp_maxresp); + } else { + IGMP_STATS_INC(igmp.drop); + } + } else { + IGMP_STATS_INC(igmp.proterr); + } + } + break; + case IGMP_V2_MEMB_REPORT: + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_V2_MEMB_REPORT\n")); + IGMP_STATS_INC(igmp.rx_report); + if (group->group_state == IGMP_GROUP_DELAYING_MEMBER) { + /* This is on a specific group we have already looked up */ + group->timer = 0; /* stopped */ + group->group_state = IGMP_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + break; + default: + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: unexpected msg %d in state %d on group %p on if %p\n", + igmp->igmp_msgtype, group->group_state, (void *)&group, (void *)inp)); + IGMP_STATS_INC(igmp.proterr); + break; + } + + pbuf_free(p); + return; +} + +/** + * @ingroup igmp + * Join a group on one network interface. + * + * @param ifaddr ip address of the network interface which should join a new group + * @param groupaddr the ip address of the group which to join + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_joingroup: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_joingroup: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we join this interface ? */ + if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { + err = igmp_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Return an error even if some network interfaces are joined */ + /** @todo undo any other netif already joined */ + return err; + } + } + } + + return err; +} + +/** + * @ingroup igmp + * Join a group on one network interface. 
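A minimal join sketch for the interface-address variant defined above (group address hypothetical; must be called from the tcpip thread or with the core locked):

#include "lwip/igmp.h"
#include "lwip/ip4_addr.h"

static void example_join(void)
{
  ip4_addr_t group;

  IP4_ADDR(&group, 239, 0, 0, 42);                 /* hypothetical multicast group */
  /* IP4_ADDR_ANY4 means: join on every netif that has NETIF_FLAG_IGMP set */
  if (igmp_joingroup(IP4_ADDR_ANY4, &group) != ERR_OK) {
    /* no IGMP-capable interface matched, or MEMP_IGMP_GROUP is exhausted */
  }
}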
+ * + * @param netif the network interface which should join a new group + * @param groupaddr the ip address of the group which to join + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_joingroup_netif: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_joingroup_netif: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_joingroup_netif: attempt to join on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group or create a new one if not found */ + group = igmp_lookup_group(netif, groupaddr); + + if (group != NULL) { + /* This should create a new group, check the state to make sure */ + if (group->group_state != IGMP_GROUP_NON_MEMBER) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to group not in state IGMP_GROUP_NON_MEMBER\n")); + } else { + /* OK - it was new group */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to new group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If first use of the group, allow the group at the MAC level */ + if ((group->use == 0) && (netif->igmp_mac_filter != NULL)) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: igmp_mac_filter(ADD ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + IGMP_STATS_INC(igmp.tx_join); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + + igmp_start_timer(group, IGMP_JOIN_DELAYING_MEMBER_TMR); + + /* Need to work out where this timer comes from */ + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } + /* Increment group use */ + group->use++; + /* Join on this interface */ + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: Not enough memory to join to group\n")); + return ERR_MEM; + } +} + +/** + * @ingroup igmp + * Leave a group on one network interface. + * + * @param ifaddr ip address of the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we leave this interface ? */ + if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { + err_t res = igmp_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + } + + return err; +} + +/** + * @ingroup igmp + * Leave a group on one network interface. 
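The matching leave call, again as a hedged sketch around the function defined above:

#include "lwip/igmp.h"

static void example_leave(const ip4_addr_t *group)
{
  /* Counterpart to the join sketch earlier: an IGMP leave message is only
   * transmitted if this host was the last one to report membership. */
  igmp_leavegroup(IP4_ADDR_ANY4, group);
}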
+ * + * @param netif the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group */ + group = igmp_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Only send a leave if the flag is set according to the state diagram */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: Leaving group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + igmp_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: sending leaving group\n")); + IGMP_STATS_INC(igmp.tx_leave); + igmp_send(netif, group, IGMP_LEAVE_GROUP); + } + + /* Disable the group at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: igmp_mac_filter(DEL ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* Free group struct */ + memp_free(MEMP_IGMP_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: not member of group\n")); + return ERR_VAL; + } +} + +/** + * The igmp timer function (both for NO_SYS=1 and =0) + * Should be called every IGMP_TMR_INTERVAL milliseconds (100 ms is default). + */ +void +igmp_tmr(void) +{ + struct netif *netif; + + NETIF_FOREACH(netif) { + struct igmp_group *group = netif_igmp_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + igmp_timeout(netif, group); + } + } + group = group->next; + } + } +} + +/** + * Called if a timeout for one group is reached. + * Sends a report for this group. 
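A sketch of driving the periodic timer described above from a hypothetical bare-metal poll loop; with the regular port, lwIP's own timeouts module calls igmp_tmr() and this is not needed:

#include "lwip/igmp.h"
#include "lwip/sys.h"

void example_poll_igmp(void)
{
  static u32_t last_tick;
  u32_t now = sys_now();

  if ((u32_t)(now - last_tick) >= IGMP_TMR_INTERVAL) {   /* 100 ms by default */
    last_tick = now;
    igmp_tmr();   /* decrements the per-group timers; may trigger reports */
  }
}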
+ * + * @param group an igmp_group for which a timeout is reached + */ +static void +igmp_timeout(struct netif *netif, struct igmp_group *group) +{ + /* If the state is IGMP_GROUP_DELAYING_MEMBER then we send a report for this group + (unless it is the allsystems group) */ + if ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + (!(ip4_addr_cmp(&(group->group_address), &allsystems)))) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_timeout: report membership for group with address ")); + ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)netif)); + + group->group_state = IGMP_GROUP_IDLE_MEMBER; + + IGMP_STATS_INC(igmp.tx_report); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + } +} + +/** + * Start a timer for an igmp group + * + * @param group the igmp_group for which to start a timer + * @param max_time the time in multiples of IGMP_TMR_INTERVAL (decrease with + * every call to igmp_tmr()) + */ +static void +igmp_start_timer(struct igmp_group *group, u8_t max_time) +{ +#ifdef LWIP_RAND + group->timer = (u16_t)(max_time > 2 ? (LWIP_RAND() % max_time) : 1); +#else /* LWIP_RAND */ + /* ATTENTION: use this only if absolutely necessary! */ + group->timer = max_time / 2; +#endif /* LWIP_RAND */ + + if (group->timer == 0) { + group->timer = 1; + } +} + +/** + * Delaying membership report for a group if necessary + * + * @param group the igmp_group for which "delaying" membership report + * @param maxresp query delay + */ +static void +igmp_delaying_member(struct igmp_group *group, u8_t maxresp) +{ + if ((group->group_state == IGMP_GROUP_IDLE_MEMBER) || + ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + igmp_start_timer(group, maxresp); + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } +} + + +/** + * Sends an IP packet on a network interface. This function constructs the IP header + * and calculates the IP header checksum. If the source IP address is NULL, + * the IP address of the outgoing network interface is filled in as source address. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + */ +static err_t +igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif) +{ + /* This is the "router alert" option */ + u16_t ra[2]; + ra[0] = PP_HTONS(ROUTER_ALERT); + ra[1] = 0x0000; /* Router shall examine packet */ + IGMP_STATS_INC(igmp.xmit); + return ip4_output_if_opt(p, src, dest, IGMP_TTL, 0, IP_PROTO_IGMP, netif, ra, ROUTER_ALERTLEN); +} + +/** + * Send an igmp packet to a specific group. 
+ * + * @param group the group to which to send the packet + * @param type the type of igmp packet to send + */ +static void +igmp_send(struct netif *netif, struct igmp_group *group, u8_t type) +{ + struct pbuf *p = NULL; + struct igmp_msg *igmp = NULL; + ip4_addr_t src = *IP4_ADDR_ANY4; + ip4_addr_t *dest = NULL; + + /* IP header + "router alert" option + IGMP header */ + p = pbuf_alloc(PBUF_TRANSPORT, IGMP_MINLEN, PBUF_RAM); + + if (p) { + igmp = (struct igmp_msg *)p->payload; + LWIP_ASSERT("igmp_send: check that first pbuf can hold struct igmp_msg", + (p->len >= sizeof(struct igmp_msg))); + ip4_addr_copy(src, *netif_ip4_addr(netif)); + + if (type == IGMP_V2_MEMB_REPORT) { + dest = &(group->group_address); + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + group->last_reporter_flag = 1; /* Remember we were the last to report */ + } else { + if (type == IGMP_LEAVE_GROUP) { + dest = &allrouters; + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + } + } + + if ((type == IGMP_V2_MEMB_REPORT) || (type == IGMP_LEAVE_GROUP)) { + igmp->igmp_msgtype = type; + igmp->igmp_maxresp = 0; + igmp->igmp_checksum = 0; + igmp->igmp_checksum = inet_chksum(igmp, IGMP_MINLEN); + + igmp_ip_output_if(p, &src, dest, netif); + } + + pbuf_free(p); + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_send: not enough memory for igmp_send\n")); + IGMP_STATS_INC(igmp.memerr); + } +} + +#endif /* LWIP_IPV4 && LWIP_IGMP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c new file mode 100644 index 000000000..26c26a91a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c @@ -0,0 +1,1132 @@ +/** + * @file + * This is the IPv4 layer implementation for incoming and outgoing IP traffic. + * + * @see ip_frag.c + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/ip4_frag.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp.h" +#include "lwip/igmp.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/autoip.h" +#include "lwip/stats.h" +#include "lwip/prot/iana.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Set this to 0 in the rare case of wanting to call an extra function to + * generate the IP checksum (in contrast to calculating it on-the-fly). */ +#ifndef LWIP_INLINE_IP_CHKSUM +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define LWIP_INLINE_IP_CHKSUM 0 +#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#define LWIP_INLINE_IP_CHKSUM 1 +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#endif + +#if LWIP_INLINE_IP_CHKSUM && CHECKSUM_GEN_IP +#define CHECKSUM_GEN_IP_INLINE 1 +#else +#define CHECKSUM_GEN_IP_INLINE 0 +#endif + +#if LWIP_DHCP || defined(LWIP_IP_ACCEPT_UDP_PORT) +#define IP_ACCEPT_LINK_LAYER_ADDRESSING 1 + +/** Some defines for DHCP to let link-layer-addressed packets through while the + * netif is down. + * To use this in your own application/protocol, define LWIP_IP_ACCEPT_UDP_PORT(port) + * to return 1 if the port is accepted and 0 if the port is not accepted. + */ +#if LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) +/* accept DHCP client port and custom port */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT)) \ + || (LWIP_IP_ACCEPT_UDP_PORT(port))) +#elif defined(LWIP_IP_ACCEPT_UDP_PORT) /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ +/* accept custom port only */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (LWIP_IP_ACCEPT_UDP_PORT(port)) +#else /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ +/* accept DHCP client port only */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) ((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT)) +#endif /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ + +#else /* LWIP_DHCP */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSING 0 +#endif /* LWIP_DHCP */ + +/** The IP header ID of the next outgoing IP packet */ +static u16_t ip_id; + +#if LWIP_MULTICAST_TX_OPTIONS +/** The default netif used for multicast */ +static struct netif *ip4_default_multicast_netif; + +/** + * @ingroup ip4 + * Set a default netif for IPv4 multicast. */ +void +ip4_set_default_multicast_netif(struct netif *default_multicast_netif) +{ + ip4_default_multicast_netif = default_multicast_netif; +} +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +/** + * Source based IPv4 routing must be fully implemented in + * LWIP_HOOK_IP4_ROUTE_SRC(). This function only provides the parameters. + */ +struct netif * +ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest) +{ + if (src != NULL) { + /* when src==NULL, the hook is called from ip4_route(dest) */ + struct netif *netif = LWIP_HOOK_IP4_ROUTE_SRC(src, dest); + if (netif != NULL) { + return netif; + } + } + return ip4_route(dest); +} +#endif /* LWIP_HOOK_IP4_ROUTE_SRC */ + +/** + * Finds the appropriate network interface for a given IP address. It + * searches the list of network interfaces linearly. A match is found + * if the masked IP address of the network interface equals the masked + * IP address given to the function. 
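+ * For example, a netif configured as 192.168.1.1 with netmask 255.255.255.0
+ * is selected for any destination 192.168.1.x; when no netif matches, the
+ * default netif is used if it is up, linked and configured. A minimal
+ * caller-side sketch (variable names are illustrative, not from this file):
+ *
+ *   ip4_addr_t dst;
+ *   IP4_ADDR(&dst, 192, 168, 1, 20);
+ *   struct netif *out = ip4_route(&dst);
+ *
+ * where out is NULL when no usable route exists.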
+ * + * @param dest the destination IP address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip4_route(const ip4_addr_t *dest) +{ +#if !LWIP_SINGLE_NETIF + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_MULTICAST_TX_OPTIONS + /* Use administratively selected interface for multicast by default */ + if (ip4_addr_ismulticast(dest) && ip4_default_multicast_netif) { + return ip4_default_multicast_netif; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + /* bug #54569: in case LWIP_SINGLE_NETIF=1 and LWIP_DEBUGF() disabled, the following loop is optimized away */ + LWIP_UNUSED_ARG(dest); + + /* iterate through netifs */ + NETIF_FOREACH(netif) { + /* is the netif up, does it have a link and a valid address? */ + if (netif_is_up(netif) && netif_is_link_up(netif) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* network mask matches? */ + if (ip4_addr_netcmp(dest, netif_ip4_addr(netif), netif_ip4_netmask(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + /* gateway matches on a non broadcast interface? (i.e. peer in a point to point interface) */ + if (((netif->flags & NETIF_FLAG_BROADCAST) == 0) && ip4_addr_cmp(dest, netif_ip4_gw(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, looopback traffic is passed through any netif */ + if (ip4_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + netif = LWIP_HOOK_IP4_ROUTE_SRC(NULL, dest); + if (netif != NULL) { + return netif; + } +#elif defined(LWIP_HOOK_IP4_ROUTE) + netif = LWIP_HOOK_IP4_ROUTE(dest); + if (netif != NULL) { + return netif; + } +#endif +#endif /* !LWIP_SINGLE_NETIF */ + + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default) || + ip4_addr_isany_val(*netif_ip4_addr(netif_default)) || ip4_addr_isloopback(dest)) { + /* No matching netif found and default netif is not usable. + If this is not good enough for you, use LWIP_HOOK_IP4_ROUTE() */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_route: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + MIB2_STATS_INC(mib2.ipoutnoroutes); + return NULL; + } + + return netif_default; +} + +#if IP_FORWARD +/** + * Determine whether an IP address is in a reserved set of addresses + * that may not be forwarded, or whether datagrams to that destination + * may be forwarded. 
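+ * Link-layer broadcasts and multicasts, class E (experimental) addresses and
+ * datagrams for the zero or loopback class A networks are never forwarded.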
+ * @param p the packet to forward + * @return 1: can forward 0: discard + */ +static int +ip4_canforward(struct pbuf *p) +{ + u32_t addr = lwip_htonl(ip4_addr_get_u32(ip4_current_dest_addr())); + +#ifdef LWIP_HOOK_IP4_CANFORWARD + int ret = LWIP_HOOK_IP4_CANFORWARD(p, addr); + if (ret >= 0) { + return ret; + } +#endif /* LWIP_HOOK_IP4_CANFORWARD */ + + if (p->flags & PBUF_FLAG_LLBCAST) { + /* don't route link-layer broadcasts */ + return 0; + } + if ((p->flags & PBUF_FLAG_LLMCAST) || IP_MULTICAST(addr)) { + /* don't route link-layer multicasts (use LWIP_HOOK_IP4_CANFORWARD instead) */ + return 0; + } + if (IP_EXPERIMENTAL(addr)) { + return 0; + } + if (IP_CLASSA(addr)) { + u32_t net = addr & IP_CLASSA_NET; + if ((net == 0) || (net == ((u32_t)IP_LOOPBACKNET << IP_CLASSA_NSHIFT))) { + /* don't route loopback packets */ + return 0; + } + } + return 1; +} + +/** + * Forwards an IP packet. It finds an appropriate route for the + * packet, decrements the TTL value of the packet, adjusts the + * checksum and outputs the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IP header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip4_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + PERF_START; + LWIP_UNUSED_ARG(inp); + + if (!ip4_canforward(p)) { + goto return_noroute; + } + + /* RFC3927 2.7: do not forward link-local addresses */ + if (ip4_addr_islinklocal(ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not forwarding LLA %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + goto return_noroute; + } + + /* Find network interface where to forward this IP packet to. */ + netif = ip4_route_src(ip4_current_src_addr(), ip4_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: no forwarding route for %"U16_F".%"U16_F".%"U16_F".%"U16_F" found\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + /* @todo: send ICMP_DUR_NET? */ + goto return_noroute; + } +#if !IP_FORWARD_ALLOW_TX_ON_RX_NETIF + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not bouncing packets back on incoming interface.\n")); + goto return_noroute; + } +#endif /* IP_FORWARD_ALLOW_TX_ON_RX_NETIF */ + + /* decrement TTL */ + IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1); + /* send ICMP if TTL == 0 */ + if (IPH_TTL(iphdr) == 0) { + MIB2_STATS_INC(mib2.ipinhdrerrors); +#if LWIP_ICMP + /* Don't send ICMP messages in response to ICMP messages */ + if (IPH_PROTO(iphdr) != IP_PROTO_ICMP) { + icmp_time_exceeded(p, ICMP_TE_TTL); + } +#endif /* LWIP_ICMP */ + return; + } + + /* Incrementally update the IP checksum. 
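+     Decrementing the TTL (the high byte of the TTL/protocol 16-bit word) is
+     compensated by adding 0x0100 to the checksum and folding the end-around
+     carry when the 16-bit sum would overflow, per the incremental update rule
+     of RFC 1624.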
*/ + if (IPH_CHKSUM(iphdr) >= PP_HTONS(0xffffU - 0x100)) { + IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100) + 1)); + } else { + IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100))); + } + + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: forwarding packet to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + + IP_STATS_INC(ip.fw); + MIB2_STATS_INC(mib2.ipforwdatagrams); + IP_STATS_INC(ip.xmit); + + PERF_STOP("ip4_forward"); + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + if ((IPH_OFFSET(iphdr) & PP_NTOHS(IP_DF)) == 0) { +#if IP_FRAG + ip4_frag(p, netif, ip4_current_dest_addr()); +#else /* IP_FRAG */ + /* @todo: send ICMP Destination Unreachable code 13 "Communication administratively prohibited"? */ +#endif /* IP_FRAG */ + } else { +#if LWIP_ICMP + /* send ICMP Destination Unreachable code 4: "Fragmentation Needed and DF Set" */ + icmp_dest_unreach(p, ICMP_DUR_FRAG); +#endif /* LWIP_ICMP */ + } + return; + } + /* transmit pbuf on chosen interface */ + netif->output(netif, p, ip4_current_dest_addr()); + return; +return_noroute: + MIB2_STATS_INC(mib2.ipoutnoroutes); +} +#endif /* IP_FORWARD */ + +/** Return true if the current input packet should be accepted on this netif */ +static int +ip4_input_accept(struct netif *netif) +{ + LWIP_DEBUGF(IP_DEBUG, ("ip_input: iphdr->dest 0x%"X32_F" netif->ip_addr 0x%"X32_F" (0x%"X32_F", 0x%"X32_F", 0x%"X32_F")\n", + ip4_addr_get_u32(ip4_current_dest_addr()), ip4_addr_get_u32(netif_ip4_addr(netif)), + ip4_addr_get_u32(ip4_current_dest_addr()) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(netif_ip4_addr(netif)) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(ip4_current_dest_addr()) & ~ip4_addr_get_u32(netif_ip4_netmask(netif)))); + + /* interface is up and configured? */ + if ((netif_is_up(netif)) && (!ip4_addr_isany_val(*netif_ip4_addr(netif)))) { + /* unicast to this interface address? */ + if (ip4_addr_cmp(ip4_current_dest_addr(), netif_ip4_addr(netif)) || + /* or broadcast on this interface network address? */ + ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + || (ip4_addr_get_u32(ip4_current_dest_addr()) == PP_HTONL(IPADDR_LOOPBACK)) +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + ) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* accept on this netif */ + return 1; + } +#if LWIP_AUTOIP + /* connections to link-local addresses must persist after changing + the netif's address (RFC3927 ch. 1.9) */ + if (autoip_accept_packet(netif, ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: LLA packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* accept on this netif */ + return 1; + } +#endif /* LWIP_AUTOIP */ + } + return 0; +} + +/** + * This function is called by the network interface device driver when + * an IP packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip_forward). The IP checksum is always checked. + * + * Finally, the packet is sent to the upper layer protocol input function. 
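+ * In a typical configuration this function is not called by application code;
+ * it is reached through the netif input callback (ip_input() or
+ * ethernet_input()) registered during driver/netif setup.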
+ * + * @param p the received IP packet (p->payload points to IP header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip4_input(struct pbuf *p, struct netif *inp) +{ + const struct ip_hdr *iphdr; + struct netif *netif; + u16_t iphdr_hlen; + u16_t iphdr_len; +#if IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP + int check_ip_src = 1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP */ +#if LWIP_RAW + raw_input_state_t raw_status; +#endif /* LWIP_RAW */ + + LWIP_ASSERT_CORE_LOCKED(); + + IP_STATS_INC(ip.recv); + MIB2_STATS_INC(mib2.ipinreceives); + + /* identify the IP header */ + iphdr = (struct ip_hdr *)p->payload; + if (IPH_V(iphdr) != 4) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IP packet dropped due to bad version number %"U16_F"\n", (u16_t)IPH_V(iphdr))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.err); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP4_INPUT + if (LWIP_HOOK_IP4_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* obtain IP header length in bytes */ + iphdr_hlen = IPH_HL_BYTES(iphdr); + /* obtain ip length in bytes */ + iphdr_len = lwip_ntohs(IPH_LEN(iphdr)); + + /* Trim pbuf. This is especially required for packets < 60 bytes. */ + if (iphdr_len < p->tot_len) { + pbuf_realloc(p, iphdr_len); + } + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ + if ((iphdr_hlen > p->len) || (iphdr_len > p->tot_len) || (iphdr_hlen < IP_HLEN)) { + if (iphdr_hlen < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("ip4_input: short IP header (%"U16_F" bytes) received, IP packet dropped\n", iphdr_hlen)); + } + if (iphdr_hlen > p->len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_hlen, p->len)); + } + if (iphdr_len > p->tot_len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP (len %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_len, p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.lenerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + + /* verify checksum */ +#if CHECKSUM_CHECK_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_IP) { + if (inet_chksum(iphdr, iphdr_hlen) != 0) { + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("Checksum (0x%"X16_F") failed, IP packet dropped.\n", inet_chksum(iphdr, iphdr_hlen))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.chkerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + } +#endif + + /* copy IP addresses to aligned ip_addr_t */ + ip_addr_copy_from_ip4(ip_data.current_iphdr_dest, iphdr->dest); + ip_addr_copy_from_ip4(ip_data.current_iphdr_src, iphdr->src); + + /* match packet against an interface, i.e. is this packet for us? 
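+     Multicast destinations are only accepted if the input netif has joined the
+     group (or, without IGMP, if it is up and configured); unicast and broadcast
+     destinations are matched against the input netif first and then against the
+     other configured netifs.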
*/ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_IGMP + if ((inp->flags & NETIF_FLAG_IGMP) && (igmp_lookfor_group(inp, ip4_current_dest_addr()))) { + /* IGMP snooping switches need 0.0.0.0 to be allowed as source address (RFC 4541) */ + ip4_addr_t allsystems; + IP4_ADDR(&allsystems, 224, 0, 0, 1); + if (ip4_addr_cmp(ip4_current_dest_addr(), &allsystems) && + ip4_addr_isany(ip4_current_src_addr())) { + check_ip_src = 0; + } + netif = inp; + } else { + netif = NULL; + } +#else /* LWIP_IGMP */ + if ((netif_is_up(inp)) && (!ip4_addr_isany_val(*netif_ip4_addr(inp)))) { + netif = inp; + } else { + netif = NULL; + } +#endif /* LWIP_IGMP */ + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. */ + if (ip4_input_accept(inp)) { + netif = inp; + } else { + netif = NULL; +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* Packets sent to the loopback address must not be accepted on an + * interface that does not have the loopback address assigned to it, + * unless a non-loopback interface is used for loopback traffic. */ + if (!ip4_addr_isloopback(ip4_current_dest_addr())) +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ + { +#if !LWIP_SINGLE_NETIF + NETIF_FOREACH(netif) { + if (netif == inp) { + /* we checked that before already */ + continue; + } + if (ip4_input_accept(netif)) { + break; + } + } +#endif /* !LWIP_SINGLE_NETIF */ + } + } + } + +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* Pass DHCP messages regardless of destination address. DHCP traffic is addressed + * using link layer addressing (such as Ethernet MAC) so we must not filter on IP. + * According to RFC 1542 section 3.1.1, referred by RFC 2131). + * + * If you want to accept private broadcast communication while a netif is down, + * define LWIP_IP_ACCEPT_UDP_PORT(dst_port), e.g.: + * + * #define LWIP_IP_ACCEPT_UDP_PORT(dst_port) ((dst_port) == PP_NTOHS(12345)) + */ + if (netif == NULL) { + /* remote port is DHCP server? */ + if (IPH_PROTO(iphdr) == IP_PROTO_UDP) { + const struct udp_hdr *udphdr = (const struct udp_hdr *)((const u8_t *)iphdr + iphdr_hlen); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: UDP packet to DHCP client port %"U16_F"\n", + lwip_ntohs(udphdr->dest))); + if (IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(udphdr->dest)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: DHCP packet accepted.\n")); + netif = inp; + check_ip_src = 0; + } + } + } +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + + /* broadcast or multicast packet source address? Compliant with RFC 1122: 3.2.1.3 */ +#if LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING + if (check_ip_src +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* DHCP servers need 0.0.0.0 to be allowed as source address (RFC 1.1.2.2: 3.2.1.3/a) */ + && !ip4_addr_isany_val(*ip4_current_src_addr()) +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + ) +#endif /* LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING */ + { + if ((ip4_addr_isbroadcast(ip4_current_src_addr(), inp)) || + (ip4_addr_ismulticast(ip4_current_src_addr()))) { + /* packet source is not valid */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ip4_input: packet source is not valid.\n")); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + } + + /* packet not for us? 
*/ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: packet not for us.\n")); +#if IP_FORWARD + /* non-broadcast packet? */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), inp)) { + /* try to forward IP packet on (other) interfaces */ + ip4_forward(p, (struct ip_hdr *)p->payload, inp); + } else +#endif /* IP_FORWARD */ + { + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + } + pbuf_free(p); + return ERR_OK; + } + /* packet consists of multiple fragments? */ + if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0) { +#if IP_REASSEMBLY /* packet fragment reassembly code present? */ + LWIP_DEBUGF(IP_DEBUG, ("IP packet is a fragment (id=0x%04"X16_F" tot_len=%"U16_F" len=%"U16_F" MF=%"U16_F" offset=%"U16_F"), calling ip4_reass()\n", + lwip_ntohs(IPH_ID(iphdr)), p->tot_len, lwip_ntohs(IPH_LEN(iphdr)), (u16_t)!!(IPH_OFFSET(iphdr) & PP_HTONS(IP_MF)), (u16_t)((lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK) * 8))); + /* reassemble the packet*/ + p = ip4_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + return ERR_OK; + } + iphdr = (const struct ip_hdr *)p->payload; +#else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */ + pbuf_free(p); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since it was fragmented (0x%"X16_F") (while IP_REASSEMBLY == 0).\n", + lwip_ntohs(IPH_OFFSET(iphdr)))); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; +#endif /* IP_REASSEMBLY */ + } + +#if IP_OPTIONS_ALLOWED == 0 /* no support for IP options in the IP header? */ + +#if LWIP_IGMP + /* there is an extra "router alert" option in IGMP messages which we allow for but do not police */ + if ((iphdr_hlen > IP_HLEN) && (IPH_PROTO(iphdr) != IP_PROTO_IGMP)) { +#else + if (iphdr_hlen > IP_HLEN) { +#endif /* LWIP_IGMP */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since there were IP options (while IP_OPTIONS_ALLOWED == 0).\n")); + pbuf_free(p); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; + } +#endif /* IP_OPTIONS_ALLOWED == 0 */ + + /* send to upper layers */ + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: \n")); + ip4_debug_print(p); + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_netif = netif; + ip_data.current_input_netif = inp; + ip_data.current_ip4_header = iphdr; + ip_data.current_ip_header_tot_len = IPH_HL_BYTES(iphdr); + +#if LWIP_RAW + /* raw input did not eat the packet? */ + raw_status = raw_input(p, inp); + if (raw_status != RAW_INPUT_EATEN) +#endif /* LWIP_RAW */ + { + pbuf_remove_header(p, iphdr_hlen); /* Move to payload, no check necessary. 
*/ + + switch (IPH_PROTO(iphdr)) { +#if LWIP_UDP + case IP_PROTO_UDP: +#if LWIP_UDPLITE + case IP_PROTO_UDPLITE: +#endif /* LWIP_UDPLITE */ + MIB2_STATS_INC(mib2.ipindelivers); + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP_PROTO_TCP: + MIB2_STATS_INC(mib2.ipindelivers); + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP + case IP_PROTO_ICMP: + MIB2_STATS_INC(mib2.ipindelivers); + icmp_input(p, inp); + break; +#endif /* LWIP_ICMP */ +#if LWIP_IGMP + case IP_PROTO_IGMP: + igmp_input(p, inp, ip4_current_dest_addr()); + break; +#endif /* LWIP_IGMP */ + default: +#if LWIP_RAW + if (raw_status == RAW_INPUT_DELIVERED) { + MIB2_STATS_INC(mib2.ipindelivers); + } else +#endif /* LWIP_RAW */ + { +#if LWIP_ICMP + /* send ICMP destination protocol unreachable unless is was a broadcast */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) && + !ip4_addr_ismulticast(ip4_current_dest_addr())) { + pbuf_header_force(p, (s16_t)iphdr_hlen); /* Move to ip header, no check necessary. */ + icmp_dest_unreach(p, ICMP_DUR_PROTO); + } +#endif /* LWIP_ICMP */ + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Unsupported transport protocol %"U16_F"\n", (u16_t)IPH_PROTO(iphdr))); + + IP_STATS_INC(ip.proterr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinunknownprotos); + } + pbuf_free(p); + break; + } + } + + /* @todo: this is not really necessary... */ + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip4_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip4_addr_set_any(ip4_current_src_addr()); + ip4_addr_set_any(ip4_current_dest_addr()); + + return ERR_OK; +} + +/** + * Sends an IP packet on a network interface. This function constructs + * the IP header and calculates the IP header checksum. If the source + * IP address is NULL, the IP address of the outgoing network + * interface is filled in as source address. + * If the destination IP address is LWIP_IP_HDRINCL, p is assumed to already + * include an IP header and p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + * + * @note ip_id: RFC791 "some host may be able to simply use + * unique identifiers independent of destination" + */ +err_t +ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if() but with the possibility to include IP options: + * + * @ param ip_options pointer to the IP options, copied into the IP header + * @ param optlen length of ip_options + */ +err_t +ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + const ip4_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (ip4_addr_isany(src)) { + src_used = netif_ip4_addr(netif); + } + } + +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src_used, dest, ttl, tos, proto, netif, + ip_options, optlen); +#else /* IP_OPTIONS_SEND */ + return ip4_output_if_src(p, src_used, dest, ttl, tos, proto, netif); +#endif /* IP_OPTIONS_SEND */ +} + +/** + * Same as ip_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if_opt() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + struct ip_hdr *iphdr; + ip4_addr_t dest_addr; +#if CHECKSUM_GEN_IP_INLINE + u32_t chk_sum = 0; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + MIB2_STATS_INC(mib2.ipoutrequests); + + /* Should the IP header be generated or is it already included in p? 
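+     A dest of LWIP_IP_HDRINCL means the caller has already built the IP header
+     and p->payload points to it; in that case only the destination address is
+     read back out of that header.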
*/ + if (dest != LWIP_IP_HDRINCL) { + u16_t ip_hlen = IP_HLEN; +#if IP_OPTIONS_SEND + u16_t optlen_aligned = 0; + if (optlen != 0) { +#if CHECKSUM_GEN_IP_INLINE + int i; +#endif /* CHECKSUM_GEN_IP_INLINE */ + if (optlen > (IP_HLEN_MAX - IP_HLEN)) { + /* optlen too long */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: optlen too long\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_VAL; + } + /* round up to a multiple of 4 */ + optlen_aligned = (u16_t)((optlen + 3) & ~3); + ip_hlen = (u16_t)(ip_hlen + optlen_aligned); + /* First write in the IP options */ + if (pbuf_add_header(p, optlen_aligned)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: not enough room for IP options in pbuf\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + MEMCPY(p->payload, ip_options, optlen); + if (optlen < optlen_aligned) { + /* zero the remaining bytes */ + memset(((char *)p->payload) + optlen, 0, (size_t)(optlen_aligned - optlen)); + } +#if CHECKSUM_GEN_IP_INLINE + for (i = 0; i < optlen_aligned / 2; i++) { + chk_sum += ((u16_t *)p->payload)[i]; + } +#endif /* CHECKSUM_GEN_IP_INLINE */ + } +#endif /* IP_OPTIONS_SEND */ + /* generate IP header */ + if (pbuf_add_header(p, IP_HLEN)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: not enough room for IP header in pbuf\n")); + + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + + iphdr = (struct ip_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip_hdr", + (p->len >= sizeof(struct ip_hdr))); + + IPH_TTL_SET(iphdr, ttl); + IPH_PROTO_SET(iphdr, proto); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(proto | (ttl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + + /* dest cannot be NULL here */ + ip4_addr_copy(iphdr->dest, *dest); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->dest) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->dest) >> 16; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + IPH_VHL_SET(iphdr, 4, ip_hlen / 4); + IPH_TOS_SET(iphdr, tos); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(tos | (iphdr->_v_hl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_LEN_SET(iphdr, lwip_htons(p->tot_len)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_len; +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_OFFSET_SET(iphdr, 0); + IPH_ID_SET(iphdr, lwip_htons(ip_id)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_id; +#endif /* CHECKSUM_GEN_IP_INLINE */ + ++ip_id; + + if (src == NULL) { + ip4_addr_copy(iphdr->src, *IP4_ADDR_ANY4); + } else { + /* src cannot be NULL here */ + ip4_addr_copy(iphdr->src, *src); + } + +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->src) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->src) >> 16; + chk_sum = (chk_sum >> 16) + (chk_sum & 0xFFFF); + chk_sum = (chk_sum >> 16) + chk_sum; + chk_sum = ~chk_sum; + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + iphdr->_chksum = (u16_t)chk_sum; /* network order */ + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + IPH_CHKSUM_SET(iphdr, 0); + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ +#else /* CHECKSUM_GEN_IP_INLINE */ + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, ip_hlen)); + } +#endif /* CHECKSUM_GEN_IP */ +#endif /* CHECKSUM_GEN_IP_INLINE */ + } else { + /* IP header already included in p */ + if (p->len < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | 
LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: LWIP_IP_HDRINCL but pbuf is too short\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(dest_addr, iphdr->dest); + dest = &dest_addr; + } + + IP_STATS_INC(ip.xmit); + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip4_debug_print(p); + +#if ENABLE_LOOPBACK + if (ip4_addr_cmp(dest, netif_ip4_addr(netif)) +#if !LWIP_HAVE_LOOPIF + || ip4_addr_isloopback(dest) +#endif /* !LWIP_HAVE_LOOPIF */ + ) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()")); + return netif_loop_output(netif, p); + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if IP_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + return ip4_frag(p, netif, dest); + } +#endif /* IP_FRAG */ + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: call netif->output()\n")); + return netif->output(netif, p, dest); +} + +/** + * Simple interface to ip_output_if. It finds the outgoing network + * interface and calls upon ip_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto) +{ + struct netif *netif; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(src, dest)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + return ip4_output_if(p, src, dest, ttl, tos, proto, netif); +} + +#if LWIP_NETIF_USE_HINTS +/** Like ip_output, but takes and addr_hint pointer that is passed on to netif->addr_hint + * before calling ip_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif_hint netif output hint pointer set to netif->hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint) +{ + struct netif *netif; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(src, dest)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + NETIF_SET_HINTS(netif, netif_hint); + err = ip4_output_if(p, src, dest, ttl, tos, proto, netif); + NETIF_RESET_HINTS(netif); + + return err; +} +#endif /* LWIP_NETIF_USE_HINTS*/ + +#if IP_DEBUG +/* Print an IP header by using LWIP_DEBUGF + * @param p an IP packet, p->payload pointing to the IP header + */ +void +ip4_debug_print(struct pbuf *p) +{ + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + + LWIP_DEBUGF(IP_DEBUG, ("IP header:\n")); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("|%2"S16_F" |%2"S16_F" | 0x%02"X16_F" | %5"U16_F" | (v, hl, tos, len)\n", + (u16_t)IPH_V(iphdr), + (u16_t)IPH_HL(iphdr), + (u16_t)IPH_TOS(iphdr), + lwip_ntohs(IPH_LEN(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %5"U16_F" |%"U16_F"%"U16_F"%"U16_F"| %4"U16_F" | (id, flags, offset)\n", + lwip_ntohs(IPH_ID(iphdr)), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 15 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 14 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 13 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | 0x%04"X16_F" | (ttl, proto, chksum)\n", + (u16_t)IPH_TTL(iphdr), + (u16_t)IPH_PROTO(iphdr), + lwip_ntohs(IPH_CHKSUM(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (src)\n", + ip4_addr1_16_val(iphdr->src), + ip4_addr2_16_val(iphdr->src), + ip4_addr3_16_val(iphdr->src), + ip4_addr4_16_val(iphdr->src))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (dest)\n", + ip4_addr1_16_val(iphdr->dest), + ip4_addr2_16_val(iphdr->dest), + ip4_addr3_16_val(iphdr->dest), + ip4_addr4_16_val(iphdr->dest))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP_DEBUG */ + +#endif /* LWIP_IPV4 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c new file mode 100644 index 000000000..33204d114 --- /dev/null +++ 
b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c @@ -0,0 +1,321 @@ +/** + * @file + * This is the IPv4 address tools implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +/* used by IP4_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */ +const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY); +const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST); + +/** + * Determine if an address is a broadcast address on a network interface + * + * @param addr address to be checked + * @param netif the network interface against which the address is checked + * @return returns non-zero if the address is a broadcast address + */ +u8_t +ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif) +{ + ip4_addr_t ipaddr; + ip4_addr_set_u32(&ipaddr, addr); + + /* all ones (broadcast) or all zeroes (old skool broadcast) */ + if ((~addr == IPADDR_ANY) || + (addr == IPADDR_ANY)) { + return 1; + /* no broadcast support on this network interface? */ + } else if ((netif->flags & NETIF_FLAG_BROADCAST) == 0) { + /* the given address cannot be a broadcast address + * nor can we check against any broadcast addresses */ + return 0; + /* address matches network interface address exactly? => no broadcast */ + } else if (addr == ip4_addr_get_u32(netif_ip4_addr(netif))) { + return 0; + /* on the same (sub) network... */ + } else if (ip4_addr_netcmp(&ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) + /* ...and host identifier bits are all ones? =>... */ + && ((addr & ~ip4_addr_get_u32(netif_ip4_netmask(netif))) == + (IPADDR_BROADCAST & ~ip4_addr_get_u32(netif_ip4_netmask(netif))))) { + /* => network broadcast address */ + return 1; + } else { + return 0; + } +} + +/** Checks if a netmask is valid (starting with ones, then only zeros) + * + * @param netmask the IPv4 netmask to check (in network byte order!) 
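+ *                e.g. 255.255.255.0 is a valid netmask, 255.0.255.0 is not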
+ * @return 1 if the netmask is valid, 0 if it is not + */ +u8_t +ip4_addr_netmask_valid(u32_t netmask) +{ + u32_t mask; + u32_t nm_hostorder = lwip_htonl(netmask); + + /* first, check for the first zero */ + for (mask = 1UL << 31 ; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) == 0) { + break; + } + } + /* then check that there is no one */ + for (; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) != 0) { + /* there is a one after the first zero -> invalid */ + return 0; + } + } + /* no one after the first zero -> valid */ + return 1; +} + +/** + * Ascii internet address interpretation routine. + * The value returned is in network order. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @return ip address in network order + */ +u32_t +ipaddr_addr(const char *cp) +{ + ip4_addr_t val; + + if (ip4addr_aton(cp, &val)) { + return ip4_addr_get_u32(&val); + } + return (IPADDR_NONE); +} + +/** + * Check whether "cp" is a valid ascii representation + * of an Internet address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * This replaces inet_addr, the return value from which + * cannot distinguish between failure and a local broadcast address. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip4addr_aton(const char *cp, ip4_addr_t *addr) +{ + u32_t val; + u8_t base; + char c; + u32_t parts[4]; + u32_t *pp = parts; + + c = *cp; + for (;;) { + /* + * Collect number up to ``.''. + * Values are specified as for C: + * 0x=hex, 0=octal, 1-9=decimal. + */ + if (!lwip_isdigit(c)) { + return 0; + } + val = 0; + base = 10; + if (c == '0') { + c = *++cp; + if (c == 'x' || c == 'X') { + base = 16; + c = *++cp; + } else { + base = 8; + } + } + for (;;) { + if (lwip_isdigit(c)) { + val = (val * base) + (u32_t)(c - '0'); + c = *++cp; + } else if (base == 16 && lwip_isxdigit(c)) { + val = (val << 4) | (u32_t)(c + 10 - (lwip_islower(c) ? 'a' : 'A')); + c = *++cp; + } else { + break; + } + } + if (c == '.') { + /* + * Internet format: + * a.b.c.d + * a.b.c (with c treated as 16 bits) + * a.b (with b treated as 24 bits) + */ + if (pp >= parts + 3) { + return 0; + } + *pp++ = val; + c = *++cp; + } else { + break; + } + } + /* + * Check for trailing characters. + */ + if (c != '\0' && !lwip_isspace(c)) { + return 0; + } + /* + * Concoct the address according to + * the number of parts specified. + */ + switch (pp - parts + 1) { + + case 0: + return 0; /* initial nondigit */ + + case 1: /* a -- 32 bits */ + break; + + case 2: /* a.b -- 8.24 bits */ + if (val > 0xffffffUL) { + return 0; + } + if (parts[0] > 0xff) { + return 0; + } + val |= parts[0] << 24; + break; + + case 3: /* a.b.c -- 8.8.16 bits */ + if (val > 0xffff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16); + break; + + case 4: /* a.b.c.d -- 8.8.8.8 bits */ + if (val > 0xff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8); + break; + default: + LWIP_ASSERT("unhandled", 0); + break; + } + if (addr) { + ip4_addr_set_u32(addr, lwip_htonl(val)); + } + return 1; +} + +/** + * Convert numeric IP address into decimal dotted ASCII representation. + * returns ptr to static buffer; not reentrant! 
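+ *
+ * Together with ip4addr_aton() above, this is the string conversion pair. A
+ * minimal round-trip sketch (the buffer name is chosen for illustration):
+ *
+ *   ip4_addr_t a;
+ *   char buf[IP4ADDR_STRLEN_MAX];
+ *   if (ip4addr_aton("192.168.1.10", &a)) {
+ *     ip4addr_ntoa_r(&a, buf, sizeof(buf));
+ *   }
+ *
+ * The reentrant ip4addr_ntoa_r() variant below is preferred where the static
+ * buffer of this function cannot be used safely.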
+ * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char * +ip4addr_ntoa(const ip4_addr_t *addr) +{ + static char str[IP4ADDR_STRLEN_MAX]; + return ip4addr_ntoa_r(addr, str, IP4ADDR_STRLEN_MAX); +} + +/** + * Same as ip4addr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen) +{ + u32_t s_addr; + char inv[3]; + char *rp; + u8_t *ap; + u8_t rem; + u8_t n; + u8_t i; + int len = 0; + + s_addr = ip4_addr_get_u32(addr); + + rp = buf; + ap = (u8_t *)&s_addr; + for (n = 0; n < 4; n++) { + i = 0; + do { + rem = *ap % (u8_t)10; + *ap /= (u8_t)10; + inv[i++] = (char)('0' + rem); + } while (*ap); + while (i--) { + if (len++ >= buflen) { + return NULL; + } + *rp++ = inv[i]; + } + if (len++ >= buflen) { + return NULL; + } + *rp++ = '.'; + ap++; + } + *--rp = 0; + return buf; +} + +#endif /* LWIP_IPV4 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c new file mode 100644 index 000000000..a445530e0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c @@ -0,0 +1,894 @@ +/** + * @file + * This is the IPv4 packet segmentation and reassembly implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Jani Monoses + * Simon Goldschmidt + * original reassembly code by Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip4_frag.h" +#include "lwip/def.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/stats.h" +#include "lwip/icmp.h" + +#include + +#if IP_REASSEMBLY +/** + * The IP reassembly code currently has the following limitations: + * - IP header options are not supported + * - fragments must not overlap (e.g. due to different routes), + * currently, overlapping or duplicate fragments are thrown away + * if IP_REASS_CHECK_OVERLAP=1 (the default)! + * + * @todo: work with IP header options + */ + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. Only use this if you know that + * overlapping won't occur on your network! */ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +#define IP_REASS_VALIDATE_TELEGRAM_FINISHED 1 +#define IP_REASS_VALIDATE_PBUF_QUEUED 0 +#define IP_REASS_VALIDATE_PBUF_DROPPED -1 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IP header, since it replaces + * the IP header in memory in incoming fragments (after copying it) to keep + * track of the various fragments. (-> If the IP header doesn't need packing, + * this struct doesn't need packing, too.) + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB) \ + (ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \ + ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \ + IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 1 : 0 + +/* global variables */ +static struct ip_reassdata *reassdatagrams; +static u16_t ip_reass_pbufcount; + +/* function prototypes */ +static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); +static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); + +/** + * Reassembly timer base function + * for both NO_SYS == 0 and 1 (!). + * + * Should be called every 1000 msec (defined by IP_TMR_INTERVAL). + */ +void +ip_reass_tmr(void) +{ + struct ip_reassdata *r, *prev = NULL; + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. 
Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n", (u16_t)r->timer)); + prev = r; + r = r->next; + } else { + /* reassembly timed out */ + struct ip_reassdata *tmp; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n")); + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip_reass_free_complete_datagram(tmp, prev); + } + } +} + +/** + * Free a datagram (struct ip_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip_reass_pbufcount), + * SNMP counters and sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + * @param prev the previous datagram in the linked list + * @return the number of pbufs freed + */ +static int +ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip_reass_helper *iprh; + + LWIP_ASSERT("prev != ipr", prev != ipr); + if (prev != NULL) { + LWIP_ASSERT("prev->next == ipr", prev->next == ipr); + } + + MIB2_STATS_INC(mib2.ipreasmfails); +#if LWIP_ICMP + iprh = (struct ip_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Then, copy the original header into it. */ + SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN); + icmp_time_exceeded(p, ICMP_TE_FRAG); + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(p); + } +#endif /* LWIP_ICMP */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(pcur); + } + /* Then, unchain the struct ip_reassdata from the list and free it. */ + ip_reass_dequeue_datagram(ipr, prev); + LWIP_ASSERT("ip_reass_pbufcount >= pbufs_freed", ip_reass_pbufcount >= pbufs_freed); + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - pbufs_freed); + + return pbufs_freed; +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram 'fraghdr' belongs to is not freed! + * + * @param fraghdr IP header of the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + * @return the number of pbufs freed + */ +static int +ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed) +{ + /* @todo Can't we simply remove the last datagram in the + * linked list behind reassdatagrams? + */ + struct ip_reassdata *r, *oldest, *prev, *oldest_prev; + int pbufs_freed = 0, pbufs_freed_current; + int other_datagrams; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the datagram that 'fraghdr' belongs to! 
*/ + do { + oldest = NULL; + prev = NULL; + oldest_prev = NULL; + other_datagrams = 0; + r = reassdatagrams; + while (r != NULL) { + if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) { + /* Not the same datagram as fraghdr */ + other_datagrams++; + if (oldest == NULL) { + oldest = r; + oldest_prev = prev; + } else if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + oldest_prev = prev; + } + } + if (r->next != NULL) { + prev = r; + } + r = r->next; + } + if (oldest != NULL) { + pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev); + pbufs_freed += pbufs_freed_current; + } + } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1)); + return pbufs_freed; +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Enqueues a new fragment into the fragment queue + * @param fraghdr points to the new fragments IP hdr + * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space) + * @return A pointer to the queue location into which the fragment was enqueued + */ +static struct ip_reassdata * +ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen) +{ + struct ip_reassdata *ipr; +#if ! IP_REASS_FREE_OLDEST + LWIP_UNUSED_ARG(clen); +#endif + + /* No matching previous fragment found, allocate a new reassdata struct */ + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) { + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + } + if (ipr == NULL) +#endif /* IP_REASS_FREE_OLDEST */ + { + IPFRAG_STATS_INC(ip_frag.memerr); + LWIP_DEBUGF(IP_REASS_DEBUG, ("Failed to alloc reassdata struct\n")); + return NULL; + } + } + memset(ipr, 0, sizeof(struct ip_reassdata)); + ipr->timer = IP_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + /* copy the ip header for later tests and input */ + /* @todo: no ip options supported? */ + SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN); + return ipr; +} + +/** + * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs. + * @param ipr points to the queue entry to dequeue + */ +static void +ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + /* dequeue the reass struct */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", prev != NULL); + prev->next = ipr->next; + } + + /* now we can free the ip_reassdata struct */ + memp_free(MEMP_REASSDATA, ipr); +} + +/** + * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list + * will grow over time as new pbufs are rx. + * Also checks that the datagram passes basic continuity checks (if the last + * fragment was received at least once). 
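+ * For illustration: if fragments arrive with byte offsets 2960, 0, 1480, the
+ * helper list is kept sorted by start offset, and the datagram only validates
+ * once offset 0 is present, a fragment with MF==0 has been seen, and every
+ * neighbouring pair satisfies prev->end == next->start (no holes).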
+ * @param ipr points to the reassembly state + * @param new_p points to the pbuf for the current fragment + * @param is_last is 1 if this pbuf has MF==0 (ipr->flags not updated yet) + * @return see IP_REASS_VALIDATE_* defines + */ +static int +ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p, int is_last) +{ + struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev = NULL; + struct pbuf *q; + u16_t offset, len; + u8_t hlen; + struct ip_hdr *fraghdr; + int valid = 1; + + /* Extract length and fragment offset from current fragment */ + fraghdr = (struct ip_hdr *)new_p->payload; + len = lwip_ntohs(IPH_LEN(fraghdr)); + hlen = IPH_HL_BYTES(fraghdr); + if (hlen > len) { + /* invalid datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } + len = (u16_t)(len - hlen); + offset = IPH_OFFSET_BYTES(fraghdr); + + /* overwrite the fragment's ip header from the pbuf with our helper struct, + * and setup the embedded helper structure. */ + /* make sure the struct ip_reass_helper fits into the IP header */ + LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN", + sizeof(struct ip_reass_helper) <= IP_HLEN); + iprh = (struct ip_reass_helper *)new_p->payload; + iprh->next_pbuf = NULL; + iprh->start = offset; + iprh->end = (u16_t)(offset + len); + if (iprh->end < offset) { + /* u16_t overflow, cannot handle this */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } + + /* Iterate through until we either get to the end of the list (append), + * or we find one with a larger offset (insert). */ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip_reass_helper *)q->payload; + if (iprh->start < iprh_tmp->start) { + /* the new pbuf should be inserted before this */ + iprh->next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ +#if IP_REASS_CHECK_OVERLAP + if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) { + /* fragment overlaps with previous or following, throw away */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + if (iprh->end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* fragment with the lowest offset */ + ipr->p = new_p; + } + break; + } else if (iprh->start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; +#if IP_REASS_CHECK_OVERLAP + } else if (iprh->start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no holes. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. 
Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = new_p; + } + } + + /* At this point, the validation part begins: */ + /* If we already received the last fragment */ + if (is_last || ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0)) { + /* and had no holes so far */ + if (valid) { + /* then check if the rest of the fragments is here */ + /* Check if the queue starts with the first datagram */ + if ((ipr->p == NULL) || (((struct ip_reass_helper *)ipr->p->payload)->start != 0)) { + valid = 0; + } else { + /* and check that there are no holes after this datagram */ + iprh_prev = iprh; + q = iprh->next_pbuf; + while (q != NULL) { + iprh = (struct ip_reass_helper *)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + /* if still valid, all fragments are received + * (because to the MF==0 already arrived */ + if (valid) { + LWIP_ASSERT("sanity check", ipr->p != NULL); + LWIP_ASSERT("sanity check", + ((struct ip_reass_helper *)ipr->p->payload) != iprh); + LWIP_ASSERT("validate_datagram:next_pbuf!=NULL", + iprh->next_pbuf == NULL); + } + } + } + /* If valid is 0 here, there are some fragments missing in the middle + * (since MF == 0 has already arrived). Such datagrams simply time out if + * no more fragments are received... */ + return valid ? IP_REASS_VALIDATE_TELEGRAM_FINISHED : IP_REASS_VALIDATE_PBUF_QUEUED; + } + /* If we come here, not all fragments were received, yet! */ + return IP_REASS_VALIDATE_PBUF_QUEUED; /* not yet valid! */ +} + +/** + * Reassembles incoming IP fragments into an IP datagram. + * + * @param p points to a pbuf chain of the fragment + * @return NULL if reassembly is incomplete, ? otherwise + */ +struct pbuf * +ip4_reass(struct pbuf *p) +{ + struct pbuf *r; + struct ip_hdr *fraghdr; + struct ip_reassdata *ipr; + struct ip_reass_helper *iprh; + u16_t offset, len, clen; + u8_t hlen; + int valid; + int is_last; + + IPFRAG_STATS_INC(ip_frag.recv); + MIB2_STATS_INC(mib2.ipreasmreqds); + + fraghdr = (struct ip_hdr *)p->payload; + + if (IPH_HL_BYTES(fraghdr) != IP_HLEN) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: IP options currently not supported!\n")); + IPFRAG_STATS_INC(ip_frag.err); + goto nullreturn; + } + + offset = IPH_OFFSET_BYTES(fraghdr); + len = lwip_ntohs(IPH_LEN(fraghdr)); + hlen = IPH_HL_BYTES(fraghdr); + if (hlen > len) { + /* invalid datagram */ + goto nullreturn; + } + len = (u16_t)(len - hlen); + + /* Check if we are allowed to enqueue more datagrams. 
*/ + clen = pbuf_clen(p); + if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + if (!ip_reass_remove_oldest_datagram(fraghdr, clen) || + ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS)) +#endif /* IP_REASS_FREE_OLDEST */ + { + /* No datagram could be freed and still too many pbufs enqueued */ + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n", + ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS)); + IPFRAG_STATS_INC(ip_frag.memerr); + /* @todo: send ICMP time exceeded here? */ + /* drop this pbuf */ + goto nullreturn; + } + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n", + lwip_ntohs(IPH_ID(fraghdr)))); + IPFRAG_STATS_INC(ip_frag.cachehit); + break; + } + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = ip_reass_enqueue_new_datagram(fraghdr, clen); + /* Bail if unable to enqueue */ + if (ipr == NULL) { + goto nullreturn; + } + } else { + if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) && + ((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) { + /* ipr->iphdr is not the header from the first fragment, but fraghdr is + * -> copy fraghdr into ipr->iphdr since we want to have the header + * of the first fragment (for ICMP time exceeded and later, for copying + * all options, if supported)*/ + SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN); + } + } + + /* At this point, we have either created a new entry or pointing + * to an existing one */ + + /* check for 'no more fragments', and update queue entry*/ + is_last = (IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0; + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + if ((datagram_len < offset) || (datagram_len > (0xFFFF - IP_HLEN))) { + /* u16_t overflow, cannot handle this */ + goto nullreturn_ipr; + } + } + /* find the right place to insert this pbuf */ + /* @todo: trim pbufs if fragments are overlapping */ + valid = ip_reass_chain_frag_into_datagram_and_validate(ipr, p, is_last); + if (valid == IP_REASS_VALIDATE_PBUF_DROPPED) { + goto nullreturn_ipr; + } + /* if we come here, the pbuf has been enqueued */ + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time + (overflow checked by testing against IP_REASS_MAX_PBUFS) */ + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount + clen); + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + ipr->datagram_len = datagram_len; + ipr->flags |= IP_REASS_FLAG_LASTFRAG; + LWIP_DEBUGF(IP_REASS_DEBUG, + ("ip4_reass: last fragment seen, total len %"S16_F"\n", + ipr->datagram_len)); + } + + if (valid == IP_REASS_VALIDATE_TELEGRAM_FINISHED) { + struct ip_reassdata *ipr_prev; + /* the totally last fragment (flag more fragments = 0) was received at least + * once AND all fragments are received */ + u16_t datagram_len = (u16_t)(ipr->datagram_len + IP_HLEN); + + /* save the second pbuf before copying the header over the pointer */ + r = ((struct ip_reass_helper *)ipr->p->payload)->next_pbuf; + + /* copy the original ip header back 
to the first pbuf */ + fraghdr = (struct ip_hdr *)(ipr->p->payload); + SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN); + IPH_LEN_SET(fraghdr, lwip_htons(datagram_len)); + IPH_OFFSET_SET(fraghdr, 0); + IPH_CHKSUM_SET(fraghdr, 0); + /* @todo: do we need to set/calculate the correct checksum? */ +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + p = ipr->p; + + /* chain together the pbufs contained within the reass_data list. */ + while (r != NULL) { + iprh = (struct ip_reass_helper *)r->payload; + + /* hide the ip header for every succeeding fragment */ + pbuf_remove_header(r, IP_HLEN); + pbuf_cat(p, r); + r = iprh->next_pbuf; + } + + /* find the previous entry in the linked list */ + if (ipr == reassdatagrams) { + ipr_prev = NULL; + } else { + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } + + /* release the sources allocate for the fragment queue entry */ + ip_reass_dequeue_datagram(ipr, ipr_prev); + + /* and adjust the number of pbufs currently queued for reassembly. */ + clen = pbuf_clen(p); + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= clen); + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - clen); + + MIB2_STATS_INC(mib2.ipreasmoks); + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount)); + return NULL; + +nullreturn_ipr: + LWIP_ASSERT("ipr != NULL", ipr != NULL); + if (ipr->p == NULL) { + /* dropped pbuf after creating a new datagram entry: remove the entry, too */ + LWIP_ASSERT("not firstalthough just enqueued", ipr == reassdatagrams); + ip_reass_dequeue_datagram(ipr, NULL); + } + +nullreturn: + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: nullreturn\n")); + IPFRAG_STATS_INC(ip_frag.drop); + pbuf_free(p); + return NULL; +} +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref * +ip_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref *)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref *p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ipfrag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref *)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void *)pcr == (void *)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IP datagram if too large for the netif. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p. 
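+ * For illustration, with an MTU of 1500 the per-fragment payload is
+ * nfb * 8 = ((1500 - IP_HLEN) / 8) * 8 = 1480 bytes, so a 4000 byte datagram
+ * (3980 bytes of payload) is sent as fragments of 1480, 1480 and 1020 bytes
+ * with fragment offsets 0, 185 and 370 (expressed in 8-byte units).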
+ * + * @param p ip packet to send + * @param netif the netif on which to send + * @param dest destination ip address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest) +{ + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + struct ip_hdr *original_iphdr; + struct ip_hdr *iphdr; + const u16_t nfb = (u16_t)((netif->mtu - IP_HLEN) / 8); + u16_t left, fragsize; + u16_t ofo; + int last; + u16_t poff = IP_HLEN; + u16_t tmp; + int mf_set; + + original_iphdr = (struct ip_hdr *)p->payload; + iphdr = original_iphdr; + if (IPH_HL_BYTES(iphdr) != IP_HLEN) { + /* ip4_frag() does not support IP options */ + return ERR_VAL; + } + LWIP_ERROR("ip4_frag(): pbuf too short", p->len >= IP_HLEN, return ERR_VAL); + + /* Save original offset */ + tmp = lwip_ntohs(IPH_OFFSET(iphdr)); + ofo = tmp & IP_OFFMASK; + /* already fragmented? if so, the last fragment we create must have MF, too */ + mf_set = tmp & IP_MF; + + left = (u16_t)(p->tot_len - IP_HLEN); + + while (left) { + /* Fill this fragment */ + fragsize = LWIP_MIN(left, (u16_t)(nfb * 8)); + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff); + /* make room for the IP header */ + if (pbuf_add_header(rambuf, IP_HLEN)) { + pbuf_free(rambuf); + goto memerr; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link and IP header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len >= (IP_HLEN))); + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; + + left_to_copy = fragsize; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + u16_t plen = (u16_t)(p->len - poff); + LWIP_ASSERT("p->len >= poff", p->len >= poff); + newpbuflen = LWIP_MIN(left_to_copy, plen); + /* Is this pbuf already empty? */ + if (!newpbuflen) { + poff = 0; + p = p->next; + continue; + } + pcr = ip_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + goto memerr; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, + (u8_t *)p->payload + poff, newpbuflen); + if (newpbuf == NULL) { + ip_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + goto memerr; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ipfrag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. 
+ */ + pbuf_cat(rambuf, newpbuf); + left_to_copy = (u16_t)(left_to_copy - newpbuflen); + if (left_to_copy) { + poff = 0; + p = p->next; + } + } + poff = (u16_t)(poff + newpbuflen); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Correct header */ + last = (left <= netif->mtu - IP_HLEN); + + /* Set new offset and MF flag */ + tmp = (IP_OFFMASK & (ofo)); + if (!last || mf_set) { + /* the last fragment has MF set if the input frame had it */ + tmp = tmp | IP_MF; + } + IPH_OFFSET_SET(iphdr, lwip_htons(tmp)); + IPH_LEN_SET(iphdr, lwip_htons((u16_t)(fragsize + IP_HLEN))); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + netif->output(netif, rambuf, dest); + IPFRAG_STATS_INC(ip_frag.xmit); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left = (u16_t)(left - fragsize); + ofo = (u16_t)(ofo + nfb); + } + MIB2_STATS_INC(mib2.ipfragoks); + return ERR_OK; +memerr: + MIB2_STATS_INC(mib2.ipfragfails); + return ERR_MEM; +} +#endif /* IP_FRAG */ + +#endif /* LWIP_IPV4 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c new file mode 100644 index 000000000..7cf98a521 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c @@ -0,0 +1,812 @@ +/** + * @file + * + * @defgroup dhcp6 DHCPv6 + * @ingroup ip6 + * DHCPv6 client: IPv6 address autoconfiguration as per + * RFC 3315 (stateful DHCPv6) and + * RFC 3736 (stateless DHCPv6). + * + * For now, only stateless DHCPv6 is implemented! + * + * TODO: + * - enable/disable API to not always start when RA is received + * - stateful DHCPv6 (for now, only stateless DHCPv6 for DNS and NTP servers works) + * - create Client Identifier? + * - only start requests if a valid local address is available on the netif + * - only start information requests if required (not for every RA) + * + * dhcp6_enable_stateful() enables stateful DHCPv6 for a netif (stateless disabled)\n + * dhcp6_enable_stateless() enables stateless DHCPv6 for a netif (stateful disabled)\n + * dhcp6_disable() disable DHCPv6 for a netif + * + * When enabled, requests are only issued after receipt of RA with the + * corresponding bits set. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/dhcp6.h" +#include "lwip/prot/dhcp6.h" +#include "lwip/def.h" +#include "lwip/udp.h" +#include "lwip/dns.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif +#ifndef LWIP_HOOK_DHCP6_APPEND_OPTIONS +#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len) +#endif +#ifndef LWIP_HOOK_DHCP6_PARSE_OPTION +#define LWIP_HOOK_DHCP6_PARSE_OPTION(netif, dhcp6, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0) +#endif + +#if LWIP_DNS && LWIP_DHCP6_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP6_MAX_DNS_SERVERS +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS LWIP_DHCP6_MAX_DNS_SERVERS +#else +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS 0 +#endif + + +/** Option handling: options are parsed in dhcp6_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp6 (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp6_option_idx { + DHCP6_OPTION_IDX_CLI_ID = 0, + DHCP6_OPTION_IDX_SERVER_ID, +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + DHCP6_OPTION_IDX_DNS_SERVER, + DHCP6_OPTION_IDX_DOMAIN_LIST, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP6_GET_NTP_SRV + DHCP6_OPTION_IDX_NTP_SERVER, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP6_OPTION_IDX_MAX +}; + +struct dhcp6_option_info { + u8_t option_given; + u16_t val_start; + u16_t val_length; +}; + +/** Holds the decoded option info, only valid while in dhcp6_recv. 
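+ * Options are stored as (offset, length) pairs into the received pbuf;
+ * dhcp6_get_option_start() / dhcp6_get_option_length() are later combined
+ * with pbuf_copy_partial() to read e.g. the DNS and NTP server addresses
+ * out of the reply.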
*/ +struct dhcp6_option_info dhcp6_rx_options[DHCP6_OPTION_IDX_MAX]; + +#define dhcp6_option_given(dhcp6, idx) (dhcp6_rx_options[idx].option_given != 0) +#define dhcp6_got_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 1) +#define dhcp6_clear_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 0) +#define dhcp6_clear_all_options(dhcp6) (memset(dhcp6_rx_options, 0, sizeof(dhcp6_rx_options))) +#define dhcp6_get_option_start(dhcp6, idx) (dhcp6_rx_options[idx].val_start) +#define dhcp6_get_option_length(dhcp6, idx) (dhcp6_rx_options[idx].val_length) +#define dhcp6_set_option(dhcp6, idx, start, len) do { dhcp6_rx_options[idx].val_start = (start); dhcp6_rx_options[idx].val_length = (len); }while(0) + + +const ip_addr_t dhcp6_All_DHCP6_Relay_Agents_and_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010002); +const ip_addr_t dhcp6_All_DHCP6_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010003); + +static struct udp_pcb *dhcp6_pcb; +static u8_t dhcp6_pcb_refcount; + + +/* receive, unfold, parse and free incoming messages */ +static void dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp6_inc_pcb_refcount(void) +{ + if (dhcp6_pcb_refcount == 0) { + LWIP_ASSERT("dhcp6_inc_pcb_refcount(): memory leak", dhcp6_pcb == NULL); + + /* allocate UDP PCB */ + dhcp6_pcb = udp_new_ip6(); + + if (dhcp6_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp6_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp6_pcb, IP6_ADDR_ANY, DHCP6_CLIENT_PORT); + udp_recv(dhcp6_pcb, dhcp6_recv, NULL); + } + + dhcp6_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp6_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp6_pcb_refcount(): refcount error", (dhcp6_pcb_refcount > 0)); + dhcp6_pcb_refcount--; + + if (dhcp6_pcb_refcount == 0) { + udp_remove(dhcp6_pcb); + dhcp6_pcb = NULL; + } +} + +/** + * @ingroup dhcp6 + * Set a statically allocated struct dhcp6 to work with. + * Using this prevents dhcp6_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp6 (uninitialised) dhcp6 struct allocated by the application + */ +void +dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp6 != NULL", dhcp6 != NULL); + LWIP_ASSERT("netif already has a struct dhcp6 set", netif_dhcp6_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp6, 0, sizeof(struct dhcp6)); + /* dhcp6_set_state(&dhcp, DHCP6_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6); +} + +/** + * @ingroup dhcp6 + * Removes a struct dhcp6 from a netif. + * + * ATTENTION: Only use this when not using dhcp6_set_struct() to allocate the + * struct dhcp6 since the memory is passed back to the heap. 
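+ * Illustrative teardown order (assuming the struct was heap-allocated by
+ * dhcp6_enable_stateless()): call dhcp6_disable(netif) first so the shared
+ * UDP PCB reference is released, then dhcp6_cleanup(netif) to free the struct.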
+ * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp6_cleanup(struct netif *netif) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp6_data(netif) != NULL) { + mem_free(netif_dhcp6_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL); + } +} + +static struct dhcp6* +dhcp6_get_struct(struct netif *netif, const char *dbg_requester) +{ + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + if (dhcp6 == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: mallocing new DHCPv6 client\n", dbg_requester)); + dhcp6 = (struct dhcp6 *)mem_malloc(sizeof(struct dhcp6)); + if (dhcp6 == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: could not allocate dhcp6\n", dbg_requester)); + return NULL; + } + + /* clear data structure, this implies DHCP6_STATE_OFF */ + memset(dhcp6, 0, sizeof(struct dhcp6)); + /* store this dhcp6 client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6); + } else { + /* already has DHCP6 client attached */ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("%s: using existing DHCPv6 client\n", dbg_requester)); + } + + if (!dhcp6->pcb_allocated) { + if (dhcp6_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP6 PCB is allocated */ + mem_free(dhcp6); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL); + return NULL; + } + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: allocated dhcp6", dbg_requester)); + dhcp6->pcb_allocated = 1; + } + return dhcp6; +} + +/* + * Set the DHCPv6 state + * If the state changed, reset the number of tries. + */ +static void +dhcp6_set_state(struct dhcp6 *dhcp6, u8_t new_state, const char *dbg_caller) +{ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("DHCPv6 state: %d -> %d (%s)\n", + dhcp6->state, new_state, dbg_caller)); + if (new_state != dhcp6->state) { + dhcp6->state = new_state; + dhcp6->tries = 0; + dhcp6->request_timeout = 0; + } +} + +static int +dhcp6_stateless_enabled(struct dhcp6 *dhcp6) +{ + if ((dhcp6->state == DHCP6_STATE_STATELESS_IDLE) || + (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG)) { + return 1; + } + return 0; +} + +/*static int +dhcp6_stateful_enabled(struct dhcp6 *dhcp6) +{ + if (dhcp6->state == DHCP6_STATE_OFF) { + return 0; + } + if (dhcp6_stateless_enabled(dhcp6)) { + return 0; + } + return 1; +}*/ + +/** + * @ingroup dhcp6 + * Enable stateful DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_MANAGED_ADDR_CONFIG flag set. + * + * A struct dhcp6 will be allocated for this netif if not + * set via @ref dhcp6_set_struct before. + * + * @todo: stateful DHCPv6 not supported, yet + */ +err_t +dhcp6_enable_stateful(struct netif *netif) +{ + LWIP_UNUSED_ARG(netif); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("stateful dhcp6 not implemented yet")); + return ERR_VAL; +} + +/** + * @ingroup dhcp6 + * Enable stateless DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_OTHER_CONFIG flag set. + * + * A struct dhcp6 will be allocated for this netif if not + * set via @ref dhcp6_set_struct before. 
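+ *
+ * Typical use (illustrative): call dhcp6_enable_stateless(netif) once the
+ * netif is up; an Information-request is then sent automatically when a
+ * Router Advertisement with the "other configuration" flag is received.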
+ */ +err_t +dhcp6_enable_stateless(struct netif *netif) +{ + struct dhcp6 *dhcp6; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_enable_stateless(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + dhcp6 = dhcp6_get_struct(netif, "dhcp6_enable_stateless()"); + if (dhcp6 == NULL) { + return ERR_MEM; + } + if (dhcp6_stateless_enabled(dhcp6)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 already enabled")); + return ERR_OK; + } else if (dhcp6->state != DHCP6_STATE_OFF) { + /* stateful running */ + /* @todo: stop stateful once it is implemented */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): switching from stateful to stateless DHCPv6")); + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 enabled\n")); + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_enable_stateless"); + return ERR_OK; +} + +/** + * @ingroup dhcp6 + * Disable stateful or stateless DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_OTHER_CONFIG flag set. + */ +void +dhcp6_disable(struct netif *netif) +{ + struct dhcp6 *dhcp6; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_disable(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + dhcp6 = netif_dhcp6_data(netif); + if (dhcp6 != NULL) { + if (dhcp6->state != DHCP6_STATE_OFF) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_disable(): DHCPv6 disabled (old state: %s)\n", + (dhcp6_stateless_enabled(dhcp6) ? "stateless" : "stateful"))); + dhcp6_set_state(dhcp6, DHCP6_STATE_OFF, "dhcp6_disable"); + if (dhcp6->pcb_allocated != 0) { + dhcp6_dec_pcb_refcount(); /* free DHCPv6 PCB if not needed any more */ + dhcp6->pcb_allocated = 0; + } + } + } +} + +/** + * Create a DHCPv6 request, fill in common headers + * + * @param netif the netif under DHCPv6 control + * @param dhcp6 dhcp6 control struct + * @param message_type message type of the request + * @param opt_len_alloc option length to allocate + * @param options_out_len option length on exit + * @return a pbuf for the message + */ +static struct pbuf * +dhcp6_create_msg(struct netif *netif, struct dhcp6 *dhcp6, u8_t message_type, + u16_t opt_len_alloc, u16_t *options_out_len) +{ + struct pbuf *p_out; + struct dhcp6_msg *msg_out; + + LWIP_ERROR("dhcp6_create_msg: netif != NULL", (netif != NULL), return NULL;); + LWIP_ERROR("dhcp6_create_msg: dhcp6 != NULL", (dhcp6 != NULL), return NULL;); + p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp6_msg) + opt_len_alloc, PBUF_RAM); + if (p_out == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp6_create_msg(): could not allocate pbuf\n")); + return NULL; + } + LWIP_ASSERT("dhcp6_create_msg: check that first pbuf can hold struct dhcp6_msg", + (p_out->len >= sizeof(struct dhcp6_msg) + opt_len_alloc)); + + /* @todo: limit new xid for certain message types? 
*/ + /* reuse transaction identifier in retransmissions */ + if (dhcp6->tries == 0) { + dhcp6->xid = LWIP_RAND() & 0xFFFFFF; + } + + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", dhcp6->xid)); + + msg_out = (struct dhcp6_msg *)p_out->payload; + memset(msg_out, 0, sizeof(struct dhcp6_msg) + opt_len_alloc); + + msg_out->msgtype = message_type; + msg_out->transaction_id[0] = (u8_t)(dhcp6->xid >> 16); + msg_out->transaction_id[1] = (u8_t)(dhcp6->xid >> 8); + msg_out->transaction_id[2] = (u8_t)dhcp6->xid; + *options_out_len = 0; + return p_out; +} + +static u16_t +dhcp6_option_short(u16_t options_out_len, u8_t *options, u16_t value) +{ + options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + options[options_out_len++] = (u8_t) (value & 0x00ffU); + return options_out_len; +} + +static u16_t +dhcp6_option_optionrequest(u16_t options_out_len, u8_t *options, const u16_t *req_options, + u16_t num_req_options, u16_t max_len) +{ + size_t i; + u16_t ret; + + LWIP_ASSERT("dhcp6_option_optionrequest: options_out_len + sizeof(struct dhcp6_msg) + addlen <= max_len", + sizeof(struct dhcp6_msg) + options_out_len + 4U + (2U * num_req_options) <= max_len); + LWIP_UNUSED_ARG(max_len); + + ret = dhcp6_option_short(options_out_len, options, DHCP6_OPTION_ORO); + ret = dhcp6_option_short(ret, options, 2 * num_req_options); + for (i = 0; i < num_req_options; i++) { + ret = dhcp6_option_short(ret, options, req_options[i]); + } + return ret; +} + +/* All options are added, shrink the pbuf to the required size */ +static void +dhcp6_msg_finalize(u16_t options_out_len, struct pbuf *p_out) +{ + /* shrink the pbuf to the actual content length */ + pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp6_msg) + options_out_len)); +} + + +#if LWIP_IPV6_DHCP6_STATELESS +static void +dhcp6_information_request(struct netif *netif, struct dhcp6 *dhcp6) +{ + const u16_t requested_options[] = {DHCP6_OPTION_DNS_SERVERS, DHCP6_OPTION_DOMAIN_LIST, DHCP6_OPTION_SNTP_SERVERS}; + u16_t msecs; + struct pbuf *p_out; + u16_t options_out_len; + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request()\n")); + /* create and initialize the DHCP message header */ + p_out = dhcp6_create_msg(netif, dhcp6, DHCP6_INFOREQUEST, 4 + sizeof(requested_options), &options_out_len); + if (p_out != NULL) { + err_t err; + struct dhcp6_msg *msg_out = (struct dhcp6_msg *)p_out->payload; + u8_t *options = (u8_t *)(msg_out + 1); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request: making request\n")); + + options_out_len = dhcp6_option_optionrequest(options_out_len, options, requested_options, + LWIP_ARRAYSIZE(requested_options), p_out->len); + LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, DHCP6_STATE_REQUESTING_CONFIG, msg_out, + DHCP6_INFOREQUEST, options_out_len, p_out->len); + dhcp6_msg_finalize(options_out_len, p_out); + + err = udp_sendto_if(dhcp6_pcb, p_out, &dhcp6_All_DHCP6_Relay_Agents_and_Servers, DHCP6_SERVER_PORT, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request: INFOREQUESTING -> %d\n", (int)err)); + LWIP_UNUSED_ARG(err); + } else { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp6_information_request: could not allocate DHCP6 request\n")); + } + dhcp6_set_state(dhcp6, DHCP6_STATE_REQUESTING_CONFIG, "dhcp6_information_request"); + if (dhcp6->tries < 255) { + dhcp6->tries++; + } + msecs = (u16_t)((dhcp6->tries < 6 ? 
1 << dhcp6->tries : 60) * 1000); + dhcp6->request_timeout = (u16_t)((msecs + DHCP6_TIMER_MSECS - 1) / DHCP6_TIMER_MSECS); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request(): set request timeout %"U16_F" msecs\n", msecs)); +} + +static err_t +dhcp6_request_config(struct netif *netif, struct dhcp6 *dhcp6) +{ + /* stateless mode enabled and no request running? */ + if (dhcp6->state == DHCP6_STATE_STATELESS_IDLE) { + /* send Information-request and wait for answer; setup receive timeout */ + dhcp6_information_request(netif, dhcp6); + } + + return ERR_OK; +} + +static void +dhcp6_abort_config_request(struct dhcp6 *dhcp6) +{ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + /* abort running request */ + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_abort_config_request"); + } +} + +/* Handle a REPLY to INFOREQUEST + * This parses DNS and NTP server addresses from the reply. + */ +static void +dhcp6_handle_config_reply(struct netif *netif, struct pbuf *p_msg_in) +{ + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + + LWIP_UNUSED_ARG(dhcp6); + LWIP_UNUSED_ARG(p_msg_in); + +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER)) { + ip_addr_t dns_addr; + ip6_addr_t *dns_addr6; + u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + u16_t idx; + u8_t n; + + memset(&dns_addr, 0, sizeof(dns_addr)); + dns_addr6 = ip_2_ip6(&dns_addr); + for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_PROVIDE_DNS_SERVERS); + n++, idx += sizeof(struct ip6_addr_packed)) { + u16_t copied = pbuf_copy_partial(p_msg_in, dns_addr6, sizeof(struct ip6_addr_packed), idx); + if (copied != sizeof(struct ip6_addr_packed)) { + /* pbuf length mismatch */ + return; + } + ip6_addr_assign_zone(dns_addr6, IP6_UNKNOWN, netif); + /* @todo: do we need a different offset than DHCP(v4)? */ + dns_setserver(n, &dns_addr); + } + } + /* @ todo: parse and set Domain Search List */ +#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */ + +#if LWIP_DHCP6_GET_NTP_SRV + if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER)) { + ip_addr_t ntp_server_addrs[LWIP_DHCP6_MAX_NTP_SERVERS]; + u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + u16_t idx; + u8_t n; + + for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_MAX_NTP_SERVERS); + n++, idx += sizeof(struct ip6_addr_packed)) { + u16_t copied; + ip6_addr_t *ntp_addr6 = ip_2_ip6(&ntp_server_addrs[n]); + ip_addr_set_zero_ip6(&ntp_server_addrs[n]); + copied = pbuf_copy_partial(p_msg_in, ntp_addr6, sizeof(struct ip6_addr_packed), idx); + if (copied != sizeof(struct ip6_addr_packed)) { + /* pbuf length mismatch */ + return; + } + ip6_addr_assign_zone(ntp_addr6, IP6_UNKNOWN, netif); + } + dhcp6_set_ntp_servers(n, ntp_server_addrs); + } +#endif /* LWIP_DHCP6_GET_NTP_SRV */ +} +#endif /* LWIP_IPV6_DHCP6_STATELESS */ + +/** This function is called from nd6 module when an RA messsage is received + * It triggers DHCPv6 requests (if enabled). 
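+ * For illustration: an RA whose "other configuration" (O) flag is set
+ * triggers an Information-request via dhcp6_request_config(); an RA without
+ * that flag aborts a request that is still pending.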
+ */ +void +dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config) +{ + struct dhcp6 *dhcp6; + + LWIP_ASSERT("netif != NULL", netif != NULL); + dhcp6 = netif_dhcp6_data(netif); + + LWIP_UNUSED_ARG(managed_addr_config); + LWIP_UNUSED_ARG(other_config); + LWIP_UNUSED_ARG(dhcp6); + +#if LWIP_IPV6_DHCP6_STATELESS + if (dhcp6 != NULL) { + if (dhcp6_stateless_enabled(dhcp6)) { + if (other_config) { + dhcp6_request_config(netif, dhcp6); + } else { + dhcp6_abort_config_request(dhcp6); + } + } + } +#endif /* LWIP_IPV6_DHCP6_STATELESS */ +} + +/** + * Parse the DHCPv6 message and extract the DHCPv6 options. + * + * Extract the DHCPv6 options (offset + length) so that we can later easily + * check for them or extract the contents. + */ +static err_t +dhcp6_parse_reply(struct pbuf *p, struct dhcp6 *dhcp6) +{ + u16_t offset; + u16_t offset_max; + u16_t options_idx; + struct dhcp6_msg *msg_in; + + LWIP_UNUSED_ARG(dhcp6); + + /* clear received options */ + dhcp6_clear_all_options(dhcp6); + msg_in = (struct dhcp6_msg *)p->payload; + + /* parse options */ + + options_idx = sizeof(struct dhcp6_msg); + /* parse options to the end of the received packet */ + offset_max = p->tot_len; + + offset = options_idx; + /* at least 4 byte to read? */ + while ((offset + 4 <= offset_max)) { + u8_t op_len_buf[4]; + u8_t *op_len; + u16_t op; + u16_t len; + u16_t val_offset = (u16_t)(offset + 4); + if (val_offset < offset) { + /* overflow */ + return ERR_BUF; + } + /* copy option + length, might be split accross pbufs */ + op_len = (u8_t *)pbuf_get_contiguous(p, op_len_buf, 4, 4, offset); + if (op_len == NULL) { + /* failed to get option and length */ + return ERR_VAL; + } + op = (op_len[0] << 8) | op_len[1]; + len = (op_len[2] << 8) | op_len[3]; + offset = val_offset + len; + if (offset < val_offset) { + /* overflow */ + return ERR_BUF; + } + + switch (op) { + case (DHCP6_OPTION_CLIENTID): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID, val_offset, len); + break; + case (DHCP6_OPTION_SERVERID): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID, val_offset, len); + break; +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + case (DHCP6_OPTION_DNS_SERVERS): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER, val_offset, len); + break; + case (DHCP6_OPTION_DOMAIN_LIST): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST, val_offset, len); + break; +#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP6_GET_NTP_SRV + case (DHCP6_OPTION_SNTP_SERVERS): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER, val_offset, len); + break; +#endif /* LWIP_DHCP6_GET_NTP_SRV*/ + default: + LWIP_DEBUGF(DHCP6_DEBUG, ("skipping option %"U16_F" in options\n", op)); + LWIP_HOOK_DHCP6_PARSE_OPTION(ip_current_netif(), dhcp6, dhcp6->state, msg_in, + msg_in->msgtype, op, len, q, val_offset); + break; + } + } + return ERR_OK; +} + +static void +dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + struct dhcp6_msg *reply_msg = (struct dhcp6_msg *)p->payload; + u8_t msg_type; + u32_t xid; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCPv6 message from netif that does not have DHCPv6 enabled? 
-> not interested */ + if ((dhcp6 == NULL) || (dhcp6->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ERROR("invalid server address type", IP_IS_V6(addr), goto free_pbuf_and_return;); + + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_recv(pbuf = %p) from DHCPv6 server %s port %"U16_F"\n", (void *)p, + ipaddr_ntoa(addr), port)); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + if (p->len < sizeof(struct dhcp6_msg)) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCPv6 reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + /* match transaction ID against what we expected */ + xid = reply_msg->transaction_id[0] << 16; + xid |= reply_msg->transaction_id[1] << 8; + xid |= reply_msg->transaction_id[2]; + if (xid != dhcp6->xid) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!= dhcp6->xid(%"X32_F")\n", xid, dhcp6->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? */ + if (dhcp6_parse_reply(p, dhcp6) != ERR_OK) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCPv6 message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + /* read DHCP message type */ + msg_type = reply_msg->msgtype; + /* message type is DHCP6 REPLY? */ + if (msg_type == DHCP6_REPLY) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("DHCP6_REPLY received\n")); +#if LWIP_IPV6_DHCP6_STATELESS + /* in info-requesting state? */ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_recv"); + dhcp6_handle_config_reply(netif, p); + } else +#endif /* LWIP_IPV6_DHCP6_STATELESS */ + { + /* @todo: handle reply in other states? */ + } + } else { + /* @todo: handle other message types */ + } + +free_pbuf_and_return: + pbuf_free(p); +} + +/** + * A DHCPv6 request has timed out. + * + * The timer that was started with the DHCPv6 request has + * timed out, indicating no response was received in time. + */ +static void +dhcp6_timeout(struct netif *netif, struct dhcp6 *dhcp6) +{ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout()\n")); + + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(dhcp6); + +#if LWIP_IPV6_DHCP6_STATELESS + /* back-off period has passed, or server selection timed out */ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout(): retrying information request\n")); + dhcp6_information_request(netif, dhcp6); + } +#endif /* LWIP_IPV6_DHCP6_STATELESS */ +} + +/** + * DHCPv6 timeout handling (this function must be called every 500ms, + * see @ref DHCP6_TIMER_MSECS). + * + * A DHCPv6 server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCPv6 request is timed out. 
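+ *
+ * For illustration, with DHCP6_TIMER_MSECS == 500 a 2000 ms request timeout
+ * is stored as request_timeout == 4, and dhcp6_timeout() fires on the fourth
+ * call to dhcp6_tmr() after the request was sent.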
+ */ +void +dhcp6_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + /* only act on DHCPv6 configured interfaces */ + if (dhcp6 != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp6->request_timeout > 1) { + dhcp6->request_timeout--; + } else if (dhcp6->request_timeout == 1) { + dhcp6->request_timeout--; + /* { dhcp6->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp6_timeout(netif, dhcp6); + } + } + } +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c new file mode 100644 index 000000000..fec8b28a9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c @@ -0,0 +1,123 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET + +#include "lwip/ethip6.h" +#include "lwip/nd6.h" +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp6.h" +#include "lwip/prot/ethernet.h" +#include "netif/ethernet.h" + +#include + +/** + * Resolve and fill-in Ethernet address header for outgoing IPv6 packet. + * + * For IPv6 multicast, corresponding Ethernet addresses + * are selected and the packet is transmitted on the link. + * + * For unicast addresses, ask the ND6 module what to do. It will either let us + * send the the packet right away, or queue the packet for later itself, unless + * an error occurs. 
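+ * For illustration: the multicast destination ff02::1:ff00:1 is hashed to the
+ * Ethernet group address 33:33:ff:00:00:01 (0x3333 followed by the low 32 bits
+ * of the IPv6 address); unicast destinations go through nd6 resolution instead.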
+ * + * @todo anycast addresses + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return + * - ERR_OK or the return value of @ref nd6_get_next_hop_addr_or_queue. + */ +err_t +ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + struct eth_addr dest; + const u8_t *hwaddr; + err_t result; + + LWIP_ASSERT_CORE_LOCKED(); + + /* The destination IP address must be properly zoned from here on down. */ + IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif); + + /* multicast destination IP address? */ + if (ip6_addr_ismulticast(ip6addr)) { + /* Hash IP multicast address to MAC address.*/ + dest.addr[0] = 0x33; + dest.addr[1] = 0x33; + dest.addr[2] = ((const u8_t *)(&(ip6addr->addr[3])))[0]; + dest.addr[3] = ((const u8_t *)(&(ip6addr->addr[3])))[1]; + dest.addr[4] = ((const u8_t *)(&(ip6addr->addr[3])))[2]; + dest.addr[5] = ((const u8_t *)(&(ip6addr->addr[3])))[3]; + + /* Send out. */ + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); + } + + /* We have a unicast destination IP address */ + /* @todo anycast? */ + + /* Ask ND6 what to do with the packet. */ + result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); + if (result != ERR_OK) { + return result; + } + + /* If no hardware address is returned, nd6 has queued the packet for later. */ + if (hwaddr == NULL) { + return ERR_OK; + } + + /* Send out the packet using the returned hardware address. */ + SMEMCPY(dest.addr, hwaddr, 6); + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); +} + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c new file mode 100644 index 000000000..167738a05 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c @@ -0,0 +1,425 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/icmp6.h" +#include "lwip/prot/icmp6.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/ip.h" +#include "lwip/stats.h" + +#include + +#if LWIP_ICMP6_DATASIZE == 0 +#undef LWIP_ICMP6_DATASIZE +#define LWIP_ICMP6_DATASIZE 8 +#endif + +/* Forward declarations */ +static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type); +static void icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, + u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr); +static void icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, + u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr, struct netif *netif); + + +/** + * Process an input ICMPv6 message. Called by ip6_input. + * + * Will generate a reply for echo requests. Other messages are forwarded + * to nd6_input, or mld6_input. + * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +icmp6_input(struct pbuf *p, struct netif *inp) +{ + struct icmp6_hdr *icmp6hdr; + struct pbuf *r; + const ip6_addr_t *reply_src; + + ICMP6_STATS_INC(icmp6.recv); + + /* Check that ICMPv6 header fits in payload */ + if (p->len < sizeof(struct icmp6_hdr)) { + /* drop short packets */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.lenerr); + ICMP6_STATS_INC(icmp6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + +#if CHECKSUM_CHECK_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP6) { + if (ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->tot_len, ip6_current_src_addr(), + ip6_current_dest_addr()) != 0) { + /* Checksum failed */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.chkerr); + ICMP6_STATS_INC(icmp6.drop); + return; + } + } +#endif /* CHECKSUM_CHECK_ICMP6 */ + + switch (icmp6hdr->type) { + case ICMP6_TYPE_NA: /* Neighbor advertisement */ + case ICMP6_TYPE_NS: /* Neighbor solicitation */ + case ICMP6_TYPE_RA: /* Router advertisement */ + case ICMP6_TYPE_RD: /* Redirect */ + case ICMP6_TYPE_PTB: /* Packet too big */ + nd6_input(p, inp); + return; + case ICMP6_TYPE_RS: +#if LWIP_IPV6_FORWARD + /* @todo implement router functionality */ +#endif + break; +#if LWIP_IPV6_MLD + case ICMP6_TYPE_MLQ: + case ICMP6_TYPE_MLR: + case ICMP6_TYPE_MLD: + mld6_input(p, inp); + return; +#endif + case ICMP6_TYPE_EREQ: +#if !LWIP_MULTICAST_PING + /* multicast destination address? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* drop */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.drop); + return; + } +#endif /* LWIP_MULTICAST_PING */ + + /* Allocate reply. 
*/ + r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM); + if (r == NULL) { + /* drop */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.memerr); + return; + } + + /* Copy echo request. */ + if (pbuf_copy(r, p) != ERR_OK) { + /* drop */ + pbuf_free(p); + pbuf_free(r); + ICMP6_STATS_INC(icmp6.err); + return; + } + + /* Determine reply source IPv6 address. */ +#if LWIP_MULTICAST_PING + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + reply_src = ip_2_ip6(ip6_select_source_address(inp, ip6_current_src_addr())); + if (reply_src == NULL) { + /* drop */ + pbuf_free(p); + pbuf_free(r); + ICMP6_STATS_INC(icmp6.rterr); + return; + } + } + else +#endif /* LWIP_MULTICAST_PING */ + { + reply_src = ip6_current_dest_addr(); + } + + /* Set fields in reply. */ + ((struct icmp6_echo_hdr *)(r->payload))->type = ICMP6_TYPE_EREP; + ((struct icmp6_echo_hdr *)(r->payload))->chksum = 0; +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP6) { + ((struct icmp6_echo_hdr *)(r->payload))->chksum = ip6_chksum_pseudo(r, + IP6_NEXTH_ICMP6, r->tot_len, reply_src, ip6_current_src_addr()); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send reply. */ + ICMP6_STATS_INC(icmp6.xmit); + ip6_output_if(r, reply_src, ip6_current_src_addr(), + LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp); + pbuf_free(r); + + break; + default: + ICMP6_STATS_INC(icmp6.proterr); + ICMP6_STATS_INC(icmp6.drop); + break; + } + + pbuf_free(p); +} + + +/** + * Send an icmpv6 'destination unreachable' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the unreachable type + */ +void +icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR); +} + +/** + * Send an icmpv6 'packet too big' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'packet too big' should be sent, + * p->payload pointing to the IPv6 header + * @param mtu the maximum mtu that we can accept + */ +void +icmp6_packet_too_big(struct pbuf *p, u32_t mtu) +{ + icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB); +} + +/** + * Send an icmpv6 'time exceeded' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + */ +void +icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_TE); +} + +/** + * Send an icmpv6 'time exceeded' packet, with explicit source and destination + * addresses. + * + * This function may be used to send a response sometime after receiving the + * packet for which this response is meant. The provided source and destination + * addresses are used primarily to retain their zone information. 
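/* Editor's illustrative note -- not part of the lwIP sources. A condensed sketch
 * of the echo-reply path that icmp6_input() implements above, assuming ICMPv6
 * checksum generation is enabled for the netif; 'p' is the received Echo Request
 * and 'inp' the netif it arrived on: */
static err_t echo_reply_sketch(struct pbuf *p, struct netif *inp)
{
  const ip6_addr_t *reply_src = ip6_current_dest_addr();  /* reply from the address that was pinged */
  struct pbuf *r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM);
  struct icmp6_echo_hdr *echo;

  if (r == NULL) {
    return ERR_MEM;                                        /* out of pbufs */
  }
  if (pbuf_copy(r, p) != ERR_OK) {                         /* clone the request, payload included */
    pbuf_free(r);
    return ERR_VAL;
  }
  echo = (struct icmp6_echo_hdr *)r->payload;
  echo->type = ICMP6_TYPE_EREP;                            /* turn the request into a reply */
  echo->chksum = 0;
  echo->chksum = ip6_chksum_pseudo(r, IP6_NEXTH_ICMP6, r->tot_len,
                                   reply_src, ip6_current_src_addr());
  ip6_output_if(r, reply_src, ip6_current_src_addr(),
                LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp);   /* send back to the requester */
  pbuf_free(r);
  return ERR_OK;
}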
+ * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + * @param src_addr source address of the original packet, with zone information + * @param dest_addr destination address of the original packet, with zone + * information + */ +void +icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr) +{ + icmp6_send_response_with_addrs(p, c, 0, ICMP6_TYPE_TE, src_addr, dest_addr); +} + +/** + * Send an icmpv6 'parameter problem' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost and the calculated + * offset would be wrong (calculated against ip6_current_header()). + * + * @param p the input packet for which the 'param problem' should be sent, + * p->payload pointing to the IP header + * @param c ICMPv6 code for the param problem type + * @param pointer the pointer to the byte where the parameter is found + */ +void +icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer) +{ + u32_t pointer_u32 = (u32_t)((const u8_t *)pointer - (const u8_t *)ip6_current_header()); + icmp6_send_response(p, c, pointer_u32, ICMP6_TYPE_PP); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. + * The packet is sent *to* ip_current_src_addr() on ip_current_netif(). + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + */ +static void +icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type) +{ + const struct ip6_addr *reply_src, *reply_dest; + struct netif *netif = ip_current_netif(); + + LWIP_ASSERT("icmpv6 packet not a direct response", netif != NULL); + reply_dest = ip6_current_src_addr(); + + /* Select an address to use as source. */ + reply_src = ip_2_ip6(ip6_select_source_address(netif, reply_dest)); + if (reply_src == NULL) { + ICMP6_STATS_INC(icmp6.rterr); + return; + } + icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, reply_dest, netif); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. + * + * Call this function if the packet is NOT sent as a direct response to an + * incoming packet, but rather sometime later (e.g. for a fragment reassembly + * timeout). The caller must provide the zoned source and destination addresses + * from the original packet with the src_addr and dest_addr parameters. The + * reason for this approach is that while the addresses themselves are part of + * the original packet, their zone information is not, thus possibly resulting + * in a link-local response being sent over the wrong link. 
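/* Editor's illustrative note -- not part of the lwIP sources. The "pointer" that
 * icmp6_param_problem() above places in the Parameter Problem message is the byte
 * offset of the offending field from the start of the received IPv6 header
 * (RFC 4443 sec. 3.4); an option type sitting directly behind the 40-byte fixed
 * header therefore yields a pointer value of 40: */
#include <stdint.h>

static uint32_t param_problem_pointer(const uint8_t *ip6_header, const uint8_t *bad_field)
{
  return (uint32_t)(bad_field - ip6_header);   /* octet offset within the invoking packet */
}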
+ * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + * @param src_addr original source address + * @param dest_addr original destination address + */ +static void +icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, u8_t type, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr) +{ + const struct ip6_addr *reply_src, *reply_dest; + struct netif *netif; + + /* Get the destination address and netif for this ICMP message. */ + LWIP_ASSERT("must provide both source and destination", src_addr != NULL); + LWIP_ASSERT("must provide both source and destination", dest_addr != NULL); + + /* Special case, as ip6_current_xxx is either NULL, or points + to a different packet than the one that expired. */ + IP6_ADDR_ZONECHECK(src_addr); + IP6_ADDR_ZONECHECK(dest_addr); + /* Swap source and destination for the reply. */ + reply_dest = src_addr; + reply_src = dest_addr; + netif = ip6_route(reply_src, reply_dest); + if (netif == NULL) { + ICMP6_STATS_INC(icmp6.rterr); + return; + } + icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, + reply_dest, netif); +} + +/** + * Send an ICMPv6 packet (with srd/dst address and netif given). + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + * @param reply_src source address of the packet to send + * @param reply_dest destination address of the packet to send + * @param netif netif to send the packet + */ +static void +icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, u8_t type, + const ip6_addr_t *reply_src, const ip6_addr_t *reply_dest, struct netif *netif) +{ + struct pbuf *q; + struct icmp6_hdr *icmp6hdr; + + /* ICMPv6 header + IPv6 header + data */ + q = pbuf_alloc(PBUF_IP, sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE, + PBUF_RAM); + if (q == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMPv6 packet.\n")); + ICMP6_STATS_INC(icmp6.memerr); + return; + } + LWIP_ASSERT("check that first pbuf can hold icmp 6message", + (q->len >= (sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE))); + + icmp6hdr = (struct icmp6_hdr *)q->payload; + icmp6hdr->type = type; + icmp6hdr->code = code; + icmp6hdr->data = lwip_htonl(data); + + /* copy fields from original packet */ + SMEMCPY((u8_t *)q->payload + sizeof(struct icmp6_hdr), (u8_t *)p->payload, + IP6_HLEN + LWIP_ICMP6_DATASIZE); + + /* calculate checksum */ + icmp6hdr->chksum = 0; +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + icmp6hdr->chksum = ip6_chksum_pseudo(q, IP6_NEXTH_ICMP6, q->tot_len, + reply_src, reply_dest); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + ICMP6_STATS_INC(icmp6.xmit); + ip6_output_if(q, reply_src, reply_dest, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(q); +} + +#endif /* LWIP_ICMP6 && LWIP_IPV6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c new file mode 100644 index 000000000..d9a992c22 --- /dev/null +++ 
b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c @@ -0,0 +1,53 @@ +/** + * @file + * + * INET v6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/inet.h" + +/** This variable is initialized by the system to contain the wildcard IPv6 address. + */ +const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c new file mode 100644 index 000000000..eda11dc88 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c @@ -0,0 +1,1492 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/netif.h" +#include "lwip/ip.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6_frag.h" +#include "lwip/icmp6.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/dhcp6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/debug.h" +#include "lwip/stats.h" + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** + * Finds the appropriate network interface for a given IPv6 address. It tries to select + * a netif following a sequence of heuristics: + * 1) if there is only 1 netif, return it + * 2) if the destination is a zoned address, match its zone to a netif + * 3) if the either the source or destination address is a scoped address, + * match the source address's zone (if set) or address (if not) to a netif + * 4) tries to match the destination subnet to a configured address + * 5) tries to find a router-announced route + * 6) tries to match the (unscoped) source address to the netif + * 7) returns the default netif, if configured + * + * Note that each of the two given addresses may or may not be properly zoned. + * + * @param src the source IPv6 address, if known + * @param dest the destination IPv6 address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest) +{ +#if LWIP_SINGLE_NETIF + LWIP_UNUSED_ARG(src); + LWIP_UNUSED_ARG(dest); +#else /* LWIP_SINGLE_NETIF */ + struct netif *netif; + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + /* If single netif configuration, fast return. */ + if ((netif_list != NULL) && (netif_list->next == NULL)) { + if (!netif_is_up(netif_list) || !netif_is_link_up(netif_list) || + (ip6_addr_has_zone(dest) && !ip6_addr_test_zone(dest, netif_list))) { + return NULL; + } + return netif_list; + } + +#if LWIP_IPV6_SCOPES + /* Special processing for zoned destination addresses. This includes link- + * local unicast addresses and interface/link-local multicast addresses. Use + * the zone to find a matching netif. If the address is not zoned, then there + * is technically no "wrong" netif to choose, and we leave routing to other + * rules; in most cases this should be the scoped-source rule below. */ + if (ip6_addr_has_zone(dest)) { + IP6_ADDR_ZONECHECK(dest); + /* Find a netif based on the zone. For custom mappings, one zone may map + * to multiple netifs, so find one that can actually send a packet. */ + NETIF_FOREACH(netif) { + if (ip6_addr_test_zone(dest, netif) && + netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + } + /* No matching netif found. 
Do no try to route to a different netif, + * as that would be a zone violation, resulting in any packets sent to + * that netif being dropped on output. */ + return NULL; + } +#endif /* LWIP_IPV6_SCOPES */ + + /* Special processing for scoped source and destination addresses. If we get + * here, the destination address does not have a zone, so either way we need + * to look at the source address, which may or may not have a zone. If it + * does, the zone is restrictive: there is (typically) only one matching + * netif for it, and we should avoid routing to any other netif as that would + * result in guaranteed zone violations. For scoped source addresses that do + * not have a zone, use (only) a netif that has that source address locally + * assigned. This case also applies to the loopback source address, which has + * an implied link-local scope. If only the destination address is scoped + * (but, again, not zoned), we still want to use only the source address to + * determine its zone because that's most likely what the user/application + * wants, regardless of whether the source address is scoped. Finally, some + * of this story also applies if scoping is disabled altogether. */ +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_scope(dest, IP6_UNKNOWN) || + ip6_addr_has_scope(src, IP6_UNICAST) || +#else /* LWIP_IPV6_SCOPES */ + if (ip6_addr_islinklocal(dest) || ip6_addr_ismulticast_iflocal(dest) || + ip6_addr_ismulticast_linklocal(dest) || ip6_addr_islinklocal(src) || +#endif /* LWIP_IPV6_SCOPES */ + ip6_addr_isloopback(src)) { +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_zone(src)) { + /* Find a netif matching the source zone (relatively cheap). */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif) && netif_is_link_up(netif) && + ip6_addr_test_zone(src, netif)) { + return netif; + } + } + } else +#endif /* LWIP_IPV6_SCOPES */ + { + /* Find a netif matching the source address (relatively expensive). */ + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp_zoneless(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + /* Again, do not use any other netif in this case, as that could result in + * zone boundary violations. */ + return NULL; + } + + /* We come here only if neither source nor destination is scoped. */ + IP6_ADDR_ZONECHECK(src); + +#ifdef LWIP_HOOK_IP6_ROUTE + netif = LWIP_HOOK_IP6_ROUTE(src, dest); + if (netif != NULL) { + return netif; + } +#endif + + /* See if the destination subnet matches a configured address. In accordance + * with RFC 5942, dynamically configured addresses do not have an implied + * local subnet, and thus should be considered /128 assignments. However, as + * such, the destination address may still match a local address, and so we + * still need to check for exact matches here. By (lwIP) policy, statically + * configured addresses do always have an implied local /64 subnet. */ + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_netcmp(dest, netif_ip6_addr(netif, i)) && + (netif_ip6_addr_isstatic(netif, i) || + ip6_addr_nethostcmp(dest, netif_ip6_addr(netif, i)))) { + return netif; + } + } + } + + /* Get the netif for a suitable router-announced route. 
*/ + netif = nd6_find_route(dest); + if (netif != NULL) { + return netif; + } + + /* Try with the netif that matches the source address. Given the earlier rule + * for scoped source addresses, this applies to unscoped addresses only. */ + if (!ip6_addr_isany(src)) { + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, loopback traffic is passed through any netif */ + if (ip6_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ +#endif /* !LWIP_SINGLE_NETIF */ + + /* no matching netif found, use default netif, if up */ + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default)) { + return NULL; + } + return netif_default; +} + +/** + * @ingroup ip6 + * Select the best IPv6 source address for a given destination IPv6 address. + * + * This implementation follows RFC 6724 Sec. 5 to the following extent: + * - Rules 1, 2, 3: fully implemented + * - Rules 4, 5, 5.5: not applicable + * - Rule 6: not implemented + * - Rule 7: not applicable + * - Rule 8: limited to "prefer /64 subnet match over non-match" + * + * For Rule 2, we deliberately deviate from RFC 6724 Sec. 3.1 by considering + * ULAs to be of smaller scope than global addresses, to avoid that a preferred + * ULA is picked over a deprecated global address when given a global address + * as destination, as that would likely result in broken two-way communication. + * + * As long as temporary addresses are not supported (as used in Rule 7), a + * proper implementation of Rule 8 would obviate the need to implement Rule 6. + * + * @param netif the netif on which to send a packet + * @param dest the destination we are trying to reach (possibly not properly + * zoned) + * @return the most suitable source address to use, or NULL if no suitable + * source address is found + */ +const ip_addr_t * +ip6_select_source_address(struct netif *netif, const ip6_addr_t *dest) +{ + const ip_addr_t *best_addr; + const ip6_addr_t *cand_addr; + s8_t dest_scope, cand_scope; + s8_t best_scope = IP6_MULTICAST_SCOPE_RESERVED; + u8_t i, cand_pref, cand_bits; + u8_t best_pref = 0; + u8_t best_bits = 0; + + /* Start by determining the scope of the given destination address. These + * tests are hopefully (roughly) in order of likeliness to match. 
*/ + if (ip6_addr_isglobal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } else if (ip6_addr_islinklocal(dest) || ip6_addr_isloopback(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL; + } else if (ip6_addr_isuniquelocal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL; + } else if (ip6_addr_ismulticast(dest)) { + dest_scope = ip6_addr_multicast_scope(dest); + } else if (ip6_addr_issitelocal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL; + } else { + /* no match, consider scope global */ + dest_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } + + best_addr = NULL; + + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + /* Consider only valid (= preferred and deprecated) addresses. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + continue; + } + /* Determine the scope of this candidate address. Same ordering idea. */ + cand_addr = netif_ip6_addr(netif, i); + if (ip6_addr_isglobal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } else if (ip6_addr_islinklocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL; + } else if (ip6_addr_isuniquelocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL; + } else if (ip6_addr_issitelocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL; + } else { + /* no match, treat as low-priority global scope */ + cand_scope = IP6_MULTICAST_SCOPE_RESERVEDF; + } + cand_pref = ip6_addr_ispreferred(netif_ip6_addr_state(netif, i)); + /* @todo compute the actual common bits, for longest matching prefix. */ + /* We cannot count on the destination address having a proper zone + * assignment, so do not compare zones in this case. */ + cand_bits = ip6_addr_netcmp_zoneless(cand_addr, dest); /* just 1 or 0 for now */ + if (cand_bits && ip6_addr_nethostcmp(cand_addr, dest)) { + return netif_ip_addr6(netif, i); /* Rule 1 */ + } + if ((best_addr == NULL) || /* no alternative yet */ + ((cand_scope < best_scope) && (cand_scope >= dest_scope)) || + ((cand_scope > best_scope) && (best_scope < dest_scope)) || /* Rule 2 */ + ((cand_scope == best_scope) && ((cand_pref > best_pref) || /* Rule 3 */ + ((cand_pref == best_pref) && (cand_bits > best_bits))))) { /* Rule 8 */ + /* We found a new "winning" candidate. */ + best_addr = netif_ip_addr6(netif, i); + best_scope = cand_scope; + best_pref = cand_pref; + best_bits = cand_bits; + } + } + + return best_addr; /* may be NULL */ +} + +#if LWIP_IPV6_FORWARD +/** + * Forwards an IPv6 packet. It finds an appropriate route for the + * packet, decrements the HL value of the packet, and outputs + * the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IPv6 header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip6_forward(struct pbuf *p, struct ip6_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + /* do not forward link-local or loopback addresses */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_dest_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding link-local address.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* Find network interface where to forward this IP packet to. 
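/* Editor's illustrative note -- not part of the lwIP sources. A sketch of how a
 * caller typically combines the two helpers above: ip6_route() picks the outgoing
 * netif, then ip6_select_source_address() picks a source address on that netif.
 * 'dest' is assumed to be properly zoned; NULL means no route or no usable source. */
static const ip6_addr_t *pick_source_for(const ip6_addr_t *dest, struct netif **out_netif)
{
  struct netif *netif = ip6_route(IP6_ADDR_ANY6, dest);  /* heuristics 1..7 documented above */
  const ip_addr_t *src;

  *out_netif = NULL;
  if (netif == NULL) {
    return NULL;                                         /* no route to destination */
  }
  src = ip6_select_source_address(netif, dest);          /* RFC 6724 subset, rules 1/2/3/8 */
  if (src == NULL) {
    return NULL;                                         /* netif has no valid IPv6 address */
  }
  *out_netif = netif;
  return ip_2_ip6(src);                                  /* unwrap ip_addr_t to ip6_addr_t */
}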
*/ + netif = ip6_route(IP6_ADDR_ANY6, ip6_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } +#if LWIP_IPV6_SCOPES + /* Do not forward packets with a zoned (e.g., link-local) source address + * outside of their zone. We determined the zone a bit earlier, so we know + * that the address is properly zoned here, so we can safely use has_zone. + * Also skip packets with a loopback source address (link-local implied). */ + if ((ip6_addr_has_zone(ip6_current_src_addr()) && + !ip6_addr_test_zone(ip6_current_src_addr(), netif)) || + ip6_addr_isloopback(ip6_current_src_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding packet beyond its source address zone.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } +#endif /* LWIP_IPV6_SCOPES */ + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not bouncing packets back on incoming interface.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* decrement HL */ + IP6H_HOPLIM_SET(iphdr, IP6H_HOPLIM(iphdr) - 1); + /* send ICMP6 if HL == 0 */ + if (IP6H_HOPLIM(iphdr) == 0) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_time_exceeded(p, ICMP6_TE_HL); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + if (netif->mtu && (p->tot_len > netif->mtu)) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_packet_too_big(p, netif->mtu); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: forwarding packet to %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); + + /* transmit pbuf on chosen interface */ + netif->output_ip6(netif, p, ip6_current_dest_addr()); + IP6_STATS_INC(ip6.fw); + IP6_STATS_INC(ip6.xmit); + return; +} +#endif /* LWIP_IPV6_FORWARD */ + +/** Return true if the current input packet should be accepted on this netif */ +static int +ip6_input_accept(struct netif *netif) +{ + /* interface is up? */ + if (netif_is_up(netif)) { + u8_t i; + /* unicast to this interface address? address configured? 
*/ + /* If custom scopes are used, the destination zone will be tested as + * part of the local-address comparison, but we need to test the source + * scope as well (e.g., is this interface on the same link?). */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(ip6_current_dest_addr(), netif_ip6_addr(netif, i)) +#if IPV6_CUSTOM_SCOPES + && (!ip6_addr_has_zone(ip6_current_src_addr()) || + ip6_addr_test_zone(ip6_current_src_addr(), netif)) +#endif /* IPV6_CUSTOM_SCOPES */ + ) { + /* accept on this netif */ + return 1; + } + } + } + return 0; +} + +/** + * This function is called by the network interface device driver when + * an IPv6 packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip6_forward). + * + * Finally, the packet is sent to the upper layer protocol input function. + * + * @param p the received IPv6 packet (p->payload points to IPv6 header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip6_input(struct pbuf *p, struct netif *inp) +{ + struct ip6_hdr *ip6hdr; + struct netif *netif; + const u8_t *nexth; + u16_t hlen, hlen_tot; /* the current header length */ +#if 0 /*IP_ACCEPT_LINK_LAYER_ADDRESSING*/ + @todo + int check_ip_src=1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ +#if LWIP_RAW + raw_input_state_t raw_status; +#endif /* LWIP_RAW */ + + LWIP_ASSERT_CORE_LOCKED(); + + IP6_STATS_INC(ip6.recv); + + /* identify the IP header */ + ip6hdr = (struct ip6_hdr *)p->payload; + if (IP6H_V(ip6hdr) != 6) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IPv6 packet dropped due to bad version number %"U32_F"\n", + IP6H_V(ip6hdr))); + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP6_INPUT + if (LWIP_HOOK_IP6_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ + if ((IP6_HLEN > p->len) || (IP6H_PLEN(ip6hdr) > (p->tot_len - IP6_HLEN))) { + if (IP6_HLEN > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)IP6_HLEN, p->len)); + } + if ((IP6H_PLEN(ip6hdr) + IP6_HLEN) > p->tot_len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 (plen %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)(IP6H_PLEN(ip6hdr) + IP6_HLEN), p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Trim pbuf. This should have been done at the netif layer, + * but we'll do it anyway just to be sure that its done. */ + pbuf_realloc(p, (u16_t)(IP6_HLEN + IP6H_PLEN(ip6hdr))); + + /* copy IP addresses to aligned ip6_addr_t */ + ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_dest, ip6hdr->dest); + ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_src, ip6hdr->src); + + /* Don't accept virtual IPv4 mapped IPv6 addresses. + * Don't accept multicast source addresses. 
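/* Editor's illustrative note -- not part of the lwIP sources. The check below
 * rejects IPv4-mapped IPv6 addresses, i.e. ::ffff:a.b.c.d -- 80 zero bits,
 * 16 one bits, then an IPv4 address (RFC 4291 sec. 2.5.5.2). A standalone
 * equivalent on a plain 16-byte address in network byte order: */
#include <stdint.h>
#include <string.h>

static int is_ipv4_mapped_ipv6(const uint8_t ip6[16])
{
  static const uint8_t prefix[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };
  return memcmp(ip6, prefix, sizeof(prefix)) == 0;
}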
*/ + if (ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_dest)) || + ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_src)) || + ip6_addr_ismulticast(ip_2_ip6(&ip_data.current_iphdr_src))) { + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Set the appropriate zone identifier on the addresses. */ + ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_dest), IP6_UNKNOWN, inp); + ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_src), IP6_UNICAST, inp); + + /* current header pointer. */ + ip_data.current_ip6_header = ip6hdr; + + /* In netif, used in case we need to send ICMPv6 packets back. */ + ip_data.current_netif = inp; + ip_data.current_input_netif = inp; + + /* match packet against an interface, i.e. is this packet for us? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* Always joined to multicast if-local and link-local all-nodes group. */ + if (ip6_addr_isallnodes_iflocal(ip6_current_dest_addr()) || + ip6_addr_isallnodes_linklocal(ip6_current_dest_addr())) { + netif = inp; + } +#if LWIP_IPV6_MLD + else if (mld6_lookfor_group(inp, ip6_current_dest_addr())) { + netif = inp; + } +#else /* LWIP_IPV6_MLD */ + else if (ip6_addr_issolicitednode(ip6_current_dest_addr())) { + u8_t i; + /* Filter solicited node packets when MLD is not enabled + * (for Neighbor discovery). */ + netif = NULL; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp_solicitednode(ip6_current_dest_addr(), netif_ip6_addr(inp, i))) { + netif = inp; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: solicited node packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + break; + } + } + } +#endif /* LWIP_IPV6_MLD */ + else { + netif = NULL; + } + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. */ + if (ip6_input_accept(inp)) { + netif = inp; + } else { + netif = NULL; +#if !IPV6_CUSTOM_SCOPES + /* Shortcut: stop looking for other interfaces if either the source or + * the destination has a scope constrained to this interface. Custom + * scopes may break the 1:1 link/interface mapping, however. */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_islinklocal(ip6_current_src_addr())) { + goto netif_found; + } +#endif /* !IPV6_CUSTOM_SCOPES */ +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* The loopback address is to be considered link-local. Packets to it + * should be dropped on other interfaces, as per RFC 4291 Sec. 2.5.3. + * Its implied scope means packets *from* the loopback address should + * not be accepted on other interfaces, either. These requirements + * cannot be implemented in the case that loopback traffic is sent + * across a non-loopback interface, however. */ + if (ip6_addr_isloopback(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_src_addr())) { + goto netif_found; + } +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ +#if !LWIP_SINGLE_NETIF + NETIF_FOREACH(netif) { + if (netif == inp) { + /* we checked that before already */ + continue; + } + if (ip6_input_accept(netif)) { + break; + } + } +#endif /* !LWIP_SINGLE_NETIF */ + } +netif_found: + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet accepted on interface %c%c\n", + netif ? netif->name[0] : 'X', netif? netif->name[1] : 'X')); + } + + /* "::" packet source address? 
(used in duplicate address detection) */ + if (ip6_addr_isany(ip6_current_src_addr()) && + (!ip6_addr_issolicitednode(ip6_current_dest_addr()))) { + /* packet source is not valid */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with src ANY_ADDRESS dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* packet not for us? */ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_TRACE, ("ip6_input: packet not for us.\n")); +#if LWIP_IPV6_FORWARD + /* non-multicast packet? */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* try to forward IP packet on (other) interfaces */ + ip6_forward(p, ip6hdr, inp); + } +#endif /* LWIP_IPV6_FORWARD */ + pbuf_free(p); + goto ip6_input_cleanup; + } + + /* current netif pointer. */ + ip_data.current_netif = netif; + + /* Save next header type. */ + nexth = &IP6H_NEXTH(ip6hdr); + + /* Init header length. */ + hlen = hlen_tot = IP6_HLEN; + + /* Move to payload. */ + pbuf_remove_header(p, IP6_HLEN); + + /* Process known option extension headers, if present. */ + while (*nexth != IP6_NEXTH_NONE) + { + switch (*nexth) { + case IP6_NEXTH_HOPBYHOP: + { + s32_t opt_offset; + struct ip6_hbh_hdr *hbh_hdr; + struct ip6_opt_hdr *opt_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header\n")); + + /* Get and check the header length, while staying in packet bounds. */ + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_HBH_NEXTH(hbh_hdr); + + /* Get the header length. */ + hlen = (u16_t)(8 * (1 + hbh_hdr->_hlen)); + + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* The extended option header starts right after Hop-by-Hop header. */ + opt_offset = IP6_HBH_HLEN; + while (opt_offset < hlen) + { + s32_t opt_dlen = 0; + + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + opt_offset); + + switch (IP6_OPT_TYPE(opt_hdr)) { + /* @todo: process IPV6 Hop-by-Hop option data */ + case IP6_PAD1_OPTION: + /* PAD1 option doesn't have length and value field */ + opt_dlen = -1; + break; + case IP6_PADN_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_ROUTER_ALERT_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_JUMBO_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + default: + /* Check 2 MSB of Hop-by-Hop header type. */ + switch (IP6_OPT_TYPE_ACTION(opt_hdr)) { + case 1: + /* Discard the packet. 
*/ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 2: + /* Send ICMP Parameter Problem */ + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 3: + /* Send ICMP Parameter Problem if destination address is not a multicast address */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + } + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + default: + /* Skip over this option. */ + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + } + break; + } + + /* Adjust the offset to move to the next extended option header */ + opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen; + } + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_DESTOPTS: + { + s32_t opt_offset; + struct ip6_dest_hdr *dest_hdr; + struct ip6_opt_hdr *opt_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Destination options header\n")); + + dest_hdr = (struct ip6_dest_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_DEST_NEXTH(dest_hdr); + + /* Get the header length. */ + hlen = 8 * (1 + dest_hdr->_hlen); + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* The extended option header starts right after Destination header. */ + opt_offset = IP6_DEST_HLEN; + while (opt_offset < hlen) + { + s32_t opt_dlen = 0; + + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)dest_hdr + opt_offset); + + switch (IP6_OPT_TYPE(opt_hdr)) + { + /* @todo: process IPV6 Destination option data */ + case IP6_PAD1_OPTION: + /* PAD1 option deosn't have length and value field */ + opt_dlen = -1; + break; + case IP6_PADN_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_ROUTER_ALERT_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_JUMBO_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_HOME_ADDRESS_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + default: + /* Check 2 MSB of Destination header type. */ + switch (IP6_OPT_TYPE_ACTION(opt_hdr)) + { + case 1: + /* Discard the packet. 
*/ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 2: + /* Send ICMP Parameter Problem */ + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 3: + /* Send ICMP Parameter Problem if destination address is not a multicast address */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + } + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + default: + /* Skip over this option. */ + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + } + break; + } + + /* Adjust the offset to move to the next extended option header */ + opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen; + } + + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_ROUTING: + { + struct ip6_rout_hdr *rout_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Routing header\n")); + + rout_hdr = (struct ip6_rout_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_ROUT_NEXTH(rout_hdr); + + /* Get the header length. */ + hlen = 8 * (1 + rout_hdr->_hlen); + + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* Skip over this header. */ + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* if segment left value is 0 in routing header, ignore the option */ + if (IP6_ROUT_SEG_LEFT(rout_hdr)) { + /* The length field of routing option header must be even */ + if (rout_hdr->_hlen & 0x1) { + /* Discard and send parameter field error */ + icmp6_param_problem(p, ICMP6_PP_FIELD, &rout_hdr->_hlen); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + switch (IP6_ROUT_TYPE(rout_hdr)) + { + /* TODO: process routing by the type */ + case IP6_ROUT_TYPE2: + break; + case IP6_ROUT_RPL: + break; + default: + /* Discard unrecognized routing type and send parameter field error */ + icmp6_param_problem(p, ICMP6_PP_FIELD, &IP6_ROUT_TYPE(rout_hdr)); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + } + + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_FRAGMENT: + { + struct ip6_frag_hdr *frag_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header\n")); + + frag_hdr = (struct ip6_frag_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_FRAG_NEXTH(frag_hdr); + + /* Fragment Header length. */ + hlen = 8; + + /* Make sure this header fits in current pbuf. 
*/ + if (hlen > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_FRAG_STATS_INC(ip6_frag.lenerr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* check payload length is multiple of 8 octets when mbit is set */ + if (IP6_FRAG_MBIT(frag_hdr) && (IP6H_PLEN(ip6hdr) & 0x7)) { + /* ipv6 payload length is not multiple of 8 octets */ + icmp6_param_problem(p, ICMP6_PP_FIELD, LWIP_PACKED_CAST(const void *, &ip6hdr->_plen)); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid payload length dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* Offset == 0 and more_fragments == 0? */ + if ((frag_hdr->_fragment_offset & + PP_HTONS(IP6_FRAG_OFFSET_MASK | IP6_FRAG_MORE_FLAG)) == 0) { + /* This is a 1-fragment packet. Skip this header and continue. */ + pbuf_remove_header(p, hlen); + } else { +#if LWIP_IPV6_REASS + /* reassemble the packet */ + ip_data.current_ip_header_tot_len = hlen_tot; + p = ip6_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + goto ip6_input_cleanup; + } + + /* Returned p point to IPv6 header. + * Update all our variables and pointers and continue. */ + ip6hdr = (struct ip6_hdr *)p->payload; + nexth = &IP6H_NEXTH(ip6hdr); + hlen = hlen_tot = IP6_HLEN; + pbuf_remove_header(p, IP6_HLEN); + +#else /* LWIP_IPV6_REASS */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header dropped (with LWIP_IPV6_REASS==0)\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.opterr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; +#endif /* LWIP_IPV6_REASS */ + } + break; + } + default: + goto options_done; + } + + if (*nexth == IP6_NEXTH_HOPBYHOP) { + /* Hop-by-Hop header comes only as a first option */ + icmp6_param_problem(p, ICMP6_PP_HEADER, nexth); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header dropped (only valid as a first option)\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + } + +options_done: + + /* send to upper layers */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: \n")); + ip6_debug_print(p); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_ip_header_tot_len = hlen_tot; + +#if LWIP_RAW + /* p points to IPv6 header again for raw_input. */ + pbuf_add_header_force(p, hlen_tot); + /* raw input did not eat the packet? */ + raw_status = raw_input(p, inp); + if (raw_status != RAW_INPUT_EATEN) + { + /* Point to payload. */ + pbuf_remove_header(p, hlen_tot); +#else /* LWIP_RAW */ + { +#endif /* LWIP_RAW */ + switch (*nexth) { + case IP6_NEXTH_NONE: + pbuf_free(p); + break; +#if LWIP_UDP + case IP6_NEXTH_UDP: +#if LWIP_UDPLITE + case IP6_NEXTH_UDPLITE: +#endif /* LWIP_UDPLITE */ + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP6_NEXTH_TCP: + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP6 + case IP6_NEXTH_ICMP6: + icmp6_input(p, inp); + break; +#endif /* LWIP_ICMP */ + default: +#if LWIP_RAW + if (raw_status == RAW_INPUT_DELIVERED) { + /* @todo: ipv6 mib in-delivers? */ + } else +#endif /* LWIP_RAW */ + { +#if LWIP_ICMP6 + /* p points to IPv6 header again for raw_input. 
*/ + pbuf_add_header_force(p, hlen_tot); + /* send ICMP parameter problem unless it was a multicast or ICMPv6 */ + if ((!ip6_addr_ismulticast(ip6_current_dest_addr())) && + (IP6H_NEXTH(ip6hdr) != IP6_NEXTH_ICMP6)) { + icmp6_param_problem(p, ICMP6_PP_HEADER, nexth); + } +#endif /* LWIP_ICMP */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_input: Unsupported transport protocol %"U16_F"\n", (u16_t)IP6H_NEXTH(ip6hdr))); + IP6_STATS_INC(ip6.proterr); + IP6_STATS_INC(ip6.drop); + } + pbuf_free(p); + break; + } + } + +ip6_input_cleanup: + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip6_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip6_addr_set_zero(ip6_current_src_addr()); + ip6_addr_set_zero(ip6_current_dest_addr()); + + return ERR_OK; +} + + +/** + * Sends an IPv6 packet on a network interface. This function constructs + * the IPv6 header. If the source IPv6 address is NULL, the IPv6 "ANY" address is + * used as source (usually during network startup). If the source IPv6 address it + * IP6_ADDR_ANY, the most appropriate IPv6 address of the outgoing network + * interface is filled in as source address. If the destination IPv6 address is + * LWIP_IP_HDRINCL, p is assumed to already include an IPv6 header and + * p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) (src is possibly not + * properly zoned) + * @param dest the destination IPv6 address to send the packet to (possibly not + * properly zoned) + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IPv6/LINK headers + * returns errors returned by netif->output_ip6 + */ +err_t +ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, + u8_t nexth, struct netif *netif) +{ + const ip6_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (src != NULL && ip6_addr_isany(src)) { + src_used = ip_2_ip6(ip6_select_source_address(netif, dest)); + if ((src_used == NULL) || ip6_addr_isany(src_used)) { + /* No appropriate source address was found for this packet. */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: No suitable source address for packet.\n")); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + } + } + return ip6_output_if_src(p, src_used, dest, hl, tc, nexth, netif); +} + +/** + * Same as ip6_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, + u8_t nexth, struct netif *netif) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest_addr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + /* Should the IPv6 header be generated or is it already included in p? 
*/ + if (dest != LWIP_IP_HDRINCL) { +#if LWIP_IPV6_SCOPES + /* If the destination address is scoped but lacks a zone, add a zone now, + * based on the outgoing interface. The lower layers (e.g., nd6) absolutely + * require addresses to be properly zoned for correctness. In some cases, + * earlier attempts will have been made to add a zone to the destination, + * but this function is the only one that is called in all (other) cases, + * so we must do this here. */ + if (ip6_addr_lacks_zone(dest, IP6_UNKNOWN)) { + ip6_addr_copy(dest_addr, *dest); + ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif); + dest = &dest_addr; + } +#endif /* LWIP_IPV6_SCOPES */ + + /* generate IPv6 header */ + if (pbuf_add_header(p, IP6_HLEN)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: not enough room for IPv6 header in pbuf\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + ip6hdr = (struct ip6_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip6_hdr", + (p->len >= sizeof(struct ip6_hdr))); + + IP6H_HOPLIM_SET(ip6hdr, hl); + IP6H_NEXTH_SET(ip6hdr, nexth); + + /* dest cannot be NULL here */ + ip6_addr_copy_to_packed(ip6hdr->dest, *dest); + + IP6H_VTCFL_SET(ip6hdr, 6, tc, 0); + IP6H_PLEN_SET(ip6hdr, (u16_t)(p->tot_len - IP6_HLEN)); + + if (src == NULL) { + src = IP6_ADDR_ANY6; + } + /* src cannot be NULL here */ + ip6_addr_copy_to_packed(ip6hdr->src, *src); + + } else { + /* IP header already included in p */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif); + dest = &dest_addr; + } + + IP6_STATS_INC(ip6.xmit); + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip6_debug_print(p); + +#if ENABLE_LOOPBACK + { + int i; +#if !LWIP_HAVE_LOOPIF + if (ip6_addr_isloopback(dest)) { + return netif_loop_output(netif, p); + } +#endif /* !LWIP_HAVE_LOOPIF */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(dest, netif_ip6_addr(netif, i))) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP6_DEBUG, ("netif_loop_output()\n")); + return netif_loop_output(netif, p); + } + } + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if LWIP_IPV6_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif_mtu6(netif) && (p->tot_len > nd6_get_destination_mtu(dest, netif))) { + return ip6_frag(p, netif, dest); + } +#endif /* LWIP_IPV6_FRAG */ + + LWIP_DEBUGF(IP6_DEBUG, ("netif->output_ip6()\n")); + return netif->output_ip6(netif, p, dest); +} + +/** + * Simple interface to ip6_output_if. It finds the outgoing network + * interface and calls upon ip6_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. 
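/* Editor's illustrative note -- not part of the lwIP sources. The first 32-bit
 * word of the IPv6 header that IP6H_VTCFL_SET() fills in above packs the 4-bit
 * version, 8-bit traffic class and 20-bit flow label (RFC 8200 sec. 3); on the
 * wire it is stored in network byte order. A plain-C sketch of that packing: */
#include <stdint.h>

static uint32_t ip6_vtcfl_word(uint8_t traffic_class, uint32_t flow_label)
{
  return ((uint32_t)6 << 28) |                 /* version = 6 */
         ((uint32_t)traffic_class << 20) |     /* traffic class */
         (flow_label & 0xFFFFFUL);             /* 20-bit flow label */
}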
+ * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(src_addr, ip6hdr->src); + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + return ip6_output_if(p, src, dest, hl, tc, nexth, netif); +} + + +#if LWIP_NETIF_USE_HINTS +/** Like ip6_output, but takes and addr_hint pointer that is passed on to netif->addr_hint + * before calling ip6_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param netif_hint netif output hint pointer set to netif->hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. 
*/ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(src_addr, ip6hdr->src); + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + NETIF_SET_HINTS(netif, netif_hint); + err = ip6_output_if(p, src, dest, hl, tc, nexth, netif); + NETIF_RESET_HINTS(netif); + + return err; +} +#endif /* LWIP_NETIF_USE_HINTS*/ + +#if LWIP_IPV6_MLD +/** + * Add a hop-by-hop options header with a router alert option and padding. + * + * Used by MLD when sending a Multicast listener report/done message. + * + * @param p the packet to which we will prepend the options header + * @param nexth the next header protocol number (e.g. IP6_NEXTH_ICMP6) + * @param value the value of the router alert option data (e.g. IP6_ROUTER_ALERT_VALUE_MLD) + * @return ERR_OK if hop-by-hop header was added, ERR_* otherwise + */ +err_t +ip6_options_add_hbh_ra(struct pbuf *p, u8_t nexth, u8_t value) +{ + u8_t *opt_data; + u32_t offset = 0; + struct ip6_hbh_hdr *hbh_hdr; + struct ip6_opt_hdr *opt_hdr; + + /* fixed 4 bytes for router alert option and 2 bytes padding */ + const u8_t hlen = (sizeof(struct ip6_opt_hdr) * 2) + IP6_ROUTER_ALERT_DLEN; + /* Move pointer to make room for hop-by-hop options header. */ + if (pbuf_add_header(p, sizeof(struct ip6_hbh_hdr) + hlen)) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_options: no space for options header\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + /* Set fields of Hop-by-Hop header */ + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + IP6_HBH_NEXTH(hbh_hdr) = nexth; + hbh_hdr->_hlen = 0; + offset = IP6_HBH_HLEN; + + /* Set router alert options to Hop-by-Hop extended option header */ + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset); + IP6_OPT_TYPE(opt_hdr) = IP6_ROUTER_ALERT_OPTION; + IP6_OPT_DLEN(opt_hdr) = IP6_ROUTER_ALERT_DLEN; + offset += IP6_OPT_HLEN; + + /* Set router alert option data */ + opt_data = (u8_t *)hbh_hdr + offset; + opt_data[0] = value; + opt_data[1] = 0; + offset += IP6_OPT_DLEN(opt_hdr); + + /* add 2 bytes padding to make 8 bytes Hop-by-Hop header length */ + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset); + IP6_OPT_TYPE(opt_hdr) = IP6_PADN_OPTION; + IP6_OPT_DLEN(opt_hdr) = 0; + + return ERR_OK; +} +#endif /* LWIP_IPV6_MLD */ + +#if IP6_DEBUG +/* Print an IPv6 header by using LWIP_DEBUGF + * @param p an IPv6 packet, p->payload pointing to the IPv6 header + */ +void +ip6_debug_print(struct pbuf *p) +{ + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + + LWIP_DEBUGF(IP6_DEBUG, ("IPv6 header:\n")); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %2"U16_F" | %3"U16_F" | %7"U32_F" | (ver, class, flow)\n", + IP6H_V(ip6hdr), + IP6H_TC(ip6hdr), + IP6H_FL(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %5"U16_F" | %3"U16_F" | %3"U16_F" | (plen, nexth, hopl)\n", + IP6H_PLEN(ip6hdr), + IP6H_NEXTH(ip6hdr), + IP6H_HOPLIM(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (src)\n", + 
IP6_ADDR_BLOCK1(&(ip6hdr->src)), + IP6_ADDR_BLOCK2(&(ip6hdr->src)), + IP6_ADDR_BLOCK3(&(ip6hdr->src)), + IP6_ADDR_BLOCK4(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->src)), + IP6_ADDR_BLOCK6(&(ip6hdr->src)), + IP6_ADDR_BLOCK7(&(ip6hdr->src)), + IP6_ADDR_BLOCK8(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (dest)\n", + IP6_ADDR_BLOCK1(&(ip6hdr->dest)), + IP6_ADDR_BLOCK2(&(ip6hdr->dest)), + IP6_ADDR_BLOCK3(&(ip6hdr->dest)), + IP6_ADDR_BLOCK4(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->dest)), + IP6_ADDR_BLOCK6(&(ip6hdr->dest)), + IP6_ADDR_BLOCK7(&(ip6hdr->dest)), + IP6_ADDR_BLOCK8(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP6_DEBUG */ + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c new file mode 100644 index 000000000..687c02f76 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c @@ -0,0 +1,343 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Functions for handling IPv6 addresses. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/def.h" + +#include + +#if LWIP_IPV4 +#include "lwip/ip4_addr.h" /* for ip6addr_aton to handle IPv4-mapped addresses */ +#endif /* LWIP_IPV4 */ + +/* used by IP6_ADDR_ANY(6) in ip6_addr.h */ +const ip_addr_t ip6_addr_any = IPADDR6_INIT(0ul, 0ul, 0ul, 0ul); + +#define lwip_xchar(i) ((char)((i) < 10 ? 
'0' + (i) : 'A' + (i) - 10)) + +/** + * Check whether "cp" is a valid ascii representation + * of an IPv6 address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * + * @param cp IPv6 address in ascii representation (e.g. "FF01::1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip6addr_aton(const char *cp, ip6_addr_t *addr) +{ + u32_t addr_index, zero_blocks, current_block_index, current_block_value; + const char *s; +#if LWIP_IPV4 + int check_ipv4_mapped = 0; +#endif /* LWIP_IPV4 */ + + /* Count the number of colons, to count the number of blocks in a "::" sequence + zero_blocks may be 1 even if there are no :: sequences */ + zero_blocks = 8; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + zero_blocks--; +#if LWIP_IPV4 + } else if (*s == '.') { + if ((zero_blocks == 5) ||(zero_blocks == 2)) { + check_ipv4_mapped = 1; + /* last block could be the start of an IPv4 address */ + zero_blocks--; + } else { + /* invalid format */ + return 0; + } + break; +#endif /* LWIP_IPV4 */ + } else if (!lwip_isxdigit(*s)) { + break; + } + } + + /* parse each block */ + addr_index = 0; + current_block_index = 0; + current_block_value = 0; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } + } + current_block_index++; +#if LWIP_IPV4 + if (check_ipv4_mapped) { + if (current_block_index == 6) { + ip4_addr_t ip4; + int ret = ip4addr_aton(s + 1, &ip4); + if (ret) { + if (addr) { + addr->addr[3] = lwip_htonl(ip4.addr); + current_block_index++; + goto fix_byte_order_and_return; + } + return 1; + } + } + } +#endif /* LWIP_IPV4 */ + current_block_value = 0; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + if (s[1] == ':') { + if (s[2] == ':') { + /* invalid format: three successive colons */ + return 0; + } + s++; + /* "::" found, set zeros */ + while (zero_blocks > 0) { + zero_blocks--; + if (current_block_index & 0x1) { + addr_index++; + } else { + if (addr) { + addr->addr[addr_index] = 0; + } + } + current_block_index++; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + } + } + } else if (lwip_isxdigit(*s)) { + /* add current digit */ + current_block_value = (current_block_value << 4) + + (lwip_isdigit(*s) ? (u32_t)(*s - '0') : + (u32_t)(10 + (lwip_islower(*s) ? *s - 'a' : *s - 'A'))); + } else { + /* unexpected digit, space? CRLF? */ + break; + } + } + + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } +#if LWIP_IPV4 +fix_byte_order_and_return: +#endif + /* convert to network byte order. */ + for (addr_index = 0; addr_index < 4; addr_index++) { + addr->addr[addr_index] = lwip_htonl(addr->addr[addr_index]); + } + + ip6_addr_clear_zone(addr); + } + + if (current_block_index != 7) { + return 0; + } + + return 1; +} + +/** + * Convert numeric IPv6 address into ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip6 address in network order to convert + * @return pointer to a global static (!) 
buffer that holds the ASCII + * representation of addr + */ +char * +ip6addr_ntoa(const ip6_addr_t *addr) +{ + static char str[40]; + return ip6addr_ntoa_r(addr, str, 40); +} + +/** + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip6 address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen) +{ + u32_t current_block_index, current_block_value, next_block_value; + s32_t i; + u8_t zero_flag, empty_block_flag; + +#if LWIP_IPV4 + if (ip6_addr_isipv4mappedipv6(addr)) { + /* This is an IPv4 mapped address */ + ip4_addr_t addr4; + char *ret; +#define IP4MAPPED_HEADER "::FFFF:" + char *buf_ip4 = buf + sizeof(IP4MAPPED_HEADER) - 1; + int buflen_ip4 = buflen - sizeof(IP4MAPPED_HEADER) + 1; + if (buflen < (int)sizeof(IP4MAPPED_HEADER)) { + return NULL; + } + memcpy(buf, IP4MAPPED_HEADER, sizeof(IP4MAPPED_HEADER)); + addr4.addr = addr->addr[3]; + ret = ip4addr_ntoa_r(&addr4, buf_ip4, buflen_ip4); + if (ret != buf_ip4) { + return NULL; + } + return buf; + } +#endif /* LWIP_IPV4 */ + i = 0; + empty_block_flag = 0; /* used to indicate a zero chain for "::' */ + + for (current_block_index = 0; current_block_index < 8; current_block_index++) { + /* get the current 16-bit block */ + current_block_value = lwip_htonl(addr->addr[current_block_index >> 1]); + if ((current_block_index & 0x1) == 0) { + current_block_value = current_block_value >> 16; + } + current_block_value &= 0xffff; + + /* Check for empty block. */ + if (current_block_value == 0) { + if (current_block_index == 7 && empty_block_flag == 1) { + /* special case, we must render a ':' for the last block. */ + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + break; + } + if (empty_block_flag == 0) { + /* generate empty block "::", but only if more than one contiguous zero block, + * according to current formatting suggestions RFC 5952. */ + next_block_value = lwip_htonl(addr->addr[(current_block_index + 1) >> 1]); + if ((current_block_index & 0x1) == 0x01) { + next_block_value = next_block_value >> 16; + } + next_block_value &= 0xffff; + if (next_block_value == 0) { + empty_block_flag = 1; + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + continue; /* move on to next block. */ + } + } else if (empty_block_flag == 1) { + /* move on to next block. */ + continue; + } + } else if (empty_block_flag == 1) { + /* Set this flag value so we don't produce multiple empty blocks. 
*/ + empty_block_flag = 2; + } + + if (current_block_index > 0) { + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + } + + if ((current_block_value & 0xf000) == 0) { + zero_flag = 1; + } else { + buf[i++] = lwip_xchar(((current_block_value & 0xf000) >> 12)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + if (((current_block_value & 0xf00) == 0) && (zero_flag)) { + /* do nothing */ + } else { + buf[i++] = lwip_xchar(((current_block_value & 0xf00) >> 8)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + if (((current_block_value & 0xf0) == 0) && (zero_flag)) { + /* do nothing */ + } + else { + buf[i++] = lwip_xchar(((current_block_value & 0xf0) >> 4)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + buf[i++] = lwip_xchar((current_block_value & 0xf)); + if (i >= buflen) { + return NULL; + } + } + + buf[i] = 0; + + return buf; +} + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c new file mode 100644 index 000000000..d6c5d223c --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c @@ -0,0 +1,862 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" +#include "lwip/ip6_frag.h" +#include "lwip/ip6.h" +#include "lwip/icmp6.h" +#include "lwip/nd6.h" +#include "lwip/ip.h" + +#include "lwip/pbuf.h" +#include "lwip/memp.h" +#include "lwip/stats.h" + +#include + +#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ + + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. Only use this if you know that + * overlapping won't occur on your network! 
*/ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#if IPV6_FRAG_COPYHEADER +/* The number of bytes we need to "borrow" from (i.e., overwrite in) the header + * that precedes the fragment header for reassembly pruposes. */ +#define IPV6_FRAG_REQROOM ((s16_t)(sizeof(struct ip6_reass_helper) - IP6_FRAG_HLEN)) +#endif + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IPv6 header, since it replaces + * the Fragment Header in memory in incoming fragments to keep + * track of the various fragments. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* static variables */ +static struct ip6_reassdata *reassdatagrams; +static u16_t ip6_reass_pbufcount; + +/* Forward declarations. */ +static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr); +#if IP_REASS_FREE_OLDEST +static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed); +#endif /* IP_REASS_FREE_OLDEST */ + +void +ip6_reass_tmr(void) +{ + struct ip6_reassdata *r, *tmp; + +#if !IPV6_FRAG_COPYHEADER + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* !IPV6_FRAG_COPYHEADER */ + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + r = r->next; + } else { + /* reassembly timed out */ + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip6_reass_free_complete_datagram(tmp); + } + } +} + +/** + * Free a datagram (struct ip6_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip6_reass_pbufcount), + * sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + */ +static void +ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr) +{ + struct ip6_reassdata *prev; + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip6_reass_helper *iprh; + +#if LWIP_ICMP6 + iprh = (struct ip6_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Restore the part that we've overwritten with our helper structure, or we + * might send garbage (and disclose a pointer) in the ICMPv6 reply. */ + MEMCPY(p->payload, ipr->orig_hdr, sizeof(iprh)); + /* Then, move back to the original ipv6 header (we are now pointing to Fragment header). 
+ This cannot fail since we already checked when receiving this fragment. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr))) { + LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0); + } + else { + /* Reconstruct the zoned source and destination addresses, so that we do + * not end up sending the ICMP response over the wrong link. */ + ip6_addr_t src_addr, dest_addr; + ip6_addr_copy_from_packed(src_addr, IPV6_FRAG_SRC(ipr)); + ip6_addr_set_zone(&src_addr, ipr->src_zone); + ip6_addr_copy_from_packed(dest_addr, IPV6_FRAG_DEST(ipr)); + ip6_addr_set_zone(&dest_addr, ipr->dest_zone); + /* Send the actual ICMP response. */ + icmp6_time_exceeded_with_addrs(p, ICMP6_TE_FRAG, &src_addr, &dest_addr); + } + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(p); + } +#endif /* LWIP_ICMP6 */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip6_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(pcur); + } + + /* Then, unchain the struct ip6_reassdata from the list and free it. */ + if (ipr == reassdatagrams) { + reassdatagrams = ipr->next; + } else { + prev = reassdatagrams; + while (prev != NULL) { + if (prev->next == ipr) { + break; + } + prev = prev->next; + } + if (prev != NULL) { + prev->next = ipr->next; + } + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* Finally, update number of pbufs in reassembly queue */ + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed); + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - pbufs_freed); +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram ipr is not freed! + * + * @param ipr ip6_reassdata for the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + */ +static void +ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed) +{ + struct ip6_reassdata *r, *oldest; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the current datagram! */ + do { + r = oldest = reassdatagrams; + while (r != NULL) { + if (r != ipr) { + if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + } + } + r = r->next; + } + if (oldest == ipr) { + /* nothing to free, ipr is the only element on the list */ + return; + } + if (oldest != NULL) { + ip6_reass_free_complete_datagram(oldest); + } + } while (((ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS) && (reassdatagrams != NULL)); +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Reassembles incoming IPv6 fragments into an IPv6 datagram. 
+ * + * @param p points to the IPv6 Fragment Header + * @return NULL if reassembly is incomplete, pbuf pointing to + * IPv6 Header if reassembly is complete + */ +struct pbuf * +ip6_reass(struct pbuf *p) +{ + struct ip6_reassdata *ipr, *ipr_prev; + struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL; + struct ip6_frag_hdr *frag_hdr; + u16_t offset, len, start, end; + ptrdiff_t hdrdiff; + u16_t clen; + u8_t valid = 1; + struct pbuf *q, *next_pbuf; + + IP6_FRAG_STATS_INC(ip6_frag.recv); + + /* ip6_frag_hdr must be in the first pbuf, not chained. Checked by caller. */ + LWIP_ASSERT("IPv6 fragment header does not fit in first pbuf", + p->len >= sizeof(struct ip6_frag_hdr)); + + frag_hdr = (struct ip6_frag_hdr *) p->payload; + + clen = pbuf_clen(p); + + offset = lwip_ntohs(frag_hdr->_fragment_offset); + + /* Calculate fragment length from IPv6 payload length. + * Adjust for headers before Fragment Header. + * And finally adjust by Fragment Header length. */ + len = lwip_ntohs(ip6_current_header()->_plen); + hdrdiff = (u8_t*)p->payload - (const u8_t*)ip6_current_header(); + LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff <= 0xFFFF); + LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff >= IP6_HLEN); + hdrdiff -= IP6_HLEN; + hdrdiff += IP6_FRAG_HLEN; + if (hdrdiff > len) { + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + len = (u16_t)(len - hdrdiff); + start = (offset & IP6_FRAG_OFFSET_MASK); + if (start > (0xFFFF - len)) { + /* u16_t overflow, cannot handle this */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if ((frag_hdr->_identification == ipr->identification) && + ip6_addr_cmp_packed(ip6_current_src_addr(), &(IPV6_FRAG_SRC(ipr)), ipr->src_zone) && + ip6_addr_cmp_packed(ip6_current_dest_addr(), &(IPV6_FRAG_DEST(ipr)), ipr->dest_zone)) { + IP6_FRAG_STATS_INC(ip6_frag.cachehit); + break; + } + ipr_prev = ipr; + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + /* Make room and try again. */ + ip6_reass_remove_oldest_datagram(ipr, clen); + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr != NULL) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + goto nullreturn; + } + } + + memset(ipr, 0, sizeof(struct ip6_reassdata)); + ipr->timer = IPV6_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + + /* Use the current IPv6 header for src/dest address reference. + * Eventually, we will replace it when we get the first fragment + * (it might be this one, in any case, it is done later). 
*/ + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; +#if IPV6_FRAG_COPYHEADER + MEMCPY(&ipr->src, &ip6_current_header()->src, sizeof(ipr->src)); + MEMCPY(&ipr->dest, &ip6_current_header()->dest, sizeof(ipr->dest)); +#endif /* IPV6_FRAG_COPYHEADER */ +#if LWIP_IPV6_SCOPES + /* Also store the address zone information. + * @todo It is possible that due to netif destruction and recreation, the + * stored zones end up resolving to a different interface. In that case, we + * risk sending a "time exceeded" ICMP response over the wrong link. + * Ideally, netif destruction would clean up matching pending reassembly + * structures, but custom zone mappings would make that non-trivial. */ + ipr->src_zone = ip6_addr_zone(ip6_current_src_addr()); + ipr->dest_zone = ip6_addr_zone(ip6_current_dest_addr()); +#endif /* LWIP_IPV6_SCOPES */ + /* copy the fragmented packet id. */ + ipr->identification = frag_hdr->_identification; + + /* copy the nexth field */ + ipr->nexth = frag_hdr->_nexth; + } + + /* Check if we are allowed to enqueue more datagrams. */ + if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + ip6_reass_remove_oldest_datagram(ipr, clen); + if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + /* @todo: send ICMPv6 time exceeded here? */ + /* drop this pbuf */ + IP6_FRAG_STATS_INC(ip6_frag.memerr); + goto nullreturn; + } + } + + /* Overwrite Fragment Header with our own helper struct. */ +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4). + This cannot fail since we already checked when receiving this fragment. */ + u8_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#else /* IPV6_FRAG_COPYHEADER */ + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* IPV6_FRAG_COPYHEADER */ + + /* Prepare the pointer to the helper structure, and its initial values. + * Do not yet write to the structure itself, as we still have to make a + * backup of the original data, and we should not do that until we know for + * sure that we are going to add this packet to the list. */ + iprh = (struct ip6_reass_helper *)p->payload; + next_pbuf = NULL; + end = (u16_t)(start + len); + + /* find the right place to insert this pbuf */ + /* Iterate through until we either get to the end of the list (append), + * or we find on with a larger offset (insert). 
*/ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip6_reass_helper*)q->payload; + if (start < iprh_tmp->start) { +#if IP_REASS_CHECK_OVERLAP + if (end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + if (iprh_prev != NULL) { + if (start < iprh_prev->end) { + /* fragment overlaps with previous, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* the new pbuf should be inserted before this */ + next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ + iprh_prev->next_pbuf = p; + } else { + /* fragment with the lowest offset */ + ipr->p = p; + } + break; + } else if (start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + goto nullreturn; +#if IP_REASS_CHECK_OVERLAP + } else if (start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no gaps. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = p; + if (iprh_prev->end != start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = p; + } + } + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time */ + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount + clen); + + /* Remember IPv6 header if this is the first fragment. */ + if (start == 0) { + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; + /* Make a backup of the part of the packet data that we are about to + * overwrite, so that we can restore the original later. */ + MEMCPY(ipr->orig_hdr, p->payload, sizeof(*iprh)); + /* For IPV6_FRAG_COPYHEADER there is no need to copy src/dst again, as they + * will be the same as they were. With LWIP_IPV6_SCOPES, the same applies + * to the source/destination zones. */ + } + /* Only after the backup do we get to fill in the actual helper structure. */ + iprh->next_pbuf = next_pbuf; + iprh->start = start; + iprh->end = end; + + /* If this is the last fragment, calculate total packet length. */ + if ((offset & IP6_FRAG_MORE_FLAG) == 0) { + ipr->datagram_len = iprh->end; + } + + /* Additional validity tests: we have received first and last fragment. */ + iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload; + if (iprh_tmp->start != 0) { + valid = 0; + } + if (ipr->datagram_len == 0) { + valid = 0; + } + + /* Final validity test: no gaps between current and last fragment. 
*/ + iprh_prev = iprh; + q = iprh->next_pbuf; + while ((q != NULL) && valid) { + iprh = (struct ip6_reass_helper*)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + + if (valid) { + /* All fragments have been received */ + struct ip6_hdr* iphdr_ptr; + + /* chain together the pbufs contained within the ip6_reassdata list. */ + iprh = (struct ip6_reass_helper*) ipr->p->payload; + while (iprh != NULL) { + next_pbuf = iprh->next_pbuf; + if (next_pbuf != NULL) { + /* Save next helper struct (will be hidden in next step). */ + iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload; + + /* hide the fragment header for every succeeding fragment */ + pbuf_remove_header(next_pbuf, IP6_FRAG_HLEN); +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */ + u8_t hdrerr = pbuf_remove_header(next_pbuf, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + pbuf_cat(ipr->p, next_pbuf); + } + else { + iprh_tmp = NULL; + } + + iprh = iprh_tmp; + } + + /* Get the first pbuf. */ + p = ipr->p; + +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + u8_t hdrerr; + /* Restore (only) the bytes that we overwrote beyond the fragment header. + * Those bytes may belong to either the IPv6 header or an extension + * header placed before the fragment header. */ + MEMCPY(p->payload, ipr->orig_hdr, IPV6_FRAG_REQROOM); + /* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */ + hdrerr = pbuf_remove_header(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + + /* We need to get rid of the fragment header itself, which is somewhere in + * the middle of the packet (but still in the first pbuf of the chain). + * Getting rid of the header is required by RFC 2460 Sec. 4.5 and necessary + * in order to be able to reassemble packets that are close to full size + * (i.e., around 65535 bytes). We simply move up all the headers before the + * fragment header, including the IPv6 header, and adjust the payload start + * accordingly. This works because all these headers are in the first pbuf + * of the chain, and because the caller adjusts all its pointers on + * successful reassembly. */ + MEMMOVE((u8_t*)ipr->iphdr + sizeof(struct ip6_frag_hdr), ipr->iphdr, + (size_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr)); + + /* This is where the IPv6 header is now. */ + iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->iphdr + + sizeof(struct ip6_frag_hdr)); + + /* Adjust datagram length by adding header lengths. */ + ipr->datagram_len = (u16_t)(ipr->datagram_len + ((u8_t*)p->payload - (u8_t*)iphdr_ptr) + - IP6_HLEN); + + /* Set payload length in ip header. */ + iphdr_ptr->_plen = lwip_htons(ipr->datagram_len); + + /* With the fragment header gone, we now need to adjust the next-header + * field of whatever header was originally before it. Since the packet made + * it through the original header processing routines at least up to the + * fragment header, we do not need any further sanity checks here. 
*/ + if (IP6H_NEXTH(iphdr_ptr) == IP6_NEXTH_FRAGMENT) { + iphdr_ptr->_nexth = ipr->nexth; + } else { + u8_t *ptr = (u8_t *)iphdr_ptr + IP6_HLEN; + while (*ptr != IP6_NEXTH_FRAGMENT) { + ptr += 8 * (1 + ptr[1]); + } + *ptr = ipr->nexth; + } + + /* release the resources allocated for the fragment queue entry */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", ipr_prev != NULL); + ipr_prev->next = ipr->next; + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* adjust the number of pbufs currently queued for reassembly. */ + clen = pbuf_clen(p); + LWIP_ASSERT("ip6_reass_pbufcount >= clen", ip6_reass_pbufcount >= clen); + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - clen); + + /* Move pbuf back to IPv6 header. This should never fail. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) { + LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0); + pbuf_free(p); + return NULL; + } + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + return NULL; + +nullreturn: + IP6_FRAG_STATS_INC(ip6_frag.drop); + pbuf_free(p); + return NULL; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_FRAG + +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref* +ip6_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip6_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ip6_frag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip6_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IPv6 datagram if too large for the netif or path MTU. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p + * + * @param p ipv6 packet to send + * @param netif the netif on which to send + * @param dest destination ipv6 address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest) +{ + struct ip6_hdr *original_ip6hdr; + struct ip6_hdr *ip6hdr; + struct ip6_frag_hdr *frag_hdr; + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + static u32_t identification; + u16_t left, cop; + const u16_t mtu = nd6_get_destination_mtu(dest, netif); + const u16_t nfb = (u16_t)((mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK); + u16_t fragment_offset = 0; + u16_t last; + u16_t poff = IP6_HLEN; + + identification++; + + original_ip6hdr = (struct ip6_hdr *)p->payload; + + /* @todo we assume there are no options in the unfragmentable part (IPv6 header). */ + LWIP_ASSERT("p->tot_len >= IP6_HLEN", p->tot_len >= IP6_HLEN); + left = (u16_t)(p->tot_len - IP6_HLEN); + + while (left) { + last = (left <= nfb); + + /* Fill this fragment */ + cop = last ? 
left : nfb; + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, cop + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, (u8_t*)rambuf->payload + IP6_FRAG_HLEN, cop, poff); + /* make room for the IP header */ + if (pbuf_add_header(rambuf, IP6_HLEN)) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); +#else + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link, IPv6, and Fragment header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (p->len >= (IP6_HLEN))); + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); + + /* Can just adjust p directly for needed offset. */ + p->payload = (u8_t *)p->payload + poff; + p->len = (u16_t)(p->len - poff); + p->tot_len = (u16_t)(p->tot_len - poff); + + left_to_copy = cop; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len; + /* Is this pbuf already empty? */ + if (!newpbuflen) { + p = p->next; + continue; + } + pcr = ip6_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen); + if (newpbuf == NULL) { + ip6_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. + */ + pbuf_cat(rambuf, newpbuf); + left_to_copy = (u16_t)(left_to_copy - newpbuflen); + if (left_to_copy) { + p = p->next; + } + } + poff = newpbuflen; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Set headers */ + frag_hdr->_nexth = original_ip6hdr->_nexth; + frag_hdr->reserved = 0; + frag_hdr->_fragment_offset = lwip_htons((u16_t)((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG))); + frag_hdr->_identification = lwip_htonl(identification); + + IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT); + IP6H_PLEN_SET(ip6hdr, (u16_t)(cop + IP6_FRAG_HLEN)); + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + IP6_FRAG_STATS_INC(ip6_frag.xmit); + netif->output_ip6(netif, rambuf, dest); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. 
If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left = (u16_t)(left - cop); + fragment_offset = (u16_t)(fragment_offset + cop); + } + return ERR_OK; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c new file mode 100644 index 000000000..6387d468c --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c @@ -0,0 +1,626 @@ +/** + * @file + * Multicast listener discovery + * + * @defgroup mld6 MLD6 + * @ingroup ip6 + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2.\n + * Note: The allnodes (ff01::1, ff02::1) group is assumed be received by your + * netif since it must always be received for correct IPv6 operation (e.g. SLAAC). + * Ensure the netif filters are configured accordingly!\n + * The netif flags also need NETIF_FLAG_MLD6 flag set to enable MLD6 on a + * netif ("netif->flags |= NETIF_FLAG_MLD6;").\n + * To be called from TCPIP thread. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/* Based on igmp.c implementation of igmp v2 protocol */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_IPV6_MLD /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mld6.h" +#include "lwip/prot/mld6.h" +#include "lwip/icmp6.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip.h" +#include "lwip/inet_chksum.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/memp.h" +#include "lwip/stats.h" + +#include + + +/* + * MLD constants + */ +#define MLD6_HL 1 +#define MLD6_JOIN_DELAYING_MEMBER_TMR_MS (500) + +#define MLD6_GROUP_NON_MEMBER 0 +#define MLD6_GROUP_DELAYING_MEMBER 1 +#define MLD6_GROUP_IDLE_MEMBER 2 + +/* Forward declarations. */ +static struct mld_group *mld6_new_group(struct netif *ifp, const ip6_addr_t *addr); +static err_t mld6_remove_group(struct netif *netif, struct mld_group *group); +static void mld6_delayed_report(struct mld_group *group, u16_t maxresp); +static void mld6_send(struct netif *netif, struct mld_group *group, u8_t type); + + +/** + * Stop MLD processing on interface + * + * @param netif network interface on which stop MLD processing + */ +err_t +mld6_stop(struct netif *netif) +{ + struct mld_group *group = netif_mld6_data(netif); + + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, NULL); + + while (group != NULL) { + struct mld_group *next = group->next; /* avoid use-after-free below */ + + /* disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); + } + + /* free group */ + memp_free(MEMP_MLD6_GROUP, group); + + /* move to "next" */ + group = next; + } + return ERR_OK; +} + +/** + * Report MLD memberships for this interface + * + * @param netif network interface on which report MLD memberships + */ +void +mld6_report_groups(struct netif *netif) +{ + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + group = group->next; + } +} + +/** + * Search for a group that is joined on a netif + * + * @param ifp the network interface for which to look + * @param addr the group ipv6 address to search for + * @return a struct mld_group* if the group has been found, + * NULL if the group wasn't found. + */ +struct mld_group * +mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr) +{ + struct mld_group *group = netif_mld6_data(ifp); + + while (group != NULL) { + if (ip6_addr_cmp(&(group->group_address), addr)) { + return group; + } + group = group->next; + } + + return NULL; +} + + +/** + * create a new group + * + * @param ifp the network interface for which to create + * @param addr the new group ipv6 + * @return a struct mld_group*, + * NULL on memory error. 
+ */ +static struct mld_group * +mld6_new_group(struct netif *ifp, const ip6_addr_t *addr) +{ + struct mld_group *group; + + group = (struct mld_group *)memp_malloc(MEMP_MLD6_GROUP); + if (group != NULL) { + ip6_addr_set(&(group->group_address), addr); + group->timer = 0; /* Not running */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + group->use = 0; + group->next = netif_mld6_data(ifp); + + netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group); + } + + return group; +} + +/** + * Remove a group from the mld_group_list, but do not free it yet + * + * @param group the group to remove + * @return ERR_OK if group was removed from the list, an err_t otherwise + */ +static err_t +mld6_remove_group(struct netif *netif, struct mld_group *group) +{ + err_t err = ERR_OK; + + /* Is it the first group? */ + if (netif_mld6_data(netif) == group) { + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group->next); + } else { + /* look for group further down the list */ + struct mld_group *tmpGroup; + for (tmpGroup = netif_mld6_data(netif); tmpGroup != NULL; tmpGroup = tmpGroup->next) { + if (tmpGroup->next == group) { + tmpGroup->next = group->next; + break; + } + } + /* Group not find group */ + if (tmpGroup == NULL) { + err = ERR_ARG; + } + } + + return err; +} + + +/** + * Process an input MLD message. Called by icmp6_input. + * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +mld6_input(struct pbuf *p, struct netif *inp) +{ + struct mld_header *mld_hdr; + struct mld_group *group; + + MLD6_STATS_INC(mld6.recv); + + /* Check that mld header fits in packet. */ + if (p->len < sizeof(struct mld_header)) { + /* @todo debug message */ + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + MLD6_STATS_INC(mld6.drop); + return; + } + + mld_hdr = (struct mld_header *)p->payload; + + switch (mld_hdr->type) { + case ICMP6_TYPE_MLQ: /* Multicast listener query. */ + /* Is it a general query? */ + if (ip6_addr_isallnodes_linklocal(ip6_current_dest_addr()) && + ip6_addr_isany(&(mld_hdr->multicast_address))) { + MLD6_STATS_INC(mld6.rx_general); + /* Report all groups, except all nodes group, and if-local groups. */ + group = netif_mld6_data(inp); + while (group != NULL) { + if ((!(ip6_addr_ismulticast_iflocal(&(group->group_address)))) && + (!(ip6_addr_isallnodes_linklocal(&(group->group_address))))) { + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + group = group->next; + } + } else { + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_group); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* Schedule a report. */ + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + } + break; /* ICMP6_TYPE_MLQ */ + case ICMP6_TYPE_MLR: /* Multicast listener report. */ + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_report); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* If we are waiting to report, cancel it. 
*/ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + group->timer = 0; /* stopped */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + } + break; /* ICMP6_TYPE_MLR */ + case ICMP6_TYPE_MLD: /* Multicast listener done. */ + /* Do nothing, router will query us. */ + break; /* ICMP6_TYPE_MLD */ + default: + MLD6_STATS_INC(mld6.proterr); + MLD6_STATS_INC(mld6.drop); + break; + } + + pbuf_free(p); +} + +/** + * @ingroup mld6 + * Join a group on one or all network interfaces. + * + * If the group is to be joined on all interfaces, the given group address must + * not have a zone set (i.e., it must have its zone index set to IP6_NO_ZONE). + * If the group is to be joined on one particular interface, the given group + * address may or may not have a zone set. + * + * @param srcaddr ipv6 address (zoned) of the network interface which should + * join a new group. If IP6_ADDR_ANY6, join on all netifs + * @param groupaddr the ipv6 address of the group to join (possibly but not + * necessarily zoned) + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we join this interface ? */ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err = mld6_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + return err; + } + } + } + + return err; +} + +/** + * @ingroup mld6 + * Join a group on a network interface. + * + * @param netif the network interface which should join a new group. + * @param groupaddr the ipv6 address of the group to join (possibly but not + * necessarily zoned) + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; +#if LWIP_IPV6_SCOPES + ip6_addr_t ip6addr; + + /* If the address has a particular scope but no zone set, use the netif to + * set one now. Within the mld6 module, all addresses are properly zoned. */ + if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) { + ip6_addr_set(&ip6addr, groupaddr); + ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif); + groupaddr = &ip6addr; + } + IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif); +#endif /* LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + + /* find group or create a new one if not found */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group == NULL) { + /* Joining a new group. Create a new group entry. */ + group = mld6_new_group(netif, groupaddr); + if (group == NULL) { + return ERR_MEM; + } + + /* Activate this address on the MAC layer. */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + /* Report our membership. */ + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + } + + /* Increment group use */ + group->use++; + return ERR_OK; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * Zoning of address follows the same rules as @ref mld6_joingroup. + * + * @param srcaddr ipv6 address (zoned) of the network interface which should + * leave the group. 
If IP6_ADDR_ANY6, leave on all netifs + * @param groupaddr the ipv6 address of the group to leave (possibly, but not + * necessarily zoned) + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we leave this interface ? */ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err_t res = mld6_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + } + + return err; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * @param netif the network interface which should leave the group. + * @param groupaddr the ipv6 address of the group to leave (possibly, but not + * necessarily zoned) + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; +#if LWIP_IPV6_SCOPES + ip6_addr_t ip6addr; + + if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) { + ip6_addr_set(&ip6addr, groupaddr); + ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif); + groupaddr = &ip6addr; + } + IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif); +#endif /* LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + + /* find group */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Leave if there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + mld6_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + MLD6_STATS_INC(mld6.tx_leave); + mld6_send(netif, group, ICMP6_TYPE_MLD); + } + + /* Disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* free group struct */ + memp_free(MEMP_MLD6_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + + /* Left group */ + return ERR_OK; + } + + /* Group not found */ + return ERR_VAL; +} + + +/** + * Periodic timer for mld processing. Must be called every + * MLD6_TMR_INTERVAL milliseconds (100). + * + * When a delaying member expires, a membership report is sent. + */ +void +mld6_tmr(void) +{ + struct netif *netif; + + NETIF_FOREACH(netif) { + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + /* If the state is MLD6_GROUP_DELAYING_MEMBER then we send a report for this group */ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + group->group_state = MLD6_GROUP_IDLE_MEMBER; + } + } + } + group = group->next; + } + } +} + +/** + * Schedule a delayed membership report for a group + * + * @param group the mld_group for which "delaying" membership report + * should be sent + * @param maxresp_in the max resp delay provided in the query + */ +static void +mld6_delayed_report(struct mld_group *group, u16_t maxresp_in) +{ + /* Convert maxresp from milliseconds to tmr ticks */ + u16_t maxresp = maxresp_in / MLD6_TMR_INTERVAL; + if (maxresp == 0) { + maxresp = 1; + } + +#ifdef LWIP_RAND + /* Randomize maxresp. 
(if LWIP_RAND is supported) */ + maxresp = (u16_t)(LWIP_RAND() % maxresp); + if (maxresp == 0) { + maxresp = 1; + } +#endif /* LWIP_RAND */ + + /* Apply timer value if no report has been scheduled already. */ + if ((group->group_state == MLD6_GROUP_IDLE_MEMBER) || + ((group->group_state == MLD6_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + group->timer = maxresp; + group->group_state = MLD6_GROUP_DELAYING_MEMBER; + } +} + +/** + * Send a MLD message (report or done). + * + * An IPv6 hop-by-hop options header with a router alert option + * is prepended. + * + * @param group the group to report or quit + * @param type ICMP6_TYPE_MLR (report) or ICMP6_TYPE_MLD (done) + */ +static void +mld6_send(struct netif *netif, struct mld_group *group, u8_t type) +{ + struct mld_header *mld_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + + /* Allocate a packet. Size is MLD header + IPv6 Hop-by-hop options header. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct mld_header) + MLD6_HBH_HLEN, PBUF_RAM); + if (p == NULL) { + MLD6_STATS_INC(mld6.memerr); + return; + } + + /* Move to make room for Hop-by-hop options header. */ + if (pbuf_remove_header(p, MLD6_HBH_HLEN)) { + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + return; + } + + /* Select our source address. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + /* This is a special case, when we are performing duplicate address detection. + * We must join the multicast group, but we don't have a valid address yet. */ + src_addr = IP6_ADDR_ANY6; + } else { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + } + + /* MLD message header pointer. */ + mld_hdr = (struct mld_header *)p->payload; + + /* Set fields. */ + mld_hdr->type = type; + mld_hdr->code = 0; + mld_hdr->chksum = 0; + mld_hdr->max_resp_delay = 0; + mld_hdr->reserved = 0; + ip6_addr_copy_to_packed(mld_hdr->multicast_address, group->group_address); + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + mld_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, + src_addr, &(group->group_address)); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Add hop-by-hop headers options: router alert with MLD value. */ + ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6, IP6_ROUTER_ALERT_VALUE_MLD); + + if (type == ICMP6_TYPE_MLR) { + /* Remember we were the last to report */ + group->last_reporter_flag = 1; + } + + /* Send the packet out. */ + MLD6_STATS_INC(mld6.xmit); + ip6_output_if(p, (ip6_addr_isany(src_addr)) ? NULL : src_addr, &(group->group_address), + MLD6_HL, 0, IP6_NEXTH_HOPBYHOP, netif); + pbuf_free(p); +} + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c new file mode 100644 index 000000000..db0c132e4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c @@ -0,0 +1,2434 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/nd6.h" +#include "lwip/priv/nd6_priv.h" +#include "lwip/prot/nd6.h" +#include "lwip/prot/icmp6.h" +#include "lwip/pbuf.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp6.h" +#include "lwip/mld6.h" +#include "lwip/dhcp6.h" +#include "lwip/ip.h" +#include "lwip/stats.h" +#include "lwip/dns.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK +#error LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK +#endif + +/* Router tables. */ +struct nd6_neighbor_cache_entry neighbor_cache[LWIP_ND6_NUM_NEIGHBORS]; +struct nd6_destination_cache_entry destination_cache[LWIP_ND6_NUM_DESTINATIONS]; +struct nd6_prefix_list_entry prefix_list[LWIP_ND6_NUM_PREFIXES]; +struct nd6_router_list_entry default_router_list[LWIP_ND6_NUM_ROUTERS]; + +/* Default values, can be updated by a RA message. */ +u32_t reachable_time = LWIP_ND6_REACHABLE_TIME; +u32_t retrans_timer = LWIP_ND6_RETRANS_TIMER; /* @todo implement this value in timer */ + +/* Index for cache entries. */ +static u8_t nd6_cached_neighbor_index; +static netif_addr_idx_t nd6_cached_destination_index; + +/* Multicast address holder. */ +static ip6_addr_t multicast_address; + +static u8_t nd6_tmr_rs_reduction; + +/* Static buffer to parse RA packet options */ +union ra_options { + struct lladdr_option lladdr; + struct mtu_option mtu; + struct prefix_option prefix; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + struct rdnss_option rdnss; +#endif +}; +static union ra_options nd6_ra_buffer; + +/* Forward declarations. 
*/ +static s8_t nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr); +static s8_t nd6_new_neighbor_cache_entry(void); +static void nd6_free_neighbor_cache_entry(s8_t i); +static s16_t nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr); +static s16_t nd6_new_destination_cache_entry(void); +static int nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif); +static err_t nd6_queue_packet(s8_t neighbor_index, struct pbuf *q); + +#define ND6_SEND_FLAG_MULTICAST_DEST 0x01 +#define ND6_SEND_FLAG_ALLNODES_DEST 0x02 +#define ND6_SEND_FLAG_ANY_SRC 0x04 +static void nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags); +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +static err_t nd6_send_rs(struct netif *netif); +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +#if LWIP_ND6_QUEUEING +static void nd6_free_q(struct nd6_q_entry *q); +#else /* LWIP_ND6_QUEUEING */ +#define nd6_free_q(q) pbuf_free(q) +#endif /* LWIP_ND6_QUEUEING */ +static void nd6_send_q(s8_t i); + + +/** + * A local address has been determined to be a duplicate. Take the appropriate + * action(s) on the address and the interface as a whole. + * + * @param netif the netif that owns the address + * @param addr_idx the index of the address detected to be a duplicate + */ +static void +nd6_duplicate_addr_detected(struct netif *netif, s8_t addr_idx) +{ + + /* Mark the address as duplicate, but leave its lifetimes alone. If this was + * a manually assigned address, it will remain in existence as duplicate, and + * as such be unusable for any practical purposes until manual intervention. + * If this was an autogenerated address, the address will follow normal + * expiration rules, and thus disappear once its valid lifetime expires. */ + netif_ip6_addr_set_state(netif, addr_idx, IP6_ADDR_DUPLICATED); + +#if LWIP_IPV6_AUTOCONFIG + /* If the affected address was the link-local address that we use to generate + * all other addresses, then we should not continue to use those derived + * addresses either, so mark them as duplicate as well. For autoconfig-only + * setups, this will make the interface effectively unusable, approaching the + * intention of RFC 4862 Sec. 5.4.5. @todo implement the full requirements */ + if (addr_idx == 0) { + s8_t i; + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + !netif_ip6_addr_isstatic(netif, i)) { + netif_ip6_addr_set_state(netif, i, IP6_ADDR_DUPLICATED); + } + } + } +#endif /* LWIP_IPV6_AUTOCONFIG */ +} + +#if LWIP_IPV6_AUTOCONFIG +/** + * We received a router advertisement that contains a prefix with the + * autoconfiguration flag set. Add or update an associated autogenerated + * address. 
+ * + * @param netif the netif on which the router advertisement arrived + * @param prefix_opt a pointer to the prefix option data + * @param prefix_addr an aligned copy of the prefix address + */ +static void +nd6_process_autoconfig_prefix(struct netif *netif, + struct prefix_option *prefix_opt, const ip6_addr_t *prefix_addr) +{ + ip6_addr_t ip6addr; + u32_t valid_life, pref_life; + u8_t addr_state; + s8_t i, free_idx; + + /* The caller already checks RFC 4862 Sec. 5.5.3 points (a) and (b). We do + * the rest, starting with checks for (c) and (d) here. */ + valid_life = lwip_htonl(prefix_opt->valid_lifetime); + pref_life = lwip_htonl(prefix_opt->preferred_lifetime); + if (pref_life > valid_life || prefix_opt->prefix_length != 64) { + return; /* silently ignore this prefix for autoconfiguration purposes */ + } + + /* If an autogenerated address already exists for this prefix, update its + * lifetimes. An address is considered autogenerated if 1) it is not static + * (i.e., manually assigned), and 2) there is an advertised autoconfiguration + * prefix for it (the one we are processing here). This does not necessarily + * exclude the possibility that the address was actually assigned by, say, + * DHCPv6. If that distinction becomes important in the future, more state + * must be kept. As explained elsewhere we also update lifetimes of tentative + * and duplicate addresses. Skip address slot 0 (the link-local address). */ + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + addr_state = netif_ip6_addr_state(netif, i); + if (!ip6_addr_isinvalid(addr_state) && !netif_ip6_addr_isstatic(netif, i) && + ip6_addr_netcmp(prefix_addr, netif_ip6_addr(netif, i))) { + /* Update the valid lifetime, as per RFC 4862 Sec. 5.5.3 point (e). + * The valid lifetime will never drop to zero as a result of this. */ + u32_t remaining_life = netif_ip6_addr_valid_life(netif, i); + if (valid_life > ND6_2HRS || valid_life > remaining_life) { + netif_ip6_addr_set_valid_life(netif, i, valid_life); + } else if (remaining_life > ND6_2HRS) { + netif_ip6_addr_set_valid_life(netif, i, ND6_2HRS); + } + LWIP_ASSERT("bad valid lifetime", !netif_ip6_addr_isstatic(netif, i)); + /* Update the preferred lifetime. No bounds checks are needed here. In + * rare cases the advertisement may un-deprecate the address, though. + * Deprecation is left to the timer code where it is handled anyway. */ + if (pref_life > 0 && addr_state == IP6_ADDR_DEPRECATED) { + netif_ip6_addr_set_state(netif, i, IP6_ADDR_PREFERRED); + } + netif_ip6_addr_set_pref_life(netif, i, pref_life); + return; /* there should be at most one matching address */ + } + } + + /* No autogenerated address exists for this prefix yet. See if we can add a + * new one. However, if IPv6 autoconfiguration is administratively disabled, + * do not generate new addresses, but do keep updating lifetimes for existing + * addresses. Also, when adding new addresses, we must protect explicitly + * against a valid lifetime of zero, because again, we use that as a special + * value. The generated address would otherwise expire immediately anyway. + * Finally, the original link-local address must be usable at all. We start + * creating addresses even if the link-local address is still in tentative + * state though, and deal with the fallout of that upon DAD collision. 
*/ + addr_state = netif_ip6_addr_state(netif, 0); + if (!netif->ip6_autoconfig_enabled || valid_life == IP6_ADDR_LIFE_STATIC || + ip6_addr_isinvalid(addr_state) || ip6_addr_isduplicated(addr_state)) { + return; + } + + /* Construct the new address that we intend to use, and then see if that + * address really does not exist. It might have been added manually, after + * all. As a side effect, find a free slot. Note that we cannot use + * netif_add_ip6_address() here, as it would return ERR_OK if the address + * already did exist, resulting in that address being given lifetimes. */ + IP6_ADDR(&ip6addr, prefix_addr->addr[0], prefix_addr->addr[1], + netif_ip6_addr(netif, 0)->addr[2], netif_ip6_addr(netif, 0)->addr[3]); + ip6_addr_assign_zone(&ip6addr, IP6_UNICAST, netif); + + free_idx = 0; + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) { + if (ip6_addr_cmp(&ip6addr, netif_ip6_addr(netif, i))) { + return; /* formed address already exists */ + } + } else if (free_idx == 0) { + free_idx = i; + } + } + if (free_idx == 0) { + return; /* no address slots available, try again on next advertisement */ + } + + /* Assign the new address to the interface. */ + ip_addr_copy_from_ip6(netif->ip6_addr[free_idx], ip6addr); + netif_ip6_addr_set_valid_life(netif, free_idx, valid_life); + netif_ip6_addr_set_pref_life(netif, free_idx, pref_life); + netif_ip6_addr_set_state(netif, free_idx, IP6_ADDR_TENTATIVE); +} +#endif /* LWIP_IPV6_AUTOCONFIG */ + +/** + * Process an incoming neighbor discovery message + * + * @param p the nd packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +nd6_input(struct pbuf *p, struct netif *inp) +{ + u8_t msg_type; + s8_t i; + s16_t dest_idx; + + ND6_STATS_INC(nd6.recv); + + msg_type = *((u8_t *)p->payload); + switch (msg_type) { + case ICMP6_TYPE_NA: /* Neighbor Advertisement. */ + { + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t target_address; + + /* Check that na header fits in packet. */ + if (p->len < (sizeof(struct na_header))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + na_hdr = (struct na_header *)p->payload; + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, na_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 7.1.2 requirements. */ + if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || na_hdr->code != 0 || + ip6_addr_ismulticast(&target_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: if IP destination is multicast, Solicited flag is zero */ + /* @todo RFC MUST: all included options have a length greater than zero */ + + /* Unsolicited NA?*/ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* This is an unsolicited NA. + * link-layer changed? + * part of DAD mechanism? */ + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* If the target address matches this netif, it is a DAD response. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + !ip6_addr_isduplicated(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* We are using a duplicate address. 
*/ + nd6_duplicate_addr_detected(inp, i); + + pbuf_free(p); + return; + } + } +#endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */ + + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* This is an unsolicited NA, most likely there was a LLADDR change. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i >= 0) { + if (na_hdr->flags & ND6_FLAG_OVERRIDE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + } + } else { + /* This is a solicited NA. + * neighbor address resolution response? + * neighbor unreachability detection response? */ + + /* Find the cache entry corresponding to this na. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + /* We no longer care about this target address. drop it. */ + pbuf_free(p); + return; + } + + /* Update cache entry. */ + if ((na_hdr->flags & ND6_FLAG_OVERRIDE) || + (neighbor_cache[i].state == ND6_INCOMPLETE)) { + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + + neighbor_cache[i].netif = inp; + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; + + /* Send queued packets, if any. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + } + + break; /* ICMP6_TYPE_NA */ + } + case ICMP6_TYPE_NS: /* Neighbor solicitation. */ + { + struct ns_header *ns_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t target_address; + u8_t accepted; + + /* Check that ns header fits in packet. */ + if (p->len < sizeof(struct ns_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ns_hdr = (struct ns_header *)p->payload; + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, ns_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 7.1.1 requirements. */ + if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ns_hdr->code != 0 || + ip6_addr_ismulticast(&target_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: all included options have a length greater than zero */ + /* @todo RFC MUST: if IP source is 'any', destination is solicited-node multicast address */ + /* @todo RFC MUST: if IP source is 'any', there is no source LL address option */ + + /* Check if there is a link-layer address provided. Only point to it if in this buffer. 
*/ + if (p->len >= (sizeof(struct ns_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + if (p->len < (sizeof(struct ns_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Check if the target address is configured on the receiving netif. */ + accepted = 0; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if ((ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) || + (ip6_addr_istentative(netif_ip6_addr_state(inp, i)) && + ip6_addr_isany(ip6_current_src_addr()))) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + accepted = 1; + break; + } + } + + /* NS not for us? */ + if (!accepted) { + pbuf_free(p); + return; + } + + /* Check for ANY address in src (DAD algorithm). */ + if (ip6_addr_isany(ip6_current_src_addr())) { + /* Sender is validating this address. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* Send a NA back so that the sender does not use this address. */ + nd6_send_na(inp, netif_ip6_addr(inp, i), ND6_FLAG_OVERRIDE | ND6_SEND_FLAG_ALLNODES_DEST); + if (ip6_addr_istentative(netif_ip6_addr_state(inp, i))) { + /* We shouldn't use this address either. */ + nd6_duplicate_addr_detected(inp, i); + } + } + } + } else { + /* Sender is trying to resolve our address. */ + /* Verify that they included their own link-layer address. */ + if (lladdr_opt == NULL) { + /* Not a valid message. */ + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + i = nd6_find_neighbor_cache_entry(ip6_current_src_addr()); + if (i>= 0) { + /* We already have a record for the solicitor. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + + /* Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } else { + /* Add their IPv6 address and link-layer address to neighbor cache. + * We will need it at least to send a unicast NA message, but most + * likely we will also be communicating with this node soon. */ + i = nd6_new_neighbor_cache_entry(); + if (i < 0) { + /* We couldn't assign a cache entry for this neighbor. + * we won't be able to reply. drop it. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_set(&(neighbor_cache[i].next_hop_address), ip6_current_src_addr()); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + + /* Send back a NA for us. Allocate the reply pbuf. */ + nd6_send_na(inp, &target_address, ND6_FLAG_SOLICITED | ND6_FLAG_OVERRIDE); + } + + break; /* ICMP6_TYPE_NS */ + } + case ICMP6_TYPE_RA: /* Router Advertisement. */ + { + struct ra_header *ra_hdr; + u8_t *buffer; /* Used to copy options. 
*/ + u16_t offset; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + /* There can be multiple RDNSS options per RA */ + u8_t rdnss_server_idx = 0; +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + + /* Check that RA header fits in packet. */ + if (p->len < sizeof(struct ra_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ra_hdr = (struct ra_header *)p->payload; + + /* Check a subset of the other RFC 4861 Sec. 6.1.2 requirements. */ + if (!ip6_addr_islinklocal(ip6_current_src_addr()) || + IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ra_hdr->code != 0) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: all included options have a length greater than zero */ + + /* If we are sending RS messages, stop. */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* ensure at least one solicitation is sent (see RFC 4861, ch. 6.3.7) */ + if ((inp->rs_count < LWIP_ND6_MAX_MULTICAST_SOLICIT) || + (nd6_send_rs(inp) == ERR_OK)) { + inp->rs_count = 0; + } else { + inp->rs_count = 1; + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + + /* Get the matching default router entry. */ + i = nd6_get_router(ip6_current_src_addr(), inp); + if (i < 0) { + /* Create a new router entry. */ + i = nd6_new_router(ip6_current_src_addr(), inp); + } + + if (i < 0) { + /* Could not create a new router entry. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Re-set invalidation timer. */ + default_router_list[i].invalidation_timer = lwip_htons(ra_hdr->router_lifetime); + + /* Re-set default timer values. */ +#if LWIP_ND6_ALLOW_RA_UPDATES + if (ra_hdr->retrans_timer > 0) { + retrans_timer = lwip_htonl(ra_hdr->retrans_timer); + } + if (ra_hdr->reachable_time > 0) { + reachable_time = lwip_htonl(ra_hdr->reachable_time); + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + + /* @todo set default hop limit... */ + /* ra_hdr->current_hop_limit;*/ + + /* Update flags in local entry (incl. preference). */ + default_router_list[i].flags = ra_hdr->flags; + +#if LWIP_IPV6_DHCP6 + /* Trigger DHCPv6 if enabled */ + dhcp6_nd6_ra_trigger(inp, ra_hdr->flags & ND6_RA_FLAG_MANAGED_ADDR_CONFIG, + ra_hdr->flags & ND6_RA_FLAG_OTHER_CONFIG); +#endif + + /* Offset to options. */ + offset = sizeof(struct ra_header); + + /* Process each option. 
*/ + while ((p->tot_len - offset) >= 2) { + u8_t option_type; + u16_t option_len; + int option_len8 = pbuf_try_get_at(p, offset + 1); + if (option_len8 <= 0) { + /* read beyond end or zero length */ + goto lenerr_drop_free_return; + } + option_len = ((u8_t)option_len8) << 3; + if (option_len > p->tot_len - offset) { + /* short packet (option does not fit in) */ + goto lenerr_drop_free_return; + } + if (p->len == p->tot_len) { + /* no need to copy from contiguous pbuf */ + buffer = &((u8_t*)p->payload)[offset]; + } else { + /* check if this option fits into our buffer */ + if (option_len > sizeof(nd6_ra_buffer)) { + option_type = pbuf_get_at(p, offset); + /* invalid option length */ + if (option_type != ND6_OPTION_TYPE_RDNSS) { + goto lenerr_drop_free_return; + } + /* we allow RDNSS option to be longer - we'll just drop some servers */ + option_len = sizeof(nd6_ra_buffer); + } + buffer = (u8_t*)&nd6_ra_buffer; + option_len = pbuf_copy_partial(p, &nd6_ra_buffer, option_len, offset); + } + option_type = buffer[0]; + switch (option_type) { + case ND6_OPTION_TYPE_SOURCE_LLADDR: + { + struct lladdr_option *lladdr_opt; + if (option_len < sizeof(struct lladdr_option)) { + goto lenerr_drop_free_return; + } + lladdr_opt = (struct lladdr_option *)buffer; + if ((default_router_list[i].neighbor_entry != NULL) && + (default_router_list[i].neighbor_entry->state == ND6_INCOMPLETE)) { + SMEMCPY(default_router_list[i].neighbor_entry->lladdr, lladdr_opt->addr, inp->hwaddr_len); + default_router_list[i].neighbor_entry->state = ND6_REACHABLE; + default_router_list[i].neighbor_entry->counter.reachable_time = reachable_time; + } + break; + } + case ND6_OPTION_TYPE_MTU: + { + struct mtu_option *mtu_opt; + u32_t mtu32; + if (option_len < sizeof(struct mtu_option)) { + goto lenerr_drop_free_return; + } + mtu_opt = (struct mtu_option *)buffer; + mtu32 = lwip_htonl(mtu_opt->mtu); + if ((mtu32 >= 1280) && (mtu32 <= 0xffff)) { +#if LWIP_ND6_ALLOW_RA_UPDATES + if (inp->mtu) { + /* don't set the mtu for IPv6 higher than the netif driver supports */ + inp->mtu6 = LWIP_MIN(inp->mtu, (u16_t)mtu32); + } else { + inp->mtu6 = (u16_t)mtu32; + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + } + break; + } + case ND6_OPTION_TYPE_PREFIX_INFO: + { + struct prefix_option *prefix_opt; + ip6_addr_t prefix_addr; + if (option_len < sizeof(struct prefix_option)) { + goto lenerr_drop_free_return; + } + + prefix_opt = (struct prefix_option *)buffer; + + /* Get a memory-aligned copy of the prefix. */ + ip6_addr_copy_from_packed(prefix_addr, prefix_opt->prefix); + ip6_addr_assign_zone(&prefix_addr, IP6_UNICAST, inp); + + if (!ip6_addr_islinklocal(&prefix_addr)) { + if ((prefix_opt->flags & ND6_PREFIX_FLAG_ON_LINK) && + (prefix_opt->prefix_length == 64)) { + /* Add to on-link prefix list. */ + u32_t valid_life; + s8_t prefix; + + valid_life = lwip_htonl(prefix_opt->valid_lifetime); + + /* find cache entry for this prefix. */ + prefix = nd6_get_onlink_prefix(&prefix_addr, inp); + if (prefix < 0 && valid_life > 0) { + /* Create a new cache entry. */ + prefix = nd6_new_onlink_prefix(&prefix_addr, inp); + } + if (prefix >= 0) { + prefix_list[prefix].invalidation_timer = valid_life; + } + } +#if LWIP_IPV6_AUTOCONFIG + if (prefix_opt->flags & ND6_PREFIX_FLAG_AUTONOMOUS) { + /* Perform processing for autoconfiguration. */ + nd6_process_autoconfig_prefix(inp, prefix_opt, &prefix_addr); + } +#endif /* LWIP_IPV6_AUTOCONFIG */ + } + + break; + } + case ND6_OPTION_TYPE_ROUTE_INFO: + /* @todo implement preferred routes. 
+ struct route_option * route_opt; + route_opt = (struct route_option *)buffer;*/ + + break; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + case ND6_OPTION_TYPE_RDNSS: + { + u8_t num, n; + u16_t copy_offset = offset + SIZEOF_RDNSS_OPTION_BASE; + struct rdnss_option * rdnss_opt; + if (option_len < SIZEOF_RDNSS_OPTION_BASE) { + goto lenerr_drop_free_return; + } + + rdnss_opt = (struct rdnss_option *)buffer; + num = (rdnss_opt->length - 1) / 2; + for (n = 0; (rdnss_server_idx < DNS_MAX_SERVERS) && (n < num); n++) { + ip_addr_t rdnss_address; + + /* Copy directly from pbuf to get an aligned, zoned copy of the prefix. */ + if (pbuf_copy_partial(p, &rdnss_address, sizeof(ip6_addr_p_t), copy_offset) == sizeof(ip6_addr_p_t)) { + IP_SET_TYPE_VAL(rdnss_address, IPADDR_TYPE_V6); + ip6_addr_assign_zone(ip_2_ip6(&rdnss_address), IP6_UNKNOWN, inp); + + if (htonl(rdnss_opt->lifetime) > 0) { + /* TODO implement Lifetime > 0 */ + dns_setserver(rdnss_server_idx++, &rdnss_address); + } else { + /* TODO implement DNS removal in dns.c */ + u8_t s; + for (s = 0; s < DNS_MAX_SERVERS; s++) { + const ip_addr_t *addr = dns_getserver(s); + if(ip_addr_cmp(addr, &rdnss_address)) { + dns_setserver(s, NULL); + } + } + } + } + } + break; + } +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + default: + /* Unrecognized option, abort. */ + ND6_STATS_INC(nd6.proterr); + break; + } + /* option length is checked earlier to be non-zero to make sure loop ends */ + offset += 8 * (u8_t)option_len8; + } + + break; /* ICMP6_TYPE_RA */ + } + case ICMP6_TYPE_RD: /* Redirect */ + { + struct redirect_header *redir_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t destination_address, target_address; + + /* Check that Redir header fits in packet. */ + if (p->len < sizeof(struct redirect_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + redir_hdr = (struct redirect_header *)p->payload; + + /* Create an aligned, zoned copy of the destination address. */ + ip6_addr_copy_from_packed(destination_address, redir_hdr->destination_address); + ip6_addr_assign_zone(&destination_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 8.1 requirements. */ + if (!ip6_addr_islinklocal(ip6_current_src_addr()) || + IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || + redir_hdr->code != 0 || ip6_addr_ismulticast(&destination_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: IP source address equals first-hop router for destination_address */ + /* @todo RFC MUST: ICMP target address is either link-local address or same as destination_address */ + /* @todo RFC MUST: all included options have a length greater than zero */ + + if (p->len >= (sizeof(struct redirect_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct redirect_header)); + if (p->len < (sizeof(struct redirect_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Find dest address in cache */ + dest_idx = nd6_find_destination_cache_entry(&destination_address); + if (dest_idx < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, redir_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Set the new target address. 
*/ + ip6_addr_copy(destination_cache[dest_idx].next_hop_addr, target_address); + + /* If Link-layer address of other router is given, try to add to neighbor cache. */ + if (lladdr_opt != NULL) { + if (lladdr_opt->type == ND6_OPTION_TYPE_TARGET_LLADDR) { + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_copy(neighbor_cache[i].next_hop_address, target_address); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + if (i >= 0) { + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + } + } + break; /* ICMP6_TYPE_RD */ + } + case ICMP6_TYPE_PTB: /* Packet too big */ + { + struct icmp6_hdr *icmp6hdr; /* Packet too big message */ + struct ip6_hdr *ip6hdr; /* IPv6 header of the packet which caused the error */ + u32_t pmtu; + ip6_addr_t destination_address; + + /* Check that ICMPv6 header + IPv6 header fit in payload */ + if (p->len < (sizeof(struct icmp6_hdr) + IP6_HLEN)) { + /* drop short packets */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + ip6hdr = (struct ip6_hdr *)((u8_t*)p->payload + sizeof(struct icmp6_hdr)); + + /* Create an aligned, zoned copy of the destination address. */ + ip6_addr_copy_from_packed(destination_address, ip6hdr->dest); + ip6_addr_assign_zone(&destination_address, IP6_UNKNOWN, inp); + + /* Look for entry in destination cache. */ + dest_idx = nd6_find_destination_cache_entry(&destination_address); + if (dest_idx < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Change the Path MTU. */ + pmtu = lwip_htonl(icmp6hdr->data); + destination_cache[dest_idx].pmtu = (u16_t)LWIP_MIN(pmtu, 0xFFFF); + + break; /* ICMP6_TYPE_PTB */ + } + + default: + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + break; /* default */ + } + + pbuf_free(p); + return; +lenerr_drop_free_return: + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + pbuf_free(p); +} + + +/** + * Periodic timer for Neighbor discovery functions: + * + * - Update neighbor reachability states + * - Update destination cache entries age + * - Update invalidation timers of default routers and on-link prefixes + * - Update lifetimes of our addresses + * - Perform duplicate address detection (DAD) for our addresses + * - Send router solicitations + */ +void +nd6_tmr(void) +{ + s8_t i; + struct netif *netif; + + /* Process neighbor entries. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + switch (neighbor_cache[i].state) { + case ND6_INCOMPLETE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. 
*/ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + break; + case ND6_REACHABLE: + /* Send queued packets, if any are left. Should have been sent already. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + if (neighbor_cache[i].counter.reachable_time <= ND6_TMR_INTERVAL) { + /* Change to stale state. */ + neighbor_cache[i].state = ND6_STALE; + neighbor_cache[i].counter.stale_time = 0; + } else { + neighbor_cache[i].counter.reachable_time -= ND6_TMR_INTERVAL; + } + break; + case ND6_STALE: + neighbor_cache[i].counter.stale_time++; + break; + case ND6_DELAY: + if (neighbor_cache[i].counter.delay_time <= 1) { + /* Change to PROBE state. */ + neighbor_cache[i].state = ND6_PROBE; + neighbor_cache[i].counter.probes_sent = 0; + } else { + neighbor_cache[i].counter.delay_time--; + } + break; + case ND6_PROBE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. */ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], 0); + } + break; + case ND6_NO_ENTRY: + default: + /* Do nothing. */ + break; + } + } + + /* Process destination entries. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + destination_cache[i].age++; + } + + /* Process router entries. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if (default_router_list[i].neighbor_entry != NULL) { + /* Active entry. */ + if (default_router_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) { + /* No more than 1 second remaining. Clear this entry. Also clear any of + * its destination cache entries, as per RFC 4861 Sec. 5.3 and 6.3.5. */ + s8_t j; + for (j = 0; j < LWIP_ND6_NUM_DESTINATIONS; j++) { + if (ip6_addr_cmp(&destination_cache[j].next_hop_addr, + &default_router_list[i].neighbor_entry->next_hop_address)) { + ip6_addr_set_any(&destination_cache[j].destination_addr); + } + } + default_router_list[i].neighbor_entry->isrouter = 0; + default_router_list[i].neighbor_entry = NULL; + default_router_list[i].invalidation_timer = 0; + default_router_list[i].flags = 0; + } else { + default_router_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + } + } + + /* Process prefix entries. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif != NULL) { + if (prefix_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) { + /* Entry timed out, remove it */ + prefix_list[i].invalidation_timer = 0; + prefix_list[i].netif = NULL; + } else { + prefix_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + } + } + + /* Process our own addresses, updating address lifetimes and/or DAD state. */ + NETIF_FOREACH(netif) { + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + u8_t addr_state; +#if LWIP_IPV6_ADDRESS_LIFETIMES + /* Step 1: update address lifetimes (valid and preferred). */ + addr_state = netif_ip6_addr_state(netif, i); + /* RFC 4862 is not entirely clear as to whether address lifetimes affect + * tentative addresses, and is even less clear as to what should happen + * with duplicate addresses. We choose to track and update lifetimes for + * both those types, although for different reasons: + * - for tentative addresses, the line of thought of Sec. 
5.7 combined + * with the potentially long period that an address may be in tentative + * state (due to the interface being down) suggests that lifetimes + * should be independent of external factors which would include DAD; + * - for duplicate addresses, retiring them early could result in a new + * but unwanted attempt at marking them as valid, while retiring them + * late/never could clog up address slots on the netif. + * As a result, we may end up expiring addresses of either type here. + */ + if (!ip6_addr_isinvalid(addr_state) && + !netif_ip6_addr_isstatic(netif, i)) { + u32_t life = netif_ip6_addr_valid_life(netif, i); + if (life <= ND6_TMR_INTERVAL / 1000) { + /* The address has expired. */ + netif_ip6_addr_set_valid_life(netif, i, 0); + netif_ip6_addr_set_pref_life(netif, i, 0); + netif_ip6_addr_set_state(netif, i, IP6_ADDR_INVALID); + } else { + if (!ip6_addr_life_isinfinite(life)) { + life -= ND6_TMR_INTERVAL / 1000; + LWIP_ASSERT("bad valid lifetime", life != IP6_ADDR_LIFE_STATIC); + netif_ip6_addr_set_valid_life(netif, i, life); + } + /* The address is still here. Update the preferred lifetime too. */ + life = netif_ip6_addr_pref_life(netif, i); + if (life <= ND6_TMR_INTERVAL / 1000) { + /* This case must also trigger if 'life' was already zero, so as to + * deal correctly with advertised preferred-lifetime reductions. */ + netif_ip6_addr_set_pref_life(netif, i, 0); + if (addr_state == IP6_ADDR_PREFERRED) + netif_ip6_addr_set_state(netif, i, IP6_ADDR_DEPRECATED); + } else if (!ip6_addr_life_isinfinite(life)) { + life -= ND6_TMR_INTERVAL / 1000; + netif_ip6_addr_set_pref_life(netif, i, life); + } + } + } + /* The address state may now have changed, so reobtain it next. */ +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + /* Step 2: update DAD state. */ + addr_state = netif_ip6_addr_state(netif, i); + if (ip6_addr_istentative(addr_state)) { + if ((addr_state & IP6_ADDR_TENTATIVE_COUNT_MASK) >= LWIP_IPV6_DUP_DETECT_ATTEMPTS) { + /* No NA received in response. Mark address as valid. For dynamic + * addresses with an expired preferred lifetime, the state is set to + * deprecated right away. That should almost never happen, though. */ + addr_state = IP6_ADDR_PREFERRED; +#if LWIP_IPV6_ADDRESS_LIFETIMES + if (!netif_ip6_addr_isstatic(netif, i) && + netif_ip6_addr_pref_life(netif, i) == 0) { + addr_state = IP6_ADDR_DEPRECATED; + } +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + netif_ip6_addr_set_state(netif, i, addr_state); + } else if (netif_is_up(netif) && netif_is_link_up(netif)) { + /* tentative: set next state by increasing by one */ + netif_ip6_addr_set_state(netif, i, addr_state + 1); + /* Send a NS for this address. Use the unspecified address as source + * address in all cases (RFC 4862 Sec. 5.4.2), not in the least + * because as it is, we only consider multicast replies for DAD. */ + nd6_send_ns(netif, netif_ip6_addr(netif, i), + ND6_SEND_FLAG_MULTICAST_DEST | ND6_SEND_FLAG_ANY_SRC); + } + } + } + } + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send router solicitation messages, if necessary. 
*/ + if (!nd6_tmr_rs_reduction) { + nd6_tmr_rs_reduction = (ND6_RTR_SOLICITATION_INTERVAL / ND6_TMR_INTERVAL) - 1; + NETIF_FOREACH(netif) { + if ((netif->rs_count > 0) && netif_is_up(netif) && + netif_is_link_up(netif) && + !ip6_addr_isinvalid(netif_ip6_addr_state(netif, 0)) && + !ip6_addr_isduplicated(netif_ip6_addr_state(netif, 0))) { + if (nd6_send_rs(netif) == ERR_OK) { + netif->rs_count--; + } + } + } + } else { + nd6_tmr_rs_reduction--; + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +} + +/** Send a neighbor solicitation message for a specific neighbor cache entry + * + * @param entry the neightbor cache entry for wich to send the message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags) +{ + nd6_send_ns(entry->netif, &entry->next_hop_address, flags); +} + +/** + * Send a neighbor solicitation message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct ns_header *ns_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + u16_t lladdr_opt_len; + + LWIP_ASSERT("target address is required", target_addr != NULL); + + if (!(flags & ND6_SEND_FLAG_ANY_SRC) && + ip6_addr_isvalid(netif_ip6_addr_state(netif,0))) { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + /* calculate option length (in 8-byte-blocks) */ + lladdr_opt_len = ((netif->hwaddr_len + 2) + 7) >> 3; + } else { + src_addr = IP6_ADDR_ANY6; + /* Option "MUST NOT be included when the source IP address is the unspecified address." */ + lladdr_opt_len = 0; + } + + /* Allocate a packet. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct ns_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + ns_hdr = (struct ns_header *)p->payload; + + ns_hdr->type = ICMP6_TYPE_NS; + ns_hdr->code = 0; + ns_hdr->chksum = 0; + ns_hdr->reserved = 0; + ip6_addr_copy_to_packed(ns_hdr->target_address, *target_addr); + + if (lladdr_opt_len != 0) { + struct lladdr_option *lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + + /* Generate the solicited node address for the target address. */ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + target_addr = &multicast_address; + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + ns_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + target_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? 
NULL : src_addr, target_addr, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +/** + * Send a neighbor advertisement message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + const ip6_addr_t *dest_addr; + u16_t lladdr_opt_len; + + LWIP_ASSERT("target address is required", target_addr != NULL); + + /* Use link-local address as source address. */ + /* src_addr = netif_ip6_addr(netif, 0); */ + /* Use target address as source address. */ + src_addr = target_addr; + + /* Allocate a packet. */ + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 1 : 0); + p = pbuf_alloc(PBUF_IP, sizeof(struct na_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + na_hdr = (struct na_header *)p->payload; + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + na_hdr->type = ICMP6_TYPE_NA; + na_hdr->code = 0; + na_hdr->chksum = 0; + na_hdr->flags = flags & 0xf0; + na_hdr->reserved[0] = 0; + na_hdr->reserved[1] = 0; + na_hdr->reserved[2] = 0; + ip6_addr_copy_to_packed(na_hdr->target_address, *target_addr); + + lladdr_opt->type = ND6_OPTION_TYPE_TARGET_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + + /* Generate the solicited node address for the target address. */ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + dest_addr = &multicast_address; + } else if (flags & ND6_SEND_FLAG_ALLNODES_DEST) { + ip6_addr_set_allnodes_linklocal(&multicast_address); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + dest_addr = &multicast_address; + } else { + dest_addr = ip6_current_src_addr(); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + na_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + dest_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, src_addr, dest_addr, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +/** + * Send a router solicitation message + * + * @param netif the netif on which to send the message + */ +static err_t +nd6_send_rs(struct netif *netif) +{ + struct rs_header *rs_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + err_t err; + u16_t lladdr_opt_len = 0; + + /* Link-local source address, or unspecified address? */ + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + src_addr = netif_ip6_addr(netif, 0); + } else { + src_addr = IP6_ADDR_ANY6; + } + + /* Generate the all routers target address. */ + ip6_addr_set_allrouters_linklocal(&multicast_address); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + + /* Allocate a packet. */ + if (src_addr != IP6_ADDR_ANY6) { + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 
1 : 0); + } + p = pbuf_alloc(PBUF_IP, sizeof(struct rs_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return ERR_BUF; + } + + /* Set fields. */ + rs_hdr = (struct rs_header *)p->payload; + + rs_hdr->type = ICMP6_TYPE_RS; + rs_hdr->code = 0; + rs_hdr->chksum = 0; + rs_hdr->reserved = 0; + + if (src_addr != IP6_ADDR_ANY6) { + /* Include our hw address. */ + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct rs_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + rs_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + &multicast_address); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + + err = ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? NULL : src_addr, &multicast_address, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); + + return err; +} +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +/** + * Search for a neighbor cache entry + * + * @param ip6addr the IPv6 address of the neighbor + * @return The neighbor cache entry index that matched, -1 if no + * entry is found + */ +static s8_t +nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr) +{ + s8_t i; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (ip6_addr_cmp(ip6addr, &(neighbor_cache[i].next_hop_address))) { + return i; + } + } + return -1; +} + +/** + * Create a new neighbor cache entry. + * + * If no unused entry is found, will try to recycle an old entry + * according to ad-hoc "age" heuristic. + * + * @return The neighbor cache entry index that was created, -1 if no + * entry could be created + */ +static s8_t +nd6_new_neighbor_cache_entry(void) +{ + s8_t i; + s8_t j; + u32_t time; + + + /* First, try to find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].state == ND6_NO_ENTRY) { + return i; + } + } + + /* We need to recycle an entry. in general, do not recycle if it is a router. */ + + /* Next, try to find a Stale entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_STALE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Probe entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_PROBE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Delayed entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_DELAY) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find the oldest reachable entry. */ + time = 0xfffffffful; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_REACHABLE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.reachable_time < time) { + j = i; + time = neighbor_cache[i].counter.reachable_time; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry without queued packets. 
*/ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ( + (neighbor_cache[i].q == NULL) && + (neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry with queued packets. */ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* No more entries to try. */ + return -1; +} + +/** + * Will free any resources associated with a neighbor cache + * entry, and will mark it as unused. + * + * @param i the neighbor cache entry index to free + */ +static void +nd6_free_neighbor_cache_entry(s8_t i) +{ + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + if (neighbor_cache[i].isrouter) { + /* isrouter needs to be cleared before deleting a neighbor cache entry */ + return; + } + + /* Free any queued packets. */ + if (neighbor_cache[i].q != NULL) { + nd6_free_q(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } + + neighbor_cache[i].state = ND6_NO_ENTRY; + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = NULL; + neighbor_cache[i].counter.reachable_time = 0; + ip6_addr_set_zero(&(neighbor_cache[i].next_hop_address)); +} + +/** + * Search for a destination cache entry + * + * @param ip6addr the IPv6 address of the destination + * @return The destination cache entry index that matched, -1 if no + * entry is found + */ +static s16_t +nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr) +{ + s16_t i; + + IP6_ADDR_ZONECHECK(ip6addr); + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_cmp(ip6addr, &(destination_cache[i].destination_addr))) { + return i; + } + } + return -1; +} + +/** + * Create a new destination cache entry. If no unused entry is found, + * will recycle oldest entry. + * + * @return The destination cache entry index that was created, -1 if no + * entry was created + */ +static s16_t +nd6_new_destination_cache_entry(void) +{ + s16_t i, j; + u32_t age; + + /* Find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_isany(&(destination_cache[i].destination_addr))) { + return i; + } + } + + /* Find oldest entry. */ + age = 0; + j = LWIP_ND6_NUM_DESTINATIONS - 1; + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (destination_cache[i].age > age) { + j = i; + } + } + + return j; +} + +/** + * Clear the destination cache. + * + * This operation may be necessary for consistency in the light of changing + * local addresses and/or use of the gateway hook. + */ +void +nd6_clear_destination_cache(void) +{ + int i; + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + ip6_addr_set_any(&destination_cache[i].destination_addr); + } +} + +/** + * Determine whether an address matches an on-link prefix or the subnet of a + * statically assigned address. + * + * @param ip6addr the IPv6 address to match + * @return 1 if the address is on-link, 0 otherwise + */ +static int +nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s8_t i; + + /* Check to see if the address matches an on-link prefix. 
*/ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if ((prefix_list[i].netif == netif) && + (prefix_list[i].invalidation_timer > 0) && + ip6_addr_netcmp(ip6addr, &(prefix_list[i].prefix))) { + return 1; + } + } + /* Check to see if address prefix matches a manually configured (= static) + * address. Static addresses have an implied /64 subnet assignment. Dynamic + * addresses (from autoconfiguration) have no implied subnet assignment, and + * are thus effectively /128 assignments. See RFC 5942 for more on this. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + netif_ip6_addr_isstatic(netif, i) && + ip6_addr_netcmp(ip6addr, netif_ip6_addr(netif, i))) { + return 1; + } + } + return 0; +} + +/** + * Select a default router for a destination. + * + * This function is used both for routing and for finding a next-hop target for + * a packet. In the former case, the given netif is NULL, and the returned + * router entry must be for a netif suitable for sending packets (up, link up). + * In the latter case, the given netif is not NULL and restricts router choice. + * + * @param ip6addr the destination address + * @param netif the netif for the outgoing packet, if known + * @return the default router entry index, or -1 if no suitable + * router is found + */ +static s8_t +nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif) +{ + struct netif *router_netif; + s8_t i, j, valid_router; + static s8_t last_router; + + LWIP_UNUSED_ARG(ip6addr); /* @todo match preferred routes!! (must implement ND6_OPTION_TYPE_ROUTE_INFO) */ + + /* @todo: implement default router preference */ + + /* Look for valid routers. A reachable router is preferred. */ + valid_router = -1; + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + /* Is the router netif both set and apppropriate? */ + if (default_router_list[i].neighbor_entry != NULL) { + router_netif = default_router_list[i].neighbor_entry->netif; + if ((router_netif != NULL) && (netif != NULL ? netif == router_netif : + (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) { + /* Is the router valid, i.e., reachable or probably reachable as per + * RFC 4861 Sec. 6.3.6? Note that we will never return a router that + * has no neighbor cache entry, due to the netif association tests. */ + if (default_router_list[i].neighbor_entry->state != ND6_INCOMPLETE) { + /* Is the router known to be reachable? */ + if (default_router_list[i].neighbor_entry->state == ND6_REACHABLE) { + return i; /* valid and reachable - done! */ + } else if (valid_router < 0) { + valid_router = i; /* valid but not known to be reachable */ + } + } + } + } + } + if (valid_router >= 0) { + return valid_router; + } + + /* Look for any router for which we have any information at all. */ + /* last_router is used for round-robin selection of incomplete routers, as + * recommended in RFC 4861 Sec. 6.3.6 point (2). Advance only when picking a + * route, to select the same router as next-hop target in the common case. */ + if ((netif == NULL) && (++last_router >= LWIP_ND6_NUM_ROUTERS)) { + last_router = 0; + } + i = last_router; + for (j = 0; j < LWIP_ND6_NUM_ROUTERS; j++) { + if (default_router_list[i].neighbor_entry != NULL) { + router_netif = default_router_list[i].neighbor_entry->netif; + if ((router_netif != NULL) && (netif != NULL ? 
netif == router_netif : + (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) { + return i; + } + } + if (++i >= LWIP_ND6_NUM_ROUTERS) { + i = 0; + } + } + + /* no suitable router found. */ + return -1; +} + +/** + * Find a router-announced route to the given destination. This route may be + * based on an on-link prefix or a default router. + * + * If a suitable route is found, the returned netif is guaranteed to be in a + * suitable state (up, link up) to be used for packet transmission. + * + * @param ip6addr the destination IPv6 address + * @return the netif to use for the destination, or NULL if none found + */ +struct netif * +nd6_find_route(const ip6_addr_t *ip6addr) +{ + struct netif *netif; + s8_t i; + + /* @todo decide if it makes sense to check the destination cache first */ + + /* Check if there is a matching on-link prefix. There may be multiple + * matches. Pick the first one that is associated with a suitable netif. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + netif = prefix_list[i].netif; + if ((netif != NULL) && ip6_addr_netcmp(&prefix_list[i].prefix, ip6addr) && + netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + } + + /* No on-link prefix match. Find a router that can forward the packet. */ + i = nd6_select_router(ip6addr, NULL); + if (i >= 0) { + LWIP_ASSERT("selected router must have a neighbor entry", + default_router_list[i].neighbor_entry != NULL); + return default_router_list[i].neighbor_entry->netif; + } + + return NULL; +} + +/** + * Find an entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is found, if known + * @return the index of the router entry, or -1 if not found + */ +static s8_t +nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t i; + + IP6_ADDR_ZONECHECK_NETIF(router_addr, netif); + + /* Look for router. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if ((default_router_list[i].neighbor_entry != NULL) && + ((netif != NULL) ? netif == default_router_list[i].neighbor_entry->netif : 1) && + ip6_addr_cmp(router_addr, &(default_router_list[i].neighbor_entry->next_hop_address))) { + return i; + } + } + + /* router not found. */ + return -1; +} + +/** + * Create a new entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is connected, if known + * @return the index on the router table, or -1 if could not be created + */ +static s8_t +nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t router_index; + s8_t free_router_index; + s8_t neighbor_index; + + IP6_ADDR_ZONECHECK_NETIF(router_addr, netif); + + /* Do we have a neighbor entry for this router? */ + neighbor_index = nd6_find_neighbor_cache_entry(router_addr); + if (neighbor_index < 0) { + /* Create a neighbor entry for this router. */ + neighbor_index = nd6_new_neighbor_cache_entry(); + if (neighbor_index < 0) { + /* Could not create neighbor entry for this router. */ + return -1; + } + ip6_addr_set(&(neighbor_cache[neighbor_index].next_hop_address), router_addr); + neighbor_cache[neighbor_index].netif = netif; + neighbor_cache[neighbor_index].q = NULL; + neighbor_cache[neighbor_index].state = ND6_INCOMPLETE; + neighbor_cache[neighbor_index].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[neighbor_index], ND6_SEND_FLAG_MULTICAST_DEST); + } + + /* Mark neighbor as router. 
*/ + neighbor_cache[neighbor_index].isrouter = 1; + + /* Look for empty entry. */ + free_router_index = LWIP_ND6_NUM_ROUTERS; + for (router_index = LWIP_ND6_NUM_ROUTERS - 1; router_index >= 0; router_index--) { + /* check if router already exists (this is a special case for 2 netifs on the same subnet + - e.g. wifi and cable) */ + if(default_router_list[router_index].neighbor_entry == &(neighbor_cache[neighbor_index])){ + return router_index; + } + if (default_router_list[router_index].neighbor_entry == NULL) { + /* remember lowest free index to create a new entry */ + free_router_index = router_index; + } + } + if (free_router_index < LWIP_ND6_NUM_ROUTERS) { + default_router_list[free_router_index].neighbor_entry = &(neighbor_cache[neighbor_index]); + return free_router_index; + } + + /* Could not create a router entry. */ + + /* Mark neighbor entry as not-router. Entry might be useful as neighbor still. */ + neighbor_cache[neighbor_index].isrouter = 0; + + /* router not found. */ + return -1; +} + +/** + * Find the cached entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not found + */ +static s8_t +nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Look for prefix in list. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((ip6_addr_netcmp(&(prefix_list[i].prefix), prefix)) && + (prefix_list[i].netif == netif)) { + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Creates a new entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not created + */ +static s8_t +nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Create new entry. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((prefix_list[i].netif == NULL) || + (prefix_list[i].invalidation_timer == 0)) { + /* Found empty prefix entry. */ + prefix_list[i].netif = netif; + ip6_addr_set(&(prefix_list[i].prefix), prefix); + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Determine the next hop for a destination. Will determine if the + * destination is on-link, else a suitable on-link router is selected. + * + * The last entry index is cached for fast entry search. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the neighbor cache entry for the next hop, ERR_RTE if no + * suitable next hop was found, ERR_MEM if no cache entry + * could be created + */ +static s8_t +nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif) +{ +#ifdef LWIP_HOOK_ND6_GET_GW + const ip6_addr_t *next_hop_addr; +#endif /* LWIP_HOOK_ND6_GET_GW */ + s8_t i; + s16_t dst_idx; + + IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif); + +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif_addr_idx_t addr_hint = netif->hints->addr_hint; + if (addr_hint < LWIP_ND6_NUM_DESTINATIONS) { + nd6_cached_destination_index = addr_hint; + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look for ip6addr in destination cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + /* the cached entry index is the right one! */ + /* do nothing. 
*/ + ND6_STATS_INC(nd6.cachehit); + } else { + /* Search destination cache. */ + dst_idx = nd6_find_destination_cache_entry(ip6addr); + if (dst_idx >= 0) { + /* found destination entry. make it our new cached index. */ + LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX); + nd6_cached_destination_index = (netif_addr_idx_t)dst_idx; + } else { + /* Not found. Create a new destination entry. */ + dst_idx = nd6_new_destination_cache_entry(); + if (dst_idx >= 0) { + /* got new destination entry. make it our new cached index. */ + LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX); + nd6_cached_destination_index = (netif_addr_idx_t)dst_idx; + } else { + /* Could not create a destination cache entry. */ + return ERR_MEM; + } + + /* Copy dest address to destination cache. */ + ip6_addr_set(&(destination_cache[nd6_cached_destination_index].destination_addr), ip6addr); + + /* Now find the next hop. is it a neighbor? */ + if (ip6_addr_islinklocal(ip6addr) || + nd6_is_prefix_in_netif(ip6addr, netif)) { + /* Destination in local link. */ + destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, destination_cache[nd6_cached_destination_index].destination_addr); +#ifdef LWIP_HOOK_ND6_GET_GW + } else if ((next_hop_addr = LWIP_HOOK_ND6_GET_GW(netif, ip6addr)) != NULL) { + /* Next hop for destination provided by hook function. */ + destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; + ip6_addr_set(&destination_cache[nd6_cached_destination_index].next_hop_addr, next_hop_addr); +#endif /* LWIP_HOOK_ND6_GET_GW */ + } else { + /* We need to select a router. */ + i = nd6_select_router(ip6addr, netif); + if (i < 0) { + /* No router found. */ + ip6_addr_set_any(&(destination_cache[nd6_cached_destination_index].destination_addr)); + return ERR_RTE; + } + destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); /* Start with netif mtu, correct through ICMPv6 if necessary */ + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, default_router_list[i].neighbor_entry->next_hop_address); + } + } + } + +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif->hints->addr_hint = nd6_cached_destination_index; + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look in neighbor cache for the next-hop address. */ + if (ip6_addr_cmp(&(destination_cache[nd6_cached_destination_index].next_hop_addr), + &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + /* Cache hit. */ + /* Do nothing. */ + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[nd6_cached_destination_index].next_hop_addr)); + if (i >= 0) { + /* Found a matching record, make it new cached entry. */ + nd6_cached_neighbor_index = i; + } else { + /* Neighbor not in cache. Make a new entry. */ + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + /* got new neighbor entry. make it our new cached index. */ + nd6_cached_neighbor_index = i; + } else { + /* Could not create a neighbor cache entry. */ + return ERR_MEM; + } + + /* Initialize fields. 
*/ + ip6_addr_copy(neighbor_cache[i].next_hop_address, + destination_cache[nd6_cached_destination_index].next_hop_addr); + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = netif; + neighbor_cache[i].state = ND6_INCOMPLETE; + neighbor_cache[i].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + } + + /* Reset this destination's age. */ + destination_cache[nd6_cached_destination_index].age = 0; + + return nd6_cached_neighbor_index; +} + +/** + * Queue a packet for a neighbor. + * + * @param neighbor_index the index in the neighbor cache table + * @param q packet to be queued + * @return ERR_OK if succeeded, ERR_MEM if out of memory + */ +static err_t +nd6_queue_packet(s8_t neighbor_index, struct pbuf *q) +{ + err_t result = ERR_MEM; + struct pbuf *p; + int copy_needed = 0; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *new_entry, *r; +#endif /* LWIP_ND6_QUEUEING */ + + if ((neighbor_index < 0) || (neighbor_index >= LWIP_ND6_NUM_NEIGHBORS)) { + return ERR_ARG; + } + + /* IF q includes a pbuf that must be copied, we have to copy the whole chain + * into a new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */ + p = q; + while (p) { + if (PBUF_NEEDS_COPY(p)) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + while ((p == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ +#if LWIP_ND6_QUEUEING + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); +#else /* LWIP_ND6_QUEUEING */ + pbuf_free(neighbor_cache[neighbor_index].q); + neighbor_cache[neighbor_index].q = NULL; +#endif /* LWIP_ND6_QUEUEING */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + } + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet was copied/ref'd? */ + if (p != NULL) { + /* queue packet ... */ +#if LWIP_ND6_QUEUEING + /* allocate a new nd6 queue entry */ + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + if ((new_entry == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + } + if (new_entry != NULL) { + new_entry->next = NULL; + new_entry->p = p; + if (neighbor_cache[neighbor_index].q != NULL) { + /* queue was already existent, append the new entry to the end */ + r = neighbor_cache[neighbor_index].q; + while (r->next != NULL) { + r = r->next; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + neighbor_cache[neighbor_index].q = new_entry; + } + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; + } else { + /* the pool MEMP_ND6_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)p)); + /* { result == ERR_MEM } through initialization */ + } +#else /* LWIP_ND6_QUEUEING */ + /* Queue a single packet. If an older packet is already queued, free it as per RFC. 
*/ + if (neighbor_cache[neighbor_index].q != NULL) { + pbuf_free(neighbor_cache[neighbor_index].q); + } + neighbor_cache[neighbor_index].q = p; + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; +#endif /* LWIP_ND6_QUEUEING */ + } else { + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)q)); + /* { result == ERR_MEM } through initialization */ + } + + return result; +} + +#if LWIP_ND6_QUEUEING +/** + * Free a complete queue of nd6 q entries + * + * @param q a queue of nd6_q_entry to free + */ +static void +nd6_free_q(struct nd6_q_entry *q) +{ + struct nd6_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("q->p != NULL", q->p != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ND6_QUEUE, r); + } +} +#endif /* LWIP_ND6_QUEUEING */ + +/** + * Send queued packets for a neighbor + * + * @param i the neighbor to send packets to + */ +static void +nd6_send_q(s8_t i) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *q; +#endif /* LWIP_ND6_QUEUEING */ + + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + +#if LWIP_ND6_QUEUEING + while (neighbor_cache[i].q != NULL) { + /* remember first in queue */ + q = neighbor_cache[i].q; + /* pop first item off the queue */ + neighbor_cache[i].q = q->next; + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(q->p->payload); + /* Create an aligned copy. */ + ip6_addr_copy_from_packed(dest, ip6hdr->dest); + /* Restore the zone, if applicable. */ + ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, q->p, &dest); + /* free the queued IP packet */ + pbuf_free(q->p); + /* now queue entry can be freed */ + memp_free(MEMP_ND6_QUEUE, q); + } +#else /* LWIP_ND6_QUEUEING */ + if (neighbor_cache[i].q != NULL) { + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(neighbor_cache[i].q->payload); + /* Create an aligned copy. */ + ip6_addr_copy_from_packed(dest, ip6hdr->dest); + /* Restore the zone, if applicable. */ + ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, neighbor_cache[i].q, &dest); + /* free the queued IP packet */ + pbuf_free(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } +#endif /* LWIP_ND6_QUEUEING */ +} + +/** + * A packet is to be transmitted to a specific IPv6 destination on a specific + * interface. Check if we can find the hardware address of the next hop to use + * for the packet. If so, give the hardware address to the caller, which should + * use it to send the packet right away. Otherwise, enqueue the packet for + * later transmission while looking up the hardware address, if possible. + * + * As such, this function returns one of three different possible results: + * + * - ERR_OK with a non-NULL 'hwaddrp': the caller should send the packet now. + * - ERR_OK with a NULL 'hwaddrp': the packet has been enqueued for later. + * - not ERR_OK: something went wrong; forward the error upward in the stack. + * + * @param netif The lwIP network interface on which the IP packet will be sent. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The destination IPv6 address of the packet. 
+ * @param hwaddrp On success, filled with a pointer to a HW address or NULL (meaning + * the packet has been queued). + * @return + * - ERR_OK on success, ERR_RTE if no route was found for the packet, + * or ERR_MEM if low memory conditions prohibit sending the packet at all. + */ +err_t +nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp) +{ + s8_t i; + + /* Get next hop record. */ + i = nd6_get_next_hop_entry(ip6addr, netif); + if (i < 0) { + /* failed to get a next hop neighbor record. */ + return i; + } + + /* Now that we have a destination record, send or queue the packet. */ + if (neighbor_cache[i].state == ND6_STALE) { + /* Switch to delay state. */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + /* @todo should we send or queue if PROBE? send for now, to let unicast NS pass. */ + if ((neighbor_cache[i].state == ND6_REACHABLE) || + (neighbor_cache[i].state == ND6_DELAY) || + (neighbor_cache[i].state == ND6_PROBE)) { + + /* Tell the caller to send out the packet now. */ + *hwaddrp = neighbor_cache[i].lladdr; + return ERR_OK; + } + + /* We should queue packet on this interface. */ + *hwaddrp = NULL; + return nd6_queue_packet(i, q); +} + + +/** + * Get the Path MTU for a destination. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the Path MTU, if known, or the netif default MTU + */ +u16_t +nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s16_t i; + + i = nd6_find_destination_cache_entry(ip6addr); + if (i >= 0) { + if (destination_cache[i].pmtu > 0) { + return destination_cache[i].pmtu; + } + } + + if (netif != NULL) { + return netif_mtu6(netif); + } + + return 1280; /* Minimum MTU */ +} + + +#if LWIP_ND6_TCP_REACHABILITY_HINTS +/** + * Provide the Neighbor discovery process with a hint that a + * destination is reachable. Called by tcp_receive when ACKs are + * received or sent (as per RFC). This is useful to avoid sending + * NS messages every 30 seconds. + * + * @param ip6addr the destination address which is know to be reachable + * by an upper layer protocol (TCP) + */ +void +nd6_reachability_hint(const ip6_addr_t *ip6addr) +{ + s8_t i; + s16_t dst_idx; + + /* Find destination in cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + dst_idx = nd6_cached_destination_index; + ND6_STATS_INC(nd6.cachehit); + } else { + dst_idx = nd6_find_destination_cache_entry(ip6addr); + } + if (dst_idx < 0) { + return; + } + + /* Find next hop neighbor in cache. */ + if (ip6_addr_cmp(&(destination_cache[dst_idx].next_hop_addr), &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + i = nd6_cached_neighbor_index; + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[dst_idx].next_hop_addr)); + } + if (i < 0) { + return; + } + + /* For safety: don't set as reachable if we don't have a LL address yet. Misuse protection. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE || neighbor_cache[i].state == ND6_NO_ENTRY) { + return; + } + + /* Set reachability state. */ + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; +} +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ + +/** + * Remove all prefix, neighbor_cache and router entries of the specified netif. 
+ * + * @param netif points to a network interface + */ +void +nd6_cleanup_netif(struct netif *netif) +{ + u8_t i; + s8_t router_index; + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif == netif) { + prefix_list[i].netif = NULL; + } + } + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].netif == netif) { + for (router_index = 0; router_index < LWIP_ND6_NUM_ROUTERS; router_index++) { + if (default_router_list[router_index].neighbor_entry == &neighbor_cache[i]) { + default_router_list[router_index].neighbor_entry = NULL; + default_router_list[router_index].flags = 0; + } + } + neighbor_cache[i].isrouter = 0; + nd6_free_neighbor_cache_entry(i); + } + } + /* Clear the destination cache, since many entries may now have become + * invalid for one of several reasons. As destination cache entries have no + * netif association, use a sledgehammer approach (this can be improved). */ + nd6_clear_destination_cache(); +} + +#if LWIP_IPV6_MLD +/** + * The state of a local IPv6 address entry is about to change. If needed, join + * or leave the solicited-node multicast group for the address. + * + * @param netif The netif that owns the address. + * @param addr_idx The index of the address. + * @param new_state The new (IP6_ADDR_) state for the address. + */ +void +nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state) +{ + u8_t old_state, old_member, new_member; + + old_state = netif_ip6_addr_state(netif, addr_idx); + + /* Determine whether we were, and should be, a member of the solicited-node + * multicast group for this address. For tentative addresses, the group is + * not joined until the address enters the TENTATIVE_1 (or VALID) state. */ + old_member = (old_state != IP6_ADDR_INVALID && old_state != IP6_ADDR_DUPLICATED && old_state != IP6_ADDR_TENTATIVE); + new_member = (new_state != IP6_ADDR_INVALID && new_state != IP6_ADDR_DUPLICATED && new_state != IP6_ADDR_TENTATIVE); + + if (old_member != new_member) { + ip6_addr_set_solicitednode(&multicast_address, netif_ip6_addr(netif, addr_idx)->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + + if (new_member) { + mld6_joingroup_netif(netif, &multicast_address); + } else { + mld6_leavegroup_netif(netif, &multicast_address); + } + } +} +#endif /* LWIP_IPV6_MLD */ + +/** Netif was added, set up, or reconnected (link up) */ +void +nd6_restart_netif(struct netif *netif) +{ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send Router Solicitation messages (see RFC 4861, ch. 6.3.7). */ + netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +} + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/mem.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/mem.c new file mode 100644 index 000000000..315fb3c5d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/mem.c @@ -0,0 +1,1017 @@ +/** + * @file + * Dynamic memory manager + * + * This is a lightweight replacement for the standard C library malloc(). 
+ * + * If you want to use the standard C library malloc() instead, define + * MEM_LIBC_MALLOC to 1 in your lwipopts.h + * + * To let mem_malloc() use pools (prevents fragmentation and is much faster than + * a heap but might waste some memory), define MEM_USE_POOLS to 1, define + * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list + * of pools like this (more pools can be added between _START and _END): + * + * Define three pools with sizes 256, 512, and 1512 bytes + * LWIP_MALLOC_MEMPOOL_START + * LWIP_MALLOC_MEMPOOL(20, 256) + * LWIP_MALLOC_MEMPOOL(10, 512) + * LWIP_MALLOC_MEMPOOL(5, 1512) + * LWIP_MALLOC_MEMPOOL_END + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" +#include "lwip/mem.h" +#include "lwip/def.h" +#include "lwip/sys.h" +#include "lwip/stats.h" +#include "lwip/err.h" + +#include + +#if MEM_LIBC_MALLOC +#include /* for malloc()/free() */ +#endif + +/* This is overridable for tests only... */ +#ifndef LWIP_MEM_ILLEGAL_FREE +#define LWIP_MEM_ILLEGAL_FREE(msg) LWIP_ASSERT(msg, 0) +#endif + +#define MEM_STATS_INC_LOCKED(x) SYS_ARCH_LOCKED(MEM_STATS_INC(x)) +#define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y)) +#define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y)) + +#if MEM_OVERFLOW_CHECK +#define MEM_SANITY_OFFSET MEM_SANITY_REGION_BEFORE_ALIGNED +#define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED) +#else +#define MEM_SANITY_OFFSET 0 +#define MEM_SANITY_OVERHEAD 0 +#endif + +#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK +/** + * Check if a mep element was victim of an overflow or underflow + * (e.g. 
the restricted area after/before it has been altered) + * + * @param p the mem element to check + * @param size allocated size of the element + * @param descr1 description of the element source shown on error + * @param descr2 description of the element source shown on error + */ +void +mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2) +{ +#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED + u16_t k; + u8_t *m; + +#if MEM_SANITY_REGION_AFTER_ALIGNED > 0 + m = (u8_t *)p + size; + for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) { + if (m[k] != 0xcd) { + char errstr[128]; + snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2); + LWIP_ASSERT(errstr, 0); + } + } +#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ + +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 + m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED; + for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) { + if (m[k] != 0xcd) { + char errstr[128]; + snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2); + LWIP_ASSERT(errstr, 0); + } + } +#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */ +#else + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); + LWIP_UNUSED_ARG(descr); +#endif +} + +/** + * Initialize the restricted area of a mem element. + */ +void +mem_overflow_init_raw(void *p, size_t size) +{ +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 + u8_t *m; +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 + m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED; + memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED); +#endif +#if MEM_SANITY_REGION_AFTER_ALIGNED > 0 + m = (u8_t *)p + size; + memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED); +#endif +#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); +#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ +} +#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */ + +#if MEM_LIBC_MALLOC || MEM_USE_POOLS + +/** mem_init is not used when using pools instead of a heap or using + * C library malloc(). + */ +void +mem_init(void) +{ +} + +/** mem_trim is not used when using pools instead of a heap or using + * C library malloc(): we can't free part of a pool element and the stack + * support mem_trim() to return a different pointer + */ +void * +mem_trim(void *mem, mem_size_t size) +{ + LWIP_UNUSED_ARG(size); + return mem; +} +#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC +/* lwIP heap implemented using C library malloc() */ + +/* in case C library malloc() needs extra protection, + * allow these defines to be overridden. + */ +#ifndef mem_clib_free +#define mem_clib_free free +#endif +#ifndef mem_clib_malloc +#define mem_clib_malloc malloc +#endif +#ifndef mem_clib_calloc +#define mem_clib_calloc calloc +#endif + +#if LWIP_STATS && MEM_STATS +#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t)) +#else +#define MEM_LIBC_STATSHELPER_SIZE 0 +#endif + +/** + * Allocate a block of memory with a minimum of 'size' bytes. + * + * @param size is the minimum size of the requested block in bytes. + * @return pointer to allocated memory or NULL if no free memory was found. + * + * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT). 
+ */ +void * +mem_malloc(mem_size_t size) +{ + void *ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE); + if (ret == NULL) { + MEM_STATS_INC_LOCKED(err); + } else { + LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret); +#if LWIP_STATS && MEM_STATS + *(mem_size_t *)ret = size; + ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_INC_USED_LOCKED(used, size); +#endif + } + return ret; +} + +/** Put memory back on the heap + * + * @param rmem is the pointer as returned by a previous call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); +#if LWIP_STATS && MEM_STATS + rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem); +#endif + mem_clib_free(rmem); +} + +#elif MEM_USE_POOLS + +/* lwIP heap implemented with different sized pools */ + +/** + * Allocate memory: determine the smallest pool that is big enough + * to contain an element of 'size' and get an element from that pool. + * + * @param size the size in bytes of the memory needed + * @return a pointer to the allocated memory or NULL if the pool is empty + */ +void * +mem_malloc(mem_size_t size) +{ + void *ret; + struct memp_malloc_helper *element = NULL; + memp_t poolnr; + mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + + for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) { + /* is this pool big enough to hold an element of the required size + plus a struct memp_malloc_helper that saves the pool this element came from? */ + if (required_size <= memp_pools[poolnr]->size) { + element = (struct memp_malloc_helper *)memp_malloc(poolnr); + if (element == NULL) { + /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */ +#if MEM_USE_POOLS_TRY_BIGGER_POOL + /** Try a bigger pool if this one is empty! */ + if (poolnr < MEMP_POOL_LAST) { + continue; + } +#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */ + MEM_STATS_INC_LOCKED(err); + return NULL; + } + break; + } + } + if (poolnr > MEMP_POOL_LAST) { + LWIP_ASSERT("mem_malloc(): no pool is that big!", 0); + MEM_STATS_INC_LOCKED(err); + return NULL; + } + + /* save the pool number this element came from */ + element->poolnr = poolnr; + /* and return a pointer to the memory directly after the struct memp_malloc_helper */ + ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + /* truncating to u16_t is safe because struct memp_desc::size is u16_t */ + element->size = (u16_t)size; + MEM_STATS_INC_USED_LOCKED(used, element->size); +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +#if MEMP_OVERFLOW_CHECK + /* initialize unused memory (diff between requested size and selected pool's size) */ + memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size); +#endif /* MEMP_OVERFLOW_CHECK */ + return ret; +} + +/** + * Free memory previously allocated by mem_malloc. 
Loads the pool number + * and calls memp_free with that pool number to put the element back into + * its pool + * + * @param rmem the memory element to free + */ +void +mem_free(void *rmem) +{ + struct memp_malloc_helper *hmem; + + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); + + /* get the original struct memp_malloc_helper */ + /* cast through void* to get rid of alignment warnings */ + hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))); + + LWIP_ASSERT("hmem != NULL", (hmem != NULL)); + LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem))); + LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX)); + + MEM_STATS_DEC_USED_LOCKED(used, hmem->size); +#if MEMP_OVERFLOW_CHECK + { + u16_t i; + LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size", + hmem->size <= memp_pools[hmem->poolnr]->size); + /* check that unused memory remained untouched (diff between requested size and selected pool's size) */ + for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) { + u8_t data = *((u8_t *)rmem + i); + LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd); + } + } +#endif /* MEMP_OVERFLOW_CHECK */ + + /* and put it in the pool we saved earlier */ + memp_free(hmem->poolnr, hmem); +} + +#else /* MEM_USE_POOLS */ +/* lwIP replacement for your libc malloc() */ + +/** + * The heap is made up as a list of structs of this type. + * This does not have to be aligned since for getting its size, + * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns. + */ +struct mem { + /** index (-> ram[next]) of the next struct */ + mem_size_t next; + /** index (-> ram[prev]) of the previous struct */ + mem_size_t prev; + /** 1: this area is used; 0: this area is unused */ + u8_t used; +#if MEM_OVERFLOW_CHECK + /** this keeps track of the user allocation size for guard checks */ + mem_size_t user_size; +#endif +}; + +/** All allocated blocks will be MIN_SIZE bytes big, at least! + * MIN_SIZE can be overridden to suit your needs. Smaller values save space, + * larger values could prevent too small blocks to fragment the RAM too much. */ +#ifndef MIN_SIZE +#define MIN_SIZE 12 +#endif /* MIN_SIZE */ +/* some alignment macros: we define them here for better source code layout */ +#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE) +#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem)) +#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE) + +/** If you want to relocate the heap to external memory, simply define + * LWIP_RAM_HEAP_POINTER as a void-pointer to that location. + * If so, make sure the memory at that location is big enough (see below on + * how that space is calculated). */ +#ifndef LWIP_RAM_HEAP_POINTER +/** the heap. we need one struct mem at the end and some room for alignment */ +LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM)); +#define LWIP_RAM_HEAP_POINTER ram_heap +#endif /* LWIP_RAM_HEAP_POINTER */ + +/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */ +static u8_t *ram; +/** the last entry, always unused! */ +static struct mem *ram_end; + +/** concurrent access protection */ +#if !NO_SYS +static sys_mutex_t mem_mutex; +#endif + +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + +static volatile u8_t mem_free_count; + +/* Allow mem_free from other (e.g. 
interrupt) context */ +#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free) +#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free) +#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free) +#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc) +#define LWIP_MEM_LFREE_VOLATILE volatile + +#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/* Protect the heap only by using a mutex */ +#define LWIP_MEM_FREE_DECL_PROTECT() +#define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex) +#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex) +/* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */ +#define LWIP_MEM_ALLOC_DECL_PROTECT() +#define LWIP_MEM_ALLOC_PROTECT() +#define LWIP_MEM_ALLOC_UNPROTECT() +#define LWIP_MEM_LFREE_VOLATILE + +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/** pointer to the lowest free block, this is used for faster search */ +static struct mem * LWIP_MEM_LFREE_VOLATILE lfree; + +#if MEM_SANITY_CHECK +static void mem_sanity(void); +#define MEM_SANITY() mem_sanity() +#else +#define MEM_SANITY() +#endif + +#if MEM_OVERFLOW_CHECK +static void +mem_overflow_init_element(struct mem *mem, mem_size_t user_size) +{ + void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + mem->user_size = user_size; + mem_overflow_init_raw(p, user_size); +} + +static void +mem_overflow_check_element(struct mem *mem) +{ + void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + mem_overflow_check_raw(p, mem->user_size, "heap", ""); +} +#else /* MEM_OVERFLOW_CHECK */ +#define mem_overflow_init_element(mem, size) +#define mem_overflow_check_element(mem) +#endif /* MEM_OVERFLOW_CHECK */ + +static struct mem * +ptr_to_mem(mem_size_t ptr) +{ + return (struct mem *)(void *)&ram[ptr]; +} + +static mem_size_t +mem_to_ptr(void *mem) +{ + return (mem_size_t)((u8_t *)mem - ram); +} + +/** + * "Plug holes" by combining adjacent empty struct mems. + * After this function is through, there should not exist + * one empty struct mem pointing to another empty struct mem. + * + * @param mem this points to a struct mem which just has been freed + * @internal this function is only called by mem_free() and mem_trim() + * + * This assumes access to the heap is protected by the calling function + * already. 
+ */ +static void +plug_holes(struct mem *mem) +{ + struct mem *nmem; + struct mem *pmem; + + LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram); + LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end); + LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0); + + /* plug hole forward */ + LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED); + + nmem = ptr_to_mem(mem->next); + if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) { + /* if mem->next is unused and not end of ram, combine mem and mem->next */ + if (lfree == nmem) { + lfree = mem; + } + mem->next = nmem->next; + if (nmem->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem); + } + } + + /* plug hole backward */ + pmem = ptr_to_mem(mem->prev); + if (pmem != mem && pmem->used == 0) { + /* if mem->prev is unused, combine mem and mem->prev */ + if (lfree == mem) { + lfree = pmem; + } + pmem->next = mem->next; + if (mem->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem); + } + } +} + +/** + * Zero the heap and initialize start, end and lowest-free + */ +void +mem_init(void) +{ + struct mem *mem; + + LWIP_ASSERT("Sanity check alignment", + (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0); + + /* align the heap */ + ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER); + /* initialize the start of the heap */ + mem = (struct mem *)(void *)ram; + mem->next = MEM_SIZE_ALIGNED; + mem->prev = 0; + mem->used = 0; + /* initialize the end of the heap */ + ram_end = ptr_to_mem(MEM_SIZE_ALIGNED); + ram_end->used = 1; + ram_end->next = MEM_SIZE_ALIGNED; + ram_end->prev = MEM_SIZE_ALIGNED; + MEM_SANITY(); + + /* initialize the lowest-free pointer to the start of the heap */ + lfree = (struct mem *)(void *)ram; + + MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED); + + if (sys_mutex_new(&mem_mutex) != ERR_OK) { + LWIP_ASSERT("failed to create mem_mutex", 0); + } +} + +/* Check if a struct mem is correctly linked. + * If not, double-free is a possible reason. 
+ */ +static int +mem_link_valid(struct mem *mem) +{ + struct mem *nmem, *pmem; + mem_size_t rmem_idx; + rmem_idx = mem_to_ptr(mem); + nmem = ptr_to_mem(mem->next); + pmem = ptr_to_mem(mem->prev); + if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) || + ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) || + ((nmem != ram_end) && (nmem->prev != rmem_idx))) { + return 0; + } + return 1; +} + +#if MEM_SANITY_CHECK +static void +mem_sanity(void) +{ + struct mem *mem; + u8_t last_used; + + /* begin with first element here */ + mem = (struct mem *)ram; + LWIP_ASSERT("heap element used valid", (mem->used == 0) || (mem->used == 1)); + last_used = mem->used; + LWIP_ASSERT("heap element prev ptr valid", mem->prev == 0); + LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next))); + + /* check all elements before the end of the heap */ + for (mem = ptr_to_mem(mem->next); + ((u8_t *)mem > ram) && (mem < ram_end); + mem = ptr_to_mem(mem->next)) { + LWIP_ASSERT("heap element aligned", LWIP_MEM_ALIGN(mem) == mem); + LWIP_ASSERT("heap element prev ptr valid", mem->prev <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element prev ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->prev) == ptr_to_mem(mem->prev))); + LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next))); + + if (last_used == 0) { + /* 2 unused elements in a row? */ + LWIP_ASSERT("heap element unused?", mem->used == 1); + } else { + LWIP_ASSERT("heap element unused member", (mem->used == 0) || (mem->used == 1)); + } + + LWIP_ASSERT("heap element link valid", mem_link_valid(mem)); + + /* used/unused altering */ + last_used = mem->used; + } + LWIP_ASSERT("heap end ptr sanity", mem == ptr_to_mem(MEM_SIZE_ALIGNED)); + LWIP_ASSERT("heap element used valid", mem->used == 1); + LWIP_ASSERT("heap element prev ptr valid", mem->prev == MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr valid", mem->next == MEM_SIZE_ALIGNED); +} +#endif /* MEM_SANITY_CHECK */ + +/** + * Put a struct mem back on the heap + * + * @param rmem is the data portion of a struct mem as returned by a previous + * call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + struct mem *mem; + LWIP_MEM_FREE_DECL_PROTECT(); + + if (rmem == NULL) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n")); + return; + } + if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) { + LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment"); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + /* Get the corresponding struct mem: */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET)); + + if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory"); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } +#if MEM_OVERFLOW_CHECK + mem_overflow_check_element(mem); +#endif + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + /* mem has to 
be in a used state */ + if (!mem->used) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free"); + LWIP_MEM_FREE_UNPROTECT(); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + if (!mem_link_valid(mem)) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free"); + LWIP_MEM_FREE_UNPROTECT(); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + /* mem is now unused. */ + mem->used = 0; + + if (mem < lfree) { + /* the newly freed struct is now the lowest */ + lfree = mem; + } + + MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram))); + + /* finally, see if prev or next are free also */ + plug_holes(mem); + MEM_SANITY(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 1; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_FREE_UNPROTECT(); +} + +/** + * Shrink memory returned by mem_malloc(). + * + * @param rmem pointer to memory allocated by mem_malloc the is to be shrinked + * @param new_size required size after shrinking (needs to be smaller than or + * equal to the previous size) + * @return for compatibility reasons: is always == rmem, at the moment + * or NULL if newsize is > old size, in which case rmem is NOT touched + * or freed! + */ +void * +mem_trim(void *rmem, mem_size_t new_size) +{ + mem_size_t size, newsize; + mem_size_t ptr, ptr2; + struct mem *mem, *mem2; + /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */ + LWIP_MEM_FREE_DECL_PROTECT(); + + /* Expand the size of the allocated memory region so that we can + adjust for alignment. */ + newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size); + if (newsize < MIN_SIZE_ALIGNED) { + /* every data block must be at least MIN_SIZE_ALIGNED long */ + newsize = MIN_SIZE_ALIGNED; + } +#if MEM_OVERFLOW_CHECK + newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED; +#endif + if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) { + return NULL; + } + + LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram && + (u8_t *)rmem < (u8_t *)ram_end); + + if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return rmem; + } + /* Get the corresponding struct mem ... */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET)); +#if MEM_OVERFLOW_CHECK + mem_overflow_check_element(mem); +#endif + /* ... 
and its offset pointer */ + ptr = mem_to_ptr(mem); + + size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD)); + LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size); + if (newsize > size) { + /* not supported */ + return NULL; + } + if (newsize == size) { + /* No change in size, simply return */ + return rmem; + } + + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + + mem2 = ptr_to_mem(mem->next); + if (mem2->used == 0) { + /* The next struct is unused, we can simply move it at little */ + mem_size_t next; + LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED); + /* remember the old next pointer */ + next = mem2->next; + /* create new struct mem which is moved directly after the shrinked mem */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize); + if (lfree == mem2) { + lfree = ptr_to_mem(ptr2); + } + mem2 = ptr_to_mem(ptr2); + mem2->used = 0; + /* restore the next pointer */ + mem2->next = next; + /* link it back to mem */ + mem2->prev = ptr; + /* link mem to it */ + mem->next = ptr2; + /* last thing to restore linked list: as we have moved mem2, + * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not + * the end of the heap */ + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_DEC_USED(used, (size - newsize)); + /* no need to plug holes, we've already done that */ + } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) { + /* Next struct is used but there's room for another struct mem with + * at least MIN_SIZE_ALIGNED of data. + * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem + * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED'). + * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty + * region that couldn't hold data, but when mem->next gets freed, + * the 2 regions would be combined, resulting in more free memory */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize); + LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED); + mem2 = ptr_to_mem(ptr2); + if (mem2 < lfree) { + lfree = mem2; + } + mem2->used = 0; + mem2->next = mem->next; + mem2->prev = ptr; + mem->next = ptr2; + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_DEC_USED(used, (size - newsize)); + /* the original mem->next is used, so no need to plug holes! */ + } + /* else { + next struct mem is used but size between mem and mem2 is not big enough + to create another struct mem + -> don't do anyhting. + -> the remaining space stays unused since it is too small + } */ +#if MEM_OVERFLOW_CHECK + mem_overflow_init_element(mem, new_size); +#endif + MEM_SANITY(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 1; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_FREE_UNPROTECT(); + return rmem; +} + +/** + * Allocate a block of memory with a minimum of 'size' bytes. + * + * @param size_in is the minimum size of the requested block in bytes. + * @return pointer to allocated memory or NULL if no free memory was found. + * + * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT). 
+ */ +void * +mem_malloc(mem_size_t size_in) +{ + mem_size_t ptr, ptr2, size; + struct mem *mem, *mem2; +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + u8_t local_mem_free_count = 0; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_ALLOC_DECL_PROTECT(); + + if (size_in == 0) { + return NULL; + } + + /* Expand the size of the allocated memory region so that we can + adjust for alignment. */ + size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in); + if (size < MIN_SIZE_ALIGNED) { + /* every data block must be at least MIN_SIZE_ALIGNED long */ + size = MIN_SIZE_ALIGNED; + } +#if MEM_OVERFLOW_CHECK + size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED; +#endif + if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) { + return NULL; + } + + /* protect the heap from concurrent access */ + sys_mutex_lock(&mem_mutex); + LWIP_MEM_ALLOC_PROTECT(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* run as long as a mem_free disturbed mem_malloc or mem_trim */ + do { + local_mem_free_count = 0; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + /* Scan through the heap searching for a free block that is big enough, + * beginning with the lowest free block. + */ + for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size; + ptr = ptr_to_mem(ptr)->next) { + mem = ptr_to_mem(ptr); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* allow mem_free or mem_trim to run */ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem. */ + local_mem_free_count = 1; + break; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + if ((!mem->used) && + (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) { + /* mem is not used and at least perfect fit is possible: + * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */ + + if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) { + /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing + * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem') + * -> split large block, create empty remainder, + * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if + * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size, + * struct mem would fit in but no data between mem2 and mem2->next + * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty + * region that couldn't hold data, but when mem->next gets freed, + * the 2 regions would be combined, resulting in more free memory + */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size); + LWIP_ASSERT("invalid next ptr",ptr2 != MEM_SIZE_ALIGNED); + /* create mem2 struct */ + mem2 = ptr_to_mem(ptr2); + mem2->used = 0; + mem2->next = mem->next; + mem2->prev = ptr; + /* and insert it between mem and mem->next */ + mem->next = ptr2; + mem->used = 1; + + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM)); + } else { + /* (a mem2 struct does no fit into the user data space of mem and mem->next will always + * be used at this point: if not we have 2 unused structs in a row, plug_holes should have + * take care of this). + * -> near fit or exact fit: do not split, no mem2 creation + * also can't move mem->next directly behind mem, since mem->next + * will always be used at this point! 
+ */ + mem->used = 1; + MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem)); + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT +mem_malloc_adjust_lfree: +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + if (mem == lfree) { + struct mem *cur = lfree; + /* Find next free block after mem and update lowest free pointer */ + while (cur->used && cur != ram_end) { +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* prevent high interrupt latency... */ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem or lfree. */ + goto mem_malloc_adjust_lfree; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + cur = ptr_to_mem(cur->next); + } + lfree = cur; + LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used))); + } + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.", + (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end); + LWIP_ASSERT("mem_malloc: allocated memory properly aligned.", + ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0); + LWIP_ASSERT("mem_malloc: sanity check alignment", + (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0); + +#if MEM_OVERFLOW_CHECK + mem_overflow_init_element(mem, size_in); +#endif + MEM_SANITY(); + return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + } + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* if we got interrupted by a mem_free, try again */ + } while (local_mem_free_count != 0); +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + MEM_STATS_INC(err); + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size)); + return NULL; +} + +#endif /* MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + return mem_clib_calloc(count, size); +} + +#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ +/** + * Contiguously allocates enough space for count objects that are size bytes + * of memory each and returns a pointer to the allocated memory. + * + * The allocated memory is filled with bytes of value zero. 
+ * + * @param count number of objects to allocate + * @param size size of the objects to allocate + * @return pointer to allocated memory / NULL pointer if there is an error + */ +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + void *p; + size_t alloc_size = (size_t)count * (size_t)size; + + if ((size_t)(mem_size_t)alloc_size != alloc_size) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size)); + return NULL; + } + + /* allocate 'count' objects of size 'size' */ + p = mem_malloc((mem_size_t)alloc_size); + if (p) { + /* zero the memory */ + memset(p, 0, alloc_size); + } + return p; +} +#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/memp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/memp.c new file mode 100644 index 000000000..352ce5a55 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/memp.c @@ -0,0 +1,447 @@ +/** + * @file + * Dynamic pool memory manager + * + * lwIP has dedicated pools for many structures (netconn, protocol control blocks, + * packet buffers, ...). All these pools are managed here. + * + * @defgroup mempool Memory pools + * @ingroup infrastructure + * Custom memory pools + + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
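For reference, application code reaches the heap implemented above through the lwip/mem.h API. A minimal usage sketch follows; the function name, buffer sizes and element counts are illustrative only:

#include "lwip/mem.h"
#include <string.h>

static void heap_usage_sketch(void)
{
  /* mem_malloc() returns MEM_ALIGNMENT-aligned memory from the lwIP heap,
     or NULL when the heap is exhausted or the size check above fails. */
  char *buf = (char *)mem_malloc(128);
  if (buf != NULL) {
    memset(buf, 0, 128);
    mem_free(buf);
  }

  /* mem_calloc() additionally zeroes the memory and rejects count*size
     products that do not fit in mem_size_t. */
  u32_t *table = (u32_t *)mem_calloc(16, sizeof(u32_t));
  if (table != NULL) {
    mem_free(table);
  }
}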
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/memp.h" +#include "lwip/sys.h" +#include "lwip/stats.h" + +#include + +/* Make sure we include everything we need for size calculation required by memp_std.h */ +#include "lwip/pbuf.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/tcp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/altcp.h" +#include "lwip/ip4_frag.h" +#include "lwip/netbuf.h" +#include "lwip/api.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/priv/api_msg.h" +#include "lwip/priv/sockets_priv.h" +#include "lwip/etharp.h" +#include "lwip/igmp.h" +#include "lwip/timeouts.h" +/* needed by default MEMP_NUM_SYS_TIMEOUT */ +#include "netif/ppp/ppp_opts.h" +#include "lwip/netdb.h" +#include "lwip/dns.h" +#include "lwip/priv/nd6_priv.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" + +#define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc) +#include "lwip/priv/memp_std.h" + +const struct memp_desc *const memp_pools[MEMP_MAX] = { +#define LWIP_MEMPOOL(name,num,size,desc) &memp_ ## name, +#include "lwip/priv/memp_std.h" +}; + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if MEMP_MEM_MALLOC && MEMP_OVERFLOW_CHECK >= 2 +#undef MEMP_OVERFLOW_CHECK +/* MEMP_OVERFLOW_CHECK >= 2 does not work with MEMP_MEM_MALLOC, use 1 instead */ +#define MEMP_OVERFLOW_CHECK 1 +#endif + +#if MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC +/** + * Check that memp-lists don't form a circle, using "Floyd's cycle-finding algorithm". + */ +static int +memp_sanity(const struct memp_desc *desc) +{ + struct memp *t, *h; + + t = *desc->tab; + if (t != NULL) { + for (h = t->next; (t != NULL) && (h != NULL); t = t->next, + h = ((h->next != NULL) ? h->next->next : NULL)) { + if (t == h) { + return 0; + } + } + } + + return 1; +} +#endif /* MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC */ + +#if MEMP_OVERFLOW_CHECK +/** + * Check if a memp element was victim of an overflow or underflow + * (e.g. the restricted area after/before it has been altered) + * + * @param p the memp element to check + * @param desc the pool p comes from + */ +static void +memp_overflow_check_element(struct memp *p, const struct memp_desc *desc) +{ + mem_overflow_check_raw((u8_t *)p + MEMP_SIZE, desc->size, "pool ", desc->desc); +} + +/** + * Initialize the restricted area of on memp element. + */ +static void +memp_overflow_init_element(struct memp *p, const struct memp_desc *desc) +{ + mem_overflow_init_raw((u8_t *)p + MEMP_SIZE, desc->size); +} + +#if MEMP_OVERFLOW_CHECK >= 2 +/** + * Do an overflow check for all elements in every pool. + * + * @see memp_overflow_check_element for a description of the check + */ +static void +memp_overflow_check_all(void) +{ + u16_t i, j; + struct memp *p; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + + for (i = 0; i < MEMP_MAX; ++i) { + p = (struct memp *)LWIP_MEM_ALIGN(memp_pools[i]->base); + for (j = 0; j < memp_pools[i]->num; ++j) { + memp_overflow_check_element(p, memp_pools[i]); + p = LWIP_ALIGNMENT_CAST(struct memp *, ((u8_t *)p + MEMP_SIZE + memp_pools[i]->size + MEM_SANITY_REGION_AFTER_ALIGNED)); + } + } + SYS_ARCH_UNPROTECT(old_level); +} +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +#endif /* MEMP_OVERFLOW_CHECK */ + +/** + * Initialize custom memory pool. 
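The guard-region and cycle checks above are compiled in only when the corresponding options are enabled in lwipopts.h. A hedged debug-build sketch (the values are one possible choice; MEMP_OVERFLOW_CHECK falls back to 1 when MEMP_MEM_MALLOC is set, as noted above):

/* lwipopts.h, debug builds only */
#define MEM_OVERFLOW_CHECK   1   /* guard regions around heap allocations */
#define MEMP_OVERFLOW_CHECK  2   /* check every pool element on each alloc/free */
#define MEMP_SANITY_CHECK    1   /* detect cycles in the pool free lists */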
+ * Related functions: memp_malloc_pool, memp_free_pool + * + * @param desc pool to initialize + */ +void +memp_init_pool(const struct memp_desc *desc) +{ +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); +#else + int i; + struct memp *memp; + + *desc->tab = NULL; + memp = (struct memp *)LWIP_MEM_ALIGN(desc->base); +#if MEMP_MEM_INIT + /* force memset on pool memory */ + memset(memp, 0, (size_t)desc->num * (MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEM_SANITY_REGION_AFTER_ALIGNED +#endif + )); +#endif + /* create a linked list of memp elements */ + for (i = 0; i < desc->num; ++i) { + memp->next = *desc->tab; + *desc->tab = memp; +#if MEMP_OVERFLOW_CHECK + memp_overflow_init_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEM_SANITY_REGION_AFTER_ALIGNED +#endif + ); + } +#if MEMP_STATS + desc->stats->avail = desc->num; +#endif /* MEMP_STATS */ +#endif /* !MEMP_MEM_MALLOC */ + +#if MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) + desc->stats->name = desc->desc; +#endif /* MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) */ +} + +/** + * Initializes lwIP built-in pools. + * Related functions: memp_malloc, memp_free + * + * Carves out memp_memory into linked lists for each pool-type. + */ +void +memp_init(void) +{ + u16_t i; + + /* for every pool: */ + for (i = 0; i < LWIP_ARRAYSIZE(memp_pools); i++) { + memp_init_pool(memp_pools[i]); + +#if LWIP_STATS && MEMP_STATS + lwip_stats.memp[i] = memp_pools[i]->stats; +#endif + } + +#if MEMP_OVERFLOW_CHECK >= 2 + /* check everything a first time to see if it worked */ + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +} + +static void * +#if !MEMP_OVERFLOW_CHECK +do_memp_malloc_pool(const struct memp_desc *desc) +#else +do_memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line) +#endif +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + +#if MEMP_MEM_MALLOC + memp = (struct memp *)mem_malloc(MEMP_SIZE + MEMP_ALIGN_SIZE(desc->size)); + SYS_ARCH_PROTECT(old_level); +#else /* MEMP_MEM_MALLOC */ + SYS_ARCH_PROTECT(old_level); + + memp = *desc->tab; +#endif /* MEMP_MEM_MALLOC */ + + if (memp != NULL) { +#if !MEMP_MEM_MALLOC +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + + *desc->tab = memp->next; +#if MEMP_OVERFLOW_CHECK + memp->next = NULL; +#endif /* MEMP_OVERFLOW_CHECK */ +#endif /* !MEMP_MEM_MALLOC */ +#if MEMP_OVERFLOW_CHECK + memp->file = file; + memp->line = line; +#if MEMP_MEM_MALLOC + memp_overflow_init_element(memp, desc); +#endif /* MEMP_MEM_MALLOC */ +#endif /* MEMP_OVERFLOW_CHECK */ + LWIP_ASSERT("memp_malloc: memp properly aligned", + ((mem_ptr_t)memp % MEM_ALIGNMENT) == 0); +#if MEMP_STATS + desc->stats->used++; + if (desc->stats->used > desc->stats->max) { + desc->stats->max = desc->stats->used; + } +#endif + SYS_ARCH_UNPROTECT(old_level); + /* cast through u8_t* to get rid of alignment warnings */ + return ((u8_t *)memp + MEMP_SIZE); + } else { +#if MEMP_STATS + desc->stats->err++; +#endif + SYS_ARCH_UNPROTECT(old_level); + LWIP_DEBUGF(MEMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("memp_malloc: out of memory in pool %s\n", desc->desc)); + } + + return NULL; +} + +/** + * Get an element from a custom pool. 
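memp_init_pool()/memp_malloc_pool() above are normally reached through the private-pool macros in lwip/memp.h. A hedged sketch of declaring and using a custom pool; the pool name, element type and count are made up for illustration:

#include "lwip/memp.h"

struct my_msg {        /* hypothetical element type */
  u16_t id;
  u16_t len;
};

LWIP_MEMPOOL_DECLARE(MY_MSG_POOL, 8, sizeof(struct my_msg), "MY_MSG_POOL")

void my_msg_pool_sketch(void)
{
  struct my_msg *m;

  LWIP_MEMPOOL_INIT(MY_MSG_POOL);                         /* memp_init_pool() */
  m = (struct my_msg *)LWIP_MEMPOOL_ALLOC(MY_MSG_POOL);   /* memp_malloc_pool() */
  if (m != NULL) {
    m->id = 1;
    m->len = 0;
    LWIP_MEMPOOL_FREE(MY_MSG_POOL, m);                    /* memp_free_pool() */
  }
}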
+ * + * @param desc the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc_pool(const struct memp_desc *desc) +#else +memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line) +#endif +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if (desc == NULL) { + return NULL; + } + +#if !MEMP_OVERFLOW_CHECK + return do_memp_malloc_pool(desc); +#else + return do_memp_malloc_pool_fn(desc, file, line); +#endif +} + +/** + * Get an element from a specific pool. + * + * @param type the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc(memp_t type) +#else +memp_malloc_fn(memp_t type, const char *file, const int line) +#endif +{ + void *memp; + LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;); + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#if !MEMP_OVERFLOW_CHECK + memp = do_memp_malloc_pool(memp_pools[type]); +#else + memp = do_memp_malloc_pool_fn(memp_pools[type], file, line); +#endif + + return memp; +} + +static void +do_memp_free_pool(const struct memp_desc *desc, void *mem) +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + + LWIP_ASSERT("memp_free: mem properly aligned", + ((mem_ptr_t)mem % MEM_ALIGNMENT) == 0); + + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)mem - MEMP_SIZE); + + SYS_ARCH_PROTECT(old_level); + +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + +#if MEMP_STATS + desc->stats->used--; +#endif + +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); + SYS_ARCH_UNPROTECT(old_level); + mem_free(memp); +#else /* MEMP_MEM_MALLOC */ + memp->next = *desc->tab; + *desc->tab = memp; + +#if MEMP_SANITY_CHECK + LWIP_ASSERT("memp sanity", memp_sanity(desc)); +#endif /* MEMP_SANITY_CHECK */ + + SYS_ARCH_UNPROTECT(old_level); +#endif /* !MEMP_MEM_MALLOC */ +} + +/** + * Put a custom pool element back into its pool. + * + * @param desc the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free_pool(const struct memp_desc *desc, void *mem) +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if ((desc == NULL) || (mem == NULL)) { + return; + } + + do_memp_free_pool(desc, mem); +} + +/** + * Put an element back into its pool. 
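The built-in pools generated from memp_std.h are used through memp_malloc()/memp_free() with a MEMP_* type, as implemented above. A small hedged sketch, assuming LWIP_NETCONN (or LWIP_SOCKET) is enabled so that the MEMP_NETBUF pool exists:

#include "lwip/memp.h"
#include "lwip/netbuf.h"

void builtin_pool_sketch(void)
{
  /* one element from the fixed-size MEMP_NETBUF pool, returned immediately */
  struct netbuf *nb = (struct netbuf *)memp_malloc(MEMP_NETBUF);
  if (nb != NULL) {
    memp_free(MEMP_NETBUF, nb);
  }
}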
+ * + * @param type the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free(memp_t type, void *mem) +{ +#ifdef LWIP_HOOK_MEMP_AVAILABLE + struct memp *old_first; +#endif + + LWIP_ERROR("memp_free: type < MEMP_MAX", (type < MEMP_MAX), return;); + + if (mem == NULL) { + return; + } + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + old_first = *memp_pools[type]->tab; +#endif + + do_memp_free_pool(memp_pools[type], mem); + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + if (old_first == NULL) { + LWIP_HOOK_MEMP_AVAILABLE(type); + } +#endif +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/netif.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/netif.c new file mode 100644 index 000000000..15200a274 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/netif.c @@ -0,0 +1,1795 @@ +/** + * @file + * lwIP network interface abstraction + * + * @defgroup netif Network interface (NETIF) + * @ingroup callbackstyle_api + * + * @defgroup netif_ip4 IPv4 address handling + * @ingroup netif + * + * @defgroup netif_ip6 IPv6 address handling + * @ingroup netif + * + * @defgroup netif_cd Client data handling + * Store data (void*) on a netif for application usage. + * @see @ref LWIP_NUM_NETIF_CLIENT_DATA + * @ingroup netif + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include /* memset */ +#include /* atoi */ + +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/snmp.h" +#include "lwip/igmp.h" +#include "lwip/etharp.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/ip.h" +#if ENABLE_LOOPBACK +#if LWIP_NETIF_LOOPBACK_MULTITHREADING +#include "lwip/tcpip.h" +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +#include "netif/ethernet.h" + +#if LWIP_AUTOIP +#include "lwip/autoip.h" +#endif /* LWIP_AUTOIP */ +#if LWIP_DHCP +#include "lwip/dhcp.h" +#endif /* LWIP_DHCP */ +#if LWIP_IPV6_DHCP6 +#include "lwip/dhcp6.h" +#endif /* LWIP_IPV6_DHCP6 */ +#if LWIP_IPV6_MLD +#include "lwip/mld6.h" +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6 +#include "lwip/nd6.h" +#endif + +#if LWIP_NETIF_STATUS_CALLBACK +#define NETIF_STATUS_CALLBACK(n) do{ if (n->status_callback) { (n->status_callback)(n); }}while(0) +#else +#define NETIF_STATUS_CALLBACK(n) +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_LINK_CALLBACK +#define NETIF_LINK_CALLBACK(n) do{ if (n->link_callback) { (n->link_callback)(n); }}while(0) +#else +#define NETIF_LINK_CALLBACK(n) +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +static netif_ext_callback_t *ext_callback; +#endif + +#if !LWIP_SINGLE_NETIF +struct netif *netif_list; +#endif /* !LWIP_SINGLE_NETIF */ +struct netif *netif_default; + +#define netif_index_to_num(index) ((index) - 1) +static u8_t netif_num; + +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +static u8_t netif_client_id; +#endif + +#define NETIF_REPORT_TYPE_IPV4 0x01 +#define NETIF_REPORT_TYPE_IPV6 0x02 +static void netif_issue_reports(struct netif *netif, u8_t report_type); + +#if LWIP_IPV6 +static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr); +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 +static err_t netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4 */ + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t *addr); +#endif +#if LWIP_IPV6 +static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr); +#endif + + +static struct netif loop_netif; + +/** + * Initialize a lwip network interface structure for a loopback interface + * + * @param netif the lwip network interface structure for this loopif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + */ +static err_t +netif_loopif_init(struct netif *netif) +{ + LWIP_ASSERT("netif_loopif_init: invalid netif", netif != NULL); + + /* initialize the snmp variables and counters inside the struct netif + * ifSpeed: no assumption can be made! 
+ */ + MIB2_INIT_NETIF(netif, snmp_ifType_softwareLoopback, 0); + + netif->name[0] = 'l'; + netif->name[1] = 'o'; +#if LWIP_IPV4 + netif->output = netif_loop_output_ipv4; +#endif +#if LWIP_IPV6 + netif->output_ip6 = netif_loop_output_ipv6; +#endif +#if LWIP_LOOPIF_MULTICAST + netif_set_flags(netif, NETIF_FLAG_IGMP); +#endif + NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_DISABLE_ALL); + return ERR_OK; +} +#endif /* LWIP_HAVE_LOOPIF */ + +void +netif_init(void) +{ +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +#define LOOPIF_ADDRINIT &loop_ipaddr, &loop_netmask, &loop_gw, + ip4_addr_t loop_ipaddr, loop_netmask, loop_gw; + IP4_ADDR(&loop_gw, 127, 0, 0, 1); + IP4_ADDR(&loop_ipaddr, 127, 0, 0, 1); + IP4_ADDR(&loop_netmask, 255, 0, 0, 0); +#else /* LWIP_IPV4 */ +#define LOOPIF_ADDRINIT +#endif /* LWIP_IPV4 */ + +#if NO_SYS + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, ip_input); +#else /* NO_SYS */ + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, tcpip_input); +#endif /* NO_SYS */ + +#if LWIP_IPV6 + IP_ADDR6_HOST(loop_netif.ip6_addr, 0, 0, 0, 0x00000001UL); + loop_netif.ip6_addr_state[0] = IP6_ADDR_VALID; +#endif /* LWIP_IPV6 */ + + netif_set_link_up(&loop_netif); + netif_set_up(&loop_netif); + +#endif /* LWIP_HAVE_LOOPIF */ +} + +/** + * @ingroup lwip_nosys + * Forwards a received packet for input processing with + * ethernet_input() or ip_input() depending on netif flags. + * Don't call directly, pass to netif_add() and call + * netif->input(). + * Only works if the netif driver correctly sets + * NETIF_FLAG_ETHARP and/or NETIF_FLAG_ETHERNET flag! + */ +err_t +netif_input(struct pbuf *p, struct netif *inp) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_input: invalid pbuf", p != NULL); + LWIP_ASSERT("netif_input: invalid netif", inp != NULL); + +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return ethernet_input(p, inp); + } else +#endif /* LWIP_ETHERNET */ + return ip_input(p, inp); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. + * + * Same as @ref netif_add but without IPv4 addresses + */ +struct netif * +netif_add_noaddr(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input) +{ + return netif_add(netif, +#if LWIP_IPV4 + NULL, NULL, NULL, +#endif /* LWIP_IPV4*/ + state, init, input); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. + * + * @param netif a pre-allocated netif structure + * @param ipaddr IP address for the new netif + * @param netmask network mask for the new netif + * @param gw default gateway IP address for the new netif + * @param state opaque data passed to the new netif + * @param init callback function that initializes the interface + * @param input callback function that is called to pass + * ingress packets up in the protocol layer stack.\n + * It is recommended to use a function that passes the input directly + * to the stack (netif_input(), NO_SYS=1 mode) or via sending a + * message to TCPIP thread (tcpip_input(), NO_SYS=0 mode).\n + * These functions use netif flags NETIF_FLAG_ETHARP and NETIF_FLAG_ETHERNET + * to decide whether to forward to ethernet_input() or ip_input(). 
+ * In other words, the functions only work when the netif + * driver is implemented correctly!\n + * Most members of struct netif should be be initialized by the + * netif init function = netif driver (init parameter of this function).\n + * IPv6: Don't forget to call netif_create_ip6_linklocal_address() after + * setting the MAC address in struct netif.hwaddr + * (IPv6 requires a link-local address). + * + * @return netif, or NULL if failed. + */ +struct netif * +netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ +#if LWIP_IPV6 + s8_t i; +#endif + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_SINGLE_NETIF + if (netif_default != NULL) { + LWIP_ASSERT("single netif already set", 0); + return NULL; + } +#endif + + LWIP_ERROR("netif_add: invalid netif", netif != NULL, return NULL); + LWIP_ERROR("netif_add: No init function given", init != NULL, return NULL); + +#if LWIP_IPV4 + if (ipaddr == NULL) { + ipaddr = ip_2_ip4(IP4_ADDR_ANY); + } + if (netmask == NULL) { + netmask = ip_2_ip4(IP4_ADDR_ANY); + } + if (gw == NULL) { + gw = ip_2_ip4(IP4_ADDR_ANY); + } + + /* reset new interface configuration state */ + ip_addr_set_zero_ip4(&netif->ip_addr); + ip_addr_set_zero_ip4(&netif->netmask); + ip_addr_set_zero_ip4(&netif->gw); + netif->output = netif_null_output_ip4; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + ip_addr_set_zero_ip6(&netif->ip6_addr[i]); + netif->ip6_addr_state[i] = IP6_ADDR_INVALID; +#if LWIP_IPV6_ADDRESS_LIFETIMES + netif->ip6_addr_valid_life[i] = IP6_ADDR_LIFE_STATIC; + netif->ip6_addr_pref_life[i] = IP6_ADDR_LIFE_STATIC; +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + } + netif->output_ip6 = netif_null_output_ip6; +#endif /* LWIP_IPV6 */ + NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_ENABLE_ALL); + netif->mtu = 0; + netif->flags = 0; +#ifdef netif_get_client_data + memset(netif->client_data, 0, sizeof(netif->client_data)); +#endif /* LWIP_NUM_NETIF_CLIENT_DATA */ +#if LWIP_IPV6 +#if LWIP_IPV6_AUTOCONFIG + /* IPv6 address autoconfiguration not enabled by default */ + netif->ip6_autoconfig_enabled = 0; +#endif /* LWIP_IPV6_AUTOCONFIG */ + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ +#if LWIP_NETIF_STATUS_CALLBACK + netif->status_callback = NULL; +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_LINK_CALLBACK + netif->link_callback = NULL; +#endif /* LWIP_NETIF_LINK_CALLBACK */ +#if LWIP_IGMP + netif->igmp_mac_filter = NULL; +#endif /* LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD + netif->mld_mac_filter = NULL; +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ +#if ENABLE_LOOPBACK + netif->loop_first = NULL; + netif->loop_last = NULL; +#endif /* ENABLE_LOOPBACK */ + + /* remember netif specific state information data */ + netif->state = state; + netif->num = netif_num; + netif->input = input; + + NETIF_RESET_HINTS(netif); +#if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS + netif->loop_cnt_current = 0; +#endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */ + +#if LWIP_IPV4 + netif_set_addr(netif, ipaddr, netmask, gw); +#endif /* LWIP_IPV4 */ + + /* call user specified initialization function for netif */ + if (init(netif) != ERR_OK) { + return NULL; + } +#if LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES + /* Initialize the MTU for IPv6 to the one set by the netif driver. + This can be updated later by RA. 
*/ + netif->mtu6 = netif->mtu; +#endif /* LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES */ + +#if !LWIP_SINGLE_NETIF + /* Assign a unique netif number in the range [0..254], so that (num+1) can + serve as an interface index that fits in a u8_t. + We assume that the new netif has not yet been added to the list here. + This algorithm is O(n^2), but that should be OK for lwIP. + */ + { + struct netif *netif2; + int num_netifs; + do { + if (netif->num == 255) { + netif->num = 0; + } + num_netifs = 0; + for (netif2 = netif_list; netif2 != NULL; netif2 = netif2->next) { + LWIP_ASSERT("netif already added", netif2 != netif); + num_netifs++; + LWIP_ASSERT("too many netifs, max. supported number is 255", num_netifs <= 255); + if (netif2->num == netif->num) { + netif->num++; + break; + } + } + } while (netif2 != NULL); + } + if (netif->num == 254) { + netif_num = 0; + } else { + netif_num = (u8_t)(netif->num + 1); + } + + /* add this netif to the list */ + netif->next = netif_list; + netif_list = netif; +#endif /* "LWIP_SINGLE_NETIF */ + mib2_netif_added(netif); + +#if LWIP_IGMP + /* start IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_start(netif); + } +#endif /* LWIP_IGMP */ + + LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP", + netif->name[0], netif->name[1])); +#if LWIP_IPV4 + LWIP_DEBUGF(NETIF_DEBUG, (" addr ")); + ip4_addr_debug_print(NETIF_DEBUG, ipaddr); + LWIP_DEBUGF(NETIF_DEBUG, (" netmask ")); + ip4_addr_debug_print(NETIF_DEBUG, netmask); + LWIP_DEBUGF(NETIF_DEBUG, (" gw ")); + ip4_addr_debug_print(NETIF_DEBUG, gw); +#endif /* LWIP_IPV4 */ + LWIP_DEBUGF(NETIF_DEBUG, ("\n")); + + netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_ADDED, NULL); + + return netif; +} + +static void +netif_do_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ +#if LWIP_TCP + tcp_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_TCP */ +#if LWIP_UDP + udp_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_UDP */ +#if LWIP_RAW + raw_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_RAW */ +} + +#if LWIP_IPV4 +static int +netif_do_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr, ip_addr_t *old_addr) +{ + LWIP_ASSERT("invalid pointer", ipaddr != NULL); + LWIP_ASSERT("invalid pointer", old_addr != NULL); + + /* address is actually being changed? 
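A hedged sketch of the usual netif_add() call sequence for an IPv4 interface under an RTOS; ethernetif_init stands for whatever driver init function the port provides, and the addresses are placeholders:

#include "lwip/netif.h"
#include "lwip/tcpip.h"

extern err_t ethernetif_init(struct netif *netif); /* assumed port-specific driver init */

static struct netif gnetif;

void add_interface_sketch(void)
{
  ip4_addr_t ipaddr, netmask, gw;

  IP4_ADDR(&ipaddr,  192, 168, 1, 10);
  IP4_ADDR(&netmask, 255, 255, 255, 0);
  IP4_ADDR(&gw,      192, 168, 1, 1);

  /* With an OS, ingress packets go through tcpip_input(); with NO_SYS=1
     pass netif_input() instead, as described in the netif_add() comment. */
  netif_add(&gnetif, &ipaddr, &netmask, &gw, NULL, ethernetif_init, tcpip_input);
  netif_set_default(&gnetif);
  netif_set_up(&gnetif);
}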
*/ + if (ip4_addr_cmp(ipaddr, netif_ip4_addr(netif)) == 0) { + ip_addr_t new_addr; + *ip_2_ip4(&new_addr) = *ipaddr; + IP_SET_TYPE_VAL(new_addr, IPADDR_TYPE_V4); + + ip_addr_copy(*old_addr, *netif_ip_addr4(netif)); + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: netif address being changed\n")); + netif_do_ip_addr_changed(old_addr, &new_addr); + + mib2_remove_ip4(netif); + mib2_remove_route_ip4(0, netif); + /* set new IP address to netif */ + ip4_addr_set(ip_2_ip4(&netif->ip_addr), ipaddr); + IP_SET_TYPE_VAL(netif->ip_addr, IPADDR_TYPE_V4); + mib2_add_ip4(netif); + mib2_add_route_ip4(0, netif); + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4); + + NETIF_STATUS_CALLBACK(netif); + return 1; /* address changed */ + } + return 0; /* address unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the IP address of a network interface + * + * @param netif the network interface to change + * @param ipaddr the new IP address + * + * @note call netif_set_addr() if you also want to change netmask and + * default gateway + */ +void +netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr) +{ + ip_addr_t old_addr; + + LWIP_ERROR("netif_set_ipaddr: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + + LWIP_ASSERT_CORE_LOCKED(); + + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_address = &old_addr; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_ADDRESS_CHANGED, &args); +#endif + } +} + +static int +netif_do_set_netmask(struct netif *netif, const ip4_addr_t *netmask, ip_addr_t *old_nm) +{ + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(netmask, netif_ip4_netmask(netif)) == 0) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + LWIP_ASSERT("invalid pointer", old_nm != NULL); + ip_addr_copy(*old_nm, *netif_ip_netmask4(netif)); +#else + LWIP_UNUSED_ARG(old_nm); +#endif + mib2_remove_route_ip4(0, netif); + /* set new netmask to netif */ + ip4_addr_set(ip_2_ip4(&netif->netmask), netmask); + IP_SET_TYPE_VAL(netif->netmask, IPADDR_TYPE_V4); + mib2_add_route_ip4(0, netif); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: netmask of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_netmask(netif)), + ip4_addr2_16(netif_ip4_netmask(netif)), + ip4_addr3_16(netif_ip4_netmask(netif)), + ip4_addr4_16(netif_ip4_netmask(netif)))); + return 1; /* netmask changed */ + } + return 0; /* netmask unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the netmask of a network interface + * + * @param netif the network interface to change + * @param netmask the new netmask + * + * @note call netif_set_addr() if you also want to change ip address and + * default gateway + */ +void +netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + ip_addr_t old_nm_val; + ip_addr_t *old_nm = &old_nm_val; +#else + ip_addr_t *old_nm = NULL; +#endif + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_netmask: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + + if (netif_do_set_netmask(netif, netmask, old_nm)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_netmask = old_nm; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_NETMASK_CHANGED, &args); +#endif + } +} + +static int +netif_do_set_gw(struct netif *netif, const ip4_addr_t *gw, ip_addr_t *old_gw) +{ + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(gw, netif_ip4_gw(netif)) == 0) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + LWIP_ASSERT("invalid pointer", old_gw != NULL); + ip_addr_copy(*old_gw, *netif_ip_gw4(netif)); +#else + LWIP_UNUSED_ARG(old_gw); +#endif + + ip4_addr_set(ip_2_ip4(&netif->gw), gw); + IP_SET_TYPE_VAL(netif->gw, IPADDR_TYPE_V4); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: GW address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_gw(netif)), + ip4_addr2_16(netif_ip4_gw(netif)), + ip4_addr3_16(netif_ip4_gw(netif)), + ip4_addr4_16(netif_ip4_gw(netif)))); + return 1; /* gateway changed */ + } + return 0; /* gateway unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the default gateway for a network interface + * + * @param netif the network interface to change + * @param gw the new default gateway + * + * @note call netif_set_addr() if you also want to change ip address and netmask + */ +void +netif_set_gw(struct netif *netif, const ip4_addr_t *gw) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + ip_addr_t old_gw_val; + ip_addr_t *old_gw = &old_gw_val; +#else + ip_addr_t *old_gw = NULL; +#endif + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_gw: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + if (netif_do_set_gw(netif, gw, old_gw)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_gw = old_gw; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_GATEWAY_CHANGED, &args); +#endif + } +} + +/** + * @ingroup netif_ip4 + * Change IP address configuration for a network interface (including netmask + * and default gateway). 
+ * + * @param netif the network interface to change + * @param ipaddr the new IP address + * @param netmask the new netmask + * @param gw the new default gateway + */ +void +netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_nsc_reason_t change_reason = LWIP_NSC_NONE; + netif_ext_callback_args_t cb_args; + ip_addr_t old_nm_val; + ip_addr_t old_gw_val; + ip_addr_t *old_nm = &old_nm_val; + ip_addr_t *old_gw = &old_gw_val; +#else + ip_addr_t *old_nm = NULL; + ip_addr_t *old_gw = NULL; +#endif + ip_addr_t old_addr; + int remove; + + LWIP_ASSERT_CORE_LOCKED(); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + remove = ip4_addr_isany(ipaddr); + if (remove) { + /* when removing an address, we have to remove it *before* changing netmask/gw + to ensure that tcp RST segment can be sent correctly */ + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED; + cb_args.ipv4_changed.old_address = &old_addr; +#endif + } + } + if (netif_do_set_netmask(netif, netmask, old_nm)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_NETMASK_CHANGED; + cb_args.ipv4_changed.old_netmask = old_nm; +#endif + } + if (netif_do_set_gw(netif, gw, old_gw)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_GATEWAY_CHANGED; + cb_args.ipv4_changed.old_gw = old_gw; +#endif + } + if (!remove) { + /* set ipaddr last to ensure netmask/gw have been set when status callback is called */ + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED; + cb_args.ipv4_changed.old_address = &old_addr; +#endif + } + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + if (change_reason != LWIP_NSC_NONE) { + change_reason |= LWIP_NSC_IPV4_SETTINGS_CHANGED; + netif_invoke_ext_callback(netif, change_reason, &cb_args); + } +#endif +} +#endif /* LWIP_IPV4*/ + +/** + * @ingroup netif + * Remove a network interface from the list of lwIP netifs. + * + * @param netif the network interface to remove + */ +void +netif_remove(struct netif *netif) +{ +#if LWIP_IPV6 + int i; +#endif + + LWIP_ASSERT_CORE_LOCKED(); + + if (netif == NULL) { + return; + } + + netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_REMOVED, NULL); + +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { + netif_do_ip_addr_changed(netif_ip_addr4(netif), NULL); + } + +#if LWIP_IGMP + /* stop IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_stop(netif); + } +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + netif_do_ip_addr_changed(netif_ip_addr6(netif, i), NULL); + } + } +#if LWIP_IPV6_MLD + /* stop MLD processing */ + mld6_stop(netif); +#endif /* LWIP_IPV6_MLD */ +#endif /* LWIP_IPV6 */ + if (netif_is_up(netif)) { + /* set netif down before removing (call callback function) */ + netif_set_down(netif); + } + + mib2_remove_ip4(netif); + + /* this netif is default? */ + if (netif_default == netif) { + /* reset default netif */ + netif_set_default(NULL); + } +#if !LWIP_SINGLE_NETIF + /* is it the first netif? 
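netif_set_addr() above changes address, netmask and gateway in one call and orders the updates so that removing an address still lets TCP send RST segments first. A minimal hedged sketch with placeholder addresses:

#include "lwip/netif.h"

void change_address_sketch(struct netif *netif)
{
  ip4_addr_t ipaddr, netmask, gw;

  IP4_ADDR(&ipaddr,  10, 0, 0, 2);
  IP4_ADDR(&netmask, 255, 255, 255, 0);
  IP4_ADDR(&gw,      10, 0, 0, 1);

  /* updates all three values; the extended callback fires once with a
     combined change reason */
  netif_set_addr(netif, &ipaddr, &netmask, &gw);

  /* passing IP4_ADDR_ANY4 (or NULL) as the address removes the binding */
  netif_set_addr(netif, IP4_ADDR_ANY4, &netmask, &gw);
}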
*/ + if (netif_list == netif) { + netif_list = netif->next; + } else { + /* look for netif further down the list */ + struct netif *tmp_netif; + NETIF_FOREACH(tmp_netif) { + if (tmp_netif->next == netif) { + tmp_netif->next = netif->next; + break; + } + } + if (tmp_netif == NULL) { + return; /* netif is not on the list */ + } + } +#endif /* !LWIP_SINGLE_NETIF */ + mib2_netif_removed(netif); +#if LWIP_NETIF_REMOVE_CALLBACK + if (netif->remove_callback) { + netif->remove_callback(netif); + } +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + LWIP_DEBUGF( NETIF_DEBUG, ("netif_remove: removed netif\n") ); +} + +/** + * @ingroup netif + * Set a network interface as the default network interface + * (used to output all packets for which no specific route is found) + * + * @param netif the default network interface + */ +void +netif_set_default(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif == NULL) { + /* remove default route */ + mib2_remove_route_ip4(1, netif); + } else { + /* install default route */ + mib2_add_route_ip4(1, netif); + } + netif_default = netif; + LWIP_DEBUGF(NETIF_DEBUG, ("netif: setting default interface %c%c\n", + netif ? netif->name[0] : '\'', netif ? netif->name[1] : '\'')); +} + +/** + * @ingroup netif + * Bring an interface up, available for processing + * traffic. + */ +void +netif_set_up(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_up: invalid netif", netif != NULL, return); + + if (!(netif->flags & NETIF_FLAG_UP)) { + netif_set_flags(netif, NETIF_FLAG_UP); + + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + + NETIF_STATUS_CALLBACK(netif); + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.status_changed.state = 1; + netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args); + } +#endif + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6); +#if LWIP_IPV6 + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ + } +} + +/** Send ARP/IGMP/MLD/RS events, e.g. on link-up/netif-up or addr-change + */ +static void +netif_issue_reports(struct netif *netif, u8_t report_type) +{ + LWIP_ASSERT("netif_issue_reports: invalid netif", netif != NULL); + + /* Only send reports when both link and admin states are up */ + if (!(netif->flags & NETIF_FLAG_LINK_UP) || + !(netif->flags & NETIF_FLAG_UP)) { + return; + } + +#if LWIP_IPV4 + if ((report_type & NETIF_REPORT_TYPE_IPV4) && + !ip4_addr_isany_val(*netif_ip4_addr(netif))) { +#if LWIP_ARP + /* For Ethernet network interfaces, we would like to send a "gratuitous ARP" */ + if (netif->flags & (NETIF_FLAG_ETHARP)) { + etharp_gratuitous(netif); + } +#endif /* LWIP_ARP */ + +#if LWIP_IGMP + /* resend IGMP memberships */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_report_groups(netif); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 + if (report_type & NETIF_REPORT_TYPE_IPV6) { +#if LWIP_IPV6_MLD + /* send mld memberships */ + mld6_report_groups(netif); +#endif /* LWIP_IPV6_MLD */ + } +#endif /* LWIP_IPV6 */ +} + +/** + * @ingroup netif + * Bring an interface down, disabling any traffic processing. 
+ */ +void +netif_set_down(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_down: invalid netif", netif != NULL, return); + + if (netif->flags & NETIF_FLAG_UP) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.status_changed.state = 0; + netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args); + } +#endif + + netif_clear_flags(netif, NETIF_FLAG_UP); + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + +#if LWIP_IPV4 && LWIP_ARP + if (netif->flags & NETIF_FLAG_ETHARP) { + etharp_cleanup_netif(netif); + } +#endif /* LWIP_IPV4 && LWIP_ARP */ + +#if LWIP_IPV6 + nd6_cleanup_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_STATUS_CALLBACK(netif); + } +} + +#if LWIP_NETIF_STATUS_CALLBACK +/** + * @ingroup netif + * Set callback to be called when interface is brought up/down or address is changed while up + */ +void +netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->status_callback = status_callback; + } +} +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_REMOVE_CALLBACK +/** + * @ingroup netif + * Set callback to be called when the interface has been removed + */ +void +netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->remove_callback = remove_callback; + } +} +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +/** + * @ingroup netif + * Called by a driver when its link goes up + */ +void +netif_set_link_up(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_link_up: invalid netif", netif != NULL, return); + + if (!(netif->flags & NETIF_FLAG_LINK_UP)) { + netif_set_flags(netif, NETIF_FLAG_LINK_UP); + +#if LWIP_DHCP + dhcp_network_changed(netif); +#endif /* LWIP_DHCP */ + +#if LWIP_AUTOIP + autoip_network_changed(netif); +#endif /* LWIP_AUTOIP */ + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6); +#if LWIP_IPV6 + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_LINK_CALLBACK(netif); +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.link_changed.state = 1; + netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args); + } +#endif + } +} + +/** + * @ingroup netif + * Called by a driver when its link goes down + */ +void +netif_set_link_down(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_link_down: invalid netif", netif != NULL, return); + + if (netif->flags & NETIF_FLAG_LINK_UP) { + netif_clear_flags(netif, NETIF_FLAG_LINK_UP); + NETIF_LINK_CALLBACK(netif); +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.link_changed.state = 0; + netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args); + } +#endif + } +} + +#if LWIP_NETIF_LINK_CALLBACK +/** + * @ingroup netif + * Set callback to be called when link is brought up/down + */ +void +netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->link_callback = link_callback; + } +} +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if ENABLE_LOOPBACK +/** + * @ingroup netif + * Send an IP packet to be received on the same netif (loopif-like). + * The pbuf is simply copied and handed back to netif->input. + * In multithreaded mode, this is done directly since netif->input must put + * the packet on a queue. 
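A hedged sketch of registering the per-netif callbacks guarded by LWIP_NETIF_STATUS_CALLBACK and LWIP_NETIF_LINK_CALLBACK above; the handler body is illustrative only:

#include <stdio.h>
#include "lwip/netif.h"

static void on_netif_status(struct netif *netif)
{
  if (netif_is_up(netif)) {
    /* ip4addr_ntoa() uses a static buffer; fine for a debug print */
    printf("netif %c%c up, ip=%s\n", netif->name[0], netif->name[1],
           ip4addr_ntoa(netif_ip4_addr(netif)));
  } else {
    printf("netif %c%c down\n", netif->name[0], netif->name[1]);
  }
}

void register_callbacks_sketch(struct netif *netif)
{
  netif_set_status_callback(netif, on_netif_status); /* up/down or address change */
  netif_set_link_callback(netif, on_netif_status);   /* PHY link up/down */
}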
+ * In callback mode, the packet is put on an internal queue and is fed to + * netif->input by netif_poll(). + * + * @param netif the lwip network interface structure + * @param p the (IP) packet to 'send' + * @return ERR_OK if the packet has been sent + * ERR_MEM if the pbuf used to copy the packet couldn't be allocated + */ +err_t +netif_loop_output(struct netif *netif, struct pbuf *p) +{ + struct pbuf *r; + err_t err; + struct pbuf *last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t clen = 0; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + u8_t schedule_poll = 0; +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("netif_loop_output: invalid netif", netif != NULL); + LWIP_ASSERT("netif_loop_output: invalid pbuf", p != NULL); + + /* Allocate a new pbuf */ + r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); + if (r == NULL) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } +#if LWIP_LOOPBACK_MAX_PBUFS + clen = pbuf_clen(r); + /* check for overflow or too many pbuf on queue */ + if (((netif->loop_cnt_current + clen) < netif->loop_cnt_current) || + ((netif->loop_cnt_current + clen) > LWIP_MIN(LWIP_LOOPBACK_MAX_PBUFS, 0xFFFF))) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } + netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current + clen); +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* Copy the whole pbuf queue p into the single pbuf r */ + if ((err = pbuf_copy(r, p)) != ERR_OK) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return err; + } + + /* Put the packet on a linked list which gets emptied through calling + netif_poll(). 
*/ + + /* let last point to the last pbuf in chain r */ + for (last = r; last->next != NULL; last = last->next) { + /* nothing to do here, just get to the last pbuf */ + } + + SYS_ARCH_PROTECT(lev); + if (netif->loop_first != NULL) { + LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL); + netif->loop_last->next = r; + netif->loop_last = last; + } else { + netif->loop_first = r; + netif->loop_last = last; +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* No existing packets queued, schedule poll */ + schedule_poll = 1; +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + } + SYS_ARCH_UNPROTECT(lev); + + LINK_STATS_INC(link.xmit); + MIB2_STATS_NETIF_ADD(stats_if, ifoutoctets, p->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifoutucastpkts); + +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* For multithreading environment, schedule a call to netif_poll */ + if (schedule_poll) { + tcpip_try_callback((tcpip_callback_fn)netif_poll, netif); + } +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + + return ERR_OK; +} + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t +netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t *addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +static err_t +netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV6 */ +#endif /* LWIP_HAVE_LOOPIF */ + + +/** + * Call netif_poll() in the main loop of your application. This is to prevent + * reentering non-reentrant functions like tcp_input(). Packets passed to + * netif_loop_output() are put on a list that is passed to netif->input() by + * netif_poll(). + */ +void +netif_poll(struct netif *netif) +{ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("netif_poll: invalid netif", netif != NULL); + + /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */ + SYS_ARCH_PROTECT(lev); + while (netif->loop_first != NULL) { + struct pbuf *in, *in_end; +#if LWIP_LOOPBACK_MAX_PBUFS + u8_t clen = 1; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + in = in_end = netif->loop_first; + while (in_end->len != in_end->tot_len) { + LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL); + in_end = in_end->next; +#if LWIP_LOOPBACK_MAX_PBUFS + clen++; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + } +#if LWIP_LOOPBACK_MAX_PBUFS + /* adjust the number of pbufs on queue */ + LWIP_ASSERT("netif->loop_cnt_current underflow", + ((netif->loop_cnt_current - clen) < netif->loop_cnt_current)); + netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current - clen); +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* 'in_end' now points to the last pbuf from 'in' */ + if (in_end == netif->loop_last) { + /* this was the last pbuf in the list */ + netif->loop_first = netif->loop_last = NULL; + } else { + /* pop the pbuf off the list */ + netif->loop_first = in_end->next; + LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL); + } + /* De-queue the pbuf from its successors on the 'loop_' list. 
*/ + in_end->next = NULL; + SYS_ARCH_UNPROTECT(lev); + + in->if_idx = netif_get_index(netif); + + LINK_STATS_INC(link.recv); + MIB2_STATS_NETIF_ADD(stats_if, ifinoctets, in->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifinucastpkts); + /* loopback packets are always IP packets! */ + if (ip_input(in, netif) != ERR_OK) { + pbuf_free(in); + } + SYS_ARCH_PROTECT(lev); + } + SYS_ARCH_UNPROTECT(lev); +} + +#if !LWIP_NETIF_LOOPBACK_MULTITHREADING +/** + * Calls netif_poll() for every netif on the netif_list. + */ +void +netif_poll_all(void) +{ + struct netif *netif; + /* loop through netifs */ + NETIF_FOREACH(netif) { + netif_poll(netif); + } +} +#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +/** + * @ingroup netif_cd + * Allocate an index to store data in client_data member of struct netif. + * Returned value is an index in mentioned array. + * @see LWIP_NUM_NETIF_CLIENT_DATA + */ +u8_t +netif_alloc_client_data_id(void) +{ + u8_t result = netif_client_id; + netif_client_id++; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_NUM_NETIF_CLIENT_DATA > 256 +#error LWIP_NUM_NETIF_CLIENT_DATA must be <= 256 +#endif + LWIP_ASSERT("Increase LWIP_NUM_NETIF_CLIENT_DATA in lwipopts.h", result < LWIP_NUM_NETIF_CLIENT_DATA); + return (u8_t)(result + LWIP_NETIF_CLIENT_DATA_INDEX_MAX); +} +#endif + +#if LWIP_IPV6 +/** + * @ingroup netif_ip6 + * Change an IPv6 address of a network interface + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param addr6 the new IPv6 address + * + * @note call netif_ip6_addr_set_state() to set the address valid/temptative + */ +void +netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_ip6_addr_set: invalid netif", netif != NULL); + LWIP_ASSERT("netif_ip6_addr_set: invalid addr6", addr6 != NULL); + + netif_ip6_addr_set_parts(netif, addr_idx, addr6->addr[0], addr6->addr[1], + addr6->addr[2], addr6->addr[3]); +} + +/* + * Change an IPv6 address of a network interface (internal version taking 4 * u32_t) + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param i0 word0 of the new IPv6 address + * @param i1 word1 of the new IPv6 address + * @param i2 word2 of the new IPv6 address + * @param i3 word3 of the new IPv6 address + */ +void +netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3) +{ + ip_addr_t old_addr; + ip_addr_t new_ipaddr; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); + + ip6_addr_copy(*ip_2_ip6(&old_addr), *netif_ip6_addr(netif, addr_idx)); + IP_SET_TYPE_VAL(old_addr, IPADDR_TYPE_V6); + + /* address is actually being changed? */ + if ((ip_2_ip6(&old_addr)->addr[0] != i0) || (ip_2_ip6(&old_addr)->addr[1] != i1) || + (ip_2_ip6(&old_addr)->addr[2] != i2) || (ip_2_ip6(&old_addr)->addr[3] != i3)) { + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set: netif address being changed\n")); + + IP_ADDR6(&new_ipaddr, i0, i1, i2, i3); + ip6_addr_assign_zone(ip_2_ip6(&new_ipaddr), IP6_UNICAST, netif); + + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) { + netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); + } + /* @todo: remove/readd mib2 ip6 entries? 
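The loopback queue filled by netif_loop_output() above is drained by netif_poll()/netif_poll_all(). A hedged sketch of where that call typically sits in a bare-metal (NO_SYS=1) main loop; the RX polling call is port-specific and only hinted at:

#include "lwip/netif.h"
#include "lwip/timeouts.h"

void mainloop_sketch(void)
{
  for (;;) {
    /* ethernetif_input(&gnetif);   port-specific RX poll, omitted here */
    sys_check_timeouts();          /* run lwIP timers */
#if ENABLE_LOOPBACK && !LWIP_NETIF_LOOPBACK_MULTITHREADING
    netif_poll_all();              /* feed queued loopback packets to ip_input */
#endif
  }
}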
*/ + + ip_addr_copy(netif->ip6_addr[addr_idx], new_ipaddr); + + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) { + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); + NETIF_STATUS_CALLBACK(netif); + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.ipv6_set.addr_index = addr_idx; + args.ipv6_set.old_address = &old_addr; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_SET, &args); + } +#endif + } + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", + addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), + netif_ip6_addr_state(netif, addr_idx))); +} + +/** + * @ingroup netif_ip6 + * Change the state of an IPv6 address of a network interface + * (INVALID, TEMPTATIVE, PREFERRED, DEPRECATED, where TEMPTATIVE + * includes the number of checks done, see ip6_addr.h) + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param state the new IPv6 address state + */ +void +netif_ip6_addr_set_state(struct netif *netif, s8_t addr_idx, u8_t state) +{ + u8_t old_state; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); + + old_state = netif_ip6_addr_state(netif, addr_idx); + /* state is actually being changed? */ + if (old_state != state) { + u8_t old_valid = old_state & IP6_ADDR_VALID; + u8_t new_valid = state & IP6_ADDR_VALID; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set_state: netif address state being changed\n")); + +#if LWIP_IPV6_MLD + /* Reevaluate solicited-node multicast group membership. */ + if (netif->flags & NETIF_FLAG_MLD6) { + nd6_adjust_mld_membership(netif, addr_idx, state); + } +#endif /* LWIP_IPV6_MLD */ + + if (old_valid && !new_valid) { + /* address about to be removed by setting invalid */ + netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); + /* @todo: remove mib2 ip6 entries? */ + } + netif->ip6_addr_state[addr_idx] = state; + + if (!old_valid && new_valid) { + /* address added by setting valid */ + /* This is a good moment to check that the address is properly zoned. */ + IP6_ADDR_ZONECHECK_NETIF(netif_ip6_addr(netif, addr_idx), netif); + /* @todo: add mib2 ip6 entries? */ + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); + } + if ((old_state & ~IP6_ADDR_TENTATIVE_COUNT_MASK) != + (state & ~IP6_ADDR_TENTATIVE_COUNT_MASK)) { + /* address state has changed -> call the callback function */ + NETIF_STATUS_CALLBACK(netif); + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.ipv6_addr_state_changed.addr_index = addr_idx; + args.ipv6_addr_state_changed.old_state = old_state; + args.ipv6_addr_state_changed.address = netif_ip_addr6(netif, addr_idx); + netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_ADDR_STATE_CHANGED, &args); + } +#endif + } + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", + addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), + netif_ip6_addr_state(netif, addr_idx))); +} + +/** + * Checks if a specific local address is present on the netif and returns its + * index. Depending on its state, it may or may not be assigned to the + * interface (as per RFC terminology). + * + * The given address may or may not be zoned (i.e., have a zone index other + * than IP6_NO_ZONE). 
If the address is zoned, it must have the correct zone + * for the given netif, or no match will be found. + * + * @param netif the netif to check + * @param ip6addr the IPv6 address to find + * @return >= 0: address found, this is its index + * -1: address not found on this netif + */ +s8_t +netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr) +{ + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_get_ip6_addr_match: invalid netif", netif != NULL); + LWIP_ASSERT("netif_get_ip6_addr_match: invalid ip6addr", ip6addr != NULL); + +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_zone(ip6addr) && !ip6_addr_test_zone(ip6addr, netif)) { + return -1; /* wrong zone, no match */ + } +#endif /* LWIP_IPV6_SCOPES */ + + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp_zoneless(netif_ip6_addr(netif, i), ip6addr)) { + return i; + } + } + return -1; +} + +/** + * @ingroup netif_ip6 + * Create a link-local IPv6 address on a netif (stored in slot 0) + * + * @param netif the netif to create the address on + * @param from_mac_48bit if != 0, assume hwadr is a 48-bit MAC address (std conversion) + * if == 0, use hwaddr directly as interface ID + */ +void +netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit) +{ + u8_t i, addr_index; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_create_ip6_linklocal_address: invalid netif", netif != NULL); + + /* Link-local prefix. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[0] = PP_HTONL(0xfe800000ul); + ip_2_ip6(&netif->ip6_addr[0])->addr[1] = 0; + + /* Generate interface ID. */ + if (from_mac_48bit) { + /* Assume hwaddr is a 48-bit IEEE 802 MAC. Convert to EUI-64 address. Complement Group bit. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = lwip_htonl((((u32_t)(netif->hwaddr[0] ^ 0x02)) << 24) | + ((u32_t)(netif->hwaddr[1]) << 16) | + ((u32_t)(netif->hwaddr[2]) << 8) | + (0xff)); + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = lwip_htonl((u32_t)(0xfeul << 24) | + ((u32_t)(netif->hwaddr[3]) << 16) | + ((u32_t)(netif->hwaddr[4]) << 8) | + (netif->hwaddr[5])); + } else { + /* Use hwaddr directly as interface ID. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = 0; + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = 0; + + addr_index = 3; + for (i = 0; (i < 8) && (i < netif->hwaddr_len); i++) { + if (i == 4) { + addr_index--; + } + ip_2_ip6(&netif->ip6_addr[0])->addr[addr_index] |= lwip_htonl(((u32_t)(netif->hwaddr[netif->hwaddr_len - i - 1])) << (8 * (i & 0x03))); + } + } + + /* Set a link-local zone. Even though the zone is implied by the owning + * netif, setting the zone anyway has two important conceptual advantages: + * 1) it avoids the need for a ton of exceptions in internal code, allowing + * e.g. ip6_addr_cmp() to be used on local addresses; + * 2) the properly zoned address is visible externally, e.g. when any outside + * code enumerates available addresses or uses one to bind a socket. + * Any external code unaware of address scoping is likely to just ignore the + * zone field, so this should not create any compatibility problems. */ + ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[0]), IP6_UNICAST, netif); + + /* Set address state. */ +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* Will perform duplicate address detection (DAD). */ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_TENTATIVE); +#else + /* Consider address valid. 
*/ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_PREFERRED); +#endif /* LWIP_IPV6_AUTOCONFIG */ +} + +/** + * @ingroup netif_ip6 + * This function allows for the easy addition of a new IPv6 address to an interface. + * It takes care of finding an empty slot and then sets the address tentative + * (to make sure that all the subsequent processing happens). + * + * @param netif netif to add the address on + * @param ip6addr address to add + * @param chosen_idx if != NULL, the chosen IPv6 address index will be stored here + */ +err_t +netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx) +{ + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_add_ip6_address: invalid netif", netif != NULL); + LWIP_ASSERT("netif_add_ip6_address: invalid ip6addr", ip6addr != NULL); + + i = netif_get_ip6_addr_match(netif, ip6addr); + if (i >= 0) { + /* Address already added */ + if (chosen_idx != NULL) { + *chosen_idx = i; + } + return ERR_OK; + } + + /* Find a free slot. The first one is reserved for link-local addresses. */ + for (i = ip6_addr_islinklocal(ip6addr) ? 0 : 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) { + ip_addr_copy_from_ip6(netif->ip6_addr[i], *ip6addr); + ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[i]), IP6_UNICAST, netif); + netif_ip6_addr_set_state(netif, i, IP6_ADDR_TENTATIVE); + if (chosen_idx != NULL) { + *chosen_idx = i; + } + return ERR_OK; + } + } + + if (chosen_idx != NULL) { + *chosen_idx = -1; + } + return ERR_VAL; +} + +/** Dummy IPv6 output function for netifs not supporting IPv6 + */ +static err_t +netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(ipaddr); + + return ERR_IF; +} +#endif /* LWIP_IPV6 */ + +#if LWIP_IPV4 +/** Dummy IPv4 output function for netifs not supporting IPv4 + */ +static err_t +netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(ipaddr); + + return ERR_IF; +} +#endif /* LWIP_IPV4 */ + +/** +* @ingroup netif +* Return the interface index for the netif with name +* or NETIF_NO_INDEX if not found/on error +* +* @param name the name of the netif +*/ +u8_t +netif_name_to_index(const char *name) +{ + struct netif *netif = netif_find(name); + if (netif != NULL) { + return netif_get_index(netif); + } + /* No name found, return invalid index */ + return NETIF_NO_INDEX; +} + +/** +* @ingroup netif +* Return the interface name for the netif matching index +* or NULL if not found/on error +* +* @param idx the interface index of the netif +* @param name char buffer of at least NETIF_NAMESIZE bytes +*/ +char * +netif_index_to_name(u8_t idx, char *name) +{ + struct netif *netif = netif_get_by_index(idx); + + if (netif != NULL) { + name[0] = netif->name[0]; + name[1] = netif->name[1]; + lwip_itoa(&name[2], NETIF_NAMESIZE - 2, netif_index_to_num(idx)); + return name; + } + return NULL; +} + +/** +* @ingroup netif +* Return the interface for the netif index +* +* @param idx index of netif to find +*/ +struct netif * +netif_get_by_index(u8_t idx) +{ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + if (idx != NETIF_NO_INDEX) { + NETIF_FOREACH(netif) { + if (idx == netif_get_index(netif)) { + return netif; /* found! 
*/ + } + } + } + + return NULL; +} + +/** + * @ingroup netif + * Find a network interface by searching for its name + * + * @param name the name of the netif (like netif->name) plus concatenated number + * in ascii representation (e.g. 'en0') + */ +struct netif * +netif_find(const char *name) +{ + struct netif *netif; + u8_t num; + + LWIP_ASSERT_CORE_LOCKED(); + + if (name == NULL) { + return NULL; + } + + num = (u8_t)atoi(&name[2]); + + NETIF_FOREACH(netif) { + if (num == netif->num && + name[0] == netif->name[0] && + name[1] == netif->name[1]) { + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: found %c%c\n", name[0], name[1])); + return netif; + } + } + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: didn't find %c%c\n", name[0], name[1])); + return NULL; +} + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +/** + * @ingroup netif + * Add extended netif events listener + * @param callback pointer to listener structure + * @param fn callback function + */ +void +netif_add_ext_callback(netif_ext_callback_t *callback, netif_ext_callback_fn fn) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("callback must be != NULL", callback != NULL); + LWIP_ASSERT("fn must be != NULL", fn != NULL); + + callback->callback_fn = fn; + callback->next = ext_callback; + ext_callback = callback; +} + +/** + * @ingroup netif + * Remove extended netif events listener + * @param callback pointer to listener structure + */ +void +netif_remove_ext_callback(netif_ext_callback_t* callback) +{ + netif_ext_callback_t *last, *iter; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("callback must be != NULL", callback != NULL); + + if (ext_callback == NULL) { + return; + } + + if (callback == ext_callback) { + ext_callback = ext_callback->next; + } else { + last = ext_callback; + for (iter = ext_callback->next; iter != NULL; last = iter, iter = iter->next) { + if (iter == callback) { + LWIP_ASSERT("last != NULL", last != NULL); + last->next = callback->next; + callback->next = NULL; + return; + } + } + } +} + +/** + * Invoke extended netif status event + * @param netif netif that is affected by change + * @param reason change reason + * @param args depends on reason, see reason description + */ +void +netif_invoke_ext_callback(struct netif *netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t *args) +{ + netif_ext_callback_t *callback = ext_callback; + + LWIP_ASSERT("netif must be != NULL", netif != NULL); + + while (callback != NULL) { + callback->callback_fn(netif, reason, args); + callback = callback->next; + } +} +#endif /* LWIP_NETIF_EXT_STATUS_CALLBACK */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/pbuf.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/pbuf.c new file mode 100644 index 000000000..a209e0ca8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/pbuf.c @@ -0,0 +1,1514 @@ +/** + * @file + * Packet buffer management + */ + +/** + * @defgroup pbuf Packet buffers (PBUF) + * @ingroup infrastructure + * + * Packets are built from the pbuf data structure. It supports dynamic + * memory allocation for packet contents or can reference externally + * managed packet contents both in RAM and ROM. Quick allocation for + * incoming packets is provided through pools with fixed sized pbufs. + * + * A packet may span over multiple pbufs, chained as a singly linked + * list. This is called a "pbuf chain". + * + * Multiple packets may be queued, also using this singly linked list. + * This is called a "packet queue". 
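+ *
+ * A minimal traversal sketch, assuming 'p' is the (non-NULL) first pbuf of a
+ * single packet; it stops at the pbuf whose ->tot_len equals its ->len, i.e.
+ * the last pbuf of that packet (see below):
+ * @code
+ *   u16_t seen = 0;
+ *   const struct pbuf *q;
+ *   for (q = p; ; q = q->next) {
+ *     seen = (u16_t)(seen + q->len);
+ *     if (q->tot_len == q->len) {
+ *       break;
+ *     }
+ *   }
+ *   LWIP_ASSERT("walked exactly one packet", seen == p->tot_len);
+ * @endcode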
+ * + * So, a packet queue consists of one or more pbuf chains, each of + * which consist of one or more pbufs. CURRENTLY, PACKET QUEUES ARE + * NOT SUPPORTED!!! Use helper structs to queue multiple packets. + * + * The differences between a pbuf chain and a packet queue are very + * precise but subtle. + * + * The last pbuf of a packet has a ->tot_len field that equals the + * ->len field. It can be found by traversing the list. If the last + * pbuf of a packet has a ->next field other than NULL, more packets + * are on the queue. + * + * Therefore, looping through a pbuf of a single packet, has an + * loop end condition (tot_len == p->len), NOT (next == NULL). + * + * Example of custom pbuf usage: @ref zerocopyrx + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/sys.h" +#include "lwip/netif.h" +#if LWIP_TCP && TCP_QUEUE_OOSEQ +#include "lwip/priv/tcp_priv.h" +#endif +#if LWIP_CHECKSUM_ON_COPY +#include "lwip/inet_chksum.h" +#endif + +#include + +#define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) +/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically + aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. 
*/ +#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE) + +static const struct pbuf * +pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset); + +#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_IS_EMPTY() +#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +#if !NO_SYS +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL +#include "lwip/tcpip.h" +#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \ + if (tcpip_try_callback(pbuf_free_ooseq_callback, NULL) != ERR_OK) { \ + SYS_ARCH_PROTECT(old_level); \ + pbuf_free_ooseq_pending = 0; \ + SYS_ARCH_UNPROTECT(old_level); \ + } } while(0) +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +#endif /* !NO_SYS */ + +volatile u8_t pbuf_free_ooseq_pending; +#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty() + +/** + * Attempt to reclaim some memory from queued out-of-sequence TCP segments + * if we run out of pool pbufs. It's better to give priority to new packets + * if we're running out. + * + * This must be done in the correct thread context therefore this function + * can only be used with NO_SYS=0 and through tcpip_callback. + */ +#if !NO_SYS +static +#endif /* !NO_SYS */ +void +pbuf_free_ooseq(void) +{ + struct tcp_pcb *pcb; + SYS_ARCH_SET(pbuf_free_ooseq_pending, 0); + + for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) { + if (pcb->ooseq != NULL) { + /** Free the ooseq pbufs of one PCB only */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n")); + tcp_free_ooseq(pcb); + return; + } + } +} + +#if !NO_SYS +/** + * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq(). + */ +static void +pbuf_free_ooseq_callback(void *arg) +{ + LWIP_UNUSED_ARG(arg); + pbuf_free_ooseq(); +} +#endif /* !NO_SYS */ + +/** Queue a call to pbuf_free_ooseq if not already queued. */ +static void +pbuf_pool_is_empty(void) +{ +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL + SYS_ARCH_SET(pbuf_free_ooseq_pending, 1); +#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ + u8_t queued; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + queued = pbuf_free_ooseq_pending; + pbuf_free_ooseq_pending = 1; + SYS_ARCH_UNPROTECT(old_level); + + if (!queued) { + /* queue a call to pbuf_free_ooseq if not already queued */ + PBUF_POOL_FREE_OOSEQ_QUEUE_CALL(); + } +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +} +#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +/* Initialize members of struct pbuf after allocation */ +static void +pbuf_init_alloced_pbuf(struct pbuf *p, void *payload, u16_t tot_len, u16_t len, pbuf_type type, u8_t flags) +{ + p->next = NULL; + p->payload = payload; + p->tot_len = tot_len; + p->len = len; + p->type_internal = (u8_t)type; + p->flags = flags; + p->ref = 1; + p->if_idx = NETIF_NO_INDEX; +} + +/** + * @ingroup pbuf + * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type). + * + * The actual memory allocated for the pbuf is determined by the + * layer at which the pbuf is allocated and the requested size + * (from the size parameter). + * + * @param layer header size + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_RAM: buffer memory for pbuf is allocated as one large + * chunk. This includes protocol headers as well. + * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for + * protocol headers. 
Additional headers must be prepended + * by allocating another pbuf and chain in to the front of + * the ROM pbuf. It is assumed that the memory used is really + * similar to ROM in that it is immutable and will not be + * changed. Memory which is dynamic should generally not + * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. + * - PBUF_REF: no buffer memory is allocated for the pbuf, even for + * protocol headers. It is assumed that the pbuf is only + * being used in a single thread. If the pbuf gets queued, + * then pbuf_take should be called to copy the buffer. + * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from + * the pbuf pool that is allocated during pbuf_init(). + * + * @return the allocated pbuf. If multiple pbufs where allocated, this + * is the first pbuf of a pbuf chain. + */ +struct pbuf * +pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) +{ + struct pbuf *p; + u16_t offset = (u16_t)layer; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length)); + + switch (type) { + case PBUF_REF: /* fall through */ + case PBUF_ROM: + p = pbuf_alloc_reference(NULL, length, type); + break; + case PBUF_POOL: { + struct pbuf *q, *last; + u16_t rem_len; /* remaining length */ + p = NULL; + last = NULL; + rem_len = length; + do { + u16_t qlen; + q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); + if (q == NULL) { + PBUF_POOL_IS_EMPTY(); + /* free chain so far allocated */ + if (p) { + pbuf_free(p); + } + /* bail out unsuccessfully */ + return NULL; + } + qlen = LWIP_MIN(rem_len, (u16_t)(PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset))); + pbuf_init_alloced_pbuf(q, LWIP_MEM_ALIGN((void *)((u8_t *)q + SIZEOF_STRUCT_PBUF + offset)), + rem_len, qlen, type, 0); + LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned", + ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0); + LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT", + (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 ); + if (p == NULL) { + /* allocated head of pbuf chain (into p) */ + p = q; + } else { + /* make previous pbuf point to this pbuf */ + last->next = q; + } + last = q; + rem_len = (u16_t)(rem_len - qlen); + offset = 0; + } while (rem_len > 0); + break; + } + case PBUF_RAM: { + u16_t payload_len = (u16_t)(LWIP_MEM_ALIGN_SIZE(offset) + LWIP_MEM_ALIGN_SIZE(length)); + mem_size_t alloc_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF) + payload_len); + + /* bug #50040: Check for integer overflow when calculating alloc_len */ + if ((payload_len < LWIP_MEM_ALIGN_SIZE(length)) || + (alloc_len < LWIP_MEM_ALIGN_SIZE(length))) { + return NULL; + } + + /* If pbuf is to be allocated in RAM, allocate memory for it. */ + p = (struct pbuf *)mem_malloc(alloc_len); + if (p == NULL) { + return NULL; + } + pbuf_init_alloced_pbuf(p, LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)), + length, length, type, 0); + LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned", + ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); + break; + } + default: + LWIP_ASSERT("pbuf_alloc: erroneous type", 0); + return NULL; + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p)); + return p; +} + +/** + * @ingroup pbuf + * Allocates a pbuf for referenced data. + * Referenced data can be volatile (PBUF_REF) or long-lived (PBUF_ROM). + * + * The actual memory allocated for the pbuf is determined by the + * layer at which the pbuf is allocated and the requested size + * (from the size parameter). 
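+ *
+ * A minimal sketch, assuming the payload lives in immutable storage ('msg' is
+ * a placeholder; a real caller would hand 'p' to an output function before
+ * dropping its own reference):
+ * @code
+ *   static const char msg[] = "hello";
+ *   struct pbuf *p = pbuf_alloc_reference(LWIP_CONST_CAST(void *, msg),
+ *                                         (u16_t)(sizeof(msg) - 1), PBUF_ROM);
+ *   if (p != NULL) {
+ *     pbuf_free(p);
+ *   }
+ * @endcode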
+ * + * @param payload referenced payload + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_ROM: It is assumed that the memory used is really + * similar to ROM in that it is immutable and will not be + * changed. Memory which is dynamic should generally not + * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. + * - PBUF_REF: It is assumed that the pbuf is only + * being used in a single thread. If the pbuf gets queued, + * then pbuf_take should be called to copy the buffer. + * + * @return the allocated pbuf. + */ +struct pbuf * +pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type) +{ + struct pbuf *p; + LWIP_ASSERT("invalid pbuf_type", (type == PBUF_REF) || (type == PBUF_ROM)); + /* only allocate memory for the pbuf structure */ + p = (struct pbuf *)memp_malloc(MEMP_PBUF); + if (p == NULL) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_alloc_reference: Could not allocate MEMP_PBUF for PBUF_%s.\n", + (type == PBUF_ROM) ? "ROM" : "REF")); + return NULL; + } + pbuf_init_alloced_pbuf(p, payload, length, length, type, 0); + return p; +} + + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** + * @ingroup pbuf + * Initialize a custom pbuf (already allocated). + * Example of custom pbuf usage: @ref zerocopyrx + * + * @param l header size + * @param length size of the pbuf's payload + * @param type type of the pbuf (only used to treat the pbuf accordingly, as + * this function allocates no memory) + * @param p pointer to the custom pbuf to initialize (already allocated) + * @param payload_mem pointer to the buffer that is used for payload and headers, + * must be at least big enough to hold 'length' plus the header size, + * may be NULL if set later. + * ATTENTION: The caller is responsible for correct alignment of this buffer!! + * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least + * big enough to hold 'length' plus the header size + */ +struct pbuf * +pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p, + void *payload_mem, u16_t payload_mem_len) +{ + u16_t offset = (u16_t)l; + void *payload; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length)); + + if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length)); + return NULL; + } + + if (payload_mem != NULL) { + payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset); + } else { + payload = NULL; + } + pbuf_init_alloced_pbuf(&p->pbuf, payload, length, length, type, PBUF_FLAG_IS_CUSTOM); + return &p->pbuf; +} +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** + * @ingroup pbuf + * Shrink a pbuf chain to a desired length. + * + * @param p pbuf to shrink. + * @param new_len desired new length of pbuf chain + * + * Depending on the desired length, the first few pbufs in a chain might + * be skipped and left unchanged. The new last pbuf in the chain will be + * resized, and any remaining pbufs will be freed. + * + * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted. + * @note May not be called on a packet queue. + * + * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain). 
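+ *
+ * A minimal shrink sketch, assuming 'p' is a PBUF_RAM pbuf (or chain) whose
+ * tot_len is at least 20 bytes (the length is arbitrary):
+ * @code
+ *   pbuf_realloc(p, 20);
+ *   LWIP_ASSERT("chain shrunk", p->tot_len == 20);
+ * @endcode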
+ */ +void +pbuf_realloc(struct pbuf *p, u16_t new_len) +{ + struct pbuf *q; + u16_t rem_len; /* remaining length */ + u16_t shrink; + + LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL); + + /* desired length larger than current length? */ + if (new_len >= p->tot_len) { + /* enlarging not yet supported */ + return; + } + + /* the pbuf chain grows by (new_len - p->tot_len) bytes + * (which may be negative in case of shrinking) */ + shrink = (u16_t)(p->tot_len - new_len); + + /* first, step over any pbufs that should remain in the chain */ + rem_len = new_len; + q = p; + /* should this pbuf be kept? */ + while (rem_len > q->len) { + /* decrease remaining length by pbuf length */ + rem_len = (u16_t)(rem_len - q->len); + /* decrease total length indicator */ + q->tot_len = (u16_t)(q->tot_len - shrink); + /* proceed to next pbuf in chain */ + q = q->next; + LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL); + } + /* we have now reached the new last pbuf (in q) */ + /* rem_len == desired length for pbuf q */ + + /* shrink allocated memory for PBUF_RAM */ + /* (other types merely adjust their length fields */ + if (pbuf_match_allocsrc(q, PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) && (rem_len != q->len) +#if LWIP_SUPPORT_CUSTOM_PBUF + && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0) +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + ) { + /* reallocate and adjust the length of the pbuf that will be split */ + q = (struct pbuf *)mem_trim(q, (mem_size_t)(((u8_t *)q->payload - (u8_t *)q) + rem_len)); + LWIP_ASSERT("mem_trim returned q == NULL", q != NULL); + } + /* adjust length fields for new last pbuf */ + q->len = rem_len; + q->tot_len = q->len; + + /* any remaining pbufs in chain? */ + if (q->next != NULL) { + /* free remaining pbufs in chain */ + pbuf_free(q->next); + } + /* q is last packet in chain */ + q->next = NULL; + +} + +/** + * Adjusts the payload pointer to reveal headers in the payload. + * @see pbuf_add_header. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size. + * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types + * + * @return non-zero on failure, zero on success. + * + */ +static u8_t +pbuf_add_header_impl(struct pbuf *p, size_t header_size_increment, u8_t force) +{ + u16_t type_internal; + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((p == NULL) || (header_size_increment > 0xFFFF)) { + return 1; + } + if (header_size_increment == 0) { + return 0; + } + + increment_magnitude = (u16_t)header_size_increment; + /* Do not allow tot_len to wrap as a result. */ + if ((u16_t)(increment_magnitude + p->tot_len) < increment_magnitude) { + return 1; + } + + type_internal = p->type_internal; + + /* pbuf types containing payloads? */ + if (type_internal & PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS) { + /* set new payload pointer */ + payload = (u8_t *)p->payload - header_size_increment; + /* boundary check fails? */ + if ((u8_t *)payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_add_header: failed as %p < %p (not enough space for new header size)\n", + (void *)payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF))); + /* bail out unsuccessfully */ + return 1; + } + /* pbuf types referring to external payloads? */ + } else { + /* hide a header in the payload? */ + if (force) { + payload = (u8_t *)p->payload - header_size_increment; + } else { + /* cannot expand payload to front (yet!) 
+ * bail out unsuccessfully */ + return 1; + } + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_add_header: old %p new %p (%"U16_F")\n", + (void *)p->payload, (void *)payload, increment_magnitude)); + + /* modify pbuf fields */ + p->payload = payload; + p->len = (u16_t)(p->len + increment_magnitude); + p->tot_len = (u16_t)(p->tot_len + increment_magnitude); + + + return 0; +} + +/** + * Adjusts the payload pointer to reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * If header_size_increment is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_add_header(struct pbuf *p, size_t header_size_increment) +{ + return pbuf_add_header_impl(p, header_size_increment, 0); +} + +/** + * Same as @ref pbuf_add_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_add_header_force(struct pbuf *p, size_t header_size_increment) +{ + return pbuf_add_header_impl(p, header_size_increment, 1); +} + +/** + * Adjusts the payload pointer to hide headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * disappears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_decrement Number of bytes to decrement header size which + * decreases the size of the pbuf. + * If header_size_decrement is 0, this function does nothing and returns successful. + * @return non-zero on failure, zero on success. 
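+ *
+ * A minimal sketch, assuming 'p' holds a header of 'hdr_len' bytes at the
+ * start of its payload that has already been parsed (both names are
+ * placeholders):
+ * @code
+ *   u8_t failed = pbuf_remove_header(p, hdr_len);
+ *   LWIP_ASSERT("hdr_len must not exceed p->len", failed == 0);
+ * @endcode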
+ * + */ +u8_t +pbuf_remove_header(struct pbuf *p, size_t header_size_decrement) +{ + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((p == NULL) || (header_size_decrement > 0xFFFF)) { + return 1; + } + if (header_size_decrement == 0) { + return 0; + } + + increment_magnitude = (u16_t)header_size_decrement; + /* Check that we aren't going to move off the end of the pbuf */ + LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;); + + /* remember current payload pointer */ + payload = p->payload; + LWIP_UNUSED_ARG(payload); /* only used in LWIP_DEBUGF below */ + + /* increase payload pointer (guarded by length check above) */ + p->payload = (u8_t *)p->payload + header_size_decrement; + /* modify pbuf length fields */ + p->len = (u16_t)(p->len - increment_magnitude); + p->tot_len = (u16_t)(p->tot_len - increment_magnitude); + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_remove_header: old %p new %p (%"U16_F")\n", + (void *)payload, (void *)p->payload, increment_magnitude)); + + return 0; +} + +static u8_t +pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force) +{ + if (header_size_increment < 0) { + return pbuf_remove_header(p, (size_t) - header_size_increment); + } else { + return pbuf_add_header_impl(p, (size_t)header_size_increment, force); + } +} + +/** + * Adjusts the payload pointer to hide or reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * (dis)appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * (Using a negative value decreases the header size.) + * If header_size_increment is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_header(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 0); +} + +/** + * Same as pbuf_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_header_force(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 1); +} + +/** Similar to pbuf_header(-size) but de-refs header pbufs for (size >= p->len) + * + * @param q pbufs to operate on + * @param size The number of bytes to remove from the beginning of the pbuf list. + * While size >= p->len, pbufs are freed. + * ATTENTION: this is the opposite direction as @ref pbuf_header, but + * takes an u16_t not s16_t! + * @return the new head pbuf + */ +struct pbuf * +pbuf_free_header(struct pbuf *q, u16_t size) +{ + struct pbuf *p = q; + u16_t free_left = size; + while (free_left && p) { + if (free_left >= p->len) { + struct pbuf *f = p; + free_left = (u16_t)(free_left - p->len); + p = p->next; + f->next = 0; + pbuf_free(f); + } else { + pbuf_remove_header(p, free_left); + free_left = 0; + } + } + return p; +} + +/** + * @ingroup pbuf + * Dereference a pbuf chain or queue and deallocate any no-longer-used + * pbufs at the head of this chain or queue. 
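+ *
+ * The usual pairing, as a minimal sketch (layer, length and type are chosen
+ * only for illustration):
+ * @code
+ *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 64, PBUF_RAM);
+ *   if (p != NULL) {
+ *     pbuf_free(p);
+ *   }
+ * @endcode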
+ * + * Decrements the pbuf reference count. If it reaches zero, the pbuf is + * deallocated. + * + * For a pbuf chain, this is repeated for each pbuf in the chain, + * up to the first pbuf which has a non-zero reference count after + * decrementing. So, when all reference counts are one, the whole + * chain is free'd. + * + * @param p The pbuf (chain) to be dereferenced. + * + * @return the number of pbufs that were de-allocated + * from the head of the chain. + * + * @note MUST NOT be called on a packet queue (Not verified to work yet). + * @note the reference counter of a pbuf equals the number of pointers + * that refer to the pbuf (or into the pbuf). + * + * @internal examples: + * + * Assuming existing chains a->b->c with the following reference + * counts, calling pbuf_free(a) results in: + * + * 1->2->3 becomes ...1->3 + * 3->3->3 becomes 2->3->3 + * 1->1->2 becomes ......1 + * 2->1->1 becomes 1->1->1 + * 1->1->1 becomes ....... + * + */ +u8_t +pbuf_free(struct pbuf *p) +{ + u8_t alloc_src; + struct pbuf *q; + u8_t count; + + if (p == NULL) { + LWIP_ASSERT("p != NULL", p != NULL); + /* if assertions are disabled, proceed with debug output */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_free(p == NULL) was called.\n")); + return 0; + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p)); + + PERF_START; + + count = 0; + /* de-allocate all consecutive pbufs from the head of the chain that + * obtain a zero reference count after decrementing*/ + while (p != NULL) { + LWIP_PBUF_REF_T ref; + SYS_ARCH_DECL_PROTECT(old_level); + /* Since decrementing ref cannot be guaranteed to be a single machine operation + * we must protect it. We put the new ref into a local variable to prevent + * further protection. */ + SYS_ARCH_PROTECT(old_level); + /* all pbufs in a chain are referenced at least once */ + LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0); + /* decrease reference count (number of pointers to pbuf) */ + ref = --(p->ref); + SYS_ARCH_UNPROTECT(old_level); + /* this pbuf is no longer referenced to? */ + if (ref == 0) { + /* remember next pbuf in chain for next iteration */ + q = p->next; + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p)); + alloc_src = pbuf_get_allocsrc(p); +#if LWIP_SUPPORT_CUSTOM_PBUF + /* is this a custom pbuf? */ + if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) { + struct pbuf_custom *pc = (struct pbuf_custom *)p; + LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL); + pc->custom_free_function(p); + } else +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + { + /* is this a pbuf from the pool? */ + if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) { + memp_free(MEMP_PBUF_POOL, p); + /* is this a ROM or RAM referencing pbuf? 
*/ + } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF) { + memp_free(MEMP_PBUF, p); + /* type == PBUF_RAM */ + } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) { + mem_free(p); + } else { + /* @todo: support freeing other types */ + LWIP_ASSERT("invalid pbuf type", 0); + } + } + count++; + /* proceed to next pbuf */ + p = q; + /* p->ref > 0, this pbuf is still referenced to */ + /* (and so the remaining pbufs in chain as well) */ + } else { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, (u16_t)ref)); + /* stop walking through the chain */ + p = NULL; + } + } + PERF_STOP("pbuf_free"); + /* return number of de-allocated pbufs */ + return count; +} + +/** + * Count number of pbufs in a chain + * + * @param p first pbuf of chain + * @return the number of pbufs in a chain + */ +u16_t +pbuf_clen(const struct pbuf *p) +{ + u16_t len; + + len = 0; + while (p != NULL) { + ++len; + p = p->next; + } + return len; +} + +/** + * @ingroup pbuf + * Increment the reference count of the pbuf. + * + * @param p pbuf to increase reference counter of + * + */ +void +pbuf_ref(struct pbuf *p) +{ + /* pbuf given? */ + if (p != NULL) { + SYS_ARCH_SET(p->ref, (LWIP_PBUF_REF_T)(p->ref + 1)); + LWIP_ASSERT("pbuf ref overflow", p->ref > 0); + } +} + +/** + * @ingroup pbuf + * Concatenate two pbufs (each may be a pbuf chain) and take over + * the caller's reference of the tail pbuf. + * + * @note The caller MAY NOT reference the tail pbuf afterwards. + * Use pbuf_chain() for that purpose. + * + * This function explicitly does not check for tot_len overflow to prevent + * failing to queue too long pbufs. This can produce invalid pbufs, so + * handle with care! + * + * @see pbuf_chain() + */ +void +pbuf_cat(struct pbuf *h, struct pbuf *t) +{ + struct pbuf *p; + + LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)", + ((h != NULL) && (t != NULL)), return;); + + /* proceed to last pbuf of chain */ + for (p = h; p->next != NULL; p = p->next) { + /* add total length of second chain to all totals of first chain */ + p->tot_len = (u16_t)(p->tot_len + t->tot_len); + } + /* { p is last pbuf of first h chain, p->next == NULL } */ + LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len); + LWIP_ASSERT("p->next == NULL", p->next == NULL); + /* add total length of second chain to last pbuf total of first chain */ + p->tot_len = (u16_t)(p->tot_len + t->tot_len); + /* chain last pbuf of head (p) with first of tail (t) */ + p->next = t; + /* p->next now references t, but the caller will drop its reference to t, + * so netto there is no change to the reference count of t. + */ +} + +/** + * @ingroup pbuf + * Chain two pbufs (or pbuf chains) together. + * + * The caller MUST call pbuf_free(t) once it has stopped + * using it. Use pbuf_cat() instead if you no longer use t. + * + * @param h head pbuf (chain) + * @param t tail pbuf (chain) + * @note The pbufs MUST belong to the same packet. + * @note MAY NOT be called on a packet queue. + * + * The ->tot_len fields of all pbufs of the head chain are adjusted. + * The ->next field of the last pbuf of the head chain is adjusted. + * The ->ref field of the first pbuf of the tail chain is adjusted. 
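+ *
+ * A minimal sketch, assuming 'h' and 't' are separately allocated pbufs of
+ * the same packet and the caller is done with both afterwards:
+ * @code
+ *   pbuf_chain(h, t);
+ *   pbuf_free(t);
+ *   pbuf_free(h);
+ * @endcode
+ * Had pbuf_cat(h, t) been used instead, the pbuf_free(t) call would be wrong,
+ * because pbuf_cat() takes over the caller's reference to 't'.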
+ * + */ +void +pbuf_chain(struct pbuf *h, struct pbuf *t) +{ + pbuf_cat(h, t); + /* t is now referenced by h */ + pbuf_ref(t); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t)); +} + +/** + * Dechains the first pbuf from its succeeding pbufs in the chain. + * + * Makes p->tot_len field equal to p->len. + * @param p pbuf to dechain + * @return remainder of the pbuf chain, or NULL if it was de-allocated. + * @note May not be called on a packet queue. + */ +struct pbuf * +pbuf_dechain(struct pbuf *p) +{ + struct pbuf *q; + u8_t tail_gone = 1; + /* tail */ + q = p->next; + /* pbuf has successor in chain? */ + if (q != NULL) { + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len); + /* enforce invariant if assertion is disabled */ + q->tot_len = (u16_t)(p->tot_len - p->len); + /* decouple pbuf from remainder */ + p->next = NULL; + /* total length of pbuf p is its own length only */ + p->tot_len = p->len; + /* q is no longer referenced by p, free it */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q)); + tail_gone = pbuf_free(q); + if (tail_gone > 0) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q)); + } + /* return remaining tail or NULL if deallocated */ + } + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len); + return ((tail_gone > 0) ? NULL : q); +} + +/** + * @ingroup pbuf + * Create PBUF_RAM copies of pbufs. + * + * Used to queue packets on behalf of the lwIP stack, such as + * ARP based queueing. + * + * @note You MUST explicitly use p = pbuf_take(p); + * + * @note Only one packet is copied, no packet queue! + * + * @param p_to pbuf destination of the copy + * @param p_from pbuf source of the copy + * + * @return ERR_OK if pbuf was copied + * ERR_ARG if one of the pbufs is NULL or p_to is not big + * enough to hold p_from + */ +err_t +pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from) +{ + size_t offset_to = 0, offset_from = 0, len; + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n", + (const void *)p_to, (const void *)p_from)); + + /* is the target big enough to hold the source? 
*/ + LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) && + (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;); + + /* iterate through pbuf chain */ + do { + /* copy one part of the original chain */ + if ((p_to->len - offset_to) >= (p_from->len - offset_from)) { + /* complete current p_from fits into current p_to */ + len = p_from->len - offset_from; + } else { + /* current p_from does not fit into current p_to */ + len = p_to->len - offset_to; + } + MEMCPY((u8_t *)p_to->payload + offset_to, (u8_t *)p_from->payload + offset_from, len); + offset_to += len; + offset_from += len; + LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len); + LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len); + if (offset_from >= p_from->len) { + /* on to next p_from (if any) */ + offset_from = 0; + p_from = p_from->next; + } + if (offset_to == p_to->len) { + /* on to next p_to (if any) */ + offset_to = 0; + p_to = p_to->next; + LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL), return ERR_ARG;); + } + + if ((p_from != NULL) && (p_from->len == p_from->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_from->next == NULL), return ERR_VAL;); + } + if ((p_to != NULL) && (p_to->len == p_to->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_to->next == NULL), return ERR_VAL;); + } + } while (p_from); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n")); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Copy (part of) the contents of a packet buffer + * to an application supplied buffer. + * + * @param buf the pbuf from which to copy data + * @param dataptr the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +u16_t +pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset) +{ + const struct pbuf *p; + u16_t left = 0; + u16_t buf_copy_len; + u16_t copied_total = 0; + + LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;); + LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;); + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ + for (p = buf; len != 0 && p != NULL; p = p->next) { + if ((offset != 0) && (offset >= p->len)) { + /* don't copy from this buffer -> on to the next */ + offset = (u16_t)(offset - p->len); + } else { + /* copy from this buffer. maybe only partially. */ + buf_copy_len = (u16_t)(p->len - offset); + if (buf_copy_len > len) { + buf_copy_len = len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(&((char *)dataptr)[left], &((char *)p->payload)[offset], buf_copy_len); + copied_total = (u16_t)(copied_total + buf_copy_len); + left = (u16_t)(left + buf_copy_len); + len = (u16_t)(len - buf_copy_len); + offset = 0; + } + } + return copied_total; +} + +/** + * @ingroup pbuf + * Get part of a pbuf's payload as contiguous memory. The returned memory is + * either a pointer into the pbuf's payload or, if split over multiple pbufs, + * a copy into the user-supplied buffer. 
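+ *
+ * A minimal sketch, assuming 'p' is a received pbuf carrying at least an
+ * 8-byte header at offset 0 ('version' only stands for whatever field the
+ * caller extracts):
+ * @code
+ *   u8_t storage[8];
+ *   u8_t version = 0;
+ *   const u8_t *hdr = (const u8_t *)pbuf_get_contiguous(p, storage,
+ *                                                       sizeof(storage), 8, 0);
+ *   if (hdr != NULL) {
+ *     version = (u8_t)(hdr[0] >> 4);
+ *   }
+ * @endcode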
+ * + * @param p the pbuf from which to copy data + * @param buffer the application supplied buffer + * @param bufsize size of the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +void * +pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset) +{ + const struct pbuf *q; + u16_t out_offset; + + LWIP_ERROR("pbuf_get_contiguous: invalid buf", (p != NULL), return NULL;); + LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (buffer != NULL), return NULL;); + LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (bufsize >= len), return NULL;); + + q = pbuf_skip_const(p, offset, &out_offset); + if (q != NULL) { + if (q->len >= (out_offset + len)) { + /* all data in this pbuf, return zero-copy */ + return (u8_t *)q->payload + out_offset; + } + /* need to copy */ + if (pbuf_copy_partial(q, buffer, len, out_offset) != len) { + /* copying failed: pbuf is too short */ + return NULL; + } + return buffer; + } + /* pbuf is too short (offset does not fit in) */ + return NULL; +} + +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +/** + * This method modifies a 'pbuf chain', so that its total length is + * smaller than 64K. The remainder of the original pbuf chain is stored + * in *rest. + * This function never creates new pbufs, but splits an existing chain + * in two parts. The tot_len of the modified packet queue will likely be + * smaller than 64K. + * 'packet queues' are not supported by this function. + * + * @param p the pbuf queue to be split + * @param rest pointer to store the remainder (after the first 64K) + */ +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest) +{ + *rest = NULL; + if ((p != NULL) && (p->next != NULL)) { + u16_t tot_len_front = p->len; + struct pbuf *i = p; + struct pbuf *r = p->next; + + /* continue until the total length (summed up as u16_t) overflows */ + while ((r != NULL) && ((u16_t)(tot_len_front + r->len) >= tot_len_front)) { + tot_len_front = (u16_t)(tot_len_front + r->len); + i = r; + r = r->next; + } + /* i now points to last packet of the first segment. Set next + pointer to NULL */ + i->next = NULL; + + if (r != NULL) { + /* Update the tot_len field in the first part */ + for (i = p; i != NULL; i = i->next) { + i->tot_len = (u16_t)(i->tot_len - r->tot_len); + LWIP_ASSERT("tot_len/len mismatch in last pbuf", + (i->next != NULL) || (i->tot_len == i->len)); + } + if (p->flags & PBUF_FLAG_TCP_FIN) { + r->flags |= PBUF_FLAG_TCP_FIN; + } + + /* tot_len field in rest does not need modifications */ + /* reference counters do not need modifications */ + *rest = r; + } + } +} +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +/* Actual implementation of pbuf_skip() but returning const pointer... 
*/ +static const struct pbuf * +pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset) +{ + u16_t offset_left = in_offset; + const struct pbuf *q = in; + + /* get the correct pbuf */ + while ((q != NULL) && (q->len <= offset_left)) { + offset_left = (u16_t)(offset_left - q->len); + q = q->next; + } + if (out_offset != NULL) { + *out_offset = offset_left; + } + return q; +} + +/** + * @ingroup pbuf + * Skip a number of bytes at the start of a pbuf + * + * @param in input pbuf + * @param in_offset offset to skip + * @param out_offset resulting offset in the returned pbuf + * @return the pbuf in the queue where the offset is + */ +struct pbuf * +pbuf_skip(struct pbuf *in, u16_t in_offset, u16_t *out_offset) +{ + const struct pbuf *out = pbuf_skip_const(in, in_offset, out_offset); + return LWIP_CONST_CAST(struct pbuf *, out); +} + +/** + * @ingroup pbuf + * Copy application supplied data into a pbuf. + * This function can only be used to copy the equivalent of buf->tot_len data. + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len) +{ + struct pbuf *p; + size_t buf_copy_len; + size_t total_copy_len = len; + size_t copied_total = 0; + + LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;); + + if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) { + return ERR_ARG; + } + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. 
*/ + for (p = buf; total_copy_len != 0; p = p->next) { + LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL); + buf_copy_len = total_copy_len; + if (buf_copy_len > p->len) { + /* this pbuf cannot hold all remaining data */ + buf_copy_len = p->len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(p->payload, &((const char *)dataptr)[copied_total], buf_copy_len); + total_copy_len -= buf_copy_len; + copied_total += buf_copy_len; + } + LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Same as pbuf_take() but puts data at an offset + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * @param offset offset in pbuf where to copy dataptr to + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset) +{ + u16_t target_offset; + struct pbuf *q = pbuf_skip(buf, offset, &target_offset); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->tot_len >= target_offset + len)) { + u16_t remaining_len = len; + const u8_t *src_ptr = (const u8_t *)dataptr; + /* copy the part that goes into the first pbuf */ + u16_t first_copy_len; + LWIP_ASSERT("check pbuf_skip result", target_offset < q->len); + first_copy_len = (u16_t)LWIP_MIN(q->len - target_offset, len); + MEMCPY(((u8_t *)q->payload) + target_offset, dataptr, first_copy_len); + remaining_len = (u16_t)(remaining_len - first_copy_len); + src_ptr += first_copy_len; + if (remaining_len > 0) { + return pbuf_take(q->next, src_ptr, remaining_len); + } + return ERR_OK; + } + return ERR_MEM; +} + +/** + * @ingroup pbuf + * Creates a single pbuf out of a queue of pbufs. + * + * @remark: Either the source pbuf 'p' is freed by this function or the original + * pbuf 'p' is returned, therefore the caller has to check the result! + * + * @param p the source pbuf + * @param layer pbuf_layer of the new pbuf + * + * @return a new, single pbuf (p->next is NULL) + * or the old pbuf if allocation fails + */ +struct pbuf * +pbuf_coalesce(struct pbuf *p, pbuf_layer layer) +{ + struct pbuf *q; + if (p->next == NULL) { + return p; + } + q = pbuf_clone(layer, PBUF_RAM, p); + if (q == NULL) { + /* @todo: what do we do now? */ + return p; + } + pbuf_free(p); + return q; +} + +/** + * @ingroup pbuf + * Allocates a new pbuf of same length (via pbuf_alloc()) and copies the source + * pbuf into this new pbuf (using pbuf_copy()). + * + * @param layer pbuf_layer of the new pbuf + * @param type this parameter decides how and where the pbuf should be allocated + * (@see pbuf_alloc()) + * @param p the source pbuf + * + * @return a new pbuf or NULL if allocation fails + */ +struct pbuf * +pbuf_clone(pbuf_layer layer, pbuf_type type, struct pbuf *p) +{ + struct pbuf *q; + err_t err; + q = pbuf_alloc(layer, p->tot_len, type); + if (q == NULL) { + return NULL; + } + err = pbuf_copy(q, p); + LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("pbuf_copy failed", err == ERR_OK); + return q; +} + +#if LWIP_CHECKSUM_ON_COPY +/** + * Copies data into a single pbuf (*not* into a pbuf queue!) 
and updates + * the checksum while copying + * + * @param p the pbuf to copy data into + * @param start_offset offset of p->payload where to copy the data to + * @param dataptr data to copy into the pbuf + * @param len length of data to copy into the pbuf + * @param chksum pointer to the checksum which is updated + * @return ERR_OK if successful, another error if the data does not fit + * within the (first) pbuf (no pbuf queues!) + */ +err_t +pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum) +{ + u32_t acc; + u16_t copy_chksum; + char *dst_ptr; + LWIP_ASSERT("p != NULL", p != NULL); + LWIP_ASSERT("dataptr != NULL", dataptr != NULL); + LWIP_ASSERT("chksum != NULL", chksum != NULL); + LWIP_ASSERT("len != 0", len != 0); + + if ((start_offset >= p->len) || (start_offset + len > p->len)) { + return ERR_ARG; + } + + dst_ptr = ((char *)p->payload) + start_offset; + copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len); + if ((start_offset & 1) != 0) { + copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum); + } + acc = *chksum; + acc += copy_chksum; + *chksum = FOLD_U32T(acc); + return ERR_OK; +} +#endif /* LWIP_CHECKSUM_ON_COPY */ + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * WARNING: returns zero for offset >= p->tot_len + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len + */ +u8_t +pbuf_get_at(const struct pbuf *p, u16_t offset) +{ + int ret = pbuf_try_get_at(p, offset); + if (ret >= 0) { + return (u8_t)ret; + } + return 0; +} + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len + */ +int +pbuf_try_get_at(const struct pbuf *p, u16_t offset) +{ + u16_t q_idx; + const struct pbuf *q = pbuf_skip_const(p, offset, &q_idx); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + return ((u8_t *)q->payload)[q_idx]; + } + return -1; +} + +/** + * @ingroup pbuf + * Put one byte to the specified position in a pbuf + * WARNING: silently ignores offset >= p->tot_len + * + * @param p pbuf to fill + * @param offset offset into p of the byte to write + * @param data byte to write at an offset into p + */ +void +pbuf_put_at(struct pbuf *p, u16_t offset, u8_t data) +{ + u16_t q_idx; + struct pbuf *q = pbuf_skip(p, offset, &q_idx); + + /* write requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + ((u8_t *)q->payload)[q_idx] = data; + } +} + +/** + * @ingroup pbuf + * Compare pbuf contents at specified offset with memory s2, both of length n + * + * @param p pbuf to compare + * @param offset offset into p at which to start comparing + * @param s2 buffer to compare + * @param n length of buffer to compare + * @return zero if equal, nonzero otherwise + * (0xffff if p is too short, diffoffset+1 otherwise) + */ +u16_t +pbuf_memcmp(const struct pbuf *p, u16_t offset, const void *s2, u16_t n) +{ + u16_t start = offset; + const struct pbuf *q = p; + u16_t i; + + /* pbuf long enough to perform check? */ + if (p->tot_len < (offset + n)) { + return 0xffff; + } + + /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. 
*/ + while ((q != NULL) && (q->len <= start)) { + start = (u16_t)(start - q->len); + q = q->next; + } + + /* return requested data if pbuf is OK */ + for (i = 0; i < n; i++) { + /* We know pbuf_get_at() succeeds because of p->tot_len check above. */ + u8_t a = pbuf_get_at(q, (u16_t)(start + i)); + u8_t b = ((const u8_t *)s2)[i]; + if (a != b) { + return (u16_t)LWIP_MIN(i + 1, 0xFFFF); + } + } + return 0; +} + +/** + * @ingroup pbuf + * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset + * start_offset. + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param mem search for the contents of this buffer + * @param mem_len length of 'mem' + * @param start_offset offset into p at which to start searching + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_memfind(const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset) +{ + u16_t i; + u16_t max_cmp_start = (u16_t)(p->tot_len - mem_len); + if (p->tot_len >= mem_len + start_offset) { + for (i = start_offset; i <= max_cmp_start; i++) { + u16_t plus = pbuf_memcmp(p, i, mem, mem_len); + if (plus == 0) { + return i; + } + } + } + return 0xFFFF; +} + +/** + * Find occurrence of substr with length substr_len in pbuf p, start at offset + * start_offset + * WARNING: in contrast to strstr(), this one does not stop at the first \0 in + * the pbuf/source string! + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param substr string to search for in p, maximum length is 0xFFFE + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_strstr(const struct pbuf *p, const char *substr) +{ + size_t substr_len; + if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) { + return 0xFFFF; + } + substr_len = strlen(substr); + if (substr_len >= 0xFFFF) { + return 0xFFFF; + } + return pbuf_memfind(p, substr, (u16_t)substr_len, 0); +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/raw.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/raw.c new file mode 100644 index 000000000..3b34544b0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/raw.c @@ -0,0 +1,671 @@ +/** + * @file + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * See also @ref raw_raw + * + * @defgroup raw_raw RAW + * @ingroup callbackstyle_api + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * @see @ref api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/raw.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" + +#include + +/** The list of RAW PCBs */ +static struct raw_pcb *raw_pcbs; + +static u8_t +raw_input_local_match(struct raw_pcb *pcb, u8_t broadcast) +{ + LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + return 0; + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { +#if IP_SOF_BROADCAST_RECV + if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { + return 0; + } +#endif /* IP_SOF_BROADCAST_RECV */ + return 1; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* Only need to check PCB if incoming IP version matches PCB IP version */ + if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { +#if LWIP_IPV4 + /* Special case: IPv4 broadcast: receive all broadcasts + * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ + if (broadcast != 0) { +#if IP_SOF_BROADCAST_RECV + if (ip_get_option(pcb, SOF_BROADCAST)) +#endif /* IP_SOF_BROADCAST_RECV */ + { + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip))) { + return 1; + } + } + } else +#endif /* LWIP_IPV4 */ + /* Handle IPv4 and IPv6: catch all or exact match */ + if (ip_addr_isany(&pcb->local_ip) || + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + return 1; + } + } + + return 0; +} + +/** + * Determine if in incoming IP packet is covered by a RAW PCB + * and if so, pass it to a user-provided receive callback function. + * + * Given an incoming IP datagram (as a chain of pbufs) this function + * finds a corresponding RAW PCB and calls the corresponding receive + * callback function. + * + * @param p pbuf to be demultiplexed to a RAW PCB. + * @param inp network interface on which the datagram was received. + * @return - 1 if the packet has been eaten by a RAW PCB receive + * callback function. The caller MAY NOT not reference the + * packet any longer, and MAY NOT call pbuf_free(). + * @return - 0 if packet is not eaten (pbuf is still referenced by the + * caller). 
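+ *
+ * How a receive callback typically honours this contract, as a sketch
+ * ('my_raw_recv' is application code registered with raw_recv(); the length
+ * check only stands for whatever filtering the application performs):
+ * @code
+ *   static u8_t
+ *   my_raw_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p,
+ *               const ip_addr_t *addr)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     LWIP_UNUSED_ARG(pcb);
+ *     LWIP_UNUSED_ARG(addr);
+ *     if (p->tot_len >= 4) {
+ *       pbuf_free(p);
+ *       return 1;
+ *     }
+ *     return 0;
+ *   }
+ * @endcode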
+ * + */ +raw_input_state_t +raw_input(struct pbuf *p, struct netif *inp) +{ + struct raw_pcb *pcb, *prev; + s16_t proto; + raw_input_state_t ret = RAW_INPUT_NONE; + u8_t broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_UNUSED_ARG(inp); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_HDR_GET_VERSION(p->payload) == 6) +#endif /* LWIP_IPV4 */ + { + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + proto = IP6H_NEXTH(ip6hdr); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + proto = IPH_PROTO((struct ip_hdr *)p->payload); + } +#endif /* LWIP_IPV4 */ + + prev = NULL; + pcb = raw_pcbs; + /* loop through all raw pcbs until the packet is eaten by one */ + /* this allows multiple pcbs to match against the packet by design */ + while (pcb != NULL) { + if ((pcb->protocol == proto) && raw_input_local_match(pcb, broadcast) && + (((pcb->flags & RAW_FLAGS_CONNECTED) == 0) || + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { + /* receive callback function available? */ + if (pcb->recv != NULL) { + u8_t eaten; +#ifndef LWIP_NOASSERT + void *old_payload = p->payload; +#endif + ret = RAW_INPUT_DELIVERED; + /* the receive callback function did not eat the packet? */ + eaten = pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr()); + if (eaten != 0) { + /* receive function ate the packet */ + p = NULL; + if (prev != NULL) { + /* move the pcb to the front of raw_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return RAW_INPUT_EATEN; + } else { + /* sanity-check that the receive callback did not alter the pbuf */ + LWIP_ASSERT("raw pcb recv callback altered pbuf payload pointer without eating packet", + p->payload == old_payload); + } + } + /* no receive callback function was set for this raw PCB */ + } + /* drop the packet */ + prev = pcb; + pcb = pcb->next; + } + return ret; +} + +/** + * @ingroup raw_raw + * Bind a RAW PCB. + * + * @param pcb RAW PCB to be bound with a local address ipaddr. + * @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to + * bind to all local interfaces. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified IP address is already bound to by + * another RAW PCB. + * + * @see raw_disconnect() + */ +err_t +raw_bind(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. */ + if (IP_IS_V6(&pcb->local_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->local_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->local_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Bind an RAW PCB to a specific netif. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb RAW PCB to be bound with netif. + * @param netif netif to bind to. Can be NULL. 
+ * + * @see raw_disconnect() + */ +void +raw_bind_netif(struct raw_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +/** + * @ingroup raw_raw + * Connect an RAW PCB. This function is required by upper layers + * of lwip. Using the raw api you could use raw_sendto() instead + * + * This will associate the RAW PCB with the remote address. + * + * @param pcb RAW PCB to be connected with remote address ipaddr and port. + * @param ipaddr remote IP address to connect with. + * + * @return lwIP error code + * + * @see raw_disconnect() and raw_sendto() + */ +err_t +raw_connect(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now, + * using the bound address to make a more informed decision when possible. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + raw_set_flags(pcb, RAW_FLAGS_CONNECTED); + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Disconnect a RAW PCB. + * + * @param pcb the raw pcb to disconnect. + */ +void +raw_disconnect(struct raw_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->netif_idx = NETIF_NO_INDEX; + /* mark PCB as unconnected */ + raw_clear_flags(pcb, RAW_FLAGS_CONNECTED); +} + +/** + * @ingroup raw_raw + * Set the callback function for received packets that match the + * raw PCB's protocol and binding. + * + * The callback function MUST either + * - eat the packet by calling pbuf_free() and returning non-zero. The + * packet will not be passed to other raw PCBs or other protocol layers. + * - not free the packet, and return zero. The packet will be matched + * against further PCBs and/or forwarded to another protocol layers. + */ +void +raw_recv(struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address. An IP header will be prepended + * to the packet, unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that + * case, the packet must include an IP header, which will then be sent as is. 
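A minimal sketch (not part of the vendored lwIP sources; icmp_raw_listen() and its callback are illustrative names, assuming lwIP 2.x headers) of the raw_recv() contract documented above: the callback either frees the pbuf and returns non-zero to "eat" the packet, or returns zero to leave it for further PCBs and the stack itself.

/* Sketch only: claim all incoming ICMP traffic on a RAW PCB. */
#include "lwip/raw.h"
#include "lwip/ip_addr.h"
#include "lwip/pbuf.h"

static u8_t
icmp_raw_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *addr)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(addr);
  /* "Eat" the packet: free the pbuf and return non-zero so it is not passed
   * to any further RAW PCBs or to the stack's own ICMP handling. */
  pbuf_free(p);
  return 1;
}

void
icmp_raw_listen(void)   /* illustrative helper, not an lwIP function */
{
  struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
  if (pcb != NULL) {
    raw_recv(pcb, icmp_raw_recv, NULL);
    raw_bind(pcb, IP4_ADDR_ANY);   /* all local interfaces, as documented above */
  }
}

Because the callback returns 1, such a PCB also hides ICMP traffic from lwIP's built-in handlers, which is exactly the "eaten" semantics described for raw_input() above.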
+ * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * @param ipaddr the destination address of the IP packet + * + */ +err_t +raw_sendto(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr) +{ + struct netif *netif; + const ip_addr_t *src_ip; + + if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr)) { + return ERR_VAL; + } + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n")); + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { +#if LWIP_MULTICAST_TX_OPTIONS + netif = NULL; + if (ip_addr_ismulticast(ipaddr)) { + /* For multicast-destined packets, use the user-provided interface index to + * determine the outgoing interface, if an interface index is set and a + * matching netif can be found. Otherwise, fall back to regular routing. */ + netif = netif_get_by_index(pcb->mcast_ifindex); + } + + if (netif == NULL) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + { + netif = ip_route(&pcb->local_ip, ipaddr); + } + } + + if (netif == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to ")); + ip_addr_debug_print(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ipaddr); + return ERR_RTE; + } + + if (ip_addr_isany(&pcb->local_ip) || ip_addr_ismulticast(&pcb->local_ip)) { + /* use outgoing network interface IP address as source address */ + src_ip = ip_netif_get_local_ip(netif, ipaddr); +#if LWIP_IPV6 + if (src_ip == NULL) { + return ERR_RTE; + } +#endif /* LWIP_IPV6 */ + } else { + /* use RAW PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } + + return raw_sendto_if_src(pcb, p, ipaddr, netif, src_ip); +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address, using a particular outgoing + * netif and source IP address. An IP header will be prepended to the packet, + * unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that case, the + * packet must include an IP header, which will then be sent as is. + * + * @param pcb RAW PCB used to send the data + * @param p chain of pbufs to be sent + * @param dst_ip destination IP address + * @param netif the netif used for sending + * @param src_ip source IP address + */ +err_t +raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + struct netif *netif, const ip_addr_t *src_ip) +{ + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u16_t header_size; + u8_t ttl; + + LWIP_ASSERT_CORE_LOCKED(); + + if ((pcb == NULL) || (dst_ip == NULL) || (netif == NULL) || (src_ip == NULL) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + header_size = ( +#if LWIP_IPV4 && LWIP_IPV6 + IP_IS_V6(dst_ip) ? IP6_HLEN : IP_HLEN); +#elif LWIP_IPV4 + IP_HLEN); +#else + IP6_HLEN); +#endif + + /* Handle the HDRINCL option as an exception: none of the code below applies + * to this case, and sending the packet needs to be done differently too. */ + if (pcb->flags & RAW_FLAGS_HDRINCL) { + /* A full header *must* be present in the first pbuf of the chain, as the + * output routines may access its fields directly. */ + if (p->len < header_size) { + return ERR_VAL; + } + /* @todo multicast loop support, if at all desired for this scenario.. */ + NETIF_SET_HINTS(netif, &pcb->netif_hints); + err = ip_output_if_hdrincl(p, src_ip, dst_ip, netif); + NETIF_RESET_HINTS(netif); + return err; + } + + /* packet too large to add an IP header without causing an overflow? 
*/ + if ((u16_t)(p->tot_len + header_size) < p->tot_len) { + return ERR_MEM; + } + /* not enough space to add an IP header to first pbuf in given p chain? */ + if (pbuf_add_header(p, header_size)) { + /* allocate header in new pbuf */ + q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p */ + pbuf_chain(q, p); + } + /* { first pbuf q points to header pbuf } */ + LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* first pbuf q equals given pbuf */ + q = p; + if (pbuf_remove_header(q, header_size)) { + LWIP_ASSERT("Can't restore header we just removed!", 0); + return ERR_MEM; + } + } + +#if IP_SOF_BROADCAST + if (IP_IS_V4(dst_ip)) { + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + /* free any temporary header pbuf allocated by pbuf_header() */ + if (q != p) { + pbuf_free(q); + } + return ERR_VAL; + } + } +#endif /* IP_SOF_BROADCAST */ + + /* Multicast Loop? */ +#if LWIP_MULTICAST_TX_OPTIONS + if (((pcb->flags & RAW_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_IPV6 + /* If requested, based on the IPV6_CHECKSUM socket option per RFC3542, + compute the checksum and update the checksum in the payload. */ + if (IP_IS_V6(dst_ip) && pcb->chksum_reqd) { + u16_t chksum = ip6_chksum_pseudo(p, pcb->protocol, p->tot_len, ip_2_ip6(src_ip), ip_2_ip6(dst_ip)); + LWIP_ASSERT("Checksum must fit into first pbuf", p->len >= (pcb->chksum_offset + 2)); + SMEMCPY(((u8_t *)p->payload) + pcb->chksum_offset, &chksum, sizeof(u16_t)); + } +#endif + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? raw_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + NETIF_SET_HINTS(netif, &pcb->netif_hints); + err = ip_output_if(q, src_ip, dst_ip, ttl, pcb->tos, pcb->protocol, netif); + NETIF_RESET_HINTS(netif); + + /* did we chain a header earlier? */ + if (q != p) { + /* free the header */ + pbuf_free(q); + } + return err; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the address given by raw_connect() + * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * + */ +err_t +raw_send(struct raw_pcb *pcb, struct pbuf *p) +{ + return raw_sendto(pcb, p, &pcb->remote_ip); +} + +/** + * @ingroup raw_raw + * Remove an RAW PCB. + * + * @param pcb RAW PCB to be removed. The PCB is removed from the list of + * RAW PCB's and the data structure is freed from memory. + * + * @see raw_new() + */ +void +raw_remove(struct raw_pcb *pcb) +{ + struct raw_pcb *pcb2; + LWIP_ASSERT_CORE_LOCKED(); + /* pcb to be removed is first in list? 
*/ + if (raw_pcbs == pcb) { + /* make list start at 2nd pcb */ + raw_pcbs = raw_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = raw_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in raw_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_RAW_PCB, pcb); +} + +/** + * @ingroup raw_raw + * Create a RAW PCB. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param proto the protocol number of the IPs payload (e.g. IP_PROTO_ICMP) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new(u8_t proto) +{ + struct raw_pcb *pcb; + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_new\n")); + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB); + /* could allocate RAW PCB? */ + if (pcb != NULL) { + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct raw_pcb)); + pcb->protocol = proto; + pcb->ttl = RAW_TTL; +#if LWIP_MULTICAST_TX_OPTIONS + raw_set_multicast_ttl(pcb, RAW_TTL); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return pcb; +} + +/** + * @ingroup raw_raw + * Create a RAW PCB for specific IP type. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @param proto the protocol number (next header) of the IPv6 packet payload + * (e.g. IP6_NEXTH_ICMP6) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new_ip_type(u8_t type, u8_t proto) +{ + struct raw_pcb *pcb; + LWIP_ASSERT_CORE_LOCKED(); + pcb = raw_new(proto); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void raw_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct raw_pcb *rpcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (rpcb = raw_pcbs; rpcb != NULL; rpcb = rpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&rpcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(rpcb->local_ip, *new_addr); + } + } + } +} + +#endif /* LWIP_RAW */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/stats.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/stats.c new file mode 100644 index 000000000..34e9b2709 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/stats.c @@ -0,0 +1,169 @@ +/** + * @file + * Statistics module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_STATS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/debug.h" + +#include + +struct stats_ lwip_stats; + +void +stats_init(void) +{ +#ifdef LWIP_DEBUG +#if MEM_STATS + lwip_stats.mem.name = "MEM"; +#endif /* MEM_STATS */ +#endif /* LWIP_DEBUG */ +} + +#if LWIP_STATS_DISPLAY +void +stats_display_proto(struct stats_proto *proto, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv)); + LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", proto->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", proto->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", proto->memerr)); + LWIP_PLATFORM_DIAG(("rterr: %"STAT_COUNTER_F"\n\t", proto->rterr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", proto->proterr)); + LWIP_PLATFORM_DIAG(("opterr: %"STAT_COUNTER_F"\n\t", proto->opterr)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n\t", proto->err)); + LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit)); +} + +#if IGMP_STATS || MLD6_STATS +void +stats_display_igmp(struct stats_igmp *igmp, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", igmp->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", igmp->recv)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", igmp->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", igmp->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", igmp->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", igmp->memerr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", igmp->proterr)); + LWIP_PLATFORM_DIAG(("rx_v1: %"STAT_COUNTER_F"\n\t", igmp->rx_v1)); + LWIP_PLATFORM_DIAG(("rx_group: %"STAT_COUNTER_F"\n\t", igmp->rx_group)); + LWIP_PLATFORM_DIAG(("rx_general: %"STAT_COUNTER_F"\n\t", 
igmp->rx_general)); + LWIP_PLATFORM_DIAG(("rx_report: %"STAT_COUNTER_F"\n\t", igmp->rx_report)); + LWIP_PLATFORM_DIAG(("tx_join: %"STAT_COUNTER_F"\n\t", igmp->tx_join)); + LWIP_PLATFORM_DIAG(("tx_leave: %"STAT_COUNTER_F"\n\t", igmp->tx_leave)); + LWIP_PLATFORM_DIAG(("tx_report: %"STAT_COUNTER_F"\n", igmp->tx_report)); +} +#endif /* IGMP_STATS || MLD6_STATS */ + +#if MEM_STATS || MEMP_STATS +void +stats_display_mem(struct stats_mem *mem, const char *name) +{ + LWIP_PLATFORM_DIAG(("\nMEM %s\n\t", name)); + LWIP_PLATFORM_DIAG(("avail: %"MEM_SIZE_F"\n\t", mem->avail)); + LWIP_PLATFORM_DIAG(("used: %"MEM_SIZE_F"\n\t", mem->used)); + LWIP_PLATFORM_DIAG(("max: %"MEM_SIZE_F"\n\t", mem->max)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n", mem->err)); +} + +#if MEMP_STATS +void +stats_display_memp(struct stats_mem *mem, int idx) +{ + if (idx < MEMP_MAX) { + stats_display_mem(mem, mem->name); + } +} +#endif /* MEMP_STATS */ +#endif /* MEM_STATS || MEMP_STATS */ + +#if SYS_STATS +void +stats_display_sys(struct stats_sys *sys) +{ + LWIP_PLATFORM_DIAG(("\nSYS\n\t")); + LWIP_PLATFORM_DIAG(("sem.used: %"STAT_COUNTER_F"\n\t", sys->sem.used)); + LWIP_PLATFORM_DIAG(("sem.max: %"STAT_COUNTER_F"\n\t", sys->sem.max)); + LWIP_PLATFORM_DIAG(("sem.err: %"STAT_COUNTER_F"\n\t", sys->sem.err)); + LWIP_PLATFORM_DIAG(("mutex.used: %"STAT_COUNTER_F"\n\t", sys->mutex.used)); + LWIP_PLATFORM_DIAG(("mutex.max: %"STAT_COUNTER_F"\n\t", sys->mutex.max)); + LWIP_PLATFORM_DIAG(("mutex.err: %"STAT_COUNTER_F"\n\t", sys->mutex.err)); + LWIP_PLATFORM_DIAG(("mbox.used: %"STAT_COUNTER_F"\n\t", sys->mbox.used)); + LWIP_PLATFORM_DIAG(("mbox.max: %"STAT_COUNTER_F"\n\t", sys->mbox.max)); + LWIP_PLATFORM_DIAG(("mbox.err: %"STAT_COUNTER_F"\n", sys->mbox.err)); +} +#endif /* SYS_STATS */ + +void +stats_display(void) +{ + s16_t i; + + LINK_STATS_DISPLAY(); + ETHARP_STATS_DISPLAY(); + IPFRAG_STATS_DISPLAY(); + IP6_FRAG_STATS_DISPLAY(); + IP_STATS_DISPLAY(); + ND6_STATS_DISPLAY(); + IP6_STATS_DISPLAY(); + IGMP_STATS_DISPLAY(); + MLD6_STATS_DISPLAY(); + ICMP_STATS_DISPLAY(); + ICMP6_STATS_DISPLAY(); + UDP_STATS_DISPLAY(); + TCP_STATS_DISPLAY(); + MEM_STATS_DISPLAY(); + for (i = 0; i < MEMP_MAX; i++) { + MEMP_STATS_DISPLAY(i); + } + SYS_STATS_DISPLAY(); +} +#endif /* LWIP_STATS_DISPLAY */ + +#endif /* LWIP_STATS */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/sys.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/sys.c new file mode 100644 index 000000000..5f08352bf --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/sys.c @@ -0,0 +1,148 @@ +/** + * @file + * lwIP Operating System abstraction + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup sys_layer Porting (system abstraction layer) + * @ingroup lwip + * + * @defgroup sys_os OS abstraction layer + * @ingroup sys_layer + * No need to implement functions in this section in NO_SYS mode. + * The OS-specific code should be implemented in arch/sys_arch.h + * and sys_arch.c of your port. + * + * The operating system emulation layer provides a common interface + * between the lwIP code and the underlying operating system kernel. The + * general idea is that porting lwIP to new architectures requires only + * small changes to a few header files and a new sys_arch + * implementation. It is also possible to do a sys_arch implementation + * that does not rely on any underlying operating system. + * + * The sys_arch provides semaphores, mailboxes and mutexes to lwIP. For the full + * lwIP functionality, multiple threads support can be implemented in the + * sys_arch, but this is not required for the basic lwIP + * functionality. Timer scheduling is implemented in lwIP, but can be implemented + * by the sys_arch port (LWIP_TIMERS_CUSTOM==1). + * + * In addition to the source file providing the functionality of sys_arch, + * the OS emulation layer must provide several header files defining + * macros used throughout lwip. The files required and the macros they + * must define are listed below the sys_arch description. + * + * Since lwIP 1.4.0, semaphore, mutexes and mailbox functions are prototyped in a way that + * allows both using pointers or actual OS structures to be used. This way, memory + * required for such types can be either allocated in place (globally or on the + * stack) or on the heap (allocated internally in the "*_new()" functions). + * + * Note: + * ----- + * Be careful with using mem_malloc() in sys_arch. When malloc() refers to + * mem_malloc() you can run into a circular function call problem. In mem.c + * mem_init() tries to allocate a semaphore using mem_malloc, which of course + * can't be performed when sys_arch uses mem_malloc. + * + * @defgroup sys_sem Semaphores + * @ingroup sys_os + * Semaphores can be either counting or binary - lwIP works with both + * kinds. + * Semaphores are represented by the type "sys_sem_t" which is typedef'd + * in the sys_arch.h file. Mailboxes are equivalently represented by the + * type "sys_mbox_t". Mutexes are represented by the type "sys_mutex_t". + * lwIP does not place any restrictions on how these types are represented + * internally. + * + * @defgroup sys_mutex Mutexes + * @ingroup sys_os + * Mutexes are recommended to correctly handle priority inversion, + * especially if you use LWIP_CORE_LOCKING . 
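The semaphore, mailbox and mutex primitives described above are supplied by the port's sys_arch.c. A minimal sketch of the semaphore pair, under the assumption that the port's sys_arch.h typedefs sys_sem_t as a FreeRTOS SemaphoreHandle_t (the actual port in this tree may use different types):

/* Sketch only: one possible sys_arch semaphore implementation. */
#include "FreeRTOS.h"
#include "semphr.h"
#include "lwip/sys.h"

err_t
sys_sem_new(sys_sem_t *sem, u8_t count)
{
  *sem = xSemaphoreCreateBinary();
  if (*sem == NULL) {
    return ERR_MEM;
  }
  if (count != 0) {
    /* lwIP expects the semaphore to start at the given count (0 or 1 here) */
    xSemaphoreGive(*sem);
  }
  return ERR_OK;
}

void
sys_sem_free(sys_sem_t *sem)
{
  vSemaphoreDelete(*sem);
  *sem = NULL;   /* mark as invalid for this simple port */
}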
+ * + * @defgroup sys_mbox Mailboxes + * @ingroup sys_os + * Mailboxes should be implemented as a queue which allows multiple messages + * to be posted (implementing as a rendez-vous point where only one message can be + * posted at a time can have a highly negative impact on performance). A message + * in a mailbox is just a pointer, nothing more. + * + * @defgroup sys_time Time + * @ingroup sys_layer + * + * @defgroup sys_prot Critical sections + * @ingroup sys_layer + * Used to protect short regions of code against concurrent access. + * - Your system is a bare-metal system (probably with an RTOS) + * and interrupts are under your control: + * Implement this as LockInterrupts() / UnlockInterrupts() + * - Your system uses an RTOS with deferred interrupt handling from a + * worker thread: Implement as a global mutex or lock/unlock scheduler + * - Your system uses a high-level OS with e.g. POSIX signals: + * Implement as a global mutex + * + * @defgroup sys_misc Misc + * @ingroup sys_os + */ + +#include "lwip/opt.h" + +#include "lwip/sys.h" + +/* Most of the functions defined in sys.h must be implemented in the + * architecture-dependent file sys_arch.c */ + +#if !NO_SYS + +#ifndef sys_msleep +/** + * Sleep for some ms. Timeouts are NOT processed while sleeping. + * + * @param ms number of milliseconds to sleep + */ +void +sys_msleep(u32_t ms) +{ + if (ms > 0) { + sys_sem_t delaysem; + err_t err = sys_sem_new(&delaysem, 0); + if (err == ERR_OK) { + sys_arch_sem_wait(&delaysem, ms); + sys_sem_free(&delaysem); + } + } +} +#endif /* sys_msleep */ + +#endif /* !NO_SYS */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/tcp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/tcp.c new file mode 100644 index 000000000..bd7d64ec3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/tcp.c @@ -0,0 +1,2686 @@ +/** + * @file + * Transmission Control Protocol for IP + * See also @ref tcp_raw + * + * @defgroup tcp_raw TCP + * @ingroup callbackstyle_api + * Transmission Control Protocol for IP\n + * @see @ref api + * + * Common functions for the TCP implementation, such as functions + * for manipulating the data structures and the TCP timer functions. TCP functions + * related to input and output is found in tcp_in.c and tcp_out.c respectively.\n + * + * TCP connection setup + * -------------------- + * The functions used for setting up connections is similar to that of + * the sequential API and of the BSD socket API. A new TCP connection + * identifier (i.e., a protocol control block - PCB) is created with the + * tcp_new() function. This PCB can then be either set to listen for new + * incoming connections or be explicitly connected to another host. + * - tcp_new() + * - tcp_bind() + * - tcp_listen() and tcp_listen_with_backlog() + * - tcp_accept() + * - tcp_connect() + * + * Sending TCP data + * ---------------- + * TCP data is sent by enqueueing the data with a call to tcp_write() and + * triggering to send by calling tcp_output(). When the data is successfully + * transmitted to the remote host, the application will be notified with a + * call to a specified callback function. + * - tcp_write() + * - tcp_output() + * - tcp_sent() + * + * Receiving TCP data + * ------------------ + * TCP data reception is callback based - an application specified + * callback function is called when new data arrives. 
When the + * application has taken the data, it has to call the tcp_recved() + * function to indicate that TCP can advertise increase the receive + * window. + * - tcp_recv() + * - tcp_recved() + * + * Application polling + * ------------------- + * When a connection is idle (i.e., no data is either transmitted or + * received), lwIP will repeatedly poll the application by calling a + * specified callback function. This can be used either as a watchdog + * timer for killing connections that have stayed idle for too long, or + * as a method of waiting for memory to become available. For instance, + * if a call to tcp_write() has failed because memory wasn't available, + * the application may use the polling functionality to call tcp_write() + * again when the connection has been idle for a while. + * - tcp_poll() + * + * Closing and aborting connections + * -------------------------------- + * - tcp_close() + * - tcp_abort() + * - tcp_err() + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/tcp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/debug.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/nd6.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#ifndef TCP_LOCAL_PORT_RANGE_START +/* From http://www.iana.org/assignments/port-numbers: + "The Dynamic and/or Private Ports are those from 49152 through 65535" */ +#define TCP_LOCAL_PORT_RANGE_START 0xc000 +#define TCP_LOCAL_PORT_RANGE_END 0xffff +#define TCP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~TCP_LOCAL_PORT_RANGE_START) + TCP_LOCAL_PORT_RANGE_START)) +#endif + +#if LWIP_TCP_KEEPALIVE +#define TCP_KEEP_DUR(pcb) ((pcb)->keep_cnt * (pcb)->keep_intvl) +#define TCP_KEEP_INTVL(pcb) ((pcb)->keep_intvl) +#else /* LWIP_TCP_KEEPALIVE */ +#define TCP_KEEP_DUR(pcb) TCP_MAXIDLE +#define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT +#endif /* LWIP_TCP_KEEPALIVE */ + +/* As initial send MSS, we use TCP_MSS but limit it to 536. */ +#if TCP_MSS > 536 +#define INITIAL_MSS 536 +#else +#define INITIAL_MSS TCP_MSS +#endif + +static const char *const tcp_state_str[] = { + "CLOSED", + "LISTEN", + "SYN_SENT", + "SYN_RCVD", + "ESTABLISHED", + "FIN_WAIT_1", + "FIN_WAIT_2", + "CLOSE_WAIT", + "CLOSING", + "LAST_ACK", + "TIME_WAIT" +}; + +/* last local TCP port */ +static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START; + +/* Incremented every coarse grained timer shot (typically every 500 ms). */ +u32_t tcp_ticks; +static const u8_t tcp_backoff[13] = +{ 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7}; +/* Times per slowtmr hits */ +static const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 }; + +/* The TCP PCB lists. */ + +/** List of all TCP PCBs bound but not yet (connected || listening) */ +struct tcp_pcb *tcp_bound_pcbs; +/** List of all TCP PCBs in LISTEN state */ +union tcp_listen_pcbs_t tcp_listen_pcbs; +/** List of all TCP PCBs that are in a state in which + * they accept or send data. */ +struct tcp_pcb *tcp_active_pcbs; +/** List of all TCP PCBs in TIME-WAIT state */ +struct tcp_pcb *tcp_tw_pcbs; + +/** An array with all (non-temporary) PCB lists, mainly used for smaller code size */ +struct tcp_pcb **const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs, + &tcp_active_pcbs, &tcp_tw_pcbs +}; + +u8_t tcp_active_pcbs_changed; + +/** Timer counter to handle calling slow-timer from tcp_tmr() */ +static u8_t tcp_timer; +static u8_t tcp_timer_ctr; +static u16_t tcp_new_port(void); + +static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb); +#if LWIP_TCP_PCB_NUM_EXT_ARGS +static void tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args); +#endif + +/** + * Initialize this module. 
+ */ +void +tcp_init(void) +{ +#ifdef LWIP_RAND + tcp_port = TCP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RAND */ +} + +/** Free a tcp pcb */ +void +tcp_free(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_free: LISTEN", pcb->state != LISTEN); +#if LWIP_TCP_PCB_NUM_EXT_ARGS + tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args); +#endif + memp_free(MEMP_TCP_PCB, pcb); +} + +/** Free a tcp listen pcb */ +static void +tcp_free_listen(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_free_listen: !LISTEN", pcb->state != LISTEN); +#if LWIP_TCP_PCB_NUM_EXT_ARGS + tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args); +#endif + memp_free(MEMP_TCP_PCB_LISTEN, pcb); +} + +/** + * Called periodically to dispatch TCP timers. + */ +void +tcp_tmr(void) +{ + /* Call tcp_fasttmr() every 250 ms */ + tcp_fasttmr(); + + if (++tcp_timer & 1) { + /* Call tcp_slowtmr() every 500 ms, i.e., every other timer + tcp_tmr() is called. */ + tcp_slowtmr(); + } +} + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG +/** Called when a listen pcb is closed. Iterates one pcb list and removes the + * closed listener pcb from pcb->listener if matching. + */ +static void +tcp_remove_listener(struct tcp_pcb *list, struct tcp_pcb_listen *lpcb) +{ + struct tcp_pcb *pcb; + + LWIP_ASSERT("tcp_remove_listener: invalid listener", lpcb != NULL); + + for (pcb = list; pcb != NULL; pcb = pcb->next) { + if (pcb->listener == lpcb) { + pcb->listener = NULL; + } + } +} +#endif + +/** Called when a listen pcb is closed. Iterates all pcb lists and removes the + * closed listener pcb from pcb->listener if matching. + */ +static void +tcp_listen_closed(struct tcp_pcb *pcb) +{ +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + size_t i; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("pcb->state == LISTEN", pcb->state == LISTEN); + for (i = 1; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) { + tcp_remove_listener(*tcp_pcb_lists[i], (struct tcp_pcb_listen *)pcb); + } +#endif + LWIP_UNUSED_ARG(pcb); +} + +#if TCP_LISTEN_BACKLOG +/** @ingroup tcp_raw + * Delay accepting a connection in respect to the listen backlog: + * the number of outstanding connections is increased until + * tcp_backlog_accepted() is called. + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! + * + * @param pcb the connection pcb which is not fully accepted yet + */ +void +tcp_backlog_delayed(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb->flags & TF_BACKLOGPEND) == 0) { + if (pcb->listener != NULL) { + pcb->listener->accepts_pending++; + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + tcp_set_flags(pcb, TF_BACKLOGPEND); + } + } +} + +/** @ingroup tcp_raw + * A delayed-accept a connection is accepted (or closed/aborted): decreases + * the number of outstanding connections after calling tcp_backlog_delayed(). + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! 
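A minimal sketch of the delayed-backlog pattern described above, assuming TCP_LISTEN_BACKLOG is enabled in lwipopts.h; deferred_accept() is an illustrative name, not part of the vendored sources.

/* Sketch only: keep the listener's backlog slot occupied until a worker
 * actually takes ownership of the new connection. */
#include "lwip/tcp.h"

static err_t
deferred_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if ((err != ERR_OK) || (newpcb == NULL)) {
    return ERR_VAL;
  }
  tcp_backlog_delayed(newpcb);
  /* ... queue newpcb for a worker; that worker must call
   * tcp_backlog_accepted(newpcb) exactly once when it starts servicing the
   * connection (or before closing/aborting it), or the backlog accounting
   * gets out of sync, as the ATTENTION note above warns. */
  return ERR_OK;
}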
+ * + * @param pcb the connection pcb which is now fully accepted (or closed/aborted) + */ +void +tcp_backlog_accepted(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb->flags & TF_BACKLOGPEND) != 0) { + if (pcb->listener != NULL) { + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + pcb->listener->accepts_pending--; + tcp_clear_flags(pcb, TF_BACKLOGPEND); + } + } +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * Closes the TX side of a connection held by the PCB. + * For tcp_close(), a RST is sent if the application didn't receive all data + * (tcp_recved() not called for all data passed to recv callback). + * + * Listening pcbs are freed and may not be referenced any more. + * Connection pcbs are freed if not yet connected and may not be referenced + * any more. If a connection is established (at least SYN received or in + * a closing state), the connection is closed, and put in a closing state. + * The pcb is then automatically freed in tcp_slowtmr(). It is therefore + * unsafe to reference it. + * + * @param pcb the tcp_pcb to close + * @return ERR_OK if connection has been closed + * another err_t if closing failed and pcb is not freed + */ +static err_t +tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data) +{ + LWIP_ASSERT("tcp_close_shutdown: invalid pcb", pcb != NULL); + + if (rst_on_unacked_data && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) { + if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND_MAX(pcb))) { + /* Not all data received by application, send RST to tell the remote + side about this. */ + LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED); + + /* don't call tcp_abort here: we must not deallocate the pcb since + that might not be expected when calling tcp_close */ + tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + /* Deallocate the pcb since we already sent a RST for it */ + if (tcp_input_pcb == pcb) { + /* prevent using a deallocated pcb: free it from tcp_input later */ + tcp_trigger_input_pcb_close(); + } else { + tcp_free(pcb); + } + return ERR_OK; + } + } + + /* - states which free the pcb are handled here, + - states which send FIN and change state are handled in tcp_close_shutdown_fin() */ + switch (pcb->state) { + case CLOSED: + /* Closing a pcb in the CLOSED state might seem erroneous, + * however, it is in this state once allocated and as yet unused + * and the user needs some way to free it should the need arise. + * Calling tcp_close() with a pcb that has already been closed, (i.e. twice) + * or for a pcb that has been used and then entered the CLOSED state + * is erroneous, but this should never happen as the pcb has in those cases + * been freed, and so any remaining handles are bogus. 
*/ + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + tcp_free(pcb); + break; + case LISTEN: + tcp_listen_closed(pcb); + tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb); + tcp_free_listen(pcb); + break; + case SYN_SENT: + TCP_PCB_REMOVE_ACTIVE(pcb); + tcp_free(pcb); + MIB2_STATS_INC(mib2.tcpattemptfails); + break; + default: + return tcp_close_shutdown_fin(pcb); + } + return ERR_OK; +} + +static err_t +tcp_close_shutdown_fin(struct tcp_pcb *pcb) +{ + err_t err; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + + switch (pcb->state) { + case SYN_RCVD: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + tcp_backlog_accepted(pcb); + MIB2_STATS_INC(mib2.tcpattemptfails); + pcb->state = FIN_WAIT_1; + } + break; + case ESTABLISHED: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + MIB2_STATS_INC(mib2.tcpestabresets); + pcb->state = FIN_WAIT_1; + } + break; + case CLOSE_WAIT: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + MIB2_STATS_INC(mib2.tcpestabresets); + pcb->state = LAST_ACK; + } + break; + default: + /* Has already been closed, do nothing. */ + return ERR_OK; + } + + if (err == ERR_OK) { + /* To ensure all data has been sent when tcp_close returns, we have + to make sure tcp_output doesn't fail. + Since we don't really have to ensure all data has been sent when tcp_close + returns (unsent data is sent from tcp timer functions, also), we don't care + for the return value of tcp_output for now. */ + tcp_output(pcb); + } else if (err == ERR_MEM) { + /* Mark this pcb for closing. Closing is retried from tcp_tmr. */ + tcp_set_flags(pcb, TF_CLOSEPEND); + /* We have to return ERR_OK from here to indicate to the callers that this + pcb should not be used any more as it will be freed soon via tcp_tmr. + This is OK here since sending FIN does not guarantee a time frime for + actually freeing the pcb, either (it is left in closure states for + remote ACK or timeout) */ + return ERR_OK; + } + return err; +} + +/** + * @ingroup tcp_raw + * Closes the connection held by the PCB. + * + * Listening pcbs are freed and may not be referenced any more. + * Connection pcbs are freed if not yet connected and may not be referenced + * any more. If a connection is established (at least SYN received or in + * a closing state), the connection is closed, and put in a closing state. + * The pcb is then automatically freed in tcp_slowtmr(). It is therefore + * unsafe to reference it (unless an error is returned). + * + * The function may return ERR_MEM if no memory + * was available for closing the connection. If so, the application + * should wait and try again either by using the acknowledgment + * callback or the polling functionality. If the close succeeds, the + * function returns ERR_OK. + * + * @param pcb the tcp_pcb to close + * @return ERR_OK if connection has been closed + * another err_t if closing failed and pcb is not freed + */ +err_t +tcp_close(struct tcp_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_close: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in ")); + + tcp_debug_print_state(pcb->state); + + if (pcb->state != LISTEN) { + /* Set a flag not to receive any more data... */ + tcp_set_flags(pcb, TF_RXCLOSED); + } + /* ... and close */ + return tcp_close_shutdown(pcb, 1); +} + +/** + * @ingroup tcp_raw + * Causes all or part of a full-duplex connection of this PCB to be shut down. + * This doesn't deallocate the PCB unless shutting down both sides! 
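A minimal sketch of the retry-on-ERR_MEM advice given for tcp_close() above, using the poll callback; close_or_retry() and retry_close() are illustrative names, and the interval of 4 means every 4 coarse TCP timer ticks (roughly 2 s).

/* Sketch only: retry a failed close from the poll callback. */
#include "lwip/tcp.h"

static err_t
retry_close(void *arg, struct tcp_pcb *tpcb)
{
  LWIP_UNUSED_ARG(arg);
  /* Once the pcb has entered a closing state this becomes a no-op returning
   * ERR_OK, so being polled again after a successful close is harmless. */
  return tcp_close(tpcb);
}

static void
close_or_retry(struct tcp_pcb *tpcb)
{
  if (tcp_close(tpcb) == ERR_MEM) {
    /* out of memory right now: have lwIP poll us so we can try again later */
    tcp_poll(tpcb, retry_close, 4);
  }
}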
+ * Shutting down both sides is the same as calling tcp_close, so if it succeds + * (i.e. returns ER_OK), the PCB must not be referenced any more! + * + * @param pcb PCB to shutdown + * @param shut_rx shut down receive side if this is != 0 + * @param shut_tx shut down send side if this is != 0 + * @return ERR_OK if shutdown succeeded (or the PCB has already been shut down) + * another err_t on error. + */ +err_t +tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_shutdown: invalid pcb", pcb != NULL, return ERR_ARG); + + if (pcb->state == LISTEN) { + return ERR_CONN; + } + if (shut_rx) { + /* shut down the receive side: set a flag not to receive any more data... */ + tcp_set_flags(pcb, TF_RXCLOSED); + if (shut_tx) { + /* shutting down the tx AND rx side is the same as closing for the raw API */ + return tcp_close_shutdown(pcb, 1); + } + /* ... and free buffered data */ + if (pcb->refused_data != NULL) { + pbuf_free(pcb->refused_data); + pcb->refused_data = NULL; + } + } + if (shut_tx) { + /* This can't happen twice since if it succeeds, the pcb's state is changed. + Only close in these states as the others directly deallocate the PCB */ + switch (pcb->state) { + case SYN_RCVD: + case ESTABLISHED: + case CLOSE_WAIT: + return tcp_close_shutdown(pcb, (u8_t)shut_rx); + default: + /* Not (yet?) connected, cannot shutdown the TX side as that would bring us + into CLOSED state, where the PCB is deallocated. */ + return ERR_CONN; + } + } + return ERR_OK; +} + +/** + * Abandons a connection and optionally sends a RST to the remote + * host. Deletes the local protocol control block. This is done when + * a connection is killed because of shortage of memory. + * + * @param pcb the tcp_pcb to abort + * @param reset boolean to indicate whether a reset should be sent + */ +void +tcp_abandon(struct tcp_pcb *pcb, int reset) +{ + u32_t seqno, ackno; +#if LWIP_CALLBACK_API + tcp_err_fn errf; +#endif /* LWIP_CALLBACK_API */ + void *errf_arg; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_abandon: invalid pcb", pcb != NULL, return); + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs", + pcb->state != LISTEN); + /* Figure out on which TCP PCB list we are, and remove us. If we + are in an active state, call the receive function associated with + the PCB with a NULL argument, and send an RST to the remote end. 
*/ + if (pcb->state == TIME_WAIT) { + tcp_pcb_remove(&tcp_tw_pcbs, pcb); + tcp_free(pcb); + } else { + int send_rst = 0; + u16_t local_port = 0; + enum tcp_state last_state; + seqno = pcb->snd_nxt; + ackno = pcb->rcv_nxt; +#if LWIP_CALLBACK_API + errf = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + errf_arg = pcb->callback_arg; + if (pcb->state == CLOSED) { + if (pcb->local_port != 0) { + /* bound, not yet opened */ + TCP_RMV(&tcp_bound_pcbs, pcb); + } + } else { + send_rst = reset; + local_port = pcb->local_port; + TCP_PCB_REMOVE_ACTIVE(pcb); + } + if (pcb->unacked != NULL) { + tcp_segs_free(pcb->unacked); + } + if (pcb->unsent != NULL) { + tcp_segs_free(pcb->unsent); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + tcp_segs_free(pcb->ooseq); + } +#endif /* TCP_QUEUE_OOSEQ */ + tcp_backlog_accepted(pcb); + if (send_rst) { + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n")); + tcp_rst(pcb, seqno, ackno, &pcb->local_ip, &pcb->remote_ip, local_port, pcb->remote_port); + } + last_state = pcb->state; + tcp_free(pcb); + TCP_EVENT_ERR(last_state, errf, errf_arg, ERR_ABRT); + } +} + +/** + * @ingroup tcp_raw + * Aborts the connection by sending a RST (reset) segment to the remote + * host. The pcb is deallocated. This function never fails. + * + * ATTENTION: When calling this from one of the TCP callbacks, make + * sure you always return ERR_ABRT (and never return ERR_ABRT otherwise + * or you will risk accessing deallocated memory or memory leaks! + * + * @param pcb the tcp pcb to abort + */ +void +tcp_abort(struct tcp_pcb *pcb) +{ + tcp_abandon(pcb, 1); +} + +/** + * @ingroup tcp_raw + * Binds the connection to a local port number and IP address. If the + * IP address is not given (i.e., ipaddr == IP_ANY_TYPE), the connection is + * bound to all local IP addresses. + * If another connection is bound to the same port, the function will + * return ERR_USE, otherwise ERR_OK is returned. + * + * @param pcb the tcp_pcb to bind (no check is done whether this pcb is + * already bound!) + * @param ipaddr the local ip address to bind to (use IPx_ADDR_ANY to bind + * to any local address + * @param port the local port to bind to + * @return ERR_USE if the port is already in use + * ERR_VAL if bind failed because the PCB is not in a valid state + * ERR_OK if bound + */ +err_t +tcp_bind(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + int i; + int max_pcb_list = NUM_TCP_PCB_LISTS; + struct tcp_pcb *cpcb; +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + ip_addr_t zoned_ipaddr; +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY; + } +#else /* LWIP_IPV4 */ + LWIP_ERROR("tcp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG); +#endif /* LWIP_IPV4 */ + + LWIP_ERROR("tcp_bind: invalid pcb", pcb != NULL, return ERR_ARG); + + LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return ERR_VAL); + +#if SO_REUSE + /* Unless the REUSEADDR flag is set, + we have to check the pcbs in TIME-WAIT state, also. + We do not dump TIME_WAIT pcb's; they can still be matched by incoming + packets using both local and remote IP addresses and ports to distinguish. + */ + if (ip_get_option(pcb, SOF_REUSEADDR)) { + max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT; + } +#endif /* SO_REUSE */ + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. 
+ * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. Do the zone selection before the address-in-use + * check below; as such we have to make a temporary copy of the address. */ + if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNICAST)) { + ip_addr_copy(zoned_ipaddr, *ipaddr); + ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr)); + ipaddr = &zoned_ipaddr; + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + if (port == 0) { + port = tcp_new_port(); + if (port == 0) { + return ERR_BUF; + } + } else { + /* Check if the address already is in use (on all lists) */ + for (i = 0; i < max_pcb_list; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if (cpcb->local_port == port) { +#if SO_REUSE + /* Omit checking for the same port if both pcbs have REUSEADDR set. + For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in + tcp_connect. */ + if (!ip_get_option(pcb, SOF_REUSEADDR) || + !ip_get_option(cpcb, SOF_REUSEADDR)) +#endif /* SO_REUSE */ + { + /* @todo: check accept_any_ip_version */ + if ((IP_IS_V6(ipaddr) == IP_IS_V6_VAL(cpcb->local_ip)) && + (ip_addr_isany(&cpcb->local_ip) || + ip_addr_isany(ipaddr) || + ip_addr_cmp(&cpcb->local_ip, ipaddr))) { + return ERR_USE; + } + } + } + } + } + } + + if (!ip_addr_isany(ipaddr) +#if LWIP_IPV4 && LWIP_IPV6 + || (IP_GET_TYPE(ipaddr) != IP_GET_TYPE(&pcb->local_ip)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ) { + ip_addr_set(&pcb->local_ip, ipaddr); + } + pcb->local_port = port; + TCP_REG(&tcp_bound_pcbs, pcb); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port)); + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Binds the connection to a netif and IP address. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb the tcp_pcb to bind. + * @param netif the netif to bind to. Can be NULL. + */ +void +tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +#if LWIP_CALLBACK_API +/** + * Default accept callback if no accept callback is specified by the user. + */ +static err_t +tcp_accept_null(void *arg, struct tcp_pcb *pcb, err_t err) +{ + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(err); + + LWIP_ASSERT("tcp_accept_null: invalid pcb", pcb != NULL); + + tcp_abort(pcb); + + return ERR_ABRT; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * When an incoming connection is accepted, the function specified with + * the tcp_accept() function will be called. The pcb has to be bound + * to a local port with the tcp_bind() function. + * + * The tcp_listen() function returns a new connection identifier, and + * the one passed as an argument to the function will be + * deallocated. The reason for this behavior is that less memory is + * needed for a connection that is listening, so tcp_listen() will + * reclaim the memory needed for the original connection and allocate a + * new smaller memory block for the listening connection. 
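A minimal sketch of the bind-then-listen pattern described above: on success the listening pcb replaces the original one, while on failure the original pcb stays valid and must still be freed by the caller. echo_* names and port 7 are illustrative, not part of the vendored sources.

/* Sketch only: create, bind and convert a pcb to the LISTEN state. */
#include "lwip/tcp.h"

static err_t
echo_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if ((err != ERR_OK) || (newpcb == NULL)) {
    return ERR_VAL;
  }
  /* register per-connection callbacks on newpcb here (tcp_recv, tcp_err, ...) */
  return ERR_OK;
}

void
echo_listen_init(void)
{
  struct tcp_pcb *tpcb = tcp_new();
  struct tcp_pcb *lpcb;

  if (tpcb == NULL) {
    return;
  }
  if (tcp_bind(tpcb, IP4_ADDR_ANY, 7) != ERR_OK) {
    tcp_close(tpcb);           /* freeing a CLOSED pcb is the documented way out */
    return;
  }
  lpcb = tcp_listen_with_backlog(tpcb, 1);
  if (lpcb == NULL) {
    /* out of memory: per the note above, tpcb was not deallocated */
    tcp_close(tpcb);
    return;
  }
  /* tpcb has been consumed; only the smaller listening pcb remains */
  tcp_accept(lpcb, echo_accept);
}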
+ * + * tcp_listen() may return NULL if no memory was available for the + * listening connection. If so, the memory associated with the pcb + * passed as an argument to tcp_listen() will not be deallocated. + * + * The backlog limits the number of outstanding connections + * in the listen queue to the value specified by the backlog argument. + * To use it, your need to set TCP_LISTEN_BACKLOG=1 in your lwipopts.h. + * + * @param pcb the original tcp_pcb + * @param backlog the incoming connections queue limit + * @return tcp_pcb used for listening, consumes less memory. + * + * @note The original tcp_pcb is freed. This function therefore has to be + * called like this: + * tpcb = tcp_listen_with_backlog(tpcb, backlog); + */ +struct tcp_pcb * +tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog) +{ + LWIP_ASSERT_CORE_LOCKED(); + return tcp_listen_with_backlog_and_err(pcb, backlog, NULL); +} + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * + * @param pcb the original tcp_pcb + * @param backlog the incoming connections queue limit + * @param err when NULL is returned, this contains the error reason + * @return tcp_pcb used for listening, consumes less memory. + * + * @note The original tcp_pcb is freed. This function therefore has to be + * called like this: + * tpcb = tcp_listen_with_backlog_and_err(tpcb, backlog, &err); + */ +struct tcp_pcb * +tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err) +{ + struct tcp_pcb_listen *lpcb = NULL; + err_t res; + + LWIP_UNUSED_ARG(backlog); + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_listen_with_backlog_and_err: invalid pcb", pcb != NULL, res = ERR_ARG; goto done); + LWIP_ERROR("tcp_listen_with_backlog_and_err: pcb already connected", pcb->state == CLOSED, res = ERR_CLSD; goto done); + + /* already listening? */ + if (pcb->state == LISTEN) { + lpcb = (struct tcp_pcb_listen *)pcb; + res = ERR_ALREADY; + goto done; + } +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address before the pcb's usage + is declared (listen-/connection-pcb), we have to make sure now that + this port is only used once for every local IP. 
*/ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + if ((lpcb->local_port == pcb->local_port) && + ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) { + /* this address/port is already used */ + lpcb = NULL; + res = ERR_USE; + goto done; + } + } + } +#endif /* SO_REUSE */ + lpcb = (struct tcp_pcb_listen *)memp_malloc(MEMP_TCP_PCB_LISTEN); + if (lpcb == NULL) { + res = ERR_MEM; + goto done; + } + lpcb->callback_arg = pcb->callback_arg; + lpcb->local_port = pcb->local_port; + lpcb->state = LISTEN; + lpcb->prio = pcb->prio; + lpcb->so_options = pcb->so_options; + lpcb->netif_idx = NETIF_NO_INDEX; + lpcb->ttl = pcb->ttl; + lpcb->tos = pcb->tos; +#if LWIP_IPV4 && LWIP_IPV6 + IP_SET_TYPE_VAL(lpcb->remote_ip, pcb->local_ip.type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ip_addr_copy(lpcb->local_ip, pcb->local_ip); + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } +#if LWIP_TCP_PCB_NUM_EXT_ARGS + /* copy over ext_args to listening pcb */ + memcpy(&lpcb->ext_args, &pcb->ext_args, sizeof(pcb->ext_args)); +#endif + tcp_free(pcb); +#if LWIP_CALLBACK_API + lpcb->accept = tcp_accept_null; +#endif /* LWIP_CALLBACK_API */ +#if TCP_LISTEN_BACKLOG + lpcb->accepts_pending = 0; + tcp_backlog_set(lpcb, backlog); +#endif /* TCP_LISTEN_BACKLOG */ + TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb); + res = ERR_OK; +done: + if (err != NULL) { + *err = res; + } + return (struct tcp_pcb *)lpcb; +} + +/** + * Update the state that tracks the available window space to advertise. + * + * Returns how much extra window would be advertised if we sent an + * update now. + */ +u32_t +tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb) +{ + u32_t new_right_edge; + + LWIP_ASSERT("tcp_update_rcv_ann_wnd: invalid pcb", pcb != NULL); + new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd; + + if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) { + /* we can advertise more window */ + pcb->rcv_ann_wnd = pcb->rcv_wnd; + return new_right_edge - pcb->rcv_ann_right_edge; + } else { + if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) { + /* Can happen due to other end sending out of advertised window, + * but within actual available (but not yet advertised) window */ + pcb->rcv_ann_wnd = 0; + } else { + /* keep the right edge of window constant */ + u32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt; +#if !LWIP_WND_SCALE + LWIP_ASSERT("new_rcv_ann_wnd <= 0xffff", new_rcv_ann_wnd <= 0xffff); +#endif + pcb->rcv_ann_wnd = (tcpwnd_size_t)new_rcv_ann_wnd; + } + return 0; + } +} + +/** + * @ingroup tcp_raw + * This function should be called by the application when it has + * processed the data. The purpose is to advertise a larger window + * when the data has been processed. 
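+ *
+ * A typical call site is the application's recv callback, once the payload
+ * has been consumed (sketch; my_recv_cb is a hypothetical callback
+ * registered via tcp_recv()):
+ * @code
+ *   static err_t my_recv_cb(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     LWIP_UNUSED_ARG(err);
+ *     if (p == NULL) {
+ *       return tcp_close(tpcb);      /- remote side closed the connection
+ *     }
+ *     /- ... process p->payload (possibly a pbuf chain) ...
+ *     tcp_recved(tpcb, p->tot_len);  /- re-open the receive window
+ *     pbuf_free(p);
+ *     return ERR_OK;
+ *   }
+ * @endcode
+ * (In the sketch above, "/-" marks explanatory notes, since nested block
+ * comments are not possible here.)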
+ * + * @param pcb the tcp_pcb for which data is read + * @param len the amount of bytes that have been read by the application + */ +void +tcp_recved(struct tcp_pcb *pcb, u16_t len) +{ + u32_t wnd_inflation; + tcpwnd_size_t rcv_wnd; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_recved: invalid pcb", pcb != NULL, return); + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_recved for listen-pcbs", + pcb->state != LISTEN); + + rcv_wnd = (tcpwnd_size_t)(pcb->rcv_wnd + len); + if ((rcv_wnd > TCP_WND_MAX(pcb)) || (rcv_wnd < pcb->rcv_wnd)) { + /* window got too big or tcpwnd_size_t overflow */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: window got too big or tcpwnd_size_t overflow\n")); + pcb->rcv_wnd = TCP_WND_MAX(pcb); + } else { + pcb->rcv_wnd = rcv_wnd; + } + + wnd_inflation = tcp_update_rcv_ann_wnd(pcb); + + /* If the change in the right edge of window is significant (default + * watermark is TCP_WND/4), then send an explicit update now. + * Otherwise wait for a packet to be sent in the normal course of + * events (or more window to be available later) */ + if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: received %"U16_F" bytes, wnd %"TCPWNDSIZE_F" (%"TCPWNDSIZE_F").\n", + len, pcb->rcv_wnd, (u16_t)(TCP_WND_MAX(pcb) - pcb->rcv_wnd))); +} + +/** + * Allocate a new local TCP port. + * + * @return a new (free) local TCP port number + */ +static u16_t +tcp_new_port(void) +{ + u8_t i; + u16_t n = 0; + struct tcp_pcb *pcb; + +again: + tcp_port++; + if (tcp_port == TCP_LOCAL_PORT_RANGE_END) { + tcp_port = TCP_LOCAL_PORT_RANGE_START; + } + /* Check all PCB lists. */ + for (i = 0; i < NUM_TCP_PCB_LISTS; i++) { + for (pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == tcp_port) { + n++; + if (n > (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + } + return tcp_port; +} + +/** + * @ingroup tcp_raw + * Connects to another host. The function given as the "connected" + * argument will be called when the connection has been established. + * Sets up the pcb to connect to the remote host and sends the + * initial SYN segment which opens the connection. + * + * The tcp_connect() function returns immediately; it does not wait for + * the connection to be properly setup. Instead, it will call the + * function specified as the fourth argument (the "connected" argument) + * when the connection is established. If the connection could not be + * properly established, either because the other host refused the + * connection or because the other host didn't answer, the "err" + * callback function of this pcb (registered with tcp_err, see below) + * will be called. + * + * The tcp_connect() function can return ERR_MEM if no memory is + * available for enqueueing the SYN segment. If the SYN indeed was + * enqueued successfully, the tcp_connect() function returns ERR_OK. 
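+ *
+ * Minimal sketch of an active open (my_connected_cb, my_open and the
+ * destination address are placeholders, error handling trimmed):
+ * @code
+ *   static err_t my_connected_cb(void *arg, struct tcp_pcb *tpcb, err_t err)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     LWIP_UNUSED_ARG(err);
+ *     return ERR_OK;   // connection established; tcp_write()/tcp_output() may follow
+ *   }
+ *
+ *   void my_open(void)
+ *   {
+ *     ip_addr_t dst;
+ *     struct tcp_pcb *tpcb = tcp_new();
+ *     if ((tpcb != NULL) && ipaddr_aton("192.0.2.10", &dst)) {
+ *       err_t err = tcp_connect(tpcb, &dst, 80, my_connected_cb);
+ *       (void)err;     // err != ERR_OK: the SYN could not be enqueued (e.g. ERR_MEM)
+ *     }
+ *   }
+ * @endcode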
+ * + * @param pcb the tcp_pcb used to establish the connection + * @param ipaddr the remote ip address to connect to + * @param port the remote tcp port to connect to + * @param connected callback function to call when connected (on error, + the err calback will be called) + * @return ERR_VAL if invalid arguments are given + * ERR_OK if connect request has been sent + * other err_t values if connect request couldn't be sent + */ +err_t +tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port, + tcp_connected_fn connected) +{ + struct netif *netif = NULL; + err_t ret; + u32_t iss; + u16_t old_local_port; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_connect: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("tcp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG); + + LWIP_ERROR("tcp_connect: can only connect from state CLOSED", pcb->state == CLOSED, return ERR_ISCONN); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port)); + ip_addr_set(&pcb->remote_ip, ipaddr); + pcb->remote_port = port; + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { + /* check if we have a route to the remote host */ + netif = ip_route(&pcb->local_ip, &pcb->remote_ip); + } + if (netif == NULL) { + /* Don't even try to send a SYN packet if we have no route since that will fail. */ + return ERR_RTE; + } + + /* check if local IP has been assigned to pcb, if not, get one */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, ipaddr); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * Given that we already have the target netif, this is easy and cheap. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST)) { + ip6_addr_assign_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST, netif); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + old_local_port = pcb->local_port; + if (pcb->local_port == 0) { + pcb->local_port = tcp_new_port(); + if (pcb->local_port == 0) { + return ERR_BUF; + } + } else { +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address, we have to make sure + now that the 5-tuple is unique. */ + struct tcp_pcb *cpcb; + int i; + /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */ + for (i = 2; i < NUM_TCP_PCB_LISTS; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if ((cpcb->local_port == pcb->local_port) && + (cpcb->remote_port == port) && + ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) && + ip_addr_cmp(&cpcb->remote_ip, ipaddr)) { + /* linux returns EISCONN here, but ERR_USE should be OK for us */ + return ERR_USE; + } + } + } + } +#endif /* SO_REUSE */ + } + + iss = tcp_next_iss(pcb); + pcb->rcv_nxt = 0; + pcb->snd_nxt = iss; + pcb->lastack = iss - 1; + pcb->snd_wl2 = iss - 1; + pcb->snd_lbb = iss - 1; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->snd_wnd = TCP_WND; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. 
*/ + pcb->mss = INITIAL_MSS; +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss_netif(pcb->mss, netif, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + pcb->cwnd = 1; +#if LWIP_CALLBACK_API + pcb->connected = connected; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(connected); +#endif /* LWIP_CALLBACK_API */ + + /* Send a SYN together with the MSS option. */ + ret = tcp_enqueue_flags(pcb, TCP_SYN); + if (ret == ERR_OK) { + /* SYN segment was enqueued, changed the pcbs state now */ + pcb->state = SYN_SENT; + if (old_local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + TCP_REG_ACTIVE(pcb); + MIB2_STATS_INC(mib2.tcpactiveopens); + + tcp_output(pcb); + } + return ret; +} + +/** + * Called every 500 ms and implements the retransmission timer and the timer that + * removes PCBs that have been in TIME-WAIT for enough time. It also increments + * various timers such as the inactivity timer in each PCB. + * + * Automatically called from tcp_tmr(). + */ +void +tcp_slowtmr(void) +{ + struct tcp_pcb *pcb, *prev; + tcpwnd_size_t eff_wnd; + u8_t pcb_remove; /* flag if a PCB should be removed */ + u8_t pcb_reset; /* flag if a RST should be sent when removing */ + err_t err; + + err = ERR_OK; + + ++tcp_ticks; + ++tcp_timer_ctr; + +tcp_slowtmr_start: + /* Steps through all of the active PCBs. */ + prev = NULL; + pcb = tcp_active_pcbs; + if (pcb == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n")); + } + while (pcb != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n")); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT); + if (pcb->last_timer == tcp_timer_ctr) { + /* skip this pcb, we have already processed it */ + prev = pcb; + pcb = pcb->next; + continue; + } + pcb->last_timer = tcp_timer_ctr; + + pcb_remove = 0; + pcb_reset = 0; + + if (pcb->state == SYN_SENT && pcb->nrtx >= TCP_SYNMAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n")); + } else if (pcb->nrtx >= TCP_MAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n")); + } else { + if (pcb->persist_backoff > 0) { + LWIP_ASSERT("tcp_slowtimr: persist ticking with in-flight data", pcb->unacked == NULL); + LWIP_ASSERT("tcp_slowtimr: persist ticking with empty send buffer", pcb->unsent != NULL); + if (pcb->persist_probe >= TCP_MAXRTX) { + ++pcb_remove; /* max probes reached */ + } else { + u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff - 1]; + if (pcb->persist_cnt < backoff_cnt) { + pcb->persist_cnt++; + } + if (pcb->persist_cnt >= backoff_cnt) { + int next_slot = 1; /* increment timer to next slot */ + /* If snd_wnd is zero, send 1 byte probes */ + if (pcb->snd_wnd == 0) { + if (tcp_zero_window_probe(pcb) != ERR_OK) { + next_slot = 0; /* try probe again with current slot */ + } + /* snd_wnd not fully closed, split unsent head and fill window */ + } else { + if (tcp_split_unsent_seg(pcb, (u16_t)pcb->snd_wnd) == ERR_OK) { + if (tcp_output(pcb) == ERR_OK) { + /* sending will cancel persist timer, else retry with current slot */ + next_slot = 0; + } + } + } + if (next_slot) { + pcb->persist_cnt = 0; + if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) { + pcb->persist_backoff++; + } + } + } + } + } else { + /* Increase the retransmission timer if it is running */ + if ((pcb->rtime >= 0) && 
(pcb->rtime < 0x7FFF)) { + ++pcb->rtime; + } + + if (pcb->rtime >= pcb->rto) { + /* Time for a retransmission. */ + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F + " pcb->rto %"S16_F"\n", + pcb->rtime, pcb->rto)); + /* If prepare phase fails but we have unsent data but no unacked data, + still execute the backoff calculations below, as this means we somehow + failed to send segment. */ + if ((tcp_rexmit_rto_prepare(pcb) == ERR_OK) || ((pcb->unacked == NULL) && (pcb->unsent != NULL))) { + /* Double retransmission time-out unless we are trying to + * connect to somebody (i.e., we are in SYN_SENT). */ + if (pcb->state != SYN_SENT) { + u8_t backoff_idx = LWIP_MIN(pcb->nrtx, sizeof(tcp_backoff) - 1); + int calc_rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[backoff_idx]; + pcb->rto = (s16_t)LWIP_MIN(calc_rto, 0x7FFF); + } + + /* Reset the retransmission timer. */ + pcb->rtime = 0; + + /* Reduce congestion window and ssthresh. */ + eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd); + pcb->ssthresh = eff_wnd >> 1; + if (pcb->ssthresh < (tcpwnd_size_t)(pcb->mss << 1)) { + pcb->ssthresh = (tcpwnd_size_t)(pcb->mss << 1); + } + pcb->cwnd = pcb->mss; + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + pcb->bytes_acked = 0; + + /* The following needs to be called AFTER cwnd is set to one + mss - STJ */ + tcp_rexmit_rto_commit(pcb); + } + } + } + } + /* Check if this PCB has stayed too long in FIN-WAIT-2 */ + if (pcb->state == FIN_WAIT_2) { + /* If this PCB is in FIN_WAIT_2 because of SHUT_WR don't let it time out. */ + if (pcb->flags & TF_RXCLOSED) { + /* PCB was fully closed (either through close() or SHUT_RDWR): + normal FIN-WAIT timeout handling. */ + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n")); + } + } + } + + /* Check if KEEPALIVE should be sent */ + if (ip_get_option(pcb, SOF_KEEPALIVE) && + ((pcb->state == ESTABLISHED) || + (pcb->state == CLOSE_WAIT))) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + TCP_KEEP_DUR(pcb)) / TCP_SLOW_INTERVAL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. Aborting connection to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + ++pcb_remove; + ++pcb_reset; + } else if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEP_INTVL(pcb)) + / TCP_SLOW_INTERVAL) { + err = tcp_keepalive(pcb); + if (err == ERR_OK) { + pcb->keep_cnt_sent++; + } + } + } + + /* If this PCB has queued out of sequence data, but has been + inactive for too long, will drop the data (it will eventually + be retransmitted). 
*/ +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL && + (tcp_ticks - pcb->tmr >= (u32_t)pcb->rto * TCP_OOSEQ_TIMEOUT)) { + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n")); + tcp_free_ooseq(pcb); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Check if this PCB has stayed too long in SYN-RCVD */ + if (pcb->state == SYN_RCVD) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n")); + } + } + + /* Check if this PCB has stayed too long in LAST-ACK */ + if (pcb->state == LAST_ACK) { + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n")); + } + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; +#if LWIP_CALLBACK_API + tcp_err_fn err_fn = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + void *err_arg; + enum tcp_state last_state; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_active_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb); + tcp_active_pcbs = pcb->next; + } + + if (pcb_reset) { + tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + } + + err_arg = pcb->callback_arg; + last_state = pcb->state; + pcb2 = pcb; + pcb = pcb->next; + tcp_free(pcb2); + + tcp_active_pcbs_changed = 0; + TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_ABRT); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + } else { + /* get the 'next' element now and work with 'prev' below (in case of abort) */ + prev = pcb; + pcb = pcb->next; + + /* We check if we should poll the connection. */ + ++prev->polltmr; + if (prev->polltmr >= prev->pollinterval) { + prev->polltmr = 0; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n")); + tcp_active_pcbs_changed = 0; + TCP_EVENT_POLL(prev, err); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + /* if err == ERR_ABRT, 'prev' is already deallocated */ + if (err == ERR_OK) { + tcp_output(prev); + } + } + } + } + + + /* Steps through all of the TIME-WAIT PCBs. */ + prev = NULL; + pcb = tcp_tw_pcbs; + while (pcb != NULL) { + LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + pcb_remove = 0; + + /* Check if this PCB has stayed long enough in TIME-WAIT */ + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_tw_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb); + tcp_tw_pcbs = pcb->next; + } + pcb2 = pcb; + pcb = pcb->next; + tcp_free(pcb2); + } else { + prev = pcb; + pcb = pcb->next; + } + } +} + +/** + * Is called every TCP_FAST_INTERVAL (250 ms) and process data previously + * "refused" by upper layer (application) and sends delayed ACKs or pending FINs. + * + * Automatically called from tcp_tmr(). 
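+ *
+ * In a NO_SYS / bare-metal setup that does not use the lwIP timeout module,
+ * the application can drive both TCP timers itself by calling tcp_tmr()
+ * roughly every TCP_TMR_INTERVAL (250 ms). Sketch (last_tcp_time is a
+ * hypothetical bookkeeping variable):
+ * @code
+ *   if ((u32_t)(sys_now() - last_tcp_time) >= TCP_TMR_INTERVAL) {
+ *     last_tcp_time = sys_now();
+ *     tcp_tmr();   // runs tcp_fasttmr() and, on every other call, tcp_slowtmr()
+ *   }
+ * @endcode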
+ */ +void +tcp_fasttmr(void) +{ + struct tcp_pcb *pcb; + + ++tcp_timer_ctr; + +tcp_fasttmr_start: + pcb = tcp_active_pcbs; + + while (pcb != NULL) { + if (pcb->last_timer != tcp_timer_ctr) { + struct tcp_pcb *next; + pcb->last_timer = tcp_timer_ctr; + /* send delayed ACKs */ + if (pcb->flags & TF_ACK_DELAY) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: delayed ACK\n")); + tcp_ack_now(pcb); + tcp_output(pcb); + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: pending FIN\n")); + tcp_clear_flags(pcb, TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + + next = pcb->next; + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + tcp_active_pcbs_changed = 0; + tcp_process_refused_data(pcb); + if (tcp_active_pcbs_changed) { + /* application callback has changed the pcb list: restart the loop */ + goto tcp_fasttmr_start; + } + } + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** Call tcp_output for all active pcbs that have TF_NAGLEMEMERR set */ +void +tcp_txnow(void) +{ + struct tcp_pcb *pcb; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->flags & TF_NAGLEMEMERR) { + tcp_output(pcb); + } + } +} + +/** Pass pcb->refused_data to the recv callback */ +err_t +tcp_process_refused_data(struct tcp_pcb *pcb) +{ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + struct pbuf *rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ERROR("tcp_process_refused_data: invalid pcb", pcb != NULL, return ERR_ARG); + +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (pcb->refused_data != NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + { + err_t err; + u8_t refused_flags = pcb->refused_data->flags; + /* set pcb->refused_data to NULL in case the callback frees it and then + closes the pcb */ + struct pbuf *refused_data = pcb->refused_data; +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + pbuf_split_64k(refused_data, &rest); + pcb->refused_data = rest; +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = NULL; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + /* Notify again application with data previously received. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: notify kept packet\n")); + TCP_EVENT_RECV(pcb, refused_data, ERR_OK, err); + if (err == ERR_OK) { + /* did refused_data include a FIN? */ + if ((refused_flags & PBUF_FLAG_TCP_FIN) +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + && (rest == NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + ) { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + } + } else if (err == ERR_ABRT) { + /* if err == ERR_ABRT, 'pcb' is already deallocated */ + /* Drop incoming packets because pcb is "full" (only if the incoming + segment contains data). */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: drop incoming packets, because pcb is \"full\"\n")); + return ERR_ABRT; + } else { + /* data is still refused, pbuf is still valid (go on for ACK-only packets) */ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(refused_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = refused_data; + return ERR_INPROGRESS; + } + } + return ERR_OK; +} + +/** + * Deallocates a list of TCP segments (tcp_seg structures). 
+ * + * @param seg tcp_seg list of TCP segments to free + */ +void +tcp_segs_free(struct tcp_seg *seg) +{ + while (seg != NULL) { + struct tcp_seg *next = seg->next; + tcp_seg_free(seg); + seg = next; + } +} + +/** + * Frees a TCP segment (tcp_seg structure). + * + * @param seg single tcp_seg to free + */ +void +tcp_seg_free(struct tcp_seg *seg) +{ + if (seg != NULL) { + if (seg->p != NULL) { + pbuf_free(seg->p); +#if TCP_DEBUG + seg->p = NULL; +#endif /* TCP_DEBUG */ + } + memp_free(MEMP_TCP_SEG, seg); + } +} + +/** + * @ingroup tcp + * Sets the priority of a connection. + * + * @param pcb the tcp_pcb to manipulate + * @param prio new priority + */ +void +tcp_setprio(struct tcp_pcb *pcb, u8_t prio) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_setprio: invalid pcb", pcb != NULL, return); + + pcb->prio = prio; +} + +#if TCP_QUEUE_OOSEQ +/** + * Returns a copy of the given TCP segment. + * The pbuf and data are not copied, only the pointers + * + * @param seg the old tcp_seg + * @return a copy of seg + */ +struct tcp_seg * +tcp_seg_copy(struct tcp_seg *seg) +{ + struct tcp_seg *cseg; + + LWIP_ASSERT("tcp_seg_copy: invalid seg", seg != NULL); + + cseg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG); + if (cseg == NULL) { + return NULL; + } + SMEMCPY((u8_t *)cseg, (const u8_t *)seg, sizeof(struct tcp_seg)); + pbuf_ref(cseg->p); + return cseg; +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if LWIP_CALLBACK_API +/** + * Default receive callback that is called if the user didn't register + * a recv callback for the pcb. + */ +err_t +tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + LWIP_UNUSED_ARG(arg); + + LWIP_ERROR("tcp_recv_null: invalid pcb", pcb != NULL, return ERR_ARG); + + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } else if (err == ERR_OK) { + return tcp_close(pcb); + } + return ERR_OK; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * Kills the oldest active connection that has a lower priority than 'prio'. + * + * @param prio minimum priority + */ +static void +tcp_kill_prio(u8_t prio) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + u8_t mprio; + + mprio = LWIP_MIN(TCP_PRIO_MAX, prio); + + /* We want to kill connections with a lower prio, so bail out if + * supplied prio is 0 - there can never be a lower prio + */ + if (mprio == 0) { + return; + } + + /* We only want kill connections with a lower prio, so decrement prio by one + * and start searching for oldest connection with same or lower priority than mprio. + * We want to find the connections with the lowest possible prio, and among + * these the one with the longest inactivity time. + */ + mprio--; + + inactivity = 0; + inactive = NULL; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + /* lower prio is always a kill candidate */ + if ((pcb->prio < mprio) || + /* longer inactivity is also a kill candidate */ + ((pcb->prio == mprio) && ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity))) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + mprio = pcb->prio; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/** + * Kills the oldest connection that is in specific state. + * Called from tcp_alloc() for LAST_ACK and CLOSING if no more connections are available. 
+ */ +static void +tcp_kill_state(enum tcp_state state) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + LWIP_ASSERT("invalid state", (state == CLOSING) || (state == LAST_ACK)); + + inactivity = 0; + inactive = NULL; + /* Go through the list of active pcbs and get the oldest pcb that is in state + CLOSING/LAST_ACK. */ + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->state == state) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_closing: killing oldest %s PCB %p (%"S32_F")\n", + tcp_state_str[state], (void *)inactive, inactivity)); + /* Don't send a RST, since no data is lost. */ + tcp_abandon(inactive, 0); + } +} + +/** + * Kills the oldest connection that is in TIME_WAIT state. + * Called from tcp_alloc() if no more connections are available. + */ +static void +tcp_kill_timewait(void) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + inactivity = 0; + inactive = NULL; + /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/* Called when allocating a pcb fails. + * In this case, we want to handle all pcbs that want to close first: if we can + * now send the FIN (which failed before), the pcb might be in a state that is + * OK for us to now free it. + */ +static void +tcp_handle_closepend(void) +{ + struct tcp_pcb *pcb = tcp_active_pcbs; + + while (pcb != NULL) { + struct tcp_pcb *next = pcb->next; + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_handle_closepend: pending FIN\n")); + tcp_clear_flags(pcb, TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + pcb = next; + } +} + +/** + * Allocate a new tcp_pcb structure. + * + * @param prio priority for the new pcb + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_alloc(u8_t prio) +{ + struct tcp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try to send FIN for all pcbs stuck in TF_CLOSEPEND first */ + tcp_handle_closepend(); + + /* Try killing oldest connection in TIME-WAIT. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest TIME-WAIT connection\n")); + tcp_kill_timewait(); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in LAST-ACK (these wouldn't go to TIME-WAIT). */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest LAST-ACK connection\n")); + tcp_kill_state(LAST_ACK); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in CLOSING. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest CLOSING connection\n")); + tcp_kill_state(CLOSING); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest active connection with lower priority than the new one. 
*/ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing oldest connection with prio lower than %d\n", prio)); + tcp_kill_prio(prio); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed above */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* zero out the whole pcb, so there is no need to initialize members to zero */ + memset(pcb, 0, sizeof(struct tcp_pcb)); + pcb->prio = prio; + pcb->snd_buf = TCP_SND_BUF; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->ttl = TCP_TTL; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. */ + pcb->mss = INITIAL_MSS; + pcb->rto = 3000 / TCP_SLOW_INTERVAL; + pcb->sv = 3000 / TCP_SLOW_INTERVAL; + pcb->rtime = -1; + pcb->cwnd = 1; + pcb->tmr = tcp_ticks; + pcb->last_timer = tcp_timer_ctr; + + /* RFC 5681 recommends setting ssthresh abritrarily high and gives an example + of using the largest advertised receive window. We've seen complications with + receiving TCPs that use window scaling and/or window auto-tuning where the + initial advertised window is very small and then grows rapidly once the + connection is established. To avoid these complications, we set ssthresh to the + largest effective cwnd (amount of in-flight data) that the sender can have. */ + pcb->ssthresh = TCP_SND_BUF; + +#if LWIP_CALLBACK_API + pcb->recv = tcp_recv_null; +#endif /* LWIP_CALLBACK_API */ + + /* Init KEEPALIVE timer */ + pcb->keep_idle = TCP_KEEPIDLE_DEFAULT; + +#if LWIP_TCP_KEEPALIVE + pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT; + pcb->keep_cnt = TCP_KEEPCNT_DEFAULT; +#endif /* LWIP_TCP_KEEPALIVE */ + } + return pcb; +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't place it on + * any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * If memory is not available for creating the new pcb, NULL is returned. + * + * @internal: Maybe there should be a idle TCP PCB list where these + * PCBs are put on. Port reservation using tcp_bind() is implemented but + * allocated pcbs that are not bound can't be killed automatically if wanting + * to allocate a pcb with higher prio (@see tcp_kill_prio()) + * + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new(void) +{ + return tcp_alloc(TCP_PRIO_NORMAL); +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't + * place it on any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) connections, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. 
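+ *
+ * Dual-stack sketch (requires LWIP_IPV4 && LWIP_IPV6; port 80 is just an
+ * example):
+ * @code
+ *   struct tcp_pcb *tpcb = tcp_new_ip_type(IPADDR_TYPE_ANY);
+ *   if (tpcb != NULL) {
+ *     tcp_bind(tpcb, IP_ANY_TYPE, 80);   // accepts IPv4 and IPv6 peers
+ *   }
+ * @endcode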
+ * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new_ip_type(u8_t type) +{ + struct tcp_pcb *pcb; + pcb = tcp_alloc(TCP_PRIO_NORMAL); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** + * @ingroup tcp_raw + * Specifies the program specific state that should be passed to all + * other callback functions. The "pcb" argument is the current TCP + * connection control block, and the "arg" argument is the argument + * that will be passed to the callbacks. + * + * @param pcb tcp_pcb to set the callback argument + * @param arg void pointer argument to pass to callback functions + */ +void +tcp_arg(struct tcp_pcb *pcb, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* This function is allowed to be called for both listen pcbs and + connection pcbs. */ + if (pcb != NULL) { + pcb->callback_arg = arg; + } +} +#if LWIP_CALLBACK_API + +/** + * @ingroup tcp_raw + * Sets the callback function that will be called when new data + * arrives. The callback function will be passed a NULL pbuf to + * indicate that the remote host has closed the connection. If the + * callback function returns ERR_OK or ERR_ABRT it must have + * freed the pbuf, otherwise it must not have freed it. + * + * @param pcb tcp_pcb to set the recv callback + * @param recv callback function to call for this pcb when data is received + */ +void +tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for recv callback", pcb->state != LISTEN); + pcb->recv = recv; + } +} + +/** + * @ingroup tcp_raw + * Specifies the callback function that should be called when data has + * successfully been received (i.e., acknowledged) by the remote + * host. The len argument passed to the callback function gives the + * amount bytes that was acknowledged by the last acknowledgment. + * + * @param pcb tcp_pcb to set the sent callback + * @param sent callback function to call for this pcb when data is successfully sent + */ +void +tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for sent callback", pcb->state != LISTEN); + pcb->sent = sent; + } +} + +/** + * @ingroup tcp_raw + * Used to specify the function that should be called when a fatal error + * has occurred on the connection. + * + * If a connection is aborted because of an error, the application is + * alerted of this event by the err callback. Errors that might abort a + * connection are when there is a shortage of memory. The callback + * function to be called is set using the tcp_err() function. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param pcb tcp_pcb to set the err callback + * @param err callback function to call for this pcb when a fatal error + * has occurred on the connection + */ +void +tcp_err(struct tcp_pcb *pcb, tcp_err_fn err) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for err callback", pcb->state != LISTEN); + pcb->errf = err; + } +} + +/** + * @ingroup tcp_raw + * Used for specifying the function that should be called when a + * LISTENing connection has been connected to another host. 
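+ *
+ * Sketch of an accept callback (my_accept_cb and my_recv_cb are hypothetical;
+ * lpcb is the listening pcb returned by tcp_listen()):
+ * @code
+ *   static err_t my_accept_cb(void *arg, struct tcp_pcb *newpcb, err_t err)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     if ((err != ERR_OK) || (newpcb == NULL)) {
+ *       return ERR_VAL;
+ *     }
+ *     tcp_arg(newpcb, NULL);
+ *     tcp_recv(newpcb, my_recv_cb);
+ *     return ERR_OK;
+ *   }
+ *
+ *   // after listening: tcp_accept(lpcb, my_accept_cb);
+ * @endcode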
+ * + * @param pcb tcp_pcb to set the accept callback + * @param accept callback function to call for this pcb when LISTENing + * connection has been connected to another host + */ +void +tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb != NULL) && (pcb->state == LISTEN)) { + struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen *)pcb; + lpcb->accept = accept; + } +} +#endif /* LWIP_CALLBACK_API */ + + +/** + * @ingroup tcp_raw + * Specifies the polling interval and the callback function that should + * be called to poll the application. The interval is specified in + * number of TCP coarse grained timer shots, which typically occurs + * twice a second. An interval of 10 means that the application would + * be polled every 5 seconds. + * + * When a connection is idle (i.e., no data is either transmitted or + * received), lwIP will repeatedly poll the application by calling a + * specified callback function. This can be used either as a watchdog + * timer for killing connections that have stayed idle for too long, or + * as a method of waiting for memory to become available. For instance, + * if a call to tcp_write() has failed because memory wasn't available, + * the application may use the polling functionality to call tcp_write() + * again when the connection has been idle for a while. + */ +void +tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_poll: invalid pcb", pcb != NULL, return); + LWIP_ASSERT("invalid socket state for poll", pcb->state != LISTEN); + +#if LWIP_CALLBACK_API + pcb->poll = poll; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(poll); +#endif /* LWIP_CALLBACK_API */ + pcb->pollinterval = interval; +} + +/** + * Purges a TCP PCB. Removes any buffered data and frees the buffer memory + * (pcb->ooseq, pcb->unsent and pcb->unacked are freed). + * + * @param pcb tcp_pcb to purge. The pcb itself is not deallocated! + */ +void +tcp_pcb_purge(struct tcp_pcb *pcb) +{ + LWIP_ERROR("tcp_pcb_purge: invalid pcb", pcb != NULL, return); + + if (pcb->state != CLOSED && + pcb->state != TIME_WAIT && + pcb->state != LISTEN) { + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge\n")); + + tcp_backlog_accepted(pcb); + + if (pcb->refused_data != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n")); + pbuf_free(pcb->refused_data); + pcb->refused_data = NULL; + } + if (pcb->unsent != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n")); + } + if (pcb->unacked != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n")); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n")); + tcp_free_ooseq(pcb); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Stop the retransmission timer as it will expect data on unacked + queue if it fires */ + pcb->rtime = -1; + + tcp_segs_free(pcb->unsent); + tcp_segs_free(pcb->unacked); + pcb->unacked = pcb->unsent = NULL; +#if TCP_OVERSIZE + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + } +} + +/** + * Purges the PCB and removes it from a PCB list. Any delayed ACKs are sent first. + * + * @param pcblist PCB list to purge. + * @param pcb tcp_pcb to purge. The pcb itself is NOT deallocated! 
+ */ +void +tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_pcb_remove: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_pcb_remove: invalid pcblist", pcblist != NULL); + + TCP_RMV(pcblist, pcb); + + tcp_pcb_purge(pcb); + + /* if there is an outstanding delayed ACKs, send it */ + if ((pcb->state != TIME_WAIT) && + (pcb->state != LISTEN) && + (pcb->flags & TF_ACK_DELAY)) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + if (pcb->state != LISTEN) { + LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL); + LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL); +#if TCP_QUEUE_OOSEQ + LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL); +#endif /* TCP_QUEUE_OOSEQ */ + } + + pcb->state = CLOSED; + /* reset the local port to prevent the pcb from being 'bound' */ + pcb->local_port = 0; + + LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane()); +} + +/** + * Calculates a new initial sequence number for new connections. + * + * @return u32_t pseudo random sequence number + */ +u32_t +tcp_next_iss(struct tcp_pcb *pcb) +{ +#ifdef LWIP_HOOK_TCP_ISN + LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL); + return LWIP_HOOK_TCP_ISN(&pcb->local_ip, pcb->local_port, &pcb->remote_ip, pcb->remote_port); +#else /* LWIP_HOOK_TCP_ISN */ + static u32_t iss = 6510; + + LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL); + LWIP_UNUSED_ARG(pcb); + + iss += tcp_ticks; /* XXX */ + return iss; +#endif /* LWIP_HOOK_TCP_ISN */ +} + +#if TCP_CALCULATE_EFF_SEND_MSS +/** + * Calculates the effective send mss that can be used for a specific IP address + * by calculating the minimum of TCP_MSS and the mtu (if set) of the target + * netif (if not NULL). + */ +u16_t +tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, const ip_addr_t *dest) +{ + u16_t mss_s; + u16_t mtu; + + LWIP_UNUSED_ARG(dest); /* in case IPv6 is disabled */ + + LWIP_ASSERT("tcp_eff_send_mss_netif: invalid dst_ip", dest != NULL); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + /* First look in destination cache, to see if there is a Path MTU. */ + mtu = nd6_get_destination_mtu(ip_2_ip6(dest), outif); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + if (outif == NULL) { + return sendmss; + } + mtu = outif->mtu; + } +#endif /* LWIP_IPV4 */ + + if (mtu != 0) { + u16_t offset; +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + offset = IP6_HLEN + TCP_HLEN; + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + offset = IP_HLEN + TCP_HLEN; + } +#endif /* LWIP_IPV4 */ + mss_s = (mtu > offset) ? (u16_t)(mtu - offset) : 0; + /* RFC 1122, chap 4.2.2.6: + * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize + * We correct for TCP options in tcp_write(), and don't support IP options. + */ + sendmss = LWIP_MIN(sendmss, mss_s); + } + return sendmss; +} +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +/** Helper function for tcp_netif_ip_addr_changed() that iterates a pcb list */ +static void +tcp_netif_ip_addr_changed_pcblist(const ip_addr_t *old_addr, struct tcp_pcb *pcb_list) +{ + struct tcp_pcb *pcb; + pcb = pcb_list; + + LWIP_ASSERT("tcp_netif_ip_addr_changed_pcblist: invalid old_addr", old_addr != NULL); + + while (pcb != NULL) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&pcb->local_ip, old_addr) +#if LWIP_AUTOIP + /* connections to link-local addresses must persist (RFC3927 ch. 
1.9) */ + && (!IP_IS_V4_VAL(pcb->local_ip) || !ip4_addr_islinklocal(ip_2_ip4(&pcb->local_ip))) +#endif /* LWIP_AUTOIP */ + ) { + /* this connection must be aborted */ + struct tcp_pcb *next = pcb->next; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: aborting TCP pcb %p\n", (void *)pcb)); + tcp_abort(pcb); + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** This function is called from netif.c when address is changed or netif is removed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change or NULL if netif has been removed + */ +void +tcp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct tcp_pcb_listen *lpcb; + + if (!ip_addr_isany(old_addr)) { + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_active_pcbs); + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_bound_pcbs); + + if (!ip_addr_isany(new_addr)) { + /* PCB bound to current local interface address? */ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&lpcb->local_ip, old_addr)) { + /* The PCB is listening to the old ipaddr and + * is set to listen to the new one instead */ + ip_addr_copy(lpcb->local_ip, *new_addr); + } + } + } + } +} + +const char * +tcp_debug_state_str(enum tcp_state s) +{ + return tcp_state_str[s]; +} + +err_t +tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port) +{ + if (pcb) { + if (local) { + if (addr) { + *addr = pcb->local_ip; + } + if (port) { + *port = pcb->local_port; + } + } else { + if (addr) { + *addr = pcb->remote_ip; + } + if (port) { + *port = pcb->remote_port; + } + } + return ERR_OK; + } + return ERR_VAL; +} + +#if TCP_QUEUE_OOSEQ +/* Free all ooseq pbufs (and possibly reset SACK state) */ +void +tcp_free_ooseq(struct tcp_pcb *pcb) +{ + if (pcb->ooseq) { + tcp_segs_free(pcb->ooseq); + pcb->ooseq = NULL; +#if LWIP_TCP_SACK_OUT + memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks)); +#endif /* LWIP_TCP_SACK_OUT */ + } +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +/** + * Print a tcp header for debugging purposes. 
+ * + * @param tcphdr pointer to a struct tcp_hdr + */ +void +tcp_debug_print(struct tcp_hdr *tcphdr) +{ + LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(tcphdr->src), lwip_ntohs(tcphdr->dest))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (seq no)\n", + lwip_ntohl(tcphdr->seqno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (ack no)\n", + lwip_ntohl(tcphdr->ackno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" | |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"| %5"U16_F" | (hdrlen, flags (", + TCPH_HDRLEN(tcphdr), + (u16_t)(TCPH_FLAGS(tcphdr) >> 5 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 4 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 3 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 2 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 1 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) & 1), + lwip_ntohs(tcphdr->wnd))); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_DEBUG, ("), win)\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| 0x%04"X16_F" | %5"U16_F" | (chksum, urgp)\n", + lwip_ntohs(tcphdr->chksum), lwip_ntohs(tcphdr->urgp))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); +} + +/** + * Print a tcp state for debugging purposes. + * + * @param s enum tcp_state to print + */ +void +tcp_debug_print_state(enum tcp_state s) +{ + LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s])); +} + +/** + * Print tcp flags for debugging purposes. + * + * @param flags tcp flags, all active flags are printed + */ +void +tcp_debug_print_flags(u8_t flags) +{ + if (flags & TCP_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("FIN ")); + } + if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("SYN ")); + } + if (flags & TCP_RST) { + LWIP_DEBUGF(TCP_DEBUG, ("RST ")); + } + if (flags & TCP_PSH) { + LWIP_DEBUGF(TCP_DEBUG, ("PSH ")); + } + if (flags & TCP_ACK) { + LWIP_DEBUGF(TCP_DEBUG, ("ACK ")); + } + if (flags & TCP_URG) { + LWIP_DEBUGF(TCP_DEBUG, ("URG ")); + } + if (flags & TCP_ECE) { + LWIP_DEBUGF(TCP_DEBUG, ("ECE ")); + } + if (flags & TCP_CWR) { + LWIP_DEBUGF(TCP_DEBUG, ("CWR ")); + } + LWIP_DEBUGF(TCP_DEBUG, ("\n")); +} + +/** + * Print all tcp_pcbs in every list for debugging purposes. 
+ */ +void +tcp_debug_print_pcbs(void) +{ + struct tcp_pcb *pcb; + struct tcp_pcb_listen *pcbl; + + LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n")); + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n")); + for (pcbl = tcp_listen_pcbs.listen_pcbs; pcbl != NULL; pcbl = pcbl->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F" ", pcbl->local_port)); + tcp_debug_print_state(pcbl->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n")); + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } +} + +/** + * Check state consistency of the tcp_pcb lists. + */ +s16_t +tcp_pcbs_sane(void) +{ + struct tcp_pcb *pcb; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + } + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + } + return 1; +} +#endif /* TCP_DEBUG */ + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +/** + * @defgroup tcp_raw_extargs ext arguments + * @ingroup tcp_raw + * Additional data storage per tcp pcb\n + * @see @ref tcp_raw + * + * When LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb) + * includes a number of additional argument entries in an array. + * + * To support memory management, in addition to a 'void *', callbacks can be + * provided to manage transition from listening pcbs to connections and to + * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks). + * + * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get + * to store and load arguments from this index for a given pcb. + */ + +static u8_t tcp_ext_arg_id; + +/** + * @ingroup tcp_raw_extargs + * Allocate an index to store data in ext_args member of struct tcp_pcb. + * Returned value is an index in mentioned array. + * The index is *global* over all pcbs! + * + * When @ref LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb) + * includes a number of additional argument entries in an array. + * + * To support memory management, in addition to a 'void *', callbacks can be + * provided to manage transition from listening pcbs to connections and to + * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks). + * + * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get + * to store and load arguments from this index for a given pcb. 
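+ *
+ * Sketch (my_ext_arg_id, my_state and struct my_state_s are hypothetical
+ * application data; the id is typically allocated once at init time):
+ * @code
+ *   static u8_t my_ext_arg_id;
+ *
+ *   my_ext_arg_id = tcp_ext_arg_alloc_id();
+ *   tcp_ext_arg_set(pcb, my_ext_arg_id, my_state);
+ *   // ... later, e.g. inside a callback for this pcb:
+ *   struct my_state_s *s = (struct my_state_s *)tcp_ext_arg_get(pcb, my_ext_arg_id);
+ * @endcode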
+ * + * @return a unique index into struct tcp_pcb.ext_args + */ +u8_t +tcp_ext_arg_alloc_id(void) +{ + u8_t result = tcp_ext_arg_id; + tcp_ext_arg_id++; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_TCP_PCB_NUM_EXT_ARGS >= 255 +#error LWIP_TCP_PCB_NUM_EXT_ARGS +#endif + LWIP_ASSERT("Increase LWIP_TCP_PCB_NUM_EXT_ARGS in lwipopts.h", result < LWIP_TCP_PCB_NUM_EXT_ARGS); + return result; +} + +/** + * @ingroup tcp_raw_extargs + * Set callbacks for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the callback + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @param callbacks callback table (const since it is referenced, not copied!) + */ +void +tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + LWIP_ASSERT("callbacks != NULL", callbacks != NULL); + + LWIP_ASSERT_CORE_LOCKED(); + + pcb->ext_args[id].callbacks = callbacks; +} + +/** + * @ingroup tcp_raw_extargs + * Set data for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the data + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @param arg data pointer to set + */ +void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + + LWIP_ASSERT_CORE_LOCKED(); + + pcb->ext_args[id].data = arg; +} + +/** + * @ingroup tcp_raw_extargs + * Set data for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the data + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @return data pointer at the given index + */ +void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + + LWIP_ASSERT_CORE_LOCKED(); + + return pcb->ext_args[id].data; +} + +/** This function calls the "destroy" callback for all ext_args once a pcb is + * freed. + */ +static void +tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args) +{ + int i; + LWIP_ASSERT("ext_args != NULL", ext_args != NULL); + + for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) { + if (ext_args[i].callbacks != NULL) { + if (ext_args[i].callbacks->destroy != NULL) { + ext_args[i].callbacks->destroy((u8_t)i, ext_args[i].data); + } + } + } +} + +/** This function calls the "passive_open" callback for all ext_args if a connection + * is in the process of being accepted. This is called just after the SYN is + * received and before a SYN/ACK is sent, to allow to modify the very first + * segment sent even on passive open. Naturally, the "accepted" callback of the + * pcb has not been called yet! 
+ */ +err_t +tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb) +{ + int i; + LWIP_ASSERT("lpcb != NULL", lpcb != NULL); + LWIP_ASSERT("cpcb != NULL", cpcb != NULL); + + for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) { + if (lpcb->ext_args[i].callbacks != NULL) { + if (lpcb->ext_args[i].callbacks->passive_open != NULL) { + err_t err = lpcb->ext_args[i].callbacks->passive_open((u8_t)i, lpcb, cpcb); + if (err != ERR_OK) { + return err; + } + } + } + } + return ERR_OK; +} +#endif /* LWIP_TCP_PCB_NUM_EXT_ARGS */ + +#endif /* LWIP_TCP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/tcp_in.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/tcp_in.c new file mode 100644 index 000000000..428a6f48d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/tcp_in.c @@ -0,0 +1,2178 @@ +/** + * @file + * Transmission Control Protocol, incoming traffic + * + * The input processing functions of the TCP layer. + * + * These functions are generally called in the order (ip_input() ->) + * tcp_input() -> * tcp_process() -> tcp_receive() (-> application). + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_ND6_TCP_REACHABILITY_HINTS +#include "lwip/nd6.h" +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Initial CWND calculation as defined RFC 2581 */ +#define LWIP_TCP_CALC_INITIAL_CWND(mss) ((tcpwnd_size_t)LWIP_MIN((4U * (mss)), LWIP_MAX((2U * (mss)), 4380U))) + +/* These variables are global to all functions involved in the input + processing of TCP segments. They are set by the tcp_input() + function. */ +static struct tcp_seg inseg; +static struct tcp_hdr *tcphdr; +static u16_t tcphdr_optlen; +static u16_t tcphdr_opt1len; +static u8_t *tcphdr_opt2; +static u16_t tcp_optidx; +static u32_t seqno, ackno; +static tcpwnd_size_t recv_acked; +static u16_t tcplen; +static u8_t flags; + +static u8_t recv_flags; +static struct pbuf *recv_data; + +struct tcp_pcb *tcp_input_pcb; + +/* Forward declarations. */ +static err_t tcp_process(struct tcp_pcb *pcb); +static void tcp_receive(struct tcp_pcb *pcb); +static void tcp_parseopt(struct tcp_pcb *pcb); + +static void tcp_listen_input(struct tcp_pcb_listen *pcb); +static void tcp_timewait_input(struct tcp_pcb *pcb); + +static int tcp_input_delayed_close(struct tcp_pcb *pcb); + +#if LWIP_TCP_SACK_OUT +static void tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right); +static void tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq); +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) +static void tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq); +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ +#endif /* LWIP_TCP_SACK_OUT */ + +/** + * The initial input processing of TCP. It verifies the TCP header, demultiplexes + * the segment between the PCBs and passes it on to tcp_process(), which implements + * the TCP finite state machine. This function is called by the IP layer (in + * ip_input()). + * + * @param p received TCP segment to process (p->payload pointing to the TCP header) + * @param inp network interface on which this segment was received + */ +void +tcp_input(struct pbuf *p, struct netif *inp) +{ + struct tcp_pcb *pcb, *prev; + struct tcp_pcb_listen *lpcb; +#if SO_REUSE + struct tcp_pcb *lpcb_prev = NULL; + struct tcp_pcb_listen *lpcb_any = NULL; +#endif /* SO_REUSE */ + u8_t hdrlen_bytes; + err_t err; + + LWIP_UNUSED_ARG(inp); + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("tcp_input: invalid pbuf", p != NULL); + + PERF_START; + + TCP_STATS_INC(tcp.recv); + MIB2_STATS_INC(mib2.tcpinsegs); + + tcphdr = (struct tcp_hdr *)p->payload; + +#if TCP_INPUT_DEBUG + tcp_debug_print(tcphdr); +#endif + + /* Check that TCP header fits in payload */ + if (p->len < TCP_HLEN) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: short packet (%"U16_F" bytes) discarded\n", p->tot_len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Don't even process incoming broadcasts/multicasts. 
*/ + if (ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()) || + ip_addr_ismulticast(ip_current_dest_addr())) { + TCP_STATS_INC(tcp.proterr); + goto dropped; + } + +#if CHECKSUM_CHECK_TCP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_TCP) { + /* Verify TCP checksum. */ + u16_t chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + ip_current_src_addr(), ip_current_dest_addr()); + if (chksum != 0) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packet discarded due to failing checksum 0x%04"X16_F"\n", + chksum)); + tcp_debug_print(tcphdr); + TCP_STATS_INC(tcp.chkerr); + goto dropped; + } + } +#endif /* CHECKSUM_CHECK_TCP */ + + /* sanity-check header length */ + hdrlen_bytes = TCPH_HDRLEN_BYTES(tcphdr); + if ((hdrlen_bytes < TCP_HLEN) || (hdrlen_bytes > p->tot_len)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: invalid header length (%"U16_F")\n", (u16_t)hdrlen_bytes)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Move the payload pointer in the pbuf so that it points to the + TCP data instead of the TCP header. */ + tcphdr_optlen = (u16_t)(hdrlen_bytes - TCP_HLEN); + tcphdr_opt2 = NULL; + if (p->len >= hdrlen_bytes) { + /* all options are in the first pbuf */ + tcphdr_opt1len = tcphdr_optlen; + pbuf_remove_header(p, hdrlen_bytes); /* cannot fail */ + } else { + u16_t opt2len; + /* TCP header fits into first pbuf, options don't - data is in the next pbuf */ + /* there must be a next pbuf, due to hdrlen_bytes sanity check above */ + LWIP_ASSERT("p->next != NULL", p->next != NULL); + + /* advance over the TCP header (cannot fail) */ + pbuf_remove_header(p, TCP_HLEN); + + /* determine how long the first and second parts of the options are */ + tcphdr_opt1len = p->len; + opt2len = (u16_t)(tcphdr_optlen - tcphdr_opt1len); + + /* options continue in the next pbuf: set p to zero length and hide the + options in the next pbuf (adjusting p->tot_len) */ + pbuf_remove_header(p, tcphdr_opt1len); + + /* check that the options fit in the second pbuf */ + if (opt2len > p->next->len) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: options overflow second pbuf (%"U16_F" bytes)\n", p->next->len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* remember the pointer to the second part of the options */ + tcphdr_opt2 = (u8_t *)p->next->payload; + + /* advance p->next to point after the options, and manually + adjust p->tot_len to keep it consistent with the changed p->next */ + pbuf_remove_header(p->next, opt2len); + p->tot_len = (u16_t)(p->tot_len - opt2len); + + LWIP_ASSERT("p->len == 0", p->len == 0); + LWIP_ASSERT("p->tot_len == p->next->tot_len", p->tot_len == p->next->tot_len); + } + + /* Convert fields in TCP header to host byte order. */ + tcphdr->src = lwip_ntohs(tcphdr->src); + tcphdr->dest = lwip_ntohs(tcphdr->dest); + seqno = tcphdr->seqno = lwip_ntohl(tcphdr->seqno); + ackno = tcphdr->ackno = lwip_ntohl(tcphdr->ackno); + tcphdr->wnd = lwip_ntohs(tcphdr->wnd); + + flags = TCPH_FLAGS(tcphdr); + tcplen = p->tot_len; + if (flags & (TCP_FIN | TCP_SYN)) { + tcplen++; + if (tcplen < p->tot_len) { + /* u16_t overflow, cannot handle this */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: length u16_t overflow, cannot handle this\n")); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + } + + /* Demultiplex an incoming segment. First, we check if it is destined + for an active connection. 
*/ + prev = NULL; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_input: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + LWIP_ASSERT("tcp_input: active pcb->state != LISTEN", pcb->state != LISTEN); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + prev = pcb; + continue; + } + + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). */ + LWIP_ASSERT("tcp_input: pcb->next != pcb (before cache)", pcb->next != pcb); + if (prev != NULL) { + prev->next = pcb->next; + pcb->next = tcp_active_pcbs; + tcp_active_pcbs = pcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + LWIP_ASSERT("tcp_input: pcb->next != pcb (after cache)", pcb->next != pcb); + break; + } + prev = pcb; + } + + if (pcb == NULL) { + /* If it did not go to an active connection, we check the connections + in the TIME-WAIT state. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + continue; + } + + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* We don't really care enough to move this PCB to the front + of the list since we are not very likely to receive that + many segments for connections in TIME-WAIT. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for TIME_WAITing connection.\n")); +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if (LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, tcphdr_opt1len, + tcphdr_opt2, p) == ERR_OK) +#endif + { + tcp_timewait_input(pcb); + } + pbuf_free(p); + return; + } + } + + /* Finally, if we still did not get a match, we check all PCBs that + are LISTENing for incoming connections. 
*/ + prev = NULL; + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + /* check if PCB is bound to specific netif */ + if ((lpcb->netif_idx != NETIF_NO_INDEX) && + (lpcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + prev = (struct tcp_pcb *)lpcb; + continue; + } + + if (lpcb->local_port == tcphdr->dest) { + if (IP_IS_ANY_TYPE_VAL(lpcb->local_ip)) { + /* found an ANY TYPE (IPv4/IPv6) match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } else if (IP_ADDR_PCB_VERSION_MATCH_EXACT(lpcb, ip_current_dest_addr())) { + if (ip_addr_cmp(&lpcb->local_ip, ip_current_dest_addr())) { + /* found an exact match */ + break; + } else if (ip_addr_isany(&lpcb->local_ip)) { + /* found an ANY-match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } + } + } + prev = (struct tcp_pcb *)lpcb; + } +#if SO_REUSE + /* first try specific local IP */ + if (lpcb == NULL) { + /* only pass to ANY if no specific local IP has been found */ + lpcb = lpcb_any; + prev = lpcb_prev; + } +#endif /* SO_REUSE */ + if (lpcb != NULL) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). */ + if (prev != NULL) { + ((struct tcp_pcb_listen *)prev)->next = lpcb->next; + /* our successor is the remainder of the listening list */ + lpcb->next = tcp_listen_pcbs.listen_pcbs; + /* put this listening pcb at the head of the listening list */ + tcp_listen_pcbs.listen_pcbs = lpcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for LISTENing connection.\n")); +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if (LWIP_HOOK_TCP_INPACKET_PCB((struct tcp_pcb *)lpcb, tcphdr, tcphdr_optlen, + tcphdr_opt1len, tcphdr_opt2, p) == ERR_OK) +#endif + { + tcp_listen_input(lpcb); + } + pbuf_free(p); + return; + } + } + +#if TCP_INPUT_DEBUG + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("+-+-+-+-+-+-+-+-+-+-+-+-+-+- tcp_input: flags ")); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")); +#endif /* TCP_INPUT_DEBUG */ + + +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if ((pcb != NULL) && LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, + tcphdr_opt1len, tcphdr_opt2, p) != ERR_OK) { + pbuf_free(p); + return; + } +#endif + if (pcb != NULL) { + /* The incoming segment belongs to a connection. */ +#if TCP_INPUT_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_INPUT_DEBUG */ + + /* Set up a tcp_seg structure. */ + inseg.next = NULL; + inseg.len = p->tot_len; + inseg.p = p; + inseg.tcphdr = tcphdr; + + recv_data = NULL; + recv_flags = 0; + recv_acked = 0; + + if (flags & TCP_PSH) { + p->flags |= PBUF_FLAG_PUSH; + } + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + if ((tcp_process_refused_data(pcb) == ERR_ABRT) || + ((pcb->refused_data != NULL) && (tcplen > 0))) { + /* pcb has been aborted or refused data is still refused and the new + segment contains data */ + if (pcb->rcv_ann_wnd == 0) { + /* this is a zero-window probe, we respond to it with current RCV.NXT + and drop the data segment */ + tcp_send_empty_ack(pcb); + } + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + goto aborted; + } + } + tcp_input_pcb = pcb; + err = tcp_process(pcb); + /* A return value of ERR_ABRT means that tcp_abort() was called + and that the pcb has been freed. 
If so, we don't do anything. */ + if (err != ERR_ABRT) { + if (recv_flags & TF_RESET) { + /* TF_RESET means that the connection was reset by the other + end. We then call the error callback to inform the + application that the connection is dead before we + deallocate the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_RST); + tcp_pcb_remove(&tcp_active_pcbs, pcb); + tcp_free(pcb); + } else { + err = ERR_OK; + /* If the application has registered a "sent" function to be + called when new send buffer space is available, we call it + now. */ + if (recv_acked > 0) { + u16_t acked16; +#if LWIP_WND_SCALE + /* recv_acked is u32_t but the sent callback only takes a u16_t, + so we might have to call it multiple times. */ + u32_t acked = recv_acked; + while (acked > 0) { + acked16 = (u16_t)LWIP_MIN(acked, 0xffffu); + acked -= acked16; +#else + { + acked16 = recv_acked; +#endif + TCP_EVENT_SENT(pcb, (u16_t)acked16, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + recv_acked = 0; + } + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (recv_data != NULL) { + struct pbuf *rest = NULL; + pbuf_split_64k(recv_data, &rest); +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + if (recv_data != NULL) { +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ASSERT("pcb->refused_data == NULL", pcb->refused_data == NULL); + if (pcb->flags & TF_RXCLOSED) { + /* received data although already closed -> abort (send RST) to + notify the remote host that not all data has been processed */ + pbuf_free(recv_data); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + tcp_abort(pcb); + goto aborted; + } + + /* Notify application that data has been received. */ + TCP_EVENT_RECV(pcb, recv_data, ERR_OK, err); + if (err == ERR_ABRT) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + goto aborted; + } + + /* If the upper layer can't receive this data, store it */ + if (err != ERR_OK) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(recv_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = recv_data; + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: keep incoming packet, because pcb is \"full\"\n")); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + break; + } else { + /* Upper layer received the data, go on with the rest if > 64K */ + recv_data = rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + } + } + + /* If a FIN segment was received, we call the callback + function with a NULL buffer to indicate EOF. */ + if (recv_flags & TF_GOT_FIN) { + if (pcb->refused_data != NULL) { + /* Delay this if we have refused data. */ + pcb->refused_data->flags |= PBUF_FLAG_TCP_FIN; + } else { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + } + + tcp_input_pcb = NULL; + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } + /* Try to send something out. */ + tcp_output(pcb); +#if TCP_INPUT_DEBUG +#if TCP_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_DEBUG */ +#endif /* TCP_INPUT_DEBUG */ + } + } + /* Jump target if pcb has been aborted in a callback (by calling tcp_abort()). + Below this line, 'pcb' may not be dereferenced! 
*/ +aborted: + tcp_input_pcb = NULL; + recv_data = NULL; + + /* give up our reference to inseg.p */ + if (inseg.p != NULL) { + pbuf_free(inseg.p); + inseg.p = NULL; + } + } else { + /* If no matching PCB was found, send a TCP RST (reset) to the + sender. */ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_input: no PCB match found, resetting.\n")); + if (!(TCPH_FLAGS(tcphdr) & TCP_RST)) { + TCP_STATS_INC(tcp.proterr); + TCP_STATS_INC(tcp.drop); + tcp_rst(NULL, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + pbuf_free(p); + } + + LWIP_ASSERT("tcp_input: tcp_pcbs_sane()", tcp_pcbs_sane()); + PERF_STOP("tcp_input"); + return; +dropped: + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + pbuf_free(p); +} + +/** Called from tcp_input to check for TF_CLOSED flag. This results in closing + * and deallocating a pcb at the correct place to ensure noone references it + * any more. + * @returns 1 if the pcb has been closed and deallocated, 0 otherwise + */ +static int +tcp_input_delayed_close(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_input_delayed_close: invalid pcb", pcb != NULL); + + if (recv_flags & TF_CLOSED) { + /* The connection has been closed and we will deallocate the + PCB. */ + if (!(pcb->flags & TF_RXCLOSED)) { + /* Connection closed although the application has only shut down the + tx side: call the PCB's err callback and indicate the closure to + ensure the application doesn't continue using the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_CLSD); + } + tcp_pcb_remove(&tcp_active_pcbs, pcb); + tcp_free(pcb); + return 1; + } + return 0; +} + +/** + * Called by tcp_input() when a segment arrives for a listening + * connection (from tcp_input()). + * + * @param pcb the tcp_pcb_listen for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_listen_input(struct tcp_pcb_listen *pcb) +{ + struct tcp_pcb *npcb; + u32_t iss; + err_t rc; + + if (flags & TCP_RST) { + /* An incoming RST should be ignored. Return. */ + return; + } + + LWIP_ASSERT("tcp_listen_input: invalid pcb", pcb != NULL); + + /* In the LISTEN state, we check for incoming SYN segments, + creates a new PCB, and responds with a SYN|ACK. */ + if (flags & TCP_ACK) { + /* For incoming segments with the ACK flag set, respond with a + RST. */ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_listen_input: ACK in LISTEN, sending reset\n")); + tcp_rst((const struct tcp_pcb *)pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } else if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection request %"U16_F" -> %"U16_F".\n", tcphdr->src, tcphdr->dest)); +#if TCP_LISTEN_BACKLOG + if (pcb->accepts_pending >= pcb->backlog) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: listen backlog exceeded for port %"U16_F"\n", tcphdr->dest)); + return; + } +#endif /* TCP_LISTEN_BACKLOG */ + npcb = tcp_alloc(pcb->prio); + /* If a new PCB could not be created (probably due to lack of memory), + we don't do anything, but rely on the sender will retransmit the + SYN at a time when we have more memory available. 
*/ + if (npcb == NULL) { + err_t err; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: could not allocate PCB\n")); + TCP_STATS_INC(tcp.memerr); + TCP_EVENT_ACCEPT(pcb, NULL, pcb->callback_arg, ERR_MEM, err); + LWIP_UNUSED_ARG(err); /* err not useful here */ + return; + } +#if TCP_LISTEN_BACKLOG + pcb->accepts_pending++; + tcp_set_flags(npcb, TF_BACKLOGPEND); +#endif /* TCP_LISTEN_BACKLOG */ + /* Set up the new PCB. */ + ip_addr_copy(npcb->local_ip, *ip_current_dest_addr()); + ip_addr_copy(npcb->remote_ip, *ip_current_src_addr()); + npcb->local_port = pcb->local_port; + npcb->remote_port = tcphdr->src; + npcb->state = SYN_RCVD; + npcb->rcv_nxt = seqno + 1; + npcb->rcv_ann_right_edge = npcb->rcv_nxt; + iss = tcp_next_iss(npcb); + npcb->snd_wl2 = iss; + npcb->snd_nxt = iss; + npcb->lastack = iss; + npcb->snd_lbb = iss; + npcb->snd_wl1 = seqno - 1;/* initialise to seqno-1 to force window update */ + npcb->callback_arg = pcb->callback_arg; +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + npcb->listener = pcb; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + /* inherit socket options */ + npcb->so_options = pcb->so_options & SOF_INHERITED; + npcb->netif_idx = pcb->netif_idx; + /* Register the new PCB so that we can begin receiving segments + for it. */ + TCP_REG_ACTIVE(npcb); + + /* Parse any options in the SYN. */ + tcp_parseopt(npcb); + npcb->snd_wnd = tcphdr->wnd; + npcb->snd_wnd_max = npcb->snd_wnd; + +#if TCP_CALCULATE_EFF_SEND_MSS + npcb->mss = tcp_eff_send_mss(npcb->mss, &npcb->local_ip, &npcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + MIB2_STATS_INC(mib2.tcppassiveopens); + +#if LWIP_TCP_PCB_NUM_EXT_ARGS + if (tcp_ext_arg_invoke_callbacks_passive_open(pcb, npcb) != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } +#endif + + /* Send a SYN|ACK together with the MSS option. */ + rc = tcp_enqueue_flags(npcb, TCP_SYN | TCP_ACK); + if (rc != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } + tcp_output(npcb); + } + return; +} + +/** + * Called by tcp_input() when a segment arrives for a connection in + * TIME_WAIT. + * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_timewait_input(struct tcp_pcb *pcb) +{ + /* RFC 1337: in TIME_WAIT, ignore RST and ACK FINs + any 'acceptable' segments */ + /* RFC 793 3.9 Event Processing - Segment Arrives: + * - first check sequence number - we skip that one in TIME_WAIT (always + * acceptable since we only send ACKs) + * - second check the RST bit (... return) */ + if (flags & TCP_RST) { + return; + } + + LWIP_ASSERT("tcp_timewait_input: invalid pcb", pcb != NULL); + + /* - fourth, check the SYN bit, */ + if (flags & TCP_SYN) { + /* If an incoming segment is not acceptable, an acknowledgment + should be sent in reply */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the SYN is in the window it is an error, send a reset */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + return; + } + } else if (flags & TCP_FIN) { + /* - eighth, check the FIN bit: Remain in the TIME-WAIT state. + Restart the 2 MSL time-wait timeout.*/ + pcb->tmr = tcp_ticks; + } + + if ((tcplen > 0)) { + /* Acknowledge data, FIN or out-of-window SYN */ + tcp_ack_now(pcb); + tcp_output(pcb); + } + return; +} + +/** + * Implements the TCP state machine. Called by tcp_input. 
In some + * states tcp_receive() is called to receive data. The tcp_seg + * argument will be freed by the caller (tcp_input()) unless the + * recv_data pointer in the pcb is set. + * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static err_t +tcp_process(struct tcp_pcb *pcb) +{ + struct tcp_seg *rseg; + u8_t acceptable = 0; + err_t err; + + err = ERR_OK; + + LWIP_ASSERT("tcp_process: invalid pcb", pcb != NULL); + + /* Process incoming RST segments. */ + if (flags & TCP_RST) { + /* First, determine if the reset is acceptable. */ + if (pcb->state == SYN_SENT) { + /* "In the SYN-SENT state (a RST received in response to an initial SYN), + the RST is acceptable if the ACK field acknowledges the SYN." */ + if (ackno == pcb->snd_nxt) { + acceptable = 1; + } + } else { + /* "In all states except SYN-SENT, all reset (RST) segments are validated + by checking their SEQ-fields." */ + if (seqno == pcb->rcv_nxt) { + acceptable = 1; + } else if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the sequence number is inside the window, we send a challenge ACK + and wait for a re-send with matching sequence number. + This follows RFC 5961 section 3.2 and addresses CVE-2004-0230 + (RST spoofing attack), which is present in RFC 793 RST handling. */ + tcp_ack_now(pcb); + } + } + + if (acceptable) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: Connection RESET\n")); + LWIP_ASSERT("tcp_input: pcb->state != CLOSED", pcb->state != CLOSED); + recv_flags |= TF_RESET; + tcp_clear_flags(pcb, TF_ACK_DELAY); + return ERR_RST; + } else { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + return ERR_OK; + } + } + + if ((flags & TCP_SYN) && (pcb->state != SYN_SENT && pcb->state != SYN_RCVD)) { + /* Cope with new connection attempt after remote end crashed */ + tcp_ack_now(pcb); + return ERR_OK; + } + + if ((pcb->flags & TF_RXCLOSED) == 0) { + /* Update the PCB (in)activity timer unless rx is closed (see tcp_shutdown) */ + pcb->tmr = tcp_ticks; + } + pcb->keep_cnt_sent = 0; + pcb->persist_probe = 0; + + tcp_parseopt(pcb); + + /* Do different things depending on the TCP state. */ + switch (pcb->state) { + case SYN_SENT: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("SYN-SENT: ackno %"U32_F" pcb->snd_nxt %"U32_F" unacked %"U32_F"\n", ackno, + pcb->snd_nxt, lwip_ntohl(pcb->unacked->tcphdr->seqno))); + /* received SYN ACK with expected sequence number? 
*/ + if ((flags & TCP_ACK) && (flags & TCP_SYN) + && (ackno == pcb->lastack + 1)) { + pcb->rcv_nxt = seqno + 1; + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->lastack = ackno; + pcb->snd_wnd = tcphdr->wnd; + pcb->snd_wnd_max = pcb->snd_wnd; + pcb->snd_wl1 = seqno - 1; /* initialise to seqno - 1 to force window update */ + pcb->state = ESTABLISHED; + +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SENT): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + LWIP_ASSERT("pcb->snd_queuelen > 0", (pcb->snd_queuelen > 0)); + --pcb->snd_queuelen; + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_process: SYN-SENT --queuelen %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + rseg = pcb->unacked; + if (rseg == NULL) { + /* might happen if tcp_output fails in tcp_rexmit_rto() + in which case the segment is on the unsent list */ + rseg = pcb->unsent; + LWIP_ASSERT("no segment to free", rseg != NULL); + pcb->unsent = rseg->next; + } else { + pcb->unacked = rseg->next; + } + tcp_seg_free(rseg); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + pcb->nrtx = 0; + } + + /* Call the user specified function to call when successfully + * connected. */ + TCP_EVENT_CONNECTED(pcb, ERR_OK, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + tcp_ack_now(pcb); + } + /* received ACK? possibly a half-open connection */ + else if (flags & TCP_ACK) { + /* send a RST to bring the other side in a non-synchronized state. */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + /* Resend SYN immediately (don't wait for rto timeout) to establish + connection faster, but do not send more SYNs than we otherwise would + have, or we might get caught in a loop on loopback interfaces. */ + if (pcb->nrtx < TCP_SYNMAXRTX) { + pcb->rtime = 0; + tcp_rexmit_rto(pcb); + } + } + break; + case SYN_RCVD: + if (flags & TCP_ACK) { + /* expected ACK number? */ + if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) { + pcb->state = ESTABLISHED; + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection established %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + if (pcb->listener == NULL) { + /* listen pcb might be closed by now */ + err = ERR_VAL; + } else +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + { +#if LWIP_CALLBACK_API + LWIP_ASSERT("pcb->listener->accept != NULL", pcb->listener->accept != NULL); +#endif + tcp_backlog_accepted(pcb); + /* Call the accept function. */ + TCP_EVENT_ACCEPT(pcb->listener, pcb, pcb->callback_arg, ERR_OK, err); + } + if (err != ERR_OK) { + /* If the accept function returns with an error, we abort + * the connection. */ + /* Already aborted? */ + if (err != ERR_ABRT) { + tcp_abort(pcb); + } + return ERR_ABRT; + } + /* If there was any data contained within this ACK, + * we'd better pass it on to the application as well. 
*/ + tcp_receive(pcb); + + /* Prevent ACK for SYN to generate a sent event */ + if (recv_acked != 0) { + recv_acked--; + } + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SYN_RCVD): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + + if (recv_flags & TF_GOT_FIN) { + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + } else { + /* incorrect ACK number, send RST */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + } else if ((flags & TCP_SYN) && (seqno == pcb->rcv_nxt - 1)) { + /* Looks like another copy of the SYN - retransmit our SYN-ACK */ + tcp_rexmit(pcb); + } + break; + case CLOSE_WAIT: + /* FALLTHROUGH */ + case ESTABLISHED: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { /* passive close */ + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + break; + case FIN_WAIT_1: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("TCP connection closed: FIN_WAIT_1 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } else { + tcp_ack_now(pcb); + pcb->state = CLOSING; + } + } else if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + pcb->state = FIN_WAIT_2; + } + break; + case FIN_WAIT_2: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: FIN_WAIT_2 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case CLOSING: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: CLOSING %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case LAST_ACK: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: LAST_ACK %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + /* bugfix #21699: don't set pcb->state to CLOSED here or we risk leaking segments */ + recv_flags |= TF_CLOSED; + } + break; + default: + break; + } + return ERR_OK; +} + +#if TCP_QUEUE_OOSEQ +/** + * Insert segment into the list (segments covered with new one will be deleted) + * + * Called from tcp_receive() + */ +static void +tcp_oos_insert_segment(struct tcp_seg *cseg, struct tcp_seg *next) +{ + struct tcp_seg *old_seg; + + LWIP_ASSERT("tcp_oos_insert_segment: invalid cseg", cseg != NULL); + + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + /* received segment overlaps all following segments */ + tcp_segs_free(next); + next = NULL; + } else { + /* delete some following segments + oos queue may have segments with FIN flag */ + while (next && + TCP_SEQ_GEQ((seqno + cseg->len), + (next->tcphdr->seqno + next->len))) { + /* cseg with FIN already processed */ + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + TCPH_SET_FLAG(cseg->tcphdr, TCP_FIN); + } + old_seg = next; + next = next->next; + tcp_seg_free(old_seg); + } + if (next && + TCP_SEQ_GT(seqno + cseg->len, next->tcphdr->seqno)) { + /* We need to trim 
the incoming segment. */ + cseg->len = (u16_t)(next->tcphdr->seqno - seqno); + pbuf_realloc(cseg->p, cseg->len); + } + } + cseg->next = next; +} +#endif /* TCP_QUEUE_OOSEQ */ + +/** Remove segments from a list if the incoming ACK acknowledges them */ +static struct tcp_seg * +tcp_free_acked_segments(struct tcp_pcb *pcb, struct tcp_seg *seg_list, const char *dbg_list_name, + struct tcp_seg *dbg_other_seg_list) +{ + struct tcp_seg *next; + u16_t clen; + + LWIP_UNUSED_ARG(dbg_list_name); + LWIP_UNUSED_ARG(dbg_other_seg_list); + + while (seg_list != NULL && + TCP_SEQ_LEQ(lwip_ntohl(seg_list->tcphdr->seqno) + + TCP_TCPLEN(seg_list), ackno)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->%s\n", + lwip_ntohl(seg_list->tcphdr->seqno), + lwip_ntohl(seg_list->tcphdr->seqno) + TCP_TCPLEN(seg_list), + dbg_list_name)); + + next = seg_list; + seg_list = seg_list->next; + + clen = pbuf_clen(next->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"TCPWNDSIZE_F" ... ", + (tcpwnd_size_t)pcb->snd_queuelen)); + LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= clen)); + + pcb->snd_queuelen = (u16_t)(pcb->snd_queuelen - clen); + recv_acked = (tcpwnd_size_t)(recv_acked + next->len); + tcp_seg_free(next); + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"TCPWNDSIZE_F" (after freeing %s)\n", + (tcpwnd_size_t)pcb->snd_queuelen, + dbg_list_name)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_receive: valid queue length", + seg_list != NULL || dbg_other_seg_list != NULL); + } + } + return seg_list; +} + +/** + * Called by tcp_process. Checks if the given segment is an ACK for outstanding + * data, and if so frees the memory of the buffered data. Next, it places the + * segment on any of the receive queues (pcb->recved or pcb->ooseq). If the segment + * is buffered, the pbuf is referenced by pbuf_ref so that it will not be freed until + * it has been removed from the buffer. + * + * If the incoming segment constitutes an ACK for a segment that was used for RTT + * estimation, the RTT is estimated here as well. + * + * Called from tcp_process(). + */ +static void +tcp_receive(struct tcp_pcb *pcb) +{ + s16_t m; + u32_t right_wnd_edge; + int found_dupack = 0; + + LWIP_ASSERT("tcp_receive: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_receive: wrong state", pcb->state >= ESTABLISHED); + + if (flags & TCP_ACK) { + right_wnd_edge = pcb->snd_wnd + pcb->snd_wl2; + + /* Update window. */ + if (TCP_SEQ_LT(pcb->snd_wl1, seqno) || + (pcb->snd_wl1 == seqno && TCP_SEQ_LT(pcb->snd_wl2, ackno)) || + (pcb->snd_wl2 == ackno && (u32_t)SND_WND_SCALE(pcb, tcphdr->wnd) > pcb->snd_wnd)) { + pcb->snd_wnd = SND_WND_SCALE(pcb, tcphdr->wnd); + /* keep track of the biggest window announced by the remote host to calculate + the maximum segment size */ + if (pcb->snd_wnd_max < pcb->snd_wnd) { + pcb->snd_wnd_max = pcb->snd_wnd; + } + pcb->snd_wl1 = seqno; + pcb->snd_wl2 = ackno; + LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: window update %"TCPWNDSIZE_F"\n", pcb->snd_wnd)); +#if TCP_WND_DEBUG + } else { + if (pcb->snd_wnd != (tcpwnd_size_t)SND_WND_SCALE(pcb, tcphdr->wnd)) { + LWIP_DEBUGF(TCP_WND_DEBUG, + ("tcp_receive: no window update lastack %"U32_F" ackno %" + U32_F" wl1 %"U32_F" seqno %"U32_F" wl2 %"U32_F"\n", + pcb->lastack, ackno, pcb->snd_wl1, seqno, pcb->snd_wl2)); + } +#endif /* TCP_WND_DEBUG */ + } + + /* (From Stevens TCP/IP Illustrated Vol II, p970.) Its only a + * duplicate ack if: + * 1) It doesn't ACK new data + * 2) length of received packet is zero (i.e. 
no payload) + * 3) the advertised window hasn't changed + * 4) There is outstanding unacknowledged data (retransmission timer running) + * 5) The ACK is == biggest ACK sequence number so far seen (snd_una) + * + * If it passes all five, should process as a dupack: + * a) dupacks < 3: do nothing + * b) dupacks == 3: fast retransmit + * c) dupacks > 3: increase cwnd + * + * If it only passes 1-3, should reset dupack counter (and add to + * stats, which we don't do in lwIP) + * + * If it only passes 1, should reset dupack counter + * + */ + + /* Clause 1 */ + if (TCP_SEQ_LEQ(ackno, pcb->lastack)) { + /* Clause 2 */ + if (tcplen == 0) { + /* Clause 3 */ + if (pcb->snd_wl2 + pcb->snd_wnd == right_wnd_edge) { + /* Clause 4 */ + if (pcb->rtime >= 0) { + /* Clause 5 */ + if (pcb->lastack == ackno) { + found_dupack = 1; + if ((u8_t)(pcb->dupacks + 1) > pcb->dupacks) { + ++pcb->dupacks; + } + if (pcb->dupacks > 3) { + /* Inflate the congestion window */ + TCP_WND_INC(pcb->cwnd, pcb->mss); + } + if (pcb->dupacks >= 3) { + /* Do fast retransmit (checked via TF_INFR, not via dupacks count) */ + tcp_rexmit_fast(pcb); + } + } + } + } + } + /* If Clause (1) or more is true, but not a duplicate ack, reset + * count of consecutive duplicate acks */ + if (!found_dupack) { + pcb->dupacks = 0; + } + } else if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) { + /* We come here when the ACK acknowledges new data. */ + tcpwnd_size_t acked; + + /* Reset the "IN Fast Retransmit" flag, since we are no longer + in fast retransmit. Also reset the congestion window to the + slow start threshold. */ + if (pcb->flags & TF_INFR) { + tcp_clear_flags(pcb, TF_INFR); + pcb->cwnd = pcb->ssthresh; + pcb->bytes_acked = 0; + } + + /* Reset the number of retransmissions. */ + pcb->nrtx = 0; + + /* Reset the retransmission time-out. */ + pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv); + + /* Record how much data this ACK acks */ + acked = (tcpwnd_size_t)(ackno - pcb->lastack); + + /* Reset the fast retransmit variables. */ + pcb->dupacks = 0; + pcb->lastack = ackno; + + /* Update the congestion control variables (cwnd and + ssthresh). */ + if (pcb->state >= ESTABLISHED) { + if (pcb->cwnd < pcb->ssthresh) { + tcpwnd_size_t increase; + /* limit to 1 SMSS segment during period following RTO */ + u8_t num_seg = (pcb->flags & TF_RTO) ? 1 : 2; + /* RFC 3465, section 2.2 Slow Start */ + increase = LWIP_MIN(acked, (tcpwnd_size_t)(num_seg * pcb->mss)); + TCP_WND_INC(pcb->cwnd, increase); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: slow start cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } else { + /* RFC 3465, section 2.1 Congestion Avoidance */ + TCP_WND_INC(pcb->bytes_acked, acked); + if (pcb->bytes_acked >= pcb->cwnd) { + pcb->bytes_acked = (tcpwnd_size_t)(pcb->bytes_acked - pcb->cwnd); + TCP_WND_INC(pcb->cwnd, pcb->mss); + } + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: congestion avoidance cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } + } + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: ACK for %"U32_F", unacked->seqno %"U32_F":%"U32_F"\n", + ackno, + pcb->unacked != NULL ? + lwip_ntohl(pcb->unacked->tcphdr->seqno) : 0, + pcb->unacked != NULL ? + lwip_ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked) : 0)); + + /* Remove segment from the unacknowledged list if the incoming + ACK acknowledges them. */ + pcb->unacked = tcp_free_acked_segments(pcb, pcb->unacked, "unacked", pcb->unsent); + /* We go through the ->unsent list to see if any of the segments + on the list are acknowledged by the ACK. 
This may seem + strange since an "unsent" segment shouldn't be acked. The + rationale is that lwIP puts all outstanding segments on the + ->unsent list after a retransmission, so these segments may + in fact have been sent once. */ + pcb->unsent = tcp_free_acked_segments(pcb, pcb->unsent, "unsent", pcb->unacked); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + } + + pcb->polltmr = 0; + +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + pcb->snd_buf = (tcpwnd_size_t)(pcb->snd_buf + recv_acked); + /* check if this ACK ends our retransmission of in-flight data */ + if (pcb->flags & TF_RTO) { + /* RTO is done if + 1) both queues are empty or + 2) unacked is empty and unsent head contains data not part of RTO or + 3) unacked head contains data not part of RTO */ + if (pcb->unacked == NULL) { + if ((pcb->unsent == NULL) || + (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unsent->tcphdr->seqno)))) { + tcp_clear_flags(pcb, TF_RTO); + } + } else if (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unacked->tcphdr->seqno))) { + tcp_clear_flags(pcb, TF_RTO); + } + } + /* End of ACK for new data processing. */ + } else { + /* Out of sequence ACK, didn't really ack anything */ + tcp_send_empty_ack(pcb); + } + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: pcb->rttest %"U32_F" rtseq %"U32_F" ackno %"U32_F"\n", + pcb->rttest, pcb->rtseq, ackno)); + + /* RTT estimation calculations. This is done by checking if the + incoming segment acknowledges the segment we use to take a + round-trip time measurement. */ + if (pcb->rttest && TCP_SEQ_LT(pcb->rtseq, ackno)) { + /* diff between this shouldn't exceed 32K since this are tcp timer ticks + and a round-trip shouldn't be that long... */ + m = (s16_t)(tcp_ticks - pcb->rttest); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: experienced rtt %"U16_F" ticks (%"U16_F" msec).\n", + m, (u16_t)(m * TCP_SLOW_INTERVAL))); + + /* This is taken directly from VJs original code in his paper */ + m = (s16_t)(m - (pcb->sa >> 3)); + pcb->sa = (s16_t)(pcb->sa + m); + if (m < 0) { + m = (s16_t) - m; + } + m = (s16_t)(m - (pcb->sv >> 2)); + pcb->sv = (s16_t)(pcb->sv + m); + pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: RTO %"U16_F" (%"U16_F" milliseconds)\n", + pcb->rto, (u16_t)(pcb->rto * TCP_SLOW_INTERVAL))); + + pcb->rttest = 0; + } + } + + /* If the incoming segment contains data, we must process it + further unless the pcb already received a FIN. + (RFC 793, chapter 3.9, "SEGMENT ARRIVES" in states CLOSE-WAIT, CLOSING, + LAST-ACK and TIME-WAIT: "Ignore the segment text.") */ + if ((tcplen > 0) && (pcb->state < CLOSE_WAIT)) { + /* This code basically does three things: + + +) If the incoming segment contains data that is the next + in-sequence data, this data is passed to the application. This + might involve trimming the first edge of the data. The rcv_nxt + variable and the advertised window are adjusted. + + +) If the incoming segment has data that is above the next + sequence number expected (->rcv_nxt), the segment is placed on + the ->ooseq queue. 
This is done by finding the appropriate + place in the ->ooseq queue (which is ordered by sequence + number) and trim the segment in both ends if needed. An + immediate ACK is sent to indicate that we received an + out-of-sequence segment. + + +) Finally, we check if the first segment on the ->ooseq queue + now is in sequence (i.e., if rcv_nxt >= ooseq->seqno). If + rcv_nxt > ooseq->seqno, we must trim the first edge of the + segment on ->ooseq before we adjust rcv_nxt. The data in the + segments that are now on sequence are chained onto the + incoming segment so that we only need to call the application + once. + */ + + /* First, we check if we must trim the first edge. We have to do + this if the sequence number of the incoming segment is less + than rcv_nxt, and the sequence number plus the length of the + segment is larger than rcv_nxt. */ + /* if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + if (TCP_SEQ_LT(pcb->rcv_nxt, seqno + tcplen)) {*/ + if (TCP_SEQ_BETWEEN(pcb->rcv_nxt, seqno + 1, seqno + tcplen - 1)) { + /* Trimming the first edge is done by pushing the payload + pointer in the pbuf downwards. This is somewhat tricky since + we do not want to discard the full contents of the pbuf up to + the new starting point of the data since we have to keep the + TCP header which is present in the first pbuf in the chain. + + What is done is really quite a nasty hack: the first pbuf in + the pbuf chain is pointed to by inseg.p. Since we need to be + able to deallocate the whole pbuf, we cannot change this + inseg.p pointer to point to any of the later pbufs in the + chain. Instead, we point the ->payload pointer in the first + pbuf to data in one of the later pbufs. We also set the + inseg.data pointer to point to the right place. This way, the + ->p pointer will still point to the first pbuf, but the + ->p->payload pointer will point to data in another pbuf. + + After we are done with adjusting the pbuf pointers we must + adjust the ->data pointer in the seg and the segment + length.*/ + + struct pbuf *p = inseg.p; + u32_t off32 = pcb->rcv_nxt - seqno; + u16_t new_tot_len, off; + LWIP_ASSERT("inseg.p != NULL", inseg.p); + LWIP_ASSERT("insane offset!", (off32 < 0xffff)); + off = (u16_t)off32; + LWIP_ASSERT("pbuf too short!", (((s32_t)inseg.p->tot_len) >= off)); + inseg.len -= off; + new_tot_len = (u16_t)(inseg.p->tot_len - off); + while (p->len < off) { + off -= p->len; + /* all pbufs up to and including this one have len==0, so tot_len is equal */ + p->tot_len = new_tot_len; + p->len = 0; + p = p->next; + } + /* cannot fail... */ + pbuf_remove_header(p, off); + inseg.tcphdr->seqno = seqno = pcb->rcv_nxt; + } else { + if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + /* the whole segment is < rcv_nxt */ + /* must be a duplicate of a packet that has already been correctly handled */ + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: duplicate seqno %"U32_F"\n", seqno)); + tcp_ack_now(pcb); + } + } + + /* The sequence number must be within the window (above rcv_nxt + and below rcv_nxt + rcv_wnd) in order to be further + processed. */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + if (pcb->rcv_nxt == seqno) { + /* The incoming segment is the next in sequence. We check if + we have to trim the end of the segment and update rcv_nxt + and pass the data to the application. 
*/ + tcplen = TCP_TCPLEN(&inseg); + + if (tcplen > pcb->rcv_wnd) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(inseg.tcphdr, TCPH_FLAGS(inseg.tcphdr) & ~(unsigned int)TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + TCPWND_CHECK16(pcb->rcv_wnd); + inseg.len = (u16_t)pcb->rcv_wnd; + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } +#if TCP_QUEUE_OOSEQ + /* Received in-sequence data, adjust ooseq data if: + - FIN has been received or + - inseq overlaps with ooseq */ + if (pcb->ooseq != NULL) { + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: received in-order FIN, binning ooseq queue\n")); + /* Received in-order FIN means anything that was received + * out of order must now have been received in-order, so + * bin the ooseq queue */ + while (pcb->ooseq != NULL) { + struct tcp_seg *old_ooseq = pcb->ooseq; + pcb->ooseq = pcb->ooseq->next; + tcp_seg_free(old_ooseq); + } + } else { + struct tcp_seg *next = pcb->ooseq; + /* Remove all segments on ooseq that are covered by inseg already. + * FIN is copied from ooseq to inseg if present. */ + while (next && + TCP_SEQ_GEQ(seqno + tcplen, + next->tcphdr->seqno + next->len)) { + struct tcp_seg *tmp; + /* inseg cannot have FIN here (already processed above) */ + if ((TCPH_FLAGS(next->tcphdr) & TCP_FIN) != 0 && + (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) == 0) { + TCPH_SET_FLAG(inseg.tcphdr, TCP_FIN); + tcplen = TCP_TCPLEN(&inseg); + } + tmp = next; + next = next->next; + tcp_seg_free(tmp); + } + /* Now trim right side of inseg if it overlaps with the first + * segment on ooseq */ + if (next && + TCP_SEQ_GT(seqno + tcplen, + next->tcphdr->seqno)) { + /* inseg cannot have FIN here (already processed above) */ + inseg.len = (u16_t)(next->tcphdr->seqno - seqno); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to ooseq queue\n", + (seqno + tcplen) == next->tcphdr->seqno); + } + pcb->ooseq = next; + } + } +#endif /* TCP_QUEUE_OOSEQ */ + + pcb->rcv_nxt = seqno + tcplen; + + /* Update the receiver's (our) window. */ + LWIP_ASSERT("tcp_receive: tcplen > rcv_wnd\n", pcb->rcv_wnd >= tcplen); + pcb->rcv_wnd -= tcplen; + + tcp_update_rcv_ann_wnd(pcb); + + /* If there is data in the segment, we make preparations to + pass this up to the application. The ->recv_data variable + is used for holding the pbuf that goes to the + application. The code for reassembling out-of-sequence data + chains its data on this pbuf as well. + + If the segment was a FIN, we set the TF_GOT_FIN flag that will + be used to indicate to the application that the remote side has + closed its end of the connection. */ + if (inseg.p->tot_len > 0) { + recv_data = inseg.p; + /* Since this pbuf now is the responsibility of the + application, we delete our reference to it so that we won't + (mistakingly) deallocate it. 
*/ + inseg.p = NULL; + } + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: received FIN.\n")); + recv_flags |= TF_GOT_FIN; + } + +#if TCP_QUEUE_OOSEQ + /* We now check if we have segments on the ->ooseq queue that + are now in sequence. */ + while (pcb->ooseq != NULL && + pcb->ooseq->tcphdr->seqno == pcb->rcv_nxt) { + + struct tcp_seg *cseg = pcb->ooseq; + seqno = pcb->ooseq->tcphdr->seqno; + + pcb->rcv_nxt += TCP_TCPLEN(cseg); + LWIP_ASSERT("tcp_receive: ooseq tcplen > rcv_wnd\n", + pcb->rcv_wnd >= TCP_TCPLEN(cseg)); + pcb->rcv_wnd -= TCP_TCPLEN(cseg); + + tcp_update_rcv_ann_wnd(pcb); + + if (cseg->p->tot_len > 0) { + /* Chain this pbuf onto the pbuf that we will pass to + the application. */ + /* With window scaling, this can overflow recv_data->tot_len, but + that's not a problem since we explicitly fix that before passing + recv_data to the application. */ + if (recv_data) { + pbuf_cat(recv_data, cseg->p); + } else { + recv_data = cseg->p; + } + cseg->p = NULL; + } + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: dequeued FIN.\n")); + recv_flags |= TF_GOT_FIN; + if (pcb->state == ESTABLISHED) { /* force passive close or we can move to active close */ + pcb->state = CLOSE_WAIT; + } + } + + pcb->ooseq = cseg->next; + tcp_seg_free(cseg); + } +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + if (pcb->ooseq != NULL) { + /* Some segments may have been removed from ooseq, let's remove all SACKs that + describe anything before the new beginning of that list. */ + tcp_remove_sacks_lt(pcb, pcb->ooseq->tcphdr->seqno); + } else if (LWIP_TCP_SACK_VALID(pcb, 0)) { + /* ooseq has been cleared. Nothing to SACK */ + memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks)); + } + } +#endif /* LWIP_TCP_SACK_OUT */ +#endif /* TCP_QUEUE_OOSEQ */ + + + /* Acknowledge the segment(s). */ + tcp_ack(pcb); + +#if LWIP_TCP_SACK_OUT + if (LWIP_TCP_SACK_VALID(pcb, 0)) { + /* Normally the ACK for the data received could be piggy-backed on a data packet, + but lwIP currently does not support including SACKs in data packets. So we force + it to respond with an empty ACK packet (only if there is at least one SACK to be sent). + NOTE: tcp_send_empty_ack() on success clears the ACK flags (set by tcp_ack()) */ + tcp_send_empty_ack(pcb); + } +#endif /* LWIP_TCP_SACK_OUT */ + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + } else { + /* We get here if the incoming segment is out-of-sequence. */ + +#if TCP_QUEUE_OOSEQ + /* We queue the segment on the ->ooseq queue. */ + if (pcb->ooseq == NULL) { + pcb->ooseq = tcp_seg_copy(&inseg); +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + /* All the SACKs should be invalid, so we can simply store the most recent one: */ + pcb->rcv_sacks[0].left = seqno; + pcb->rcv_sacks[0].right = seqno + inseg.len; + } +#endif /* LWIP_TCP_SACK_OUT */ + } else { + /* If the queue is not empty, we walk through the queue and + try to find a place where the sequence number of the + incoming segment is between the sequence numbers of the + previous and the next segment on the ->ooseq queue. That is + the place where we put the incoming segment. If needed, we + trim the second edges of the previous and the incoming + segment so that it will fit into the sequence. 
+ + If the incoming segment has the same sequence number as a + segment on the ->ooseq queue, we discard the segment that + contains less data. */ + +#if LWIP_TCP_SACK_OUT + /* This is the left edge of the lowest possible SACK range. + It may start before the newly received segment (possibly adjusted below). */ + u32_t sackbeg = TCP_SEQ_LT(seqno, pcb->ooseq->tcphdr->seqno) ? seqno : pcb->ooseq->tcphdr->seqno; +#endif /* LWIP_TCP_SACK_OUT */ + struct tcp_seg *next, *prev = NULL; + for (next = pcb->ooseq; next != NULL; next = next->next) { + if (seqno == next->tcphdr->seqno) { + /* The sequence number of the incoming segment is the + same as the sequence number of the segment on + ->ooseq. We check the lengths to see which one to + discard. */ + if (inseg.len > next->len) { + /* The incoming segment is larger than the old + segment. We replace some segments with the new + one. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (prev != NULL) { + prev->next = cseg; + } else { + pcb->ooseq = cseg; + } + tcp_oos_insert_segment(cseg, next); + } + break; + } else { + /* Either the lengths are the same or the incoming + segment was smaller than the old one; in either + case, we ditch the incoming segment. */ + break; + } + } else { + if (prev == NULL) { + if (TCP_SEQ_LT(seqno, next->tcphdr->seqno)) { + /* The sequence number of the incoming segment is lower + than the sequence number of the first segment on the + queue. We put the incoming segment first on the + queue. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + pcb->ooseq = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } else { + /*if (TCP_SEQ_LT(prev->tcphdr->seqno, seqno) && + TCP_SEQ_LT(seqno, next->tcphdr->seqno)) {*/ + if (TCP_SEQ_BETWEEN(seqno, prev->tcphdr->seqno + 1, next->tcphdr->seqno - 1)) { + /* The sequence number of the incoming segment is in + between the sequence numbers of the previous and + the next segment on ->ooseq. We trim trim the previous + segment, delete next segments that included in received segment + and trim received, if needed. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (TCP_SEQ_GT(prev->tcphdr->seqno + prev->len, seqno)) { + /* We need to trim the prev segment. */ + prev->len = (u16_t)(seqno - prev->tcphdr->seqno); + pbuf_realloc(prev->p, prev->len); + } + prev->next = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } + +#if LWIP_TCP_SACK_OUT + /* The new segment goes after the 'next' one. If there is a "hole" in sequence numbers + between 'prev' and the beginning of 'next', we want to move sackbeg. */ + if (prev != NULL && prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) { + sackbeg = next->tcphdr->seqno; + } +#endif /* LWIP_TCP_SACK_OUT */ + + /* We don't use 'prev' below, so let's set it to current 'next'. + This way even if we break the loop below, 'prev' will be pointing + at the segment right in front of the newly added one. */ + prev = next; + + /* If the "next" segment is the last segment on the + ooseq queue, we add the incoming segment to the end + of the list. */ + if (next->next == NULL && + TCP_SEQ_GT(seqno, next->tcphdr->seqno)) { + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + /* segment "next" already contains all data */ + break; + } + next->next = tcp_seg_copy(&inseg); + if (next->next != NULL) { + if (TCP_SEQ_GT(next->tcphdr->seqno + next->len, seqno)) { + /* We need to trim the last segment. 
*/ + next->len = (u16_t)(seqno - next->tcphdr->seqno); + pbuf_realloc(next->p, next->len); + } + /* check if the remote side overruns our receive window */ + if (TCP_SEQ_GT((u32_t)tcplen + seqno, pcb->rcv_nxt + (u32_t)pcb->rcv_wnd)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(next->next->tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(next->next->tcphdr, TCPH_FLAGS(next->next->tcphdr) & ~TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + next->next->len = (u16_t)(pcb->rcv_nxt + pcb->rcv_wnd - seqno); + pbuf_realloc(next->next->p, next->next->len); + tcplen = TCP_TCPLEN(next->next); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } + } + break; + } + } + } + +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + if (prev == NULL) { + /* The new segment is at the beginning. sackbeg should already be set properly. + We need to find the right edge. */ + next = pcb->ooseq; + } else if (prev->next != NULL) { + /* The new segment was added after 'prev'. If there is a "hole" between 'prev' and 'prev->next', + we need to move sackbeg. After that we should find the right edge. */ + next = prev->next; + if (prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) { + sackbeg = next->tcphdr->seqno; + } + } else { + next = NULL; + } + if (next != NULL) { + u32_t sackend = next->tcphdr->seqno; + for ( ; (next != NULL) && (sackend == next->tcphdr->seqno); next = next->next) { + sackend += next->len; + } + tcp_add_sack(pcb, sackbeg, sackend); + } + } +#endif /* LWIP_TCP_SACK_OUT */ + } +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) + { + /* Check that the data on ooseq doesn't exceed one of the limits + and throw away everything above that limit. */ +#ifdef TCP_OOSEQ_BYTES_LIMIT + const u32_t ooseq_max_blen = TCP_OOSEQ_BYTES_LIMIT(pcb); + u32_t ooseq_blen = 0; +#endif +#ifdef TCP_OOSEQ_PBUFS_LIMIT + const u16_t ooseq_max_qlen = TCP_OOSEQ_PBUFS_LIMIT(pcb); + u16_t ooseq_qlen = 0; +#endif + struct tcp_seg *next, *prev = NULL; + for (next = pcb->ooseq; next != NULL; prev = next, next = next->next) { + struct pbuf *p = next->p; + int stop_here = 0; +#ifdef TCP_OOSEQ_BYTES_LIMIT + ooseq_blen += p->tot_len; + if (ooseq_blen > ooseq_max_blen) { + stop_here = 1; + } +#endif +#ifdef TCP_OOSEQ_PBUFS_LIMIT + ooseq_qlen += pbuf_clen(p); + if (ooseq_qlen > ooseq_max_qlen) { + stop_here = 1; + } +#endif + if (stop_here) { +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + /* Let's remove all SACKs from next's seqno up. */ + tcp_remove_sacks_gt(pcb, next->tcphdr->seqno); + } +#endif /* LWIP_TCP_SACK_OUT */ + /* too much ooseq data, dump this and everything after it */ + tcp_segs_free(next); + if (prev == NULL) { + /* first ooseq segment is too much, dump the whole queue */ + pcb->ooseq = NULL; + } else { + /* just dump 'next' and everything after it */ + prev->next = NULL; + } + break; + } + } + } +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ +#endif /* TCP_QUEUE_OOSEQ */ + + /* We send the ACK packet after we've (potentially) dealt with SACKs, + so they can be included in the acknowledgment. */ + tcp_send_empty_ack(pcb); + } + } else { + /* The incoming segment is not within the window. 
*/ + tcp_send_empty_ack(pcb); + } + } else { + /* Segments with length 0 is taken care of here. Segments that + fall out of the window are ACKed. */ + if (!TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + tcp_ack_now(pcb); + } + } +} + +static u8_t +tcp_get_next_optbyte(void) +{ + u16_t optidx = tcp_optidx++; + if ((tcphdr_opt2 == NULL) || (optidx < tcphdr_opt1len)) { + u8_t *opts = (u8_t *)tcphdr + TCP_HLEN; + return opts[optidx]; + } else { + u8_t idx = (u8_t)(optidx - tcphdr_opt1len); + return tcphdr_opt2[idx]; + } +} + +/** + * Parses the options contained in the incoming segment. + * + * Called from tcp_listen_input() and tcp_process(). + * Currently, only the MSS option is supported! + * + * @param pcb the tcp_pcb for which a segment arrived + */ +static void +tcp_parseopt(struct tcp_pcb *pcb) +{ + u8_t data; + u16_t mss; +#if LWIP_TCP_TIMESTAMPS + u32_t tsval; +#endif + + LWIP_ASSERT("tcp_parseopt: invalid pcb", pcb != NULL); + + /* Parse the TCP MSS option, if present. */ + if (tcphdr_optlen != 0) { + for (tcp_optidx = 0; tcp_optidx < tcphdr_optlen; ) { + u8_t opt = tcp_get_next_optbyte(); + switch (opt) { + case LWIP_TCP_OPT_EOL: + /* End of options. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: EOL\n")); + return; + case LWIP_TCP_OPT_NOP: + /* NOP option. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: NOP\n")); + break; + case LWIP_TCP_OPT_MSS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: MSS\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_MSS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_MSS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An MSS option with the right option length. */ + mss = (u16_t)(tcp_get_next_optbyte() << 8); + mss |= tcp_get_next_optbyte(); + /* Limit the mss to the configured TCP_MSS and prevent division by zero */ + pcb->mss = ((mss > TCP_MSS) || (mss == 0)) ? TCP_MSS : mss; + break; +#if LWIP_WND_SCALE + case LWIP_TCP_OPT_WS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: WND_SCALE\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_WS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_WS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An WND_SCALE option with the right option length. 
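+             The shift count read below is clamped to 14, the largest value the
+             window scale specification (RFC 7323) permits.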
*/ + data = tcp_get_next_optbyte(); + /* If syn was received with wnd scale option, + activate wnd scale opt, but only if this is not a retransmission */ + if ((flags & TCP_SYN) && !(pcb->flags & TF_WND_SCALE)) { + pcb->snd_scale = data; + if (pcb->snd_scale > 14U) { + pcb->snd_scale = 14U; + } + pcb->rcv_scale = TCP_RCV_SCALE; + tcp_set_flags(pcb, TF_WND_SCALE); + /* window scaling is enabled, we can use the full receive window */ + LWIP_ASSERT("window not at default value", pcb->rcv_wnd == TCPWND_MIN16(TCP_WND)); + LWIP_ASSERT("window not at default value", pcb->rcv_ann_wnd == TCPWND_MIN16(TCP_WND)); + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCP_WND; + } + break; +#endif /* LWIP_WND_SCALE */ +#if LWIP_TCP_TIMESTAMPS + case LWIP_TCP_OPT_TS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: TS\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_TS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_TS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP timestamp option with valid length */ + tsval = tcp_get_next_optbyte(); + tsval |= (tcp_get_next_optbyte() << 8); + tsval |= (tcp_get_next_optbyte() << 16); + tsval |= (tcp_get_next_optbyte() << 24); + if (flags & TCP_SYN) { + pcb->ts_recent = lwip_ntohl(tsval); + /* Enable sending timestamps in every segment now that we know + the remote host supports it. */ + tcp_set_flags(pcb, TF_TIMESTAMP); + } else if (TCP_SEQ_BETWEEN(pcb->ts_lastacksent, seqno, seqno + tcplen)) { + pcb->ts_recent = lwip_ntohl(tsval); + } + /* Advance to next option (6 bytes already read) */ + tcp_optidx += LWIP_TCP_OPT_LEN_TS - 6; + break; +#endif /* LWIP_TCP_TIMESTAMPS */ +#if LWIP_TCP_SACK_OUT + case LWIP_TCP_OPT_SACK_PERM: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: SACK_PERM\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_SACK_PERM || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_SACK_PERM) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP SACK_PERM option with valid length */ + if (flags & TCP_SYN) { + /* We only set it if we receive it in a SYN (or SYN+ACK) packet */ + tcp_set_flags(pcb, TF_SACK); + } + break; +#endif /* LWIP_TCP_SACK_OUT */ + default: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: other\n")); + data = tcp_get_next_optbyte(); + if (data < 2) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + /* If the length field is zero, the options are malformed + and we don't process them further. */ + return; + } + /* All other options have a length field, so that we easily + can skip past them. */ + tcp_optidx += data - 2; + } + } + } +} + +void +tcp_trigger_input_pcb_close(void) +{ + recv_flags |= TF_CLOSED; +} + +#if LWIP_TCP_SACK_OUT +/** + * Called by tcp_receive() to add new SACK entry. + * + * The new SACK entry will be placed at the beginning of rcv_sacks[], as the newest one. + * Existing SACK entries will be "pushed back", to preserve their order. + * This is the behavior described in RFC 2018, section 4. 
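+ * For example, with rcv_sacks = {1000-2000, 3000-4000}, adding the range
+ * 5000-6000 yields {5000-6000, 1000-2000, 3000-4000}, whereas adding 1500-3500
+ * removes both overlapping entries and leaves just {1500-3500}.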
+ * + * @param pcb the tcp_pcb for which a segment arrived + * @param left the left side of the SACK (the first sequence number) + * @param right the right side of the SACK (the first sequence number past this SACK) + */ +static void +tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right) +{ + u8_t i; + u8_t unused_idx; + + if ((pcb->flags & TF_SACK) == 0 || !TCP_SEQ_LT(left, right)) { + return; + } + + /* First, let's remove all SACKs that are no longer needed (because they overlap with the newest one), + while moving all other SACKs forward. + We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at [i] if it doesn't overlap with left:right range. + It does not overlap if its right side is before the newly added SACK, + or if its left side is after the newly added SACK. + NOTE: The equality should not really happen, but it doesn't hurt. */ + if (TCP_SEQ_LEQ(pcb->rcv_sacks[i].right, left) || TCP_SEQ_LEQ(right, pcb->rcv_sacks[i].left)) { + if (unused_idx != i) { + /* We don't need to copy if it's already in the right spot */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + ++unused_idx; + } + } + + /* Now 'unused_idx' is the index of the first invalid SACK entry, + anywhere between 0 (no valid entries) and LWIP_TCP_MAX_SACK_NUM (all entries are valid). + We want to clear this and all following SACKs. + However, we will be adding another one in the front (and shifting everything else back). + So let's just iterate from the back, and set each entry to the one to the left if it's valid, + or to 0 if it is not. */ + for (i = LWIP_TCP_MAX_SACK_NUM - 1; i > 0; --i) { + /* [i] is the index we are setting, and the value should be at index [i-1], + or 0 if that index is unused (>= unused_idx). */ + if (i - 1 >= unused_idx) { + /* [i-1] is unused. Let's clear [i]. */ + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } else { + pcb->rcv_sacks[i] = pcb->rcv_sacks[i - 1]; + } + } + + /* And now we can store the newest SACK */ + pcb->rcv_sacks[0].left = left; + pcb->rcv_sacks[0].right = right; +} + +/** + * Called to remove a range of SACKs. + * + * SACK entries will be removed or adjusted to not acknowledge any sequence + * numbers that are less than 'seq' passed. It not only invalidates entries, + * but also moves all entries that are still valid to the beginning. + * + * @param pcb the tcp_pcb to modify + * @param seq the lowest sequence number to keep in SACK entries + */ +static void +tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq) +{ + u8_t i; + u8_t unused_idx; + + /* We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at index [i] if its right side is > 'seq'. */ + if (TCP_SEQ_GT(pcb->rcv_sacks[i].right, seq)) { + if (unused_idx != i) { + /* We only copy it if it's not in the right spot already. */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + /* NOTE: It is possible that its left side is < 'seq', in which case we should adjust it. 
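+          (e.g. with seq = 1500, a kept SACK of 1000-2000 becomes 1500-2000)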
*/ + if (TCP_SEQ_LT(pcb->rcv_sacks[unused_idx].left, seq)) { + pcb->rcv_sacks[unused_idx].left = seq; + } + ++unused_idx; + } + } + + /* We also need to invalidate everything from 'unused_idx' till the end */ + for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) { + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } +} + +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) +/** + * Called to remove a range of SACKs. + * + * SACK entries will be removed or adjusted to not acknowledge any sequence + * numbers that are greater than (or equal to) 'seq' passed. It not only invalidates entries, + * but also moves all entries that are still valid to the beginning. + * + * @param pcb the tcp_pcb to modify + * @param seq the highest sequence number to keep in SACK entries + */ +static void +tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq) +{ + u8_t i; + u8_t unused_idx; + + /* We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at index [i] if its left side is < 'seq'. */ + if (TCP_SEQ_LT(pcb->rcv_sacks[i].left, seq)) { + if (unused_idx != i) { + /* We only copy it if it's not in the right spot already. */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + /* NOTE: It is possible that its right side is > 'seq', in which case we should adjust it. */ + if (TCP_SEQ_GT(pcb->rcv_sacks[unused_idx].right, seq)) { + pcb->rcv_sacks[unused_idx].right = seq; + } + ++unused_idx; + } + } + + /* We also need to invalidate everything from 'unused_idx' till the end */ + for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) { + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } +} +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ + +#endif /* LWIP_TCP_SACK_OUT */ + +#endif /* LWIP_TCP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/tcp_out.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/tcp_out.c new file mode 100644 index 000000000..724df1097 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/tcp_out.c @@ -0,0 +1,2190 @@ +/** + * @file + * Transmission Control Protocol, outgoing traffic + * + * The output functions of TCP. + * + * There are two distinct ways for TCP segments to get sent: + * - queued data: these are segments transferring data or segments containing + * SYN or FIN (which both count as one sequence number). They are created as + * struct @ref pbuf together with a struct tcp_seg and enqueue to the + * unsent list of the pcb. They are sent by tcp_output: + * - @ref tcp_write : creates data segments + * - @ref tcp_split_unsent_seg : splits a data segment + * - @ref tcp_enqueue_flags : creates SYN-only or FIN-only segments + * - @ref tcp_output / tcp_output_segment : finalize the tcp header + * (e.g. sequence numbers, options, checksum) and output to IP + * - the various tcp_rexmit functions shuffle around segments between the + * unsent an unacked lists to retransmit them + * - tcp_create_segment and tcp_pbuf_prealloc allocate pbuf and + * segment for these functions + * - direct send: these segments don't contain data but control the connection + * behaviour. 
They are created as pbuf only and sent directly without + * enqueueing them: + * - @ref tcp_send_empty_ack sends an ACK-only segment + * - @ref tcp_rst sends a RST segment + * - @ref tcp_keepalive sends a keepalive segment + * - @ref tcp_zero_window_probe sends a window probe segment + * - tcp_output_alloc_header allocates a header-only pbuf for these functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_TCP_TIMESTAMPS +#include "lwip/sys.h" +#endif + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/* Allow to add custom TCP header options by defining this hook */ +#ifdef LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH +#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, LWIP_TCP_OPT_LENGTH(flags)) +#else +#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_TCP_OPT_LENGTH(flags) +#endif + +/* Define some copy-macros for checksum-on-copy so that the code looks + nicer by preventing too many ifdef's. 
*/ +#if TCP_CHECKSUM_ON_COPY +#define TCP_DATA_COPY(dst, src, len, seg) do { \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \ + len, &seg->chksum, &seg->chksum_swapped); \ + seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped); +#else /* TCP_CHECKSUM_ON_COPY*/ +#define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len) +#endif /* TCP_CHECKSUM_ON_COPY*/ + +/** Define this to 1 for an extra check that the output checksum is valid + * (usefule when the checksum is generated by the application, not the stack) */ +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0 +#endif +/* Allow to override the failure of sanity check from warning to e.g. hard failure */ +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(msg) LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING, msg) +#endif +#endif + +#if TCP_OVERSIZE +/** The size of segment pbufs created when TCP_OVERSIZE is enabled */ +#ifndef TCP_OVERSIZE_CALC_LENGTH +#define TCP_OVERSIZE_CALC_LENGTH(length) ((length) + TCP_OVERSIZE) +#endif +#endif + +/* Forward declarations.*/ +static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif); + +/* tcp_route: common code that returns a fixed bound netif or calls ip_route */ +static struct netif * +tcp_route(const struct tcp_pcb *pcb, const ip_addr_t *src, const ip_addr_t *dst) +{ + LWIP_UNUSED_ARG(src); /* in case IPv4-only and source-based routing is disabled */ + + if ((pcb != NULL) && (pcb->netif_idx != NETIF_NO_INDEX)) { + return netif_get_by_index(pcb->netif_idx); + } else { + return ip_route(src, dst); + } +} + +/** + * Create a TCP segment with prefilled header. + * + * Called by @ref tcp_write, @ref tcp_enqueue_flags and @ref tcp_split_unsent_seg + * + * @param pcb Protocol control block for the TCP connection. + * @param p pbuf that is used to hold the TCP header. + * @param hdrflags TCP flags for header. + * @param seqno TCP sequence number of this packet + * @param optflags options to include in TCP header + * @return a new tcp_seg pointing to p, or NULL. + * The TCP header is filled in except ackno and wnd. + * p is freed on failure. 
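+ * For instance, tcp_enqueue_flags() below builds a SYN segment with
+ * tcp_create_segment(pcb, p, TCP_SYN, pcb->snd_lbb, TF_SEG_OPTS_MSS).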
+ */ +static struct tcp_seg * +tcp_create_segment(const struct tcp_pcb *pcb, struct pbuf *p, u8_t hdrflags, u32_t seqno, u8_t optflags) +{ + struct tcp_seg *seg; + u8_t optlen; + + LWIP_ASSERT("tcp_create_segment: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_create_segment: invalid pbuf", p != NULL); + + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + + if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no memory.\n")); + pbuf_free(p); + return NULL; + } + seg->flags = optflags; + seg->next = NULL; + seg->p = p; + LWIP_ASSERT("p->tot_len >= optlen", p->tot_len >= optlen); + seg->len = p->tot_len - optlen; +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = 0; + seg->chksum_swapped = 0; + /* check optflags */ + LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED", + (optflags & TF_SEG_DATA_CHECKSUMMED) == 0); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* build TCP header */ + if (pbuf_add_header(p, TCP_HLEN)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no room for TCP header in pbuf.\n")); + TCP_STATS_INC(tcp.err); + tcp_seg_free(seg); + return NULL; + } + seg->tcphdr = (struct tcp_hdr *)seg->p->payload; + seg->tcphdr->src = lwip_htons(pcb->local_port); + seg->tcphdr->dest = lwip_htons(pcb->remote_port); + seg->tcphdr->seqno = lwip_htonl(seqno); + /* ackno is set in tcp_output */ + TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), hdrflags); + /* wnd and chksum are set in tcp_output */ + seg->tcphdr->urgp = 0; + return seg; +} + +/** + * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end. + * + * This function is like pbuf_alloc(layer, length, PBUF_RAM) except + * there may be extra bytes available at the end. + * + * Called by @ref tcp_write + * + * @param layer flag to define header size. + * @param length size of the pbuf's payload. + * @param max_length maximum usable size of payload+oversize. + * @param oversize pointer to a u16_t that will receive the number of usable tail bytes. + * @param pcb The TCP connection that will enqueue the pbuf. + * @param apiflags API flags given to tcp_write. + * @param first_seg true when this pbuf will be used in the first enqueued segment. + */ +#if TCP_OVERSIZE +static struct pbuf * +tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length, + u16_t *oversize, const struct tcp_pcb *pcb, u8_t apiflags, + u8_t first_seg) +{ + struct pbuf *p; + u16_t alloc = length; + + LWIP_ASSERT("tcp_pbuf_prealloc: invalid oversize", oversize != NULL); + LWIP_ASSERT("tcp_pbuf_prealloc: invalid pcb", pcb != NULL); + +#if LWIP_NETIF_TX_SINGLE_PBUF + LWIP_UNUSED_ARG(max_length); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(apiflags); + LWIP_UNUSED_ARG(first_seg); + alloc = max_length; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (length < max_length) { + /* Should we allocate an oversized pbuf, or just the minimum + * length required? If tcp_write is going to be called again + * before this segment is transmitted, we want the oversized + * buffer. If the segment will be transmitted immediately, we can + * save memory by allocating only length. We use a simple + * heuristic based on the following information: + * + * Did the user set TCP_WRITE_FLAG_MORE? + * + * Will the Nagle algorithm defer transmission of this segment? 
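+ * If either is true, more data is likely to follow, so the allocation below
+ * is rounded up (and mem-aligned) to TCP_OVERSIZE_CALC_LENGTH(length),
+ * capped at max_length.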
+ */ + if ((apiflags & TCP_WRITE_FLAG_MORE) || + (!(pcb->flags & TF_NODELAY) && + (!first_seg || + pcb->unsent != NULL || + pcb->unacked != NULL))) { + alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(TCP_OVERSIZE_CALC_LENGTH(length))); + } + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + p = pbuf_alloc(layer, alloc, PBUF_RAM); + if (p == NULL) { + return NULL; + } + LWIP_ASSERT("need unchained pbuf", p->next == NULL); + *oversize = p->len - length; + /* trim p->len to the currently used size */ + p->len = p->tot_len = length; + return p; +} +#else /* TCP_OVERSIZE */ +#define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM) +#endif /* TCP_OVERSIZE */ + +#if TCP_CHECKSUM_ON_COPY +/** Add a checksum of newly added data to the segment. + * + * Called by tcp_write and tcp_split_unsent_seg. + */ +static void +tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum, + u8_t *seg_chksum_swapped) +{ + u32_t helper; + /* add chksum to old chksum and fold to u16_t */ + helper = chksum + *seg_chksum; + chksum = FOLD_U32T(helper); + if ((len & 1) != 0) { + *seg_chksum_swapped = 1 - *seg_chksum_swapped; + chksum = SWAP_BYTES_IN_WORD(chksum); + } + *seg_chksum = chksum; +} +#endif /* TCP_CHECKSUM_ON_COPY */ + +/** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen). + * + * @param pcb the tcp pcb to check for + * @param len length of data to send (checked agains snd_buf) + * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise + */ +static err_t +tcp_write_checks(struct tcp_pcb *pcb, u16_t len) +{ + LWIP_ASSERT("tcp_write_checks: invalid pcb", pcb != NULL); + + /* connection is in invalid state for data transmission? */ + if ((pcb->state != ESTABLISHED) && + (pcb->state != CLOSE_WAIT) && + (pcb->state != SYN_SENT) && + (pcb->state != SYN_RCVD)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n")); + return ERR_CONN; + } else if (len == 0) { + return ERR_OK; + } + + /* fail on too much data */ + if (len > pcb->snd_buf) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"TCPWNDSIZE_F")\n", + len, pcb->snd_buf)); + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return ERR_MEM; + } + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + + /* If total number of pbufs on the unsent/unacked queues exceeds the + * configured maximum, return an error */ + /* check for configured max queuelen and possible overflow */ + if (pcb->snd_queuelen >= LWIP_MIN(TCP_SND_QUEUELEN, (TCP_SNDQUEUELEN_OVERFLOW + 1))) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n", + pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN)); + TCP_STATS_INC(tcp.memerr); + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return ERR_MEM; + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty", + pcb->unacked != NULL || pcb->unsent != NULL); + } else { + LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty", + pcb->unacked == NULL && pcb->unsent == NULL); + } + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Write data for sending (but does not send it immediately). + * + * It waits in the expectation of more data being sent soon (as + * it can send them more efficiently by combining them together). 
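+ * A minimal raw-API sketch (the buffer 'data' of length 'len' stands in for
+ * the application's own data):
+ *
+ *   if (tcp_sndbuf(pcb) >= len) {
+ *     if (tcp_write(pcb, data, len, TCP_WRITE_FLAG_COPY) == ERR_OK) {
+ *       tcp_output(pcb);
+ *     }
+ *   }
+ *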
+ * To prompt the system to send data now, call tcp_output() after + * calling tcp_write(). + * + * This function enqueues the data pointed to by the argument dataptr. The length of + * the data is passed as the len parameter. The apiflags can be one or more of: + * - TCP_WRITE_FLAG_COPY: indicates whether the new memory should be allocated + * for the data to be copied into. If this flag is not given, no new memory + * should be allocated and the data should only be referenced by pointer. This + * also means that the memory behind dataptr must not change until the data is + * ACKed by the remote host + * - TCP_WRITE_FLAG_MORE: indicates that more data follows. If this is omitted, + * the PSH flag is set in the last segment created by this call to tcp_write. + * If this flag is given, the PSH flag is not set. + * + * The tcp_write() function will fail and return ERR_MEM if the length + * of the data exceeds the current send buffer size or if the length of + * the queue of outgoing segment is larger than the upper limit defined + * in lwipopts.h. The number of bytes available in the output queue can + * be retrieved with the tcp_sndbuf() function. + * + * The proper way to use this function is to call the function with at + * most tcp_sndbuf() bytes of data. If the function returns ERR_MEM, + * the application should wait until some of the currently enqueued + * data has been successfully received by the other host and try again. + * + * @param pcb Protocol control block for the TCP connection to enqueue data for. + * @param arg Pointer to the data to be enqueued for sending. + * @param len Data length in bytes + * @param apiflags combination of following flags : + * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack + * - TCP_WRITE_FLAG_MORE (0x02) for TCP connection, PSH flag will not be set on last segment sent, + * @return ERR_OK if enqueued, another err_t on error + */ +err_t +tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags) +{ + struct pbuf *concat_p = NULL; + struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL; + u16_t pos = 0; /* position in 'arg' data */ + u16_t queuelen; + u8_t optlen; + u8_t optflags = 0; +#if TCP_OVERSIZE + u16_t oversize = 0; + u16_t oversize_used = 0; +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_add = 0; +#endif /* TCP_OVERSIZE_DBGCHECK*/ +#endif /* TCP_OVERSIZE */ + u16_t extendlen = 0; +#if TCP_CHECKSUM_ON_COPY + u16_t concat_chksum = 0; + u8_t concat_chksum_swapped = 0; + u16_t concat_chksummed = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + err_t err; + u16_t mss_local; + + LWIP_ERROR("tcp_write: invalid pcb", pcb != NULL, return ERR_ARG); + + /* don't allocate segments bigger than half the maximum window we ever received */ + mss_local = LWIP_MIN(pcb->mss, TCPWND_MIN16(pcb->snd_wnd_max / 2)); + mss_local = mss_local ? 
mss_local : pcb->mss; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Always copy to try to create single pbufs for TX */ + apiflags |= TCP_WRITE_FLAG_COPY; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n", + (void *)pcb, arg, len, (u16_t)apiflags)); + LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)", + arg != NULL, return ERR_ARG;); + + err = tcp_write_checks(pcb, len); + if (err != ERR_OK) { + return err; + } + queuelen = pcb->snd_queuelen; + +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP)) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host. */ + optflags = TF_SEG_OPTS_TS; + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(TF_SEG_OPTS_TS, pcb); + /* ensure that segments can hold at least one data byte... */ + mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1); + } else +#endif /* LWIP_TCP_TIMESTAMPS */ + { + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + } + + + /* + * TCP segmentation is done in three phases with increasing complexity: + * + * 1. Copy data directly into an oversized pbuf. + * 2. Chain a new pbuf to the end of pcb->unsent. + * 3. Create new segments. + * + * We may run out of memory at any point. In that case we must + * return ERR_MEM and not change anything in pcb. Therefore, all + * changes are recorded in local variables and committed at the end + * of the function. Some pcb fields are maintained in local copies: + * + * queuelen = pcb->snd_queuelen + * oversize = pcb->unsent_oversize + * + * These variables are set consistently by the phases: + * + * seg points to the last segment tampered with. + * + * pos records progress as data is segmented. + */ + + /* Find the tail of the unsent queue. */ + if (pcb->unsent != NULL) { + u16_t space; + u16_t unsent_optlen; + + /* @todo: this could be sped up by keeping last_unsent in the pcb */ + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + /* Usable space at the end of the last unsent segment */ + unsent_optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(last_unsent->flags, pcb); + LWIP_ASSERT("mss_local is too small", mss_local >= last_unsent->len + unsent_optlen); + space = mss_local - (last_unsent->len + unsent_optlen); + + /* + * Phase 1: Copy data directly into an oversized pbuf. + * + * The number of bytes copied is recorded in the oversize_used + * variable. The actual copying is done at the bottom of the + * function. + */ +#if TCP_OVERSIZE +#if TCP_OVERSIZE_DBGCHECK + /* check that pcb->unsent_oversize matches last_unsent->oversize_left */ + LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)", + pcb->unsent_oversize == last_unsent->oversize_left); +#endif /* TCP_OVERSIZE_DBGCHECK */ + oversize = pcb->unsent_oversize; + if (oversize > 0) { + LWIP_ASSERT("inconsistent oversize vs. space", oversize <= space); + seg = last_unsent; + oversize_used = LWIP_MIN(space, LWIP_MIN(oversize, len)); + pos += oversize_used; + oversize -= oversize_used; + space -= oversize_used; + } + /* now we are either finished or oversize is zero */ + LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len)); +#endif /* TCP_OVERSIZE */ + +#if !LWIP_NETIF_TX_SINGLE_PBUF + /* + * Phase 2: Chain a new pbuf to the end of pcb->unsent. 
+ * + * As an exception when NOT copying the data, if the given data buffer + * directly follows the last unsent data buffer in memory, extend the last + * ROM pbuf reference to the buffer, thus saving a ROM pbuf allocation. + * + * We don't extend segments containing SYN/FIN flags or options + * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at + * the end. + * + * This phase is skipped for LWIP_NETIF_TX_SINGLE_PBUF as we could only execute + * it after rexmit puts a segment from unacked to unsent and at this point, + * oversize info is lost. + */ + if ((pos < len) && (space > 0) && (last_unsent->len > 0)) { + u16_t seglen = LWIP_MIN(space, len - pos); + seg = last_unsent; + + /* Create a pbuf with a copy or reference to seglen bytes. We + * can use PBUF_RAW here since the data appears in the middle of + * a segment. A header will never be prepended. */ + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* Data is copied */ + if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", + seglen)); + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + oversize_add = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ + TCP_DATA_COPY2(concat_p->payload, (const u8_t *)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped); +#if TCP_CHECKSUM_ON_COPY + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + queuelen += pbuf_clen(concat_p); + } else { + /* Data is not copied */ + /* If the last unsent pbuf is of type PBUF_ROM, try to extend it. */ + struct pbuf *p; + for (p = last_unsent->p; p->next != NULL; p = p->next); + if (((p->type_internal & (PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_DATA_VOLATILE)) == 0) && + (const u8_t *)p->payload + p->len == (const u8_t *)arg) { + LWIP_ASSERT("tcp_write: ROM pbufs cannot be oversized", pos == 0); + extendlen = seglen; + } else { + if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } + /* reference the non-volatile payload data */ + ((struct pbuf_rom *)concat_p)->payload = (const u8_t *)arg + pos; + queuelen += pbuf_clen(concat_p); + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t *)arg + pos, seglen), seglen, + &concat_chksum, &concat_chksum_swapped); + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + } + + pos += seglen; + } +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + } else { +#if TCP_OVERSIZE + LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)", + pcb->unsent_oversize == 0); +#endif /* TCP_OVERSIZE */ + } + + /* + * Phase 3: Create new segments. + * + * The new segments are chained together in the local 'queue' + * variable, ready to be appended to pcb->unsent. 
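+ * Each loop iteration below creates one segment carrying at most
+ * (mss_local - optlen) bytes of payload.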
+ */ + while (pos < len) { + struct pbuf *p; + u16_t left = len - pos; + u16_t max_len = mss_local - optlen; + u16_t seglen = LWIP_MIN(left, max_len); +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* If copy is set, memory should be allocated and data copied + * into pbuf */ + if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen)); + goto memerr; + } + LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen", + (p->len >= seglen)); + TCP_DATA_COPY2((char *)p->payload + optlen, (const u8_t *)arg + pos, seglen, &chksum, &chksum_swapped); + } else { + /* Copy is not set: First allocate a pbuf for holding the data. + * Since the referenced data is available at least until it is + * sent out on the link (as it has to be ACKed by the remote + * party) we can safely use PBUF_ROM instead of PBUF_REF here. + */ + struct pbuf *p2; +#if TCP_OVERSIZE + LWIP_ASSERT("oversize == 0", oversize == 0); +#endif /* TCP_OVERSIZE */ + if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + chksum = ~inet_chksum((const u8_t *)arg + pos, seglen); + if (seglen & 1) { + chksum_swapped = 1; + chksum = SWAP_BYTES_IN_WORD(chksum); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + /* reference the non-volatile payload data */ + ((struct pbuf_rom *)p2)->payload = (const u8_t *)arg + pos; + + /* Second, allocate a pbuf for the headers. */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + /* If allocation fails, we have to deallocate the data pbuf as + * well. */ + pbuf_free(p2); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for header pbuf\n")); + goto memerr; + } + /* Concatenate the headers and data pbufs together. */ + pbuf_cat(p/*header*/, p2/*data*/); + } + + queuelen += pbuf_clen(p); + + /* Now that there are more segments queued, we check again if the + * length of the queue exceeds the configured maximum or + * overflows. */ + if (queuelen > LWIP_MIN(TCP_SND_QUEUELEN, TCP_SNDQUEUELEN_OVERFLOW)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n", + queuelen, (int)TCP_SND_QUEUELEN)); + pbuf_free(p); + goto memerr; + } + + if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) { + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* first segment of to-be-queued data? 
*/ + if (queue == NULL) { + queue = seg; + } else { + /* Attach the segment to the end of the queued segments */ + LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL); + prev_seg->next = seg; + } + /* remember last segment of to-be-queued data for next iteration */ + prev_seg = seg; + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg))); + + pos += seglen; + } + + /* + * All three segmentation phases were successful. We can commit the + * transaction. + */ +#if TCP_OVERSIZE_DBGCHECK + if ((last_unsent != NULL) && (oversize_add != 0)) { + last_unsent->oversize_left += oversize_add; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* + * Phase 1: If data has been added to the preallocated tail of + * last_unsent, we update the length fields of the pbuf chain. + */ +#if TCP_OVERSIZE + if (oversize_used > 0) { + struct pbuf *p; + /* Bump tot_len of whole chain, len of tail */ + for (p = last_unsent->p; p; p = p->next) { + p->tot_len += oversize_used; + if (p->next == NULL) { + TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent); + p->len += oversize_used; + } + } + last_unsent->len += oversize_used; +#if TCP_OVERSIZE_DBGCHECK + LWIP_ASSERT("last_unsent->oversize_left >= oversize_used", + last_unsent->oversize_left >= oversize_used); + last_unsent->oversize_left -= oversize_used; +#endif /* TCP_OVERSIZE_DBGCHECK */ + } + pcb->unsent_oversize = oversize; +#endif /* TCP_OVERSIZE */ + + /* + * Phase 2: concat_p can be concatenated onto last_unsent->p, unless we + * determined that the last ROM pbuf can be extended to include the new data. + */ + if (concat_p != NULL) { + LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty", + (last_unsent != NULL)); + pbuf_cat(last_unsent->p, concat_p); + last_unsent->len += concat_p->tot_len; + } else if (extendlen > 0) { + struct pbuf *p; + LWIP_ASSERT("tcp_write: extension of reference requires reference", + last_unsent != NULL && last_unsent->p != NULL); + for (p = last_unsent->p; p->next != NULL; p = p->next) { + p->tot_len += extendlen; + } + p->tot_len += extendlen; + p->len += extendlen; + last_unsent->len += extendlen; + } + +#if TCP_CHECKSUM_ON_COPY + if (concat_chksummed) { + LWIP_ASSERT("tcp_write: concat checksum needs concatenated data", + concat_p != NULL || extendlen > 0); + /*if concat checksumm swapped - swap it back */ + if (concat_chksum_swapped) { + concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum); + } + tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum, + &last_unsent->chksum_swapped); + last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED; + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* + * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that + * is harmless + */ + if (last_unsent == NULL) { + pcb->unsent = queue; + } else { + last_unsent->next = queue; + } + + /* + * Finally update the pcb state. + */ + pcb->snd_lbb += len; + pcb->snd_buf -= len; + pcb->snd_queuelen = queuelen; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n", + pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + /* Set the PSH flag in the last segment that we enqueued. 
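+     (calls that pass TCP_WRITE_FLAG_MORE suppress PSH, since the caller has
+     indicated that more data will follow shortly)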
*/ + if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE) == 0)) { + TCPH_SET_FLAG(seg->tcphdr, TCP_PSH); + } + + return ERR_OK; +memerr: + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + + if (concat_p != NULL) { + pbuf_free(concat_p); + } + if (queue != NULL) { + tcp_segs_free(queue); + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL || + pcb->unsent != NULL); + } + LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen)); + return ERR_MEM; +} + +/** + * Split segment on the head of the unsent queue. If return is not + * ERR_OK, existing head remains intact + * + * The split is accomplished by creating a new TCP segment and pbuf + * which holds the remainder payload after the split. The original + * pbuf is trimmed to new length. This allows splitting of read-only + * pbufs + * + * @param pcb the tcp_pcb for which to split the unsent head + * @param split the amount of payload to remain in the head + */ +err_t +tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split) +{ + struct tcp_seg *seg = NULL, *useg = NULL; + struct pbuf *p = NULL; + u8_t optlen; + u8_t optflags; + u8_t split_flags; + u8_t remainder_flags; + u16_t remainder; + u16_t offset; +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; + struct pbuf *q; +#endif /* TCP_CHECKSUM_ON_COPY */ + + LWIP_ASSERT("tcp_split_unsent_seg: invalid pcb", pcb != NULL); + + useg = pcb->unsent; + if (useg == NULL) { + return ERR_MEM; + } + + if (split == 0) { + LWIP_ASSERT("Can't split segment into length 0", 0); + return ERR_VAL; + } + + if (useg->len <= split) { + return ERR_OK; + } + + LWIP_ASSERT("split <= mss", split <= pcb->mss); + LWIP_ASSERT("useg->len > 0", useg->len > 0); + + /* We should check that we don't exceed TCP_SND_QUEUELEN but we need + * to split this packet so we may actually exceed the max value by + * one! 
+ */ + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue: split_unsent_seg: %u\n", (unsigned int)pcb->snd_queuelen)); + + optflags = useg->flags; +#if TCP_CHECKSUM_ON_COPY + /* Remove since checksum is not stored until after tcp_create_segment() */ + optflags &= ~TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + optlen = LWIP_TCP_OPT_LENGTH(optflags); + remainder = useg->len - split; + + /* Create new pbuf for the remainder of the split */ + p = pbuf_alloc(PBUF_TRANSPORT, remainder + optlen, PBUF_RAM); + if (p == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not allocate memory for pbuf remainder %u\n", remainder)); + goto memerr; + } + + /* Offset into the original pbuf is past TCP/IP headers, options, and split amount */ + offset = useg->p->tot_len - useg->len + split; + /* Copy remainder into new pbuf, headers and options will not be filled out */ + if (pbuf_copy_partial(useg->p, (u8_t *)p->payload + optlen, remainder, offset ) != remainder) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not copy pbuf remainder %u\n", remainder)); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum on remainder data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t *)p->payload + optlen, remainder), remainder, + &chksum, &chksum_swapped); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Options are created when calling tcp_output() */ + + /* Migrate flags from original segment */ + split_flags = TCPH_FLAGS(useg->tcphdr); + remainder_flags = 0; /* ACK added in tcp_output() */ + + if (split_flags & TCP_PSH) { + split_flags &= ~TCP_PSH; + remainder_flags |= TCP_PSH; + } + if (split_flags & TCP_FIN) { + split_flags &= ~TCP_FIN; + remainder_flags |= TCP_FIN; + } + /* SYN should be left on split, RST should not be present with data */ + + seg = tcp_create_segment(pcb, p, remainder_flags, lwip_ntohl(useg->tcphdr->seqno) + split, optflags); + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not create new TCP segment\n")); + goto memerr; + } + +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Remove this segment from the queue since trimming it may free pbufs */ + pcb->snd_queuelen -= pbuf_clen(useg->p); + + /* Trim the original pbuf into our split size. At this point our remainder segment must be setup + successfully because we are modifying the original segment */ + pbuf_realloc(useg->p, useg->p->tot_len - remainder); + useg->len -= remainder; + TCPH_SET_FLAG(useg->tcphdr, split_flags); +#if TCP_OVERSIZE_DBGCHECK + /* By trimming, realloc may have actually shrunk the pbuf, so clear oversize_left */ + useg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* Add back to the queue with new trimmed pbuf */ + pcb->snd_queuelen += pbuf_clen(useg->p); + +#if TCP_CHECKSUM_ON_COPY + /* The checksum on the split segment is now incorrect. 
We need to re-run it over the split */ + useg->chksum = 0; + useg->chksum_swapped = 0; + q = useg->p; + offset = q->tot_len - useg->len; /* Offset due to exposed headers */ + + /* Advance to the pbuf where the offset ends */ + while (q != NULL && offset > q->len) { + offset -= q->len; + q = q->next; + } + LWIP_ASSERT("Found start of payload pbuf", q != NULL); + /* Checksum the first payload pbuf accounting for offset, then other pbufs are all payload */ + for (; q != NULL; offset = 0, q = q->next) { + tcp_seg_add_chksum(~inet_chksum((const u8_t *)q->payload + offset, q->len - offset), q->len - offset, + &useg->chksum, &useg->chksum_swapped); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Update number of segments on the queues. Note that length now may + * exceed TCP_SND_QUEUELEN! We don't have to touch pcb->snd_buf + * because the total amount of data is constant when packet is split */ + pcb->snd_queuelen += pbuf_clen(seg->p); + + /* Finally insert remainder into queue after split (which stays head) */ + seg->next = useg->next; + useg->next = seg; + +#if TCP_OVERSIZE + /* If remainder is last segment on the unsent, ensure we clear the oversize amount + * because the remainder is always sized to the exact remaining amount */ + if (seg->next == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + return ERR_OK; +memerr: + TCP_STATS_INC(tcp.memerr); + + LWIP_ASSERT("seg == NULL", seg == NULL); + if (p != NULL) { + pbuf_free(p); + } + + return ERR_MEM; +} + +/** + * Called by tcp_close() to send a segment including FIN flag but not data. + * This FIN may be added to an existing segment or a new, otherwise empty + * segment is enqueued. + * + * @param pcb the tcp_pcb over which to send a segment + * @return ERR_OK if sent, another err_t otherwise + */ +err_t +tcp_send_fin(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_send_fin: invalid pcb", pcb != NULL); + + /* first, try to add the fin to the last unsent segment */ + if (pcb->unsent != NULL) { + struct tcp_seg *last_unsent; + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) { + /* no SYN/FIN/RST flag in the header, we can add the FIN flag */ + TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN); + tcp_set_flags(pcb, TF_FIN); + return ERR_OK; + } + } + /* no data, no length, flags, copy=1, no optdata */ + return tcp_enqueue_flags(pcb, TCP_FIN); +} + +/** + * Enqueue SYN or FIN for transmission. + * + * Called by @ref tcp_connect, tcp_listen_input, and @ref tcp_close + * (via @ref tcp_send_fin) + * + * @param pcb Protocol control block for the TCP connection. + * @param flags TCP header flags to set in the outgoing segment. + */ +err_t +tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags) +{ + struct pbuf *p; + struct tcp_seg *seg; + u8_t optflags = 0; + u8_t optlen = 0; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen)); + + LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)", + (flags & (TCP_SYN | TCP_FIN)) != 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid pcb", pcb != NULL); + + /* No need to check pcb->snd_queuelen if only SYN or FIN are allowed! */ + + /* Get options for this segment. This is a special case since this is the + only place where a SYN can be sent. 
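+     A SYN always carries the MSS option; window scale, SACK-permitted and
+     timestamp options are added only when the corresponding feature is enabled
+     and, for a SYN sent from SYN_RCVD, only if the peer offered them first.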
*/ + if (flags & TCP_SYN) { + optflags = TF_SEG_OPTS_MSS; +#if LWIP_WND_SCALE + if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_WND_SCALE)) { + /* In a (sent in state SYN_RCVD), the window scale option may only + be sent if we received a window scale option from the remote host. */ + optflags |= TF_SEG_OPTS_WND_SCALE; + } +#endif /* LWIP_WND_SCALE */ +#if LWIP_TCP_SACK_OUT + if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_SACK)) { + /* In a (sent in state SYN_RCVD), the SACK_PERM option may only + be sent if we received a SACK_PERM option from the remote host. */ + optflags |= TF_SEG_OPTS_SACK_PERM; + } +#endif /* LWIP_TCP_SACK_OUT */ + } +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP) || ((flags & TCP_SYN) && (pcb->state != SYN_RCVD))) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host (and in active open SYN segments). */ + optflags |= TF_SEG_OPTS_TS; + } +#endif /* LWIP_TCP_TIMESTAMPS */ + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + + /* Allocate pbuf with room for TCP header + options */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen", + (p->len >= optlen)); + + /* Allocate memory for tcp_seg, and fill in fields. */ + if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) { + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % LWIP_MIN(MEM_ALIGNMENT, 4)) == 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0); + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, + ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg), + (u16_t)flags)); + + /* Now append seg to pcb->unsent queue */ + if (pcb->unsent == NULL) { + pcb->unsent = seg; + } else { + struct tcp_seg *useg; + for (useg = pcb->unsent; useg->next != NULL; useg = useg->next); + useg->next = seg; + } +#if TCP_OVERSIZE + /* The new unsent tail has no space */ + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + + /* SYN and FIN bump the sequence number */ + if ((flags & TCP_SYN) || (flags & TCP_FIN)) { + pcb->snd_lbb++; + /* optlen does not influence snd_buf */ + } + if (flags & TCP_FIN) { + tcp_set_flags(pcb, TF_FIN); + } + + /* update number of segments on the queues */ + pcb->snd_queuelen += pbuf_clen(seg->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_enqueue_flags: invalid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + return ERR_OK; +} + +#if LWIP_TCP_TIMESTAMPS +/* Build a timestamp option (12 bytes long) at the specified options pointer) + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the timestamp option + */ +static void +tcp_build_timestamp_option(const struct tcp_pcb *pcb, u32_t *opts) +{ + LWIP_ASSERT("tcp_build_timestamp_option: invalid pcb", pcb != NULL); + + /* Pad with two NOP options to make everything nicely aligned */ + opts[0] = PP_HTONL(0x0101080A); + opts[1] = lwip_htonl(sys_now()); + opts[2] = lwip_htonl(pcb->ts_recent); +} +#endif + +#if LWIP_TCP_SACK_OUT +/** + * Calculates the number of SACK entries that should be generated. 
+ * It takes into account whether TF_SACK flag is set, + * the number of SACK entries in tcp_pcb that are valid, + * as well as the available options size. + * + * @param pcb tcp_pcb + * @param optlen the length of other TCP options (in bytes) + * @return the number of SACK ranges that can be used + */ +static u8_t +tcp_get_num_sacks(const struct tcp_pcb *pcb, u8_t optlen) +{ + u8_t num_sacks = 0; + + LWIP_ASSERT("tcp_get_num_sacks: invalid pcb", pcb != NULL); + + if (pcb->flags & TF_SACK) { + u8_t i; + + /* The first SACK takes up 12 bytes (it includes SACK header and two NOP options), + each additional one - 8 bytes. */ + optlen += 12; + + /* Max options size = 40, number of SACK array entries = LWIP_TCP_MAX_SACK_NUM */ + for (i = 0; (i < LWIP_TCP_MAX_SACK_NUM) && (optlen <= TCP_MAX_OPTION_BYTES) && + LWIP_TCP_SACK_VALID(pcb, i); ++i) { + ++num_sacks; + optlen += 8; + } + } + + return num_sacks; +} + +/** Build a SACK option (12 or more bytes long) at the specified options pointer) + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the SACK option + * @param num_sacks the number of SACKs to store + */ +static void +tcp_build_sack_option(const struct tcp_pcb *pcb, u32_t *opts, u8_t num_sacks) +{ + u8_t i; + + LWIP_ASSERT("tcp_build_sack_option: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_build_sack_option: invalid opts", opts != NULL); + + /* Pad with two NOP options to make everything nicely aligned. + We add the length (of just the SACK option, not the NOPs in front of it), + which is 2B of header, plus 8B for each SACK. */ + *(opts++) = PP_HTONL(0x01010500 + 2 + num_sacks * 8); + + for (i = 0; i < num_sacks; ++i) { + *(opts++) = lwip_htonl(pcb->rcv_sacks[i].left); + *(opts++) = lwip_htonl(pcb->rcv_sacks[i].right); + } +} + +#endif + +#if LWIP_WND_SCALE +/** Build a window scale option (3 bytes long) at the specified options pointer) + * + * @param opts option pointer where to store the window scale option + */ +static void +tcp_build_wnd_scale_option(u32_t *opts) +{ + LWIP_ASSERT("tcp_build_wnd_scale_option: invalid opts", opts != NULL); + + /* Pad with one NOP option to make everything nicely aligned */ + opts[0] = PP_HTONL(0x01030300 | TCP_RCV_SCALE); +} +#endif + +/** + * @ingroup tcp_raw + * Find out what we can send and send it + * + * @param pcb Protocol control block for the TCP connection to send data + * @return ERR_OK if data has been sent or nothing to send + * another err_t on error + */ +err_t +tcp_output(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg, *useg; + u32_t wnd, snd_nxt; + err_t err; + struct netif *netif; +#if TCP_CWND_DEBUG + s16_t i = 0; +#endif /* TCP_CWND_DEBUG */ + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("tcp_output: invalid pcb", pcb != NULL); + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_output for listen-pcbs", + pcb->state != LISTEN); + + /* First, check if we are invoked by the TCP input processing + code. If so, we do not output anything. Instead, we rely on the + input processing code to call us when input processing is done + with. 
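+     (tcp_input_pcb is set by tcp_input() for the duration of input processing,
+     so this test detects re-entry from the receive path; tcp_input() invokes
+     tcp_output() itself once the segment has been fully processed.)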
*/ + if (tcp_input_pcb == pcb) { + return ERR_OK; + } + + wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd); + + seg = pcb->unsent; + + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n", + (void *)pcb->unsent)); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F + ", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", seg == NULL, ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack)); + + /* If the TF_ACK_NOW flag is set and the ->unsent queue is empty, construct + * an empty ACK segment and send it. */ + if (pcb->flags & TF_ACK_NOW) { + return tcp_send_empty_ack(pcb); + } + /* nothing to send: shortcut out of here */ + goto output_done; + } else { + LWIP_DEBUGF(TCP_CWND_DEBUG, + ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack)); + } + + netif = tcp_route(pcb, &pcb->local_ip, &pcb->remote_ip); + if (netif == NULL) { + return ERR_RTE; + } + + /* If we don't have a local IP address, we get one from netif */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + + /* Handle the current segment not fitting within the window */ + if (lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd) { + /* We need to start the persistent timer when the next unsent segment does not fit + * within the remaining (could be 0) send window and RTO timer is not running (we + * have no in-flight data). If window is still too small after persist timer fires, + * then we split the segment. We don't consider the congestion window since a cwnd + * smaller than 1 SMSS implies in-flight data + */ + if (wnd == pcb->snd_wnd && pcb->unacked == NULL && pcb->persist_backoff == 0) { + pcb->persist_cnt = 0; + pcb->persist_backoff = 1; + pcb->persist_probe = 0; + } + /* We need an ACK, but can't send data now, so send an empty ACK */ + if (pcb->flags & TF_ACK_NOW) { + return tcp_send_empty_ack(pcb); + } + goto output_done; + } + /* Stop persist timer, above conditions are not active */ + pcb->persist_backoff = 0; + + /* useg should point to last segment on unacked queue */ + useg = pcb->unacked; + if (useg != NULL) { + for (; useg->next != NULL; useg = useg->next); + } + /* data available and window allows it to be sent? */ + while (seg != NULL && + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) { + LWIP_ASSERT("RST not expected here!", + (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0); + /* Stop sending if the nagle algorithm would prevent it + * Don't stop: + * - if tcp_write had a memory error before (prevent delayed ACK timeout) or + * - if FIN was already enqueued for this PCB (SYN is always alone in a segment - + * either seg->next != NULL or pcb->unacked == NULL; + * RST is no sent using tcp_write/tcp_output. 
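+       (broadly, tcp_do_output_nagle() evaluates to 0 when unacked data is in
+       flight, TF_NODELAY is not set and less than one full segment is queued)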
+ */ + if ((tcp_do_output_nagle(pcb) == 0) && + ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)) { + break; + } +#if TCP_CWND_DEBUG + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) + seg->len - + pcb->lastack, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack, i)); + ++i; +#endif /* TCP_CWND_DEBUG */ + + if (pcb->state != SYN_SENT) { + TCPH_SET_FLAG(seg->tcphdr, TCP_ACK); + } + + err = tcp_output_segment(seg, pcb, netif); + if (err != ERR_OK) { + /* segment could not be sent, for whatever reason */ + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return err; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + pcb->unsent = seg->next; + if (pcb->state != SYN_SENT) { + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + /* put segment on unacknowledged list if length > 0 */ + if (TCP_TCPLEN(seg) > 0) { + seg->next = NULL; + /* unacked list is empty? */ + if (pcb->unacked == NULL) { + pcb->unacked = seg; + useg = seg; + /* unacked list is not empty? */ + } else { + /* In the case of fast retransmit, the packet should not go to the tail + * of the unacked queue, but rather somewhere before it. We need to check for + * this case. -STJ Jul 27, 2004 */ + if (TCP_SEQ_LT(lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(useg->tcphdr->seqno))) { + /* add segment to before tail of unacked list, keeping the list sorted */ + struct tcp_seg **cur_seg = &(pcb->unacked); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = (*cur_seg); + (*cur_seg) = seg; + } else { + /* add segment to tail of unacked list */ + useg->next = seg; + useg = useg->next; + } + } + /* do not queue empty segments on the unacked list */ + } else { + tcp_seg_free(seg); + } + seg = pcb->unsent; + } +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + /* last unsent has been removed, reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + +output_done: + tcp_clear_flags(pcb, TF_NAGLEMEMERR); + return ERR_OK; +} + +/** Check if a segment's pbufs are used by someone else than TCP. + * This can happen on retransmission if the pbuf of this segment is still + * referenced by the netif driver due to deferred transmission. + * This is the case (only!) if someone down the TX call path called + * pbuf_ref() on one of the pbufs! + * + * @arg seg the tcp segment to check + * @return 1 if ref != 1, 0 if ref == 1 + */ +static int +tcp_output_segment_busy(const struct tcp_seg *seg) +{ + LWIP_ASSERT("tcp_output_segment_busy: invalid seg", seg != NULL); + + /* We only need to check the first pbuf here: + If a pbuf is queued for transmission, a driver calls pbuf_ref(), + which only changes the ref count of the first pbuf */ + if (seg->p->ref != 1) { + /* other reference found */ + return 1; + } + /* no other references found */ + return 0; +} + +/** + * Called by tcp_output() to actually send a TCP segment over IP. 
+ * + * @param seg the tcp_seg to send + * @param pcb the tcp_pcb for the TCP connection used to send the segment + * @param netif the netif used to send the segment + */ +static err_t +tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif) +{ + err_t err; + u16_t len; + u32_t *opts; +#if TCP_CHECKSUM_ON_COPY + int seg_chksum_was_swapped = 0; +#endif + + LWIP_ASSERT("tcp_output_segment: invalid seg", seg != NULL); + LWIP_ASSERT("tcp_output_segment: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_output_segment: invalid netif", netif != NULL); + + if (tcp_output_segment_busy(seg)) { + /* This should not happen: rexmit functions should have checked this. + However, since this function modifies p->len, we must not continue in this case. */ + LWIP_DEBUGF(TCP_RTO_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_output_segment: segment busy\n")); + return ERR_OK; + } + + /* The TCP header has already been constructed, but the ackno and + wnd fields remain. */ + seg->tcphdr->ackno = lwip_htonl(pcb->rcv_nxt); + + /* advertise our receive window size in this TCP segment */ +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + /* The Window field in a SYN segment itself (the only type where we send + the window scale option) is never scaled. */ + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(pcb->rcv_ann_wnd)); + } else +#endif /* LWIP_WND_SCALE */ + { + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + } + + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + + /* Add any requested options. NB MSS option is only set on SYN + packets, so ignore it here */ + /* cast through void* to get rid of alignment warnings */ + opts = (u32_t *)(void *)(seg->tcphdr + 1); + if (seg->flags & TF_SEG_OPTS_MSS) { + u16_t mss; +#if TCP_CALCULATE_EFF_SEND_MSS + mss = tcp_eff_send_mss_netif(TCP_MSS, netif, &pcb->remote_ip); +#else /* TCP_CALCULATE_EFF_SEND_MSS */ + mss = TCP_MSS; +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + *opts = TCP_BUILD_MSS_OPTION(mss); + opts += 1; + } +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; + + if (seg->flags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + tcp_build_wnd_scale_option(opts); + opts += 1; + } +#endif +#if LWIP_TCP_SACK_OUT + if (seg->flags & TF_SEG_OPTS_SACK_PERM) { + /* Pad with two NOP options to make everything nicely aligned + * NOTE: When we send both timestamp and SACK_PERM options, + * we could use the first two NOPs before the timestamp to store SACK_PERM option, + * but that would complicate the code. + */ + *(opts++) = PP_HTONL(0x01010402); + } +#endif + + /* Set retransmission timer running if it is not currently enabled + This must be set before checking the route. */ + if (pcb->rtime < 0) { + pcb->rtime = 0; + } + + if (pcb->rttest == 0) { + pcb->rttest = tcp_ticks; + pcb->rtseq = lwip_ntohl(seg->tcphdr->seqno); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq)); + } + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n", + lwip_htonl(seg->tcphdr->seqno), lwip_htonl(seg->tcphdr->seqno) + + seg->len)); + + len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload); + if (len == 0) { + /** Exclude retransmitted segments from this count. 
*/ + MIB2_STATS_INC(mib2.tcpoutsegs); + } + + seg->p->len -= len; + seg->p->tot_len -= len; + + seg->p->payload = seg->tcphdr; + + seg->tcphdr->chksum = 0; + +#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS + opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(seg->p, seg->tcphdr, pcb, opts); +#endif + LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(seg->tcphdr + 1)) + LWIP_TCP_OPT_LENGTH_SEGMENT(seg->flags, pcb)); + +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { +#if TCP_CHECKSUM_ON_COPY + u32_t acc; +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + u16_t chksum_slow = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ + if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) { + LWIP_ASSERT("data included but not checksummed", + seg->p->tot_len == TCPH_HDRLEN_BYTES(seg->tcphdr)); + } + + /* rebuild TCP header checksum (TCP header changes for retransmissions!) */ + acc = ip_chksum_pseudo_partial(seg->p, IP_PROTO_TCP, + seg->p->tot_len, TCPH_HDRLEN_BYTES(seg->tcphdr), &pcb->local_ip, &pcb->remote_ip); + /* add payload checksum */ + if (seg->chksum_swapped) { + seg_chksum_was_swapped = 1; + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 0; + } + acc = (u16_t)~acc + seg->chksum; + seg->tcphdr->chksum = (u16_t)~FOLD_U32T(acc); +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + if (chksum_slow != seg->tcphdr->chksum) { + TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL( + ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n", + seg->tcphdr->chksum, chksum_slow)); + seg->tcphdr->chksum = chksum_slow; + } +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ +#else /* TCP_CHECKSUM_ON_COPY */ + seg->tcphdr->chksum = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY */ + } +#endif /* CHECKSUM_GEN_TCP */ + TCP_STATS_INC(tcp.xmit); + + NETIF_SET_HINTS(netif, &(pcb->netif_hints)); + err = ip_output_if(seg->p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, + pcb->tos, IP_PROTO_TCP, netif); + NETIF_RESET_HINTS(netif); + +#if TCP_CHECKSUM_ON_COPY + if (seg_chksum_was_swapped) { + /* if data is added to this segment later, chksum needs to be swapped, + so restore this now */ + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 1; + } +#endif + + return err; +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +err_t +tcp_rexmit_rto_prepare(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + + LWIP_ASSERT("tcp_rexmit_rto_prepare: invalid pcb", pcb != NULL); + + if (pcb->unacked == NULL) { + return ERR_VAL; + } + + /* Move all unacked segments to the head of the unsent queue. + However, give up if any of the unsent pbufs are still referenced by the + netif driver due to deferred transmission. No point loading the link further + if it is struggling to flush its buffered writes. 
*/ + for (seg = pcb->unacked; seg->next != NULL; seg = seg->next) { + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n")); + return ERR_VAL; + } + } + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n")); + return ERR_VAL; + } + /* concatenate unsent queue after unacked queue */ + seg->next = pcb->unsent; +#if TCP_OVERSIZE_DBGCHECK + /* if last unsent changed, we need to update unsent_oversize */ + if (pcb->unsent == NULL) { + pcb->unsent_oversize = seg->oversize_left; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + /* unsent queue is the concatenated queue (of unacked, unsent) */ + pcb->unsent = pcb->unacked; + /* unacked queue is now empty */ + pcb->unacked = NULL; + + /* Mark RTO in-progress */ + tcp_set_flags(pcb, TF_RTO); + /* Record the next byte following retransmit */ + pcb->rto_end = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + /* Don't take any RTT measurements after retransmitting. */ + pcb->rttest = 0; + + return ERR_OK; +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto_commit(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_rto_commit: invalid pcb", pcb != NULL); + + /* increment number of retransmissions */ + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + /* Do the actual retransmission */ + tcp_output(pcb); +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_process() only, tcp_slowtmr() needs to do some things between + * "prepare" and "commit". + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_rto: invalid pcb", pcb != NULL); + + if (tcp_rexmit_rto_prepare(pcb) == ERR_OK) { + tcp_rexmit_rto_commit(pcb); + } +} + +/** + * Requeue the first unacked segment for retransmission + * + * Called by tcp_receive() for fast retransmit. + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +err_t +tcp_rexmit(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + struct tcp_seg **cur_seg; + + LWIP_ASSERT("tcp_rexmit: invalid pcb", pcb != NULL); + + if (pcb->unacked == NULL) { + return ERR_VAL; + } + + seg = pcb->unacked; + + /* Give up if the segment is still referenced by the netif driver + due to deferred transmission. */ + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit busy\n")); + return ERR_VAL; + } + + /* Move the first unacked segment to the unsent queue */ + /* Keep the unsent queue sorted. */ + pcb->unacked = seg->next; + + cur_seg = &(pcb->unsent); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = *cur_seg; + *cur_seg = seg; +#if TCP_OVERSIZE + if (seg->next == NULL) { + /* the retransmitted segment is last in unsent, so reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + + /* Don't take any rtt measurements after retransmitting. */ + pcb->rttest = 0; + + /* Do the actual retransmission. */ + MIB2_STATS_INC(mib2.tcpretranssegs); + /* No need to call tcp_output: we are always called from tcp_input() + and thus tcp_output directly returns. 
*/ + return ERR_OK; +} + + +/** + * Handle retransmission after three dupacks received + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +void +tcp_rexmit_fast(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_fast: invalid pcb", pcb != NULL); + + if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) { + /* This is fast retransmit. Retransmit the first unacked segment. */ + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: dupacks %"U16_F" (%"U32_F + "), fast retransmit %"U32_F"\n", + (u16_t)pcb->dupacks, pcb->lastack, + lwip_ntohl(pcb->unacked->tcphdr->seqno))); + if (tcp_rexmit(pcb) == ERR_OK) { + /* Set ssthresh to half of the minimum of the current + * cwnd and the advertised window */ + pcb->ssthresh = LWIP_MIN(pcb->cwnd, pcb->snd_wnd) / 2; + + /* The minimum value for ssthresh should be 2 MSS */ + if (pcb->ssthresh < (2U * pcb->mss)) { + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: The minimum value for ssthresh %"TCPWNDSIZE_F + " should be min 2 mss %"U16_F"...\n", + pcb->ssthresh, (u16_t)(2 * pcb->mss))); + pcb->ssthresh = 2 * pcb->mss; + } + + pcb->cwnd = pcb->ssthresh + 3 * pcb->mss; + tcp_set_flags(pcb, TF_INFR); + + /* Reset the retransmission timer to prevent immediate rto retransmissions */ + pcb->rtime = 0; + } + } +} + +static struct pbuf * +tcp_output_alloc_header_common(u32_t ackno, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */, + u16_t src_port, u16_t dst_port, u8_t flags, u16_t wnd) +{ + struct tcp_hdr *tcphdr; + struct pbuf *p; + + p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM); + if (p != NULL) { + LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr", + (p->len >= TCP_HLEN + optlen)); + tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->src = lwip_htons(src_port); + tcphdr->dest = lwip_htons(dst_port); + tcphdr->seqno = seqno_be; + tcphdr->ackno = lwip_htonl(ackno); + TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), flags); + tcphdr->wnd = lwip_htons(wnd); + tcphdr->chksum = 0; + tcphdr->urgp = 0; + } + return p; +} + +/** Allocate a pbuf and create a tcphdr at p->payload, used for output + * functions other than the default tcp_output -> tcp_output_segment + * (e.g. tcp_send_empty_ack, etc.) + * + * @param pcb tcp pcb for which to send a packet (used to initialize tcp_hdr) + * @param optlen length of header-options + * @param datalen length of tcp data to reserve in pbuf + * @param seqno_be seqno in network byte order (big-endian) + * @return pbuf with p->payload being the tcp_hdr + */ +static struct pbuf * +tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */) +{ + struct pbuf *p; + + LWIP_ASSERT("tcp_output_alloc_header: invalid pcb", pcb != NULL); + + p = tcp_output_alloc_header_common(pcb->rcv_nxt, optlen, datalen, + seqno_be, pcb->local_port, pcb->remote_port, TCP_ACK, + TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + if (p != NULL) { + /* If we're sending a packet, update the announced right window edge */ + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + } + return p; +} + +/* Fill in options for control segments */ +static void +tcp_output_fill_options(const struct tcp_pcb *pcb, struct pbuf *p, u8_t optflags, u8_t num_sacks) +{ + struct tcp_hdr *tcphdr; + u32_t *opts; + u16_t sacks_len = 0; + + LWIP_ASSERT("tcp_output_fill_options: invalid pbuf", p != NULL); + + tcphdr = (struct tcp_hdr *)p->payload; + opts = (u32_t *)(void *)(tcphdr + 1); + + /* NB. 
MSS and window scale options are only sent on SYNs, so ignore them here */ + +#if LWIP_TCP_TIMESTAMPS + if (optflags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif + +#if LWIP_TCP_SACK_OUT + if (pcb && (num_sacks > 0)) { + tcp_build_sack_option(pcb, opts, num_sacks); + /* 1 word for SACKs header (including 2xNOP), and 2 words for each SACK */ + sacks_len = 1 + num_sacks * 2; + opts += sacks_len; + } +#else + LWIP_UNUSED_ARG(num_sacks); +#endif + +#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS + opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, tcphdr, pcb, opts); +#endif + + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(sacks_len); + LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(tcphdr + 1)) + sacks_len * 4 + LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb)); + LWIP_UNUSED_ARG(optflags); /* for LWIP_NOASSERT */ + LWIP_UNUSED_ARG(opts); /* for LWIP_NOASSERT */ +} + +/** Output a control segment pbuf to IP. + * + * Called from tcp_rst, tcp_send_empty_ack, tcp_keepalive and tcp_zero_window_probe, + * this function combines selecting a netif for transmission, generating the tcp + * header checksum and calling ip_output_if while handling netif hints and stats. + */ +static err_t +tcp_output_control_segment(const struct tcp_pcb *pcb, struct pbuf *p, + const ip_addr_t *src, const ip_addr_t *dst) +{ + err_t err; + struct netif *netif; + + LWIP_ASSERT("tcp_output_control_segment: invalid pbuf", p != NULL); + + netif = tcp_route(pcb, src, dst); + if (netif == NULL) { + err = ERR_RTE; + } else { + u8_t ttl, tos; +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { + struct tcp_hdr *tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + src, dst); + } +#endif + if (pcb != NULL) { + NETIF_SET_HINTS(netif, LWIP_CONST_CAST(struct netif_hint*, &(pcb->netif_hints))); + ttl = pcb->ttl; + tos = pcb->tos; + } else { + /* Send output with hardcoded TTL/HL since we have no access to the pcb */ + ttl = TCP_TTL; + tos = 0; + } + TCP_STATS_INC(tcp.xmit); + err = ip_output_if(p, src, dst, ttl, tos, IP_PROTO_TCP, netif); + NETIF_RESET_HINTS(netif); + } + pbuf_free(p); + return err; +} + +/** + * Send a TCP RESET packet (empty segment with RST flag set) either to + * abort a connection or to show that there is no matching local connection + * for a received segment. + * + * Called by tcp_abort() (to abort a local connection), tcp_input() (if no + * matching local pcb was found), tcp_listen_input() (if incoming segment + * has ACK flag set) and tcp_process() (received segment in the wrong state) + * + * Since a RST segment is in most cases not sent for an active connection, + * tcp_rst() has a number of arguments that are taken from a tcp_pcb for + * most other segment output functions. 
+ * + * @param pcb TCP pcb (may be NULL if no pcb is available) + * @param seqno the sequence number to use for the outgoing segment + * @param ackno the acknowledge number to use for the outgoing segment + * @param local_ip the local IP address to send the segment from + * @param remote_ip the remote IP address to send the segment to + * @param local_port the local TCP port to send the segment from + * @param remote_port the remote TCP port to send the segment to + */ +void +tcp_rst(const struct tcp_pcb *pcb, u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port) +{ + struct pbuf *p; + u16_t wnd; + u8_t optlen; + + LWIP_ASSERT("tcp_rst: invalid local_ip", local_ip != NULL); + LWIP_ASSERT("tcp_rst: invalid remote_ip", remote_ip != NULL); + + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + +#if LWIP_WND_SCALE + wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF)); +#else + wnd = PP_HTONS(TCP_WND); +#endif + + p = tcp_output_alloc_header_common(ackno, optlen, 0, lwip_htonl(seqno), local_port, + remote_port, TCP_RST | TCP_ACK, wnd); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n")); + return; + } + tcp_output_fill_options(pcb, p, 0, optlen); + + MIB2_STATS_INC(mib2.tcpoutrsts); + + tcp_output_control_segment(pcb, p, local_ip, remote_ip); + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno)); +} + +/** + * Send an ACK without data. + * + * @param pcb Protocol control block for the TCP connection to send the ACK + */ +err_t +tcp_send_empty_ack(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen, optflags = 0; + u8_t num_sacks = 0; + + LWIP_ASSERT("tcp_send_empty_ack: invalid pcb", pcb != NULL); + +#if LWIP_TCP_TIMESTAMPS + if (pcb->flags & TF_TIMESTAMP) { + optflags = TF_SEG_OPTS_TS; + } +#endif + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + +#if LWIP_TCP_SACK_OUT + /* For now, SACKs are only sent with empty ACKs */ + if ((num_sacks = tcp_get_num_sacks(pcb, optlen)) > 0) { + optlen += 4 + num_sacks * 8; /* 4 bytes for header (including 2*NOP), plus 8B for each SACK */ + } +#endif + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt)); + if (p == NULL) { + /* let tcp_fasttmr retry sending this ACK */ + tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n")); + return ERR_BUF; + } + tcp_output_fill_options(pcb, p, optflags, num_sacks); + +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; +#endif + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, + ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt)); + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + if (err != ERR_OK) { + /* let tcp_fasttmr retry sending this ACK */ + tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } else { + /* remove ACK flags from the PCB, as we sent an empty ACK now */ + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + + return err; +} + +/** + * Send keepalive packets to keep a connection active although + * no data is sent over it. 
+ * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a keepalive packet + */ +err_t +tcp_keepalive(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + + LWIP_ASSERT("tcp_keepalive: invalid pcb", pcb != NULL); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt - 1)); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_keepalive: could not allocate memory for pbuf\n")); + return ERR_MEM; + } + tcp_output_fill_options(pcb, p, 0, optlen); + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} + +/** + * Send persist timer zero-window probes to keep a connection active + * when a window update is lost. + * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a zero-window probe packet + */ +err_t +tcp_zero_window_probe(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + struct tcp_hdr *tcphdr; + struct tcp_seg *seg; + u16_t len; + u8_t is_fin; + u32_t snd_nxt; + u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + + LWIP_ASSERT("tcp_zero_window_probe: invalid pcb", pcb != NULL); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: sending ZERO WINDOW probe to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_zero_window_probe: tcp_ticks %"U32_F + " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + /* Only consider unsent, persist timer should be off when there is data in-flight */ + seg = pcb->unsent; + if (seg == NULL) { + /* Not expected, persist timer should be off when the send buffer is empty */ + return ERR_OK; + } + + /* increment probe count. NOTE: we record probe even if it fails + to actually transmit due to an error. This ensures memory exhaustion/ + routing problem doesn't leave a zero-window pcb as an indefinite zombie. + RTO mechanism has similar behavior, see pcb->nrtx */ + if (pcb->persist_probe < 0xFF) { + ++pcb->persist_probe; + } + + is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0); + /* we want to send one seqno: either FIN or data (no options) */ + len = is_fin ? 0 : 1; + + p = tcp_output_alloc_header(pcb, optlen, len, seg->tcphdr->seqno); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n")); + return ERR_MEM; + } + tcphdr = (struct tcp_hdr *)p->payload; + + if (is_fin) { + /* FIN segment, no data */ + TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN); + } else { + /* Data segment, copy in one byte from the head of the unacked queue */ + char *d = ((char *)p->payload + TCP_HLEN); + /* Depending on whether the segment has already been sent (unacked) or not + (unsent), seg->p->payload points to the IP header or TCP header. + Ensure we copy the first TCP data byte: */ + pbuf_copy_partial(seg->p, d, 1, seg->p->tot_len - seg->len); + } + + /* The byte may be acknowledged without the window being opened. 
*/ + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + 1; + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + tcp_output_fill_options(pcb, p, 0, optlen); + + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F + " ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} +#endif /* LWIP_TCP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/timeouts.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/timeouts.c new file mode 100644 index 000000000..f37acfec9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/timeouts.c @@ -0,0 +1,451 @@ +/** + * @file + * Stack-internal timers implementation. + * This file includes timer callbacks for stack-internal timers as well as + * functions to set up or stop timers and check for expired timers. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#include "lwip/timeouts.h" +#include "lwip/priv/tcp_priv.h" + +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#include "lwip/ip4_frag.h" +#include "lwip/etharp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/nd6.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" +#include "lwip/dhcp6.h" +#include "lwip/sys.h" +#include "lwip/pbuf.h" + +#if LWIP_DEBUG_TIMERNAMES +#define HANDLER(x) x, #x +#else /* LWIP_DEBUG_TIMERNAMES */ +#define HANDLER(x) x +#endif /* LWIP_DEBUG_TIMERNAMES */ + +#define LWIP_MAX_TIMEOUT 0x7fffffff + +/* Check if timer's expiry time is greater than time and care about u32_t wraparounds */ +#define TIME_LESS_THAN(t, compare_to) ( (((u32_t)((t)-(compare_to))) > LWIP_MAX_TIMEOUT) ? 1 : 0 ) + +/** This array contains all stack-internal cyclic timers. 
To get the number of + * timers, use LWIP_ARRAYSIZE() */ +const struct lwip_cyclic_timer lwip_cyclic_timers[] = { +#if LWIP_TCP + /* The TCP timer is a special case: it does not have to run always and + is triggered to start from TCP using tcp_timer_needed() */ + {TCP_TMR_INTERVAL, HANDLER(tcp_tmr)}, +#endif /* LWIP_TCP */ +#if LWIP_IPV4 +#if IP_REASSEMBLY + {IP_TMR_INTERVAL, HANDLER(ip_reass_tmr)}, +#endif /* IP_REASSEMBLY */ +#if LWIP_ARP + {ARP_TMR_INTERVAL, HANDLER(etharp_tmr)}, +#endif /* LWIP_ARP */ +#if LWIP_DHCP + {DHCP_COARSE_TIMER_MSECS, HANDLER(dhcp_coarse_tmr)}, + {DHCP_FINE_TIMER_MSECS, HANDLER(dhcp_fine_tmr)}, +#endif /* LWIP_DHCP */ +#if LWIP_AUTOIP + {AUTOIP_TMR_INTERVAL, HANDLER(autoip_tmr)}, +#endif /* LWIP_AUTOIP */ +#if LWIP_IGMP + {IGMP_TMR_INTERVAL, HANDLER(igmp_tmr)}, +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4 */ +#if LWIP_DNS + {DNS_TMR_INTERVAL, HANDLER(dns_tmr)}, +#endif /* LWIP_DNS */ +#if LWIP_IPV6 + {ND6_TMR_INTERVAL, HANDLER(nd6_tmr)}, +#if LWIP_IPV6_REASS + {IP6_REASS_TMR_INTERVAL, HANDLER(ip6_reass_tmr)}, +#endif /* LWIP_IPV6_REASS */ +#if LWIP_IPV6_MLD + {MLD6_TMR_INTERVAL, HANDLER(mld6_tmr)}, +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6_DHCP6 + {DHCP6_TIMER_MSECS, HANDLER(dhcp6_tmr)}, +#endif /* LWIP_IPV6_DHCP6 */ +#endif /* LWIP_IPV6 */ +}; +const int lwip_num_cyclic_timers = LWIP_ARRAYSIZE(lwip_cyclic_timers); + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM + +/** The one and only timeout list */ +static struct sys_timeo *next_timeout; + +static u32_t current_timeout_due_time; + +#if LWIP_TESTMODE +struct sys_timeo** +sys_timeouts_get_next_timeout(void) +{ + return &next_timeout; +} +#endif + +#if LWIP_TCP +/** global variable that shows if the tcp timer is currently scheduled or not */ +static int tcpip_tcp_timer_active; + +/** + * Timer callback function that calls tcp_tmr() and reschedules itself. + * + * @param arg unused argument + */ +static void +tcpip_tcp_timer(void *arg) +{ + LWIP_UNUSED_ARG(arg); + + /* call TCP timer handler */ + tcp_tmr(); + /* timer still needed? */ + if (tcp_active_pcbs || tcp_tw_pcbs) { + /* restart timer */ + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } else { + /* disable timer */ + tcpip_tcp_timer_active = 0; + } +} + +/** + * Called from TCP_REG when registering a new PCB: + * the reason is to have the TCP timer only running when + * there are active (or time-wait) PCBs. + */ +void +tcp_timer_needed(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + + /* timer is off but needed again? 
*/ + if (!tcpip_tcp_timer_active && (tcp_active_pcbs || tcp_tw_pcbs)) { + /* enable and start timer */ + tcpip_tcp_timer_active = 1; + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } +} +#endif /* LWIP_TCP */ + +static void +#if LWIP_DEBUG_TIMERNAMES +sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg, const char *handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg) +#endif +{ + struct sys_timeo *timeout, *t; + + timeout = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT); + if (timeout == NULL) { + LWIP_ASSERT("sys_timeout: timeout != NULL, pool MEMP_SYS_TIMEOUT is empty", timeout != NULL); + return; + } + + timeout->next = NULL; + timeout->h = handler; + timeout->arg = arg; + timeout->time = abs_time; + +#if LWIP_DEBUG_TIMERNAMES + timeout->handler_name = handler_name; + LWIP_DEBUGF(TIMERS_DEBUG, ("sys_timeout: %p abs_time=%"U32_F" handler=%s arg=%p\n", + (void *)timeout, abs_time, handler_name, (void *)arg)); +#endif /* LWIP_DEBUG_TIMERNAMES */ + + if (next_timeout == NULL) { + next_timeout = timeout; + return; + } + if (TIME_LESS_THAN(timeout->time, next_timeout->time)) { + timeout->next = next_timeout; + next_timeout = timeout; + } else { + for (t = next_timeout; t != NULL; t = t->next) { + if ((t->next == NULL) || TIME_LESS_THAN(timeout->time, t->next->time)) { + timeout->next = t->next; + t->next = timeout; + break; + } + } + } +} + +/** + * Timer callback function that calls cyclic->handler() and reschedules itself. + * + * @param arg unused argument + */ +#if !LWIP_TESTMODE +static +#endif +void +lwip_cyclic_timer(void *arg) +{ + u32_t now; + u32_t next_timeout_time; + const struct lwip_cyclic_timer *cyclic = (const struct lwip_cyclic_timer *)arg; + +#if LWIP_DEBUG_TIMERNAMES + LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: %s()\n", cyclic->handler_name)); +#endif + cyclic->handler(); + + now = sys_now(); + next_timeout_time = (u32_t)(current_timeout_due_time + cyclic->interval_ms); /* overflow handled by TIME_LESS_THAN macro */ + if (TIME_LESS_THAN(next_timeout_time, now)) { + /* timer would immediately expire again -> "overload" -> restart without any correction */ +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg, cyclic->handler_name); +#else + sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg); +#endif + + } else { + /* correct cyclic interval with handler execution delay and sys_check_timeouts jitter */ +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg, cyclic->handler_name); +#else + sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg); +#endif + } +} + +/** Initialize this module */ +void sys_timeouts_init(void) +{ + size_t i; + /* tcp_tmr() at index 0 is started on demand */ + for (i = (LWIP_TCP ? 1 : 0); i < LWIP_ARRAYSIZE(lwip_cyclic_timers); i++) { + /* we have to cast via size_t to get rid of const warning + (this is OK as cyclic_timer() casts back to const* */ + sys_timeout(lwip_cyclic_timers[i].interval_ms, lwip_cyclic_timer, LWIP_CONST_CAST(void *, &lwip_cyclic_timers[i])); + } +} + +/** + * Create a one-shot timer (aka timeout). 
Timeouts are processed in the + * following cases: + * - while waiting for a message using sys_timeouts_mbox_fetch() + * - by calling sys_check_timeouts() (NO_SYS==1 only) + * + * @param msecs time in milliseconds after that the timer should expire + * @param handler callback function to call when msecs have elapsed + * @param arg argument to pass to the callback function + */ +#if LWIP_DEBUG_TIMERNAMES +void +sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char *handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +void +sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg) +#endif /* LWIP_DEBUG_TIMERNAMES */ +{ + u32_t next_timeout_time; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("Timeout time too long, max is LWIP_UINT32_MAX/4 msecs", msecs <= (LWIP_UINT32_MAX / 4)); + + next_timeout_time = (u32_t)(sys_now() + msecs); /* overflow handled by TIME_LESS_THAN macro */ + +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs(next_timeout_time, handler, arg, handler_name); +#else + sys_timeout_abs(next_timeout_time, handler, arg); +#endif +} + +/** + * Go through timeout list (for this task only) and remove the first matching + * entry (subsequent entries remain untouched), even though the timeout has not + * triggered yet. + * + * @param handler callback function that would be called by the timeout + * @param arg callback argument that would be passed to handler +*/ +void +sys_untimeout(sys_timeout_handler handler, void *arg) +{ + struct sys_timeo *prev_t, *t; + + LWIP_ASSERT_CORE_LOCKED(); + + if (next_timeout == NULL) { + return; + } + + for (t = next_timeout, prev_t = NULL; t != NULL; prev_t = t, t = t->next) { + if ((t->h == handler) && (t->arg == arg)) { + /* We have a match */ + /* Unlink from previous in list */ + if (prev_t == NULL) { + next_timeout = t->next; + } else { + prev_t->next = t->next; + } + memp_free(MEMP_SYS_TIMEOUT, t); + return; + } + } + return; +} + +/** + * @ingroup lwip_nosys + * Handle timeouts for NO_SYS==1 (i.e. without using + * tcpip_thread/sys_timeouts_mbox_fetch(). Uses sys_now() to call timeout + * handler functions when timeouts expire. + * + * Must be called periodically from your main loop. + */ +void +sys_check_timeouts(void) +{ + u32_t now; + + LWIP_ASSERT_CORE_LOCKED(); + + /* Process only timers expired at the start of the function. */ + now = sys_now(); + + do { + struct sys_timeo *tmptimeout; + sys_timeout_handler handler; + void *arg; + + PBUF_CHECK_FREE_OOSEQ(); + + tmptimeout = next_timeout; + if (tmptimeout == NULL) { + return; + } + + if (TIME_LESS_THAN(now, tmptimeout->time)) { + return; + } + + /* Timeout has expired */ + next_timeout = tmptimeout->next; + handler = tmptimeout->h; + arg = tmptimeout->arg; + current_timeout_due_time = tmptimeout->time; +#if LWIP_DEBUG_TIMERNAMES + if (handler != NULL) { + LWIP_DEBUGF(TIMERS_DEBUG, ("sct calling h=%s t=%"U32_F" arg=%p\n", + tmptimeout->handler_name, sys_now() - tmptimeout->time, arg)); + } +#endif /* LWIP_DEBUG_TIMERNAMES */ + memp_free(MEMP_SYS_TIMEOUT, tmptimeout); + if (handler != NULL) { + handler(arg); + } + LWIP_TCPIP_THREAD_ALIVE(); + + /* Repeat until all expired timers have been called */ + } while (1); +} + +/** Rebase the timeout times to the current time. + * This is necessary if sys_check_timeouts() hasn't been called for a long + * time (e.g. while saving energy) to prevent all timer functions of that + * period being called. 
+ */ +void +sys_restart_timeouts(void) +{ + u32_t now; + u32_t base; + struct sys_timeo *t; + + if (next_timeout == NULL) { + return; + } + + now = sys_now(); + base = next_timeout->time; + + for (t = next_timeout; t != NULL; t = t->next) { + t->time = (t->time - base) + now; + } +} + +/** Return the time left before the next timeout is due. If no timeouts are + * enqueued, returns 0xffffffff + */ +u32_t +sys_timeouts_sleeptime(void) +{ + u32_t now; + + LWIP_ASSERT_CORE_LOCKED(); + + if (next_timeout == NULL) { + return SYS_TIMEOUTS_SLEEPTIME_INFINITE; + } + now = sys_now(); + if (TIME_LESS_THAN(next_timeout->time, now)) { + return 0; + } else { + u32_t ret = (u32_t)(next_timeout->time - now); + LWIP_ASSERT("invalid sleeptime", ret <= LWIP_MAX_TIMEOUT); + return ret; + } +} + +#else /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ +/* Satisfy the TCP code which calls this function */ +void +tcp_timer_needed(void) +{ +} +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/udp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/udp.c new file mode 100644 index 000000000..9d2cb4af7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/core/udp.c @@ -0,0 +1,1314 @@ +/** + * @file + * User Datagram Protocol module\n + * The code for the User Datagram Protocol UDP & UDPLite (RFC 3828).\n + * See also @ref udp_raw + * + * @defgroup udp_raw UDP + * @ingroup callbackstyle_api + * User Datagram Protocol module\n + * @see @ref api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* @todo Check the use of '(struct udp_pcb).chksum_len_rx'! 
+ */ + +#include "lwip/opt.h" + +#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/udp.h" +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/inet_chksum.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/icmp.h" +#include "lwip/icmp6.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/dhcp.h" + +#include + +#ifndef UDP_LOCAL_PORT_RANGE_START +/* From http://www.iana.org/assignments/port-numbers: + "The Dynamic and/or Private Ports are those from 49152 through 65535" */ +#define UDP_LOCAL_PORT_RANGE_START 0xc000 +#define UDP_LOCAL_PORT_RANGE_END 0xffff +#define UDP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~UDP_LOCAL_PORT_RANGE_START) + UDP_LOCAL_PORT_RANGE_START)) +#endif + +/* last local UDP port */ +static u16_t udp_port = UDP_LOCAL_PORT_RANGE_START; + +/* The list of UDP PCBs */ +/* exported in udp.h (was static) */ +struct udp_pcb *udp_pcbs; + +/** + * Initialize this module. + */ +void +udp_init(void) +{ +#ifdef LWIP_RAND + udp_port = UDP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RAND */ +} + +/** + * Allocate a new local UDP port. + * + * @return a new (free) local UDP port number + */ +static u16_t +udp_new_port(void) +{ + u16_t n = 0; + struct udp_pcb *pcb; + +again: + if (udp_port++ == UDP_LOCAL_PORT_RANGE_END) { + udp_port = UDP_LOCAL_PORT_RANGE_START; + } + /* Check all PCBs. */ + for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == udp_port) { + if (++n > (UDP_LOCAL_PORT_RANGE_END - UDP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + return udp_port; +} + +/** Common code to see if the current input packet matches the pcb + * (current input packet is accessed via ip(4/6)_current_* macros) + * + * @param pcb pcb to check + * @param inp network interface on which the datagram was received (only used for IPv4) + * @param broadcast 1 if his is an IPv4 broadcast (global or subnet-only), 0 otherwise (only used for IPv4) + * @return 1 on match, 0 otherwise + */ +static u8_t +udp_input_local_match(struct udp_pcb *pcb, struct netif *inp, u8_t broadcast) +{ + LWIP_UNUSED_ARG(inp); /* in IPv6 only case */ + LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ + + LWIP_ASSERT("udp_input_local_match: invalid pcb", pcb != NULL); + LWIP_ASSERT("udp_input_local_match: invalid netif", inp != NULL); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + return 0; + } + + /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { +#if LWIP_IPV4 && IP_SOF_BROADCAST_RECV + if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { + return 0; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST_RECV */ + return 1; + } + + /* Only need to check PCB if incoming IP version matches PCB IP version */ + if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { +#if LWIP_IPV4 + /* Special case: IPv4 broadcast: all or broadcasts in my subnet + * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ + if (broadcast != 0) { +#if IP_SOF_BROADCAST_RECV + if (ip_get_option(pcb, SOF_BROADCAST)) +#endif /* IP_SOF_BROADCAST_RECV */ + { + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ((ip4_current_dest_addr()->addr == IPADDR_BROADCAST)) || + ip4_addr_netcmp(ip_2_ip4(&pcb->local_ip), ip4_current_dest_addr(), 
netif_ip4_netmask(inp))) { + return 1; + } + } + } else +#endif /* LWIP_IPV4 */ + /* Handle IPv4 and IPv6: all or exact match */ + if (ip_addr_isany(&pcb->local_ip) || ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + return 1; + } + } + + return 0; +} + +/** + * Process an incoming UDP datagram. + * + * Given an incoming UDP datagram (as a chain of pbufs) this function + * finds a corresponding UDP PCB and hands over the pbuf to the pcbs + * recv function. If no pcb is found or the datagram is incorrect, the + * pbuf is freed. + * + * @param p pbuf to be demultiplexed to a UDP PCB (p->payload pointing to the UDP header) + * @param inp network interface on which the datagram was received. + * + */ +void +udp_input(struct pbuf *p, struct netif *inp) +{ + struct udp_hdr *udphdr; + struct udp_pcb *pcb, *prev; + struct udp_pcb *uncon_pcb; + u16_t src, dest; + u8_t broadcast; + u8_t for_us = 0; + + LWIP_UNUSED_ARG(inp); + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("udp_input: invalid pbuf", p != NULL); + LWIP_ASSERT("udp_input: invalid netif", inp != NULL); + + PERF_START; + + UDP_STATS_INC(udp.recv); + + /* Check minimum length (UDP header) */ + if (p->len < UDP_HLEN) { + /* drop short packets */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_input: short UDP datagram (%"U16_F" bytes) discarded\n", p->tot_len)); + UDP_STATS_INC(udp.lenerr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + udphdr = (struct udp_hdr *)p->payload; + + /* is broadcast packet ? */ + broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_DEBUGF(UDP_DEBUG, ("udp_input: received datagram of length %"U16_F"\n", p->tot_len)); + + /* convert src and dest ports to host byte order */ + src = lwip_ntohs(udphdr->src); + dest = lwip_ntohs(udphdr->dest); + + udp_debug_print(udphdr); + + /* print the UDP source and destination */ + LWIP_DEBUGF(UDP_DEBUG, ("udp (")); + ip_addr_debug_print_val(UDP_DEBUG, *ip_current_dest_addr()); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", lwip_ntohs(udphdr->dest))); + ip_addr_debug_print_val(UDP_DEBUG, *ip_current_src_addr()); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", lwip_ntohs(udphdr->src))); + + pcb = NULL; + prev = NULL; + uncon_pcb = NULL; + /* Iterate through the UDP pcb list for a matching pcb. + * 'Perfect match' pcbs (connected to the remote port & ip address) are + * preferred. If no perfect match is found, the first unconnected pcb that + * matches the local port and ip address gets the datagram. 
*/ + for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { + /* print the PCB local and remote address */ + LWIP_DEBUGF(UDP_DEBUG, ("pcb (")); + ip_addr_debug_print_val(UDP_DEBUG, pcb->local_ip); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", pcb->local_port)); + ip_addr_debug_print_val(UDP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", pcb->remote_port)); + + /* compare PCB local addr+port to UDP destination addr+port */ + if ((pcb->local_port == dest) && + (udp_input_local_match(pcb, inp, broadcast) != 0)) { + if ((pcb->flags & UDP_FLAGS_CONNECTED) == 0) { + if (uncon_pcb == NULL) { + /* the first unconnected matching PCB */ + uncon_pcb = pcb; +#if LWIP_IPV4 + } else if (broadcast && ip4_current_dest_addr()->addr == IPADDR_BROADCAST) { + /* global broadcast address (only valid for IPv4; match was checked before) */ + if (!IP_IS_V4_VAL(uncon_pcb->local_ip) || !ip4_addr_cmp(ip_2_ip4(&uncon_pcb->local_ip), netif_ip4_addr(inp))) { + /* uncon_pcb does not match the input netif, check this pcb */ + if (IP_IS_V4_VAL(pcb->local_ip) && ip4_addr_cmp(ip_2_ip4(&pcb->local_ip), netif_ip4_addr(inp))) { + /* better match */ + uncon_pcb = pcb; + } + } +#endif /* LWIP_IPV4 */ + } +#if SO_REUSE + else if (!ip_addr_isany(&pcb->local_ip)) { + /* prefer specific IPs over catch-all */ + uncon_pcb = pcb; + } +#endif /* SO_REUSE */ + } + + /* compare PCB remote addr+port to UDP source addr+port */ + if ((pcb->remote_port == src) && + (ip_addr_isany_val(pcb->remote_ip) || + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { + /* the first fully matching PCB */ + if (prev != NULL) { + /* move the pcb to the front of udp_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = udp_pcbs; + udp_pcbs = pcb; + } else { + UDP_STATS_INC(udp.cachehit); + } + break; + } + } + + prev = pcb; + } + /* no fully matching pcb found? then look for an unconnected pcb */ + if (pcb == NULL) { + pcb = uncon_pcb; + } + + /* Check checksum if this is a match or if it was directed at us. */ + if (pcb != NULL) { + for_us = 1; + } else { +#if LWIP_IPV6 + if (ip_current_is_v6()) { + for_us = netif_get_ip6_addr_match(inp, ip6_current_dest_addr()) >= 0; + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + if (!ip_current_is_v6()) { + for_us = ip4_addr_cmp(netif_ip4_addr(inp), ip4_current_dest_addr()); + } +#endif /* LWIP_IPV4 */ + } + + if (for_us) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: calculating checksum\n")); +#if CHECKSUM_CHECK_UDP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_UDP) { +#if LWIP_UDPLITE + if (ip_current_header_proto() == IP_PROTO_UDPLITE) { + /* Do the UDP Lite checksum */ + u16_t chklen = lwip_ntohs(udphdr->len); + if (chklen < sizeof(struct udp_hdr)) { + if (chklen == 0) { + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet (See RFC 3828 chap. 3.1) */ + chklen = p->tot_len; + } else { + /* At least the UDP-Lite header must be covered by the + checksum! (Again, see RFC 3828 chap. 3.1) */ + goto chkerr; + } + } + if (ip_chksum_pseudo_partial(p, IP_PROTO_UDPLITE, + p->tot_len, chklen, + ip_current_src_addr(), ip_current_dest_addr()) != 0) { + goto chkerr; + } + } else +#endif /* LWIP_UDPLITE */ + { + if (udphdr->chksum != 0) { + if (ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len, + ip_current_src_addr(), + ip_current_dest_addr()) != 0) { + goto chkerr; + } + } + } + } +#endif /* CHECKSUM_CHECK_UDP */ + if (pbuf_remove_header(p, UDP_HLEN)) { + /* Can we cope with this failing? 
Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + if (pcb != NULL) { + MIB2_STATS_INC(mib2.udpindatagrams); +#if SO_REUSE && SO_REUSE_RXTOALL + if (ip_get_option(pcb, SOF_REUSEADDR) && + (broadcast || ip_addr_ismulticast(ip_current_dest_addr()))) { + /* pass broadcast- or multicast packets to all multicast pcbs + if SOF_REUSEADDR is set on the first match */ + struct udp_pcb *mpcb; + for (mpcb = udp_pcbs; mpcb != NULL; mpcb = mpcb->next) { + if (mpcb != pcb) { + /* compare PCB local addr+port to UDP destination addr+port */ + if ((mpcb->local_port == dest) && + (udp_input_local_match(mpcb, inp, broadcast) != 0)) { + /* pass a copy of the packet to all local matches */ + if (mpcb->recv != NULL) { + struct pbuf *q; + q = pbuf_clone(PBUF_RAW, PBUF_POOL, p); + if (q != NULL) { + mpcb->recv(mpcb->recv_arg, mpcb, q, ip_current_src_addr(), src); + } + } + } + } + } + } +#endif /* SO_REUSE && SO_REUSE_RXTOALL */ + /* callback */ + if (pcb->recv != NULL) { + /* now the recv function is responsible for freeing p */ + pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr(), src); + } else { + /* no recv function registered? then we have to free the pbuf! */ + pbuf_free(p); + goto end; + } + } else { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: not for us.\n")); + +#if LWIP_ICMP || LWIP_ICMP6 + /* No match was found, send ICMP destination port unreachable unless + destination address was broadcast/multicast. */ + if (!broadcast && !ip_addr_ismulticast(ip_current_dest_addr())) { + /* move payload pointer back to ip header */ + pbuf_header_force(p, (s16_t)(ip_current_header_tot_len() + UDP_HLEN)); + icmp_port_unreach(ip_current_is_v6(), p); + } +#endif /* LWIP_ICMP || LWIP_ICMP6 */ + UDP_STATS_INC(udp.proterr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpnoports); + pbuf_free(p); + } + } else { + pbuf_free(p); + } +end: + PERF_STOP("udp_input"); + return; +#if CHECKSUM_CHECK_UDP +chkerr: + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_input: UDP (or UDP Lite) datagram discarded due to failing checksum\n")); + UDP_STATS_INC(udp.chkerr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + PERF_STOP("udp_input"); +#endif /* CHECKSUM_CHECK_UDP */ +} + +/** + * @ingroup udp_raw + * Sends the pbuf p using UDP. The pbuf is not deallocated. + * + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * + * The datagram will be sent to the current remote_ip & remote_port + * stored in pcb. If the pcb is not bound to a port, it will + * automatically be bound to a random port. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_MEM. Out of memory. + * - ERR_RTE. Could not find route to destination address. + * - ERR_VAL. No PCB or PCB is dual-stack + * - More errors could be returned by lower protocol layers. 
+ * + * @see udp_disconnect() udp_sendto() + */ +err_t +udp_send(struct udp_pcb *pcb, struct pbuf *p) +{ + LWIP_ERROR("udp_send: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_send: invalid pbuf", p != NULL, return ERR_ARG); + + if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { + return ERR_VAL; + } + + /* send to the packet using remote ip and port stored in the pcb */ + return udp_sendto(pcb, p, &pcb->remote_ip, pcb->remote_port); +} + +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP +/** @ingroup udp_raw + * Same as udp_send() but with checksum + */ +err_t +udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, + u8_t have_chksum, u16_t chksum) +{ + LWIP_ERROR("udp_send_chksum: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_send_chksum: invalid pbuf", p != NULL, return ERR_ARG); + + if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { + return ERR_VAL; + } + + /* send to the packet using remote ip and port stored in the pcb */ + return udp_sendto_chksum(pcb, p, &pcb->remote_ip, pcb->remote_port, + have_chksum, chksum); +} +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. + * + * If the PCB already has a remote address association, it will + * be restored after the data is sent. + * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_chksum(pcb, p, dst_ip, dst_port, 0, 0); +} + +/** @ingroup udp_raw + * Same as udp_sendto(), but with checksum */ +err_t +udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, u8_t have_chksum, u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct netif *netif; + + LWIP_ERROR("udp_sendto: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send\n")); + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { +#if LWIP_MULTICAST_TX_OPTIONS + netif = NULL; + if (ip_addr_ismulticast(dst_ip)) { + /* For IPv6, the interface to use for packets with a multicast destination + * is specified using an interface index. The same approach may be used for + * IPv4 as well, in which case it overrides the IPv4 multicast override + * address below. Here we have to look up the netif by going through the + * list, but by doing so we skip a route lookup. If the interface index has + * gone stale, we fall through and do the regular route lookup after all. */ + if (pcb->mcast_ifindex != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->mcast_ifindex); + } +#if LWIP_IPV4 + else +#if LWIP_IPV6 + if (IP_IS_V4(dst_ip)) +#endif /* LWIP_IPV6 */ + { + /* IPv4 does not use source-based routing by default, so we use an + administratively selected interface for multicast by default. 
+ However, this can be overridden by setting an interface address + in pcb->mcast_ip4 that is used for routing. If this routing lookup + fails, we try regular routing as though no override was set. */ + if (!ip4_addr_isany_val(pcb->mcast_ip4) && + !ip4_addr_cmp(&pcb->mcast_ip4, IP4_ADDR_BROADCAST)) { + netif = ip4_route_src(ip_2_ip4(&pcb->local_ip), &pcb->mcast_ip4); + } + } +#endif /* LWIP_IPV4 */ + } + + if (netif == NULL) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + { + /* find the outgoing network interface for this packet */ + netif = ip_route(&pcb->local_ip, dst_ip); + } + } + + /* no outgoing network interface could be found? */ + if (netif == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: No route to ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, dst_ip); + LWIP_DEBUGF(UDP_DEBUG, ("\n")); + UDP_STATS_INC(udp.rterr); + return ERR_RTE; + } +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if(pcb, p, dst_ip, dst_port, netif); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * The netif used for sending can be specified. + * + * This function exists mainly for DHCP, to be able to send UDP packets + * on a netif that is still down. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * @param netif the netif used for sending. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto_if(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0); +} + +/** Same as udp_sendto_if(), but with checksum */ +err_t +udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + const ip_addr_t *src_ip; + + LWIP_ERROR("udp_sendto_if: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid netif", netif != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + /* PCB local address is IP_ANY_ADDR or multicast? */ +#if LWIP_IPV6 + if (IP_IS_V6(dst_ip)) { + if (ip6_addr_isany(ip_2_ip6(&pcb->local_ip)) || + ip6_addr_ismulticast(ip_2_ip6(&pcb->local_ip))) { + src_ip = ip6_select_source_address(netif, ip_2_ip6(dst_ip)); + if (src_ip == NULL) { + /* No suitable source address was found. */ + return ERR_RTE; + } + } else { + /* use UDP PCB local IPv6 address as source address, if still valid. */ + if (netif_get_ip6_addr_match(netif, ip_2_ip6(&pcb->local_ip)) < 0) { + /* Address isn't valid anymore. 
*/ + return ERR_RTE; + } + src_ip = &pcb->local_ip; + } + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ip4_addr_ismulticast(ip_2_ip4(&pcb->local_ip))) { + /* if the local_ip is any or multicast + * use the outgoing network interface IP address as source address */ + src_ip = netif_ip_addr4(netif); + } else { + /* check if UDP PCB local IP address is correct + * this could be an old address if netif->ip_addr has changed */ + if (!ip4_addr_cmp(ip_2_ip4(&(pcb->local_ip)), netif_ip4_addr(netif))) { + /* local_ip doesn't match, drop the packet */ + return ERR_RTE; + } + /* use UDP PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } +#endif /* LWIP_IPV4 */ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum, src_ip); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if_src(pcb, p, dst_ip, dst_port, netif, src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** @ingroup udp_raw + * Same as @ref udp_sendto_if, but with source address */ +err_t +udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, const ip_addr_t *src_ip) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0, src_ip); +} + +/** Same as udp_sendto_if_src(), but with checksum */ +err_t +udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum, const ip_addr_t *src_ip) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct udp_hdr *udphdr; + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u8_t ip_proto; + u8_t ttl; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_sendto_if_src: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid src_ip", src_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid netif", netif != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + +#if LWIP_IPV4 && IP_SOF_BROADCAST + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && +#if LWIP_IPV6 + IP_IS_V4(dst_ip) && +#endif /* LWIP_IPV6 */ + ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_sendto_if: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + return ERR_VAL; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST */ + + /* if the PCB is not yet bound to a port, bind it here */ + if (pcb->local_port == 0) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send: not yet bound to a port, binding now\n")); + err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: forced port bind failed\n")); + return err; + } + } + + /* packet too large to add a UDP header without causing an overflow? */ + if ((u16_t)(p->tot_len + UDP_HLEN) < p->tot_len) { + return ERR_MEM; + } + /* not enough space to add an UDP header to first pbuf in given p chain? 
*/ + if (pbuf_add_header(p, UDP_HLEN)) { + /* allocate header in a separate new pbuf */ + q = pbuf_alloc(PBUF_IP, UDP_HLEN, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p (only if p contains data) */ + pbuf_chain(q, p); + } + /* first pbuf q points to header pbuf */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_send: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* adding space for header within p succeeded */ + /* first pbuf q equals given pbuf */ + q = p; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: added header in given pbuf %p\n", (void *)p)); + } + LWIP_ASSERT("check that first pbuf can hold struct udp_hdr", + (q->len >= sizeof(struct udp_hdr))); + /* q now represents the packet to be sent */ + udphdr = (struct udp_hdr *)q->payload; + udphdr->src = lwip_htons(pcb->local_port); + udphdr->dest = lwip_htons(dst_port); + /* in UDP, 0 checksum means 'no checksum' */ + udphdr->chksum = 0x0000; + + /* Multicast Loop? */ +#if LWIP_MULTICAST_TX_OPTIONS + if (((pcb->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: sending datagram of length %"U16_F"\n", q->tot_len)); + +#if LWIP_UDPLITE + /* UDP Lite protocol? */ + if (pcb->flags & UDP_FLAGS_UDPLITE) { + u16_t chklen, chklen_hdr; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE packet length %"U16_F"\n", q->tot_len)); + /* set UDP message length in UDP header */ + chklen_hdr = chklen = pcb->chksum_len_tx; + if ((chklen < sizeof(struct udp_hdr)) || (chklen > q->tot_len)) { + if (chklen != 0) { + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE pcb->chksum_len is illegal: %"U16_F"\n", chklen)); + } + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet. (See RFC 3828 chap. 3.1) + At least the UDP-Lite header must be covered by the + checksum, therefore, if chksum_len has an illegal + value, we generate the checksum over the complete + packet to be safe. */ + chklen_hdr = 0; + chklen = q->tot_len; + } + udphdr->len = lwip_htons(chklen_hdr); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + chklen = UDP_HLEN; + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + udphdr->chksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDPLITE, + q->tot_len, chklen, src_ip, dst_ip); +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + acc = udphdr->chksum + (u16_t)~(chksum); + udphdr->chksum = FOLD_U32T(acc); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udphdr->chksum == 0x0000) { + udphdr->chksum = 0xffff; + } + } +#endif /* CHECKSUM_GEN_UDP */ + + ip_proto = IP_PROTO_UDPLITE; + } else +#endif /* LWIP_UDPLITE */ + { /* UDP */ + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP packet length %"U16_F"\n", q->tot_len)); + udphdr->len = lwip_htons(q->tot_len); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { + /* Checksum is mandatory over IPv6. 
*/ + if (IP_IS_V6(dst_ip) || (pcb->flags & UDP_FLAGS_NOCHKSUM) == 0) { + u16_t udpchksum; +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + udpchksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDP, + q->tot_len, UDP_HLEN, src_ip, dst_ip); + acc = udpchksum + (u16_t)~(chksum); + udpchksum = FOLD_U32T(acc); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + udpchksum = ip_chksum_pseudo(q, IP_PROTO_UDP, q->tot_len, + src_ip, dst_ip); + } + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udpchksum == 0x0000) { + udpchksum = 0xffff; + } + udphdr->chksum = udpchksum; + } + } +#endif /* CHECKSUM_GEN_UDP */ + ip_proto = IP_PROTO_UDP; + } + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? udp_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP checksum 0x%04"X16_F"\n", udphdr->chksum)); + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: ip_output_if (,,,,0x%02"X16_F",)\n", (u16_t)ip_proto)); + /* output to IP */ + NETIF_SET_HINTS(netif, &(pcb->netif_hints)); + err = ip_output_if_src(q, src_ip, dst_ip, ttl, pcb->tos, ip_proto, netif); + NETIF_RESET_HINTS(netif); + + /* @todo: must this be increased even if error occurred? */ + MIB2_STATS_INC(mib2.udpoutdatagrams); + + /* did we chain a separate header pbuf earlier? */ + if (q != p) { + /* free the header pbuf */ + pbuf_free(q); + q = NULL; + /* p is still referenced by the caller, and will live on */ + } + + UDP_STATS_INC(udp.xmit); + return err; +} + +/** + * @ingroup udp_raw + * Bind an UDP PCB. + * + * @param pcb UDP PCB to be bound with a local address ipaddr and port. + * @param ipaddr local IP address to bind with. Use IP_ANY_TYPE to + * bind to all local interfaces. + * @param port local UDP port to bind with. Use 0 to automatically bind + * to a random port between UDP_LOCAL_PORT_RANGE_START and + * UDP_LOCAL_PORT_RANGE_END. + * + * ipaddr & port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified ipaddr and port are already bound to by + * another UDP PCB. + * + * @see udp_disconnect() + */ +err_t +udp_bind(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + u8_t rebind; +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + ip_addr_t zoned_ipaddr; +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY; + } +#else /* LWIP_IPV4 */ + LWIP_ERROR("udp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG); +#endif /* LWIP_IPV4 */ + + LWIP_ERROR("udp_bind: invalid pcb", pcb != NULL, return ERR_ARG); + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_bind(ipaddr = ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE, ipaddr); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, (", port = %"U16_F")\n", port)); + + rebind = 0; + /* Check for double bind and rebind of the same pcb */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + /* is this UDP PCB already on active list? */ + if (pcb == ipcb) { + rebind = 1; + break; + } + } + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. 
Do the zone selection before the address-in-use + * check below; as such we have to make a temporary copy of the address. */ + if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { + ip_addr_copy(zoned_ipaddr, *ipaddr); + ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr)); + ipaddr = &zoned_ipaddr; + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + /* no port specified? */ + if (port == 0) { + port = udp_new_port(); + if (port == 0) { + /* no more ports available in local range */ + LWIP_DEBUGF(UDP_DEBUG, ("udp_bind: out of free UDP ports\n")); + return ERR_USE; + } + } else { + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + if (pcb != ipcb) { + /* By default, we don't allow to bind to a port that any other udp + PCB is already bound to, unless *all* PCBs with that port have tha + REUSEADDR flag set. */ +#if SO_REUSE + if (!ip_get_option(pcb, SOF_REUSEADDR) || + !ip_get_option(ipcb, SOF_REUSEADDR)) +#endif /* SO_REUSE */ + { + /* port matches that of PCB in list and REUSEADDR not set -> reject */ + if ((ipcb->local_port == port) && + /* IP address matches or any IP used? */ + (ip_addr_cmp(&ipcb->local_ip, ipaddr) || ip_addr_isany(ipaddr) || + ip_addr_isany(&ipcb->local_ip))) { + /* other PCB already binds to this local IP and port */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_bind: local port %"U16_F" already bound by another pcb\n", port)); + return ERR_USE; + } + } + } + } + } + + ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); + + pcb->local_port = port; + mib2_udp_bind(pcb); + /* pcb not active yet? */ + if (rebind == 0) { + /* place the PCB on the active list if not already there */ + pcb->next = udp_pcbs; + udp_pcbs = pcb; + } + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_bind: bound to ")); + ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, pcb->local_ip); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->local_port)); + return ERR_OK; +} + +/** + * @ingroup udp_raw + * Bind an UDP PCB to a specific netif. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb UDP PCB to be bound. + * @param netif netif to bind udp pcb to. Can be NULL. + * + * @see udp_disconnect() + */ +void +udp_bind_netif(struct udp_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +/** + * @ingroup udp_raw + * Sets the remote end of the pcb. This function does not generate any + * network traffic, but only sets the remote address of the pcb. + * + * @param pcb UDP PCB to be connected with remote address ipaddr and port. + * @param ipaddr remote IP address to connect with. + * @param port remote UDP port to connect with. + * + * @return lwIP error code + * + * ipaddr & port are expected to be in the same byte order as in the pcb. + * + * The udp pcb is bound to a random local port if not already bound. 
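+ *
+ * Editor's note - illustrative sketch, not upstream lwIP documentation. A
+ * receiver bound with udp_bind() and then connected, so that only datagrams
+ * from that peer are delivered; my_recv and peer_ip are placeholders.
+ *
+ *   static void my_recv(void *arg, struct udp_pcb *upcb, struct pbuf *p,
+ *                       const ip_addr_t *addr, u16_t port)
+ *   {
+ *     if (p != NULL) {
+ *       // the recv callback owns p and must free it
+ *       pbuf_free(p);
+ *     }
+ *   }
+ *
+ *   struct udp_pcb *pcb = udp_new();
+ *   if ((pcb != NULL) && (udp_bind(pcb, IP_ADDR_ANY, 1234) == ERR_OK)) {
+ *     udp_connect(pcb, &peer_ip, 1234);
+ *     udp_recv(pcb, my_recv, NULL);
+ *   }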
+ * + * @see udp_disconnect() + */ +err_t +udp_connect(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_connect: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG); + + if (pcb->local_port == 0) { + err_t err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + return err; + } + } + + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now, + * using the bound address to make a more informed decision when possible. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + pcb->remote_port = port; + pcb->flags |= UDP_FLAGS_CONNECTED; + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_connect: connected to ")); + ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->remote_port)); + + /* Insert UDP PCB into the list of active UDP PCBs. */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + if (pcb == ipcb) { + /* already on the list, just return */ + return ERR_OK; + } + } + /* PCB not yet on the list, add PCB now */ + pcb->next = udp_pcbs; + udp_pcbs = pcb; + return ERR_OK; +} + +/** + * @ingroup udp_raw + * Remove the remote end of the pcb. This function does not generate + * any network traffic, but only removes the remote address of the pcb. + * + * @param pcb the udp pcb to disconnect. + */ +void +udp_disconnect(struct udp_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_disconnect: invalid pcb", pcb != NULL, return); + + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->remote_port = 0; + pcb->netif_idx = NETIF_NO_INDEX; + /* mark PCB as unconnected */ + udp_clear_flags(pcb, UDP_FLAGS_CONNECTED); +} + +/** + * @ingroup udp_raw + * Set a receive callback for a UDP PCB. + * This callback will be called when receiving a datagram for the pcb. + * + * @param pcb the pcb for which to set the recv callback + * @param recv function pointer of the callback function + * @param recv_arg additional argument to pass to the callback function + */ +void +udp_recv(struct udp_pcb *pcb, udp_recv_fn recv, void *recv_arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_recv: invalid pcb", pcb != NULL, return); + + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup udp_raw + * Removes and deallocates the pcb. + * + * @param pcb UDP PCB to be removed. The PCB is removed from the list of + * UDP PCB's and the data structure is freed from memory. + * + * @see udp_new() + */ +void +udp_remove(struct udp_pcb *pcb) +{ + struct udp_pcb *pcb2; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_remove: invalid pcb", pcb != NULL, return); + + mib2_udp_unbind(pcb); + /* pcb to be removed is first in list? 
*/ + if (udp_pcbs == pcb) { + /* make list start at 2nd pcb */ + udp_pcbs = udp_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = udp_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in udp_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_UDP_PCB, pcb); +} + +/** + * @ingroup udp_raw + * Creates a new UDP pcb which can be used for UDP communication. The + * pcb is not active until it has either been bound to a local address + * or connected to a remote address. + * + * @return The UDP PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @see udp_remove() + */ +struct udp_pcb * +udp_new(void) +{ + struct udp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct udp_pcb *)memp_malloc(MEMP_UDP_PCB); + /* could allocate UDP PCB? */ + if (pcb != NULL) { + /* UDP Lite: by initializing to all zeroes, chksum_len is set to 0 + * which means checksum is generated over the whole datagram per default + * (recommended as default by RFC 3828). */ + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct udp_pcb)); + pcb->ttl = UDP_TTL; +#if LWIP_MULTICAST_TX_OPTIONS + udp_set_multicast_ttl(pcb, UDP_TTL); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + } + return pcb; +} + +/** + * @ingroup udp_raw + * Create a UDP PCB for specific IP type. + * The pcb is not active until it has either been bound to a local address + * or connected to a remote address. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @return The UDP PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @see udp_remove() + */ +struct udp_pcb * +udp_new_ip_type(u8_t type) +{ + struct udp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = udp_new(); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void udp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct udp_pcb *upcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (upcb = udp_pcbs; upcb != NULL; upcb = upcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&upcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(upcb->local_ip, *new_addr); + } + } + } +} + +#if UDP_DEBUG +/** + * Print UDP header information for debug purposes. + * + * @param udphdr pointer to the udp header in memory. 
+ */ +void +udp_debug_print(struct udp_hdr *udphdr) +{ + LWIP_DEBUGF(UDP_DEBUG, ("UDP header:\n")); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(udphdr->src), lwip_ntohs(udphdr->dest))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | 0x%04"X16_F" | (len, chksum)\n", + lwip_ntohs(udphdr->len), lwip_ntohs(udphdr->chksum))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* UDP_DEBUG */ + +#endif /* LWIP_UDP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h new file mode 100644 index 000000000..0ed9baf3d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/sockets.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/sockets.h" diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h new file mode 100644 index 000000000..6b8e63a52 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h @@ -0,0 +1,36 @@ +/** + * @file + * This file is a posix wrapper for lwip/if_api.h. + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/if_api.h" diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h new file mode 100644 index 000000000..12d4c7f56 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/netdb.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + */ + +#include "lwip/netdb.h" diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h new file mode 100644 index 000000000..0ed9baf3d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/sockets.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/sockets.h" diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h new file mode 100644 index 000000000..98a9aec99 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix/stdc wrapper for lwip/errno.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/errno.h" diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h new file mode 100644 index 000000000..97abc54d0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h @@ -0,0 +1,201 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * + * This file contains the generic API. + * For more details see @ref altcp_api. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_H +#define LWIP_HDR_ALTCP_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpbase.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb; +struct altcp_functions; + +typedef err_t (*altcp_accept_fn)(void *arg, struct altcp_pcb *new_conn, err_t err); +typedef err_t (*altcp_connected_fn)(void *arg, struct altcp_pcb *conn, err_t err); +typedef err_t (*altcp_recv_fn)(void *arg, struct altcp_pcb *conn, struct pbuf *p, err_t err); +typedef err_t (*altcp_sent_fn)(void *arg, struct altcp_pcb *conn, u16_t len); +typedef err_t (*altcp_poll_fn)(void *arg, struct altcp_pcb *conn); +typedef void (*altcp_err_fn)(void *arg, err_t err); + +typedef struct altcp_pcb* (*altcp_new_fn)(void *arg, u8_t ip_type); + +struct altcp_pcb { + const struct altcp_functions *fns; + struct altcp_pcb *inner_conn; + void *arg; + void *state; + /* application callbacks */ + altcp_accept_fn accept; + altcp_connected_fn connected; + altcp_recv_fn recv; + altcp_sent_fn sent; + altcp_poll_fn poll; + altcp_err_fn err; + u8_t pollinterval; +}; + +/** @ingroup altcp */ +typedef struct altcp_allocator_s { + /** Allocator function */ + altcp_new_fn alloc; + /** Argument to allocator function */ + void *arg; +} altcp_allocator_t; + +struct altcp_pcb *altcp_new(altcp_allocator_t *allocator); +struct altcp_pcb *altcp_new_ip6(altcp_allocator_t *allocator); +struct altcp_pcb *altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type); + +void altcp_arg(struct altcp_pcb *conn, void *arg); +void altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept); +void altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv); +void altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent); +void altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval); +void altcp_err(struct altcp_pcb *conn, altcp_err_fn err); + +void altcp_recved(struct altcp_pcb *conn, u16_t len); +err_t altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +err_t altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected); + +/* return conn for source code compatibility to tcp callback API only */ +struct altcp_pcb *altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err); +#define altcp_listen_with_backlog(conn, backlog) altcp_listen_with_backlog_and_err(conn, backlog, NULL) +/** @ingroup altcp */ +#define altcp_listen(conn) altcp_listen_with_backlog_and_err(conn, TCP_DEFAULT_LISTEN_BACKLOG, NULL) + +void altcp_abort(struct altcp_pcb *conn); +err_t altcp_close(struct altcp_pcb *conn); +err_t altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx); + +err_t altcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +err_t altcp_output(struct altcp_pcb *conn); + +u16_t altcp_mss(struct altcp_pcb *conn); +u16_t altcp_sndbuf(struct altcp_pcb *conn); +u16_t altcp_sndqueuelen(struct altcp_pcb *conn); +void altcp_nagle_disable(struct altcp_pcb *conn); +void altcp_nagle_enable(struct altcp_pcb *conn); +int altcp_nagle_disabled(struct altcp_pcb *conn); + +void altcp_setprio(struct altcp_pcb *conn, u8_t prio); + +err_t altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +ip_addr_t *altcp_get_ip(struct altcp_pcb *conn, int local); +u16_t altcp_get_port(struct altcp_pcb *conn, int local); + 
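+/* Editor's note - illustrative sketch, not part of upstream lwIP. One way to
+   open a plain-TCP altcp connection through the allocator interface declared
+   above (altcp_tcp_alloc comes from altcp_tcp.h later in this change);
+   my_state, my_recv, my_err, my_connected and remote_ip are
+   application-supplied placeholders.
+
+     altcp_allocator_t tcp_alloc = { altcp_tcp_alloc, NULL };
+     struct altcp_pcb *conn = altcp_new_ip_type(&tcp_alloc, IPADDR_TYPE_V4);
+     if (conn != NULL) {
+       altcp_arg(conn, my_state);
+       altcp_recv(conn, my_recv);
+       altcp_err(conn, my_err);
+       altcp_connect(conn, &remote_ip, 443, my_connected);
+     }
+*/
+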
+#ifdef LWIP_DEBUG +enum tcp_state altcp_dbg_get_tcp_state(struct altcp_pcb *conn); +#endif + +#ifdef __cplusplus +} +#endif + +#else /* LWIP_ALTCP */ + +/* ALTCP disabled, define everything to link against tcp callback API (e.g. to get a small non-ssl httpd) */ + +#include "lwip/tcp.h" + +#define altcp_accept_fn tcp_accept_fn +#define altcp_connected_fn tcp_connected_fn +#define altcp_recv_fn tcp_recv_fn +#define altcp_sent_fn tcp_sent_fn +#define altcp_poll_fn tcp_poll_fn +#define altcp_err_fn tcp_err_fn + +#define altcp_pcb tcp_pcb +#define altcp_tcp_new_ip_type tcp_new_ip_type +#define altcp_tcp_new tcp_new +#define altcp_tcp_new_ip6 tcp_new_ip6 + +#define altcp_new(allocator) tcp_new() +#define altcp_new_ip6(allocator) tcp_new_ip6() +#define altcp_new_ip_type(allocator, ip_type) tcp_new_ip_type(ip_type) + +#define altcp_arg tcp_arg +#define altcp_accept tcp_accept +#define altcp_recv tcp_recv +#define altcp_sent tcp_sent +#define altcp_poll tcp_poll +#define altcp_err tcp_err + +#define altcp_recved tcp_recved +#define altcp_bind tcp_bind +#define altcp_connect tcp_connect + +#define altcp_listen_with_backlog_and_err tcp_listen_with_backlog_and_err +#define altcp_listen_with_backlog tcp_listen_with_backlog +#define altcp_listen tcp_listen + +#define altcp_abort tcp_abort +#define altcp_close tcp_close +#define altcp_shutdown tcp_shutdown + +#define altcp_write tcp_write +#define altcp_output tcp_output + +#define altcp_mss tcp_mss +#define altcp_sndbuf tcp_sndbuf +#define altcp_sndqueuelen tcp_sndqueuelen +#define altcp_nagle_disable tcp_nagle_disable +#define altcp_nagle_enable tcp_nagle_enable +#define altcp_nagle_disabled tcp_nagle_disabled +#define altcp_setprio tcp_setprio + +#define altcp_get_tcp_addrinfo tcp_get_tcp_addrinfo +#define altcp_get_ip(pcb, local) ((local) ? (&(pcb)->local_ip) : (&(pcb)->remote_ip)) + +#ifdef LWIP_DEBUG +#define altcp_dbg_get_tcp_state tcp_dbg_get_tcp_state +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h new file mode 100644 index 000000000..dbde58468 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h @@ -0,0 +1,72 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains the base implementation calling into tcp. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TCP_H +#define LWIP_HDR_ALTCP_TCP_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb *altcp_tcp_new_ip_type(u8_t ip_type); + +#define altcp_tcp_new() altcp_tcp_new_ip_type(IPADDR_TYPE_V4) +#define altcp_tcp_new_ip6() altcp_tcp_new_ip_type(IPADDR_TYPE_V6) + +struct altcp_pcb *altcp_tcp_alloc(void *arg, u8_t ip_type); + +struct tcp_pcb; +struct altcp_pcb *altcp_tcp_wrap(struct tcp_pcb *tpcb); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_TCP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h new file mode 100644 index 000000000..7b17c6080 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h @@ -0,0 +1,117 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * @defgroup altcp_tls TLS layer + * @ingroup altcp + * This file contains function prototypes for a TLS layer. + * A port to ARM mbedtls is provided in the apps/ tree + * (LWIP_ALTCP_TLS_MBEDTLS option). + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TLS_H +#define LWIP_HDR_ALTCP_TLS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#if LWIP_ALTCP_TLS + +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup altcp_tls + * ALTCP_TLS configuration handle, content depends on port (e.g. mbedtls) + */ +struct altcp_tls_config; + +/** @ingroup altcp_tls + * Create an ALTCP_TLS server configuration handle + */ +struct altcp_tls_config *altcp_tls_create_config_server_privkey_cert(const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Create an ALTCP_TLS client configuration handle + */ +struct altcp_tls_config *altcp_tls_create_config_client(const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Create an ALTCP_TLS client configuration handle with two-way server/client authentication + */ +struct altcp_tls_config *altcp_tls_create_config_client_2wayauth(const u8_t *ca, size_t ca_len, const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Free an ALTCP_TLS configuration handle + */ +void altcp_tls_free_config(struct altcp_tls_config *conf); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS layer wrapping an existing pcb as inner connection (e.g. TLS over TCP) + */ +struct altcp_pcb *altcp_tls_wrap(struct altcp_tls_config *config, struct altcp_pcb *inner_pcb); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS pcb and its inner tcp pcb + */ +struct altcp_pcb *altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS layer pcb and its inner tcp pcb. + * Same as @ref altcp_tls_new but this allocator function fits to + * @ref altcp_allocator_t / @ref altcp_new.\n + 'arg' must contain a struct altcp_tls_config *. + */ +struct altcp_pcb *altcp_tls_alloc(void *arg, u8_t ip_type); + +/** @ingroup altcp_tls + * Return pointer to internal TLS context so application can tweak it. + * Real type depends on port (e.g. mbedtls) + */ +void *altcp_tls_context(struct altcp_pcb *conn); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP_TLS */ +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_ALTCP_TLS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/api.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/api.h new file mode 100644 index 000000000..c2afaf26d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/api.h @@ -0,0 +1,431 @@ +/** + * @file + * netconn API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_API_H +#define LWIP_HDR_API_H + +#include "lwip/opt.h" + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#include "lwip/arch.h" +#include "lwip/netbuf.h" +#include "lwip/sys.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Throughout this file, IP addresses and port numbers are expected to be in + * the same byte order as in the corresponding pcb. + */ + +/* Flags for netconn_write (u8_t) */ +#define NETCONN_NOFLAG 0x00 +#define NETCONN_NOCOPY 0x00 /* Only for source code compatibility */ +#define NETCONN_COPY 0x01 +#define NETCONN_MORE 0x02 +#define NETCONN_DONTBLOCK 0x04 +#define NETCONN_NOAUTORCVD 0x08 /* prevent netconn_recv_data_tcp() from updating the tcp window - must be done manually via netconn_tcp_recvd() */ +#define NETCONN_NOFIN 0x10 /* upper layer already received data, leave FIN in queue until called again */ + +/* Flags for struct netconn.flags (u8_t) */ +/** This netconn had an error, don't block on recvmbox/acceptmbox any more */ +#define NETCONN_FLAG_MBOXCLOSED 0x01 +/** Should this netconn avoid blocking? */ +#define NETCONN_FLAG_NON_BLOCKING 0x02 +/** Was the last connect action a non-blocking one? */ +#define NETCONN_FLAG_IN_NONBLOCKING_CONNECT 0x04 +#if LWIP_NETCONN_FULLDUPLEX + /** The mbox of this netconn is being deallocated, don't use it anymore */ +#define NETCONN_FLAG_MBOXINVALID 0x08 +#endif /* LWIP_NETCONN_FULLDUPLEX */ +/** If a nonblocking write has been rejected before, poll_tcp needs to + check if the netconn is writable again */ +#define NETCONN_FLAG_CHECK_WRITESPACE 0x10 +#if LWIP_IPV6 +/** If this flag is set then only IPv6 communication is allowed on the + netconn. As per RFC#3493 this features defaults to OFF allowing + dual-stack usage by default. 
*/ +#define NETCONN_FLAG_IPV6_V6ONLY 0x20 +#endif /* LWIP_IPV6 */ +#if LWIP_NETBUF_RECVINFO +/** Received packet info will be recorded for this netconn */ +#define NETCONN_FLAG_PKTINFO 0x40 +#endif /* LWIP_NETBUF_RECVINFO */ +/** A FIN has been received but not passed to the application yet */ +#define NETCONN_FIN_RX_PENDING 0x80 + +/* Helpers to process several netconn_types by the same code */ +#define NETCONNTYPE_GROUP(t) ((t)&0xF0) +#define NETCONNTYPE_DATAGRAM(t) ((t)&0xE0) +#if LWIP_IPV6 +#define NETCONN_TYPE_IPV6 0x08 +#define NETCONNTYPE_ISIPV6(t) (((t)&NETCONN_TYPE_IPV6) != 0) +#define NETCONNTYPE_ISUDPLITE(t) (((t)&0xF3) == NETCONN_UDPLITE) +#define NETCONNTYPE_ISUDPNOCHKSUM(t) (((t)&0xF3) == NETCONN_UDPNOCHKSUM) +#else /* LWIP_IPV6 */ +#define NETCONNTYPE_ISIPV6(t) (0) +#define NETCONNTYPE_ISUDPLITE(t) ((t) == NETCONN_UDPLITE) +#define NETCONNTYPE_ISUDPNOCHKSUM(t) ((t) == NETCONN_UDPNOCHKSUM) +#endif /* LWIP_IPV6 */ + +/** @ingroup netconn_common + * Protocol family and type of the netconn + */ +enum netconn_type { + NETCONN_INVALID = 0, + /** TCP IPv4 */ + NETCONN_TCP = 0x10, +#if LWIP_IPV6 + /** TCP IPv6 */ + NETCONN_TCP_IPV6 = NETCONN_TCP | NETCONN_TYPE_IPV6 /* 0x18 */, +#endif /* LWIP_IPV6 */ + /** UDP IPv4 */ + NETCONN_UDP = 0x20, + /** UDP IPv4 lite */ + NETCONN_UDPLITE = 0x21, + /** UDP IPv4 no checksum */ + NETCONN_UDPNOCHKSUM = 0x22, + +#if LWIP_IPV6 + /** UDP IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDP_IPV6 = NETCONN_UDP | NETCONN_TYPE_IPV6 /* 0x28 */, + /** UDP IPv6 lite (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDPLITE_IPV6 = NETCONN_UDPLITE | NETCONN_TYPE_IPV6 /* 0x29 */, + /** UDP IPv6 no checksum (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDPNOCHKSUM_IPV6 = NETCONN_UDPNOCHKSUM | NETCONN_TYPE_IPV6 /* 0x2a */, +#endif /* LWIP_IPV6 */ + + /** Raw connection IPv4 */ + NETCONN_RAW = 0x40 +#if LWIP_IPV6 + /** Raw connection IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + , NETCONN_RAW_IPV6 = NETCONN_RAW | NETCONN_TYPE_IPV6 /* 0x48 */ +#endif /* LWIP_IPV6 */ +}; + +/** Current state of the netconn. Non-TCP netconns are always + * in state NETCONN_NONE! */ +enum netconn_state { + NETCONN_NONE, + NETCONN_WRITE, + NETCONN_LISTEN, + NETCONN_CONNECT, + NETCONN_CLOSE +}; + +/** Used to inform the callback function about changes + * + * Event explanation: + * + * In the netconn implementation, there are three ways to block a client: + * + * - accept mbox (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); in netconn_accept()) + * - receive mbox (sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); in netconn_recv_data()) + * - send queue is full (sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); in lwip_netconn_do_write()) + * + * The events have to be seen as events signaling the state of these mboxes/semaphores. For non-blocking + * connections, you need to know in advance whether a call to a netconn function call would block or not, + * and these events tell you about that. + * + * RCVPLUS events say: Safe to perform a potentially blocking call call once more. + * They are counted in sockets - three RCVPLUS events for accept mbox means you are safe + * to call netconn_accept 3 times without being blocked. + * Same thing for receive mbox. + * + * RCVMINUS events say: Your call to to a possibly blocking function is "acknowledged". + * Socket implementation decrements the counter. 
+ * + * For TX, there is no need to count, its merely a flag. SENDPLUS means you may send something. + * SENDPLUS occurs when enough data was delivered to peer so netconn_send() can be called again. + * A SENDMINUS event occurs when the next call to a netconn_send() would be blocking. + */ +enum netconn_evt { + NETCONN_EVT_RCVPLUS, + NETCONN_EVT_RCVMINUS, + NETCONN_EVT_SENDPLUS, + NETCONN_EVT_SENDMINUS, + NETCONN_EVT_ERROR +}; + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** Used for netconn_join_leave_group() */ +enum netconn_igmp { + NETCONN_JOIN, + NETCONN_LEAVE +}; +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/* Used for netconn_gethostbyname_addrtype(), these should match the DNS_ADDRTYPE defines in dns.h */ +#define NETCONN_DNS_DEFAULT NETCONN_DNS_IPV4_IPV6 +#define NETCONN_DNS_IPV4 0 +#define NETCONN_DNS_IPV6 1 +#define NETCONN_DNS_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define NETCONN_DNS_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#endif /* LWIP_DNS */ + +/* forward-declare some structs to avoid to include their headers */ +struct ip_pcb; +struct tcp_pcb; +struct udp_pcb; +struct raw_pcb; +struct netconn; +struct api_msg; + +/** A callback prototype to inform about events for a netconn */ +typedef void (* netconn_callback)(struct netconn *, enum netconn_evt, u16_t len); + +/** A netconn descriptor */ +struct netconn { + /** type of the netconn (TCP, UDP or RAW) */ + enum netconn_type type; + /** current state of the netconn */ + enum netconn_state state; + /** the lwIP internal protocol control block */ + union { + struct ip_pcb *ip; + struct tcp_pcb *tcp; + struct udp_pcb *udp; + struct raw_pcb *raw; + } pcb; + /** the last asynchronous unreported error this netconn had */ + err_t pending_err; +#if !LWIP_NETCONN_SEM_PER_THREAD + /** sem that is used to synchronously execute functions in the core context */ + sys_sem_t op_completed; +#endif + /** mbox where received packets are stored until they are fetched + by the netconn application thread (can grow quite big) */ + sys_mbox_t recvmbox; +#if LWIP_TCP + /** mbox where new connections are stored until processed + by the application thread */ + sys_mbox_t acceptmbox; +#endif /* LWIP_TCP */ +#if LWIP_NETCONN_FULLDUPLEX + /** number of threads waiting on an mbox. This is required to unblock + all threads when closing while threads are waiting. */ + int mbox_threads_waiting; +#endif + /** only used for socket layer */ +#if LWIP_SOCKET + int socket; +#endif /* LWIP_SOCKET */ +#if LWIP_SO_SNDTIMEO + /** timeout to wait for sending data (which means enqueueing data for sending + in internal buffers) in milliseconds */ + s32_t send_timeout; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVTIMEO + /** timeout in milliseconds to wait for new data to be received + (or connections to arrive for listening netconns) */ + u32_t recv_timeout; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + /** maximum amount of bytes queued in recvmbox + not used for TCP: adjust TCP_WND instead! 
*/ + int recv_bufsize; + /** number of bytes currently in recvmbox to be received, + tested against recv_bufsize to limit bytes on recvmbox + for UDP and RAW, used for FIONREAD */ + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + /** values <0 mean linger is disabled, values > 0 are seconds to linger */ + s16_t linger; +#endif /* LWIP_SO_LINGER */ + /** flags holding more netconn-internal state, see NETCONN_FLAG_* defines */ + u8_t flags; +#if LWIP_TCP + /** TCP: when data passed to netconn_write doesn't fit into the send buffer, + this temporarily stores the message. + Also used during connect and close. */ + struct api_msg *current_msg; +#endif /* LWIP_TCP */ + /** A callback function that is informed about events for this netconn */ + netconn_callback callback; +}; + +/** This vector type is passed to @ref netconn_write_vectors_partly to send + * multiple buffers at once. + * ATTENTION: This type has to directly map struct iovec since one is casted + * into the other! + */ +struct netvector { + /** pointer to the application buffer that contains the data to send */ + const void *ptr; + /** size of the application data to send */ + size_t len; +}; + +/** Register an Network connection event */ +#define API_EVENT(c,e,l) if (c->callback) { \ + (*c->callback)(c, e, l); \ + } + +/* Network connection functions: */ + +/** @ingroup netconn_common + * Create new netconn connection + * @param t @ref netconn_type */ +#define netconn_new(t) netconn_new_with_proto_and_callback(t, 0, NULL) +#define netconn_new_with_callback(t, c) netconn_new_with_proto_and_callback(t, 0, c) +struct netconn *netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, + netconn_callback callback); +err_t netconn_prepare_delete(struct netconn *conn); +err_t netconn_delete(struct netconn *conn); +/** Get the type of a netconn (as enum netconn_type). 
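+ *
+ * A minimal, hypothetical usage sketch (application code; error handling omitted):
+ *
+ *   struct netconn *conn = netconn_new(NETCONN_TCP);
+ *   if (conn != NULL) {
+ *     if (netconn_type(conn) == NETCONN_TCP) {
+ *       // plain TCP over IPv4
+ *     }
+ *     netconn_delete(conn);
+ *   }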
*/ +#define netconn_type(conn) (conn->type) + +err_t netconn_getaddr(struct netconn *conn, ip_addr_t *addr, + u16_t *port, u8_t local); +/** @ingroup netconn_common */ +#define netconn_peer(c,i,p) netconn_getaddr(c,i,p,0) +/** @ingroup netconn_common */ +#define netconn_addr(c,i,p) netconn_getaddr(c,i,p,1) + +err_t netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port); +err_t netconn_bind_if(struct netconn *conn, u8_t if_idx); +err_t netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port); +err_t netconn_disconnect (struct netconn *conn); +err_t netconn_listen_with_backlog(struct netconn *conn, u8_t backlog); +/** @ingroup netconn_tcp */ +#define netconn_listen(conn) netconn_listen_with_backlog(conn, TCP_DEFAULT_LISTEN_BACKLOG) +err_t netconn_accept(struct netconn *conn, struct netconn **new_conn); +err_t netconn_recv(struct netconn *conn, struct netbuf **new_buf); +err_t netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf); +err_t netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags); +err_t netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf); +err_t netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags); +err_t netconn_tcp_recvd(struct netconn *conn, size_t len); +err_t netconn_sendto(struct netconn *conn, struct netbuf *buf, + const ip_addr_t *addr, u16_t port); +err_t netconn_send(struct netconn *conn, struct netbuf *buf); +err_t netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written); +err_t netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt, + u8_t apiflags, size_t *bytes_written); +/** @ingroup netconn_tcp */ +#define netconn_write(conn, dataptr, size, apiflags) \ + netconn_write_partly(conn, dataptr, size, apiflags, NULL) +err_t netconn_close(struct netconn *conn); +err_t netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx); + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +err_t netconn_join_leave_group(struct netconn *conn, const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, enum netconn_igmp join_or_leave); +err_t netconn_join_leave_group_netif(struct netconn *conn, const ip_addr_t *multiaddr, + u8_t if_idx, enum netconn_igmp join_or_leave); +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ +#if LWIP_DNS +#if LWIP_IPV4 && LWIP_IPV6 +err_t netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype); +#define netconn_gethostbyname(name, addr) netconn_gethostbyname_addrtype(name, addr, NETCONN_DNS_DEFAULT) +#else /* LWIP_IPV4 && LWIP_IPV6 */ +err_t netconn_gethostbyname(const char *name, ip_addr_t *addr); +#define netconn_gethostbyname_addrtype(name, addr, dns_addrtype) netconn_gethostbyname(name, addr) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#endif /* LWIP_DNS */ + +err_t netconn_err(struct netconn *conn); +#define netconn_recv_bufsize(conn) ((conn)->recv_bufsize) + +#define netconn_set_flags(conn, set_flags) do { (conn)->flags = (u8_t)((conn)->flags | (set_flags)); } while(0) +#define netconn_clear_flags(conn, clr_flags) do { (conn)->flags = (u8_t)((conn)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define netconn_is_flag_set(conn, flag) (((conn)->flags & (flag)) != 0) + +/** Set the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_set_nonblocking(conn, val) do { if(val) { \ + netconn_set_flags(conn, NETCONN_FLAG_NON_BLOCKING); \ +} else { \ + 
netconn_clear_flags(conn, NETCONN_FLAG_NON_BLOCKING); }} while(0) +/** Get the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_is_nonblocking(conn) (((conn)->flags & NETCONN_FLAG_NON_BLOCKING) != 0) + +#if LWIP_IPV6 +/** @ingroup netconn_common + * TCP: Set the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_set_ipv6only(conn, val) do { if(val) { \ + netconn_set_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); \ +} else { \ + netconn_clear_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); }} while(0) +/** @ingroup netconn_common + * TCP: Get the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_get_ipv6only(conn) (((conn)->flags & NETCONN_FLAG_IPV6_V6ONLY) != 0) +#endif /* LWIP_IPV6 */ + +#if LWIP_SO_SNDTIMEO +/** Set the send timeout in milliseconds */ +#define netconn_set_sendtimeout(conn, timeout) ((conn)->send_timeout = (timeout)) +/** Get the send timeout in milliseconds */ +#define netconn_get_sendtimeout(conn) ((conn)->send_timeout) +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO +/** Set the receive timeout in milliseconds */ +#define netconn_set_recvtimeout(conn, timeout) ((conn)->recv_timeout = (timeout)) +/** Get the receive timeout in milliseconds */ +#define netconn_get_recvtimeout(conn) ((conn)->recv_timeout) +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF +/** Set the receive buffer in bytes */ +#define netconn_set_recvbufsize(conn, recvbufsize) ((conn)->recv_bufsize = (recvbufsize)) +/** Get the receive buffer in bytes */ +#define netconn_get_recvbufsize(conn) ((conn)->recv_bufsize) +#endif /* LWIP_SO_RCVBUF*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void netconn_thread_init(void); +void netconn_thread_cleanup(void); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define netconn_thread_init() +#define netconn_thread_cleanup() +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_API_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h new file mode 100644 index 000000000..65d31279d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h @@ -0,0 +1,79 @@ +/** + * @file + * Application layered TCP connection API that executes a proxy-connect. + * + * This file provides a starting layer that executes a proxy-connect e.g. to + * set up TLS connections through a http proxy. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H +#define LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_proxyconnect_config { + ip_addr_t proxy_addr; + u16_t proxy_port; +}; + + +struct altcp_pcb *altcp_proxyconnect_new(struct altcp_proxyconnect_config *config, struct altcp_pcb *inner_pcb); +struct altcp_pcb *altcp_proxyconnect_new_tcp(struct altcp_proxyconnect_config *config, u8_t ip_type); + +struct altcp_pcb *altcp_proxyconnect_alloc(void *arg, u8_t ip_type); + +#if LWIP_ALTCP_TLS +struct altcp_proxyconnect_tls_config { + struct altcp_proxyconnect_config proxy; + struct altcp_tls_config *tls_config; +}; + +struct altcp_pcb *altcp_proxyconnect_tls_alloc(void *arg, u8_t ip_type); +#endif /* LWIP_ALTCP_TLS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h new file mode 100644 index 000000000..36cddd93f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h @@ -0,0 +1,67 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * This file contains options for an mbedtls port of the TLS layer. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TLS_OPTS_H +#define LWIP_HDR_ALTCP_TLS_OPTS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +/** LWIP_ALTCP_TLS_MBEDTLS==1: use mbedTLS for TLS support for altcp API + * mbedtls include directory must be reachable via include search path + */ +#ifndef LWIP_ALTCP_TLS_MBEDTLS +#define LWIP_ALTCP_TLS_MBEDTLS 0 +#endif + +/** Configure debug level of this file */ +#ifndef ALTCP_MBEDTLS_DEBUG +#define ALTCP_MBEDTLS_DEBUG LWIP_DBG_OFF +#endif + +/** Set a session timeout in seconds for the basic session cache + * ATTENTION: Using a session cache can lower security by reusing keys! + */ +#ifndef ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS +#define ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS 0 +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_TLS_OPTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h new file mode 100644 index 000000000..67b9a60a9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_APPS_FS_H +#define LWIP_HDR_APPS_FS_H + +#include "httpd_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define FS_READ_EOF -1 +#define FS_READ_DELAYED -2 + +#if HTTPD_PRECALCULATED_CHECKSUM +struct fsdata_chksum { + u32_t offset; + u16_t chksum; + u16_t len; +}; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + +#define FS_FILE_FLAGS_HEADER_INCLUDED 0x01 +#define FS_FILE_FLAGS_HEADER_PERSISTENT 0x02 +#define FS_FILE_FLAGS_HEADER_HTTPVER_1_1 0x04 +#define FS_FILE_FLAGS_SSI 0x08 + +/** Define FS_FILE_EXTENSION_T_DEFINED if you have typedef'ed to your private + * pointer type (defaults to 'void' so the default usage is 'void*') + */ +#ifndef FS_FILE_EXTENSION_T_DEFINED +typedef void fs_file_extension; +#endif + +struct fs_file { + const char *data; + int len; + int index; + /* pextension is free for implementations to hold private (extensional) + arbitrary data, e.g. holding some file state or file system handle */ + fs_file_extension *pextension; +#if HTTPD_PRECALCULATED_CHECKSUM + const struct fsdata_chksum *chksum; + u16_t chksum_count; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + u8_t flags; +#if LWIP_HTTPD_CUSTOM_FILES + u8_t is_custom_file; +#endif /* LWIP_HTTPD_CUSTOM_FILES */ +#if LWIP_HTTPD_FILE_STATE + void *state; +#endif /* LWIP_HTTPD_FILE_STATE */ +}; + +#if LWIP_HTTPD_FS_ASYNC_READ +typedef void (*fs_wait_cb)(void *arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ + +err_t fs_open(struct fs_file *file, const char *name); +void fs_close(struct fs_file *file); +#if LWIP_HTTPD_DYNAMIC_FILE_READ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_read_async(struct fs_file *file, char *buffer, int count, fs_wait_cb callback_fn, void *callback_arg); +#else /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_read(struct fs_file *file, char *buffer, int count); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +#endif /* LWIP_HTTPD_DYNAMIC_FILE_READ */ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_is_file_ready(struct fs_file *file, fs_wait_cb callback_fn, void *callback_arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_bytes_left(struct fs_file *file); + +#if LWIP_HTTPD_FILE_STATE +/** This user-defined function is called when a file is opened. */ +void *fs_state_init(struct fs_file *file, const char *name); +/** This user-defined function is called when a file is closed. */ +void fs_state_free(struct fs_file *file, void *state); +#endif /* #if LWIP_HTTPD_FILE_STATE */ + +struct fsdata_file { + const struct fsdata_file *next; + const unsigned char *name; + const unsigned char *data; + int len; + u8_t flags; +#if HTTPD_PRECALCULATED_CHECKSUM + u16_t chksum_count; + const struct fsdata_chksum *chksum; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_FS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h new file mode 100644 index 000000000..8a0630833 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h @@ -0,0 +1,160 @@ +/** + * @file + * HTTP client + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_APPS_HTTP_CLIENT_H +#define LWIP_HDR_APPS_HTTP_CLIENT_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/altcp.h" +#include "lwip/prot/iana.h" +#include "lwip/pbuf.h" + +#if LWIP_TCP && LWIP_CALLBACK_API + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup httpc + * HTTPC_HAVE_FILE_IO: define this to 1 to have functions dowloading directly + * to disk via fopen/fwrite. + * These functions are example implementations of the interface only. 
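+ *
+ * A sketch of how this could be used; the server name, URI and file name below are
+ * examples only, and the callback is optional:
+ *
+ *   // in lwipopts.h:
+ *   #define LWIP_HTTPC_HAVE_FILE_IO   1
+ *
+ *   // application code:
+ *   static void dl_done(void *arg, httpc_result_t res, u32_t rx_len, u32_t srv_res, err_t err)
+ *   {
+ *     // res is HTTPC_RESULT_OK when the whole body was written to the local file
+ *   }
+ *
+ *   static void start_download(void)
+ *   {
+ *     static httpc_connection_t settings;  // zero-initialized, outlives the request
+ *     httpc_state_t *req;
+ *     settings.result_fn = dl_done;
+ *     httpc_get_file_dns_to_disk("example.org", HTTP_DEFAULT_PORT, "/file.bin",
+ *                                &settings, NULL, "file.bin", &req);
+ *   }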
+ */ +#ifndef LWIP_HTTPC_HAVE_FILE_IO +#define LWIP_HTTPC_HAVE_FILE_IO 0 +#endif + +/** + * @ingroup httpc + * The default TCP port used for HTTP + */ +#define HTTP_DEFAULT_PORT LWIP_IANA_PORT_HTTP + +/** + * @ingroup httpc + * HTTP client result codes + */ +typedef enum ehttpc_result { + /** File successfully received */ + HTTPC_RESULT_OK = 0, + /** Unknown error */ + HTTPC_RESULT_ERR_UNKNOWN = 1, + /** Connection to server failed */ + HTTPC_RESULT_ERR_CONNECT = 2, + /** Failed to resolve server hostname */ + HTTPC_RESULT_ERR_HOSTNAME = 3, + /** Connection unexpectedly closed by remote server */ + HTTPC_RESULT_ERR_CLOSED = 4, + /** Connection timed out (server didn't respond in time) */ + HTTPC_RESULT_ERR_TIMEOUT = 5, + /** Server responded with an error code */ + HTTPC_RESULT_ERR_SVR_RESP = 6, + /** Local memory error */ + HTTPC_RESULT_ERR_MEM = 7, + /** Local abort */ + HTTPC_RESULT_LOCAL_ABORT = 8, + /** Content length mismatch */ + HTTPC_RESULT_ERR_CONTENT_LEN = 9 +} httpc_result_t; + +typedef struct _httpc_state httpc_state_t; + +/** + * @ingroup httpc + * Prototype of a http client callback function + * + * @param arg argument specified when initiating the request + * @param httpc_result result of the http transfer (see enum httpc_result_t) + * @param rx_content_len number of bytes received (without headers) + * @param srv_res this contains the http status code received (if any) + * @param err an error returned by internal lwip functions, can help to specify + * the source of the error but must not necessarily be != ERR_OK + */ +typedef void (*httpc_result_fn)(void *arg, httpc_result_t httpc_result, u32_t rx_content_len, u32_t srv_res, err_t err); + +/** + * @ingroup httpc + * Prototype of http client callback: called when the headers are received + * + * @param connection http client connection + * @param arg argument specified when initiating the request + * @param hdr header pbuf(s) (may contain data also) + * @param hdr_len length of the heders in 'hdr' + * @param content_len content length as received in the headers (-1 if not received) + * @return if != ERR_OK is returned, the connection is aborted + */ +typedef err_t (*httpc_headers_done_fn)(httpc_state_t *connection, void *arg, struct pbuf *hdr, u16_t hdr_len, u32_t content_len); + +typedef struct _httpc_connection { + ip_addr_t proxy_addr; + u16_t proxy_port; + u8_t use_proxy; + /* @todo: add username:pass? 
*/ + +#if LWIP_ALTCP + altcp_allocator_t *altcp_allocator; +#endif + + /* this callback is called when the transfer is finished (or aborted) */ + httpc_result_fn result_fn; + /* this callback is called after receiving the http headers + It can abort the connection by returning != ERR_OK */ + httpc_headers_done_fn headers_done_fn; +} httpc_connection_t; + +err_t httpc_get_file(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings, + altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection); +err_t httpc_get_file_dns(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings, + altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection); + +#if LWIP_HTTPC_HAVE_FILE_IO +err_t httpc_get_file_to_disk(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings, + void* callback_arg, const char* local_file_name, httpc_state_t **connection); +err_t httpc_get_file_dns_to_disk(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings, + void* callback_arg, const char* local_file_name, httpc_state_t **connection); +#endif /* LWIP_HTTPC_HAVE_FILE_IO */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP && LWIP_CALLBACK_API */ + +#endif /* LWIP_HDR_APPS_HTTP_CLIENT_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h new file mode 100644 index 000000000..e872429f2 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h @@ -0,0 +1,255 @@ +/** + * @file + * HTTP server + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. 
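+ *
+ * For orientation, a hypothetical application might bring the server up and register
+ * a CGI handler roughly as follows (requires LWIP_HTTPD_CGI==1 in lwipopts.h; the
+ * names below are examples, the types and functions are declared further down in
+ * this header):
+ *
+ *   static const char *led_cgi(int iIndex, int iNumParams, char *pcParam[], char *pcValue[])
+ *   {
+ *     // inspect pcParam[]/pcValue[] here, then return the page to serve
+ *     return "/index.html";
+ *   }
+ *
+ *   static const tCGI cgi_table[] = {
+ *     { "/led.cgi", led_cgi },
+ *   };
+ *
+ *   void my_http_init(void)
+ *   {
+ *     httpd_init();
+ *     http_set_cgi_handlers(cgi_table, sizeof(cgi_table) / sizeof(cgi_table[0]));
+ *   }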
+ */ + +#ifndef LWIP_HDR_APPS_HTTPD_H +#define LWIP_HDR_APPS_HTTPD_H + +#include "httpd_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_HTTPD_CGI + +/** + * @ingroup httpd + * Function pointer for a CGI script handler. + * + * This function is called each time the HTTPD server is asked for a file + * whose name was previously registered as a CGI function using a call to + * http_set_cgi_handlers. The iIndex parameter provides the index of the + * CGI within the cgis array passed to http_set_cgi_handlers. Parameters + * pcParam and pcValue provide access to the parameters provided along with + * the URI. iNumParams provides a count of the entries in the pcParam and + * pcValue arrays. Each entry in the pcParam array contains the name of a + * parameter with the corresponding entry in the pcValue array containing the + * value for that parameter. Note that pcParam may contain multiple elements + * with the same name if, for example, a multi-selection list control is used + * in the form generating the data. + * + * The function should return a pointer to a character string which is the + * path and filename of the response that is to be sent to the connected + * browser, for example "/thanks.htm" or "/response/error.ssi". + * + * The maximum number of parameters that will be passed to this function via + * iNumParams is defined by LWIP_HTTPD_MAX_CGI_PARAMETERS. Any parameters in + * the incoming HTTP request above this number will be discarded. + * + * Requests intended for use by this CGI mechanism must be sent using the GET + * method (which encodes all parameters within the URI rather than in a block + * later in the request). Attempts to use the POST method will result in the + * request being ignored. + * + */ +typedef const char *(*tCGIHandler)(int iIndex, int iNumParams, char *pcParam[], + char *pcValue[]); + +/** + * @ingroup httpd + * Structure defining the base filename (URL) of a CGI and the associated + * function which is to be called when that URL is requested. + */ +typedef struct +{ + const char *pcCGIName; + tCGIHandler pfnCGIHandler; +} tCGI; + +void http_set_cgi_handlers(const tCGI *pCGIs, int iNumHandlers); + +#endif /* LWIP_HTTPD_CGI */ + +#if LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI + +#if LWIP_HTTPD_CGI_SSI +/* we have to prototype this struct here to make it available for the handler */ +struct fs_file; + +/** Define this generic CGI handler in your application. + * It is called once for every URI with parameters. + * The parameters can be stored to the object passed as connection_state, which + * is allocated to file->state via fs_state_init() from fs_open() or fs_open_custom(). + * Content creation via SSI or complete dynamic files can retrieve the CGI params from there. + */ +extern void httpd_cgi_handler(struct fs_file *file, const char* uri, int iNumParams, + char **pcParam, char **pcValue +#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE + , void *connection_state +#endif /* LWIP_HTTPD_FILE_STATE */ + ); +#endif /* LWIP_HTTPD_CGI_SSI */ + +#endif /* LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI */ + +#if LWIP_HTTPD_SSI + +/** + * @ingroup httpd + * Function pointer for the SSI tag handler callback. + * + * This function will be called each time the HTTPD server detects a tag of the + * form in files with extensions mentioned in the g_pcSSIExtensions + * array (currently .shtml, .shtm, .ssi, .xml, .json) where "name" appears as + * one of the tags supplied to http_set_ssi_handler in the tags array. 
The + * returned insert string, which will be appended after the the string + * "" in file sent back to the client, should be written to pointer + * pcInsert. iInsertLen contains the size of the buffer pointed to by + * pcInsert. The iIndex parameter provides the zero-based index of the tag as + * found in the tags array and identifies the tag that is to be processed. + * + * The handler returns the number of characters written to pcInsert excluding + * any terminating NULL or HTTPD_SSI_TAG_UNKNOWN when tag is not recognized. + * + * Note that the behavior of this SSI mechanism is somewhat different from the + * "normal" SSI processing as found in, for example, the Apache web server. In + * this case, the inserted text is appended following the SSI tag rather than + * replacing the tag entirely. This allows for an implementation that does not + * require significant additional buffering of output data yet which will still + * offer usable SSI functionality. One downside to this approach is when + * attempting to use SSI within JavaScript. The SSI tag is structured to + * resemble an HTML comment but this syntax does not constitute a comment + * within JavaScript and, hence, leaving the tag in place will result in + * problems in these cases. In order to avoid these problems, define + * LWIP_HTTPD_SSI_INCLUDE_TAG as zero in your lwip options file, or use JavaScript + * style block comments in the form / * # name * / (without the spaces). + */ +typedef u16_t (*tSSIHandler)( +#if LWIP_HTTPD_SSI_RAW + const char* ssi_tag_name, +#else /* LWIP_HTTPD_SSI_RAW */ + int iIndex, +#endif /* LWIP_HTTPD_SSI_RAW */ + char *pcInsert, int iInsertLen +#if LWIP_HTTPD_SSI_MULTIPART + , u16_t current_tag_part, u16_t *next_tag_part +#endif /* LWIP_HTTPD_SSI_MULTIPART */ +#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE + , void *connection_state +#endif /* LWIP_HTTPD_FILE_STATE */ + ); + +/** Set the SSI handler function + * (if LWIP_HTTPD_SSI_RAW==1, only the first argument is used) + */ +void http_set_ssi_handler(tSSIHandler pfnSSIHandler, + const char **ppcTags, int iNumTags); + +/** For LWIP_HTTPD_SSI_RAW==1, return this to indicate the tag is unknown. + * In this case, the webserver writes a warning into the page. + * You can also just return 0 to write nothing for unknown tags. + */ +#define HTTPD_SSI_TAG_UNKNOWN 0xFFFF + +#endif /* LWIP_HTTPD_SSI */ + +#if LWIP_HTTPD_SUPPORT_POST + +/* These functions must be implemented by the application */ + +/** + * @ingroup httpd + * Called when a POST request has been received. The application can decide + * whether to accept it or not. + * + * @param connection Unique connection identifier, valid until httpd_post_end + * is called. + * @param uri The HTTP header URI receiving the POST request. + * @param http_request The raw HTTP request (the first packet, normally). + * @param http_request_len Size of 'http_request'. + * @param content_len Content-Length from HTTP header. + * @param response_uri Filename of response file, to be filled when denying the + * request + * @param response_uri_len Size of the 'response_uri' buffer. + * @param post_auto_wnd Set this to 0 to let the callback code handle window + * updates by calling 'httpd_post_data_recved' (to throttle rx speed) + * default is 1 (httpd handles window updates automatically) + * @return ERR_OK: Accept the POST request, data may be passed in + * another err_t: Deny the POST request, send back 'bad request'. 
+ */ +err_t httpd_post_begin(void *connection, const char *uri, const char *http_request, + u16_t http_request_len, int content_len, char *response_uri, + u16_t response_uri_len, u8_t *post_auto_wnd); + +/** + * @ingroup httpd + * Called for each pbuf of data that has been received for a POST. + * ATTENTION: The application is responsible for freeing the pbufs passed in! + * + * @param connection Unique connection identifier. + * @param p Received data. + * @return ERR_OK: Data accepted. + * another err_t: Data denied, http_post_get_response_uri will be called. + */ +err_t httpd_post_receive_data(void *connection, struct pbuf *p); + +/** + * @ingroup httpd + * Called when all data is received or when the connection is closed. + * The application must return the filename/URI of a file to send in response + * to this POST request. If the response_uri buffer is untouched, a 404 + * response is returned. + * + * @param connection Unique connection identifier. + * @param response_uri Filename of response file, to be filled when denying the request + * @param response_uri_len Size of the 'response_uri' buffer. + */ +void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len); + +#if LWIP_HTTPD_POST_MANUAL_WND +void httpd_post_data_recved(void *connection, u16_t recved_len); +#endif /* LWIP_HTTPD_POST_MANUAL_WND */ + +#endif /* LWIP_HTTPD_SUPPORT_POST */ + +void httpd_init(void); + +#if HTTPD_ENABLE_HTTPS +struct altcp_tls_config; +void httpd_inits(struct altcp_tls_config *conf); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_HTTPD_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h new file mode 100644 index 000000000..8723961fd --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h @@ -0,0 +1,396 @@ +/** + * @file + * HTTP server options list + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_OPTS_H +#define LWIP_HDR_APPS_HTTPD_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup httpd_opts Options + * @ingroup httpd + * @{ + */ + +/** Set this to 1 to support CGI (old style). + * + * This old style CGI support works by registering an array of URLs and + * associated CGI handler functions (@ref http_set_cgi_handlers). + * This list is scanned just before fs_open is called from request handling. + * The handler can return a new URL that is used internally by the httpd to + * load the returned page (passed to fs_open). + * + * Use this CGI type e.g. to execute specific actions and return a page that + * does not depend on the CGI parameters. + */ +#if !defined LWIP_HTTPD_CGI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI 0 +#endif + +/** Set this to 1 to support CGI (new style). + * + * This new style CGI support works by calling a global function + * (@ref tCGIHandler) for all URLs that are found. fs_open is called first + * and the URL can not be written by the CGI handler. Instead, this handler gets + * passed the http file state, an object where it can store information derived + * from the CGI URL or parameters. This file state is later passed to SSI, so + * the SSI code can return data depending on CGI input. + * + * Use this CGI handler if you want CGI information passed on to SSI. + */ +#if !defined LWIP_HTTPD_CGI_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI_SSI 0 +#endif + +/** Set this to 1 to support SSI (Server-Side-Includes) + * + * In contrast to other http servers, this only calls a preregistered callback + * function (@see http_set_ssi_handler) for each tag (in the format of + * ) encountered in SSI-enabled pages. + * SSI-enabled pages must have one of the predefined SSI-enabled file extensions. + * All files with one of these extensions are parsed when sent. + * + * A downside of the current SSI implementation is that persistent connections + * don't work, as the file length is not known in advance (and httpd currently + * relies on the Content-Length header for persistent connections). + * + * To save memory, the maximum tag length is limited (@see LWIP_HTTPD_MAX_TAG_NAME_LEN). + * To save memory, the maximum insertion string length is limited (@see + * LWIP_HTTPD_MAX_TAG_INSERT_LEN). If this is not enought, @ref LWIP_HTTPD_SSI_MULTIPART + * can be used. + */ +#if !defined LWIP_HTTPD_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI 0 +#endif + +/** Set this to 1 to implement an SSI tag handler callback that gets a const char* + * to the tag (instead of an index into a pre-registered array of known tags) + * If this is 0, the SSI handler callback function is only called pre-registered tags. + */ +#if !defined LWIP_HTTPD_SSI_RAW || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_RAW 0 +#endif + +/** Set this to 0 to prevent parsing the file extension at runtime to decide + * if a file should be scanned for SSI tags or not. + * Default is 1 (file extensions are checked using the g_pcSSIExtensions array) + * Set to 2 to override this runtime test function. 
+ * + * This is enabled by default, but if you only use a newer version of makefsdata + * supporting the "-ssi" option, this info is already present in + */ +#if !defined LWIP_HTTPD_SSI_BY_FILE_EXTENSION || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_BY_FILE_EXTENSION 1 +#endif + +/** Set this to 1 to support HTTP POST */ +#if !defined LWIP_HTTPD_SUPPORT_POST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_POST 0 +#endif + +/* The maximum number of parameters that the CGI handler can be sent. */ +#if !defined LWIP_HTTPD_MAX_CGI_PARAMETERS || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_CGI_PARAMETERS 16 +#endif + +/** LWIP_HTTPD_SSI_MULTIPART==1: SSI handler function is called with 2 more + * arguments indicating a counter for insert string that are too long to be + * inserted at once: the SSI handler function must then set 'next_tag_part' + * which will be passed back to it in the next call. */ +#if !defined LWIP_HTTPD_SSI_MULTIPART || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_MULTIPART 0 +#endif + +/* The maximum length of the string comprising the SSI tag name + * ATTENTION: tags longer than this are ignored, not truncated! + */ +#if !defined LWIP_HTTPD_MAX_TAG_NAME_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_NAME_LEN 8 +#endif + +/* The maximum length of string that can be returned to replace any given tag + * If this buffer is not long enough, use LWIP_HTTPD_SSI_MULTIPART. + */ +#if !defined LWIP_HTTPD_MAX_TAG_INSERT_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_INSERT_LEN 192 +#endif + +#if !defined LWIP_HTTPD_POST_MANUAL_WND || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MANUAL_WND 0 +#endif + +/** This string is passed in the HTTP header as "Server: " */ +#if !defined HTTPD_SERVER_AGENT || defined __DOXYGEN__ +#define HTTPD_SERVER_AGENT "lwIP/" LWIP_VERSION_STRING " (http://savannah.nongnu.org/projects/lwip)" +#endif + +/** Set this to 1 if you want to include code that creates HTTP headers + * at runtime. Default is off: HTTP headers are then created statically + * by the makefsdata tool. Static headers mean smaller code size, but + * the (readonly) fsdata will grow a bit as every file includes the HTTP + * header. */ +#if !defined LWIP_HTTPD_DYNAMIC_HEADERS || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_HEADERS 0 +#endif + +#if !defined HTTPD_DEBUG || defined __DOXYGEN__ +#define HTTPD_DEBUG LWIP_DBG_OFF +#endif + +/** Set this to 1 to use a memp pool for allocating + * struct http_state instead of the heap. + * If enabled, you'll need to define MEMP_NUM_PARALLEL_HTTPD_CONNS + * (and MEMP_NUM_PARALLEL_HTTPD_SSI_CONNS for SSI) to set the size of + * the pool(s). + */ +#if !defined HTTPD_USE_MEM_POOL || defined __DOXYGEN__ +#define HTTPD_USE_MEM_POOL 0 +#endif + +/** The server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT LWIP_IANA_PORT_HTTP +#endif + +/** The https server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT_HTTPS || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT_HTTPS LWIP_IANA_PORT_HTTPS +#endif + +/** Enable https support? */ +#if !defined HTTPD_ENABLE_HTTPS || defined __DOXYGEN__ +#define HTTPD_ENABLE_HTTPS 0 +#endif + +/** Maximum retries before the connection is aborted/closed. 
+ * - number of times pcb->poll is called -> default is 4*500ms = 2s; + * - reset when pcb->sent is called + */ +#if !defined HTTPD_MAX_RETRIES || defined __DOXYGEN__ +#define HTTPD_MAX_RETRIES 4 +#endif + +/** The poll delay is X*500ms */ +#if !defined HTTPD_POLL_INTERVAL || defined __DOXYGEN__ +#define HTTPD_POLL_INTERVAL 4 +#endif + +/** Priority for tcp pcbs created by HTTPD (very low by default). + * Lower priorities get killed first when running out of memory. + */ +#if !defined HTTPD_TCP_PRIO || defined __DOXYGEN__ +#define HTTPD_TCP_PRIO TCP_PRIO_MIN +#endif + +/** Set this to 1 to enable timing each file sent */ +#if !defined LWIP_HTTPD_TIMING || defined __DOXYGEN__ +#define LWIP_HTTPD_TIMING 0 +#endif +/** Set this to 1 to enable timing each file sent */ +#if !defined HTTPD_DEBUG_TIMING || defined __DOXYGEN__ +#define HTTPD_DEBUG_TIMING LWIP_DBG_OFF +#endif + +/** Set this to one to show error pages when parsing a request fails instead + of simply closing the connection. */ +#if !defined LWIP_HTTPD_SUPPORT_EXTSTATUS || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_EXTSTATUS 0 +#endif + +/** Set this to 0 to drop support for HTTP/0.9 clients (to save some bytes) */ +#if !defined LWIP_HTTPD_SUPPORT_V09 || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_V09 1 +#endif + +/** Set this to 1 to enable HTTP/1.1 persistent connections. + * ATTENTION: If the generated file system includes HTTP headers, these must + * include the "Connection: keep-alive" header (pass argument "-11" to makefsdata). + */ +#if !defined LWIP_HTTPD_SUPPORT_11_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_11_KEEPALIVE 0 +#endif + +/** Set this to 1 to support HTTP request coming in in multiple packets/pbufs */ +#if !defined LWIP_HTTPD_SUPPORT_REQUESTLIST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_REQUESTLIST 1 +#endif + +#if LWIP_HTTPD_SUPPORT_REQUESTLIST +/** Number of rx pbufs to enqueue to parse an incoming request (up to the first + newline) */ +#if !defined LWIP_HTTPD_REQ_QUEUELEN || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_QUEUELEN 5 +#endif + +/** Number of (TCP payload-) bytes (in pbufs) to enqueue to parse and incoming + request (up to the first double-newline) */ +#if !defined LWIP_HTTPD_REQ_BUFSIZE || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_BUFSIZE LWIP_HTTPD_MAX_REQ_LENGTH +#endif + +/** Defines the maximum length of a HTTP request line (up to the first CRLF, + copied from pbuf into this a global buffer when pbuf- or packet-queues + are received - otherwise the input pbuf is used directly) */ +#if !defined LWIP_HTTPD_MAX_REQ_LENGTH || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQ_LENGTH LWIP_MIN(1023, (LWIP_HTTPD_REQ_QUEUELEN * PBUF_POOL_BUFSIZE)) +#endif +#endif /* LWIP_HTTPD_SUPPORT_REQUESTLIST */ + +/** This is the size of a static buffer used when URIs end with '/'. + * In this buffer, the directory requested is concatenated with all the + * configured default file names. + * Set to 0 to disable checking default filenames on non-root directories. + */ +#if !defined LWIP_HTTPD_MAX_REQUEST_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQUEST_URI_LEN 63 +#endif + +/** Maximum length of the filename to send as response to a POST request, + * filled in by the application when a POST is finished. 
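+ *
+ * Like the other compile-time options in this file it is overridden from lwipopts.h,
+ * for example (values are illustrative only):
+ *
+ *   #define LWIP_HTTPD_SUPPORT_POST               1
+ *   #define LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN  127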
+ */ +#if !defined LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN 63 +#endif + +/** Set this to 0 to not send the SSI tag (default is on, so the tag will + * be sent in the HTML page */ +#if !defined LWIP_HTTPD_SSI_INCLUDE_TAG || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_INCLUDE_TAG 1 +#endif + +/** Set this to 1 to call tcp_abort when tcp_close fails with memory error. + * This can be used to prevent consuming all memory in situations where the + * HTTP server has low priority compared to other communication. */ +#if !defined LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR || defined __DOXYGEN__ +#define LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR 0 +#endif + +/** Set this to 1 to kill the oldest connection when running out of + * memory for 'struct http_state' or 'struct http_ssi_state'. + * ATTENTION: This puts all connections on a linked list, so may be kind of slow. + */ +#if !defined LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED || defined __DOXYGEN__ +#define LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED 0 +#endif + +/** Set this to 1 to send URIs without extension without headers + * (who uses this at all??) */ +#if !defined LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI || defined __DOXYGEN__ +#define LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI 0 +#endif + +/** Default: Tags are sent from struct http_state and are therefore volatile */ +#if !defined HTTP_IS_TAG_VOLATILE || defined __DOXYGEN__ +#define HTTP_IS_TAG_VOLATILE(ptr) TCP_WRITE_FLAG_COPY +#endif + +/* By default, the httpd is limited to send 2*pcb->mss to keep resource usage low + when http is not an important protocol in the device. */ +#if !defined HTTPD_LIMIT_SENDING_TO_2MSS || defined __DOXYGEN__ +#define HTTPD_LIMIT_SENDING_TO_2MSS 1 +#endif + +/* Define this to a function that returns the maximum amount of data to enqueue. + The function have this signature: u16_t fn(struct altcp_pcb* pcb); + The best place to define this is the hooks file (@see LWIP_HOOK_FILENAME) */ +#if !defined HTTPD_MAX_WRITE_LEN || defined __DOXYGEN__ +#if HTTPD_LIMIT_SENDING_TO_2MSS +#define HTTPD_MAX_WRITE_LEN(pcb) ((u16_t)(2 * altcp_mss(pcb))) +#endif +#endif + +/*------------------- FS OPTIONS -------------------*/ + +/** Set this to 1 and provide the functions: + * - "int fs_open_custom(struct fs_file *file, const char *name)" + * Called first for every opened file to allow opening files + * that are not included in fsdata(_custom).c + * - "void fs_close_custom(struct fs_file *file)" + * Called to free resources allocated by fs_open_custom(). + */ +#if !defined LWIP_HTTPD_CUSTOM_FILES || defined __DOXYGEN__ +#define LWIP_HTTPD_CUSTOM_FILES 0 +#endif + +/** Set this to 1 to support fs_read() to dynamically read file data. + * Without this (default=off), only one-block files are supported, + * and the contents must be ready after fs_open(). + */ +#if !defined LWIP_HTTPD_DYNAMIC_FILE_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_FILE_READ 0 +#endif + +/** Set this to 1 to include an application state argument per file + * that is opened. This allows to keep a state per connection/file. + */ +#if !defined LWIP_HTTPD_FILE_STATE || defined __DOXYGEN__ +#define LWIP_HTTPD_FILE_STATE 0 +#endif + +/** HTTPD_PRECALCULATED_CHECKSUM==1: include precompiled checksums for + * predefined (MSS-sized) chunks of the files to prevent having to calculate + * the checksums at runtime. 
*/ +#if !defined HTTPD_PRECALCULATED_CHECKSUM || defined __DOXYGEN__ +#define HTTPD_PRECALCULATED_CHECKSUM 0 +#endif + +/** LWIP_HTTPD_FS_ASYNC_READ==1: support asynchronous read operations + * (fs_read_async returns FS_READ_DELAYED and calls a callback when finished). + */ +#if !defined LWIP_HTTPD_FS_ASYNC_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_FS_ASYNC_READ 0 +#endif + +/** Filename (including path) to use as FS data file */ +#if !defined HTTPD_FSDATA_FILE || defined __DOXYGEN__ +/* HTTPD_USE_CUSTOM_FSDATA: Compatibility with deprecated lwIP option */ +#if defined(HTTPD_USE_CUSTOM_FSDATA) && (HTTPD_USE_CUSTOM_FSDATA != 0) +#define HTTPD_FSDATA_FILE "fsdata_custom.c" +#else +#define HTTPD_FSDATA_FILE "fsdata.c" +#endif +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_HTTPD_OPTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h new file mode 100644 index 000000000..cc86e7f87 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h @@ -0,0 +1,100 @@ +/** + * @file + * lwIP iPerf server implementation + */ + +/* + * Copyright (c) 2014 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_LWIPERF_H +#define LWIP_HDR_APPS_LWIPERF_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIPERF_TCP_PORT_DEFAULT 5001 + +/** lwIPerf test results */ +enum lwiperf_report_type +{ + /** The server side test is done */ + LWIPERF_TCP_DONE_SERVER, + /** The client side test is done */ + LWIPERF_TCP_DONE_CLIENT, + /** Local error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL, + /** Data check error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_DATAERROR, + /** Transmit error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_TXERROR, + /** Remote side aborted the test */ + LWIPERF_TCP_ABORTED_REMOTE +}; + +/** Control */ +enum lwiperf_client_type +{ + /** Unidirectional tx only test */ + LWIPERF_CLIENT, + /** Do a bidirectional test simultaneously */ + LWIPERF_DUAL, + /** Do a bidirectional test individually */ + LWIPERF_TRADEOFF +}; + +/** Prototype of a report function that is called when a session is finished. + This report function can show the test results. + @param report_type contains the test result */ +typedef void (*lwiperf_report_fn)(void *arg, enum lwiperf_report_type report_type, + const ip_addr_t* local_addr, u16_t local_port, const ip_addr_t* remote_addr, u16_t remote_port, + u32_t bytes_transferred, u32_t ms_duration, u32_t bandwidth_kbitpsec); + +void* lwiperf_start_tcp_server(const ip_addr_t* local_addr, u16_t local_port, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_server_default(lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_client(const ip_addr_t* remote_addr, u16_t remote_port, + enum lwiperf_client_type type, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_client_default(const ip_addr_t* remote_addr, + lwiperf_report_fn report_fn, void* report_arg); + +void lwiperf_abort(void* lwiperf_session); + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_LWIPERF_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h new file mode 100644 index 000000000..20d7ee2a2 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h @@ -0,0 +1,105 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_H +#define LWIP_HDR_APPS_MDNS_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MDNS_RESPONDER + +enum mdns_sd_proto { + DNSSD_PROTO_UDP = 0, + DNSSD_PROTO_TCP = 1 +}; + +#define MDNS_PROBING_CONFLICT 0 +#define MDNS_PROBING_SUCCESSFUL 1 + +#define MDNS_LABEL_MAXLEN 63 + +struct mdns_host; +struct mdns_service; + +/** Callback function to add text to a reply, called when generating the reply */ +typedef void (*service_get_txt_fn_t)(struct mdns_service *service, void *txt_userdata); + +/** Callback function to let application know the result of probing network for name + * uniqueness, called with result MDNS_PROBING_SUCCESSFUL if no other node claimed + * use for the name for the netif or a service and is safe to use, or MDNS_PROBING_CONFLICT + * if another node is already using it and mdns is disabled on this interface */ +typedef void (*mdns_name_result_cb_t)(struct netif* netif, u8_t result); + +void mdns_resp_init(void); + +void mdns_resp_register_name_result_cb(mdns_name_result_cb_t cb); + +err_t mdns_resp_add_netif(struct netif *netif, const char *hostname, u32_t dns_ttl); +err_t mdns_resp_remove_netif(struct netif *netif); +err_t mdns_resp_rename_netif(struct netif *netif, const char *hostname); + +s8_t mdns_resp_add_service(struct netif *netif, const char *name, const char *service, enum mdns_sd_proto proto, u16_t port, u32_t dns_ttl, service_get_txt_fn_t txt_fn, void *txt_userdata); +err_t mdns_resp_del_service(struct netif *netif, s8_t slot); +err_t mdns_resp_rename_service(struct netif *netif, s8_t slot, const char *name); + +err_t mdns_resp_add_service_txtitem(struct mdns_service *service, const char *txt, u8_t txt_len); + +void mdns_resp_restart(struct netif *netif); +void mdns_resp_announce(struct netif *netif); + +/** + * @ingroup mdns + * Announce IP settings have changed on netif. + * Call this in your callback registered by netif_set_status_callback(). + * No need to call this function when LWIP_NETIF_EXT_STATUS_CALLBACK==1, + * this handled automatically for you. + * @param netif The network interface where settings have changed. + */ +#define mdns_resp_netif_settings_changed(netif) mdns_resp_announce(netif) + +#endif /* LWIP_MDNS_RESPONDER */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MDNS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h new file mode 100644 index 000000000..45f2c50ca --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h @@ -0,0 +1,81 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_OPTS_H +#define LWIP_HDR_APPS_MDNS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup mdns_opts Options + * @ingroup mdns + * @{ + */ + +/** + * LWIP_MDNS_RESPONDER==1: Turn on multicast DNS module. UDP must be available for MDNS + * transport. IGMP is needed for IPv4 multicast. + */ +#ifndef LWIP_MDNS_RESPONDER +#define LWIP_MDNS_RESPONDER 0 +#endif /* LWIP_MDNS_RESPONDER */ + +/** The maximum number of services per netif */ +#ifndef MDNS_MAX_SERVICES +#define MDNS_MAX_SERVICES 1 +#endif + +/** MDNS_RESP_USENETIF_EXTCALLBACK==1: register an ext_callback on the netif + * to automatically restart probing/announcing on status or address change. + */ +#ifndef MDNS_RESP_USENETIF_EXTCALLBACK +#define MDNS_RESP_USENETIF_EXTCALLBACK LWIP_NETIF_EXT_STATUS_CALLBACK +#endif + +/** + * MDNS_DEBUG: Enable debugging for multicast DNS. + */ +#ifndef MDNS_DEBUG +#define MDNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_MDNS_OPTS_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h new file mode 100644 index 000000000..9635b5b10 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h @@ -0,0 +1,74 @@ +/** + * @file + * MDNS responder private definitions + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ +#ifndef LWIP_HDR_MDNS_PRIV_H +#define LWIP_HDR_MDNS_PRIV_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MDNS_RESPONDER + +/* Domain struct and methods - visible for unit tests */ + +#define MDNS_DOMAIN_MAXLEN 256 +#define MDNS_READNAME_ERROR 0xFFFF + +struct mdns_domain { + /* Encoded domain name */ + u8_t name[MDNS_DOMAIN_MAXLEN]; + /* Total length of domain name, including zero */ + u16_t length; + /* Set if compression of this domain is not allowed */ + u8_t skip_compression; +}; + +err_t mdns_domain_add_label(struct mdns_domain *domain, const char *label, u8_t len); +u16_t mdns_readname(struct pbuf *p, u16_t offset, struct mdns_domain *domain); +int mdns_domain_eq(struct mdns_domain *a, struct mdns_domain *b); +u16_t mdns_compress_domain(struct pbuf *pbuf, u16_t *offset, struct mdns_domain *domain); + +#endif /* LWIP_MDNS_RESPONDER */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MDNS_PRIV_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h new file mode 100644 index 000000000..c2bb2288f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h @@ -0,0 +1,205 @@ +/** + * @file + * MQTT client + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_CLIENT_H +#define LWIP_HDR_APPS_MQTT_CLIENT_H + +#include "lwip/apps/mqtt_opts.h" +#include "lwip/err.h" +#include "lwip/ip_addr.h" +#include "lwip/prot/iana.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct mqtt_client_s mqtt_client_t; + +#if LWIP_ALTCP && LWIP_ALTCP_TLS +struct altcp_tls_config; +#endif + +/** @ingroup mqtt + * Default MQTT port (non-TLS) */ +#define MQTT_PORT LWIP_IANA_PORT_MQTT +/** @ingroup mqtt + * Default MQTT TLS port */ +#define MQTT_TLS_PORT LWIP_IANA_PORT_SECURE_MQTT + +/*---------------------------------------------------------------------------------------------- */ +/* Connection with server */ + +/** + * @ingroup mqtt + * Client information and connection parameters */ +struct mqtt_connect_client_info_t { + /** Client identifier, must be set by caller */ + const char *client_id; + /** User name, set to NULL if not used */ + const char* client_user; + /** Password, set to NULL if not used */ + const char* client_pass; + /** keep alive time in seconds, 0 to disable keep alive functionality*/ + u16_t keep_alive; + /** will topic, set to NULL if will is not to be used, + will_msg, will_qos and will retain are then ignored */ + const char* will_topic; + /** will_msg, see will_topic */ + const char* will_msg; + /** will_qos, see will_topic */ + u8_t will_qos; + /** will_retain, see will_topic */ + u8_t will_retain; +#if LWIP_ALTCP && LWIP_ALTCP_TLS + /** TLS configuration for secure connections */ + struct altcp_tls_config *tls_config; +#endif +}; + +/** + * @ingroup mqtt + * Connection status codes */ +typedef enum +{ + /** Accepted */ + MQTT_CONNECT_ACCEPTED = 0, + /** Refused protocol version */ + MQTT_CONNECT_REFUSED_PROTOCOL_VERSION = 1, + /** Refused identifier */ + MQTT_CONNECT_REFUSED_IDENTIFIER = 2, + /** Refused server */ + MQTT_CONNECT_REFUSED_SERVER = 3, + /** Refused user credentials */ + MQTT_CONNECT_REFUSED_USERNAME_PASS = 4, + /** Refused not authorized */ + MQTT_CONNECT_REFUSED_NOT_AUTHORIZED_ = 5, + /** Disconnected */ + MQTT_CONNECT_DISCONNECTED = 256, + /** Timeout */ + MQTT_CONNECT_TIMEOUT = 257 +} mqtt_connection_status_t; + +/** + * @ingroup mqtt + * Function prototype for mqtt connection status callback. 
Called when + * client has connected to the server after initiating a mqtt connection attempt by + * calling mqtt_client_connect() or when connection is closed by server or an error + * + * @param client MQTT client itself + * @param arg Additional argument to pass to the callback function + * @param status Connect result code or disconnection notification @see mqtt_connection_status_t + * + */ +typedef void (*mqtt_connection_cb_t)(mqtt_client_t *client, void *arg, mqtt_connection_status_t status); + + +/** + * @ingroup mqtt + * Data callback flags */ +enum { + /** Flag set when last fragment of data arrives in data callback */ + MQTT_DATA_FLAG_LAST = 1 +}; + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish data callback function. Called when data + * arrives to a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param data User data, pointed object, data may not be referenced after callback return, + NULL is passed when all publish data are delivered + * @param len Length of publish data fragment + * @param flags MQTT_DATA_FLAG_LAST set when this call contains the last part of data from publish message + * + */ +typedef void (*mqtt_incoming_data_cb_t)(void *arg, const u8_t *data, u16_t len, u8_t flags); + + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish function. Called when an incoming publish + * arrives to a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param topic Zero terminated Topic text string, topic may not be referenced after callback return + * @param tot_len Total length of publish data, if set to 0 (no publish payload) data callback will not be invoked + */ +typedef void (*mqtt_incoming_publish_cb_t)(void *arg, const char *topic, u32_t tot_len); + + +/** + * @ingroup mqtt + * Function prototype for mqtt request callback. 
Called when a subscribe, unsubscribe + * or publish request has completed + * @param arg Pointer to user data supplied when invoking request + * @param err ERR_OK on success + * ERR_TIMEOUT if no response was received within timeout, + * ERR_ABRT if (un)subscribe was denied + */ +typedef void (*mqtt_request_cb_t)(void *arg, err_t err); + + +err_t mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ipaddr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info); + +void mqtt_disconnect(mqtt_client_t *client); + +mqtt_client_t *mqtt_client_new(void); +void mqtt_client_free(mqtt_client_t* client); + +u8_t mqtt_client_is_connected(mqtt_client_t *client); + +void mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t, + mqtt_incoming_data_cb_t data_cb, void *arg); + +err_t mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub); + +/** @ingroup mqtt + *Subscribe to topic */ +#define mqtt_subscribe(client, topic, qos, cb, arg) mqtt_sub_unsub(client, topic, qos, cb, arg, 1) +/** @ingroup mqtt + * Unsubscribe to topic */ +#define mqtt_unsubscribe(client, topic, cb, arg) mqtt_sub_unsub(client, topic, 0, cb, arg, 0) + +err_t mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_CLIENT_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h new file mode 100644 index 000000000..4226d21e8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h @@ -0,0 +1,103 @@ +/** + * @file + * MQTT client options + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_OPTS_H +#define LWIP_HDR_APPS_MQTT_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup mqtt_opts Options + * @ingroup mqtt + * @{ + */ + +/** + * Output ring-buffer size, must be able to fit largest outgoing publish message topic+payloads + */ +#ifndef MQTT_OUTPUT_RINGBUF_SIZE +#define MQTT_OUTPUT_RINGBUF_SIZE 256 +#endif + +/** + * Number of bytes in receive buffer, must be at least the size of the longest incoming topic + 8 + * If one wants to avoid fragmented incoming publish, set length to max incoming topic length + max payload length + 8 + */ +#ifndef MQTT_VAR_HEADER_BUFFER_LEN +#define MQTT_VAR_HEADER_BUFFER_LEN 128 +#endif + +/** + * Maximum number of pending subscribe, unsubscribe and publish requests to server . + */ +#ifndef MQTT_REQ_MAX_IN_FLIGHT +#define MQTT_REQ_MAX_IN_FLIGHT 4 +#endif + +/** + * Seconds between each cyclic timer call. + */ +#ifndef MQTT_CYCLIC_TIMER_INTERVAL +#define MQTT_CYCLIC_TIMER_INTERVAL 5 +#endif + +/** + * Publish, subscribe and unsubscribe request timeout in seconds. + */ +#ifndef MQTT_REQ_TIMEOUT +#define MQTT_REQ_TIMEOUT 30 +#endif + +/** + * Seconds for MQTT connect response timeout after sending connect request + */ +#ifndef MQTT_CONNECT_TIMOUT +#define MQTT_CONNECT_TIMOUT 100 +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_OPTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h new file mode 100644 index 000000000..b775913ca --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h @@ -0,0 +1,104 @@ +/** + * @file + * MQTT client (private interface) + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_PRIV_H +#define LWIP_HDR_APPS_MQTT_PRIV_H + +#include "lwip/apps/mqtt.h" +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Pending request item, binds application callback to pending server requests */ +struct mqtt_request_t +{ + /** Next item in list, NULL means this is the last in chain, + next pointing at itself means request is unallocated */ + struct mqtt_request_t *next; + /** Callback to upper layer */ + mqtt_request_cb_t cb; + void *arg; + /** MQTT packet identifier */ + u16_t pkt_id; + /** Expire time relative to element before this */ + u16_t timeout_diff; +}; + +/** Ring buffer */ +struct mqtt_ringbuf_t { + u16_t put; + u16_t get; + u8_t buf[MQTT_OUTPUT_RINGBUF_SIZE]; +}; + +/** MQTT client */ +struct mqtt_client_s +{ + /** Timers and timeouts */ + u16_t cyclic_tick; + u16_t keep_alive; + u16_t server_watchdog; + /** Packet identifier generator*/ + u16_t pkt_id_seq; + /** Packet identifier of pending incoming publish */ + u16_t inpub_pkt_id; + /** Connection state */ + u8_t conn_state; + struct altcp_pcb *conn; + /** Connection callback */ + void *connect_arg; + mqtt_connection_cb_t connect_cb; + /** Pending requests to server */ + struct mqtt_request_t *pend_req_queue; + struct mqtt_request_t req_list[MQTT_REQ_MAX_IN_FLIGHT]; + void *inpub_arg; + /** Incoming data callback */ + mqtt_incoming_data_cb_t data_cb; + mqtt_incoming_publish_cb_t pub_cb; + /** Input */ + u32_t msg_idx; + u8_t rx_buffer[MQTT_VAR_HEADER_BUFFER_LEN]; + /** Output ring-buffer */ + struct mqtt_ringbuf_t output; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_PRIV_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h new file mode 100644 index 000000000..a326d33b1 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h @@ -0,0 +1,51 @@ +/** + * @file + * NETBIOS name service responder + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_H +#define LWIP_HDR_APPS_NETBIOS_H + +#include "lwip/apps/netbiosns_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void netbiosns_init(void); +#ifndef NETBIOS_LWIP_NAME +void netbiosns_set_name(const char* hostname); +#endif +void netbiosns_stop(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_NETBIOS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h new file mode 100644 index 000000000..1f51ab02a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h @@ -0,0 +1,66 @@ +/** + * @file + * NETBIOS name service responder options + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_OPTS_H +#define LWIP_HDR_APPS_NETBIOS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup netbiosns_opts Options + * @ingroup netbiosns + * @{ + */ + +/** NetBIOS name of lwip device + * This must be uppercase until NETBIOS_STRCMP() is defined to a string + * comparision function that is case insensitive. + * If you want to use the netif's hostname, use this (with LWIP_NETIF_HOSTNAME): + * (ip_current_netif() != NULL ? ip_current_netif()->hostname != NULL ? ip_current_netif()->hostname : "" : "") + * + * If this is not defined, netbiosns_set_name() can be called at runtime to change the name. 
+ */ +#ifdef __DOXYGEN__ +#define NETBIOS_LWIP_NAME "NETBIOSLWIPDEV" +#endif + +/** Respond to NetBIOS name queries + * Default is disabled + */ +#if !defined LWIP_NETBIOS_RESPOND_NAME_QUERY || defined __DOXYGEN__ +#define LWIP_NETBIOS_RESPOND_NAME_QUERY 0 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_NETBIOS_OPTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h new file mode 100644 index 000000000..fb4a4a7cc --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h @@ -0,0 +1,128 @@ +#ifndef LWIP_HDR_APPS_SMTP_H +#define LWIP_HDR_APPS_SMTP_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/apps/smtp_opts.h" +#include "lwip/err.h" +#include "lwip/prot/iana.h" + +/** The default TCP port used for SMTP */ +#define SMTP_DEFAULT_PORT LWIP_IANA_PORT_SMTP +/** The default TCP port used for SMTPS */ +#define SMTPS_DEFAULT_PORT LWIP_IANA_PORT_SMTPS + +/** Email successfully sent */ +#define SMTP_RESULT_OK 0 +/** Unknown error */ +#define SMTP_RESULT_ERR_UNKNOWN 1 +/** Connection to server failed */ +#define SMTP_RESULT_ERR_CONNECT 2 +/** Failed to resolve server hostname */ +#define SMTP_RESULT_ERR_HOSTNAME 3 +/** Connection unexpectedly closed by remote server */ +#define SMTP_RESULT_ERR_CLOSED 4 +/** Connection timed out (server didn't respond in time) */ +#define SMTP_RESULT_ERR_TIMEOUT 5 +/** Server responded with an unknown response code */ +#define SMTP_RESULT_ERR_SVR_RESP 6 +/** Out of resources locally */ +#define SMTP_RESULT_ERR_MEM 7 + +/** Prototype of an smtp callback function + * + * @param arg argument specified when initiating the email + * @param smtp_result result of the mail transfer (see defines SMTP_RESULT_*) + * @param srv_err if aborted by the server, this contains the error code received + * @param err an error returned by internal lwip functions, can help to specify + * the source of the error but must not necessarily be != ERR_OK + */ +typedef void (*smtp_result_fn)(void *arg, u8_t smtp_result, u16_t srv_err, err_t err); + +/** This structure is used as argument for smtp_send_mail_int(), + * which in turn can be used with tcpip_callback() to send mail + * from interrupt context, e.g. like this: + * struct smtp_send_request *req; (to be filled) + * tcpip_try_callback(smtp_send_mail_int, (void*)req); + * + * For member description, see parameter description of smtp_send_mail(). + * When using with tcpip_callback, this structure has to stay allocated + * (e.g. using mem_malloc/mem_free) until its 'callback_fn' is called. + */ +struct smtp_send_request { + const char *from; + const char* to; + const char* subject; + const char* body; + smtp_result_fn callback_fn; + void* callback_arg; + /** If this is != 0, data is *not* copied into an extra buffer + * but used from the pointers supplied in this struct. + * This means less memory usage, but data must stay untouched until + * the callback function is called. 
*/ + u8_t static_data; +}; + + +#if SMTP_BODYDH + +#ifndef SMTP_BODYDH_BUFFER_SIZE +#define SMTP_BODYDH_BUFFER_SIZE 256 +#endif /* SMTP_BODYDH_BUFFER_SIZE */ + +struct smtp_bodydh { + u16_t state; + u16_t length; /* Length of content in buffer */ + char buffer[SMTP_BODYDH_BUFFER_SIZE]; /* buffer for generated content */ +#ifdef SMTP_BODYDH_USER_SIZE + u8_t user[SMTP_BODYDH_USER_SIZE]; +#endif /* SMTP_BODYDH_USER_SIZE */ +}; + +enum bdh_retvals_e { + BDH_DONE = 0, + BDH_WORKING +}; + +/** Prototype of an smtp body callback function + * It receives a struct smtp_bodydh, and a buffer to write data, + * must return BDH_WORKING to be called again and BDH_DONE when + * it has finished processing. This one tries to fill one TCP buffer with + * data, your function will be repeatedly called until that happens; so if you + * know you'll be taking too long to serve your request, pause once in a while + * by writing length=0 to avoid hogging system resources + * + * @param arg argument specified when initiating the email + * @param smtp_bodydh state handling + buffer structure + */ +typedef int (*smtp_bodycback_fn)(void *arg, struct smtp_bodydh *bodydh); + +err_t smtp_send_mail_bodycback(const char *from, const char* to, const char* subject, + smtp_bodycback_fn bodycback_fn, smtp_result_fn callback_fn, void* callback_arg); + +#endif /* SMTP_BODYDH */ + + +err_t smtp_set_server_addr(const char* server); +void smtp_set_server_port(u16_t port); +#if LWIP_ALTCP && LWIP_ALTCP_TLS +struct altcp_tls_config; +void smtp_set_tls_config(struct altcp_tls_config *tls_config); +#endif +err_t smtp_set_auth(const char* username, const char* pass); +err_t smtp_send_mail(const char *from, const char* to, const char* subject, const char* body, + smtp_result_fn callback_fn, void* callback_arg); +err_t smtp_send_mail_static(const char *from, const char* to, const char* subject, const char* body, + smtp_result_fn callback_fn, void* callback_arg); +void smtp_send_mail_int(void *arg); +#ifdef LWIP_DEBUG +const char* smtp_result_str(u8_t smtp_result); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SMTP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h new file mode 100644 index 000000000..bc743f679 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h @@ -0,0 +1,81 @@ +#ifndef LWIP_HDR_APPS_SMTP_OPTS_H +#define LWIP_HDR_APPS_SMTP_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup smtp_opts Options + * @ingroup smtp + * + * @{ + */ + +/** Set this to 1 to enable data handler callback on BODY */ +#ifndef SMTP_BODYDH +#define SMTP_BODYDH 0 +#endif + +/** SMTP_DEBUG: Enable debugging for SNTP. */ +#ifndef SMTP_DEBUG +#define SMTP_DEBUG LWIP_DBG_OFF +#endif + +/** Maximum length reserved for server name including terminating 0 byte */ +#ifndef SMTP_MAX_SERVERNAME_LEN +#define SMTP_MAX_SERVERNAME_LEN 256 +#endif + +/** Maximum length reserved for username */ +#ifndef SMTP_MAX_USERNAME_LEN +#define SMTP_MAX_USERNAME_LEN 32 +#endif + +/** Maximum length reserved for password */ +#ifndef SMTP_MAX_PASS_LEN +#define SMTP_MAX_PASS_LEN 32 +#endif + +/** Set this to 0 if you know the authentication data will not change + * during the smtp session, which saves some heap space. 
*/ +#ifndef SMTP_COPY_AUTHDATA +#define SMTP_COPY_AUTHDATA 1 +#endif + +/** Set this to 0 to save some code space if you know for sure that all data + * passed to this module conforms to the requirements in the SMTP RFC. + * WARNING: use this with care! + */ +#ifndef SMTP_CHECK_DATA +#define SMTP_CHECK_DATA 1 +#endif + +/** Set this to 1 to enable AUTH PLAIN support */ +#ifndef SMTP_SUPPORT_AUTH_PLAIN +#define SMTP_SUPPORT_AUTH_PLAIN 1 +#endif + +/** Set this to 1 to enable AUTH LOGIN support */ +#ifndef SMTP_SUPPORT_AUTH_LOGIN +#define SMTP_SUPPORT_AUTH_LOGIN 1 +#endif + +/* Memory allocation/deallocation can be overridden... */ +#ifndef SMTP_STATE_MALLOC +#define SMTP_STATE_MALLOC(size) mem_malloc(size) +#define SMTP_STATE_FREE(ptr) mem_free(ptr) +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* SMTP_OPTS_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h new file mode 100644 index 000000000..a3f8eb150 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h @@ -0,0 +1,135 @@ +/** + * @file + * SNMP server main API - start and basic configuration + */ + +/* + * Copyright (c) 2001, 2002 Leon Woestenberg + * Copyright (c) 2001, 2002 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Leon Woestenberg + * Martin Hentschel + * + */ +#ifndef LWIP_HDR_APPS_SNMP_H +#define LWIP_HDR_APPS_SNMP_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/apps/snmp_core.h" + +/** SNMP variable binding descriptor (publically needed for traps) */ +struct snmp_varbind +{ + /** pointer to next varbind, NULL for last in list */ + struct snmp_varbind *next; + /** pointer to previous varbind, NULL for first in list */ + struct snmp_varbind *prev; + + /** object identifier */ + struct snmp_obj_id oid; + + /** value ASN1 type */ + u8_t type; + /** object value length */ + u16_t value_len; + /** object value */ + void *value; +}; + +/** + * @ingroup snmp_core + * Agent setup, start listening to port 161. + */ +void snmp_init(void); +void snmp_set_mibs(const struct snmp_mib **mibs, u8_t num_mibs); + +void snmp_set_device_enterprise_oid(const struct snmp_obj_id* device_enterprise_oid); +const struct snmp_obj_id* snmp_get_device_enterprise_oid(void); + +void snmp_trap_dst_enable(u8_t dst_idx, u8_t enable); +void snmp_trap_dst_ip_set(u8_t dst_idx, const ip_addr_t *dst); + +/** Generic trap: cold start */ +#define SNMP_GENTRAP_COLDSTART 0 +/** Generic trap: warm start */ +#define SNMP_GENTRAP_WARMSTART 1 +/** Generic trap: link down */ +#define SNMP_GENTRAP_LINKDOWN 2 +/** Generic trap: link up */ +#define SNMP_GENTRAP_LINKUP 3 +/** Generic trap: authentication failure */ +#define SNMP_GENTRAP_AUTH_FAILURE 4 +/** Generic trap: EGP neighbor lost */ +#define SNMP_GENTRAP_EGP_NEIGHBOR_LOSS 5 +/** Generic trap: enterprise specific */ +#define SNMP_GENTRAP_ENTERPRISE_SPECIFIC 6 + +err_t snmp_send_trap_generic(s32_t generic_trap); +err_t snmp_send_trap_specific(s32_t specific_trap, struct snmp_varbind *varbinds); +err_t snmp_send_trap(const struct snmp_obj_id* oid, s32_t generic_trap, s32_t specific_trap, struct snmp_varbind *varbinds); + +#define SNMP_AUTH_TRAPS_DISABLED 0 +#define SNMP_AUTH_TRAPS_ENABLED 1 +void snmp_set_auth_traps_enabled(u8_t enable); +u8_t snmp_get_auth_traps_enabled(void); + +u8_t snmp_v1_enabled(void); +u8_t snmp_v2c_enabled(void); +u8_t snmp_v3_enabled(void); +void snmp_v1_enable(u8_t enable); +void snmp_v2c_enable(u8_t enable); +void snmp_v3_enable(u8_t enable); + +const char * snmp_get_community(void); +const char * snmp_get_community_write(void); +const char * snmp_get_community_trap(void); +void snmp_set_community(const char * const community); +void snmp_set_community_write(const char * const community); +void snmp_set_community_trap(const char * const community); + +void snmp_coldstart_trap(void); +void snmp_authfail_trap(void); + +typedef void (*snmp_write_callback_fct)(const u32_t* oid, u8_t oid_len, void* callback_arg); +void snmp_set_write_callback(snmp_write_callback_fct write_callback, void* callback_arg); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h new file mode 100644 index 000000000..6021c7222 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h @@ -0,0 +1,377 @@ +/** + * @file + * SNMP core API for implementing MIBs + */ + +/* + * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Christiaan Simons + * Martin Hentschel + */ + +#ifndef LWIP_HDR_APPS_SNMP_CORE_H +#define LWIP_HDR_APPS_SNMP_CORE_H + +#include "lwip/apps/snmp_opts.h" + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* basic ASN1 defines */ +#define SNMP_ASN1_CLASS_UNIVERSAL 0x00 +#define SNMP_ASN1_CLASS_APPLICATION 0x40 +#define SNMP_ASN1_CLASS_CONTEXT 0x80 +#define SNMP_ASN1_CLASS_PRIVATE 0xC0 + +#define SNMP_ASN1_CONTENTTYPE_PRIMITIVE 0x00 +#define SNMP_ASN1_CONTENTTYPE_CONSTRUCTED 0x20 + +/* universal tags (from ASN.1 spec.) 
*/ +#define SNMP_ASN1_UNIVERSAL_END_OF_CONTENT 0 +#define SNMP_ASN1_UNIVERSAL_INTEGER 2 +#define SNMP_ASN1_UNIVERSAL_OCTET_STRING 4 +#define SNMP_ASN1_UNIVERSAL_NULL 5 +#define SNMP_ASN1_UNIVERSAL_OBJECT_ID 6 +#define SNMP_ASN1_UNIVERSAL_SEQUENCE_OF 16 + +/* application specific (SNMP) tags (from SNMPv2-SMI) */ +#define SNMP_ASN1_APPLICATION_IPADDR 0 /* [APPLICATION 0] IMPLICIT OCTET STRING (SIZE (4)) */ +#define SNMP_ASN1_APPLICATION_COUNTER 1 /* [APPLICATION 1] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_GAUGE 2 /* [APPLICATION 2] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_TIMETICKS 3 /* [APPLICATION 3] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_OPAQUE 4 /* [APPLICATION 4] IMPLICIT OCTET STRING */ +#define SNMP_ASN1_APPLICATION_COUNTER64 6 /* [APPLICATION 6] IMPLICIT INTEGER (0..18446744073709551615) */ + +/* context specific (SNMP) tags (from RFC 1905) */ +#define SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE 1 + +/* full ASN1 type defines */ +#define SNMP_ASN1_TYPE_END_OF_CONTENT (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_END_OF_CONTENT) +#define SNMP_ASN1_TYPE_INTEGER (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_INTEGER) +#define SNMP_ASN1_TYPE_OCTET_STRING (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OCTET_STRING) +#define SNMP_ASN1_TYPE_NULL (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_NULL) +#define SNMP_ASN1_TYPE_OBJECT_ID (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OBJECT_ID) +#define SNMP_ASN1_TYPE_SEQUENCE (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_UNIVERSAL_SEQUENCE_OF) +#define SNMP_ASN1_TYPE_IPADDR (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_IPADDR) +#define SNMP_ASN1_TYPE_IPADDRESS SNMP_ASN1_TYPE_IPADDR +#define SNMP_ASN1_TYPE_COUNTER (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER) +#define SNMP_ASN1_TYPE_COUNTER32 SNMP_ASN1_TYPE_COUNTER +#define SNMP_ASN1_TYPE_GAUGE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_GAUGE) +#define SNMP_ASN1_TYPE_GAUGE32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_UNSIGNED32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_TIMETICKS (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_TIMETICKS) +#define SNMP_ASN1_TYPE_OPAQUE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_OPAQUE) +#if LWIP_HAVE_INT64 +#define SNMP_ASN1_TYPE_COUNTER64 (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER64) +#endif + +#define SNMP_VARBIND_EXCEPTION_OFFSET 0xF0 +#define SNMP_VARBIND_EXCEPTION_MASK 0x0F + +/** error codes predefined by SNMP prot. */ +typedef enum { + SNMP_ERR_NOERROR = 0, +/* +outdated v1 error codes. do not use anmore! 
+#define SNMP_ERR_NOSUCHNAME 2 use SNMP_ERR_NOSUCHINSTANCE instead +#define SNMP_ERR_BADVALUE 3 use SNMP_ERR_WRONGTYPE,SNMP_ERR_WRONGLENGTH,SNMP_ERR_WRONGENCODING or SNMP_ERR_WRONGVALUE instead +#define SNMP_ERR_READONLY 4 use SNMP_ERR_NOTWRITABLE instead +*/ + SNMP_ERR_GENERROR = 5, + SNMP_ERR_NOACCESS = 6, + SNMP_ERR_WRONGTYPE = 7, + SNMP_ERR_WRONGLENGTH = 8, + SNMP_ERR_WRONGENCODING = 9, + SNMP_ERR_WRONGVALUE = 10, + SNMP_ERR_NOCREATION = 11, + SNMP_ERR_INCONSISTENTVALUE = 12, + SNMP_ERR_RESOURCEUNAVAILABLE = 13, + SNMP_ERR_COMMITFAILED = 14, + SNMP_ERR_UNDOFAILED = 15, + SNMP_ERR_NOTWRITABLE = 17, + SNMP_ERR_INCONSISTENTNAME = 18, + + SNMP_ERR_NOSUCHINSTANCE = SNMP_VARBIND_EXCEPTION_OFFSET + SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE +} snmp_err_t; + +/** internal object identifier representation */ +struct snmp_obj_id +{ + u8_t len; + u32_t id[SNMP_MAX_OBJ_ID_LEN]; +}; + +struct snmp_obj_id_const_ref +{ + u8_t len; + const u32_t* id; +}; + +extern const struct snmp_obj_id_const_ref snmp_zero_dot_zero; /* administrative identifier from SNMPv2-SMI */ + +/** SNMP variant value, used as reference in struct snmp_node_instance and table implementation */ +union snmp_variant_value +{ + void* ptr; + const void* const_ptr; + u32_t u32; + s32_t s32; +#if LWIP_HAVE_INT64 + u64_t u64; +#endif +}; + + +/** +SNMP MIB node types + tree node is the only node the stack can process in order to walk the tree, + all other nodes are assumed to be leaf nodes. + This cannot be an enum because users may want to define their own node types. +*/ +#define SNMP_NODE_TREE 0x00 +/* predefined leaf node types */ +#define SNMP_NODE_SCALAR 0x01 +#define SNMP_NODE_SCALAR_ARRAY 0x02 +#define SNMP_NODE_TABLE 0x03 +#define SNMP_NODE_THREADSYNC 0x04 + +/** node "base class" layout, the mandatory fields for a node */ +struct snmp_node +{ + /** one out of SNMP_NODE_TREE or any leaf node type (like SNMP_NODE_SCALAR) */ + u8_t node_type; + /** the number assigned to this node which used as part of the full OID */ + u32_t oid; +}; + +/** SNMP node instance access types */ +typedef enum { + SNMP_NODE_INSTANCE_ACCESS_READ = 1, + SNMP_NODE_INSTANCE_ACCESS_WRITE = 2, + SNMP_NODE_INSTANCE_READ_ONLY = SNMP_NODE_INSTANCE_ACCESS_READ, + SNMP_NODE_INSTANCE_READ_WRITE = (SNMP_NODE_INSTANCE_ACCESS_READ | SNMP_NODE_INSTANCE_ACCESS_WRITE), + SNMP_NODE_INSTANCE_WRITE_ONLY = SNMP_NODE_INSTANCE_ACCESS_WRITE, + SNMP_NODE_INSTANCE_NOT_ACCESSIBLE = 0 +} snmp_access_t; + +struct snmp_node_instance; + +typedef s16_t (*node_instance_get_value_method)(struct snmp_node_instance*, void*); +typedef snmp_err_t (*node_instance_set_test_method)(struct snmp_node_instance*, u16_t, void*); +typedef snmp_err_t (*node_instance_set_value_method)(struct snmp_node_instance*, u16_t, void*); +typedef void (*node_instance_release_method)(struct snmp_node_instance*); + +#define SNMP_GET_VALUE_RAW_DATA 0x4000 /* do not use 0x8000 because return value of node_instance_get_value_method is signed16 and 0x8000 would be the signed bit */ + +/** SNMP node instance */ +struct snmp_node_instance +{ + /** prefilled with the node, get_instance() is called on; may be changed by user to any value to pass an arbitrary node between calls to get_instance() and get_value/test_value/set_value */ + const struct snmp_node* node; + /** prefilled with the instance id requested; for get_instance() this is the exact oid requested; for get_next_instance() this is the relative starting point, stack expects relative oid of next node here */ + struct snmp_obj_id instance_oid; + + /** ASN type 
for this object (see snmp_asn1.h for definitions) */ + u8_t asn1_type; + /** one out of instance access types defined above (SNMP_NODE_INSTANCE_READ_ONLY,...) */ + snmp_access_t access; + + /** returns object value for the given object identifier. Return values <0 to indicate an error */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; + /** called in any case when the instance is not required anymore by stack (useful for freeing memory allocated in get_instance/get_next_instance methods) */ + node_instance_release_method release_instance; + + /** reference to pass arbitrary value between calls to get_instance() and get_value/test_value/set_value */ + union snmp_variant_value reference; + /** see reference (if reference is a pointer, the length of underlying data may be stored here or anything else) */ + u32_t reference_len; +}; + + +/** SNMP tree node */ +struct snmp_tree_node +{ + /** inherited "base class" members */ + struct snmp_node node; + u16_t subnode_count; + const struct snmp_node* const *subnodes; +}; + +#define SNMP_CREATE_TREE_NODE(oid, subnodes) \ + {{ SNMP_NODE_TREE, (oid) }, \ + (u16_t)LWIP_ARRAYSIZE(subnodes), (subnodes) } + +#define SNMP_CREATE_EMPTY_TREE_NODE(oid) \ + {{ SNMP_NODE_TREE, (oid) }, \ + 0, NULL } + +/** SNMP leaf node */ +struct snmp_leaf_node +{ + /** inherited "base class" members */ + struct snmp_node node; + snmp_err_t (*get_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + snmp_err_t (*get_next_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +}; + +/** represents a single mib with its base oid and root node */ +struct snmp_mib +{ + const u32_t *base_oid; + u8_t base_oid_len; + const struct snmp_node *root_node; +}; + +#define SNMP_MIB_CREATE(oid_list, root_node) { (oid_list), (u8_t)LWIP_ARRAYSIZE(oid_list), root_node } + +/** OID range structure */ +struct snmp_oid_range +{ + u32_t min; + u32_t max; +}; + +/** checks if incoming OID length and values are in allowed ranges */ +u8_t snmp_oid_in_range(const u32_t *oid_in, u8_t oid_len, const struct snmp_oid_range *oid_ranges, u8_t oid_ranges_len); + +typedef enum { + SNMP_NEXT_OID_STATUS_SUCCESS, + SNMP_NEXT_OID_STATUS_NO_MATCH, + SNMP_NEXT_OID_STATUS_BUF_TO_SMALL +} snmp_next_oid_status_t; + +/** state for next_oid_init / next_oid_check functions */ +struct snmp_next_oid_state +{ + const u32_t* start_oid; + u8_t start_oid_len; + + u32_t* next_oid; + u8_t next_oid_len; + u8_t next_oid_max_len; + + snmp_next_oid_status_t status; + void* reference; +}; + +void snmp_next_oid_init(struct snmp_next_oid_state *state, + const u32_t *start_oid, u8_t start_oid_len, + u32_t *next_oid_buf, u8_t next_oid_max_len); +u8_t snmp_next_oid_precheck(struct snmp_next_oid_state *state, const u32_t *oid, u8_t oid_len); +u8_t snmp_next_oid_check(struct snmp_next_oid_state *state, const u32_t *oid, u8_t oid_len, void* reference); + +void snmp_oid_assign(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_combine(struct snmp_obj_id* target, const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); +void snmp_oid_prefix(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_append(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +u8_t snmp_oid_equal(const u32_t *oid1, u8_t oid1_len, 
const u32_t *oid2, u8_t oid2_len); +s8_t snmp_oid_compare(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); + +#if LWIP_IPV4 +u8_t snmp_oid_to_ip4(const u32_t *oid, ip4_addr_t *ip); +void snmp_ip4_to_oid(const ip4_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +u8_t snmp_oid_to_ip6(const u32_t *oid, ip6_addr_t *ip); +void snmp_ip6_to_oid(const ip6_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 || LWIP_IPV6 +u8_t snmp_ip_to_oid(const ip_addr_t *ip, u32_t *oid); +u8_t snmp_ip_port_to_oid(const ip_addr_t *ip, u16_t port, u32_t *oid); + +u8_t snmp_oid_to_ip(const u32_t *oid, u8_t oid_len, ip_addr_t *ip); +u8_t snmp_oid_to_ip_port(const u32_t *oid, u8_t oid_len, ip_addr_t *ip, u16_t *port); +#endif /* LWIP_IPV4 || LWIP_IPV6 */ + +struct netif; +u8_t netif_to_num(const struct netif *netif); + +snmp_err_t snmp_set_test_ok(struct snmp_node_instance* instance, u16_t value_len, void* value); /* generic function which can be used if test is always successful */ + +err_t snmp_decode_bits(const u8_t *buf, u32_t buf_len, u32_t *bit_value); +err_t snmp_decode_truthvalue(const s32_t *asn1_value, u8_t *bool_value); +u8_t snmp_encode_bits(u8_t *buf, u32_t buf_len, u32_t bit_value, u8_t bit_count); +u8_t snmp_encode_truthvalue(s32_t *asn1_value, u32_t bool_value); + +struct snmp_statistics +{ + u32_t inpkts; + u32_t outpkts; + u32_t inbadversions; + u32_t inbadcommunitynames; + u32_t inbadcommunityuses; + u32_t inasnparseerrs; + u32_t intoobigs; + u32_t innosuchnames; + u32_t inbadvalues; + u32_t inreadonlys; + u32_t ingenerrs; + u32_t intotalreqvars; + u32_t intotalsetvars; + u32_t ingetrequests; + u32_t ingetnexts; + u32_t insetrequests; + u32_t ingetresponses; + u32_t intraps; + u32_t outtoobigs; + u32_t outnosuchnames; + u32_t outbadvalues; + u32_t outgenerrs; + u32_t outgetrequests; + u32_t outgetnexts; + u32_t outsetrequests; + u32_t outgetresponses; + u32_t outtraps; +#if LWIP_SNMP_V3 + u32_t unsupportedseclevels; + u32_t notintimewindows; + u32_t unknownusernames; + u32_t unknownengineids; + u32_t wrongdigests; + u32_t decryptionerrors; +#endif +}; + +extern struct snmp_statistics snmp_stats; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SNMP */ + +#endif /* LWIP_HDR_APPS_SNMP_CORE_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h new file mode 100644 index 000000000..2f4a68935 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h @@ -0,0 +1,78 @@ +/** + * @file + * SNMP MIB2 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_APPS_SNMP_MIB2_H +#define LWIP_HDR_APPS_SNMP_MIB2_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ +#if SNMP_LWIP_MIB2 + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_mib mib2; + +#if SNMP_USE_NETCONN +#include "lwip/apps/snmp_threadsync.h" +void snmp_mib2_lwip_synchronizer(snmp_threadsync_called_fn fn, void* arg); +extern struct snmp_threadsync_instance snmp_mib2_lwip_locks; +#endif + +#ifndef SNMP_SYSSERVICES +#define SNMP_SYSSERVICES ((1 << 6) | (1 << 3) | ((IP_FORWARD) << 2)) +#endif + +void snmp_mib2_set_sysdescr(const u8_t* str, const u16_t* len); /* read-only be defintion */ +void snmp_mib2_set_syscontact(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_syscontact_readonly(const u8_t *ocstr, const u16_t *ocstrlen); +void snmp_mib2_set_sysname(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_sysname_readonly(const u8_t *ocstr, const u16_t *ocstrlen); +void snmp_mib2_set_syslocation(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_syslocation_readonly(const u8_t *ocstr, const u16_t *ocstrlen); + +#endif /* SNMP_LWIP_MIB2 */ +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_MIB2_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h new file mode 100644 index 000000000..c892d22af --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h @@ -0,0 +1,297 @@ +/** + * @file + * SNMP server options list + */ + +/* + * Copyright (c) 2015 Dirk Ziegelmeier + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
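A minimal sketch of how the snmp_mib2_set_sysname() hook above can make the MIB2 sysName object writable by handing the agent a caller-owned buffer; the buffer name, contents and size are illustrative.

#include "lwip/apps/snmp_mib2.h"

static u8_t  example_sysname[32] = "lwip-host"; /* placeholder name */
static u16_t example_sysname_len = 9;           /* current string length */

static void example_mib2_init(void)
{
  /* the agent writes into example_sysname and updates example_sysname_len on SET */
  snmp_mib2_set_sysname(example_sysname, &example_sysname_len, sizeof(example_sysname));
}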
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_OPTS_H +#define LWIP_HDR_SNMP_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup snmp_opts Options + * @ingroup snmp + * @{ + */ + +/** + * LWIP_SNMP==1: This enables the lwIP SNMP agent. UDP must be available + * for SNMP transport. + * If you want to use your own SNMP agent, leave this disabled. + * To integrate MIB2 of an external agent, you need to enable + * LWIP_MIB2_CALLBACKS and MIB2_STATS. This will give you the callbacks + * and statistics counters you need to get MIB2 working. + */ +#if !defined LWIP_SNMP || defined __DOXYGEN__ +#define LWIP_SNMP 0 +#endif + +/** + * SNMP_USE_NETCONN: Use netconn API instead of raw API. + * Makes SNMP agent run in a worker thread, so blocking operations + * can be done in MIB calls. + */ +#if !defined SNMP_USE_NETCONN || defined __DOXYGEN__ +#define SNMP_USE_NETCONN 0 +#endif + +/** + * SNMP_USE_RAW: Use raw API. + * SNMP agent does not run in a worker thread, so blocking operations + * should not be done in MIB calls. + */ +#if !defined SNMP_USE_RAW || defined __DOXYGEN__ +#define SNMP_USE_RAW 1 +#endif + +#if SNMP_USE_NETCONN && SNMP_USE_RAW +#error SNMP stack can use only one of the APIs {raw, netconn} +#endif + +#if LWIP_SNMP && !SNMP_USE_NETCONN && !SNMP_USE_RAW +#error SNMP stack needs a receive API and UDP {raw, netconn} +#endif + +#if SNMP_USE_NETCONN +/** + * SNMP_STACK_SIZE: Stack size of SNMP netconn worker thread + */ +#if !defined SNMP_STACK_SIZE || defined __DOXYGEN__ +#define SNMP_STACK_SIZE DEFAULT_THREAD_STACKSIZE +#endif + +/** + * SNMP_THREAD_PRIO: SNMP netconn worker thread priority + */ +#if !defined SNMP_THREAD_PRIO || defined __DOXYGEN__ +#define SNMP_THREAD_PRIO DEFAULT_THREAD_PRIO +#endif +#endif /* SNMP_USE_NETCONN */ + +/** + * SNMP_TRAP_DESTINATIONS: Number of trap destinations. At least one trap + * destination is required + */ +#if !defined SNMP_TRAP_DESTINATIONS || defined __DOXYGEN__ +#define SNMP_TRAP_DESTINATIONS 1 +#endif + +/** + * Only allow SNMP write actions that are 'safe' (e.g. disabling netifs is not + * a safe action and disabled when SNMP_SAFE_REQUESTS = 1). + * Unsafe requests are disabled by default! + */ +#if !defined SNMP_SAFE_REQUESTS || defined __DOXYGEN__ +#define SNMP_SAFE_REQUESTS 1 +#endif + +/** + * The maximum length of strings used. + */ +#if !defined SNMP_MAX_OCTET_STRING_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OCTET_STRING_LEN 127 +#endif + +/** + * The maximum number of Sub ID's inside an object identifier. + * Indirectly this also limits the maximum depth of SNMP tree. 
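For reference, a sketch of the lwipopts.h settings that would enable the agent on top of the netconn API; the values shown are illustrative and each option has a documented default above.

/* lwipopts.h (sketch) */
#define LWIP_SNMP              1   /* build the agent; UDP must be available */
#define SNMP_USE_NETCONN       1   /* run in a worker thread so MIB calls may block */
#define SNMP_USE_RAW           0   /* the two APIs are mutually exclusive */
#define SNMP_TRAP_DESTINATIONS 1
#define SNMP_SAFE_REQUESTS     1   /* keep 'unsafe' write actions disabled */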
+ */ +#if !defined SNMP_MAX_OBJ_ID_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OBJ_ID_LEN 50 +#endif + +#if !defined SNMP_MAX_VALUE_SIZE || defined __DOXYGEN__ +/** + * The minimum size of a value. + */ +#define SNMP_MIN_VALUE_SIZE (2 * sizeof(u32_t*)) /* size required to store the basic types (8 bytes for counter64) */ +/** + * The maximum size of a value. + */ +#define SNMP_MAX_VALUE_SIZE LWIP_MAX(LWIP_MAX((SNMP_MAX_OCTET_STRING_LEN), sizeof(u32_t)*(SNMP_MAX_OBJ_ID_LEN)), SNMP_MIN_VALUE_SIZE) +#endif + +/** + * The snmp read-access community. Used for write-access and traps, too + * unless SNMP_COMMUNITY_WRITE or SNMP_COMMUNITY_TRAP are enabled, respectively. + */ +#if !defined SNMP_COMMUNITY || defined __DOXYGEN__ +#define SNMP_COMMUNITY "public" +#endif + +/** + * The snmp write-access community. + * Set this community to "" in order to disallow any write access. + */ +#if !defined SNMP_COMMUNITY_WRITE || defined __DOXYGEN__ +#define SNMP_COMMUNITY_WRITE "private" +#endif + +/** + * The snmp community used for sending traps. + */ +#if !defined SNMP_COMMUNITY_TRAP || defined __DOXYGEN__ +#define SNMP_COMMUNITY_TRAP "public" +#endif + +/** + * The maximum length of community string. + * If community names shall be adjusted at runtime via snmp_set_community() calls, + * enter here the possible maximum length (+1 for terminating null character). + */ +#if !defined SNMP_MAX_COMMUNITY_STR_LEN || defined __DOXYGEN__ +#define SNMP_MAX_COMMUNITY_STR_LEN LWIP_MAX(LWIP_MAX(sizeof(SNMP_COMMUNITY), sizeof(SNMP_COMMUNITY_WRITE)), sizeof(SNMP_COMMUNITY_TRAP)) +#endif + +/** + * The OID identifiying the device. This may be the enterprise OID itself or any OID located below it in tree. + */ +#if !defined SNMP_DEVICE_ENTERPRISE_OID || defined __DOXYGEN__ +#define SNMP_LWIP_ENTERPRISE_OID 26381 +/** + * IANA assigned enterprise ID for lwIP is 26381 + * @see http://www.iana.org/assignments/enterprise-numbers + * + * @note this enterprise ID is assigned to the lwIP project, + * all object identifiers living under this ID are assigned + * by the lwIP maintainers! + * @note don't change this define, use snmp_set_device_enterprise_oid() + * + * If you need to create your own private MIB you'll need + * to apply for your own enterprise ID with IANA: + * http://www.iana.org/numbers.html + */ +#define SNMP_DEVICE_ENTERPRISE_OID {1, 3, 6, 1, 4, 1, SNMP_LWIP_ENTERPRISE_OID} +/** + * Length of SNMP_DEVICE_ENTERPRISE_OID + */ +#define SNMP_DEVICE_ENTERPRISE_OID_LEN 7 +#endif + +/** + * SNMP_DEBUG: Enable debugging for SNMP messages. + */ +#if !defined SNMP_DEBUG || defined __DOXYGEN__ +#define SNMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SNMP_MIB_DEBUG: Enable debugging for SNMP MIBs. + */ +#if !defined SNMP_MIB_DEBUG || defined __DOXYGEN__ +#define SNMP_MIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * Indicates if the MIB2 implementation of LWIP SNMP stack is used. + */ +#if !defined SNMP_LWIP_MIB2 || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2 LWIP_SNMP +#endif + +/** + * Value return for sysDesc field of MIB2. + */ +#if !defined SNMP_LWIP_MIB2_SYSDESC || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSDESC "lwIP" +#endif + +/** + * Value return for sysName field of MIB2. + * To make sysName field settable, call snmp_mib2_set_sysname() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSNAME || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSNAME "FQDN-unk" +#endif + +/** + * Value return for sysContact field of MIB2. 
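A sketch of how the community options above might be overridden from lwipopts.h; the string values are illustrative.

/* lwipopts.h (sketch) */
#define SNMP_COMMUNITY        "public"   /* read access */
#define SNMP_COMMUNITY_WRITE  ""         /* empty string disallows any write access */
#define SNMP_COMMUNITY_TRAP   "traps"    /* community used when sending traps */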
+ * To make sysContact field settable, call snmp_mib2_set_syscontact() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSCONTACT || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSCONTACT "" +#endif + +/** + * Value return for sysLocation field of MIB2. + * To make sysLocation field settable, call snmp_mib2_set_syslocation() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSLOCATION || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSLOCATION "" +#endif + +/** + * This value is used to limit the repetitions processed in GetBulk requests (value == 0 means no limitation). + * This may be useful to limit the load for a single request. + * According to SNMP RFC 1905 it is allowed to not return all requested variables from a GetBulk request if system load would be too high. + * so the effect is that the client will do more requests to gather all data. + * For the stack this could be useful in case that SNMP processing is done in TCP/IP thread. In this situation a request with many + * repetitions could block the thread for a longer time. Setting limit here will keep the stack more responsive. + */ +#if !defined SNMP_LWIP_GETBULK_MAX_REPETITIONS || defined __DOXYGEN__ +#define SNMP_LWIP_GETBULK_MAX_REPETITIONS 0 +#endif + +/** + * @} + */ + +/* + ------------------------------------ + ---------- SNMPv3 options ---------- + ------------------------------------ +*/ + +/** + * LWIP_SNMP_V3==1: This enables EXPERIMENTAL SNMPv3 support. LWIP_SNMP must + * also be enabled. + * THIS IS UNDER DEVELOPMENT AND SHOULD NOT BE ENABLED IN PRODUCTS. + */ +#ifndef LWIP_SNMP_V3 +#define LWIP_SNMP_V3 0 +#endif + +#ifndef LWIP_SNMP_V3_MBEDTLS +#define LWIP_SNMP_V3_MBEDTLS LWIP_SNMP_V3 +#endif + +#ifndef LWIP_SNMP_V3_CRYPTO +#define LWIP_SNMP_V3_CRYPTO LWIP_SNMP_V3_MBEDTLS +#endif + +#ifndef LWIP_SNMP_CONFIGURE_VERSIONS +#define LWIP_SNMP_CONFIGURE_VERSIONS 0 +#endif + +#endif /* LWIP_HDR_SNMP_OPTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h new file mode 100644 index 000000000..40a060c64 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h @@ -0,0 +1,113 @@ +/** + * @file + * SNMP server MIB API to implement scalar nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_SCALAR_H +#define LWIP_HDR_APPS_SNMP_SCALAR_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** basic scalar node */ +struct snmp_scalar_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u8_t asn1_type; + snmp_access_t access; + node_instance_get_value_method get_value; + node_instance_set_test_method set_test; + node_instance_set_value_method set_value; +}; + + +snmp_err_t snmp_scalar_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_NODE(oid, access, asn1_type, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR, (oid) }, \ + snmp_scalar_get_instance, \ + snmp_scalar_get_next_instance }, \ + (asn1_type), (access), (get_value_method), (set_test_method), (set_value_method) } + +#define SNMP_SCALAR_CREATE_NODE_READONLY(oid, asn1_type, get_value_method) SNMP_SCALAR_CREATE_NODE(oid, SNMP_NODE_INSTANCE_READ_ONLY, asn1_type, get_value_method, NULL, NULL) + +/** scalar array node - a tree node which contains scalars only as children */ +struct snmp_scalar_array_node_def +{ + u32_t oid; + u8_t asn1_type; + snmp_access_t access; +}; + +typedef s16_t (*snmp_scalar_array_get_value_method)(const struct snmp_scalar_array_node_def*, void*); +typedef snmp_err_t (*snmp_scalar_array_set_test_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); +typedef snmp_err_t (*snmp_scalar_array_set_value_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); + +/** basic scalar array node */ +struct snmp_scalar_array_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t array_node_count; + const struct snmp_scalar_array_node_def* array_nodes; + snmp_scalar_array_get_value_method get_value; + snmp_scalar_array_set_test_method set_test; + snmp_scalar_array_set_value_method set_value; +}; + +snmp_err_t snmp_scalar_array_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_array_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_ARRAY_NODE(oid, array_nodes, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR_ARRAY, (oid) }, \ + snmp_scalar_array_get_instance, \ + snmp_scalar_array_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(array_nodes), (array_nodes), (get_value_method), (set_test_method), (set_value_method) } + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_SCALAR_H */ diff --git 
a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h new file mode 100644 index 000000000..47409cc20 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h @@ -0,0 +1,32 @@ +/* +Generated by LwipMibCompiler +*/ + +#ifndef LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H +#define LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H + +#include "lwip/apps/snmp_opts.h" +#if LWIP_SNMP + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_obj_id usmNoAuthProtocol; +extern const struct snmp_obj_id usmHMACMD5AuthProtocol; +extern const struct snmp_obj_id usmHMACSHAAuthProtocol; + +extern const struct snmp_obj_id usmNoPrivProtocol; +extern const struct snmp_obj_id usmDESPrivProtocol; +extern const struct snmp_obj_id usmAESPrivProtocol; + +extern const struct snmp_mib snmpframeworkmib; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LWIP_SNMP */ +#endif /* LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h new file mode 100644 index 000000000..88cfcd8eb --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h @@ -0,0 +1,24 @@ +/* +Generated by LwipMibCompiler +*/ + +#ifndef LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H +#define LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H + +#include "lwip/apps/snmp_opts.h" +#if LWIP_SNMP + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_mib snmpusmmib; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LWIP_SNMP */ +#endif /* LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h new file mode 100644 index 000000000..4988b51c2 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h @@ -0,0 +1,134 @@ +/** + * @file + * SNMP server MIB API to implement table nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
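Looking back at the scalar-node macros in snmp_scalar.h above, a read-only leaf might be declared roughly as follows. The OID, the ASN.1 type constant (defined in the SNMP core/ASN.1 headers) and the getter body are illustrative; per the instance documentation in snmp_core.h, the getter returns the value length in bytes, or a negative value on error.

#include "lwip/apps/snmp_scalar.h"

/* getter: writes the value into 'value' and returns its length in bytes */
static s16_t example_counter_get_value(struct snmp_node_instance* instance, void* value)
{
  (void)instance;
  *(u32_t*)value = 42u;   /* placeholder reading */
  return sizeof(u32_t);
}

static const struct snmp_scalar_node example_counter =
  SNMP_SCALAR_CREATE_NODE_READONLY(1, SNMP_ASN1_TYPE_COUNTER, example_counter_get_value);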
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_TABLE_H +#define LWIP_HDR_APPS_SNMP_TABLE_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** default (customizable) read/write table */ +struct snmp_table_col_def +{ + u32_t index; + u8_t asn1_type; + snmp_access_t access; +}; + +/** table node */ +struct snmp_table_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t column_count; + const struct snmp_table_col_def* columns; + snmp_err_t (*get_cell_instance)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, struct snmp_node_instance* cell_instance); + snmp_err_t (*get_next_cell_instance)(const u32_t* column, struct snmp_obj_id* row_oid, struct snmp_node_instance* cell_instance); + /** returns object value for the given object identifier */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; +}; + +snmp_err_t snmp_table_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE(oid, columns, get_cell_instance_method, get_next_cell_instance_method, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_get_instance, \ + snmp_table_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), \ + (get_cell_instance_method), (get_next_cell_instance_method), \ + (get_value_method), (set_test_method), (set_value_method)} + +#define SNMP_TABLE_GET_COLUMN_FROM_OID(oid) ((oid)[1]) /* first array value is (fixed) row entry (fixed to 1) and 2nd value is column, follow3ed by instance */ + + +/** simple read-only table */ +typedef enum { + SNMP_VARIANT_VALUE_TYPE_U32, + SNMP_VARIANT_VALUE_TYPE_S32, + SNMP_VARIANT_VALUE_TYPE_PTR, + SNMP_VARIANT_VALUE_TYPE_CONST_PTR +} snmp_table_column_data_type_t; + +struct snmp_table_simple_col_def +{ + u32_t index; + u8_t asn1_type; + snmp_table_column_data_type_t data_type; /* depending of what union member is used to store the value*/ +}; + +/** simple read-only table node */ +struct snmp_table_simple_node +{ + /* inherited "base class" members */ + struct snmp_leaf_node node; + u16_t column_count; + const struct snmp_table_simple_col_def* columns; + snmp_err_t (*get_cell_value)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len); + snmp_err_t (*get_next_cell_instance_and_value)(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len); +}; + +snmp_err_t 
snmp_table_simple_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_simple_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE_SIMPLE(oid, columns, get_cell_value_method, get_next_cell_instance_and_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_simple_get_instance, \ + snmp_table_simple_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), (get_cell_value_method), (get_next_cell_instance_and_value_method) } + +s16_t snmp_table_extract_value_from_s32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_u32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_refconstptr(struct snmp_node_instance* instance, void* value); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_TABLE_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h new file mode 100644 index 000000000..a25dbf2d0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h @@ -0,0 +1,114 @@ +/** + * @file + * SNMP server MIB API to implement thread synchronization + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_THREADSYNC_H +#define LWIP_HDR_APPS_SNMP_THREADSYNC_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/apps/snmp_core.h" +#include "lwip/sys.h" + +typedef void (*snmp_threadsync_called_fn)(void* arg); +typedef void (*snmp_threadsync_synchronizer_fn)(snmp_threadsync_called_fn fn, void* arg); + + +/** Thread sync runtime data. 
For internal usage only. */ +struct threadsync_data +{ + union { + snmp_err_t err; + s16_t s16; + } retval; + union { + const u32_t *root_oid; + void *value; + } arg1; + union { + u8_t root_oid_len; + u16_t len; + } arg2; + const struct snmp_threadsync_node *threadsync_node; + struct snmp_node_instance proxy_instance; +}; + +/** Thread sync instance. Needed EXCATLY once for every thread to be synced into. */ +struct snmp_threadsync_instance +{ + sys_sem_t sem; + sys_mutex_t sem_usage_mutex; + snmp_threadsync_synchronizer_fn sync_fn; + struct threadsync_data data; +}; + +/** SNMP thread sync proxy leaf node */ +struct snmp_threadsync_node +{ + /* inherited "base class" members */ + struct snmp_leaf_node node; + + const struct snmp_leaf_node *target; + struct snmp_threadsync_instance *instance; +}; + +snmp_err_t snmp_threadsync_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_threadsync_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +/** Create thread sync proxy node */ +#define SNMP_CREATE_THREAD_SYNC_NODE(oid, target_leaf_node, threadsync_instance) \ + {{{ SNMP_NODE_THREADSYNC, (oid) }, \ + snmp_threadsync_get_instance, \ + snmp_threadsync_get_next_instance }, \ + (target_leaf_node), \ + (threadsync_instance) } + +/** Create thread sync instance data */ +void snmp_threadsync_init(struct snmp_threadsync_instance *instance, snmp_threadsync_synchronizer_fn sync_fn); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_THREADSYNC_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h new file mode 100644 index 000000000..e0dda6499 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h @@ -0,0 +1,114 @@ +/** + * @file + * Additional SNMPv3 functionality RFC3414 and RFC3826. + */ + +/* + * Copyright (c) 2016 Elias Oenal. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
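A sketch of the thread-sync proxy pattern declared above: a wrapper node forwards the MIB callbacks of an existing leaf into another thread through a user-supplied synchronizer. The leaf being wrapped and the post-to-thread mechanism are illustrative only.

#include "lwip/apps/snmp_scalar.h"
#include "lwip/apps/snmp_threadsync.h"

static struct snmp_threadsync_instance example_sync;

/* must arrange for fn(arg) to run in the thread that owns the data; calling it
 * directly here only stands in for a real message-queue post */
static void example_synchronizer(snmp_threadsync_called_fn fn, void* arg)
{
  fn(arg);
}

extern const struct snmp_scalar_node example_counter;   /* some existing leaf */

static const struct snmp_threadsync_node example_counter_proxy =
  SNMP_CREATE_THREAD_SYNC_NODE(1, &example_counter.node, &example_sync);

static void example_threadsync_setup(void)
{
  snmp_threadsync_init(&example_sync, example_synchronizer);
}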
+ * + * Author: Elias Oenal + */ + +#ifndef LWIP_HDR_APPS_SNMP_V3_H +#define LWIP_HDR_APPS_SNMP_V3_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP && LWIP_SNMP_V3 + +typedef enum +{ + SNMP_V3_AUTH_ALGO_INVAL = 0, + SNMP_V3_AUTH_ALGO_MD5 = 1, + SNMP_V3_AUTH_ALGO_SHA = 2 +} snmpv3_auth_algo_t; + +typedef enum +{ + SNMP_V3_PRIV_ALGO_INVAL = 0, + SNMP_V3_PRIV_ALGO_DES = 1, + SNMP_V3_PRIV_ALGO_AES = 2 +} snmpv3_priv_algo_t; + +typedef enum +{ + SNMP_V3_USER_STORAGETYPE_OTHER = 1, + SNMP_V3_USER_STORAGETYPE_VOLATILE = 2, + SNMP_V3_USER_STORAGETYPE_NONVOLATILE = 3, + SNMP_V3_USER_STORAGETYPE_PERMANENT = 4, + SNMP_V3_USER_STORAGETYPE_READONLY = 5 +} snmpv3_user_storagetype_t; + +/* + * The following callback functions must be implemented by the application. + * There is a dummy implementation in snmpv3_dummy.c. + */ + +void snmpv3_get_engine_id(const char **id, u8_t *len); +err_t snmpv3_set_engine_id(const char* id, u8_t len); + +u32_t snmpv3_get_engine_boots(void); +void snmpv3_set_engine_boots(u32_t boots); + +u32_t snmpv3_get_engine_time(void); +void snmpv3_reset_engine_time(void); + +err_t snmpv3_get_user(const char* username, snmpv3_auth_algo_t *auth_algo, u8_t *auth_key, snmpv3_priv_algo_t *priv_algo, u8_t *priv_key); +u8_t snmpv3_get_amount_of_users(void); +err_t snmpv3_get_user_storagetype(const char *username, snmpv3_user_storagetype_t *storagetype); +err_t snmpv3_get_username(char *username, u8_t index); + +/* The following functions are provided by the SNMPv3 agent */ + +void snmpv3_engine_id_changed(void); +s32_t snmpv3_get_engine_time_internal(void); + +void snmpv3_password_to_key_md5( + const u8_t *password, /* IN */ + size_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 16-octet buffer */ + +void snmpv3_password_to_key_sha( + const u8_t *password, /* IN */ + size_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 20-octet buffer */ + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_V3_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h new file mode 100644 index 000000000..3c0f95f72 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h @@ -0,0 +1,80 @@ +/** + * @file + * SNTP client API + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
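The SNMPv3 support above expects the application to supply the callbacks listed in snmpv3.h (a dummy implementation ships in snmpv3_dummy.c). A sketch of the engine-ID pair could look like the following; the ID bytes and buffer size are purely illustrative.

#include <string.h>
#include "lwip/apps/snmpv3.h"

static char example_engine_id[32]  = { (char)0x80, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04 };
static u8_t example_engine_id_len  = 8;

void snmpv3_get_engine_id(const char **id, u8_t *len)
{
  *id  = example_engine_id;
  *len = example_engine_id_len;
}

err_t snmpv3_set_engine_id(const char *id, u8_t len)
{
  if (len > sizeof(example_engine_id)) {
    return ERR_ARG;
  }
  memcpy(example_engine_id, id, len);
  example_engine_id_len = len;
  return ERR_OK;
}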
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_H +#define LWIP_HDR_APPS_SNTP_H + +#include "lwip/apps/sntp_opts.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* SNTP operating modes: default is to poll using unicast. + The mode has to be set before calling sntp_init(). */ +#define SNTP_OPMODE_POLL 0 +#define SNTP_OPMODE_LISTENONLY 1 +void sntp_setoperatingmode(u8_t operating_mode); +u8_t sntp_getoperatingmode(void); + +void sntp_init(void); +void sntp_stop(void); +u8_t sntp_enabled(void); + +void sntp_setserver(u8_t idx, const ip_addr_t *addr); +const ip_addr_t* sntp_getserver(u8_t idx); + +#if SNTP_MONITOR_SERVER_REACHABILITY +u8_t sntp_getreachability(u8_t idx); +#endif /* SNTP_MONITOR_SERVER_REACHABILITY */ + +#if SNTP_SERVER_DNS +void sntp_setservername(u8_t idx, const char *server); +const char *sntp_getservername(u8_t idx); +#endif /* SNTP_SERVER_DNS */ + +#if SNTP_GET_SERVERS_FROM_DHCP +void sntp_servermode_dhcp(int set_servers_from_dhcp); +#else /* SNTP_GET_SERVERS_FROM_DHCP */ +#define sntp_servermode_dhcp(x) +#endif /* SNTP_GET_SERVERS_FROM_DHCP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNTP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h new file mode 100644 index 000000000..ed98040fe --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h @@ -0,0 +1,209 @@ +/** + * @file + * SNTP client options list + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
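A sketch of a typical SNTP client start-up with the API above: select the operating mode before sntp_init(), point server slot 0 at a server, then start the client. The host name is only an example (it is also the one suggested for SNTP_SERVER_ADDRESS in sntp_opts.h further below).

#include "lwip/apps/sntp.h"

static void example_sntp_start(void)
{
  sntp_setoperatingmode(SNTP_OPMODE_POLL);   /* must be set before sntp_init() */
#if SNTP_SERVER_DNS
  sntp_setservername(0, "pool.ntp.org");     /* example host name */
#else
  /* without DNS names, sntp_setserver(0, &some_ip_addr) would be used instead */
#endif
  sntp_init();
}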
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_OPTS_H +#define LWIP_HDR_APPS_SNTP_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup sntp_opts Options + * @ingroup sntp + * @{ + */ + +/** SNTP macro to change system time in seconds + * Define SNTP_SET_SYSTEM_TIME_US(sec, us) to set the time in microseconds + * instead of this one if you need the additional precision. Alternatively, + * define SNTP_SET_SYSTEM_TIME_NTP(sec, frac) in order to work with native + * NTP timestamps instead. + */ +#if !defined SNTP_SET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_SET_SYSTEM_TIME(sec) LWIP_UNUSED_ARG(sec) +#endif + +/** The maximum number of SNTP servers that can be set */ +#if !defined SNTP_MAX_SERVERS || defined __DOXYGEN__ +#define SNTP_MAX_SERVERS LWIP_DHCP_MAX_NTP_SERVERS +#endif + +/** Set this to 1 to implement the callback function called by dhcp when + * NTP servers are received. */ +#if !defined SNTP_GET_SERVERS_FROM_DHCP || defined __DOXYGEN__ +#define SNTP_GET_SERVERS_FROM_DHCP LWIP_DHCP_GET_NTP_SRV +#endif + +/** Set this to 1 to support DNS names (or IP address strings) to set sntp servers + * One server address/name can be defined as default if SNTP_SERVER_DNS == 1: + * \#define SNTP_SERVER_ADDRESS "pool.ntp.org" + */ +#if !defined SNTP_SERVER_DNS || defined __DOXYGEN__ +#define SNTP_SERVER_DNS 0 +#endif + +/** + * SNTP_DEBUG: Enable debugging for SNTP. + */ +#if !defined SNTP_DEBUG || defined __DOXYGEN__ +#define SNTP_DEBUG LWIP_DBG_OFF +#endif + +/** SNTP server port */ +#if !defined SNTP_PORT || defined __DOXYGEN__ +#define SNTP_PORT LWIP_IANA_PORT_SNTP +#endif + +/** Sanity check: + * Define this to + * - 0 to turn off sanity checks (default; smaller code) + * - >= 1 to check address and port of the response packet to ensure the + * response comes from the server we sent the request to. + * - >= 2 to check returned Originate Timestamp against Transmit Timestamp + * sent to the server (to ensure response to older request). + * - >= 3 @todo: discard reply if any of the VN, Stratum, or Transmit Timestamp + * fields is 0 or the Mode field is not 4 (unicast) or 5 (broadcast). + * - >= 4 @todo: to check that the Root Delay and Root Dispersion fields are each + * greater than or equal to 0 and less than infinity, where infinity is + * currently a cozy number like one second. This check avoids using a + * server whose synchronization source has expired for a very long time. + */ +#if !defined SNTP_CHECK_RESPONSE || defined __DOXYGEN__ +#define SNTP_CHECK_RESPONSE 0 +#endif + +/** Enable round-trip delay compensation. + * Compensate for the round-trip delay by calculating the clock offset from + * the originate, receive, transmit and destination timestamps, as per RFC. + * + * The calculation requires compiler support for 64-bit integers. 
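A sketch of the lwipopts.h glue for the options above; example_rtc_set_epoch() is a hypothetical application function, and the server name simply reuses the example given for SNTP_SERVER_ADDRESS.

/* lwipopts.h (sketch) */
void example_rtc_set_epoch(u32_t sec);                      /* hypothetical RTC hook */
#define SNTP_SET_SYSTEM_TIME(sec)  example_rtc_set_epoch(sec)
#define SNTP_SERVER_DNS            1
#define SNTP_SERVER_ADDRESS        "pool.ntp.org"           /* default server when DNS names are enabled */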
Also, either + * SNTP_SET_SYSTEM_TIME_US or SNTP_SET_SYSTEM_TIME_NTP has to be implemented + * for setting the system clock with sub-second precision. Likewise, either + * SNTP_GET_SYSTEM_TIME or SNTP_GET_SYSTEM_TIME_NTP needs to be implemented + * with sub-second precision. + * + * Although not strictly required, it makes sense to combine this option with + * SNTP_CHECK_RESPONSE >= 2 for sanity-checking of the received timestamps. + * Also, in order for the round-trip calculation to work, the difference + * between the local clock and the NTP server clock must not be larger than + * about 34 years. If that limit is exceeded, the implementation will fall back + * to setting the clock without compensation. In order to ensure that the local + * clock is always within the permitted range for compensation, even at first + * try, it may be necessary to store at least the current year in non-volatile + * memory. + */ +#if !defined SNTP_COMP_ROUNDTRIP || defined __DOXYGEN__ +#define SNTP_COMP_ROUNDTRIP 0 +#endif + +/** According to the RFC, this shall be a random delay + * between 1 and 5 minutes (in milliseconds) to prevent load peaks. + * This can be defined to a random generation function, + * which must return the delay in milliseconds as u32_t. + * Turned off by default. + */ +#if !defined SNTP_STARTUP_DELAY || defined __DOXYGEN__ +#ifdef LWIP_RAND +#define SNTP_STARTUP_DELAY 1 +#else +#define SNTP_STARTUP_DELAY 0 +#endif +#endif + +/** If you want the startup delay to be a function, define this + * to a function (including the brackets) and define SNTP_STARTUP_DELAY to 1. + */ +#if !defined SNTP_STARTUP_DELAY_FUNC || defined __DOXYGEN__ +#define SNTP_STARTUP_DELAY_FUNC (LWIP_RAND() % 5000) +#endif + +/** SNTP receive timeout - in milliseconds + * Also used as retry timeout - this shouldn't be too low. + * Default is 15 seconds. Must not be beolw 15 seconds by specification (i.e. 15000) + */ +#if !defined SNTP_RECV_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RECV_TIMEOUT 15000 +#endif + +/** SNTP update delay - in milliseconds + * Default is 1 hour. Must not be beolw 60 seconds by specification (i.e. 60000) + */ +#if !defined SNTP_UPDATE_DELAY || defined __DOXYGEN__ +#define SNTP_UPDATE_DELAY 3600000 +#endif + +/** SNTP macro to get system time, used with SNTP_CHECK_RESPONSE >= 2 + * to send in request and compare in response. Also used for round-trip + * delay compensation if SNTP_COMP_ROUNDTRIP != 0. + * Alternatively, define SNTP_GET_SYSTEM_TIME_NTP(sec, frac) in order to + * work with native NTP timestamps instead. + */ +#if !defined SNTP_GET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_GET_SYSTEM_TIME(sec, us) do { (sec) = 0; (us) = 0; } while(0) +#endif + +/** Default retry timeout (in milliseconds) if the response + * received is invalid. + * This is doubled with each retry until SNTP_RETRY_TIMEOUT_MAX is reached. + */ +#if !defined SNTP_RETRY_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT SNTP_RECV_TIMEOUT +#endif + +/** Maximum retry timeout (in milliseconds). */ +#if !defined SNTP_RETRY_TIMEOUT_MAX || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_MAX (SNTP_RETRY_TIMEOUT * 10) +#endif + +/** Increase retry timeout with every retry sent + * Default is on to conform to RFC. + */ +#if !defined SNTP_RETRY_TIMEOUT_EXP || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_EXP 1 +#endif + +/** Keep a reachability shift register per server + * Default is on to conform to RFC. 
+ */ +#if !defined SNTP_MONITOR_SERVER_REACHABILITY || defined __DOXYGEN__ +#define SNTP_MONITOR_SERVER_REACHABILITY 1 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_SNTP_OPTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h new file mode 100644 index 000000000..198f632b6 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h @@ -0,0 +1,106 @@ +/** + * + * @file tftp_opts.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) implementation options + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_OPTS_H +#define LWIP_HDR_APPS_TFTP_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup tftp_opts Options + * @ingroup tftp + * @{ + */ + +/** + * Enable TFTP debug messages + */ +#if !defined TFTP_DEBUG || defined __DOXYGEN__ +#define TFTP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TFTP server port + */ +#if !defined TFTP_PORT || defined __DOXYGEN__ +#define TFTP_PORT LWIP_IANA_PORT_TFTP +#endif + +/** + * TFTP timeout + */ +#if !defined TFTP_TIMEOUT_MSECS || defined __DOXYGEN__ +#define TFTP_TIMEOUT_MSECS 10000 +#endif + +/** + * Max. number of retries when a file is read from server + */ +#if !defined TFTP_MAX_RETRIES || defined __DOXYGEN__ +#define TFTP_MAX_RETRIES 5 +#endif + +/** + * TFTP timer cyclic interval + */ +#if !defined TFTP_TIMER_MSECS || defined __DOXYGEN__ +#define TFTP_TIMER_MSECS (TFTP_TIMEOUT_MSECS / 10) +#endif + +/** + * Max. length of TFTP filename + */ +#if !defined TFTP_MAX_FILENAME_LEN || defined __DOXYGEN__ +#define TFTP_MAX_FILENAME_LEN 20 +#endif + +/** + * Max. 
length of TFTP mode + */ +#if !defined TFTP_MAX_MODE_LEN || defined __DOXYGEN__ +#define TFTP_MAX_MODE_LEN 7 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_TFTP_OPTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h new file mode 100644 index 000000000..0a7fbee02 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h @@ -0,0 +1,95 @@ +/** + * + * @file tftp_server.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_SERVER_H +#define LWIP_HDR_APPS_TFTP_SERVER_H + +#include "lwip/apps/tftp_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup tftp + * TFTP context containing callback functions for TFTP transfers + */ +struct tftp_context { + /** + * Open file for read/write. + * @param fname Filename + * @param mode Mode string from TFTP RFC 1350 (netascii, octet, mail) + * @param write Flag indicating read (0) or write (!= 0) access + * @returns File handle supplied to other functions + */ + void* (*open)(const char* fname, const char* mode, u8_t write); + /** + * Close file handle + * @param handle File handle returned by open() + */ + void (*close)(void* handle); + /** + * Read from file + * @param handle File handle returned by open() + * @param buf Target buffer to copy read data to + * @param bytes Number of bytes to copy to buf + * @returns >= 0: Success; < 0: Error + */ + int (*read)(void* handle, void* buf, int bytes); + /** + * Write to file + * @param handle File handle returned by open() + * @param pbuf PBUF adjusted such that payload pointer points + * to the beginning of write data. In other words, + * TFTP headers are stripped off. 
+ * @returns >= 0: Success; < 0: Error + */ + int (*write)(void* handle, struct pbuf* p); +}; + +err_t tftp_init(const struct tftp_context* ctx); +void tftp_cleanup(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_TFTP_SERVER_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h new file mode 100644 index 000000000..58dae33aa --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h @@ -0,0 +1,393 @@ +/** + * @file + * Support for different processor and compiler architectures + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ARCH_H +#define LWIP_HDR_ARCH_H + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 1234 +#endif + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 4321 +#endif + +#include "arch/cc.h" + +/** + * @defgroup compiler_abstraction Compiler/platform abstraction + * @ingroup sys_layer + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/cc.h! + * If the compiler does not provide memset() this file must include a + * definition of it, or include a file which defines it. + * These options cannot be \#defined in lwipopts.h since they are not options + * of lwIP itself, but options of the lwIP port to your system. + * @{ + */ + +/** Define the byte order of the system. + * Needed for conversion of network data to host byte order. + * Allowed values: LITTLE_ENDIAN and BIG_ENDIAN + */ +#ifndef BYTE_ORDER +#define BYTE_ORDER LITTLE_ENDIAN +#endif + +/** Define random number generator function of your system */ +#ifdef __DOXYGEN__ +#define LWIP_RAND() ((u32_t)rand()) +#endif + +/** Platform specific diagnostic output.\n + * Note the default implementation pulls in printf, which may + * in turn pull in a lot of standard libary code. 
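Returning to the tftp_context declared in tftp_server.h above, a sketch of an implementation backed by C stdio follows. The use of stdio is an assumption for illustration only; a firmware build would use its own file-system glue, and a full write handler would walk the entire pbuf chain.

#include <stdio.h>
#include "lwip/apps/tftp_server.h"

static void* example_tftp_open(const char* fname, const char* mode, u8_t write)
{
  (void)mode;                                  /* netascii vs. octet not distinguished here */
  return fopen(fname, write ? "wb" : "rb");    /* NULL signals failure */
}

static void example_tftp_close(void* handle)
{
  fclose((FILE*)handle);
}

static int example_tftp_read(void* handle, void* buf, int bytes)
{
  return (int)fread(buf, 1, (size_t)bytes, (FILE*)handle);
}

static int example_tftp_write(void* handle, struct pbuf* p)
{
  /* payload already points past the TFTP header; only the first pbuf is
   * handled here for brevity */
  return (fwrite(p->payload, 1, p->len, (FILE*)handle) == p->len) ? 0 : -1;
}

static const struct tftp_context example_tftp = {
  example_tftp_open, example_tftp_close, example_tftp_read, example_tftp_write
};

/* tftp_init(&example_tftp) then starts the server listening on TFTP_PORT */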
In resource-constrained + * systems, this should be defined to something less resource-consuming. + */ +#ifndef LWIP_PLATFORM_DIAG +#define LWIP_PLATFORM_DIAG(x) do {printf x;} while(0) +#include +#include +#endif + +/** Platform specific assertion handling.\n + * Note the default implementation pulls in printf, fflush and abort, which may + * in turn pull in a lot of standard libary code. In resource-constrained + * systems, this should be defined to something less resource-consuming. + */ +#ifndef LWIP_PLATFORM_ASSERT +#define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \ + x, __LINE__, __FILE__); fflush(NULL); abort();} while(0) +#include +#include +#endif + +/** Define this to 1 in arch/cc.h of your port if you do not want to + * include stddef.h header to get size_t. You need to typedef size_t + * by yourself in this case. + */ +#ifndef LWIP_NO_STDDEF_H +#define LWIP_NO_STDDEF_H 0 +#endif + +#if !LWIP_NO_STDDEF_H +#include /* for size_t */ +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the stdint.h header. You need to typedef the generic types listed in + * lwip/arch.h yourself in this case (u8_t, u16_t...). + */ +#ifndef LWIP_NO_STDINT_H +#define LWIP_NO_STDINT_H 0 +#endif + +/* Define generic types used in lwIP */ +#if !LWIP_NO_STDINT_H +#include +/* stdint.h is C99 which should also provide support for 64-bit integers */ +#if !defined(LWIP_HAVE_INT64) && defined(UINT64_MAX) +#define LWIP_HAVE_INT64 1 +#endif +typedef uint8_t u8_t; +typedef int8_t s8_t; +typedef uint16_t u16_t; +typedef int16_t s16_t; +typedef uint32_t u32_t; +typedef int32_t s32_t; +#if LWIP_HAVE_INT64 +typedef uint64_t u64_t; +typedef int64_t s64_t; +#endif +typedef uintptr_t mem_ptr_t; +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the inttypes.h header. You need to define the format strings listed in + * lwip/arch.h yourself in this case (X8_F, U16_F...). + */ +#ifndef LWIP_NO_INTTYPES_H +#define LWIP_NO_INTTYPES_H 0 +#endif + +/* Define (sn)printf formatters for these lwIP types */ +#if !LWIP_NO_INTTYPES_H +#include +#ifndef X8_F +#define X8_F "02" PRIx8 +#endif +#ifndef U16_F +#define U16_F PRIu16 +#endif +#ifndef S16_F +#define S16_F PRId16 +#endif +#ifndef X16_F +#define X16_F PRIx16 +#endif +#ifndef U32_F +#define U32_F PRIu32 +#endif +#ifndef S32_F +#define S32_F PRId32 +#endif +#ifndef X32_F +#define X32_F PRIx32 +#endif +#ifndef SZT_F +#define SZT_F PRIuPTR +#endif +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the limits.h header. You need to define the type limits yourself in this case + * (e.g. INT_MAX, SSIZE_MAX). + */ +#ifndef LWIP_NO_LIMITS_H +#define LWIP_NO_LIMITS_H 0 +#endif + +/* Include limits.h? */ +#if !LWIP_NO_LIMITS_H +#include +#endif + +/* Do we need to define ssize_t? This is a compatibility hack: + * Unfortunately, this type seems to be unavailable on some systems (even if + * sys/types or unistd.h are available). + * Being like that, we define it to 'int' if SSIZE_MAX is not defined. 
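As the notes on LWIP_PLATFORM_DIAG and LWIP_PLATFORM_ASSERT above suggest, a resource-constrained port would typically override both in its arch/cc.h; example_log() and example_fatal() are hypothetical application hooks shown only to illustrate the expected call shape.

/* arch/cc.h (sketch) */
void example_log(const char *fmt, ...);                           /* hypothetical lightweight logger */
void example_fatal(const char *msg, const char *file, int line);  /* hypothetical fatal handler */

#define LWIP_PLATFORM_DIAG(x)   do { example_log x; } while (0)
#define LWIP_PLATFORM_ASSERT(x) do { example_fatal((x), __FILE__, __LINE__); } while (0)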
+ */ +#ifdef SSIZE_MAX +/* If SSIZE_MAX is defined, unistd.h should provide the type as well */ +#ifndef LWIP_NO_UNISTD_H +#define LWIP_NO_UNISTD_H 0 +#endif +#if !LWIP_NO_UNISTD_H +#include <unistd.h> +#endif +#else /* SSIZE_MAX */ +typedef int ssize_t; +#define SSIZE_MAX INT_MAX +#endif /* SSIZE_MAX */ + +/* some maximum values needed in lwip code */ +#define LWIP_UINT32_MAX 0xffffffff + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the ctype.h header. If ctype.h is available, a few character functions + * are mapped to the appropriate functions (lwip_islower, lwip_isdigit...), if + * not, a private implementation is provided. + */ +#ifndef LWIP_NO_CTYPE_H +#define LWIP_NO_CTYPE_H 0 +#endif + +#if LWIP_NO_CTYPE_H +#define lwip_in_range(c, lo, up) ((u8_t)(c) >= (lo) && (u8_t)(c) <= (up)) +#define lwip_isdigit(c) lwip_in_range((c), '0', '9') +#define lwip_isxdigit(c) (lwip_isdigit(c) || lwip_in_range((c), 'a', 'f') || lwip_in_range((c), 'A', 'F')) +#define lwip_islower(c) lwip_in_range((c), 'a', 'z') +#define lwip_isspace(c) ((c) == ' ' || (c) == '\f' || (c) == '\n' || (c) == '\r' || (c) == '\t' || (c) == '\v') +#define lwip_isupper(c) lwip_in_range((c), 'A', 'Z') +#define lwip_tolower(c) (lwip_isupper(c) ? (c) - 'A' + 'a' : c) +#define lwip_toupper(c) (lwip_islower(c) ? (c) - 'a' + 'A' : c) +#else +#include <ctype.h> +#define lwip_isdigit(c) isdigit((unsigned char)(c)) +#define lwip_isxdigit(c) isxdigit((unsigned char)(c)) +#define lwip_islower(c) islower((unsigned char)(c)) +#define lwip_isspace(c) isspace((unsigned char)(c)) +#define lwip_isupper(c) isupper((unsigned char)(c)) +#define lwip_tolower(c) tolower((unsigned char)(c)) +#define lwip_toupper(c) toupper((unsigned char)(c)) +#endif + +/** C++ const_cast<target_type>(val) equivalent to remove constness from a value (GCC -Wcast-qual) */ +#ifndef LWIP_CONST_CAST +#define LWIP_CONST_CAST(target_type, val) ((target_type)((ptrdiff_t)val)) +#endif + +/** Get rid of alignment cast warnings (GCC -Wcast-align) */ +#ifndef LWIP_ALIGNMENT_CAST +#define LWIP_ALIGNMENT_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Get rid of warnings related to pointer-to-numeric and vice-versa casts, + * e.g. "conversion from 'u8_t' to 'void *' of greater size" + */ +#ifndef LWIP_PTR_NUMERIC_CAST +#define LWIP_PTR_NUMERIC_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Avoid warnings/errors related to implicitly casting away packed attributes by doing a explicit cast */ +#ifndef LWIP_PACKED_CAST +#define LWIP_PACKED_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Allocates a memory buffer of specified size that is of sufficient size to align + * its start address using LWIP_MEM_ALIGN. + * You can declare your own version here e.g. to enforce alignment without adding + * trailing padding bytes (see LWIP_MEM_ALIGN_BUFFER) or your own section placement + * requirements.\n + * e.g. if you use gcc and need 32 bit alignment:\n + * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[size] \_\_attribute\_\_((aligned(4)))\n + * or more portable:\n + * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u32_t variable_name[(size + sizeof(u32_t) - 1) / sizeof(u32_t)] + */ +#ifndef LWIP_DECLARE_MEMORY_ALIGNED +#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[LWIP_MEM_ALIGN_BUFFER(size)] +#endif + +/** Calculate memory size for an aligned buffer - returns the next highest + * multiple of MEM_ALIGNMENT (e.g.
LWIP_MEM_ALIGN_SIZE(3) and + * LWIP_MEM_ALIGN_SIZE(4) will both yield 4 for MEM_ALIGNMENT == 4). + */ +#ifndef LWIP_MEM_ALIGN_SIZE +#define LWIP_MEM_ALIGN_SIZE(size) (((size) + MEM_ALIGNMENT - 1U) & ~(MEM_ALIGNMENT-1U)) +#endif + +/** Calculate safe memory size for an aligned buffer when using an unaligned + * type as storage. This includes a safety-margin on (MEM_ALIGNMENT - 1) at the + * start (e.g. if buffer is u8_t[] and actual data will be u32_t*) + */ +#ifndef LWIP_MEM_ALIGN_BUFFER +#define LWIP_MEM_ALIGN_BUFFER(size) (((size) + MEM_ALIGNMENT - 1U)) +#endif + +/** Align a memory pointer to the alignment defined by MEM_ALIGNMENT + * so that ADDR % MEM_ALIGNMENT == 0 + */ +#ifndef LWIP_MEM_ALIGN +#define LWIP_MEM_ALIGN(addr) ((void *)(((mem_ptr_t)(addr) + MEM_ALIGNMENT - 1) & ~(mem_ptr_t)(MEM_ALIGNMENT-1))) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** Packed structs support. + * Placed BEFORE declaration of a packed struct.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_BEGIN +#define PACK_STRUCT_BEGIN +#endif /* PACK_STRUCT_BEGIN */ + +/** Packed structs support. + * Placed AFTER declaration of a packed struct.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_END +#define PACK_STRUCT_END +#endif /* PACK_STRUCT_END */ + +/** Packed structs support. + * Placed between end of declaration of a packed struct and trailing semicolon.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_STRUCT +#if defined(__GNUC__) || defined(__clang__) +#define PACK_STRUCT_STRUCT __attribute__((packed)) +#else +#define PACK_STRUCT_STRUCT +#endif +#endif /* PACK_STRUCT_STRUCT */ + +/** Packed structs support. + * Wraps u32_t and u16_t members.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FIELD +#define PACK_STRUCT_FIELD(x) x +#endif /* PACK_STRUCT_FIELD */ + +/** Packed structs support. + * Wraps u8_t members, where some compilers warn that packing is not necessary.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FLD_8 +#define PACK_STRUCT_FLD_8(x) PACK_STRUCT_FIELD(x) +#endif /* PACK_STRUCT_FLD_8 */ + +/** Packed structs support. + * Wraps members that are packed structs themselves, where some compilers warn that packing is not necessary.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. 
+ */ +#ifndef PACK_STRUCT_FLD_S +#define PACK_STRUCT_FLD_S(x) PACK_STRUCT_FIELD(x) +#endif /* PACK_STRUCT_FLD_S */ + +/** PACK_STRUCT_USE_INCLUDES==1: Packed structs support using \#include files before and after struct to be packed.\n + * The file included BEFORE the struct is "arch/bpstruct.h".\n + * The file included AFTER the struct is "arch/epstruct.h".\n + * This can be used to implement struct packing on MS Visual C compilers, see + * the Win32 port in the lwIP contrib repository for reference. + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifdef __DOXYGEN__ +#define PACK_STRUCT_USE_INCLUDES +#endif + +/** Eliminates compiler warning about unused arguments (GCC -Wextra -Wunused). */ +#ifndef LWIP_UNUSED_ARG +#define LWIP_UNUSED_ARG(x) (void)x +#endif /* LWIP_UNUSED_ARG */ + +/** LWIP_PROVIDE_ERRNO==1: Let lwIP provide ERRNO values and the 'errno' variable. + * If this is disabled, cc.h must either define 'errno', include , + * define LWIP_ERRNO_STDINCLUDE to get included or + * define LWIP_ERRNO_INCLUDE to or equivalent. + */ +#if defined __DOXYGEN__ +#define LWIP_PROVIDE_ERRNO +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ARCH_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h new file mode 100644 index 000000000..1d85bccff --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h @@ -0,0 +1,99 @@ +/** + * @file + * + * AutoIP Automatic LinkLocal IP Configuration + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. 
+ * + */ + +#ifndef LWIP_HDR_AUTOIP_H +#define LWIP_HDR_AUTOIP_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" +/* #include "lwip/udp.h" */ +#include "lwip/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** AutoIP Timing */ +#define AUTOIP_TMR_INTERVAL 100 +#define AUTOIP_TICKS_PER_SECOND (1000 / AUTOIP_TMR_INTERVAL) + +/** AutoIP state information per netif */ +struct autoip +{ + /** the currently selected, probed, announced or used LL IP-Address */ + ip4_addr_t llipaddr; + /** current AutoIP state machine state */ + u8_t state; + /** sent number of probes or announces, dependent on state */ + u8_t sent_num; + /** ticks to wait, tick is AUTOIP_TMR_INTERVAL long */ + u16_t ttw; + /** ticks until a conflict can be solved by defending */ + u8_t lastconflict; + /** total number of probed/used Link Local IP-Addresses */ + u8_t tried_llipaddr; +}; + + +void autoip_set_struct(struct netif *netif, struct autoip *autoip); +/** Remove a struct autoip previously set to the netif using autoip_set_struct() */ +#define autoip_remove_struct(netif) do { (netif)->autoip = NULL; } while (0) +err_t autoip_start(struct netif *netif); +err_t autoip_stop(struct netif *netif); +void autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr); +void autoip_tmr(void); +void autoip_network_changed(struct netif *netif); +u8_t autoip_supplied_address(const struct netif *netif); + +/* for lwIP internal use by ip4.c */ +u8_t autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr); + +#define netif_autoip_data(netif) ((struct autoip*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ + +#endif /* LWIP_HDR_AUTOIP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h new file mode 100644 index 000000000..baa6a4090 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h @@ -0,0 +1,161 @@ +/** + * @file + * Debug messages infrastructure + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_DEBUG_H +#define LWIP_HDR_DEBUG_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +/** + * @defgroup debugging_levels LWIP_DBG_MIN_LEVEL and LWIP_DBG_TYPES_ON values + * @ingroup lwip_opts_debugmsg + * @{ + */ + +/** @name Debug level (LWIP_DBG_MIN_LEVEL) + * @{ + */ +/** Debug level: ALL messages*/ +#define LWIP_DBG_LEVEL_ALL 0x00 +/** Debug level: Warnings. bad checksums, dropped packets, ... */ +#define LWIP_DBG_LEVEL_WARNING 0x01 +/** Debug level: Serious. memory allocation failures, ... */ +#define LWIP_DBG_LEVEL_SERIOUS 0x02 +/** Debug level: Severe */ +#define LWIP_DBG_LEVEL_SEVERE 0x03 +/** + * @} + */ + +#define LWIP_DBG_MASK_LEVEL 0x03 +/* compatibility define only */ +#define LWIP_DBG_LEVEL_OFF LWIP_DBG_LEVEL_ALL + +/** @name Enable/disable debug messages completely (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF to enable that debug message */ +#define LWIP_DBG_ON 0x80U +/** flag for LWIP_DEBUGF to disable that debug message */ +#define LWIP_DBG_OFF 0x00U +/** + * @} + */ + +/** @name Debug message types (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF indicating a tracing message (to follow program flow) */ +#define LWIP_DBG_TRACE 0x40U +/** flag for LWIP_DEBUGF indicating a state debug message (to follow module states) */ +#define LWIP_DBG_STATE 0x20U +/** flag for LWIP_DEBUGF indicating newly added code, not thoroughly tested yet */ +#define LWIP_DBG_FRESH 0x10U +/** flag for LWIP_DEBUGF to halt after printing this debug message */ +#define LWIP_DBG_HALT 0x08U +/** + * @} + */ + +/** + * @} + */ + +/** + * @defgroup lwip_assertions Assertion handling + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_NOASSERT: Disable LWIP_ASSERT checks: + * To disable assertions define LWIP_NOASSERT in arch/cc.h. + */ +#ifdef __DOXYGEN__ +#define LWIP_NOASSERT +#undef LWIP_NOASSERT +#endif +/** + * @} + */ + +#ifndef LWIP_NOASSERT +#define LWIP_ASSERT(message, assertion) do { if (!(assertion)) { \ + LWIP_PLATFORM_ASSERT(message); }} while(0) +#else /* LWIP_NOASSERT */ +#define LWIP_ASSERT(message, assertion) +#endif /* LWIP_NOASSERT */ + +#ifndef LWIP_ERROR +#ifndef LWIP_NOASSERT +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_ASSERT(message) +#elif defined LWIP_DEBUG +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_DIAG((message)) +#else +#define LWIP_PLATFORM_ERROR(message) +#endif + +/* if "expression" isn't true, then print "message" and execute "handler" expression */ +#define LWIP_ERROR(message, expression, handler) do { if (!(expression)) { \ + LWIP_PLATFORM_ERROR(message); handler;}} while(0) +#endif /* LWIP_ERROR */ + +/** Enable debug message printing, but only if debug message type is enabled + * AND is of correct type AND is at least LWIP_DBG_LEVEL. 
+ */ +#ifdef __DOXYGEN__ +#define LWIP_DEBUG +#undef LWIP_DEBUG +#endif + +#ifdef LWIP_DEBUG +#define LWIP_DEBUGF(debug, message) do { \ + if ( \ + ((debug) & LWIP_DBG_ON) && \ + ((debug) & LWIP_DBG_TYPES_ON) && \ + ((s16_t)((debug) & LWIP_DBG_MASK_LEVEL) >= LWIP_DBG_MIN_LEVEL)) { \ + LWIP_PLATFORM_DIAG(message); \ + if ((debug) & LWIP_DBG_HALT) { \ + while(1); \ + } \ + } \ + } while(0) + +#else /* LWIP_DEBUG */ +#define LWIP_DEBUGF(debug, message) +#endif /* LWIP_DEBUG */ + +#endif /* LWIP_HDR_DEBUG_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/def.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/def.h new file mode 100644 index 000000000..dfb266d18 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/def.h @@ -0,0 +1,152 @@ +/** + * @file + * various utility macros + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup perf Performance measurement + * @ingroup sys_layer + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/perf.h! + * Measurement calls made throughout lwip, these can be defined to nothing. + * - PERF_START: start measuring something. + * - PERF_STOP(x): stop measuring something, and record the result. + */ + +#ifndef LWIP_HDR_DEF_H +#define LWIP_HDR_DEF_H + +/* arch.h might define NULL already */ +#include "lwip/arch.h" +#include "lwip/opt.h" +#if LWIP_PERF +#include "arch/perf.h" +#else /* LWIP_PERF */ +#define PERF_START /* null definition */ +#define PERF_STOP(x) /* null definition */ +#endif /* LWIP_PERF */ + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIP_MAX(x , y) (((x) > (y)) ? (x) : (y)) +#define LWIP_MIN(x , y) (((x) < (y)) ? (x) : (y)) + +/* Get the number of entries in an array ('x' must NOT be a pointer!) 
*/ +#define LWIP_ARRAYSIZE(x) (sizeof(x)/sizeof((x)[0])) + +/** Create u32_t value from bytes */ +#define LWIP_MAKEU32(a,b,c,d) (((u32_t)((a) & 0xff) << 24) | \ + ((u32_t)((b) & 0xff) << 16) | \ + ((u32_t)((c) & 0xff) << 8) | \ + (u32_t)((d) & 0xff)) + +#ifndef NULL +#ifdef __cplusplus +#define NULL 0 +#else +#define NULL ((void *)0) +#endif +#endif + +#if BYTE_ORDER == BIG_ENDIAN +#define lwip_htons(x) ((u16_t)(x)) +#define lwip_ntohs(x) ((u16_t)(x)) +#define lwip_htonl(x) ((u32_t)(x)) +#define lwip_ntohl(x) ((u32_t)(x)) +#define PP_HTONS(x) ((u16_t)(x)) +#define PP_NTOHS(x) ((u16_t)(x)) +#define PP_HTONL(x) ((u32_t)(x)) +#define PP_NTOHL(x) ((u32_t)(x)) +#else /* BYTE_ORDER != BIG_ENDIAN */ +#ifndef lwip_htons +u16_t lwip_htons(u16_t x); +#endif +#define lwip_ntohs(x) lwip_htons(x) + +#ifndef lwip_htonl +u32_t lwip_htonl(u32_t x); +#endif +#define lwip_ntohl(x) lwip_htonl(x) + +/* These macros should be calculated by the preprocessor and are used + with compile-time constants only (so that there is no little-endian + overhead at runtime). */ +#define PP_HTONS(x) ((u16_t)((((x) & (u16_t)0x00ffU) << 8) | (((x) & (u16_t)0xff00U) >> 8))) +#define PP_NTOHS(x) PP_HTONS(x) +#define PP_HTONL(x) ((((x) & (u32_t)0x000000ffUL) << 24) | \ + (((x) & (u32_t)0x0000ff00UL) << 8) | \ + (((x) & (u32_t)0x00ff0000UL) >> 8) | \ + (((x) & (u32_t)0xff000000UL) >> 24)) +#define PP_NTOHL(x) PP_HTONL(x) +#endif /* BYTE_ORDER == BIG_ENDIAN */ + +/* Provide usual function names as macros for users, but this can be turned off */ +#ifndef LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS +#define htons(x) lwip_htons(x) +#define ntohs(x) lwip_ntohs(x) +#define htonl(x) lwip_htonl(x) +#define ntohl(x) lwip_ntohl(x) +#endif + +/* Functions that are not available as standard implementations. + * In cc.h, you can #define these to implementations available on + * your platform to save some code bytes if you use these functions + * in your application, too. + */ + +#ifndef lwip_itoa +/* This can be #defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform */ +void lwip_itoa(char* result, size_t bufsize, int number); +#endif +#ifndef lwip_strnicmp +/* This can be #defined to strnicmp() or strncasecmp() depending on your platform */ +int lwip_strnicmp(const char* str1, const char* str2, size_t len); +#endif +#ifndef lwip_stricmp +/* This can be #defined to stricmp() or strcasecmp() depending on your platform */ +int lwip_stricmp(const char* str1, const char* str2); +#endif +#ifndef lwip_strnstr +/* This can be #defined to strnstr() depending on your platform */ +char* lwip_strnstr(const char* buffer, const char* token, size_t n); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_DEF_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h new file mode 100644 index 000000000..c78aa0baf --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h @@ -0,0 +1,139 @@ +/** + * @file + * DHCP client API + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Leon Woestenberg + * + */ +#ifndef LWIP_HDR_DHCP_H +#define LWIP_HDR_DHCP_H + +#include "lwip/opt.h" + +#if LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" +#include "lwip/udp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** period (in seconds) of the application calling dhcp_coarse_tmr() */ +#define DHCP_COARSE_TIMER_SECS 60 +/** period (in milliseconds) of the application calling dhcp_coarse_tmr() */ +#define DHCP_COARSE_TIMER_MSECS (DHCP_COARSE_TIMER_SECS * 1000UL) +/** period (in milliseconds) of the application calling dhcp_fine_tmr() */ +#define DHCP_FINE_TIMER_MSECS 500 + +#define DHCP_BOOT_FILE_LEN 128U + +/* AutoIP cooperation flags (struct dhcp.autoip_coop_state) */ +typedef enum { + DHCP_AUTOIP_COOP_STATE_OFF = 0, + DHCP_AUTOIP_COOP_STATE_ON = 1 +} dhcp_autoip_coop_state_enum_t; + +struct dhcp +{ + /** transaction identifier of last sent request */ + u32_t xid; + /** track PCB allocation state */ + u8_t pcb_allocated; + /** current DHCP state machine state */ + u8_t state; + /** retries of current request */ + u8_t tries; +#if LWIP_DHCP_AUTOIP_COOP + u8_t autoip_coop_state; +#endif + u8_t subnet_mask_given; + + u16_t request_timeout; /* #ticks with period DHCP_FINE_TIMER_SECS for request timeout */ + u16_t t1_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for renewal time */ + u16_t t2_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for rebind time */ + u16_t t1_renew_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next renew try */ + u16_t t2_rebind_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next rebind try */ + u16_t lease_used; /* #ticks with period DHCP_COARSE_TIMER_SECS since last received DHCP ack */ + u16_t t0_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for lease time */ + ip_addr_t server_ip_addr; /* dhcp server address that offered this lease (ip_addr_t because passed to UDP) */ + ip4_addr_t offered_ip_addr; + ip4_addr_t offered_sn_mask; + ip4_addr_t offered_gw_addr; + + u32_t offered_t0_lease; /* lease period (in seconds) */ + u32_t offered_t1_renew; /* recommended renew time (usually 50% of lease period) */ + u32_t offered_t2_rebind; /* recommended rebind time (usually 87.5 of lease 
period) */ +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_t offered_si_addr; + char boot_file_name[DHCP_BOOT_FILE_LEN]; +#endif /* LWIP_DHCP_BOOTPFILE */ +}; + + +void dhcp_set_struct(struct netif *netif, struct dhcp *dhcp); +/** Remove a struct dhcp previously set to the netif using dhcp_set_struct() */ +#define dhcp_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL) +void dhcp_cleanup(struct netif *netif); +err_t dhcp_start(struct netif *netif); +err_t dhcp_renew(struct netif *netif); +err_t dhcp_release(struct netif *netif); +void dhcp_stop(struct netif *netif); +void dhcp_release_and_stop(struct netif *netif); +void dhcp_inform(struct netif *netif); +void dhcp_network_changed(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +void dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr); +#endif +u8_t dhcp_supplied_address(const struct netif *netif); +/* to be called every minute */ +void dhcp_coarse_tmr(void); +/* to be called every half second */ +void dhcp_fine_tmr(void); + +#if LWIP_DHCP_GET_NTP_SRV +/** This function must exist, in other to add offered NTP servers to + * the NTP (or SNTP) engine. + * See LWIP_DHCP_MAX_NTP_SERVERS */ +extern void dhcp_set_ntp_servers(u8_t num_ntp_servers, const ip4_addr_t* ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#define netif_dhcp_data(netif) ((struct dhcp*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DHCP */ + +#endif /*LWIP_HDR_DHCP_H*/ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h new file mode 100644 index 000000000..5cc4a0159 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h @@ -0,0 +1,104 @@ +/** + * @file + * + * DHCPv6 client: IPv6 address autoconfiguration as per + * RFC 3315 (stateful DHCPv6) and + * RFC 3736 (stateless DHCPv6). + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + */ + +#ifndef LWIP_HDR_IP6_DHCP6_H +#define LWIP_HDR_IP6_DHCP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** period (in milliseconds) of the application calling dhcp6_tmr() */ +#define DHCP6_TIMER_MSECS 500 + +struct dhcp6 +{ + /** transaction identifier of last sent request */ + u32_t xid; + /** track PCB allocation state */ + u8_t pcb_allocated; + /** current DHCPv6 state machine state */ + u8_t state; + /** retries of current request */ + u8_t tries; + /** if request config is triggered while another action is active, this keeps track of it */ + u8_t request_config_pending; + /** #ticks with period DHCP6_TIMER_MSECS for request timeout */ + u16_t request_timeout; +#if LWIP_IPV6_DHCP6_STATEFUL + /* @todo: add more members here to keep track of stateful DHCPv6 data, like lease times */ +#endif /* LWIP_IPV6_DHCP6_STATEFUL */ +}; + +void dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6); +/** Remove a struct dhcp6 previously set to the netif using dhcp6_set_struct() */ +#define dhcp6_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL) +void dhcp6_cleanup(struct netif *netif); + +err_t dhcp6_enable_stateful(struct netif *netif); +err_t dhcp6_enable_stateless(struct netif *netif); +void dhcp6_disable(struct netif *netif); + +void dhcp6_tmr(void); + +void dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config); + +#if LWIP_DHCP6_GET_NTP_SRV +/** This function must exist, in other to add offered NTP servers to + * the NTP (or SNTP) engine. + * See LWIP_DHCP6_MAX_NTP_SERVERS */ +extern void dhcp6_set_ntp_servers(u8_t num_ntp_servers, const ip_addr_t* ntp_server_addrs); +#endif /* LWIP_DHCP6_GET_NTP_SRV */ + +#define netif_dhcp6_data(netif) ((struct dhcp6*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6_DHCP6 */ + +#endif /* LWIP_HDR_IP6_DHCP6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h new file mode 100644 index 000000000..091341544 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h @@ -0,0 +1,131 @@ +/** + * @file + * DNS API + */ + +/** + * lwip DNS resolver header file. + + * Author: Jim Pettinato + * April 2007 + + * ported from uIP resolv.c Copyright (c) 2002-2003, Adam Dunkels. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LWIP_HDR_DNS_H +#define LWIP_HDR_DNS_H + +#include "lwip/opt.h" + +#if LWIP_DNS + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS timer period */ +#define DNS_TMR_INTERVAL 1000 + +/* DNS resolve types: */ +#define LWIP_DNS_ADDRTYPE_IPV4 0 +#define LWIP_DNS_ADDRTYPE_IPV6 1 +#define LWIP_DNS_ADDRTYPE_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define LWIP_DNS_ADDRTYPE_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#if LWIP_IPV4 && LWIP_IPV6 +#ifndef LWIP_DNS_ADDRTYPE_DEFAULT +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4_IPV6 +#endif +#elif LWIP_IPV4 +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4 +#else +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV6 +#endif + +#if DNS_LOCAL_HOSTLIST +/** struct used for local host-list */ +struct local_hostlist_entry { + /** static hostname */ + const char *name; + /** static host address in network byteorder */ + ip_addr_t addr; + struct local_hostlist_entry *next; +}; +#define DNS_LOCAL_HOSTLIST_ELEM(name, addr_init) {name, addr_init, NULL} +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +#ifndef DNS_LOCAL_HOSTLIST_MAX_NAMELEN +#define DNS_LOCAL_HOSTLIST_MAX_NAMELEN DNS_MAX_NAME_LENGTH +#endif +#define LOCALHOSTLIST_ELEM_SIZE ((sizeof(struct local_hostlist_entry) + DNS_LOCAL_HOSTLIST_MAX_NAMELEN + 1)) +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#if LWIP_IPV4 +extern const ip_addr_t dns_mquery_v4group; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +extern const ip_addr_t dns_mquery_v6group; +#endif /* LWIP_IPV6 */ + +/** Callback which is invoked when a hostname is found. + * A function of this type must be implemented by the application using the DNS resolver. + * @param name pointer to the name that was looked up. + * @param ipaddr pointer to an ip_addr_t containing the IP address of the hostname, + * or NULL if the name could not be found (or on any other error). 
+ * @param callback_arg a user-specified callback argument passed to dns_gethostbyname +*/ +typedef void (*dns_found_callback)(const char *name, const ip_addr_t *ipaddr, void *callback_arg); + +void dns_init(void); +void dns_tmr(void); +void dns_setserver(u8_t numdns, const ip_addr_t *dnsserver); +const ip_addr_t* dns_getserver(u8_t numdns); +err_t dns_gethostbyname(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg); +err_t dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg, + u8_t dns_addrtype); + + +#if DNS_LOCAL_HOSTLIST +size_t dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg); +err_t dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype); +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +int dns_local_removehost(const char *hostname, const ip_addr_t *addr); +err_t dns_local_addhost(const char *hostname, const ip_addr_t *addr); +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS */ + +#endif /* LWIP_HDR_DNS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/err.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/err.h new file mode 100644 index 000000000..887d9b3fd --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/err.h @@ -0,0 +1,117 @@ +/** + * @file + * lwIP Error codes + */ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERR_H +#define LWIP_HDR_ERR_H + +#include "lwip/opt.h" +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup infrastructure_errors Error codes + * @ingroup infrastructure + * @{ + */ + +/** Definitions for error constants. */ +typedef enum { +/** No error, everything OK. */ + ERR_OK = 0, +/** Out of memory error. */ + ERR_MEM = -1, +/** Buffer error. 
*/ + ERR_BUF = -2, +/** Timeout. */ + ERR_TIMEOUT = -3, +/** Routing problem. */ + ERR_RTE = -4, +/** Operation in progress */ + ERR_INPROGRESS = -5, +/** Illegal value. */ + ERR_VAL = -6, +/** Operation would block. */ + ERR_WOULDBLOCK = -7, +/** Address in use. */ + ERR_USE = -8, +/** Already connecting. */ + ERR_ALREADY = -9, +/** Conn already established.*/ + ERR_ISCONN = -10, +/** Not connected. */ + ERR_CONN = -11, +/** Low-level netif error */ + ERR_IF = -12, + +/** Connection aborted. */ + ERR_ABRT = -13, +/** Connection reset. */ + ERR_RST = -14, +/** Connection closed. */ + ERR_CLSD = -15, +/** Illegal argument. */ + ERR_ARG = -16 +} err_enum_t; + +/** Define LWIP_ERR_T in cc.h if you want to use + * a different type for your platform (must be signed). */ +#ifdef LWIP_ERR_T +typedef LWIP_ERR_T err_t; +#else /* LWIP_ERR_T */ +typedef s8_t err_t; +#endif /* LWIP_ERR_T*/ + +/** + * @} + */ + +#ifdef LWIP_DEBUG +extern const char *lwip_strerr(err_t err); +#else +#define lwip_strerr(x) "" +#endif /* LWIP_DEBUG */ + +#if !NO_SYS +int err_to_errno(err_t err); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERR_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h new file mode 100644 index 000000000..48d6b539d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h @@ -0,0 +1,198 @@ +/** + * @file + * Posix Errno defines + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERRNO_H +#define LWIP_HDR_ERRNO_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_PROVIDE_ERRNO + +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* I/O error */ +#define ENXIO 6 /* No such device or address */ +#define E2BIG 7 /* Arg list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file number */ +#define ECHILD 10 /* No child processes */ +#define EAGAIN 11 /* Try again */ +#define ENOMEM 12 /* Out of memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#define ENOTBLK 15 /* Block device required */ +#define EBUSY 16 /* Device or resource busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* No such device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* File table overflow */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Not a typewriter */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ +#define EDOM 33 /* Math argument out of domain of func */ +#define ERANGE 34 /* Math result not representable */ +#define EDEADLK 35 /* Resource deadlock would occur */ +#define ENAMETOOLONG 36 /* File name too long */ +#define ENOLCK 37 /* No record locks available */ +#define ENOSYS 38 /* Function not implemented */ +#define ENOTEMPTY 39 /* Directory not empty */ +#define ELOOP 40 /* Too many symbolic links encountered */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define ENOMSG 42 /* No message of desired type */ +#define EIDRM 43 /* Identifier removed */ +#define ECHRNG 44 /* Channel number out of range */ +#define EL2NSYNC 45 /* Level 2 not synchronized */ +#define EL3HLT 46 /* Level 3 halted */ +#define EL3RST 47 /* Level 3 reset */ +#define ELNRNG 48 /* Link number out of range */ +#define EUNATCH 49 /* Protocol driver not attached */ +#define ENOCSI 50 /* No CSI structure available */ +#define EL2HLT 51 /* Level 2 halted */ +#define EBADE 52 /* Invalid exchange */ +#define EBADR 53 /* Invalid request descriptor */ +#define EXFULL 54 /* Exchange full */ +#define ENOANO 55 /* No anode */ +#define EBADRQC 56 /* Invalid request code */ +#define EBADSLT 57 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 59 /* Bad font file format */ +#define ENOSTR 60 /* Device not a stream */ +#define ENODATA 61 /* No data available */ +#define ETIME 62 /* Timer expired */ +#define ENOSR 63 /* Out of streams resources */ +#define ENONET 64 /* Machine is not on the network */ +#define ENOPKG 65 /* Package not installed */ +#define EREMOTE 66 /* Object is remote */ +#define ENOLINK 67 /* Link has been severed */ +#define EADV 68 /* Advertise error */ +#define ESRMNT 69 /* Srmount error */ +#define ECOMM 70 /* Communication error on send */ +#define EPROTO 71 /* Protocol error */ +#define EMULTIHOP 72 /* Multihop attempted */ +#define EDOTDOT 73 /* RFS specific error */ +#define EBADMSG 74 /* Not a data message */ +#define EOVERFLOW 75 /* Value too large for defined 
data type */ +#define ENOTUNIQ 76 /* Name not unique on network */ +#define EBADFD 77 /* File descriptor in bad state */ +#define EREMCHG 78 /* Remote address changed */ +#define ELIBACC 79 /* Can not access a needed shared library */ +#define ELIBBAD 80 /* Accessing a corrupted shared library */ +#define ELIBSCN 81 /* .lib section in a.out corrupted */ +#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 83 /* Cannot exec a shared library directly */ +#define EILSEQ 84 /* Illegal byte sequence */ +#define ERESTART 85 /* Interrupted system call should be restarted */ +#define ESTRPIPE 86 /* Streams pipe error */ +#define EUSERS 87 /* Too many users */ +#define ENOTSOCK 88 /* Socket operation on non-socket */ +#define EDESTADDRREQ 89 /* Destination address required */ +#define EMSGSIZE 90 /* Message too long */ +#define EPROTOTYPE 91 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 92 /* Protocol not available */ +#define EPROTONOSUPPORT 93 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ +#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 96 /* Protocol family not supported */ +#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ +#define EADDRINUSE 98 /* Address already in use */ +#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ +#define ENETDOWN 100 /* Network is down */ +#define ENETUNREACH 101 /* Network is unreachable */ +#define ENETRESET 102 /* Network dropped connection because of reset */ +#define ECONNABORTED 103 /* Software caused connection abort */ +#define ECONNRESET 104 /* Connection reset by peer */ +#define ENOBUFS 105 /* No buffer space available */ +#define EISCONN 106 /* Transport endpoint is already connected */ +#define ENOTCONN 107 /* Transport endpoint is not connected */ +#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 109 /* Too many references: cannot splice */ +#define ETIMEDOUT 110 /* Connection timed out */ +#define ECONNREFUSED 111 /* Connection refused */ +#define EHOSTDOWN 112 /* Host is down */ +#define EHOSTUNREACH 113 /* No route to host */ +#define EALREADY 114 /* Operation already in progress */ +#define EINPROGRESS 115 /* Operation now in progress */ +#define ESTALE 116 /* Stale NFS file handle */ +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ +#define EDQUOT 122 /* Quota exceeded */ + +#define ENOMEDIUM 123 /* No medium found */ +#define EMEDIUMTYPE 124 /* Wrong medium type */ + +#ifndef errno +extern int errno; +#endif + +#else /* LWIP_PROVIDE_ERRNO */ + +/* Define LWIP_ERRNO_STDINCLUDE if you want to include here */ +#ifdef LWIP_ERRNO_STDINCLUDE +#include +#else /* LWIP_ERRNO_STDINCLUDE */ +/* Define LWIP_ERRNO_INCLUDE to an equivalent of to include the error defines here */ +#ifdef LWIP_ERRNO_INCLUDE +#include LWIP_ERRNO_INCLUDE +#endif /* LWIP_ERRNO_INCLUDE */ +#endif /* LWIP_ERRNO_STDINCLUDE */ + +#endif /* LWIP_PROVIDE_ERRNO */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERRNO_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h new file mode 100644 index 000000000..2036b2464 --- 
/dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h @@ -0,0 +1,105 @@ +/** + * @file + * Ethernet output function - handles OUTGOING ethernet level traffic, implements + * ARP resolving. + * To be used in most low-level netif implementations + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_NETIF_ETHARP_H +#define LWIP_HDR_NETIF_ETHARP_H + +#include "lwip/opt.h" + +#if LWIP_ARP || LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/prot/ethernet.h" + +#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/prot/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 seconds period */ +#define ARP_TMR_INTERVAL 1000 + +#if ARP_QUEUEING +/** struct for queueing outgoing packets for unknown address + * defined here to be accessed by memp.h + */ +struct etharp_q_entry { + struct etharp_q_entry *next; + struct pbuf *p; +}; +#endif /* ARP_QUEUEING */ + +#define etharp_init() /* Compatibility define, no init needed. 
*/ +void etharp_tmr(void); +ssize_t etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret); +int etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret); +err_t etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +err_t etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q); +err_t etharp_request(struct netif *netif, const ip4_addr_t *ipaddr); +/** For Ethernet network interfaces, we might want to send "gratuitous ARP"; + * this is an ARP packet sent by a node in order to spontaneously cause other + * nodes to update an entry in their ARP cache. + * From RFC 3220 "IP Mobility Support for IPv4" section 4.6. */ +#define etharp_gratuitous(netif) etharp_request((netif), netif_ip4_addr(netif)) +void etharp_cleanup_netif(struct netif *netif); + +#if ETHARP_SUPPORT_STATIC_ENTRIES +err_t etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr); +err_t etharp_remove_static_entry(const ip4_addr_t *ipaddr); +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +void etharp_input(struct pbuf *p, struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_ARP */ +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#endif /* LWIP_HDR_NETIF_ETHARP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h new file mode 100644 index 000000000..5e88dffd0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h @@ -0,0 +1,68 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ETHIP6_H +#define LWIP_HDR_ETHIP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + + +err_t ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ + +#endif /* LWIP_HDR_ETHIP6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h new file mode 100644 index 000000000..f5a31fd4c --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h @@ -0,0 +1,110 @@ +/** + * @file + * ICMP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ICMP_H +#define LWIP_HDR_ICMP_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp.h" + +#if LWIP_IPV6 && LWIP_ICMP6 +#include "lwip/icmp6.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP destination unreachable codes */ +enum icmp_dur_type { + /** net unreachable */ + ICMP_DUR_NET = 0, + /** host unreachable */ + ICMP_DUR_HOST = 1, + /** protocol unreachable */ + ICMP_DUR_PROTO = 2, + /** port unreachable */ + ICMP_DUR_PORT = 3, + /** fragmentation needed and DF set */ + ICMP_DUR_FRAG = 4, + /** source route failed */ + ICMP_DUR_SR = 5 +}; + +/** ICMP time exceeded codes */ +enum icmp_te_type { + /** time to live exceeded in transit */ + ICMP_TE_TTL = 0, + /** fragment reassembly time exceeded */ + ICMP_TE_FRAG = 1 +}; + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +void icmp_input(struct pbuf *p, struct netif *inp); +void icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t); +void icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t); + +#endif /* LWIP_IPV4 && LWIP_ICMP */ + +#if LWIP_IPV4 && LWIP_IPV6 +#if LWIP_ICMP && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) ((isipv6) ? \ + icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) : \ + icmp_dest_unreach(pbuf, ICMP_DUR_PORT)) +#elif LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) do{ if(!(isipv6)) { icmp_dest_unreach(pbuf, ICMP_DUR_PORT);}}while(0) +#elif LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) do{ if(isipv6) { icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT);}}while(0) +#else +#define icmp_port_unreach(isipv6, pbuf) +#endif +#elif LWIP_IPV6 && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) +#elif LWIP_IPV4 && LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) icmp_dest_unreach(pbuf, ICMP_DUR_PORT) +#else /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) */ +#define icmp_port_unreach(isipv6, pbuf) +#endif /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) LWIP_IPV4*/ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ICMP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h new file mode 100644 index 000000000..0ccb78994 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h @@ -0,0 +1,72 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_ICMP6_H +#define LWIP_HDR_ICMP6_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +void icmp6_input(struct pbuf *p, struct netif *inp); +void icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c); +void icmp6_packet_too_big(struct pbuf *p, u32_t mtu); +void icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c); +void icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr); +void icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer); + +#endif /* LWIP_ICMP6 && LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* LWIP_HDR_ICMP6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h new file mode 100644 index 000000000..39017abd3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h @@ -0,0 +1,68 @@ +/** + * @file + * Interface Identification APIs from: + * RFC 3493: Basic Socket Interface Extensions for IPv6 + * Section 4: Interface Identification + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Joel Cunningham + * + */ +#ifndef LWIP_HDR_IF_H +#define LWIP_HDR_IF_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define IF_NAMESIZE NETIF_NAMESIZE + +char * lwip_if_indextoname(unsigned int ifindex, char *ifname); +unsigned int lwip_if_nametoindex(const char *ifname); + +#if LWIP_COMPAT_SOCKETS +#define if_indextoname(ifindex, ifname) lwip_if_indextoname(ifindex,ifname) +#define if_nametoindex(ifname) lwip_if_nametoindex(ifname) +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_IF_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h new file mode 100644 index 000000000..ffd80e680 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h @@ -0,0 +1,115 @@ +/** + * @file + * IGMP API + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. 
+*/ + +#ifndef LWIP_HDR_IGMP_H +#define LWIP_HDR_IGMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/pbuf.h" + +#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* IGMP timer */ +#define IGMP_TMR_INTERVAL 100 /* Milliseconds */ +#define IGMP_V1_DELAYING_MEMBER_TMR (1000/IGMP_TMR_INTERVAL) +#define IGMP_JOIN_DELAYING_MEMBER_TMR (500 /IGMP_TMR_INTERVAL) + +/* Compatibility defines (don't use for new code) */ +#define IGMP_DEL_MAC_FILTER NETIF_DEL_MAC_FILTER +#define IGMP_ADD_MAC_FILTER NETIF_ADD_MAC_FILTER + +/** + * igmp group structure - there is + * a list of groups for each interface + * these should really be linked from the interface, but + * if we keep them separate we will not affect the lwip original code + * too much + * + * There will be a group for the all systems group address but this + * will not run the state machine as it is used to kick off reports + * from all the other groups + */ +struct igmp_group { + /** next link */ + struct igmp_group *next; + /** multicast address */ + ip4_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting, negative is OFF */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +/* Prototypes */ +void igmp_init(void); +err_t igmp_start(struct netif *netif); +err_t igmp_stop(struct netif *netif); +void igmp_report_groups(struct netif *netif); +struct igmp_group *igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr); +void igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest); +err_t igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +void igmp_tmr(void); + +/** @ingroup igmp + * Get list head of IGMP groups for netif. + * Note: The allsystems group IP is contained in the list as first entry. + * @see @ref netif_set_igmp_mac_filter() + */ +#define netif_igmp_data(netif) ((struct igmp_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_IGMP */ + +#endif /* LWIP_HDR_IGMP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h new file mode 100644 index 000000000..2982a0f4b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h @@ -0,0 +1,169 @@ +/** + * @file + * This file (together with sockets.h) aims to provide structs and functions from + * - arpa/inet.h + * - netinet/in.h + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_H +#define LWIP_HDR_INET_H + +#include "lwip/opt.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's in_addr_t, define IN_ADDR_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(in_addr_t) && !defined(IN_ADDR_T_DEFINED) +typedef u32_t in_addr_t; +#endif + +struct in_addr { + in_addr_t s_addr; +}; + +struct in6_addr { + union { + u32_t u32_addr[4]; + u8_t u8_addr[16]; + } un; +#define s6_addr un.u8_addr +}; + +/** 255.255.255.255 */ +#define INADDR_NONE IPADDR_NONE +/** 127.0.0.1 */ +#define INADDR_LOOPBACK IPADDR_LOOPBACK +/** 0.0.0.0 */ +#define INADDR_ANY IPADDR_ANY +/** 255.255.255.255 */ +#define INADDR_BROADCAST IPADDR_BROADCAST + +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 wildcard address. */ +#define IN6ADDR_ANY_INIT {{{0,0,0,0}}} +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 loopback address. */ +#define IN6ADDR_LOOPBACK_INIT {{{0,0,0,PP_HTONL(1)}}} +/** This variable is initialized by the system to contain the wildcard IPv6 address. */ +extern const struct in6_addr in6addr_any; + +/* Definitions of the bits in an (IPv4) Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IN_CLASSA(a) IP_CLASSA(a) +#define IN_CLASSA_NET IP_CLASSA_NET +#define IN_CLASSA_NSHIFT IP_CLASSA_NSHIFT +#define IN_CLASSA_HOST IP_CLASSA_HOST +#define IN_CLASSA_MAX IP_CLASSA_MAX + +#define IN_CLASSB(b) IP_CLASSB(b) +#define IN_CLASSB_NET IP_CLASSB_NET +#define IN_CLASSB_NSHIFT IP_CLASSB_NSHIFT +#define IN_CLASSB_HOST IP_CLASSB_HOST +#define IN_CLASSB_MAX IP_CLASSB_MAX + +#define IN_CLASSC(c) IP_CLASSC(c) +#define IN_CLASSC_NET IP_CLASSC_NET +#define IN_CLASSC_NSHIFT IP_CLASSC_NSHIFT +#define IN_CLASSC_HOST IP_CLASSC_HOST +#define IN_CLASSC_MAX IP_CLASSC_MAX + +#define IN_CLASSD(d) IP_CLASSD(d) +#define IN_CLASSD_NET IP_CLASSD_NET /* These ones aren't really */ +#define IN_CLASSD_NSHIFT IP_CLASSD_NSHIFT /* net and host fields, but */ +#define IN_CLASSD_HOST IP_CLASSD_HOST /* routing needn't know. 
*/ +#define IN_CLASSD_MAX IP_CLASSD_MAX + +#define IN_MULTICAST(a) IP_MULTICAST(a) + +#define IN_EXPERIMENTAL(a) IP_EXPERIMENTAL(a) +#define IN_BADCLASS(a) IP_BADCLASS(a) + +#define IN_LOOPBACKNET IP_LOOPBACKNET + + +#ifndef INET_ADDRSTRLEN +#define INET_ADDRSTRLEN IP4ADDR_STRLEN_MAX +#endif +#if LWIP_IPV6 +#ifndef INET6_ADDRSTRLEN +#define INET6_ADDRSTRLEN IP6ADDR_STRLEN_MAX +#endif +#endif + +#if LWIP_IPV4 + +#define inet_addr_from_ip4addr(target_inaddr, source_ipaddr) ((target_inaddr)->s_addr = ip4_addr_get_u32(source_ipaddr)) +#define inet_addr_to_ip4addr(target_ipaddr, source_inaddr) (ip4_addr_set_u32(target_ipaddr, (source_inaddr)->s_addr)) + +/* directly map this to the lwip internal functions */ +#define inet_addr(cp) ipaddr_addr(cp) +#define inet_aton(cp, addr) ip4addr_aton(cp, (ip4_addr_t*)addr) +#define inet_ntoa(addr) ip4addr_ntoa((const ip4_addr_t*)&(addr)) +#define inet_ntoa_r(addr, buf, buflen) ip4addr_ntoa_r((const ip4_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define inet6_addr_from_ip6addr(target_in6addr, source_ip6addr) {(target_in6addr)->un.u32_addr[0] = (source_ip6addr)->addr[0]; \ + (target_in6addr)->un.u32_addr[1] = (source_ip6addr)->addr[1]; \ + (target_in6addr)->un.u32_addr[2] = (source_ip6addr)->addr[2]; \ + (target_in6addr)->un.u32_addr[3] = (source_ip6addr)->addr[3];} +#define inet6_addr_to_ip6addr(target_ip6addr, source_in6addr) {(target_ip6addr)->addr[0] = (source_in6addr)->un.u32_addr[0]; \ + (target_ip6addr)->addr[1] = (source_in6addr)->un.u32_addr[1]; \ + (target_ip6addr)->addr[2] = (source_in6addr)->un.u32_addr[2]; \ + (target_ip6addr)->addr[3] = (source_in6addr)->un.u32_addr[3]; \ + ip6_addr_clear_zone(target_ip6addr);} + +/* directly map this to the lwip internal functions */ +#define inet6_aton(cp, addr) ip6addr_aton(cp, (ip6_addr_t*)addr) +#define inet6_ntoa(addr) ip6addr_ntoa((const ip6_addr_t*)&(addr)) +#define inet6_ntoa_r(addr, buf, buflen) ip6addr_ntoa_r((const ip6_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h new file mode 100644 index 000000000..76893ef52 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h @@ -0,0 +1,105 @@ +/** + * @file + * IP checksum calculation functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_CHKSUM_H +#define LWIP_HDR_INET_CHKSUM_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +/** Swap the bytes in an u16_t: much like lwip_htons() for little-endian */ +#ifndef SWAP_BYTES_IN_WORD +#define SWAP_BYTES_IN_WORD(w) (((w) & 0xff) << 8) | (((w) & 0xff00) >> 8) +#endif /* SWAP_BYTES_IN_WORD */ + +/** Split an u32_t in two u16_ts and add them up */ +#ifndef FOLD_U32T +#define FOLD_U32T(u) ((u32_t)(((u) >> 16) + ((u) & 0x0000ffffUL))) +#endif + +#if LWIP_CHECKSUM_ON_COPY +/** Function-like macro: same as MEMCPY but returns the checksum of copied data + as u16_t */ +# ifndef LWIP_CHKSUM_COPY +# define LWIP_CHKSUM_COPY(dst, src, len) lwip_chksum_copy(dst, src, len) +# ifndef LWIP_CHKSUM_COPY_ALGORITHM +# define LWIP_CHKSUM_COPY_ALGORITHM 1 +# endif /* LWIP_CHKSUM_COPY_ALGORITHM */ +# else /* LWIP_CHKSUM_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +# endif /* LWIP_CHKSUM_COPY */ +#else /* LWIP_CHECKSUM_ON_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +extern "C" { +#endif + +u16_t inet_chksum(const void *dataptr, u16_t len); +u16_t inet_chksum_pbuf(struct pbuf *p); +#if LWIP_CHKSUM_COPY_ALGORITHM +u16_t lwip_chksum_copy(void *dst, const void *src, u16_t len); +#endif /* LWIP_CHKSUM_COPY_ALGORITHM */ + +#if LWIP_IPV4 +u16_t inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest); +u16_t inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, + u16_t proto_len, u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest); +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +u16_t ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest); +u16_t ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest); +#endif /* LWIP_IPV6 */ + + +u16_t ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest); +u16_t ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/init.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/init.h new file mode 100644 index 000000000..a149be184 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/init.h @@ -0,0 +1,100 @@ +/** + * @file + * lwIP initialization API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INIT_H +#define LWIP_HDR_INIT_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup lwip_version Version + * @ingroup lwip + * @{ + */ + +/** X.x.x: Major version of the stack */ +#define LWIP_VERSION_MAJOR 2 +/** x.X.x: Minor version of the stack */ +#define LWIP_VERSION_MINOR 1 +/** x.x.X: Revision of the stack */ +#define LWIP_VERSION_REVISION 2 +/** For release candidates, this is set to 1..254 + * For official releases, this is set to 255 (LWIP_RC_RELEASE) + * For development versions (Git), this is set to 0 (LWIP_RC_DEVELOPMENT) */ +#define LWIP_VERSION_RC LWIP_RC_RELEASE + +/** LWIP_VERSION_RC is set to LWIP_RC_RELEASE for official releases */ +#define LWIP_RC_RELEASE 255 +/** LWIP_VERSION_RC is set to LWIP_RC_DEVELOPMENT for Git versions */ +#define LWIP_RC_DEVELOPMENT 0 + +#define LWIP_VERSION_IS_RELEASE (LWIP_VERSION_RC == LWIP_RC_RELEASE) +#define LWIP_VERSION_IS_DEVELOPMENT (LWIP_VERSION_RC == LWIP_RC_DEVELOPMENT) +#define LWIP_VERSION_IS_RC ((LWIP_VERSION_RC != LWIP_RC_RELEASE) && (LWIP_VERSION_RC != LWIP_RC_DEVELOPMENT)) + +/* Some helper defines to get a version string */ +#define LWIP_VERSTR2(x) #x +#define LWIP_VERSTR(x) LWIP_VERSTR2(x) +#if LWIP_VERSION_IS_RELEASE +#define LWIP_VERSION_STRING_SUFFIX "" +#elif LWIP_VERSION_IS_DEVELOPMENT +#define LWIP_VERSION_STRING_SUFFIX "d" +#else +#define LWIP_VERSION_STRING_SUFFIX "rc" LWIP_VERSTR(LWIP_VERSION_RC) +#endif + +/** Provides the version of the stack */ +#define LWIP_VERSION ((LWIP_VERSION_MAJOR) << 24 | (LWIP_VERSION_MINOR) << 16 | \ + (LWIP_VERSION_REVISION) << 8 | (LWIP_VERSION_RC)) +/** Provides the version of the stack as string */ +#define LWIP_VERSION_STRING LWIP_VERSTR(LWIP_VERSION_MAJOR) "." LWIP_VERSTR(LWIP_VERSION_MINOR) "." 
LWIP_VERSTR(LWIP_VERSION_REVISION) LWIP_VERSION_STRING_SUFFIX + +/** + * @} + */ + +/* Modules initialization */ +void lwip_init(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INIT_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h new file mode 100644 index 000000000..653c3b2f4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h @@ -0,0 +1,330 @@ +/** + * @file + * IP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_H +#define LWIP_HDR_IP_H + +#include "lwip/opt.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/ip6.h" +#include "lwip/prot/ip.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is passed as the destination address to ip_output_if (not + to ip_output), meaning that an IP header already is constructed + in the pbuf. This is used when TCP retransmits. */ +#define LWIP_IP_HDRINCL NULL + +/** pbufs passed to IP must have a ref-count of 1 as their payload pointer + gets altered as the packet is passed down the stack */ +#ifndef LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX +#define LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p) LWIP_ASSERT("p->ref == 1", (p)->ref == 1) +#endif + +#if LWIP_NETIF_USE_HINTS +#define IP_PCB_NETIFHINT ;struct netif_hint netif_hints +#else /* LWIP_NETIF_USE_HINTS */ +#define IP_PCB_NETIFHINT +#endif /* LWIP_NETIF_USE_HINTS */ + +/** This is the common part of all PCB types. It needs to be at the + beginning of a PCB type definition. It is located here so that + changes to this common part are made in one location instead of + having to change all PCB structs. 
*/ +#define IP_PCB \ + /* ip addresses in network byte order */ \ + ip_addr_t local_ip; \ + ip_addr_t remote_ip; \ + /* Bound netif index */ \ + u8_t netif_idx; \ + /* Socket options */ \ + u8_t so_options; \ + /* Type Of Service */ \ + u8_t tos; \ + /* Time To Live */ \ + u8_t ttl \ + /* link layer address resolution hint */ \ + IP_PCB_NETIFHINT + +struct ip_pcb { + /* Common members of all PCB types */ + IP_PCB; +}; + +/* + * Option flags per-socket. These are the same like SO_XXX in sockets.h + */ +#define SOF_REUSEADDR 0x04U /* allow local address reuse */ +#define SOF_KEEPALIVE 0x08U /* keep connections alive */ +#define SOF_BROADCAST 0x20U /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + +/* These flags are inherited (e.g. from a listen-pcb to a connection-pcb): */ +#define SOF_INHERITED (SOF_REUSEADDR|SOF_KEEPALIVE) + +/** Global variables of this module, kept in a struct for efficient access using base+index. */ +struct ip_globals +{ + /** The interface that accepted the packet for the current callback invocation. */ + struct netif *current_netif; + /** The interface that received the packet for the current callback invocation. */ + struct netif *current_input_netif; +#if LWIP_IPV4 + /** Header of the input packet currently being processed. */ + const struct ip_hdr *current_ip4_header; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Header of the input IPv6 packet currently being processed. */ + struct ip6_hdr *current_ip6_header; +#endif /* LWIP_IPV6 */ + /** Total header length of current_ip4/6_header (i.e. after this, the UDP/TCP header starts) */ + u16_t current_ip_header_tot_len; + /** Source IP address of current_header */ + ip_addr_t current_iphdr_src; + /** Destination IP address of current_header */ + ip_addr_t current_iphdr_dest; +}; +extern struct ip_globals ip_data; + + +/** Get the interface that accepted the current packet. + * This may or may not be the receiving netif, depending on your netif/network setup. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_netif() (ip_data.current_netif) +/** Get the interface that received the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_input_netif() (ip_data.current_input_netif) +/** Total header length of ip(6)_current_header() (i.e. after this, the UDP/TCP header starts) */ +#define ip_current_header_tot_len() (ip_data.current_ip_header_tot_len) +/** Source IP address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +#if LWIP_IPV4 && LWIP_IPV6 +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ip_data.current_ip4_header +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
*/ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Returns TRUE if the current IP input packet is IPv6, FALSE if it is IPv4 */ +#define ip_current_is_v6() (ip6_current_header() != NULL) +/** Source IPv6 address of current_header */ +#define ip6_current_src_addr() (ip_2_ip6(&ip_data.current_iphdr_src)) +/** Destination IPv6 address of current_header */ +#define ip6_current_dest_addr() (ip_2_ip6(&ip_data.current_iphdr_dest)) +/** Get the transport layer protocol */ +#define ip_current_header_proto() (ip_current_is_v6() ? \ + IP6H_NEXTH(ip6_current_header()) :\ + IPH_PROTO(ip4_current_header())) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((ip_current_is_v6() ? \ + (const u8_t*)ip6_current_header() : (const u8_t*)ip4_current_header()) + ip_current_header_tot_len())) + +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (ip_2_ip4(&ip_data.current_iphdr_src)) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (ip_2_ip4(&ip_data.current_iphdr_dest)) + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ip_data.current_ip4_header +/** Always returns FALSE when only supporting IPv4 only */ +#define ip_current_is_v6() 0 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IPH_PROTO(ip4_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((const u8_t*)ip4_current_header() + ip_current_header_tot_len())) +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (&ip_data.current_iphdr_dest) + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
*/ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Always returns TRUE when only supporting IPv6 only */ +#define ip_current_is_v6() 1 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IP6H_NEXTH(ip6_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)(((const u8_t*)ip6_current_header()) + ip_current_header_tot_len())) +/** Source IP6 address of current_header */ +#define ip6_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP6 address of current_header */ +#define ip6_current_dest_addr() (&ip_data.current_iphdr_dest) + +#endif /* LWIP_IPV6 */ + +/** Union source address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Union destination address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +/** Gets an IP pcb option (SOF_* flags) */ +#define ip_get_option(pcb, opt) ((pcb)->so_options & (opt)) +/** Sets an IP pcb option (SOF_* flags) */ +#define ip_set_option(pcb, opt) ((pcb)->so_options = (u8_t)((pcb)->so_options | (opt))) +/** Resets an IP pcb option (SOF_* flags) */ +#define ip_reset_option(pcb, opt) ((pcb)->so_options = (u8_t)((pcb)->so_options & ~(opt))) + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ip + * Output IP packet, netif is selected by source address + */ +#define ip_output(p, src, dest, ttl, tos, proto) \ + (IP_IS_V6(dest) ? \ + ip6_output(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto) : \ + ip4_output(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto)) +/** + * @ingroup ip + * Output IP packet to specified interface + */ +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** + * @ingroup ip + * Output IP packet to interface specifying source address + */ +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if_src(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if_src(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** Output IP packet that already includes an IP header. */ +#define ip_output_if_hdrincl(p, src, dest, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), LWIP_IP_HDRINCL, 0, 0, 0, netif) : \ + ip4_output_if(p, ip_2_ip4(src), LWIP_IP_HDRINCL, 0, 0, 0, netif)) +/** Output IP packet with netif_hint */ +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + (IP_IS_V6(dest) ? \ + ip6_output_hinted(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif_hint) : \ + ip4_output_hinted(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif_hint)) +/** + * @ingroup ip + * Get netif for address combination. See \ref ip6_route and \ref ip4_route + */ +#define ip_route(src, dest) \ + (IP_IS_V6(dest) ? \ + ip6_route(ip_2_ip6(src), ip_2_ip6(dest)) : \ + ip4_route_src(ip_2_ip4(src), ip_2_ip4(dest))) +/** + * @ingroup ip + * Get netif for IP. + */ +#define ip_netif_get_local_ip(netif, dest) (IP_IS_V6(dest) ? \ + ip6_netif_get_local_ip(netif, ip_2_ip6(dest)) : \ + ip4_netif_get_local_ip(netif)) +#define ip_debug_print(is_ipv6, p) ((is_ipv6) ? 
ip6_debug_print(p) : ip4_debug_print(p)) + +err_t ip_input(struct pbuf *p, struct netif *inp); + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip4_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + ip4_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) +#define ip_output_if_hdrincl(p, src, dest, netif) \ + ip4_output_if(p, src, LWIP_IP_HDRINCL, 0, 0, 0, netif) +#define ip_route(src, dest) \ + ip4_route_src(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip4_netif_get_local_ip(netif) +#define ip_debug_print(is_ipv6, p) ip4_debug_print(p) + +#define ip_input ip4_input + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip6_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + ip6_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) +#define ip_output_if_hdrincl(p, src, dest, netif) \ + ip6_output_if(p, src, LWIP_IP_HDRINCL, 0, 0, 0, netif) +#define ip_route(src, dest) \ + ip6_route(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip6_netif_get_local_ip(netif, dest) +#define ip_debug_print(is_ipv6, p) ip6_debug_print(p) + +#define ip_input ip6_input + +#endif /* LWIP_IPV6 */ + +#define ip_route_get_local_ip(src, dest, netif, ipaddr) do { \ + (netif) = ip_route(src, dest); \ + (ipaddr) = ip_netif_get_local_ip(netif, dest); \ +}while(0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h new file mode 100644 index 000000000..fd35a3369 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h @@ -0,0 +1,111 @@ +/** + * @file + * IPv4 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_H +#define LWIP_HDR_IP4_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +#define LWIP_IPV4_SRC_ROUTING 1 +#else +#define LWIP_IPV4_SRC_ROUTING 0 +#endif + +/** Currently, the function ip_output_if_opt() is only used with IGMP */ +#define IP_OPTIONS_SEND (LWIP_IPV4 && LWIP_IGMP) + +#define ip_init() /* Compatibility define, no init needed. */ +struct netif *ip4_route(const ip4_addr_t *dest); +#if LWIP_IPV4_SRC_ROUTING +struct netif *ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest); +#else /* LWIP_IPV4_SRC_ROUTING */ +#define ip4_route_src(src, dest) ip4_route(dest) +#endif /* LWIP_IPV4_SRC_ROUTING */ +err_t ip4_input(struct pbuf *p, struct netif *inp); +err_t ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto); +err_t ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +err_t ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +#if LWIP_NETIF_USE_HINTS +err_t ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint); +#endif /* LWIP_NETIF_USE_HINTS */ +#if IP_OPTIONS_SEND +err_t ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +err_t ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +#endif /* IP_OPTIONS_SEND */ + +#if LWIP_MULTICAST_TX_OPTIONS +void ip4_set_default_multicast_netif(struct netif* default_multicast_netif); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#define ip4_netif_get_local_ip(netif) (((netif) != NULL) ? netif_ip_addr4(netif) : NULL) + +#if IP_DEBUG +void ip4_debug_print(struct pbuf *p); +#else +#define ip4_debug_print(p) +#endif /* IP_DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h new file mode 100644 index 000000000..f244c4f54 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h @@ -0,0 +1,216 @@ +/** + * @file + * IPv4 address API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_ADDR_H +#define LWIP_HDR_IP4_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the aligned version of ip4_addr_t, + used as local variable, on the stack, etc. */ +struct ip4_addr { + u32_t addr; +}; + +/** ip4_addr_t uses a struct for convenience only, so that the same defines can + * operate both on ip4_addr_t as well as on ip4_addr_p_t. */ +typedef struct ip4_addr ip4_addr_t; + +/* Forward declaration to not include netif.h */ +struct netif; + +/** 255.255.255.255 */ +#define IPADDR_NONE ((u32_t)0xffffffffUL) +/** 127.0.0.1 */ +#define IPADDR_LOOPBACK ((u32_t)0x7f000001UL) +/** 0.0.0.0 */ +#define IPADDR_ANY ((u32_t)0x00000000UL) +/** 255.255.255.255 */ +#define IPADDR_BROADCAST ((u32_t)0xffffffffUL) + +/* Definitions of the bits in an Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IP_CLASSA(a) ((((u32_t)(a)) & 0x80000000UL) == 0) +#define IP_CLASSA_NET 0xff000000 +#define IP_CLASSA_NSHIFT 24 +#define IP_CLASSA_HOST (0xffffffff & ~IP_CLASSA_NET) +#define IP_CLASSA_MAX 128 + +#define IP_CLASSB(a) ((((u32_t)(a)) & 0xc0000000UL) == 0x80000000UL) +#define IP_CLASSB_NET 0xffff0000 +#define IP_CLASSB_NSHIFT 16 +#define IP_CLASSB_HOST (0xffffffff & ~IP_CLASSB_NET) +#define IP_CLASSB_MAX 65536 + +#define IP_CLASSC(a) ((((u32_t)(a)) & 0xe0000000UL) == 0xc0000000UL) +#define IP_CLASSC_NET 0xffffff00 +#define IP_CLASSC_NSHIFT 8 +#define IP_CLASSC_HOST (0xffffffff & ~IP_CLASSC_NET) + +#define IP_CLASSD(a) (((u32_t)(a) & 0xf0000000UL) == 0xe0000000UL) +#define IP_CLASSD_NET 0xf0000000 /* These ones aren't really */ +#define IP_CLASSD_NSHIFT 28 /* net and host fields, but */ +#define IP_CLASSD_HOST 0x0fffffff /* routing needn't know. 
*/ +#define IP_MULTICAST(a) IP_CLASSD(a) + +#define IP_EXPERIMENTAL(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) +#define IP_BADCLASS(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) + +#define IP_LOOPBACKNET 127 /* official! */ + +/** Set an IP address given by the four byte-parts */ +#define IP4_ADDR(ipaddr, a,b,c,d) (ipaddr)->addr = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Copy IP address - faster than ip4_addr_set: no NULL check */ +#define ip4_addr_copy(dest, src) ((dest).addr = (src).addr) +/** Safely copy one IP address to another (src may be NULL) */ +#define ip4_addr_set(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0 : \ + (src)->addr)) +/** Set complete address to zero */ +#define ip4_addr_set_zero(ipaddr) ((ipaddr)->addr = 0) +/** Set address to IPADDR_ANY (no need for lwip_htonl()) */ +#define ip4_addr_set_any(ipaddr) ((ipaddr)->addr = IPADDR_ANY) +/** Set address to loopback address */ +#define ip4_addr_set_loopback(ipaddr) ((ipaddr)->addr = PP_HTONL(IPADDR_LOOPBACK)) +/** Check if an address is in the loopback region */ +#define ip4_addr_isloopback(ipaddr) (((ipaddr)->addr & PP_HTONL(IP_CLASSA_NET)) == PP_HTONL(((u32_t)IP_LOOPBACKNET) << 24)) +/** Safely copy one IP address to another and change byte order + * from host- to network-order. */ +#define ip4_addr_set_hton(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0:\ + lwip_htonl((src)->addr))) +/** IPv4 only: set the IP address given as an u32_t */ +#define ip4_addr_set_u32(dest_ipaddr, src_u32) ((dest_ipaddr)->addr = (src_u32)) +/** IPv4 only: get the IP address as an u32_t */ +#define ip4_addr_get_u32(src_ipaddr) ((src_ipaddr)->addr) + +/** Get the network address by combining host address with netmask */ +#define ip4_addr_get_network(target, host, netmask) do { ((target)->addr = ((host)->addr) & ((netmask)->addr)); } while(0) + +/** + * Determine if two address are on the same network. + * + * @arg addr1 IP address 1 + * @arg addr2 IP address 2 + * @arg mask network identifier mask + * @return !0 if the network identifiers of both address match + */ +#define ip4_addr_netcmp(addr1, addr2, mask) (((addr1)->addr & \ + (mask)->addr) == \ + ((addr2)->addr & \ + (mask)->addr)) +#define ip4_addr_cmp(addr1, addr2) ((addr1)->addr == (addr2)->addr) + +#define ip4_addr_isany_val(addr1) ((addr1).addr == IPADDR_ANY) +#define ip4_addr_isany(addr1) ((addr1) == NULL || ip4_addr_isany_val(*(addr1))) + +#define ip4_addr_isbroadcast(addr1, netif) ip4_addr_isbroadcast_u32((addr1)->addr, netif) +u8_t ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif); + +#define ip_addr_netmask_valid(netmask) ip4_addr_netmask_valid((netmask)->addr) +u8_t ip4_addr_netmask_valid(u32_t netmask); + +#define ip4_addr_ismulticast(addr1) (((addr1)->addr & PP_HTONL(0xf0000000UL)) == PP_HTONL(0xe0000000UL)) + +#define ip4_addr_islinklocal(addr1) (((addr1)->addr & PP_HTONL(0xffff0000UL)) == PP_HTONL(0xa9fe0000UL)) + +#define ip4_addr_debug_print_parts(debug, a, b, c, d) \ + LWIP_DEBUGF(debug, ("%" U16_F ".%" U16_F ".%" U16_F ".%" U16_F, a, b, c, d)) +#define ip4_addr_debug_print(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? ip4_addr1_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr2_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr3_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? 
ip4_addr4_16(ipaddr) : 0)) +#define ip4_addr_debug_print_val(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + ip4_addr1_16_val(ipaddr), \ + ip4_addr2_16_val(ipaddr), \ + ip4_addr3_16_val(ipaddr), \ + ip4_addr4_16_val(ipaddr)) + +/* Get one byte from the 4-byte address */ +#define ip4_addr_get_byte(ipaddr, idx) (((const u8_t*)(&(ipaddr)->addr))[idx]) +#define ip4_addr1(ipaddr) ip4_addr_get_byte(ipaddr, 0) +#define ip4_addr2(ipaddr) ip4_addr_get_byte(ipaddr, 1) +#define ip4_addr3(ipaddr) ip4_addr_get_byte(ipaddr, 2) +#define ip4_addr4(ipaddr) ip4_addr_get_byte(ipaddr, 3) +/* Get one byte from the 4-byte address, but argument is 'ip4_addr_t', + * not a pointer */ +#define ip4_addr_get_byte_val(ipaddr, idx) ((u8_t)(((ipaddr).addr >> (idx * 8)) & 0xff)) +#define ip4_addr1_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 0) +#define ip4_addr2_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 1) +#define ip4_addr3_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 2) +#define ip4_addr4_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 3) +/* These are cast to u16_t, with the intent that they are often arguments + * to printf using the U16_F format from cc.h. */ +#define ip4_addr1_16(ipaddr) ((u16_t)ip4_addr1(ipaddr)) +#define ip4_addr2_16(ipaddr) ((u16_t)ip4_addr2(ipaddr)) +#define ip4_addr3_16(ipaddr) ((u16_t)ip4_addr3(ipaddr)) +#define ip4_addr4_16(ipaddr) ((u16_t)ip4_addr4(ipaddr)) +#define ip4_addr1_16_val(ipaddr) ((u16_t)ip4_addr1_val(ipaddr)) +#define ip4_addr2_16_val(ipaddr) ((u16_t)ip4_addr2_val(ipaddr)) +#define ip4_addr3_16_val(ipaddr) ((u16_t)ip4_addr3_val(ipaddr)) +#define ip4_addr4_16_val(ipaddr) ((u16_t)ip4_addr4_val(ipaddr)) + +#define IP4ADDR_STRLEN_MAX 16 + +/** For backwards compatibility */ +#define ip_ntoa(ipaddr) ipaddr_ntoa(ipaddr) + +u32_t ipaddr_addr(const char *cp); +int ip4addr_aton(const char *cp, ip4_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip4addr_ntoa(const ip4_addr_t *addr); +char *ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h new file mode 100644 index 000000000..ed5bf14a3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h @@ -0,0 +1,100 @@ +/** + * @file + * IP fragmentation/reassembly + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Jani Monoses + * + */ + +#ifndef LWIP_HDR_IP4_FRAG_H +#define LWIP_HDR_IP4_FRAG_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +#if IP_REASSEMBLY +/* The IP reassembly timer interval in milliseconds. */ +#define IP_TMR_INTERVAL 1000 + +/** IP reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip_reassdata { + struct ip_reassdata *next; + struct pbuf *p; + struct ip_hdr iphdr; + u16_t datagram_len; + u8_t flags; + u8_t timer; +}; + +void ip_reass_init(void); +void ip_reass_tmr(void); +struct pbuf * ip4_reass(struct pbuf *p); +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED +#define LWIP_PBUF_CUSTOM_REF_DEFINED +/** A custom pbuf that holds a reference to another pbuf, which is freed + * when this custom pbuf is freed. This is used to create a custom PBUF_REF + * that points into the original pbuf. */ +struct pbuf_custom_ref { + /** 'base class' */ + struct pbuf_custom pc; + /** pointer to the original pbuf that is referenced */ + struct pbuf *original; +}; +#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */ +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +err_t ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest); +#endif /* IP_FRAG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP4_FRAG_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h new file mode 100644 index 000000000..f894e063e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h @@ -0,0 +1,93 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_H +#define LWIP_HDR_IP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct netif *ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest); +const ip_addr_t *ip6_select_source_address(struct netif *netif, const ip6_addr_t * dest); +err_t ip6_input(struct pbuf *p, struct netif *inp); +err_t ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth); +err_t ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +err_t ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +#if LWIP_NETIF_USE_HINTS +err_t ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint); +#endif /* LWIP_NETIF_USE_HINTS */ +#if LWIP_IPV6_MLD +err_t ip6_options_add_hbh_ra(struct pbuf * p, u8_t nexth, u8_t value); +#endif /* LWIP_IPV6_MLD */ + +#define ip6_netif_get_local_ip(netif, dest) (((netif) != NULL) ? \ + ip6_select_source_address(netif, dest) : NULL) + +#if IP6_DEBUG +void ip6_debug_print(struct pbuf *p); +#else +#define ip6_debug_print(p) +#endif /* IP6_DEBUG */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h new file mode 100644 index 000000000..29c2a34d9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h @@ -0,0 +1,352 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Structs and macros for handling IPv6 addresses. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_ADDR_H +#define LWIP_HDR_IP6_ADDR_H + +#include "lwip/opt.h" +#include "def.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_zone.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/** This is the aligned version of ip6_addr_t, + used as local variable, on the stack, etc. */ +struct ip6_addr { + u32_t addr[4]; +#if LWIP_IPV6_SCOPES + u8_t zone; +#endif /* LWIP_IPV6_SCOPES */ +}; + +/** IPv6 address */ +typedef struct ip6_addr ip6_addr_t; + +/** Set an IPv6 partial address given by byte-parts */ +#define IP6_ADDR_PART(ip6addr, index, a,b,c,d) \ + (ip6addr)->addr[index] = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Set a full IPv6 address by passing the 4 u32_t indices in network byte order + (use PP_HTONL() for constants) */ +#define IP6_ADDR(ip6addr, idx0, idx1, idx2, idx3) do { \ + (ip6addr)->addr[0] = idx0; \ + (ip6addr)->addr[1] = idx1; \ + (ip6addr)->addr[2] = idx2; \ + (ip6addr)->addr[3] = idx3; \ + ip6_addr_clear_zone(ip6addr); } while(0) + +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK1(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK2(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK3(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK4(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK5(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK6(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK7(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK8(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3])) & 0xffff)) + +/** Copy IPv6 address - faster than ip6_addr_set: no NULL check */ +#define ip6_addr_copy(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; \ + ip6_addr_copy_zone((dest), (src)); }while(0) +/** Safely copy one IPv6 address to another (src may be NULL) */ +#define ip6_addr_set(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : (src)->addr[0]; \ + (dest)->addr[1] = (src) == NULL ? 
0 : (src)->addr[1]; \ + (dest)->addr[2] = (src) == NULL ? 0 : (src)->addr[2]; \ + (dest)->addr[3] = (src) == NULL ? 0 : (src)->addr[3]; \ + ip6_addr_set_zone((dest), (src) == NULL ? IP6_NO_ZONE : ip6_addr_zone(src)); }while(0) + +/** Copy packed IPv6 address to unpacked IPv6 address; zone is not set */ +#define ip6_addr_copy_from_packed(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; \ + ip6_addr_clear_zone(&dest); }while(0) + +/** Copy unpacked IPv6 address to packed IPv6 address; zone is lost */ +#define ip6_addr_copy_to_packed(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; }while(0) + +/** Set complete address to zero */ +#define ip6_addr_set_zero(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = 0; \ + ip6_addr_clear_zone(ip6addr);}while(0) + +/** Set address to ipv6 'any' (no need for lwip_htonl()) */ +#define ip6_addr_set_any(ip6addr) ip6_addr_set_zero(ip6addr) +/** Set address to ipv6 loopback address */ +#define ip6_addr_set_loopback(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL); \ + ip6_addr_clear_zone(ip6addr);}while(0) +/** Safely copy one IPv6 address to another and change byte order + * from host- to network-order. */ +#define ip6_addr_set_hton(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : lwip_htonl((src)->addr[0]); \ + (dest)->addr[1] = (src) == NULL ? 0 : lwip_htonl((src)->addr[1]); \ + (dest)->addr[2] = (src) == NULL ? 0 : lwip_htonl((src)->addr[2]); \ + (dest)->addr[3] = (src) == NULL ? 0 : lwip_htonl((src)->addr[3]); \ + ip6_addr_set_zone((dest), (src) == NULL ? IP6_NO_ZONE : ip6_addr_zone(src));}while(0) + + +/** Compare IPv6 networks, ignoring zone information. To be used sparingly! */ +#define ip6_addr_netcmp_zoneless(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1])) + +/** + * Determine if two IPv6 address are on the same network. + * + * @param addr1 IPv6 address 1 + * @param addr2 IPv6 address 2 + * @return 1 if the network identifiers of both address match, 0 if not + */ +#define ip6_addr_netcmp(addr1, addr2) (ip6_addr_netcmp_zoneless((addr1), (addr2)) && \ + ip6_addr_cmp_zone((addr1), (addr2))) + +/* Exact-host comparison *after* ip6_addr_netcmp() succeeded, for efficiency. */ +#define ip6_addr_nethostcmp(addr1, addr2) (((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) + +/** Compare IPv6 addresses, ignoring zone information. To be used sparingly! */ +#define ip6_addr_cmp_zoneless(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1]) && \ + ((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) +/** + * Determine if two IPv6 addresses are the same. In particular, the address + * part of both must be the same, and the zone must be compatible. 
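+ *
+ * Usage sketch (illustrative only, not upstream lwIP documentation; the
+ * variable names and the 2001:db8::1 test value are hypothetical):
+ *
+ *   ip6_addr_t a, b;
+ *   IP6_ADDR(&a, PP_HTONL(0x20010db8UL), 0, 0, PP_HTONL(0x00000001UL));
+ *   IP6_ADDR(&b, PP_HTONL(0x20010db8UL), 0, 0, PP_HTONL(0x00000001UL));
+ *   int equal = ip6_addr_cmp(&a, &b);   evaluates to 1: same address words,
+ *                                       and both zones cleared by IP6_ADDR()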
+ * + * @param addr1 IPv6 address 1 + * @param addr2 IPv6 address 2 + * @return 1 if the addresses are considered equal, 0 if not + */ +#define ip6_addr_cmp(addr1, addr2) (ip6_addr_cmp_zoneless((addr1), (addr2)) && \ + ip6_addr_cmp_zone((addr1), (addr2))) + +/** Compare IPv6 address to packed address and zone */ +#define ip6_addr_cmp_packed(ip6addr, paddr, zone_idx) (((ip6addr)->addr[0] == (paddr)->addr[0]) && \ + ((ip6addr)->addr[1] == (paddr)->addr[1]) && \ + ((ip6addr)->addr[2] == (paddr)->addr[2]) && \ + ((ip6addr)->addr[3] == (paddr)->addr[3]) && \ + ip6_addr_equals_zone((ip6addr), (zone_idx))) + +#define ip6_get_subnet_id(ip6addr) (lwip_htonl((ip6addr)->addr[2]) & 0x0000ffffUL) + +#define ip6_addr_isany_val(ip6addr) (((ip6addr).addr[0] == 0) && \ + ((ip6addr).addr[1] == 0) && \ + ((ip6addr).addr[2] == 0) && \ + ((ip6addr).addr[3] == 0)) +#define ip6_addr_isany(ip6addr) (((ip6addr) == NULL) || ip6_addr_isany_val(*(ip6addr))) + +#define ip6_addr_isloopback(ip6addr) (((ip6addr)->addr[0] == 0UL) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isglobal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xe0000000UL)) == PP_HTONL(0x20000000UL)) + +#define ip6_addr_islinklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfe800000UL)) + +#define ip6_addr_issitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfec00000UL)) + +#define ip6_addr_isuniquelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xfe000000UL)) == PP_HTONL(0xfc000000UL)) + +#define ip6_addr_isipv4mappedipv6(ip6addr) (((ip6addr)->addr[0] == 0) && ((ip6addr)->addr[1] == 0) && (((ip6addr)->addr[2]) == PP_HTONL(0x0000FFFFUL))) + +#define ip6_addr_ismulticast(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) +#define ip6_addr_multicast_transient_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00100000UL)) +#define ip6_addr_multicast_prefix_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00200000UL)) +#define ip6_addr_multicast_rendezvous_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00400000UL)) +#define ip6_addr_multicast_scope(ip6addr) ((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xf) +#define IP6_MULTICAST_SCOPE_RESERVED 0x0 +#define IP6_MULTICAST_SCOPE_RESERVED0 0x0 +#define IP6_MULTICAST_SCOPE_INTERFACE_LOCAL 0x1 +#define IP6_MULTICAST_SCOPE_LINK_LOCAL 0x2 +#define IP6_MULTICAST_SCOPE_RESERVED3 0x3 +#define IP6_MULTICAST_SCOPE_ADMIN_LOCAL 0x4 +#define IP6_MULTICAST_SCOPE_SITE_LOCAL 0x5 +#define IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL 0x8 +#define IP6_MULTICAST_SCOPE_GLOBAL 0xe +#define IP6_MULTICAST_SCOPE_RESERVEDF 0xf +#define ip6_addr_ismulticast_iflocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff010000UL)) +#define ip6_addr_ismulticast_linklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff020000UL)) +#define ip6_addr_ismulticast_adminlocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff040000UL)) +#define ip6_addr_ismulticast_sitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff050000UL)) +#define ip6_addr_ismulticast_orglocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff080000UL)) +#define ip6_addr_ismulticast_global(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff0e0000UL)) + +/* Scoping note: while interface-local and link-local multicast addresses do + * have a scope (i.e., they are meaningful 
only in the context of a particular + * interface), the following functions are not assigning or comparing zone + * indices. The reason for this is backward compatibility. Any call site that + * produces a non-global multicast address must assign a multicast address as + * appropriate itself. */ + +#define ip6_addr_isallnodes_iflocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff010000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isallnodes_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) +#define ip6_addr_set_allnodes_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_isallrouters_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000002UL))) +#define ip6_addr_set_allrouters_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000002UL); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_issolicitednode(ip6addr) ( ((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \ + (((ip6addr)->addr[3] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) ) + +#define ip6_addr_set_solicitednode(ip6addr, if_id) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = PP_HTONL(0x00000001UL); \ + (ip6addr)->addr[3] = (PP_HTONL(0xff000000UL) | (if_id)); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_cmp_solicitednode(ip6addr, sn_addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0) && \ + ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \ + ((ip6addr)->addr[3] == (PP_HTONL(0xff000000UL) | (sn_addr)->addr[3]))) + +/* IPv6 address states. */ +#define IP6_ADDR_INVALID 0x00 +#define IP6_ADDR_TENTATIVE 0x08 +#define IP6_ADDR_TENTATIVE_1 0x09 /* 1 probe sent */ +#define IP6_ADDR_TENTATIVE_2 0x0a /* 2 probes sent */ +#define IP6_ADDR_TENTATIVE_3 0x0b /* 3 probes sent */ +#define IP6_ADDR_TENTATIVE_4 0x0c /* 4 probes sent */ +#define IP6_ADDR_TENTATIVE_5 0x0d /* 5 probes sent */ +#define IP6_ADDR_TENTATIVE_6 0x0e /* 6 probes sent */ +#define IP6_ADDR_TENTATIVE_7 0x0f /* 7 probes sent */ +#define IP6_ADDR_VALID 0x10 /* This bit marks an address as valid (preferred or deprecated) */ +#define IP6_ADDR_PREFERRED 0x30 +#define IP6_ADDR_DEPRECATED 0x10 /* Same as VALID (valid but not preferred) */ +#define IP6_ADDR_DUPLICATED 0x40 /* Failed DAD test, not valid */ + +#define IP6_ADDR_TENTATIVE_COUNT_MASK 0x07 /* 1-7 probes sent */ + +#define ip6_addr_isinvalid(addr_state) (addr_state == IP6_ADDR_INVALID) +#define ip6_addr_istentative(addr_state) (addr_state & IP6_ADDR_TENTATIVE) +#define ip6_addr_isvalid(addr_state) (addr_state & IP6_ADDR_VALID) /* Include valid, preferred, and deprecated. 
*/ +#define ip6_addr_ispreferred(addr_state) (addr_state == IP6_ADDR_PREFERRED) +#define ip6_addr_isdeprecated(addr_state) (addr_state == IP6_ADDR_DEPRECATED) +#define ip6_addr_isduplicated(addr_state) (addr_state == IP6_ADDR_DUPLICATED) + +#if LWIP_IPV6_ADDRESS_LIFETIMES +#define IP6_ADDR_LIFE_STATIC (0) +#define IP6_ADDR_LIFE_INFINITE (0xffffffffUL) +#define ip6_addr_life_isstatic(addr_life) ((addr_life) == IP6_ADDR_LIFE_STATIC) +#define ip6_addr_life_isinfinite(addr_life) ((addr_life) == IP6_ADDR_LIFE_INFINITE) +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + +#define ip6_addr_debug_print_parts(debug, a, b, c, d, e, f, g, h) \ + LWIP_DEBUGF(debug, ("%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F, \ + a, b, c, d, e, f, g, h)) +#define ip6_addr_debug_print(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK1(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK2(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK3(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK4(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK5(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK6(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK7(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK8(ipaddr) : 0)) +#define ip6_addr_debug_print_val(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + IP6_ADDR_BLOCK1(&(ipaddr)), \ + IP6_ADDR_BLOCK2(&(ipaddr)), \ + IP6_ADDR_BLOCK3(&(ipaddr)), \ + IP6_ADDR_BLOCK4(&(ipaddr)), \ + IP6_ADDR_BLOCK5(&(ipaddr)), \ + IP6_ADDR_BLOCK6(&(ipaddr)), \ + IP6_ADDR_BLOCK7(&(ipaddr)), \ + IP6_ADDR_BLOCK8(&(ipaddr))) + +#define IP6ADDR_STRLEN_MAX 46 + +int ip6addr_aton(const char *cp, ip6_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip6addr_ntoa(const ip6_addr_t *addr); +char *ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen); + + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_ADDR_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h new file mode 100644 index 000000000..87e0e86a6 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h @@ -0,0 +1,144 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_FRAG_H +#define LWIP_HDR_IP6_FRAG_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ + +/** The IPv6 reassembly timer interval in milliseconds. */ +#define IP6_REASS_TMR_INTERVAL 1000 + +/** IP6_FRAG_COPYHEADER==1: for platforms where sizeof(void*) > 4, "struct + * ip6_reass_helper" is too large to be stored in the IPv6 fragment header, and + * will bleed into the header before it, which may be the IPv6 header or an + * extension header. This means that for each first fragment packet, we need to + * 1) make a copy of some IPv6 header fields (src+dest) that we need later on, + * just in case we do overwrite part of the IPv6 header, and 2) make a copy of + * the header data that we overwrote, so that we can restore it before either + * completing reassembly or sending an ICMPv6 reply. The last part is true even + * if this setting is disabled, but if it is enabled, we need to save a bit + * more data (up to the size of a pointer) because we overwrite more. */ +#ifndef IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_COPYHEADER 0 +#endif + +/* With IPV6_FRAG_COPYHEADER==1, a helper structure may (or, depending on the + * presence of extensions, may not) overwrite part of the IP header. Therefore, + * we copy the fields that we need from the IP header for as long as the helper + * structure may still be in place. This is easier than temporarily restoring + * those fields in the IP header each time we need to perform checks on them. */ +#if IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_SRC(ipr) ((ipr)->src) +#define IPV6_FRAG_DEST(ipr) ((ipr)->dest) +#else /* IPV6_FRAG_COPYHEADER */ +#define IPV6_FRAG_SRC(ipr) ((ipr)->iphdr->src) +#define IPV6_FRAG_DEST(ipr) ((ipr)->iphdr->dest) +#endif /* IPV6_FRAG_COPYHEADER */ + +/** IPv6 reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip6_reassdata { + struct ip6_reassdata *next; + struct pbuf *p; + struct ip6_hdr *iphdr; /* pointer to the first (original) IPv6 header */ +#if IPV6_FRAG_COPYHEADER + ip6_addr_p_t src; /* copy of the source address in the IP header */ + ip6_addr_p_t dest; /* copy of the destination address in the IP header */ + /* This buffer (for the part of the original header that we overwrite) will + * be slightly oversized, but we cannot compute the exact size from here. */ + u8_t orig_hdr[sizeof(struct ip6_frag_hdr) + sizeof(void*)]; +#else /* IPV6_FRAG_COPYHEADER */ + /* In this case we still need the buffer, for sending ICMPv6 replies. 
*/ + u8_t orig_hdr[sizeof(struct ip6_frag_hdr)]; +#endif /* IPV6_FRAG_COPYHEADER */ + u32_t identification; + u16_t datagram_len; + u8_t nexth; + u8_t timer; +#if LWIP_IPV6_SCOPES + u8_t src_zone; /* zone of original packet's source address */ + u8_t dest_zone; /* zone of original packet's destination address */ +#endif /* LWIP_IPV6_SCOPES */ +}; + +#define ip6_reass_init() /* Compatibility define */ +void ip6_reass_tmr(void); +struct pbuf *ip6_reass(struct pbuf *p); + +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_FRAG /* don't build if not configured for use in lwipopts.h */ + +#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED +#define LWIP_PBUF_CUSTOM_REF_DEFINED +/** A custom pbuf that holds a reference to another pbuf, which is freed + * when this custom pbuf is freed. This is used to create a custom PBUF_REF + * that points into the original pbuf. */ +struct pbuf_custom_ref { + /** 'base class' */ + struct pbuf_custom pc; + /** pointer to the original pbuf that is referenced */ + struct pbuf *original; +}; +#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */ + +err_t ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest); + +#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP6_FRAG_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h new file mode 100644 index 000000000..525790e67 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h @@ -0,0 +1,304 @@ +/** + * @file + * + * IPv6 address scopes, zones, and scoping policy. + * + * This header provides the means to implement support for IPv6 address scopes, + * as per RFC 4007. An address scope can be either global or more constrained. + * In lwIP, we say that an address "has a scope" or "is scoped" when its scope + * is constrained, in which case the address is meaningful only in a specific + * "zone." For unicast addresses, only link-local addresses have a scope; in + * that case, the scope is the link. For multicast addresses, there are various + * scopes defined by RFC 4007 and others. For any constrained scope, a system + * must establish a (potentially one-to-many) mapping between zones and local + * interfaces. For example, a link-local address is valid on only one link (its + * zone). That link may be attached to one or more local interfaces. The + * decisions on which scopes are constrained and the mapping between zones and + * interfaces is together what we refer to as the "scoping policy" - more on + * this in a bit. + * + * In lwIP, each IPv6 address has an associated zone index. This zone index may + * be set to "no zone" (IP6_NO_ZONE, 0) or an actual zone. We say that an + * address "has a zone" or "is zoned" when its zone index is *not* set to "no + * zone." In lwIP, in principle, each address should be "properly zoned," which + * means that if the address has a zone if and only if has a scope. As such, it + * is a rule that an unscoped (e.g., global) address must never have a zone. + * Even though one could argue that there is always one zone even for global + * scopes, this rule exists for implementation simplicity. Violation of the + * rule will trigger assertions or otherwise result in undesired behavior. + * + * Backward compatibility prevents us from requiring that applications always + * provide properly zoned addresses. 
We do enforce the rule that the in the + * lwIP link layer (everything below netif->output_ip6() and in particular ND6) + * *all* addresses are properly zoned. Thus, on the output paths down the + * stack, various places deal with the case of addresses that lack a zone. + * Some of them are best-effort for efficiency (e.g. the PCB bind and connect + * API calls' attempts to add missing zones); ultimately the IPv6 output + * handler (@ref ip6_output_if_src) will set a zone if necessary. + * + * Aside from dealing with scoped addresses lacking a zone, a proper IPv6 + * implementation must also ensure that a packet with a scoped source and/or + * destination address does not leave its zone. This is currently implemented + * in the input and forward functions. However, for output, these checks are + * deliberately omitted in order to keep the implementation lightweight. The + * routing algorithm in @ref ip6_route will take decisions such that it will + * not cause zone violations unless the application sets bad addresses, though. + * + * In terms of scoping policy, lwIP implements the default policy from RFC 4007 + * using macros in this file. This policy considers link-local unicast + * addresses and (only) interface-local and link-local multicast addresses as + * having a scope. For all these addresses, the zone is equal to the interface. + * As shown below in this file, it is possible to implement a custom policy. + */ + +/* + * Copyright (c) 2017 The MINIX 3 Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: David van Moolenbroek + * + */ +#ifndef LWIP_HDR_IP6_ZONE_H +#define LWIP_HDR_IP6_ZONE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ip6_zones IPv6 Zones + * @ingroup ip6 + * @{ + */ + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +/** Identifier for "no zone". */ +#define IP6_NO_ZONE 0 + +#if LWIP_IPV6_SCOPES + +/** Zone initializer for static IPv6 address initialization, including comma. 
*/ +#define IPADDR6_ZONE_INIT , IP6_NO_ZONE + +/** Return the zone index of the given IPv6 address; possibly "no zone". */ +#define ip6_addr_zone(ip6addr) ((ip6addr)->zone) + +/** Does the given IPv6 address have a zone set? (0/1) */ +#define ip6_addr_has_zone(ip6addr) (ip6_addr_zone(ip6addr) != IP6_NO_ZONE) + +/** Set the zone field of an IPv6 address to a particular value. */ +#define ip6_addr_set_zone(ip6addr, zone_idx) ((ip6addr)->zone = (zone_idx)) + +/** Clear the zone field of an IPv6 address, setting it to "no zone". */ +#define ip6_addr_clear_zone(ip6addr) ((ip6addr)->zone = IP6_NO_ZONE) + +/** Copy the zone field from the second IPv6 address to the first one. */ +#define ip6_addr_copy_zone(ip6addr1, ip6addr2) ((ip6addr1).zone = (ip6addr2).zone) + +/** Is the zone field of the given IPv6 address equal to the given zone index? (0/1) */ +#define ip6_addr_equals_zone(ip6addr, zone_idx) ((ip6addr)->zone == (zone_idx)) + +/** Are the zone fields of the given IPv6 addresses equal? (0/1) + * This macro must only be used on IPv6 addresses of the same scope. */ +#define ip6_addr_cmp_zone(ip6addr1, ip6addr2) ((ip6addr1)->zone == (ip6addr2)->zone) + +/** Symbolic constants for the 'type' parameters in some of the macros. + * These exist for efficiency only, allowing the macros to avoid certain tests + * when the address is known not to be of a certain type. Dead code elimination + * will do the rest. IP6_MULTICAST is supported but currently not optimized. + * @see ip6_addr_has_scope, ip6_addr_assign_zone, ip6_addr_lacks_zone. + */ +enum lwip_ipv6_scope_type +{ + /** Unknown */ + IP6_UNKNOWN = 0, + /** Unicast */ + IP6_UNICAST = 1, + /** Multicast */ + IP6_MULTICAST = 2 +}; + +/** IPV6_CUSTOM_SCOPES: together, the following three macro definitions, + * @ref ip6_addr_has_scope, @ref ip6_addr_assign_zone, and + * @ref ip6_addr_test_zone, completely define the lwIP scoping policy. + * The definitions below implement the default policy from RFC 4007 Sec. 6. + * Should an implementation desire to implement a different policy, it can + * define IPV6_CUSTOM_SCOPES to 1 and supply its own definitions for the three + * macros instead. + */ +#ifndef IPV6_CUSTOM_SCOPES +#define IPV6_CUSTOM_SCOPES 0 +#endif /* !IPV6_CUSTOM_SCOPES */ + +#if !IPV6_CUSTOM_SCOPES + +/** + * Determine whether an IPv6 address has a constrained scope, and as such is + * meaningful only if accompanied by a zone index to identify the scope's zone. + * The given address type may be used to eliminate at compile time certain + * checks that will evaluate to false at run time anyway. + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined. + * + * Even though the unicast loopback address does have an implied link-local + * scope, in this implementation it does not have an explicitly assigned zone + * index. As such it should not be tested for in this macro. + * + * @param ip6addr the IPv6 address (const); only its address part is examined. + * @param type address type; see @ref lwip_ipv6_scope_type. + * @return 1 if the address has a constrained scope, 0 if it does not. + */ +#define ip6_addr_has_scope(ip6addr, type) \ + (ip6_addr_islinklocal(ip6addr) || (((type) != IP6_UNICAST) && \ + (ip6_addr_ismulticast_iflocal(ip6addr) || \ + ip6_addr_ismulticast_linklocal(ip6addr)))) + +/** + * Assign a zone index to an IPv6 address, based on a network interface. 
If the + * given address has a scope, the assigned zone index is that scope's zone of + * the given netif; otherwise, the assigned zone index is "no zone". + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined, and the zone index + * of both of those scopes always equals the index of the network interface. + * As such, this default implementation need not distinguish between different + * constrained scopes when assigning the zone. + * + * @param ip6addr the IPv6 address; its address part is examined, and its zone + * index is assigned. + * @param type address type; see @ref lwip_ipv6_scope_type. + * @param netif the network interface (const). + */ +#define ip6_addr_assign_zone(ip6addr, type, netif) \ + (ip6_addr_set_zone((ip6addr), \ + ip6_addr_has_scope((ip6addr), (type)) ? netif_get_index(netif) : 0)) + +/** + * Test whether an IPv6 address is "zone-compatible" with a network interface. + * That is, test whether the network interface is part of the zone associated + * with the address. For efficiency, this macro is only ever called if the + * given address is either scoped or zoned, and thus, it need not test this. + * If an address is scoped but not zoned, or zoned and not scoped, it is + * considered not zone-compatible with any netif. + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined, and the zone index + * of both of those scopes always equals the index of the network interface. + * As such, there is always only one matching netif for a specific zone index, + * but all call sites of this macro currently support multiple matching netifs + * as well (at no additional expense in the common case). + * + * @param ip6addr the IPv6 address (const). + * @param netif the network interface (const). + * @return 1 if the address is scope-compatible with the netif, 0 if not. + */ +#define ip6_addr_test_zone(ip6addr, netif) \ + (ip6_addr_equals_zone((ip6addr), netif_get_index(netif))) + +#endif /* !IPV6_CUSTOM_SCOPES */ + +/** Does the given IPv6 address have a scope, and as such should also have a + * zone to be meaningful, but does not actually have a zone? (0/1) */ +#define ip6_addr_lacks_zone(ip6addr, type) \ + (!ip6_addr_has_zone(ip6addr) && ip6_addr_has_scope((ip6addr), (type))) + +/** + * Try to select a zone for a scoped address that does not yet have a zone. + * Called from PCB bind and connect routines, for two reasons: 1) to save on + * this (relatively expensive) selection for every individual packet route + * operation and 2) to allow the application to obtain the selected zone from + * the PCB as is customary for e.g. getsockname/getpeername BSD socket calls. + * + * Ideally, callers would always supply a properly zoned address, in which case + * this function would not be needed. It exists both for compatibility with the + * BSD socket API (which accepts zoneless destination addresses) and for + * backward compatibility with pre-scoping lwIP code. + * + * It may be impossible to select a zone, e.g. if there are no netifs. In that + * case, the address's zone field will be left as is. + * + * @param dest the IPv6 address for which to select and set a zone. + * @param src source IPv6 address (const); may be equal to dest. 
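+ *
+ * Usage sketch (illustrative only, not upstream lwIP documentation; the
+ * variable names are hypothetical and the surrounding connect logic is
+ * omitted):
+ *
+ *   ip6_addr_t remote_ip6;   destination supplied by the application
+ *   ip6_addr_t local_ip6;    address the PCB is bound to (may equal remote)
+ *   if (ip6_addr_lacks_zone(&remote_ip6, IP6_UNICAST)) {
+ *     ip6_addr_select_zone(&remote_ip6, &local_ip6);
+ *   }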
+ */ +#define ip6_addr_select_zone(dest, src) do { struct netif *selected_netif; \ + selected_netif = ip6_route((src), (dest)); \ + if (selected_netif != NULL) { \ + ip6_addr_assign_zone((dest), IP6_UNKNOWN, selected_netif); \ + } } while (0) + +/** + * @} + */ + +#else /* LWIP_IPV6_SCOPES */ + +#define IPADDR6_ZONE_INIT +#define ip6_addr_zone(ip6addr) (IP6_NO_ZONE) +#define ip6_addr_has_zone(ip6addr) (0) +#define ip6_addr_set_zone(ip6addr, zone_idx) +#define ip6_addr_clear_zone(ip6addr) +#define ip6_addr_copy_zone(ip6addr1, ip6addr2) +#define ip6_addr_equals_zone(ip6addr, zone_idx) (1) +#define ip6_addr_cmp_zone(ip6addr1, ip6addr2) (1) +#define IPV6_CUSTOM_SCOPES 0 +#define ip6_addr_has_scope(ip6addr, type) (0) +#define ip6_addr_assign_zone(ip6addr, type, netif) +#define ip6_addr_test_zone(ip6addr, netif) (1) +#define ip6_addr_lacks_zone(ip6addr, type) (0) +#define ip6_addr_select_zone(ip6addr, src) + +#endif /* LWIP_IPV6_SCOPES */ + +#if LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG + +/** Verify that the given IPv6 address is properly zoned. */ +#define IP6_ADDR_ZONECHECK(ip6addr) LWIP_ASSERT("IPv6 zone check failed", \ + ip6_addr_has_scope(ip6addr, IP6_UNKNOWN) == ip6_addr_has_zone(ip6addr)) + +/** Verify that the given IPv6 address is properly zoned for the given netif. */ +#define IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif) LWIP_ASSERT("IPv6 netif zone check failed", \ + ip6_addr_has_scope(ip6addr, IP6_UNKNOWN) ? \ + (ip6_addr_has_zone(ip6addr) && \ + (((netif) == NULL) || ip6_addr_test_zone((ip6addr), (netif)))) : \ + !ip6_addr_has_zone(ip6addr)) + +#else /* LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG */ + +#define IP6_ADDR_ZONECHECK(ip6addr) +#define IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif) + +#endif /* LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG */ + +#endif /* LWIP_IPV6 */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP6_ZONE_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h new file mode 100644 index 000000000..2f977709d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h @@ -0,0 +1,438 @@ +/** + * @file + * IP address API (common IPv4 and IPv6) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_ADDR_H +#define LWIP_HDR_IP_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#include "lwip/ip4_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup ipaddr + * IP address types for use in ip_addr_t.type member. + * @see tcp_new_ip_type(), udp_new_ip_type(), raw_new_ip_type(). + */ +enum lwip_ip_addr_type { + /** IPv4 */ + IPADDR_TYPE_V4 = 0U, + /** IPv6 */ + IPADDR_TYPE_V6 = 6U, + /** IPv4+IPv6 ("dual-stack") */ + IPADDR_TYPE_ANY = 46U +}; + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ipaddr + * A union struct for both IP version's addresses. + * ATTENTION: watch out for its size when adding IPv6 address scope! + */ +typedef struct ip_addr { + union { + ip6_addr_t ip6; + ip4_addr_t ip4; + } u_addr; + /** @ref lwip_ip_addr_type */ + u8_t type; +} ip_addr_t; + +extern const ip_addr_t ip_addr_any_type; + +/** @ingroup ip4addr */ +#define IPADDR4_INIT(u32val) { { { { u32val, 0ul, 0ul, 0ul } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V4 } +/** @ingroup ip4addr */ +#define IPADDR4_INIT_BYTES(a,b,c,d) IPADDR4_INIT(PP_HTONL(LWIP_MAKEU32(a,b,c,d))) + +/** @ingroup ip6addr */ +#define IPADDR6_INIT(a, b, c, d) { { { { a, b, c, d } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V6 } +/** @ingroup ip6addr */ +#define IPADDR6_INIT_HOST(a, b, c, d) { { { { PP_HTONL(a), PP_HTONL(b), PP_HTONL(c), PP_HTONL(d) } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V6 } + +/** @ingroup ipaddr */ +#define IP_IS_ANY_TYPE_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_ANY) +/** @ingroup ipaddr */ +#define IPADDR_ANY_TYPE_INIT { { { { 0ul, 0ul, 0ul, 0ul } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_ANY } + +/** @ingroup ip4addr */ +#define IP_IS_V4_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V4) +/** @ingroup ip6addr */ +#define IP_IS_V6_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V6) +/** @ingroup ip4addr */ +#define IP_IS_V4(ipaddr) (((ipaddr) == NULL) || IP_IS_V4_VAL(*(ipaddr))) +/** @ingroup ip6addr */ +#define IP_IS_V6(ipaddr) (((ipaddr) != NULL) && IP_IS_V6_VAL(*(ipaddr))) + +#define IP_SET_TYPE_VAL(ipaddr, iptype) do { (ipaddr).type = (iptype); }while(0) +#define IP_SET_TYPE(ipaddr, iptype) do { if((ipaddr) != NULL) { IP_SET_TYPE_VAL(*(ipaddr), iptype); }}while(0) +#define IP_GET_TYPE(ipaddr) ((ipaddr)->type) + +#define IP_ADDR_RAW_SIZE(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V4 ? 
sizeof(ip4_addr_t) : sizeof(ip6_addr_t)) + +#define IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr) (IP_GET_TYPE(&pcb->local_ip) == IP_GET_TYPE(ipaddr)) +#define IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr) (IP_IS_ANY_TYPE_VAL(pcb->local_ip) || IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr)) + +/** @ingroup ip6addr + * Convert generic ip address to specific protocol version + */ +#define ip_2_ip6(ipaddr) (&((ipaddr)->u_addr.ip6)) +/** @ingroup ip4addr + * Convert generic ip address to specific protocol version + */ +#define ip_2_ip4(ipaddr) (&((ipaddr)->u_addr.ip4)) + +/** @ingroup ip4addr */ +#define IP_ADDR4(ipaddr,a,b,c,d) do { IP4_ADDR(ip_2_ip4(ipaddr),a,b,c,d); \ + IP_SET_TYPE_VAL(*(ipaddr), IPADDR_TYPE_V4); } while(0) +/** @ingroup ip6addr */ +#define IP_ADDR6(ipaddr,i0,i1,i2,i3) do { IP6_ADDR(ip_2_ip6(ipaddr),i0,i1,i2,i3); \ + IP_SET_TYPE_VAL(*(ipaddr), IPADDR_TYPE_V6); } while(0) +/** @ingroup ip6addr */ +#define IP_ADDR6_HOST(ipaddr,i0,i1,i2,i3) IP_ADDR6(ipaddr,PP_HTONL(i0),PP_HTONL(i1),PP_HTONL(i2),PP_HTONL(i3)) + +#define ip_clear_no4(ipaddr) do { ip_2_ip6(ipaddr)->addr[1] = \ + ip_2_ip6(ipaddr)->addr[2] = \ + ip_2_ip6(ipaddr)->addr[3] = 0; \ + ip6_addr_clear_zone(ip_2_ip6(ipaddr)); }while(0) + +/** @ingroup ipaddr */ +#define ip_addr_copy(dest, src) do{ IP_SET_TYPE_VAL(dest, IP_GET_TYPE(&src)); if(IP_IS_V6_VAL(src)){ \ + ip6_addr_copy(*ip_2_ip6(&(dest)), *ip_2_ip6(&(src))); }else{ \ + ip4_addr_copy(*ip_2_ip4(&(dest)), *ip_2_ip4(&(src))); ip_clear_no4(&dest); }}while(0) +/** @ingroup ip6addr */ +#define ip_addr_copy_from_ip6(dest, src) do{ \ + ip6_addr_copy(*ip_2_ip6(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V6); }while(0) +/** @ingroup ip6addr */ +#define ip_addr_copy_from_ip6_packed(dest, src) do{ \ + ip6_addr_copy_from_packed(*ip_2_ip6(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V6); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_copy_from_ip4(dest, src) do{ \ + ip4_addr_copy(*ip_2_ip4(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V4); ip_clear_no4(&dest); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_ip4_u32(ipaddr, val) do{if(ipaddr){ip4_addr_set_u32(ip_2_ip4(ipaddr), val); \ + IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_ip4_u32_val(ipaddr, val) do{ ip4_addr_set_u32(ip_2_ip4(&(ipaddr)), val); \ + IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_get_ip4_u32(ipaddr) (((ipaddr) && IP_IS_V4(ipaddr)) ? 
\ + ip4_addr_get_u32(ip_2_ip4(ipaddr)) : 0) +/** @ingroup ipaddr */ +#define ip_addr_set(dest, src) do{ IP_SET_TYPE(dest, IP_GET_TYPE(src)); if(IP_IS_V6(src)){ \ + ip6_addr_set(ip_2_ip6(dest), ip_2_ip6(src)); }else{ \ + ip4_addr_set(ip_2_ip4(dest), ip_2_ip4(src)); ip_clear_no4(dest); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_ipaddr(dest, src) ip_addr_set(dest, src) +/** @ingroup ipaddr */ +#define ip_addr_set_zero(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, 0); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_zero_ip4(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); }while(0) +/** @ingroup ip6addr */ +#define ip_addr_set_zero_ip6(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_any(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_any(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_any(ip_2_ip4(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_any_val(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_any(ip_2_ip6(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_any(ip_2_ip4(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_loopback(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_loopback(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_loopback(ip_2_ip4(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_loopback_val(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_loopback(ip_2_ip6(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_loopback(ip_2_ip4(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_hton(dest, src) do{if(IP_IS_V6(src)){ \ + ip6_addr_set_hton(ip_2_ip6(dest), ip_2_ip6(src)); IP_SET_TYPE(dest, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_hton(ip_2_ip4(dest), ip_2_ip4(src)); IP_SET_TYPE(dest, IPADDR_TYPE_V4); ip_clear_no4(dest); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_get_network(target, host, netmask) do{if(IP_IS_V6(host)){ \ + ip4_addr_set_zero(ip_2_ip4(target)); IP_SET_TYPE(target, IPADDR_TYPE_V6); } else { \ + ip4_addr_get_network(ip_2_ip4(target), ip_2_ip4(host), ip_2_ip4(netmask)); IP_SET_TYPE(target, IPADDR_TYPE_V4); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_netcmp(addr1, addr2, mask) ((IP_IS_V6(addr1) && IP_IS_V6(addr2)) ? \ + 0 : \ + ip4_addr_netcmp(ip_2_ip4(addr1), ip_2_ip4(addr2), mask)) +/** @ingroup ipaddr */ +#define ip_addr_cmp(addr1, addr2) ((IP_GET_TYPE(addr1) != IP_GET_TYPE(addr2)) ? 0 : (IP_IS_V6_VAL(*(addr1)) ? \ + ip6_addr_cmp(ip_2_ip6(addr1), ip_2_ip6(addr2)) : \ + ip4_addr_cmp(ip_2_ip4(addr1), ip_2_ip4(addr2)))) +/** @ingroup ipaddr */ +#define ip_addr_cmp_zoneless(addr1, addr2) ((IP_GET_TYPE(addr1) != IP_GET_TYPE(addr2)) ? 0 : (IP_IS_V6_VAL(*(addr1)) ? \ + ip6_addr_cmp_zoneless(ip_2_ip6(addr1), ip_2_ip6(addr2)) : \ + ip4_addr_cmp(ip_2_ip4(addr1), ip_2_ip4(addr2)))) +/** @ingroup ipaddr */ +#define ip_addr_isany(ipaddr) (((ipaddr) == NULL) ? 1 : ((IP_IS_V6(ipaddr)) ?
\ + ip6_addr_isany(ip_2_ip6(ipaddr)) : \ + ip4_addr_isany(ip_2_ip4(ipaddr)))) +/** @ingroup ipaddr */ +#define ip_addr_isany_val(ipaddr) ((IP_IS_V6_VAL(ipaddr)) ? \ + ip6_addr_isany_val(*ip_2_ip6(&(ipaddr))) : \ + ip4_addr_isany_val(*ip_2_ip4(&(ipaddr)))) +/** @ingroup ipaddr */ +#define ip_addr_isbroadcast(ipaddr, netif) ((IP_IS_V6(ipaddr)) ? \ + 0 : \ + ip4_addr_isbroadcast(ip_2_ip4(ipaddr), netif)) +/** @ingroup ipaddr */ +#define ip_addr_ismulticast(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_ismulticast(ip_2_ip6(ipaddr)) : \ + ip4_addr_ismulticast(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_isloopback(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_isloopback(ip_2_ip6(ipaddr)) : \ + ip4_addr_isloopback(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_islinklocal(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_islinklocal(ip_2_ip6(ipaddr)) : \ + ip4_addr_islinklocal(ip_2_ip4(ipaddr))) +#define ip_addr_debug_print(debug, ipaddr) do { if(IP_IS_V6(ipaddr)) { \ + ip6_addr_debug_print(debug, ip_2_ip6(ipaddr)); } else { \ + ip4_addr_debug_print(debug, ip_2_ip4(ipaddr)); }}while(0) +#define ip_addr_debug_print_val(debug, ipaddr) do { if(IP_IS_V6_VAL(ipaddr)) { \ + ip6_addr_debug_print_val(debug, *ip_2_ip6(&(ipaddr))); } else { \ + ip4_addr_debug_print_val(debug, *ip_2_ip4(&(ipaddr))); }}while(0) +char *ipaddr_ntoa(const ip_addr_t *addr); +char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen); +int ipaddr_aton(const char *cp, ip_addr_t *addr); + +/** @ingroup ipaddr */ +#define IPADDR_STRLEN_MAX IP6ADDR_STRLEN_MAX + +/** @ingroup ipaddr */ +#define ip4_2_ipv4_mapped_ipv6(ip6addr, ip4addr) do { \ + (ip6addr)->addr[3] = (ip4addr)->addr; \ + (ip6addr)->addr[2] = PP_HTONL(0x0000FFFFUL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[0] = 0; \ + ip6_addr_clear_zone(ip6addr); } while(0); + +/** @ingroup ipaddr */ +#define unmap_ipv4_mapped_ipv6(ip4addr, ip6addr) \ + (ip4addr)->addr = (ip6addr)->addr[3]; + +#define IP46_ADDR_ANY(type) (((type) == IPADDR_TYPE_V6)? 
IP6_ADDR_ANY : IP4_ADDR_ANY) + +#else /* LWIP_IPV4 && LWIP_IPV6 */ + +#define IP_ADDR_PCB_VERSION_MATCH(addr, pcb) 1 +#define IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr) 1 + +#define ip_addr_set_any_val(is_ipv6, ipaddr) ip_addr_set_any(is_ipv6, &(ipaddr)) +#define ip_addr_set_loopback_val(is_ipv6, ipaddr) ip_addr_set_loopback(is_ipv6, &(ipaddr)) + +#if LWIP_IPV4 + +typedef ip4_addr_t ip_addr_t; +#define IPADDR4_INIT(u32val) { u32val } +#define IPADDR4_INIT_BYTES(a,b,c,d) IPADDR4_INIT(PP_HTONL(LWIP_MAKEU32(a,b,c,d))) +#define IP_IS_V4_VAL(ipaddr) 1 +#define IP_IS_V6_VAL(ipaddr) 0 +#define IP_IS_V4(ipaddr) 1 +#define IP_IS_V6(ipaddr) 0 +#define IP_IS_ANY_TYPE_VAL(ipaddr) 0 +#define IP_SET_TYPE_VAL(ipaddr, iptype) +#define IP_SET_TYPE(ipaddr, iptype) +#define IP_GET_TYPE(ipaddr) IPADDR_TYPE_V4 +#define IP_ADDR_RAW_SIZE(ipaddr) sizeof(ip4_addr_t) +#define ip_2_ip4(ipaddr) (ipaddr) +#define IP_ADDR4(ipaddr,a,b,c,d) IP4_ADDR(ipaddr,a,b,c,d) + +#define ip_addr_copy(dest, src) ip4_addr_copy(dest, src) +#define ip_addr_copy_from_ip4(dest, src) ip4_addr_copy(dest, src) +#define ip_addr_set_ip4_u32(ipaddr, val) ip4_addr_set_u32(ip_2_ip4(ipaddr), val) +#define ip_addr_set_ip4_u32_val(ipaddr, val) ip_addr_set_ip4_u32(&(ipaddr), val) +#define ip_addr_get_ip4_u32(ipaddr) ip4_addr_get_u32(ip_2_ip4(ipaddr)) +#define ip_addr_set(dest, src) ip4_addr_set(dest, src) +#define ip_addr_set_ipaddr(dest, src) ip4_addr_set(dest, src) +#define ip_addr_set_zero(ipaddr) ip4_addr_set_zero(ipaddr) +#define ip_addr_set_zero_ip4(ipaddr) ip4_addr_set_zero(ipaddr) +#define ip_addr_set_any(is_ipv6, ipaddr) ip4_addr_set_any(ipaddr) +#define ip_addr_set_loopback(is_ipv6, ipaddr) ip4_addr_set_loopback(ipaddr) +#define ip_addr_set_hton(dest, src) ip4_addr_set_hton(dest, src) +#define ip_addr_get_network(target, host, mask) ip4_addr_get_network(target, host, mask) +#define ip_addr_netcmp(addr1, addr2, mask) ip4_addr_netcmp(addr1, addr2, mask) +#define ip_addr_cmp(addr1, addr2) ip4_addr_cmp(addr1, addr2) +#define ip_addr_isany(ipaddr) ip4_addr_isany(ipaddr) +#define ip_addr_isany_val(ipaddr) ip4_addr_isany_val(ipaddr) +#define ip_addr_isloopback(ipaddr) ip4_addr_isloopback(ipaddr) +#define ip_addr_islinklocal(ipaddr) ip4_addr_islinklocal(ipaddr) +#define ip_addr_isbroadcast(addr, netif) ip4_addr_isbroadcast(addr, netif) +#define ip_addr_ismulticast(ipaddr) ip4_addr_ismulticast(ipaddr) +#define ip_addr_debug_print(debug, ipaddr) ip4_addr_debug_print(debug, ipaddr) +#define ip_addr_debug_print_val(debug, ipaddr) ip4_addr_debug_print_val(debug, ipaddr) +#define ipaddr_ntoa(ipaddr) ip4addr_ntoa(ipaddr) +#define ipaddr_ntoa_r(ipaddr, buf, buflen) ip4addr_ntoa_r(ipaddr, buf, buflen) +#define ipaddr_aton(cp, addr) ip4addr_aton(cp, addr) + +#define IPADDR_STRLEN_MAX IP4ADDR_STRLEN_MAX + +#define IP46_ADDR_ANY(type) (IP4_ADDR_ANY) + +#else /* LWIP_IPV4 */ + +typedef ip6_addr_t ip_addr_t; +#define IPADDR6_INIT(a, b, c, d) { { a, b, c, d } IPADDR6_ZONE_INIT } +#define IPADDR6_INIT_HOST(a, b, c, d) { { PP_HTONL(a), PP_HTONL(b), PP_HTONL(c), PP_HTONL(d) } IPADDR6_ZONE_INIT } +#define IP_IS_V4_VAL(ipaddr) 0 +#define IP_IS_V6_VAL(ipaddr) 1 +#define IP_IS_V4(ipaddr) 0 +#define IP_IS_V6(ipaddr) 1 +#define IP_IS_ANY_TYPE_VAL(ipaddr) 0 +#define IP_SET_TYPE_VAL(ipaddr, iptype) +#define IP_SET_TYPE(ipaddr, iptype) +#define IP_GET_TYPE(ipaddr) IPADDR_TYPE_V6 +#define IP_ADDR_RAW_SIZE(ipaddr) sizeof(ip6_addr_t) +#define ip_2_ip6(ipaddr) (ipaddr) +#define IP_ADDR6(ipaddr,i0,i1,i2,i3) IP6_ADDR(ipaddr,i0,i1,i2,i3) +#define IP_ADDR6_HOST(ipaddr,i0,i1,i2,i3) 
IP_ADDR6(ipaddr,PP_HTONL(i0),PP_HTONL(i1),PP_HTONL(i2),PP_HTONL(i3)) + +#define ip_addr_copy(dest, src) ip6_addr_copy(dest, src) +#define ip_addr_copy_from_ip6(dest, src) ip6_addr_copy(dest, src) +#define ip_addr_copy_from_ip6_packed(dest, src) ip6_addr_copy_from_packed(dest, src) +#define ip_addr_set(dest, src) ip6_addr_set(dest, src) +#define ip_addr_set_ipaddr(dest, src) ip6_addr_set(dest, src) +#define ip_addr_set_zero(ipaddr) ip6_addr_set_zero(ipaddr) +#define ip_addr_set_zero_ip6(ipaddr) ip6_addr_set_zero(ipaddr) +#define ip_addr_set_any(is_ipv6, ipaddr) ip6_addr_set_any(ipaddr) +#define ip_addr_set_loopback(is_ipv6, ipaddr) ip6_addr_set_loopback(ipaddr) +#define ip_addr_set_hton(dest, src) ip6_addr_set_hton(dest, src) +#define ip_addr_get_network(target, host, mask) ip6_addr_set_zero(target) +#define ip_addr_netcmp(addr1, addr2, mask) 0 +#define ip_addr_cmp(addr1, addr2) ip6_addr_cmp(addr1, addr2) +#define ip_addr_cmp_zoneless(addr1, addr2) ip6_addr_cmp_zoneless(addr1, addr2) +#define ip_addr_isany(ipaddr) ip6_addr_isany(ipaddr) +#define ip_addr_isany_val(ipaddr) ip6_addr_isany_val(ipaddr) +#define ip_addr_isloopback(ipaddr) ip6_addr_isloopback(ipaddr) +#define ip_addr_islinklocal(ipaddr) ip6_addr_islinklocal(ipaddr) +#define ip_addr_isbroadcast(addr, netif) 0 +#define ip_addr_ismulticast(ipaddr) ip6_addr_ismulticast(ipaddr) +#define ip_addr_debug_print(debug, ipaddr) ip6_addr_debug_print(debug, ipaddr) +#define ip_addr_debug_print_val(debug, ipaddr) ip6_addr_debug_print_val(debug, ipaddr) +#define ipaddr_ntoa(ipaddr) ip6addr_ntoa(ipaddr) +#define ipaddr_ntoa_r(ipaddr, buf, buflen) ip6addr_ntoa_r(ipaddr, buf, buflen) +#define ipaddr_aton(cp, addr) ip6addr_aton(cp, addr) + +#define IPADDR_STRLEN_MAX IP6ADDR_STRLEN_MAX + +#define IP46_ADDR_ANY(type) (IP6_ADDR_ANY) + +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_IPV4 + +extern const ip_addr_t ip_addr_any; +extern const ip_addr_t ip_addr_broadcast; + +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip_addr_t + * for the IP wildcard. + * Defined to @ref IP4_ADDR_ANY when IPv4 is enabled. + * Defined to @ref IP6_ADDR_ANY in IPv6 only systems. + * Use this if you can handle IPv4 _AND_ IPv6 addresses. + * Use @ref IP4_ADDR_ANY or @ref IP6_ADDR_ANY when the IP + * type matters. 
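+ *
+ * Usage sketch (illustrative only, not upstream lwIP documentation; the UDP
+ * PCB and the port number 5683 are hypothetical):
+ *
+ *   struct udp_pcb *pcb = udp_new();        declared in lwip/udp.h
+ *   if (pcb != NULL) {
+ *     udp_bind(pcb, IP_ADDR_ANY, 5683);     bind to the wildcard address
+ *   }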
+ */ +#define IP_ADDR_ANY IP4_ADDR_ANY +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip_addr_t + * for the IPv4 wildcard and the broadcast address + */ +#define IP4_ADDR_ANY (&ip_addr_any) +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip4_addr_t + * for the wildcard and the broadcast address + */ +#define IP4_ADDR_ANY4 (ip_2_ip4(&ip_addr_any)) + +/** @ingroup ip4addr */ +#define IP_ADDR_BROADCAST (&ip_addr_broadcast) +/** @ingroup ip4addr */ +#define IP4_ADDR_BROADCAST (ip_2_ip4(&ip_addr_broadcast)) + +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + +extern const ip_addr_t ip6_addr_any; + +/** + * @ingroup ip6addr + * IP6_ADDR_ANY can be used as a fixed ip_addr_t + * for the IPv6 wildcard address + */ +#define IP6_ADDR_ANY (&ip6_addr_any) +/** + * @ingroup ip6addr + * IP6_ADDR_ANY6 can be used as a fixed ip6_addr_t + * for the IPv6 wildcard address + */ +#define IP6_ADDR_ANY6 (ip_2_ip6(&ip6_addr_any)) + +#if !LWIP_IPV4 +/** IPv6-only configurations */ +#define IP_ADDR_ANY IP6_ADDR_ANY +#endif /* !LWIP_IPV4 */ + +#endif + +#if LWIP_IPV4 && LWIP_IPV6 +/** @ingroup ipaddr */ +#define IP_ANY_TYPE (&ip_addr_any_type) +#else +#define IP_ANY_TYPE IP_ADDR_ANY +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h new file mode 100644 index 000000000..ff208d25c --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h @@ -0,0 +1,82 @@ +/** + * @file + * Heap API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
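A brief usage sketch for the wildcard constants defined above; the helper name and fallback policy are illustrative only and not part of lwIP:

#include "lwip/ip_addr.h"

/* Parse a textual address; fall back to the wildcard (IP_ADDR_ANY) when the
 * string is missing or does not parse.  Uses only macros/functions declared
 * in this header. */
static ip_addr_t addr_from_string_or_any(const char *s)
{
  ip_addr_t a;
  if ((s == NULL) || (ipaddr_aton(s, &a) == 0)) {
    ip_addr_set(&a, IP_ADDR_ANY);   /* wildcard, see IP_ADDR_ANY above */
  }
  return a;
}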
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_MEM_H +#define LWIP_HDR_MEM_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if MEM_LIBC_MALLOC + +#include "lwip/arch.h" + +typedef size_t mem_size_t; +#define MEM_SIZE_F SZT_F + +#elif MEM_USE_POOLS + +typedef u16_t mem_size_t; +#define MEM_SIZE_F U16_F + +#else + +/* MEM_SIZE would have to be aligned, but using 64000 here instead of + * 65535 leaves some room for alignment... + */ +#if MEM_SIZE > 64000L +typedef u32_t mem_size_t; +#define MEM_SIZE_F U32_F +#else +typedef u16_t mem_size_t; +#define MEM_SIZE_F U16_F +#endif /* MEM_SIZE > 64000 */ +#endif + +void mem_init(void); +void *mem_trim(void *mem, mem_size_t size); +void *mem_malloc(mem_size_t size); +void *mem_calloc(mem_size_t count, mem_size_t size); +void mem_free(void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEM_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h new file mode 100644 index 000000000..1630b26c2 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h @@ -0,0 +1,155 @@ +/** + * @file + * Memory pool API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_MEMP_H +#define LWIP_HDR_MEMP_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* run once with empty definition to handle all custom includes in lwippools.h */ +#define LWIP_MEMPOOL(name,num,size,desc) +#include "lwip/priv/memp_std.h" + +/** Create the list of all memory pools managed by memp. 
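A minimal sketch of the heap API declared in mem.h above; the buffer size and function name are placeholders:

#include <string.h>
#include "lwip/mem.h"

/* Allocate a scratch buffer from the lwIP heap, use it, and free it.
 * mem_malloc() returns NULL when the heap is exhausted, so the result
 * must always be checked. */
void example_heap_roundtrip(void)
{
  void *buf = mem_malloc((mem_size_t)128);
  if (buf != NULL) {
    memset(buf, 0, 128);
    mem_free(buf);
  }
}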
MEMP_MAX represents a NULL pool at the end */ +typedef enum { +#define LWIP_MEMPOOL(name,num,size,desc) MEMP_##name, +#include "lwip/priv/memp_std.h" + MEMP_MAX +} memp_t; + +#include "lwip/priv/memp_priv.h" +#include "lwip/stats.h" + +extern const struct memp_desc* const memp_pools[MEMP_MAX]; + +/** + * @ingroup mempool + * Declare prototype for private memory pool if it is used in multiple files + */ +#define LWIP_MEMPOOL_PROTOTYPE(name) extern const struct memp_desc memp_ ## name + +#if MEMP_MEM_MALLOC + +#define LWIP_MEMPOOL_DECLARE(name,num,size,desc) \ + LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(memp_stats_ ## name) \ + const struct memp_desc memp_ ## name = { \ + DECLARE_LWIP_MEMPOOL_DESC(desc) \ + LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(memp_stats_ ## name) \ + LWIP_MEM_ALIGN_SIZE(size) \ + }; + +#else /* MEMP_MEM_MALLOC */ + +/** + * @ingroup mempool + * Declare a private memory pool + * Private mempools example: + * .h: only when pool is used in multiple .c files: LWIP_MEMPOOL_PROTOTYPE(my_private_pool); + * .c: + * - in global variables section: LWIP_MEMPOOL_DECLARE(my_private_pool, 10, sizeof(foo), "Some description") + * - call ONCE before using pool (e.g. in some init() function): LWIP_MEMPOOL_INIT(my_private_pool); + * - allocate: void* my_new_mem = LWIP_MEMPOOL_ALLOC(my_private_pool); + * - free: LWIP_MEMPOOL_FREE(my_private_pool, my_new_mem); + * + * To relocate a pool, declare it as extern in cc.h. Example for GCC: + * extern u8_t \_\_attribute\_\_((section(".onchip_mem"))) memp_memory_my_private_pool_base[]; + */ +#define LWIP_MEMPOOL_DECLARE(name,num,size,desc) \ + LWIP_DECLARE_MEMORY_ALIGNED(memp_memory_ ## name ## _base, ((num) * (MEMP_SIZE + MEMP_ALIGN_SIZE(size)))); \ + \ + LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(memp_stats_ ## name) \ + \ + static struct memp *memp_tab_ ## name; \ + \ + const struct memp_desc memp_ ## name = { \ + DECLARE_LWIP_MEMPOOL_DESC(desc) \ + LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(memp_stats_ ## name) \ + LWIP_MEM_ALIGN_SIZE(size), \ + (num), \ + memp_memory_ ## name ## _base, \ + &memp_tab_ ## name \ + }; + +#endif /* MEMP_MEM_MALLOC */ + +/** + * @ingroup mempool + * Initialize a private memory pool + */ +#define LWIP_MEMPOOL_INIT(name) memp_init_pool(&memp_ ## name) +/** + * @ingroup mempool + * Allocate from a private memory pool + */ +#define LWIP_MEMPOOL_ALLOC(name) memp_malloc_pool(&memp_ ## name) +/** + * @ingroup mempool + * Free element from a private memory pool + */ +#define LWIP_MEMPOOL_FREE(name, x) memp_free_pool(&memp_ ## name, (x)) + +#if MEM_USE_POOLS +/** This structure is used to save the pool one element came from. + * This has to be defined here as it is required for pool size calculation. 
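The private-pool workflow documented above, written out as code; the pool name, element type, and pool size are hypothetical:

#include "lwip/memp.h"

struct foo { int value; };                       /* hypothetical element type */

LWIP_MEMPOOL_DECLARE(my_private_pool, 10, sizeof(struct foo), "Example pool")

void example_private_pool(void)
{
  struct foo *f;
  LWIP_MEMPOOL_INIT(my_private_pool);            /* call ONCE before first use */
  f = (struct foo *)LWIP_MEMPOOL_ALLOC(my_private_pool);
  if (f != NULL) {
    f->value = 42;
    LWIP_MEMPOOL_FREE(my_private_pool, f);
  }
}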
*/ +struct memp_malloc_helper +{ + memp_t poolnr; +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + u16_t size; +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +}; +#endif /* MEM_USE_POOLS */ + +void memp_init(void); + +#if MEMP_OVERFLOW_CHECK +void *memp_malloc_fn(memp_t type, const char* file, const int line); +#define memp_malloc(t) memp_malloc_fn((t), __FILE__, __LINE__) +#else +void *memp_malloc(memp_t type); +#endif +void memp_free(memp_t type, void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h new file mode 100644 index 000000000..7fa0797f2 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h @@ -0,0 +1,99 @@ +/** + * @file + * + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_MLD6_H +#define LWIP_HDR_MLD6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6_MLD && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** MLD group */ +struct mld_group { + /** next link */ + struct mld_group *next; + /** multicast address */ + ip6_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +#define MLD6_TMR_INTERVAL 100 /* Milliseconds */ + +err_t mld6_stop(struct netif *netif); +void mld6_report_groups(struct netif *netif); +void mld6_tmr(void); +struct mld_group *mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr); +void mld6_input(struct pbuf *p, struct netif *inp); +err_t mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr); +err_t mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr); +err_t mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr); +err_t mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr); + +/** @ingroup mld6 + * Get list head of MLD6 groups for netif. + * Note: The allnodes group IP is NOT in the list, since it must always + * be received for correct IPv6 operation. + * @see @ref netif_set_mld_mac_filter() + */ +#define netif_mld6_data(netif) ((struct mld_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6_MLD && LWIP_IPV6 */ + +#endif /* LWIP_HDR_MLD6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h new file mode 100644 index 000000000..c30e624f0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h @@ -0,0 +1,90 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
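A hedged sketch of joining an IPv6 multicast group with the MLD6 API above; the group address (ff02::2, all-routers) is a placeholder, and the call must run in the tcpip thread or with the lwIP core locked:

#include "lwip/mld6.h"
#include "lwip/netif.h"

err_t example_join_all_routers(struct netif *netif)
{
  ip6_addr_t group;
  /* ff02::2, built with the IP6_ADDR macro referenced by ip_addr.h above */
  IP6_ADDR(&group, PP_HTONL(0xff020000UL), 0, 0, PP_HTONL(0x00000002UL));
  return mld6_joingroup_netif(netif, &group);
}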
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ND6_H +#define LWIP_HDR_ND6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 second period */ +#define ND6_TMR_INTERVAL 1000 + +/** Router solicitations are sent in 4 second intervals (see RFC 4861, ch. 6.3.7) */ +#ifndef ND6_RTR_SOLICITATION_INTERVAL +#define ND6_RTR_SOLICITATION_INTERVAL 4000 +#endif + +struct pbuf; +struct netif; + +void nd6_tmr(void); +void nd6_input(struct pbuf *p, struct netif *inp); +void nd6_clear_destination_cache(void); +struct netif *nd6_find_route(const ip6_addr_t *ip6addr); +err_t nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp); +u16_t nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif); +#if LWIP_ND6_TCP_REACHABILITY_HINTS +void nd6_reachability_hint(const ip6_addr_t *ip6addr); +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ +void nd6_cleanup_netif(struct netif *netif); +#if LWIP_IPV6_MLD +void nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state); +#endif /* LWIP_IPV6_MLD */ +void nd6_restart_netif(struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_ND6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h new file mode 100644 index 000000000..42a911bca --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h @@ -0,0 +1,116 @@ +/** + * @file + * netbuf API (for netconn API) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETBUF_H +#define LWIP_HDR_NETBUF_H + +#include "lwip/opt.h" + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This netbuf has dest-addr/port set */ +#define NETBUF_FLAG_DESTADDR 0x01 +/** This netbuf includes a checksum */ +#define NETBUF_FLAG_CHKSUM 0x02 + +/** "Network buffer" - contains data and addressing info */ +struct netbuf { + struct pbuf *p, *ptr; + ip_addr_t addr; + u16_t port; +#if LWIP_NETBUF_RECVINFO || LWIP_CHECKSUM_ON_COPY + u8_t flags; + u16_t toport_chksum; +#if LWIP_NETBUF_RECVINFO + ip_addr_t toaddr; +#endif /* LWIP_NETBUF_RECVINFO */ +#endif /* LWIP_NETBUF_RECVINFO || LWIP_CHECKSUM_ON_COPY */ +}; + +/* Network buffer functions: */ +struct netbuf * netbuf_new (void); +void netbuf_delete (struct netbuf *buf); +void * netbuf_alloc (struct netbuf *buf, u16_t size); +void netbuf_free (struct netbuf *buf); +err_t netbuf_ref (struct netbuf *buf, + const void *dataptr, u16_t size); +void netbuf_chain (struct netbuf *head, struct netbuf *tail); + +err_t netbuf_data (struct netbuf *buf, + void **dataptr, u16_t *len); +s8_t netbuf_next (struct netbuf *buf); +void netbuf_first (struct netbuf *buf); + + +#define netbuf_copy_partial(buf, dataptr, len, offset) \ + pbuf_copy_partial((buf)->p, (dataptr), (len), (offset)) +#define netbuf_copy(buf,dataptr,len) netbuf_copy_partial(buf, dataptr, len, 0) +#define netbuf_take(buf, dataptr, len) pbuf_take((buf)->p, dataptr, len) +#define netbuf_len(buf) ((buf)->p->tot_len) +#define netbuf_fromaddr(buf) (&((buf)->addr)) +#define netbuf_set_fromaddr(buf, fromaddr) ip_addr_set(&((buf)->addr), fromaddr) +#define netbuf_fromport(buf) ((buf)->port) +#if LWIP_NETBUF_RECVINFO +#define netbuf_destaddr(buf) (&((buf)->toaddr)) +#define netbuf_set_destaddr(buf, destaddr) ip_addr_set(&((buf)->toaddr), destaddr) +#if LWIP_CHECKSUM_ON_COPY +#define netbuf_destport(buf) (((buf)->flags & NETBUF_FLAG_DESTADDR) ? 
(buf)->toport_chksum : 0) +#else /* LWIP_CHECKSUM_ON_COPY */ +#define netbuf_destport(buf) ((buf)->toport_chksum) +#endif /* LWIP_CHECKSUM_ON_COPY */ +#endif /* LWIP_NETBUF_RECVINFO */ +#if LWIP_CHECKSUM_ON_COPY +#define netbuf_set_chksum(buf, chksum) do { (buf)->flags = NETBUF_FLAG_CHKSUM; \ + (buf)->toport_chksum = chksum; } while(0) +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_NETBUF_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h new file mode 100644 index 000000000..d3d15dfac --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h @@ -0,0 +1,150 @@ +/** + * @file + * NETDB API (sockets) + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
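An illustrative netbuf round trip using the functions declared above; the payload size is arbitrary, and in real netconn code the buffer would typically be handed to a netconn send/recv call instead of being deleted immediately:

#include <string.h>
#include "lwip/netbuf.h"

void example_netbuf_roundtrip(void)
{
  struct netbuf *buf = netbuf_new();
  if (buf != NULL) {
    void *payload = netbuf_alloc(buf, 16);   /* allocates pbuf storage */
    if (payload != NULL) {
      memset(payload, 0xAB, 16);
    }
    netbuf_delete(buf);                      /* frees the netbuf and its pbufs */
  }
}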
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETDB_H +#define LWIP_HDR_NETDB_H + +#include "lwip/opt.h" + +#if LWIP_DNS && LWIP_SOCKET + +#include "lwip/arch.h" +#include "lwip/inet.h" +#include "lwip/sockets.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* some rarely used options */ +#ifndef LWIP_DNS_API_DECLARE_H_ERRNO +#define LWIP_DNS_API_DECLARE_H_ERRNO 1 +#endif + +#ifndef LWIP_DNS_API_DEFINE_ERRORS +#define LWIP_DNS_API_DEFINE_ERRORS 1 +#endif + +#ifndef LWIP_DNS_API_DEFINE_FLAGS +#define LWIP_DNS_API_DEFINE_FLAGS 1 +#endif + +#ifndef LWIP_DNS_API_DECLARE_STRUCTS +#define LWIP_DNS_API_DECLARE_STRUCTS 1 +#endif + +#if LWIP_DNS_API_DEFINE_ERRORS +/** Errors used by the DNS API functions, h_errno can be one of them */ +#define EAI_NONAME 200 +#define EAI_SERVICE 201 +#define EAI_FAIL 202 +#define EAI_MEMORY 203 +#define EAI_FAMILY 204 + +#define HOST_NOT_FOUND 210 +#define NO_DATA 211 +#define NO_RECOVERY 212 +#define TRY_AGAIN 213 +#endif /* LWIP_DNS_API_DEFINE_ERRORS */ + +#if LWIP_DNS_API_DEFINE_FLAGS +/* input flags for struct addrinfo */ +#define AI_PASSIVE 0x01 +#define AI_CANONNAME 0x02 +#define AI_NUMERICHOST 0x04 +#define AI_NUMERICSERV 0x08 +#define AI_V4MAPPED 0x10 +#define AI_ALL 0x20 +#define AI_ADDRCONFIG 0x40 +#endif /* LWIP_DNS_API_DEFINE_FLAGS */ + +#if LWIP_DNS_API_DECLARE_STRUCTS +struct hostent { + char *h_name; /* Official name of the host. */ + char **h_aliases; /* A pointer to an array of pointers to alternative host names, + terminated by a null pointer. */ + int h_addrtype; /* Address type. */ + int h_length; /* The length, in bytes, of the address. */ + char **h_addr_list; /* A pointer to an array of pointers to network addresses (in + network byte order) for the host, terminated by a null pointer. */ +#define h_addr h_addr_list[0] /* for backward compatibility */ +}; + +struct addrinfo { + int ai_flags; /* Input flags. */ + int ai_family; /* Address family of socket. */ + int ai_socktype; /* Socket type. */ + int ai_protocol; /* Protocol of socket. */ + socklen_t ai_addrlen; /* Length of socket address. */ + struct sockaddr *ai_addr; /* Socket address of socket. */ + char *ai_canonname; /* Canonical name of service location. */ + struct addrinfo *ai_next; /* Pointer to next in list. 
*/ +}; +#endif /* LWIP_DNS_API_DECLARE_STRUCTS */ + +#define NETDB_ELEM_SIZE (sizeof(struct addrinfo) + sizeof(struct sockaddr_storage) + DNS_MAX_NAME_LENGTH + 1) + +#if LWIP_DNS_API_DECLARE_H_ERRNO +/* application accessible error code set by the DNS API functions */ +extern int h_errno; +#endif /* LWIP_DNS_API_DECLARE_H_ERRNO*/ + +struct hostent *lwip_gethostbyname(const char *name); +int lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop); +void lwip_freeaddrinfo(struct addrinfo *ai); +int lwip_getaddrinfo(const char *nodename, + const char *servname, + const struct addrinfo *hints, + struct addrinfo **res); + +#if LWIP_COMPAT_SOCKETS +/** @ingroup netdbapi */ +#define gethostbyname(name) lwip_gethostbyname(name) +/** @ingroup netdbapi */ +#define gethostbyname_r(name, ret, buf, buflen, result, h_errnop) \ + lwip_gethostbyname_r(name, ret, buf, buflen, result, h_errnop) +/** @ingroup netdbapi */ +#define freeaddrinfo(addrinfo) lwip_freeaddrinfo(addrinfo) +/** @ingroup netdbapi */ +#define getaddrinfo(nodname, servname, hints, res) \ + lwip_getaddrinfo(nodname, servname, hints, res) +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS && LWIP_SOCKET */ + +#endif /* LWIP_HDR_NETDB_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h new file mode 100644 index 000000000..911196ab3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h @@ -0,0 +1,669 @@ +/** + * @file + * netif API (to be used from TCPIP thread) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
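A hedged sketch of name resolution with the netdb API above; the host name and service are placeholders, and LWIP_DNS plus LWIP_SOCKET must be enabled for these functions to be built:

#include <string.h>
#include "lwip/netdb.h"

int example_resolve(void)
{
  struct addrinfo hints;
  struct addrinfo *res = NULL;
  int err;

  memset(&hints, 0, sizeof(hints));
  hints.ai_socktype = SOCK_STREAM;

  err = lwip_getaddrinfo("example.local", "80", &hints, &res);
  if ((err == 0) && (res != NULL)) {
    /* use res->ai_addr / res->ai_addrlen with the socket API ... */
    lwip_freeaddrinfo(res);   /* always free the returned list */
  }
  return err;
}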
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETIF_H +#define LWIP_HDR_NETIF_H + +#include "lwip/opt.h" + +#define ENABLE_LOOPBACK (LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF) + +#include "lwip/err.h" + +#include "lwip/ip_addr.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/stats.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Throughout this file, IP addresses are expected to be in + * the same byte order as in IP_PCB. */ + +/** Must be the maximum of all used hardware address lengths + across all types of interfaces in use. + This does not have to be changed, normally. */ +#ifndef NETIF_MAX_HWADDR_LEN +#define NETIF_MAX_HWADDR_LEN 6U +#endif + +/** The size of a fully constructed netif name which the + * netif can be identified by in APIs. Composed of + * 2 chars, 3 (max) digits, and 1 \0 + */ +#define NETIF_NAMESIZE 6 + +/** + * @defgroup netif_flags Flags + * @ingroup netif + * @{ + */ + +/** Whether the network interface is 'up'. This is + * a software flag used to control whether this network + * interface is enabled and processes traffic. + * It must be set by the startup code before this netif can be used + * (also for dhcp/autoip). + */ +#define NETIF_FLAG_UP 0x01U +/** If set, the netif has broadcast capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_BROADCAST 0x02U +/** If set, the interface has an active link + * (set by the network interface driver). + * Either set by the netif driver in its init function (if the link + * is up at that time) or at a later point once the link comes up + * (if link detection is supported by the hardware). */ +#define NETIF_FLAG_LINK_UP 0x04U +/** If set, the netif is an ethernet device using ARP. + * Set by the netif driver in its init function. + * Used to check input packet types and use of DHCP. */ +#define NETIF_FLAG_ETHARP 0x08U +/** If set, the netif is an ethernet device. It might not use + * ARP or TCP/IP if it is used for PPPoE only. + */ +#define NETIF_FLAG_ETHERNET 0x10U +/** If set, the netif has IGMP capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_IGMP 0x20U +/** If set, the netif has MLD6 capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_MLD6 0x40U + +/** + * @} + */ + +enum lwip_internal_netif_client_data_index +{ +#if LWIP_IPV4 +#if LWIP_DHCP + LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, +#endif +#if LWIP_AUTOIP + LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, +#endif +#if LWIP_IGMP + LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, +#endif +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +#if LWIP_IPV6_DHCP6 + LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, +#endif +#if LWIP_IPV6_MLD + LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, +#endif +#endif /* LWIP_IPV6 */ + LWIP_NETIF_CLIENT_DATA_INDEX_MAX +}; + +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define NETIF_CHECKSUM_GEN_IP 0x0001 +#define NETIF_CHECKSUM_GEN_UDP 0x0002 +#define NETIF_CHECKSUM_GEN_TCP 0x0004 +#define NETIF_CHECKSUM_GEN_ICMP 0x0008 +#define NETIF_CHECKSUM_GEN_ICMP6 0x0010 +#define NETIF_CHECKSUM_CHECK_IP 0x0100 +#define NETIF_CHECKSUM_CHECK_UDP 0x0200 +#define NETIF_CHECKSUM_CHECK_TCP 0x0400 +#define NETIF_CHECKSUM_CHECK_ICMP 0x0800 +#define NETIF_CHECKSUM_CHECK_ICMP6 0x1000 +#define NETIF_CHECKSUM_ENABLE_ALL 0xFFFF +#define NETIF_CHECKSUM_DISABLE_ALL 0x0000 +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ + +struct netif; + +/** MAC Filter Actions, these are passed to a netif's igmp_mac_filter or + * mld_mac_filter callback function. 
*/ +enum netif_mac_filter_action { + /** Delete a filter entry */ + NETIF_DEL_MAC_FILTER = 0, + /** Add a filter entry */ + NETIF_ADD_MAC_FILTER = 1 +}; + +/** Function prototype for netif init functions. Set up flags and output/linkoutput + * callback functions in this function. + * + * @param netif The netif to initialize + */ +typedef err_t (*netif_init_fn)(struct netif *netif); +/** Function prototype for netif->input functions. This function is saved as 'input' + * callback function in the netif struct. Call it when a packet has been received. + * + * @param p The received packet, copied into a pbuf + * @param inp The netif which received the packet + * @return ERR_OK if the packet was handled + * != ERR_OK is the packet was NOT handled, in this case, the caller has + * to free the pbuf + */ +typedef err_t (*netif_input_fn)(struct pbuf *p, struct netif *inp); + +#if LWIP_IPV4 +/** Function prototype for netif->output functions. Called by lwIP when a packet + * shall be sent. For ethernet netif, set this to 'etharp_output' and set + * 'linkoutput'. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (p->payload points to IP header) + * @param ipaddr The IP address to which the packet shall be sent + */ +typedef err_t (*netif_output_fn)(struct netif *netif, struct pbuf *p, + const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 +/** Function prototype for netif->output_ip6 functions. Called by lwIP when a packet + * shall be sent. For ethernet netif, set this to 'ethip6_output' and set + * 'linkoutput'. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (p->payload points to IP header) + * @param ipaddr The IPv6 address to which the packet shall be sent + */ +typedef err_t (*netif_output_ip6_fn)(struct netif *netif, struct pbuf *p, + const ip6_addr_t *ipaddr); +#endif /* LWIP_IPV6 */ + +/** Function prototype for netif->linkoutput functions. Only used for ethernet + * netifs. This function is called by ARP when a packet shall be sent. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (raw ethernet packet) + */ +typedef err_t (*netif_linkoutput_fn)(struct netif *netif, struct pbuf *p); +/** Function prototype for netif status- or link-callback functions. */ +typedef void (*netif_status_callback_fn)(struct netif *netif); +#if LWIP_IPV4 && LWIP_IGMP +/** Function prototype for netif igmp_mac_filter functions */ +typedef err_t (*netif_igmp_mac_filter_fn)(struct netif *netif, + const ip4_addr_t *group, enum netif_mac_filter_action action); +#endif /* LWIP_IPV4 && LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD +/** Function prototype for netif mld_mac_filter functions */ +typedef err_t (*netif_mld_mac_filter_fn)(struct netif *netif, + const ip6_addr_t *group, enum netif_mac_filter_action action); +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + +#if LWIP_DHCP || LWIP_AUTOIP || LWIP_IGMP || LWIP_IPV6_MLD || LWIP_IPV6_DHCP6 || (LWIP_NUM_NETIF_CLIENT_DATA > 0) +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +u8_t netif_alloc_client_data_id(void); +#endif +/** @ingroup netif_cd + * Set client data. Obtain ID from netif_alloc_client_data_id(). + */ +#define netif_set_client_data(netif, id, data) netif_get_client_data(netif, id) = (data) +/** @ingroup netif_cd + * Get client data. Obtain ID from netif_alloc_client_data_id(). 
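A sketch of the client-data hooks described above; the slot id and driver state are hypothetical, and LWIP_NUM_NETIF_CLIENT_DATA must be greater than 0 for netif_alloc_client_data_id() to be available:

#include "lwip/netif.h"

static u8_t example_client_id;   /* hypothetical slot, reserved once at startup */

void example_attach_state(struct netif *netif, void *driver_state)
{
  example_client_id = netif_alloc_client_data_id();
  netif_set_client_data(netif, example_client_id, driver_state);
  /* later: void *s = netif_get_client_data(netif, example_client_id); */
}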
+ */ +#define netif_get_client_data(netif, id) (netif)->client_data[(id)] +#endif + +#if (LWIP_IPV4 && LWIP_ARP && (ARP_TABLE_SIZE > 0x7f)) || (LWIP_IPV6 && (LWIP_ND6_NUM_DESTINATIONS > 0x7f)) +typedef u16_t netif_addr_idx_t; +#define NETIF_ADDR_IDX_MAX 0x7FFF +#else +typedef u8_t netif_addr_idx_t; +#define NETIF_ADDR_IDX_MAX 0x7F +#endif + +#if LWIP_NETIF_HWADDRHINT +#define LWIP_NETIF_USE_HINTS 1 +struct netif_hint { + netif_addr_idx_t addr_hint; +}; +#else /* LWIP_NETIF_HWADDRHINT */ +#define LWIP_NETIF_USE_HINTS 0 +#endif /* LWIP_NETIF_HWADDRHINT */ + +/** Generic data structure used for all lwIP network interfaces. + * The following fields should be filled in by the initialization + * function for the device driver: hwaddr_len, hwaddr[], mtu, flags */ +struct netif { +#if !LWIP_SINGLE_NETIF + /** pointer to next in linked list */ + struct netif *next; +#endif + +#if LWIP_IPV4 + /** IP address configuration in network byte order */ + ip_addr_t ip_addr; + ip_addr_t netmask; + ip_addr_t gw; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Array of IPv6 addresses for this netif. */ + ip_addr_t ip6_addr[LWIP_IPV6_NUM_ADDRESSES]; + /** The state of each IPv6 address (Tentative, Preferred, etc). + * @see ip6_addr.h */ + u8_t ip6_addr_state[LWIP_IPV6_NUM_ADDRESSES]; +#if LWIP_IPV6_ADDRESS_LIFETIMES + /** Remaining valid and preferred lifetime of each IPv6 address, in seconds. + * For valid lifetimes, the special value of IP6_ADDR_LIFE_STATIC (0) + * indicates the address is static and has no lifetimes. */ + u32_t ip6_addr_valid_life[LWIP_IPV6_NUM_ADDRESSES]; + u32_t ip6_addr_pref_life[LWIP_IPV6_NUM_ADDRESSES]; +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ +#endif /* LWIP_IPV6 */ + /** This function is called by the network device driver + * to pass a packet up the TCP/IP stack. */ + netif_input_fn input; +#if LWIP_IPV4 + /** This function is called by the IP module when it wants + * to send a packet on the interface. This function typically + * first resolves the hardware address, then sends the packet. + * For ethernet physical layer, this is usually etharp_output() */ + netif_output_fn output; +#endif /* LWIP_IPV4 */ + /** This function is called by ethernet_output() when it wants + * to send a packet on the interface. This function outputs + * the pbuf as-is on the link medium. */ + netif_linkoutput_fn linkoutput; +#if LWIP_IPV6 + /** This function is called by the IPv6 module when it wants + * to send a packet on the interface. This function typically + * first resolves the hardware address, then sends the packet. + * For ethernet physical layer, this is usually ethip6_output() */ + netif_output_ip6_fn output_ip6; +#endif /* LWIP_IPV6 */ +#if LWIP_NETIF_STATUS_CALLBACK + /** This function is called when the netif state is set to up or down + */ + netif_status_callback_fn status_callback; +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_LINK_CALLBACK + /** This function is called when the netif link is set to up or down + */ + netif_status_callback_fn link_callback; +#endif /* LWIP_NETIF_LINK_CALLBACK */ +#if LWIP_NETIF_REMOVE_CALLBACK + /** This function is called when the netif has been removed */ + netif_status_callback_fn remove_callback; +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + /** This field can be set by the device driver and could point + * to state information for the device. 
*/ + void *state; +#ifdef netif_get_client_data + void* client_data[LWIP_NETIF_CLIENT_DATA_INDEX_MAX + LWIP_NUM_NETIF_CLIENT_DATA]; +#endif +#if LWIP_NETIF_HOSTNAME + /* the hostname for this netif, NULL is a valid value */ + const char* hostname; +#endif /* LWIP_NETIF_HOSTNAME */ +#if LWIP_CHECKSUM_CTRL_PER_NETIF + u16_t chksum_flags; +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ + /** maximum transfer unit (in bytes) */ + u16_t mtu; +#if LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES + /** maximum transfer unit (in bytes), updated by RA */ + u16_t mtu6; +#endif /* LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES */ + /** link level hardware address of this interface */ + u8_t hwaddr[NETIF_MAX_HWADDR_LEN]; + /** number of bytes used in hwaddr */ + u8_t hwaddr_len; + /** flags (@see @ref netif_flags) */ + u8_t flags; + /** descriptive abbreviation */ + char name[2]; + /** number of this interface. Used for @ref if_api and @ref netifapi_netif, + * as well as for IPv6 zones */ + u8_t num; +#if LWIP_IPV6_AUTOCONFIG + /** is this netif enabled for IPv6 autoconfiguration */ + u8_t ip6_autoconfig_enabled; +#endif /* LWIP_IPV6_AUTOCONFIG */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /** Number of Router Solicitation messages that remain to be sent. */ + u8_t rs_count; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +#if MIB2_STATS + /** link type (from "snmp_ifType" enum from snmp_mib2.h) */ + u8_t link_type; + /** (estimate) link speed */ + u32_t link_speed; + /** timestamp at last change made (up/down) */ + u32_t ts; + /** counters */ + struct stats_mib2_netif_ctrs mib2_counters; +#endif /* MIB2_STATS */ +#if LWIP_IPV4 && LWIP_IGMP + /** This function could be called to add or delete an entry in the multicast + filter table of the ethernet MAC.*/ + netif_igmp_mac_filter_fn igmp_mac_filter; +#endif /* LWIP_IPV4 && LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD + /** This function could be called to add or delete an entry in the IPv6 multicast + filter table of the ethernet MAC. */ + netif_mld_mac_filter_fn mld_mac_filter; +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ +#if LWIP_NETIF_USE_HINTS + struct netif_hint *hints; +#endif /* LWIP_NETIF_USE_HINTS */ +#if ENABLE_LOOPBACK + /* List of packets to be queued for ourselves. */ + struct pbuf *loop_first; + struct pbuf *loop_last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t loop_cnt_current; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ +#endif /* ENABLE_LOOPBACK */ +}; + +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define NETIF_SET_CHECKSUM_CTRL(netif, chksumflags) do { \ + (netif)->chksum_flags = chksumflags; } while(0) +#define IF__NETIF_CHECKSUM_ENABLED(netif, chksumflag) if (((netif) == NULL) || (((netif)->chksum_flags & (chksumflag)) != 0)) +#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#define NETIF_SET_CHECKSUM_CTRL(netif, chksumflags) +#define IF__NETIF_CHECKSUM_ENABLED(netif, chksumflag) +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ + +#if LWIP_SINGLE_NETIF +#define NETIF_FOREACH(netif) if (((netif) = netif_default) != NULL) +#else /* LWIP_SINGLE_NETIF */ +/** The list of network interfaces. */ +extern struct netif *netif_list; +#define NETIF_FOREACH(netif) for ((netif) = netif_list; (netif) != NULL; (netif) = (netif)->next) +#endif /* LWIP_SINGLE_NETIF */ +/** The default network interface. 
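An illustrative walk over the interface list using NETIF_FOREACH from above; call it from the tcpip thread (or with the core locked), since the list is not otherwise protected:

#include "lwip/netif.h"

int example_count_up_interfaces(void)
{
  struct netif *nif;
  int up = 0;
  NETIF_FOREACH(nif) {
    if (nif->flags & NETIF_FLAG_UP) {   /* see NETIF_FLAG_UP above */
      up++;
    }
  }
  return up;
}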
*/ +extern struct netif *netif_default; + +void netif_init(void); + +struct netif *netif_add_noaddr(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input); + +#if LWIP_IPV4 +struct netif *netif_add(struct netif *netif, + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, + void *state, netif_init_fn init, netif_input_fn input); +void netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw); +#else /* LWIP_IPV4 */ +struct netif *netif_add(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input); +#endif /* LWIP_IPV4 */ +void netif_remove(struct netif * netif); + +/* Returns a network interface given its name. The name is of the form + "et0", where the first two letters are the "name" field in the + netif structure, and the digit is in the num field in the same + structure. */ +struct netif *netif_find(const char *name); + +void netif_set_default(struct netif *netif); + +#if LWIP_IPV4 +void netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr); +void netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask); +void netif_set_gw(struct netif *netif, const ip4_addr_t *gw); +/** @ingroup netif_ip4 */ +#define netif_ip4_addr(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->ip_addr))) +/** @ingroup netif_ip4 */ +#define netif_ip4_netmask(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->netmask))) +/** @ingroup netif_ip4 */ +#define netif_ip4_gw(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->gw))) +/** @ingroup netif_ip4 */ +#define netif_ip_addr4(netif) ((const ip_addr_t*)&((netif)->ip_addr)) +/** @ingroup netif_ip4 */ +#define netif_ip_netmask4(netif) ((const ip_addr_t*)&((netif)->netmask)) +/** @ingroup netif_ip4 */ +#define netif_ip_gw4(netif) ((const ip_addr_t*)&((netif)->gw)) +#endif /* LWIP_IPV4 */ + +#define netif_set_flags(netif, set_flags) do { (netif)->flags = (u8_t)((netif)->flags | (set_flags)); } while(0) +#define netif_clear_flags(netif, clr_flags) do { (netif)->flags = (u8_t)((netif)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define netif_is_flag_set(nefif, flag) (((netif)->flags & (flag)) != 0) + +void netif_set_up(struct netif *netif); +void netif_set_down(struct netif *netif); +/** @ingroup netif + * Ask if an interface is up + */ +#define netif_is_up(netif) (((netif)->flags & NETIF_FLAG_UP) ? (u8_t)1 : (u8_t)0) + +#if LWIP_NETIF_STATUS_CALLBACK +void netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback); +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_REMOVE_CALLBACK +void netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback); +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +void netif_set_link_up(struct netif *netif); +void netif_set_link_down(struct netif *netif); +/** Ask if a link is up */ +#define netif_is_link_up(netif) (((netif)->flags & NETIF_FLAG_LINK_UP) ? (u8_t)1 : (u8_t)0) + +#if LWIP_NETIF_LINK_CALLBACK +void netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback); +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if LWIP_NETIF_HOSTNAME +/** @ingroup netif */ +#define netif_set_hostname(netif, name) do { if((netif) != NULL) { (netif)->hostname = name; }}while(0) +/** @ingroup netif */ +#define netif_get_hostname(netif) (((netif) != NULL) ? 
((netif)->hostname) : NULL) +#endif /* LWIP_NETIF_HOSTNAME */ + +#if LWIP_IGMP +/** @ingroup netif */ +#define netif_set_igmp_mac_filter(netif, function) do { if((netif) != NULL) { (netif)->igmp_mac_filter = function; }}while(0) +#define netif_get_igmp_mac_filter(netif) (((netif) != NULL) ? ((netif)->igmp_mac_filter) : NULL) +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6 && LWIP_IPV6_MLD +/** @ingroup netif */ +#define netif_set_mld_mac_filter(netif, function) do { if((netif) != NULL) { (netif)->mld_mac_filter = function; }}while(0) +#define netif_get_mld_mac_filter(netif) (((netif) != NULL) ? ((netif)->mld_mac_filter) : NULL) +#define netif_mld_mac_filter(netif, addr, action) do { if((netif) && (netif)->mld_mac_filter) { (netif)->mld_mac_filter((netif), (addr), (action)); }}while(0) +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + +#if ENABLE_LOOPBACK +err_t netif_loop_output(struct netif *netif, struct pbuf *p); +void netif_poll(struct netif *netif); +#if !LWIP_NETIF_LOOPBACK_MULTITHREADING +void netif_poll_all(void); +#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +err_t netif_input(struct pbuf *p, struct netif *inp); + +#if LWIP_IPV6 +/** @ingroup netif_ip6 */ +#define netif_ip_addr6(netif, i) ((const ip_addr_t*)(&((netif)->ip6_addr[i]))) +/** @ingroup netif_ip6 */ +#define netif_ip6_addr(netif, i) ((const ip6_addr_t*)ip_2_ip6(&((netif)->ip6_addr[i]))) +void netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6); +void netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3); +#define netif_ip6_addr_state(netif, i) ((netif)->ip6_addr_state[i]) +void netif_ip6_addr_set_state(struct netif* netif, s8_t addr_idx, u8_t state); +s8_t netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr); +void netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit); +err_t netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx); +#define netif_set_ip6_autoconfig_enabled(netif, action) do { if(netif) { (netif)->ip6_autoconfig_enabled = (action); }}while(0) +#if LWIP_IPV6_ADDRESS_LIFETIMES +#define netif_ip6_addr_valid_life(netif, i) \ + (((netif) != NULL) ? ((netif)->ip6_addr_valid_life[i]) : IP6_ADDR_LIFE_STATIC) +#define netif_ip6_addr_set_valid_life(netif, i, secs) \ + do { if (netif != NULL) { (netif)->ip6_addr_valid_life[i] = (secs); }} while (0) +#define netif_ip6_addr_pref_life(netif, i) \ + (((netif) != NULL) ? 
((netif)->ip6_addr_pref_life[i]) : IP6_ADDR_LIFE_STATIC) +#define netif_ip6_addr_set_pref_life(netif, i, secs) \ + do { if (netif != NULL) { (netif)->ip6_addr_pref_life[i] = (secs); }} while (0) +#define netif_ip6_addr_isstatic(netif, i) \ + (netif_ip6_addr_valid_life((netif), (i)) == IP6_ADDR_LIFE_STATIC) +#else /* !LWIP_IPV6_ADDRESS_LIFETIMES */ +#define netif_ip6_addr_isstatic(netif, i) (1) /* all addresses are static */ +#endif /* !LWIP_IPV6_ADDRESS_LIFETIMES */ +#if LWIP_ND6_ALLOW_RA_UPDATES +#define netif_mtu6(netif) ((netif)->mtu6) +#else /* LWIP_ND6_ALLOW_RA_UPDATES */ +#define netif_mtu6(netif) ((netif)->mtu) +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ +#endif /* LWIP_IPV6 */ + +#if LWIP_NETIF_USE_HINTS +#define NETIF_SET_HINTS(netif, netifhint) (netif)->hints = (netifhint) +#define NETIF_RESET_HINTS(netif) (netif)->hints = NULL +#else /* LWIP_NETIF_USE_HINTS */ +#define NETIF_SET_HINTS(netif, netifhint) +#define NETIF_RESET_HINTS(netif) +#endif /* LWIP_NETIF_USE_HINTS */ + +u8_t netif_name_to_index(const char *name); +char * netif_index_to_name(u8_t idx, char *name); +struct netif* netif_get_by_index(u8_t idx); + +/* Interface indexes always start at 1 per RFC 3493, section 4, num starts at 0 (internal index is 0..254)*/ +#define netif_get_index(netif) ((u8_t)((netif)->num + 1)) +#define NETIF_NO_INDEX (0) + +/** + * @ingroup netif + * Extended netif status callback (NSC) reasons flags. + * May be extended in the future! + */ +typedef u16_t netif_nsc_reason_t; + +/* used for initialization only */ +#define LWIP_NSC_NONE 0x0000 +/** netif was added. arg: NULL. Called AFTER netif was added. */ +#define LWIP_NSC_NETIF_ADDED 0x0001 +/** netif was removed. arg: NULL. Called BEFORE netif is removed. */ +#define LWIP_NSC_NETIF_REMOVED 0x0002 +/** link changed */ +#define LWIP_NSC_LINK_CHANGED 0x0004 +/** netif administrative status changed.\n + * up is called AFTER netif is set up.\n + * down is called BEFORE the netif is actually set down. */ +#define LWIP_NSC_STATUS_CHANGED 0x0008 +/** IPv4 address has changed */ +#define LWIP_NSC_IPV4_ADDRESS_CHANGED 0x0010 +/** IPv4 gateway has changed */ +#define LWIP_NSC_IPV4_GATEWAY_CHANGED 0x0020 +/** IPv4 netmask has changed */ +#define LWIP_NSC_IPV4_NETMASK_CHANGED 0x0040 +/** called AFTER IPv4 address/gateway/netmask changes have been applied */ +#define LWIP_NSC_IPV4_SETTINGS_CHANGED 0x0080 +/** IPv6 address was added */ +#define LWIP_NSC_IPV6_SET 0x0100 +/** IPv6 address state has changed */ +#define LWIP_NSC_IPV6_ADDR_STATE_CHANGED 0x0200 + +/** @ingroup netif + * Argument supplied to netif_ext_callback_fn. 
+ */ +typedef union +{ + /** Args to LWIP_NSC_LINK_CHANGED callback */ + struct link_changed_s + { + /** 1: up; 0: down */ + u8_t state; + } link_changed; + /** Args to LWIP_NSC_STATUS_CHANGED callback */ + struct status_changed_s + { + /** 1: up; 0: down */ + u8_t state; + } status_changed; + /** Args to LWIP_NSC_IPV4_ADDRESS_CHANGED|LWIP_NSC_IPV4_GATEWAY_CHANGED|LWIP_NSC_IPV4_NETMASK_CHANGED|LWIP_NSC_IPV4_SETTINGS_CHANGED callback */ + struct ipv4_changed_s + { + /** Old IPv4 address */ + const ip_addr_t* old_address; + const ip_addr_t* old_netmask; + const ip_addr_t* old_gw; + } ipv4_changed; + /** Args to LWIP_NSC_IPV6_SET callback */ + struct ipv6_set_s + { + /** Index of changed IPv6 address */ + s8_t addr_index; + /** Old IPv6 address */ + const ip_addr_t* old_address; + } ipv6_set; + /** Args to LWIP_NSC_IPV6_ADDR_STATE_CHANGED callback */ + struct ipv6_addr_state_changed_s + { + /** Index of affected IPv6 address */ + s8_t addr_index; + /** Old IPv6 address state */ + u8_t old_state; + /** Affected IPv6 address */ + const ip_addr_t* address; + } ipv6_addr_state_changed; +} netif_ext_callback_args_t; + +/** + * @ingroup netif + * Function used for extended netif status callbacks + * Note: When parsing reason argument, keep in mind that more reasons may be added in the future! + * @param netif netif that is affected by change + * @param reason change reason + * @param args depends on reason, see reason description + */ +typedef void (*netif_ext_callback_fn)(struct netif* netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t* args); + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +struct netif_ext_callback; +typedef struct netif_ext_callback +{ + netif_ext_callback_fn callback_fn; + struct netif_ext_callback* next; +} netif_ext_callback_t; + +#define NETIF_DECLARE_EXT_CALLBACK(name) static netif_ext_callback_t name; +void netif_add_ext_callback(netif_ext_callback_t* callback, netif_ext_callback_fn fn); +void netif_remove_ext_callback(netif_ext_callback_t* callback); +void netif_invoke_ext_callback(struct netif* netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t* args); +#else +#define NETIF_DECLARE_EXT_CALLBACK(name) +#define netif_add_ext_callback(callback, fn) +#define netif_remove_ext_callback(callback) +#define netif_invoke_ext_callback(netif, reason, args) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h new file mode 100644 index 000000000..e0631791b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h @@ -0,0 +1,161 @@ +/** + * @file + * netif API (to be used from non-TCPIP threads) + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
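A hedged sketch of registering an extended status callback with the API above; it requires LWIP_NETIF_EXT_STATUS_CALLBACK, and the handler body is illustrative:

#include "lwip/netif.h"

NETIF_DECLARE_EXT_CALLBACK(example_nsc)

static void example_nsc_fn(struct netif *netif, netif_nsc_reason_t reason,
                           const netif_ext_callback_args_t *args)
{
  (void)netif;
  if (reason & LWIP_NSC_LINK_CHANGED) {
    /* args->link_changed.state is 1 for link up, 0 for link down */
    u8_t link_is_up = args->link_changed.state;
    (void)link_is_up;
  }
}

void example_register_callback(void)
{
  netif_add_ext_callback(&example_nsc, example_nsc_fn);
}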
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_NETIFAPI_H +#define LWIP_HDR_NETIFAPI_H + +#include "lwip/opt.h" + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sys.h" +#include "lwip/netif.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/priv/api_msg.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* API for application */ +#if LWIP_ARP && LWIP_IPV4 +/* Used for netfiapi_arp_* APIs */ +enum netifapi_arp_entry { + NETIFAPI_ARP_PERM /* Permanent entry */ + /* Other entry types can be added here */ +}; + +/** @ingroup netifapi_arp */ +err_t netifapi_arp_add(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, enum netifapi_arp_entry type); +/** @ingroup netifapi_arp */ +err_t netifapi_arp_remove(const ip4_addr_t *ipaddr, enum netifapi_arp_entry type); +#endif /* LWIP_ARP && LWIP_IPV4 */ + +err_t netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input); + +#if LWIP_IPV4 +err_t netifapi_netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, const ip4_addr_t *gw); +#endif /* LWIP_IPV4*/ + +err_t netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc); + +/** @ingroup netifapi_netif */ +err_t netifapi_netif_name_to_index(const char *name, u8_t *index); +/** @ingroup netifapi_netif */ +err_t netifapi_netif_index_to_name(u8_t index, char *name); + +/** @ingroup netifapi_netif + * @see netif_remove() + */ +#define netifapi_netif_remove(n) netifapi_netif_common(n, netif_remove, NULL) +/** @ingroup netifapi_netif + * @see netif_set_up() + */ +#define netifapi_netif_set_up(n) netifapi_netif_common(n, netif_set_up, NULL) +/** @ingroup netifapi_netif + * @see netif_set_down() + */ +#define netifapi_netif_set_down(n) netifapi_netif_common(n, netif_set_down, NULL) +/** @ingroup netifapi_netif + * @see netif_set_default() + */ +#define netifapi_netif_set_default(n) netifapi_netif_common(n, netif_set_default, NULL) +/** @ingroup netifapi_netif + * @see netif_set_link_up() + */ +#define netifapi_netif_set_link_up(n) netifapi_netif_common(n, netif_set_link_up, NULL) +/** @ingroup netifapi_netif + * @see netif_set_link_down() + */ +#define netifapi_netif_set_link_down(n) netifapi_netif_common(n, netif_set_link_down, NULL) + +/** + * @defgroup netifapi_dhcp4 DHCPv4 + * @ingroup netifapi + * To be called from non-TCPIP threads + */ +/** @ingroup netifapi_dhcp4 + * @see dhcp_start() + */ +#define netifapi_dhcp_start(n) netifapi_netif_common(n, NULL, dhcp_start) +/** + * @ingroup 
netifapi_dhcp4 + * @deprecated Use netifapi_dhcp_release_and_stop() instead. + */ +#define netifapi_dhcp_stop(n) netifapi_netif_common(n, dhcp_stop, NULL) +/** @ingroup netifapi_dhcp4 + * @see dhcp_inform() + */ +#define netifapi_dhcp_inform(n) netifapi_netif_common(n, dhcp_inform, NULL) +/** @ingroup netifapi_dhcp4 + * @see dhcp_renew() + */ +#define netifapi_dhcp_renew(n) netifapi_netif_common(n, NULL, dhcp_renew) +/** + * @ingroup netifapi_dhcp4 + * @deprecated Use netifapi_dhcp_release_and_stop() instead. + */ +#define netifapi_dhcp_release(n) netifapi_netif_common(n, NULL, dhcp_release) +/** @ingroup netifapi_dhcp4 + * @see dhcp_release_and_stop() + */ +#define netifapi_dhcp_release_and_stop(n) netifapi_netif_common(n, dhcp_release_and_stop, NULL) + +/** + * @defgroup netifapi_autoip AUTOIP + * @ingroup netifapi + * To be called from non-TCPIP threads + */ +/** @ingroup netifapi_autoip + * @see autoip_start() + */ +#define netifapi_autoip_start(n) netifapi_netif_common(n, NULL, autoip_start) +/** @ingroup netifapi_autoip + * @see autoip_stop() + */ +#define netifapi_autoip_stop(n) netifapi_netif_common(n, NULL, autoip_stop) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETIF_API */ + +#endif /* LWIP_HDR_NETIFAPI_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h new file mode 100644 index 000000000..82c420c16 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h @@ -0,0 +1,3519 @@ +/** + * @file + * + * lwIP Options Configuration + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* + * NOTE: || defined __DOXYGEN__ is a workaround for doxygen bug - + * without this, doxygen does not see the actual #define + */ + +#if !defined LWIP_HDR_OPT_H +#define LWIP_HDR_OPT_H + +/* + * Include user defined options first. 
Anything not defined in these files + * will be set to standard values. Override anything you don't like! + */ +#include "lwipopts.h" +#include "lwip/debug.h" + +/** + * @defgroup lwip_opts Options (lwipopts.h) + * @ingroup lwip + * + * @defgroup lwip_opts_debug Debugging + * @ingroup lwip_opts + * + * @defgroup lwip_opts_infrastructure Infrastructure + * @ingroup lwip_opts + * + * @defgroup lwip_opts_callback Callback-style APIs + * @ingroup lwip_opts + * + * @defgroup lwip_opts_threadsafe_apis Thread-safe APIs + * @ingroup lwip_opts + */ + + /* + ------------------------------------ + -------------- NO SYS -------------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_nosys NO_SYS + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * NO_SYS==1: Use lwIP without OS-awareness (no thread, semaphores, mutexes or + * mboxes). This means threaded APIs cannot be used (socket, netconn, + * i.e. everything in the 'api' folder), only the callback-style raw API is + * available (and you have to watch out for yourself that you don't access + * lwIP functions/structures from more than one context at a time!) + */ +#if !defined NO_SYS || defined __DOXYGEN__ +#define NO_SYS 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_timers Timers + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_TIMERS==0: Drop support for sys_timeout and lwip-internal cyclic timers. + * (the array of lwip-internal cyclic timers is still provided) + * (check NO_SYS_NO_TIMERS for compatibility to old versions) + */ +#if !defined LWIP_TIMERS || defined __DOXYGEN__ +#ifdef NO_SYS_NO_TIMERS +#define LWIP_TIMERS (!NO_SYS || (NO_SYS && !NO_SYS_NO_TIMERS)) +#else +#define LWIP_TIMERS 1 +#endif +#endif + +/** + * LWIP_TIMERS_CUSTOM==1: Provide your own timer implementation. + * Function prototypes in timeouts.h and the array of lwip-internal cyclic timers + * are still included, but the implementation is not. The following functions + * will be required: sys_timeouts_init(), sys_timeout(), sys_untimeout(), + * sys_timeouts_mbox_fetch() + */ +#if !defined LWIP_TIMERS_CUSTOM || defined __DOXYGEN__ +#define LWIP_TIMERS_CUSTOM 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_memcpy memcpy + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEMCPY: override this if you have a faster implementation at hand than the + * one included in your C library + */ +#if !defined MEMCPY || defined __DOXYGEN__ +#define MEMCPY(dst,src,len) memcpy(dst,src,len) +#endif + +/** + * SMEMCPY: override this with care! Some compilers (e.g. gcc) can inline a + * call to memcpy() if the length is known at compile time and is small. + */ +#if !defined SMEMCPY || defined __DOXYGEN__ +#define SMEMCPY(dst,src,len) memcpy(dst,src,len) +#endif + +/** + * MEMMOVE: override this if you have a faster implementation at hand than the + * one included in your C library. lwIP currently uses MEMMOVE only when IPv6 + * fragmentation support is enabled. 
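As a rough illustration of how a port can use the copy hooks above from its own lwipopts.h, the fragment below assumes a hypothetical board_fast_copy() routine with a memcpy-compatible signature; SMEMCPY is left as plain memcpy so small compile-time-sized copies can still be inlined.

/* Sketch only: board_fast_copy() is a hypothetical DMA-backed copy routine
 * provided by the port; <stddef.h>/<string.h> are assumed to be available. */
void *board_fast_copy(void *dst, const void *src, size_t len);

#define MEMCPY(dst, src, len)   board_fast_copy((dst), (src), (len))
#define SMEMCPY(dst, src, len)  memcpy((dst), (src), (len))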
+ */ +#if !defined MEMMOVE || defined __DOXYGEN__ +#define MEMMOVE(dst,src,len) memmove(dst,src,len) +#endif +/** + * @} + */ + +/* + ------------------------------------ + ----------- Core locking ----------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_lock Core locking and MPU + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MPU_COMPATIBLE: enables special memory management mechanism + * which makes lwip able to work on MPU (Memory Protection Unit) system + * by not passing stack-pointers to other threads + * (this decreases performance as memory is allocated from pools instead + * of keeping it on the stack) + */ +#if !defined LWIP_MPU_COMPATIBLE || defined __DOXYGEN__ +#define LWIP_MPU_COMPATIBLE 0 +#endif + +/** + * LWIP_TCPIP_CORE_LOCKING + * Creates a global mutex that is held during TCPIP thread operations. + * Can be locked by client code to perform lwIP operations without changing + * into TCPIP thread using callbacks. See LOCK_TCPIP_CORE() and + * UNLOCK_TCPIP_CORE(). + * Your system should provide mutexes supporting priority inversion to use this. + */ +#if !defined LWIP_TCPIP_CORE_LOCKING || defined __DOXYGEN__ +#define LWIP_TCPIP_CORE_LOCKING 1 +#endif + +/** + * LWIP_TCPIP_CORE_LOCKING_INPUT: when LWIP_TCPIP_CORE_LOCKING is enabled, + * this lets tcpip_input() grab the mutex for input packets as well, + * instead of allocating a message and passing it to tcpip_thread. + * + * ATTENTION: this does not work when tcpip_input() is called from + * interrupt context! + */ +#if !defined LWIP_TCPIP_CORE_LOCKING_INPUT || defined __DOXYGEN__ +#define LWIP_TCPIP_CORE_LOCKING_INPUT 0 +#endif + +/** + * SYS_LIGHTWEIGHT_PROT==1: enable inter-task protection (and task-vs-interrupt + * protection) for certain critical regions during buffer allocation, deallocation + * and memory allocation and deallocation. + * ATTENTION: This is required when using lwIP from more than one context! If + * you disable this, you must be sure what you are doing! + */ +#if !defined SYS_LIGHTWEIGHT_PROT || defined __DOXYGEN__ +#define SYS_LIGHTWEIGHT_PROT 1 +#endif + +/** + * Macro/function to check whether lwIP's threading/locking + * requirements are satisfied during current function call. + * This macro usually calls a function that is implemented in the OS-dependent + * sys layer and performs the following checks: + * - Not in ISR (this should be checked for NO_SYS==1, too!) + * - If @ref LWIP_TCPIP_CORE_LOCKING = 1: TCPIP core lock is held + * - If @ref LWIP_TCPIP_CORE_LOCKING = 0: function is called from TCPIP thread + * @see @ref multithreading + */ +#if !defined LWIP_ASSERT_CORE_LOCKED || defined __DOXYGEN__ +#define LWIP_ASSERT_CORE_LOCKED() +#endif + +/** + * Called as first thing in the lwIP TCPIP thread. Can be used in conjunction + * with @ref LWIP_ASSERT_CORE_LOCKED to check core locking. + * @see @ref multithreading + */ +#if !defined LWIP_MARK_TCPIP_THREAD || defined __DOXYGEN__ +#define LWIP_MARK_TCPIP_THREAD() +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Memory options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_mem Heap and memory pools + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEM_LIBC_MALLOC==1: Use malloc/free/realloc provided by your C-library + * instead of the lwip internal allocator. Can save code size if you + * already use it. 
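With LWIP_TCPIP_CORE_LOCKING enabled as above, an application thread can bracket raw-API calls with the core mutex instead of funnelling them through tcpip_callback(); a minimal sketch (error paths trimmed):

#include "lwip/tcpip.h"
#include "lwip/udp.h"

/* Send one UDP datagram from a non-TCPIP thread while holding the core lock. */
static err_t send_locked(struct udp_pcb *pcb, struct pbuf *p,
                         const ip_addr_t *addr, u16_t port)
{
  err_t err;
  LOCK_TCPIP_CORE();                 /* serialize against tcpip_thread */
  err = udp_sendto(pcb, p, addr, port);
  UNLOCK_TCPIP_CORE();
  return err;
}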
+ */ +#if !defined MEM_LIBC_MALLOC || defined __DOXYGEN__ +#define MEM_LIBC_MALLOC 0 +#endif + +/** + * MEMP_MEM_MALLOC==1: Use mem_malloc/mem_free instead of the lwip pool allocator. + * Especially useful with MEM_LIBC_MALLOC but handle with care regarding execution + * speed (heap alloc can be much slower than pool alloc) and usage from interrupts + * (especially if your netif driver allocates PBUF_POOL pbufs for received frames + * from interrupt)! + * ATTENTION: Currently, this uses the heap for ALL pools (also for private pools, + * not only for internal pools defined in memp_std.h)! + */ +#if !defined MEMP_MEM_MALLOC || defined __DOXYGEN__ +#define MEMP_MEM_MALLOC 0 +#endif + +/** + * MEMP_MEM_INIT==1: Force use of memset to initialize pool memory. + * Useful if pool are moved in uninitialized section of memory. This will ensure + * default values in pcbs struct are well initialized in all conditions. + */ +#if !defined MEMP_MEM_INIT || defined __DOXYGEN__ +#define MEMP_MEM_INIT 0 +#endif + +/** + * MEM_ALIGNMENT: should be set to the alignment of the CPU + * 4 byte alignment -> \#define MEM_ALIGNMENT 4 + * 2 byte alignment -> \#define MEM_ALIGNMENT 2 + */ +#if !defined MEM_ALIGNMENT || defined __DOXYGEN__ +#define MEM_ALIGNMENT 1 +#endif + +/** + * MEM_SIZE: the size of the heap memory. If the application will send + * a lot of data that needs to be copied, this should be set high. + */ +#if !defined MEM_SIZE || defined __DOXYGEN__ +#define MEM_SIZE 1600 +#endif + +/** + * MEMP_OVERFLOW_CHECK: memp overflow protection reserves a configurable + * amount of bytes before and after each memp element in every pool and fills + * it with a prominent default value. + * MEMP_OVERFLOW_CHECK == 0 no checking + * MEMP_OVERFLOW_CHECK == 1 checks each element when it is freed + * MEMP_OVERFLOW_CHECK >= 2 checks each element in every pool every time + * memp_malloc() or memp_free() is called (useful but slow!) + */ +#if !defined MEMP_OVERFLOW_CHECK || defined __DOXYGEN__ +#define MEMP_OVERFLOW_CHECK 0 +#endif + +/** + * MEMP_SANITY_CHECK==1: run a sanity check after each memp_free() to make + * sure that there are no cycles in the linked lists. + */ +#if !defined MEMP_SANITY_CHECK || defined __DOXYGEN__ +#define MEMP_SANITY_CHECK 0 +#endif + +/** + * MEM_OVERFLOW_CHECK: mem overflow protection reserves a configurable + * amount of bytes before and after each heap allocation chunk and fills + * it with a prominent default value. + * MEM_OVERFLOW_CHECK == 0 no checking + * MEM_OVERFLOW_CHECK == 1 checks each element when it is freed + * MEM_OVERFLOW_CHECK >= 2 checks all heap elements every time + * mem_malloc() or mem_free() is called (useful but slow!) + */ +#if !defined MEM_OVERFLOW_CHECK || defined __DOXYGEN__ +#define MEM_OVERFLOW_CHECK 0 +#endif + +/** + * MEM_SANITY_CHECK==1: run a sanity check after each mem_free() to make + * sure that the linked list of heap elements is not corrupted. + */ +#if !defined MEM_SANITY_CHECK || defined __DOXYGEN__ +#define MEM_SANITY_CHECK 0 +#endif + +/** + * MEM_USE_POOLS==1: Use an alternative to malloc() by allocating from a set + * of memory pools of various sizes. When mem_malloc is called, an element of + * the smallest pool that can provide the length needed is returned. + * To use this, MEMP_USE_CUSTOM_POOLS also has to be enabled. 
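To make the heap options above concrete, a small lwipopts.h fragment for a typical 32-bit MCU port might look like this (the sizes are illustrative, not recommendations):

#define MEM_ALIGNMENT        4            /* 32-bit CPU: 4-byte alignment         */
#define MEM_SIZE             (16 * 1024)  /* lwIP heap for PBUF_RAM allocations   */
#define MEMP_OVERFLOW_CHECK  0            /* raise to 1 or 2 only while debugging */
#define MEMP_SANITY_CHECK    0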
+ */ +#if !defined MEM_USE_POOLS || defined __DOXYGEN__ +#define MEM_USE_POOLS 0 +#endif + +/** + * MEM_USE_POOLS_TRY_BIGGER_POOL==1: if one malloc-pool is empty, try the next + * bigger pool - WARNING: THIS MIGHT WASTE MEMORY but it can make a system more + * reliable. */ +#if !defined MEM_USE_POOLS_TRY_BIGGER_POOL || defined __DOXYGEN__ +#define MEM_USE_POOLS_TRY_BIGGER_POOL 0 +#endif + +/** + * MEMP_USE_CUSTOM_POOLS==1: whether to include a user file lwippools.h + * that defines additional pools beyond the "standard" ones required + * by lwIP. If you set this to 1, you must have lwippools.h in your + * include path somewhere. + */ +#if !defined MEMP_USE_CUSTOM_POOLS || defined __DOXYGEN__ +#define MEMP_USE_CUSTOM_POOLS 0 +#endif + +/** + * Set this to 1 if you want to free PBUF_RAM pbufs (or call mem_free()) from + * interrupt context (or another context that doesn't allow waiting for a + * semaphore). + * If set to 1, mem_malloc will be protected by a semaphore and SYS_ARCH_PROTECT, + * while mem_free will only use SYS_ARCH_PROTECT. mem_malloc SYS_ARCH_UNPROTECTs + * with each loop so that mem_free can run. + * + * ATTENTION: As you can see from the above description, this leads to dis-/ + * enabling interrupts often, which can be slow! Also, on low memory, mem_malloc + * can need longer. + * + * If you don't want that, at least for NO_SYS=0, you can still use the following + * functions to enqueue a deallocation call which then runs in the tcpip_thread + * context: + * - pbuf_free_callback(p); + * - mem_free_callback(m); + */ +#if !defined LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT || defined __DOXYGEN__ +#define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 0 +#endif +/** + * @} + */ + +/* + ------------------------------------------------ + ---------- Internal Memory Pool Sizes ---------- + ------------------------------------------------ +*/ +/** + * @defgroup lwip_opts_memp Internal memory pools + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEMP_NUM_PBUF: the number of memp struct pbufs (used for PBUF_ROM and PBUF_REF). + * If the application sends a lot of data out of ROM (or other static memory), + * this should be set high. + */ +#if !defined MEMP_NUM_PBUF || defined __DOXYGEN__ +#define MEMP_NUM_PBUF 16 +#endif + +/** + * MEMP_NUM_RAW_PCB: Number of raw connection PCBs + * (requires the LWIP_RAW option) + */ +#if !defined MEMP_NUM_RAW_PCB || defined __DOXYGEN__ +#define MEMP_NUM_RAW_PCB 4 +#endif + +/** + * MEMP_NUM_UDP_PCB: the number of UDP protocol control blocks. One + * per active UDP "connection". + * (requires the LWIP_UDP option) + */ +#if !defined MEMP_NUM_UDP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_UDP_PCB 4 +#endif + +/** + * MEMP_NUM_TCP_PCB: the number of simultaneously active TCP connections. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_TCP_PCB 5 +#endif + +/** + * MEMP_NUM_TCP_PCB_LISTEN: the number of listening TCP connections. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_PCB_LISTEN || defined __DOXYGEN__ +#define MEMP_NUM_TCP_PCB_LISTEN 8 +#endif + +/** + * MEMP_NUM_TCP_SEG: the number of simultaneously queued TCP segments. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_SEG || defined __DOXYGEN__ +#define MEMP_NUM_TCP_SEG 16 +#endif + +/** + * MEMP_NUM_ALTCP_PCB: the number of simultaneously active altcp layer pcbs. + * (requires the LWIP_ALTCP option) + * Connections with multiple layers require more than one altcp_pcb (e.g. 
TLS + * over TCP requires 2 altcp_pcbs, one for TLS and one for TCP). + */ +#if !defined MEMP_NUM_ALTCP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_ALTCP_PCB MEMP_NUM_TCP_PCB +#endif + +/** + * MEMP_NUM_REASSDATA: the number of IP packets simultaneously queued for + * reassembly (whole packets, not fragments!) + */ +#if !defined MEMP_NUM_REASSDATA || defined __DOXYGEN__ +#define MEMP_NUM_REASSDATA 5 +#endif + +/** + * MEMP_NUM_FRAG_PBUF: the number of IP fragments simultaneously sent + * (fragments, not whole packets!). + * This is only used with LWIP_NETIF_TX_SINGLE_PBUF==0 and only has to be > 1 + * with DMA-enabled MACs where the packet is not yet sent when netif->output + * returns. + */ +#if !defined MEMP_NUM_FRAG_PBUF || defined __DOXYGEN__ +#define MEMP_NUM_FRAG_PBUF 15 +#endif + +/** + * MEMP_NUM_ARP_QUEUE: the number of simultaneously queued outgoing + * packets (pbufs) that are waiting for an ARP request (to resolve + * their destination address) to finish. + * (requires the ARP_QUEUEING option) + */ +#if !defined MEMP_NUM_ARP_QUEUE || defined __DOXYGEN__ +#define MEMP_NUM_ARP_QUEUE 30 +#endif + +/** + * MEMP_NUM_IGMP_GROUP: The number of multicast groups whose network interfaces + * can be members at the same time (one per netif - allsystems group -, plus one + * per netif membership). + * (requires the LWIP_IGMP option) + */ +#if !defined MEMP_NUM_IGMP_GROUP || defined __DOXYGEN__ +#define MEMP_NUM_IGMP_GROUP 8 +#endif + +/** + * The number of sys timeouts used by the core stack (not apps) + * The default number of timeouts is calculated here for all enabled modules. + */ +#define LWIP_NUM_SYS_TIMEOUT_INTERNAL (LWIP_TCP + IP_REASSEMBLY + LWIP_ARP + (2*LWIP_DHCP) + LWIP_AUTOIP + LWIP_IGMP + LWIP_DNS + PPP_NUM_TIMEOUTS + (LWIP_IPV6 * (1 + LWIP_IPV6_REASS + LWIP_IPV6_MLD))) + +/** + * MEMP_NUM_SYS_TIMEOUT: the number of simultaneously active timeouts. + * The default number of timeouts is calculated here for all enabled modules. + * The formula expects settings to be either '0' or '1'. + */ +#if !defined MEMP_NUM_SYS_TIMEOUT || defined __DOXYGEN__ +#define MEMP_NUM_SYS_TIMEOUT LWIP_NUM_SYS_TIMEOUT_INTERNAL +#endif + +/** + * MEMP_NUM_NETBUF: the number of struct netbufs. + * (only needed if you use the sequential API, like api_lib.c) + */ +#if !defined MEMP_NUM_NETBUF || defined __DOXYGEN__ +#define MEMP_NUM_NETBUF 2 +#endif + +/** + * MEMP_NUM_NETCONN: the number of struct netconns. + * (only needed if you use the sequential API, like api_lib.c) + */ +#if !defined MEMP_NUM_NETCONN || defined __DOXYGEN__ +#define MEMP_NUM_NETCONN 4 +#endif + +/** + * MEMP_NUM_SELECT_CB: the number of struct lwip_select_cb. + * (Only needed if you have LWIP_MPU_COMPATIBLE==1 and use the socket API. + * In that case, you need one per thread calling lwip_select.) + */ +#if !defined MEMP_NUM_SELECT_CB || defined __DOXYGEN__ +#define MEMP_NUM_SELECT_CB 4 +#endif + +/** + * MEMP_NUM_TCPIP_MSG_API: the number of struct tcpip_msg, which are used + * for callback/timeout API communication. + * (only needed if you use tcpip.c) + */ +#if !defined MEMP_NUM_TCPIP_MSG_API || defined __DOXYGEN__ +#define MEMP_NUM_TCPIP_MSG_API 8 +#endif + +/** + * MEMP_NUM_TCPIP_MSG_INPKT: the number of struct tcpip_msg, which are used + * for incoming packets. 
+ * (only needed if you use tcpip.c) + */ +#if !defined MEMP_NUM_TCPIP_MSG_INPKT || defined __DOXYGEN__ +#define MEMP_NUM_TCPIP_MSG_INPKT 8 +#endif + +/** + * MEMP_NUM_NETDB: the number of concurrently running lwip_addrinfo() calls + * (before freeing the corresponding memory using lwip_freeaddrinfo()). + */ +#if !defined MEMP_NUM_NETDB || defined __DOXYGEN__ +#define MEMP_NUM_NETDB 1 +#endif + +/** + * MEMP_NUM_LOCALHOSTLIST: the number of host entries in the local host list + * if DNS_LOCAL_HOSTLIST_IS_DYNAMIC==1. + */ +#if !defined MEMP_NUM_LOCALHOSTLIST || defined __DOXYGEN__ +#define MEMP_NUM_LOCALHOSTLIST 1 +#endif + +/** + * PBUF_POOL_SIZE: the number of buffers in the pbuf pool. + */ +#if !defined PBUF_POOL_SIZE || defined __DOXYGEN__ +#define PBUF_POOL_SIZE 16 +#endif + +/** MEMP_NUM_API_MSG: the number of concurrently active calls to various + * socket, netconn, and tcpip functions + */ +#if !defined MEMP_NUM_API_MSG || defined __DOXYGEN__ +#define MEMP_NUM_API_MSG MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_DNS_API_MSG: the number of concurrently active calls to netconn_gethostbyname + */ +#if !defined MEMP_NUM_DNS_API_MSG || defined __DOXYGEN__ +#define MEMP_NUM_DNS_API_MSG MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA: the number of concurrently active calls + * to getsockopt/setsockopt + */ +#if !defined MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA || defined __DOXYGEN__ +#define MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_NETIFAPI_MSG: the number of concurrently active calls to the + * netifapi functions + */ +#if !defined MEMP_NUM_NETIFAPI_MSG || defined __DOXYGEN__ +#define MEMP_NUM_NETIFAPI_MSG MEMP_NUM_TCPIP_MSG_API +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- ARP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_arp ARP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_ARP==1: Enable ARP functionality. + */ +#if !defined LWIP_ARP || defined __DOXYGEN__ +#define LWIP_ARP 1 +#endif + +/** + * ARP_TABLE_SIZE: Number of active MAC-IP address pairs cached. + */ +#if !defined ARP_TABLE_SIZE || defined __DOXYGEN__ +#define ARP_TABLE_SIZE 10 +#endif + +/** the time an ARP entry stays valid after its last update, + * for ARP_TMR_INTERVAL = 1000, this is + * (60 * 5) seconds = 5 minutes. + */ +#if !defined ARP_MAXAGE || defined __DOXYGEN__ +#define ARP_MAXAGE 300 +#endif + +/** + * ARP_QUEUEING==1: Multiple outgoing packets are queued during hardware address + * resolution. By default, only the most recent packet is queued per IP address. + * This is sufficient for most protocols and mainly reduces TCP connection + * startup time. Set this to 1 if you know your application sends more than one + * packet in a row to an IP address that is not in the ARP cache. + */ +#if !defined ARP_QUEUEING || defined __DOXYGEN__ +#define ARP_QUEUEING 0 +#endif + +/** The maximum number of packets which may be queued for each + * unresolved address by other network layers. Defaults to 3, 0 means disabled. + * Old packets are dropped, new packets are queued. + */ +#if !defined ARP_QUEUE_LEN || defined __DOXYGEN__ +#define ARP_QUEUE_LEN 3 +#endif + +/** + * ETHARP_SUPPORT_VLAN==1: support receiving and sending ethernet packets with + * VLAN header. See the description of LWIP_HOOK_VLAN_CHECK and + * LWIP_HOOK_VLAN_SET hooks to check/set VLAN headers. + * Additionally, you can define ETHARP_VLAN_CHECK to an u16_t VLAN ID to check. 
+ * If ETHARP_VLAN_CHECK is defined, only VLAN-traffic for this VLAN is accepted. + * If ETHARP_VLAN_CHECK is not defined, all traffic is accepted. + * Alternatively, define a function/define ETHARP_VLAN_CHECK_FN(eth_hdr, vlan) + * that returns 1 to accept a packet or 0 to drop a packet. + */ +#if !defined ETHARP_SUPPORT_VLAN || defined __DOXYGEN__ +#define ETHARP_SUPPORT_VLAN 0 +#endif + +/** LWIP_ETHERNET==1: enable ethernet support even though ARP might be disabled + */ +#if !defined LWIP_ETHERNET || defined __DOXYGEN__ +#define LWIP_ETHERNET LWIP_ARP +#endif + +/** ETH_PAD_SIZE: number of bytes added before the ethernet header to ensure + * alignment of payload after that header. Since the header is 14 bytes long, + * without this padding e.g. addresses in the IP header will not be aligned + * on a 32-bit boundary, so setting this to 2 can speed up 32-bit-platforms. + */ +#if !defined ETH_PAD_SIZE || defined __DOXYGEN__ +#define ETH_PAD_SIZE 0 +#endif + +/** ETHARP_SUPPORT_STATIC_ENTRIES==1: enable code to support static ARP table + * entries (using etharp_add_static_entry/etharp_remove_static_entry). + */ +#if !defined ETHARP_SUPPORT_STATIC_ENTRIES || defined __DOXYGEN__ +#define ETHARP_SUPPORT_STATIC_ENTRIES 0 +#endif + +/** ETHARP_TABLE_MATCH_NETIF==1: Match netif for ARP table entries. + * If disabled, duplicate IP address on multiple netifs are not supported + * (but this should only occur for AutoIP). + */ +#if !defined ETHARP_TABLE_MATCH_NETIF || defined __DOXYGEN__ +#define ETHARP_TABLE_MATCH_NETIF !LWIP_SINGLE_NETIF +#endif +/** + * @} + */ + +/* + -------------------------------- + ---------- IP options ---------- + -------------------------------- +*/ +/** + * @defgroup lwip_opts_ipv4 IPv4 + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_IPV4==1: Enable IPv4 + */ +#if !defined LWIP_IPV4 || defined __DOXYGEN__ +#define LWIP_IPV4 1 +#endif + +/** + * IP_FORWARD==1: Enables the ability to forward IP packets across network + * interfaces. If you are going to run lwIP on a device with only one network + * interface, define this to 0. + */ +#if !defined IP_FORWARD || defined __DOXYGEN__ +#define IP_FORWARD 0 +#endif + +/** + * IP_REASSEMBLY==1: Reassemble incoming fragmented IP packets. Note that + * this option does not affect outgoing packet sizes, which can be controlled + * via IP_FRAG. + */ +#if !defined IP_REASSEMBLY || defined __DOXYGEN__ +#define IP_REASSEMBLY 1 +#endif + +/** + * IP_FRAG==1: Fragment outgoing IP packets if their size exceeds MTU. Note + * that this option does not affect incoming packet sizes, which can be + * controlled via IP_REASSEMBLY. + */ +#if !defined IP_FRAG || defined __DOXYGEN__ +#define IP_FRAG 1 +#endif + +#if !LWIP_IPV4 +/* disable IPv4 extensions when IPv4 is disabled */ +#undef IP_FORWARD +#define IP_FORWARD 0 +#undef IP_REASSEMBLY +#define IP_REASSEMBLY 0 +#undef IP_FRAG +#define IP_FRAG 0 +#endif /* !LWIP_IPV4 */ + +/** + * IP_OPTIONS_ALLOWED: Defines the behavior for IP options. + * IP_OPTIONS_ALLOWED==0: All packets with IP options are dropped. + * IP_OPTIONS_ALLOWED==1: IP options are allowed (but not parsed). + */ +#if !defined IP_OPTIONS_ALLOWED || defined __DOXYGEN__ +#define IP_OPTIONS_ALLOWED 1 +#endif + +/** + * IP_REASS_MAXAGE: Maximum time (in multiples of IP_TMR_INTERVAL - so seconds, normally) + * a fragmented IP packet waits for all fragments to arrive. If not all fragments arrived + * in this time, the whole packet is discarded. 
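If ETHARP_SUPPORT_STATIC_ENTRIES is switched on, a fixed IP/MAC pairing can be pinned into the ARP table; a short sketch with made-up addresses:

#include "lwip/etharp.h"

static void add_gateway_arp_entry(void)
{
  ip4_addr_t ip;
  struct eth_addr mac = {{0x00, 0x80, 0xe1, 0x12, 0x34, 0x56}};  /* example MAC */

  IP4_ADDR(&ip, 192, 168, 1, 1);                                 /* example IP  */
  /* Requires ETHARP_SUPPORT_STATIC_ENTRIES==1; returns ERR_OK on success. */
  (void)etharp_add_static_entry(&ip, &mac);
}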
+ */ +#if !defined IP_REASS_MAXAGE || defined __DOXYGEN__ +#define IP_REASS_MAXAGE 15 +#endif + +/** + * IP_REASS_MAX_PBUFS: Total maximum amount of pbufs waiting to be reassembled. + * Since the received pbufs are enqueued, be sure to configure + * PBUF_POOL_SIZE > IP_REASS_MAX_PBUFS so that the stack is still able to receive + * packets even if the maximum amount of fragments is enqueued for reassembly! + * When IPv4 *and* IPv6 are enabled, this even changes to + * (PBUF_POOL_SIZE > 2 * IP_REASS_MAX_PBUFS)! + */ +#if !defined IP_REASS_MAX_PBUFS || defined __DOXYGEN__ +#define IP_REASS_MAX_PBUFS 10 +#endif + +/** + * IP_DEFAULT_TTL: Default value for Time-To-Live used by transport layers. + */ +#if !defined IP_DEFAULT_TTL || defined __DOXYGEN__ +#define IP_DEFAULT_TTL 255 +#endif + +/** + * IP_SOF_BROADCAST=1: Use the SOF_BROADCAST field to enable broadcast + * filter per pcb on udp and raw send operations. To enable broadcast filter + * on recv operations, you also have to set IP_SOF_BROADCAST_RECV=1. + */ +#if !defined IP_SOF_BROADCAST || defined __DOXYGEN__ +#define IP_SOF_BROADCAST 0 +#endif + +/** + * IP_SOF_BROADCAST_RECV (requires IP_SOF_BROADCAST=1) enable the broadcast + * filter on recv operations. + */ +#if !defined IP_SOF_BROADCAST_RECV || defined __DOXYGEN__ +#define IP_SOF_BROADCAST_RECV 0 +#endif + +/** + * IP_FORWARD_ALLOW_TX_ON_RX_NETIF==1: allow ip_forward() to send packets back + * out on the netif where it was received. This should only be used for + * wireless networks. + * ATTENTION: When this is 1, make sure your netif driver correctly marks incoming + * link-layer-broadcast/multicast packets as such using the corresponding pbuf flags! + */ +#if !defined IP_FORWARD_ALLOW_TX_ON_RX_NETIF || defined __DOXYGEN__ +#define IP_FORWARD_ALLOW_TX_ON_RX_NETIF 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- ICMP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_icmp ICMP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_ICMP==1: Enable ICMP module inside the IP stack. + * Be careful, disable that make your product non-compliant to RFC1122 + */ +#if !defined LWIP_ICMP || defined __DOXYGEN__ +#define LWIP_ICMP 1 +#endif + +/** + * ICMP_TTL: Default value for Time-To-Live used by ICMP packets. + */ +#if !defined ICMP_TTL || defined __DOXYGEN__ +#define ICMP_TTL IP_DEFAULT_TTL +#endif + +/** + * LWIP_BROADCAST_PING==1: respond to broadcast pings (default is unicast only) + */ +#if !defined LWIP_BROADCAST_PING || defined __DOXYGEN__ +#define LWIP_BROADCAST_PING 0 +#endif + +/** + * LWIP_MULTICAST_PING==1: respond to multicast pings (default is unicast only) + */ +#if !defined LWIP_MULTICAST_PING || defined __DOXYGEN__ +#define LWIP_MULTICAST_PING 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- RAW options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_raw RAW + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_RAW==1: Enable application layer to hook into the IP layer itself. + */ +#if !defined LWIP_RAW || defined __DOXYGEN__ +#define LWIP_RAW 0 +#endif + +/** + * LWIP_RAW==1: Enable application layer to hook into the IP layer itself. 
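The PBUF_POOL_SIZE versus IP_REASS_MAX_PBUFS relationship described above is easy to guard at build time; a sketch of a preprocessor check that could live in a project header included after opt.h:

/* Fail the build if IP reassembly could starve the pbuf pool (IPv4-only case). */
#if LWIP_IPV4 && IP_REASSEMBLY && (PBUF_POOL_SIZE <= IP_REASS_MAX_PBUFS)
#error "PBUF_POOL_SIZE should be larger than IP_REASS_MAX_PBUFS (see opt.h)"
#endif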
+ */ +#if !defined RAW_TTL || defined __DOXYGEN__ +#define RAW_TTL IP_DEFAULT_TTL +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- DHCP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_dhcp DHCP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_DHCP==1: Enable DHCP module. + */ +#if !defined LWIP_DHCP || defined __DOXYGEN__ +#define LWIP_DHCP 0 +#endif +#if !LWIP_IPV4 +/* disable DHCP when IPv4 is disabled */ +#undef LWIP_DHCP +#define LWIP_DHCP 0 +#endif /* !LWIP_IPV4 */ + +/** + * DHCP_DOES_ARP_CHECK==1: Do an ARP check on the offered address. + */ +#if !defined DHCP_DOES_ARP_CHECK || defined __DOXYGEN__ +#define DHCP_DOES_ARP_CHECK (LWIP_DHCP && LWIP_ARP) +#endif + +/** + * LWIP_DHCP_BOOTP_FILE==1: Store offered_si_addr and boot_file_name. + */ +#if !defined LWIP_DHCP_BOOTP_FILE || defined __DOXYGEN__ +#define LWIP_DHCP_BOOTP_FILE 0 +#endif + +/** + * LWIP_DHCP_GETS_NTP==1: Request NTP servers with discover/select. For each + * response packet, an callback is called, which has to be provided by the port: + * void dhcp_set_ntp_servers(u8_t num_ntp_servers, ip_addr_t* ntp_server_addrs); +*/ +#if !defined LWIP_DHCP_GET_NTP_SRV || defined __DOXYGEN__ +#define LWIP_DHCP_GET_NTP_SRV 0 +#endif + +/** + * The maximum of NTP servers requested + */ +#if !defined LWIP_DHCP_MAX_NTP_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP_MAX_NTP_SERVERS 1 +#endif + +/** + * LWIP_DHCP_MAX_DNS_SERVERS > 0: Request DNS servers with discover/select. + * DNS servers received in the response are passed to DNS via @ref dns_setserver() + * (up to the maximum limit defined here). + */ +#if !defined LWIP_DHCP_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP_MAX_DNS_SERVERS DNS_MAX_SERVERS +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- AUTOIP options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_autoip AUTOIP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_AUTOIP==1: Enable AUTOIP module. + */ +#if !defined LWIP_AUTOIP || defined __DOXYGEN__ +#define LWIP_AUTOIP 0 +#endif +#if !LWIP_IPV4 +/* disable AUTOIP when IPv4 is disabled */ +#undef LWIP_AUTOIP +#define LWIP_AUTOIP 0 +#endif /* !LWIP_IPV4 */ + +/** + * LWIP_DHCP_AUTOIP_COOP==1: Allow DHCP and AUTOIP to be both enabled on + * the same interface at the same time. + */ +#if !defined LWIP_DHCP_AUTOIP_COOP || defined __DOXYGEN__ +#define LWIP_DHCP_AUTOIP_COOP 0 +#endif + +/** + * LWIP_DHCP_AUTOIP_COOP_TRIES: Set to the number of DHCP DISCOVER probes + * that should be sent before falling back on AUTOIP (the DHCP client keeps + * running in this case). This can be set as low as 1 to get an AutoIP address + * very quickly, but you should be prepared to handle a changing IP address + * when DHCP overrides AutoIP. + */ +#if !defined LWIP_DHCP_AUTOIP_COOP_TRIES || defined __DOXYGEN__ +#define LWIP_DHCP_AUTOIP_COOP_TRIES 9 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ----- SNMP MIB2 support ----- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_mib2 SNMP MIB2 callbacks + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MIB2_CALLBACKS==1: Turn on SNMP MIB2 callbacks. + * Turn this on to get callbacks needed to implement MIB2. + * Usually MIB2_STATS should be enabled, too. 
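Tying the DHCP switch above to the netifapi wrappers from netifapi.h, a non-TCPIP thread could bring an interface up like this (assumes LWIP_NETIF_API==1 and LWIP_DHCP==1):

#include "lwip/netifapi.h"

/* Bring the interface up and start the DHCP client from an application thread. */
static void start_dhcp(struct netif *nif)
{
  netifapi_netif_set_up(nif);
  netifapi_dhcp_start(nif);
}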
+ */ +#if !defined LWIP_MIB2_CALLBACKS || defined __DOXYGEN__ +#define LWIP_MIB2_CALLBACKS 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + -------- Multicast options ------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_multicast Multicast + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MULTICAST_TX_OPTIONS==1: Enable multicast TX support like the socket options + * IP_MULTICAST_TTL/IP_MULTICAST_IF/IP_MULTICAST_LOOP, as well as (currently only) + * core support for the corresponding IPv6 options. + */ +#if !defined LWIP_MULTICAST_TX_OPTIONS || defined __DOXYGEN__ +#define LWIP_MULTICAST_TX_OPTIONS ((LWIP_IGMP || LWIP_IPV6_MLD) && (LWIP_UDP || LWIP_RAW)) +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- IGMP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_igmp IGMP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_IGMP==1: Turn on IGMP module. + */ +#if !defined LWIP_IGMP || defined __DOXYGEN__ +#define LWIP_IGMP 0 +#endif +#if !LWIP_IPV4 +#undef LWIP_IGMP +#define LWIP_IGMP 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- DNS options ----------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_dns DNS + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_DNS==1: Turn on DNS module. UDP must be available for DNS + * transport. + */ +#if !defined LWIP_DNS || defined __DOXYGEN__ +#define LWIP_DNS 0 +#endif + +/** DNS maximum number of entries to maintain locally. */ +#if !defined DNS_TABLE_SIZE || defined __DOXYGEN__ +#define DNS_TABLE_SIZE 4 +#endif + +/** DNS maximum host name length supported in the name table. */ +#if !defined DNS_MAX_NAME_LENGTH || defined __DOXYGEN__ +#define DNS_MAX_NAME_LENGTH 256 +#endif + +/** The maximum of DNS servers + * The first server can be initialized automatically by defining + * DNS_SERVER_ADDRESS(ipaddr), where 'ipaddr' is an 'ip_addr_t*' + */ +#if !defined DNS_MAX_SERVERS || defined __DOXYGEN__ +#define DNS_MAX_SERVERS 2 +#endif + +/** DNS maximum number of retries when asking for a name, before "timeout". */ +#if !defined DNS_MAX_RETRIES || defined __DOXYGEN__ +#define DNS_MAX_RETRIES 4 +#endif + +/** DNS do a name checking between the query and the response. */ +#if !defined DNS_DOES_NAME_CHECK || defined __DOXYGEN__ +#define DNS_DOES_NAME_CHECK 1 +#endif + +/** LWIP_DNS_SECURE: controls the security level of the DNS implementation + * Use all DNS security features by default. + * This is overridable but should only be needed by very small targets + * or when using against non standard DNS servers. */ +#if !defined LWIP_DNS_SECURE || defined __DOXYGEN__ +#define LWIP_DNS_SECURE (LWIP_DNS_SECURE_RAND_XID | LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT) +#endif + +/* A list of DNS security features follows */ +#define LWIP_DNS_SECURE_RAND_XID 1 +#define LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING 2 +#define LWIP_DNS_SECURE_RAND_SRC_PORT 4 + +/** DNS_LOCAL_HOSTLIST: Implements a local host-to-address list. 
If enabled, you have to define an initializer: + * \#define DNS_LOCAL_HOSTLIST_INIT {DNS_LOCAL_HOSTLIST_ELEM("host_ip4", IPADDR4_INIT_BYTES(1,2,3,4)), \ + * DNS_LOCAL_HOSTLIST_ELEM("host_ip6", IPADDR6_INIT_HOST(123, 234, 345, 456)} + * + * Instead, you can also use an external function: + * \#define DNS_LOOKUP_LOCAL_EXTERN(x) extern err_t my_lookup_function(const char *name, ip_addr_t *addr, u8_t dns_addrtype) + * that looks up the IP address and returns ERR_OK if found (LWIP_DNS_ADDRTYPE_xxx is passed in dns_addrtype). + */ +#if !defined DNS_LOCAL_HOSTLIST || defined __DOXYGEN__ +#define DNS_LOCAL_HOSTLIST 0 +#endif /* DNS_LOCAL_HOSTLIST */ + +/** If this is turned on, the local host-list can be dynamically changed + * at runtime. */ +#if !defined DNS_LOCAL_HOSTLIST_IS_DYNAMIC || defined __DOXYGEN__ +#define DNS_LOCAL_HOSTLIST_IS_DYNAMIC 0 +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +/** Set this to 1 to enable querying ".local" names via mDNS + * using a One-Shot Multicast DNS Query */ +#if !defined LWIP_DNS_SUPPORT_MDNS_QUERIES || defined __DOXYGEN__ +#define LWIP_DNS_SUPPORT_MDNS_QUERIES 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- UDP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_udp UDP + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_UDP==1: Turn on UDP. + */ +#if !defined LWIP_UDP || defined __DOXYGEN__ +#define LWIP_UDP 1 +#endif + +/** + * LWIP_UDPLITE==1: Turn on UDP-Lite. (Requires LWIP_UDP) + */ +#if !defined LWIP_UDPLITE || defined __DOXYGEN__ +#define LWIP_UDPLITE 0 +#endif + +/** + * UDP_TTL: Default Time-To-Live value. + */ +#if !defined UDP_TTL || defined __DOXYGEN__ +#define UDP_TTL IP_DEFAULT_TTL +#endif + +/** + * LWIP_NETBUF_RECVINFO==1: append destination addr and port to every netbuf. + */ +#if !defined LWIP_NETBUF_RECVINFO || defined __DOXYGEN__ +#define LWIP_NETBUF_RECVINFO 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- TCP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_tcp TCP + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_TCP==1: Turn on TCP. + */ +#if !defined LWIP_TCP || defined __DOXYGEN__ +#define LWIP_TCP 1 +#endif + +/** + * TCP_TTL: Default Time-To-Live value. + */ +#if !defined TCP_TTL || defined __DOXYGEN__ +#define TCP_TTL IP_DEFAULT_TTL +#endif + +/** + * TCP_WND: The size of a TCP window. This must be at least + * (2 * TCP_MSS) for things to work well. + * ATTENTION: when using TCP_RCV_SCALE, TCP_WND is the total size + * with scaling applied. Maximum window value in the TCP header + * will be TCP_WND >> TCP_RCV_SCALE + */ +#if !defined TCP_WND || defined __DOXYGEN__ +#define TCP_WND (4 * TCP_MSS) +#endif + +/** + * TCP_MAXRTX: Maximum number of retransmissions of data segments. + */ +#if !defined TCP_MAXRTX || defined __DOXYGEN__ +#define TCP_MAXRTX 12 +#endif + +/** + * TCP_SYNMAXRTX: Maximum number of retransmissions of SYN segments. + */ +#if !defined TCP_SYNMAXRTX || defined __DOXYGEN__ +#define TCP_SYNMAXRTX 6 +#endif + +/** + * TCP_QUEUE_OOSEQ==1: TCP will queue segments that arrive out of order. + * Define to 0 if your device is low on memory. + */ +#if !defined TCP_QUEUE_OOSEQ || defined __DOXYGEN__ +#define TCP_QUEUE_OOSEQ LWIP_TCP +#endif + +/** + * LWIP_TCP_SACK_OUT==1: TCP will support sending selective acknowledgements (SACKs). 
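As a concrete reading of the TCP_WND rule above (at least 2 * TCP_MSS), an Ethernet-oriented lwipopts.h fragment might use (example values only):

#define TCP_MSS   1460            /* 1500-byte Ethernet MTU minus 40 bytes of IP+TCP headers */
#define TCP_WND   (4 * TCP_MSS)   /* comfortably above the 2 * TCP_MSS minimum               */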
+ */ +#if !defined LWIP_TCP_SACK_OUT || defined __DOXYGEN__ +#define LWIP_TCP_SACK_OUT 0 +#endif + +/** + * LWIP_TCP_MAX_SACK_NUM: The maximum number of SACK values to include in TCP segments. + * Must be at least 1, but is only used if LWIP_TCP_SACK_OUT is enabled. + * NOTE: Even though we never send more than 3 or 4 SACK ranges in a single segment + * (depending on other options), setting this option to values greater than 4 is not pointless. + * This is basically the max number of SACK ranges we want to keep track of. + * As new data is delivered, some of the SACK ranges may be removed or merged. + * In that case some of those older SACK ranges may be used again. + * The amount of memory used to store SACK ranges is LWIP_TCP_MAX_SACK_NUM * 8 bytes for each TCP PCB. + */ +#if !defined LWIP_TCP_MAX_SACK_NUM || defined __DOXYGEN__ +#define LWIP_TCP_MAX_SACK_NUM 4 +#endif + +/** + * TCP_MSS: TCP Maximum segment size. (default is 536, a conservative default, + * you might want to increase this.) + * For the receive side, this MSS is advertised to the remote side + * when opening a connection. For the transmit size, this MSS sets + * an upper limit on the MSS advertised by the remote host. + */ +#if !defined TCP_MSS || defined __DOXYGEN__ +#define TCP_MSS 536 +#endif + +/** + * TCP_CALCULATE_EFF_SEND_MSS: "The maximum size of a segment that TCP really + * sends, the 'effective send MSS,' MUST be the smaller of the send MSS (which + * reflects the available reassembly buffer size at the remote host) and the + * largest size permitted by the IP layer" (RFC 1122) + * Setting this to 1 enables code that checks TCP_MSS against the MTU of the + * netif used for a connection and limits the MSS if it would be too big otherwise. + */ +#if !defined TCP_CALCULATE_EFF_SEND_MSS || defined __DOXYGEN__ +#define TCP_CALCULATE_EFF_SEND_MSS 1 +#endif + + +/** + * TCP_SND_BUF: TCP sender buffer space (bytes). + * To achieve good performance, this should be at least 2 * TCP_MSS. + */ +#if !defined TCP_SND_BUF || defined __DOXYGEN__ +#define TCP_SND_BUF (2 * TCP_MSS) +#endif + +/** + * TCP_SND_QUEUELEN: TCP sender buffer space (pbufs). This must be at least + * as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. + */ +#if !defined TCP_SND_QUEUELEN || defined __DOXYGEN__ +#define TCP_SND_QUEUELEN ((4 * (TCP_SND_BUF) + (TCP_MSS - 1))/(TCP_MSS)) +#endif + +/** + * TCP_SNDLOWAT: TCP writable space (bytes). This must be less than + * TCP_SND_BUF. It is the amount of space which must be available in the + * TCP snd_buf for select to return writable (combined with TCP_SNDQUEUELOWAT). + */ +#if !defined TCP_SNDLOWAT || defined __DOXYGEN__ +#define TCP_SNDLOWAT LWIP_MIN(LWIP_MAX(((TCP_SND_BUF)/2), (2 * TCP_MSS) + 1), (TCP_SND_BUF) - 1) +#endif + +/** + * TCP_SNDQUEUELOWAT: TCP writable bufs (pbuf count). This must be less + * than TCP_SND_QUEUELEN. If the number of pbufs queued on a pcb drops below + * this number, select returns writable (combined with TCP_SNDLOWAT). + */ +#if !defined TCP_SNDQUEUELOWAT || defined __DOXYGEN__ +#define TCP_SNDQUEUELOWAT LWIP_MAX(((TCP_SND_QUEUELEN)/2), 5) +#endif + +/** + * TCP_OOSEQ_MAX_BYTES: The default maximum number of bytes queued on ooseq per + * pcb if TCP_OOSEQ_BYTES_LIMIT is not defined. Default is 0 (no limit). + * Only valid for TCP_QUEUE_OOSEQ==1. + */ +#if !defined TCP_OOSEQ_MAX_BYTES || defined __DOXYGEN__ +#define TCP_OOSEQ_MAX_BYTES 0 +#endif + +/** + * TCP_OOSEQ_BYTES_LIMIT(pcb): Return the maximum number of bytes to be queued + * on ooseq per pcb, given the pcb. 
Only valid for TCP_QUEUE_OOSEQ==1 && + * TCP_OOSEQ_MAX_BYTES==1. + * Use this to override TCP_OOSEQ_MAX_BYTES to a dynamic value per pcb. + */ +#if !defined TCP_OOSEQ_BYTES_LIMIT +#if TCP_OOSEQ_MAX_BYTES +#define TCP_OOSEQ_BYTES_LIMIT(pcb) TCP_OOSEQ_MAX_BYTES +#elif defined __DOXYGEN__ +#define TCP_OOSEQ_BYTES_LIMIT(pcb) +#endif +#endif + +/** + * TCP_OOSEQ_MAX_PBUFS: The default maximum number of pbufs queued on ooseq per + * pcb if TCP_OOSEQ_BYTES_LIMIT is not defined. Default is 0 (no limit). + * Only valid for TCP_QUEUE_OOSEQ==1. + */ +#if !defined TCP_OOSEQ_MAX_PBUFS || defined __DOXYGEN__ +#define TCP_OOSEQ_MAX_PBUFS 0 +#endif + +/** + * TCP_OOSEQ_PBUFS_LIMIT(pcb): Return the maximum number of pbufs to be queued + * on ooseq per pcb, given the pcb. Only valid for TCP_QUEUE_OOSEQ==1 && + * TCP_OOSEQ_MAX_PBUFS==1. + * Use this to override TCP_OOSEQ_MAX_PBUFS to a dynamic value per pcb. + */ +#if !defined TCP_OOSEQ_PBUFS_LIMIT +#if TCP_OOSEQ_MAX_PBUFS +#define TCP_OOSEQ_PBUFS_LIMIT(pcb) TCP_OOSEQ_MAX_PBUFS +#elif defined __DOXYGEN__ +#define TCP_OOSEQ_PBUFS_LIMIT(pcb) +#endif +#endif + +/** + * TCP_LISTEN_BACKLOG: Enable the backlog option for tcp listen pcb. + */ +#if !defined TCP_LISTEN_BACKLOG || defined __DOXYGEN__ +#define TCP_LISTEN_BACKLOG 0 +#endif + +/** + * The maximum allowed backlog for TCP listen netconns. + * This backlog is used unless another is explicitly specified. + * 0xff is the maximum (u8_t). + */ +#if !defined TCP_DEFAULT_LISTEN_BACKLOG || defined __DOXYGEN__ +#define TCP_DEFAULT_LISTEN_BACKLOG 0xff +#endif + +/** + * TCP_OVERSIZE: The maximum number of bytes that tcp_write may + * allocate ahead of time in an attempt to create shorter pbuf chains + * for transmission. The meaningful range is 0 to TCP_MSS. Some + * suggested values are: + * + * 0: Disable oversized allocation. Each tcp_write() allocates a new + pbuf (old behaviour). + * 1: Allocate size-aligned pbufs with minimal excess. Use this if your + * scatter-gather DMA requires aligned fragments. + * 128: Limit the pbuf/memory overhead to 20%. + * TCP_MSS: Try to create unfragmented TCP packets. + * TCP_MSS/4: Try to create 4 fragments or less per TCP packet. + */ +#if !defined TCP_OVERSIZE || defined __DOXYGEN__ +#define TCP_OVERSIZE TCP_MSS +#endif + +/** + * LWIP_TCP_TIMESTAMPS==1: support the TCP timestamp option. + * The timestamp option is currently only used to help remote hosts, it is not + * really used locally. Therefore, it is only enabled when a TS option is + * received in the initial SYN packet from a remote host. + */ +#if !defined LWIP_TCP_TIMESTAMPS || defined __DOXYGEN__ +#define LWIP_TCP_TIMESTAMPS 0 +#endif + +/** + * TCP_WND_UPDATE_THRESHOLD: difference in window to trigger an + * explicit window update + */ +#if !defined TCP_WND_UPDATE_THRESHOLD || defined __DOXYGEN__ +#define TCP_WND_UPDATE_THRESHOLD LWIP_MIN((TCP_WND / 4), (TCP_MSS * 4)) +#endif + +/** + * LWIP_EVENT_API and LWIP_CALLBACK_API: Only one of these should be set to 1. + * LWIP_EVENT_API==1: The user defines lwip_tcp_event() to receive all + * events (accept, sent, etc) that happen in the system. + * LWIP_CALLBACK_API==1: The PCB callback function is called directly + * for the event. This is the default. 
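A minimal sketch of the default LWIP_CALLBACK_API style described above: the application installs per-pcb callbacks that the stack invokes directly (the err argument is ignored here for brevity):

#include "lwip/tcp.h"

static err_t on_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  if (p == NULL) {                 /* remote end closed the connection */
    return tcp_close(tpcb);
  }
  tcp_recved(tpcb, p->tot_len);    /* open the receive window again */
  pbuf_free(p);
  return ERR_OK;
}
/* After connecting or accepting a pcb: tcp_recv(pcb, on_recv); */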
+ */ +#if !defined(LWIP_EVENT_API) && !defined(LWIP_CALLBACK_API) || defined __DOXYGEN__ +#define LWIP_EVENT_API 0 +#define LWIP_CALLBACK_API 1 +#else +#ifndef LWIP_EVENT_API +#define LWIP_EVENT_API 0 +#endif +#ifndef LWIP_CALLBACK_API +#define LWIP_CALLBACK_API 0 +#endif +#endif + +/** + * LWIP_WND_SCALE and TCP_RCV_SCALE: + * Set LWIP_WND_SCALE to 1 to enable window scaling. + * Set TCP_RCV_SCALE to the desired scaling factor (shift count in the + * range of [0..14]). + * When LWIP_WND_SCALE is enabled but TCP_RCV_SCALE is 0, we can use a large + * send window while having a small receive window only. + */ +#if !defined LWIP_WND_SCALE || defined __DOXYGEN__ +#define LWIP_WND_SCALE 0 +#define TCP_RCV_SCALE 0 +#endif + +/** + * LWIP_TCP_PCB_NUM_EXT_ARGS: + * When this is > 0, every tcp pcb (including listen pcb) includes a number of + * additional argument entries in an array (see tcp_ext_arg_alloc_id) + */ +#if !defined LWIP_TCP_PCB_NUM_EXT_ARGS || defined __DOXYGEN__ +#define LWIP_TCP_PCB_NUM_EXT_ARGS 0 +#endif + +/** LWIP_ALTCP==1: enable the altcp API. + * altcp is an abstraction layer that prevents applications linking against the + * tcp.h functions but provides the same functionality. It is used to e.g. add + * SSL/TLS or proxy-connect support to an application written for the tcp callback + * API without that application knowing the protocol details. + * + * With LWIP_ALTCP==0, applications written against the altcp API can still be + * compiled but are directly linked against the tcp.h callback API and then + * cannot use layered protocols. + * + * See @ref altcp_api + */ +#if !defined LWIP_ALTCP || defined __DOXYGEN__ +#define LWIP_ALTCP 0 +#endif + +/** LWIP_ALTCP_TLS==1: enable TLS support for altcp API. + * This needs a port of the functions in altcp_tls.h to a TLS library. + * A port to ARM mbedtls is provided with lwIP, see apps/altcp_tls/ directory + * and LWIP_ALTCP_TLS_MBEDTLS option. + */ +#if !defined LWIP_ALTCP_TLS || defined __DOXYGEN__ +#define LWIP_ALTCP_TLS 0 +#endif + +/** + * @} + */ + +/* + ---------------------------------- + ---------- Pbuf options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_pbuf PBUF + * @ingroup lwip_opts + * @{ + */ +/** + * PBUF_LINK_HLEN: the number of bytes that should be allocated for a + * link level header. The default is 14, the standard value for + * Ethernet. + */ +#if !defined PBUF_LINK_HLEN || defined __DOXYGEN__ +#if defined LWIP_HOOK_VLAN_SET && !defined __DOXYGEN__ +#define PBUF_LINK_HLEN (18 + ETH_PAD_SIZE) +#else /* LWIP_HOOK_VLAN_SET */ +#define PBUF_LINK_HLEN (14 + ETH_PAD_SIZE) +#endif /* LWIP_HOOK_VLAN_SET */ +#endif + +/** + * PBUF_LINK_ENCAPSULATION_HLEN: the number of bytes that should be allocated + * for an additional encapsulation header before ethernet headers (e.g. 802.11) + */ +#if !defined PBUF_LINK_ENCAPSULATION_HLEN || defined __DOXYGEN__ +#define PBUF_LINK_ENCAPSULATION_HLEN 0 +#endif + +/** + * PBUF_POOL_BUFSIZE: the size of each pbuf in the pbuf pool. The default is + * designed to accommodate single full size TCP frame in one pbuf, including + * TCP_MSS, IP header, and link header. + */ +#if !defined PBUF_POOL_BUFSIZE || defined __DOXYGEN__ +#define PBUF_POOL_BUFSIZE LWIP_MEM_ALIGN_SIZE(TCP_MSS+40+PBUF_LINK_ENCAPSULATION_HLEN+PBUF_LINK_HLEN) +#endif + +/** + * LWIP_PBUF_REF_T: Refcount type in pbuf. + * Default width of u8_t can be increased if 255 refs are not enough for you. 
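To make the PBUF_POOL_BUFSIZE default above concrete, a worked example with common values (TCP_MSS of 1460, plain Ethernet header, no extra encapsulation, MEM_ALIGNMENT of 4):

/*
 * PBUF_POOL_BUFSIZE = LWIP_MEM_ALIGN_SIZE(TCP_MSS + 40 + PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN)
 *                   = LWIP_MEM_ALIGN_SIZE(1460 + 40 + 0 + 14)
 *                   = LWIP_MEM_ALIGN_SIZE(1514)
 *                   = 1516 bytes per pool pbuf (rounded up to 4-byte alignment)
 */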
+ */ +#if !defined LWIP_PBUF_REF_T || defined __DOXYGEN__ +#define LWIP_PBUF_REF_T u8_t +#endif +/** + * @} + */ + +/* + ------------------------------------------------ + ---------- Network Interfaces options ---------- + ------------------------------------------------ +*/ +/** + * @defgroup lwip_opts_netif NETIF + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_SINGLE_NETIF==1: use a single netif only. This is the common case for + * small real-life targets. Some code like routing etc. can be left out. + */ +#if !defined LWIP_SINGLE_NETIF || defined __DOXYGEN__ +#define LWIP_SINGLE_NETIF 0 +#endif + +/** + * LWIP_NETIF_HOSTNAME==1: use DHCP_OPTION_HOSTNAME with netif's hostname + * field. + */ +#if !defined LWIP_NETIF_HOSTNAME || defined __DOXYGEN__ +#define LWIP_NETIF_HOSTNAME 0 +#endif + +/** + * LWIP_NETIF_API==1: Support netif api (in netifapi.c) + */ +#if !defined LWIP_NETIF_API || defined __DOXYGEN__ +#define LWIP_NETIF_API 0 +#endif + +/** + * LWIP_NETIF_STATUS_CALLBACK==1: Support a callback function whenever an interface + * changes its up/down status (i.e., due to DHCP IP acquisition) + */ +#if !defined LWIP_NETIF_STATUS_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_STATUS_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_EXT_STATUS_CALLBACK==1: Support an extended callback function + * for several netif related event that supports multiple subscribers. + * @see netif_ext_status_callback + */ +#if !defined LWIP_NETIF_EXT_STATUS_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_EXT_STATUS_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_LINK_CALLBACK==1: Support a callback function from an interface + * whenever the link changes (i.e., link down) + */ +#if !defined LWIP_NETIF_LINK_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_LINK_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_REMOVE_CALLBACK==1: Support a callback function that is called + * when a netif has been removed + */ +#if !defined LWIP_NETIF_REMOVE_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_REMOVE_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_HWADDRHINT==1: Cache link-layer-address hints (e.g. table + * indices) in struct netif. TCP and UDP can make use of this to prevent + * scanning the ARP table for every sent packet. While this is faster for big + * ARP tables or many concurrent connections, it might be counterproductive + * if you have a tiny ARP table or if there never are concurrent connections. + */ +#if !defined LWIP_NETIF_HWADDRHINT || defined __DOXYGEN__ +#define LWIP_NETIF_HWADDRHINT 0 +#endif + +/** + * LWIP_NETIF_TX_SINGLE_PBUF: if this is set to 1, lwIP *tries* to put all data + * to be sent into one single pbuf. This is for compatibility with DMA-enabled + * MACs that do not support scatter-gather. + * Beware that this might involve CPU-memcpy before transmitting that would not + * be needed without this flag! Use this only if you need to! + * + * ATTENTION: a driver should *NOT* rely on getting single pbufs but check TX + * pbufs for being in one piece. If not, @ref pbuf_clone can be used to get + * a single pbuf: + * if (p->next != NULL) { + * struct pbuf *q = pbuf_clone(PBUF_RAW, PBUF_RAM, p); + * if (q == NULL) { + * return ERR_MEM; + * } + * p = q; ATTENTION: do NOT free the old 'p' as the ref belongs to the caller! + * } + */ +#if !defined LWIP_NETIF_TX_SINGLE_PBUF || defined __DOXYGEN__ +#define LWIP_NETIF_TX_SINGLE_PBUF 0 +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * LWIP_NUM_NETIF_CLIENT_DATA: Number of clients that may store + * data in client_data member array of struct netif (max. 256). 
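The netif callback options above pair with the extended status callback declared in netif.h; a small sketch of a link monitor (assumes LWIP_NETIF_EXT_STATUS_CALLBACK==1; the names are illustrative):

#include "lwip/netif.h"

NETIF_DECLARE_EXT_CALLBACK(link_monitor)

static void on_netif_event(struct netif *nif, netif_nsc_reason_t reason,
                           const netif_ext_callback_args_t *args)
{
  LWIP_UNUSED_ARG(nif);
  LWIP_UNUSED_ARG(args);
  if (reason & LWIP_NSC_LINK_CHANGED) {
    /* args->link_changed.state: 1 = link up, 0 = link down */
  }
}

void link_monitor_init(void)
{
  netif_add_ext_callback(&link_monitor, on_netif_event);
}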
+ */ +#if !defined LWIP_NUM_NETIF_CLIENT_DATA || defined __DOXYGEN__ +#define LWIP_NUM_NETIF_CLIENT_DATA 0 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- LOOPIF options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_loop Loopback interface + * @ingroup lwip_opts_netif + * @{ + */ +/** + * LWIP_HAVE_LOOPIF==1: Support loop interface (127.0.0.1). + * This is only needed when no real netifs are available. If at least one other + * netif is available, loopback traffic uses this netif. + */ +#if !defined LWIP_HAVE_LOOPIF || defined __DOXYGEN__ +#define LWIP_HAVE_LOOPIF (LWIP_NETIF_LOOPBACK && !LWIP_SINGLE_NETIF) +#endif + +/** + * LWIP_LOOPIF_MULTICAST==1: Support multicast/IGMP on loop interface (127.0.0.1). + */ +#if !defined LWIP_LOOPIF_MULTICAST || defined __DOXYGEN__ +#define LWIP_LOOPIF_MULTICAST 0 +#endif + +/** + * LWIP_NETIF_LOOPBACK==1: Support sending packets with a destination IP + * address equal to the netif IP address, looping them back up the stack. + */ +#if !defined LWIP_NETIF_LOOPBACK || defined __DOXYGEN__ +#define LWIP_NETIF_LOOPBACK 0 +#endif + +/** + * LWIP_LOOPBACK_MAX_PBUFS: Maximum number of pbufs on queue for loopback + * sending for each netif (0 = disabled) + */ +#if !defined LWIP_LOOPBACK_MAX_PBUFS || defined __DOXYGEN__ +#define LWIP_LOOPBACK_MAX_PBUFS 0 +#endif + +/** + * LWIP_NETIF_LOOPBACK_MULTITHREADING: Indicates whether threading is enabled in + * the system, as netifs must change how they behave depending on this setting + * for the LWIP_NETIF_LOOPBACK option to work. + * Setting this is needed to avoid reentering non-reentrant functions like + * tcp_input(). + * LWIP_NETIF_LOOPBACK_MULTITHREADING==1: Indicates that the user is using a + * multithreaded environment like tcpip.c. In this case, netif->input() + * is called directly. + * LWIP_NETIF_LOOPBACK_MULTITHREADING==0: Indicates a polling (or NO_SYS) setup. + * The packets are put on a list and netif_poll() must be called in + * the main application loop. + */ +#if !defined LWIP_NETIF_LOOPBACK_MULTITHREADING || defined __DOXYGEN__ +#define LWIP_NETIF_LOOPBACK_MULTITHREADING (!NO_SYS) +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Thread options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_thread Threading + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * TCPIP_THREAD_NAME: The name assigned to the main tcpip thread. + */ +#if !defined TCPIP_THREAD_NAME || defined __DOXYGEN__ +#define TCPIP_THREAD_NAME "tcpip_thread" +#endif + +/** + * TCPIP_THREAD_STACKSIZE: The stack size used by the main tcpip thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined TCPIP_THREAD_STACKSIZE || defined __DOXYGEN__ +#define TCPIP_THREAD_STACKSIZE 0 +#endif + +/** + * TCPIP_THREAD_PRIO: The priority assigned to the main tcpip thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined TCPIP_THREAD_PRIO || defined __DOXYGEN__ +#define TCPIP_THREAD_PRIO 1 +#endif + +/** + * TCPIP_MBOX_SIZE: The mailbox size for the tcpip thread messages + * The queue size value itself is platform-dependent, but is passed to + * sys_mbox_new() when tcpip_init is called. 
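All of the thread values above are simply forwarded to sys_thread_new()/sys_mbox_new() by the port, so they are normally set in lwipopts.h; an RTOS-based project might use something like this (illustrative values, units follow the port's sys_arch):

#define TCPIP_THREAD_NAME        "tcpip_thread"
#define TCPIP_THREAD_STACKSIZE   1024
#define TCPIP_THREAD_PRIO        3
#define TCPIP_MBOX_SIZE          16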
+ */ +#if !defined TCPIP_MBOX_SIZE || defined __DOXYGEN__ +#define TCPIP_MBOX_SIZE 0 +#endif + +/** + * Define this to something that triggers a watchdog. This is called from + * tcpip_thread after processing a message. + */ +#if !defined LWIP_TCPIP_THREAD_ALIVE || defined __DOXYGEN__ +#define LWIP_TCPIP_THREAD_ALIVE() +#endif + +/** + * SLIPIF_THREAD_NAME: The name assigned to the slipif_loop thread. + */ +#if !defined SLIPIF_THREAD_NAME || defined __DOXYGEN__ +#define SLIPIF_THREAD_NAME "slipif_loop" +#endif + +/** + * SLIP_THREAD_STACKSIZE: The stack size used by the slipif_loop thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined SLIPIF_THREAD_STACKSIZE || defined __DOXYGEN__ +#define SLIPIF_THREAD_STACKSIZE 0 +#endif + +/** + * SLIPIF_THREAD_PRIO: The priority assigned to the slipif_loop thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined SLIPIF_THREAD_PRIO || defined __DOXYGEN__ +#define SLIPIF_THREAD_PRIO 1 +#endif + +/** + * DEFAULT_THREAD_NAME: The name assigned to any other lwIP thread. + */ +#if !defined DEFAULT_THREAD_NAME || defined __DOXYGEN__ +#define DEFAULT_THREAD_NAME "lwIP" +#endif + +/** + * DEFAULT_THREAD_STACKSIZE: The stack size used by any other lwIP thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined DEFAULT_THREAD_STACKSIZE || defined __DOXYGEN__ +#define DEFAULT_THREAD_STACKSIZE 0 +#endif + +/** + * DEFAULT_THREAD_PRIO: The priority assigned to any other lwIP thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined DEFAULT_THREAD_PRIO || defined __DOXYGEN__ +#define DEFAULT_THREAD_PRIO 1 +#endif + +/** + * DEFAULT_RAW_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_RAW. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_RAW_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_RAW_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_UDP_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_UDP. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_UDP_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_UDP_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_TCP_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_TCP. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_TCP_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_TCP_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_ACCEPTMBOX_SIZE: The mailbox size for the incoming connections. + * The queue size value itself is platform-dependent, but is passed to + * sys_mbox_new() when the acceptmbox is created. 
+ */ +#if !defined DEFAULT_ACCEPTMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_ACCEPTMBOX_SIZE 0 +#endif +/** + * @} + */ + +/* + ---------------------------------------------- + ---------- Sequential layer options ---------- + ---------------------------------------------- +*/ +/** + * @defgroup lwip_opts_netconn Netconn + * @ingroup lwip_opts_threadsafe_apis + * @{ + */ +/** + * LWIP_NETCONN==1: Enable Netconn API (require to use api_lib.c) + */ +#if !defined LWIP_NETCONN || defined __DOXYGEN__ +#define LWIP_NETCONN 1 +#endif + +/** LWIP_TCPIP_TIMEOUT==1: Enable tcpip_timeout/tcpip_untimeout to create + * timers running in tcpip_thread from another thread. + */ +#if !defined LWIP_TCPIP_TIMEOUT || defined __DOXYGEN__ +#define LWIP_TCPIP_TIMEOUT 0 +#endif + +/** LWIP_NETCONN_SEM_PER_THREAD==1: Use one (thread-local) semaphore per + * thread calling socket/netconn functions instead of allocating one + * semaphore per netconn (and per select etc.) + * ATTENTION: a thread-local semaphore for API calls is needed: + * - LWIP_NETCONN_THREAD_SEM_GET() returning a sys_sem_t* + * - LWIP_NETCONN_THREAD_SEM_ALLOC() creating the semaphore + * - LWIP_NETCONN_THREAD_SEM_FREE() freeing the semaphore + * The latter 2 can be invoked up by calling netconn_thread_init()/netconn_thread_cleanup(). + * Ports may call these for threads created with sys_thread_new(). + */ +#if !defined LWIP_NETCONN_SEM_PER_THREAD || defined __DOXYGEN__ +#define LWIP_NETCONN_SEM_PER_THREAD 0 +#endif + +/** LWIP_NETCONN_FULLDUPLEX==1: Enable code that allows reading from one thread, + * writing from a 2nd thread and closing from a 3rd thread at the same time. + * ATTENTION: This is currently really alpha! Some requirements: + * - LWIP_NETCONN_SEM_PER_THREAD==1 is required to use one socket/netconn from + * multiple threads at once + * - sys_mbox_free() has to unblock receive tasks waiting on recvmbox/acceptmbox + * and prevent a task pending on this during/after deletion + */ +#if !defined LWIP_NETCONN_FULLDUPLEX || defined __DOXYGEN__ +#define LWIP_NETCONN_FULLDUPLEX 0 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Socket options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_socket Sockets + * @ingroup lwip_opts_threadsafe_apis + * @{ + */ +/** + * LWIP_SOCKET==1: Enable Socket API (require to use sockets.c) + */ +#if !defined LWIP_SOCKET || defined __DOXYGEN__ +#define LWIP_SOCKET 1 +#endif + +/** + * LWIP_COMPAT_SOCKETS==1: Enable BSD-style sockets functions names through defines. + * LWIP_COMPAT_SOCKETS==2: Same as ==1 but correctly named functions are created. + * While this helps code completion, it might conflict with existing libraries. + * (only used if you use sockets.c) + */ +#if !defined LWIP_COMPAT_SOCKETS || defined __DOXYGEN__ +#define LWIP_COMPAT_SOCKETS 1 +#endif + +/** + * LWIP_POSIX_SOCKETS_IO_NAMES==1: Enable POSIX-style sockets functions names. + * Disable this option if you use a POSIX operating system that uses the same + * names (read, write & close). (only used if you use sockets.c) + */ +#if !defined LWIP_POSIX_SOCKETS_IO_NAMES || defined __DOXYGEN__ +#define LWIP_POSIX_SOCKETS_IO_NAMES 1 +#endif + +/** + * LWIP_SOCKET_OFFSET==n: Increases the file descriptor number created by LwIP with n. + * This can be useful when there are multiple APIs which create file descriptors. 
+ * When they all start with a different offset and you won't make them overlap you can + * re implement read/write/close/ioctl/fnctl to send the requested action to the right + * library (sharing select will need more work though). + */ +#if !defined LWIP_SOCKET_OFFSET || defined __DOXYGEN__ +#define LWIP_SOCKET_OFFSET 0 +#endif + +/** + * LWIP_TCP_KEEPALIVE==1: Enable TCP_KEEPIDLE, TCP_KEEPINTVL and TCP_KEEPCNT + * options processing. Note that TCP_KEEPIDLE and TCP_KEEPINTVL have to be set + * in seconds. (does not require sockets.c, and will affect tcp.c) + */ +#if !defined LWIP_TCP_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_TCP_KEEPALIVE 0 +#endif + +/** + * LWIP_SO_SNDTIMEO==1: Enable send timeout for sockets/netconns and + * SO_SNDTIMEO processing. + */ +#if !defined LWIP_SO_SNDTIMEO || defined __DOXYGEN__ +#define LWIP_SO_SNDTIMEO 0 +#endif + +/** + * LWIP_SO_RCVTIMEO==1: Enable receive timeout for sockets/netconns and + * SO_RCVTIMEO processing. + */ +#if !defined LWIP_SO_RCVTIMEO || defined __DOXYGEN__ +#define LWIP_SO_RCVTIMEO 0 +#endif + +/** + * LWIP_SO_SNDRCVTIMEO_NONSTANDARD==1: SO_RCVTIMEO/SO_SNDTIMEO take an int + * (milliseconds, much like winsock does) instead of a struct timeval (default). + */ +#if !defined LWIP_SO_SNDRCVTIMEO_NONSTANDARD || defined __DOXYGEN__ +#define LWIP_SO_SNDRCVTIMEO_NONSTANDARD 0 +#endif + +/** + * LWIP_SO_RCVBUF==1: Enable SO_RCVBUF processing. + */ +#if !defined LWIP_SO_RCVBUF || defined __DOXYGEN__ +#define LWIP_SO_RCVBUF 0 +#endif + +/** + * LWIP_SO_LINGER==1: Enable SO_LINGER processing. + */ +#if !defined LWIP_SO_LINGER || defined __DOXYGEN__ +#define LWIP_SO_LINGER 0 +#endif + +/** + * If LWIP_SO_RCVBUF is used, this is the default value for recv_bufsize. + */ +#if !defined RECV_BUFSIZE_DEFAULT || defined __DOXYGEN__ +#define RECV_BUFSIZE_DEFAULT INT_MAX +#endif + +/** + * By default, TCP socket/netconn close waits 20 seconds max to send the FIN + */ +#if !defined LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT || defined __DOXYGEN__ +#define LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT 20000 +#endif + +/** + * SO_REUSE==1: Enable SO_REUSEADDR option. + */ +#if !defined SO_REUSE || defined __DOXYGEN__ +#define SO_REUSE 0 +#endif + +/** + * SO_REUSE_RXTOALL==1: Pass a copy of incoming broadcast/multicast packets + * to all local matches if SO_REUSEADDR is turned on. + * WARNING: Adds a memcpy for every packet if passing to more than one pcb! + */ +#if !defined SO_REUSE_RXTOALL || defined __DOXYGEN__ +#define SO_REUSE_RXTOALL 0 +#endif + +/** + * LWIP_FIONREAD_LINUXMODE==0 (default): ioctl/FIONREAD returns the amount of + * pending data in the network buffer. This is the way windows does it. It's + * the default for lwIP since it is smaller. + * LWIP_FIONREAD_LINUXMODE==1: ioctl/FIONREAD returns the size of the next + * pending datagram in bytes. This is the way linux does it. This code is only + * here for compatibility. + */ +#if !defined LWIP_FIONREAD_LINUXMODE || defined __DOXYGEN__ +#define LWIP_FIONREAD_LINUXMODE 0 +#endif + +/** + * LWIP_SOCKET_SELECT==1 (default): enable select() for sockets (uses a netconn + * callback to keep track of events). + * This saves RAM (counters per socket) and code (netconn event callback), which + * should improve performance a bit). 
+ */ +#if !defined LWIP_SOCKET_SELECT || defined __DOXYGEN__ +#define LWIP_SOCKET_SELECT 1 +#endif + +/** + * LWIP_SOCKET_POLL==1 (default): enable poll() for sockets (including + * struct pollfd, nfds_t, and constants) + */ +#if !defined LWIP_SOCKET_POLL || defined __DOXYGEN__ +#define LWIP_SOCKET_POLL 1 +#endif +/** + * @} + */ + +/* + ---------------------------------------- + ---------- Statistics options ---------- + ---------------------------------------- +*/ +/** + * @defgroup lwip_opts_stats Statistics + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_STATS==1: Enable statistics collection in lwip_stats. + */ +#if !defined LWIP_STATS || defined __DOXYGEN__ +#define LWIP_STATS 1 +#endif + +#if LWIP_STATS + +/** + * LWIP_STATS_DISPLAY==1: Compile in the statistics output functions. + */ +#if !defined LWIP_STATS_DISPLAY || defined __DOXYGEN__ +#define LWIP_STATS_DISPLAY 0 +#endif + +/** + * LINK_STATS==1: Enable link stats. + */ +#if !defined LINK_STATS || defined __DOXYGEN__ +#define LINK_STATS 1 +#endif + +/** + * ETHARP_STATS==1: Enable etharp stats. + */ +#if !defined ETHARP_STATS || defined __DOXYGEN__ +#define ETHARP_STATS (LWIP_ARP) +#endif + +/** + * IP_STATS==1: Enable IP stats. + */ +#if !defined IP_STATS || defined __DOXYGEN__ +#define IP_STATS 1 +#endif + +/** + * IPFRAG_STATS==1: Enable IP fragmentation stats. Default is + * on if using either frag or reass. + */ +#if !defined IPFRAG_STATS || defined __DOXYGEN__ +#define IPFRAG_STATS (IP_REASSEMBLY || IP_FRAG) +#endif + +/** + * ICMP_STATS==1: Enable ICMP stats. + */ +#if !defined ICMP_STATS || defined __DOXYGEN__ +#define ICMP_STATS 1 +#endif + +/** + * IGMP_STATS==1: Enable IGMP stats. + */ +#if !defined IGMP_STATS || defined __DOXYGEN__ +#define IGMP_STATS (LWIP_IGMP) +#endif + +/** + * UDP_STATS==1: Enable UDP stats. Default is on if + * UDP enabled, otherwise off. + */ +#if !defined UDP_STATS || defined __DOXYGEN__ +#define UDP_STATS (LWIP_UDP) +#endif + +/** + * TCP_STATS==1: Enable TCP stats. Default is on if TCP + * enabled, otherwise off. + */ +#if !defined TCP_STATS || defined __DOXYGEN__ +#define TCP_STATS (LWIP_TCP) +#endif + +/** + * MEM_STATS==1: Enable mem.c stats. + */ +#if !defined MEM_STATS || defined __DOXYGEN__ +#define MEM_STATS ((MEM_LIBC_MALLOC == 0) && (MEM_USE_POOLS == 0)) +#endif + +/** + * MEMP_STATS==1: Enable memp.c pool stats. + */ +#if !defined MEMP_STATS || defined __DOXYGEN__ +#define MEMP_STATS (MEMP_MEM_MALLOC == 0) +#endif + +/** + * SYS_STATS==1: Enable system stats (sem and mbox counts, etc). + */ +#if !defined SYS_STATS || defined __DOXYGEN__ +#define SYS_STATS (NO_SYS == 0) +#endif + +/** + * IP6_STATS==1: Enable IPv6 stats. + */ +#if !defined IP6_STATS || defined __DOXYGEN__ +#define IP6_STATS (LWIP_IPV6) +#endif + +/** + * ICMP6_STATS==1: Enable ICMP for IPv6 stats. + */ +#if !defined ICMP6_STATS || defined __DOXYGEN__ +#define ICMP6_STATS (LWIP_IPV6 && LWIP_ICMP6) +#endif + +/** + * IP6_FRAG_STATS==1: Enable IPv6 fragmentation stats. + */ +#if !defined IP6_FRAG_STATS || defined __DOXYGEN__ +#define IP6_FRAG_STATS (LWIP_IPV6 && (LWIP_IPV6_FRAG || LWIP_IPV6_REASS)) +#endif + +/** + * MLD6_STATS==1: Enable MLD for IPv6 stats. + */ +#if !defined MLD6_STATS || defined __DOXYGEN__ +#define MLD6_STATS (LWIP_IPV6 && LWIP_IPV6_MLD) +#endif + +/** + * ND6_STATS==1: Enable Neighbor discovery for IPv6 stats. + */ +#if !defined ND6_STATS || defined __DOXYGEN__ +#define ND6_STATS (LWIP_IPV6) +#endif + +/** + * MIB2_STATS==1: Stats for SNMP MIB2. 
+ */ +#if !defined MIB2_STATS || defined __DOXYGEN__ +#define MIB2_STATS 0 +#endif + +#else + +#define LINK_STATS 0 +#define ETHARP_STATS 0 +#define IP_STATS 0 +#define IPFRAG_STATS 0 +#define ICMP_STATS 0 +#define IGMP_STATS 0 +#define UDP_STATS 0 +#define TCP_STATS 0 +#define MEM_STATS 0 +#define MEMP_STATS 0 +#define SYS_STATS 0 +#define LWIP_STATS_DISPLAY 0 +#define IP6_STATS 0 +#define ICMP6_STATS 0 +#define IP6_FRAG_STATS 0 +#define MLD6_STATS 0 +#define ND6_STATS 0 +#define MIB2_STATS 0 + +#endif /* LWIP_STATS */ +/** + * @} + */ + +/* + -------------------------------------- + ---------- Checksum options ---------- + -------------------------------------- +*/ +/** + * @defgroup lwip_opts_checksum Checksum + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_CHECKSUM_CTRL_PER_NETIF==1: Checksum generation/check can be enabled/disabled + * per netif. + * ATTENTION: if enabled, the CHECKSUM_GEN_* and CHECKSUM_CHECK_* defines must be enabled! + */ +#if !defined LWIP_CHECKSUM_CTRL_PER_NETIF || defined __DOXYGEN__ +#define LWIP_CHECKSUM_CTRL_PER_NETIF 0 +#endif + +/** + * CHECKSUM_GEN_IP==1: Generate checksums in software for outgoing IP packets. + */ +#if !defined CHECKSUM_GEN_IP || defined __DOXYGEN__ +#define CHECKSUM_GEN_IP 1 +#endif + +/** + * CHECKSUM_GEN_UDP==1: Generate checksums in software for outgoing UDP packets. + */ +#if !defined CHECKSUM_GEN_UDP || defined __DOXYGEN__ +#define CHECKSUM_GEN_UDP 1 +#endif + +/** + * CHECKSUM_GEN_TCP==1: Generate checksums in software for outgoing TCP packets. + */ +#if !defined CHECKSUM_GEN_TCP || defined __DOXYGEN__ +#define CHECKSUM_GEN_TCP 1 +#endif + +/** + * CHECKSUM_GEN_ICMP==1: Generate checksums in software for outgoing ICMP packets. + */ +#if !defined CHECKSUM_GEN_ICMP || defined __DOXYGEN__ +#define CHECKSUM_GEN_ICMP 1 +#endif + +/** + * CHECKSUM_GEN_ICMP6==1: Generate checksums in software for outgoing ICMP6 packets. + */ +#if !defined CHECKSUM_GEN_ICMP6 || defined __DOXYGEN__ +#define CHECKSUM_GEN_ICMP6 1 +#endif + +/** + * CHECKSUM_CHECK_IP==1: Check checksums in software for incoming IP packets. + */ +#if !defined CHECKSUM_CHECK_IP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_IP 1 +#endif + +/** + * CHECKSUM_CHECK_UDP==1: Check checksums in software for incoming UDP packets. + */ +#if !defined CHECKSUM_CHECK_UDP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_UDP 1 +#endif + +/** + * CHECKSUM_CHECK_TCP==1: Check checksums in software for incoming TCP packets. + */ +#if !defined CHECKSUM_CHECK_TCP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_TCP 1 +#endif + +/** + * CHECKSUM_CHECK_ICMP==1: Check checksums in software for incoming ICMP packets. + */ +#if !defined CHECKSUM_CHECK_ICMP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_ICMP 1 +#endif + +/** + * CHECKSUM_CHECK_ICMP6==1: Check checksums in software for incoming ICMPv6 packets + */ +#if !defined CHECKSUM_CHECK_ICMP6 || defined __DOXYGEN__ +#define CHECKSUM_CHECK_ICMP6 1 +#endif + +/** + * LWIP_CHECKSUM_ON_COPY==1: Calculate checksum when copying data from + * application buffers to pbufs. 
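+ * Illustrative note (not this project's configuration): on MACs that compute
+ * IP/UDP/TCP checksums in hardware, the software checksum options above are
+ * usually switched off in lwipopts.h, e.g.:
+ *   #define CHECKSUM_GEN_TCP     0
+ *   #define CHECKSUM_CHECK_TCP   0
+ * and likewise for the other CHECKSUM_GEN_xxx and CHECKSUM_CHECK_xxx symbols;
+ * LWIP_CHECKSUM_CTRL_PER_NETIF==1 allows making that choice per interface.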
+ */ +#if !defined LWIP_CHECKSUM_ON_COPY || defined __DOXYGEN__ +#define LWIP_CHECKSUM_ON_COPY 0 +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- IPv6 options --------------- + --------------------------------------- +*/ +/** + * @defgroup lwip_opts_ipv6 IPv6 + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_IPV6==1: Enable IPv6 + */ +#if !defined LWIP_IPV6 || defined __DOXYGEN__ +#define LWIP_IPV6 0 +#endif + +/** + * IPV6_REASS_MAXAGE: Maximum time (in multiples of IP6_REASS_TMR_INTERVAL - so seconds, normally) + * a fragmented IP packet waits for all fragments to arrive. If not all fragments arrived + * in this time, the whole packet is discarded. + */ +#if !defined IPV6_REASS_MAXAGE || defined __DOXYGEN__ +#define IPV6_REASS_MAXAGE 60 +#endif + +/** + * LWIP_IPV6_SCOPES==1: Enable support for IPv6 address scopes, ensuring that + * e.g. link-local addresses are really treated as link-local. Disable this + * setting only for single-interface configurations. + * All addresses that have a scope according to the default policy (link-local + * unicast addresses, interface-local and link-local multicast addresses) should + * now have a zone set on them before being passed to the core API, although + * lwIP will currently attempt to select a zone on the caller's behalf when + * necessary. Applications that directly assign IPv6 addresses to interfaces + * (which is NOT recommended) must now ensure that link-local addresses carry + * the netif's zone. See the new ip6_zone.h header file for more information and + * relevant macros. For now it is still possible to turn off scopes support + * through the new LWIP_IPV6_SCOPES option. When upgrading an implementation that + * uses the core API directly, it is highly recommended to enable + * LWIP_IPV6_SCOPES_DEBUG at least for a while, to ensure e.g. proper address + * initialization. + */ +#if !defined LWIP_IPV6_SCOPES || defined __DOXYGEN__ +#define LWIP_IPV6_SCOPES (LWIP_IPV6 && !LWIP_SINGLE_NETIF) +#endif + +/** + * LWIP_IPV6_SCOPES_DEBUG==1: Perform run-time checks to verify that addresses + * are properly zoned (see ip6_zone.h on what that means) where it matters. + * Enabling this setting is highly recommended when upgrading from an existing + * installation that is not yet scope-aware; otherwise it may be too expensive. + */ +#if !defined LWIP_IPV6_SCOPES_DEBUG || defined __DOXYGEN__ +#define LWIP_IPV6_SCOPES_DEBUG 0 +#endif + +/** + * LWIP_IPV6_NUM_ADDRESSES: Number of IPv6 addresses per netif. + */ +#if !defined LWIP_IPV6_NUM_ADDRESSES || defined __DOXYGEN__ +#define LWIP_IPV6_NUM_ADDRESSES 3 +#endif + +/** + * LWIP_IPV6_FORWARD==1: Forward IPv6 packets across netifs + */ +#if !defined LWIP_IPV6_FORWARD || defined __DOXYGEN__ +#define LWIP_IPV6_FORWARD 0 +#endif + +/** + * LWIP_IPV6_FRAG==1: Fragment outgoing IPv6 packets that are too big. + */ +#if !defined LWIP_IPV6_FRAG || defined __DOXYGEN__ +#define LWIP_IPV6_FRAG 1 +#endif + +/** + * LWIP_IPV6_REASS==1: reassemble incoming IPv6 packets that fragmented + */ +#if !defined LWIP_IPV6_REASS || defined __DOXYGEN__ +#define LWIP_IPV6_REASS LWIP_IPV6 +#endif + +/** + * LWIP_IPV6_SEND_ROUTER_SOLICIT==1: Send router solicitation messages during + * network startup. + */ +#if !defined LWIP_IPV6_SEND_ROUTER_SOLICIT || defined __DOXYGEN__ +#define LWIP_IPV6_SEND_ROUTER_SOLICIT 1 +#endif + +/** + * LWIP_IPV6_AUTOCONFIG==1: Enable stateless address autoconfiguration as per RFC 4862. 
+ */ +#if !defined LWIP_IPV6_AUTOCONFIG || defined __DOXYGEN__ +#define LWIP_IPV6_AUTOCONFIG LWIP_IPV6 +#endif + +/** + * LWIP_IPV6_ADDRESS_LIFETIMES==1: Keep valid and preferred lifetimes for each + * IPv6 address. Required for LWIP_IPV6_AUTOCONFIG. May still be enabled + * otherwise, in which case the application may assign address lifetimes with + * the appropriate macros. Addresses with no lifetime are assumed to be static. + * If this option is disabled, all addresses are assumed to be static. + */ +#if !defined LWIP_IPV6_ADDRESS_LIFETIMES || defined __DOXYGEN__ +#define LWIP_IPV6_ADDRESS_LIFETIMES LWIP_IPV6_AUTOCONFIG +#endif + +/** + * LWIP_IPV6_DUP_DETECT_ATTEMPTS=[0..7]: Number of duplicate address detection attempts. + */ +#if !defined LWIP_IPV6_DUP_DETECT_ATTEMPTS || defined __DOXYGEN__ +#define LWIP_IPV6_DUP_DETECT_ATTEMPTS 1 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_icmp6 ICMP6 + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_ICMP6==1: Enable ICMPv6 (mandatory per RFC) + */ +#if !defined LWIP_ICMP6 || defined __DOXYGEN__ +#define LWIP_ICMP6 LWIP_IPV6 +#endif + +/** + * LWIP_ICMP6_DATASIZE: bytes from original packet to send back in + * ICMPv6 error messages. + */ +#if !defined LWIP_ICMP6_DATASIZE || defined __DOXYGEN__ +#define LWIP_ICMP6_DATASIZE 8 +#endif + +/** + * LWIP_ICMP6_HL: default hop limit for ICMPv6 messages + */ +#if !defined LWIP_ICMP6_HL || defined __DOXYGEN__ +#define LWIP_ICMP6_HL 255 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_mld6 Multicast listener discovery + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_IPV6_MLD==1: Enable multicast listener discovery protocol. + * If LWIP_IPV6 is enabled but this setting is disabled, the MAC layer must + * indiscriminately pass all inbound IPv6 multicast traffic to lwIP. + */ +#if !defined LWIP_IPV6_MLD || defined __DOXYGEN__ +#define LWIP_IPV6_MLD LWIP_IPV6 +#endif + +/** + * MEMP_NUM_MLD6_GROUP: Max number of IPv6 multicast groups that can be joined. + * There must be enough groups so that each netif can join the solicited-node + * multicast group for each of its local addresses, plus one for MDNS if + * applicable, plus any number of groups to be joined on UDP sockets. + */ +#if !defined MEMP_NUM_MLD6_GROUP || defined __DOXYGEN__ +#define MEMP_NUM_MLD6_GROUP 4 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_nd6 Neighbor discovery + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_ND6_QUEUEING==1: queue outgoing IPv6 packets while MAC address + * is being resolved. + */ +#if !defined LWIP_ND6_QUEUEING || defined __DOXYGEN__ +#define LWIP_ND6_QUEUEING LWIP_IPV6 +#endif + +/** + * MEMP_NUM_ND6_QUEUE: Max number of IPv6 packets to queue during MAC resolution. 
+ */ +#if !defined MEMP_NUM_ND6_QUEUE || defined __DOXYGEN__ +#define MEMP_NUM_ND6_QUEUE 20 +#endif + +/** + * LWIP_ND6_NUM_NEIGHBORS: Number of entries in IPv6 neighbor cache + */ +#if !defined LWIP_ND6_NUM_NEIGHBORS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_NEIGHBORS 10 +#endif + +/** + * LWIP_ND6_NUM_DESTINATIONS: number of entries in IPv6 destination cache + */ +#if !defined LWIP_ND6_NUM_DESTINATIONS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_DESTINATIONS 10 +#endif + +/** + * LWIP_ND6_NUM_PREFIXES: number of entries in IPv6 on-link prefixes cache + */ +#if !defined LWIP_ND6_NUM_PREFIXES || defined __DOXYGEN__ +#define LWIP_ND6_NUM_PREFIXES 5 +#endif + +/** + * LWIP_ND6_NUM_ROUTERS: number of entries in IPv6 default router cache + */ +#if !defined LWIP_ND6_NUM_ROUTERS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_ROUTERS 3 +#endif + +/** + * LWIP_ND6_MAX_MULTICAST_SOLICIT: max number of multicast solicit messages to send + * (neighbor solicit and router solicit) + */ +#if !defined LWIP_ND6_MAX_MULTICAST_SOLICIT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_MULTICAST_SOLICIT 3 +#endif + +/** + * LWIP_ND6_MAX_UNICAST_SOLICIT: max number of unicast neighbor solicitation messages + * to send during neighbor reachability detection. + */ +#if !defined LWIP_ND6_MAX_UNICAST_SOLICIT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_UNICAST_SOLICIT 3 +#endif + +/** + * Unused: See ND RFC (time in milliseconds). + */ +#if !defined LWIP_ND6_MAX_ANYCAST_DELAY_TIME || defined __DOXYGEN__ +#define LWIP_ND6_MAX_ANYCAST_DELAY_TIME 1000 +#endif + +/** + * Unused: See ND RFC + */ +#if !defined LWIP_ND6_MAX_NEIGHBOR_ADVERTISEMENT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_NEIGHBOR_ADVERTISEMENT 3 +#endif + +/** + * LWIP_ND6_REACHABLE_TIME: default neighbor reachable time (in milliseconds). + * May be updated by router advertisement messages. + */ +#if !defined LWIP_ND6_REACHABLE_TIME || defined __DOXYGEN__ +#define LWIP_ND6_REACHABLE_TIME 30000 +#endif + +/** + * LWIP_ND6_RETRANS_TIMER: default retransmission timer for solicitation messages + */ +#if !defined LWIP_ND6_RETRANS_TIMER || defined __DOXYGEN__ +#define LWIP_ND6_RETRANS_TIMER 1000 +#endif + +/** + * LWIP_ND6_DELAY_FIRST_PROBE_TIME: Delay before first unicast neighbor solicitation + * message is sent, during neighbor reachability detection. + */ +#if !defined LWIP_ND6_DELAY_FIRST_PROBE_TIME || defined __DOXYGEN__ +#define LWIP_ND6_DELAY_FIRST_PROBE_TIME 5000 +#endif + +/** + * LWIP_ND6_ALLOW_RA_UPDATES==1: Allow Router Advertisement messages to update + * Reachable time and retransmission timers, and netif MTU. + */ +#if !defined LWIP_ND6_ALLOW_RA_UPDATES || defined __DOXYGEN__ +#define LWIP_ND6_ALLOW_RA_UPDATES 1 +#endif + +/** + * LWIP_ND6_TCP_REACHABILITY_HINTS==1: Allow TCP to provide Neighbor Discovery + * with reachability hints for connected destinations. This helps avoid sending + * unicast neighbor solicitation messages. + */ +#if !defined LWIP_ND6_TCP_REACHABILITY_HINTS || defined __DOXYGEN__ +#define LWIP_ND6_TCP_REACHABILITY_HINTS 1 +#endif + +/** + * LWIP_ND6_RDNSS_MAX_DNS_SERVERS > 0: Use IPv6 Router Advertisement Recursive + * DNS Server Option (as per RFC 6106) to copy a defined maximum number of DNS + * servers to the DNS module. 
+ */ +#if !defined LWIP_ND6_RDNSS_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_ND6_RDNSS_MAX_DNS_SERVERS 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_dhcpv6 DHCPv6 + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_IPV6_DHCP6==1: enable DHCPv6 stateful/stateless address autoconfiguration. + */ +#if !defined LWIP_IPV6_DHCP6 || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6 0 +#endif + +/** + * LWIP_IPV6_DHCP6_STATEFUL==1: enable DHCPv6 stateful address autoconfiguration. + * (not supported, yet!) + */ +#if !defined LWIP_IPV6_DHCP6_STATEFUL || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6_STATEFUL 0 +#endif + +/** + * LWIP_IPV6_DHCP6_STATELESS==1: enable DHCPv6 stateless address autoconfiguration. + */ +#if !defined LWIP_IPV6_DHCP6_STATELESS || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6_STATELESS LWIP_IPV6_DHCP6 +#endif + +/** + * LWIP_DHCP6_GETS_NTP==1: Request NTP servers via DHCPv6. For each + * response packet, a callback is called, which has to be provided by the port: + * void dhcp6_set_ntp_servers(u8_t num_ntp_servers, ip_addr_t* ntp_server_addrs); +*/ +#if !defined LWIP_DHCP6_GET_NTP_SRV || defined __DOXYGEN__ +#define LWIP_DHCP6_GET_NTP_SRV 0 +#endif + +/** + * The maximum of NTP servers requested + */ +#if !defined LWIP_DHCP6_MAX_NTP_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP6_MAX_NTP_SERVERS 1 +#endif + +/** + * LWIP_DHCP6_MAX_DNS_SERVERS > 0: Request DNS servers via DHCPv6. + * DNS servers received in the response are passed to DNS via @ref dns_setserver() + * (up to the maximum limit defined here). + */ +#if !defined LWIP_DHCP6_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP6_MAX_DNS_SERVERS DNS_MAX_SERVERS +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- Hook options --------------- + --------------------------------------- +*/ + +/** + * @defgroup lwip_opts_hooks Hooks + * @ingroup lwip_opts_infrastructure + * Hooks are undefined by default, define them to a function if you need them. + * @{ + */ + +/** + * LWIP_HOOK_FILENAME: Custom filename to \#include in files that provide hooks. + * Declare your hook function prototypes in there, you may also \#include all headers + * providing data types that are need in this file. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_FILENAME "path/to/my/lwip_hooks.h" +#endif + +/** + * LWIP_HOOK_TCP_ISN: + * Hook for generation of the Initial Sequence Number (ISN) for a new TCP + * connection. The default lwIP ISN generation algorithm is very basic and may + * allow for TCP spoofing attacks. This hook provides the means to implement + * the standardized ISN generation algorithm from RFC 6528 (see contrib/adons/tcp_isn), + * or any other desired algorithm as a replacement. 
+ * Called from tcp_connect() and tcp_listen_input() when an ISN is needed for + * a new TCP connection, if TCP support (@ref LWIP_TCP) is enabled.\n + * Signature:\code{.c} + * u32_t my_hook_tcp_isn(const ip_addr_t* local_ip, u16_t local_port, const ip_addr_t* remote_ip, u16_t remote_port); + * \endcode + * - it may be necessary to use "struct ip_addr" (ip4_addr, ip6_addr) instead of "ip_addr_t" in function declarations\n + * Arguments: + * - local_ip: pointer to the local IP address of the connection + * - local_port: local port number of the connection (host-byte order) + * - remote_ip: pointer to the remote IP address of the connection + * - remote_port: remote port number of the connection (host-byte order)\n + * Return value: + * - the 32-bit Initial Sequence Number to use for the new TCP connection. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_ISN(local_ip, local_port, remote_ip, remote_port) +#endif + +/** + * LWIP_HOOK_TCP_INPACKET_PCB: + * Hook for intercepting incoming packets before they are passed to a pcb. This + * allows updating some state or even dropping a packet. + * Signature:\code{.c} + * err_t my_hook_tcp_inpkt(struct tcp_pcb *pcb, struct tcp_hdr *hdr, u16_t optlen, u16_t opt1len, u8_t *opt2, struct pbuf *p); + * \endcode + * Arguments: + * - pcb: tcp_pcb selected for input of this packet (ATTENTION: this may be + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - hdr: pointer to tcp header (ATTENTION: tcp options may not be in one piece!) + * - optlen: tcp option length + * - opt1len: tcp option length 1st part + * - opt2: if this is != NULL, tcp options are split among 2 pbufs. In that case, + * options start at right after the tcp header ('(u8_t*)(hdr + 1)') for + * the first 'opt1len' bytes and the rest starts at 'opt2'. opt2len can + * be simply calculated: 'opt2len = optlen - opt1len;' + * - p: input packet, p->payload points to application data (that's why tcp hdr + * and options are passed in seperately) + * Return value: + * - ERR_OK: continue input of this packet as normal + * - != ERR_OK: drop this packet for input (don't continue input processing) + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_INPACKET_PCB(pcb, hdr, optlen, opt1len, opt2, p) +#endif + +/** + * LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH: + * Hook for increasing the size of the options allocated with a tcp header. + * Together with LWIP_HOOK_TCP_OUT_ADD_TCPOPTS, this can be used to add custom + * options to outgoing tcp segments. + * Signature:\code{.c} + * u8_t my_hook_tcp_out_tcpopt_length(const struct tcp_pcb *pcb, u8_t internal_option_length); + * \endcode + * Arguments: + * - pcb: tcp_pcb that transmits (ATTENTION: this may be NULL or + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - internal_option_length: tcp option length used by the stack internally + * Return value: + * - a number of bytes to allocate for tcp options (internal_option_length <= ret <= 40) + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, internal_len) +#endif + +/** + * LWIP_HOOK_TCP_OUT_ADD_TCPOPTS: + * Hook for adding custom options to outgoing tcp segments. + * Space for these custom options has to be reserved via LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH. 
+ * Signature:\code{.c} + * u32_t *my_hook_tcp_out_add_tcpopts(struct pbuf *p, struct tcp_hdr *hdr, const struct tcp_pcb *pcb, u32_t *opts); + * \endcode + * Arguments: + * - p: output packet, p->payload pointing to tcp header, data follows + * - hdr: tcp header + * - pcb: tcp_pcb that transmits (ATTENTION: this may be NULL or + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - opts: pointer where to add the custom options (there may already be options + * between the header and these) + * Return value: + * - pointer pointing directly after the inserted options + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, hdr, pcb, opts) +#endif + +/** + * LWIP_HOOK_IP4_INPUT(pbuf, input_netif): + * Called from ip_input() (IPv4) + * Signature:\code{.c} + * int my_hook(struct pbuf *pbuf, struct netif *input_netif); + * \endcode + * Arguments: + * - pbuf: received struct pbuf passed to ip_input() + * - input_netif: struct netif on which the packet has been received + * Return values: + * - 0: Hook has not consumed the packet, packet is processed as normal + * - != 0: Hook has consumed the packet. + * If the hook consumed the packet, 'pbuf' is in the responsibility of the hook + * (i.e. free it when done). + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_INPUT(pbuf, input_netif) +#endif + +/** + * LWIP_HOOK_IP4_ROUTE(dest): + * Called from ip_route() (IPv4) + * Signature:\code{.c} + * struct netif *my_hook(const ip4_addr_t *dest); + * \endcode + * Arguments: + * - dest: destination IPv4 address + * Returns values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_ROUTE() +#endif + +/** + * LWIP_HOOK_IP4_ROUTE_SRC(src, dest): + * Source-based routing for IPv4 - called from ip_route() (IPv4) + * Signature:\code{.c} + * struct netif *my_hook(const ip4_addr_t *src, const ip4_addr_t *dest); + * \endcode + * Arguments: + * - src: local/source IPv4 address + * - dest: destination IPv4 address + * Returns values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_ROUTE_SRC(src, dest) +#endif + +/** + * LWIP_HOOK_IP4_CANFORWARD(src, dest): + * Check if an IPv4 can be forwarded - called from: + * ip4_input() -> ip4_forward() -> ip4_canforward() (IPv4) + * - source address is available via ip4_current_src_addr() + * - calling an output function in this context (e.g. multicast router) is allowed + * Signature:\code{.c} + * int my_hook(struct pbuf *p, u32_t dest_addr_hostorder); + * \endcode + * Arguments: + * - p: packet to forward + * - dest: destination IPv4 address + * Returns values: + * - 1: forward + * - 0: don't forward + * - -1: no decision. In that case, ip4_canforward() continues as normal. 
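+ * Illustrative sketch of such a hook (hypothetical function, not part of lwIP),
+ * refusing to forward into 10.0.0.0/8 and otherwise deferring to the default
+ * rules:
+ *   int my_canforward_hook(struct pbuf *p, u32_t dest_addr_hostorder)
+ *   {
+ *     LWIP_UNUSED_ARG(p);
+ *     if ((dest_addr_hostorder & 0xff000000UL) == 0x0a000000UL) {
+ *       return 0;   (do not forward)
+ *     }
+ *     return -1;    (no decision: ip4_canforward() continues as normal)
+ *   }
+ * hooked up in lwipopts.h, assuming the macro receives the packet and the
+ * destination address as described above:
+ *   #define LWIP_HOOK_IP4_CANFORWARD(p, dest) my_canforward_hook((p), (dest))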
+ */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_CANFORWARD(src, dest) +#endif + +/** + * LWIP_HOOK_ETHARP_GET_GW(netif, dest): + * Called from etharp_output() (IPv4) + * Signature:\code{.c} + * const ip4_addr_t *my_hook(struct netif *netif, const ip4_addr_t *dest); + * \endcode + * Arguments: + * - netif: the netif used for sending + * - dest: the destination IPv4 address + * Return values: + * - the IPv4 address of the gateway to handle the specified destination IPv4 address + * - NULL, in which case the netif's default gateway is used + * + * The returned address MUST be directly reachable on the specified netif! + * This function is meant to implement advanced IPv4 routing together with + * LWIP_HOOK_IP4_ROUTE(). The actual routing/gateway table implementation is + * not part of lwIP but can e.g. be hidden in the netif's state argument. +*/ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_ETHARP_GET_GW(netif, dest) +#endif + +/** + * LWIP_HOOK_IP6_INPUT(pbuf, input_netif): + * Called from ip6_input() (IPv6) + * Signature:\code{.c} + * int my_hook(struct pbuf *pbuf, struct netif *input_netif); + * \endcode + * Arguments: + * - pbuf: received struct pbuf passed to ip6_input() + * - input_netif: struct netif on which the packet has been received + * Return values: + * - 0: Hook has not consumed the packet, packet is processed as normal + * - != 0: Hook has consumed the packet. + * If the hook consumed the packet, 'pbuf' is in the responsibility of the hook + * (i.e. free it when done). + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP6_INPUT(pbuf, input_netif) +#endif + +/** + * LWIP_HOOK_IP6_ROUTE(src, dest): + * Called from ip_route() (IPv6) + * Signature:\code{.c} + * struct netif *my_hook(const ip6_addr_t *dest, const ip6_addr_t *src); + * \endcode + * Arguments: + * - src: source IPv6 address + * - dest: destination IPv6 address + * Return values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip6_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP6_ROUTE(src, dest) +#endif + +/** + * LWIP_HOOK_ND6_GET_GW(netif, dest): + * Called from nd6_get_next_hop_entry() (IPv6) + * Signature:\code{.c} + * const ip6_addr_t *my_hook(struct netif *netif, const ip6_addr_t *dest); + * \endcode + * Arguments: + * - netif: the netif used for sending + * - dest: the destination IPv6 address + * Return values: + * - the IPv6 address of the next hop to handle the specified destination IPv6 address + * - NULL, in which case a NDP-discovered router is used instead + * + * The returned address MUST be directly reachable on the specified netif! + * This function is meant to implement advanced IPv6 routing together with + * LWIP_HOOK_IP6_ROUTE(). The actual routing/gateway table implementation is + * not part of lwIP but can e.g. be hidden in the netif's state argument. +*/ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_ND6_GET_GW(netif, dest) +#endif + +/** + * LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr): + * Called from ethernet_input() if VLAN support is enabled + * Signature:\code{.c} + * int my_hook(struct netif *netif, struct eth_hdr *eth_hdr, struct eth_vlan_hdr *vlan_hdr); + * \endcode + * Arguments: + * - netif: struct netif on which the packet has been received + * - eth_hdr: struct eth_hdr of the packet + * - vlan_hdr: struct eth_vlan_hdr of the packet + * Return values: + * - 0: Packet must be dropped. + * - != 0: Packet must be accepted. 
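+ * Illustrative sketch of such a hook (hypothetical function, not part of lwIP;
+ * assumes the prio_vid field layout from lwip/prot/ethernet.h), accepting only
+ * VLAN ID 100:
+ *   int my_vlan_check(struct netif *netif, struct eth_hdr *eth_hdr,
+ *                     struct eth_vlan_hdr *vlan_hdr)
+ *   {
+ *     LWIP_UNUSED_ARG(netif);
+ *     LWIP_UNUSED_ARG(eth_hdr);
+ *     return (lwip_ntohs(vlan_hdr->prio_vid) & 0x0fff) == 100;
+ *   }
+ * enabled from lwipopts.h with:
+ *   #define LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr) my_vlan_check((netif), (eth_hdr), (vlan_hdr))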
+ */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr) +#endif + +/** + * LWIP_HOOK_VLAN_SET: + * Hook can be used to set prio_vid field of vlan_hdr. If you need to store data + * on per-netif basis to implement this callback, see @ref netif_cd. + * Called from ethernet_output() if VLAN support (@ref ETHARP_SUPPORT_VLAN) is enabled.\n + * Signature:\code{.c} + * s32_t my_hook_vlan_set(struct netif* netif, struct pbuf* pbuf, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type);\n + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - p: struct pbuf packet to be sent + * - src: source eth address + * - dst: destination eth address + * - eth_type: ethernet type to packet to be sent\n + * + * + * Return values: + * - <0: Packet shall not contain VLAN header. + * - 0 <= return value <= 0xFFFF: Packet shall contain VLAN header. Return value is prio_vid in host byte order. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_VLAN_SET(netif, p, src, dst, eth_type) +#endif + +/** + * LWIP_HOOK_MEMP_AVAILABLE(memp_t_type): + * Called from memp_free() when a memp pool was empty and an item is now available + * Signature:\code{.c} + * void my_hook(memp_t type); + * \endcode + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_MEMP_AVAILABLE(memp_t_type) +#endif + +/** + * LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(pbuf, netif): + * Called from ethernet_input() when an unknown eth type is encountered. + * Signature:\code{.c} + * err_t my_hook(struct pbuf* pbuf, struct netif* netif); + * \endcode + * Arguments: + * - p: rx packet with unknown eth type + * - netif: netif on which the packet has been received + * Return values: + * - ERR_OK if packet is accepted (hook function now owns the pbuf) + * - any error code otherwise (pbuf is freed) + * + * Payload points to ethernet header! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(pbuf, netif) +#endif + +/** + * LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr): + * Called from various dhcp functions when sending a DHCP message. + * This hook is called just before the DHCP message trailer is added, so the + * options are at the end of a DHCP message. + * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp *dhcp, u8_t state, struct dhcp_msg *msg, + * u8_t msg_type, u16_t *options_len_ptr); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp: struct dhcp on that netif + * - state: current dhcp state (dhcp_state_enum_t as an u8_t) + * - msg: struct dhcp_msg that will be sent + * - msg_type: dhcp message type to be sent (u8_t) + * - options_len_ptr: pointer to the current length of options in the dhcp_msg "msg" + * (must be increased when options are added!) + * + * Options need to appended like this: + * LWIP_ASSERT("dhcp option overflow", *options_len_ptr + option_len + 2 <= DHCP_OPTIONS_LEN); + * msg->options[(*options_len_ptr)++] = <option_number>; + * msg->options[(*options_len_ptr)++] = <option_len>; + * msg->options[(*options_len_ptr)++] = <option_bytes>; + * [...] + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr) +#endif + +/** + * LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, option_value_offset): + * Called from dhcp_parse_reply when receiving a DHCP message. + * This hook is called for every option in the received message that is not handled internally. 
+ * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp *dhcp, u8_t state, struct dhcp_msg *msg, + * u8_t msg_type, u8_t option, u8_t option_len, struct pbuf *pbuf, u16_t option_value_offset); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp: struct dhcp on that netif + * - state: current dhcp state (dhcp_state_enum_t as an u8_t) + * - msg: struct dhcp_msg that was received + * - msg_type: dhcp message type received (u8_t, ATTENTION: only valid after + * the message type option has been parsed!) + * - option: option value (u8_t) + * - len: option data length (u8_t) + * - pbuf: pbuf where option data is contained + * - option_value_offset: offset in pbuf where option data begins + * + * A nice way to get the option contents is pbuf_get_contiguous(): + * u8_t buf[32]; + * u8_t *ptr = (u8_t*)pbuf_get_contiguous(p, buf, sizeof(buf), LWIP_MIN(option_len, sizeof(buf)), offset); + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, offset) +#endif + +/** + * LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len): + * Called from various dhcp6 functions when sending a DHCP6 message. + * This hook is called just before the DHCP6 message is sent, so the + * options are at the end of a DHCP6 message. + * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp6 *dhcp, u8_t state, struct dhcp6_msg *msg, + * u8_t msg_type, u16_t *options_len_ptr); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp6: struct dhcp6 on that netif + * - state: current dhcp6 state (dhcp6_state_enum_t as an u8_t) + * - msg: struct dhcp6_msg that will be sent + * - msg_type: dhcp6 message type to be sent (u8_t) + * - options_len_ptr: pointer to the current length of options in the dhcp6_msg "msg" + * (must be increased when options are added!) + * + * Options need to appended like this: + * u8_t *options = (u8_t *)(msg + 1); + * LWIP_ASSERT("dhcp option overflow", sizeof(struct dhcp6_msg) + *options_len_ptr + newoptlen <= max_len); + * options[(*options_len_ptr)++] = <option_data>; + * [...] + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len) +#endif + +/** + * LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, err) + * Called from socket API to implement setsockopt() for options not provided by lwIP. + * Core lock is held when this hook is called. + * Signature:\code{.c} + * int my_hook(int s, struct lwip_sock *sock, int level, int optname, const void *optval, socklen_t optlen, int *err) + * \endcode + * Arguments: + * - s: socket file descriptor + * - sock: internal socket descriptor (see lwip/priv/sockets_priv.h) + * - level: protocol level at which the option resides + * - optname: option to set + * - optval: value to set + * - optlen: size of optval + * - err: output error + * Return values: + * - 0: Hook has not consumed the option, code continues as normal (to internal options) + * - != 0: Hook has consumed the option, 'err' is returned + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, err) +#endif + +/** + * LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, err) + * Called from socket API to implement getsockopt() for options not provided by lwIP. + * Core lock is held when this hook is called. 
+ * Signature:\code{.c} + * int my_hook(int s, struct lwip_sock *sock, int level, int optname, void *optval, socklen_t *optlen, int *err) + * \endcode + * Arguments: + * - s: socket file descriptor + * - sock: internal socket descriptor (see lwip/priv/sockets_priv.h) + * - level: protocol level at which the option resides + * - optname: option to get + * - optval: value to get + * - optlen: size of optval + * - err: output error + * Return values: + * - 0: Hook has not consumed the option, code continues as normal (to internal options) + * - != 0: Hook has consumed the option, 'err' is returned + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, err) +#endif + +/** + * LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, addrtype, err) + * Called from netconn APIs (not usable with callback apps) allowing an + * external DNS resolver (which uses sequential API) to handle the query. + * Signature:\code{.c} + * int my_hook(const char *name, ip_addr_t *addr, u8_t addrtype, err_t *err) + * \endcode + * Arguments: + * - name: hostname to resolve + * - addr: output host address + * - addrtype: type of address to query + * - err: output error + * Return values: + * - 0: Hook has not consumed hostname query, query continues into DNS module + * - != 0: Hook has consumed the query + * + * err must also be checked to determine if the hook consumed the query, but + * the query failed + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, addrtype, err) +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- Debugging options ---------- + --------------------------------------- +*/ +/** + * @defgroup lwip_opts_debugmsg Debug messages + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_DBG_MIN_LEVEL: After masking, the value of the debug is + * compared against this value. If it is smaller, then debugging + * messages are written. + * @see debugging_levels + */ +#if !defined LWIP_DBG_MIN_LEVEL || defined __DOXYGEN__ +#define LWIP_DBG_MIN_LEVEL LWIP_DBG_LEVEL_ALL +#endif + +/** + * LWIP_DBG_TYPES_ON: A mask that can be used to globally enable/disable + * debug messages of certain types. + * @see debugging_levels + */ +#if !defined LWIP_DBG_TYPES_ON || defined __DOXYGEN__ +#define LWIP_DBG_TYPES_ON LWIP_DBG_ON +#endif + +/** + * ETHARP_DEBUG: Enable debugging in etharp.c. + */ +#if !defined ETHARP_DEBUG || defined __DOXYGEN__ +#define ETHARP_DEBUG LWIP_DBG_OFF +#endif + +/** + * NETIF_DEBUG: Enable debugging in netif.c. + */ +#if !defined NETIF_DEBUG || defined __DOXYGEN__ +#define NETIF_DEBUG LWIP_DBG_OFF +#endif + +/** + * PBUF_DEBUG: Enable debugging in pbuf.c. + */ +#if !defined PBUF_DEBUG || defined __DOXYGEN__ +#define PBUF_DEBUG LWIP_DBG_OFF +#endif + +/** + * API_LIB_DEBUG: Enable debugging in api_lib.c. + */ +#if !defined API_LIB_DEBUG || defined __DOXYGEN__ +#define API_LIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * API_MSG_DEBUG: Enable debugging in api_msg.c. + */ +#if !defined API_MSG_DEBUG || defined __DOXYGEN__ +#define API_MSG_DEBUG LWIP_DBG_OFF +#endif + +/** + * SOCKETS_DEBUG: Enable debugging in sockets.c. + */ +#if !defined SOCKETS_DEBUG || defined __DOXYGEN__ +#define SOCKETS_DEBUG LWIP_DBG_OFF +#endif + +/** + * ICMP_DEBUG: Enable debugging in icmp.c. + */ +#if !defined ICMP_DEBUG || defined __DOXYGEN__ +#define ICMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * IGMP_DEBUG: Enable debugging in igmp.c. 
+ */ +#if !defined IGMP_DEBUG || defined __DOXYGEN__ +#define IGMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * INET_DEBUG: Enable debugging in inet.c. + */ +#if !defined INET_DEBUG || defined __DOXYGEN__ +#define INET_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP_DEBUG: Enable debugging for IP. + */ +#if !defined IP_DEBUG || defined __DOXYGEN__ +#define IP_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP_REASS_DEBUG: Enable debugging in ip_frag.c for both frag & reass. + */ +#if !defined IP_REASS_DEBUG || defined __DOXYGEN__ +#define IP_REASS_DEBUG LWIP_DBG_OFF +#endif + +/** + * RAW_DEBUG: Enable debugging in raw.c. + */ +#if !defined RAW_DEBUG || defined __DOXYGEN__ +#define RAW_DEBUG LWIP_DBG_OFF +#endif + +/** + * MEM_DEBUG: Enable debugging in mem.c. + */ +#if !defined MEM_DEBUG || defined __DOXYGEN__ +#define MEM_DEBUG LWIP_DBG_OFF +#endif + +/** + * MEMP_DEBUG: Enable debugging in memp.c. + */ +#if !defined MEMP_DEBUG || defined __DOXYGEN__ +#define MEMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SYS_DEBUG: Enable debugging in sys.c. + */ +#if !defined SYS_DEBUG || defined __DOXYGEN__ +#define SYS_DEBUG LWIP_DBG_OFF +#endif + +/** + * TIMERS_DEBUG: Enable debugging in timers.c. + */ +#if !defined TIMERS_DEBUG || defined __DOXYGEN__ +#define TIMERS_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_DEBUG: Enable debugging for TCP. + */ +#if !defined TCP_DEBUG || defined __DOXYGEN__ +#define TCP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_INPUT_DEBUG: Enable debugging in tcp_in.c for incoming debug. + */ +#if !defined TCP_INPUT_DEBUG || defined __DOXYGEN__ +#define TCP_INPUT_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_FR_DEBUG: Enable debugging in tcp_in.c for fast retransmit. + */ +#if !defined TCP_FR_DEBUG || defined __DOXYGEN__ +#define TCP_FR_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_RTO_DEBUG: Enable debugging in TCP for retransmit + * timeout. + */ +#if !defined TCP_RTO_DEBUG || defined __DOXYGEN__ +#define TCP_RTO_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_CWND_DEBUG: Enable debugging for TCP congestion window. + */ +#if !defined TCP_CWND_DEBUG || defined __DOXYGEN__ +#define TCP_CWND_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_WND_DEBUG: Enable debugging in tcp_in.c for window updating. + */ +#if !defined TCP_WND_DEBUG || defined __DOXYGEN__ +#define TCP_WND_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_OUTPUT_DEBUG: Enable debugging in tcp_out.c output functions. + */ +#if !defined TCP_OUTPUT_DEBUG || defined __DOXYGEN__ +#define TCP_OUTPUT_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_RST_DEBUG: Enable debugging for TCP with the RST message. + */ +#if !defined TCP_RST_DEBUG || defined __DOXYGEN__ +#define TCP_RST_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_QLEN_DEBUG: Enable debugging for TCP queue lengths. + */ +#if !defined TCP_QLEN_DEBUG || defined __DOXYGEN__ +#define TCP_QLEN_DEBUG LWIP_DBG_OFF +#endif + +/** + * UDP_DEBUG: Enable debugging in UDP. + */ +#if !defined UDP_DEBUG || defined __DOXYGEN__ +#define UDP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCPIP_DEBUG: Enable debugging in tcpip.c. + */ +#if !defined TCPIP_DEBUG || defined __DOXYGEN__ +#define TCPIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SLIP_DEBUG: Enable debugging in slipif.c. + */ +#if !defined SLIP_DEBUG || defined __DOXYGEN__ +#define SLIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * DHCP_DEBUG: Enable debugging in dhcp.c. + */ +#if !defined DHCP_DEBUG || defined __DOXYGEN__ +#define DHCP_DEBUG LWIP_DBG_OFF +#endif + +/** + * AUTOIP_DEBUG: Enable debugging in autoip.c. 
+ */ +#if !defined AUTOIP_DEBUG || defined __DOXYGEN__ +#define AUTOIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * DNS_DEBUG: Enable debugging for DNS. + */ +#if !defined DNS_DEBUG || defined __DOXYGEN__ +#define DNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP6_DEBUG: Enable debugging for IPv6. + */ +#if !defined IP6_DEBUG || defined __DOXYGEN__ +#define IP6_DEBUG LWIP_DBG_OFF +#endif + +/** + * DHCP6_DEBUG: Enable debugging in dhcp6.c. + */ +#if !defined DHCP6_DEBUG || defined __DOXYGEN__ +#define DHCP6_DEBUG LWIP_DBG_OFF +#endif +/** + * @} + */ + +/** + * LWIP_TESTMODE: Changes to make unit test possible + */ +#if !defined LWIP_TESTMODE +#define LWIP_TESTMODE 0 +#endif + +/* + -------------------------------------------------- + ---------- Performance tracking options ---------- + -------------------------------------------------- +*/ +/** + * @defgroup lwip_opts_perf Performance + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_PERF: Enable performance testing for lwIP + * (if enabled, arch/perf.h is included) + */ +#if !defined LWIP_PERF || defined __DOXYGEN__ +#define LWIP_PERF 0 +#endif +/** + * @} + */ + +#endif /* LWIP_HDR_OPT_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h new file mode 100644 index 000000000..82902a4e9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h @@ -0,0 +1,322 @@ +/** + * @file + * pbuf API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_PBUF_H +#define LWIP_HDR_PBUF_H + +#include "lwip/opt.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** LWIP_SUPPORT_CUSTOM_PBUF==1: Custom pbufs behave much like their pbuf type + * but they are allocated by external code (initialised by calling + * pbuf_alloced_custom()) and when pbuf_free gives up their last reference, they + * are freed by calling pbuf_custom->custom_free_function(). + * Currently, the pbuf_custom code is only needed for one specific configuration + * of IP_FRAG, unless required by external driver/application code. */ +#ifndef LWIP_SUPPORT_CUSTOM_PBUF +#define LWIP_SUPPORT_CUSTOM_PBUF ((IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF) || (LWIP_IPV6 && LWIP_IPV6_FRAG)) +#endif + +/** @ingroup pbuf + * PBUF_NEEDS_COPY(p): return a boolean value indicating whether the given + * pbuf needs to be copied in order to be kept around beyond the current call + * stack without risking being corrupted. The default setting provides safety: + * it will make a copy iof any pbuf chain that does not consist entirely of + * PBUF_ROM type pbufs. For setups with zero-copy support, it may be redefined + * to evaluate to true in all cases, for example. However, doing so also has an + * effect on the application side: any buffers that are *not* copied must also + * *not* be reused by the application after passing them to lwIP. For example, + * when setting PBUF_NEEDS_COPY to (0), after using udp_send() with a PBUF_RAM + * pbuf, the application must free the pbuf immediately, rather than reusing it + * for other purposes. For more background information on this, see tasks #6735 + * and #7896, and bugs #11400 and #49914. */ +#ifndef PBUF_NEEDS_COPY +#define PBUF_NEEDS_COPY(p) ((p)->type_internal & PBUF_TYPE_FLAG_DATA_VOLATILE) +#endif /* PBUF_NEEDS_COPY */ + +/* @todo: We need a mechanism to prevent wasting memory in every pbuf + (TCP vs. UDP, IPv4 vs. IPv6: UDP/IPv4 packets may waste up to 28 bytes) */ + +#define PBUF_TRANSPORT_HLEN 20 +#if LWIP_IPV6 +#define PBUF_IP_HLEN 40 +#else +#define PBUF_IP_HLEN 20 +#endif + +/** + * @ingroup pbuf + * Enumeration of pbuf layers + */ +typedef enum { + /** Includes spare room for transport layer header, e.g. UDP header. + * Use this if you intend to pass the pbuf to functions like udp_send(). + */ + PBUF_TRANSPORT = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN, + /** Includes spare room for IP header. + * Use this if you intend to pass the pbuf to functions like raw_send(). + */ + PBUF_IP = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN, + /** Includes spare room for link layer header (ethernet header). + * Use this if you intend to pass the pbuf to functions like ethernet_output(). + * @see PBUF_LINK_HLEN + */ + PBUF_LINK = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN, + /** Includes spare room for additional encapsulation header before ethernet + * headers (e.g. 802.11). + * Use this if you intend to pass the pbuf to functions like netif->linkoutput(). + * @see PBUF_LINK_ENCAPSULATION_HLEN + */ + PBUF_RAW_TX = PBUF_LINK_ENCAPSULATION_HLEN, + /** Use this for input packets in a netif driver when calling netif->input() + * in the most common case - ethernet-layer netif driver. */ + PBUF_RAW = 0 +} pbuf_layer; + + +/* Base flags for pbuf_type definitions: */ + +/** Indicates that the payload directly follows the struct pbuf. + * This makes @ref pbuf_header work in both directions. 
*/ +#define PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS 0x80 +/** Indicates the data stored in this pbuf can change. If this pbuf needs + * to be queued, it must be copied/duplicated. */ +#define PBUF_TYPE_FLAG_DATA_VOLATILE 0x40 +/** 4 bits are reserved for 16 allocation sources (e.g. heap, pool1, pool2, etc) + * Internally, we use: 0=heap, 1=MEMP_PBUF, 2=MEMP_PBUF_POOL -> 13 types free*/ +#define PBUF_TYPE_ALLOC_SRC_MASK 0x0F +/** Indicates this pbuf is used for RX (if not set, indicates use for TX). + * This information can be used to keep some spare RX buffers e.g. for + * receiving TCP ACKs to unblock a connection) */ +#define PBUF_ALLOC_FLAG_RX 0x0100 +/** Indicates the application needs the pbuf payload to be in one piece */ +#define PBUF_ALLOC_FLAG_DATA_CONTIGUOUS 0x0200 + +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP 0x00 +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF 0x01 +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL 0x02 +/** First pbuf allocation type for applications */ +#define PBUF_TYPE_ALLOC_SRC_MASK_APP_MIN 0x03 +/** Last pbuf allocation type for applications */ +#define PBUF_TYPE_ALLOC_SRC_MASK_APP_MAX PBUF_TYPE_ALLOC_SRC_MASK + +/** + * @ingroup pbuf + * Enumeration of pbuf types + */ +typedef enum { + /** pbuf data is stored in RAM, used for TX mostly, struct pbuf and its payload + are allocated in one piece of contiguous memory (so the first payload byte + can be calculated from struct pbuf). + pbuf_alloc() allocates PBUF_RAM pbufs as unchained pbufs (although that might + change in future versions). + This should be used for all OUTGOING packets (TX).*/ + PBUF_RAM = (PBUF_ALLOC_FLAG_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP), + /** pbuf data is stored in ROM, i.e. struct pbuf and its payload are located in + totally different memory areas. Since it points to ROM, payload does not + have to be copied when queued for transmission. */ + PBUF_ROM = PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF, + /** pbuf comes from the pbuf pool. Much like PBUF_ROM but payload might change + so it has to be duplicated when queued before transmitting, depending on + who has a 'ref' to it. */ + PBUF_REF = (PBUF_TYPE_FLAG_DATA_VOLATILE | PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF), + /** pbuf payload refers to RAM. This one comes from a pool and should be used + for RX. Payload can be chained (scatter-gather RX) but like PBUF_RAM, struct + pbuf and its payload are allocated in one piece of contiguous memory (so + the first payload byte can be calculated from struct pbuf). + Don't use this for TX, if the pool becomes empty e.g. because of TCP queuing, + you are unable to receive TCP acks! 
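+
+     An illustrative sketch of how a layer and a type are combined on the TX
+     side (not part of the upstream header; 'data', 'len' and 'upcb' are
+     placeholder names for application state):
+
+       struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM);
+       if (p != NULL) {
+         pbuf_take(p, data, len);   copy application data into the pbuf
+         udp_send(upcb, p);         stack prepends UDP/IP/link headers in place
+         pbuf_free(p);              drop the application's reference
+       }
+
+     RX buffers handed to netif->input(), by contrast, normally come from
+     PBUF_POOL, e.g. pbuf_alloc(PBUF_RAW, frame_len, PBUF_POOL) in the driver.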
*/ + PBUF_POOL = (PBUF_ALLOC_FLAG_RX | PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) +} pbuf_type; + + +/** indicates this packet's data should be immediately passed to the application */ +#define PBUF_FLAG_PUSH 0x01U +/** indicates this is a custom pbuf: pbuf_free calls pbuf_custom->custom_free_function() + when the last reference is released (plus custom PBUF_RAM cannot be trimmed) */ +#define PBUF_FLAG_IS_CUSTOM 0x02U +/** indicates this pbuf is UDP multicast to be looped back */ +#define PBUF_FLAG_MCASTLOOP 0x04U +/** indicates this pbuf was received as link-level broadcast */ +#define PBUF_FLAG_LLBCAST 0x08U +/** indicates this pbuf was received as link-level multicast */ +#define PBUF_FLAG_LLMCAST 0x10U +/** indicates this pbuf includes a TCP FIN flag */ +#define PBUF_FLAG_TCP_FIN 0x20U + +/** Main packet buffer struct */ +struct pbuf { + /** next pbuf in singly linked pbuf chain */ + struct pbuf *next; + + /** pointer to the actual data in the buffer */ + void *payload; + + /** + * total length of this buffer and all next buffers in chain + * belonging to the same packet. + * + * For non-queue packet chains this is the invariant: + * p->tot_len == p->len + (p->next? p->next->tot_len: 0) + */ + u16_t tot_len; + + /** length of this buffer */ + u16_t len; + + /** a bit field indicating pbuf type and allocation sources + (see PBUF_TYPE_FLAG_*, PBUF_ALLOC_FLAG_* and PBUF_TYPE_ALLOC_SRC_MASK) + */ + u8_t type_internal; + + /** misc flags */ + u8_t flags; + + /** + * the reference count always equals the number of pointers + * that refer to this pbuf. This can be pointers from an application, + * the stack itself, or pbuf->next pointers from a chain. + */ + LWIP_PBUF_REF_T ref; + + /** For incoming packets, this contains the input netif's index */ + u8_t if_idx; +}; + + +/** Helper struct for const-correctness only. + * The only meaning of this one is to provide a const payload pointer + * for PBUF_ROM type. + */ +struct pbuf_rom { + /** next pbuf in singly linked pbuf chain */ + struct pbuf *next; + + /** pointer to the actual data in the buffer */ + const void *payload; +}; + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** Prototype for a function to free a custom pbuf */ +typedef void (*pbuf_free_custom_fn)(struct pbuf *p); + +/** A custom pbuf: like a pbuf, but following a function pointer to free it. */ +struct pbuf_custom { + /** The actual pbuf */ + struct pbuf pbuf; + /** This function is called when pbuf_free deallocates this pbuf(_custom) */ + pbuf_free_custom_fn custom_free_function; +}; +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** Define this to 0 to prevent freeing ooseq pbufs when the PBUF_POOL is empty */ +#ifndef PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_FREE_OOSEQ 1 +#endif /* PBUF_POOL_FREE_OOSEQ */ +#if LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ +extern volatile u8_t pbuf_free_ooseq_pending; +void pbuf_free_ooseq(void); +/** When not using sys_check_timeouts(), call PBUF_CHECK_FREE_OOSEQ() + at regular intervals from main level to check if ooseq pbufs need to be + freed! 
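+
+   A minimal sketch of such a main-level super-loop (NO_SYS==1; the driver
+   poll and timer helpers are placeholder names, not lwIP functions):
+
+     for (;;) {
+       my_netif_poll(&netif);       feed received frames into lwIP
+       my_run_lwip_timers();        application-driven replacement for sys_check_timeouts()
+       PBUF_CHECK_FREE_OOSEQ();     frees ooseq pbufs if pbuf_alloc() found PBUF_POOL empty
+     }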
*/ +#define PBUF_CHECK_FREE_OOSEQ() do { if(pbuf_free_ooseq_pending) { \ + /* pbuf_alloc() reported PBUF_POOL to be empty -> try to free some \ + ooseq queued pbufs now */ \ + pbuf_free_ooseq(); }}while(0) +#else /* LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ */ + /* Otherwise declare an empty PBUF_CHECK_FREE_OOSEQ */ + #define PBUF_CHECK_FREE_OOSEQ() +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ*/ + +/* Initializes the pbuf module. This call is empty for now, but may not be in future. */ +#define pbuf_init() + +struct pbuf *pbuf_alloc(pbuf_layer l, u16_t length, pbuf_type type); +struct pbuf *pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type); +#if LWIP_SUPPORT_CUSTOM_PBUF +struct pbuf *pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, + struct pbuf_custom *p, void *payload_mem, + u16_t payload_mem_len); +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ +void pbuf_realloc(struct pbuf *p, u16_t size); +#define pbuf_get_allocsrc(p) ((p)->type_internal & PBUF_TYPE_ALLOC_SRC_MASK) +#define pbuf_match_allocsrc(p, type) (pbuf_get_allocsrc(p) == ((type) & PBUF_TYPE_ALLOC_SRC_MASK)) +#define pbuf_match_type(p, type) pbuf_match_allocsrc(p, type) +u8_t pbuf_header(struct pbuf *p, s16_t header_size); +u8_t pbuf_header_force(struct pbuf *p, s16_t header_size); +u8_t pbuf_add_header(struct pbuf *p, size_t header_size_increment); +u8_t pbuf_add_header_force(struct pbuf *p, size_t header_size_increment); +u8_t pbuf_remove_header(struct pbuf *p, size_t header_size); +struct pbuf *pbuf_free_header(struct pbuf *q, u16_t size); +void pbuf_ref(struct pbuf *p); +u8_t pbuf_free(struct pbuf *p); +u16_t pbuf_clen(const struct pbuf *p); +void pbuf_cat(struct pbuf *head, struct pbuf *tail); +void pbuf_chain(struct pbuf *head, struct pbuf *tail); +struct pbuf *pbuf_dechain(struct pbuf *p); +err_t pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from); +u16_t pbuf_copy_partial(const struct pbuf *p, void *dataptr, u16_t len, u16_t offset); +void *pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset); +err_t pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len); +err_t pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset); +struct pbuf *pbuf_skip(struct pbuf* in, u16_t in_offset, u16_t* out_offset); +struct pbuf *pbuf_coalesce(struct pbuf *p, pbuf_layer layer); +struct pbuf *pbuf_clone(pbuf_layer l, pbuf_type type, struct pbuf *p); +#if LWIP_CHECKSUM_ON_COPY +err_t pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum); +#endif /* LWIP_CHECKSUM_ON_COPY */ +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest); +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +u8_t pbuf_get_at(const struct pbuf* p, u16_t offset); +int pbuf_try_get_at(const struct pbuf* p, u16_t offset); +void pbuf_put_at(struct pbuf* p, u16_t offset, u8_t data); +u16_t pbuf_memcmp(const struct pbuf* p, u16_t offset, const void* s2, u16_t n); +u16_t pbuf_memfind(const struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset); +u16_t pbuf_strstr(const struct pbuf* p, const char* substr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PBUF_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h new file mode 100644 index 000000000..2d3b2fdbc 
--- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h @@ -0,0 +1,146 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_PRIV_H +#define LWIP_HDR_ALTCP_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb *altcp_alloc(void); +void altcp_free(struct altcp_pcb *conn); + +/* Function prototypes for application layers */ +typedef void (*altcp_set_poll_fn)(struct altcp_pcb *conn, u8_t interval); +typedef void (*altcp_recved_fn)(struct altcp_pcb *conn, u16_t len); +typedef err_t (*altcp_bind_fn)(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +typedef err_t (*altcp_connect_fn)(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected); + +typedef struct altcp_pcb *(*altcp_listen_fn)(struct altcp_pcb *conn, u8_t backlog, err_t *err); + +typedef void (*altcp_abort_fn)(struct altcp_pcb *conn); +typedef err_t (*altcp_close_fn)(struct altcp_pcb *conn); +typedef err_t (*altcp_shutdown_fn)(struct altcp_pcb *conn, int shut_rx, int shut_tx); + +typedef err_t (*altcp_write_fn)(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +typedef err_t (*altcp_output_fn)(struct altcp_pcb *conn); + +typedef u16_t (*altcp_mss_fn)(struct altcp_pcb *conn); +typedef u16_t (*altcp_sndbuf_fn)(struct altcp_pcb *conn); +typedef u16_t (*altcp_sndqueuelen_fn)(struct altcp_pcb *conn); +typedef void (*altcp_nagle_disable_fn)(struct altcp_pcb *conn); +typedef void (*altcp_nagle_enable_fn)(struct altcp_pcb *conn); +typedef int (*altcp_nagle_disabled_fn)(struct altcp_pcb *conn); + +typedef void (*altcp_setprio_fn)(struct altcp_pcb *conn, u8_t prio); + +typedef void (*altcp_dealloc_fn)(struct altcp_pcb *conn); + +typedef err_t (*altcp_get_tcp_addrinfo_fn)(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +typedef ip_addr_t *(*altcp_get_ip_fn)(struct altcp_pcb *conn, int local); +typedef u16_t (*altcp_get_port_fn)(struct altcp_pcb *conn, int local); + +#ifdef LWIP_DEBUG +typedef enum tcp_state (*altcp_dbg_get_tcp_state_fn)(struct altcp_pcb *conn); +#endif + +struct altcp_functions { + altcp_set_poll_fn set_poll; + altcp_recved_fn recved; + altcp_bind_fn bind; + altcp_connect_fn connect; + altcp_listen_fn listen; + altcp_abort_fn abort; + altcp_close_fn close; + altcp_shutdown_fn shutdown; + altcp_write_fn write; + altcp_output_fn output; + altcp_mss_fn mss; + altcp_sndbuf_fn sndbuf; + altcp_sndqueuelen_fn sndqueuelen; + altcp_nagle_disable_fn nagle_disable; + altcp_nagle_enable_fn nagle_enable; + altcp_nagle_disabled_fn nagle_disabled; + altcp_setprio_fn setprio; + altcp_dealloc_fn dealloc; + altcp_get_tcp_addrinfo_fn addrinfo; + altcp_get_ip_fn getip; + altcp_get_port_fn getport; +#ifdef LWIP_DEBUG + altcp_dbg_get_tcp_state_fn dbg_get_tcp_state; +#endif +}; + +void altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval); +void altcp_default_recved(struct altcp_pcb *conn, u16_t len); +err_t altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +err_t altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx); +err_t altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +err_t altcp_default_output(struct altcp_pcb *conn); +u16_t altcp_default_mss(struct altcp_pcb *conn); +u16_t altcp_default_sndbuf(struct altcp_pcb *conn); +u16_t altcp_default_sndqueuelen(struct altcp_pcb *conn); +void altcp_default_nagle_disable(struct altcp_pcb *conn); +void 
altcp_default_nagle_enable(struct altcp_pcb *conn); +int altcp_default_nagle_disabled(struct altcp_pcb *conn); +void altcp_default_setprio(struct altcp_pcb *conn, u8_t prio); +void altcp_default_dealloc(struct altcp_pcb *conn); +err_t altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +ip_addr_t *altcp_default_get_ip(struct altcp_pcb *conn, int local); +u16_t altcp_default_get_port(struct altcp_pcb *conn, int local); +#ifdef LWIP_DEBUG +enum tcp_state altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_PRIV_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h new file mode 100644 index 000000000..9e8ffc9ea --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h @@ -0,0 +1,272 @@ +/** + * @file + * netconn API lwIP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_API_MSG_H +#define LWIP_HDR_API_MSG_H + +#include "lwip/opt.h" + +#include "lwip/arch.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/sys.h" +#include "lwip/igmp.h" +#include "lwip/api.h" +#include "lwip/priv/tcpip_priv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#if LWIP_MPU_COMPATIBLE +#if LWIP_NETCONN_SEM_PER_THREAD +#define API_MSG_M_DEF_SEM(m) *m +#else +#define API_MSG_M_DEF_SEM(m) API_MSG_M_DEF(m) +#endif +#else /* LWIP_MPU_COMPATIBLE */ +#define API_MSG_M_DEF_SEM(m) API_MSG_M_DEF(m) +#endif /* LWIP_MPU_COMPATIBLE */ + +/* For the netconn API, these values are use as a bitmask! */ +#define NETCONN_SHUT_RD 1 +#define NETCONN_SHUT_WR 2 +#define NETCONN_SHUT_RDWR (NETCONN_SHUT_RD | NETCONN_SHUT_WR) + +/* IP addresses and port numbers are expected to be in + * the same byte order as in the corresponding pcb. + */ +/** This struct includes everything that is necessary to execute a function + for a netconn in another thread context (mainly used to process netconns + in the tcpip_thread context to be thread safe). */ +struct api_msg { + /** The netconn which to process - always needed: it includes the semaphore + which is used to block the application thread until the function finished. */ + struct netconn *conn; + /** The return value of the function executed in tcpip_thread. */ + err_t err; + /** Depending on the executed function, one of these union members is used */ + union { + /** used for lwip_netconn_do_send */ + struct netbuf *b; + /** used for lwip_netconn_do_newconn */ + struct { + u8_t proto; + } n; + /** used for lwip_netconn_do_bind and lwip_netconn_do_connect */ + struct { + API_MSG_M_DEF_C(ip_addr_t, ipaddr); + u16_t port; + u8_t if_idx; + } bc; + /** used for lwip_netconn_do_getaddr */ + struct { + ip_addr_t API_MSG_M_DEF(ipaddr); + u16_t API_MSG_M_DEF(port); + u8_t local; + } ad; + /** used for lwip_netconn_do_write */ + struct { + /** current vector to write */ + const struct netvector *vector; + /** number of unwritten vectors */ + u16_t vector_cnt; + /** offset into current vector */ + size_t vector_off; + /** total length across vectors */ + size_t len; + /** offset into total length/output of bytes written when err == ERR_OK */ + size_t offset; + u8_t apiflags; +#if LWIP_SO_SNDTIMEO + u32_t time_started; +#endif /* LWIP_SO_SNDTIMEO */ + } w; + /** used for lwip_netconn_do_recv */ + struct { + size_t len; + } r; +#if LWIP_TCP + /** used for lwip_netconn_do_close (/shutdown) */ + struct { + u8_t shut; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + u32_t time_started; +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + u8_t polls_left; +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + } sd; +#endif /* LWIP_TCP */ +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) + /** used for lwip_netconn_do_join_leave_group */ + struct { + API_MSG_M_DEF_C(ip_addr_t, multiaddr); + API_MSG_M_DEF_C(ip_addr_t, netif_addr); + u8_t if_idx; + enum netconn_igmp join_or_leave; + } jl; +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ +#if TCP_LISTEN_BACKLOG + struct { + u8_t backlog; + } lb; +#endif /* TCP_LISTEN_BACKLOG */ + } msg; +#if LWIP_NETCONN_SEM_PER_THREAD + sys_sem_t* op_completed_sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ +}; + +#if LWIP_NETCONN_SEM_PER_THREAD +#define LWIP_API_MSG_SEM(msg) 
((msg)->op_completed_sem) +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define LWIP_API_MSG_SEM(msg) (&(msg)->conn->op_completed) +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + +#if LWIP_DNS +/** As lwip_netconn_do_gethostbyname requires more arguments but doesn't require a netconn, + it has its own struct (to avoid struct api_msg getting bigger than necessary). + lwip_netconn_do_gethostbyname must be called using tcpip_callback instead of tcpip_apimsg + (see netconn_gethostbyname). */ +struct dns_api_msg { + /** Hostname to query or dotted IP address string */ +#if LWIP_MPU_COMPATIBLE + char name[DNS_MAX_NAME_LENGTH]; +#else /* LWIP_MPU_COMPATIBLE */ + const char *name; +#endif /* LWIP_MPU_COMPATIBLE */ + /** The resolved address is stored here */ + ip_addr_t API_MSG_M_DEF(addr); +#if LWIP_IPV4 && LWIP_IPV6 + /** Type of resolve call */ + u8_t dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + /** This semaphore is posted when the name is resolved, the application thread + should wait on it. */ + sys_sem_t API_MSG_M_DEF_SEM(sem); + /** Errors are given back here */ + err_t API_MSG_M_DEF(err); +}; +#endif /* LWIP_DNS */ + +#if LWIP_NETCONN_FULLDUPLEX +int lwip_netconn_is_deallocated_msg(void *msg); +#endif +int lwip_netconn_is_err_msg(void *msg, err_t *err); +void lwip_netconn_do_newconn (void *m); +void lwip_netconn_do_delconn (void *m); +void lwip_netconn_do_bind (void *m); +void lwip_netconn_do_bind_if (void *m); +void lwip_netconn_do_connect (void *m); +void lwip_netconn_do_disconnect (void *m); +void lwip_netconn_do_listen (void *m); +void lwip_netconn_do_send (void *m); +void lwip_netconn_do_recv (void *m); +#if TCP_LISTEN_BACKLOG +void lwip_netconn_do_accepted (void *m); +#endif /* TCP_LISTEN_BACKLOG */ +void lwip_netconn_do_write (void *m); +void lwip_netconn_do_getaddr (void *m); +void lwip_netconn_do_close (void *m); +void lwip_netconn_do_shutdown (void *m); +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +void lwip_netconn_do_join_leave_group(void *m); +void lwip_netconn_do_join_leave_group_netif(void *m); +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +void lwip_netconn_do_gethostbyname(void *arg); +#endif /* LWIP_DNS */ + +struct netconn* netconn_alloc(enum netconn_type t, netconn_callback callback); +void netconn_free(struct netconn *conn); + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +/* netifapi related lwIP internal definitions */ + +#if LWIP_MPU_COMPATIBLE +#define NETIFAPI_IPADDR_DEF(type, m) type m +#else /* LWIP_MPU_COMPATIBLE */ +#define NETIFAPI_IPADDR_DEF(type, m) const type * m +#endif /* LWIP_MPU_COMPATIBLE */ + +typedef void (*netifapi_void_fn)(struct netif *netif); +typedef err_t (*netifapi_errt_fn)(struct netif *netif); + +struct netifapi_msg { + struct tcpip_api_call_data call; + struct netif *netif; + union { + struct { +#if LWIP_IPV4 + NETIFAPI_IPADDR_DEF(ip4_addr_t, ipaddr); + NETIFAPI_IPADDR_DEF(ip4_addr_t, netmask); + NETIFAPI_IPADDR_DEF(ip4_addr_t, gw); +#endif /* LWIP_IPV4 */ + void *state; + netif_init_fn init; + netif_input_fn input; + } add; + struct { + netifapi_void_fn voidfunc; + netifapi_errt_fn errtfunc; + } common; + struct { +#if LWIP_MPU_COMPATIBLE + char name[NETIF_NAMESIZE]; +#else /* LWIP_MPU_COMPATIBLE */ + char *name; +#endif /* LWIP_MPU_COMPATIBLE */ + u8_t index; + } ifs; + } msg; +}; + +#endif /* LWIP_NETIF_API */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_API_MSG_H */ diff --git 
a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h new file mode 100644 index 000000000..8630d7541 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h @@ -0,0 +1,84 @@ +/** + * @file + * lwIP internal memory implementations (do not use in application code) + */ + +/* + * Copyright (c) 2018 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_MEM_PRIV_H +#define LWIP_HDR_MEM_PRIV_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/mem.h" + +#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK +/* if MEM_OVERFLOW_CHECK or MEMP_OVERFLOW_CHECK is turned on, we reserve some + * bytes at the beginning and at the end of each element, initialize them as + * 0xcd and check them later. + * If MEM(P)_OVERFLOW_CHECK is >= 2, on every call to mem(p)_malloc or mem(p)_free, + * every single element in each pool/heap is checked! + * This is VERY SLOW but also very helpful. + * MEM_SANITY_REGION_BEFORE and MEM_SANITY_REGION_AFTER can be overridden in + * lwipopts.h to change the amount reserved for checking. 
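+ *
+ * For example, a debugging build might enable the checks from lwipopts.h
+ * (a sketch; the region sizes are arbitrary):
+ *
+ *   #define MEM_OVERFLOW_CHECK       2
+ *   #define MEMP_OVERFLOW_CHECK      2
+ *   #define MEM_SANITY_REGION_BEFORE 32
+ *   #define MEM_SANITY_REGION_AFTER  32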
*/ +#ifndef MEM_SANITY_REGION_BEFORE +#define MEM_SANITY_REGION_BEFORE 16 +#endif /* MEM_SANITY_REGION_BEFORE*/ +#if MEM_SANITY_REGION_BEFORE > 0 +#define MEM_SANITY_REGION_BEFORE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SANITY_REGION_BEFORE) +#else +#define MEM_SANITY_REGION_BEFORE_ALIGNED 0 +#endif /* MEM_SANITY_REGION_BEFORE*/ +#ifndef MEM_SANITY_REGION_AFTER +#define MEM_SANITY_REGION_AFTER 16 +#endif /* MEM_SANITY_REGION_AFTER*/ +#if MEM_SANITY_REGION_AFTER > 0 +#define MEM_SANITY_REGION_AFTER_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SANITY_REGION_AFTER) +#else +#define MEM_SANITY_REGION_AFTER_ALIGNED 0 +#endif /* MEM_SANITY_REGION_AFTER*/ + +void mem_overflow_init_raw(void *p, size_t size); +void mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2); + +#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_PRIV_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h new file mode 100644 index 000000000..1f14cb162 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h @@ -0,0 +1,161 @@ +/** + * @file + * memory pools lwIP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_MEMP_PRIV_H +#define LWIP_HDR_MEMP_PRIV_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/mem.h" +#include "lwip/priv/mem_priv.h" + +#if MEMP_OVERFLOW_CHECK + + +/* MEMP_SIZE: save space for struct memp and for sanity check */ +#define MEMP_SIZE (LWIP_MEM_ALIGN_SIZE(sizeof(struct memp)) + MEM_SANITY_REGION_BEFORE_ALIGNED) +#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x) + MEM_SANITY_REGION_AFTER_ALIGNED) + +#else /* MEMP_OVERFLOW_CHECK */ + +/* No sanity checks + * We don't need to preserve the struct memp while not allocated, so we + * can save a little space and set MEMP_SIZE to 0. + */ +#define MEMP_SIZE 0 +#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x)) + +#endif /* MEMP_OVERFLOW_CHECK */ + +#if !MEMP_MEM_MALLOC || MEMP_OVERFLOW_CHECK +struct memp { + struct memp *next; +#if MEMP_OVERFLOW_CHECK + const char *file; + int line; +#endif /* MEMP_OVERFLOW_CHECK */ +}; +#endif /* !MEMP_MEM_MALLOC || MEMP_OVERFLOW_CHECK */ + +#if MEM_USE_POOLS && MEMP_USE_CUSTOM_POOLS +/* Use a helper type to get the start and end of the user "memory pools" for mem_malloc */ +typedef enum { + /* Get the first (via: + MEMP_POOL_HELPER_START = ((u8_t) 1*MEMP_POOL_A + 0*MEMP_POOL_B + 0*MEMP_POOL_C + 0)*/ + MEMP_POOL_HELPER_FIRST = ((u8_t) +#define LWIP_MEMPOOL(name,num,size,desc) +#define LWIP_MALLOC_MEMPOOL_START 1 +#define LWIP_MALLOC_MEMPOOL(num, size) * MEMP_POOL_##size + 0 +#define LWIP_MALLOC_MEMPOOL_END +#include "lwip/priv/memp_std.h" + ) , + /* Get the last (via: + MEMP_POOL_HELPER_END = ((u8_t) 0 + MEMP_POOL_A*0 + MEMP_POOL_B*0 + MEMP_POOL_C*1) */ + MEMP_POOL_HELPER_LAST = ((u8_t) +#define LWIP_MEMPOOL(name,num,size,desc) +#define LWIP_MALLOC_MEMPOOL_START +#define LWIP_MALLOC_MEMPOOL(num, size) 0 + MEMP_POOL_##size * +#define LWIP_MALLOC_MEMPOOL_END 1 +#include "lwip/priv/memp_std.h" + ) +} memp_pool_helper_t; + +/* The actual start and stop values are here (cast them over) + We use this helper type and these defines so we can avoid using const memp_t values */ +#define MEMP_POOL_FIRST ((memp_t) MEMP_POOL_HELPER_FIRST) +#define MEMP_POOL_LAST ((memp_t) MEMP_POOL_HELPER_LAST) +#endif /* MEM_USE_POOLS && MEMP_USE_CUSTOM_POOLS */ + +/** Memory pool descriptor */ +struct memp_desc { +#if defined(LWIP_DEBUG) || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY + /** Textual description */ + const char *desc; +#endif /* LWIP_DEBUG || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY */ +#if MEMP_STATS + /** Statistics */ + struct stats_mem *stats; +#endif + + /** Element size */ + u16_t size; + +#if !MEMP_MEM_MALLOC + /** Number of elements */ + u16_t num; + + /** Base address */ + u8_t *base; + + /** First free element of each pool. Elements form a linked list. 
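+      As a descriptive note (based on the usual lwIP pool behaviour, not new
+      semantics): memp_init_pool() chains all 'num' elements of 'base' into
+      this list, memp_malloc_pool() pops the head, and memp_free_pool()
+      pushes the element back.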
*/ + struct memp **tab; +#endif /* MEMP_MEM_MALLOC */ +}; + +#if defined(LWIP_DEBUG) || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY +#define DECLARE_LWIP_MEMPOOL_DESC(desc) (desc), +#else +#define DECLARE_LWIP_MEMPOOL_DESC(desc) +#endif + +#if MEMP_STATS +#define LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(name) static struct stats_mem name; +#define LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(name) &name, +#else +#define LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(name) +#define LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(name) +#endif + +void memp_init_pool(const struct memp_desc *desc); + +#if MEMP_OVERFLOW_CHECK +void *memp_malloc_pool_fn(const struct memp_desc* desc, const char* file, const int line); +#define memp_malloc_pool(d) memp_malloc_pool_fn((d), __FILE__, __LINE__) +#else +void *memp_malloc_pool(const struct memp_desc *desc); +#endif +void memp_free_pool(const struct memp_desc* desc, void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_PRIV_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h new file mode 100644 index 000000000..669ad4d7e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h @@ -0,0 +1,153 @@ +/** + * @file + * lwIP internal memory pools (do not use in application code) + * This file is deliberately included multiple times: once with empty + * definition of LWIP_MEMPOOL() to handle all includes and multiple times + * to build up various lists of mem pools. + */ + +/* + * SETUP: Make sure we define everything we will need. + * + * We have create three types of pools: + * 1) MEMPOOL - standard pools + * 2) MALLOC_MEMPOOL - to be used by mem_malloc in mem.c + * 3) PBUF_MEMPOOL - a mempool of pbuf's, so include space for the pbuf struct + * + * If the include'r doesn't require any special treatment of each of the types + * above, then will declare #2 & #3 to be just standard mempools. + */ +#ifndef LWIP_MALLOC_MEMPOOL +/* This treats "malloc pools" just like any other pool. + The pools are a little bigger to provide 'size' as the amount of user data. */ +#define LWIP_MALLOC_MEMPOOL(num, size) LWIP_MEMPOOL(POOL_##size, num, (size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))), "MALLOC_"#size) +#define LWIP_MALLOC_MEMPOOL_START +#define LWIP_MALLOC_MEMPOOL_END +#endif /* LWIP_MALLOC_MEMPOOL */ + +#ifndef LWIP_PBUF_MEMPOOL +/* This treats "pbuf pools" just like any other pool. + * Allocates buffers for a pbuf struct AND a payload size */ +#define LWIP_PBUF_MEMPOOL(name, num, payload, desc) LWIP_MEMPOOL(name, num, (LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) + LWIP_MEM_ALIGN_SIZE(payload)), desc) +#endif /* LWIP_PBUF_MEMPOOL */ + + +/* + * A list of internal pools used by LWIP. + * + * LWIP_MEMPOOL(pool_name, number_elements, element_size, pool_description) + * creates a pool name MEMP_pool_name. 
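+ *
+ * For instance, the entry LWIP_MEMPOOL(UDP_PCB, MEMP_NUM_UDP_PCB,
+ * sizeof(struct udp_pcb), "UDP_PCB") below expands to a pool that the rest
+ * of the stack addresses as MEMP_UDP_PCB, holding MEMP_NUM_UDP_PCB elements
+ * of sizeof(struct udp_pcb) bytes each.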
description is used in stats.c + */ +#if LWIP_RAW +LWIP_MEMPOOL(RAW_PCB, MEMP_NUM_RAW_PCB, sizeof(struct raw_pcb), "RAW_PCB") +#endif /* LWIP_RAW */ + +#if LWIP_UDP +LWIP_MEMPOOL(UDP_PCB, MEMP_NUM_UDP_PCB, sizeof(struct udp_pcb), "UDP_PCB") +#endif /* LWIP_UDP */ + +#if LWIP_TCP +LWIP_MEMPOOL(TCP_PCB, MEMP_NUM_TCP_PCB, sizeof(struct tcp_pcb), "TCP_PCB") +LWIP_MEMPOOL(TCP_PCB_LISTEN, MEMP_NUM_TCP_PCB_LISTEN, sizeof(struct tcp_pcb_listen), "TCP_PCB_LISTEN") +LWIP_MEMPOOL(TCP_SEG, MEMP_NUM_TCP_SEG, sizeof(struct tcp_seg), "TCP_SEG") +#endif /* LWIP_TCP */ + +#if LWIP_ALTCP && LWIP_TCP +LWIP_MEMPOOL(ALTCP_PCB, MEMP_NUM_ALTCP_PCB, sizeof(struct altcp_pcb), "ALTCP_PCB") +#endif /* LWIP_ALTCP && LWIP_TCP */ + +#if LWIP_IPV4 && IP_REASSEMBLY +LWIP_MEMPOOL(REASSDATA, MEMP_NUM_REASSDATA, sizeof(struct ip_reassdata), "REASSDATA") +#endif /* LWIP_IPV4 && IP_REASSEMBLY */ +#if (IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF) || (LWIP_IPV6 && LWIP_IPV6_FRAG) +LWIP_MEMPOOL(FRAG_PBUF, MEMP_NUM_FRAG_PBUF, sizeof(struct pbuf_custom_ref),"FRAG_PBUF") +#endif /* IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF || (LWIP_IPV6 && LWIP_IPV6_FRAG) */ + +#if LWIP_NETCONN || LWIP_SOCKET +LWIP_MEMPOOL(NETBUF, MEMP_NUM_NETBUF, sizeof(struct netbuf), "NETBUF") +LWIP_MEMPOOL(NETCONN, MEMP_NUM_NETCONN, sizeof(struct netconn), "NETCONN") +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#if NO_SYS==0 +LWIP_MEMPOOL(TCPIP_MSG_API, MEMP_NUM_TCPIP_MSG_API, sizeof(struct tcpip_msg), "TCPIP_MSG_API") +#if LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL(API_MSG, MEMP_NUM_API_MSG, sizeof(struct api_msg), "API_MSG") +#if LWIP_DNS +LWIP_MEMPOOL(DNS_API_MSG, MEMP_NUM_DNS_API_MSG, sizeof(struct dns_api_msg), "DNS_API_MSG") +#endif +#if LWIP_SOCKET && !LWIP_TCPIP_CORE_LOCKING +LWIP_MEMPOOL(SOCKET_SETGETSOCKOPT_DATA, MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA, sizeof(struct lwip_setgetsockopt_data), "SOCKET_SETGETSOCKOPT_DATA") +#endif +#if LWIP_SOCKET && (LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL) +LWIP_MEMPOOL(SELECT_CB, MEMP_NUM_SELECT_CB, sizeof(struct lwip_select_cb), "SELECT_CB") +#endif /* LWIP_SOCKET && (LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL) */ +#if LWIP_NETIF_API +LWIP_MEMPOOL(NETIFAPI_MSG, MEMP_NUM_NETIFAPI_MSG, sizeof(struct netifapi_msg), "NETIFAPI_MSG") +#endif +#endif /* LWIP_MPU_COMPATIBLE */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT +LWIP_MEMPOOL(TCPIP_MSG_INPKT,MEMP_NUM_TCPIP_MSG_INPKT, sizeof(struct tcpip_msg), "TCPIP_MSG_INPKT") +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ +#endif /* NO_SYS==0 */ + +#if LWIP_IPV4 && LWIP_ARP && ARP_QUEUEING +LWIP_MEMPOOL(ARP_QUEUE, MEMP_NUM_ARP_QUEUE, sizeof(struct etharp_q_entry), "ARP_QUEUE") +#endif /* LWIP_IPV4 && LWIP_ARP && ARP_QUEUEING */ + +#if LWIP_IGMP +LWIP_MEMPOOL(IGMP_GROUP, MEMP_NUM_IGMP_GROUP, sizeof(struct igmp_group), "IGMP_GROUP") +#endif /* LWIP_IGMP */ + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM +LWIP_MEMPOOL(SYS_TIMEOUT, MEMP_NUM_SYS_TIMEOUT, sizeof(struct sys_timeo), "SYS_TIMEOUT") +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ + +#if LWIP_DNS && LWIP_SOCKET +LWIP_MEMPOOL(NETDB, MEMP_NUM_NETDB, NETDB_ELEM_SIZE, "NETDB") +#endif /* LWIP_DNS && LWIP_SOCKET */ +#if LWIP_DNS && DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC +LWIP_MEMPOOL(LOCALHOSTLIST, MEMP_NUM_LOCALHOSTLIST, LOCALHOSTLIST_ELEM_SIZE, "LOCALHOSTLIST") +#endif /* LWIP_DNS && DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +#if LWIP_IPV6 && LWIP_ND6_QUEUEING +LWIP_MEMPOOL(ND6_QUEUE, MEMP_NUM_ND6_QUEUE, sizeof(struct nd6_q_entry), "ND6_QUEUE") +#endif /* LWIP_IPV6 && LWIP_ND6_QUEUEING */ + +#if LWIP_IPV6 && LWIP_IPV6_REASS 
+LWIP_MEMPOOL(IP6_REASSDATA, MEMP_NUM_REASSDATA, sizeof(struct ip6_reassdata), "IP6_REASSDATA") +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_MLD +LWIP_MEMPOOL(MLD6_GROUP, MEMP_NUM_MLD6_GROUP, sizeof(struct mld_group), "MLD6_GROUP") +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + + +/* + * A list of pools of pbuf's used by LWIP. + * + * LWIP_PBUF_MEMPOOL(pool_name, number_elements, pbuf_payload_size, pool_description) + * creates a pool name MEMP_pool_name. description is used in stats.c + * This allocates enough space for the pbuf struct and a payload. + * (Example: pbuf_payload_size=0 allocates only size for the struct) + */ +LWIP_MEMPOOL(PBUF, MEMP_NUM_PBUF, sizeof(struct pbuf), "PBUF_REF/ROM") +LWIP_PBUF_MEMPOOL(PBUF_POOL, PBUF_POOL_SIZE, PBUF_POOL_BUFSIZE, "PBUF_POOL") + + +/* + * Allow for user-defined pools; this must be explicitly set in lwipopts.h + * since the default is to NOT look for lwippools.h + */ +#if MEMP_USE_CUSTOM_POOLS +#include "lwippools.h" +#endif /* MEMP_USE_CUSTOM_POOLS */ + +/* + * REQUIRED CLEANUP: Clear up so we don't get "multiply defined" error later + * (#undef is ignored for something that is not defined) + */ +#undef LWIP_MEMPOOL +#undef LWIP_MALLOC_MEMPOOL +#undef LWIP_MALLOC_MEMPOOL_START +#undef LWIP_MALLOC_MEMPOOL_END +#undef LWIP_PBUF_MEMPOOL diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h new file mode 100644 index 000000000..cc3007d97 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h @@ -0,0 +1,142 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ND6_PRIV_H +#define LWIP_HDR_ND6_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ND6_QUEUEING +/** struct for queueing outgoing packets for unknown address + * defined here to be accessed by memp.h + */ +struct nd6_q_entry { + struct nd6_q_entry *next; + struct pbuf *p; +}; +#endif /* LWIP_ND6_QUEUEING */ + +/** Struct for tables. */ +struct nd6_neighbor_cache_entry { + ip6_addr_t next_hop_address; + struct netif *netif; + u8_t lladdr[NETIF_MAX_HWADDR_LEN]; + /*u32_t pmtu;*/ +#if LWIP_ND6_QUEUEING + /** Pointer to queue of pending outgoing packets on this entry. */ + struct nd6_q_entry *q; +#else /* LWIP_ND6_QUEUEING */ + /** Pointer to a single pending outgoing packet on this entry. */ + struct pbuf *q; +#endif /* LWIP_ND6_QUEUEING */ + u8_t state; + u8_t isrouter; + union { + u32_t reachable_time; /* in seconds */ + u32_t delay_time; /* ticks (ND6_TMR_INTERVAL) */ + u32_t probes_sent; + u32_t stale_time; /* ticks (ND6_TMR_INTERVAL) */ + } counter; +}; + +struct nd6_destination_cache_entry { + ip6_addr_t destination_addr; + ip6_addr_t next_hop_addr; + u16_t pmtu; + u32_t age; +}; + +struct nd6_prefix_list_entry { + ip6_addr_t prefix; + struct netif *netif; + u32_t invalidation_timer; /* in seconds */ +}; + +struct nd6_router_list_entry { + struct nd6_neighbor_cache_entry *neighbor_entry; + u32_t invalidation_timer; /* in seconds */ + u8_t flags; +}; + +enum nd6_neighbor_cache_entry_state { + ND6_NO_ENTRY = 0, + ND6_INCOMPLETE, + ND6_REACHABLE, + ND6_STALE, + ND6_DELAY, + ND6_PROBE +}; + +#define ND6_HOPLIM 255 /* maximum hop limit, required in all ND packets */ + +#define ND6_2HRS 7200 /* two hours, expressed in number of seconds */ + +/* Router tables. */ +/* @todo make these static? and entries accessible through API? */ +extern struct nd6_neighbor_cache_entry neighbor_cache[]; +extern struct nd6_destination_cache_entry destination_cache[]; +extern struct nd6_prefix_list_entry prefix_list[]; +extern struct nd6_router_list_entry default_router_list[]; + +/* Default values, can be updated by a RA message. */ +extern u32_t reachable_time; +extern u32_t retrans_timer; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_ND6_PRIV_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h new file mode 100644 index 000000000..d4561d4f3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h @@ -0,0 +1,69 @@ +/** + * @file + * raw API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_RAW_PRIV_H +#define LWIP_HDR_RAW_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/raw.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** return codes for raw_input */ +typedef enum raw_input_state +{ + RAW_INPUT_NONE = 0, /* pbuf did not match any pcbs */ + RAW_INPUT_EATEN, /* pbuf handed off and delivered to pcb */ + RAW_INPUT_DELIVERED /* pbuf only delivered to pcb (pbuf can still be referenced) */ +} raw_input_state_t; + +/* The following functions are the lower layer interface to RAW. */ +raw_input_state_t raw_input(struct pbuf *p, struct netif *inp); + +void raw_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_RAW */ + +#endif /* LWIP_HDR_RAW_PRIV_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h new file mode 100644 index 000000000..d8f9904dc --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h @@ -0,0 +1,175 @@ +/** + * @file + * Sockets API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Joel Cunningham + * + */ +#ifndef LWIP_HDR_SOCKETS_PRIV_H +#define LWIP_HDR_SOCKETS_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/sockets.h" +#include "lwip/sys.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NUM_SOCKETS MEMP_NUM_NETCONN + +/** This is overridable for the rare case where more than 255 threads + * select on the same socket... + */ +#ifndef SELWAIT_T +#define SELWAIT_T u8_t +#endif + +union lwip_sock_lastdata { + struct netbuf *netbuf; + struct pbuf *pbuf; +}; + +/** Contains all internal pointers and states used for a socket */ +struct lwip_sock { + /** sockets currently are built on netconns, each socket has one netconn */ + struct netconn *conn; + /** data that was left from the previous read */ + union lwip_sock_lastdata lastdata; +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + /** number of times data was received, set by event_callback(), + tested by the receive and select functions */ + s16_t rcvevent; + /** number of times data was ACKed (free send buffer), set by event_callback(), + tested by select */ + u16_t sendevent; + /** error happened for this socket, set by event_callback(), tested by select */ + u16_t errevent; + /** counter of how many threads are waiting for this socket using select */ + SELWAIT_T select_waiting; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ +#if LWIP_NETCONN_FULLDUPLEX + /* counter of how many threads are using a struct lwip_sock (not the 'int') */ + u8_t fd_used; + /* status of pending close/delete actions */ + u8_t fd_free_pending; +#define LWIP_SOCK_FD_FREE_TCP 1 +#define LWIP_SOCK_FD_FREE_FREE 2 +#endif +}; + +#ifndef set_errno +#define set_errno(err) do { if (err) { errno = (err); } } while(0) +#endif + +#if !LWIP_TCPIP_CORE_LOCKING +/** Maximum optlen used by setsockopt/getsockopt */ +#define LWIP_SETGETSOCKOPT_MAXOPTLEN LWIP_MAX(16, sizeof(struct ifreq)) + +/** This struct is used to pass data to the set/getsockopt_internal + * functions running in tcpip_thread context (only a void* is allowed) */ +struct lwip_setgetsockopt_data { + /** socket index for which to change options */ + int s; + /** level of the option to process */ + int level; + /** name of the option to process */ + int optname; + /** set: value to set the option to + * get: value of the option is stored here */ +#if LWIP_MPU_COMPATIBLE + u8_t optval[LWIP_SETGETSOCKOPT_MAXOPTLEN]; +#else + union { + void *p; + const void *pc; + } optval; +#endif + /** size of *optval */ + socklen_t optlen; + /** if an error occurs, it is temporarily stored here */ + int err; + /** semaphore to wake up the calling task */ + void* completed_sem; +}; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + +#ifdef __cplusplus +} +#endif + +struct lwip_sock* lwip_socket_dbg_get_socket(int fd); + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + +#if LWIP_NETCONN_SEM_PER_THREAD +#define SELECT_SEM_T sys_sem_t* 
+#define SELECT_SEM_PTR(sem) (sem) +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define SELECT_SEM_T sys_sem_t +#define SELECT_SEM_PTR(sem) (&(sem)) +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +/** Description for a task waiting in select */ +struct lwip_select_cb { + /** Pointer to the next waiting task */ + struct lwip_select_cb *next; + /** Pointer to the previous waiting task */ + struct lwip_select_cb *prev; +#if LWIP_SOCKET_SELECT + /** readset passed to select */ + fd_set *readset; + /** writeset passed to select */ + fd_set *writeset; + /** unimplemented: exceptset passed to select */ + fd_set *exceptset; +#endif /* LWIP_SOCKET_SELECT */ +#if LWIP_SOCKET_POLL + /** fds passed to poll; NULL if select */ + struct pollfd *poll_fds; + /** nfds passed to poll; 0 if select */ + nfds_t poll_nfds; +#endif /* LWIP_SOCKET_POLL */ + /** don't signal the same semaphore twice: set to 1 when signalled */ + int sem_signalled; + /** semaphore to wake up a task waiting for select */ + SELECT_SEM_T sem; +}; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_SOCKETS_PRIV_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h new file mode 100644 index 000000000..72f9126d4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h @@ -0,0 +1,523 @@ +/** + * @file + * TCP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCP_PRIV_H +#define LWIP_HDR_TCP_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcp.h" +#include "lwip/mem.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/icmp.h" +#include "lwip/err.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/tcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Functions for interfacing with TCP: */ + +/* Lower layer interface to TCP: */ +void tcp_init (void); /* Initialize this module. */ +void tcp_tmr (void); /* Must be called every + TCP_TMR_INTERVAL + ms. (Typically 250 ms). */ +/* It is also possible to call these two functions at the right + intervals (instead of calling tcp_tmr()). */ +void tcp_slowtmr (void); +void tcp_fasttmr (void); + +/* Call this from a netif driver (watch out for threading issues!) that has + returned a memory error on transmit and now has free buffers to send more. + This iterates all active pcbs that had an error and tries to call + tcp_output, so use this with care as it might slow down the system. */ +void tcp_txnow (void); + +/* Only used by IP to pass a TCP segment to TCP: */ +void tcp_input (struct pbuf *p, struct netif *inp); +/* Used within the TCP code only: */ +struct tcp_pcb * tcp_alloc (u8_t prio); +void tcp_free (struct tcp_pcb *pcb); +void tcp_abandon (struct tcp_pcb *pcb, int reset); +err_t tcp_send_empty_ack(struct tcp_pcb *pcb); +err_t tcp_rexmit (struct tcp_pcb *pcb); +err_t tcp_rexmit_rto_prepare(struct tcp_pcb *pcb); +void tcp_rexmit_rto_commit(struct tcp_pcb *pcb); +void tcp_rexmit_rto (struct tcp_pcb *pcb); +void tcp_rexmit_fast (struct tcp_pcb *pcb); +u32_t tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb); +err_t tcp_process_refused_data(struct tcp_pcb *pcb); + +/** + * This is the Nagle algorithm: try to combine user data to send as few TCP + * segments as possible. Only send if + * - no previously transmitted data on the connection remains unacknowledged or + * - the TF_NODELAY flag is set (nagle algorithm turned off for this pcb) or + * - the only unsent segment is at least pcb->mss bytes long (or there is more + * than one unsent segment - with lwIP, this can happen although unsent->len < mss) + * - or if we are in fast-retransmit (TF_INFR) + */ +#define tcp_do_output_nagle(tpcb) ((((tpcb)->unacked == NULL) || \ + ((tpcb)->flags & (TF_NODELAY | TF_INFR)) || \ + (((tpcb)->unsent != NULL) && (((tpcb)->unsent->next != NULL) || \ + ((tpcb)->unsent->len >= (tpcb)->mss))) || \ + ((tcp_sndbuf(tpcb) == 0) || (tcp_sndqueuelen(tpcb) >= TCP_SND_QUEUELEN)) \ + ) ? 1 : 0) +#define tcp_output_nagle(tpcb) (tcp_do_output_nagle(tpcb) ? tcp_output(tpcb) : ERR_OK) + + +#define TCP_SEQ_LT(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) < 0) +#define TCP_SEQ_LEQ(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) <= 0) +#define TCP_SEQ_GT(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) > 0) +#define TCP_SEQ_GEQ(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) >= 0) +/* is b<=a<=c? */ +#if 0 /* see bug #10548 */ +#define TCP_SEQ_BETWEEN(a,b,c) ((c)-(b) >= (a)-(b)) +#endif +#define TCP_SEQ_BETWEEN(a,b,c) (TCP_SEQ_GEQ(a,b) && TCP_SEQ_LEQ(a,c)) + +#ifndef TCP_TMR_INTERVAL +#define TCP_TMR_INTERVAL 250 /* The TCP timer interval in milliseconds. 
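+   For a port that drives the TCP timers itself (e.g. NO_SYS==1 without
+   sys_check_timeouts()), a minimal sketch, where the 250 ms tick source is
+   a placeholder:
+
+     void my_250ms_tick(void) {
+       tcp_tmr();    runs tcp_fasttmr() every call and tcp_slowtmr() every
+                     second call, matching TCP_FAST_INTERVAL / TCP_SLOW_INTERVAL below
+     }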
*/ +#endif /* TCP_TMR_INTERVAL */ + +#ifndef TCP_FAST_INTERVAL +#define TCP_FAST_INTERVAL TCP_TMR_INTERVAL /* the fine grained timeout in milliseconds */ +#endif /* TCP_FAST_INTERVAL */ + +#ifndef TCP_SLOW_INTERVAL +#define TCP_SLOW_INTERVAL (2*TCP_TMR_INTERVAL) /* the coarse grained timeout in milliseconds */ +#endif /* TCP_SLOW_INTERVAL */ + +#define TCP_FIN_WAIT_TIMEOUT 20000 /* milliseconds */ +#define TCP_SYN_RCVD_TIMEOUT 20000 /* milliseconds */ + +#define TCP_OOSEQ_TIMEOUT 6U /* x RTO */ + +#ifndef TCP_MSL +#define TCP_MSL 60000UL /* The maximum segment lifetime in milliseconds */ +#endif + +/* Keepalive values, compliant with RFC 1122. Don't change this unless you know what you're doing */ +#ifndef TCP_KEEPIDLE_DEFAULT +#define TCP_KEEPIDLE_DEFAULT 7200000UL /* Default KEEPALIVE timer in milliseconds */ +#endif + +#ifndef TCP_KEEPINTVL_DEFAULT +#define TCP_KEEPINTVL_DEFAULT 75000UL /* Default Time between KEEPALIVE probes in milliseconds */ +#endif + +#ifndef TCP_KEEPCNT_DEFAULT +#define TCP_KEEPCNT_DEFAULT 9U /* Default Counter for KEEPALIVE probes */ +#endif + +#define TCP_MAXIDLE TCP_KEEPCNT_DEFAULT * TCP_KEEPINTVL_DEFAULT /* Maximum KEEPALIVE probe time */ + +#define TCP_TCPLEN(seg) ((seg)->len + (((TCPH_FLAGS((seg)->tcphdr) & (TCP_FIN | TCP_SYN)) != 0) ? 1U : 0U)) + +/** Flags used on input processing, not on pcb->flags +*/ +#define TF_RESET (u8_t)0x08U /* Connection was reset. */ +#define TF_CLOSED (u8_t)0x10U /* Connection was successfully closed. */ +#define TF_GOT_FIN (u8_t)0x20U /* Connection was closed by the remote end. */ + + +#if LWIP_EVENT_API + +#define TCP_EVENT_ACCEPT(lpcb,pcb,arg,err,ret) ret = lwip_tcp_event(arg, (pcb),\ + LWIP_EVENT_ACCEPT, NULL, 0, err) +#define TCP_EVENT_SENT(pcb,space,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_SENT, NULL, space, ERR_OK) +#define TCP_EVENT_RECV(pcb,p,err,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_RECV, (p), 0, (err)) +#define TCP_EVENT_CLOSED(pcb,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_RECV, NULL, 0, ERR_OK) +#define TCP_EVENT_CONNECTED(pcb,err,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_CONNECTED, NULL, 0, (err)) +#define TCP_EVENT_POLL(pcb,ret) do { if ((pcb)->state != SYN_RCVD) { \ + ret = lwip_tcp_event((pcb)->callback_arg, (pcb), LWIP_EVENT_POLL, NULL, 0, ERR_OK); \ + } else { \ + ret = ERR_ARG; } } while(0) +/* For event API, last state SYN_RCVD must be excluded here: the application + has not seen this pcb, yet! 
*/ +#define TCP_EVENT_ERR(last_state,errf,arg,err) do { if (last_state != SYN_RCVD) { \ + lwip_tcp_event((arg), NULL, LWIP_EVENT_ERR, NULL, 0, (err)); } } while(0) + +#else /* LWIP_EVENT_API */ + +#define TCP_EVENT_ACCEPT(lpcb,pcb,arg,err,ret) \ + do { \ + if((lpcb)->accept != NULL) \ + (ret) = (lpcb)->accept((arg),(pcb),(err)); \ + else (ret) = ERR_ARG; \ + } while (0) + +#define TCP_EVENT_SENT(pcb,space,ret) \ + do { \ + if((pcb)->sent != NULL) \ + (ret) = (pcb)->sent((pcb)->callback_arg,(pcb),(space)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_RECV(pcb,p,err,ret) \ + do { \ + if((pcb)->recv != NULL) { \ + (ret) = (pcb)->recv((pcb)->callback_arg,(pcb),(p),(err));\ + } else { \ + (ret) = tcp_recv_null(NULL, (pcb), (p), (err)); \ + } \ + } while (0) + +#define TCP_EVENT_CLOSED(pcb,ret) \ + do { \ + if(((pcb)->recv != NULL)) { \ + (ret) = (pcb)->recv((pcb)->callback_arg,(pcb),NULL,ERR_OK);\ + } else { \ + (ret) = ERR_OK; \ + } \ + } while (0) + +#define TCP_EVENT_CONNECTED(pcb,err,ret) \ + do { \ + if((pcb)->connected != NULL) \ + (ret) = (pcb)->connected((pcb)->callback_arg,(pcb),(err)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_POLL(pcb,ret) \ + do { \ + if((pcb)->poll != NULL) \ + (ret) = (pcb)->poll((pcb)->callback_arg,(pcb)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_ERR(last_state,errf,arg,err) \ + do { \ + LWIP_UNUSED_ARG(last_state); \ + if((errf) != NULL) \ + (errf)((arg),(err)); \ + } while (0) + +#endif /* LWIP_EVENT_API */ + +/** Enabled extra-check for TCP_OVERSIZE if LWIP_DEBUG is enabled */ +#if TCP_OVERSIZE && defined(LWIP_DEBUG) +#define TCP_OVERSIZE_DBGCHECK 1 +#else +#define TCP_OVERSIZE_DBGCHECK 0 +#endif + +/** Don't generate checksum on copy if CHECKSUM_GEN_TCP is disabled */ +#define TCP_CHECKSUM_ON_COPY (LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_TCP) + +/* This structure represents a TCP segment on the unsent, unacked and ooseq queues */ +struct tcp_seg { + struct tcp_seg *next; /* used when putting segments on a queue */ + struct pbuf *p; /* buffer containing data + TCP header */ + u16_t len; /* the TCP length of this segment */ +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_left; /* Extra bytes available at the end of the last + pbuf in unsent (used for asserting vs. + tcp_pcb.unsent_oversize only) */ +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + u16_t chksum; + u8_t chksum_swapped; +#endif /* TCP_CHECKSUM_ON_COPY */ + u8_t flags; +#define TF_SEG_OPTS_MSS (u8_t)0x01U /* Include MSS option (only used in SYN segments) */ +#define TF_SEG_OPTS_TS (u8_t)0x02U /* Include timestamp option. 
*/ +#define TF_SEG_DATA_CHECKSUMMED (u8_t)0x04U /* ALL data (not the header) is + checksummed into 'chksum' */ +#define TF_SEG_OPTS_WND_SCALE (u8_t)0x08U /* Include WND SCALE option (only used in SYN segments) */ +#define TF_SEG_OPTS_SACK_PERM (u8_t)0x10U /* Include SACK Permitted option (only used in SYN segments) */ + struct tcp_hdr *tcphdr; /* the TCP header */ +}; + +#define LWIP_TCP_OPT_EOL 0 +#define LWIP_TCP_OPT_NOP 1 +#define LWIP_TCP_OPT_MSS 2 +#define LWIP_TCP_OPT_WS 3 +#define LWIP_TCP_OPT_SACK_PERM 4 +#define LWIP_TCP_OPT_TS 8 + +#define LWIP_TCP_OPT_LEN_MSS 4 +#if LWIP_TCP_TIMESTAMPS +#define LWIP_TCP_OPT_LEN_TS 10 +#define LWIP_TCP_OPT_LEN_TS_OUT 12 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_TS_OUT 0 +#endif +#if LWIP_WND_SCALE +#define LWIP_TCP_OPT_LEN_WS 3 +#define LWIP_TCP_OPT_LEN_WS_OUT 4 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_WS_OUT 0 +#endif + +#if LWIP_TCP_SACK_OUT +#define LWIP_TCP_OPT_LEN_SACK_PERM 2 +#define LWIP_TCP_OPT_LEN_SACK_PERM_OUT 4 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_SACK_PERM_OUT 0 +#endif + +#define LWIP_TCP_OPT_LENGTH(flags) \ + ((flags) & TF_SEG_OPTS_MSS ? LWIP_TCP_OPT_LEN_MSS : 0) + \ + ((flags) & TF_SEG_OPTS_TS ? LWIP_TCP_OPT_LEN_TS_OUT : 0) + \ + ((flags) & TF_SEG_OPTS_WND_SCALE ? LWIP_TCP_OPT_LEN_WS_OUT : 0) + \ + ((flags) & TF_SEG_OPTS_SACK_PERM ? LWIP_TCP_OPT_LEN_SACK_PERM_OUT : 0) + +/** This returns a TCP header option for MSS in an u32_t */ +#define TCP_BUILD_MSS_OPTION(mss) lwip_htonl(0x02040000 | ((mss) & 0xFFFF)) + +#if LWIP_WND_SCALE +#define TCPWNDSIZE_F U32_F +#define TCPWND_MAX 0xFFFFFFFFU +#define TCPWND_CHECK16(x) LWIP_ASSERT("window size > 0xFFFF", (x) <= 0xFFFF) +#define TCPWND_MIN16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) +#else /* LWIP_WND_SCALE */ +#define TCPWNDSIZE_F U16_F +#define TCPWND_MAX 0xFFFFU +#define TCPWND_CHECK16(x) +#define TCPWND_MIN16(x) x +#endif /* LWIP_WND_SCALE */ + +/* Global variables: */ +extern struct tcp_pcb *tcp_input_pcb; +extern u32_t tcp_ticks; +extern u8_t tcp_active_pcbs_changed; + +/* The TCP PCB lists. */ +union tcp_listen_pcbs_t { /* List of all TCP PCBs in LISTEN state. */ + struct tcp_pcb_listen *listen_pcbs; + struct tcp_pcb *pcbs; +}; +extern struct tcp_pcb *tcp_bound_pcbs; +extern union tcp_listen_pcbs_t tcp_listen_pcbs; +extern struct tcp_pcb *tcp_active_pcbs; /* List of all TCP PCBs that are in a + state in which they accept or send + data. */ +extern struct tcp_pcb *tcp_tw_pcbs; /* List of all TCP PCBs in TIME-WAIT. */ + +#define NUM_TCP_PCB_LISTS_NO_TIME_WAIT 3 +#define NUM_TCP_PCB_LISTS 4 +extern struct tcp_pcb ** const tcp_pcb_lists[NUM_TCP_PCB_LISTS]; + +/* Axioms about the above lists: + 1) Every TCP PCB that is not CLOSED is in one of the lists. + 2) A PCB is only in one of the lists. + 3) All PCBs in the tcp_listen_pcbs list is in LISTEN state. + 4) All PCBs in the tcp_tw_pcbs list is in TIME-WAIT state. +*/ +/* Define two macros, TCP_REG and TCP_RMV that registers a TCP PCB + with a PCB list or removes a PCB from a list, respectively. 
*/ +#ifndef TCP_DEBUG_PCB_LISTS +#define TCP_DEBUG_PCB_LISTS 0 +#endif +#if TCP_DEBUG_PCB_LISTS +#define TCP_REG(pcbs, npcb) do {\ + struct tcp_pcb *tcp_tmp_pcb; \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_REG %p local port %"U16_F"\n", (void *)(npcb), (npcb)->local_port)); \ + for (tcp_tmp_pcb = *(pcbs); \ + tcp_tmp_pcb != NULL; \ + tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + LWIP_ASSERT("TCP_REG: already registered\n", tcp_tmp_pcb != (npcb)); \ + } \ + LWIP_ASSERT("TCP_REG: pcb->state != CLOSED", ((pcbs) == &tcp_bound_pcbs) || ((npcb)->state != CLOSED)); \ + (npcb)->next = *(pcbs); \ + LWIP_ASSERT("TCP_REG: npcb->next != npcb", (npcb)->next != (npcb)); \ + *(pcbs) = (npcb); \ + LWIP_ASSERT("TCP_REG: tcp_pcbs sane", tcp_pcbs_sane()); \ + tcp_timer_needed(); \ + } while(0) +#define TCP_RMV(pcbs, npcb) do { \ + struct tcp_pcb *tcp_tmp_pcb; \ + LWIP_ASSERT("TCP_RMV: pcbs != NULL", *(pcbs) != NULL); \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_RMV: removing %p from %p\n", (void *)(npcb), (void *)(*(pcbs)))); \ + if(*(pcbs) == (npcb)) { \ + *(pcbs) = (*pcbs)->next; \ + } else for (tcp_tmp_pcb = *(pcbs); tcp_tmp_pcb != NULL; tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + if(tcp_tmp_pcb->next == (npcb)) { \ + tcp_tmp_pcb->next = (npcb)->next; \ + break; \ + } \ + } \ + (npcb)->next = NULL; \ + LWIP_ASSERT("TCP_RMV: tcp_pcbs sane", tcp_pcbs_sane()); \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_RMV: removed %p from %p\n", (void *)(npcb), (void *)(*(pcbs)))); \ + } while(0) + +#else /* LWIP_DEBUG */ + +#define TCP_REG(pcbs, npcb) \ + do { \ + (npcb)->next = *pcbs; \ + *(pcbs) = (npcb); \ + tcp_timer_needed(); \ + } while (0) + +#define TCP_RMV(pcbs, npcb) \ + do { \ + if(*(pcbs) == (npcb)) { \ + (*(pcbs)) = (*pcbs)->next; \ + } \ + else { \ + struct tcp_pcb *tcp_tmp_pcb; \ + for (tcp_tmp_pcb = *pcbs; \ + tcp_tmp_pcb != NULL; \ + tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + if(tcp_tmp_pcb->next == (npcb)) { \ + tcp_tmp_pcb->next = (npcb)->next; \ + break; \ + } \ + } \ + } \ + (npcb)->next = NULL; \ + } while(0) + +#endif /* LWIP_DEBUG */ + +#define TCP_REG_ACTIVE(npcb) \ + do { \ + TCP_REG(&tcp_active_pcbs, npcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + +#define TCP_RMV_ACTIVE(npcb) \ + do { \ + TCP_RMV(&tcp_active_pcbs, npcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + +#define TCP_PCB_REMOVE_ACTIVE(pcb) \ + do { \ + tcp_pcb_remove(&tcp_active_pcbs, pcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + + +/* Internal functions: */ +struct tcp_pcb *tcp_pcb_copy(struct tcp_pcb *pcb); +void tcp_pcb_purge(struct tcp_pcb *pcb); +void tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb); + +void tcp_segs_free(struct tcp_seg *seg); +void tcp_seg_free(struct tcp_seg *seg); +struct tcp_seg *tcp_seg_copy(struct tcp_seg *seg); + +#define tcp_ack(pcb) \ + do { \ + if((pcb)->flags & TF_ACK_DELAY) { \ + tcp_clear_flags(pcb, TF_ACK_DELAY); \ + tcp_ack_now(pcb); \ + } \ + else { \ + tcp_set_flags(pcb, TF_ACK_DELAY); \ + } \ + } while (0) + +#define tcp_ack_now(pcb) \ + tcp_set_flags(pcb, TF_ACK_NOW) + +err_t tcp_send_fin(struct tcp_pcb *pcb); +err_t tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags); + +void tcp_rexmit_seg(struct tcp_pcb *pcb, struct tcp_seg *seg); + +void tcp_rst(const struct tcp_pcb* pcb, u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port); + +u32_t tcp_next_iss(struct tcp_pcb *pcb); + +err_t tcp_keepalive(struct tcp_pcb *pcb); +err_t tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split); +err_t tcp_zero_window_probe(struct tcp_pcb *pcb); +void 
tcp_trigger_input_pcb_close(void); + +#if TCP_CALCULATE_EFF_SEND_MSS +u16_t tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, + const ip_addr_t *dest); +#define tcp_eff_send_mss(sendmss, src, dest) \ + tcp_eff_send_mss_netif(sendmss, ip_route(src, dest), dest) +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +#if LWIP_CALLBACK_API +err_t tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err); +#endif /* LWIP_CALLBACK_API */ + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +void tcp_debug_print(struct tcp_hdr *tcphdr); +void tcp_debug_print_flags(u8_t flags); +void tcp_debug_print_state(enum tcp_state s); +void tcp_debug_print_pcbs(void); +s16_t tcp_pcbs_sane(void); +#else +# define tcp_debug_print(tcphdr) +# define tcp_debug_print_flags(flags) +# define tcp_debug_print_state(s) +# define tcp_debug_print_pcbs() +# define tcp_pcbs_sane() 1 +#endif /* TCP_DEBUG */ + +/** External function (implemented in timers.c), called when TCP detects + * that a timer is needed (i.e. active- or time-wait-pcb found). */ +void tcp_timer_needed(void); + +void tcp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#if TCP_QUEUE_OOSEQ +void tcp_free_ooseq(struct tcp_pcb *pcb); +#endif + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +err_t tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCP_PRIV_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h new file mode 100644 index 000000000..74be634be --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h @@ -0,0 +1,170 @@ +/** + * @file + * TCPIP API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPIP_PRIV_H +#define LWIP_HDR_TCPIP_PRIV_H + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpip.h" +#include "lwip/sys.h" +#include "lwip/timeouts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct pbuf; +struct netif; + +#if LWIP_MPU_COMPATIBLE +#define API_VAR_REF(name) (*(name)) +#define API_VAR_DECLARE(type, name) type * name +#define API_VAR_ALLOC_EXT(type, pool, name, errorblock) do { \ + name = (type *)memp_malloc(pool); \ + if (name == NULL) { \ + errorblock; \ + } \ + } while(0) +#define API_VAR_ALLOC(type, pool, name, errorval) API_VAR_ALLOC_EXT(type, pool, name, return errorval) +#define API_VAR_ALLOC_POOL(type, pool, name, errorval) do { \ + name = (type *)LWIP_MEMPOOL_ALLOC(pool); \ + if (name == NULL) { \ + return errorval; \ + } \ + } while(0) +#define API_VAR_FREE(pool, name) memp_free(pool, name) +#define API_VAR_FREE_POOL(pool, name) LWIP_MEMPOOL_FREE(pool, name) +#define API_EXPR_REF(expr) (&(expr)) +#if LWIP_NETCONN_SEM_PER_THREAD +#define API_EXPR_REF_SEM(expr) (expr) +#else +#define API_EXPR_REF_SEM(expr) API_EXPR_REF(expr) +#endif +#define API_EXPR_DEREF(expr) expr +#define API_MSG_M_DEF(m) m +#define API_MSG_M_DEF_C(t, m) t m +#else /* LWIP_MPU_COMPATIBLE */ +#define API_VAR_REF(name) name +#define API_VAR_DECLARE(type, name) type name +#define API_VAR_ALLOC_EXT(type, pool, name, errorblock) +#define API_VAR_ALLOC(type, pool, name, errorval) +#define API_VAR_ALLOC_POOL(type, pool, name, errorval) +#define API_VAR_FREE(pool, name) +#define API_VAR_FREE_POOL(pool, name) +#define API_EXPR_REF(expr) expr +#define API_EXPR_REF_SEM(expr) API_EXPR_REF(expr) +#define API_EXPR_DEREF(expr) (*(expr)) +#define API_MSG_M_DEF(m) *m +#define API_MSG_M_DEF_C(t, m) const t * m +#endif /* LWIP_MPU_COMPATIBLE */ + +err_t tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t* sem); + +struct tcpip_api_call_data +{ +#if !LWIP_TCPIP_CORE_LOCKING + err_t err; +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_t sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ +#else /* !LWIP_TCPIP_CORE_LOCKING */ + u8_t dummy; /* avoid empty struct :-( */ +#endif /* !LWIP_TCPIP_CORE_LOCKING */ +}; +typedef err_t (*tcpip_api_call_fn)(struct tcpip_api_call_data* call); +err_t tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call); + +enum tcpip_msg_type { +#if !LWIP_TCPIP_CORE_LOCKING + TCPIP_MSG_API, + TCPIP_MSG_API_CALL, +#endif /* !LWIP_TCPIP_CORE_LOCKING */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + TCPIP_MSG_INPKT, +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + TCPIP_MSG_TIMEOUT, + TCPIP_MSG_UNTIMEOUT, +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + TCPIP_MSG_CALLBACK, + TCPIP_MSG_CALLBACK_STATIC +}; + +struct tcpip_msg { + enum tcpip_msg_type type; + union { +#if !LWIP_TCPIP_CORE_LOCKING + struct { + tcpip_callback_fn function; + void* msg; + } api_msg; + struct { + tcpip_api_call_fn function; + struct tcpip_api_call_data *arg; + sys_sem_t *sem; + } api_call; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + struct { + struct pbuf *p; + struct netif *netif; + netif_input_fn input_fn; + } inp; +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ + struct { + tcpip_callback_fn function; + void *ctx; + } cb; +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + struct { + u32_t msecs; + sys_timeout_handler h; + void *arg; + } tmo; +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + } msg; +}; + +#ifdef __cplusplus 
+} +#endif + +#endif /* !NO_SYS */ + +#endif /* LWIP_HDR_TCPIP_PRIV_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h new file mode 100644 index 000000000..fd3af8a9f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h @@ -0,0 +1,78 @@ +/** + * @file + * AutoIP protocol definitions + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + */ + +#ifndef LWIP_HDR_PROT_AUTOIP_H +#define LWIP_HDR_PROT_AUTOIP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* 169.254.0.0 */ +#define AUTOIP_NET 0xA9FE0000 +/* 169.254.1.0 */ +#define AUTOIP_RANGE_START (AUTOIP_NET | 0x0100) +/* 169.254.254.255 */ +#define AUTOIP_RANGE_END (AUTOIP_NET | 0xFEFF) + +/* RFC 3927 Constants */ +#define PROBE_WAIT 1 /* second (initial random delay) */ +#define PROBE_MIN 1 /* second (minimum delay till repeated probe) */ +#define PROBE_MAX 2 /* seconds (maximum delay till repeated probe) */ +#define PROBE_NUM 3 /* (number of probe packets) */ +#define ANNOUNCE_NUM 2 /* (number of announcement packets) */ +#define ANNOUNCE_INTERVAL 2 /* seconds (time between announcement packets) */ +#define ANNOUNCE_WAIT 2 /* seconds (delay before announcing) */ +#define MAX_CONFLICTS 10 /* (max conflicts before rate limiting) */ +#define RATE_LIMIT_INTERVAL 60 /* seconds (delay between successive attempts) */ +#define DEFEND_INTERVAL 10 /* seconds (min. 
wait between defensive ARPs) */ + +/* AutoIP client states */ +typedef enum { + AUTOIP_STATE_OFF = 0, + AUTOIP_STATE_PROBING = 1, + AUTOIP_STATE_ANNOUNCING = 2, + AUTOIP_STATE_BOUND = 3 +} autoip_state_enum_t; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_AUTOIP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h new file mode 100644 index 000000000..ab18dca31 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h @@ -0,0 +1,178 @@ +/** + * @file + * DHCP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Leon Woestenberg + * + */ +#ifndef LWIP_HDR_PROT_DHCP_H +#define LWIP_HDR_PROT_DHCP_H + +#include "lwip/opt.h" +#include "lwip/arch.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + + /* DHCP message item offsets and length */ +#define DHCP_CHADDR_LEN 16U +#define DHCP_SNAME_OFS 44U +#define DHCP_SNAME_LEN 64U +#define DHCP_FILE_OFS 108U +#define DHCP_FILE_LEN 128U +#define DHCP_MSG_LEN 236U +#define DHCP_OPTIONS_OFS (DHCP_MSG_LEN + 4U) /* 4 byte: cookie */ + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** minimum set of fields of any DHCP message */ +struct dhcp_msg +{ + PACK_STRUCT_FLD_8(u8_t op); + PACK_STRUCT_FLD_8(u8_t htype); + PACK_STRUCT_FLD_8(u8_t hlen); + PACK_STRUCT_FLD_8(u8_t hops); + PACK_STRUCT_FIELD(u32_t xid); + PACK_STRUCT_FIELD(u16_t secs); + PACK_STRUCT_FIELD(u16_t flags); + PACK_STRUCT_FLD_S(ip4_addr_p_t ciaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t yiaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t siaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t giaddr); + PACK_STRUCT_FLD_8(u8_t chaddr[DHCP_CHADDR_LEN]); + PACK_STRUCT_FLD_8(u8_t sname[DHCP_SNAME_LEN]); + PACK_STRUCT_FLD_8(u8_t file[DHCP_FILE_LEN]); + PACK_STRUCT_FIELD(u32_t cookie); +#define DHCP_MIN_OPTIONS_LEN 68U +/** make sure user does not configure this too small */ +#if ((defined(DHCP_OPTIONS_LEN)) && (DHCP_OPTIONS_LEN < DHCP_MIN_OPTIONS_LEN)) +# undef DHCP_OPTIONS_LEN +#endif +/** allow this to be configured in lwipopts.h, but not too small */ +#if (!defined(DHCP_OPTIONS_LEN)) +/** set this to be sufficient for your options in outgoing DHCP msgs */ +# define DHCP_OPTIONS_LEN DHCP_MIN_OPTIONS_LEN +#endif + PACK_STRUCT_FLD_8(u8_t options[DHCP_OPTIONS_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +/* DHCP client states */ +typedef enum { + DHCP_STATE_OFF = 0, + DHCP_STATE_REQUESTING = 1, + DHCP_STATE_INIT = 2, + DHCP_STATE_REBOOTING = 3, + DHCP_STATE_REBINDING = 4, + DHCP_STATE_RENEWING = 5, + DHCP_STATE_SELECTING = 6, + DHCP_STATE_INFORMING = 7, + DHCP_STATE_CHECKING = 8, + DHCP_STATE_PERMANENT = 9, /* not yet implemented */ + DHCP_STATE_BOUND = 10, + DHCP_STATE_RELEASING = 11, /* not yet implemented */ + DHCP_STATE_BACKING_OFF = 12 +} dhcp_state_enum_t; + +/* DHCP op codes */ +#define DHCP_BOOTREQUEST 1 +#define DHCP_BOOTREPLY 2 + +/* DHCP message types */ +#define DHCP_DISCOVER 1 +#define DHCP_OFFER 2 +#define DHCP_REQUEST 3 +#define DHCP_DECLINE 4 +#define DHCP_ACK 5 +#define DHCP_NAK 6 +#define DHCP_RELEASE 7 +#define DHCP_INFORM 8 + +#define DHCP_MAGIC_COOKIE 0x63825363UL + +/* This is a list of options for BOOTP and DHCP, see RFC 2132 for descriptions */ + +/* BootP options */ +#define DHCP_OPTION_PAD 0 +#define DHCP_OPTION_SUBNET_MASK 1 /* RFC 2132 3.3 */ +#define DHCP_OPTION_ROUTER 3 +#define DHCP_OPTION_DNS_SERVER 6 +#define DHCP_OPTION_HOSTNAME 12 +#define DHCP_OPTION_IP_TTL 23 +#define DHCP_OPTION_MTU 26 +#define DHCP_OPTION_BROADCAST 28 +#define DHCP_OPTION_TCP_TTL 37 +#define DHCP_OPTION_NTP 42 +#define DHCP_OPTION_END 255 + +/* DHCP options */ +#define DHCP_OPTION_REQUESTED_IP 50 /* RFC 2132 9.1, requested IP address */ +#define DHCP_OPTION_LEASE_TIME 51 /* RFC 2132 9.2, time in seconds, in 4 bytes */ +#define DHCP_OPTION_OVERLOAD 52 /* RFC2132 9.3, use file and/or sname field for options */ + +#define DHCP_OPTION_MESSAGE_TYPE 53 /* RFC 2132 9.6, important for DHCP */ +#define DHCP_OPTION_MESSAGE_TYPE_LEN 1 + +#define DHCP_OPTION_SERVER_ID 54 /* RFC 2132 
9.7, server IP address */ +#define DHCP_OPTION_PARAMETER_REQUEST_LIST 55 /* RFC 2132 9.8, requested option types */ + +#define DHCP_OPTION_MAX_MSG_SIZE 57 /* RFC 2132 9.10, message size accepted >= 576 */ +#define DHCP_OPTION_MAX_MSG_SIZE_LEN 2 + +#define DHCP_OPTION_T1 58 /* T1 renewal time */ +#define DHCP_OPTION_T2 59 /* T2 rebinding time */ +#define DHCP_OPTION_US 60 +#define DHCP_OPTION_CLIENT_ID 61 +#define DHCP_OPTION_TFTP_SERVERNAME 66 +#define DHCP_OPTION_BOOTFILE 67 + +/* possible combinations of overloading the file and sname fields with options */ +#define DHCP_OVERLOAD_NONE 0 +#define DHCP_OVERLOAD_FILE 1 +#define DHCP_OVERLOAD_SNAME 2 +#define DHCP_OVERLOAD_SNAME_FILE 3 + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DHCP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h new file mode 100644 index 000000000..0754c91b9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h @@ -0,0 +1,138 @@ +/** + * @file + * DHCPv6 protocol definitions + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_PROT_DHCP6_H +#define LWIP_HDR_PROT_DHCP6_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define DHCP6_CLIENT_PORT 546 +#define DHCP6_SERVER_PORT 547 + + + /* DHCPv6 message item offsets and length */ +#define DHCP6_TRANSACTION_ID_LEN 3 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** minimum set of fields of any DHCPv6 message */ +struct dhcp6_msg +{ + PACK_STRUCT_FLD_8(u8_t msgtype); + PACK_STRUCT_FLD_8(u8_t transaction_id[DHCP6_TRANSACTION_ID_LEN]); + /* options follow */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +/* DHCP6 client states */ +typedef enum { + DHCP6_STATE_OFF = 0, + DHCP6_STATE_STATELESS_IDLE = 1, + DHCP6_STATE_REQUESTING_CONFIG = 2 +} dhcp6_state_enum_t; + +/* DHCPv6 message types */ +#define DHCP6_SOLICIT 1 +#define DHCP6_ADVERTISE 2 +#define DHCP6_REQUEST 3 +#define DHCP6_CONFIRM 4 +#define DHCP6_RENEW 5 +#define DHCP6_REBIND 6 +#define DHCP6_REPLY 7 +#define DHCP6_RELEASE 8 +#define DHCP6_DECLINE 9 +#define DHCP6_RECONFIGURE 10 +#define DHCP6_INFOREQUEST 11 +#define DHCP6_RELAYFORW 12 +#define DHCP6_RELAYREPL 13 +/* More message types see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ + +/** DHCPv6 status codes */ +#define DHCP6_STATUS_SUCCESS 0 /* Success. */ +#define DHCP6_STATUS_UNSPECFAIL 1 /* Failure, reason unspecified; this status code is sent by either a client or a server to indicate a failure not explicitly specified in this document. */ +#define DHCP6_STATUS_NOADDRSAVAIL 2 /* Server has no addresses available to assign to the IA(s). */ +#define DHCP6_STATUS_NOBINDING 3 /* Client record (binding) unavailable. */ +#define DHCP6_STATUS_NOTONLINK 4 /* The prefix for the address is not appropriate for the link to which the client is attached. */ +#define DHCP6_STATUS_USEMULTICAST 5 /* Sent by a server to a client to force the client to send messages to the server using the All_DHCP_Relay_Agents_and_Servers address. 
*/ +/* More status codes see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ + +/** DHCPv6 DUID types */ +#define DHCP6_DUID_LLT 1 /* LLT: Link-layer Address Plus Time */ +#define DHCP6_DUID_EN 2 /* EN: Enterprise number */ +#define DHCP6_DUID_LL 3 /* LL: Link-layer Address */ +#define DHCP6_DUID_UUID 4 /* UUID (RFC 6355) */ + +/* DHCPv6 options */ +#define DHCP6_OPTION_CLIENTID 1 +#define DHCP6_OPTION_SERVERID 2 +#define DHCP6_OPTION_IA_NA 3 +#define DHCP6_OPTION_IA_TA 4 +#define DHCP6_OPTION_IAADDR 5 +#define DHCP6_OPTION_ORO 6 +#define DHCP6_OPTION_PREFERENCE 7 +#define DHCP6_OPTION_ELAPSED_TIME 8 +#define DHCP6_OPTION_RELAY_MSG 9 +#define DHCP6_OPTION_AUTH 11 +#define DHCP6_OPTION_UNICAST 12 +#define DHCP6_OPTION_STATUS_CODE 13 +#define DHCP6_OPTION_RAPID_COMMIT 14 +#define DHCP6_OPTION_USER_CLASS 15 +#define DHCP6_OPTION_VENDOR_CLASS 16 +#define DHCP6_OPTION_VENDOR_OPTS 17 +#define DHCP6_OPTION_INTERFACE_ID 18 +#define DHCP6_OPTION_RECONF_MSG 19 +#define DHCP6_OPTION_RECONF_ACCEPT 20 +/* More options see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ +#define DHCP6_OPTION_DNS_SERVERS 23 /* RFC 3646 */ +#define DHCP6_OPTION_DOMAIN_LIST 24 /* RFC 3646 */ +#define DHCP6_OPTION_SNTP_SERVERS 31 /* RFC 4075 */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DHCP6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h new file mode 100644 index 000000000..94782d6e9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h @@ -0,0 +1,140 @@ +/** + * @file + * DNS - host name to IP address resolver. + */ + +/* + * Port to lwIP from uIP + * by Jim Pettinato April 2007 + * + * security fixes and more by Simon Goldschmidt + * + * uIP version Copyright (c) 2002-2003, Adam Dunkels. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef LWIP_HDR_PROT_DNS_H +#define LWIP_HDR_PROT_DNS_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS server port address */ +#ifndef DNS_SERVER_PORT +#define DNS_SERVER_PORT 53 +#endif + +/* DNS field TYPE used for "Resource Records" */ +#define DNS_RRTYPE_A 1 /* a host address */ +#define DNS_RRTYPE_NS 2 /* an authoritative name server */ +#define DNS_RRTYPE_MD 3 /* a mail destination (Obsolete - use MX) */ +#define DNS_RRTYPE_MF 4 /* a mail forwarder (Obsolete - use MX) */ +#define DNS_RRTYPE_CNAME 5 /* the canonical name for an alias */ +#define DNS_RRTYPE_SOA 6 /* marks the start of a zone of authority */ +#define DNS_RRTYPE_MB 7 /* a mailbox domain name (EXPERIMENTAL) */ +#define DNS_RRTYPE_MG 8 /* a mail group member (EXPERIMENTAL) */ +#define DNS_RRTYPE_MR 9 /* a mail rename domain name (EXPERIMENTAL) */ +#define DNS_RRTYPE_NULL 10 /* a null RR (EXPERIMENTAL) */ +#define DNS_RRTYPE_WKS 11 /* a well known service description */ +#define DNS_RRTYPE_PTR 12 /* a domain name pointer */ +#define DNS_RRTYPE_HINFO 13 /* host information */ +#define DNS_RRTYPE_MINFO 14 /* mailbox or mail list information */ +#define DNS_RRTYPE_MX 15 /* mail exchange */ +#define DNS_RRTYPE_TXT 16 /* text strings */ +#define DNS_RRTYPE_AAAA 28 /* IPv6 address */ +#define DNS_RRTYPE_SRV 33 /* service location */ +#define DNS_RRTYPE_ANY 255 /* any type */ + +/* DNS field CLASS used for "Resource Records" */ +#define DNS_RRCLASS_IN 1 /* the Internet */ +#define DNS_RRCLASS_CS 2 /* the CSNET class (Obsolete - used only for examples in some obsolete RFCs) */ +#define DNS_RRCLASS_CH 3 /* the CHAOS class */ +#define DNS_RRCLASS_HS 4 /* Hesiod [Dyer 87] */ +#define DNS_RRCLASS_ANY 255 /* any class */ +#define DNS_RRCLASS_FLUSH 0x800 /* Flush bit */ + +/* DNS protocol flags */ +#define DNS_FLAG1_RESPONSE 0x80 +#define DNS_FLAG1_OPCODE_STATUS 0x10 +#define DNS_FLAG1_OPCODE_INVERSE 0x08 +#define DNS_FLAG1_OPCODE_STANDARD 0x00 +#define DNS_FLAG1_AUTHORATIVE 0x04 +#define DNS_FLAG1_TRUNC 0x02 +#define DNS_FLAG1_RD 0x01 +#define DNS_FLAG2_RA 0x80 +#define DNS_FLAG2_ERR_MASK 0x0f +#define DNS_FLAG2_ERR_NONE 0x00 +#define DNS_FLAG2_ERR_NAME 0x03 + +#define DNS_HDR_GET_OPCODE(hdr) ((((hdr)->flags1) >> 3) & 0xF) + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** DNS message header */ +struct dns_hdr { + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FLD_8(u8_t flags1); + PACK_STRUCT_FLD_8(u8_t flags2); + PACK_STRUCT_FIELD(u16_t numquestions); + PACK_STRUCT_FIELD(u16_t numanswers); + PACK_STRUCT_FIELD(u16_t numauthrr); + PACK_STRUCT_FIELD(u16_t numextrarr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define SIZEOF_DNS_HDR 12 + + +/* Multicast DNS definitions */ + +/** UDP port for multicast DNS queries */ +#ifndef DNS_MQUERY_PORT +#define DNS_MQUERY_PORT 5353 +#endif + +/* IPv4 group for multicast DNS queries: 224.0.0.251 */ +#ifndef DNS_MQUERY_IPV4_GROUP_INIT +#define DNS_MQUERY_IPV4_GROUP_INIT IPADDR4_INIT_BYTES(224,0,0,251) +#endif + +/* IPv6 group for multicast DNS queries: FF02::FB */ +#ifndef DNS_MQUERY_IPV6_GROUP_INIT +#define DNS_MQUERY_IPV6_GROUP_INIT IPADDR6_INIT_HOST(0xFF020000,0,0,0xFB) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DNS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h new file 
mode 100644 index 000000000..811c22841 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h @@ -0,0 +1,114 @@ +/** + * @file + * ARP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ETHARP_H +#define LWIP_HDR_PROT_ETHARP_H + +#include "lwip/arch.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ETHARP_HWADDR_LEN +#define ETHARP_HWADDR_LEN ETH_HWADDR_LEN +#endif + +/** + * struct ip4_addr_wordaligned is used in the definition of the ARP packet format in + * order to support compilers that don't have structure packing. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip4_addr_wordaligned { + PACK_STRUCT_FIELD(u16_t addrw[2]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** MEMCPY-like copying of IP addresses where addresses are known to be + * 16-bit-aligned if the port is correctly configured (so a port could define + * this to copying 2 u16_t's) - no NULL-pointer-checking needed. */ +#ifndef IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T +#define IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(dest, src) SMEMCPY(dest, src, sizeof(ip4_addr_t)) +#endif + + /** MEMCPY-like copying of IP addresses where addresses are known to be + * 16-bit-aligned if the port is correctly configured (so a port could define + * this to copying 2 u16_t's) - no NULL-pointer-checking needed. 
*/ +#ifndef IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T +#define IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(dest, src) SMEMCPY(dest, src, sizeof(ip4_addr_t)) +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** the ARP message, see RFC 826 ("Packet format") */ +struct etharp_hdr { + PACK_STRUCT_FIELD(u16_t hwtype); + PACK_STRUCT_FIELD(u16_t proto); + PACK_STRUCT_FLD_8(u8_t hwlen); + PACK_STRUCT_FLD_8(u8_t protolen); + PACK_STRUCT_FIELD(u16_t opcode); + PACK_STRUCT_FLD_S(struct eth_addr shwaddr); + PACK_STRUCT_FLD_S(struct ip4_addr_wordaligned sipaddr); + PACK_STRUCT_FLD_S(struct eth_addr dhwaddr); + PACK_STRUCT_FLD_S(struct ip4_addr_wordaligned dipaddr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_ETHARP_HDR 28 + +/* ARP message types (opcodes) */ +enum etharp_opcode { + ARP_REQUEST = 1, + ARP_REPLY = 2 +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ETHARP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h new file mode 100644 index 000000000..309e57465 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h @@ -0,0 +1,125 @@ +/** + * @file + * Ethernet protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ETHERNET_H +#define LWIP_HDR_PROT_ETHERNET_H + +#include "lwip/arch.h" +#include "lwip/prot/ieee.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ETH_HWADDR_LEN +#ifdef ETHARP_HWADDR_LEN +#define ETH_HWADDR_LEN ETHARP_HWADDR_LEN /* compatibility mode */ +#else +#define ETH_HWADDR_LEN 6 +#endif +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** An Ethernet MAC address */ +struct eth_addr { + PACK_STRUCT_FLD_8(u8_t addr[ETH_HWADDR_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Initialize a struct eth_addr with its 6 bytes (takes care of correct braces) */ +#define ETH_ADDR(b0, b1, b2, b3, b4, b5) {{b0, b1, b2, b3, b4, b5}} + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** Ethernet header */ +struct eth_hdr { +#if ETH_PAD_SIZE + PACK_STRUCT_FLD_8(u8_t padding[ETH_PAD_SIZE]); +#endif + PACK_STRUCT_FLD_S(struct eth_addr dest); + PACK_STRUCT_FLD_S(struct eth_addr src); + PACK_STRUCT_FIELD(u16_t type); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_ETH_HDR (14 + ETH_PAD_SIZE) + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** VLAN header inserted between ethernet header and payload + * if 'type' in ethernet header is ETHTYPE_VLAN. + * See IEEE802.Q */ +struct eth_vlan_hdr { + PACK_STRUCT_FIELD(u16_t prio_vid); + PACK_STRUCT_FIELD(u16_t tpid); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_VLAN_HDR 4 +#define VLAN_ID(vlan_hdr) (lwip_htons((vlan_hdr)->prio_vid) & 0xFFF) + +/** The 24-bit IANA IPv4-multicast OUI is 01-00-5e: */ +#define LL_IP4_MULTICAST_ADDR_0 0x01 +#define LL_IP4_MULTICAST_ADDR_1 0x00 +#define LL_IP4_MULTICAST_ADDR_2 0x5e + +/** IPv6 multicast uses this prefix */ +#define LL_IP6_MULTICAST_ADDR_0 0x33 +#define LL_IP6_MULTICAST_ADDR_1 0x33 + +#define eth_addr_cmp(addr1, addr2) (memcmp((addr1)->addr, (addr2)->addr, ETH_HWADDR_LEN) == 0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ETHERNET_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h new file mode 100644 index 000000000..32890cccd --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h @@ -0,0 +1,97 @@ +/** + * @file + * IANA assigned numbers (RFC 1700 and successors) + * + * @defgroup iana IANA assigned numbers + * @ingroup infrastructure + */ + +/* + * Copyright (c) 2017 Dirk Ziegelmeier. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_PROT_IANA_H +#define LWIP_HDR_PROT_IANA_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup iana + * Hardware types + */ +enum lwip_iana_hwtype { + /** Ethernet */ + LWIP_IANA_HWTYPE_ETHERNET = 1 +}; + +/** + * @ingroup iana + * Port numbers + * https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt + */ +enum lwip_iana_port_number { + /** SMTP */ + LWIP_IANA_PORT_SMTP = 25, + /** DHCP server */ + LWIP_IANA_PORT_DHCP_SERVER = 67, + /** DHCP client */ + LWIP_IANA_PORT_DHCP_CLIENT = 68, + /** TFTP */ + LWIP_IANA_PORT_TFTP = 69, + /** HTTP */ + LWIP_IANA_PORT_HTTP = 80, + /** SNTP */ + LWIP_IANA_PORT_SNTP = 123, + /** NETBIOS */ + LWIP_IANA_PORT_NETBIOS = 137, + /** SNMP */ + LWIP_IANA_PORT_SNMP = 161, + /** SNMP traps */ + LWIP_IANA_PORT_SNMP_TRAP = 162, + /** HTTPS */ + LWIP_IANA_PORT_HTTPS = 443, + /** SMTPS */ + LWIP_IANA_PORT_SMTPS = 465, + /** MQTT */ + LWIP_IANA_PORT_MQTT = 1883, + /** MDNS */ + LWIP_IANA_PORT_MDNS = 5353, + /** Secure MQTT */ + LWIP_IANA_PORT_SECURE_MQTT = 8883 +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IANA_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h new file mode 100644 index 000000000..7d19385c7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h @@ -0,0 +1,91 @@ +/** + * @file + * ICMP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ICMP_H +#define LWIP_HDR_PROT_ICMP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ICMP_ER 0 /* echo reply */ +#define ICMP_DUR 3 /* destination unreachable */ +#define ICMP_SQ 4 /* source quench */ +#define ICMP_RD 5 /* redirect */ +#define ICMP_ECHO 8 /* echo */ +#define ICMP_TE 11 /* time exceeded */ +#define ICMP_PP 12 /* parameter problem */ +#define ICMP_TS 13 /* timestamp */ +#define ICMP_TSR 14 /* timestamp reply */ +#define ICMP_IRQ 15 /* information request */ +#define ICMP_IR 16 /* information reply */ +#define ICMP_AM 17 /* address mask request */ +#define ICMP_AMR 18 /* address mask reply */ + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +/** This is the standard ICMP header only that the u32_t data + * is split to two u16_t like ICMP echo needs it. + * This header is also used for other ICMP types that do not + * use the data part. + */ +PACK_STRUCT_BEGIN +struct icmp_echo_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FIELD(u16_t seqno); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Compatibility defines, old versions used to combine type and code to an u16_t */ +#define ICMPH_TYPE(hdr) ((hdr)->type) +#define ICMPH_CODE(hdr) ((hdr)->code) +#define ICMPH_TYPE_SET(hdr, t) ((hdr)->type = (t)) +#define ICMPH_CODE_SET(hdr, c) ((hdr)->code = (c)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ICMP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h new file mode 100644 index 000000000..346112042 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h @@ -0,0 +1,170 @@ +/** + * @file + * ICMP6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ICMP6_H +#define LWIP_HDR_PROT_ICMP6_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP type */ +enum icmp6_type { + /** Destination unreachable */ + ICMP6_TYPE_DUR = 1, + /** Packet too big */ + ICMP6_TYPE_PTB = 2, + /** Time exceeded */ + ICMP6_TYPE_TE = 3, + /** Parameter problem */ + ICMP6_TYPE_PP = 4, + /** Private experimentation */ + ICMP6_TYPE_PE1 = 100, + /** Private experimentation */ + ICMP6_TYPE_PE2 = 101, + /** Reserved for expansion of error messages */ + ICMP6_TYPE_RSV_ERR = 127, + + /** Echo request */ + ICMP6_TYPE_EREQ = 128, + /** Echo reply */ + ICMP6_TYPE_EREP = 129, + /** Multicast listener query */ + ICMP6_TYPE_MLQ = 130, + /** Multicast listener report */ + ICMP6_TYPE_MLR = 131, + /** Multicast listener done */ + ICMP6_TYPE_MLD = 132, + /** Router solicitation */ + ICMP6_TYPE_RS = 133, + /** Router advertisement */ + ICMP6_TYPE_RA = 134, + /** Neighbor solicitation */ + ICMP6_TYPE_NS = 135, + /** Neighbor advertisement */ + ICMP6_TYPE_NA = 136, + /** Redirect */ + ICMP6_TYPE_RD = 137, + /** Multicast router advertisement */ + ICMP6_TYPE_MRA = 151, + /** Multicast router solicitation */ + ICMP6_TYPE_MRS = 152, + /** Multicast router termination */ + ICMP6_TYPE_MRT = 153, + /** Private experimentation */ + ICMP6_TYPE_PE3 = 200, + /** Private experimentation */ + ICMP6_TYPE_PE4 = 201, + /** Reserved for expansion of informational messages */ + ICMP6_TYPE_RSV_INF = 255 +}; + +/** ICMP destination unreachable codes */ +enum icmp6_dur_code { + /** No route to destination */ + ICMP6_DUR_NO_ROUTE = 0, + /** Communication with destination administratively prohibited */ + ICMP6_DUR_PROHIBITED = 1, + /** Beyond scope of source address */ + ICMP6_DUR_SCOPE = 2, + /** Address unreachable */ + ICMP6_DUR_ADDRESS = 3, + /** Port unreachable */ + ICMP6_DUR_PORT = 4, + /** Source address failed ingress/egress policy */ + ICMP6_DUR_POLICY = 5, + /** Reject route to destination */ + ICMP6_DUR_REJECT_ROUTE = 6 +}; + +/** ICMP time exceeded codes */ +enum icmp6_te_code { + /** Hop limit exceeded in transit */ + ICMP6_TE_HL = 0, + /** Fragment reassembly time exceeded */ + ICMP6_TE_FRAG = 1 +}; + +/** ICMP parameter code */ +enum icmp6_pp_code { + /** Erroneous header field encountered */ + ICMP6_PP_FIELD = 0, + /** Unrecognized next header type encountered */ + ICMP6_PP_HEADER = 1, + /** Unrecognized IPv6 option encountered */ + ICMP6_PP_OPTION = 2 +}; + +/** This is the standard ICMP6 header. 
*/ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct icmp6_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t data); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** This is the ICMP6 header adapted for echo req/resp. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct icmp6_echo_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FIELD(u16_t seqno); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ICMP6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h new file mode 100644 index 000000000..abbb9e31d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h @@ -0,0 +1,91 @@ +/** + * @file + * IEEE assigned numbers + * + * @defgroup ieee IEEE assigned numbers + * @ingroup infrastructure + */ + +/* + * Copyright (c) 2017 Dirk Ziegelmeier. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_PROT_IEEE_H +#define LWIP_HDR_PROT_IEEE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup ieee + * A list of often ethtypes (although lwIP does not use all of them). 
+ */ +enum lwip_ieee_eth_type { + /** Internet protocol v4 */ + ETHTYPE_IP = 0x0800U, + /** Address resolution protocol */ + ETHTYPE_ARP = 0x0806U, + /** Wake on lan */ + ETHTYPE_WOL = 0x0842U, + /** RARP */ + ETHTYPE_RARP = 0x8035U, + /** Virtual local area network */ + ETHTYPE_VLAN = 0x8100U, + /** Internet protocol v6 */ + ETHTYPE_IPV6 = 0x86DDU, + /** PPP Over Ethernet Discovery Stage */ + ETHTYPE_PPPOEDISC = 0x8863U, + /** PPP Over Ethernet Session Stage */ + ETHTYPE_PPPOE = 0x8864U, + /** Jumbo Frames */ + ETHTYPE_JUMBO = 0x8870U, + /** Process field network */ + ETHTYPE_PROFINET = 0x8892U, + /** Ethernet for control automation technology */ + ETHTYPE_ETHERCAT = 0x88A4U, + /** Link layer discovery protocol */ + ETHTYPE_LLDP = 0x88CCU, + /** Serial real-time communication system */ + ETHTYPE_SERCOS = 0x88CDU, + /** Media redundancy protocol */ + ETHTYPE_MRP = 0x88E3U, + /** Precision time protocol */ + ETHTYPE_PTP = 0x88F7U, + /** Q-in-Q, 802.1ad */ + ETHTYPE_QINQ = 0x9100U +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IEEE_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h new file mode 100644 index 000000000..46b81a9dd --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h @@ -0,0 +1,90 @@ +/** + * @file + * IGMP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IGMP_H +#define LWIP_HDR_PROT_IGMP_H + +#include "lwip/arch.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * IGMP constants + */ +#define IGMP_TTL 1 +#define IGMP_MINLEN 8 +#define ROUTER_ALERT 0x9404U +#define ROUTER_ALERTLEN 4 + +/* + * IGMP message types, including version number. + */ +#define IGMP_MEMB_QUERY 0x11 /* Membership query */ +#define IGMP_V1_MEMB_REPORT 0x12 /* Ver. 
1 membership report */ +#define IGMP_V2_MEMB_REPORT 0x16 /* Ver. 2 membership report */ +#define IGMP_LEAVE_GROUP 0x17 /* Leave-group message */ + +/* Group membership states */ +#define IGMP_GROUP_NON_MEMBER 0 +#define IGMP_GROUP_DELAYING_MEMBER 1 +#define IGMP_GROUP_IDLE_MEMBER 2 + +/** + * IGMP packet format. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct igmp_msg { + PACK_STRUCT_FLD_8(u8_t igmp_msgtype); + PACK_STRUCT_FLD_8(u8_t igmp_maxresp); + PACK_STRUCT_FIELD(u16_t igmp_checksum); + PACK_STRUCT_FLD_S(ip4_addr_p_t igmp_group_address); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IGMP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h new file mode 100644 index 000000000..223158f5a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h @@ -0,0 +1,59 @@ +/** + * @file + * IP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
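With lwip/prot/igmp.h in place, building a membership report amounts to filling struct igmp_msg, roughly the way igmp.c itself does. A hedged sketch (fill_v2_membership_report is a hypothetical helper; software checksum via inet_chksum() assumed):

#include "lwip/prot/igmp.h"     /* struct igmp_msg, IGMP_* constants */
#include "lwip/inet_chksum.h"   /* inet_chksum() */

static void fill_v2_membership_report(struct igmp_msg *msg, const ip4_addr_t *group)
{
  msg->igmp_msgtype            = IGMP_V2_MEMB_REPORT;
  msg->igmp_maxresp            = 0;            /* unused in reports */
  msg->igmp_group_address.addr = group->addr;  /* both carry a raw u32_t address */
  msg->igmp_checksum           = 0;
  msg->igmp_checksum           = inet_chksum(msg, IGMP_MINLEN);
}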
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP_H +#define LWIP_HDR_PROT_IP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define IP_PROTO_ICMP 1 +#define IP_PROTO_IGMP 2 +#define IP_PROTO_UDP 17 +#define IP_PROTO_UDPLITE 136 +#define IP_PROTO_TCP 6 + +/** This operates on a void* by loading the first byte */ +#define IP_HDR_GET_VERSION(ptr) ((*(u8_t*)(ptr)) >> 4) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h new file mode 100644 index 000000000..934746115 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h @@ -0,0 +1,131 @@ +/** + * @file + * IPv4 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP4_H +#define LWIP_HDR_PROT_IP4_H + +#include "lwip/arch.h" +#include "lwip/ip4_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the packed version of ip4_addr_t, + used in network headers that are itself packed */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip4_addr_packed { + PACK_STRUCT_FIELD(u32_t addr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +typedef struct ip4_addr_packed ip4_addr_p_t; + +/* Size of the IPv4 header. Same as 'sizeof(struct ip_hdr)'. */ +#define IP_HLEN 20 +/* Maximum size of the IPv4 header with options. 
*/ +#define IP_HLEN_MAX 60 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/* The IPv4 header */ +struct ip_hdr { + /* version / header length */ + PACK_STRUCT_FLD_8(u8_t _v_hl); + /* type of service */ + PACK_STRUCT_FLD_8(u8_t _tos); + /* total length */ + PACK_STRUCT_FIELD(u16_t _len); + /* identification */ + PACK_STRUCT_FIELD(u16_t _id); + /* fragment offset field */ + PACK_STRUCT_FIELD(u16_t _offset); +#define IP_RF 0x8000U /* reserved fragment flag */ +#define IP_DF 0x4000U /* don't fragment flag */ +#define IP_MF 0x2000U /* more fragments flag */ +#define IP_OFFMASK 0x1fffU /* mask for fragmenting bits */ + /* time to live */ + PACK_STRUCT_FLD_8(u8_t _ttl); + /* protocol*/ + PACK_STRUCT_FLD_8(u8_t _proto); + /* checksum */ + PACK_STRUCT_FIELD(u16_t _chksum); + /* source and destination IP addresses */ + PACK_STRUCT_FLD_S(ip4_addr_p_t src); + PACK_STRUCT_FLD_S(ip4_addr_p_t dest); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Macros to get struct ip_hdr fields: */ +#define IPH_V(hdr) ((hdr)->_v_hl >> 4) +#define IPH_HL(hdr) ((hdr)->_v_hl & 0x0f) +#define IPH_HL_BYTES(hdr) ((u8_t)(IPH_HL(hdr) * 4)) +#define IPH_TOS(hdr) ((hdr)->_tos) +#define IPH_LEN(hdr) ((hdr)->_len) +#define IPH_ID(hdr) ((hdr)->_id) +#define IPH_OFFSET(hdr) ((hdr)->_offset) +#define IPH_OFFSET_BYTES(hdr) ((u16_t)((lwip_ntohs(IPH_OFFSET(hdr)) & IP_OFFMASK) * 8U)) +#define IPH_TTL(hdr) ((hdr)->_ttl) +#define IPH_PROTO(hdr) ((hdr)->_proto) +#define IPH_CHKSUM(hdr) ((hdr)->_chksum) + +/* Macros to set struct ip_hdr fields: */ +#define IPH_VHL_SET(hdr, v, hl) (hdr)->_v_hl = (u8_t)((((v) << 4) | (hl))) +#define IPH_TOS_SET(hdr, tos) (hdr)->_tos = (tos) +#define IPH_LEN_SET(hdr, len) (hdr)->_len = (len) +#define IPH_ID_SET(hdr, id) (hdr)->_id = (id) +#define IPH_OFFSET_SET(hdr, off) (hdr)->_offset = (off) +#define IPH_TTL_SET(hdr, ttl) (hdr)->_ttl = (u8_t)(ttl) +#define IPH_PROTO_SET(hdr, proto) (hdr)->_proto = (u8_t)(proto) +#define IPH_CHKSUM_SET(hdr, chksum) (hdr)->_chksum = (chksum) + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP4_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h new file mode 100644 index 000000000..0f6de4559 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h @@ -0,0 +1,233 @@ +/** + * @file + * IPv6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
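The struct ip_hdr accessors above are easiest to read with an example. The sanity check below (ip4_header_ok is a hypothetical helper, not part of lwIP) shows how the IPH_* getters and IP_HLEN/IP_HLEN_MAX are meant to be combined on a received buffer:

#include "lwip/prot/ip4.h"
#include "lwip/def.h"    /* lwip_ntohs() */

/* Returns 1 if buf holds a plausible IPv4 header, 0 otherwise. */
static int ip4_header_ok(const void *buf, u16_t buflen)
{
  const struct ip_hdr *iphdr = (const struct ip_hdr *)buf;
  u8_t hlen;

  if (buflen < IP_HLEN || IPH_V(iphdr) != 4) {
    return 0;
  }
  hlen = IPH_HL_BYTES(iphdr);              /* header length incl. options */
  if (hlen < IP_HLEN || hlen > IP_HLEN_MAX || hlen > buflen) {
    return 0;
  }
  /* the total-length field must at least cover the header itself */
  return lwip_ntohs(IPH_LEN(iphdr)) >= hlen;
}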
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP6_H +#define LWIP_HDR_PROT_IP6_H + +#include "lwip/arch.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the packed version of ip6_addr_t, + used in network headers that are itself packed */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_addr_packed { + PACK_STRUCT_FIELD(u32_t addr[4]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +typedef struct ip6_addr_packed ip6_addr_p_t; + +#define IP6_HLEN 40 + +#define IP6_NEXTH_HOPBYHOP 0 +#define IP6_NEXTH_TCP 6 +#define IP6_NEXTH_UDP 17 +#define IP6_NEXTH_ENCAPS 41 +#define IP6_NEXTH_ROUTING 43 +#define IP6_NEXTH_FRAGMENT 44 +#define IP6_NEXTH_ICMP6 58 +#define IP6_NEXTH_NONE 59 +#define IP6_NEXTH_DESTOPTS 60 +#define IP6_NEXTH_UDPLITE 136 + +/** The IPv6 header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_hdr { + /** version / traffic class / flow label */ + PACK_STRUCT_FIELD(u32_t _v_tc_fl); + /** payload length */ + PACK_STRUCT_FIELD(u16_t _plen); + /** next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /** hop limit */ + PACK_STRUCT_FLD_8(u8_t _hoplim); + /** source and destination IP addresses */ + PACK_STRUCT_FLD_S(ip6_addr_p_t src); + PACK_STRUCT_FLD_S(ip6_addr_p_t dest); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6H_V(hdr) ((lwip_ntohl((hdr)->_v_tc_fl) >> 28) & 0x0f) +#define IP6H_TC(hdr) ((lwip_ntohl((hdr)->_v_tc_fl) >> 20) & 0xff) +#define IP6H_FL(hdr) (lwip_ntohl((hdr)->_v_tc_fl) & 0x000fffff) +#define IP6H_PLEN(hdr) (lwip_ntohs((hdr)->_plen)) +#define IP6H_NEXTH(hdr) ((hdr)->_nexth) +#define IP6H_NEXTH_P(hdr) ((u8_t *)(hdr) + 6) +#define IP6H_HOPLIM(hdr) ((hdr)->_hoplim) +#define IP6H_VTCFL_SET(hdr, v, tc, fl) (hdr)->_v_tc_fl = (lwip_htonl((((u32_t)(v)) << 28) | (((u32_t)(tc)) << 20) | (fl))) +#define IP6H_PLEN_SET(hdr, plen) (hdr)->_plen = lwip_htons(plen) +#define IP6H_NEXTH_SET(hdr, nexth) (hdr)->_nexth = (nexth) +#define IP6H_HOPLIM_SET(hdr, hl) (hdr)->_hoplim = (u8_t)(hl) + +/* ipv6 extended options header */ +#define IP6_PAD1_OPTION 0 +#define IP6_PADN_OPTION 1 +#define IP6_ROUTER_ALERT_OPTION 5 +#define IP6_JUMBO_OPTION 194 +#define IP6_HOME_ADDRESS_OPTION 201 +#define IP6_ROUTER_ALERT_DLEN 2 +#define IP6_ROUTER_ALERT_VALUE_MLD 0 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_opt_hdr { + /* router alert option type */ + PACK_STRUCT_FLD_8(u8_t _opt_type); + /* router alert option data len */ + PACK_STRUCT_FLD_8(u8_t _opt_dlen); +} 
PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_OPT_HLEN 2 +#define IP6_OPT_TYPE_ACTION(hdr) ((((hdr)->_opt_type) >> 6) & 0x3) +#define IP6_OPT_TYPE_CHANGE(hdr) ((((hdr)->_opt_type) >> 5) & 0x1) +#define IP6_OPT_TYPE(hdr) ((hdr)->_opt_type) +#define IP6_OPT_DLEN(hdr) ((hdr)->_opt_dlen) + +/* Hop-by-Hop header. */ +#define IP6_HBH_HLEN 2 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_hbh_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* header length in 8-octet units */ + PACK_STRUCT_FLD_8(u8_t _hlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_HBH_NEXTH(hdr) ((hdr)->_nexth) + +/* Destination header. */ +#define IP6_DEST_HLEN 2 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_dest_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* header length in 8-octet units */ + PACK_STRUCT_FLD_8(u8_t _hlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_DEST_NEXTH(hdr) ((hdr)->_nexth) + +/* Routing header */ +#define IP6_ROUT_TYPE2 2 +#define IP6_ROUT_RPL 3 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_rout_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* reserved */ + PACK_STRUCT_FLD_8(u8_t _hlen); + /* fragment offset */ + PACK_STRUCT_FIELD(u8_t _routing_type); + /* fragmented packet identification */ + PACK_STRUCT_FIELD(u8_t _segments_left); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_ROUT_NEXTH(hdr) ((hdr)->_nexth) +#define IP6_ROUT_TYPE(hdr) ((hdr)->_routing_type) +#define IP6_ROUT_SEG_LEFT(hdr) ((hdr)->_segments_left) + +/* Fragment header. */ +#define IP6_FRAG_HLEN 8 +#define IP6_FRAG_OFFSET_MASK 0xfff8 +#define IP6_FRAG_MORE_FLAG 0x0001 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_frag_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* reserved */ + PACK_STRUCT_FLD_8(u8_t reserved); + /* fragment offset */ + PACK_STRUCT_FIELD(u16_t _fragment_offset); + /* fragmented packet identification */ + PACK_STRUCT_FIELD(u32_t _identification); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_FRAG_NEXTH(hdr) ((hdr)->_nexth) +#define IP6_FRAG_MBIT(hdr) (lwip_ntohs((hdr)->_fragment_offset) & 0x1) +#define IP6_FRAG_ID(hdr) (lwip_ntohl((hdr)->_identification)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h new file mode 100644 index 000000000..71f1dcbdc --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h @@ -0,0 +1,71 @@ +/** + * @file + * MLD6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_MLD6_H +#define LWIP_HDR_PROT_MLD6_H + +#include "lwip/arch.h" +#include "lwip/prot/ip6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MLD6_HBH_HLEN 8 +/** Multicast listener report/query/done message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct mld_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t max_resp_delay); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t multicast_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_MLD6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h new file mode 100644 index 000000000..c270d07c1 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h @@ -0,0 +1,274 @@ +/** + * @file + * ND6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
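For the IP6H_* accessors defined in lwip/prot/ip6.h above, a short sketch of how a receive path would read the fixed header (ip6_next_header is a hypothetical helper):

#include "lwip/prot/ip6.h"
#include "lwip/debug.h"   /* LWIP_ASSERT() */

/* Pull the payload length and next-header value out of a received IPv6 header. */
static u8_t ip6_next_header(const struct ip6_hdr *hdr, u16_t *payload_len)
{
  LWIP_ASSERT("not an IPv6 header", IP6H_V(hdr) == 6);
  *payload_len = IP6H_PLEN(hdr);   /* IP6H_PLEN already applies lwip_ntohs() */
  return IP6H_NEXTH(hdr);          /* e.g. IP6_NEXTH_TCP or IP6_NEXTH_ICMP6 */
}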
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ND6_H +#define LWIP_HDR_PROT_ND6_H + +#include "lwip/arch.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Neighbor solicitation message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ns_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Neighbor advertisement message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct na_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FLD_8(u8_t reserved[3]); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define ND6_FLAG_ROUTER (0x80) +#define ND6_FLAG_SOLICITED (0x40) +#define ND6_FLAG_OVERRIDE (0x20) + +/** Router solicitation message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct rs_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Router advertisement message header. */ +#define ND6_RA_FLAG_MANAGED_ADDR_CONFIG (0x80) +#define ND6_RA_FLAG_OTHER_CONFIG (0x40) +#define ND6_RA_FLAG_HOME_AGENT (0x20) +#define ND6_RA_PREFERENCE_MASK (0x18) +#define ND6_RA_PREFERENCE_HIGH (0x08) +#define ND6_RA_PREFERENCE_MEDIUM (0x00) +#define ND6_RA_PREFERENCE_LOW (0x18) +#define ND6_RA_PREFERENCE_DISABLED (0x10) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ra_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FLD_8(u8_t current_hop_limit); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FIELD(u16_t router_lifetime); + PACK_STRUCT_FIELD(u32_t reachable_time); + PACK_STRUCT_FIELD(u32_t retrans_timer); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Redirect message header. 
*/ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct redirect_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + PACK_STRUCT_FLD_S(ip6_addr_p_t destination_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Link-layer address option. */ +#define ND6_OPTION_TYPE_SOURCE_LLADDR (0x01) +#define ND6_OPTION_TYPE_TARGET_LLADDR (0x02) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct lladdr_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t addr[NETIF_MAX_HWADDR_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Prefix information option. */ +#define ND6_OPTION_TYPE_PREFIX_INFO (0x03) +#define ND6_PREFIX_FLAG_ON_LINK (0x80) +#define ND6_PREFIX_FLAG_AUTONOMOUS (0x40) +#define ND6_PREFIX_FLAG_ROUTER_ADDRESS (0x20) +#define ND6_PREFIX_FLAG_SITE_PREFIX (0x10) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct prefix_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t prefix_length); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FIELD(u32_t valid_lifetime); + PACK_STRUCT_FIELD(u32_t preferred_lifetime); + PACK_STRUCT_FLD_8(u8_t reserved2[3]); + PACK_STRUCT_FLD_8(u8_t site_prefix_length); + PACK_STRUCT_FLD_S(ip6_addr_p_t prefix); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Redirected header option. */ +#define ND6_OPTION_TYPE_REDIR_HDR (0x04) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct redirected_header_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t reserved[6]); + /* Portion of redirected packet follows. */ + /* PACK_STRUCT_FLD_8(u8_t redirected[8]); */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** MTU option. */ +#define ND6_OPTION_TYPE_MTU (0x05) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct mtu_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FIELD(u32_t mtu); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Route information option. */ +#define ND6_OPTION_TYPE_ROUTE_INFO (24) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct route_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t prefix_length); + PACK_STRUCT_FLD_8(u8_t preference); + PACK_STRUCT_FIELD(u32_t route_lifetime); + PACK_STRUCT_FLD_S(ip6_addr_p_t prefix); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Recursive DNS Server Option. 
*/ +#define ND6_OPTION_TYPE_RDNSS (25) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct rdnss_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FIELD(u32_t lifetime); + PACK_STRUCT_FLD_S(ip6_addr_p_t rdnss_address[1]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_RDNSS_OPTION_BASE 8 /* size without addresses */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ND6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h new file mode 100644 index 000000000..c1d7de1fa --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h @@ -0,0 +1,100 @@ +/** + * @file + * TCP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_TCP_H +#define LWIP_HDR_PROT_TCP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Length of the TCP header, excluding options. */ +#define TCP_HLEN 20 + +/* Fields are (of course) in network byte order. + * Some fields are converted to host byte order in tcp_input(). 
+ */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct tcp_hdr { + PACK_STRUCT_FIELD(u16_t src); + PACK_STRUCT_FIELD(u16_t dest); + PACK_STRUCT_FIELD(u32_t seqno); + PACK_STRUCT_FIELD(u32_t ackno); + PACK_STRUCT_FIELD(u16_t _hdrlen_rsvd_flags); + PACK_STRUCT_FIELD(u16_t wnd); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t urgp); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* TCP header flags bits */ +#define TCP_FIN 0x01U +#define TCP_SYN 0x02U +#define TCP_RST 0x04U +#define TCP_PSH 0x08U +#define TCP_ACK 0x10U +#define TCP_URG 0x20U +#define TCP_ECE 0x40U +#define TCP_CWR 0x80U +/* Valid TCP header flags */ +#define TCP_FLAGS 0x3fU + +#define TCP_MAX_OPTION_BYTES 40 + +#define TCPH_HDRLEN(phdr) ((u16_t)(lwip_ntohs((phdr)->_hdrlen_rsvd_flags) >> 12)) +#define TCPH_HDRLEN_BYTES(phdr) ((u8_t)(TCPH_HDRLEN(phdr) << 2)) +#define TCPH_FLAGS(phdr) ((u8_t)((lwip_ntohs((phdr)->_hdrlen_rsvd_flags) & TCP_FLAGS))) + +#define TCPH_HDRLEN_SET(phdr, len) (phdr)->_hdrlen_rsvd_flags = lwip_htons(((len) << 12) | TCPH_FLAGS(phdr)) +#define TCPH_FLAGS_SET(phdr, flags) (phdr)->_hdrlen_rsvd_flags = (((phdr)->_hdrlen_rsvd_flags & PP_HTONS(~TCP_FLAGS)) | lwip_htons(flags)) +#define TCPH_HDRLEN_FLAGS_SET(phdr, len, flags) (phdr)->_hdrlen_rsvd_flags = (u16_t)(lwip_htons((u16_t)((len) << 12) | (flags))) + +#define TCPH_SET_FLAG(phdr, flags ) (phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags | lwip_htons(flags)) +#define TCPH_UNSET_FLAG(phdr, flags) (phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags & ~lwip_htons(flags)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_TCP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h new file mode 100644 index 000000000..664f19a3e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h @@ -0,0 +1,68 @@ +/** + * @file + * UDP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
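To illustrate the TCPH_* accessors and flag bits from lwip/prot/tcp.h above, a small debug helper (log_tcp_segment is a hypothetical name; note that src, dest and seqno sit in the header in network byte order):

#include <stdio.h>
#include "lwip/prot/tcp.h"
#include "lwip/def.h"    /* lwip_ntohs(), lwip_ntohl() */

static void log_tcp_segment(const struct tcp_hdr *tcphdr)
{
  u8_t flags  = TCPH_FLAGS(tcphdr);
  u8_t hdrlen = TCPH_HDRLEN_BYTES(tcphdr);   /* 20..60 bytes incl. options */

  printf("tcp %u -> %u seq=%lu %s%s%shdrlen=%u\n",
         (unsigned)lwip_ntohs(tcphdr->src), (unsigned)lwip_ntohs(tcphdr->dest),
         (unsigned long)lwip_ntohl(tcphdr->seqno),
         (flags & TCP_SYN) ? "SYN " : "",
         (flags & TCP_ACK) ? "ACK " : "",
         (flags & TCP_FIN) ? "FIN " : "",
         (unsigned)hdrlen);
}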
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_UDP_H +#define LWIP_HDR_PROT_UDP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define UDP_HLEN 8 + +/* Fields are (of course) in network byte order. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct udp_hdr { + PACK_STRUCT_FIELD(u16_t src); + PACK_STRUCT_FIELD(u16_t dest); /* src/dest UDP ports */ + PACK_STRUCT_FIELD(u16_t len); + PACK_STRUCT_FIELD(u16_t chksum); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_UDP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h new file mode 100644 index 000000000..b129bd3b9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h @@ -0,0 +1,143 @@ +/** + * @file + * raw API (to be used from TCPIP thread)\n + * See also @ref raw_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_RAW_H +#define LWIP_HDR_RAW_H + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/def.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define RAW_FLAGS_CONNECTED 0x01U +#define RAW_FLAGS_HDRINCL 0x02U +#define RAW_FLAGS_MULTICAST_LOOP 0x04U + +struct raw_pcb; + +/** Function prototype for raw pcb receive callback functions. + * @param arg user supplied argument (raw_pcb.recv_arg) + * @param pcb the raw_pcb which received data + * @param p the packet buffer that was received + * @param addr the remote IP address from which the packet was received + * @return 1 if the packet was 'eaten' (aka. deleted), + * 0 if the packet lives on + * If returning 1, the callback is responsible for freeing the pbuf + * if it's not used any more. + */ +typedef u8_t (*raw_recv_fn)(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr); + +/** the RAW protocol control block */ +struct raw_pcb { + /* Common members of all PCB types */ + IP_PCB; + + struct raw_pcb *next; + + u8_t protocol; + u8_t flags; + +#if LWIP_MULTICAST_TX_OPTIONS + /** outgoing network interface for multicast packets, by interface index (if nonzero) */ + u8_t mcast_ifindex; + /** TTL for outgoing multicast packets */ + u8_t mcast_ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + /** receive callback function */ + raw_recv_fn recv; + /* user-supplied argument for the recv callback */ + void *recv_arg; +#if LWIP_IPV6 + /* fields for handling checksum computations as per RFC3542. */ + u16_t chksum_offset; + u8_t chksum_reqd; +#endif +}; + +/* The following functions is the application layer interface to the + RAW code. */ +struct raw_pcb * raw_new (u8_t proto); +struct raw_pcb * raw_new_ip_type(u8_t type, u8_t proto); +void raw_remove (struct raw_pcb *pcb); +err_t raw_bind (struct raw_pcb *pcb, const ip_addr_t *ipaddr); +void raw_bind_netif (struct raw_pcb *pcb, const struct netif *netif); +err_t raw_connect (struct raw_pcb *pcb, const ip_addr_t *ipaddr); +void raw_disconnect (struct raw_pcb *pcb); + +err_t raw_sendto (struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr); +err_t raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, struct netif *netif, const ip_addr_t *src_ip); +err_t raw_send (struct raw_pcb *pcb, struct pbuf *p); + +void raw_recv (struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg); + +#define raw_flags(pcb) ((pcb)->flags) +#define raw_setflags(pcb,f) ((pcb)->flags = (f)) + +#define raw_set_flags(pcb, set_flags) do { (pcb)->flags = (u8_t)((pcb)->flags | (set_flags)); } while(0) +#define raw_clear_flags(pcb, clr_flags) do { (pcb)->flags = (u8_t)((pcb)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define raw_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +#define raw_init() /* Compatibility define, no init needed. 
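The raw API above is callback driven: raw_recv() registers a raw_recv_fn, and its return value decides whether the stack keeps processing the packet. A minimal sketch for listening to ICMP follows (ping_recv and ping_raw_init are hypothetical names; a real handler would inspect the payload and only "eat" the echo replies it is waiting for):

#include "lwip/raw.h"
#include "lwip/prot/ip.h"   /* IP_PROTO_ICMP */

static u8_t ping_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p,
                      const ip_addr_t *addr)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(addr);
  /* returning 1 means the packet was eaten, so the callback frees the pbuf */
  pbuf_free(p);
  return 1;
}

static void ping_raw_init(void)
{
  struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
  if (pcb != NULL) {
    raw_recv(pcb, ping_recv, NULL);
    raw_bind(pcb, IP_ADDR_ANY);
  }
}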
*/ + +/* for compatibility with older implementation */ +#define raw_new_ip6(proto) raw_new_ip_type(IPADDR_TYPE_V6, proto) + +#if LWIP_MULTICAST_TX_OPTIONS +#define raw_set_multicast_netif_index(pcb, idx) ((pcb)->mcast_ifindex = (idx)) +#define raw_get_multicast_netif_index(pcb) ((pcb)->mcast_ifindex) +#define raw_set_multicast_ttl(pcb, value) ((pcb)->mcast_ttl = (value)) +#define raw_get_multicast_ttl(pcb) ((pcb)->mcast_ttl) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_RAW */ + +#endif /* LWIP_HDR_RAW_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h new file mode 100644 index 000000000..7643e1956 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + */ + +/* + * This is the interface to the platform specific serial IO module + * It needs to be implemented by those platforms which need SLIP or PPP + */ + +#ifndef SIO_H +#define SIO_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If you want to define sio_fd_t elsewhere or differently, + define this in your cc.h file. */ +#ifndef __sio_fd_t_defined +typedef void * sio_fd_t; +#endif + +/* The following functions can be defined to something else in your cc.h file + or be implemented in your custom sio.c file. */ + +#ifndef sio_open +/** + * Opens a serial device for communication. + * + * @param devnum device number + * @return handle to serial device if successful, NULL otherwise + */ +sio_fd_t sio_open(u8_t devnum); +#endif + +#ifndef sio_send +/** + * Sends a single character to the serial device. + * + * @param c character to send + * @param fd serial device handle + * + * @note This function will block until the character can be sent. 
+ */ +void sio_send(u8_t c, sio_fd_t fd); +#endif + +#ifndef sio_recv +/** + * Receives a single character from the serial device. + * + * @param fd serial device handle + * + * @note This function will block until a character is received. + */ +u8_t sio_recv(sio_fd_t fd); +#endif + +#ifndef sio_read +/** + * Reads from the serial device. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received - may be 0 if aborted by sio_read_abort + * + * @note This function will block until data can be received. The blocking + * can be cancelled by calling sio_read_abort(). + */ +u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_tryread +/** + * Tries to read from the serial device. Same as sio_read but returns + * immediately if no data is available and never blocks. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received + */ +u32_t sio_tryread(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_write +/** + * Writes to the serial device. + * + * @param fd serial device handle + * @param data pointer to data to send + * @param len length (in bytes) of data to send + * @return number of bytes actually sent + * + * @note This function will block until all data can be sent. + */ +u32_t sio_write(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_read_abort +/** + * Aborts a blocking sio_read() call. + * + * @param fd serial device handle + */ +void sio_read_abort(sio_fd_t fd); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* SIO_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h new file mode 100644 index 000000000..8704d0b4d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h @@ -0,0 +1,213 @@ +/** + * @file + * SNMP support API for implementing netifs and statitics for MIB2 + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
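lwip/sio.h above only declares the serial-IO hooks; a port has to implement them when SLIP or PPP is enabled. A hedged sketch for an STM32 UART follows (huart_slip is a hypothetical handle, stm32f7xx_hal.h is assumed to match the target MCU, and this sio_read ignores sio_read_abort() for brevity):

#include "lwip/sio.h"
#include "stm32f7xx_hal.h"            /* assumed HAL header for the target */

extern UART_HandleTypeDef huart_slip; /* hypothetical UART reserved for SLIP */

sio_fd_t sio_open(u8_t devnum)
{
  LWIP_UNUSED_ARG(devnum);            /* single UART, device number ignored */
  return (sio_fd_t)&huart_slip;
}

void sio_send(u8_t c, sio_fd_t fd)
{
  HAL_UART_Transmit((UART_HandleTypeDef *)fd, &c, 1, HAL_MAX_DELAY);
}

u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len)
{
  LWIP_UNUSED_ARG(len);
  /* block for one byte; a real port would use interrupts and honour sio_read_abort() */
  if (HAL_UART_Receive((UART_HandleTypeDef *)fd, data, 1, HAL_MAX_DELAY) != HAL_OK) {
    return 0;
  }
  return 1;
}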
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_H +#define LWIP_HDR_SNMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct udp_pcb; +struct netif; + +/** + * @defgroup netif_mib2 MIB2 statistics + * @ingroup netif + */ + +/* MIB2 statistics functions */ +#if MIB2_STATS /* don't build if not configured for use in lwipopts.h */ +/** + * @ingroup netif_mib2 + * @see RFC1213, "MIB-II, 6. Definitions" + */ +enum snmp_ifType { + snmp_ifType_other=1, /* none of the following */ + snmp_ifType_regular1822, + snmp_ifType_hdh1822, + snmp_ifType_ddn_x25, + snmp_ifType_rfc877_x25, + snmp_ifType_ethernet_csmacd, + snmp_ifType_iso88023_csmacd, + snmp_ifType_iso88024_tokenBus, + snmp_ifType_iso88025_tokenRing, + snmp_ifType_iso88026_man, + snmp_ifType_starLan, + snmp_ifType_proteon_10Mbit, + snmp_ifType_proteon_80Mbit, + snmp_ifType_hyperchannel, + snmp_ifType_fddi, + snmp_ifType_lapb, + snmp_ifType_sdlc, + snmp_ifType_ds1, /* T-1 */ + snmp_ifType_e1, /* european equiv. of T-1 */ + snmp_ifType_basicISDN, + snmp_ifType_primaryISDN, /* proprietary serial */ + snmp_ifType_propPointToPointSerial, + snmp_ifType_ppp, + snmp_ifType_softwareLoopback, + snmp_ifType_eon, /* CLNP over IP [11] */ + snmp_ifType_ethernet_3Mbit, + snmp_ifType_nsip, /* XNS over IP */ + snmp_ifType_slip, /* generic SLIP */ + snmp_ifType_ultra, /* ULTRA technologies */ + snmp_ifType_ds3, /* T-3 */ + snmp_ifType_sip, /* SMDS */ + snmp_ifType_frame_relay +}; + +/** This macro has a precision of ~49 days because sys_now returns u32_t. \#define your own if you want ~490 days. 
*/ +#ifndef MIB2_COPY_SYSUPTIME_TO +#define MIB2_COPY_SYSUPTIME_TO(ptrToVal) (*(ptrToVal) = (sys_now() / 10)) +#endif + +/** + * @ingroup netif_mib2 + * Increment stats member for SNMP MIB2 stats (struct stats_mib2_netif_ctrs) + */ +#define MIB2_STATS_NETIF_INC(n, x) do { ++(n)->mib2_counters.x; } while(0) +/** + * @ingroup netif_mib2 + * Add value to stats member for SNMP MIB2 stats (struct stats_mib2_netif_ctrs) + */ +#define MIB2_STATS_NETIF_ADD(n, x, val) do { (n)->mib2_counters.x += (val); } while(0) + +/** + * @ingroup netif_mib2 + * Init MIB2 statistic counters in netif + * @param netif Netif to init + * @param type one of enum @ref snmp_ifType + * @param speed your link speed here (units: bits per second) + */ +#define MIB2_INIT_NETIF(netif, type, speed) do { \ + (netif)->link_type = (type); \ + (netif)->link_speed = (speed);\ + (netif)->ts = 0; \ + (netif)->mib2_counters.ifinoctets = 0; \ + (netif)->mib2_counters.ifinucastpkts = 0; \ + (netif)->mib2_counters.ifinnucastpkts = 0; \ + (netif)->mib2_counters.ifindiscards = 0; \ + (netif)->mib2_counters.ifinerrors = 0; \ + (netif)->mib2_counters.ifinunknownprotos = 0; \ + (netif)->mib2_counters.ifoutoctets = 0; \ + (netif)->mib2_counters.ifoutucastpkts = 0; \ + (netif)->mib2_counters.ifoutnucastpkts = 0; \ + (netif)->mib2_counters.ifoutdiscards = 0; \ + (netif)->mib2_counters.ifouterrors = 0; } while(0) +#else /* MIB2_STATS */ +#ifndef MIB2_COPY_SYSUPTIME_TO +#define MIB2_COPY_SYSUPTIME_TO(ptrToVal) +#endif +#define MIB2_INIT_NETIF(netif, type, speed) +#define MIB2_STATS_NETIF_INC(n, x) +#define MIB2_STATS_NETIF_ADD(n, x, val) +#endif /* MIB2_STATS */ + +/* LWIP MIB2 callbacks */ +#if LWIP_MIB2_CALLBACKS /* don't build if not configured for use in lwipopts.h */ +/* network interface */ +void mib2_netif_added(struct netif *ni); +void mib2_netif_removed(struct netif *ni); + +#if LWIP_IPV4 && LWIP_ARP +/* ARP (for atTable and ipNetToMediaTable) */ +void mib2_add_arp_entry(struct netif *ni, ip4_addr_t *ip); +void mib2_remove_arp_entry(struct netif *ni, ip4_addr_t *ip); +#else /* LWIP_IPV4 && LWIP_ARP */ +#define mib2_add_arp_entry(ni,ip) +#define mib2_remove_arp_entry(ni,ip) +#endif /* LWIP_IPV4 && LWIP_ARP */ + +/* IP */ +#if LWIP_IPV4 +void mib2_add_ip4(struct netif *ni); +void mib2_remove_ip4(struct netif *ni); +void mib2_add_route_ip4(u8_t dflt, struct netif *ni); +void mib2_remove_route_ip4(u8_t dflt, struct netif *ni); +#endif /* LWIP_IPV4 */ + +/* UDP */ +#if LWIP_UDP +void mib2_udp_bind(struct udp_pcb *pcb); +void mib2_udp_unbind(struct udp_pcb *pcb); +#endif /* LWIP_UDP */ + +#else /* LWIP_MIB2_CALLBACKS */ +/* LWIP_MIB2_CALLBACKS support not available */ +/* define everything to be empty */ + +/* network interface */ +#define mib2_netif_added(ni) +#define mib2_netif_removed(ni) + +/* ARP */ +#define mib2_add_arp_entry(ni,ip) +#define mib2_remove_arp_entry(ni,ip) + +/* IP */ +#define mib2_add_ip4(ni) +#define mib2_remove_ip4(ni) +#define mib2_add_route_ip4(dflt, ni) +#define mib2_remove_route_ip4(dflt, ni) + +/* UDP */ +#define mib2_udp_bind(pcb) +#define mib2_udp_unbind(pcb) +#endif /* LWIP_MIB2_CALLBACKS */ + +/* for source-code compatibility reasons only, can be removed (not used internally) */ +#define NETIF_INIT_SNMP MIB2_INIT_NETIF +#define snmp_add_ifinoctets(ni,value) MIB2_STATS_NETIF_ADD(ni, ifinoctets, value) +#define snmp_inc_ifinucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifinucastpkts) +#define snmp_inc_ifinnucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifinnucastpkts) +#define snmp_inc_ifindiscards(ni) MIB2_STATS_NETIF_INC(ni, 
ifindiscards) +#define snmp_inc_ifinerrors(ni) MIB2_STATS_NETIF_INC(ni, ifinerrors) +#define snmp_inc_ifinunknownprotos(ni) MIB2_STATS_NETIF_INC(ni, ifinunknownprotos) +#define snmp_add_ifoutoctets(ni,value) MIB2_STATS_NETIF_ADD(ni, ifoutoctets, value) +#define snmp_inc_ifoutucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifoutucastpkts) +#define snmp_inc_ifoutnucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifoutnucastpkts) +#define snmp_inc_ifoutdiscards(ni) MIB2_STATS_NETIF_INC(ni, ifoutdiscards) +#define snmp_inc_ifouterrors(ni) MIB2_STATS_NETIF_INC(ni, ifouterrors) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_SNMP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h new file mode 100644 index 000000000..d70d36c4d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h @@ -0,0 +1,688 @@ +/** + * @file + * Socket API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + + +#ifndef LWIP_HDR_SOCKETS_H +#define LWIP_HDR_SOCKETS_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/err.h" +#include "lwip/inet.h" +#include "lwip/errno.h" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's sa_family_t, define SA_FAMILY_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(sa_family_t) && !defined(SA_FAMILY_T_DEFINED) +typedef u8_t sa_family_t; +#endif +/* If your port already typedef's in_port_t, define IN_PORT_T_DEFINED + to prevent this code from redefining it. 
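 * For example, a port whose toolchain already provides in_port_t can add
 *   #define IN_PORT_T_DEFINED
 * to its arch/cc.h (together with SA_FAMILY_T_DEFINED for sa_family_t, if that
 * type is provided too) so the fallback typedefs in this header are skipped.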
*/ +#if !defined(in_port_t) && !defined(IN_PORT_T_DEFINED) +typedef u16_t in_port_t; +#endif + +#if LWIP_IPV4 +/* members are in network byte order */ +struct sockaddr_in { + u8_t sin_len; + sa_family_t sin_family; + in_port_t sin_port; + struct in_addr sin_addr; +#define SIN_ZERO_LEN 8 + char sin_zero[SIN_ZERO_LEN]; +}; +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +struct sockaddr_in6 { + u8_t sin6_len; /* length of this structure */ + sa_family_t sin6_family; /* AF_INET6 */ + in_port_t sin6_port; /* Transport layer port # */ + u32_t sin6_flowinfo; /* IPv6 flow information */ + struct in6_addr sin6_addr; /* IPv6 address */ + u32_t sin6_scope_id; /* Set of interfaces for scope */ +}; +#endif /* LWIP_IPV6 */ + +struct sockaddr { + u8_t sa_len; + sa_family_t sa_family; + char sa_data[14]; +}; + +struct sockaddr_storage { + u8_t s2_len; + sa_family_t ss_family; + char s2_data1[2]; + u32_t s2_data2[3]; +#if LWIP_IPV6 + u32_t s2_data3[3]; +#endif /* LWIP_IPV6 */ +}; + +/* If your port already typedef's socklen_t, define SOCKLEN_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(socklen_t) && !defined(SOCKLEN_T_DEFINED) +typedef u32_t socklen_t; +#endif + +#if !defined IOV_MAX +#define IOV_MAX 0xFFFF +#elif IOV_MAX > 0xFFFF +#error "IOV_MAX larger than supported by LwIP" +#endif /* IOV_MAX */ + +#if !defined(iovec) +struct iovec { + void *iov_base; + size_t iov_len; +}; +#endif + +struct msghdr { + void *msg_name; + socklen_t msg_namelen; + struct iovec *msg_iov; + int msg_iovlen; + void *msg_control; + socklen_t msg_controllen; + int msg_flags; +}; + +/* struct msghdr->msg_flags bit field values */ +#define MSG_TRUNC 0x04 +#define MSG_CTRUNC 0x08 + +/* RFC 3542, Section 20: Ancillary Data */ +struct cmsghdr { + socklen_t cmsg_len; /* number of bytes, including header */ + int cmsg_level; /* originating protocol */ + int cmsg_type; /* protocol-specific type */ +}; +/* Data section follows header and possible padding, typically referred to as + unsigned char cmsg_data[]; */ + +/* cmsg header/data alignment. NOTE: we align to native word size (double word +size on 16-bit arch) so structures are not placed at an unaligned address. +16-bit arch needs double word to ensure 32-bit alignment because socklen_t +could be 32 bits. If we ever have cmsg data with a 64-bit variable, alignment +will need to increase long long */ +#define ALIGN_H(size) (((size) + sizeof(long) - 1U) & ~(sizeof(long)-1U)) +#define ALIGN_D(size) ALIGN_H(size) + +#define CMSG_FIRSTHDR(mhdr) \ + ((mhdr)->msg_controllen >= sizeof(struct cmsghdr) ? \ + (struct cmsghdr *)(mhdr)->msg_control : \ + (struct cmsghdr *)NULL) + +#define CMSG_NXTHDR(mhdr, cmsg) \ + (((cmsg) == NULL) ? CMSG_FIRSTHDR(mhdr) : \ + (((u8_t *)(cmsg) + ALIGN_H((cmsg)->cmsg_len) \ + + ALIGN_D(sizeof(struct cmsghdr)) > \ + (u8_t *)((mhdr)->msg_control) + (mhdr)->msg_controllen) ? \ + (struct cmsghdr *)NULL : \ + (struct cmsghdr *)((void*)((u8_t *)(cmsg) + \ + ALIGN_H((cmsg)->cmsg_len))))) + +#define CMSG_DATA(cmsg) ((void*)((u8_t *)(cmsg) + \ + ALIGN_D(sizeof(struct cmsghdr)))) + +#define CMSG_SPACE(length) (ALIGN_D(sizeof(struct cmsghdr)) + \ + ALIGN_H(length)) + +#define CMSG_LEN(length) (ALIGN_D(sizeof(struct cmsghdr)) + \ + length) + +/* Set socket options argument */ +#define IFNAMSIZ NETIF_NAMESIZE +struct ifreq { + char ifr_name[IFNAMSIZ]; /* Interface name */ +}; + +/* Socket protocol types (TCP/UDP/RAW) */ +#define SOCK_STREAM 1 +#define SOCK_DGRAM 2 +#define SOCK_RAW 3 + +/* + * Option flags per-socket. 
These must match the SOF_ flags in ip.h (checked in init.c) + */ +#define SO_REUSEADDR 0x0004 /* Allow local address reuse */ +#define SO_KEEPALIVE 0x0008 /* keep connections alive */ +#define SO_BROADCAST 0x0020 /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + + +/* + * Additional options, not kept in so_options. + */ +#define SO_DEBUG 0x0001 /* Unimplemented: turn on debugging info recording */ +#define SO_ACCEPTCONN 0x0002 /* socket has had listen() */ +#define SO_DONTROUTE 0x0010 /* Unimplemented: just use interface addresses */ +#define SO_USELOOPBACK 0x0040 /* Unimplemented: bypass hardware when possible */ +#define SO_LINGER 0x0080 /* linger on close if data present */ +#define SO_DONTLINGER ((int)(~SO_LINGER)) +#define SO_OOBINLINE 0x0100 /* Unimplemented: leave received OOB data in line */ +#define SO_REUSEPORT 0x0200 /* Unimplemented: allow local address & port reuse */ +#define SO_SNDBUF 0x1001 /* Unimplemented: send buffer size */ +#define SO_RCVBUF 0x1002 /* receive buffer size */ +#define SO_SNDLOWAT 0x1003 /* Unimplemented: send low-water mark */ +#define SO_RCVLOWAT 0x1004 /* Unimplemented: receive low-water mark */ +#define SO_SNDTIMEO 0x1005 /* send timeout */ +#define SO_RCVTIMEO 0x1006 /* receive timeout */ +#define SO_ERROR 0x1007 /* get error status and clear */ +#define SO_TYPE 0x1008 /* get socket type */ +#define SO_CONTIMEO 0x1009 /* Unimplemented: connect timeout */ +#define SO_NO_CHECK 0x100a /* don't create UDP checksum */ +#define SO_BINDTODEVICE 0x100b /* bind to device */ + +/* + * Structure used for manipulating linger option. + */ +struct linger { + int l_onoff; /* option on/off */ + int l_linger; /* linger time in seconds */ +}; + +/* + * Level number for (get/set)sockopt() to apply to socket itself. + */ +#define SOL_SOCKET 0xfff /* options for socket level */ + + +#define AF_UNSPEC 0 +#define AF_INET 2 +#if LWIP_IPV6 +#define AF_INET6 10 +#else /* LWIP_IPV6 */ +#define AF_INET6 AF_UNSPEC +#endif /* LWIP_IPV6 */ +#define PF_INET AF_INET +#define PF_INET6 AF_INET6 +#define PF_UNSPEC AF_UNSPEC + +#define IPPROTO_IP 0 +#define IPPROTO_ICMP 1 +#define IPPROTO_TCP 6 +#define IPPROTO_UDP 17 +#if LWIP_IPV6 +#define IPPROTO_IPV6 41 +#define IPPROTO_ICMPV6 58 +#endif /* LWIP_IPV6 */ +#define IPPROTO_UDPLITE 136 +#define IPPROTO_RAW 255 + +/* Flags we can use with send and recv. */ +#define MSG_PEEK 0x01 /* Peeks at an incoming message */ +#define MSG_WAITALL 0x02 /* Unimplemented: Requests that the function block until the full amount of data requested can be returned */ +#define MSG_OOB 0x04 /* Unimplemented: Requests out-of-band data. The significance and semantics of out-of-band data are protocol-specific */ +#define MSG_DONTWAIT 0x08 /* Nonblocking i/o for this operation only */ +#define MSG_MORE 0x10 /* Sender will send more */ +#define MSG_NOSIGNAL 0x20 /* Uninmplemented: Requests not to send the SIGPIPE signal if an attempt to send is made on a stream-oriented socket that is no longer connected. 
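 * As an illustrative sketch (s is assumed to be an already-connected TCP
 * socket descriptor created elsewhere), a caller could peek at pending data
 * without blocking:
 *   char buf[64];
 *   ssize_t n = lwip_recv(s, buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT);
 *   if (n < 0) {
 *     // -1 with errno set to EWOULDBLOCK simply means nothing is queued yet
 *   }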
*/ + + +/* + * Options for level IPPROTO_IP + */ +#define IP_TOS 1 +#define IP_TTL 2 +#define IP_PKTINFO 8 + +#if LWIP_TCP +/* + * Options for level IPPROTO_TCP + */ +#define TCP_NODELAY 0x01 /* don't delay send to coalesce packets */ +#define TCP_KEEPALIVE 0x02 /* send KEEPALIVE probes when idle for pcb->keep_idle milliseconds */ +#define TCP_KEEPIDLE 0x03 /* set pcb->keep_idle - Same as TCP_KEEPALIVE, but use seconds for get/setsockopt */ +#define TCP_KEEPINTVL 0x04 /* set pcb->keep_intvl - Use seconds for get/setsockopt */ +#define TCP_KEEPCNT 0x05 /* set pcb->keep_cnt - Use number of probes sent for get/setsockopt */ +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 +/* + * Options for level IPPROTO_IPV6 + */ +#define IPV6_CHECKSUM 7 /* RFC3542: calculate and insert the ICMPv6 checksum for raw sockets. */ +#define IPV6_V6ONLY 27 /* RFC3493: boolean control to restrict AF_INET6 sockets to IPv6 communications only. */ +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE +/* + * Options for level IPPROTO_UDPLITE + */ +#define UDPLITE_SEND_CSCOV 0x01 /* sender checksum coverage */ +#define UDPLITE_RECV_CSCOV 0x02 /* minimal receiver checksum coverage */ +#endif /* LWIP_UDP && LWIP_UDPLITE*/ + + +#if LWIP_MULTICAST_TX_OPTIONS +/* + * Options and types for UDP multicast traffic handling + */ +#define IP_MULTICAST_TTL 5 +#define IP_MULTICAST_IF 6 +#define IP_MULTICAST_LOOP 7 +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_IGMP +/* + * Options and types related to multicast membership + */ +#define IP_ADD_MEMBERSHIP 3 +#define IP_DROP_MEMBERSHIP 4 + +typedef struct ip_mreq { + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_interface; /* local IP address of interface */ +} ip_mreq; +#endif /* LWIP_IGMP */ + +#if LWIP_IPV4 +struct in_pktinfo { + unsigned int ipi_ifindex; /* Interface index */ + struct in_addr ipi_addr; /* Destination (from header) address */ +}; +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6_MLD +/* + * Options and types related to IPv6 multicast membership + */ +#define IPV6_JOIN_GROUP 12 +#define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP +#define IPV6_LEAVE_GROUP 13 +#define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP + +typedef struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; /* IPv6 multicast addr */ + unsigned int ipv6mr_interface; /* interface index, or 0 */ +} ipv6_mreq; +#endif /* LWIP_IPV6_MLD */ + +/* + * The Type of Service provides an indication of the abstract + * parameters of the quality of service desired. These parameters are + * to be used to guide the selection of the actual service parameters + * when transmitting a datagram through a particular network. Several + * networks offer service precedence, which somehow treats high + * precedence traffic as more important than other traffic (generally + * by accepting only traffic above a certain precedence at time of high + * load). The major choice is a three way tradeoff between low-delay, + * high-reliability, and high-throughput. + * The use of the Delay, Throughput, and Reliability indications may + * increase the cost (in some sense) of the service. In many networks + * better performance for one of these parameters is coupled with worse + * performance on another. Except for very unusual cases at most two + * of these three indications should be set. 
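 * As a usage sketch (udp_sock is assumed to be an existing UDP socket
 * descriptor), a low-delay marking could be requested with:
 *   int tos = IPTOS_LOWDELAY;
 *   lwip_setsockopt(udp_sock, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));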
+ */ +#define IPTOS_TOS_MASK 0x1E +#define IPTOS_TOS(tos) ((tos) & IPTOS_TOS_MASK) +#define IPTOS_LOWDELAY 0x10 +#define IPTOS_THROUGHPUT 0x08 +#define IPTOS_RELIABILITY 0x04 +#define IPTOS_LOWCOST 0x02 +#define IPTOS_MINCOST IPTOS_LOWCOST + +/* + * The Network Control precedence designation is intended to be used + * within a network only. The actual use and control of that + * designation is up to each network. The Internetwork Control + * designation is intended for use by gateway control originators only. + * If the actual use of these precedence designations is of concern to + * a particular network, it is the responsibility of that network to + * control the access to, and use of, those precedence designations. + */ +#define IPTOS_PREC_MASK 0xe0 +#define IPTOS_PREC(tos) ((tos) & IPTOS_PREC_MASK) +#define IPTOS_PREC_NETCONTROL 0xe0 +#define IPTOS_PREC_INTERNETCONTROL 0xc0 +#define IPTOS_PREC_CRITIC_ECP 0xa0 +#define IPTOS_PREC_FLASHOVERRIDE 0x80 +#define IPTOS_PREC_FLASH 0x60 +#define IPTOS_PREC_IMMEDIATE 0x40 +#define IPTOS_PREC_PRIORITY 0x20 +#define IPTOS_PREC_ROUTINE 0x00 + + +/* + * Commands for ioctlsocket(), taken from the BSD file fcntl.h. + * lwip_ioctl only supports FIONREAD and FIONBIO, for now + * + * Ioctl's have the command encoded in the lower word, + * and the size of any in or out parameters in the upper + * word. The high 2 bits of the upper word are used + * to encode the in/out status of the parameter; for now + * we restrict parameters to at most 128 bytes. + */ +#if !defined(FIONREAD) || !defined(FIONBIO) +#define IOCPARM_MASK 0x7fU /* parameters must be < 128 bytes */ +#define IOC_VOID 0x20000000UL /* no parameters */ +#define IOC_OUT 0x40000000UL /* copy out parameters */ +#define IOC_IN 0x80000000UL /* copy in parameters */ +#define IOC_INOUT (IOC_IN|IOC_OUT) + /* 0x20000000 distinguishes new & + old ioctl's */ +#define _IO(x,y) ((long)(IOC_VOID|((x)<<8)|(y))) + +#define _IOR(x,y,t) ((long)(IOC_OUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|(y))) + +#define _IOW(x,y,t) ((long)(IOC_IN|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|(y))) +#endif /* !defined(FIONREAD) || !defined(FIONBIO) */ + +#ifndef FIONREAD +#define FIONREAD _IOR('f', 127, unsigned long) /* get # bytes to read */ +#endif +#ifndef FIONBIO +#define FIONBIO _IOW('f', 126, unsigned long) /* set/clear non-blocking i/o */ +#endif + +/* Socket I/O Controls: unimplemented */ +#ifndef SIOCSHIWAT +#define SIOCSHIWAT _IOW('s', 0, unsigned long) /* set high watermark */ +#define SIOCGHIWAT _IOR('s', 1, unsigned long) /* get high watermark */ +#define SIOCSLOWAT _IOW('s', 2, unsigned long) /* set low watermark */ +#define SIOCGLOWAT _IOR('s', 3, unsigned long) /* get low watermark */ +#define SIOCATMARK _IOR('s', 7, unsigned long) /* at oob mark? */ +#endif + +/* commands for fnctl */ +#ifndef F_GETFL +#define F_GETFL 3 +#endif +#ifndef F_SETFL +#define F_SETFL 4 +#endif + +/* File status flags and file access modes for fnctl, + these are bits in an int. 
*/ +#ifndef O_NONBLOCK +#define O_NONBLOCK 1 /* nonblocking I/O */ +#endif +#ifndef O_NDELAY +#define O_NDELAY O_NONBLOCK /* same as O_NONBLOCK, for compatibility */ +#endif +#ifndef O_RDONLY +#define O_RDONLY 2 +#endif +#ifndef O_WRONLY +#define O_WRONLY 4 +#endif +#ifndef O_RDWR +#define O_RDWR (O_RDONLY|O_WRONLY) +#endif + +#ifndef SHUT_RD + #define SHUT_RD 0 + #define SHUT_WR 1 + #define SHUT_RDWR 2 +#endif + +/* FD_SET used for lwip_select */ +#ifndef FD_SET +#undef FD_SETSIZE +/* Make FD_SETSIZE match NUM_SOCKETS in socket.c */ +#define FD_SETSIZE MEMP_NUM_NETCONN +#define LWIP_SELECT_MAXNFDS (FD_SETSIZE + LWIP_SOCKET_OFFSET) +#define FDSETSAFESET(n, code) do { \ + if (((n) - LWIP_SOCKET_OFFSET < MEMP_NUM_NETCONN) && (((int)(n) - LWIP_SOCKET_OFFSET) >= 0)) { \ + code; }} while(0) +#define FDSETSAFEGET(n, code) (((n) - LWIP_SOCKET_OFFSET < MEMP_NUM_NETCONN) && (((int)(n) - LWIP_SOCKET_OFFSET) >= 0) ?\ + (code) : 0) +#define FD_SET(n, p) FDSETSAFESET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] = (u8_t)((p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] | (1 << (((n)-LWIP_SOCKET_OFFSET) & 7)))) +#define FD_CLR(n, p) FDSETSAFESET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] = (u8_t)((p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] & ~(1 << (((n)-LWIP_SOCKET_OFFSET) & 7)))) +#define FD_ISSET(n,p) FDSETSAFEGET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] & (1 << (((n)-LWIP_SOCKET_OFFSET) & 7))) +#define FD_ZERO(p) memset((void*)(p), 0, sizeof(*(p))) + +typedef struct fd_set +{ + unsigned char fd_bits [(FD_SETSIZE+7)/8]; +} fd_set; + +#elif FD_SETSIZE < (LWIP_SOCKET_OFFSET + MEMP_NUM_NETCONN) +#error "external FD_SETSIZE too small for number of sockets" +#else +#define LWIP_SELECT_MAXNFDS FD_SETSIZE +#endif /* FD_SET */ + +/* poll-related defines and types */ +/* @todo: find a better way to guard the definition of these defines and types if already defined */ +#if !defined(POLLIN) && !defined(POLLOUT) +#define POLLIN 0x1 +#define POLLOUT 0x2 +#define POLLERR 0x4 +#define POLLNVAL 0x8 +/* Below values are unimplemented */ +#define POLLRDNORM 0x10 +#define POLLRDBAND 0x20 +#define POLLPRI 0x40 +#define POLLWRNORM 0x80 +#define POLLWRBAND 0x100 +#define POLLHUP 0x200 +typedef unsigned int nfds_t; +struct pollfd +{ + int fd; + short events; + short revents; +}; +#endif + +/** LWIP_TIMEVAL_PRIVATE: if you want to use the struct timeval provided + * by your system, set this to 0 and include in cc.h */ +#ifndef LWIP_TIMEVAL_PRIVATE +#define LWIP_TIMEVAL_PRIVATE 1 +#endif + +#if LWIP_TIMEVAL_PRIVATE +struct timeval { + long tv_sec; /* seconds */ + long tv_usec; /* and microseconds */ +}; +#endif /* LWIP_TIMEVAL_PRIVATE */ + +#define lwip_socket_init() /* Compatibility define, no init needed. 
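 * As an illustrative sketch (s is assumed to be a valid lwIP socket
 * descriptor), waiting up to 500 ms for readability could look like:
 *   fd_set rset;
 *   struct timeval tv = { 0, 500000 };   // 0 s plus 500000 us
 *   FD_ZERO(&rset);
 *   FD_SET(s, &rset);
 *   if (lwip_select(s + 1, &rset, NULL, NULL, &tv) > 0 && FD_ISSET(s, &rset)) {
 *     // s can be read without blocking
 *   }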
*/ +void lwip_socket_thread_init(void); /* LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void lwip_socket_thread_cleanup(void); /* LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ + +#if LWIP_COMPAT_SOCKETS == 2 +/* This helps code parsers/code completion by not having the COMPAT functions as defines */ +#define lwip_accept accept +#define lwip_bind bind +#define lwip_shutdown shutdown +#define lwip_getpeername getpeername +#define lwip_getsockname getsockname +#define lwip_setsockopt setsockopt +#define lwip_getsockopt getsockopt +#define lwip_close closesocket +#define lwip_connect connect +#define lwip_listen listen +#define lwip_recv recv +#define lwip_recvmsg recvmsg +#define lwip_recvfrom recvfrom +#define lwip_send send +#define lwip_sendmsg sendmsg +#define lwip_sendto sendto +#define lwip_socket socket +#if LWIP_SOCKET_SELECT +#define lwip_select select +#endif +#if LWIP_SOCKET_POLL +#define lwip_poll poll +#endif +#define lwip_ioctl ioctlsocket +#define lwip_inet_ntop inet_ntop +#define lwip_inet_pton inet_pton + +#if LWIP_POSIX_SOCKETS_IO_NAMES +#define lwip_read read +#define lwip_readv readv +#define lwip_write write +#define lwip_writev writev +#undef lwip_close +#define lwip_close close +#define closesocket(s) close(s) +int fcntl(int s, int cmd, ...); +#undef lwip_ioctl +#define lwip_ioctl ioctl +#define ioctlsocket ioctl +#endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ +#endif /* LWIP_COMPAT_SOCKETS == 2 */ + +int lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen); +int lwip_bind(int s, const struct sockaddr *name, socklen_t namelen); +int lwip_shutdown(int s, int how); +int lwip_getpeername (int s, struct sockaddr *name, socklen_t *namelen); +int lwip_getsockname (int s, struct sockaddr *name, socklen_t *namelen); +int lwip_getsockopt (int s, int level, int optname, void *optval, socklen_t *optlen); +int lwip_setsockopt (int s, int level, int optname, const void *optval, socklen_t optlen); + int lwip_close(int s); +int lwip_connect(int s, const struct sockaddr *name, socklen_t namelen); +int lwip_listen(int s, int backlog); +ssize_t lwip_recv(int s, void *mem, size_t len, int flags); +ssize_t lwip_read(int s, void *mem, size_t len); +ssize_t lwip_readv(int s, const struct iovec *iov, int iovcnt); +ssize_t lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen); +ssize_t lwip_recvmsg(int s, struct msghdr *message, int flags); +ssize_t lwip_send(int s, const void *dataptr, size_t size, int flags); +ssize_t lwip_sendmsg(int s, const struct msghdr *message, int flags); +ssize_t lwip_sendto(int s, const void *dataptr, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen); +int lwip_socket(int domain, int type, int protocol); +ssize_t lwip_write(int s, const void *dataptr, size_t size); +ssize_t lwip_writev(int s, const struct iovec *iov, int iovcnt); +#if LWIP_SOCKET_SELECT +int lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout); +#endif +#if LWIP_SOCKET_POLL +int lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout); +#endif +int lwip_ioctl(int s, long cmd, void *argp); +int lwip_fcntl(int s, int cmd, int val); +const char *lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size); +int lwip_inet_pton(int af, const char *src, void *dst); + +#if LWIP_COMPAT_SOCKETS +#if LWIP_COMPAT_SOCKETS != 2 +/** @ingroup socket */ +#define accept(s,addr,addrlen) lwip_accept(s,addr,addrlen) +/** @ingroup socket */ +#define 
bind(s,name,namelen) lwip_bind(s,name,namelen) +/** @ingroup socket */ +#define shutdown(s,how) lwip_shutdown(s,how) +/** @ingroup socket */ +#define getpeername(s,name,namelen) lwip_getpeername(s,name,namelen) +/** @ingroup socket */ +#define getsockname(s,name,namelen) lwip_getsockname(s,name,namelen) +/** @ingroup socket */ +#define setsockopt(s,level,optname,opval,optlen) lwip_setsockopt(s,level,optname,opval,optlen) +/** @ingroup socket */ +#define getsockopt(s,level,optname,opval,optlen) lwip_getsockopt(s,level,optname,opval,optlen) +/** @ingroup socket */ +#define closesocket(s) lwip_close(s) +/** @ingroup socket */ +#define connect(s,name,namelen) lwip_connect(s,name,namelen) +/** @ingroup socket */ +#define listen(s,backlog) lwip_listen(s,backlog) +/** @ingroup socket */ +#define recv(s,mem,len,flags) lwip_recv(s,mem,len,flags) +/** @ingroup socket */ +#define recvmsg(s,message,flags) lwip_recvmsg(s,message,flags) +/** @ingroup socket */ +#define recvfrom(s,mem,len,flags,from,fromlen) lwip_recvfrom(s,mem,len,flags,from,fromlen) +/** @ingroup socket */ +#define send(s,dataptr,size,flags) lwip_send(s,dataptr,size,flags) +/** @ingroup socket */ +#define sendmsg(s,message,flags) lwip_sendmsg(s,message,flags) +/** @ingroup socket */ +#define sendto(s,dataptr,size,flags,to,tolen) lwip_sendto(s,dataptr,size,flags,to,tolen) +/** @ingroup socket */ +#define socket(domain,type,protocol) lwip_socket(domain,type,protocol) +#if LWIP_SOCKET_SELECT +/** @ingroup socket */ +#define select(maxfdp1,readset,writeset,exceptset,timeout) lwip_select(maxfdp1,readset,writeset,exceptset,timeout) +#endif +#if LWIP_SOCKET_POLL +/** @ingroup socket */ +#define poll(fds,nfds,timeout) lwip_poll(fds,nfds,timeout) +#endif +/** @ingroup socket */ +#define ioctlsocket(s,cmd,argp) lwip_ioctl(s,cmd,argp) +/** @ingroup socket */ +#define inet_ntop(af,src,dst,size) lwip_inet_ntop(af,src,dst,size) +/** @ingroup socket */ +#define inet_pton(af,src,dst) lwip_inet_pton(af,src,dst) + +#if LWIP_POSIX_SOCKETS_IO_NAMES +/** @ingroup socket */ +#define read(s,mem,len) lwip_read(s,mem,len) +/** @ingroup socket */ +#define readv(s,iov,iovcnt) lwip_readv(s,iov,iovcnt) +/** @ingroup socket */ +#define write(s,dataptr,len) lwip_write(s,dataptr,len) +/** @ingroup socket */ +#define writev(s,iov,iovcnt) lwip_writev(s,iov,iovcnt) +/** @ingroup socket */ +#define close(s) lwip_close(s) +/** @ingroup socket */ +#define fcntl(s,cmd,val) lwip_fcntl(s,cmd,val) +/** @ingroup socket */ +#define ioctl(s,cmd,argp) lwip_ioctl(s,cmd,argp) +#endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ +#endif /* LWIP_COMPAT_SOCKETS != 2 */ + +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_SOCKETS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h new file mode 100644 index 000000000..b570dbacf --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h @@ -0,0 +1,491 @@ +/** + * @file + * Statistics API (to be used from TCPIP thread) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_STATS_H +#define LWIP_HDR_STATS_H + +#include "lwip/opt.h" + +#include "lwip/mem.h" +#include "lwip/memp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_STATS + +#ifndef LWIP_STATS_LARGE +#define LWIP_STATS_LARGE 0 +#endif + +#if LWIP_STATS_LARGE +#define STAT_COUNTER u32_t +#define STAT_COUNTER_F U32_F +#else +#define STAT_COUNTER u16_t +#define STAT_COUNTER_F U16_F +#endif + +/** Protocol related stats */ +struct stats_proto { + STAT_COUNTER xmit; /* Transmitted packets. */ + STAT_COUNTER recv; /* Received packets. */ + STAT_COUNTER fw; /* Forwarded packets. */ + STAT_COUNTER drop; /* Dropped packets. */ + STAT_COUNTER chkerr; /* Checksum error. */ + STAT_COUNTER lenerr; /* Invalid length error. */ + STAT_COUNTER memerr; /* Out of memory error. */ + STAT_COUNTER rterr; /* Routing error. */ + STAT_COUNTER proterr; /* Protocol error. */ + STAT_COUNTER opterr; /* Error in options. */ + STAT_COUNTER err; /* Misc error. */ + STAT_COUNTER cachehit; +}; + +/** IGMP stats */ +struct stats_igmp { + STAT_COUNTER xmit; /* Transmitted packets. */ + STAT_COUNTER recv; /* Received packets. */ + STAT_COUNTER drop; /* Dropped packets. */ + STAT_COUNTER chkerr; /* Checksum error. */ + STAT_COUNTER lenerr; /* Invalid length error. */ + STAT_COUNTER memerr; /* Out of memory error. */ + STAT_COUNTER proterr; /* Protocol error. */ + STAT_COUNTER rx_v1; /* Received v1 frames. */ + STAT_COUNTER rx_group; /* Received group-specific queries. */ + STAT_COUNTER rx_general; /* Received general queries. */ + STAT_COUNTER rx_report; /* Received reports. */ + STAT_COUNTER tx_join; /* Sent joins. */ + STAT_COUNTER tx_leave; /* Sent leaves. */ + STAT_COUNTER tx_report; /* Sent reports. 
*/ +}; + +/** Memory stats */ +struct stats_mem { +#if defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY + const char *name; +#endif /* defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY */ + STAT_COUNTER err; + mem_size_t avail; + mem_size_t used; + mem_size_t max; + STAT_COUNTER illegal; +}; + +/** System element stats */ +struct stats_syselem { + STAT_COUNTER used; + STAT_COUNTER max; + STAT_COUNTER err; +}; + +/** System stats */ +struct stats_sys { + struct stats_syselem sem; + struct stats_syselem mutex; + struct stats_syselem mbox; +}; + +/** SNMP MIB2 stats */ +struct stats_mib2 { + /* IP */ + u32_t ipinhdrerrors; + u32_t ipinaddrerrors; + u32_t ipinunknownprotos; + u32_t ipindiscards; + u32_t ipindelivers; + u32_t ipoutrequests; + u32_t ipoutdiscards; + u32_t ipoutnoroutes; + u32_t ipreasmoks; + u32_t ipreasmfails; + u32_t ipfragoks; + u32_t ipfragfails; + u32_t ipfragcreates; + u32_t ipreasmreqds; + u32_t ipforwdatagrams; + u32_t ipinreceives; + + /* TCP */ + u32_t tcpactiveopens; + u32_t tcppassiveopens; + u32_t tcpattemptfails; + u32_t tcpestabresets; + u32_t tcpoutsegs; + u32_t tcpretranssegs; + u32_t tcpinsegs; + u32_t tcpinerrs; + u32_t tcpoutrsts; + + /* UDP */ + u32_t udpindatagrams; + u32_t udpnoports; + u32_t udpinerrors; + u32_t udpoutdatagrams; + + /* ICMP */ + u32_t icmpinmsgs; + u32_t icmpinerrors; + u32_t icmpindestunreachs; + u32_t icmpintimeexcds; + u32_t icmpinparmprobs; + u32_t icmpinsrcquenchs; + u32_t icmpinredirects; + u32_t icmpinechos; + u32_t icmpinechoreps; + u32_t icmpintimestamps; + u32_t icmpintimestampreps; + u32_t icmpinaddrmasks; + u32_t icmpinaddrmaskreps; + u32_t icmpoutmsgs; + u32_t icmpouterrors; + u32_t icmpoutdestunreachs; + u32_t icmpouttimeexcds; + u32_t icmpoutechos; /* can be incremented by user application ('ping') */ + u32_t icmpoutechoreps; +}; + +/** + * @ingroup netif_mib2 + * SNMP MIB2 interface stats + */ +struct stats_mib2_netif_ctrs { + /** The total number of octets received on the interface, including framing characters */ + u32_t ifinoctets; + /** The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were + * not addressed to a multicast or broadcast address at this sub-layer */ + u32_t ifinucastpkts; + /** The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were + * addressed to a multicast or broadcast address at this sub-layer */ + u32_t ifinnucastpkts; + /** The number of inbound packets which were chosen to be discarded even though no errors had + * been detected to prevent their being deliverable to a higher-layer protocol. One possible + * reason for discarding such a packet could be to free up buffer space */ + u32_t ifindiscards; + /** For packet-oriented interfaces, the number of inbound packets that contained errors + * preventing them from being deliverable to a higher-layer protocol. For character- + * oriented or fixed-length interfaces, the number of inbound transmission units that + * contained errors preventing them from being deliverable to a higher-layer protocol. */ + u32_t ifinerrors; + /** For packet-oriented interfaces, the number of packets received via the interface which + * were discarded because of an unknown or unsupported protocol. For character-oriented + * or fixed-length interfaces that support protocol multiplexing the number of transmission + * units received via the interface which were discarded because of an unknown or unsupported + * protocol. 
For any interface that does not support protocol multiplexing, this counter will + * always be 0 */ + u32_t ifinunknownprotos; + /** The total number of octets transmitted out of the interface, including framing characters. */ + u32_t ifoutoctets; + /** The total number of packets that higher-level protocols requested be transmitted, and + * which were not addressed to a multicast or broadcast address at this sub-layer, including + * those that were discarded or not sent. */ + u32_t ifoutucastpkts; + /** The total number of packets that higher-level protocols requested be transmitted, and which + * were addressed to a multicast or broadcast address at this sub-layer, including + * those that were discarded or not sent. */ + u32_t ifoutnucastpkts; + /** The number of outbound packets which were chosen to be discarded even though no errors had + * been detected to prevent their being transmitted. One possible reason for discarding + * such a packet could be to free up buffer space. */ + u32_t ifoutdiscards; + /** For packet-oriented interfaces, the number of outbound packets that could not be transmitted + * because of errors. For character-oriented or fixed-length interfaces, the number of outbound + * transmission units that could not be transmitted because of errors. */ + u32_t ifouterrors; +}; + +/** lwIP stats container */ +struct stats_ { +#if LINK_STATS + /** Link level */ + struct stats_proto link; +#endif +#if ETHARP_STATS + /** ARP */ + struct stats_proto etharp; +#endif +#if IPFRAG_STATS + /** Fragmentation */ + struct stats_proto ip_frag; +#endif +#if IP_STATS + /** IP */ + struct stats_proto ip; +#endif +#if ICMP_STATS + /** ICMP */ + struct stats_proto icmp; +#endif +#if IGMP_STATS + /** IGMP */ + struct stats_igmp igmp; +#endif +#if UDP_STATS + /** UDP */ + struct stats_proto udp; +#endif +#if TCP_STATS + /** TCP */ + struct stats_proto tcp; +#endif +#if MEM_STATS + /** Heap */ + struct stats_mem mem; +#endif +#if MEMP_STATS + /** Internal memory pools */ + struct stats_mem *memp[MEMP_MAX]; +#endif +#if SYS_STATS + /** System */ + struct stats_sys sys; +#endif +#if IP6_STATS + /** IPv6 */ + struct stats_proto ip6; +#endif +#if ICMP6_STATS + /** ICMP6 */ + struct stats_proto icmp6; +#endif +#if IP6_FRAG_STATS + /** IPv6 fragmentation */ + struct stats_proto ip6_frag; +#endif +#if MLD6_STATS + /** Multicast listener discovery */ + struct stats_igmp mld6; +#endif +#if ND6_STATS + /** Neighbor discovery */ + struct stats_proto nd6; +#endif +#if MIB2_STATS + /** SNMP MIB2 */ + struct stats_mib2 mib2; +#endif +}; + +/** Global variable containing lwIP internal statistics. Add this to your debugger's watchlist. 
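 * For example, with LWIP_STATS, TCP_STATS and LWIP_STATS_DISPLAY enabled in
 * lwipopts.h, a debug hook running in the TCPIP thread might do:
 *   stats_display_proto(&lwip_stats.tcp, "TCP");     // print all TCP counters
 *   STAT_COUNTER tcp_dropped = lwip_stats.tcp.drop;  // or read a single one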
*/ +extern struct stats_ lwip_stats; + +/** Init statistics */ +void stats_init(void); + +#define STATS_INC(x) ++lwip_stats.x +#define STATS_DEC(x) --lwip_stats.x +#define STATS_INC_USED(x, y, type) do { lwip_stats.x.used = (type)(lwip_stats.x.used + y); \ + if (lwip_stats.x.max < lwip_stats.x.used) { \ + lwip_stats.x.max = lwip_stats.x.used; \ + } \ + } while(0) +#define STATS_GET(x) lwip_stats.x +#else /* LWIP_STATS */ +#define stats_init() +#define STATS_INC(x) +#define STATS_DEC(x) +#define STATS_INC_USED(x, y, type) +#endif /* LWIP_STATS */ + +#if TCP_STATS +#define TCP_STATS_INC(x) STATS_INC(x) +#define TCP_STATS_DISPLAY() stats_display_proto(&lwip_stats.tcp, "TCP") +#else +#define TCP_STATS_INC(x) +#define TCP_STATS_DISPLAY() +#endif + +#if UDP_STATS +#define UDP_STATS_INC(x) STATS_INC(x) +#define UDP_STATS_DISPLAY() stats_display_proto(&lwip_stats.udp, "UDP") +#else +#define UDP_STATS_INC(x) +#define UDP_STATS_DISPLAY() +#endif + +#if ICMP_STATS +#define ICMP_STATS_INC(x) STATS_INC(x) +#define ICMP_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp, "ICMP") +#else +#define ICMP_STATS_INC(x) +#define ICMP_STATS_DISPLAY() +#endif + +#if IGMP_STATS +#define IGMP_STATS_INC(x) STATS_INC(x) +#define IGMP_STATS_DISPLAY() stats_display_igmp(&lwip_stats.igmp, "IGMP") +#else +#define IGMP_STATS_INC(x) +#define IGMP_STATS_DISPLAY() +#endif + +#if IP_STATS +#define IP_STATS_INC(x) STATS_INC(x) +#define IP_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip, "IP") +#else +#define IP_STATS_INC(x) +#define IP_STATS_DISPLAY() +#endif + +#if IPFRAG_STATS +#define IPFRAG_STATS_INC(x) STATS_INC(x) +#define IPFRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip_frag, "IP_FRAG") +#else +#define IPFRAG_STATS_INC(x) +#define IPFRAG_STATS_DISPLAY() +#endif + +#if ETHARP_STATS +#define ETHARP_STATS_INC(x) STATS_INC(x) +#define ETHARP_STATS_DISPLAY() stats_display_proto(&lwip_stats.etharp, "ETHARP") +#else +#define ETHARP_STATS_INC(x) +#define ETHARP_STATS_DISPLAY() +#endif + +#if LINK_STATS +#define LINK_STATS_INC(x) STATS_INC(x) +#define LINK_STATS_DISPLAY() stats_display_proto(&lwip_stats.link, "LINK") +#else +#define LINK_STATS_INC(x) +#define LINK_STATS_DISPLAY() +#endif + +#if MEM_STATS +#define MEM_STATS_AVAIL(x, y) lwip_stats.mem.x = y +#define MEM_STATS_INC(x) STATS_INC(mem.x) +#define MEM_STATS_INC_USED(x, y) STATS_INC_USED(mem, y, mem_size_t) +#define MEM_STATS_DEC_USED(x, y) lwip_stats.mem.x = (mem_size_t)((lwip_stats.mem.x) - (y)) +#define MEM_STATS_DISPLAY() stats_display_mem(&lwip_stats.mem, "HEAP") +#else +#define MEM_STATS_AVAIL(x, y) +#define MEM_STATS_INC(x) +#define MEM_STATS_INC_USED(x, y) +#define MEM_STATS_DEC_USED(x, y) +#define MEM_STATS_DISPLAY() +#endif + + #if MEMP_STATS +#define MEMP_STATS_DEC(x, i) STATS_DEC(memp[i]->x) +#define MEMP_STATS_DISPLAY(i) stats_display_memp(lwip_stats.memp[i], i) +#define MEMP_STATS_GET(x, i) STATS_GET(memp[i]->x) + #else +#define MEMP_STATS_DEC(x, i) +#define MEMP_STATS_DISPLAY(i) +#define MEMP_STATS_GET(x, i) 0 +#endif + +#if SYS_STATS +#define SYS_STATS_INC(x) STATS_INC(sys.x) +#define SYS_STATS_DEC(x) STATS_DEC(sys.x) +#define SYS_STATS_INC_USED(x) STATS_INC_USED(sys.x, 1, STAT_COUNTER) +#define SYS_STATS_DISPLAY() stats_display_sys(&lwip_stats.sys) +#else +#define SYS_STATS_INC(x) +#define SYS_STATS_DEC(x) +#define SYS_STATS_INC_USED(x) +#define SYS_STATS_DISPLAY() +#endif + +#if IP6_STATS +#define IP6_STATS_INC(x) STATS_INC(x) +#define IP6_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip6, "IPv6") +#else +#define IP6_STATS_INC(x) 
+#define IP6_STATS_DISPLAY() +#endif + +#if ICMP6_STATS +#define ICMP6_STATS_INC(x) STATS_INC(x) +#define ICMP6_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp6, "ICMPv6") +#else +#define ICMP6_STATS_INC(x) +#define ICMP6_STATS_DISPLAY() +#endif + +#if IP6_FRAG_STATS +#define IP6_FRAG_STATS_INC(x) STATS_INC(x) +#define IP6_FRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip6_frag, "IPv6 FRAG") +#else +#define IP6_FRAG_STATS_INC(x) +#define IP6_FRAG_STATS_DISPLAY() +#endif + +#if MLD6_STATS +#define MLD6_STATS_INC(x) STATS_INC(x) +#define MLD6_STATS_DISPLAY() stats_display_igmp(&lwip_stats.mld6, "MLDv1") +#else +#define MLD6_STATS_INC(x) +#define MLD6_STATS_DISPLAY() +#endif + +#if ND6_STATS +#define ND6_STATS_INC(x) STATS_INC(x) +#define ND6_STATS_DISPLAY() stats_display_proto(&lwip_stats.nd6, "ND") +#else +#define ND6_STATS_INC(x) +#define ND6_STATS_DISPLAY() +#endif + +#if MIB2_STATS +#define MIB2_STATS_INC(x) STATS_INC(x) +#else +#define MIB2_STATS_INC(x) +#endif + +/* Display of statistics */ +#if LWIP_STATS_DISPLAY +void stats_display(void); +void stats_display_proto(struct stats_proto *proto, const char *name); +void stats_display_igmp(struct stats_igmp *igmp, const char *name); +void stats_display_mem(struct stats_mem *mem, const char *name); +void stats_display_memp(struct stats_mem *mem, int index); +void stats_display_sys(struct stats_sys *sys); +#else /* LWIP_STATS_DISPLAY */ +#define stats_display() +#define stats_display_proto(proto, name) +#define stats_display_igmp(igmp, name) +#define stats_display_mem(mem, name) +#define stats_display_memp(mem, index) +#define stats_display_sys(sys) +#endif /* LWIP_STATS_DISPLAY */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_STATS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h new file mode 100644 index 000000000..168e465ba --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h @@ -0,0 +1,560 @@ +/** + * @file + * OS abstraction layer + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#ifndef LWIP_HDR_SYS_H +#define LWIP_HDR_SYS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if NO_SYS + +/* For a totally minimal and standalone system, we provide null + definitions of the sys_ functions. */ +typedef u8_t sys_sem_t; +typedef u8_t sys_mutex_t; +typedef u8_t sys_mbox_t; + +#define sys_sem_new(s, c) ERR_OK +#define sys_sem_signal(s) +#define sys_sem_wait(s) +#define sys_arch_sem_wait(s,t) +#define sys_sem_free(s) +#define sys_sem_valid(s) 0 +#define sys_sem_valid_val(s) 0 +#define sys_sem_set_invalid(s) +#define sys_sem_set_invalid_val(s) +#define sys_mutex_new(mu) ERR_OK +#define sys_mutex_lock(mu) +#define sys_mutex_unlock(mu) +#define sys_mutex_free(mu) +#define sys_mutex_valid(mu) 0 +#define sys_mutex_set_invalid(mu) +#define sys_mbox_new(m, s) ERR_OK +#define sys_mbox_fetch(m,d) +#define sys_mbox_tryfetch(m,d) +#define sys_mbox_post(m,d) +#define sys_mbox_trypost(m,d) +#define sys_mbox_free(m) +#define sys_mbox_valid(m) +#define sys_mbox_valid_val(m) +#define sys_mbox_set_invalid(m) +#define sys_mbox_set_invalid_val(m) + +#define sys_thread_new(n,t,a,s,p) + +#define sys_msleep(t) + +#else /* NO_SYS */ + +/** Return code for timeouts from sys_arch_mbox_fetch and sys_arch_sem_wait */ +#define SYS_ARCH_TIMEOUT 0xffffffffUL + +/** sys_mbox_tryfetch() returns SYS_MBOX_EMPTY if appropriate. + * For now we use the same magic value, but we allow this to change in future. + */ +#define SYS_MBOX_EMPTY SYS_ARCH_TIMEOUT + +#include "lwip/err.h" +#include "arch/sys_arch.h" + +/** Function prototype for thread functions */ +typedef void (*lwip_thread_fn)(void *arg); + +/* Function prototypes for functions to be implemented by platform ports + (in sys_arch.c) */ + +/* Mutex functions: */ + +/** Define LWIP_COMPAT_MUTEX if the port has no mutexes and binary semaphores + should be used instead */ +#ifndef LWIP_COMPAT_MUTEX +#define LWIP_COMPAT_MUTEX 0 +#endif + +#if LWIP_COMPAT_MUTEX +/* for old ports that don't have mutexes: define them to binary semaphores */ +#define sys_mutex_t sys_sem_t +#define sys_mutex_new(mutex) sys_sem_new(mutex, 1) +#define sys_mutex_lock(mutex) sys_sem_wait(mutex) +#define sys_mutex_unlock(mutex) sys_sem_signal(mutex) +#define sys_mutex_free(mutex) sys_sem_free(mutex) +#define sys_mutex_valid(mutex) sys_sem_valid(mutex) +#define sys_mutex_set_invalid(mutex) sys_sem_set_invalid(mutex) + +#else /* LWIP_COMPAT_MUTEX */ + +/** + * @ingroup sys_mutex + * Create a new mutex. + * Note that mutexes are expected to not be taken recursively by the lwIP code, + * so both implementation types (recursive or non-recursive) should work. + * The mutex is allocated to the memory that 'mutex' + * points to (which can be both a pointer or the actual OS structure). + * If the mutex has been created, ERR_OK should be returned. 
Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param mutex pointer to the mutex to create + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_mutex_new(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Blocks the thread until the mutex can be grabbed. + * @param mutex the mutex to lock + */ +void sys_mutex_lock(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Releases the mutex previously locked through 'sys_mutex_lock()'. + * @param mutex the mutex to unlock + */ +void sys_mutex_unlock(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Deallocates a mutex. + * @param mutex the mutex to delete + */ +void sys_mutex_free(sys_mutex_t *mutex); +#ifndef sys_mutex_valid +/** + * @ingroup sys_mutex + * Returns 1 if the mutes is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. + */ +int sys_mutex_valid(sys_mutex_t *mutex); +#endif +#ifndef sys_mutex_set_invalid +/** + * @ingroup sys_mutex + * Invalidate a mutex so that sys_mutex_valid() returns 0. + * ATTENTION: This does NOT mean that the mutex shall be deallocated: + * sys_mutex_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. + */ +void sys_mutex_set_invalid(sys_mutex_t *mutex); +#endif +#endif /* LWIP_COMPAT_MUTEX */ + +/* Semaphore functions: */ + +/** + * @ingroup sys_sem + * Create a new semaphore + * Creates a new semaphore. The semaphore is allocated to the memory that 'sem' + * points to (which can be both a pointer or the actual OS structure). + * The "count" argument specifies the initial state of the semaphore (which is + * either 0 or 1). + * If the semaphore has been created, ERR_OK should be returned. Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param sem pointer to the semaphore to create + * @param count initial count of the semaphore + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_sem_new(sys_sem_t *sem, u8_t count); +/** + * @ingroup sys_sem + * Signals a semaphore + * @param sem the semaphore to signal + */ +void sys_sem_signal(sys_sem_t *sem); +/** + * @ingroup sys_sem + * Blocks the thread while waiting for the semaphore to be signaled. If the + * "timeout" argument is non-zero, the thread should only be blocked for the + * specified time (measured in milliseconds). If the "timeout" argument is zero, + * the thread should be blocked until the semaphore is signalled. + * + * The return value is SYS_ARCH_TIMEOUT if the semaphore wasn't signaled within + * the specified time or any other value if it was signaled (with or without + * waiting). + * Notice that lwIP implements a function with a similar name, + * sys_sem_wait(), that uses the sys_arch_sem_wait() function. + * + * @param sem the semaphore to wait for + * @param timeout timeout in milliseconds to wait (0 = wait forever) + * @return SYS_ARCH_TIMEOUT on timeout, any other value on success + */ +u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout); +/** + * @ingroup sys_sem + * Deallocates a semaphore. 
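 * As an illustrative sketch (event_sem is a name used only for this example),
 * a binary semaphore signalled elsewhere could be waited on for at most 50 ms:
 *   sys_sem_t event_sem;
 *   if (sys_sem_new(&event_sem, 0) == ERR_OK) {
 *     if (sys_arch_sem_wait(&event_sem, 50) != SYS_ARCH_TIMEOUT) {
 *       // the semaphore was signalled within 50 ms
 *     }
 *     sys_sem_free(&event_sem);
 *   }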
+ * @param sem semaphore to delete + */ +void sys_sem_free(sys_sem_t *sem); +/** Wait for a semaphore - forever/no timeout */ +#define sys_sem_wait(sem) sys_arch_sem_wait(sem, 0) +#ifndef sys_sem_valid +/** + * @ingroup sys_sem + * Returns 1 if the semaphore is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. + */ +int sys_sem_valid(sys_sem_t *sem); +#endif +#ifndef sys_sem_set_invalid +/** + * @ingroup sys_sem + * Invalidate a semaphore so that sys_sem_valid() returns 0. + * ATTENTION: This does NOT mean that the semaphore shall be deallocated: + * sys_sem_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. + */ +void sys_sem_set_invalid(sys_sem_t *sem); +#endif +#ifndef sys_sem_valid_val +/** + * Same as sys_sem_valid() but taking a value, not a pointer + */ +#define sys_sem_valid_val(sem) sys_sem_valid(&(sem)) +#endif +#ifndef sys_sem_set_invalid_val +/** + * Same as sys_sem_set_invalid() but taking a value, not a pointer + */ +#define sys_sem_set_invalid_val(sem) sys_sem_set_invalid(&(sem)) +#endif + +#ifndef sys_msleep +/** + * @ingroup sys_misc + * Sleep for specified number of ms + */ +void sys_msleep(u32_t ms); /* only has a (close to) 1 ms resolution. */ +#endif + +/* Mailbox functions. */ + +/** + * @ingroup sys_mbox + * Creates an empty mailbox for maximum "size" elements. Elements stored + * in mailboxes are pointers. You have to define macros "_MBOX_SIZE" + * in your lwipopts.h, or ignore this parameter in your implementation + * and use a default size. + * If the mailbox has been created, ERR_OK should be returned. Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param mbox pointer to the mbox to create + * @param size (minimum) number of messages in this mbox + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_mbox_new(sys_mbox_t *mbox, int size); +/** + * @ingroup sys_mbox + * Post a message to an mbox - may not fail + * -> blocks if full, only to be used from tasks NOT from ISR! + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +void sys_mbox_post(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Try to post a message to an mbox - may fail if full. + * Can be used from ISR (if the sys arch layer allows this). + * Returns ERR_MEM if it is full, else, ERR_OK if the "msg" is posted. + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Try to post a message to an mbox - may fail if full. + * To be be used from ISR. + * Returns ERR_MEM if it is full, else, ERR_OK if the "msg" is posted. + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +err_t sys_mbox_trypost_fromisr(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Blocks the thread until a message arrives in the mailbox, but does + * not block the thread longer than "timeout" milliseconds (similar to + * the sys_arch_sem_wait() function). If "timeout" is 0, the thread should + * be blocked until a message arrives. 
The "msg" argument is a result + * parameter that is set by the function (i.e., by doing "*msg = + * ptr"). The "msg" parameter maybe NULL to indicate that the message + * should be dropped. + * The return values are the same as for the sys_arch_sem_wait() function: + * SYS_ARCH_TIMEOUT if there was a timeout, any other value if a messages + * is received. + * + * Note that a function with a similar name, sys_mbox_fetch(), is + * implemented by lwIP. + * + * @param mbox mbox to get a message from + * @param msg pointer where the message is stored + * @param timeout maximum time (in milliseconds) to wait for a message (0 = wait forever) + * @return SYS_ARCH_TIMEOUT on timeout, any other value if a message has been received + */ +u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout); +/* Allow port to override with a macro, e.g. special timeout for sys_arch_mbox_fetch() */ +#ifndef sys_arch_mbox_tryfetch +/** + * @ingroup sys_mbox + * This is similar to sys_arch_mbox_fetch, however if a message is not + * present in the mailbox, it immediately returns with the code + * SYS_MBOX_EMPTY. On success 0 is returned. + * To allow for efficient implementations, this can be defined as a + * function-like macro in sys_arch.h instead of a normal function. For + * example, a naive implementation could be: + * \#define sys_arch_mbox_tryfetch(mbox,msg) sys_arch_mbox_fetch(mbox,msg,1) + * although this would introduce unnecessary delays. + * + * @param mbox mbox to get a message from + * @param msg pointer where the message is stored + * @return 0 (milliseconds) if a message has been received + * or SYS_MBOX_EMPTY if the mailbox is empty + */ +u32_t sys_arch_mbox_tryfetch(sys_mbox_t *mbox, void **msg); +#endif +/** + * For now, we map straight to sys_arch implementation. + */ +#define sys_mbox_tryfetch(mbox, msg) sys_arch_mbox_tryfetch(mbox, msg) +/** + * @ingroup sys_mbox + * Deallocates a mailbox. If there are messages still present in the + * mailbox when the mailbox is deallocated, it is an indication of a + * programming error in lwIP and the developer should be notified. + * + * @param mbox mbox to delete + */ +void sys_mbox_free(sys_mbox_t *mbox); +#define sys_mbox_fetch(mbox, msg) sys_arch_mbox_fetch(mbox, msg, 0) +#ifndef sys_mbox_valid +/** + * @ingroup sys_mbox + * Returns 1 if the mailbox is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. + */ +int sys_mbox_valid(sys_mbox_t *mbox); +#endif +#ifndef sys_mbox_set_invalid +/** + * @ingroup sys_mbox + * Invalidate a mailbox so that sys_mbox_valid() returns 0. + * ATTENTION: This does NOT mean that the mailbox shall be deallocated: + * sys_mbox_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. 
+ */ +void sys_mbox_set_invalid(sys_mbox_t *mbox); +#endif +#ifndef sys_mbox_valid_val +/** + * Same as sys_mbox_valid() but taking a value, not a pointer + */ +#define sys_mbox_valid_val(mbox) sys_mbox_valid(&(mbox)) +#endif +#ifndef sys_mbox_set_invalid_val +/** + * Same as sys_mbox_set_invalid() but taking a value, not a pointer + */ +#define sys_mbox_set_invalid_val(mbox) sys_mbox_set_invalid(&(mbox)) +#endif + + +/** + * @ingroup sys_misc + * The only thread function: + * Starts a new thread named "name" with priority "prio" that will begin its + * execution in the function "thread()". The "arg" argument will be passed as an + * argument to the thread() function. The stack size to used for this thread is + * the "stacksize" parameter. The id of the new thread is returned. Both the id + * and the priority are system dependent. + * ATTENTION: although this function returns a value, it MUST NOT FAIL (ports have to assert this!) + * + * @param name human-readable name for the thread (used for debugging purposes) + * @param thread thread-function + * @param arg parameter passed to 'thread' + * @param stacksize stack size in bytes for the new thread (may be ignored by ports) + * @param prio priority of the new thread (may be ignored by ports) */ +sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg, int stacksize, int prio); + +#endif /* NO_SYS */ + +/** + * @ingroup sys_misc + * sys_init() must be called before anything else. + * Initialize the sys_arch layer. + */ +void sys_init(void); + +#ifndef sys_jiffies +/** + * Ticks/jiffies since power up. + */ +u32_t sys_jiffies(void); +#endif + +/** + * @ingroup sys_time + * Returns the current time in milliseconds, + * may be the same as sys_jiffies or at least based on it. + * Don't care for wraparound, this is only used for time diffs. + * Not implementing this function means you cannot use some modules (e.g. TCP + * timestamps, internal timeouts for NO_SYS==1). + */ +u32_t sys_now(void); + +/* Critical Region Protection */ +/* These functions must be implemented in the sys_arch.c file. + In some implementations they can provide a more light-weight protection + mechanism than using semaphores. Otherwise semaphores can be used for + implementation */ +#ifndef SYS_ARCH_PROTECT +/** SYS_LIGHTWEIGHT_PROT + * define SYS_LIGHTWEIGHT_PROT in lwipopts.h if you want inter-task protection + * for certain critical regions during buffer allocation, deallocation and memory + * allocation and deallocation. + */ +#if SYS_LIGHTWEIGHT_PROT + +/** + * @ingroup sys_prot + * SYS_ARCH_DECL_PROTECT + * declare a protection variable. This macro will default to defining a variable of + * type sys_prot_t. If a particular port needs a different implementation, then + * this macro may be defined in sys_arch.h. + */ +#define SYS_ARCH_DECL_PROTECT(lev) sys_prot_t lev +/** + * @ingroup sys_prot + * SYS_ARCH_PROTECT + * Perform a "fast" protect. This could be implemented by + * disabling interrupts for an embedded system or by using a semaphore or + * mutex. The implementation should allow calling SYS_ARCH_PROTECT when + * already protected. The old protection level is returned in the variable + * "lev". This macro will default to calling the sys_arch_protect() function + * which should be implemented in sys_arch.c. 
If a particular port needs a + * different implementation, then this macro may be defined in sys_arch.h + */ +#define SYS_ARCH_PROTECT(lev) lev = sys_arch_protect() +/** + * @ingroup sys_prot + * SYS_ARCH_UNPROTECT + * Perform a "fast" set of the protection level to "lev". This could be + * implemented by setting the interrupt level to "lev" within the MACRO or by + * using a semaphore or mutex. This macro will default to calling the + * sys_arch_unprotect() function which should be implemented in + * sys_arch.c. If a particular port needs a different implementation, then + * this macro may be defined in sys_arch.h + */ +#define SYS_ARCH_UNPROTECT(lev) sys_arch_unprotect(lev) +sys_prot_t sys_arch_protect(void); +void sys_arch_unprotect(sys_prot_t pval); + +#else + +#define SYS_ARCH_DECL_PROTECT(lev) +#define SYS_ARCH_PROTECT(lev) +#define SYS_ARCH_UNPROTECT(lev) + +#endif /* SYS_LIGHTWEIGHT_PROT */ + +#endif /* SYS_ARCH_PROTECT */ + +/* + * Macros to set/get and increase/decrease variables in a thread-safe way. + * Use these for accessing variable that are used from more than one thread. + */ + +#ifndef SYS_ARCH_INC +#define SYS_ARCH_INC(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var += val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_INC */ + +#ifndef SYS_ARCH_DEC +#define SYS_ARCH_DEC(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var -= val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_DEC */ + +#ifndef SYS_ARCH_GET +#define SYS_ARCH_GET(var, ret) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + ret = var; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_GET */ + +#ifndef SYS_ARCH_SET +#define SYS_ARCH_SET(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var = val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_SET */ + +#ifndef SYS_ARCH_LOCKED +#define SYS_ARCH_LOCKED(code) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + code; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_LOCKED */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_SYS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h new file mode 100644 index 000000000..daf759946 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h @@ -0,0 +1,500 @@ +/** + * @file + * TCP API (to be used from TCPIP thread)\n + * See also @ref tcp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCP_H +#define LWIP_HDR_TCP_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpbase.h" +#include "lwip/mem.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/icmp.h" +#include "lwip/err.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct tcp_pcb; +struct tcp_pcb_listen; + +/** Function prototype for tcp accept callback functions. Called when a new + * connection can be accepted on a listening pcb. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param newpcb The new connection pcb + * @param err An error code if there has been an error accepting. + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_accept_fn)(void *arg, struct tcp_pcb *newpcb, err_t err); + +/** Function prototype for tcp receive callback functions. Called when data has + * been received. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb which received data + * @param p The received data (or NULL when the connection has been closed!) + * @param err An error code if there has been an error receiving + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_recv_fn)(void *arg, struct tcp_pcb *tpcb, + struct pbuf *p, err_t err); + +/** Function prototype for tcp sent callback functions. Called when sent data has + * been acknowledged by the remote side. Use it to free corresponding resources. + * This also means that the pcb has now space available to send new data. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb for which data has been acknowledged + * @param len The amount of bytes acknowledged + * @return ERR_OK: try to send some data by calling tcp_output + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_sent_fn)(void *arg, struct tcp_pcb *tpcb, + u16_t len); + +/** Function prototype for tcp poll callback functions. Called periodically as + * specified by @see tcp_poll. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb tcp pcb + * @return ERR_OK: try to send some data by calling tcp_output + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! 
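As a hedged illustration of the receive-callback contract documented above (the callback owns the pbuf, p == NULL signals a closed connection, and ERR_ABRT may only be returned after tcp_abort()), a minimal handler might look like the following sketch; it is not part of this header:

    static err_t example_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
    {
      LWIP_UNUSED_ARG(arg);
      if (p == NULL) {               /* remote side closed the connection */
        return tcp_close(tpcb);
      }
      if (err != ERR_OK) {           /* drop data delivered with an error */
        pbuf_free(p);
        return err;
      }
      /* ... consume p->payload / p->tot_len here ... */
      tcp_recved(tpcb, p->tot_len);  /* re-open the receive window */
      pbuf_free(p);                  /* the callback owns the pbuf */
      return ERR_OK;
    }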
+ */ +typedef err_t (*tcp_poll_fn)(void *arg, struct tcp_pcb *tpcb); + +/** Function prototype for tcp error callback functions. Called when the pcb + * receives a RST or is unexpectedly closed for any other reason. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param err Error code to indicate why the pcb has been closed + * ERR_ABRT: aborted through tcp_abort or by a TCP timer + * ERR_RST: the connection was reset by the remote host + */ +typedef void (*tcp_err_fn)(void *arg, err_t err); + +/** Function prototype for tcp connected callback functions. Called when a pcb + * is connected to the remote side after initiating a connection attempt by + * calling tcp_connect(). + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb which is connected + * @param err An unused error code, always ERR_OK currently ;-) @todo! + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + * + * @note When a connection attempt fails, the error callback is currently called! + */ +typedef err_t (*tcp_connected_fn)(void *arg, struct tcp_pcb *tpcb, err_t err); + +#if LWIP_WND_SCALE +#define RCV_WND_SCALE(pcb, wnd) (((wnd) >> (pcb)->rcv_scale)) +#define SND_WND_SCALE(pcb, wnd) (((wnd) << (pcb)->snd_scale)) +#define TCPWND16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) +#define TCP_WND_MAX(pcb) ((tcpwnd_size_t)(((pcb)->flags & TF_WND_SCALE) ? TCP_WND : TCPWND16(TCP_WND))) +#else +#define RCV_WND_SCALE(pcb, wnd) (wnd) +#define SND_WND_SCALE(pcb, wnd) (wnd) +#define TCPWND16(x) (x) +#define TCP_WND_MAX(pcb) TCP_WND +#endif +/* Increments a tcpwnd_size_t and holds at max value rather than rollover */ +#define TCP_WND_INC(wnd, inc) do { \ + if ((tcpwnd_size_t)(wnd + inc) >= wnd) { \ + wnd = (tcpwnd_size_t)(wnd + inc); \ + } else { \ + wnd = (tcpwnd_size_t)-1; \ + } \ + } while(0) + +#if LWIP_TCP_SACK_OUT +/** SACK ranges to include in ACK packets. + * SACK entry is invalid if left==right. */ +struct tcp_sack_range { + /** Left edge of the SACK: the first acknowledged sequence number. */ + u32_t left; + /** Right edge of the SACK: the last acknowledged sequence number +1 (so first NOT acknowledged). */ + u32_t right; +}; +#endif /* LWIP_TCP_SACK_OUT */ + +/** Function prototype for deallocation of arguments. Called *just before* the + * pcb is freed, so don't expect to be able to do anything with this pcb! 
+ * + * @param id ext arg id (allocated via @ref tcp_ext_arg_alloc_id) + * @param data pointer to the data (set via @ref tcp_ext_arg_set before) + */ +typedef void (*tcp_extarg_callback_pcb_destroyed_fn)(u8_t id, void *data); + +/** Function prototype to transition arguments from a listening pcb to an accepted pcb + * + * @param id ext arg id (allocated via @ref tcp_ext_arg_alloc_id) + * @param lpcb the listening pcb accepting a connection + * @param cpcb the newly allocated connection pcb + * @return ERR_OK if OK, any error if connection should be dropped + */ +typedef err_t (*tcp_extarg_callback_passive_open_fn)(u8_t id, struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb); + +/** A table of callback functions that is invoked for ext arguments */ +struct tcp_ext_arg_callbacks { + /** @ref tcp_extarg_callback_pcb_destroyed_fn */ + tcp_extarg_callback_pcb_destroyed_fn destroy; + /** @ref tcp_extarg_callback_passive_open_fn */ + tcp_extarg_callback_passive_open_fn passive_open; +}; + +#define LWIP_TCP_PCB_NUM_EXT_ARG_ID_INVALID 0xFF + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +/* This is the structure for ext args in tcp pcbs (used as array) */ +struct tcp_pcb_ext_args { + const struct tcp_ext_arg_callbacks *callbacks; + void *data; +}; +/* This is a helper define to prevent zero size arrays if disabled */ +#define TCP_PCB_EXTARGS struct tcp_pcb_ext_args ext_args[LWIP_TCP_PCB_NUM_EXT_ARGS]; +#else +#define TCP_PCB_EXTARGS +#endif + +typedef u16_t tcpflags_t; +#define TCP_ALLFLAGS 0xffffU + +/** + * members common to struct tcp_pcb and struct tcp_listen_pcb + */ +#define TCP_PCB_COMMON(type) \ + type *next; /* for the linked list */ \ + void *callback_arg; \ + TCP_PCB_EXTARGS \ + enum tcp_state state; /* TCP state */ \ + u8_t prio; \ + /* ports are in host byte order */ \ + u16_t local_port + + +/** the TCP protocol control block for listening pcbs */ +struct tcp_pcb_listen { +/** Common members of all PCB types */ + IP_PCB; +/** Protocol specific PCB members */ + TCP_PCB_COMMON(struct tcp_pcb_listen); + +#if LWIP_CALLBACK_API + /* Function to call when a listener has been connected. */ + tcp_accept_fn accept; +#endif /* LWIP_CALLBACK_API */ + +#if TCP_LISTEN_BACKLOG + u8_t backlog; + u8_t accepts_pending; +#endif /* TCP_LISTEN_BACKLOG */ +}; + + +/** the TCP protocol control block */ +struct tcp_pcb { +/** common PCB members */ + IP_PCB; +/** protocol specific PCB members */ + TCP_PCB_COMMON(struct tcp_pcb); + + /* ports are in host byte order */ + u16_t remote_port; + + tcpflags_t flags; +#define TF_ACK_DELAY 0x01U /* Delayed ACK. */ +#define TF_ACK_NOW 0x02U /* Immediate ACK. */ +#define TF_INFR 0x04U /* In fast recovery. */ +#define TF_CLOSEPEND 0x08U /* If this is set, tcp_close failed to enqueue the FIN (retried in tcp_tmr) */ +#define TF_RXCLOSED 0x10U /* rx closed by tcp_shutdown */ +#define TF_FIN 0x20U /* Connection was closed locally (FIN segment enqueued). 
*/ +#define TF_NODELAY 0x40U /* Disable Nagle algorithm */ +#define TF_NAGLEMEMERR 0x80U /* nagle enabled, memerr, try to output to prevent delayed ACK to happen */ +#if LWIP_WND_SCALE +#define TF_WND_SCALE 0x0100U /* Window Scale option enabled */ +#endif +#if TCP_LISTEN_BACKLOG +#define TF_BACKLOGPEND 0x0200U /* If this is set, a connection pcb has increased the backlog on its listener */ +#endif +#if LWIP_TCP_TIMESTAMPS +#define TF_TIMESTAMP 0x0400U /* Timestamp option enabled */ +#endif +#define TF_RTO 0x0800U /* RTO timer has fired, in-flight data moved to unsent and being retransmitted */ +#if LWIP_TCP_SACK_OUT +#define TF_SACK 0x1000U /* Selective ACKs enabled */ +#endif + + /* the rest of the fields are in host byte order + as we have to do some math with them */ + + /* Timers */ + u8_t polltmr, pollinterval; + u8_t last_timer; + u32_t tmr; + + /* receiver variables */ + u32_t rcv_nxt; /* next seqno expected */ + tcpwnd_size_t rcv_wnd; /* receiver window available */ + tcpwnd_size_t rcv_ann_wnd; /* receiver window to announce */ + u32_t rcv_ann_right_edge; /* announced right edge of window */ + +#if LWIP_TCP_SACK_OUT + /* SACK ranges to include in ACK packets (entry is invalid if left==right) */ + struct tcp_sack_range rcv_sacks[LWIP_TCP_MAX_SACK_NUM]; +#define LWIP_TCP_SACK_VALID(pcb, idx) ((pcb)->rcv_sacks[idx].left != (pcb)->rcv_sacks[idx].right) +#endif /* LWIP_TCP_SACK_OUT */ + + /* Retransmission timer. */ + s16_t rtime; + + u16_t mss; /* maximum segment size */ + + /* RTT (round trip time) estimation variables */ + u32_t rttest; /* RTT estimate in 500ms ticks */ + u32_t rtseq; /* sequence number being timed */ + s16_t sa, sv; /* @see "Congestion Avoidance and Control" by Van Jacobson and Karels */ + + s16_t rto; /* retransmission time-out (in ticks of TCP_SLOW_INTERVAL) */ + u8_t nrtx; /* number of retransmissions */ + + /* fast retransmit/recovery */ + u8_t dupacks; + u32_t lastack; /* Highest acknowledged seqno. */ + + /* congestion avoidance/control variables */ + tcpwnd_size_t cwnd; + tcpwnd_size_t ssthresh; + + /* first byte following last rto byte */ + u32_t rto_end; + + /* sender variables */ + u32_t snd_nxt; /* next new seqno to be sent */ + u32_t snd_wl1, snd_wl2; /* Sequence and acknowledgement numbers of last + window update. */ + u32_t snd_lbb; /* Sequence number of next byte to be buffered. */ + tcpwnd_size_t snd_wnd; /* sender window */ + tcpwnd_size_t snd_wnd_max; /* the maximum sender window announced by the remote host */ + + tcpwnd_size_t snd_buf; /* Available buffer space for sending (in bytes). */ +#define TCP_SNDQUEUELEN_OVERFLOW (0xffffU-3) + u16_t snd_queuelen; /* Number of pbufs currently in the send buffer. */ + +#if TCP_OVERSIZE + /* Extra bytes available at the end of the last pbuf in unsent. */ + u16_t unsent_oversize; +#endif /* TCP_OVERSIZE */ + + tcpwnd_size_t bytes_acked; + + /* These are ordered by sequence number: */ + struct tcp_seg *unsent; /* Unsent (queued) segments. */ + struct tcp_seg *unacked; /* Sent but unacknowledged segments. */ +#if TCP_QUEUE_OOSEQ + struct tcp_seg *ooseq; /* Received out of sequence segments. */ +#endif /* TCP_QUEUE_OOSEQ */ + + struct pbuf *refused_data; /* Data previously received but not yet taken by upper layer */ + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + struct tcp_pcb_listen* listener; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + +#if LWIP_CALLBACK_API + /* Function to be called when more send buffer space is available. 
*/ + tcp_sent_fn sent; + /* Function to be called when (in-sequence) data has arrived. */ + tcp_recv_fn recv; + /* Function to be called when a connection has been set up. */ + tcp_connected_fn connected; + /* Function which is called periodically. */ + tcp_poll_fn poll; + /* Function to be called whenever a fatal error occurs. */ + tcp_err_fn errf; +#endif /* LWIP_CALLBACK_API */ + +#if LWIP_TCP_TIMESTAMPS + u32_t ts_lastacksent; + u32_t ts_recent; +#endif /* LWIP_TCP_TIMESTAMPS */ + + /* idle time before KEEPALIVE is sent */ + u32_t keep_idle; +#if LWIP_TCP_KEEPALIVE + u32_t keep_intvl; + u32_t keep_cnt; +#endif /* LWIP_TCP_KEEPALIVE */ + + /* Persist timer counter */ + u8_t persist_cnt; + /* Persist timer back-off */ + u8_t persist_backoff; + /* Number of persist probes */ + u8_t persist_probe; + + /* KEEPALIVE counter */ + u8_t keep_cnt_sent; + +#if LWIP_WND_SCALE + u8_t snd_scale; + u8_t rcv_scale; +#endif +}; + +#if LWIP_EVENT_API + +enum lwip_event { + LWIP_EVENT_ACCEPT, + LWIP_EVENT_SENT, + LWIP_EVENT_RECV, + LWIP_EVENT_CONNECTED, + LWIP_EVENT_POLL, + LWIP_EVENT_ERR +}; + +err_t lwip_tcp_event(void *arg, struct tcp_pcb *pcb, + enum lwip_event, + struct pbuf *p, + u16_t size, + err_t err); + +#endif /* LWIP_EVENT_API */ + +/* Application program's interface: */ +struct tcp_pcb * tcp_new (void); +struct tcp_pcb * tcp_new_ip_type (u8_t type); + +void tcp_arg (struct tcp_pcb *pcb, void *arg); +#if LWIP_CALLBACK_API +void tcp_recv (struct tcp_pcb *pcb, tcp_recv_fn recv); +void tcp_sent (struct tcp_pcb *pcb, tcp_sent_fn sent); +void tcp_err (struct tcp_pcb *pcb, tcp_err_fn err); +void tcp_accept (struct tcp_pcb *pcb, tcp_accept_fn accept); +#endif /* LWIP_CALLBACK_API */ +void tcp_poll (struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval); + +#define tcp_set_flags(pcb, set_flags) do { (pcb)->flags = (tcpflags_t)((pcb)->flags | (set_flags)); } while(0) +#define tcp_clear_flags(pcb, clr_flags) do { (pcb)->flags = (tcpflags_t)((pcb)->flags & (tcpflags_t)(~(clr_flags) & TCP_ALLFLAGS)); } while(0) +#define tcp_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +#if LWIP_TCP_TIMESTAMPS +#define tcp_mss(pcb) (((pcb)->flags & TF_TIMESTAMP) ? ((pcb)->mss - 12) : (pcb)->mss) +#else /* LWIP_TCP_TIMESTAMPS */ +/** @ingroup tcp_raw */ +#define tcp_mss(pcb) ((pcb)->mss) +#endif /* LWIP_TCP_TIMESTAMPS */ +/** @ingroup tcp_raw */ +#define tcp_sndbuf(pcb) (TCPWND16((pcb)->snd_buf)) +/** @ingroup tcp_raw */ +#define tcp_sndqueuelen(pcb) ((pcb)->snd_queuelen) +/** @ingroup tcp_raw */ +#define tcp_nagle_disable(pcb) tcp_set_flags(pcb, TF_NODELAY) +/** @ingroup tcp_raw */ +#define tcp_nagle_enable(pcb) tcp_clear_flags(pcb, TF_NODELAY) +/** @ingroup tcp_raw */ +#define tcp_nagle_disabled(pcb) tcp_is_flag_set(pcb, TF_NODELAY) + +#if TCP_LISTEN_BACKLOG +#define tcp_backlog_set(pcb, new_backlog) do { \ + LWIP_ASSERT("pcb->state == LISTEN (called for wrong pcb?)", (pcb)->state == LISTEN); \ + ((struct tcp_pcb_listen *)(pcb))->backlog = ((new_backlog) ? 
(new_backlog) : 1); } while(0) +void tcp_backlog_delayed(struct tcp_pcb* pcb); +void tcp_backlog_accepted(struct tcp_pcb* pcb); +#else /* TCP_LISTEN_BACKLOG */ +#define tcp_backlog_set(pcb, new_backlog) +#define tcp_backlog_delayed(pcb) +#define tcp_backlog_accepted(pcb) +#endif /* TCP_LISTEN_BACKLOG */ +#define tcp_accepted(pcb) do { LWIP_UNUSED_ARG(pcb); } while(0) /* compatibility define, not needed any more */ + +void tcp_recved (struct tcp_pcb *pcb, u16_t len); +err_t tcp_bind (struct tcp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif); +err_t tcp_connect (struct tcp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port, tcp_connected_fn connected); + +struct tcp_pcb * tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err); +struct tcp_pcb * tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog); +/** @ingroup tcp_raw */ +#define tcp_listen(pcb) tcp_listen_with_backlog(pcb, TCP_DEFAULT_LISTEN_BACKLOG) + +void tcp_abort (struct tcp_pcb *pcb); +err_t tcp_close (struct tcp_pcb *pcb); +err_t tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx); + +err_t tcp_write (struct tcp_pcb *pcb, const void *dataptr, u16_t len, + u8_t apiflags); + +void tcp_setprio (struct tcp_pcb *pcb, u8_t prio); + +err_t tcp_output (struct tcp_pcb *pcb); + +err_t tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port); + +#define tcp_dbg_get_tcp_state(pcb) ((pcb)->state) + +/* for compatibility with older implementation */ +#define tcp_new_ip6() tcp_new_ip_type(IPADDR_TYPE_V6) + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +u8_t tcp_ext_arg_alloc_id(void); +void tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks); +void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg); +void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h new file mode 100644 index 000000000..002307465 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h @@ -0,0 +1,88 @@ +/** + * @file + * Base TCP API definitions shared by TCP and ALTCP\n + * See also @ref tcp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
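Below is a small, hypothetical sketch of setting up a listener with the raw-API functions declared above; an IPv4 build is assumed so that IP_ADDR_ANY is available, and port 7 and example_accept() are placeholders:

    static err_t example_accept(void *arg, struct tcp_pcb *newpcb, err_t err); /* hypothetical */

    static void example_listen(void)
    {
      struct tcp_pcb *pcb = tcp_new();
      if (pcb == NULL) {
        return;
      }
      if (tcp_bind(pcb, IP_ADDR_ANY, 7) != ERR_OK) {
        tcp_close(pcb);
        return;
      }
      pcb = tcp_listen(pcb);   /* replaces the pcb with a smaller listening pcb */
      if (pcb != NULL) {
        tcp_accept(pcb, example_accept);
      }
    }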
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPBASE_H +#define LWIP_HDR_TCPBASE_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#ifdef __cplusplus +extern "C" { +#endif + + +#if LWIP_WND_SCALE +typedef u32_t tcpwnd_size_t; +#else +typedef u16_t tcpwnd_size_t; +#endif + +enum tcp_state { + CLOSED = 0, + LISTEN = 1, + SYN_SENT = 2, + SYN_RCVD = 3, + ESTABLISHED = 4, + FIN_WAIT_1 = 5, + FIN_WAIT_2 = 6, + CLOSE_WAIT = 7, + CLOSING = 8, + LAST_ACK = 9, + TIME_WAIT = 10 +}; +/* ATTENTION: this depends on state number ordering! */ +#define TCP_STATE_IS_CLOSING(state) ((state) >= FIN_WAIT_1) + +/* Flags for "apiflags" parameter in tcp_write */ +#define TCP_WRITE_FLAG_COPY 0x01 +#define TCP_WRITE_FLAG_MORE 0x02 + +#define TCP_PRIO_MIN 1 +#define TCP_PRIO_NORMAL 64 +#define TCP_PRIO_MAX 127 + +const char* tcp_debug_state_str(enum tcp_state s); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCPBASE_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h new file mode 100644 index 000000000..0b8880a9f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h @@ -0,0 +1,113 @@ +/** + * @file + * Functions to sync with TCPIP thread + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPIP_H +#define LWIP_HDR_TCPIP_H + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/timeouts.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_TCPIP_CORE_LOCKING +/** The global semaphore to lock the stack. */ +extern sys_mutex_t lock_tcpip_core; +#if !defined LOCK_TCPIP_CORE || defined __DOXYGEN__ +/** Lock lwIP core mutex (needs @ref LWIP_TCPIP_CORE_LOCKING 1) */ +#define LOCK_TCPIP_CORE() sys_mutex_lock(&lock_tcpip_core) +/** Unlock lwIP core mutex (needs @ref LWIP_TCPIP_CORE_LOCKING 1) */ +#define UNLOCK_TCPIP_CORE() sys_mutex_unlock(&lock_tcpip_core) +#endif /* LOCK_TCPIP_CORE */ +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define LOCK_TCPIP_CORE() +#define UNLOCK_TCPIP_CORE() +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +struct pbuf; +struct netif; + +/** Function prototype for the init_done function passed to tcpip_init */ +typedef void (*tcpip_init_done_fn)(void *arg); +/** Function prototype for functions passed to tcpip_callback() */ +typedef void (*tcpip_callback_fn)(void *ctx); + +/* Forward declarations */ +struct tcpip_callback_msg; + +void tcpip_init(tcpip_init_done_fn tcpip_init_done, void *arg); + +err_t tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn); +err_t tcpip_input(struct pbuf *p, struct netif *inp); + +err_t tcpip_try_callback(tcpip_callback_fn function, void *ctx); +err_t tcpip_callback(tcpip_callback_fn function, void *ctx); +/** @ingroup lwip_os + * @deprecated use tcpip_try_callback() or tcpip_callback() instead + */ +#define tcpip_callback_with_block(function, ctx, block) ((block != 0)? tcpip_callback(function, ctx) : tcpip_try_callback(function, ctx)) + +struct tcpip_callback_msg* tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx); +void tcpip_callbackmsg_delete(struct tcpip_callback_msg* msg); +err_t tcpip_callbackmsg_trycallback(struct tcpip_callback_msg* msg); +err_t tcpip_callbackmsg_trycallback_fromisr(struct tcpip_callback_msg* msg); + +/* free pbufs or heap memory from another context without blocking */ +err_t pbuf_free_callback(struct pbuf *p); +err_t mem_free_callback(void *m); + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +err_t tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg); +err_t tcpip_untimeout(sys_timeout_handler h, void *arg); +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + +#ifdef TCPIP_THREAD_TEST +int tcpip_thread_poll_one(void); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* !NO_SYS */ + +#endif /* LWIP_HDR_TCPIP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h new file mode 100644 index 000000000..b601f9eb3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h @@ -0,0 +1,128 @@ +/** + * @file + * Timer implementations + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_TIMEOUTS_H +#define LWIP_HDR_TIMEOUTS_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#if !NO_SYS +#include "lwip/sys.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef LWIP_DEBUG_TIMERNAMES +#ifdef LWIP_DEBUG +#define LWIP_DEBUG_TIMERNAMES SYS_DEBUG +#else /* LWIP_DEBUG */ +#define LWIP_DEBUG_TIMERNAMES 0 +#endif /* LWIP_DEBUG*/ +#endif + +/** Returned by sys_timeouts_sleeptime() to indicate there is no timer, so we + * can sleep forever. + */ +#define SYS_TIMEOUTS_SLEEPTIME_INFINITE 0xFFFFFFFF + +/** Function prototype for a stack-internal timer function that has to be + * called at a defined interval */ +typedef void (* lwip_cyclic_timer_handler)(void); + +/** This struct contains information about a stack-internal timer function + that has to be called at a defined interval */ +struct lwip_cyclic_timer { + u32_t interval_ms; + lwip_cyclic_timer_handler handler; +#if LWIP_DEBUG_TIMERNAMES + const char* handler_name; +#endif /* LWIP_DEBUG_TIMERNAMES */ +}; + +/** This array contains all stack-internal cyclic timers. To get the number of + * timers, use lwip_num_cyclic_timers */ +extern const struct lwip_cyclic_timer lwip_cyclic_timers[]; +/** Array size of lwip_cyclic_timers[] */ +extern const int lwip_num_cyclic_timers; + +#if LWIP_TIMERS + +/** Function prototype for a timeout callback function. Register such a function + * using sys_timeout(). 
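For illustration only (assuming the usual rule that timeouts are armed from the tcpip thread or with the core locked), a self-rearming one-second timeout could be registered like this:

    static void example_timer(void *arg)
    {
      /* ... periodic work ... */
      sys_timeout(1000, example_timer, arg);   /* re-arm for another second */
    }

    /* start:  sys_timeout(1000, example_timer, NULL);
     * cancel: sys_untimeout(example_timer, NULL); */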
+ * + * @param arg Additional argument to pass to the function - set up by sys_timeout() + */ +typedef void (* sys_timeout_handler)(void *arg); + +struct sys_timeo { + struct sys_timeo *next; + u32_t time; + sys_timeout_handler h; + void *arg; +#if LWIP_DEBUG_TIMERNAMES + const char* handler_name; +#endif /* LWIP_DEBUG_TIMERNAMES */ +}; + +void sys_timeouts_init(void); + +#if LWIP_DEBUG_TIMERNAMES +void sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char* handler_name); +#define sys_timeout(msecs, handler, arg) sys_timeout_debug(msecs, handler, arg, #handler) +#else /* LWIP_DEBUG_TIMERNAMES */ +void sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg); +#endif /* LWIP_DEBUG_TIMERNAMES */ + +void sys_untimeout(sys_timeout_handler handler, void *arg); +void sys_restart_timeouts(void); +void sys_check_timeouts(void); +u32_t sys_timeouts_sleeptime(void); + +#if LWIP_TESTMODE +struct sys_timeo** sys_timeouts_get_next_timeout(void); +void lwip_cyclic_timer(void *arg); +#endif + +#endif /* LWIP_TIMERS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_TIMEOUTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h new file mode 100644 index 000000000..b1c78e58d --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h @@ -0,0 +1,195 @@ +/** + * @file + * UDP API (to be used from TCPIP thread)\n + * See also @ref udp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_UDP_H +#define LWIP_HDR_UDP_H + +#include "lwip/opt.h" + +#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/udp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define UDP_FLAGS_NOCHKSUM 0x01U +#define UDP_FLAGS_UDPLITE 0x02U +#define UDP_FLAGS_CONNECTED 0x04U +#define UDP_FLAGS_MULTICAST_LOOP 0x08U + +struct udp_pcb; + +/** Function prototype for udp pcb receive callback functions + * addr and port are in same byte order as in the pcb + * The callback is responsible for freeing the pbuf + * if it's not used any more. + * + * ATTENTION: Be aware that 'addr' might point into the pbuf 'p' so freeing this pbuf + * can make 'addr' invalid, too. + * + * @param arg user supplied argument (udp_pcb.recv_arg) + * @param pcb the udp_pcb which received data + * @param p the packet buffer that was received + * @param addr the remote IP address from which the packet was received + * @param port the remote port from which the packet was received + */ +typedef void (*udp_recv_fn)(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port); + +/** the UDP protocol control block */ +struct udp_pcb { +/** Common members of all PCB types */ + IP_PCB; + +/* Protocol specific PCB members */ + + struct udp_pcb *next; + + u8_t flags; + /** ports are in host byte order */ + u16_t local_port, remote_port; + +#if LWIP_MULTICAST_TX_OPTIONS +#if LWIP_IPV4 + /** outgoing network interface for multicast packets, by IPv4 address (if not 'any') */ + ip4_addr_t mcast_ip4; +#endif /* LWIP_IPV4 */ + /** outgoing network interface for multicast packets, by interface index (if nonzero) */ + u8_t mcast_ifindex; + /** TTL for outgoing multicast packets */ + u8_t mcast_ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_UDPLITE + /** used for UDP_LITE only */ + u16_t chksum_len_rx, chksum_len_tx; +#endif /* LWIP_UDPLITE */ + + /** receive callback function */ + udp_recv_fn recv; + /** user-supplied argument for the recv callback */ + void *recv_arg; +}; +/* udp_pcbs export for external reference (e.g. SNMP agent) */ +extern struct udp_pcb *udp_pcbs; + +/* The following functions is the application layer interface to the + UDP code. 
*/ +struct udp_pcb * udp_new (void); +struct udp_pcb * udp_new_ip_type(u8_t type); +void udp_remove (struct udp_pcb *pcb); +err_t udp_bind (struct udp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void udp_bind_netif (struct udp_pcb *pcb, const struct netif* netif); +err_t udp_connect (struct udp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void udp_disconnect (struct udp_pcb *pcb); +void udp_recv (struct udp_pcb *pcb, udp_recv_fn recv, + void *recv_arg); +err_t udp_sendto_if (struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif); +err_t udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif, const ip_addr_t *src_ip); +err_t udp_sendto (struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port); +err_t udp_send (struct udp_pcb *pcb, struct pbuf *p); + +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP +err_t udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif, u8_t have_chksum, + u16_t chksum); +err_t udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + u8_t have_chksum, u16_t chksum); +err_t udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, + u8_t have_chksum, u16_t chksum); +err_t udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, + u8_t have_chksum, u16_t chksum, const ip_addr_t *src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + +#define udp_flags(pcb) ((pcb)->flags) +#define udp_setflags(pcb, f) ((pcb)->flags = (f)) + +#define udp_set_flags(pcb, set_flags) do { (pcb)->flags = (u8_t)((pcb)->flags | (set_flags)); } while(0) +#define udp_clear_flags(pcb, clr_flags) do { (pcb)->flags = (u8_t)((pcb)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define udp_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +/* The following functions are the lower layer interface to UDP. 
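A minimal, hypothetical echo sketch using the application-layer functions declared above (an IPv4 build is assumed for IP_ADDR_ANY; port 7 is a placeholder; per the udp_recv_fn documentation, 'addr' is used before the pbuf is freed and the callback frees the pbuf):

    static void example_udp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p,
                                 const ip_addr_t *addr, u16_t port)
    {
      LWIP_UNUSED_ARG(arg);
      if (p != NULL) {
        udp_sendto(pcb, p, addr, port);  /* echo back; 'addr' may point into p */
        pbuf_free(p);                    /* the callback owns the pbuf */
      }
    }

    static void example_udp_init(void)
    {
      struct udp_pcb *pcb = udp_new();
      if ((pcb != NULL) && (udp_bind(pcb, IP_ADDR_ANY, 7) == ERR_OK)) {
        udp_recv(pcb, example_udp_recv, NULL);
      }
    }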
*/ +void udp_input (struct pbuf *p, struct netif *inp); + +void udp_init (void); + +/* for compatibility with older implementation */ +#define udp_new_ip6() udp_new_ip_type(IPADDR_TYPE_V6) + +#if LWIP_MULTICAST_TX_OPTIONS +#if LWIP_IPV4 +#define udp_set_multicast_netif_addr(pcb, ip4addr) ip4_addr_copy((pcb)->mcast_ip4, *(ip4addr)) +#define udp_get_multicast_netif_addr(pcb) (&(pcb)->mcast_ip4) +#endif /* LWIP_IPV4 */ +#define udp_set_multicast_netif_index(pcb, idx) ((pcb)->mcast_ifindex = (idx)) +#define udp_get_multicast_netif_index(pcb) ((pcb)->mcast_ifindex) +#define udp_set_multicast_ttl(pcb, value) ((pcb)->mcast_ttl = (value)) +#define udp_get_multicast_ttl(pcb) ((pcb)->mcast_ttl) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if UDP_DEBUG +void udp_debug_print(struct udp_hdr *udphdr); +#else +#define udp_debug_print(udphdr) +#endif + +void udp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_UDP */ + +#endif /* LWIP_HDR_UDP_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h new file mode 100644 index 000000000..f4f8cf144 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h @@ -0,0 +1,127 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETIF_BRIDGEIF_H +#define LWIP_HDR_NETIF_BRIDGEIF_H + +#include "netif/bridgeif_opts.h" + +#include "lwip/err.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct netif; + +#if (BRIDGEIF_MAX_PORTS < 0) || (BRIDGEIF_MAX_PORTS >= 64) +#error BRIDGEIF_MAX_PORTS must be [1..63] +#elif BRIDGEIF_MAX_PORTS < 8 +typedef u8_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 16 +typedef u16_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 32 +typedef u32_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 64 +typedef u64_t bridgeif_portmask_t; +#endif + +#define BR_FLOOD ((bridgeif_portmask_t)-1) + +/** @ingroup bridgeif + * Initialisation data for @ref bridgeif_init. + * An instance of this type must be passed as parameter 'state' to @ref netif_add + * when the bridge is added. + */ +typedef struct bridgeif_initdata_s { + /** MAC address of the bridge (cannot use the netif's addresses) */ + struct eth_addr ethaddr; + /** Maximum number of ports in the bridge (ports are stored in an array, this + influences memory allocated for netif->state of the bridge netif). */ + u8_t max_ports; + /** Maximum number of dynamic/learning entries in the bridge's forwarding database. + In the default implementation, this controls memory consumption only. */ + u16_t max_fdb_dynamic_entries; + /** Maximum number of static forwarding entries. Influences memory consumption! */ + u16_t max_fdb_static_entries; +} bridgeif_initdata_t; + +/** @ingroup bridgeif + * Use this for constant initialization of a bridgeif_initdat_t + * (ethaddr must be passed as ETH_ADDR()) + */ +#define BRIDGEIF_INITDATA1(max_ports, max_fdb_dynamic_entries, max_fdb_static_entries, ethaddr) {ethaddr, max_ports, max_fdb_dynamic_entries, max_fdb_static_entries} +/** @ingroup bridgeif + * Use this for constant initialization of a bridgeif_initdat_t + * (each byte of ethaddr must be passed) + */ +#define BRIDGEIF_INITDATA2(max_ports, max_fdb_dynamic_entries, max_fdb_static_entries, e0, e1, e2, e3, e4, e5) {{e0, e1, e2, e3, e4, e5}, max_ports, max_fdb_dynamic_entries, max_fdb_static_entries} + +err_t bridgeif_init(struct netif *netif); +err_t bridgeif_add_port(struct netif *bridgeif, struct netif *portif); +err_t bridgeif_fdb_add(struct netif *bridgeif, const struct eth_addr *addr, bridgeif_portmask_t ports); +err_t bridgeif_fdb_remove(struct netif *bridgeif, const struct eth_addr *addr); + +/* FDB interface, can be replaced by own implementation */ +void bridgeif_fdb_update_src(void *fdb_ptr, struct eth_addr *src_addr, u8_t port_idx); +bridgeif_portmask_t bridgeif_fdb_get_dst_ports(void *fdb_ptr, struct eth_addr *dst_addr); +void* bridgeif_fdb_init(u16_t max_fdb_entries); + +#if BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +#ifndef BRIDGEIF_DECL_PROTECT +/* define bridgeif protection to sys_arch_protect... 
*/ +#include "lwip/sys.h" +#define BRIDGEIF_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define BRIDGEIF_READ_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define BRIDGEIF_READ_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +#define BRIDGEIF_WRITE_PROTECT(lev) +#define BRIDGEIF_WRITE_UNPROTECT(lev) +#endif +#else /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ +#include "lwip/tcpip.h" +#define BRIDGEIF_DECL_PROTECT(lev) +#define BRIDGEIF_READ_PROTECT(lev) +#define BRIDGEIF_READ_UNPROTECT(lev) +#define BRIDGEIF_WRITE_PROTECT(lev) +#define BRIDGEIF_WRITE_UNPROTECT(lev) +#endif /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_BRIDGEIF_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h new file mode 100644 index 000000000..b85c30174 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h @@ -0,0 +1,90 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_NETIF_BRIDGEIF_OPTS_H +#define LWIP_HDR_NETIF_BRIDGEIF_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup bridgeif_opts Options + * @ingroup bridgeif + * @{ + */ + +/** BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT==1: set port netif's 'input' function + * to call directly into bridgeif code and on top of that, directly call into + * the selected forwarding port's 'linkoutput' function. + * This means that the bridgeif input/output path is protected from concurrent access + * but as well, *all* bridge port netif's drivers must correctly handle concurrent access! 
+ * == 0: get into tcpip_thread for every input packet (no multithreading) + * ATTENTION: as ==0 relies on tcpip.h, the default depends on NO_SYS setting + */ +#ifndef BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +#define BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT NO_SYS +#endif + +/** BRIDGEIF_MAX_PORTS: this is used to create a typedef used for forwarding + * bit-fields: the number of bits required is this + 1 (for the internal/cpu port) + * (63 is the maximum, resulting in an u64_t for the bit mask) + * ATTENTION: this controls the maximum number of the implementation only! + * The max. number of ports per bridge must still be passed via netif_add parameter! + */ +#ifndef BRIDGEIF_MAX_PORTS +#define BRIDGEIF_MAX_PORTS 7 +#endif + +/** BRIDGEIF_DEBUG: Enable generic debugging in bridgeif.c. */ +#ifndef BRIDGEIF_DEBUG +#define BRIDGEIF_DEBUG LWIP_DBG_OFF +#endif + +/** BRIDGEIF_DEBUG: Enable FDB debugging in bridgeif.c. */ +#ifndef BRIDGEIF_FDB_DEBUG +#define BRIDGEIF_FDB_DEBUG LWIP_DBG_OFF +#endif + +/** BRIDGEIF_DEBUG: Enable forwarding debugging in bridgeif.c. */ +#ifndef BRIDGEIF_FW_DEBUG +#define BRIDGEIF_FW_DEBUG LWIP_DBG_OFF +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_NETIF_BRIDGEIF_OPTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h new file mode 100644 index 000000000..b536fd280 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h @@ -0,0 +1,3 @@ +/* ARP has been moved to core/ipv4, provide this #include for compatibility only */ +#include "lwip/etharp.h" +#include "netif/ethernet.h" diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h new file mode 100644 index 000000000..49649cbf8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h @@ -0,0 +1,77 @@ +/** + * @file + * Ethernet input function - handles INCOMING ethernet level traffic + * To be used in most low-level netif implementations + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_NETIF_ETHERNET_H +#define LWIP_HDR_NETIF_ETHERNET_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ARP || LWIP_ETHERNET + +/** Define this to 1 and define LWIP_ARP_FILTER_NETIF_FN(pbuf, netif, type) + * to a filter function that returns the correct netif when using multiple + * netifs on one hardware interface where the netif's low-level receive + * routine cannot decide for the correct netif (e.g. when mapping multiple + * IP addresses to one hardware interface). + */ +#ifndef LWIP_ARP_FILTER_NETIF +#define LWIP_ARP_FILTER_NETIF 0 +#endif + +err_t ethernet_input(struct pbuf *p, struct netif *netif); +err_t ethernet_output(struct netif* netif, struct pbuf* p, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type); + +extern const struct eth_addr ethbroadcast, ethzero; + +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_ETHERNET_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h new file mode 100644 index 000000000..54e019fd0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h @@ -0,0 +1,112 @@ +/** + * @file + * Definitions for IEEE 802.15.4 MAC frames + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETIF_IEEE802154_H +#define LWIP_HDR_NETIF_IEEE802154_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** General MAC frame format + * This shows the full featured header, mainly for documentation. + * Some fields are omitted or shortened to achieve frame compression. + */ +struct ieee_802154_hdr { + /** See IEEE_802154_FC_* defines */ + PACK_STRUCT_FIELD(u16_t frame_control); + /** Sequence number is omitted if IEEE_802154_FC_SEQNO_SUPPR is set in frame_control */ + PACK_STRUCT_FLD_8(u8_t sequence_number); + /** Destination PAN ID is omitted if Destination Addressing Mode is 0 */ + PACK_STRUCT_FIELD(u16_t destination_pan_id); + /** Destination Address is omitted if Destination Addressing Mode is 0 */ + PACK_STRUCT_FLD_8(u8_t destination_address[8]); + /** Source PAN ID is omitted if Source Addressing Mode is 0 + or if IEEE_802154_FC_PANID_COMPR is set in frame control*/ + PACK_STRUCT_FIELD(u16_t source_pan_id); + /** Source Address is omitted if Source Addressing Mode is 0 */ + PACK_STRUCT_FLD_8(u8_t source_address[8]); + /* The rest is variable */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Addressing modes (2 bits) */ +#define IEEE_802154_ADDR_MODE_NO_ADDR 0x00 /* PAN ID and address fields are not present */ +#define IEEE_802154_ADDR_MODE_RESERVED 0x01 /* Reserved */ +#define IEEE_802154_ADDR_MODE_SHORT 0x02 /* Address field contains a short address (16 bit) */ +#define IEEE_802154_ADDR_MODE_EXT 0x03 /* Address field contains an extended address (64 bit) */ + +/* IEEE 802.15.4 Frame Control definitions (2 bytes; see IEEE 802.15.4-2015 ch. 
7.2.1) */ +#define IEEE_802154_FC_FT_MASK 0x0007 /* bits 0..2: Frame Type */ +#define IEEE_802154_FC_FT_BEACON 0x00 +#define IEEE_802154_FC_FT_DATA 0x01 +#define IEEE_802154_FC_FT_ACK 0x02 +#define IEEE_802154_FC_FT_MAC_CMD 0x03 +#define IEEE_802154_FC_FT_RESERVED 0x04 +#define IEEE_802154_FC_FT_MULTIPURPOSE 0x05 +#define IEEE_802154_FC_FT_FRAG 0x06 +#define IEEE_802154_FC_FT_EXT 0x07 +#define IEEE_802154_FC_SEC_EN 0x0008 /* bit 3: Security Enabled */ +#define IEEE_802154_FC_FRAME_PEND 0x0010 /* bit 4: Frame Pending */ +#define IEEE_802154_FC_ACK_REQ 0x0020 /* bit 5: AR (ACK required) */ +#define IEEE_802154_FC_PANID_COMPR 0x0040 /* bit 6: PAN ID Compression (src and dst are equal, src PAN ID omitted) */ +#define IEEE_802154_FC_RESERVED 0x0080 +#define IEEE_802154_FC_SEQNO_SUPPR 0x0100 /* bit 8: Sequence Number Suppression */ +#define IEEE_802154_FC_IE_PRESENT 0x0200 /* bit 9: IE Present */ +#define IEEE_802154_FC_DST_ADDR_MODE_MASK 0x0c00 /* bits 10..11: Destination Addressing Mode */ +#define IEEE_802154_FC_DST_ADDR_MODE_NO_ADDR (IEEE_802154_ADDR_MODE_NO_ADDR << 10) +#define IEEE_802154_FC_DST_ADDR_MODE_SHORT (IEEE_802154_ADDR_MODE_SHORT << 10) +#define IEEE_802154_FC_DST_ADDR_MODE_EXT (IEEE_802154_ADDR_MODE_EXT << 10) +#define IEEE_802154_FC_FRAME_VERSION_MASK 0x3000 /* bits 12..13: Frame Version */ +#define IEEE_802154_FC_FRAME_VERSION_GET(x) (((x) & IEEE_802154_FC_FRAME_VERSION_MASK) >> 12) +#define IEEE_802154_FC_SRC_ADDR_MODE_MASK 0xc000 /* bits 14..15: Source Addressing Mode */ +#define IEEE_802154_FC_SRC_ADDR_MODE_SHORT (IEEE_802154_ADDR_MODE_SHORT << 14) +#define IEEE_802154_FC_SRC_ADDR_MODE_EXT (IEEE_802154_ADDR_MODE_EXT << 14) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_IEEE802154_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h new file mode 100644 index 000000000..ecff24ba6 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h @@ -0,0 +1,89 @@ +/** + * @file + * + * 6LowPAN output for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_H +#define LWIP_HDR_LOWPAN6_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 + +#include "netif/lowpan6_common.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 second period for reassembly */ +#define LOWPAN6_TMR_INTERVAL 1000 + +void lowpan6_tmr(void); + +err_t lowpan6_set_context(u8_t idx, const ip6_addr_t * context); +err_t lowpan6_set_short_addr(u8_t addr_high, u8_t addr_low); + +#if LWIP_IPV4 +err_t lowpan4_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4 */ +err_t lowpan6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); +err_t lowpan6_input(struct pbuf * p, struct netif *netif); +err_t lowpan6_if_init(struct netif *netif); + +/* pan_id in network byte order. */ +err_t lowpan6_set_pan_id(u16_t pan_id); + +u16_t lowpan6_calc_crc(const void *buf, u16_t len); + +#if !NO_SYS +err_t tcpip_6lowpan_input(struct pbuf *p, struct netif *inp); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h new file mode 100644 index 000000000..01896a7ff --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h @@ -0,0 +1,78 @@ +/** + * @file + * 6LowPAN over BLE for IPv6 (RFC7668). + */ + +/* + * Copyright (c) 2017 Benjamin Aigner + * Copyright (c) 2015 Inico Technologies Ltd. , Author: Ivan Delamer + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Benjamin Aigner + * + * Based on the original 6lowpan implementation of lwIP ( @see 6lowpan.c) + */ + +#ifndef LWIP_HDR_LOWPAN6_BLE_H +#define LWIP_HDR_LOWPAN6_BLE_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "netif/lowpan6_common.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +err_t rfc7668_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); +err_t rfc7668_input(struct pbuf * p, struct netif *netif); +err_t rfc7668_set_local_addr_eui64(struct netif *netif, const u8_t *local_addr, size_t local_addr_len); +err_t rfc7668_set_local_addr_mac48(struct netif *netif, const u8_t *local_addr, size_t local_addr_len, int is_public_addr); +err_t rfc7668_set_peer_addr_eui64(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len); +err_t rfc7668_set_peer_addr_mac48(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len, int is_public_addr); +err_t rfc7668_set_context(u8_t index, const ip6_addr_t * context); +err_t rfc7668_if_init(struct netif *netif); + +#if !NO_SYS +err_t tcpip_rfc7668_input(struct pbuf *p, struct netif *inp); +#endif + +void ble_addr_to_eui64(uint8_t *dst, const uint8_t *src, int public_addr); +void eui64_to_ble_addr(uint8_t *dst, const uint8_t *src); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_BLE_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h new file mode 100644 index 000000000..0dc13ab5b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h @@ -0,0 +1,82 @@ +/** + * @file + * + * Common 6LowPAN routines for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_COMMON_H +#define LWIP_HDR_LOWPAN6_COMMON_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 /* don't build if IPv6 is disabled in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Helper define for a link layer address, which can be encoded as 0, 2 or 8 bytes */ +struct lowpan6_link_addr { + /* encoded length of the address */ + u8_t addr_len; + /* address bytes */ + u8_t addr[8]; +}; + +s8_t lowpan6_get_address_mode(const ip6_addr_t *ip6addr, const struct lowpan6_link_addr *mac_addr); + +#if LWIP_6LOWPAN_IPHC +err_t lowpan6_compress_headers(struct netif *netif, u8_t *inbuf, size_t inbuf_size, u8_t *outbuf, size_t outbuf_size, + u8_t *lowpan6_header_len_out, u8_t *hidden_header_len_out, ip6_addr_t *lowpan6_contexts, + const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst); +struct pbuf *lowpan6_decompress(struct pbuf *p, u16_t datagram_size, ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest); +#endif /* LWIP_6LOWPAN_IPHC */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_COMMON_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h new file mode 100644 index 000000000..17d46cdcc --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h @@ -0,0 +1,122 @@ +/** + * @file + * 6LowPAN options list + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_OPTS_H +#define LWIP_HDR_LOWPAN6_OPTS_H + +#include "lwip/opt.h" + +/** LWIP_6LOWPAN_NUM_CONTEXTS: define the number of compression + * contexts per netif type + */ +#ifndef LWIP_6LOWPAN_NUM_CONTEXTS +#define LWIP_6LOWPAN_NUM_CONTEXTS 10 +#endif + +/** LWIP_6LOWPAN_INFER_SHORT_ADDRESS: set this to 0 to disable creating + * short addresses for matching addresses (debug only) + */ +#ifndef LWIP_6LOWPAN_INFER_SHORT_ADDRESS +#define LWIP_6LOWPAN_INFER_SHORT_ADDRESS 1 +#endif + +/** LWIP_6LOWPAN_IPHC: set this to 0 to disable IP header compression as per + * RFC 6282 (which is mandatory for BLE) + */ +#ifndef LWIP_6LOWPAN_IPHC +#define LWIP_6LOWPAN_IPHC 1 +#endif + +/** Set this to 1 if your IEEE 802.15.4 interface can calculate and check the + * CRC in hardware. This means TX packets get 2 zero bytes added on transmission + * which are to be filled with the CRC. + */ +#ifndef LWIP_6LOWPAN_802154_HW_CRC +#define LWIP_6LOWPAN_802154_HW_CRC 0 +#endif + +/** If LWIP_6LOWPAN_802154_HW_CRC==0, this can override the default slow + * implementation of the CRC used for 6LoWPAN over IEEE 802.15.4 (which uses + * a shift register). + */ +#ifndef LWIP_6LOWPAN_CALC_CRC +#define LWIP_6LOWPAN_CALC_CRC(buf, len) lowpan6_calc_crc(buf, len) +#endif + +/** Debug level for 6LoWPAN in general */ +#ifndef LWIP_LOWPAN6_DEBUG +#define LWIP_LOWPAN6_DEBUG LWIP_DBG_OFF +#endif + +/** Debug level for 6LoWPAN over IEEE 802.15.4 */ +#ifndef LWIP_LOWPAN6_802154_DEBUG +#define LWIP_LOWPAN6_802154_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_LOWPAN6_IP_COMPRESSED_DEBUG: enable compressed IP frame + * output debugging + */ +#ifndef LWIP_LOWPAN6_IP_COMPRESSED_DEBUG +#define LWIP_LOWPAN6_IP_COMPRESSED_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_LOWPAN6_DECOMPRESSION_DEBUG: enable decompression debug output + */ +#ifndef LWIP_LOWPAN6_DECOMPRESSION_DEBUG +#define LWIP_LOWPAN6_DECOMPRESSION_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG: enable decompressed IP frame + * output debugging */ +#ifndef LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG +#define LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS: + * Currently, the linux kernel driver for 6lowpan sets/clears a bit in + * the address, depending on the BD address (either public or not). 
+ * Might not be RFC7668 conform, so you may select to do that (=1) or + * not (=0) */ +#ifndef LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS +#define LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS 1 +#endif + + +#endif /* LWIP_HDR_LOWPAN6_OPTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h new file mode 100644 index 000000000..b2285228b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h @@ -0,0 +1,164 @@ +/* + * ccp.h - Definitions for PPP Compression Control Protocol. + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ccp.h,v 1.12 2004/11/04 10:02:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CCP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CCP_H +#define CCP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * CCP codes. + */ + +#define CCP_CONFREQ 1 +#define CCP_CONFACK 2 +#define CCP_TERMREQ 5 +#define CCP_TERMACK 6 +#define CCP_RESETREQ 14 +#define CCP_RESETACK 15 + +/* + * Max # bytes for a CCP option + */ + +#define CCP_MAX_OPTION_LENGTH 32 + +/* + * Parts of a CCP packet. + */ + +#define CCP_CODE(dp) ((dp)[0]) +#define CCP_ID(dp) ((dp)[1]) +#define CCP_LENGTH(dp) (((dp)[2] << 8) + (dp)[3]) +#define CCP_HDRLEN 4 + +#define CCP_OPT_CODE(dp) ((dp)[0]) +#define CCP_OPT_LENGTH(dp) ((dp)[1]) +#define CCP_OPT_MINLEN 2 + +#if BSDCOMPRESS_SUPPORT +/* + * Definitions for BSD-Compress. + */ + +#define CI_BSD_COMPRESS 21 /* config. option for BSD-Compress */ +#define CILEN_BSD_COMPRESS 3 /* length of config. option */ + +/* Macros for handling the 3rd byte of the BSD-Compress config option. */ +#define BSD_NBITS(x) ((x) & 0x1F) /* number of bits requested */ +#define BSD_VERSION(x) ((x) >> 5) /* version of option format */ +#define BSD_CURRENT_VERSION 1 /* current version number */ +#define BSD_MAKE_OPT(v, n) (((v) << 5) | (n)) + +#define BSD_MIN_BITS 9 /* smallest code size supported */ +#define BSD_MAX_BITS 15 /* largest code size supported */ +#endif /* BSDCOMPRESS_SUPPORT */ + +#if DEFLATE_SUPPORT +/* + * Definitions for Deflate. 
+ */ + +#define CI_DEFLATE 26 /* config option for Deflate */ +#define CI_DEFLATE_DRAFT 24 /* value used in original draft RFC */ +#define CILEN_DEFLATE 4 /* length of its config option */ + +#define DEFLATE_MIN_SIZE 9 +#define DEFLATE_MAX_SIZE 15 +#define DEFLATE_METHOD_VAL 8 +#define DEFLATE_SIZE(x) (((x) >> 4) + 8) +#define DEFLATE_METHOD(x) ((x) & 0x0F) +#define DEFLATE_MAKE_OPT(w) ((((w) - 8) << 4) + DEFLATE_METHOD_VAL) +#define DEFLATE_CHK_SEQUENCE 0 +#endif /* DEFLATE_SUPPORT */ + +#if MPPE_SUPPORT +/* + * Definitions for MPPE. + */ + +#define CI_MPPE 18 /* config option for MPPE */ +#define CILEN_MPPE 6 /* length of config option */ +#endif /* MPPE_SUPPORT */ + +#if PREDICTOR_SUPPORT +/* + * Definitions for other, as yet unsupported, compression methods. + */ + +#define CI_PREDICTOR_1 1 /* config option for Predictor-1 */ +#define CILEN_PREDICTOR_1 2 /* length of its config option */ +#define CI_PREDICTOR_2 2 /* config option for Predictor-2 */ +#define CILEN_PREDICTOR_2 2 /* length of its config option */ +#endif /* PREDICTOR_SUPPORT */ + +typedef struct ccp_options { +#if DEFLATE_SUPPORT + unsigned int deflate :1; /* do Deflate? */ + unsigned int deflate_correct :1; /* use correct code for deflate? */ + unsigned int deflate_draft :1; /* use draft RFC code for deflate? */ +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + unsigned int bsd_compress :1; /* do BSD Compress? */ +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + unsigned int predictor_1 :1; /* do Predictor-1? */ + unsigned int predictor_2 :1; /* do Predictor-2? */ +#endif /* PREDICTOR_SUPPORT */ + +#if MPPE_SUPPORT + u8_t mppe; /* MPPE bitfield */ +#endif /* MPPE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + u_short bsd_bits; /* # bits/code for BSD Compress */ +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + u_short deflate_size; /* lg(window size) for Deflate */ +#endif /* DEFLATE_SUPPORT */ + u8_t method; /* code for chosen compression method */ +} ccp_options; + +extern const struct protent ccp_protent; + +void ccp_resetrequest(ppp_pcb *pcb); /* Issue a reset-request. */ + +#ifdef __cplusplus +} +#endif + +#endif /* CCP_H */ +#endif /* PPP_SUPPORT && CCP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h new file mode 100644 index 000000000..eb0269fe5 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h @@ -0,0 +1,36 @@ +/* + * chap-md5.h - New CHAP/MD5 implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". 
+ * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +extern const struct chap_digest_type md5_digest; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h new file mode 100644 index 000000000..2d8cd9ca9 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h @@ -0,0 +1,200 @@ +/* + * chap-new.c - New CHAP implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CHAP_H +#define CHAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * CHAP packets begin with a standard header with code, id, len (2 bytes). + */ +#define CHAP_HDRLEN 4 + +/* + * Values for the code field. + */ +#define CHAP_CHALLENGE 1 +#define CHAP_RESPONSE 2 +#define CHAP_SUCCESS 3 +#define CHAP_FAILURE 4 + +/* + * CHAP digest codes. + */ +#define CHAP_MD5 5 +#if MSCHAP_SUPPORT +#define CHAP_MICROSOFT 0x80 +#define CHAP_MICROSOFT_V2 0x81 +#endif /* MSCHAP_SUPPORT */ + +/* + * Semi-arbitrary limits on challenge and response fields. + */ +#define MAX_CHALLENGE_LEN 64 +#define MAX_RESPONSE_LEN 64 + +/* + * These limits apply to challenge and response packets we send. + * The +4 is the +1 that we actually need rounded up. 
+ */ +#define CHAL_MAX_PKTLEN (PPP_HDRLEN + CHAP_HDRLEN + 4 + MAX_CHALLENGE_LEN + MAXNAMELEN) +#define RESP_MAX_PKTLEN (PPP_HDRLEN + CHAP_HDRLEN + 4 + MAX_RESPONSE_LEN + MAXNAMELEN) + +/* bitmask of supported algorithms */ +#if MSCHAP_SUPPORT +#define MDTYPE_MICROSOFT_V2 0x1 +#define MDTYPE_MICROSOFT 0x2 +#endif /* MSCHAP_SUPPORT */ +#define MDTYPE_MD5 0x4 +#define MDTYPE_NONE 0 + +#if MSCHAP_SUPPORT +/* Return the digest alg. ID for the most preferred digest type. */ +#define CHAP_DIGEST(mdtype) \ + ((mdtype) & MDTYPE_MD5)? CHAP_MD5: \ + ((mdtype) & MDTYPE_MICROSOFT_V2)? CHAP_MICROSOFT_V2: \ + ((mdtype) & MDTYPE_MICROSOFT)? CHAP_MICROSOFT: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_DIGEST(mdtype) \ + ((mdtype) & MDTYPE_MD5)? CHAP_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* Return the bit flag (lsb set) for our most preferred digest type. */ +#define CHAP_MDTYPE(mdtype) ((mdtype) ^ ((mdtype) - 1)) & (mdtype) + +/* Return the bit flag for a given digest algorithm ID. */ +#if MSCHAP_SUPPORT +#define CHAP_MDTYPE_D(digest) \ + ((digest) == CHAP_MICROSOFT_V2)? MDTYPE_MICROSOFT_V2: \ + ((digest) == CHAP_MICROSOFT)? MDTYPE_MICROSOFT: \ + ((digest) == CHAP_MD5)? MDTYPE_MD5: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_MDTYPE_D(digest) \ + ((digest) == CHAP_MD5)? MDTYPE_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* Can we do the requested digest? */ +#if MSCHAP_SUPPORT +#define CHAP_CANDIGEST(mdtype, digest) \ + ((digest) == CHAP_MICROSOFT_V2)? (mdtype) & MDTYPE_MICROSOFT_V2: \ + ((digest) == CHAP_MICROSOFT)? (mdtype) & MDTYPE_MICROSOFT: \ + ((digest) == CHAP_MD5)? (mdtype) & MDTYPE_MD5: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_CANDIGEST(mdtype, digest) \ + ((digest) == CHAP_MD5)? (mdtype) & MDTYPE_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* + * The code for each digest type has to supply one of these. + */ +struct chap_digest_type { + int code; + +#if PPP_SERVER + /* + * Note: challenge and response arguments below are formatted as + * a length byte followed by the actual challenge/response data. + */ + void (*generate_challenge)(ppp_pcb *pcb, unsigned char *challenge); + int (*verify_response)(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space); +#endif /* PPP_SERVER */ + void (*make_response)(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *priv); + int (*check_success)(ppp_pcb *pcb, unsigned char *pkt, int len, unsigned char *priv); + void (*handle_failure)(ppp_pcb *pcb, unsigned char *pkt, int len); +}; + +/* + * Each interface is described by chap structure. 
+ */ +#if CHAP_SUPPORT +typedef struct chap_client_state { + u8_t flags; + const char *name; + const struct chap_digest_type *digest; + unsigned char priv[64]; /* private area for digest's use */ +} chap_client_state; + +#if PPP_SERVER +typedef struct chap_server_state { + u8_t flags; + u8_t id; + const char *name; + const struct chap_digest_type *digest; + int challenge_xmits; + int challenge_pktlen; + unsigned char challenge[CHAL_MAX_PKTLEN]; +} chap_server_state; +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPORT */ + +#if 0 /* UNUSED */ +/* Hook for a plugin to validate CHAP challenge */ +extern int (*chap_verify_hook)(char *name, char *ourname, int id, + const struct chap_digest_type *digest, + unsigned char *challenge, unsigned char *response, + char *message, int message_space); +#endif /* UNUSED */ + +#if PPP_SERVER +/* Called by authentication code to start authenticating the peer. */ +extern void chap_auth_peer(ppp_pcb *pcb, const char *our_name, int digest_code); +#endif /* PPP_SERVER */ + +/* Called by auth. code to start authenticating us to the peer. */ +extern void chap_auth_with_peer(ppp_pcb *pcb, const char *our_name, int digest_code); + +/* Represents the CHAP protocol to the main pppd code */ +extern const struct protent chap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* CHAP_H */ +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h new file mode 100644 index 000000000..079529115 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h @@ -0,0 +1,44 @@ +/* + * chap_ms.h - Challenge Handshake Authentication Protocol definitions. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + * $Id: chap_ms.h,v 1.13 2004/11/15 22:13:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CHAPMS_INCLUDE +#define CHAPMS_INCLUDE + +extern const struct chap_digest_type chapms_digest; +extern const struct chap_digest_type chapms2_digest; + +#endif /* CHAPMS_INCLUDE */ + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h new file mode 100644 index 000000000..3ee9aaf81 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h @@ -0,0 +1,169 @@ +/* + * eap.h - Extensible Authentication Protocol for PPP (RFC 2284) + * + * Copyright (c) 2001 by Sun Microsystems, Inc. + * All rights reserved. + * + * Non-exclusive rights to redistribute, modify, translate, and use + * this software in source and binary forms, in whole or in part, is + * hereby granted, provided that the above copyright notice is + * duplicated in any source form, and that neither the name of the + * copyright holder nor the author is used to endorse or promote + * products derived from this software. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Original version by James Carlson + * + * $Id: eap.h,v 1.2 2003/06/11 23:56:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && EAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_EAP_H +#define PPP_EAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define EAP_HEADERLEN 4 + + +/* EAP message codes. */ +#define EAP_REQUEST 1 +#define EAP_RESPONSE 2 +#define EAP_SUCCESS 3 +#define EAP_FAILURE 4 + +/* EAP types */ +#define EAPT_IDENTITY 1 +#define EAPT_NOTIFICATION 2 +#define EAPT_NAK 3 /* (response only) */ +#define EAPT_MD5CHAP 4 +#define EAPT_OTP 5 /* One-Time Password; RFC 1938 */ +#define EAPT_TOKEN 6 /* Generic Token Card */ +/* 7 and 8 are unassigned. 
*/ +#define EAPT_RSA 9 /* RSA Public Key Authentication */ +#define EAPT_DSS 10 /* DSS Unilateral */ +#define EAPT_KEA 11 /* KEA */ +#define EAPT_KEA_VALIDATE 12 /* KEA-VALIDATE */ +#define EAPT_TLS 13 /* EAP-TLS */ +#define EAPT_DEFENDER 14 /* Defender Token (AXENT) */ +#define EAPT_W2K 15 /* Windows 2000 EAP */ +#define EAPT_ARCOT 16 /* Arcot Systems */ +#define EAPT_CISCOWIRELESS 17 /* Cisco Wireless */ +#define EAPT_NOKIACARD 18 /* Nokia IP smart card */ +#define EAPT_SRP 19 /* Secure Remote Password */ +/* 20 is deprecated */ + +/* EAP SRP-SHA1 Subtypes */ +#define EAPSRP_CHALLENGE 1 /* Request 1 - Challenge */ +#define EAPSRP_CKEY 1 /* Response 1 - Client Key */ +#define EAPSRP_SKEY 2 /* Request 2 - Server Key */ +#define EAPSRP_CVALIDATOR 2 /* Response 2 - Client Validator */ +#define EAPSRP_SVALIDATOR 3 /* Request 3 - Server Validator */ +#define EAPSRP_ACK 3 /* Response 3 - final ack */ +#define EAPSRP_LWRECHALLENGE 4 /* Req/resp 4 - Lightweight rechal */ + +#define SRPVAL_EBIT 0x00000001 /* Use shared key for ECP */ + +#define SRP_PSEUDO_ID "pseudo_" +#define SRP_PSEUDO_LEN 7 + +#define MD5_SIGNATURE_SIZE 16 +#define EAP_MIN_CHALLENGE_LENGTH 17 +#define EAP_MAX_CHALLENGE_LENGTH 24 +#define EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH 3 /* 2^3-1 = 7, 17+7 = 24 */ + +#define EAP_STATES \ + "Initial", "Pending", "Closed", "Listen", "Identify", \ + "SRP1", "SRP2", "SRP3", "MD5Chall", "Open", "SRP4", "BadAuth" + +#define eap_client_active(pcb) ((pcb)->eap.es_client.ea_state == eapListen) +#if PPP_SERVER +#define eap_server_active(pcb) \ + ((pcb)->eap.es_server.ea_state >= eapIdentify && \ + (pcb)->eap.es_server.ea_state <= eapMD5Chall) +#endif /* PPP_SERVER */ + +/* + * Complete EAP state for one PPP session. + */ +enum eap_state_code { + eapInitial = 0, /* No EAP authentication yet requested */ + eapPending, /* Waiting for LCP (no timer) */ + eapClosed, /* Authentication not in use */ + eapListen, /* Client ready (and timer running) */ + eapIdentify, /* EAP Identify sent */ + eapSRP1, /* Sent EAP SRP-SHA1 Subtype 1 */ + eapSRP2, /* Sent EAP SRP-SHA1 Subtype 2 */ + eapSRP3, /* Sent EAP SRP-SHA1 Subtype 3 */ + eapMD5Chall, /* Sent MD5-Challenge */ + eapOpen, /* Completed authentication */ + eapSRP4, /* Sent EAP SRP-SHA1 Subtype 4 */ + eapBadAuth /* Failed authentication */ +}; + +struct eap_auth { + const char *ea_name; /* Our name */ + char ea_peer[MAXNAMELEN +1]; /* Peer's name */ + void *ea_session; /* Authentication library linkage */ + u_char *ea_skey; /* Shared encryption key */ + u_short ea_namelen; /* Length of our name */ + u_short ea_peerlen; /* Length of peer's name */ + enum eap_state_code ea_state; + u_char ea_id; /* Current id */ + u_char ea_requests; /* Number of Requests sent/received */ + u_char ea_responses; /* Number of Responses */ + u_char ea_type; /* One of EAPT_* */ + u32_t ea_keyflags; /* SRP shared key usage flags */ +}; + +#ifndef EAP_MAX_CHALLENGE_LENGTH +#define EAP_MAX_CHALLENGE_LENGTH 24 +#endif +typedef struct eap_state { + struct eap_auth es_client; /* Client (authenticatee) data */ +#if PPP_SERVER + struct eap_auth es_server; /* Server (authenticator) data */ +#endif /* PPP_SERVER */ + int es_savedtime; /* Saved timeout */ + int es_rechallenge; /* EAP rechallenge interval */ + int es_lwrechallenge; /* SRP lightweight rechallenge inter */ + u8_t es_usepseudo; /* Use SRP Pseudonym if offered one */ + int es_usedpseudo; /* Set if we already sent PN */ + int es_challen; /* Length of challenge string */ + u_char es_challenge[EAP_MAX_CHALLENGE_LENGTH]; +} 
eap_state; + +/* + * Timeouts. + */ +#if 0 /* moved to ppp_opts.h */ +#define EAP_DEFTIMEOUT 3 /* Timeout (seconds) for rexmit */ +#define EAP_DEFTRANSMITS 10 /* max # times to transmit */ +#define EAP_DEFREQTIME 20 /* Time to wait for peer request */ +#define EAP_DEFALLOWREQ 20 /* max # times to accept requests */ +#endif /* moved to ppp_opts.h */ + +void eap_authwithpeer(ppp_pcb *pcb, const char *localname); +void eap_authpeer(ppp_pcb *pcb, const char *localname); + +extern const struct protent eap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_EAP_H */ + +#endif /* PPP_SUPPORT && EAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h new file mode 100644 index 000000000..d8808c3ab --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h @@ -0,0 +1,62 @@ +/* + * ecp.h - Definitions for PPP Encryption Control Protocol. + * + * Copyright (c) 2002 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ecp.h,v 1.2 2003/01/10 07:12:36 fcusack Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && ECP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef ECP_H +#define ECP_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ecp_options { + bool required; /* Is ECP required? */ + unsigned enctype; /* Encryption type */ +} ecp_options; + +extern fsm ecp_fsm[]; +extern ecp_options ecp_wantoptions[]; +extern ecp_options ecp_gotoptions[]; +extern ecp_options ecp_allowoptions[]; +extern ecp_options ecp_hisoptions[]; + +extern const struct protent ecp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* ECP_H */ +#endif /* PPP_SUPPORT && ECP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h new file mode 100644 index 000000000..5adeb482a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h @@ -0,0 +1,102 @@ +/* + * eui64.h - EUI64 routines for IPv6CP. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: eui64.h,v 1.6 2002/12/04 23:03:32 paulus Exp $ +*/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef EUI64_H +#define EUI64_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * @todo: + * + * Maybe this should be done by processing struct in6_addr directly... + */ +typedef union +{ + u8_t e8[8]; + u16_t e16[4]; + u32_t e32[2]; +} eui64_t; + +#define eui64_iszero(e) (((e).e32[0] | (e).e32[1]) == 0) +#define eui64_equals(e, o) (((e).e32[0] == (o).e32[0]) && \ + ((e).e32[1] == (o).e32[1])) +#define eui64_zero(e) (e).e32[0] = (e).e32[1] = 0; + +#define eui64_copy(s, d) memcpy(&(d), &(s), sizeof(eui64_t)) + +#define eui64_magic(e) do { \ + (e).e32[0] = magic(); \ + (e).e32[1] = magic(); \ + (e).e8[0] &= ~2; \ + } while (0) +#define eui64_magic_nz(x) do { \ + eui64_magic(x); \ + } while (eui64_iszero(x)) +#define eui64_magic_ne(x, y) do { \ + eui64_magic(x); \ + } while (eui64_equals(x, y)) + +#define eui64_get(ll, cp) do { \ + eui64_copy((*cp), (ll)); \ + (cp) += sizeof(eui64_t); \ + } while (0) + +#define eui64_put(ll, cp) do { \ + eui64_copy((ll), (*cp)); \ + (cp) += sizeof(eui64_t); \ + } while (0) + +#define eui64_set32(e, l) do { \ + (e).e32[0] = 0; \ + (e).e32[1] = lwip_htonl(l); \ + } while (0) +#define eui64_setlo32(e, l) eui64_set32(e, l) + +char *eui64_ntoa(eui64_t); /* Returns ascii representation of id */ + +#ifdef __cplusplus +} +#endif + +#endif /* EUI64_H */ +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h new file mode 100644 index 000000000..8dec700e0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h @@ -0,0 +1,182 @@ +/* + * fsm.h - {Link, IP} Control Protocol Finite State Machine definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: fsm.h,v 1.10 2004/11/13 02:28:15 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef FSM_H +#define FSM_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define HEADERLEN 4 + + +/* + * CP (LCP, IPCP, etc.) codes. + */ +#define CONFREQ 1 /* Configuration Request */ +#define CONFACK 2 /* Configuration Ack */ +#define CONFNAK 3 /* Configuration Nak */ +#define CONFREJ 4 /* Configuration Reject */ +#define TERMREQ 5 /* Termination Request */ +#define TERMACK 6 /* Termination Ack */ +#define CODEREJ 7 /* Code Reject */ + + +/* + * Each FSM is described by an fsm structure and fsm callbacks. 
+ */ +typedef struct fsm { + ppp_pcb *pcb; /* PPP Interface */ + const struct fsm_callbacks *callbacks; /* Callback routines */ + const char *term_reason; /* Reason for closing protocol */ + u8_t seen_ack; /* Have received valid Ack/Nak/Rej to Req */ + /* -- This is our only flag, we might use u_int :1 if we have more flags */ + u16_t protocol; /* Data Link Layer Protocol field value */ + u8_t state; /* State */ + u8_t flags; /* Contains option bits */ + u8_t id; /* Current id */ + u8_t reqid; /* Current request id */ + u8_t retransmits; /* Number of retransmissions left */ + u8_t nakloops; /* Number of nak loops since last ack */ + u8_t rnakloops; /* Number of naks received */ + u8_t maxnakloops; /* Maximum number of nak loops tolerated + (necessary because IPCP require a custom large max nak loops value) */ + u8_t term_reason_len; /* Length of term_reason */ +} fsm; + + +typedef struct fsm_callbacks { + void (*resetci) /* Reset our Configuration Information */ + (fsm *); + int (*cilen) /* Length of our Configuration Information */ + (fsm *); + void (*addci) /* Add our Configuration Information */ + (fsm *, u_char *, int *); + int (*ackci) /* ACK our Configuration Information */ + (fsm *, u_char *, int); + int (*nakci) /* NAK our Configuration Information */ + (fsm *, u_char *, int, int); + int (*rejci) /* Reject our Configuration Information */ + (fsm *, u_char *, int); + int (*reqci) /* Request peer's Configuration Information */ + (fsm *, u_char *, int *, int); + void (*up) /* Called when fsm reaches PPP_FSM_OPENED state */ + (fsm *); + void (*down) /* Called when fsm leaves PPP_FSM_OPENED state */ + (fsm *); + void (*starting) /* Called when we want the lower layer */ + (fsm *); + void (*finished) /* Called when we don't want the lower layer */ + (fsm *); + void (*protreject) /* Called when Protocol-Reject received */ + (int); + void (*retransmit) /* Retransmission is necessary */ + (fsm *); + int (*extcode) /* Called when unknown code received */ + (fsm *, int, int, u_char *, int); + const char *proto_name; /* String name for protocol (for messages) */ +} fsm_callbacks; + + +/* + * Link states. + */ +#define PPP_FSM_INITIAL 0 /* Down, hasn't been opened */ +#define PPP_FSM_STARTING 1 /* Down, been opened */ +#define PPP_FSM_CLOSED 2 /* Up, hasn't been opened */ +#define PPP_FSM_STOPPED 3 /* Open, waiting for down event */ +#define PPP_FSM_CLOSING 4 /* Terminating the connection, not open */ +#define PPP_FSM_STOPPING 5 /* Terminating, but open */ +#define PPP_FSM_REQSENT 6 /* We've sent a Config Request */ +#define PPP_FSM_ACKRCVD 7 /* We've received a Config Ack */ +#define PPP_FSM_ACKSENT 8 /* We've sent a Config Ack */ +#define PPP_FSM_OPENED 9 /* Connection available */ + + +/* + * Flags - indicate options controlling FSM operation + */ +#define OPT_PASSIVE 1 /* Don't die if we don't get a response */ +#define OPT_RESTART 2 /* Treat 2nd OPEN as DOWN, UP */ +#define OPT_SILENT 4 /* Wait for peer to speak first */ + + +/* + * Timeouts. 
+ */ +#if 0 /* moved to ppp_opts.h */ +#define DEFTIMEOUT 3 /* Timeout time in seconds */ +#define DEFMAXTERMREQS 2 /* Maximum Terminate-Request transmissions */ +#define DEFMAXCONFREQS 10 /* Maximum Configure-Request transmissions */ +#define DEFMAXNAKLOOPS 5 /* Maximum number of nak loops */ +#endif /* moved to ppp_opts.h */ + + +/* + * Prototypes + */ +void fsm_init(fsm *f); +void fsm_lowerup(fsm *f); +void fsm_lowerdown(fsm *f); +void fsm_open(fsm *f); +void fsm_close(fsm *f, const char *reason); +void fsm_input(fsm *f, u_char *inpacket, int l); +void fsm_protreject(fsm *f); +void fsm_sdata(fsm *f, u_char code, u_char id, const u_char *data, int datalen); + +#ifdef __cplusplus +} +#endif + +#endif /* FSM_H */ +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h new file mode 100644 index 000000000..32fdd1c64 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h @@ -0,0 +1,134 @@ +/* + * ipcp.h - IP Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipcp.h,v 1.14 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV4_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef IPCP_H +#define IPCP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. 
+ */ +#define CI_ADDRS 1 /* IP Addresses */ +#if VJ_SUPPORT +#define CI_COMPRESSTYPE 2 /* Compression Type */ +#endif /* VJ_SUPPORT */ +#define CI_ADDR 3 + +#if LWIP_DNS +#define CI_MS_DNS1 129 /* Primary DNS value */ +#define CI_MS_DNS2 131 /* Secondary DNS value */ +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ +#define CI_MS_WINS1 130 /* Primary WINS value */ +#define CI_MS_WINS2 132 /* Secondary WINS value */ +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT +#define MAX_STATES 16 /* from slcompress.h */ + +#define IPCP_VJMODE_OLD 1 /* "old" mode (option # = 0x0037) */ +#define IPCP_VJMODE_RFC1172 2 /* "old-rfc"mode (option # = 0x002d) */ +#define IPCP_VJMODE_RFC1332 3 /* "new-rfc"mode (option # = 0x002d, */ + /* maxslot and slot number compression) */ + +#define IPCP_VJ_COMP 0x002d /* current value for VJ compression option*/ +#define IPCP_VJ_COMP_OLD 0x0037 /* "old" (i.e, broken) value for VJ */ + /* compression option*/ +#endif /* VJ_SUPPORT */ + +typedef struct ipcp_options { + unsigned int neg_addr :1; /* Negotiate IP Address? */ + unsigned int old_addrs :1; /* Use old (IP-Addresses) option? */ + unsigned int req_addr :1; /* Ask peer to send IP address? */ +#if 0 /* UNUSED */ + unsigned int default_route :1; /* Assign default route through interface? */ + unsigned int replace_default_route :1; /* Replace default route through interface? */ +#endif /* UNUSED */ +#if 0 /* UNUSED - PROXY ARP */ + unsigned int proxy_arp :1; /* Make proxy ARP entry for peer? */ +#endif /* UNUSED - PROXY ARP */ +#if VJ_SUPPORT + unsigned int neg_vj :1; /* Van Jacobson Compression? */ + unsigned int old_vj :1; /* use old (short) form of VJ option? */ + unsigned int cflag :1; +#endif /* VJ_SUPPORT */ + unsigned int accept_local :1; /* accept peer's value for ouraddr */ + unsigned int accept_remote :1; /* accept peer's value for hisaddr */ +#if LWIP_DNS + unsigned int req_dns1 :1; /* Ask peer to send primary DNS address? */ + unsigned int req_dns2 :1; /* Ask peer to send secondary DNS address? */ +#endif /* LWIP_DNS */ + + u32_t ouraddr, hisaddr; /* Addresses in NETWORK BYTE ORDER */ +#if LWIP_DNS + u32_t dnsaddr[2]; /* Primary and secondary MS DNS entries */ +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + u32_t winsaddr[2]; /* Primary and secondary MS WINS entries */ +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT + u16_t vj_protocol; /* protocol value to use in VJ option */ + u8_t maxslotindex; /* values for RFC1332 VJ compression neg. */ +#endif /* VJ_SUPPORT */ +} ipcp_options; + +#if 0 /* UNUSED, already defined by lwIP */ +char *ip_ntoa (u32_t); +#endif /* UNUSED, already defined by lwIP */ + +extern const struct protent ipcp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* IPCP_H */ +#endif /* PPP_SUPPORT && PPP_IPV4_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h new file mode 100644 index 000000000..909997386 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h @@ -0,0 +1,191 @@ +/* + * ipv6cp.h - PPP IPV6 Control Protocol. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* Original version, based on RFC2023 : + + Copyright (c) 1995, 1996, 1997 Francis.Dupont@inria.fr, INRIA Rocquencourt, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Copyright (c) 1998, 1999 Francis.Dupont@inria.fr, GIE DYADE, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Ce travail a été fait au sein du GIE DYADE (Groupement d'Intérêt + Économique ayant pour membres BULL S.A. et l'INRIA). + + Ce logiciel informatique est disponible aux conditions + usuelles dans la recherche, c'est-à-dire qu'il peut + être utilisé, copié, modifié, distribué à l'unique + condition que ce texte soit conservé afin que + l'origine de ce logiciel soit reconnue. + + Le nom de l'Institut National de Recherche en Informatique + et en Automatique (INRIA), de l'IMAG, ou d'une personne morale + ou physique ayant participé à l'élaboration de ce logiciel ne peut + être utilisé sans son accord préalable explicite. + + Ce logiciel est fourni tel quel sans aucune garantie, + support ou responsabilité d'aucune sorte. + Ce logiciel est dérivé de sources d'origine + "University of California at Berkeley" et + "Digital Equipment Corporation" couvertes par des copyrights. + + L'Institut d'Informatique et de Mathématiques Appliquées de Grenoble (IMAG) + est une fédération d'unités mixtes de recherche du CNRS, de l'Institut National + Polytechnique de Grenoble et de l'Université Joseph Fourier regroupant + sept laboratoires dont le laboratoire Logiciels, Systèmes, Réseaux (LSR). + + This work has been done in the context of GIE DYADE (joint R & D venture + between BULL S.A. and INRIA). + + This software is available with usual "research" terms + with the aim of retain credits of the software. + Permission to use, copy, modify and distribute this software for any + purpose and without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies, + and the name of INRIA, IMAG, or any contributor not be used in advertising + or publicity pertaining to this material without the prior explicit + permission. The software is provided "as is" without any + warranties, support or liabilities of any kind. + This software is derived from source code from + "University of California at Berkeley" and + "Digital Equipment Corporation" protected by copyrights. 
+ + Grenoble's Institute of Computer Science and Applied Mathematics (IMAG) + is a federation of seven research units funded by the CNRS, National + Polytechnic Institute of Grenoble and University Joseph Fourier. + The research unit in Software, Systems, Networks (LSR) is member of IMAG. +*/ + +/* + * Derived from : + * + * + * ipcp.h - IP Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipv6cp.h,v 1.7 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef IPV6CP_H +#define IPV6CP_H + +#include "eui64.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. + */ +#define CI_IFACEID 1 /* Interface Identifier */ +#ifdef IPV6CP_COMP +#define CI_COMPRESSTYPE 2 /* Compression Type */ +#endif /* IPV6CP_COMP */ + +/* No compression types yet defined. + *#define IPV6CP_COMP 0x004f + */ +typedef struct ipv6cp_options { + unsigned int neg_ifaceid :1; /* Negotiate interface identifier? */ + unsigned int req_ifaceid :1; /* Ask peer to send interface identifier? */ + unsigned int accept_local :1; /* accept peer's value for iface id? */ + unsigned int opt_local :1; /* ourtoken set by option */ + unsigned int opt_remote :1; /* histoken set by option */ + unsigned int use_ip :1; /* use IP as interface identifier */ +#if 0 + unsigned int use_persistent :1; /* use uniquely persistent value for address */ +#endif +#ifdef IPV6CP_COMP + unsigned int neg_vj :1; /* Van Jacobson Compression? 
*/ +#endif /* IPV6CP_COMP */ + +#ifdef IPV6CP_COMP + u_short vj_protocol; /* protocol value to use in VJ option */ +#endif /* IPV6CP_COMP */ + eui64_t ourid, hisid; /* Interface identifiers */ +} ipv6cp_options; + +extern const struct protent ipv6cp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* IPV6CP_H */ +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h new file mode 100644 index 000000000..18ad1cb23 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h @@ -0,0 +1,179 @@ +/* + * lcp.h - Link Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: lcp.h,v 1.20 2004/11/14 22:53:42 carlsonj Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef LCP_H +#define LCP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. 
+ */ +#define CI_VENDOR 0 /* Vendor Specific */ +#define CI_MRU 1 /* Maximum Receive Unit */ +#define CI_ASYNCMAP 2 /* Async Control Character Map */ +#define CI_AUTHTYPE 3 /* Authentication Type */ +#define CI_QUALITY 4 /* Quality Protocol */ +#define CI_MAGICNUMBER 5 /* Magic Number */ +#define CI_PCOMPRESSION 7 /* Protocol Field Compression */ +#define CI_ACCOMPRESSION 8 /* Address/Control Field Compression */ +#define CI_FCSALTERN 9 /* FCS-Alternatives */ +#define CI_SDP 10 /* Self-Describing-Pad */ +#define CI_NUMBERED 11 /* Numbered-Mode */ +#define CI_CALLBACK 13 /* callback */ +#define CI_MRRU 17 /* max reconstructed receive unit; multilink */ +#define CI_SSNHF 18 /* short sequence numbers for multilink */ +#define CI_EPDISC 19 /* endpoint discriminator */ +#define CI_MPPLUS 22 /* Multi-Link-Plus-Procedure */ +#define CI_LDISC 23 /* Link-Discriminator */ +#define CI_LCPAUTH 24 /* LCP Authentication */ +#define CI_COBS 25 /* Consistent Overhead Byte Stuffing */ +#define CI_PREFELIS 26 /* Prefix Elision */ +#define CI_MPHDRFMT 27 /* MP Header Format */ +#define CI_I18N 28 /* Internationalization */ +#define CI_SDL 29 /* Simple Data Link */ + +/* + * LCP-specific packet types (code numbers). + */ +#define PROTREJ 8 /* Protocol Reject */ +#define ECHOREQ 9 /* Echo Request */ +#define ECHOREP 10 /* Echo Reply */ +#define DISCREQ 11 /* Discard Request */ +#define IDENTIF 12 /* Identification */ +#define TIMEREM 13 /* Time Remaining */ + +/* Value used as data for CI_CALLBACK option */ +#define CBCP_OPT 6 /* Use callback control protocol */ + +#if 0 /* moved to ppp_opts.h */ +#define DEFMRU 1500 /* Try for this */ +#define MINMRU 128 /* No MRUs below this */ +#define MAXMRU 16384 /* Normally limit MRU to this */ +#endif /* moved to ppp_opts.h */ + +/* An endpoint discriminator, used with multilink. */ +#define MAX_ENDP_LEN 20 /* maximum length of discriminator value */ +struct epdisc { + unsigned char class_; /* -- The word "class" is reserved in C++. */ + unsigned char length; + unsigned char value[MAX_ENDP_LEN]; +}; + +/* + * The state of options is described by an lcp_options structure. + */ +typedef struct lcp_options { + unsigned int passive :1; /* Don't die if we don't get a response */ + unsigned int silent :1; /* Wait for the other end to start first */ +#if 0 /* UNUSED */ + unsigned int restart :1; /* Restart vs. exit after close */ +#endif /* UNUSED */ + unsigned int neg_mru :1; /* Negotiate the MRU? */ + unsigned int neg_asyncmap :1; /* Negotiate the async map? */ +#if PAP_SUPPORT + unsigned int neg_upap :1; /* Ask for UPAP authentication? */ +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + unsigned int neg_chap :1; /* Ask for CHAP authentication? */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + unsigned int neg_eap :1; /* Ask for EAP authentication? */ +#endif /* EAP_SUPPORT */ + unsigned int neg_magicnumber :1; /* Ask for magic number? */ + unsigned int neg_pcompression :1; /* HDLC Protocol Field Compression? */ + unsigned int neg_accompression :1; /* HDLC Address/Control Field Compression? 
*/ +#if LQR_SUPPORT + unsigned int neg_lqr :1; /* Negotiate use of Link Quality Reports */ +#endif /* LQR_SUPPORT */ + unsigned int neg_cbcp :1; /* Negotiate use of CBCP */ +#ifdef HAVE_MULTILINK + unsigned int neg_mrru :1; /* negotiate multilink MRRU */ +#endif /* HAVE_MULTILINK */ + unsigned int neg_ssnhf :1; /* negotiate short sequence numbers */ + unsigned int neg_endpoint :1; /* negotiate endpoint discriminator */ + + u16_t mru; /* Value of MRU */ +#ifdef HAVE_MULTILINK + u16_t mrru; /* Value of MRRU, and multilink enable */ +#endif /* MULTILINK */ +#if CHAP_SUPPORT + u8_t chap_mdtype; /* which MD types (hashing algorithm) */ +#endif /* CHAP_SUPPORT */ + u32_t asyncmap; /* Value of async map */ + u32_t magicnumber; + u8_t numloops; /* Number of loops during magic number neg. */ +#if LQR_SUPPORT + u32_t lqr_period; /* Reporting period for LQR 1/100ths second */ +#endif /* LQR_SUPPORT */ + struct epdisc endpoint; /* endpoint discriminator */ +} lcp_options; + +void lcp_open(ppp_pcb *pcb); +void lcp_close(ppp_pcb *pcb, const char *reason); +void lcp_lowerup(ppp_pcb *pcb); +void lcp_lowerdown(ppp_pcb *pcb); +void lcp_sprotrej(ppp_pcb *pcb, u_char *p, int len); /* send protocol reject */ + +extern const struct protent lcp_protent; + +#if 0 /* moved to ppp_opts.h */ +/* Default number of times we receive our magic number from the peer + before deciding the link is looped-back. */ +#define DEFLOOPBACKFAIL 10 +#endif /* moved to ppp_opts.h */ + +#ifdef __cplusplus +} +#endif + +#endif /* LCP_H */ +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h new file mode 100644 index 000000000..a165e18fa --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h @@ -0,0 +1,130 @@ +/* + * magic.h - PPP Magic Number definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: magic.h,v 1.5 2003/06/11 23:56:26 paulus Exp $ + */ +/***************************************************************************** +* randm.h - Random number generator header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* Copyright (c) 1998 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-05-29 Guy Lancaster , Global Election Systems Inc. +* Extracted from avos. +*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef MAGIC_H +#define MAGIC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** +*** PUBLIC FUNCTIONS *** +***********************/ + +/* + * Initialize the random number generator. + */ +void magic_init(void); + +/* + * Randomize our random seed value. To be called for truely random events + * such as user operations and network traffic. + */ +void magic_randomize(void); + +/* + * Return a new random number. + */ +u32_t magic(void); /* Returns the next magic number */ + +/* + * Fill buffer with random bytes + * + * Use the random pool to generate random data. This degrades to pseudo + * random when used faster than randomness is supplied using magic_churnrand(). + * Thus it's important to make sure that the results of this are not + * published directly because one could predict the next result to at + * least some degree. Also, it's important to get a good seed before + * the first use. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len); + +/* + * Return a new random number between 0 and (2^pow)-1 included. 
+ */ +u32_t magic_pow(u8_t pow); + +#ifdef __cplusplus +} +#endif + +#endif /* MAGIC_H */ + +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h new file mode 100644 index 000000000..5de112847 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h @@ -0,0 +1,181 @@ +/* + * mppe.h - Definitions for MPPE + * + * Copyright (c) 2008 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MPPE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef MPPE_H +#define MPPE_H + +#include "netif/ppp/pppcrypt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MPPE_PAD 4 /* MPPE growth per frame */ +#define MPPE_MAX_KEY_LEN 16 /* largest key length (128-bit) */ + +/* option bits for ccp_options.mppe */ +#define MPPE_OPT_40 0x01 /* 40 bit */ +#define MPPE_OPT_128 0x02 /* 128 bit */ +#define MPPE_OPT_STATEFUL 0x04 /* stateful mode */ +/* unsupported opts */ +#define MPPE_OPT_56 0x08 /* 56 bit */ +#define MPPE_OPT_MPPC 0x10 /* MPPC compression */ +#define MPPE_OPT_D 0x20 /* Unknown */ +#define MPPE_OPT_UNSUPPORTED (MPPE_OPT_56|MPPE_OPT_MPPC|MPPE_OPT_D) +#define MPPE_OPT_UNKNOWN 0x40 /* Bits !defined in RFC 3078 were set */ + +/* + * This is not nice ... the alternative is a bitfield struct though. + * And unfortunately, we cannot share the same bits for the option + * names above since C and H are the same bit. We could do a u_int32 + * but then we have to do a lwip_htonl() all the time and/or we still need + * to know which octet is which. + */ +#define MPPE_C_BIT 0x01 /* MPPC */ +#define MPPE_D_BIT 0x10 /* Obsolete, usage unknown */ +#define MPPE_L_BIT 0x20 /* 40-bit */ +#define MPPE_S_BIT 0x40 /* 128-bit */ +#define MPPE_M_BIT 0x80 /* 56-bit, not supported */ +#define MPPE_H_BIT 0x01 /* Stateless (in a different byte) */ + +/* Does not include H bit; used for least significant octet only. 
*/ +#define MPPE_ALL_BITS (MPPE_D_BIT|MPPE_L_BIT|MPPE_S_BIT|MPPE_M_BIT|MPPE_H_BIT) + +/* Build a CI from mppe opts (see RFC 3078) */ +#define MPPE_OPTS_TO_CI(opts, ci) \ + do { \ + u_char *ptr = ci; /* u_char[4] */ \ + \ + /* H bit */ \ + if (opts & MPPE_OPT_STATEFUL) \ + *ptr++ = 0x0; \ + else \ + *ptr++ = MPPE_H_BIT; \ + *ptr++ = 0; \ + *ptr++ = 0; \ + \ + /* S,L bits */ \ + *ptr = 0; \ + if (opts & MPPE_OPT_128) \ + *ptr |= MPPE_S_BIT; \ + if (opts & MPPE_OPT_40) \ + *ptr |= MPPE_L_BIT; \ + /* M,D,C bits not supported */ \ + } while (/* CONSTCOND */ 0) + +/* The reverse of the above */ +#define MPPE_CI_TO_OPTS(ci, opts) \ + do { \ + const u_char *ptr = ci; /* u_char[4] */ \ + \ + opts = 0; \ + \ + /* H bit */ \ + if (!(ptr[0] & MPPE_H_BIT)) \ + opts |= MPPE_OPT_STATEFUL; \ + \ + /* S,L bits */ \ + if (ptr[3] & MPPE_S_BIT) \ + opts |= MPPE_OPT_128; \ + if (ptr[3] & MPPE_L_BIT) \ + opts |= MPPE_OPT_40; \ + \ + /* M,D,C bits */ \ + if (ptr[3] & MPPE_M_BIT) \ + opts |= MPPE_OPT_56; \ + if (ptr[3] & MPPE_D_BIT) \ + opts |= MPPE_OPT_D; \ + if (ptr[3] & MPPE_C_BIT) \ + opts |= MPPE_OPT_MPPC; \ + \ + /* Other bits */ \ + if (ptr[0] & ~MPPE_H_BIT) \ + opts |= MPPE_OPT_UNKNOWN; \ + if (ptr[1] || ptr[2]) \ + opts |= MPPE_OPT_UNKNOWN; \ + if (ptr[3] & ~MPPE_ALL_BITS) \ + opts |= MPPE_OPT_UNKNOWN; \ + } while (/* CONSTCOND */ 0) + +/* Shared MPPE padding between MSCHAP and MPPE */ +#define SHA1_PAD_SIZE 40 + +static const u8_t mppe_sha1_pad1[SHA1_PAD_SIZE] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8_t mppe_sha1_pad2[SHA1_PAD_SIZE] = { + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2 +}; + +/* + * State for an MPPE (de)compressor. + */ +typedef struct ppp_mppe_state { + lwip_arc4_context arc4; + u8_t master_key[MPPE_MAX_KEY_LEN]; + u8_t session_key[MPPE_MAX_KEY_LEN]; + u8_t keylen; /* key length in bytes */ + /* NB: 128-bit == 16, 40-bit == 8! 
+ * If we want to support 56-bit, the unit has to change to bits + */ + u8_t bits; /* MPPE control bits */ + u16_t ccount; /* 12-bit coherency count (seqno) */ + u16_t sanity_errors; /* take down LCP if too many */ + unsigned int stateful :1; /* stateful mode flag */ + unsigned int discard :1; /* stateful mode packet loss flag */ +} ppp_mppe_state; + +void mppe_set_key(ppp_pcb *pcb, ppp_mppe_state *state, u8_t *key); +void mppe_init(ppp_pcb *pcb, ppp_mppe_state *state, u8_t options); +void mppe_comp_reset(ppp_pcb *pcb, ppp_mppe_state *state); +err_t mppe_compress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb, u16_t protocol); +void mppe_decomp_reset(ppp_pcb *pcb, ppp_mppe_state *state); +err_t mppe_decompress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb); + +#ifdef __cplusplus +} +#endif + +#endif /* MPPE_H */ +#endif /* PPP_SUPPORT && MPPE_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h new file mode 100644 index 000000000..3d73c3657 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h @@ -0,0 +1,698 @@ +/***************************************************************************** +* ppp.h - Network Point to Point Protocol header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original derived from BSD codes. 
+*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_H +#define PPP_H + +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/netif.h" +#include "lwip/sys.h" +#include "lwip/timeouts.h" +#if PPP_IPV6_SUPPORT +#include "lwip/ip6_addr.h" +#endif /* PPP_IPV6_SUPPORT */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Disable non-working or rarely used PPP feature, so rarely that we don't want to bloat ppp_opts.h with them */ +#ifndef PPP_OPTIONS +#define PPP_OPTIONS 0 +#endif + +#ifndef PPP_NOTIFY +#define PPP_NOTIFY 0 +#endif + +#ifndef PPP_REMOTENAME +#define PPP_REMOTENAME 0 +#endif + +#ifndef PPP_IDLETIMELIMIT +#define PPP_IDLETIMELIMIT 0 +#endif + +#ifndef PPP_LCP_ADAPTIVE +#define PPP_LCP_ADAPTIVE 0 +#endif + +#ifndef PPP_MAXCONNECT +#define PPP_MAXCONNECT 0 +#endif + +#ifndef PPP_ALLOWED_ADDRS +#define PPP_ALLOWED_ADDRS 0 +#endif + +#ifndef PPP_PROTOCOLNAME +#define PPP_PROTOCOLNAME 0 +#endif + +#ifndef PPP_STATS_SUPPORT +#define PPP_STATS_SUPPORT 0 +#endif + +#ifndef DEFLATE_SUPPORT +#define DEFLATE_SUPPORT 0 +#endif + +#ifndef BSDCOMPRESS_SUPPORT +#define BSDCOMPRESS_SUPPORT 0 +#endif + +#ifndef PREDICTOR_SUPPORT +#define PREDICTOR_SUPPORT 0 +#endif + +/************************* +*** PUBLIC DEFINITIONS *** +*************************/ + +/* + * The basic PPP frame. + */ +#define PPP_HDRLEN 4 /* octets for standard ppp header */ +#define PPP_FCSLEN 2 /* octets for FCS */ + +/* + * Values for phase. + */ +#define PPP_PHASE_DEAD 0 +#define PPP_PHASE_MASTER 1 +#define PPP_PHASE_HOLDOFF 2 +#define PPP_PHASE_INITIALIZE 3 +#define PPP_PHASE_SERIALCONN 4 +#define PPP_PHASE_DORMANT 5 +#define PPP_PHASE_ESTABLISH 6 +#define PPP_PHASE_AUTHENTICATE 7 +#define PPP_PHASE_CALLBACK 8 +#define PPP_PHASE_NETWORK 9 +#define PPP_PHASE_RUNNING 10 +#define PPP_PHASE_TERMINATE 11 +#define PPP_PHASE_DISCONNECT 12 + +/* Error codes. */ +#define PPPERR_NONE 0 /* No error. */ +#define PPPERR_PARAM 1 /* Invalid parameter. */ +#define PPPERR_OPEN 2 /* Unable to open PPP session. */ +#define PPPERR_DEVICE 3 /* Invalid I/O device for PPP. */ +#define PPPERR_ALLOC 4 /* Unable to allocate resources. */ +#define PPPERR_USER 5 /* User interrupt. */ +#define PPPERR_CONNECT 6 /* Connection lost. */ +#define PPPERR_AUTHFAIL 7 /* Failed authentication challenge. */ +#define PPPERR_PROTOCOL 8 /* Failed to meet protocol. */ +#define PPPERR_PEERDEAD 9 /* Connection timeout */ +#define PPPERR_IDLETIMEOUT 10 /* Idle Timeout */ +#define PPPERR_CONNECTTIME 11 /* Max connect time reached */ +#define PPPERR_LOOPBACK 12 /* Loopback detected */ + +/* Whether auth support is enabled at all */ +#define PPP_AUTH_SUPPORT (PAP_SUPPORT || CHAP_SUPPORT || EAP_SUPPORT) + +/************************ +*** PUBLIC DATA TYPES *** +************************/ + +/* + * Other headers require ppp_pcb definition for prototypes, but ppp_pcb + * require some structure definition from other headers as well, we are + * fixing the dependency loop here by declaring the ppp_pcb type then + * by including headers containing necessary struct definition for ppp_pcb + */ +typedef struct ppp_pcb_s ppp_pcb; + +/* Type definitions for BSD code. 
*/ +#ifndef __u_char_defined +typedef unsigned long u_long; +typedef unsigned int u_int; +typedef unsigned short u_short; +typedef unsigned char u_char; +#endif + +#include "fsm.h" +#include "lcp.h" +#if CCP_SUPPORT +#include "ccp.h" +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT +#include "mppe.h" +#endif /* MPPE_SUPPORT */ +#if PPP_IPV4_SUPPORT +#include "ipcp.h" +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +#include "ipv6cp.h" +#endif /* PPP_IPV6_SUPPORT */ +#if PAP_SUPPORT +#include "upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "eap.h" +#endif /* EAP_SUPPORT */ +#if VJ_SUPPORT +#include "vj.h" +#endif /* VJ_SUPPORT */ + +/* Link status callback function prototype */ +typedef void (*ppp_link_status_cb_fn)(ppp_pcb *pcb, int err_code, void *ctx); + +/* + * PPP configuration. + */ +typedef struct ppp_settings_s { + +#if PPP_SERVER && PPP_AUTH_SUPPORT + unsigned int auth_required :1; /* Peer is required to authenticate */ + unsigned int null_login :1; /* Username of "" and a password of "" are acceptable */ +#endif /* PPP_SERVER && PPP_AUTH_SUPPORT */ +#if PPP_REMOTENAME + unsigned int explicit_remote :1; /* remote_name specified with remotename opt */ +#endif /* PPP_REMOTENAME */ +#if PAP_SUPPORT + unsigned int refuse_pap :1; /* Don't proceed auth. with PAP */ +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + unsigned int refuse_chap :1; /* Don't proceed auth. with CHAP */ +#endif /* CHAP_SUPPORT */ +#if MSCHAP_SUPPORT + unsigned int refuse_mschap :1; /* Don't proceed auth. with MS-CHAP */ + unsigned int refuse_mschap_v2 :1; /* Don't proceed auth. with MS-CHAPv2 */ +#endif /* MSCHAP_SUPPORT */ +#if EAP_SUPPORT + unsigned int refuse_eap :1; /* Don't proceed auth. with EAP */ +#endif /* EAP_SUPPORT */ +#if LWIP_DNS + unsigned int usepeerdns :1; /* Ask peer for DNS adds */ +#endif /* LWIP_DNS */ + unsigned int persist :1; /* Persist mode, always try to open the connection */ +#if PRINTPKT_SUPPORT + unsigned int hide_password :1; /* Hide password in dumped packets */ +#endif /* PRINTPKT_SUPPORT */ + unsigned int noremoteip :1; /* Let him have no IP address */ + unsigned int lax_recv :1; /* accept control chars in asyncmap */ + unsigned int noendpoint :1; /* don't send/accept endpoint discriminator */ +#if PPP_LCP_ADAPTIVE + unsigned int lcp_echo_adaptive :1; /* request echo only if the link was idle */ +#endif /* PPP_LCP_ADAPTIVE */ +#if MPPE_SUPPORT + unsigned int require_mppe :1; /* Require MPPE (Microsoft Point to Point Encryption) */ + unsigned int refuse_mppe_40 :1; /* Allow MPPE 40-bit mode? */ + unsigned int refuse_mppe_128 :1; /* Allow MPPE 128-bit mode? */ + unsigned int refuse_mppe_stateful :1; /* Allow MPPE stateful mode? */ +#endif /* MPPE_SUPPORT */ + + u16_t listen_time; /* time to listen first (ms), waiting for peer to send LCP packet */ + +#if PPP_IDLETIMELIMIT + u16_t idle_time_limit; /* Disconnect if idle for this many seconds */ +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT + u32_t maxconnect; /* Maximum connect time (seconds) */ +#endif /* PPP_MAXCONNECT */ + +#if PPP_AUTH_SUPPORT + /* auth data */ + const char *user; /* Username for PAP */ + const char *passwd; /* Password for PAP, secret for CHAP */ +#if PPP_REMOTENAME + char remote_name[MAXNAMELEN + 1]; /* Peer's name for authentication */ +#endif /* PPP_REMOTENAME */ + +#if PAP_SUPPORT + u8_t pap_timeout_time; /* Timeout (seconds) for auth-req retrans. 
*/ + u8_t pap_max_transmits; /* Number of auth-reqs sent */ +#if PPP_SERVER + u8_t pap_req_timeout; /* Time to wait for auth-req from peer */ +#endif /* PPP_SERVER */ +#endif /* PAP_SUPPPORT */ + +#if CHAP_SUPPORT + u8_t chap_timeout_time; /* Timeout (seconds) for retransmitting req */ + u8_t chap_max_transmits; /* max # times to send challenge */ +#if PPP_SERVER + u8_t chap_rechallenge_time; /* Time to wait for auth-req from peer */ +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPPORT */ + +#if EAP_SUPPORT + u8_t eap_req_time; /* Time to wait (for retransmit/fail) */ + u8_t eap_allow_req; /* Max Requests allowed */ +#if PPP_SERVER + u8_t eap_timeout_time; /* Time to wait (for retransmit/fail) */ + u8_t eap_max_transmits; /* Max Requests allowed */ +#endif /* PPP_SERVER */ +#endif /* EAP_SUPPORT */ + +#endif /* PPP_AUTH_SUPPORT */ + + u8_t fsm_timeout_time; /* Timeout time in seconds */ + u8_t fsm_max_conf_req_transmits; /* Maximum Configure-Request transmissions */ + u8_t fsm_max_term_transmits; /* Maximum Terminate-Request transmissions */ + u8_t fsm_max_nak_loops; /* Maximum number of nak loops tolerated */ + + u8_t lcp_loopbackfail; /* Number of times we receive our magic number from the peer + before deciding the link is looped-back. */ + u8_t lcp_echo_interval; /* Interval between LCP echo-requests */ + u8_t lcp_echo_fails; /* Tolerance to unanswered echo-requests */ + +} ppp_settings; + +#if PPP_SERVER +struct ppp_addrs { +#if PPP_IPV4_SUPPORT + ip4_addr_t our_ipaddr, his_ipaddr, netmask; +#if LWIP_DNS + ip4_addr_t dns1, dns2; +#endif /* LWIP_DNS */ +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + ip6_addr_t our6_ipaddr, his6_ipaddr; +#endif /* PPP_IPV6_SUPPORT */ +}; +#endif /* PPP_SERVER */ + +/* + * PPP interface control block. + */ +struct ppp_pcb_s { + ppp_settings settings; + const struct link_callbacks *link_cb; + void *link_ctx_cb; + void (*link_status_cb)(ppp_pcb *pcb, int err_code, void *ctx); /* Status change callback */ +#if PPP_NOTIFY_PHASE + void (*notify_phase_cb)(ppp_pcb *pcb, u8_t phase, void *ctx); /* Notify phase callback */ +#endif /* PPP_NOTIFY_PHASE */ + void *ctx_cb; /* Callbacks optional pointer */ + struct netif *netif; /* PPP interface */ + u8_t phase; /* where the link is at */ + u8_t err_code; /* Code indicating why interface is down. */ + + /* flags */ +#if PPP_IPV4_SUPPORT + unsigned int ask_for_local :1; /* request our address from peer */ + unsigned int ipcp_is_open :1; /* haven't called np_finished() */ + unsigned int ipcp_is_up :1; /* have called ipcp_up() */ + unsigned int if4_up :1; /* True when the IPv4 interface is up. */ +#if 0 /* UNUSED - PROXY ARP */ + unsigned int proxy_arp_set :1; /* Have created proxy arp entry */ +#endif /* UNUSED - PROXY ARP */ +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + unsigned int ipv6cp_is_up :1; /* have called ip6cp_up() */ + unsigned int if6_up :1; /* True when the IPv6 interface is up. */ +#endif /* PPP_IPV6_SUPPORT */ + unsigned int lcp_echo_timer_running :1; /* set if a timer is running */ +#if VJ_SUPPORT + unsigned int vj_enabled :1; /* Flag indicating VJ compression enabled. */ +#endif /* VJ_SUPPORT */ +#if CCP_SUPPORT + unsigned int ccp_all_rejected :1; /* we rejected all peer's options */ +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT + unsigned int mppe_keys_set :1; /* Have the MPPE keys been set? 
*/ +#endif /* MPPE_SUPPORT */ + +#if PPP_AUTH_SUPPORT + /* auth data */ +#if PPP_SERVER && defined(HAVE_MULTILINK) + char peer_authname[MAXNAMELEN + 1]; /* The name by which the peer authenticated itself to us. */ +#endif /* PPP_SERVER && defined(HAVE_MULTILINK) */ + u16_t auth_pending; /* Records which authentication operations haven't completed yet. */ + u16_t auth_done; /* Records which authentication operations have been completed. */ + +#if PAP_SUPPORT + upap_state upap; /* PAP data */ +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + chap_client_state chap_client; /* CHAP client data */ +#if PPP_SERVER + chap_server_state chap_server; /* CHAP server data */ +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPORT */ + +#if EAP_SUPPORT + eap_state eap; /* EAP data */ +#endif /* EAP_SUPPORT */ +#endif /* PPP_AUTH_SUPPORT */ + + fsm lcp_fsm; /* LCP fsm structure */ + lcp_options lcp_wantoptions; /* Options that we want to request */ + lcp_options lcp_gotoptions; /* Options that peer ack'd */ + lcp_options lcp_allowoptions; /* Options we allow peer to request */ + lcp_options lcp_hisoptions; /* Options that we ack'd */ + u16_t peer_mru; /* currently negotiated peer MRU */ + u8_t lcp_echos_pending; /* Number of outstanding echo msgs */ + u8_t lcp_echo_number; /* ID number of next echo frame */ + + u8_t num_np_open; /* Number of network protocols which we have opened. */ + u8_t num_np_up; /* Number of network protocols which have come up. */ + +#if VJ_SUPPORT + struct vjcompress vj_comp; /* Van Jacobson compression header. */ +#endif /* VJ_SUPPORT */ + +#if CCP_SUPPORT + fsm ccp_fsm; /* CCP fsm structure */ + ccp_options ccp_wantoptions; /* what to request the peer to use */ + ccp_options ccp_gotoptions; /* what the peer agreed to do */ + ccp_options ccp_allowoptions; /* what we'll agree to do */ + ccp_options ccp_hisoptions; /* what we agreed to do */ + u8_t ccp_localstate; /* Local state (mainly for handling reset-reqs and reset-acks). */ + u8_t ccp_receive_method; /* Method chosen on receive path */ + u8_t ccp_transmit_method; /* Method chosen on transmit path */ +#if MPPE_SUPPORT + ppp_mppe_state mppe_comp; /* MPPE "compressor" structure */ + ppp_mppe_state mppe_decomp; /* MPPE "decompressor" structure */ +#endif /* MPPE_SUPPORT */ +#endif /* CCP_SUPPORT */ + +#if PPP_IPV4_SUPPORT + fsm ipcp_fsm; /* IPCP fsm structure */ + ipcp_options ipcp_wantoptions; /* Options that we want to request */ + ipcp_options ipcp_gotoptions; /* Options that peer ack'd */ + ipcp_options ipcp_allowoptions; /* Options we allow peer to request */ + ipcp_options ipcp_hisoptions; /* Options that we ack'd */ +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT + fsm ipv6cp_fsm; /* IPV6CP fsm structure */ + ipv6cp_options ipv6cp_wantoptions; /* Options that we want to request */ + ipv6cp_options ipv6cp_gotoptions; /* Options that peer ack'd */ + ipv6cp_options ipv6cp_allowoptions; /* Options we allow peer to request */ + ipv6cp_options ipv6cp_hisoptions; /* Options that we ack'd */ +#endif /* PPP_IPV6_SUPPORT */ +}; + +/************************ + *** PUBLIC FUNCTIONS *** + ************************/ + +/* + * WARNING: For multi-threads environment, all ppp_set_* functions most + * only be called while the PPP is in the dead phase (i.e. disconnected). + */ + +#if PPP_AUTH_SUPPORT +/* + * Set PPP authentication. + * + * Warning: Using PPPAUTHTYPE_ANY might have security consequences. 
+ * RFC 1994 says: + * + * In practice, within or associated with each PPP server, there is a + * database which associates "user" names with authentication + * information ("secrets"). It is not anticipated that a particular + * named user would be authenticated by multiple methods. This would + * make the user vulnerable to attacks which negotiate the least secure + * method from among a set (such as PAP rather than CHAP). If the same + * secret was used, PAP would reveal the secret to be used later with + * CHAP. + * + * Instead, for each user name there should be an indication of exactly + * one method used to authenticate that user name. If a user needs to + * make use of different authentication methods under different + * circumstances, then distinct user names SHOULD be employed, each of + * which identifies exactly one authentication method. + * + * Default is none auth type, unset (NULL) user and passwd. + */ +#define PPPAUTHTYPE_NONE 0x00 +#define PPPAUTHTYPE_PAP 0x01 +#define PPPAUTHTYPE_CHAP 0x02 +#define PPPAUTHTYPE_MSCHAP 0x04 +#define PPPAUTHTYPE_MSCHAP_V2 0x08 +#define PPPAUTHTYPE_EAP 0x10 +#define PPPAUTHTYPE_ANY 0xff +void ppp_set_auth(ppp_pcb *pcb, u8_t authtype, const char *user, const char *passwd); + +/* + * If set, peer is required to authenticate. This is mostly necessary for PPP server support. + * + * Default is false. + */ +#define ppp_set_auth_required(ppp, boolval) (ppp->settings.auth_required = boolval) +#endif /* PPP_AUTH_SUPPORT */ + +#if PPP_IPV4_SUPPORT +/* + * Set PPP interface "our" and "his" IPv4 addresses. This is mostly necessary for PPP server + * support but it can also be used on a PPP link where each side choose its own IP address. + * + * Default is unset (0.0.0.0). + */ +#define ppp_set_ipcp_ouraddr(ppp, addr) do { ppp->ipcp_wantoptions.ouraddr = ip4_addr_get_u32(addr); \ + ppp->ask_for_local = ppp->ipcp_wantoptions.ouraddr != 0; } while(0) +#define ppp_set_ipcp_hisaddr(ppp, addr) (ppp->ipcp_wantoptions.hisaddr = ip4_addr_get_u32(addr)) +#if LWIP_DNS +/* + * Set DNS server addresses that are sent if the peer asks for them. This is mostly necessary + * for PPP server support. + * + * Default is unset (0.0.0.0). + */ +#define ppp_set_ipcp_dnsaddr(ppp, index, addr) (ppp->ipcp_allowoptions.dnsaddr[index] = ip4_addr_get_u32(addr)) + +/* + * If set, we ask the peer for up to 2 DNS server addresses. Received DNS server addresses are + * registered using the dns_setserver() function. + * + * Default is false. + */ +#define ppp_set_usepeerdns(ppp, boolval) (ppp->settings.usepeerdns = boolval) +#endif /* LWIP_DNS */ +#endif /* PPP_IPV4_SUPPORT */ + +#if MPPE_SUPPORT +/* Disable MPPE (Microsoft Point to Point Encryption). This parameter is exclusive. */ +#define PPP_MPPE_DISABLE 0x00 +/* Require the use of MPPE (Microsoft Point to Point Encryption). */ +#define PPP_MPPE_ENABLE 0x01 +/* Allow MPPE to use stateful mode. Stateless mode is still attempted first. */ +#define PPP_MPPE_ALLOW_STATEFUL 0x02 +/* Refuse the use of MPPE with 40-bit encryption. Conflict with PPP_MPPE_REFUSE_128. */ +#define PPP_MPPE_REFUSE_40 0x04 +/* Refuse the use of MPPE with 128-bit encryption. Conflict with PPP_MPPE_REFUSE_40. */ +#define PPP_MPPE_REFUSE_128 0x08 +/* + * Set MPPE configuration + * + * Default is disabled. + */ +void ppp_set_mppe(ppp_pcb *pcb, u8_t flags); +#endif /* MPPE_SUPPORT */ + +/* + * Wait for up to intval milliseconds for a valid PPP packet from the peer. 
+ * At the end of this time, or when a valid PPP packet is received from the + * peer, we commence negotiation by sending our first LCP packet. + * + * Default is 0. + */ +#define ppp_set_listen_time(ppp, intval) (ppp->settings.listen_time = intval) + +/* + * If set, we will attempt to initiate a connection but if no reply is received from + * the peer, we will then just wait passively for a valid LCP packet from the peer. + * + * Default is false. + */ +#define ppp_set_passive(ppp, boolval) (ppp->lcp_wantoptions.passive = boolval) + +/* + * If set, we will not transmit LCP packets to initiate a connection until a valid + * LCP packet is received from the peer. This is what we usually call the server mode. + * + * Default is false. + */ +#define ppp_set_silent(ppp, boolval) (ppp->lcp_wantoptions.silent = boolval) + +/* + * If set, enable protocol field compression negotiation in both the receive and + * the transmit direction. + * + * Default is true. + */ +#define ppp_set_neg_pcomp(ppp, boolval) (ppp->lcp_wantoptions.neg_pcompression = \ + ppp->lcp_allowoptions.neg_pcompression = boolval) + +/* + * If set, enable Address/Control compression in both the receive and the transmit + * direction. + * + * Default is true. + */ +#define ppp_set_neg_accomp(ppp, boolval) (ppp->lcp_wantoptions.neg_accompression = \ + ppp->lcp_allowoptions.neg_accompression = boolval) + +/* + * If set, enable asyncmap negotiation. Otherwise forcing all control characters to + * be escaped for both the transmit and the receive direction. + * + * Default is true. + */ +#define ppp_set_neg_asyncmap(ppp, boolval) (ppp->lcp_wantoptions.neg_asyncmap = \ + ppp->lcp_allowoptions.neg_asyncmap = boolval) + +/* + * This option sets the Async-Control-Character-Map (ACCM) for this end of the link. + * The ACCM is a set of 32 bits, one for each of the ASCII control characters with + * values from 0 to 31, where a 1 bit indicates that the corresponding control + * character should not be used in PPP packets sent to this system. The map is + * an unsigned 32 bits integer where the least significant bit (00000001) represents + * character 0 and the most significant bit (80000000) represents character 31. + * We will then ask the peer to send these characters as a 2-byte escape sequence. + * + * Default is 0. + */ +#define ppp_set_asyncmap(ppp, intval) (ppp->lcp_wantoptions.asyncmap = intval) + +/* + * Set a PPP interface as the default network interface + * (used to output all packets for which no specific route is found). + */ +#define ppp_set_default(ppp) netif_set_default(ppp->netif) + +#if PPP_NOTIFY_PHASE +/* + * Set a PPP notify phase callback. + * + * This can be used for example to set a LED pattern depending on the + * current phase of the PPP session. + */ +typedef void (*ppp_notify_phase_cb_fn)(ppp_pcb *pcb, u8_t phase, void *ctx); +void ppp_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb); +#endif /* PPP_NOTIFY_PHASE */ + +/* + * Initiate a PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * Holdoff is the time to wait (in seconds) before initiating + * the connection. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_connect(ppp_pcb *pcb, u16_t holdoff); + +#if PPP_SERVER +/* + * Listen for an incoming PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. 
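For orientation, here is a minimal sketch of how the public API declared above (ppp_set_auth(), ppp_set_usepeerdns(), ppp_set_default(), ppp_connect(), the PPPERR_* codes and the link status callback) is typically driven from application code. It is only an illustrative sketch, not part of the imported sources: it assumes the PPPoS glue from pppos.h (not part of this header) and a hypothetical uart_write() standing in for the board's serial driver; in an RTOS build such as this one the pppapi_* wrappers would normally be used for thread safety.

#include "lwip/arch.h"
#include "netif/ppp/ppp.h"
#include "netif/ppp/pppos.h"                 /* pppos_create(); separate from this header */

extern u32_t uart_write(u8_t *data, u32_t len);  /* hypothetical serial driver hook */

static struct netif ppp_netif;
static ppp_pcb *ppp;

/* PPPoS output callback: hand raw HDLC-framed bytes to the UART. */
static u32_t ppp_output_cb(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx)
{
    LWIP_UNUSED_ARG(pcb);
    LWIP_UNUSED_ARG(ctx);
    return uart_write(data, len);
}

/* Link status callback: err_code is one of the PPPERR_* values defined above. */
static void ppp_status_cb(ppp_pcb *pcb, int err_code, void *ctx)
{
    LWIP_UNUSED_ARG(ctx);
    if (err_code == PPPERR_NONE) {
        /* Connected: the netif now holds the negotiated addresses. */
        return;
    }
    if (err_code == PPPERR_USER) {
        /* ppp_close() has completed: the session is dead, so the pcb may be freed. */
        ppp_free(pcb);
        return;
    }
    /* Any other code: the link dropped; retry after a 30 s holdoff. */
    ppp_connect(pcb, 30);
}

void ppp_client_start(void)
{
    ppp = pppos_create(&ppp_netif, ppp_output_cb, ppp_status_cb, NULL);
    ppp_set_auth(ppp, PPPAUTHTYPE_PAP, "user", "password");
    ppp_set_usepeerdns(ppp, 1);  /* register DNS servers supplied by the peer */
    ppp_set_default(ppp);        /* route packets with no more specific route here */
    ppp_connect(ppp, 0);         /* start LCP negotiation immediately */
}

Received serial bytes would then be fed back into the stack with pppos_input() (or pppos_input_tcpip() when running on top of the tcpip thread).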
+ */ +err_t ppp_listen(ppp_pcb *pcb); +#endif /* PPP_SERVER */ + +/* + * Initiate the end of a PPP connection. + * Any outstanding packets in the queues are dropped. + * + * Setting nocarrier to 1 close the PPP connection without initiating the + * shutdown procedure. Always using nocarrier = 0 is still recommended, + * this is going to take a little longer time if your link is down, but + * is a safer choice for the PPP state machine. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_close(ppp_pcb *pcb, u8_t nocarrier); + +/* + * Release the control block. + * + * This can only be called if PPP is in the dead phase. + * + * You must use ppp_close() before if you wish to terminate + * an established PPP session. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_free(ppp_pcb *pcb); + +/* + * PPP IOCTL commands. + * + * Get the up status - 0 for down, non-zero for up. The argument must + * point to an int. + */ +#define PPPCTLG_UPSTATUS 0 + +/* + * Get the PPP error code. The argument must point to an int. + * Returns a PPPERR_* value. + */ +#define PPPCTLG_ERRCODE 1 + +/* + * Get the fd associated with a PPP over serial + */ +#define PPPCTLG_FD 2 + +/* + * Get and set parameters for the given connection. + * Return 0 on success, an error code on failure. + */ +err_t ppp_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg); + +/* Get the PPP netif interface */ +#define ppp_netif(ppp) (ppp->netif) + +/* Set an lwIP-style status-callback for the selected PPP device */ +#define ppp_set_netif_statuscallback(ppp, status_cb) \ + netif_set_status_callback(ppp->netif, status_cb); + +/* Set an lwIP-style link-callback for the selected PPP device */ +#define ppp_set_netif_linkcallback(ppp, link_cb) \ + netif_set_link_callback(ppp->netif, link_cb); + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_H */ + +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h new file mode 100644 index 000000000..40843d5a0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h @@ -0,0 +1,722 @@ +/***************************************************************************** +* ppp.h - Network Point to Point Protocol header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original derived from BSD codes. +*****************************************************************************/ +#ifndef LWIP_HDR_PPP_IMPL_H +#define LWIP_HDR_PPP_IMPL_H + +#include "netif/ppp/ppp_opts.h" + +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifdef PPP_INCLUDE_SETTINGS_HEADER +#include "ppp_settings.h" +#endif + +#include /* formats */ +#include +#include +#include /* strtol() */ + +#include "lwip/netif.h" +#include "lwip/def.h" +#include "lwip/timeouts.h" + +#include "ppp.h" +#include "pppdebug.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Memory used for control packets. + * + * PPP_CTRL_PBUF_MAX_SIZE is the amount of memory we allocate when we + * cannot figure out how much we are going to use before filling the buffer. + */ +#if PPP_USE_PBUF_RAM +#define PPP_CTRL_PBUF_TYPE PBUF_RAM +#define PPP_CTRL_PBUF_MAX_SIZE 512 +#else /* PPP_USE_PBUF_RAM */ +#define PPP_CTRL_PBUF_TYPE PBUF_POOL +#define PPP_CTRL_PBUF_MAX_SIZE PBUF_POOL_BUFSIZE +#endif /* PPP_USE_PBUF_RAM */ + +/* + * The basic PPP frame. + */ +#define PPP_ADDRESS(p) (((u_char *)(p))[0]) +#define PPP_CONTROL(p) (((u_char *)(p))[1]) +#define PPP_PROTOCOL(p) ((((u_char *)(p))[2] << 8) + ((u_char *)(p))[3]) + +/* + * Significant octet values. + */ +#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */ +#define PPP_UI 0x03 /* Unnumbered Information */ +#define PPP_FLAG 0x7e /* Flag Sequence */ +#define PPP_ESCAPE 0x7d /* Asynchronous Control Escape */ +#define PPP_TRANS 0x20 /* Asynchronous transparency modifier */ + +/* + * Protocol field values. 
+ */ +#define PPP_IP 0x21 /* Internet Protocol */ +#if 0 /* UNUSED */ +#define PPP_AT 0x29 /* AppleTalk Protocol */ +#define PPP_IPX 0x2b /* IPX protocol */ +#endif /* UNUSED */ +#if VJ_SUPPORT +#define PPP_VJC_COMP 0x2d /* VJ compressed TCP */ +#define PPP_VJC_UNCOMP 0x2f /* VJ uncompressed TCP */ +#endif /* VJ_SUPPORT */ +#if PPP_IPV6_SUPPORT +#define PPP_IPV6 0x57 /* Internet Protocol Version 6 */ +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT +#define PPP_COMP 0xfd /* compressed packet */ +#endif /* CCP_SUPPORT */ +#define PPP_IPCP 0x8021 /* IP Control Protocol */ +#if 0 /* UNUSED */ +#define PPP_ATCP 0x8029 /* AppleTalk Control Protocol */ +#define PPP_IPXCP 0x802b /* IPX Control Protocol */ +#endif /* UNUSED */ +#if PPP_IPV6_SUPPORT +#define PPP_IPV6CP 0x8057 /* IPv6 Control Protocol */ +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT +#define PPP_CCP 0x80fd /* Compression Control Protocol */ +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT +#define PPP_ECP 0x8053 /* Encryption Control Protocol */ +#endif /* ECP_SUPPORT */ +#define PPP_LCP 0xc021 /* Link Control Protocol */ +#if PAP_SUPPORT +#define PPP_PAP 0xc023 /* Password Authentication Protocol */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT +#define PPP_LQR 0xc025 /* Link Quality Report protocol */ +#endif /* LQR_SUPPORT */ +#if CHAP_SUPPORT +#define PPP_CHAP 0xc223 /* Cryptographic Handshake Auth. Protocol */ +#endif /* CHAP_SUPPORT */ +#if CBCP_SUPPORT +#define PPP_CBCP 0xc029 /* Callback Control Protocol */ +#endif /* CBCP_SUPPORT */ +#if EAP_SUPPORT +#define PPP_EAP 0xc227 /* Extensible Authentication Protocol */ +#endif /* EAP_SUPPORT */ + +/* + * The following struct gives the addresses of procedures to call + * for a particular lower link level protocol. + */ +struct link_callbacks { + /* Start a connection (e.g. Initiate discovery phase) */ + void (*connect) (ppp_pcb *pcb, void *ctx); +#if PPP_SERVER + /* Listen for an incoming connection (Passive mode) */ + void (*listen) (ppp_pcb *pcb, void *ctx); +#endif /* PPP_SERVER */ + /* End a connection (i.e. initiate disconnect phase) */ + void (*disconnect) (ppp_pcb *pcb, void *ctx); + /* Free lower protocol control block */ + err_t (*free) (ppp_pcb *pcb, void *ctx); + /* Write a pbuf to a ppp link, only used from PPP functions to send PPP packets. */ + err_t (*write)(ppp_pcb *pcb, void *ctx, struct pbuf *p); + /* Send a packet from lwIP core (IPv4 or IPv6) */ + err_t (*netif_output)(ppp_pcb *pcb, void *ctx, struct pbuf *p, u_short protocol); + /* configure the transmit-side characteristics of the PPP interface */ + void (*send_config)(ppp_pcb *pcb, void *ctx, u32_t accm, int pcomp, int accomp); + /* confire the receive-side characteristics of the PPP interface */ + void (*recv_config)(ppp_pcb *pcb, void *ctx, u32_t accm, int pcomp, int accomp); +}; + +/* + * What to do with network protocol (NP) packets. + */ +enum NPmode { + NPMODE_PASS, /* pass the packet through */ + NPMODE_DROP, /* silently drop the packet */ + NPMODE_ERROR, /* return an error */ + NPMODE_QUEUE /* save it up for later. */ +}; + +/* + * Statistics. 
+ */ +#if PPP_STATS_SUPPORT +struct pppstat { + unsigned int ppp_ibytes; /* bytes received */ + unsigned int ppp_ipackets; /* packets received */ + unsigned int ppp_ierrors; /* receive errors */ + unsigned int ppp_obytes; /* bytes sent */ + unsigned int ppp_opackets; /* packets sent */ + unsigned int ppp_oerrors; /* transmit errors */ +}; + +#if VJ_SUPPORT +struct vjstat { + unsigned int vjs_packets; /* outbound packets */ + unsigned int vjs_compressed; /* outbound compressed packets */ + unsigned int vjs_searches; /* searches for connection state */ + unsigned int vjs_misses; /* times couldn't find conn. state */ + unsigned int vjs_uncompressedin; /* inbound uncompressed packets */ + unsigned int vjs_compressedin; /* inbound compressed packets */ + unsigned int vjs_errorin; /* inbound unknown type packets */ + unsigned int vjs_tossed; /* inbound packets tossed because of error */ +}; +#endif /* VJ_SUPPORT */ + +struct ppp_stats { + struct pppstat p; /* basic PPP statistics */ +#if VJ_SUPPORT + struct vjstat vj; /* VJ header compression statistics */ +#endif /* VJ_SUPPORT */ +}; + +#if CCP_SUPPORT +struct compstat { + unsigned int unc_bytes; /* total uncompressed bytes */ + unsigned int unc_packets; /* total uncompressed packets */ + unsigned int comp_bytes; /* compressed bytes */ + unsigned int comp_packets; /* compressed packets */ + unsigned int inc_bytes; /* incompressible bytes */ + unsigned int inc_packets; /* incompressible packets */ + unsigned int ratio; /* recent compression ratio << 8 */ +}; + +struct ppp_comp_stats { + struct compstat c; /* packet compression statistics */ + struct compstat d; /* packet decompression statistics */ +}; +#endif /* CCP_SUPPORT */ + +#endif /* PPP_STATS_SUPPORT */ + +#if PPP_IDLETIMELIMIT +/* + * The following structure records the time in seconds since + * the last NP packet was sent or received. + */ +struct ppp_idle { + time_t xmit_idle; /* time since last NP packet sent */ + time_t recv_idle; /* time since last NP packet received */ +}; +#endif /* PPP_IDLETIMELIMIT */ + +/* values for epdisc.class */ +#define EPD_NULL 0 /* null discriminator, no data */ +#define EPD_LOCAL 1 +#define EPD_IP 2 +#define EPD_MAC 3 +#define EPD_MAGIC 4 +#define EPD_PHONENUM 5 + +/* + * Global variables. + */ +#ifdef HAVE_MULTILINK +extern u8_t multilink; /* enable multilink operation */ +extern u8_t doing_multilink; +extern u8_t multilink_master; +extern u8_t bundle_eof; +extern u8_t bundle_terminating; +#endif + +#ifdef MAXOCTETS +extern unsigned int maxoctets; /* Maximum octetes per session (in bytes) */ +extern int maxoctets_dir; /* Direction : + 0 - in+out (default) + 1 - in + 2 - out + 3 - max(in,out) */ +extern int maxoctets_timeout; /* Timeout for check of octets limit */ +#define PPP_OCTETS_DIRECTION_SUM 0 +#define PPP_OCTETS_DIRECTION_IN 1 +#define PPP_OCTETS_DIRECTION_OUT 2 +#define PPP_OCTETS_DIRECTION_MAXOVERAL 3 +/* same as previos, but little different on RADIUS side */ +#define PPP_OCTETS_DIRECTION_MAXSESSION 4 +#endif + +/* Data input may be used by CCP and ECP, remove this entry + * from struct protent to save some flash + */ +#define PPP_DATAINPUT 0 + +/* + * The following struct gives the addresses of procedures to call + * for a particular protocol. 
+ */ +struct protent { + u_short protocol; /* PPP protocol number */ + /* Initialization procedure */ + void (*init) (ppp_pcb *pcb); + /* Process a received packet */ + void (*input) (ppp_pcb *pcb, u_char *pkt, int len); + /* Process a received protocol-reject */ + void (*protrej) (ppp_pcb *pcb); + /* Lower layer has come up */ + void (*lowerup) (ppp_pcb *pcb); + /* Lower layer has gone down */ + void (*lowerdown) (ppp_pcb *pcb); + /* Open the protocol */ + void (*open) (ppp_pcb *pcb); + /* Close the protocol */ + void (*close) (ppp_pcb *pcb, const char *reason); +#if PRINTPKT_SUPPORT + /* Print a packet in readable form */ + int (*printpkt) (const u_char *pkt, int len, + void (*printer) (void *, const char *, ...), + void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + /* Process a received data packet */ + void (*datainput) (ppp_pcb *pcb, u_char *pkt, int len); +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + const char *name; /* Text name of protocol */ + const char *data_name; /* Text name of corresponding data protocol */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + option_t *options; /* List of command-line options */ + /* Check requested options, assign defaults */ + void (*check_options) (void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + /* Configure interface for demand-dial */ + int (*demand_conf) (int unit); + /* Say whether to bring up link for this pkt */ + int (*active_pkt) (u_char *pkt, int len); +#endif /* DEMAND_SUPPORT */ +}; + +/* Table of pointers to supported protocols */ +extern const struct protent* const protocols[]; + + +/* Values for auth_pending, auth_done */ +#if PAP_SUPPORT +#define PAP_WITHPEER 0x1 +#define PAP_PEER 0x2 +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#define CHAP_WITHPEER 0x4 +#define CHAP_PEER 0x8 +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#define EAP_WITHPEER 0x10 +#define EAP_PEER 0x20 +#endif /* EAP_SUPPORT */ + +/* Values for auth_done only */ +#if CHAP_SUPPORT +#define CHAP_MD5_WITHPEER 0x40 +#define CHAP_MD5_PEER 0x80 +#if MSCHAP_SUPPORT +#define CHAP_MS_SHIFT 8 /* LSB position for MS auths */ +#define CHAP_MS_WITHPEER 0x100 +#define CHAP_MS_PEER 0x200 +#define CHAP_MS2_WITHPEER 0x400 +#define CHAP_MS2_PEER 0x800 +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ + +/* Supported CHAP protocols */ +#if CHAP_SUPPORT + +#if MSCHAP_SUPPORT +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_MICROSOFT_V2 | MDTYPE_MICROSOFT | MDTYPE_MD5) +#else /* MSCHAP_SUPPORT */ +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_MD5) +#endif /* MSCHAP_SUPPORT */ + +#else /* CHAP_SUPPORT */ +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_NONE) +#endif /* CHAP_SUPPORT */ + +#if PPP_STATS_SUPPORT +/* + * PPP statistics structure + */ +struct pppd_stats { + unsigned int bytes_in; + unsigned int bytes_out; + unsigned int pkts_in; + unsigned int pkts_out; +}; +#endif /* PPP_STATS_SUPPORT */ + + +/* + * PPP private functions + */ + + +/* + * Functions called from lwIP core. + */ + +/* initialize the PPP subsystem */ +int ppp_init(void); + +/* + * Functions called from PPP link protocols. + */ + +/* Create a new PPP control block */ +ppp_pcb *ppp_new(struct netif *pppif, const struct link_callbacks *callbacks, void *link_ctx_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +/* Initiate LCP open request */ +void ppp_start(ppp_pcb *pcb); + +/* Called when link failed to setup */ +void ppp_link_failed(ppp_pcb *pcb); + +/* Called when link is normally down (i.e. 
it was asked to end) */ +void ppp_link_end(ppp_pcb *pcb); + +/* function called to process input packet */ +void ppp_input(ppp_pcb *pcb, struct pbuf *pb); + + +/* + * Functions called by PPP protocols. + */ + +/* function called by all PPP subsystems to send packets */ +err_t ppp_write(ppp_pcb *pcb, struct pbuf *p); + +/* functions called by auth.c link_terminated() */ +void ppp_link_terminated(ppp_pcb *pcb); + +void new_phase(ppp_pcb *pcb, int p); + +int ppp_send_config(ppp_pcb *pcb, int mtu, u32_t accm, int pcomp, int accomp); +int ppp_recv_config(ppp_pcb *pcb, int mru, u32_t accm, int pcomp, int accomp); + +#if PPP_IPV4_SUPPORT +int sifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr, u32_t netmask); +int cifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr); +#if 0 /* UNUSED - PROXY ARP */ +int sifproxyarp(ppp_pcb *pcb, u32_t his_adr); +int cifproxyarp(ppp_pcb *pcb, u32_t his_adr); +#endif /* UNUSED - PROXY ARP */ +#if LWIP_DNS +int sdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2); +int cdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2); +#endif /* LWIP_DNS */ +#if VJ_SUPPORT +int sifvjcomp(ppp_pcb *pcb, int vjcomp, int cidcomp, int maxcid); +#endif /* VJ_SUPPORT */ +int sifup(ppp_pcb *pcb); +int sifdown (ppp_pcb *pcb); +u32_t get_mask(u32_t addr); +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +int sif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64); +int cif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64); +int sif6up(ppp_pcb *pcb); +int sif6down (ppp_pcb *pcb); +#endif /* PPP_IPV6_SUPPORT */ + +#if DEMAND_SUPPORT +int sifnpmode(ppp_pcb *pcb, int proto, enum NPmode mode); +#endif /* DEMAND_SUPPORt */ + +void netif_set_mtu(ppp_pcb *pcb, int mtu); +int netif_get_mtu(ppp_pcb *pcb); + +#if CCP_SUPPORT +#if 0 /* unused */ +int ccp_test(ppp_pcb *pcb, u_char *opt_ptr, int opt_len, int for_transmit); +#endif /* unused */ +void ccp_set(ppp_pcb *pcb, u8_t isopen, u8_t isup, u8_t receive_method, u8_t transmit_method); +void ccp_reset_comp(ppp_pcb *pcb); +void ccp_reset_decomp(ppp_pcb *pcb); +#if 0 /* unused */ +int ccp_fatal_error(ppp_pcb *pcb); +#endif /* unused */ +#endif /* CCP_SUPPORT */ + +#if PPP_IDLETIMELIMIT +int get_idle_time(ppp_pcb *pcb, struct ppp_idle *ip); +#endif /* PPP_IDLETIMELIMIT */ + +#if DEMAND_SUPPORT +int get_loop_output(void); +#endif /* DEMAND_SUPPORT */ + +/* Optional protocol names list, to make our messages a little more informative. */ +#if PPP_PROTOCOLNAME +const char * protocol_name(int proto); +#endif /* PPP_PROTOCOLNAME */ + +/* Optional stats support, to get some statistics on the PPP interface */ +#if PPP_STATS_SUPPORT +void print_link_stats(void); /* Print stats, if available */ +void reset_link_stats(int u); /* Reset (init) stats when link goes up */ +void update_link_stats(int u); /* Get stats at link termination */ +#endif /* PPP_STATS_SUPPORT */ + + + +/* + * Inline versions of get/put char/short/long. + * Pointer is advanced; we assume that both arguments + * are lvalues and will already be in registers. + * cp MUST be u_char *. 
+ */ +#define GETCHAR(c, cp) { \ + (c) = *(cp)++; \ +} +#define PUTCHAR(c, cp) { \ + *(cp)++ = (u_char) (c); \ +} +#define GETSHORT(s, cp) { \ + (s) = *(cp)++ << 8; \ + (s) |= *(cp)++; \ +} +#define PUTSHORT(s, cp) { \ + *(cp)++ = (u_char) ((s) >> 8); \ + *(cp)++ = (u_char) (s); \ +} +#define GETLONG(l, cp) { \ + (l) = *(cp)++ << 8; \ + (l) |= *(cp)++; (l) <<= 8; \ + (l) |= *(cp)++; (l) <<= 8; \ + (l) |= *(cp)++; \ +} +#define PUTLONG(l, cp) { \ + *(cp)++ = (u_char) ((l) >> 24); \ + *(cp)++ = (u_char) ((l) >> 16); \ + *(cp)++ = (u_char) ((l) >> 8); \ + *(cp)++ = (u_char) (l); \ +} + +#define INCPTR(n, cp) ((cp) += (n)) +#define DECPTR(n, cp) ((cp) -= (n)) + +/* + * System dependent definitions for user-level 4.3BSD UNIX implementation. + */ +#define TIMEOUT(f, a, t) do { sys_untimeout((f), (a)); sys_timeout((t)*1000, (f), (a)); } while(0) +#define TIMEOUTMS(f, a, t) do { sys_untimeout((f), (a)); sys_timeout((t), (f), (a)); } while(0) +#define UNTIMEOUT(f, a) sys_untimeout((f), (a)) + +#define BZERO(s, n) memset(s, 0, n) +#define BCMP(s1, s2, l) memcmp(s1, s2, l) + +#define PRINTMSG(m, l) { ppp_info("Remote message: %0.*v", l, m); } + +/* + * MAKEHEADER - Add Header fields to a packet. + */ +#define MAKEHEADER(p, t) { \ + PUTCHAR(PPP_ALLSTATIONS, p); \ + PUTCHAR(PPP_UI, p); \ + PUTSHORT(t, p); } + +/* Procedures exported from auth.c */ +void link_required(ppp_pcb *pcb); /* we are starting to use the link */ +void link_terminated(ppp_pcb *pcb); /* we are finished with the link */ +void link_down(ppp_pcb *pcb); /* the LCP layer has left the Opened state */ +void upper_layers_down(ppp_pcb *pcb); /* take all NCPs down */ +void link_established(ppp_pcb *pcb); /* the link is up; authenticate now */ +void start_networks(ppp_pcb *pcb); /* start all the network control protos */ +void continue_networks(ppp_pcb *pcb); /* start network [ip, etc] control protos */ +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +int auth_check_passwd(ppp_pcb *pcb, char *auser, int userlen, char *apasswd, int passwdlen, const char **msg, int *msglen); + /* check the user name and passwd against configuration */ +void auth_peer_fail(ppp_pcb *pcb, int protocol); + /* peer failed to authenticate itself */ +void auth_peer_success(ppp_pcb *pcb, int protocol, int prot_flavor, const char *name, int namelen); + /* peer successfully authenticated itself */ +#endif /* PPP_SERVER */ +void auth_withpeer_fail(ppp_pcb *pcb, int protocol); + /* we failed to authenticate ourselves */ +void auth_withpeer_success(ppp_pcb *pcb, int protocol, int prot_flavor); + /* we successfully authenticated ourselves */ +#endif /* PPP_AUTH_SUPPORT */ +void np_up(ppp_pcb *pcb, int proto); /* a network protocol has come up */ +void np_down(ppp_pcb *pcb, int proto); /* a network protocol has gone down */ +void np_finished(ppp_pcb *pcb, int proto); /* a network protocol no longer needs link */ +#if PPP_AUTH_SUPPORT +int get_secret(ppp_pcb *pcb, const char *client, const char *server, char *secret, int *secret_len, int am_server); + /* get "secret" for chap */ +#endif /* PPP_AUTH_SUPPORT */ + +/* Procedures exported from ipcp.c */ +/* int parse_dotted_ip (char *, u32_t *); */ + +/* Procedures exported from demand.c */ +#if DEMAND_SUPPORT +void demand_conf (void); /* config interface(s) for demand-dial */ +void demand_block (void); /* set all NPs to queue up packets */ +void demand_unblock (void); /* set all NPs to pass packets */ +void demand_discard (void); /* set all NPs to discard packets */ +void demand_rexmit (int, u32_t); /* retransmit saved frames for an NP*/ 
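/*
 * Editor's illustration -- not part of the vendored lwIP sources.
 * A minimal sketch of how the byte-order macros defined earlier in this
 * header (MAKEHEADER, INCPTR, GETSHORT) are meant to be used; the helper
 * names build_lcp_header() and read_protocol() are hypothetical and exist
 * only for this example.
 */
#if 0 /* illustration only, never compiled */
static u_char *build_lcp_header(u_char *buf)
{
    u_char *p = buf;
    MAKEHEADER(p, PPP_LCP);    /* emits 0xff 0x03 0xc0 0x21 and advances p by 4 */
    return p;                  /* the LCP payload is appended at the returned pointer */
}

static u_short read_protocol(u_char *buf)
{
    u_char *p = buf;
    u_short proto;
    INCPTR(2, p);              /* skip the address (0xff) and control (0x03) octets */
    GETSHORT(proto, p);        /* proto is now PPP_LCP (0xc021) for the frame above */
    return proto;              /* equivalent to PPP_PROTOCOL(buf) */
}
#endif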
+int loop_chars (unsigned char *, int); /* process chars from loopback */ +int loop_frame (unsigned char *, int); /* should we bring link up? */ +#endif /* DEMAND_SUPPORT */ + +/* Procedures exported from multilink.c */ +#ifdef HAVE_MULTILINK +void mp_check_options (void); /* Check multilink-related options */ +int mp_join_bundle (void); /* join our link to an appropriate bundle */ +void mp_exit_bundle (void); /* have disconnected our link from bundle */ +void mp_bundle_terminated (void); +char *epdisc_to_str (struct epdisc *); /* string from endpoint discrim. */ +int str_to_epdisc (struct epdisc *, char *); /* endpt disc. from str */ +#else +#define mp_bundle_terminated() /* nothing */ +#define mp_exit_bundle() /* nothing */ +#define doing_multilink 0 +#define multilink_master 0 +#endif + +/* Procedures exported from utils.c. */ +void ppp_print_string(const u_char *p, int len, void (*printer) (void *, const char *, ...), void *arg); /* Format a string for output */ +int ppp_slprintf(char *buf, int buflen, const char *fmt, ...); /* sprintf++ */ +int ppp_vslprintf(char *buf, int buflen, const char *fmt, va_list args); /* vsprintf++ */ +size_t ppp_strlcpy(char *dest, const char *src, size_t len); /* safe strcpy */ +size_t ppp_strlcat(char *dest, const char *src, size_t len); /* safe strncpy */ +void ppp_dbglog(const char *fmt, ...); /* log a debug message */ +void ppp_info(const char *fmt, ...); /* log an informational message */ +void ppp_notice(const char *fmt, ...); /* log a notice-level message */ +void ppp_warn(const char *fmt, ...); /* log a warning message */ +void ppp_error(const char *fmt, ...); /* log an error message */ +void ppp_fatal(const char *fmt, ...); /* log an error message and die(1) */ +#if PRINTPKT_SUPPORT +void ppp_dump_packet(ppp_pcb *pcb, const char *tag, unsigned char *p, int len); + /* dump packet to debug log if interesting */ +#endif /* PRINTPKT_SUPPORT */ + +/* + * Number of necessary timers analysis. + * + * PPP use at least one timer per each of its protocol, but not all protocols are + * active at the same time, thus the number of necessary timeouts is actually + * lower than enabled protocols. Here is the actual necessary timeouts based + * on code analysis. + * + * Note that many features analysed here are not working at all and are only + * there for a comprehensive analysis of necessary timers in order to prevent + * having to redo that each time we add a feature. + * + * Timer list + * + * | holdoff timeout + * | low level protocol timeout (PPPoE or PPPoL2P) + * | LCP delayed UP + * | LCP retransmit (FSM) + * | LCP Echo timer + * .| PAP or CHAP or EAP authentication + * . | ECP retransmit (FSM) + * . | CCP retransmit (FSM) when MPPE is enabled + * . | CCP retransmit (FSM) when MPPE is NOT enabled + * . | IPCP retransmit (FSM) + * . .| IP6CP retransmit (FSM) + * . . | Idle time limit + * . . | Max connect time + * . . | Max octets + * . . | CCP RACK timeout + * . . . + * PPP_PHASE_DEAD + * PPP_PHASE_HOLDOFF + * | . . . + * PPP_PHASE_INITIALIZE + * | . . . + * PPP_PHASE_ESTABLISH + * | . . . + * |. . . + * | . . + * PPP_PHASE_AUTHENTICATE + * | . . + * || . . + * PPP_PHASE_NETWORK + * | || . . + * | ||| . + * PPP_PHASE_RUNNING + * | .||||| + * | . |||| + * PPP_PHASE_TERMINATE + * | . |||| + * PPP_PHASE_NETWORK + * |. . + * PPP_PHASE_ESTABLISH + * PPP_PHASE_DISCONNECT + * PPP_PHASE_DEAD + * + * Alright, PPP basic retransmission and LCP Echo consume one timer. + * 1 + * + * If authentication is enabled one timer is necessary during authentication. 
+ * 1 + PPP_AUTH_SUPPORT + * + * If ECP is enabled one timer is necessary before IPCP and/or IP6CP, one more + * is necessary if CCP is enabled (only with MPPE support but we don't care much + * up to this detail level). + * 1 + ECP_SUPPORT + CCP_SUPPORT + * + * If CCP is enabled it might consume a timer during IPCP or IP6CP, thus + * we might use IPCP, IP6CP and CCP timers simultaneously. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT + * + * When entering running phase, IPCP or IP6CP is still running. If idle time limit + * is enabled one more timer is necessary. Same for max connect time and max + * octets features. Furthermore CCP RACK might be used past this point. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS + CCP_SUPPORT + * + * IPv4 or IPv6 must be enabled, therefore we don't need to take care the authentication + * and the CCP + ECP case, thus reducing overall complexity. + * 1 + LWIP_MAX(PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT, PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS + CCP_SUPPORT) + * + * We don't support PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS features + * and adding those defines to ppp_opts.h just for having the value always + * defined to 0 isn't worth it. + * 1 + LWIP_MAX(PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT, PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + CCP_SUPPORT) + * + * Thus, the following is enough for now. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT + */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_SUPPORT */ +#endif /* LWIP_HDR_PPP_IMPL_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h new file mode 100644 index 000000000..6702bec61 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h @@ -0,0 +1,610 @@ +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + */ + +#ifndef LWIP_PPP_OPTS_H +#define LWIP_PPP_OPTS_H + +#include "lwip/opt.h" + +/** + * PPP_SUPPORT==1: Enable PPP. + */ +#ifndef PPP_SUPPORT +#define PPP_SUPPORT 0 +#endif + +/** + * PPPOE_SUPPORT==1: Enable PPP Over Ethernet + */ +#ifndef PPPOE_SUPPORT +#define PPPOE_SUPPORT 0 +#endif + +/** + * PPPOL2TP_SUPPORT==1: Enable PPP Over L2TP + */ +#ifndef PPPOL2TP_SUPPORT +#define PPPOL2TP_SUPPORT 0 +#endif + +/** + * PPPOL2TP_AUTH_SUPPORT==1: Enable PPP Over L2TP Auth (enable MD5 support) + */ +#ifndef PPPOL2TP_AUTH_SUPPORT +#define PPPOL2TP_AUTH_SUPPORT PPPOL2TP_SUPPORT +#endif + +/** + * PPPOS_SUPPORT==1: Enable PPP Over Serial + */ +#ifndef PPPOS_SUPPORT +#define PPPOS_SUPPORT PPP_SUPPORT +#endif + +/** + * LWIP_PPP_API==1: Enable PPP API (in pppapi.c) + */ +#ifndef LWIP_PPP_API +#define LWIP_PPP_API (PPP_SUPPORT && (NO_SYS == 0)) +#endif + +#if PPP_SUPPORT + +/** + * MEMP_NUM_PPP_PCB: the number of simultaneously active PPP + * connections (requires the PPP_SUPPORT option) + */ +#ifndef MEMP_NUM_PPP_PCB +#define MEMP_NUM_PPP_PCB 1 +#endif + +/** + * PPP_NUM_TIMEOUTS_PER_PCB: the number of sys_timeouts running in parallel per + * ppp_pcb. See the detailed explanation at the end of ppp_impl.h about simultaneous + * timers analysis. + */ +#ifndef PPP_NUM_TIMEOUTS_PER_PCB +#define PPP_NUM_TIMEOUTS_PER_PCB (1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT) +#endif + +/* The number of sys_timeouts required for the PPP module */ +#define PPP_NUM_TIMEOUTS (PPP_SUPPORT * PPP_NUM_TIMEOUTS_PER_PCB * MEMP_NUM_PPP_PCB) + +/** + * MEMP_NUM_PPPOS_INTERFACES: the number of concurrently active PPPoS + * interfaces (only used with PPPOS_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOS_INTERFACES +#define MEMP_NUM_PPPOS_INTERFACES MEMP_NUM_PPP_PCB +#endif + +/** + * MEMP_NUM_PPPOE_INTERFACES: the number of concurrently active PPPoE + * interfaces (only used with PPPOE_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOE_INTERFACES +#define MEMP_NUM_PPPOE_INTERFACES 1 +#endif + +/** + * MEMP_NUM_PPPOL2TP_INTERFACES: the number of concurrently active PPPoL2TP + * interfaces (only used with PPPOL2TP_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOL2TP_INTERFACES +#define MEMP_NUM_PPPOL2TP_INTERFACES 1 +#endif + +/** + * MEMP_NUM_PPP_API_MSG: Number of concurrent PPP API messages (in pppapi.c) + */ +#ifndef MEMP_NUM_PPP_API_MSG +#define MEMP_NUM_PPP_API_MSG 5 +#endif + +/** + * PPP_DEBUG: Enable debugging for PPP. + */ +#ifndef PPP_DEBUG +#define PPP_DEBUG LWIP_DBG_OFF +#endif + +/** + * PPP_INPROC_IRQ_SAFE==1 call pppos_input() using tcpip_callback(). + * + * Please read the "PPPoS input path" chapter in the PPP documentation about this option. + */ +#ifndef PPP_INPROC_IRQ_SAFE +#define PPP_INPROC_IRQ_SAFE 0 +#endif + +/** + * PRINTPKT_SUPPORT==1: Enable PPP print packet support + * + * Mandatory for debugging, it displays exchanged packet content in debug trace. + */ +#ifndef PRINTPKT_SUPPORT +#define PRINTPKT_SUPPORT 0 +#endif + +/** + * PPP_IPV4_SUPPORT==1: Enable PPP IPv4 support + */ +#ifndef PPP_IPV4_SUPPORT +#define PPP_IPV4_SUPPORT (LWIP_IPV4) +#endif + +/** + * PPP_IPV6_SUPPORT==1: Enable PPP IPv6 support + */ +#ifndef PPP_IPV6_SUPPORT +#define PPP_IPV6_SUPPORT (LWIP_IPV6) +#endif + +/** + * PPP_NOTIFY_PHASE==1: Support PPP notify phase support + * + * PPP notify phase support allows you to set a callback which is + * called on change of the internal PPP state machine. + * + * This can be used for example to set a LED pattern depending on the + * current phase of the PPP session. 
+ */ +#ifndef PPP_NOTIFY_PHASE +#define PPP_NOTIFY_PHASE 0 +#endif + +/** + * pbuf_type PPP is using for LCP, PAP, CHAP, EAP, CCP, IPCP and IP6CP packets. + * + * Memory allocated must be single buffered for PPP to works, it requires pbuf + * that are not going to be chained when allocated. This requires setting + * PBUF_POOL_BUFSIZE to at least 512 bytes, which is quite huge for small systems. + * + * Setting PPP_USE_PBUF_RAM to 1 makes PPP use memory from heap where continuous + * buffers are required, allowing you to use a smaller PBUF_POOL_BUFSIZE. + */ +#ifndef PPP_USE_PBUF_RAM +#define PPP_USE_PBUF_RAM 0 +#endif + +/** + * PPP_FCS_TABLE: Keep a 256*2 byte table to speed up FCS calculation for PPPoS + */ +#ifndef PPP_FCS_TABLE +#define PPP_FCS_TABLE 1 +#endif + +/** + * PAP_SUPPORT==1: Support PAP. + */ +#ifndef PAP_SUPPORT +#define PAP_SUPPORT 0 +#endif + +/** + * CHAP_SUPPORT==1: Support CHAP. + */ +#ifndef CHAP_SUPPORT +#define CHAP_SUPPORT 0 +#endif + +/** + * MSCHAP_SUPPORT==1: Support MSCHAP. + */ +#ifndef MSCHAP_SUPPORT +#define MSCHAP_SUPPORT 0 +#endif +#if MSCHAP_SUPPORT +/* MSCHAP requires CHAP support */ +#undef CHAP_SUPPORT +#define CHAP_SUPPORT 1 +#endif /* MSCHAP_SUPPORT */ + +/** + * EAP_SUPPORT==1: Support EAP. + */ +#ifndef EAP_SUPPORT +#define EAP_SUPPORT 0 +#endif + +/** + * CCP_SUPPORT==1: Support CCP. + */ +#ifndef CCP_SUPPORT +#define CCP_SUPPORT 0 +#endif + +/** + * MPPE_SUPPORT==1: Support MPPE. + */ +#ifndef MPPE_SUPPORT +#define MPPE_SUPPORT 0 +#endif +#if MPPE_SUPPORT +/* MPPE requires CCP support */ +#undef CCP_SUPPORT +#define CCP_SUPPORT 1 +/* MPPE requires MSCHAP support */ +#undef MSCHAP_SUPPORT +#define MSCHAP_SUPPORT 1 +/* MSCHAP requires CHAP support */ +#undef CHAP_SUPPORT +#define CHAP_SUPPORT 1 +#endif /* MPPE_SUPPORT */ + +/** + * CBCP_SUPPORT==1: Support CBCP. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef CBCP_SUPPORT +#define CBCP_SUPPORT 0 +#endif + +/** + * ECP_SUPPORT==1: Support ECP. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef ECP_SUPPORT +#define ECP_SUPPORT 0 +#endif + +/** + * DEMAND_SUPPORT==1: Support dial on demand. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef DEMAND_SUPPORT +#define DEMAND_SUPPORT 0 +#endif + +/** + * LQR_SUPPORT==1: Support Link Quality Report. Do nothing except exchanging some LCP packets. + */ +#ifndef LQR_SUPPORT +#define LQR_SUPPORT 0 +#endif + +/** + * PPP_SERVER==1: Enable PPP server support (waiting for incoming PPP session). + * + * Currently only supported for PPPoS. + */ +#ifndef PPP_SERVER +#define PPP_SERVER 0 +#endif + +#if PPP_SERVER +/* + * PPP_OUR_NAME: Our name for authentication purposes + */ +#ifndef PPP_OUR_NAME +#define PPP_OUR_NAME "lwIP" +#endif +#endif /* PPP_SERVER */ + +/** + * VJ_SUPPORT==1: Support VJ header compression. + */ +#ifndef VJ_SUPPORT +#define VJ_SUPPORT 1 +#endif +/* VJ compression is only supported for TCP over IPv4 over PPPoS. */ +#if !PPPOS_SUPPORT || !PPP_IPV4_SUPPORT || !LWIP_TCP +#undef VJ_SUPPORT +#define VJ_SUPPORT 0 +#endif /* !PPPOS_SUPPORT */ + +/** + * PPP_MD5_RANDM==1: Use MD5 for better randomness. + * Enabled by default if CHAP, EAP, or L2TP AUTH support is enabled. + */ +#ifndef PPP_MD5_RANDM +#define PPP_MD5_RANDM (CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT) +#endif + +/** + * PolarSSL embedded library + * + * + * lwIP contains some files fetched from the latest BSD release of + * the PolarSSL project (PolarSSL 0.10.1-bsd) for ciphers and encryption + * methods we need for lwIP PPP support. 
+ * + * The PolarSSL files were cleaned to contain only the necessary struct + * fields and functions needed for lwIP. + * + * The PolarSSL API was not changed at all, so if you are already using + * PolarSSL you can choose to skip the compilation of the included PolarSSL + * library into lwIP. + * + * If you are not using the embedded copy you must include external + * libraries into your arch/cc.h port file. + * + * Beware of the stack requirements which can be a lot larger if you are not + * using our cleaned PolarSSL library. + */ + +/** + * LWIP_USE_EXTERNAL_POLARSSL: Use external PolarSSL library + */ +#ifndef LWIP_USE_EXTERNAL_POLARSSL +#define LWIP_USE_EXTERNAL_POLARSSL 0 +#endif + +/** + * LWIP_USE_EXTERNAL_MBEDTLS: Use external mbed TLS library + */ +#ifndef LWIP_USE_EXTERNAL_MBEDTLS +#define LWIP_USE_EXTERNAL_MBEDTLS 0 +#endif + +/* + * PPP Timeouts + */ + +/** + * FSM_DEFTIMEOUT: Timeout time in seconds + */ +#ifndef FSM_DEFTIMEOUT +#define FSM_DEFTIMEOUT 6 +#endif + +/** + * FSM_DEFMAXTERMREQS: Maximum Terminate-Request transmissions + */ +#ifndef FSM_DEFMAXTERMREQS +#define FSM_DEFMAXTERMREQS 2 +#endif + +/** + * FSM_DEFMAXCONFREQS: Maximum Configure-Request transmissions + */ +#ifndef FSM_DEFMAXCONFREQS +#define FSM_DEFMAXCONFREQS 10 +#endif + +/** + * FSM_DEFMAXNAKLOOPS: Maximum number of nak loops + */ +#ifndef FSM_DEFMAXNAKLOOPS +#define FSM_DEFMAXNAKLOOPS 5 +#endif + +/** + * UPAP_DEFTIMEOUT: Timeout (seconds) for retransmitting req + */ +#ifndef UPAP_DEFTIMEOUT +#define UPAP_DEFTIMEOUT 6 +#endif + +/** + * UPAP_DEFTRANSMITS: Maximum number of auth-reqs to send + */ +#ifndef UPAP_DEFTRANSMITS +#define UPAP_DEFTRANSMITS 10 +#endif + +#if PPP_SERVER +/** + * UPAP_DEFREQTIME: Time to wait for auth-req from peer + */ +#ifndef UPAP_DEFREQTIME +#define UPAP_DEFREQTIME 30 +#endif +#endif /* PPP_SERVER */ + +/** + * CHAP_DEFTIMEOUT: Timeout (seconds) for retransmitting req + */ +#ifndef CHAP_DEFTIMEOUT +#define CHAP_DEFTIMEOUT 6 +#endif + +/** + * CHAP_DEFTRANSMITS: max # times to send challenge + */ +#ifndef CHAP_DEFTRANSMITS +#define CHAP_DEFTRANSMITS 10 +#endif + +#if PPP_SERVER +/** + * CHAP_DEFRECHALLENGETIME: If this option is > 0, rechallenge the peer every n seconds + */ +#ifndef CHAP_DEFRECHALLENGETIME +#define CHAP_DEFRECHALLENGETIME 0 +#endif +#endif /* PPP_SERVER */ + +/** + * EAP_DEFREQTIME: Time to wait for peer request + */ +#ifndef EAP_DEFREQTIME +#define EAP_DEFREQTIME 6 +#endif + +/** + * EAP_DEFALLOWREQ: max # times to accept requests + */ +#ifndef EAP_DEFALLOWREQ +#define EAP_DEFALLOWREQ 10 +#endif + +#if PPP_SERVER +/** + * EAP_DEFTIMEOUT: Timeout (seconds) for rexmit + */ +#ifndef EAP_DEFTIMEOUT +#define EAP_DEFTIMEOUT 6 +#endif + +/** + * EAP_DEFTRANSMITS: max # times to transmit + */ +#ifndef EAP_DEFTRANSMITS +#define EAP_DEFTRANSMITS 10 +#endif +#endif /* PPP_SERVER */ + +/** + * LCP_DEFLOOPBACKFAIL: Default number of times we receive our magic number from the peer + * before deciding the link is looped-back. + */ +#ifndef LCP_DEFLOOPBACKFAIL +#define LCP_DEFLOOPBACKFAIL 10 +#endif + +/** + * LCP_ECHOINTERVAL: Interval in seconds between keepalive echo requests, 0 to disable. + */ +#ifndef LCP_ECHOINTERVAL +#define LCP_ECHOINTERVAL 0 +#endif + +/** + * LCP_MAXECHOFAILS: Number of unanswered echo requests before failure. + */ +#ifndef LCP_MAXECHOFAILS +#define LCP_MAXECHOFAILS 3 +#endif + +/** + * PPP_MAXIDLEFLAG: Max Xmit idle time (in ms) before resend flag char. 
+ */ +#ifndef PPP_MAXIDLEFLAG +#define PPP_MAXIDLEFLAG 100 +#endif + +/** + * PPP Packet sizes + */ + +/** + * PPP_MRU: Default MRU + */ +#ifndef PPP_MRU +#define PPP_MRU 1500 +#endif + +/** + * PPP_DEFMRU: Default MRU to try + */ +#ifndef PPP_DEFMRU +#define PPP_DEFMRU 1500 +#endif + +/** + * PPP_MAXMRU: Normally limit MRU to this (pppd default = 16384) + */ +#ifndef PPP_MAXMRU +#define PPP_MAXMRU 1500 +#endif + +/** + * PPP_MINMRU: No MRUs below this + */ +#ifndef PPP_MINMRU +#define PPP_MINMRU 128 +#endif + +/** + * PPPOL2TP_DEFMRU: Default MTU and MRU for L2TP + * Default = 1500 - PPPoE(6) - PPP Protocol(2) - IPv4 header(20) - UDP Header(8) + * - L2TP Header(6) - HDLC Header(2) - PPP Protocol(2) - MPPE Header(2) - PPP Protocol(2) + */ +#if PPPOL2TP_SUPPORT +#ifndef PPPOL2TP_DEFMRU +#define PPPOL2TP_DEFMRU 1450 +#endif +#endif /* PPPOL2TP_SUPPORT */ + +/** + * MAXNAMELEN: max length of hostname or name for auth + */ +#ifndef MAXNAMELEN +#define MAXNAMELEN 256 +#endif + +/** + * MAXSECRETLEN: max length of password or secret + */ +#ifndef MAXSECRETLEN +#define MAXSECRETLEN 256 +#endif + +/* ------------------------------------------------------------------------- */ + +/* + * Build triggers for embedded PolarSSL + */ +#if !LWIP_USE_EXTERNAL_POLARSSL && !LWIP_USE_EXTERNAL_MBEDTLS + +/* CHAP, EAP, L2TP AUTH and MD5 Random require MD5 support */ +#if CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT || PPP_MD5_RANDM +#define LWIP_INCLUDED_POLARSSL_MD5 1 +#endif /* CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT || PPP_MD5_RANDM */ + +#if MSCHAP_SUPPORT + +/* MSCHAP require MD4 support */ +#define LWIP_INCLUDED_POLARSSL_MD4 1 +/* MSCHAP require SHA1 support */ +#define LWIP_INCLUDED_POLARSSL_SHA1 1 +/* MSCHAP require DES support */ +#define LWIP_INCLUDED_POLARSSL_DES 1 + +/* MS-CHAP support is required for MPPE */ +#if MPPE_SUPPORT +/* MPPE require ARC4 support */ +#define LWIP_INCLUDED_POLARSSL_ARC4 1 +#endif /* MPPE_SUPPORT */ + +#endif /* MSCHAP_SUPPORT */ + +#endif /* !LWIP_USE_EXTERNAL_POLARSSL && !LWIP_USE_EXTERNAL_MBEDTLS */ + +/* Default value if unset */ +#ifndef LWIP_INCLUDED_POLARSSL_MD4 +#define LWIP_INCLUDED_POLARSSL_MD4 0 +#endif /* LWIP_INCLUDED_POLARSSL_MD4 */ +#ifndef LWIP_INCLUDED_POLARSSL_MD5 +#define LWIP_INCLUDED_POLARSSL_MD5 0 +#endif /* LWIP_INCLUDED_POLARSSL_MD5 */ +#ifndef LWIP_INCLUDED_POLARSSL_SHA1 +#define LWIP_INCLUDED_POLARSSL_SHA1 0 +#endif /* LWIP_INCLUDED_POLARSSL_SHA1 */ +#ifndef LWIP_INCLUDED_POLARSSL_DES +#define LWIP_INCLUDED_POLARSSL_DES 0 +#endif /* LWIP_INCLUDED_POLARSSL_DES */ +#ifndef LWIP_INCLUDED_POLARSSL_ARC4 +#define LWIP_INCLUDED_POLARSSL_ARC4 0 +#endif /* LWIP_INCLUDED_POLARSSL_ARC4 */ + +#endif /* PPP_SUPPORT */ + +/* Default value if unset */ +#ifndef PPP_NUM_TIMEOUTS +#define PPP_NUM_TIMEOUTS 0 +#endif /* PPP_NUM_TIMEOUTS */ + +#endif /* LWIP_PPP_OPTS_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h new file mode 100644 index 000000000..913d93f74 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h @@ -0,0 +1,137 @@ +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#ifndef LWIP_PPPAPI_H +#define LWIP_PPPAPI_H + +#include "netif/ppp/ppp_opts.h" + +#if LWIP_PPP_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sys.h" +#include "lwip/netif.h" +#include "lwip/priv/tcpip_priv.h" +#include "netif/ppp/ppp.h" +#if PPPOS_SUPPORT +#include "netif/ppp/pppos.h" +#endif /* PPPOS_SUPPORT */ + +#ifdef __cplusplus +extern "C" { +#endif + +struct pppapi_msg_msg { + ppp_pcb *ppp; + union { +#if PPP_NOTIFY_PHASE + struct { + ppp_notify_phase_cb_fn notify_phase_cb; + } setnotifyphasecb; +#endif /* PPP_NOTIFY_PHASE */ +#if PPPOS_SUPPORT + struct { + struct netif *pppif; + pppos_output_cb_fn output_cb; + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } serialcreate; +#endif /* PPPOS_SUPPORT */ +#if PPPOE_SUPPORT + struct { + struct netif *pppif; + struct netif *ethif; + const char *service_name; + const char *concentrator_name; + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } ethernetcreate; +#endif /* PPPOE_SUPPORT */ +#if PPPOL2TP_SUPPORT + struct { + struct netif *pppif; + struct netif *netif; + API_MSG_M_DEF_C(ip_addr_t, ipaddr); + u16_t port; +#if PPPOL2TP_AUTH_SUPPORT + const u8_t *secret; + u8_t secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } l2tpcreate; +#endif /* PPPOL2TP_SUPPORT */ + struct { + u16_t holdoff; + } connect; + struct { + u8_t nocarrier; + } close; + struct { + u8_t cmd; + void *arg; + } ioctl; + } msg; +}; + +struct pppapi_msg { + struct tcpip_api_call_data call; + struct pppapi_msg_msg msg; +}; + +/* API for application */ +err_t pppapi_set_default(ppp_pcb *pcb); +#if PPP_NOTIFY_PHASE +err_t pppapi_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb); +#endif /* PPP_NOTIFY_PHASE */ +#if PPPOS_SUPPORT +ppp_pcb *pppapi_pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, ppp_link_status_cb_fn link_status_cb, void *ctx_cb); +#endif /* PPPOS_SUPPORT */ +#if PPPOE_SUPPORT +ppp_pcb *pppapi_pppoe_create(struct netif *pppif, struct netif *ethif, const char *service_name, + const char *concentrator_name, ppp_link_status_cb_fn link_status_cb, + void *ctx_cb); +#endif /* PPPOE_SUPPORT */ +#if PPPOL2TP_SUPPORT +ppp_pcb *pppapi_pppol2tp_create(struct netif *pppif, struct netif *netif, ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + 
ppp_link_status_cb_fn link_status_cb, void *ctx_cb); +#endif /* PPPOL2TP_SUPPORT */ +err_t pppapi_connect(ppp_pcb *pcb, u16_t holdoff); +#if PPP_SERVER +err_t pppapi_listen(ppp_pcb *pcb); +#endif /* PPP_SERVER */ +err_t pppapi_close(ppp_pcb *pcb, u8_t nocarrier); +err_t pppapi_free(ppp_pcb *pcb); +err_t pppapi_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_PPP_API */ + +#endif /* LWIP_PPPAPI_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h new file mode 100644 index 000000000..c0230bbcb --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h @@ -0,0 +1,144 @@ +/* + * pppcrypt.c - PPP/DES linkage for MS-CHAP and EAP SRP-SHA1 + * + * Extracted from chap_ms.c by James Carlson. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* This header file is included in all PPP modules needing hashes and/or ciphers */ + +#ifndef PPPCRYPT_H +#define PPPCRYPT_H + +/* + * If included PolarSSL copy is not used, user is expected to include + * external libraries in arch/cc.h (which is included by lwip/arch.h). 
+ */ +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Map hashes and ciphers functions to PolarSSL + */ +#if !LWIP_USE_EXTERNAL_MBEDTLS + +#include "netif/ppp/polarssl/md4.h" +#define lwip_md4_context md4_context +#define lwip_md4_init(context) +#define lwip_md4_starts md4_starts +#define lwip_md4_update md4_update +#define lwip_md4_finish md4_finish +#define lwip_md4_free(context) + +#include "netif/ppp/polarssl/md5.h" +#define lwip_md5_context md5_context +#define lwip_md5_init(context) +#define lwip_md5_starts md5_starts +#define lwip_md5_update md5_update +#define lwip_md5_finish md5_finish +#define lwip_md5_free(context) + +#include "netif/ppp/polarssl/sha1.h" +#define lwip_sha1_context sha1_context +#define lwip_sha1_init(context) +#define lwip_sha1_starts sha1_starts +#define lwip_sha1_update sha1_update +#define lwip_sha1_finish sha1_finish +#define lwip_sha1_free(context) + +#include "netif/ppp/polarssl/des.h" +#define lwip_des_context des_context +#define lwip_des_init(context) +#define lwip_des_setkey_enc des_setkey_enc +#define lwip_des_crypt_ecb des_crypt_ecb +#define lwip_des_free(context) + +#include "netif/ppp/polarssl/arc4.h" +#define lwip_arc4_context arc4_context +#define lwip_arc4_init(context) +#define lwip_arc4_setup arc4_setup +#define lwip_arc4_crypt arc4_crypt +#define lwip_arc4_free(context) + +#endif /* !LWIP_USE_EXTERNAL_MBEDTLS */ + +/* + * Map hashes and ciphers functions to mbed TLS + */ +#if LWIP_USE_EXTERNAL_MBEDTLS + +#define lwip_md4_context mbedtls_md4_context +#define lwip_md4_init mbedtls_md4_init +#define lwip_md4_starts mbedtls_md4_starts +#define lwip_md4_update mbedtls_md4_update +#define lwip_md4_finish mbedtls_md4_finish +#define lwip_md4_free mbedtls_md4_free + +#define lwip_md5_context mbedtls_md5_context +#define lwip_md5_init mbedtls_md5_init +#define lwip_md5_starts mbedtls_md5_starts +#define lwip_md5_update mbedtls_md5_update +#define lwip_md5_finish mbedtls_md5_finish +#define lwip_md5_free mbedtls_md5_free + +#define lwip_sha1_context mbedtls_sha1_context +#define lwip_sha1_init mbedtls_sha1_init +#define lwip_sha1_starts mbedtls_sha1_starts +#define lwip_sha1_update mbedtls_sha1_update +#define lwip_sha1_finish mbedtls_sha1_finish +#define lwip_sha1_free mbedtls_sha1_free + +#define lwip_des_context mbedtls_des_context +#define lwip_des_init mbedtls_des_init +#define lwip_des_setkey_enc mbedtls_des_setkey_enc +#define lwip_des_crypt_ecb mbedtls_des_crypt_ecb +#define lwip_des_free mbedtls_des_free + +#define lwip_arc4_context mbedtls_arc4_context +#define lwip_arc4_init mbedtls_arc4_init +#define lwip_arc4_setup mbedtls_arc4_setup +#define lwip_arc4_crypt(context, buffer, length) mbedtls_arc4_crypt(context, length, buffer, buffer) +#define lwip_arc4_free mbedtls_arc4_free + +#endif /* LWIP_USE_EXTERNAL_MBEDTLS */ + +void pppcrypt_56_to_64_bit_key(u_char *key, u_char *des_key); + +#ifdef __cplusplus +} +#endif + +#endif /* PPPCRYPT_H */ + +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h new file mode 100644 index 000000000..36ee4f9bf --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h @@ -0,0 +1,88 @@ +/***************************************************************************** +* pppdebug.h - System debugging utilities. 
+* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1998 Global Election Systems Inc. +* portions Copyright (c) 2001 by Cognizant Pty Ltd. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY (please don't use tabs!) +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-07-29 Guy Lancaster , Global Election Systems Inc. +* Original. +* +***************************************************************************** +*/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPDEBUG_H +#define PPPDEBUG_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Trace levels. 
*/ +#define LOG_CRITICAL (PPP_DEBUG | LWIP_DBG_LEVEL_SEVERE) +#define LOG_ERR (PPP_DEBUG | LWIP_DBG_LEVEL_SEVERE) +#define LOG_NOTICE (PPP_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define LOG_WARNING (PPP_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define LOG_INFO (PPP_DEBUG) +#define LOG_DETAIL (PPP_DEBUG) +#define LOG_DEBUG (PPP_DEBUG) + +#if PPP_DEBUG + +#define MAINDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define SYSDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define FSMDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define LCPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define IPCPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define IPV6CPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define UPAPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define CHAPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define PPPDEBUG(a, b) LWIP_DEBUGF(a, b) + +#else /* PPP_DEBUG */ + +#define MAINDEBUG(a) +#define SYSDEBUG(a) +#define FSMDEBUG(a) +#define LCPDEBUG(a) +#define IPCPDEBUG(a) +#define IPV6CPDEBUG(a) +#define UPAPDEBUG(a) +#define CHAPDEBUG(a) +#define PPPDEBUG(a, b) + +#endif /* PPP_DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPPDEBUG_H */ + +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h new file mode 100644 index 000000000..08ab7ab54 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h @@ -0,0 +1,187 @@ +/***************************************************************************** +* pppoe.h - PPP Over Ethernet implementation for lwIP. +* +* Copyright (c) 2006 by Marc Boucher, Services Informatiques (MBSI) inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 06-01-01 Marc Boucher +* Ported to lwIP. +*****************************************************************************/ + + + +/* based on NetBSD: if_pppoe.c,v 1.64 2006/01/31 23:50:15 martin Exp */ + +/*- + * Copyright (c) 2002 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Martin Husemann . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_OE_H +#define PPP_OE_H + +#include "ppp.h" +#include "lwip/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppoehdr { + PACK_STRUCT_FLD_8(u8_t vertype); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t session); + PACK_STRUCT_FIELD(u16_t plen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppoetag { + PACK_STRUCT_FIELD(u16_t tag); + PACK_STRUCT_FIELD(u16_t len); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +#define PPPOE_STATE_INITIAL 0 +#define PPPOE_STATE_PADI_SENT 1 +#define PPPOE_STATE_PADR_SENT 2 +#define PPPOE_STATE_SESSION 3 +/* passive */ +#define PPPOE_STATE_PADO_SENT 1 + +#define PPPOE_HEADERLEN sizeof(struct pppoehdr) +#define PPPOE_VERTYPE 0x11 /* VER=1, TYPE = 1 */ + +#define PPPOE_TAG_EOL 0x0000 /* end of list */ +#define PPPOE_TAG_SNAME 0x0101 /* service name */ +#define PPPOE_TAG_ACNAME 0x0102 /* access concentrator name */ +#define PPPOE_TAG_HUNIQUE 0x0103 /* host unique */ +#define PPPOE_TAG_ACCOOKIE 0x0104 /* AC cookie */ +#define PPPOE_TAG_VENDOR 0x0105 /* vendor specific */ +#define PPPOE_TAG_RELAYSID 0x0110 /* relay session id */ +#define PPPOE_TAG_SNAME_ERR 0x0201 /* service name error */ +#define PPPOE_TAG_ACSYS_ERR 0x0202 /* AC system error */ +#define PPPOE_TAG_GENERIC_ERR 0x0203 /* gerneric error */ + +#define PPPOE_CODE_PADI 0x09 /* Active Discovery Initiation */ 
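/*
 * Editor's illustration -- not part of the vendored lwIP sources.
 * A hypothetical sketch of how the discovery header definitions above
 * (struct pppoehdr, PPPOE_HEADERLEN, PPPOE_VERTYPE) would typically be
 * validated on input; the helper name pppoe_check_discovery_header()
 * is invented for this example.
 */
#if 0 /* illustration only, never compiled */
static int pppoe_check_discovery_header(struct pbuf *pb)
{
    struct pppoehdr *ph;

    if (pb->len < PPPOE_HEADERLEN) {
        return 0;                          /* too short to contain a PPPoE header */
    }
    ph = (struct pppoehdr *)pb->payload;
    if (ph->vertype != PPPOE_VERTYPE) {    /* VER and TYPE must both be 1 */
        return 0;
    }
    /* ph->session and ph->plen arrive in network byte order and would be
     * converted with lwip_ntohs() before use by a real implementation. */
    return ph->code == PPPOE_CODE_PADI;    /* e.g. a server waiting for PADI */
}
#endif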
+#define PPPOE_CODE_PADO 0x07 /* Active Discovery Offer */ +#define PPPOE_CODE_PADR 0x19 /* Active Discovery Request */ +#define PPPOE_CODE_PADS 0x65 /* Active Discovery Session confirmation */ +#define PPPOE_CODE_PADT 0xA7 /* Active Discovery Terminate */ + +#ifndef PPPOE_MAX_AC_COOKIE_LEN +#define PPPOE_MAX_AC_COOKIE_LEN 64 +#endif + +struct pppoe_softc { + struct pppoe_softc *next; + struct netif *sc_ethif; /* ethernet interface we are using */ + ppp_pcb *pcb; /* PPP PCB */ + + struct eth_addr sc_dest; /* hardware address of concentrator */ + u16_t sc_session; /* PPPoE session id */ + u8_t sc_state; /* discovery phase or session connected */ + +#ifdef PPPOE_TODO + u8_t *sc_service_name; /* if != NULL: requested name of service */ + u8_t *sc_concentrator_name; /* if != NULL: requested concentrator id */ +#endif /* PPPOE_TODO */ + u8_t sc_ac_cookie[PPPOE_MAX_AC_COOKIE_LEN]; /* content of AC cookie we must echo back */ + u8_t sc_ac_cookie_len; /* length of cookie data */ +#ifdef PPPOE_SERVER + u8_t *sc_hunique; /* content of host unique we must echo back */ + u8_t sc_hunique_len; /* length of host unique */ +#endif + u8_t sc_padi_retried; /* number of PADI retries already done */ + u8_t sc_padr_retried; /* number of PADR retries already done */ +}; + + +#define pppoe_init() /* compatibility define, no initialization needed */ + +ppp_pcb *pppoe_create(struct netif *pppif, + struct netif *ethif, + const char *service_name, const char *concentrator_name, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +/* + * Functions called from lwIP + * DO NOT CALL FROM lwIP USER APPLICATION. + */ +void pppoe_disc_input(struct netif *netif, struct pbuf *p); +void pppoe_data_input(struct netif *netif, struct pbuf *p); + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_OE_H */ + +#endif /* PPP_SUPPORT && PPPOE_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h new file mode 100644 index 000000000..1221ca15b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h @@ -0,0 +1,209 @@ +/** + * @file + * Network Point to Point Protocol over Layer 2 Tunneling Protocol header file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOL2TP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPOL2TP_H +#define PPPOL2TP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Timeout */ +#define PPPOL2TP_CONTROL_TIMEOUT (5*1000) /* base for quick timeout calculation */ +#define PPPOL2TP_SLOW_RETRY (60*1000) /* persistent retry interval */ + +#define PPPOL2TP_MAXSCCRQ 4 /* retry SCCRQ four times (quickly) */ +#define PPPOL2TP_MAXICRQ 4 /* retry IRCQ four times */ +#define PPPOL2TP_MAXICCN 4 /* retry ICCN four times */ + +/* L2TP header flags */ +#define PPPOL2TP_HEADERFLAG_CONTROL 0x8000 +#define PPPOL2TP_HEADERFLAG_LENGTH 0x4000 +#define PPPOL2TP_HEADERFLAG_SEQUENCE 0x0800 +#define PPPOL2TP_HEADERFLAG_OFFSET 0x0200 +#define PPPOL2TP_HEADERFLAG_PRIORITY 0x0100 +#define PPPOL2TP_HEADERFLAG_VERSION 0x0002 + +/* Mandatory bits for control: Control, Length, Sequence, Version 2 */ +#define PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY (PPPOL2TP_HEADERFLAG_CONTROL|PPPOL2TP_HEADERFLAG_LENGTH|PPPOL2TP_HEADERFLAG_SEQUENCE|PPPOL2TP_HEADERFLAG_VERSION) +/* Forbidden bits for control: Offset, Priority */ +#define PPPOL2TP_HEADERFLAG_CONTROL_FORBIDDEN (PPPOL2TP_HEADERFLAG_OFFSET|PPPOL2TP_HEADERFLAG_PRIORITY) + +/* Mandatory bits for data: Version 2 */ +#define PPPOL2TP_HEADERFLAG_DATA_MANDATORY (PPPOL2TP_HEADERFLAG_VERSION) + +/* AVP (Attribute Value Pair) header */ +#define PPPOL2TP_AVPHEADERFLAG_MANDATORY 0x8000 +#define PPPOL2TP_AVPHEADERFLAG_HIDDEN 0x4000 +#define PPPOL2TP_AVPHEADERFLAG_LENGTHMASK 0x03ff + +/* -- AVP - Message type */ +#define PPPOL2TP_AVPTYPE_MESSAGE 0 /* Message type */ + +/* Control Connection Management */ +#define PPPOL2TP_MESSAGETYPE_SCCRQ 1 /* Start Control Connection Request */ +#define PPPOL2TP_MESSAGETYPE_SCCRP 2 /* Start Control Connection Reply */ +#define PPPOL2TP_MESSAGETYPE_SCCCN 3 /* Start Control Connection Connected */ +#define PPPOL2TP_MESSAGETYPE_STOPCCN 4 /* Stop Control Connection Notification */ +#define PPPOL2TP_MESSAGETYPE_HELLO 6 /* Hello */ +/* Call Management */ +#define PPPOL2TP_MESSAGETYPE_OCRQ 7 /* Outgoing Call Request */ +#define PPPOL2TP_MESSAGETYPE_OCRP 8 /* Outgoing Call Reply */ +#define PPPOL2TP_MESSAGETYPE_OCCN 9 /* Outgoing Call Connected */ +#define PPPOL2TP_MESSAGETYPE_ICRQ 10 /* Incoming Call Request */ +#define PPPOL2TP_MESSAGETYPE_ICRP 11 /* Incoming Call Reply */ +#define PPPOL2TP_MESSAGETYPE_ICCN 12 /* Incoming Call Connected */ +#define PPPOL2TP_MESSAGETYPE_CDN 14 /* Call Disconnect Notify */ +/* Error reporting */ +#define PPPOL2TP_MESSAGETYPE_WEN 15 /* WAN Error Notify */ +/* PPP Session Control */ +#define PPPOL2TP_MESSAGETYPE_SLI 16 /* Set Link Info */ + +/* -- AVP - Result code */ +#define PPPOL2TP_AVPTYPE_RESULTCODE 1 /* Result code */ +#define PPPOL2TP_RESULTCODE 1 /* General request to clear control connection */ + +/* -- AVP - Protocol version (!= L2TP Header version) */ +#define 
PPPOL2TP_AVPTYPE_VERSION 2 +#define PPPOL2TP_VERSION 0x0100 /* L2TP Protocol version 1, revision 0 */ + +/* -- AVP - Framing capabilities */ +#define PPPOL2TP_AVPTYPE_FRAMINGCAPABILITIES 3 /* Bearer capabilities */ +#define PPPOL2TP_FRAMINGCAPABILITIES 0x00000003 /* Async + Sync framing */ + +/* -- AVP - Bearer capabilities */ +#define PPPOL2TP_AVPTYPE_BEARERCAPABILITIES 4 /* Bearer capabilities */ +#define PPPOL2TP_BEARERCAPABILITIES 0x00000003 /* Analog + Digital Access */ + +/* -- AVP - Tie breaker */ +#define PPPOL2TP_AVPTYPE_TIEBREAKER 5 + +/* -- AVP - Host name */ +#define PPPOL2TP_AVPTYPE_HOSTNAME 7 /* Host name */ +#define PPPOL2TP_HOSTNAME "lwIP" /* FIXME: make it configurable */ + +/* -- AVP - Vendor name */ +#define PPPOL2TP_AVPTYPE_VENDORNAME 8 /* Vendor name */ +#define PPPOL2TP_VENDORNAME "lwIP" /* FIXME: make it configurable */ + +/* -- AVP - Assign tunnel ID */ +#define PPPOL2TP_AVPTYPE_TUNNELID 9 /* Assign Tunnel ID */ + +/* -- AVP - Receive window size */ +#define PPPOL2TP_AVPTYPE_RECEIVEWINDOWSIZE 10 /* Receive window size */ +#define PPPOL2TP_RECEIVEWINDOWSIZE 8 /* FIXME: make it configurable */ + +/* -- AVP - Challenge */ +#define PPPOL2TP_AVPTYPE_CHALLENGE 11 /* Challenge */ + +/* -- AVP - Cause code */ +#define PPPOL2TP_AVPTYPE_CAUSECODE 12 /* Cause code*/ + +/* -- AVP - Challenge response */ +#define PPPOL2TP_AVPTYPE_CHALLENGERESPONSE 13 /* Challenge response */ +#define PPPOL2TP_AVPTYPE_CHALLENGERESPONSE_SIZE 16 + +/* -- AVP - Assign session ID */ +#define PPPOL2TP_AVPTYPE_SESSIONID 14 /* Assign Session ID */ + +/* -- AVP - Call serial number */ +#define PPPOL2TP_AVPTYPE_CALLSERIALNUMBER 15 /* Call Serial Number */ + +/* -- AVP - Framing type */ +#define PPPOL2TP_AVPTYPE_FRAMINGTYPE 19 /* Framing Type */ +#define PPPOL2TP_FRAMINGTYPE 0x00000001 /* Sync framing */ + +/* -- AVP - TX Connect Speed */ +#define PPPOL2TP_AVPTYPE_TXCONNECTSPEED 24 /* TX Connect Speed */ +#define PPPOL2TP_TXCONNECTSPEED 100000000 /* Connect speed: 100 Mbits/s */ + +/* L2TP Session state */ +#define PPPOL2TP_STATE_INITIAL 0 +#define PPPOL2TP_STATE_SCCRQ_SENT 1 +#define PPPOL2TP_STATE_ICRQ_SENT 2 +#define PPPOL2TP_STATE_ICCN_SENT 3 +#define PPPOL2TP_STATE_DATA 4 + +#define PPPOL2TP_OUTPUT_DATA_HEADER_LEN 6 /* Our data header len */ + +/* + * PPPoL2TP interface control block. 
+ */ +typedef struct pppol2tp_pcb_s pppol2tp_pcb; +struct pppol2tp_pcb_s { + ppp_pcb *ppp; /* PPP PCB */ + u8_t phase; /* L2TP phase */ + struct udp_pcb *udp; /* UDP L2TP Socket */ + struct netif *netif; /* Output interface, used as a default route */ + ip_addr_t remote_ip; /* LNS IP Address */ + u16_t remote_port; /* LNS port */ +#if PPPOL2TP_AUTH_SUPPORT + const u8_t *secret; /* Secret string */ + u8_t secret_len; /* Secret string length */ + u8_t secret_rv[16]; /* Random vector */ + u8_t challenge_hash[16]; /* Challenge response */ + u8_t send_challenge; /* Boolean whether the next sent packet should contains a challenge response */ +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + u16_t tunnel_port; /* Tunnel port */ + u16_t our_ns; /* NS to peer */ + u16_t peer_nr; /* NR from peer */ + u16_t peer_ns; /* Expected NS from peer */ + u16_t source_tunnel_id; /* Tunnel ID assigned by peer */ + u16_t remote_tunnel_id; /* Tunnel ID assigned to peer */ + u16_t source_session_id; /* Session ID assigned by peer */ + u16_t remote_session_id; /* Session ID assigned to peer */ + + u8_t sccrq_retried; /* number of SCCRQ retries already done */ + u8_t icrq_retried; /* number of ICRQ retries already done */ + u8_t iccn_retried; /* number of ICCN retries already done */ +}; + + +/* Create a new L2TP session. */ +ppp_pcb *pppol2tp_create(struct netif *pppif, + struct netif *netif, const ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +#ifdef __cplusplus +} +#endif + +#endif /* PPPOL2TP_H */ +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h new file mode 100644 index 000000000..380a965ce --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h @@ -0,0 +1,126 @@ +/** + * @file + * Network Point to Point Protocol over Serial header file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOS_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPOS_H +#define PPPOS_H + +#include "lwip/sys.h" + +#include "ppp.h" +#include "vj.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* PPP packet parser states. Current state indicates operation yet to be + * completed. */ +enum { + PDIDLE = 0, /* Idle state - waiting. */ + PDSTART, /* Process start flag. */ + PDADDRESS, /* Process address field. */ + PDCONTROL, /* Process control field. */ + PDPROTOCOL1, /* Process protocol field 1. */ + PDPROTOCOL2, /* Process protocol field 2. */ + PDDATA /* Process data byte. */ +}; + +/* PPPoS serial output callback function prototype */ +typedef u32_t (*pppos_output_cb_fn)(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx); + +/* + * Extended asyncmap - allows any character to be escaped. + */ +typedef u8_t ext_accm[32]; + +/* + * PPPoS interface control block. + */ +typedef struct pppos_pcb_s pppos_pcb; +struct pppos_pcb_s { + /* -- below are data that will NOT be cleared between two sessions */ + ppp_pcb *ppp; /* PPP PCB */ + pppos_output_cb_fn output_cb; /* PPP serial output callback */ + + /* -- below are data that will be cleared between two sessions + * + * last_xmit must be the first member of cleared members, because it is + * used to know which part must not be cleared. + */ + u32_t last_xmit; /* Time of last transmission. */ + ext_accm out_accm; /* Async-Ctl-Char-Map for output. */ + + /* flags */ + unsigned int open :1; /* Set if PPPoS is open */ + unsigned int pcomp :1; /* Does peer accept protocol compression? */ + unsigned int accomp :1; /* Does peer accept addr/ctl compression? */ + + /* PPPoS rx */ + ext_accm in_accm; /* Async-Ctl-Char-Map for input. */ + struct pbuf *in_head, *in_tail; /* The input packet. */ + u16_t in_protocol; /* The input protocol code. */ + u16_t in_fcs; /* Input Frame Check Sequence value. */ + u8_t in_state; /* The input process state. */ + u8_t in_escaped; /* Escape next character. */ +}; + +/* Create a new PPPoS session. */ +ppp_pcb *pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +/* Pass received raw characters to PPPoS to be decoded through lwIP TCPIP thread. */ +err_t pppos_input_tcpip(ppp_pcb *ppp, u8_t *s, int l); +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +/* PPP over Serial: this is the input function to be called for received data. */ +void pppos_input(ppp_pcb *ppp, u8_t* data, int len); + + +/* + * Functions called from lwIP + * DO NOT CALL FROM lwIP USER APPLICATION. + */ +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +err_t pppos_input_sys(struct pbuf *p, struct netif *inp); +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPPOS_H */ +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h new file mode 100644 index 000000000..540d98149 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h @@ -0,0 +1,131 @@ +/* + * upap.h - User/Password Authentication Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: upap.h,v 1.8 2002/12/04 23:03:33 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef UPAP_H +#define UPAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define UPAP_HEADERLEN 4 + + +/* + * UPAP codes. + */ +#define UPAP_AUTHREQ 1 /* Authenticate-Request */ +#define UPAP_AUTHACK 2 /* Authenticate-Ack */ +#define UPAP_AUTHNAK 3 /* Authenticate-Nak */ + + +/* + * Client states. + */ +#define UPAPCS_INITIAL 0 /* Connection down */ +#define UPAPCS_CLOSED 1 /* Connection up, haven't requested auth */ +#define UPAPCS_PENDING 2 /* Connection down, have requested auth */ +#define UPAPCS_AUTHREQ 3 /* We've sent an Authenticate-Request */ +#define UPAPCS_OPEN 4 /* We've received an Ack */ +#define UPAPCS_BADAUTH 5 /* We've received a Nak */ + +/* + * Server states. + */ +#define UPAPSS_INITIAL 0 /* Connection down */ +#define UPAPSS_CLOSED 1 /* Connection up, haven't requested auth */ +#define UPAPSS_PENDING 2 /* Connection down, have requested auth */ +#define UPAPSS_LISTEN 3 /* Listening for an Authenticate */ +#define UPAPSS_OPEN 4 /* We've sent an Ack */ +#define UPAPSS_BADAUTH 5 /* We've sent a Nak */ + + +/* + * Timeouts. + */ +#if 0 /* moved to ppp_opts.h */ +#define UPAP_DEFTIMEOUT 3 /* Timeout (seconds) for retransmitting req */ +#define UPAP_DEFREQTIME 30 /* Time to wait for auth-req from peer */ +#endif /* moved to ppp_opts.h */ + +/* + * Each interface is described by upap structure. 
+ */ +#if PAP_SUPPORT +typedef struct upap_state { + const char *us_user; /* User */ + u8_t us_userlen; /* User length */ + const char *us_passwd; /* Password */ + u8_t us_passwdlen; /* Password length */ + u8_t us_clientstate; /* Client state */ +#if PPP_SERVER + u8_t us_serverstate; /* Server state */ +#endif /* PPP_SERVER */ + u8_t us_id; /* Current id */ + u8_t us_transmits; /* Number of auth-reqs sent */ +} upap_state; +#endif /* PAP_SUPPORT */ + + +void upap_authwithpeer(ppp_pcb *pcb, const char *user, const char *password); +#if PPP_SERVER +void upap_authpeer(ppp_pcb *pcb); +#endif /* PPP_SERVER */ + +extern const struct protent pap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* UPAP_H */ +#endif /* PPP_SUPPORT && PAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h new file mode 100644 index 000000000..77d9976c5 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h @@ -0,0 +1,169 @@ +/* + * Definitions for tcp compression routines. + * + * $Id: vj.h,v 1.7 2010/02/22 17:52:09 goldsimon Exp $ + * + * Copyright (c) 1989 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: + * - Initial distribution. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && VJ_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef VJ_H +#define VJ_H + +#include "lwip/ip.h" +#include "lwip/priv/tcp_priv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_SLOTS 16 /* must be > 2 and < 256 */ +#define MAX_HDR 128 + +/* + * Compressed packet format: + * + * The first octet contains the packet type (top 3 bits), TCP + * 'push' bit, and flags that indicate which of the 4 TCP sequence + * numbers have changed (bottom 5 bits). The next octet is a + * conversation number that associates a saved IP/TCP header with + * the compressed packet. The next two octets are the TCP checksum + * from the original datagram. The next 0 to 15 octets are + * sequence number changes, one change per bit set in the header + * (there may be no changes and there are two special cases where + * the receiver implicitly knows what changed -- see below). + * + * There are 5 numbers which can change (they are always inserted + * in the following order): TCP urgent pointer, window, + * acknowlegement, sequence number and IP ID. (The urgent pointer + * is different from the others in that its value is sent, not the + * change in value.) 
Since typical use of SLIP links is biased + * toward small packets (see comments on MTU/MSS below), changes + * use a variable length coding with one octet for numbers in the + * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the + * range 256 - 65535 or 0. (If the change in sequence number or + * ack is more than 65535, an uncompressed packet is sent.) + */ + +/* + * Packet types (must not conflict with IP protocol version) + * + * The top nibble of the first octet is the packet type. There are + * three possible types: IP (not proto TCP or tcp with one of the + * control flags set); uncompressed TCP (a normal IP/TCP packet but + * with the 8-bit protocol field replaced by an 8-bit connection id -- + * this type of packet syncs the sender & receiver); and compressed + * TCP (described above). + * + * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and + * is logically part of the 4-bit "changes" field that follows. Top + * three bits are actual packet type. For backward compatibility + * and in the interest of conserving bits, numbers are chosen so the + * IP protocol version number (4) which normally appears in this nibble + * means "IP packet". + */ + +/* packet types */ +#define TYPE_IP 0x40 +#define TYPE_UNCOMPRESSED_TCP 0x70 +#define TYPE_COMPRESSED_TCP 0x80 +#define TYPE_ERROR 0x00 + +/* Bits in first octet of compressed packet */ +#define NEW_C 0x40 /* flag bits for what changed in a packet */ +#define NEW_I 0x20 +#define NEW_S 0x08 +#define NEW_A 0x04 +#define NEW_W 0x02 +#define NEW_U 0x01 + +/* reserved, special-case values of above */ +#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */ +#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */ +#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U) + +#define TCP_PUSH_BIT 0x10 + + +/* + * "state" data for each active tcp conversation on the wire. This is + * basically a copy of the entire IP/TCP header from the last packet + * we saw from the conversation together with a small identifier + * the transmit & receive ends of the line use to locate saved header. + */ +struct cstate { + struct cstate *cs_next; /* next most recently used state (xmit only) */ + u16_t cs_hlen; /* size of hdr (receive only) */ + u8_t cs_id; /* connection # associated with this state */ + u8_t cs_filler; + union { + char csu_hdr[MAX_HDR]; + struct ip_hdr csu_ip; /* ip/tcp hdr from most recent packet */ + } vjcs_u; +}; +#define cs_ip vjcs_u.csu_ip +#define cs_hdr vjcs_u.csu_hdr + + +struct vjstat { + u32_t vjs_packets; /* outbound packets */ + u32_t vjs_compressed; /* outbound compressed packets */ + u32_t vjs_searches; /* searches for connection state */ + u32_t vjs_misses; /* times couldn't find conn. state */ + u32_t vjs_uncompressedin; /* inbound uncompressed packets */ + u32_t vjs_compressedin; /* inbound compressed packets */ + u32_t vjs_errorin; /* inbound unknown type packets */ + u32_t vjs_tossed; /* inbound packets tossed because of error */ +}; + +/* + * all the state data for one serial line (we need one of these per line). + */ +struct vjcompress { + struct cstate *last_cs; /* most recently used tstate */ + u8_t last_recv; /* last rcvd conn. id */ + u8_t last_xmit; /* last sent conn. id */ + u16_t flags; + u8_t maxSlotIndex; + u8_t compressSlot; /* Flag indicating OK to compress slot ID. 
*/ +#if LINK_STATS + struct vjstat stats; +#endif + struct cstate tstate[MAX_SLOTS]; /* xmit connection states */ + struct cstate rstate[MAX_SLOTS]; /* receive connection states */ +}; + +/* flag values */ +#define VJF_TOSS 1U /* tossing rcvd frames because of input err */ + +extern void vj_compress_init (struct vjcompress *comp); +extern u8_t vj_compress_tcp (struct vjcompress *comp, struct pbuf **pb); +extern void vj_uncompress_err (struct vjcompress *comp); +extern int vj_uncompress_uncomp(struct pbuf *nb, struct vjcompress *comp); +extern int vj_uncompress_tcp (struct pbuf **nb, struct vjcompress *comp); + +#ifdef __cplusplus +} +#endif + +#endif /* VJ_H */ + +#endif /* PPP_SUPPORT && VJ_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h new file mode 100644 index 000000000..65ba31f83 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h @@ -0,0 +1,87 @@ +/** + * @file + * + * SLIP netif API + */ + +/* + * Copyright (c) 2001, Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETIF_SLIPIF_H +#define LWIP_HDR_NETIF_SLIPIF_H + +#include "lwip/opt.h" +#include "lwip/netif.h" + +/** Set this to 1 to start a thread that blocks reading on the serial line + * (using sio_read()). + */ +#ifndef SLIP_USE_RX_THREAD +#define SLIP_USE_RX_THREAD !NO_SYS +#endif + +/** Set this to 1 to enable functions to pass in RX bytes from ISR context. + * If enabled, slipif_received_byte[s]() process incoming bytes and put assembled + * packets on a queue, which is fed into lwIP from slipif_poll(). + * If disabled, slipif_poll() polls the serial line (using sio_tryread()). 
+ */ +#ifndef SLIP_RX_FROM_ISR +#define SLIP_RX_FROM_ISR 0 +#endif + +/** Set this to 1 (default for SLIP_RX_FROM_ISR) to queue incoming packets + * received by slipif_received_byte[s]() as long as PBUF_POOL pbufs are available. + * If disabled, packets will be dropped if more than one packet is received. + */ +#ifndef SLIP_RX_QUEUE +#define SLIP_RX_QUEUE SLIP_RX_FROM_ISR +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +err_t slipif_init(struct netif * netif); +void slipif_poll(struct netif *netif); +#if SLIP_RX_FROM_ISR +void slipif_process_rxqueue(struct netif *netif); +void slipif_received_byte(struct netif *netif, u8_t data); +void slipif_received_bytes(struct netif *netif, u8_t *data, u8_t len); +#endif /* SLIP_RX_FROM_ISR */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_SLIPIF_H */ + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h new file mode 100644 index 000000000..2a801b4c4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h @@ -0,0 +1,81 @@ +/** + * @file + * + * A netif implementing the ZigBee Eencapsulation Protocol (ZEP). + * This is used to tunnel 6LowPAN over UDP. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_ZEPIF_H +#define LWIP_HDR_ZEPIF_H + +#include "lwip/opt.h" +#include "netif/lowpan6.h" + +#if LWIP_IPV6 && LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZEPIF_DEFAULT_UDP_PORT 17754 + +/** Pass this struct as 'state' to netif_add to control the behaviour + * of this netif. 
If NULL is passed, default behaviour is chosen */ +struct zepif_init { + /** The UDP port used to ZEP frames from (0 = default) */ + u16_t zep_src_udp_port; + /** The UDP port used to ZEP frames to (0 = default) */ + u16_t zep_dst_udp_port; + /** The IP address to sed ZEP frames from (NULL = ANY) */ + const ip_addr_t *zep_src_ip_addr; + /** The IP address to sed ZEP frames to (NULL = BROADCAST) */ + const ip_addr_t *zep_dst_ip_addr; + /** If != NULL, the udp pcb is bound to this netif */ + const struct netif *zep_netif; + /** MAC address of the 6LowPAN device */ + u8_t addr[6]; +}; + +err_t zepif_init(struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_UDP */ + +#endif /* LWIP_HDR_ZEPIF_H */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c new file mode 100644 index 000000000..8a97bce39 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c @@ -0,0 +1,563 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +/** + * @defgroup bridgeif IEEE 802.1D bridge + * @ingroup netifs + * This file implements an IEEE 802.1D bridge by using a multilayer netif approach + * (one hardware-independent netif for the bridge that uses hardware netifs for its ports). + * On transmit, the bridge selects the outgoing port(s). + * On receive, the port netif calls into the bridge (via its netif->input function) and + * the bridge selects the port(s) (and/or its netif->input function) to pass the received pbuf to. + * + * Usage: + * - add the port netifs just like you would when using them as dedicated netif without a bridge + * - only NETIF_FLAG_ETHARP/NETIF_FLAG_ETHERNET netifs are supported as bridge ports + * - add the bridge port netifs without IPv4 addresses (i.e. 
pass 'NULL, NULL, NULL') + * - don't add IPv6 addresses to the port netifs! + * - set up the bridge configuration in a global variable of type 'bridgeif_initdata_t' that contains + * - the MAC address of the bridge + * - some configuration options controlling the memory consumption (maximum number of ports + * and FDB entries) + * - e.g. for a bridge MAC address 00-01-02-03-04-05, 2 bridge ports, 1024 FDB entries + 16 static MAC entries: + * bridgeif_initdata_t mybridge_initdata = BRIDGEIF_INITDATA1(2, 1024, 16, ETH_ADDR(0, 1, 2, 3, 4, 5)); + * - add the bridge netif (with IPv4 config): + * struct netif bridge_netif; + * netif_add(&bridge_netif, &my_ip, &my_netmask, &my_gw, &mybridge_initdata, bridgeif_init, tcpip_input); + * NOTE: the passed 'input' function depends on BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT setting, + * which controls where the forwarding is done (netif low level input context vs. tcpip_thread) + * - set up all ports netifs and the bridge netif + * + * - When adding a port netif, NETIF_FLAG_ETHARP flag will be removed from a port + * to prevent ETHARP working on that port netif (we only want one IP per bridge not per port). + * - When adding a port netif, its input function is changed to call into the bridge. + * + * + * @todo: + * - compact static FDB entries (instead of walking the whole array) + * - add FDB query/read access + * - add FDB change callback (when learning or dropping auto-learned entries) + * - prefill FDB with MAC classes that should never be forwarded + * - multicast snooping? (and only forward group addresses to interested ports) + * - support removing ports + * - check SNMP integration + * - VLAN handling / trunk ports + * - priority handling? (although that largely depends on TX queue limitations and lwIP doesn't provide tx-done handling) + */ + +#include "netif/bridgeif.h" +#include "lwip/netif.h" +#include "lwip/sys.h" +#include "lwip/etharp.h" +#include "lwip/ethip6.h" +#include "lwip/snmp.h" +#include "lwip/timeouts.h" +#include + +#if LWIP_NUM_NETIF_CLIENT_DATA + +/* Define those to better describe your network interface. */ +#define IFNAME0 'b' +#define IFNAME1 'r' + +struct bridgeif_private_s; +typedef struct bridgeif_port_private_s { + struct bridgeif_private_s *bridge; + struct netif *port_netif; + u8_t port_num; +} bridgeif_port_t; + +typedef struct bridgeif_fdb_static_entry_s { + u8_t used; + bridgeif_portmask_t dst_ports; + struct eth_addr addr; +} bridgeif_fdb_static_entry_t; + +typedef struct bridgeif_private_s { + struct netif *netif; + struct eth_addr ethaddr; + u8_t max_ports; + u8_t num_ports; + bridgeif_port_t *ports; + u16_t max_fdbs_entries; + bridgeif_fdb_static_entry_t *fdbs; + u16_t max_fdbd_entries; + void *fdbd; +} bridgeif_private_t; + +/* netif data index to get the bridge on input */ +u8_t bridgeif_netif_client_id = 0xff; + +/** + * @ingroup bridgeif + * Add a static entry to the forwarding database. + * A static entry marks where frames to a specific eth address (unicast or group address) are + * forwarded. 
+ * bits [0..(BRIDGEIF_MAX_PORTS-1)]: hw ports + * bit [BRIDGEIF_MAX_PORTS]: cpu port + * 0: drop + */ +err_t +bridgeif_fdb_add(struct netif *bridgeif, const struct eth_addr *addr, bridgeif_portmask_t ports) +{ + int i; + bridgeif_private_t *br; + BRIDGEIF_DECL_PROTECT(lev); + LWIP_ASSERT("invalid netif", bridgeif != NULL); + br = (bridgeif_private_t *)bridgeif->state; + LWIP_ASSERT("invalid state", br != NULL); + + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->max_fdbs_entries; i++) { + if (!br->fdbs[i].used) { + BRIDGEIF_WRITE_PROTECT(lev); + if (!br->fdbs[i].used) { + br->fdbs[i].used = 1; + br->fdbs[i].dst_ports = ports; + memcpy(&br->fdbs[i].addr, addr, sizeof(struct eth_addr)); + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_OK; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_MEM; +} + +/** + * @ingroup bridgeif + * Remove a static entry from the forwarding database + */ +err_t +bridgeif_fdb_remove(struct netif *bridgeif, const struct eth_addr *addr) +{ + int i; + bridgeif_private_t *br; + BRIDGEIF_DECL_PROTECT(lev); + LWIP_ASSERT("invalid netif", bridgeif != NULL); + br = (bridgeif_private_t *)bridgeif->state; + LWIP_ASSERT("invalid state", br != NULL); + + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->max_fdbs_entries; i++) { + if (br->fdbs[i].used && !memcmp(&br->fdbs[i].addr, addr, sizeof(struct eth_addr))) { + BRIDGEIF_WRITE_PROTECT(lev); + if (br->fdbs[i].used && !memcmp(&br->fdbs[i].addr, addr, sizeof(struct eth_addr))) { + memset(&br->fdbs[i], 0, sizeof(bridgeif_fdb_static_entry_t)); + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_OK; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_VAL; +} + +/** Get the forwarding port(s) (as bit mask) for the specified destination mac address */ +static bridgeif_portmask_t +bridgeif_find_dst_ports(bridgeif_private_t *br, struct eth_addr *dst_addr) +{ + int i; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + /* first check for static entries */ + for (i = 0; i < br->max_fdbs_entries; i++) { + if (br->fdbs[i].used) { + if (!memcmp(&br->fdbs[i].addr, dst_addr, sizeof(struct eth_addr))) { + bridgeif_portmask_t ret = br->fdbs[i].dst_ports; + BRIDGEIF_READ_UNPROTECT(lev); + return ret; + } + } + } + if (dst_addr->addr[0] & 1) { + /* no match found: flood remaining group address */ + BRIDGEIF_READ_UNPROTECT(lev); + return BR_FLOOD; + } + BRIDGEIF_READ_UNPROTECT(lev); + /* no match found: check dynamic fdb for port or fall back to flooding */ + return bridgeif_fdb_get_dst_ports(br->fdbd, dst_addr); +} + +/** Helper function to see if a destination mac belongs to the bridge + * (bridge netif or one of the port netifs), in which case the frame + * is sent to the cpu only. 
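+ * The check compares against the bridge netif's own MAC address as well as the MAC of every port netif that has been added.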
+ */ +static int +bridgeif_is_local_mac(bridgeif_private_t *br, struct eth_addr *addr) +{ + int i; + BRIDGEIF_DECL_PROTECT(lev); + if (!memcmp(br->netif->hwaddr, addr, sizeof(struct eth_addr))) { + return 1; + } + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->num_ports; i++) { + struct netif *portif = br->ports[i].port_netif; + if (portif != NULL) { + if (!memcmp(portif->hwaddr, addr, sizeof(struct eth_addr))) { + BRIDGEIF_READ_UNPROTECT(lev); + return 1; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return 0; +} + +/* Output helper function */ +static err_t +bridgeif_send_to_port(bridgeif_private_t *br, struct pbuf *p, u8_t dstport_idx) +{ + if (dstport_idx < BRIDGEIF_MAX_PORTS) { + /* possibly an external port */ + if (dstport_idx < br->max_ports) { + struct netif *portif = br->ports[dstport_idx].port_netif; + if ((portif != NULL) && (portif->linkoutput != NULL)) { + /* prevent sending out to rx port */ + if (netif_get_index(portif) != p->if_idx) { + if (netif_is_link_up(portif)) { + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> flood(%p:%d) -> %d\n", (void *)p, p->if_idx, netif_get_index(portif))); + return portif->linkoutput(portif, p); + } + } + } + } + } else { + LWIP_ASSERT("invalid port index", dstport_idx == BRIDGEIF_MAX_PORTS); + } + return ERR_OK; +} + +/** Helper function to pass a pbuf to all ports marked in 'dstports' + */ +static err_t +bridgeif_send_to_ports(bridgeif_private_t *br, struct pbuf *p, bridgeif_portmask_t dstports) +{ + err_t err, ret_err = ERR_OK; + u8_t i; + bridgeif_portmask_t mask = 1; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < BRIDGEIF_MAX_PORTS; i++, mask = (bridgeif_portmask_t)(mask << 1)) { + if (dstports & mask) { + err = bridgeif_send_to_port(br, p, i); + if (err != ERR_OK) { + ret_err = err; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ret_err; +} + +/** Output function of the application port of the bridge (the one with an ip address). + * The forwarding port(s) where this pbuf is sent on is/are automatically selected + * from the FDB. + */ +static err_t +bridgeif_output(struct netif *netif, struct pbuf *p) +{ + err_t err; + bridgeif_private_t *br = (bridgeif_private_t *)netif->state; + struct eth_addr *dst = (struct eth_addr *)(p->payload); + + bridgeif_portmask_t dstports = bridgeif_find_dst_ports(br, dst); + err = bridgeif_send_to_ports(br, p, dstports); + + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p->tot_len); + if (((u8_t *)p->payload)[0] & 1) { + /* broadcast or multicast packet*/ + MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts); + } else { + /* unicast packet */ + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + } + /* increase ifoutdiscards or ifouterrors on error */ + + LINK_STATS_INC(link.xmit); + + return err; +} + +/** The actual bridge input function. Port netif's input is changed to call + * here. This function decides where the frame is forwarded. 
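+ * Group-addressed frames are forwarded to the port set returned by the FDB lookup and, if the cpu bit is set, also passed to the bridge netif's input function. + * Frames addressed to the bridge's own MAC (or a port MAC) are delivered to the cpu port only; all other unicast frames are forwarded according to the FDB, + * falling back to flooding the external ports when the destination is unknown.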
+ */ +static err_t +bridgeif_input(struct pbuf *p, struct netif *netif) +{ + u8_t rx_idx; + bridgeif_portmask_t dstports; + struct eth_addr *src, *dst; + bridgeif_private_t *br; + bridgeif_port_t *port; + if (p == NULL || netif == NULL) { + return ERR_VAL; + } + port = (bridgeif_port_t *)netif_get_client_data(netif, bridgeif_netif_client_id); + LWIP_ASSERT("port data not set", port != NULL); + if (port == NULL || port->bridge == NULL) { + return ERR_VAL; + } + br = (bridgeif_private_t *)port->bridge; + rx_idx = netif_get_index(netif); + /* store receive index in pbuf */ + p->if_idx = rx_idx; + + dst = (struct eth_addr *)p->payload; + src = (struct eth_addr *)(((u8_t *)p->payload) + sizeof(struct eth_addr)); + + if ((src->addr[0] & 1) == 0) { + /* update src for all non-group addresses */ + bridgeif_fdb_update_src(br->fdbd, src, port->port_num); + } + + if (dst->addr[0] & 1) { + /* group address -> flood + cpu? */ + dstports = bridgeif_find_dst_ports(br, dst); + bridgeif_send_to_ports(br, p, dstports); + if (dstports & (1 << BRIDGEIF_MAX_PORTS)) { + /* we pass the reference to ->input or have to free it */ + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> input(%p)\n", (void *)p)); + if (br->netif->input(p, br->netif) != ERR_OK) { + pbuf_free(p); + } + } else { + /* all references done */ + pbuf_free(p); + } + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; + } else { + /* is this for one of the local ports? */ + if (bridgeif_is_local_mac(br, dst)) { + /* yes, send to cpu port only */ + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> input(%p)\n", (void *)p)); + return br->netif->input(p, br->netif); + } + + /* get dst port */ + dstports = bridgeif_find_dst_ports(br, dst); + bridgeif_send_to_ports(br, p, dstports); + /* no need to send to cpu, flooding is for external ports only */ + /* by this, we consumed the pbuf */ + pbuf_free(p); + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; + } +} + +#if !BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +/** Input function for port netifs used to synchronize into tcpip_thread. + */ +static err_t +bridgeif_tcpip_input(struct pbuf *p, struct netif *netif) +{ + return tcpip_inpkt(p, netif, bridgeif_input); +} +#endif /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ + +/** + * @ingroup bridgeif + * Initialization function passed to netif_add(). + * + * ATTENTION: A pointer to a @ref bridgeif_initdata_t must be passed as 'state' + * to @ref netif_add when adding the bridge. I supplies MAC address + * and controls memory allocation (number of ports, FDB size). 
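+ * A minimal setup sketch, mirroring the usage notes above (my_ip/my_netmask/my_gw and the port netifs are illustrative names): + * bridgeif_initdata_t mybridge_initdata = BRIDGEIF_INITDATA1(2, 1024, 16, ETH_ADDR(0, 1, 2, 3, 4, 5)); + * struct netif bridge_netif; + * netif_add(&bridge_netif, &my_ip, &my_netmask, &my_gw, &mybridge_initdata, bridgeif_init, tcpip_input); + * bridgeif_add_port(&bridge_netif, &port1_netif); + * bridgeif_add_port(&bridge_netif, &port2_netif);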
+ * + * @param netif the lwip network interface structure for this ethernetif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + * any other err_t on error + */ +err_t +bridgeif_init(struct netif *netif) +{ + bridgeif_initdata_t *init_data; + bridgeif_private_t *br; + size_t alloc_len_sizet; + mem_size_t alloc_len; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("bridgeif needs an input callback", (netif->input != NULL)); +#if !BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT + if (netif->input == tcpip_input) { + LWIP_DEBUGF(BRIDGEIF_DEBUG | LWIP_DBG_ON, ("bridgeif does not need tcpip_input, use netif_input/ethernet_input instead")); + } +#endif + + if (bridgeif_netif_client_id == 0xFF) { + bridgeif_netif_client_id = netif_alloc_client_data_id(); + } + + init_data = (bridgeif_initdata_t *)netif->state; + LWIP_ASSERT("init_data != NULL", (init_data != NULL)); + LWIP_ASSERT("init_data->max_ports <= BRIDGEIF_MAX_PORTS", + init_data->max_ports <= BRIDGEIF_MAX_PORTS); + + alloc_len_sizet = sizeof(bridgeif_private_t) + (init_data->max_ports * sizeof(bridgeif_port_t) + (init_data->max_fdb_static_entries * sizeof(bridgeif_fdb_static_entry_t))); + alloc_len = (mem_size_t)alloc_len_sizet; + LWIP_ASSERT("alloc_len == alloc_len_sizet", alloc_len == alloc_len_sizet); + LWIP_DEBUGF(BRIDGEIF_DEBUG, ("bridgeif_init: allocating %d bytes for private data\n", (int)alloc_len)); + br = (bridgeif_private_t *)mem_calloc(1, alloc_len); + if (br == NULL) { + LWIP_DEBUGF(NETIF_DEBUG, ("bridgeif_init: out of memory\n")); + return ERR_MEM; + } + memcpy(&br->ethaddr, &init_data->ethaddr, sizeof(br->ethaddr)); + br->netif = netif; + + br->max_ports = init_data->max_ports; + br->ports = (bridgeif_port_t *)(br + 1); + + br->max_fdbs_entries = init_data->max_fdb_static_entries; + br->fdbs = (bridgeif_fdb_static_entry_t *)(((u8_t *)(br + 1)) + (init_data->max_ports * sizeof(bridgeif_port_t))); + + br->max_fdbd_entries = init_data->max_fdb_dynamic_entries; + br->fdbd = bridgeif_fdb_init(init_data->max_fdb_dynamic_entries); + if (br->fdbd == NULL) { + LWIP_DEBUGF(NETIF_DEBUG, ("bridgeif_init: out of memory in fdb_init\n")); + mem_free(br); + return ERR_MEM; + } + +#if LWIP_NETIF_HOSTNAME + /* Initialize interface hostname */ + netif->hostname = "lwip"; +#endif /* LWIP_NETIF_HOSTNAME */ + + /* + * Initialize the snmp variables and counters inside the struct netif. + * The last argument should be replaced with your link speed, in units + * of bits per second. + */ + MIB2_INIT_NETIF(netif, snmp_ifType_ethernet_csmacd, 0); + + netif->state = br; + netif->name[0] = IFNAME0; + netif->name[1] = IFNAME1; + /* We directly use etharp_output() here to save a function call. + * You can instead declare your own function an call etharp_output() + * from it if you have to do some checks before sending (e.g. if link + * is available...) 
*/ +#if LWIP_IPV4 + netif->output = etharp_output; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + netif->output_ip6 = ethip6_output; +#endif /* LWIP_IPV6 */ + netif->linkoutput = bridgeif_output; + + /* set MAC hardware address length */ + netif->hwaddr_len = ETH_HWADDR_LEN; + + /* set MAC hardware address */ + memcpy(netif->hwaddr, &br->ethaddr, ETH_HWADDR_LEN); + + /* maximum transfer unit */ + netif->mtu = 1500; + + /* device capabilities */ + /* don't set NETIF_FLAG_ETHARP if this device is not an ethernet one */ + netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET | NETIF_FLAG_IGMP | NETIF_FLAG_MLD6 | NETIF_FLAG_LINK_UP; + +#if LWIP_IPV6 && LWIP_IPV6_MLD + /* + * For hardware/netifs that implement MAC filtering. + * All-nodes link-local is handled by default, so we must let the hardware know + * to allow multicast packets in. + * Should set mld_mac_filter previously. */ + if (netif->mld_mac_filter != NULL) { + ip6_addr_t ip6_allnodes_ll; + ip6_addr_set_allnodes_linklocal(&ip6_allnodes_ll); + netif->mld_mac_filter(netif, &ip6_allnodes_ll, NETIF_ADD_MAC_FILTER); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + + return ERR_OK; +} + +/** + * @ingroup bridgeif + * Add a port to the bridge + */ +err_t +bridgeif_add_port(struct netif *bridgeif, struct netif *portif) +{ + bridgeif_private_t *br; + bridgeif_port_t *port; + + LWIP_ASSERT("bridgeif != NULL", bridgeif != NULL); + LWIP_ASSERT("bridgeif->state != NULL", bridgeif->state != NULL); + LWIP_ASSERT("portif != NULL", portif != NULL); + + if (!(portif->flags & NETIF_FLAG_ETHARP) || !(portif->flags & NETIF_FLAG_ETHERNET)) { + /* can only add ETHERNET/ETHARP interfaces */ + return ERR_VAL; + } + + br = (bridgeif_private_t *)bridgeif->state; + + if (br->num_ports >= br->max_ports) { + return ERR_VAL; + } + port = &br->ports[br->num_ports]; + port->port_netif = portif; + port->port_num = br->num_ports; + port->bridge = br; + br->num_ports++; + + /* let the port call us on input */ +#if BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT + portif->input = bridgeif_input; +#else + portif->input = bridgeif_tcpip_input; +#endif + /* store pointer to bridge in netif */ + netif_set_client_data(portif, bridgeif_netif_client_id, port); + /* remove ETHARP flag to prevent sending report events on netif-up */ + netif_clear_flags(portif, NETIF_FLAG_ETHARP); + + return ERR_OK; +} + +#endif /* LWIP_NUM_NETIF_CLIENT_DATA */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c new file mode 100644 index 000000000..6739fc247 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c @@ -0,0 +1,212 @@ +/** + * @file + * lwIP netif implementing an FDB for IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +/** + * @defgroup bridgeif_fdb FDB example code + * @ingroup bridgeif + * This file implements an example for an FDB (Forwarding DataBase) + */ + +#include "netif/bridgeif.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/timeouts.h" +#include <string.h> + +#define BRIDGEIF_AGE_TIMER_MS 1000 + +#define BR_FDB_TIMEOUT_SEC (60*5) /* 5 minutes FDB timeout */ + +typedef struct bridgeif_dfdb_entry_s { + u8_t used; + u8_t port; + u32_t ts; + struct eth_addr addr; +} bridgeif_dfdb_entry_t; + +typedef struct bridgeif_dfdb_s { + u16_t max_fdb_entries; + bridgeif_dfdb_entry_t *fdb; +} bridgeif_dfdb_t; + +/** + * @ingroup bridgeif_fdb + * A real simple and slow implementation of an auto-learning forwarding database that + * remembers known src mac addresses to know which port to send frames destined for that + * mac address.
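+ * Learned entries are (re)stamped with BR_FDB_TIMEOUT_SEC whenever the source address is seen and are aged by a one-second timer (bridgeif_age_tmr), + * so an entry expires after roughly five minutes without traffic.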
+ * + * ATTENTION: This is meant as an example only, in real-world use, you should + * provide a better implementation :-) + */ +void +bridgeif_fdb_update_src(void *fdb_ptr, struct eth_addr *src_addr, u8_t port_idx) +{ + int i; + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + if (!memcmp(&e->addr, src_addr, sizeof(struct eth_addr))) { + LWIP_DEBUGF(BRIDGEIF_FDB_DEBUG, ("br: update src %02x:%02x:%02x:%02x:%02x:%02x (from %d) @ idx %d\n", + src_addr->addr[0], src_addr->addr[1], src_addr->addr[2], src_addr->addr[3], src_addr->addr[4], src_addr->addr[5], + port_idx, i)); + BRIDGEIF_WRITE_PROTECT(lev); + e->ts = BR_FDB_TIMEOUT_SEC; + e->port = port_idx; + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return; + } + } + } + /* not found, allocate new entry from free */ + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (!e->used || !e->ts) { + BRIDGEIF_WRITE_PROTECT(lev); + /* check again when protected */ + if (!e->used || !e->ts) { + LWIP_DEBUGF(BRIDGEIF_FDB_DEBUG, ("br: create src %02x:%02x:%02x:%02x:%02x:%02x (from %d) @ idx %d\n", + src_addr->addr[0], src_addr->addr[1], src_addr->addr[2], src_addr->addr[3], src_addr->addr[4], src_addr->addr[5], + port_idx, i)); + memcpy(&e->addr, src_addr, sizeof(struct eth_addr)); + e->ts = BR_FDB_TIMEOUT_SEC; + e->port = port_idx; + e->used = 1; + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + /* not found, no free entry -> flood */ +} + +/** + * @ingroup bridgeif_fdb + * Walk our list of auto-learnt fdb entries and return a port to forward or BR_FLOOD if unknown + */ +bridgeif_portmask_t +bridgeif_fdb_get_dst_ports(void *fdb_ptr, struct eth_addr *dst_addr) +{ + int i; + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + if (!memcmp(&e->addr, dst_addr, sizeof(struct eth_addr))) { + bridgeif_portmask_t ret = (bridgeif_portmask_t)(1 << e->port); + BRIDGEIF_READ_UNPROTECT(lev); + return ret; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return BR_FLOOD; +} + +/** + * @ingroup bridgeif_fdb + * Aging implementation of our simple fdb + */ +static void +bridgeif_fdb_age_one_second(void *fdb_ptr) +{ + int i; + bridgeif_dfdb_t *fdb; + BRIDGEIF_DECL_PROTECT(lev); + + fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_READ_PROTECT(lev); + + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + BRIDGEIF_WRITE_PROTECT(lev); + /* check again when protected */ + if (e->used && e->ts) { + if (--e->ts == 0) { + e->used = 0; + } + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); +} + +/** Timer callback for fdb aging, called once per second */ +static void +bridgeif_age_tmr(void *arg) +{ + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)arg; + + LWIP_ASSERT("invalid arg", arg != NULL); + + bridgeif_fdb_age_one_second(fdb); + sys_timeout(BRIDGEIF_AGE_TIMER_MS, bridgeif_age_tmr, arg); +} + +/** + * @ingroup bridgeif_fdb + * Init our simple fdb list + */ +void * +bridgeif_fdb_init(u16_t max_fdb_entries) +{ + bridgeif_dfdb_t *fdb; + size_t alloc_len_sizet = sizeof(bridgeif_dfdb_t) + 
(max_fdb_entries * sizeof(bridgeif_dfdb_entry_t)); + mem_size_t alloc_len = (mem_size_t)alloc_len_sizet; + LWIP_ASSERT("alloc_len == alloc_len_sizet", alloc_len == alloc_len_sizet); + LWIP_DEBUGF(BRIDGEIF_DEBUG, ("bridgeif_fdb_init: allocating %d bytes for private FDB data\n", (int)alloc_len)); + fdb = (bridgeif_dfdb_t *)mem_calloc(1, alloc_len); + if (fdb == NULL) { + return NULL; + } + fdb->max_fdb_entries = max_fdb_entries; + fdb->fdb = (bridgeif_dfdb_entry_t *)(fdb + 1); + + sys_timeout(BRIDGEIF_AGE_TIMER_MS, bridgeif_age_tmr, fdb); + + return fdb; +} diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ethernet.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ethernet.c new file mode 100644 index 000000000..dd171e280 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ethernet.c @@ -0,0 +1,321 @@ +/** + * @file + * Ethernet common functions + * + * @defgroup ethernet Ethernet + * @ingroup callbackstyle_api + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_ARP || LWIP_ETHERNET + +#include "netif/ethernet.h" +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/etharp.h" +#include "lwip/ip.h" +#include "lwip/snmp.h" + +#include <string.h> + +#include "netif/ppp/ppp_opts.h" +#if PPPOE_SUPPORT +#include "netif/ppp/pppoe.h" +#endif /* PPPOE_SUPPORT */ + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +const struct eth_addr ethbroadcast = {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}; +const struct eth_addr ethzero = {{0, 0, 0, 0, 0, 0}}; + +/** + * @ingroup lwip_nosys + * Process received ethernet frames.
Using this function instead of directly + * calling ip_input and passing ARP frames through etharp in ethernetif_input, + * the ARP cache is protected from concurrent access.\n + * Don't call directly, pass to netif_add() and call netif->input(). + * + * @param p the received packet, p->payload pointing to the ethernet header + * @param netif the network interface on which the packet was received + * + * @see LWIP_HOOK_UNKNOWN_ETH_PROTOCOL + * @see ETHARP_SUPPORT_VLAN + * @see LWIP_HOOK_VLAN_CHECK + */ +err_t +ethernet_input(struct pbuf *p, struct netif *netif) +{ + struct eth_hdr *ethhdr; + u16_t type; +#if LWIP_ARP || ETHARP_SUPPORT_VLAN || LWIP_IPV6 + u16_t next_hdr_offset = SIZEOF_ETH_HDR; +#endif /* LWIP_ARP || ETHARP_SUPPORT_VLAN */ + + LWIP_ASSERT_CORE_LOCKED(); + + if (p->len <= SIZEOF_ETH_HDR) { + /* a packet with only an ethernet header (or less) is not valid for us */ + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinerrors); + goto free_and_return; + } + + if (p->if_idx == NETIF_NO_INDEX) { + p->if_idx = netif_get_index(netif); + } + + /* points to packet payload, which starts with an Ethernet header */ + ethhdr = (struct eth_hdr *)p->payload; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, + ("ethernet_input: dest:%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F", src:%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F", type:%"X16_F"\n", + (unsigned char)ethhdr->dest.addr[0], (unsigned char)ethhdr->dest.addr[1], (unsigned char)ethhdr->dest.addr[2], + (unsigned char)ethhdr->dest.addr[3], (unsigned char)ethhdr->dest.addr[4], (unsigned char)ethhdr->dest.addr[5], + (unsigned char)ethhdr->src.addr[0], (unsigned char)ethhdr->src.addr[1], (unsigned char)ethhdr->src.addr[2], + (unsigned char)ethhdr->src.addr[3], (unsigned char)ethhdr->src.addr[4], (unsigned char)ethhdr->src.addr[5], + lwip_htons(ethhdr->type))); + + type = ethhdr->type; +#if ETHARP_SUPPORT_VLAN + if (type == PP_HTONS(ETHTYPE_VLAN)) { + struct eth_vlan_hdr *vlan = (struct eth_vlan_hdr *)(((char *)ethhdr) + SIZEOF_ETH_HDR); + next_hdr_offset = SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR; + if (p->len <= SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR) { + /* a packet with only an ethernet/vlan header (or less) is not valid for us */ + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinerrors); + goto free_and_return; + } +#if defined(LWIP_HOOK_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK_FN) /* if not, allow all VLANs */ +#ifdef LWIP_HOOK_VLAN_CHECK + if (!LWIP_HOOK_VLAN_CHECK(netif, ethhdr, vlan)) { +#elif defined(ETHARP_VLAN_CHECK_FN) + if (!ETHARP_VLAN_CHECK_FN(ethhdr, vlan)) { +#elif defined(ETHARP_VLAN_CHECK) + if (VLAN_ID(vlan) != ETHARP_VLAN_CHECK) { +#endif + /* silently ignore this packet: not for our VLAN */ + pbuf_free(p); + return ERR_OK; + } +#endif /* defined(LWIP_HOOK_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK_FN) */ + type = vlan->tpid; + } +#endif /* ETHARP_SUPPORT_VLAN */ + +#if LWIP_ARP_FILTER_NETIF + netif = LWIP_ARP_FILTER_NETIF_FN(p, netif, lwip_htons(type)); +#endif /* LWIP_ARP_FILTER_NETIF*/ + + if (ethhdr->dest.addr[0] & 1) { + /* this might be a multicast or broadcast packet */ + if (ethhdr->dest.addr[0] == LL_IP4_MULTICAST_ADDR_0) { +#if LWIP_IPV4 + if ((ethhdr->dest.addr[1] == LL_IP4_MULTICAST_ADDR_1) && + (ethhdr->dest.addr[2] == LL_IP4_MULTICAST_ADDR_2)) { + /* mark the pbuf as link-layer multicast */ + p->flags |= PBUF_FLAG_LLMCAST; + } +#endif /* LWIP_IPV4 */ + } +#if 
LWIP_IPV6 + else if ((ethhdr->dest.addr[0] == LL_IP6_MULTICAST_ADDR_0) && + (ethhdr->dest.addr[1] == LL_IP6_MULTICAST_ADDR_1)) { + /* mark the pbuf as link-layer multicast */ + p->flags |= PBUF_FLAG_LLMCAST; + } +#endif /* LWIP_IPV6 */ + else if (eth_addr_cmp(ðhdr->dest, ðbroadcast)) { + /* mark the pbuf as link-layer broadcast */ + p->flags |= PBUF_FLAG_LLBCAST; + } + } + + switch (type) { +#if LWIP_IPV4 && LWIP_ARP + /* IP packet? */ + case PP_HTONS(ETHTYPE_IP): + if (!(netif->flags & NETIF_FLAG_ETHARP)) { + goto free_and_return; + } + /* skip Ethernet header (min. size checked above) */ + if (pbuf_remove_header(p, next_hdr_offset)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("ethernet_input: IPv4 packet dropped, too short (%"U16_F"/%"U16_F")\n", + p->tot_len, next_hdr_offset)); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("Can't move over header in packet")); + goto free_and_return; + } else { + /* pass to IP layer */ + ip4_input(p, netif); + } + break; + + case PP_HTONS(ETHTYPE_ARP): + if (!(netif->flags & NETIF_FLAG_ETHARP)) { + goto free_and_return; + } + /* skip Ethernet header (min. size checked above) */ + if (pbuf_remove_header(p, next_hdr_offset)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("ethernet_input: ARP response packet dropped, too short (%"U16_F"/%"U16_F")\n", + p->tot_len, next_hdr_offset)); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("Can't move over header in packet")); + ETHARP_STATS_INC(etharp.lenerr); + ETHARP_STATS_INC(etharp.drop); + goto free_and_return; + } else { + /* pass p to ARP module */ + etharp_input(p, netif); + } + break; +#endif /* LWIP_IPV4 && LWIP_ARP */ +#if PPPOE_SUPPORT + case PP_HTONS(ETHTYPE_PPPOEDISC): /* PPP Over Ethernet Discovery Stage */ + pppoe_disc_input(netif, p); + break; + + case PP_HTONS(ETHTYPE_PPPOE): /* PPP Over Ethernet Session Stage */ + pppoe_data_input(netif, p); + break; +#endif /* PPPOE_SUPPORT */ + +#if LWIP_IPV6 + case PP_HTONS(ETHTYPE_IPV6): /* IPv6 */ + /* skip Ethernet header */ + if ((p->len < next_hdr_offset) || pbuf_remove_header(p, next_hdr_offset)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("ethernet_input: IPv6 packet dropped, too short (%"U16_F"/%"U16_F")\n", + p->tot_len, next_hdr_offset)); + goto free_and_return; + } else { + /* pass to IPv6 layer */ + ip6_input(p, netif); + } + break; +#endif /* LWIP_IPV6 */ + + default: +#ifdef LWIP_HOOK_UNKNOWN_ETH_PROTOCOL + if (LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(p, netif) == ERR_OK) { + break; + } +#endif + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinunknownprotos); + goto free_and_return; + } + + /* This means the pbuf is freed or consumed, + so the caller doesn't have to free it again */ + return ERR_OK; + +free_and_return: + pbuf_free(p); + return ERR_OK; +} + +/** + * @ingroup ethernet + * Send an ethernet packet on the network using netif->linkoutput(). + * The ethernet header is filled in before sending. + * + * @see LWIP_HOOK_VLAN_SET + * + * @param netif the lwIP network interface on which to send the packet + * @param p the packet to send. pbuf layer must be @ref PBUF_LINK. 
+ * @param src the source MAC address to be copied into the ethernet header + * @param dst the destination MAC address to be copied into the ethernet header + * @param eth_type ethernet type (@ref lwip_ieee_eth_type) + * @return ERR_OK if the packet was sent, any other err_t on failure + */ +err_t +ethernet_output(struct netif * netif, struct pbuf * p, + const struct eth_addr * src, const struct eth_addr * dst, + u16_t eth_type) { + struct eth_hdr *ethhdr; + u16_t eth_type_be = lwip_htons(eth_type); + +#if ETHARP_SUPPORT_VLAN && defined(LWIP_HOOK_VLAN_SET) + s32_t vlan_prio_vid = LWIP_HOOK_VLAN_SET(netif, p, src, dst, eth_type); + if (vlan_prio_vid >= 0) { + struct eth_vlan_hdr *vlanhdr; + + LWIP_ASSERT("prio_vid must be <= 0xFFFF", vlan_prio_vid <= 0xFFFF); + + if (pbuf_add_header(p, SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR) != 0) { + goto pbuf_header_failed; + } + vlanhdr = (struct eth_vlan_hdr *)(((u8_t *)p->payload) + SIZEOF_ETH_HDR); + vlanhdr->tpid = eth_type_be; + vlanhdr->prio_vid = lwip_htons((u16_t)vlan_prio_vid); + + eth_type_be = PP_HTONS(ETHTYPE_VLAN); + } else +#endif /* ETHARP_SUPPORT_VLAN && defined(LWIP_HOOK_VLAN_SET) */ + { + if (pbuf_add_header(p, SIZEOF_ETH_HDR) != 0) { + goto pbuf_header_failed; + } + } + + LWIP_ASSERT_CORE_LOCKED(); + + ethhdr = (struct eth_hdr *)p->payload; + ethhdr->type = eth_type_be; + SMEMCPY(ðhdr->dest, dst, ETH_HWADDR_LEN); + SMEMCPY(ðhdr->src, src, ETH_HWADDR_LEN); + + LWIP_ASSERT("netif->hwaddr_len must be 6 for ethernet_output!", + (netif->hwaddr_len == ETH_HWADDR_LEN)); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, + ("ethernet_output: sending packet %p\n", (void *)p)); + + /* send the packet */ + return netif->linkoutput(netif, p); + +pbuf_header_failed: + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("ethernet_output: could not allocate room for header.\n")); + LINK_STATS_INC(link.lenerr); + return ERR_BUF; +} + +#endif /* LWIP_ARP || LWIP_ETHERNET */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c new file mode 100644 index 000000000..7f0d27676 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c @@ -0,0 +1,920 @@ +/** + * @file + * + * 6LowPAN output for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + * + * This implementation aims to conform to IEEE 802.15.4(-2015), RFC 4944 and RFC 6282. + * @todo: RFC 6775. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/** + * @defgroup sixlowpan 6LoWPAN (RFC4944) + * @ingroup netifs + * 6LowPAN netif implementation + */ + +#include "netif/lowpan6.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/tcpip.h" +#include "lwip/snmp.h" +#include "netif/ieee802154.h" + +#include + +#if LWIP_6LOWPAN_802154_HW_CRC +#define LWIP_6LOWPAN_DO_CALC_CRC(buf, len) 0 +#else +#define LWIP_6LOWPAN_DO_CALC_CRC(buf, len) LWIP_6LOWPAN_CALC_CRC(buf, len) +#endif + +/** This is a helper struct for reassembly of fragments + * (IEEE 802.15.4 limits to 127 bytes) + */ +struct lowpan6_reass_helper { + struct lowpan6_reass_helper *next_packet; + struct pbuf *reass; + struct pbuf *frags; + u8_t timer; + struct lowpan6_link_addr sender_addr; + u16_t datagram_size; + u16_t datagram_tag; +}; + +/** This struct keeps track of per-netif state */ +struct lowpan6_ieee802154_data { + /** fragment reassembly list */ + struct lowpan6_reass_helper *reass_list; +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + /** address context for compression */ + ip6_addr_t lowpan6_context[LWIP_6LOWPAN_NUM_CONTEXTS]; +#endif + /** Datagram Tag for fragmentation */ + u16_t tx_datagram_tag; + /** local PAN ID for IEEE 802.15.4 header */ + u16_t ieee_802154_pan_id; + /** Sequence Number for IEEE 802.15.4 transmission */ + u8_t tx_frame_seq_num; +}; + +/* Maximum frame size is 127 bytes minus CRC size */ +#define LOWPAN6_MAX_PAYLOAD (127 - 2) + +/** Currently, this state is global, since there's only one 6LoWPAN netif */ +static struct lowpan6_ieee802154_data lowpan6_data; + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +#define LWIP_6LOWPAN_CONTEXTS(netif) lowpan6_data.lowpan6_context +#else +#define LWIP_6LOWPAN_CONTEXTS(netif) NULL +#endif + +static const struct lowpan6_link_addr ieee_802154_broadcast = {2, {0xff, 0xff}}; + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS +static struct lowpan6_link_addr short_mac_addr = {2, {0, 0}}; +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +/* IEEE 802.15.4 specific functions: */ + +/** Write the IEEE 802.15.4 header that encapsulates the 6LoWPAN frame. + * Src and dst PAN IDs are filled with the ID set by @ref lowpan6_set_pan_id. + * + * Since the length is variable: + * @returns the header length + */ +static u8_t +lowpan6_write_iee802154_header(struct ieee_802154_hdr *hdr, const struct lowpan6_link_addr *src, + const struct lowpan6_link_addr *dst) +{ + u8_t ieee_header_len; + u8_t *buffer; + u8_t i; + u16_t fc; + + fc = IEEE_802154_FC_FT_DATA; /* send data packet (2003 frame version) */ + fc |= IEEE_802154_FC_PANID_COMPR; /* set PAN ID compression, for now src and dst PANs are equal */ + if (dst != &ieee_802154_broadcast) { + fc |= IEEE_802154_FC_ACK_REQ; /* data packet, no broadcast: ack required. 
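The frame control word assembled here follows the IEEE 802.15.4 field layout. Below is a minimal standalone sketch of the same flag packing with the bit positions written out explicitly; the constants are illustrative, spelled out from the 802.15.4-2006 layout rather than taken from the lwIP IEEE_802154_FC_* macros.

#include <stdint.h>
#include <stdio.h>

/* Illustrative frame-control bit positions (IEEE 802.15.4-2006). */
#define FC_FT_DATA          0x0001u  /* frame type: data */
#define FC_ACK_REQ          0x0020u  /* acknowledgment request */
#define FC_PANID_COMPR      0x0040u  /* PAN ID compression */
#define FC_DST_ADDR_SHORT   0x0800u  /* destination addressing mode: 16-bit */
#define FC_DST_ADDR_EXT     0x0C00u  /* destination addressing mode: 64-bit */
#define FC_SRC_ADDR_SHORT   0x8000u  /* source addressing mode: 16-bit */
#define FC_SRC_ADDR_EXT     0xC000u  /* source addressing mode: 64-bit */

static uint16_t make_data_fc(int dst_len, int src_len, int broadcast)
{
  uint16_t fc = FC_FT_DATA | FC_PANID_COMPR;     /* src and dst share one PAN */
  if (!broadcast) {
    fc |= FC_ACK_REQ;                            /* unicast data wants an ACK */
  }
  fc |= (dst_len == 2) ? FC_DST_ADDR_SHORT : FC_DST_ADDR_EXT;
  fc |= (src_len == 2) ? FC_SRC_ADDR_SHORT : FC_SRC_ADDR_EXT;
  return fc;
}

int main(void)
{
  /* 16-bit broadcast destination, 64-bit source: 0xC841 with this layout */
  printf("fc=0x%04x\n", make_data_fc(2, 8, 1));
  return 0;
}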
*/ + } + if (dst->addr_len == 2) { + fc |= IEEE_802154_FC_DST_ADDR_MODE_SHORT; + } else { + LWIP_ASSERT("invalid dst address length", dst->addr_len == 8); + fc |= IEEE_802154_FC_DST_ADDR_MODE_EXT; + } + if (src->addr_len == 2) { + fc |= IEEE_802154_FC_SRC_ADDR_MODE_SHORT; + } else { + LWIP_ASSERT("invalid src address length", src->addr_len == 8); + fc |= IEEE_802154_FC_SRC_ADDR_MODE_EXT; + } + hdr->frame_control = fc; + hdr->sequence_number = lowpan6_data.tx_frame_seq_num++; + hdr->destination_pan_id = lowpan6_data.ieee_802154_pan_id; /* pan id */ + + buffer = (u8_t *)hdr; + ieee_header_len = 5; + i = dst->addr_len; + /* reverse memcpy of dst addr */ + while (i-- > 0) { + buffer[ieee_header_len++] = dst->addr[i]; + } + /* Source PAN ID skipped due to PAN ID Compression */ + i = src->addr_len; + /* reverse memcpy of src addr */ + while (i-- > 0) { + buffer[ieee_header_len++] = src->addr[i]; + } + return ieee_header_len; +} + +/** Parse the IEEE 802.15.4 header from a pbuf. + * If successful, the header is hidden from the pbuf. + * + * PAN IDs and seuqence number are not checked + * + * @param p input pbuf, p->payload pointing at the IEEE 802.15.4 header + * @param src pointer to source address filled from the header + * @param dest pointer to destination address filled from the header + * @returns ERR_OK if successful + */ +static err_t +lowpan6_parse_iee802154_header(struct pbuf *p, struct lowpan6_link_addr *src, + struct lowpan6_link_addr *dest) +{ + u8_t *puc; + s8_t i; + u16_t frame_control, addr_mode; + u16_t datagram_offset; + + /* Parse IEEE 802.15.4 header */ + puc = (u8_t *)p->payload; + frame_control = puc[0] | (puc[1] << 8); + datagram_offset = 2; + if (frame_control & IEEE_802154_FC_SEQNO_SUPPR) { + if (IEEE_802154_FC_FRAME_VERSION_GET(frame_control) <= 1) { + /* sequence number suppressed, this is not valid for versions 0/1 */ + return ERR_VAL; + } + } else { + datagram_offset++; + } + datagram_offset += 2; /* Skip destination PAN ID */ + addr_mode = frame_control & IEEE_802154_FC_DST_ADDR_MODE_MASK; + if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_EXT) { + /* extended address (64 bit) */ + dest->addr_len = 8; + /* reverse memcpy: */ + for (i = 0; i < 8; i++) { + dest->addr[i] = puc[datagram_offset + 7 - i]; + } + datagram_offset += 8; + } else if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_SHORT) { + /* short address (16 bit) */ + dest->addr_len = 2; + /* reverse memcpy: */ + dest->addr[0] = puc[datagram_offset + 1]; + dest->addr[1] = puc[datagram_offset]; + datagram_offset += 2; + } else { + /* unsupported address mode (do we need "no address"?) */ + return ERR_VAL; + } + + if (!(frame_control & IEEE_802154_FC_PANID_COMPR)) { + /* No PAN ID compression, skip source PAN ID */ + datagram_offset += 2; + } + + addr_mode = frame_control & IEEE_802154_FC_SRC_ADDR_MODE_MASK; + if (addr_mode == IEEE_802154_FC_SRC_ADDR_MODE_EXT) { + /* extended address (64 bit) */ + src->addr_len = 8; + /* reverse memcpy: */ + for (i = 0; i < 8; i++) { + src->addr[i] = puc[datagram_offset + 7 - i]; + } + datagram_offset += 8; + } else if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_SHORT) { + /* short address (16 bit) */ + src->addr_len = 2; + src->addr[0] = puc[datagram_offset + 1]; + src->addr[1] = puc[datagram_offset]; + datagram_offset += 2; + } else { + /* unsupported address mode (do we need "no address"?) */ + return ERR_VAL; + } + + /* hide IEEE802.15.4 header. 
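The MAC header written and parsed above is variable length: frame control, optional sequence number, destination PAN ID, destination address, optional source PAN ID, source address. A small cross-check of that offset arithmetic, as a hypothetical plain-C helper:

#include <stddef.h>

/* Length of the 802.15.4 MAC header as laid out in this file:
 * frame control (2) + sequence number (0 or 1) + destination PAN ID (2) +
 * destination address + optional source PAN ID (2) + source address.
 * The frames written above always carry a sequence number and compress the
 * source PAN ID, so their header length is 5 + dst_addr_len + src_addr_len. */
size_t ieee802154_mhr_length(size_t dst_addr_len, size_t src_addr_len,
                             int seqno_suppressed, int panid_compressed)
{
  size_t len = 2;                       /* frame control */
  len += seqno_suppressed ? 0 : 1;      /* sequence number */
  len += 2;                             /* destination PAN ID */
  len += dst_addr_len;                  /* 2 or 8 bytes */
  len += panid_compressed ? 0 : 2;      /* source PAN ID */
  len += src_addr_len;                  /* 2 or 8 bytes */
  return len;
}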
*/ + if (pbuf_remove_header(p, datagram_offset)) { + return ERR_VAL; + } + return ERR_OK; +} + +/** Calculate the 16-bit CRC as required by IEEE 802.15.4 */ +u16_t +lowpan6_calc_crc(const void* buf, u16_t len) +{ +#define CCITT_POLY_16 0x8408U + u16_t i; + u8_t b; + u16_t crc = 0; + const u8_t* p = (const u8_t*)buf; + + for (i = 0; i < len; i++) { + u8_t data = *p; + for (b = 0U; b < 8U; b++) { + if (((data ^ crc) & 1) != 0) { + crc = (u16_t)((crc >> 1) ^ CCITT_POLY_16); + } else { + crc = (u16_t)(crc >> 1); + } + data = (u8_t)(data >> 1); + } + p++; + } + return crc; +} + +/* Fragmentation specific functions: */ + +static void +free_reass_datagram(struct lowpan6_reass_helper *lrh) +{ + if (lrh->reass) { + pbuf_free(lrh->reass); + } + if (lrh->frags) { + pbuf_free(lrh->frags); + } + mem_free(lrh); +} + +/** + * Removes a datagram from the reassembly queue. + **/ +static void +dequeue_datagram(struct lowpan6_reass_helper *lrh, struct lowpan6_reass_helper *prev) +{ + if (lowpan6_data.reass_list == lrh) { + lowpan6_data.reass_list = lowpan6_data.reass_list->next_packet; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", prev != NULL); + prev->next_packet = lrh->next_packet; + } +} + +/** + * Periodic timer for 6LowPAN functions: + * + * - Remove incomplete/old packets + */ +void +lowpan6_tmr(void) +{ + struct lowpan6_reass_helper *lrh, *lrh_next, *lrh_prev = NULL; + + lrh = lowpan6_data.reass_list; + while (lrh != NULL) { + lrh_next = lrh->next_packet; + if ((--lrh->timer) == 0) { + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + } else { + lrh_prev = lrh; + } + lrh = lrh_next; + } +} + +/* + * Encapsulates data into IEEE 802.15.4 frames. + * Fragments an IPv6 datagram into 6LowPAN units, which fit into IEEE 802.15.4 frames. + * If configured, will compress IPv6 and or UDP headers. + * */ +static err_t +lowpan6_frag(struct netif *netif, struct pbuf *p, const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst) +{ + struct pbuf *p_frag; + u16_t frag_len, remaining_len, max_data_len; + u8_t *buffer; + u8_t ieee_header_len; + u8_t lowpan6_header_len; + u8_t hidden_header_len; + u16_t crc; + u16_t datagram_offset; + err_t err = ERR_IF; + + LWIP_ASSERT("lowpan6_frag: netif->linkoutput not set", netif->linkoutput != NULL); + + /* We'll use a dedicated pbuf for building 6LowPAN fragments. */ + p_frag = pbuf_alloc(PBUF_RAW, 127, PBUF_RAM); + if (p_frag == NULL) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece", p_frag->len == p_frag->tot_len); + + /* Write IEEE 802.15.4 header. */ + buffer = (u8_t *)p_frag->payload; + ieee_header_len = lowpan6_write_iee802154_header((struct ieee_802154_hdr *)buffer, src, dst); + LWIP_ASSERT("ieee_header_len < p_frag->len", ieee_header_len < p_frag->len); + +#if LWIP_6LOWPAN_IPHC + /* Perform 6LowPAN IPv6 header compression according to RFC 6282 */ + /* do the header compression (this does NOT copy any non-compressed data) */ + err = lowpan6_compress_headers(netif, (u8_t *)p->payload, p->len, + &buffer[ieee_header_len], p_frag->len - ieee_header_len, &lowpan6_header_len, + &hidden_header_len, LWIP_6LOWPAN_CONTEXTS(netif), src, dst); + if (err != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + pbuf_free(p_frag); + return err; + } + pbuf_remove_header(p, hidden_header_len); + +#else /* LWIP_6LOWPAN_IPHC */ + /* Send uncompressed IPv6 header with appropriate dispatch byte. 
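lowpan6_calc_crc() above is the reflected CCITT-16 polynomial with a zero initial value and no final XOR, i.e. the parameter set usually catalogued as CRC-16/KERMIT. A self-contained check of that reading, independent of lwIP:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same algorithm as lowpan6_calc_crc: poly 0x8408 (reflected 0x1021),
 * init 0x0000, LSB-first, no output XOR. */
static uint16_t crc16_kermit(const uint8_t *p, size_t len)
{
  uint16_t crc = 0;
  while (len--) {
    crc ^= *p++;
    for (int b = 0; b < 8; b++) {
      crc = (crc & 1) ? (uint16_t)((crc >> 1) ^ 0x8408) : (uint16_t)(crc >> 1);
    }
  }
  return crc;
}

int main(void)
{
  const char *check = "123456789";
  /* The published check value for CRC-16/KERMIT is 0x2189. */
  printf("crc=0x%04x\n", crc16_kermit((const uint8_t *)check, strlen(check)));
  return 0;
}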
*/ + lowpan6_header_len = 1; + buffer[ieee_header_len] = 0x41; /* IPv6 dispatch */ +#endif /* LWIP_6LOWPAN_IPHC */ + + /* Calculate remaining packet length */ + remaining_len = p->tot_len; + + if (remaining_len > 0x7FF) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + /* datagram_size must fit into 11 bit */ + pbuf_free(p_frag); + return ERR_VAL; + } + + /* Fragment, or 1 packet? */ + max_data_len = LOWPAN6_MAX_PAYLOAD - ieee_header_len - lowpan6_header_len; + if (remaining_len > max_data_len) { + u16_t data_len; + /* We must move the 6LowPAN header to make room for the FRAG header. */ + memmove(&buffer[ieee_header_len + 4], &buffer[ieee_header_len], lowpan6_header_len); + + /* Now we need to fragment the packet. FRAG1 header first */ + buffer[ieee_header_len] = 0xc0 | (((p->tot_len + hidden_header_len) >> 8) & 0x7); + buffer[ieee_header_len + 1] = (p->tot_len + hidden_header_len) & 0xff; + + lowpan6_data.tx_datagram_tag++; + buffer[ieee_header_len + 2] = (lowpan6_data.tx_datagram_tag >> 8) & 0xff; + buffer[ieee_header_len + 3] = lowpan6_data.tx_datagram_tag & 0xff; + + /* Fragment follows. */ + data_len = (max_data_len - 4) & 0xf8; + frag_len = data_len + lowpan6_header_len; + + pbuf_copy_partial(p, buffer + ieee_header_len + lowpan6_header_len + 4, frag_len - lowpan6_header_len, 0); + remaining_len -= frag_len - lowpan6_header_len; + /* datagram offset holds the offset before compression */ + datagram_offset = frag_len - lowpan6_header_len + hidden_header_len; + LWIP_ASSERT("datagram offset must be a multiple of 8", (datagram_offset & 7) == 0); + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = ieee_header_len + 4 + frag_len + 2; /* add 2 bytes for crc*/ + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + + while ((remaining_len > 0) && (err == ERR_OK)) { + struct ieee_802154_hdr *hdr = (struct ieee_802154_hdr *)buffer; + /* new frame, new seq num for ACK */ + hdr->sequence_number = lowpan6_data.tx_frame_seq_num++; + + buffer[ieee_header_len] |= 0x20; /* Change FRAG1 to FRAGN */ + + LWIP_ASSERT("datagram offset must be a multiple of 8", (datagram_offset & 7) == 0); + buffer[ieee_header_len + 4] = (u8_t)(datagram_offset >> 3); /* datagram offset in FRAGN header (datagram_offset is max. 11 bit) */ + + frag_len = (127 - ieee_header_len - 5 - 2) & 0xf8; + if (frag_len > remaining_len) { + frag_len = remaining_len; + } + + pbuf_copy_partial(p, buffer + ieee_header_len + 5, frag_len, p->tot_len - remaining_len); + remaining_len -= frag_len; + datagram_offset += frag_len; + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = frag_len + 5 + ieee_header_len + 2; + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + } + } else { + /* It fits in one frame. 
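The fragmentation path above emits RFC 4944 FRAG1 (4-byte) and FRAGN (5-byte) headers inline. The same packing as a standalone sketch, with the 11-bit datagram size, 16-bit tag and 8-byte-unit offset:

#include <assert.h>
#include <stdint.h>

/* FRAG1: 11000xxx dispatch, 11-bit datagram_size, 16-bit datagram_tag. */
void pack_frag1(uint8_t out[4], uint16_t datagram_size, uint16_t tag)
{
  assert(datagram_size <= 0x7FF);                 /* must fit in 11 bits */
  out[0] = 0xC0 | ((datagram_size >> 8) & 0x07);
  out[1] = datagram_size & 0xFF;
  out[2] = (tag >> 8) & 0xFF;
  out[3] = tag & 0xFF;
}

/* FRAGN: 11100xxx dispatch, same size/tag, plus datagram_offset / 8. */
void pack_fragn(uint8_t out[5], uint16_t datagram_size, uint16_t tag,
                uint16_t datagram_offset)
{
  assert((datagram_offset & 0x7) == 0);           /* offset is in 8-byte units */
  pack_frag1(out, datagram_size, tag);
  out[0] |= 0x20;                                 /* FRAG1 -> FRAGN, as above */
  out[4] = (uint8_t)(datagram_offset >> 3);
}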
*/ + frag_len = remaining_len; + + /* Copy IPv6 packet */ + pbuf_copy_partial(p, buffer + ieee_header_len + lowpan6_header_len, frag_len, 0); + remaining_len = 0; + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = frag_len + lowpan6_header_len + ieee_header_len + 2; + LWIP_ASSERT("", p_frag->len <= 127); + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + } + + pbuf_free(p_frag); + + return err; +} + +/** + * @ingroup sixlowpan + * Set context + */ +err_t +lowpan6_set_context(u8_t idx, const ip6_addr_t *context) +{ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + if (idx >= LWIP_6LOWPAN_NUM_CONTEXTS) { + return ERR_ARG; + } + + IP6_ADDR_ZONECHECK(context); + + ip6_addr_set(&lowpan6_data.lowpan6_context[idx], context); + + return ERR_OK; +#else + LWIP_UNUSED_ARG(idx); + LWIP_UNUSED_ARG(context); + return ERR_ARG; +#endif +} + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS +/** + * @ingroup sixlowpan + * Set short address + */ +err_t +lowpan6_set_short_addr(u8_t addr_high, u8_t addr_low) +{ + short_mac_addr.addr[0] = addr_high; + short_mac_addr.addr[1] = addr_low; + + return ERR_OK; +} +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +/* Create IEEE 802.15.4 address from netif address */ +static err_t +lowpan6_hwaddr_to_addr(struct netif *netif, struct lowpan6_link_addr *addr) +{ + addr->addr_len = 8; + if (netif->hwaddr_len == 8) { + LWIP_ERROR("NETIF_MAX_HWADDR_LEN >= 8 required", sizeof(netif->hwaddr) >= 8, return ERR_VAL;); + SMEMCPY(addr->addr, netif->hwaddr, 8); + } else if (netif->hwaddr_len == 6) { + /* Copy from MAC-48 */ + SMEMCPY(addr->addr, netif->hwaddr, 3); + addr->addr[3] = addr->addr[4] = 0xff; + SMEMCPY(&addr->addr[5], &netif->hwaddr[3], 3); + } else { + /* Invalid address length, don't know how to convert this */ + return ERR_VAL; + } + return ERR_OK; +} + +/** + * @ingroup sixlowpan + * Resolve and fill-in IEEE 802.15.4 address header for outgoing IPv6 packet. + * + * Perform Header Compression and fragment if necessary. + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return err_t + */ +err_t +lowpan6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + err_t result; + const u8_t *hwaddr; + struct lowpan6_link_addr src, dest; +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + ip6_addr_t ip6_src; + struct ip6_hdr *ip6_hdr; +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + /* Check if we can compress source address (use aligned copy) */ + ip6_hdr = (struct ip6_hdr *)q->payload; + ip6_addr_copy_from_packed(ip6_src, ip6_hdr->src); + ip6_addr_assign_zone(&ip6_src, IP6_UNICAST, netif); + if (lowpan6_get_address_mode(&ip6_src, &short_mac_addr) == 3) { + src.addr_len = 2; + src.addr[0] = short_mac_addr.addr[0]; + src.addr[1] = short_mac_addr.addr[1]; + } else +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + { + result = lowpan6_hwaddr_to_addr(netif, &src); + if (result != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return result; + } + } + + /* multicast destination IP address? 
*/ + if (ip6_addr_ismulticast(ip6addr)) { + MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts); + /* We need to send to the broadcast address.*/ + return lowpan6_frag(netif, q, &src, &ieee_802154_broadcast); + } + + /* We have a unicast destination IP address */ + /* @todo anycast? */ + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + if (src.addr_len == 2) { + /* If source address was compressable to short_mac_addr, and dest has same subnet and + * is also compressable to 2-bytes, assume we can infer dest as a short address too. */ + dest.addr_len = 2; + dest.addr[0] = ((u8_t *)q->payload)[38]; + dest.addr[1] = ((u8_t *)q->payload)[39]; + if ((src.addr_len == 2) && (ip6_addr_netcmp_zoneless(&ip6_hdr->src, &ip6_hdr->dest)) && + (lowpan6_get_address_mode(ip6addr, &dest) == 3)) { + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + return lowpan6_frag(netif, q, &src, &dest); + } + } +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + + /* Ask ND6 what to do with the packet. */ + result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); + if (result != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return result; + } + + /* If no hardware address is returned, nd6 has queued the packet for later. */ + if (hwaddr == NULL) { + return ERR_OK; + } + + /* Send out the packet using the returned hardware address. */ + dest.addr_len = netif->hwaddr_len; + /* XXX: Inferring the length of the source address from the destination address + * is not correct for IEEE 802.15.4, but currently we don't get this information + * from the neighbor cache */ + SMEMCPY(dest.addr, hwaddr, netif->hwaddr_len); + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + return lowpan6_frag(netif, q, &src, &dest); +} +/** + * @ingroup sixlowpan + * NETIF input function: don't free the input pbuf when returning != ERR_OK! + */ +err_t +lowpan6_input(struct pbuf *p, struct netif *netif) +{ + u8_t *puc, b; + s8_t i; + struct lowpan6_link_addr src, dest; + u16_t datagram_size = 0; + u16_t datagram_offset, datagram_tag; + struct lowpan6_reass_helper *lrh, *lrh_next, *lrh_prev = NULL; + + if (p == NULL) { + return ERR_OK; + } + + MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len); + + if (p->len != p->tot_len) { + /* for now, this needs a pbuf in one piece */ + goto lowpan6_input_discard; + } + + if (lowpan6_parse_iee802154_header(p, &src, &dest) != ERR_OK) { + goto lowpan6_input_discard; + } + + /* Check dispatch. */ + puc = (u8_t *)p->payload; + + b = *puc; + if ((b & 0xf8) == 0xc0) { + /* FRAG1 dispatch. add this packet to reassembly list. */ + datagram_size = ((u16_t)(puc[0] & 0x07) << 8) | (u16_t)puc[1]; + datagram_tag = ((u16_t)puc[2] << 8) | (u16_t)puc[3]; + + /* check for duplicate */ + lrh = lowpan6_data.reass_list; + while (lrh != NULL) { + uint8_t discard = 0; + lrh_next = lrh->next_packet; + if ((lrh->sender_addr.addr_len == src.addr_len) && + (memcmp(lrh->sender_addr.addr, src.addr, src.addr_len) == 0)) { + /* address match with packet in reassembly. */ + if ((datagram_tag == lrh->datagram_tag) && (datagram_size == lrh->datagram_size)) { + /* duplicate fragment. */ + goto lowpan6_input_discard; + } else { + /* We are receiving the start of a new datagram. Discard old one (incomplete). */ + discard = 1; + } + } + if (discard) { + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + } else { + lrh_prev = lrh; + } + /* Check next datagram in queue. 
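lowpan6_input() dispatches on the first payload byte. The values it recognizes can be summarized in a small classifier; the enum names below are invented for illustration and are not lwIP identifiers.

#include <stdint.h>

enum lowpan6_dispatch {
  DISPATCH_IPV6_UNCOMPRESSED,  /* 0x41: full IPv6 header follows */
  DISPATCH_IPHC,               /* 011xxxxx: RFC 6282 compressed header */
  DISPATCH_FRAG1,              /* 11000xxx: first fragment (RFC 4944) */
  DISPATCH_FRAGN,              /* 11100xxx: subsequent fragment */
  DISPATCH_UNKNOWN
};

enum lowpan6_dispatch classify_dispatch(uint8_t b)
{
  if (b == 0x41)          return DISPATCH_IPV6_UNCOMPRESSED;
  if ((b & 0xF8) == 0xC0) return DISPATCH_FRAG1;
  if ((b & 0xF8) == 0xE0) return DISPATCH_FRAGN;
  if ((b & 0xE0) == 0x60) return DISPATCH_IPHC;
  return DISPATCH_UNKNOWN;
}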
*/ + lrh = lrh_next; + } + + pbuf_remove_header(p, 4); /* hide frag1 dispatch */ + + lrh = (struct lowpan6_reass_helper *) mem_malloc(sizeof(struct lowpan6_reass_helper)); + if (lrh == NULL) { + goto lowpan6_input_discard; + } + + lrh->sender_addr.addr_len = src.addr_len; + for (i = 0; i < src.addr_len; i++) { + lrh->sender_addr.addr[i] = src.addr[i]; + } + lrh->datagram_size = datagram_size; + lrh->datagram_tag = datagram_tag; + lrh->frags = NULL; + if (*(u8_t *)p->payload == 0x41) { + /* This is a complete IPv6 packet, just skip dispatch byte. */ + pbuf_remove_header(p, 1); /* hide dispatch byte. */ + lrh->reass = p; + } else if ((*(u8_t *)p->payload & 0xe0 ) == 0x60) { + lrh->reass = lowpan6_decompress(p, datagram_size, LWIP_6LOWPAN_CONTEXTS(netif), &src, &dest); + if (lrh->reass == NULL) { + /* decompression failed */ + mem_free(lrh); + goto lowpan6_input_discard; + } + } + /* TODO: handle the case where we already have FRAGN received */ + lrh->next_packet = lowpan6_data.reass_list; + lrh->timer = 2; + lowpan6_data.reass_list = lrh; + + return ERR_OK; + } else if ((b & 0xf8) == 0xe0) { + /* FRAGN dispatch, find packet being reassembled. */ + datagram_size = ((u16_t)(puc[0] & 0x07) << 8) | (u16_t)puc[1]; + datagram_tag = ((u16_t)puc[2] << 8) | (u16_t)puc[3]; + datagram_offset = (u16_t)puc[4] << 3; + pbuf_remove_header(p, 4); /* hide frag1 dispatch but keep datagram offset for reassembly */ + + for (lrh = lowpan6_data.reass_list; lrh != NULL; lrh_prev = lrh, lrh = lrh->next_packet) { + if ((lrh->sender_addr.addr_len == src.addr_len) && + (memcmp(lrh->sender_addr.addr, src.addr, src.addr_len) == 0) && + (datagram_tag == lrh->datagram_tag) && + (datagram_size == lrh->datagram_size)) { + break; + } + } + if (lrh == NULL) { + /* rogue fragment */ + goto lowpan6_input_discard; + } + /* Insert new pbuf into list of fragments. Each fragment is a pbuf, + this only works for unchained pbufs. 
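FRAGN headers carry the fragment offset divided by eight, which is why every fragment payload except the last must be a multiple of eight bytes. The conversion and its limits, as a small sketch:

#include <assert.h>
#include <stdint.h>

/* Offset on the wire is datagram_offset / 8, carried in one byte. */
uint8_t frag_offset_to_wire(uint16_t datagram_offset)
{
  assert((datagram_offset & 0x7) == 0);  /* non-final fragments end on 8-byte boundaries */
  assert(datagram_offset <= 0x7FF);      /* datagram_size is limited to 11 bits */
  return (uint8_t)(datagram_offset >> 3);
}

uint16_t frag_offset_from_wire(uint8_t wire_offset)
{
  return (uint16_t)wire_offset << 3;
}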
*/ + LWIP_ASSERT("p->next == NULL", p->next == NULL); + if (lrh->reass != NULL) { + /* FRAG1 already received, check this offset against first len */ + if (datagram_offset < lrh->reass->len) { + /* fragment overlap, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + } + if (lrh->frags == NULL) { + /* first FRAGN */ + lrh->frags = p; + } else { + /* find the correct place to insert */ + struct pbuf *q, *last; + u16_t new_frag_len = p->len - 1; /* p->len includes datagram_offset byte */ + for (q = lrh->frags, last = NULL; q != NULL; last = q, q = q->next) { + u16_t q_datagram_offset = ((u8_t *)q->payload)[0] << 3; + u16_t q_frag_len = q->len - 1; + if (datagram_offset < q_datagram_offset) { + if (datagram_offset + new_frag_len > q_datagram_offset) { + /* overlap, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + /* insert here */ + break; + } else if (datagram_offset == q_datagram_offset) { + if (q_frag_len != new_frag_len) { + /* fragment mismatch, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + /* duplicate, ignore */ + pbuf_free(p); + return ERR_OK; + } + } + /* insert fragment */ + if (last == NULL) { + lrh->frags = p; + } else { + last->next = p; + p->next = q; + } + } + /* check if all fragments were received */ + if (lrh->reass) { + u16_t offset = lrh->reass->len; + struct pbuf *q; + for (q = lrh->frags; q != NULL; q = q->next) { + u16_t q_datagram_offset = ((u8_t *)q->payload)[0] << 3; + if (q_datagram_offset != offset) { + /* not complete, wait for more fragments */ + return ERR_OK; + } + offset += q->len - 1; + } + if (offset == datagram_size) { + /* all fragments received, combine pbufs */ + u16_t datagram_left = datagram_size - lrh->reass->len; + for (q = lrh->frags; q != NULL; q = q->next) { + /* hide datagram_offset byte now */ + pbuf_remove_header(q, 1); + q->tot_len = datagram_left; + datagram_left -= q->len; + } + LWIP_ASSERT("datagram_left == 0", datagram_left == 0); + q = lrh->reass; + q->tot_len = datagram_size; + q->next = lrh->frags; + lrh->frags = NULL; + lrh->reass = NULL; + dequeue_datagram(lrh, lrh_prev); + mem_free(lrh); + + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + return ip6_input(q, netif); + } + } + /* pbuf enqueued, waiting for more fragments */ + return ERR_OK; + } else { + if (b == 0x41) { + /* This is a complete IPv6 packet, just skip dispatch byte. */ + pbuf_remove_header(p, 1); /* hide dispatch byte. */ + } else if ((b & 0xe0 ) == 0x60) { + /* IPv6 headers are compressed using IPHC. 
*/ + p = lowpan6_decompress(p, datagram_size, LWIP_6LOWPAN_CONTEXTS(netif), &src, &dest); + if (p == NULL) { + MIB2_STATS_NETIF_INC(netif, ifindiscards); + return ERR_OK; + } + } else { + goto lowpan6_input_discard; + } + + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + + return ip6_input(p, netif); + } +lowpan6_input_discard: + MIB2_STATS_NETIF_INC(netif, ifindiscards); + pbuf_free(p); + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; +} + +/** + * @ingroup sixlowpan + */ +err_t +lowpan6_if_init(struct netif *netif) +{ + netif->name[0] = 'L'; + netif->name[1] = '6'; + netif->output_ip6 = lowpan6_output; + + MIB2_INIT_NETIF(netif, snmp_ifType_other, 0); + + /* maximum transfer unit */ + netif->mtu = 1280; + + /* broadcast capability */ + netif->flags = NETIF_FLAG_BROADCAST /* | NETIF_FLAG_LOWPAN6 */; + + return ERR_OK; +} + +/** + * @ingroup sixlowpan + * Set PAN ID + */ +err_t +lowpan6_set_pan_id(u16_t pan_id) +{ + lowpan6_data.ieee_802154_pan_id = pan_id; + + return ERR_OK; +} + +#if !NO_SYS +/** + * @ingroup sixlowpan + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet, p->payload pointing to the + * IEEE 802.15.4 header. + * @param inp the network interface on which the packet was received + */ +err_t +tcpip_6lowpan_input(struct pbuf *p, struct netif *inp) +{ + return tcpip_inpkt(p, inp, lowpan6_input); +} +#endif /* !NO_SYS */ + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c new file mode 100644 index 000000000..d89816d3b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c @@ -0,0 +1,447 @@ +/** + * @file + * 6LowPAN over BLE output for IPv6 (RFC7668). +*/ + +/* + * Copyright (c) 2017 Benjamin Aigner + * Copyright (c) 2015 Inico Technologies Ltd. , Author: Ivan Delamer + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * Author: Benjamin Aigner + * + * Based on the original 6lowpan implementation of lwIP ( @see 6lowpan.c) + */ + + +/** + * @defgroup rfc7668if 6LoWPAN over BLE (RFC7668) + * @ingroup netifs + * This file implements a RFC7668 implementation for 6LoWPAN over + * Bluetooth Low Energy. The specification is very similar to 6LoWPAN, + * so most of the code is re-used. + * Compared to 6LoWPAN, much functionality is already implemented in + * lower BLE layers (fragmenting, session management,...). + * + * Usage: + * - add this netif + * - don't add IPv4 addresses (no IPv4 support in RFC7668), pass 'NULL','NULL','NULL' + * - use the BLE to EUI64 conversation util to create an IPv6 link-local address from the BLE MAC (@ref ble_addr_to_eui64) + * - input function: @ref rfc7668_input + * - set the link output function, which transmits output data to an established L2CAP channel + * - If data arrives (HCI event "L2CAP_DATA_PACKET"): + * - allocate a @ref PBUF_RAW buffer + * - let the pbuf struct point to the incoming data or copy it to the buffer + * - call netif->input + * + * @todo: + * - further testing + * - support compression contexts + * - support multiple addresses + * - support multicast + * - support neighbor discovery + */ + + +#include "netif/lowpan6_ble.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/tcpip.h" +#include "lwip/snmp.h" + +#include + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +/** context memory, containing IPv6 addresses */ +static ip6_addr_t rfc7668_context[LWIP_6LOWPAN_NUM_CONTEXTS]; +#else +#define rfc7668_context NULL +#endif + +static struct lowpan6_link_addr rfc7668_local_addr; +static struct lowpan6_link_addr rfc7668_peer_addr; + +/** + * @ingroup rfc7668if + * convert BT address to EUI64 addr + * + * This method converts a Bluetooth MAC address to an EUI64 address, + * which is used within IPv6 communication + * + * @param dst IPv6 destination space + * @param src BLE MAC address source + * @param public_addr If the LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + * option is set, bit 0x02 will be set if param=0 (no public addr); cleared otherwise + * + * @see LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + */ +void +ble_addr_to_eui64(uint8_t *dst, const uint8_t *src, int public_addr) +{ + /* according to RFC7668 ch 3.2.2. */ + memcpy(dst, src, 3); + dst[3] = 0xFF; + dst[4] = 0xFE; + memcpy(&dst[5], &src[3], 3); +#if LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + if(public_addr) { + dst[0] &= ~0x02; + } else { + dst[0] |= 0x02; + } +#else + LWIP_UNUSED_ARG(public_addr); +#endif +} + +/** + * @ingroup rfc7668if + * convert EUI64 address to Bluetooth MAC addr + * + * This method converts an EUI64 address to a Bluetooth MAC address, + * + * @param dst BLE MAC address destination + * @param src IPv6 source + * + */ +void +eui64_to_ble_addr(uint8_t *dst, const uint8_t *src) +{ + /* according to RFC7668 ch 3.2.2. */ + memcpy(dst,src,3); + memcpy(&dst[3],&src[5],3); +} + +/** Set an address used for stateful compression. + * This expects an address of 6 or 8 bytes. 
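A worked example of the conversion above, using a standalone copy of the same byte mapping and a made-up BLE address, shows where the 0x02 universal/local bit lands when the address-type handling is taken:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Standalone copy of the mapping in ble_addr_to_eui64(), with the
 * public/random-address bit handling always enabled for illustration. */
static void ble_to_eui64(uint8_t dst[8], const uint8_t src[6], int public_addr)
{
  memcpy(dst, src, 3);
  dst[3] = 0xFF;
  dst[4] = 0xFE;
  memcpy(&dst[5], &src[3], 3);
  if (public_addr) {
    dst[0] &= (uint8_t)~0x02;
  } else {
    dst[0] |= 0x02;
  }
}

int main(void)
{
  const uint8_t ble[6] = { 0x00, 0x1B, 0xDC, 0x11, 0x22, 0x33 };  /* made-up address */
  uint8_t eui64[8];
  ble_to_eui64(eui64, ble, 0);  /* non-public address: the 0x02 bit gets set */
  for (int i = 0; i < 8; i++) {
    printf("%02X%s", eui64[i], i < 7 ? ":" : "\n");
  }
  /* prints 02:1B:DC:FF:FE:11:22:33 */
  return 0;
}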
+ */ +static err_t +rfc7668_set_addr(struct lowpan6_link_addr *addr, const u8_t *in_addr, size_t in_addr_len, int is_mac_48, int is_public_addr) +{ + if ((in_addr == NULL) || (addr == NULL)) { + return ERR_VAL; + } + if (is_mac_48) { + if (in_addr_len != 6) { + return ERR_VAL; + } + addr->addr_len = 8; + ble_addr_to_eui64(addr->addr, in_addr, is_public_addr); + } else { + if (in_addr_len != 8) { + return ERR_VAL; + } + addr->addr_len = 8; + memcpy(addr->addr, in_addr, 8); + } + return ERR_OK; +} + + +/** Set the local address used for stateful compression. + * This expects an address of 8 bytes. + */ +err_t +rfc7668_set_local_addr_eui64(struct netif *netif, const u8_t *local_addr, size_t local_addr_len) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_local_addr, local_addr, local_addr_len, 0, 0); +} + +/** Set the local address used for stateful compression. + * This expects an address of 6 bytes. + */ +err_t +rfc7668_set_local_addr_mac48(struct netif *netif, const u8_t *local_addr, size_t local_addr_len, int is_public_addr) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_local_addr, local_addr, local_addr_len, 1, is_public_addr); +} + +/** Set the peer address used for stateful compression. + * This expects an address of 8 bytes. + */ +err_t +rfc7668_set_peer_addr_eui64(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_peer_addr, peer_addr, peer_addr_len, 0, 0); +} + +/** Set the peer address used for stateful compression. + * This expects an address of 6 bytes. + */ +err_t +rfc7668_set_peer_addr_mac48(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len, int is_public_addr) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_peer_addr, peer_addr, peer_addr_len, 1, is_public_addr); +} + +/** Encapsulate IPv6 frames for BLE transmission + * + * This method implements the IPv6 header compression: + * *) According to RFC6282 + * *) See Figure 2, contains base format of bit positions + * *) Fragmentation not necessary (done at L2CAP layer of BLE) + * @note Currently the pbuf allocation uses 256 bytes. If longer packets are used (possible due to MTU=1480Bytes), increase it here! + * + * @param p Pbuf struct, containing the payload data + * @param netif Output network interface. Should be of RFC7668 type + * + * @return Same as netif->output. + */ +static err_t +rfc7668_compress(struct netif *netif, struct pbuf *p) +{ + struct pbuf *p_frag; + u16_t remaining_len; + u8_t *buffer; + u8_t lowpan6_header_len; + u8_t hidden_header_len; + err_t err; + + LWIP_ASSERT("lowpan6_frag: netif->linkoutput not set", netif->linkoutput != NULL); + +#if LWIP_6LOWPAN_IPHC + + /* We'll use a dedicated pbuf for building BLE fragments. + * We'll over-allocate it by the bytes saved for header compression. + */ + p_frag = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM); + if (p_frag == NULL) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece", p_frag->len == p_frag->tot_len); + + /* Write IP6 header (with IPHC). 
*/ + buffer = (u8_t*)p_frag->payload; + + err = lowpan6_compress_headers(netif, (u8_t *)p->payload, p->len, buffer, p_frag->len, + &lowpan6_header_len, &hidden_header_len, rfc7668_context, &rfc7668_local_addr, &rfc7668_peer_addr); + if (err != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + pbuf_free(p_frag); + return err; + } + pbuf_remove_header(p, hidden_header_len); + + /* Calculate remaining packet length */ + remaining_len = p->tot_len; + + /* Copy IPv6 packet */ + pbuf_copy_partial(p, buffer + lowpan6_header_len, remaining_len, 0); + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = remaining_len + lowpan6_header_len; + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG|LWIP_DBG_TRACE, ("rfc7668_output: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + + pbuf_free(p_frag); + + return err; +#else /* LWIP_6LOWPAN_IPHC */ + /* 6LoWPAN over BLE requires IPHC! */ + return ERR_IF; +#endif/* LWIP_6LOWPAN_IPHC */ +} + +/** + * @ingroup rfc7668if + * Set context id IPv6 address + * + * Store one IPv6 address to a given context id. + * + * @param idx Context id + * @param context IPv6 addr for this context + * + * @return ERR_OK (if everything is fine), ERR_ARG (if the context id is out of range), ERR_VAL (if contexts disabled) + */ +err_t +rfc7668_set_context(u8_t idx, const ip6_addr_t *context) +{ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + /* check if the ID is possible */ + if (idx >= LWIP_6LOWPAN_NUM_CONTEXTS) { + return ERR_ARG; + } + /* copy IPv6 address to context storage */ + ip6_addr_set(&rfc7668_context[idx], context); + return ERR_OK; +#else + LWIP_UNUSED_ARG(idx); + LWIP_UNUSED_ARG(context); + return ERR_VAL; +#endif +} + +/** + * @ingroup rfc7668if + * Compress outgoing IPv6 packet and pass it on to netif->linkoutput + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return See rfc7668_compress + */ +err_t +rfc7668_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + /* dst ip6addr is not used here, we only have one peer */ + LWIP_UNUSED_ARG(ip6addr); + + return rfc7668_compress(netif, q); +} + +/** + * @ingroup rfc7668if + * Process a received raw payload from an L2CAP channel + * + * @param p the received packet, p->payload pointing to the + * IPv6 header (maybe compressed) + * @param netif the network interface on which the packet was received + * + * @return ERR_OK if everything was fine + */ +err_t +rfc7668_input(struct pbuf * p, struct netif *netif) +{ + u8_t * puc; + + MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len); + + /* Load first header byte */ + puc = (u8_t*)p->payload; + + /* no IP header compression */ + if (*puc == 0x41) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, removing dispatch: 0x%2x \n", *puc)); + /* This is a complete IPv6 packet, just skip header byte. */ + pbuf_remove_header(p, 1); + /* IPHC header compression */ + } else if ((*puc & 0xe0 )== 0x60) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, decompress dispatch: 0x%2x \n", *puc)); + /* IPv6 headers are compressed using IPHC. 
*/ + p = lowpan6_decompress(p, 0, rfc7668_context, &rfc7668_peer_addr, &rfc7668_local_addr); + /* if no pbuf is returned, handle as discarded packet */ + if (p == NULL) { + MIB2_STATS_NETIF_INC(netif, ifindiscards); + return ERR_OK; + } + /* invalid header byte, discard */ + } else { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, discarding: 0x%2x \n", *puc)); + MIB2_STATS_NETIF_INC(netif, ifindiscards); + pbuf_free(p); + return ERR_OK; + } + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + +#if LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG + { + u16_t i; + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("IPv6 payload:\n")); + for (i = 0; i < p->len; i++) { + if ((i%4)==0) { + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("\n")); + } + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("%2X ", *((uint8_t *)p->payload+i))); + } + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("\np->len: %d\n", p->len)); + } +#endif + /* pass data to ip6_input */ + return ip6_input(p, netif); +} + +/** + * @ingroup rfc7668if + * Initialize the netif + * + * No flags are used (broadcast not possible, not ethernet, ...) + * The shortname for this netif is "BT" + * + * @param netif the network interface to be initialized as RFC7668 netif + * + * @return ERR_OK if everything went fine + */ +err_t +rfc7668_if_init(struct netif *netif) +{ + netif->name[0] = 'b'; + netif->name[1] = 't'; + /* local function as IPv6 output */ + netif->output_ip6 = rfc7668_output; + + MIB2_INIT_NETIF(netif, snmp_ifType_other, 0); + + /* maximum transfer unit, set according to RFC7668 ch2.4 */ + netif->mtu = 1280; + + /* no flags set (no broadcast, ethernet,...)*/ + netif->flags = 0; + + /* everything fine */ + return ERR_OK; +} + +#if !NO_SYS +/** + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet, p->payload pointing to the + * IEEE 802.15.4 header. + * @param inp the network interface on which the packet was received + * + * @return see @ref tcpip_inpkt, same return values + */ +err_t +tcpip_rfc7668_input(struct pbuf *p, struct netif *inp) +{ + /* send data to upper layer, return the result */ + return tcpip_inpkt(p, inp, rfc7668_input); +} +#endif /* !NO_SYS */ + +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c new file mode 100644 index 000000000..baea206a7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c @@ -0,0 +1,841 @@ +/** + * @file + * + * Common 6LowPAN routines for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + * + * This implementation aims to conform to IEEE 802.15.4(-2015), RFC 4944 and RFC 6282. + * @todo: RFC 6775. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/** + * @defgroup sixlowpan 6LoWPAN (RFC4944) + * @ingroup netifs + * 6LowPAN netif implementation + */ + +#include "netif/lowpan6_common.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/udp.h" + +#include + +/* Determine compression mode for unicast address. */ +s8_t +lowpan6_get_address_mode(const ip6_addr_t *ip6addr, const struct lowpan6_link_addr *mac_addr) +{ + if (mac_addr->addr_len == 2) { + if ((ip6addr->addr[2] == (u32_t)PP_HTONL(0x000000ff)) && + ((ip6addr->addr[3] & PP_HTONL(0xffff0000)) == PP_NTOHL(0xfe000000))) { + if ((ip6addr->addr[3] & PP_HTONL(0x0000ffff)) == lwip_ntohl((mac_addr->addr[0] << 8) | mac_addr->addr[1])) { + return 3; + } + } + } else if (mac_addr->addr_len == 8) { + if ((ip6addr->addr[2] == lwip_ntohl(((mac_addr->addr[0] ^ 2) << 24) | (mac_addr->addr[1] << 16) | mac_addr->addr[2] << 8 | mac_addr->addr[3])) && + (ip6addr->addr[3] == lwip_ntohl((mac_addr->addr[4] << 24) | (mac_addr->addr[5] << 16) | mac_addr->addr[6] << 8 | mac_addr->addr[7]))) { + return 3; + } + } + + if ((ip6addr->addr[2] == PP_HTONL(0x000000ffUL)) && + ((ip6addr->addr[3] & PP_HTONL(0xffff0000)) == PP_NTOHL(0xfe000000UL))) { + return 2; + } + + return 1; +} + +#if LWIP_6LOWPAN_IPHC + +/* Determine compression mode for multicast address. */ +static s8_t +lowpan6_get_address_mode_mc(const ip6_addr_t *ip6addr) +{ + if ((ip6addr->addr[0] == PP_HTONL(0xff020000)) && + (ip6addr->addr[1] == 0) && + (ip6addr->addr[2] == 0) && + ((ip6addr->addr[3] & PP_HTONL(0xffffff00)) == 0)) { + return 3; + } else if (((ip6addr->addr[0] & PP_HTONL(0xff00ffff)) == PP_HTONL(0xff000000)) && + (ip6addr->addr[1] == 0)) { + if ((ip6addr->addr[2] == 0) && + ((ip6addr->addr[3] & PP_HTONL(0xff000000)) == 0)) { + return 2; + } else if ((ip6addr->addr[2] & PP_HTONL(0xffffff00)) == 0) { + return 1; + } + } + + return 0; +} + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +static s8_t +lowpan6_context_lookup(const ip6_addr_t *lowpan6_contexts, const ip6_addr_t *ip6addr) +{ + s8_t i; + + for (i = 0; i < LWIP_6LOWPAN_NUM_CONTEXTS; i++) { + if (ip6_addr_netcmp(&lowpan6_contexts[i], ip6addr)) { + return i; + } + } + return -1; +} +#endif /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + +/* + * Compress IPv6 and/or UDP headers. 
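lowpan6_get_address_mode() returns the RFC 6282 SAM/DAM code: 3 when the interface identifier is fully derivable from the link-layer address, 2 when only the 0000:00ff:fe00:XXXX short form needs 16 inline bits, and 1 when 64 bits go inline. A sketch of the two derivable interface-identifier forms, with helper names invented here:

#include <stdint.h>
#include <string.h>

/* IID for a 16-bit short address: 0000:00ff:fe00:XXXX (RFC 4944 / RFC 6282). */
void iid_from_short_addr(uint8_t iid[8], uint16_t short_addr)
{
  memset(iid, 0, 8);
  iid[3] = 0xFF;
  iid[4] = 0xFE;
  iid[6] = (uint8_t)(short_addr >> 8);
  iid[7] = (uint8_t)(short_addr & 0xFF);
}

/* IID for a 64-bit extended address: EUI-64 with the universal/local bit flipped. */
void iid_from_ext_addr(uint8_t iid[8], const uint8_t eui64[8])
{
  memcpy(iid, eui64, 8);
  iid[0] ^= 0x02;
}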
+ * */ +err_t +lowpan6_compress_headers(struct netif *netif, u8_t *inbuf, size_t inbuf_size, u8_t *outbuf, size_t outbuf_size, + u8_t *lowpan6_header_len_out, u8_t *hidden_header_len_out, ip6_addr_t *lowpan6_contexts, + const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst) +{ + u8_t *buffer, *inptr; + u8_t lowpan6_header_len; + u8_t hidden_header_len = 0; + s8_t i; + struct ip6_hdr *ip6hdr; + ip_addr_t ip6src, ip6dst; + + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("inbuf != NULL", inbuf != NULL); + LWIP_ASSERT("outbuf != NULL", outbuf != NULL); + LWIP_ASSERT("lowpan6_header_len_out != NULL", lowpan6_header_len_out != NULL); + LWIP_ASSERT("hidden_header_len_out != NULL", hidden_header_len_out != NULL); + + /* Perform 6LowPAN IPv6 header compression according to RFC 6282 */ + buffer = outbuf; + inptr = inbuf; + + if (inbuf_size < IP6_HLEN) { + /* input buffer too short */ + return ERR_VAL; + } + if (outbuf_size < IP6_HLEN) { + /* output buffer too short for worst case */ + return ERR_MEM; + } + + /* Point to ip6 header and align copies of src/dest addresses. */ + ip6hdr = (struct ip6_hdr *)inptr; + ip_addr_copy_from_ip6_packed(ip6dst, ip6hdr->dest); + ip6_addr_assign_zone(ip_2_ip6(&ip6dst), IP6_UNKNOWN, netif); + ip_addr_copy_from_ip6_packed(ip6src, ip6hdr->src); + ip6_addr_assign_zone(ip_2_ip6(&ip6src), IP6_UNKNOWN, netif); + + /* Basic length of 6LowPAN header, set dispatch and clear fields. */ + lowpan6_header_len = 2; + buffer[0] = 0x60; + buffer[1] = 0; + + /* Determine whether there will be a Context Identifier Extension byte or not. + * If so, set it already. */ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + buffer[2] = 0; + + i = lowpan6_context_lookup(lowpan6_contexts, ip_2_ip6(&ip6src)); + if (i >= 0) { + /* Stateful source address compression. */ + buffer[1] |= 0x40; + buffer[2] |= (i & 0x0f) << 4; + } + + i = lowpan6_context_lookup(lowpan6_contexts, ip_2_ip6(&ip6dst)); + if (i >= 0) { + /* Stateful destination address compression. */ + buffer[1] |= 0x04; + buffer[2] |= i & 0x0f; + } + + if (buffer[2] != 0x00) { + /* Context identifier extension byte is appended. */ + buffer[1] |= 0x80; + lowpan6_header_len++; + } +#else /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + LWIP_UNUSED_ARG(lowpan6_contexts); +#endif /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + + /* Determine TF field: Traffic Class, Flow Label */ + if (IP6H_FL(ip6hdr) == 0) { + /* Flow label is elided. */ + buffer[0] |= 0x10; + if (IP6H_TC(ip6hdr) == 0) { + /* Traffic class (ECN+DSCP) elided too. */ + buffer[0] |= 0x08; + } else { + /* Traffic class (ECN+DSCP) appended. */ + buffer[lowpan6_header_len++] = IP6H_TC(ip6hdr); + } + } else { + if (((IP6H_TC(ip6hdr) & 0x3f) == 0)) { + /* DSCP portion of Traffic Class is elided, ECN and FL are appended (3 bytes) */ + buffer[0] |= 0x08; + + buffer[lowpan6_header_len] = IP6H_TC(ip6hdr) & 0xc0; + buffer[lowpan6_header_len++] |= (IP6H_FL(ip6hdr) >> 16) & 0x0f; + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 8) & 0xff; + buffer[lowpan6_header_len++] = IP6H_FL(ip6hdr) & 0xff; + } else { + /* Traffic class and flow label are appended (4 bytes) */ + buffer[lowpan6_header_len++] = IP6H_TC(ip6hdr); + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 16) & 0x0f; + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 8) & 0xff; + buffer[lowpan6_header_len++] = IP6H_FL(ip6hdr) & 0xff; + } + } + + /* Compress NH? + * Only if UDP for now. @todo support other NH compression. */ + if (IP6H_NEXTH(ip6hdr) == IP6_NEXTH_UDP) { + buffer[0] |= 0x04; + } else { + /* append nexth. 
*/ + buffer[lowpan6_header_len++] = IP6H_NEXTH(ip6hdr); + } + + /* Compress hop limit? */ + if (IP6H_HOPLIM(ip6hdr) == 255) { + buffer[0] |= 0x03; + } else if (IP6H_HOPLIM(ip6hdr) == 64) { + buffer[0] |= 0x02; + } else if (IP6H_HOPLIM(ip6hdr) == 1) { + buffer[0] |= 0x01; + } else { + /* append hop limit */ + buffer[lowpan6_header_len++] = IP6H_HOPLIM(ip6hdr); + } + + /* Compress source address */ + if (((buffer[1] & 0x40) != 0) || + (ip6_addr_islinklocal(ip_2_ip6(&ip6src)))) { + /* Context-based or link-local source address compression. */ + i = lowpan6_get_address_mode(ip_2_ip6(&ip6src), src); + buffer[1] |= (i & 0x03) << 4; + if (i == 1) { + MEMCPY(buffer + lowpan6_header_len, inptr + 16, 8); + lowpan6_header_len += 8; + } else if (i == 2) { + MEMCPY(buffer + lowpan6_header_len, inptr + 22, 2); + lowpan6_header_len += 2; + } + } else if (ip6_addr_isany(ip_2_ip6(&ip6src))) { + /* Special case: mark SAC and leave SAM=0 */ + buffer[1] |= 0x40; + } else { + /* Append full address. */ + MEMCPY(buffer + lowpan6_header_len, inptr + 8, 16); + lowpan6_header_len += 16; + } + + /* Compress destination address */ + if (ip6_addr_ismulticast(ip_2_ip6(&ip6dst))) { + /* @todo support stateful multicast address compression */ + + buffer[1] |= 0x08; + + i = lowpan6_get_address_mode_mc(ip_2_ip6(&ip6dst)); + buffer[1] |= i & 0x03; + if (i == 0) { + MEMCPY(buffer + lowpan6_header_len, inptr + 24, 16); + lowpan6_header_len += 16; + } else if (i == 1) { + buffer[lowpan6_header_len++] = inptr[25]; + MEMCPY(buffer + lowpan6_header_len, inptr + 35, 5); + lowpan6_header_len += 5; + } else if (i == 2) { + buffer[lowpan6_header_len++] = inptr[25]; + MEMCPY(buffer + lowpan6_header_len, inptr + 37, 3); + lowpan6_header_len += 3; + } else if (i == 3) { + buffer[lowpan6_header_len++] = (inptr)[39]; + } + } else if (((buffer[1] & 0x04) != 0) || + (ip6_addr_islinklocal(ip_2_ip6(&ip6dst)))) { + /* Context-based or link-local destination address compression. */ + i = lowpan6_get_address_mode(ip_2_ip6(&ip6dst), dst); + buffer[1] |= i & 0x03; + if (i == 1) { + MEMCPY(buffer + lowpan6_header_len, inptr + 32, 8); + lowpan6_header_len += 8; + } else if (i == 2) { + MEMCPY(buffer + lowpan6_header_len, inptr + 38, 2); + lowpan6_header_len += 2; + } + } else { + /* Append full address. */ + MEMCPY(buffer + lowpan6_header_len, inptr + 24, 16); + lowpan6_header_len += 16; + } + + /* Move to payload. */ + inptr += IP6_HLEN; + hidden_header_len += IP6_HLEN; + +#if LWIP_UDP + /* Compress UDP header? */ + if (IP6H_NEXTH(ip6hdr) == IP6_NEXTH_UDP) { + /* @todo support optional checksum compression */ + + if (inbuf_size < IP6_HLEN + UDP_HLEN) { + /* input buffer too short */ + return ERR_VAL; + } + if (outbuf_size < (size_t)(hidden_header_len + 7)) { + /* output buffer too short for worst case */ + return ERR_MEM; + } + + buffer[lowpan6_header_len] = 0xf0; + + /* determine port compression mode. */ + if ((inptr[0] == 0xf0) && ((inptr[1] & 0xf0) == 0xb0) && + (inptr[2] == 0xf0) && ((inptr[3] & 0xf0) == 0xb0)) { + /* Compress source and dest ports. */ + buffer[lowpan6_header_len++] |= 0x03; + buffer[lowpan6_header_len++] = ((inptr[1] & 0x0f) << 4) | (inptr[3] & 0x0f); + } else if (inptr[0] == 0xf0) { + /* Compress source port. */ + buffer[lowpan6_header_len++] |= 0x02; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[2]; + buffer[lowpan6_header_len++] = inptr[3]; + } else if (inptr[2] == 0xf0) { + /* Compress dest port. 
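The UDP next-header compression above keys off the 0xF0 port prefix: both ports in 0xF0B0-0xF0BF compress to a nibble each, a single port in 0xF000-0xF0FF to one byte, anything else stays inline. The same mode selection on host-order ports, with a helper name invented here:

#include <stdint.h>

/* RFC 6282 UDP port compression mode (the low two bits of the 0xF0 NHC byte). */
uint8_t udp_nhc_port_mode(uint16_t src_port, uint16_t dst_port)
{
  if ((src_port & 0xFFF0) == 0xF0B0 && (dst_port & 0xFFF0) == 0xF0B0) {
    return 0x03;  /* both ports carried as one nibble each */
  }
  if ((src_port & 0xFF00) == 0xF000) {
    return 0x02;  /* source port carried as one byte, destination in full */
  }
  if ((dst_port & 0xFF00) == 0xF000) {
    return 0x01;  /* destination port carried as one byte, source in full */
  }
  return 0x00;    /* both ports carried in full */
}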
*/ + buffer[lowpan6_header_len++] |= 0x01; + buffer[lowpan6_header_len++] = inptr[0]; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[3]; + } else { + /* append full ports. */ + lowpan6_header_len++; + buffer[lowpan6_header_len++] = inptr[0]; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[2]; + buffer[lowpan6_header_len++] = inptr[3]; + } + + /* elide length and copy checksum */ + buffer[lowpan6_header_len++] = inptr[6]; + buffer[lowpan6_header_len++] = inptr[7]; + + hidden_header_len += UDP_HLEN; + } +#endif /* LWIP_UDP */ + + *lowpan6_header_len_out = lowpan6_header_len; + *hidden_header_len_out = hidden_header_len; + + return ERR_OK; +} + +/** Decompress IPv6 and UDP headers compressed according to RFC 6282 + * + * @param lowpan6_buffer compressed headers, first byte is the dispatch byte + * @param lowpan6_bufsize size of lowpan6_buffer (may include data after headers) + * @param decomp_buffer buffer where the decompressed headers are stored + * @param decomp_bufsize size of decomp_buffer + * @param hdr_size_comp returns the size of the compressed headers (skip to get to data) + * @param hdr_size_decomp returns the size of the decompressed headers (IPv6 + UDP) + * @param datagram_size datagram size from fragments or 0 if unfragmented + * @param compressed_size compressed datagram size (for unfragmented rx) + * @param lowpan6_contexts context addresses + * @param src source address of the outer layer, used for address compression + * @param dest destination address of the outer layer, used for address compression + * @return ERR_OK if decompression succeeded, an error otherwise + */ +static err_t +lowpan6_decompress_hdr(u8_t *lowpan6_buffer, size_t lowpan6_bufsize, + u8_t *decomp_buffer, size_t decomp_bufsize, + u16_t *hdr_size_comp, u16_t *hdr_size_decomp, + u16_t datagram_size, u16_t compressed_size, + ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest) +{ + u16_t lowpan6_offset; + struct ip6_hdr *ip6hdr; + s8_t i; + u32_t header_temp; + u16_t ip6_offset = IP6_HLEN; + + LWIP_ASSERT("lowpan6_buffer != NULL", lowpan6_buffer != NULL); + LWIP_ASSERT("decomp_buffer != NULL", decomp_buffer != NULL); + LWIP_ASSERT("src != NULL", src != NULL); + LWIP_ASSERT("dest != NULL", dest != NULL); + LWIP_ASSERT("hdr_size_comp != NULL", hdr_size_comp != NULL); + LWIP_ASSERT("dehdr_size_decompst != NULL", hdr_size_decomp != NULL); + + ip6hdr = (struct ip6_hdr *)decomp_buffer; + if (decomp_bufsize < IP6_HLEN) { + return ERR_MEM; + } + + /* output the full compressed packet, if set in @see lowpan6_opts.h */ +#if LWIP_LOWPAN6_IP_COMPRESSED_DEBUG + { + u16_t j; + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("lowpan6_decompress_hdr: IP6 payload (compressed): \n")); + for (j = 0; j < lowpan6_bufsize; j++) { + if ((j % 4) == 0) { + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("\n")); + } + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("%2X ", lowpan6_buffer[j])); + } + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("\np->len: %d", lowpan6_bufsize)); + } +#endif + + /* offset for inline IP headers (RFC 6282 ch3)*/ + lowpan6_offset = 2; + /* if CID is set (context identifier), the context byte + * follows immediately after the header, so other IPHC fields are @+3 */ + if (lowpan6_buffer[1] & 0x80) { + lowpan6_offset++; + } + + /* Set IPv6 version, traffic class and flow label. 
(RFC6282, ch 3.1.1.)*/ + if ((lowpan6_buffer[0] & 0x18) == 0x00) { + header_temp = ((lowpan6_buffer[lowpan6_offset+1] & 0x0f) << 16) | \ + (lowpan6_buffer[lowpan6_offset + 2] << 8) | lowpan6_buffer[lowpan6_offset+3]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 00, ECN: 0x%2x, Flowlabel+DSCP: 0x%8X\n", \ + lowpan6_buffer[lowpan6_offset],header_temp)); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset], header_temp); + /* increase offset, processed 4 bytes here: + * TF=00: ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)*/ + lowpan6_offset += 4; + } else if ((lowpan6_buffer[0] & 0x18) == 0x08) { + header_temp = ((lowpan6_buffer[lowpan6_offset] & 0x0f) << 16) | (lowpan6_buffer[lowpan6_offset + 1] << 8) | lowpan6_buffer[lowpan6_offset+2]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 01, ECN: 0x%2x, Flowlabel: 0x%2X, DSCP ignored\n", \ + lowpan6_buffer[lowpan6_offset] & 0xc0,header_temp)); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset] & 0xc0, header_temp); + /* increase offset, processed 3 bytes here: + * TF=01: ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided.*/ + lowpan6_offset += 3; + } else if ((lowpan6_buffer[0] & 0x18) == 0x10) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 10, DCSP+ECN: 0x%2x, Flowlabel ignored\n", lowpan6_buffer[lowpan6_offset])); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset],0); + /* increase offset, processed 1 byte here: + * ECN + DSCP (1 byte), Flow Label is elided.*/ + lowpan6_offset += 1; + } else if ((lowpan6_buffer[0] & 0x18) == 0x18) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 11, DCSP/ECN & Flowlabel ignored\n")); + /* don't increase offset, no bytes processed here */ + IP6H_VTCFL_SET(ip6hdr, 6, 0, 0); + } + + /* Set Next Header (NH) */ + if ((lowpan6_buffer[0] & 0x04) == 0x00) { + /* 0: full next header byte carried inline (increase offset)*/ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NH: 0x%2X\n", lowpan6_buffer[lowpan6_offset+1])); + IP6H_NEXTH_SET(ip6hdr, lowpan6_buffer[lowpan6_offset++]); + } else { + /* 1: NH compression, LOWPAN_NHC (RFC6282, ch 4.1) */ + /* We should fill this later with NHC decoding */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NH: skipped, later done with NHC\n")); + IP6H_NEXTH_SET(ip6hdr, 0); + } + + /* Set Hop Limit, either carried inline or 3 different hops (1,64,255) */ + if ((lowpan6_buffer[0] & 0x03) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: full value: %d\n", lowpan6_buffer[lowpan6_offset+1])); + IP6H_HOPLIM_SET(ip6hdr, lowpan6_buffer[lowpan6_offset++]); + } else if ((lowpan6_buffer[0] & 0x03) == 0x01) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 1\n")); + IP6H_HOPLIM_SET(ip6hdr, 1); + } else if ((lowpan6_buffer[0] & 0x03) == 0x02) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 64\n")); + IP6H_HOPLIM_SET(ip6hdr, 64); + } else if ((lowpan6_buffer[0] & 0x03) == 0x03) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 255\n")); + IP6H_HOPLIM_SET(ip6hdr, 255); + } + + /* Source address decoding. 
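/*
 * Illustrative aside, not part of the lwIP sources: the HLIM decoding above
 * is a fixed four-entry mapping; a value of 00 means the hop limit follows
 * inline, the other three encode common defaults:
 */
static const int iphc_hlim_table[4] = { -1 /* carried inline */, 1, 64, 255 };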
*/ + if ((lowpan6_buffer[1] & 0x40) == 0x00) { + /* Source address compression (SAC) = 0 -> stateless compression */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAC == 0, no context byte\n")); + /* Stateless compression */ + if ((lowpan6_buffer[1] & 0x30) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 00, no src compression, fetching 128bits inline\n")); + /* copy full address, increase offset by 16 Bytes */ + MEMCPY(&ip6hdr->src.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x30) == 0x10) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 01, src compression, 64bits inline\n")); + /* set 64 bits to link local */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + /* copy 8 Bytes, increase offset */ + MEMCPY(&ip6hdr->src.addr[2], lowpan6_buffer + lowpan6_offset, 8); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x30) == 0x20) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 10, src compression, 16bits inline\n")); + /* set 96 bits to link local */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + /* extract remaining 16bits from inline bytes, increase offset */ + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | + lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x30) == 0x30) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 11, src compression, 0bits inline, using other headers\n")); + /* no information avalaible, using other layers, see RFC6282 ch 3.2.2 */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + if (src->addr_len == 2) { + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (src->addr[0] << 8) | src->addr[1]); + } else if (src->addr_len == 8) { + ip6hdr->src.addr[2] = lwip_htonl(((src->addr[0] ^ 2) << 24) | (src->addr[1] << 16) | + (src->addr[2] << 8) | src->addr[3]); + ip6hdr->src.addr[3] = lwip_htonl((src->addr[4] << 24) | (src->addr[5] << 16) | + (src->addr[6] << 8) | src->addr[7]); + } else { + /* invalid source address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid source address length\n")); + return ERR_VAL; + } + } + } else { + /* Source address compression (SAC) = 1 -> stateful/context-based compression */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAC == 1, additional context byte\n")); + if ((lowpan6_buffer[1] & 0x30) == 0x00) { + /* SAM=00, address=> :: (ANY) */ + ip6hdr->src.addr[0] = 0; + ip6hdr->src.addr[1] = 0; + ip6hdr->src.addr[2] = 0; + ip6hdr->src.addr[3] = 0; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 00, context compression, ANY (::)\n")); + } else { + /* Set prefix from context info */ + if (lowpan6_buffer[1] & 0x80) { + i = (lowpan6_buffer[2] >> 4) & 0x0f; + } else { + i = 0; + } + if (i >= LWIP_6LOWPAN_NUM_CONTEXTS) { + /* Error */ + return ERR_VAL; + } +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + ip6hdr->src.addr[0] = lowpan6_contexts[i].addr[0]; + ip6hdr->src.addr[1] = lowpan6_contexts[i].addr[1]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == xx, context compression found @%d: %8X, %8X\n", (int)i, ip6hdr->src.addr[0], ip6hdr->src.addr[1])); +#else + LWIP_UNUSED_ARG(lowpan6_contexts); +#endif + } + + /* determine further address bits */ + if ((lowpan6_buffer[1] & 0x30) == 0x10) { + /* SAM=01, load additional 64bits */ + 
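/*
 * Illustrative aside, not part of the lwIP sources: for SAM=11 the interface
 * identifier is rebuilt from the link-layer address; with an 8-byte (EUI-64)
 * address the universal/local bit is inverted, mirroring (src->addr[0] ^ 2)
 * above. A hypothetical helper producing the low 8 bytes of the fe80::/64
 * address:
 */
#include <stdint.h>
#include <string.h>

void iid_from_eui64(const uint8_t eui[8], uint8_t iid[8])
{
  memcpy(iid, eui, 8);
  iid[0] ^= 0x02;  /* flip the U/L bit */
}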
MEMCPY(&ip6hdr->src.addr[2], lowpan6_buffer + lowpan6_offset, 8); + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 01, context compression, 64bits inline\n")); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x30) == 0x20) { + /* SAM=01, load additional 16bits */ + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | lowpan6_buffer[lowpan6_offset + 1]); + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 10, context compression, 16bits inline\n")); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x30) == 0x30) { + /* SAM=11, address is fully elided, load from other layers */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 11, context compression, 0bits inline, using other headers\n")); + if (src->addr_len == 2) { + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (src->addr[0] << 8) | src->addr[1]); + } else if (src->addr_len == 8) { + ip6hdr->src.addr[2] = lwip_htonl(((src->addr[0] ^ 2) << 24) | (src->addr[1] << 16) | (src->addr[2] << 8) | src->addr[3]); + ip6hdr->src.addr[3] = lwip_htonl((src->addr[4] << 24) | (src->addr[5] << 16) | (src->addr[6] << 8) | src->addr[7]); + } else { + /* invalid source address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid source address length\n")); + return ERR_VAL; + } + } + } + + /* Destination address decoding. */ + if (lowpan6_buffer[1] & 0x08) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("M=1: multicast\n")); + /* Multicast destination */ + if (lowpan6_buffer[1] & 0x04) { + LWIP_DEBUGF(LWIP_DBG_ON,("DAC == 1, context multicast: unsupported!!!\n")); + /* @todo support stateful multicast addressing */ + return ERR_VAL; + } + + if ((lowpan6_buffer[1] & 0x03) == 0x00) { + /* DAM = 00, copy full address (128bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 00, no dst compression, fetching 128bits inline\n")); + MEMCPY(&ip6hdr->dest.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x03) == 0x01) { + /* DAM = 01, copy 4 bytes (32bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst address form (48bits): ffXX::00XX:XXXX:XXXX\n")); + ip6hdr->dest.addr[0] = lwip_htonl(0xff000000UL | (lowpan6_buffer[lowpan6_offset++] << 16)); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = lwip_htonl(lowpan6_buffer[lowpan6_offset++]); + ip6hdr->dest.addr[3] = lwip_htonl((lowpan6_buffer[lowpan6_offset] << 24) | (lowpan6_buffer[lowpan6_offset + 1] << 16) | (lowpan6_buffer[lowpan6_offset + 2] << 8) | lowpan6_buffer[lowpan6_offset + 3]); + lowpan6_offset += 4; + } else if ((lowpan6_buffer[1] & 0x03) == 0x02) { + /* DAM = 10, copy 3 bytes (24bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 10, dst address form (32bits): ffXX::00XX:XXXX\n")); + ip6hdr->dest.addr[0] = lwip_htonl(0xff000000UL | (lowpan6_buffer[lowpan6_offset++] << 16)); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = 0; + ip6hdr->dest.addr[3] = lwip_htonl((lowpan6_buffer[lowpan6_offset] << 16) | (lowpan6_buffer[lowpan6_offset + 1] << 8) | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if ((lowpan6_buffer[1] & 0x03) == 0x03) { + /* DAM = 11, copy 1 byte (8bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 11, dst address form (8bits): ff02::00XX\n")); + ip6hdr->dest.addr[0] = PP_HTONL(0xff020000UL); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = 0; + 
ip6hdr->dest.addr[3] = lwip_htonl(lowpan6_buffer[lowpan6_offset++]); + } + + } else { + /* no Multicast (M=0) */ + if (lowpan6_buffer[1] & 0x04) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAC == 1, stateful compression\n")); + /* Stateful destination compression */ + /* Set prefix from context info */ + if (lowpan6_buffer[1] & 0x80) { + i = lowpan6_buffer[2] & 0x0f; + } else { + i = 0; + } + if (i >= LWIP_6LOWPAN_NUM_CONTEXTS) { + /* Error */ + return ERR_VAL; + } +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + ip6hdr->dest.addr[0] = lowpan6_contexts[i].addr[0]; + ip6hdr->dest.addr[1] = lowpan6_contexts[i].addr[1]; +#endif + } else { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAC == 0, stateless compression, setting link local prefix\n")); + /* Link local address compression */ + ip6hdr->dest.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->dest.addr[1] = 0; + } + + /* M=0, DAC=0, determining destination address length via DAM=xx */ + if ((lowpan6_buffer[1] & 0x03) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 00, no dst compression, fetching 128bits inline")); + /* DAM=00, copy full address */ + MEMCPY(&ip6hdr->dest.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x03) == 0x01) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst compression, 64bits inline\n")); + /* DAM=01, copy 64 inline bits, increase offset */ + MEMCPY(&ip6hdr->dest.addr[2], lowpan6_buffer + lowpan6_offset, 8); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x03) == 0x02) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst compression, 16bits inline\n")); + /* DAM=10, copy 16 inline bits, increase offset */ + ip6hdr->dest.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->dest.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x03) == 0x03) { + /* DAM=11, no bits available, use other headers (not done here) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG,("DAM == 01, dst compression, 0bits inline, using other headers\n")); + if (dest->addr_len == 2) { + ip6hdr->dest.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->dest.addr[3] = lwip_htonl(0xfe000000UL | (dest->addr[0] << 8) | dest->addr[1]); + } else if (dest->addr_len == 8) { + ip6hdr->dest.addr[2] = lwip_htonl(((dest->addr[0] ^ 2) << 24) | (dest->addr[1] << 16) | dest->addr[2] << 8 | dest->addr[3]); + ip6hdr->dest.addr[3] = lwip_htonl((dest->addr[4] << 24) | (dest->addr[5] << 16) | dest->addr[6] << 8 | dest->addr[7]); + } else { + /* invalid destination address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid destination address length\n")); + return ERR_VAL; + } + } + } + + + /* Next Header Compression (NHC) decoding? 
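/*
 * Illustrative aside, not part of the lwIP sources: the most compact
 * multicast form above (DAM=11) carries a single inline byte XX and expands
 * to ff02::00XX, so an inline byte of 0x01 rebuilds ff02::1 (all-nodes).
 */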
*/ + if (lowpan6_buffer[0] & 0x04) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NHC decoding\n")); +#if LWIP_UDP + if ((lowpan6_buffer[lowpan6_offset] & 0xf8) == 0xf0) { + /* NHC: UDP */ + struct udp_hdr *udphdr; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NHC: UDP\n")); + + /* UDP compression */ + IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_UDP); + udphdr = (struct udp_hdr *)((u8_t *)decomp_buffer + ip6_offset); + if (decomp_bufsize < IP6_HLEN + UDP_HLEN) { + return ERR_MEM; + } + + /* Checksum decompression */ + if (lowpan6_buffer[lowpan6_offset] & 0x04) { + /* @todo support checksum decompress */ + LWIP_DEBUGF(LWIP_DBG_ON, ("NHC: UDP chechsum decompression UNSUPPORTED\n")); + return ERR_VAL; + } + + /* Decompress ports, according to RFC4944 */ + i = lowpan6_buffer[lowpan6_offset++] & 0x03; + if (i == 0) { + udphdr->src = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + udphdr->dest = lwip_htons(lowpan6_buffer[lowpan6_offset + 2] << 8 | lowpan6_buffer[lowpan6_offset + 3]); + lowpan6_offset += 4; + } else if (i == 0x01) { + udphdr->src = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + udphdr->dest = lwip_htons(0xf000 | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if (i == 0x02) { + udphdr->src = lwip_htons(0xf000 | lowpan6_buffer[lowpan6_offset]); + udphdr->dest = lwip_htons(lowpan6_buffer[lowpan6_offset + 1] << 8 | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if (i == 0x03) { + udphdr->src = lwip_htons(0xf0b0 | ((lowpan6_buffer[lowpan6_offset] >> 4) & 0x0f)); + udphdr->dest = lwip_htons(0xf0b0 | (lowpan6_buffer[lowpan6_offset] & 0x0f)); + lowpan6_offset += 1; + } + + udphdr->chksum = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + ip6_offset += UDP_HLEN; + if (datagram_size == 0) { + datagram_size = compressed_size - lowpan6_offset + ip6_offset; + } + udphdr->len = lwip_htons(datagram_size - IP6_HLEN); + + } else +#endif /* LWIP_UDP */ + { + LWIP_DEBUGF(LWIP_DBG_ON,("NHC: unsupported protocol!\n")); + /* @todo support NHC other than UDP */ + return ERR_VAL; + } + } + if (datagram_size == 0) { + datagram_size = compressed_size - lowpan6_offset + ip6_offset; + } + /* Infer IPv6 payload length for header */ + IP6H_PLEN_SET(ip6hdr, datagram_size - IP6_HLEN); + + if (lowpan6_offset > lowpan6_bufsize) { + /* input buffer overflow */ + return ERR_VAL; + } + *hdr_size_comp = lowpan6_offset; + *hdr_size_decomp = ip6_offset; + + return ERR_OK; +} + +struct pbuf * +lowpan6_decompress(struct pbuf *p, u16_t datagram_size, ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest) +{ + struct pbuf *q; + u16_t lowpan6_offset, ip6_offset; + err_t err; + +#if LWIP_UDP +#define UDP_HLEN_ALLOC UDP_HLEN +#else +#define UDP_HLEN_ALLOC 0 +#endif + + /* Allocate a buffer for decompression. This buffer will be too big and will be + trimmed once the final size is known. 
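/*
 * Illustrative aside, not part of the lwIP sources: for an unfragmented
 * packet the datagram size is inferred by swapping the compressed header
 * for the rebuilt one:
 *
 *   datagram_size = compressed_size - hdr_size_comp + hdr_size_decomp
 *
 * e.g. a 60-byte compressed packet whose 10 header bytes expand to IPv6+UDP
 * (40 + 8 = 48 bytes) yields 60 - 10 + 48 = 98 bytes, and the IPv6 payload
 * length field is then set to 98 - 40 = 58.
 */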
*/ + q = pbuf_alloc(PBUF_IP, p->len + IP6_HLEN + UDP_HLEN_ALLOC, PBUF_POOL); + if (q == NULL) { + pbuf_free(p); + return NULL; + } + if (q->len < IP6_HLEN + UDP_HLEN_ALLOC) { + /* The headers need to fit into the first pbuf */ + pbuf_free(p); + pbuf_free(q); + return NULL; + } + + /* Decompress the IPv6 (and possibly UDP) header(s) into the new pbuf */ + err = lowpan6_decompress_hdr((u8_t *)p->payload, p->len, (u8_t *)q->payload, q->len, + &lowpan6_offset, &ip6_offset, datagram_size, p->tot_len, lowpan6_contexts, src, dest); + if (err != ERR_OK) { + pbuf_free(p); + pbuf_free(q); + return NULL; + } + + /* Now we copy leftover contents from p to q, so we have all L2 and L3 headers + (and L4?) in a single pbuf: */ + + /* Hide the compressed headers in p */ + pbuf_remove_header(p, lowpan6_offset); + /* Temporarily hide the headers in q... */ + pbuf_remove_header(q, ip6_offset); + /* ... copy the rest of p into q... */ + pbuf_copy(q, p); + /* ... and reveal the headers again... */ + pbuf_add_header_force(q, ip6_offset); + /* ... trim the pbuf to its correct size... */ + pbuf_realloc(q, ip6_offset + p->len); + /* ... and cat possibly remaining (data-only) pbufs */ + if (p->next != NULL) { + pbuf_cat(q, p->next); + } + /* the original (first) pbuf can now be freed */ + p->next = NULL; + pbuf_free(p); + + /* all done */ + return q; +} + +#endif /* LWIP_6LOWPAN_IPHC */ +#endif /* LWIP_IPV6 */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c new file mode 100644 index 000000000..c8673ad0f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c @@ -0,0 +1,2510 @@ +/* + * auth.c - PPP authentication and phase control. + * + * Copyright (c) 1993-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Derived from main.c, which is: + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(_PATH_LASTLOG) && defined(__linux__) +#include +#endif + +#include +#include +#include + +#ifdef HAS_SHADOW +#include +#ifndef PW_PPP +#define PW_PPP PW_LOGIN +#endif +#endif + +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#if CCP_SUPPORT +#include "netif/ppp/ccp.h" +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT +#include "netif/ppp/ecp.h" +#endif /* ECP_SUPPORT */ +#include "netif/ppp/ipcp.h" +#if PAP_SUPPORT +#include "netif/ppp/upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "netif/ppp/eap.h" +#endif /* EAP_SUPPORT */ +#if CBCP_SUPPORT +#include "netif/ppp/cbcp.h" +#endif + +#if 0 /* UNUSED */ +#include "session.h" +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* Bits in scan_authfile return value */ +#define NONWILD_SERVER 1 +#define NONWILD_CLIENT 2 + +#define ISWILD(word) (word[0] == '*' && word[1] == 0) +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* List of addresses which the peer may use. */ +static struct permitted_ip *addresses[NUM_PPP]; + +/* Wordlist giving addresses which the peer may use + without authenticating itself. */ +static struct wordlist *noauth_addrs; + +/* Remote telephone number, if available */ +char remote_number[MAXNAMELEN]; + +/* Wordlist giving remote telephone numbers which may connect. */ +static struct wordlist *permitted_numbers; + +/* Extra options to apply, from the secrets file entry for the peer. */ +static struct wordlist *extra_options; +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* Set if we require authentication only because we have a default route. 
*/ +static bool default_auth; + +/* Hook to enable a plugin to control the idle time limit */ +int (*idle_time_hook) (struct ppp_idle *) = NULL; + +/* Hook for a plugin to say whether we can possibly authenticate any peer */ +int (*pap_check_hook) (void) = NULL; + +/* Hook for a plugin to check the PAP user and password */ +int (*pap_auth_hook) (char *user, char *passwd, char **msgp, + struct wordlist **paddrs, + struct wordlist **popts) = NULL; + +/* Hook for a plugin to know about the PAP user logout */ +void (*pap_logout_hook) (void) = NULL; + +/* Hook for a plugin to get the PAP password for authenticating us */ +int (*pap_passwd_hook) (char *user, char *passwd) = NULL; + +/* Hook for a plugin to say if we can possibly authenticate a peer using CHAP */ +int (*chap_check_hook) (void) = NULL; + +/* Hook for a plugin to get the CHAP password for authenticating us */ +int (*chap_passwd_hook) (char *user, char *passwd) = NULL; + +/* Hook for a plugin to say whether it is OK if the peer + refuses to authenticate. */ +int (*null_auth_hook) (struct wordlist **paddrs, + struct wordlist **popts) = NULL; + +int (*allowed_address_hook) (u32_t addr) = NULL; +#endif /* UNUSED */ + +#ifdef HAVE_MULTILINK +/* Hook for plugin to hear when an interface joins a multilink bundle */ +void (*multilink_join_hook) (void) = NULL; +#endif + +#if PPP_NOTIFY +/* A notifier for when the peer has authenticated itself, + and we are proceeding to the network phase. */ +struct notifier *auth_up_notifier = NULL; + +/* A notifier for when the link goes down. */ +struct notifier *link_down_notifier = NULL; +#endif /* PPP_NOTIFY */ + +/* + * Option variables. + */ +#if 0 /* MOVED TO ppp_settings */ +bool uselogin = 0; /* Use /etc/passwd for checking PAP */ +bool session_mgmt = 0; /* Do session management (login records) */ +bool cryptpap = 0; /* Passwords in pap-secrets are encrypted */ +bool refuse_pap = 0; /* Don't wanna auth. ourselves with PAP */ +bool refuse_chap = 0; /* Don't wanna auth. ourselves with CHAP */ +bool refuse_eap = 0; /* Don't wanna auth. ourselves with EAP */ +#if MSCHAP_SUPPORT +bool refuse_mschap = 0; /* Don't wanna auth. ourselves with MS-CHAP */ +bool refuse_mschap_v2 = 0; /* Don't wanna auth. ourselves with MS-CHAPv2 */ +#else /* MSCHAP_SUPPORT */ +bool refuse_mschap = 1; /* Don't wanna auth. ourselves with MS-CHAP */ +bool refuse_mschap_v2 = 1; /* Don't wanna auth. ourselves with MS-CHAPv2 */ +#endif /* MSCHAP_SUPPORT */ +bool usehostname = 0; /* Use hostname for our_name */ +bool auth_required = 0; /* Always require authentication from peer */ +bool allow_any_ip = 0; /* Allow peer to use any IP address */ +bool explicit_remote = 0; /* User specified explicit remote name */ +bool explicit_user = 0; /* Set if "user" option supplied */ +bool explicit_passwd = 0; /* Set if "password" option supplied */ +char remote_name[MAXNAMELEN]; /* Peer's name for authentication */ +static char *uafname; /* name of most recent +ua file */ + +extern char *crypt (const char *, const char *); +#endif /* UNUSED */ +/* Prototypes for procedures local to this file. 
*/ + +static void network_phase(ppp_pcb *pcb); +#if PPP_IDLETIMELIMIT +static void check_idle(void *arg); +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT +static void connect_time_expired(void *arg); +#endif /* PPP_MAXCONNECT */ +#if 0 /* UNUSED */ +static int null_login (int); +/* static int get_pap_passwd (char *); */ +static int have_pap_secret (int *); +static int have_chap_secret (char *, char *, int, int *); +static int have_srp_secret (char *client, char *server, int need_ip, + int *lacks_ipp); +static int ip_addr_check (u32_t, struct permitted_ip *); +static int scan_authfile (FILE *, char *, char *, char *, + struct wordlist **, struct wordlist **, + char *, int); +static void free_wordlist (struct wordlist *); +static void set_allowed_addrs (int, struct wordlist *, struct wordlist *); +static int some_ip_ok (struct wordlist *); +static int setupapfile (char **); +static int privgroup (char **); +static int set_noauth_addr (char **); +static int set_permitted_number (char **); +static void check_access (FILE *, char *); +static int wordlist_count (struct wordlist *); +#endif /* UNUSED */ + +#ifdef MAXOCTETS +static void check_maxoctets (void *); +#endif + +#if PPP_OPTIONS +/* + * Authentication-related options. + */ +option_t auth_options[] = { + { "auth", o_bool, &auth_required, + "Require authentication from peer", OPT_PRIO | 1 }, + { "noauth", o_bool, &auth_required, + "Don't require peer to authenticate", OPT_PRIOSUB | OPT_PRIV, + &allow_any_ip }, + { "require-pap", o_bool, &lcp_wantoptions[0].neg_upap, + "Require PAP authentication from peer", + OPT_PRIOSUB | 1, &auth_required }, + { "+pap", o_bool, &lcp_wantoptions[0].neg_upap, + "Require PAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | 1, &auth_required }, + { "require-chap", o_bool, &auth_required, + "Require CHAP authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MD5, + &lcp_wantoptions[0].chap_mdtype }, + { "+chap", o_bool, &auth_required, + "Require CHAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MD5, + &lcp_wantoptions[0].chap_mdtype }, +#if MSCHAP_SUPPORT + { "require-mschap", o_bool, &auth_required, + "Require MS-CHAP authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT, + &lcp_wantoptions[0].chap_mdtype }, + { "+mschap", o_bool, &auth_required, + "Require MS-CHAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT, + &lcp_wantoptions[0].chap_mdtype }, + { "require-mschap-v2", o_bool, &auth_required, + "Require MS-CHAPv2 authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT_V2, + &lcp_wantoptions[0].chap_mdtype }, + { "+mschap-v2", o_bool, &auth_required, + "Require MS-CHAPv2 authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT_V2, + &lcp_wantoptions[0].chap_mdtype }, +#endif /* MSCHAP_SUPPORT */ +#if 0 + { "refuse-pap", o_bool, &refuse_pap, + "Don't agree to auth to peer with PAP", 1 }, + { "-pap", o_bool, &refuse_pap, + "Don't allow PAP authentication with peer", OPT_ALIAS | 1 }, + { "refuse-chap", o_bool, &refuse_chap, + "Don't agree to auth to peer with CHAP", + OPT_A2CLRB | MDTYPE_MD5, + &lcp_allowoptions[0].chap_mdtype }, + { "-chap", o_bool, &refuse_chap, + "Don't allow CHAP authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MD5, + &lcp_allowoptions[0].chap_mdtype }, +#endif +#if MSCHAP_SUPPORT +#if 0 + { "refuse-mschap", o_bool, &refuse_mschap, + "Don't agree to auth to peer with MS-CHAP", + OPT_A2CLRB | MDTYPE_MICROSOFT, + 
&lcp_allowoptions[0].chap_mdtype }, + { "-mschap", o_bool, &refuse_mschap, + "Don't allow MS-CHAP authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MICROSOFT, + &lcp_allowoptions[0].chap_mdtype }, + { "refuse-mschap-v2", o_bool, &refuse_mschap_v2, + "Don't agree to auth to peer with MS-CHAPv2", + OPT_A2CLRB | MDTYPE_MICROSOFT_V2, + &lcp_allowoptions[0].chap_mdtype }, + { "-mschap-v2", o_bool, &refuse_mschap_v2, + "Don't allow MS-CHAPv2 authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MICROSOFT_V2, + &lcp_allowoptions[0].chap_mdtype }, +#endif +#endif /* MSCHAP_SUPPORT*/ +#if EAP_SUPPORT + { "require-eap", o_bool, &lcp_wantoptions[0].neg_eap, + "Require EAP authentication from peer", OPT_PRIOSUB | 1, + &auth_required }, +#if 0 + { "refuse-eap", o_bool, &refuse_eap, + "Don't agree to authenticate to peer with EAP", 1 }, +#endif +#endif /* EAP_SUPPORT */ + { "name", o_string, our_name, + "Set local name for authentication", + OPT_PRIO | OPT_PRIV | OPT_STATIC, NULL, MAXNAMELEN }, + + { "+ua", o_special, (void *)setupapfile, + "Get PAP user and password from file", + OPT_PRIO | OPT_A2STRVAL, &uafname }, + +#if 0 + { "user", o_string, user, + "Set name for auth with peer", OPT_PRIO | OPT_STATIC, + &explicit_user, MAXNAMELEN }, + + { "password", o_string, passwd, + "Password for authenticating us to the peer", + OPT_PRIO | OPT_STATIC | OPT_HIDE, + &explicit_passwd, MAXSECRETLEN }, +#endif + + { "usehostname", o_bool, &usehostname, + "Must use hostname for authentication", 1 }, + + { "remotename", o_string, remote_name, + "Set remote name for authentication", OPT_PRIO | OPT_STATIC, + &explicit_remote, MAXNAMELEN }, + + { "login", o_bool, &uselogin, + "Use system password database for PAP", OPT_A2COPY | 1 , + &session_mgmt }, + { "enable-session", o_bool, &session_mgmt, + "Enable session accounting for remote peers", OPT_PRIV | 1 }, + + { "papcrypt", o_bool, &cryptpap, + "PAP passwords are encrypted", 1 }, + + { "privgroup", o_special, (void *)privgroup, + "Allow group members to use privileged options", OPT_PRIV | OPT_A2LIST }, + + { "allow-ip", o_special, (void *)set_noauth_addr, + "Set IP address(es) which can be used without authentication", + OPT_PRIV | OPT_A2LIST }, + + { "remotenumber", o_string, remote_number, + "Set remote telephone number for authentication", OPT_PRIO | OPT_STATIC, + NULL, MAXNAMELEN }, + + { "allow-number", o_special, (void *)set_permitted_number, + "Set telephone number(s) which are allowed to connect", + OPT_PRIV | OPT_A2LIST }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +#if 0 /* UNUSED */ +/* + * setupapfile - specifies UPAP info for authenticating with peer. 
+ */ +static int +setupapfile(argv) + char **argv; +{ + FILE *ufile; + int l; + uid_t euid; + char u[MAXNAMELEN], p[MAXSECRETLEN]; + char *fname; + + lcp_allowoptions[0].neg_upap = 1; + + /* open user info file */ + fname = strdup(*argv); + if (fname == NULL) + novm("+ua file name"); + euid = geteuid(); + if (seteuid(getuid()) == -1) { + option_error("unable to reset uid before opening %s: %m", fname); + return 0; + } + ufile = fopen(fname, "r"); + if (seteuid(euid) == -1) + fatal("unable to regain privileges: %m"); + if (ufile == NULL) { + option_error("unable to open user login data file %s", fname); + return 0; + } + check_access(ufile, fname); + uafname = fname; + + /* get username */ + if (fgets(u, MAXNAMELEN - 1, ufile) == NULL + || fgets(p, MAXSECRETLEN - 1, ufile) == NULL) { + fclose(ufile); + option_error("unable to read user login data file %s", fname); + return 0; + } + fclose(ufile); + + /* get rid of newlines */ + l = strlen(u); + if (l > 0 && u[l-1] == '\n') + u[l-1] = 0; + l = strlen(p); + if (l > 0 && p[l-1] == '\n') + p[l-1] = 0; + + if (override_value("user", option_priority, fname)) { + strlcpy(ppp_settings.user, u, sizeof(ppp_settings.user)); + explicit_user = 1; + } + if (override_value("passwd", option_priority, fname)) { + strlcpy(ppp_settings.passwd, p, sizeof(ppp_settings.passwd)); + explicit_passwd = 1; + } + + return (1); +} + +/* + * privgroup - allow members of the group to have privileged access. + */ +static int +privgroup(argv) + char **argv; +{ + struct group *g; + int i; + + g = getgrnam(*argv); + if (g == 0) { + option_error("group %s is unknown", *argv); + return 0; + } + for (i = 0; i < ngroups; ++i) { + if (groups[i] == g->gr_gid) { + privileged = 1; + break; + } + } + return 1; +} + + +/* + * set_noauth_addr - set address(es) that can be used without authentication. + * Equivalent to specifying an entry like `"" * "" addr' in pap-secrets. + */ +static int +set_noauth_addr(argv) + char **argv; +{ + char *addr = *argv; + int l = strlen(addr) + 1; + struct wordlist *wp; + + wp = (struct wordlist *) malloc(sizeof(struct wordlist) + l); + if (wp == NULL) + novm("allow-ip argument"); + wp->word = (char *) (wp + 1); + wp->next = noauth_addrs; + MEMCPY(wp->word, addr, l); + noauth_addrs = wp; + return 1; +} + + +/* + * set_permitted_number - set remote telephone number(s) that may connect. + */ +static int +set_permitted_number(argv) + char **argv; +{ + char *number = *argv; + int l = strlen(number) + 1; + struct wordlist *wp; + + wp = (struct wordlist *) malloc(sizeof(struct wordlist) + l); + if (wp == NULL) + novm("allow-number argument"); + wp->word = (char *) (wp + 1); + wp->next = permitted_numbers; + MEMCPY(wp->word, number, l); + permitted_numbers = wp; + return 1; +} +#endif + +/* + * An Open on LCP has requested a change from Dead to Establish phase. + */ +void link_required(ppp_pcb *pcb) { + LWIP_UNUSED_ARG(pcb); +} + +#if 0 +/* + * Bring the link up to the point of being able to do ppp. + */ +void start_link(unit) + int unit; +{ + ppp_pcb *pcb = &ppp_pcb_list[unit]; + char *msg; + + status = EXIT_NEGOTIATION_FAILED; + new_phase(pcb, PPP_PHASE_SERIALCONN); + + hungup = 0; + devfd = the_channel->connect(); + msg = "Connect script failed"; + if (devfd < 0) + goto fail; + + /* set up the serial device as a ppp interface */ + /* + * N.B. we used to do tdb_writelock/tdb_writeunlock around this + * (from establish_ppp to set_ifunit). 
However, we won't be + * doing the set_ifunit in multilink mode, which is the only time + * we need the atomicity that the tdb_writelock/tdb_writeunlock + * gives us. Thus we don't need the tdb_writelock/tdb_writeunlock. + */ + fd_ppp = the_channel->establish_ppp(devfd); + msg = "ppp establishment failed"; + if (fd_ppp < 0) { + status = EXIT_FATAL_ERROR; + goto disconnect; + } + + if (!demand && ifunit >= 0) + set_ifunit(1); + + /* + * Start opening the connection and wait for + * incoming events (reply, timeout, etc.). + */ + if (ifunit >= 0) + ppp_notice("Connect: %s <--> %s", ifname, ppp_devnam); + else + ppp_notice("Starting negotiation on %s", ppp_devnam); + add_fd(fd_ppp); + + new_phase(pcb, PPP_PHASE_ESTABLISH); + + lcp_lowerup(pcb); + return; + + disconnect: + new_phase(pcb, PPP_PHASE_DISCONNECT); + if (the_channel->disconnect) + the_channel->disconnect(); + + fail: + new_phase(pcb, PPP_PHASE_DEAD); + if (the_channel->cleanup) + (*the_channel->cleanup)(); +} +#endif + +/* + * LCP has terminated the link; go to the Dead phase and take the + * physical layer down. + */ +void link_terminated(ppp_pcb *pcb) { + if (pcb->phase == PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + || pcb->phase == PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + return; + new_phase(pcb, PPP_PHASE_DISCONNECT); + +#if 0 /* UNUSED */ + if (pap_logout_hook) { + pap_logout_hook(); + } + session_end(devnam); +#endif /* UNUSED */ + + if (!doing_multilink) { + ppp_notice("Connection terminated."); +#if PPP_STATS_SUPPORT + print_link_stats(); +#endif /* PPP_STATS_SUPPORT */ + } else + ppp_notice("Link terminated."); + + lcp_lowerdown(pcb); + + ppp_link_terminated(pcb); +#if 0 + /* + * Delete pid files before disestablishing ppp. Otherwise it + * can happen that another pppd gets the same unit and then + * we delete its pid file. + */ + if (!doing_multilink && !demand) + remove_pidfiles(); + + /* + * If we may want to bring the link up again, transfer + * the ppp unit back to the loopback. Set the + * real serial device back to its normal mode of operation. + */ + if (fd_ppp >= 0) { + remove_fd(fd_ppp); + clean_check(); + the_channel->disestablish_ppp(devfd); + if (doing_multilink) + mp_exit_bundle(); + fd_ppp = -1; + } + if (!hungup) + lcp_lowerdown(pcb); + if (!doing_multilink && !demand) + script_unsetenv("IFNAME"); + + /* + * Run disconnector script, if requested. + * XXX we may not be able to do this if the line has hung up! + */ + if (devfd >= 0 && the_channel->disconnect) { + the_channel->disconnect(); + devfd = -1; + } + if (the_channel->cleanup) + (*the_channel->cleanup)(); + + if (doing_multilink && multilink_master) { + if (!bundle_terminating) + new_phase(pcb, PPP_PHASE_MASTER); + else + mp_bundle_terminated(); + } else + new_phase(pcb, PPP_PHASE_DEAD); +#endif +} + +/* + * LCP has gone down; it will either die or try to re-establish. 
+ */ +void link_down(ppp_pcb *pcb) { +#if PPP_NOTIFY + notify(link_down_notifier, 0); +#endif /* PPP_NOTIFY */ + + if (!doing_multilink) { + upper_layers_down(pcb); + if (pcb->phase != PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + && pcb->phase != PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + new_phase(pcb, PPP_PHASE_ESTABLISH); + } + /* XXX if doing_multilink, should do something to stop + network-layer traffic on the link */ +} + +void upper_layers_down(ppp_pcb *pcb) { + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol != PPP_LCP && protp->lowerdown != NULL) + (*protp->lowerdown)(pcb); + if (protp->protocol < 0xC000 && protp->close != NULL) + (*protp->close)(pcb, "LCP down"); + } + pcb->num_np_open = 0; + pcb->num_np_up = 0; +} + +/* + * The link is established. + * Proceed to the Dead, Authenticate or Network phase as appropriate. + */ +void link_established(ppp_pcb *pcb) { +#if PPP_AUTH_SUPPORT + int auth; +#if PPP_SERVER +#if PAP_SUPPORT + lcp_options *wo = &pcb->lcp_wantoptions; +#endif /* PAP_SUPPORT */ + lcp_options *go = &pcb->lcp_gotoptions; +#endif /* PPP_SERVER */ + lcp_options *ho = &pcb->lcp_hisoptions; +#endif /* PPP_AUTH_SUPPORT */ + int i; + const struct protent *protp; + + /* + * Tell higher-level protocols that LCP is up. + */ + if (!doing_multilink) { + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol != PPP_LCP + && protp->lowerup != NULL) + (*protp->lowerup)(pcb); + } + +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +#if PPP_ALLOWED_ADDRS + if (!auth_required && noauth_addrs != NULL) + set_allowed_addrs(unit, NULL, NULL); +#endif /* PPP_ALLOWED_ADDRS */ + + if (pcb->settings.auth_required && !(0 +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + )) { + +#if PPP_ALLOWED_ADDRS + /* + * We wanted the peer to authenticate itself, and it refused: + * if we have some address(es) it can use without auth, fine, + * otherwise treat it as though it authenticated with PAP using + * a username of "" and a password of "". If that's not OK, + * boot it out. 
+ */ + if (noauth_addrs != NULL) { + set_allowed_addrs(unit, NULL, NULL); + } else +#endif /* PPP_ALLOWED_ADDRS */ + if (!pcb->settings.null_login +#if PAP_SUPPORT + || !wo->neg_upap +#endif /* PAP_SUPPORT */ + ) { + ppp_warn("peer refused to authenticate: terminating link"); +#if 0 /* UNUSED */ + status = EXIT_PEER_AUTH_FAILED; +#endif /* UNUSED */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "peer refused to authenticate"); + return; + } + } +#endif /* PPP_SERVER */ + + new_phase(pcb, PPP_PHASE_AUTHENTICATE); + auth = 0; +#if PPP_SERVER +#if EAP_SUPPORT + if (go->neg_eap) { + eap_authpeer(pcb, PPP_OUR_NAME); + auth |= EAP_PEER; + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (go->neg_chap) { + chap_auth_peer(pcb, PPP_OUR_NAME, CHAP_DIGEST(go->chap_mdtype)); + auth |= CHAP_PEER; + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if (go->neg_upap) { + upap_authpeer(pcb); + auth |= PAP_PEER; + } else +#endif /* PAP_SUPPORT */ + {} +#endif /* PPP_SERVER */ + +#if EAP_SUPPORT + if (ho->neg_eap) { + eap_authwithpeer(pcb, pcb->settings.user); + auth |= EAP_WITHPEER; + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (ho->neg_chap) { + chap_auth_with_peer(pcb, pcb->settings.user, CHAP_DIGEST(ho->chap_mdtype)); + auth |= CHAP_WITHPEER; + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if (ho->neg_upap) { + upap_authwithpeer(pcb, pcb->settings.user, pcb->settings.passwd); + auth |= PAP_WITHPEER; + } else +#endif /* PAP_SUPPORT */ + {} + + pcb->auth_pending = auth; + pcb->auth_done = 0; + + if (!auth) +#endif /* PPP_AUTH_SUPPORT */ + network_phase(pcb); +} + +/* + * Proceed to the network phase. + */ +static void network_phase(ppp_pcb *pcb) { +#if CBCP_SUPPORT + ppp_pcb *pcb = &ppp_pcb_list[unit]; +#endif +#if 0 /* UNUSED */ + lcp_options *go = &lcp_gotoptions[unit]; +#endif /* UNUSED */ + +#if 0 /* UNUSED */ + /* Log calling number. */ + if (*remote_number) + ppp_notice("peer from calling number %q authorized", remote_number); +#endif /* UNUSED */ + +#if PPP_NOTIFY + /* + * If the peer had to authenticate, notify it now. + */ + if (0 +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + ) { + notify(auth_up_notifier, 0); + } +#endif /* PPP_NOTIFY */ + +#if CBCP_SUPPORT + /* + * If we negotiated callback, do it now. 
+ */ + if (go->neg_cbcp) { + new_phase(pcb, PPP_PHASE_CALLBACK); + (*cbcp_protent.open)(pcb); + return; + } +#endif + +#if PPP_OPTIONS + /* + * Process extra options from the secrets file + */ + if (extra_options) { + options_from_list(extra_options, 1); + free_wordlist(extra_options); + extra_options = 0; + } +#endif /* PPP_OPTIONS */ + start_networks(pcb); +} + +void start_networks(ppp_pcb *pcb) { +#if CCP_SUPPORT || ECP_SUPPORT + int i; + const struct protent *protp; +#endif /* CCP_SUPPORT || ECP_SUPPORT */ + + new_phase(pcb, PPP_PHASE_NETWORK); + +#ifdef HAVE_MULTILINK + if (multilink) { + if (mp_join_bundle()) { + if (multilink_join_hook) + (*multilink_join_hook)(); + if (updetach && !nodetach) + detach(); + return; + } + } +#endif /* HAVE_MULTILINK */ + +#ifdef PPP_FILTER + if (!demand) + set_filters(&pass_filter, &active_filter); +#endif +#if CCP_SUPPORT || ECP_SUPPORT + /* Start CCP and ECP */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if ( + (0 +#if ECP_SUPPORT + || protp->protocol == PPP_ECP +#endif /* ECP_SUPPORT */ +#if CCP_SUPPORT + || protp->protocol == PPP_CCP +#endif /* CCP_SUPPORT */ + ) + && protp->open != NULL) + (*protp->open)(pcb); +#endif /* CCP_SUPPORT || ECP_SUPPORT */ + + /* + * Bring up other network protocols iff encryption is not required. + */ + if (1 +#if ECP_SUPPORT + && !ecp_gotoptions[unit].required +#endif /* ECP_SUPPORT */ +#if MPPE_SUPPORT + && !pcb->ccp_gotoptions.mppe +#endif /* MPPE_SUPPORT */ + ) + continue_networks(pcb); +} + +void continue_networks(ppp_pcb *pcb) { + int i; + const struct protent *protp; + + /* + * Start the "real" network protocols. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol < 0xC000 +#if CCP_SUPPORT + && protp->protocol != PPP_CCP +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT + && protp->protocol != PPP_ECP +#endif /* ECP_SUPPORT */ + && protp->open != NULL) { + (*protp->open)(pcb); + ++pcb->num_np_open; + } + + if (pcb->num_np_open == 0) + /* nothing to do */ + lcp_close(pcb, "No network protocols running"); +} + +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +/* + * auth_check_passwd - Check the user name and passwd against configuration. + * + * returns: + * 0: Authentication failed. + * 1: Authentication succeeded. + * In either case, msg points to an appropriate message and msglen to the message len. + */ +int auth_check_passwd(ppp_pcb *pcb, char *auser, int userlen, char *apasswd, int passwdlen, const char **msg, int *msglen) { + int secretuserlen; + int secretpasswdlen; + + if (pcb->settings.user && pcb->settings.passwd) { + secretuserlen = (int)strlen(pcb->settings.user); + secretpasswdlen = (int)strlen(pcb->settings.passwd); + if (secretuserlen == userlen + && secretpasswdlen == passwdlen + && !memcmp(auser, pcb->settings.user, userlen) + && !memcmp(apasswd, pcb->settings.passwd, passwdlen) ) { + *msg = "Login ok"; + *msglen = sizeof("Login ok")-1; + return 1; + } + } + + *msg = "Login incorrect"; + *msglen = sizeof("Login incorrect")-1; + return 0; +} + +/* + * The peer has failed to authenticate himself using `protocol'. + */ +void auth_peer_fail(ppp_pcb *pcb, int protocol) { + LWIP_UNUSED_ARG(protocol); + /* + * Authentication failure: take the link down + */ +#if 0 /* UNUSED */ + status = EXIT_PEER_AUTH_FAILED; +#endif /* UNUSED */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "Authentication failed"); +} + +/* + * The peer has been successfully authenticated using `protocol'. 
+ */ +void auth_peer_success(ppp_pcb *pcb, int protocol, int prot_flavor, const char *name, int namelen) { + int bit; +#ifndef HAVE_MULTILINK + LWIP_UNUSED_ARG(name); + LWIP_UNUSED_ARG(namelen); +#endif /* HAVE_MULTILINK */ + + switch (protocol) { +#if CHAP_SUPPORT + case PPP_CHAP: + bit = CHAP_PEER; + switch (prot_flavor) { + case CHAP_MD5: + bit |= CHAP_MD5_PEER; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + bit |= CHAP_MS_PEER; + break; + case CHAP_MICROSOFT_V2: + bit |= CHAP_MS2_PEER; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + break; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + case PPP_PAP: + bit = PAP_PEER; + break; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + bit = EAP_PEER; + break; +#endif /* EAP_SUPPORT */ + default: + ppp_warn("auth_peer_success: unknown protocol %x", protocol); + return; + } + +#ifdef HAVE_MULTILINK + /* + * Save the authenticated name of the peer for later. + */ + if (namelen > (int)sizeof(pcb->peer_authname) - 1) + namelen = (int)sizeof(pcb->peer_authname) - 1; + MEMCPY(pcb->peer_authname, name, namelen); + pcb->peer_authname[namelen] = 0; +#endif /* HAVE_MULTILINK */ +#if 0 /* UNUSED */ + script_setenv("PEERNAME", , 0); +#endif /* UNUSED */ + + /* Save the authentication method for later. */ + pcb->auth_done |= bit; + + /* + * If there is no more authentication still to be done, + * proceed to the network (or callback) phase. + */ + if ((pcb->auth_pending &= ~bit) == 0) + network_phase(pcb); +} +#endif /* PPP_SERVER */ + +/* + * We have failed to authenticate ourselves to the peer using `protocol'. + */ +void auth_withpeer_fail(ppp_pcb *pcb, int protocol) { + LWIP_UNUSED_ARG(protocol); + /* + * We've failed to authenticate ourselves to our peer. + * + * Some servers keep sending CHAP challenges, but there + * is no point in persisting without any way to get updated + * authentication secrets. + * + * He'll probably take the link down, and there's not much + * we can do except wait for that. + */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "Failed to authenticate ourselves to peer"); +} + +/* + * We have successfully authenticated ourselves with the peer using `protocol'. + */ +void auth_withpeer_success(ppp_pcb *pcb, int protocol, int prot_flavor) { + int bit; + const char *prot = ""; + + switch (protocol) { +#if CHAP_SUPPORT + case PPP_CHAP: + bit = CHAP_WITHPEER; + prot = "CHAP"; + switch (prot_flavor) { + case CHAP_MD5: + bit |= CHAP_MD5_WITHPEER; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + bit |= CHAP_MS_WITHPEER; + break; + case CHAP_MICROSOFT_V2: + bit |= CHAP_MS2_WITHPEER; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + break; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + case PPP_PAP: + bit = PAP_WITHPEER; + prot = "PAP"; + break; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + bit = EAP_WITHPEER; + prot = "EAP"; + break; +#endif /* EAP_SUPPORT */ + default: + ppp_warn("auth_withpeer_success: unknown protocol %x", protocol); + bit = 0; + /* no break */ + } + + ppp_notice("%s authentication succeeded", prot); + + /* Save the authentication method for later. */ + pcb->auth_done |= bit; + + /* + * If there is no more authentication still being done, + * proceed to the network (or callback) phase. + */ + if ((pcb->auth_pending &= ~bit) == 0) + network_phase(pcb); +} +#endif /* PPP_AUTH_SUPPORT */ + + +/* + * np_up - a network protocol has come up. 
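/*
 * Illustrative aside, not part of the lwIP sources: auth_pending is a plain
 * bitmask; starting an authentication sets a bit, the matching success/fail
 * handler clears it, and the network phase starts once no bits are left.
 * A hypothetical, self-contained model of that bookkeeping:
 */
#include <stdio.h>

#define DEMO_CHAP_WITHPEER 0x1
#define DEMO_PAP_PEER      0x2

int main(void)
{
  unsigned pending = 0;

  pending |= DEMO_CHAP_WITHPEER;   /* chap_auth_with_peer() was started    */
  pending |= DEMO_PAP_PEER;        /* upap_authpeer() was started          */

  pending &= ~DEMO_CHAP_WITHPEER;  /* auth_withpeer_success(PPP_CHAP, ...) */
  pending &= ~DEMO_PAP_PEER;       /* auth_peer_success(PPP_PAP, ...)      */

  if (pending == 0) {
    printf("all authentication done -> network_phase()\n");
  }
  return 0;
}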
+ */ +void np_up(ppp_pcb *pcb, int proto) { +#if PPP_IDLETIMELIMIT + int tlim; +#endif /* PPP_IDLETIMELIMIT */ + LWIP_UNUSED_ARG(proto); + + if (pcb->num_np_up == 0) { + /* + * At this point we consider that the link has come up successfully. + */ + new_phase(pcb, PPP_PHASE_RUNNING); + +#if PPP_IDLETIMELIMIT +#if 0 /* UNUSED */ + if (idle_time_hook != 0) + tlim = (*idle_time_hook)(NULL); + else +#endif /* UNUSED */ + tlim = pcb->settings.idle_time_limit; + if (tlim > 0) + TIMEOUT(check_idle, (void*)pcb, tlim); +#endif /* PPP_IDLETIMELIMIT */ + +#if PPP_MAXCONNECT + /* + * Set a timeout to close the connection once the maximum + * connect time has expired. + */ + if (pcb->settings.maxconnect > 0) + TIMEOUT(connect_time_expired, (void*)pcb, pcb->settings.maxconnect); +#endif /* PPP_MAXCONNECT */ + +#ifdef MAXOCTETS + if (maxoctets > 0) + TIMEOUT(check_maxoctets, NULL, maxoctets_timeout); +#endif + +#if 0 /* Unused */ + /* + * Detach now, if the updetach option was given. + */ + if (updetach && !nodetach) + detach(); +#endif /* Unused */ + } + ++pcb->num_np_up; +} + +/* + * np_down - a network protocol has gone down. + */ +void np_down(ppp_pcb *pcb, int proto) { + LWIP_UNUSED_ARG(proto); + if (--pcb->num_np_up == 0) { +#if PPP_IDLETIMELIMIT + UNTIMEOUT(check_idle, (void*)pcb); +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT + UNTIMEOUT(connect_time_expired, NULL); +#endif /* PPP_MAXCONNECT */ +#ifdef MAXOCTETS + UNTIMEOUT(check_maxoctets, NULL); +#endif + new_phase(pcb, PPP_PHASE_NETWORK); + } +} + +/* + * np_finished - a network protocol has finished using the link. + */ +void np_finished(ppp_pcb *pcb, int proto) { + LWIP_UNUSED_ARG(proto); + if (--pcb->num_np_open <= 0) { + /* no further use for the link: shut up shop. */ + lcp_close(pcb, "No network protocols running"); + } +} + +#ifdef MAXOCTETS +static void +check_maxoctets(arg) + void *arg; +{ +#if PPP_STATS_SUPPORT + unsigned int used; + + update_link_stats(ifunit); + link_stats_valid=0; + + switch(maxoctets_dir) { + case PPP_OCTETS_DIRECTION_IN: + used = link_stats.bytes_in; + break; + case PPP_OCTETS_DIRECTION_OUT: + used = link_stats.bytes_out; + break; + case PPP_OCTETS_DIRECTION_MAXOVERAL: + case PPP_OCTETS_DIRECTION_MAXSESSION: + used = (link_stats.bytes_in > link_stats.bytes_out) ? link_stats.bytes_in : link_stats.bytes_out; + break; + default: + used = link_stats.bytes_in+link_stats.bytes_out; + break; + } + if (used > maxoctets) { + ppp_notice("Traffic limit reached. Limit: %u Used: %u", maxoctets, used); + status = EXIT_TRAFFIC_LIMIT; + lcp_close(pcb, "Traffic limit"); +#if 0 /* UNUSED */ + need_holdoff = 0; +#endif /* UNUSED */ + } else { + TIMEOUT(check_maxoctets, NULL, maxoctets_timeout); + } +#endif /* PPP_STATS_SUPPORT */ +} +#endif /* MAXOCTETS */ + +#if PPP_IDLETIMELIMIT +/* + * check_idle - check whether the link has been idle for long + * enough that we can shut it down. + */ +static void check_idle(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + struct ppp_idle idle; + time_t itime; + int tlim; + + if (!get_idle_time(pcb, &idle)) + return; +#if 0 /* UNUSED */ + if (idle_time_hook != 0) { + tlim = idle_time_hook(&idle); + } else { +#endif /* UNUSED */ + itime = LWIP_MIN(idle.xmit_idle, idle.recv_idle); + tlim = pcb->settings.idle_time_limit - itime; +#if 0 /* UNUSED */ + } +#endif /* UNUSED */ + if (tlim <= 0) { + /* link is idle: shut it down. 
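/*
 * Illustrative aside, not part of the lwIP sources: with an idle_time_limit
 * of 300 s and the link last active 120 s ago (the smaller of xmit_idle and
 * recv_idle), tlim = 300 - 120 = 180 > 0, so check_idle() simply re-arms
 * itself for another 180 s; only when tlim reaches 0 or below is the link
 * closed with PPPERR_IDLETIMEOUT.
 */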
*/ + ppp_notice("Terminating connection due to lack of activity."); + pcb->err_code = PPPERR_IDLETIMEOUT; + lcp_close(pcb, "Link inactive"); +#if 0 /* UNUSED */ + need_holdoff = 0; +#endif /* UNUSED */ + } else { + TIMEOUT(check_idle, (void*)pcb, tlim); + } +} +#endif /* PPP_IDLETIMELIMIT */ + +#if PPP_MAXCONNECT +/* + * connect_time_expired - log a message and close the connection. + */ +static void connect_time_expired(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + ppp_info("Connect time expired"); + pcb->err_code = PPPERR_CONNECTTIME; + lcp_close(pcb, "Connect time expired"); /* Close connection */ +} +#endif /* PPP_MAXCONNECT */ + +#if PPP_OPTIONS +/* + * auth_check_options - called to check authentication options. + */ +void +auth_check_options() +{ + lcp_options *wo = &lcp_wantoptions[0]; + int can_auth; + int lacks_ip; + + /* Default our_name to hostname, and user to our_name */ + if (our_name[0] == 0 || usehostname) + strlcpy(our_name, hostname, sizeof(our_name)); + /* If a blank username was explicitly given as an option, trust + the user and don't use our_name */ + if (ppp_settings.user[0] == 0 && !explicit_user) + strlcpy(ppp_settings.user, our_name, sizeof(ppp_settings.user)); + + /* + * If we have a default route, require the peer to authenticate + * unless the noauth option was given or the real user is root. + */ + if (!auth_required && !allow_any_ip && have_route_to(0) && !privileged) { + auth_required = 1; + default_auth = 1; + } + +#if CHAP_SUPPORT + /* If we selected any CHAP flavors, we should probably negotiate it. :-) */ + if (wo->chap_mdtype) + wo->neg_chap = 1; +#endif /* CHAP_SUPPORT */ + + /* If authentication is required, ask peer for CHAP, PAP, or EAP. */ + if (auth_required) { + allow_any_ip = 0; + if (1 +#if CHAP_SUPPORT + && !wo->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + && !wo->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + && !wo->neg_eap +#endif /* EAP_SUPPORT */ + ) { +#if CHAP_SUPPORT + wo->neg_chap = CHAP_MDTYPE_SUPPORTED != MDTYPE_NONE; + wo->chap_mdtype = CHAP_MDTYPE_SUPPORTED; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + wo->neg_upap = 1; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + wo->neg_eap = 1; +#endif /* EAP_SUPPORT */ + } + } else { +#if CHAP_SUPPORT + wo->neg_chap = 0; + wo->chap_mdtype = MDTYPE_NONE; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + wo->neg_upap = 0; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + wo->neg_eap = 0; +#endif /* EAP_SUPPORT */ + } + + /* + * Check whether we have appropriate secrets to use + * to authenticate the peer. Note that EAP can authenticate by way + * of a CHAP-like exchanges as well as SRP. + */ + lacks_ip = 0; +#if PAP_SUPPORT + can_auth = wo->neg_upap && (uselogin || have_pap_secret(&lacks_ip)); +#else + can_auth = 0; +#endif /* PAP_SUPPORT */ + if (!can_auth && (0 +#if CHAP_SUPPORT + || wo->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || wo->neg_eap +#endif /* EAP_SUPPORT */ + )) { +#if CHAP_SUPPORT + can_auth = have_chap_secret((explicit_remote? remote_name: NULL), + our_name, 1, &lacks_ip); +#else + can_auth = 0; +#endif + } + if (!can_auth +#if EAP_SUPPORT + && wo->neg_eap +#endif /* EAP_SUPPORT */ + ) { + can_auth = have_srp_secret((explicit_remote? 
remote_name: NULL), + our_name, 1, &lacks_ip); + } + + if (auth_required && !can_auth && noauth_addrs == NULL) { + if (default_auth) { + option_error( +"By default the remote system is required to authenticate itself"); + option_error( +"(because this system has a default route to the internet)"); + } else if (explicit_remote) + option_error( +"The remote system (%s) is required to authenticate itself", + remote_name); + else + option_error( +"The remote system is required to authenticate itself"); + option_error( +"but I couldn't find any suitable secret (password) for it to use to do so."); + if (lacks_ip) + option_error( +"(None of the available passwords would let it use an IP address.)"); + + exit(1); + } + + /* + * Early check for remote number authorization. + */ + if (!auth_number()) { + ppp_warn("calling number %q is not authorized", remote_number); + exit(EXIT_CNID_AUTH_FAILED); + } +} +#endif /* PPP_OPTIONS */ + +#if 0 /* UNUSED */ +/* + * auth_reset - called when LCP is starting negotiations to recheck + * authentication options, i.e. whether we have appropriate secrets + * to use for authenticating ourselves and/or the peer. + */ +void +auth_reset(unit) + int unit; +{ + lcp_options *go = &lcp_gotoptions[unit]; + lcp_options *ao = &lcp_allowoptions[unit]; + int hadchap; + + hadchap = -1; + ao->neg_upap = !refuse_pap && (passwd[0] != 0 || get_pap_passwd(NULL)); + ao->neg_chap = (!refuse_chap || !refuse_mschap || !refuse_mschap_v2) + && (passwd[0] != 0 || + (hadchap = have_chap_secret(user, (explicit_remote? remote_name: + NULL), 0, NULL))); + ao->neg_eap = !refuse_eap && ( + passwd[0] != 0 || + (hadchap == 1 || (hadchap == -1 && have_chap_secret(user, + (explicit_remote? remote_name: NULL), 0, NULL))) || + have_srp_secret(user, (explicit_remote? remote_name: NULL), 0, NULL)); + + hadchap = -1; + if (go->neg_upap && !uselogin && !have_pap_secret(NULL)) + go->neg_upap = 0; + if (go->neg_chap) { + if (!(hadchap = have_chap_secret((explicit_remote? remote_name: NULL), + our_name, 1, NULL))) + go->neg_chap = 0; + } + if (go->neg_eap && + (hadchap == 0 || (hadchap == -1 && + !have_chap_secret((explicit_remote? remote_name: NULL), our_name, + 1, NULL))) && + !have_srp_secret((explicit_remote? remote_name: NULL), our_name, 1, + NULL)) + go->neg_eap = 0; +} + +/* + * check_passwd - Check the user name and passwd against the PAP secrets + * file. If requested, also check against the system password database, + * and login the user if OK. + * + * returns: + * UPAP_AUTHNAK: Authentication failed. + * UPAP_AUTHACK: Authentication succeeded. + * In either case, msg points to an appropriate message. + */ +int +check_passwd(unit, auser, userlen, apasswd, passwdlen, msg) + int unit; + char *auser; + int userlen; + char *apasswd; + int passwdlen; + char **msg; +{ + return UPAP_AUTHNAK; + int ret; + char *filename; + FILE *f; + struct wordlist *addrs = NULL, *opts = NULL; + char passwd[256], user[256]; + char secret[MAXWORDLEN]; + static int attempts = 0; + + /* + * Make copies of apasswd and auser, then null-terminate them. + * If there are unprintable characters in the password, make + * them visible. + */ + slprintf(ppp_settings.passwd, sizeof(ppp_settings.passwd), "%.*v", passwdlen, apasswd); + slprintf(ppp_settings.user, sizeof(ppp_settings.user), "%.*v", userlen, auser); + *msg = ""; + + /* + * Check if a plugin wants to handle this. 
+ */ + if (pap_auth_hook) { + ret = (*pap_auth_hook)(ppp_settings.user, ppp_settings.passwd, msg, &addrs, &opts); + if (ret >= 0) { + /* note: set_allowed_addrs() saves opts (but not addrs): + don't free it! */ + if (ret) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + BZERO(ppp_settings.passwd, sizeof(ppp_settings.passwd)); + return ret? UPAP_AUTHACK: UPAP_AUTHNAK; + } + } + + /* + * Open the file of pap secrets and scan for a suitable secret + * for authenticating this user. + */ + filename = _PATH_UPAPFILE; + addrs = opts = NULL; + ret = UPAP_AUTHNAK; + f = fopen(filename, "r"); + if (f == NULL) { + ppp_error("Can't open PAP password file %s: %m", filename); + + } else { + check_access(f, filename); + if (scan_authfile(f, ppp_settings.user, our_name, secret, &addrs, &opts, filename, 0) < 0) { + ppp_warn("no PAP secret found for %s", user); + } else { + /* + * If the secret is "@login", it means to check + * the password against the login database. + */ + int login_secret = strcmp(secret, "@login") == 0; + ret = UPAP_AUTHACK; + if (uselogin || login_secret) { + /* login option or secret is @login */ + if (session_full(ppp_settings.user, ppp_settings.passwd, devnam, msg) == 0) { + ret = UPAP_AUTHNAK; + } + } else if (session_mgmt) { + if (session_check(ppp_settings.user, NULL, devnam, NULL) == 0) { + ppp_warn("Peer %q failed PAP Session verification", user); + ret = UPAP_AUTHNAK; + } + } + if (secret[0] != 0 && !login_secret) { + /* password given in pap-secrets - must match */ + if ((cryptpap || strcmp(ppp_settings.passwd, secret) != 0) + && strcmp(crypt(ppp_settings.passwd, secret), secret) != 0) + ret = UPAP_AUTHNAK; + } + } + fclose(f); + } + + if (ret == UPAP_AUTHNAK) { + if (**msg == 0) + *msg = "Login incorrect"; + /* + * XXX can we ever get here more than once?? + * Frustrate passwd stealer programs. + * Allow 10 tries, but start backing off after 3 (stolen from login). + * On 10'th, drop the connection. + */ + if (attempts++ >= 10) { + ppp_warn("%d LOGIN FAILURES ON %s, %s", attempts, devnam, user); + lcp_close(pcb, "login failed"); + } + if (attempts > 3) + sleep((u_int) (attempts - 3) * 5); + if (opts != NULL) + free_wordlist(opts); + + } else { + attempts = 0; /* Reset count */ + if (**msg == 0) + *msg = "Login ok"; + set_allowed_addrs(unit, addrs, opts); + } + + if (addrs != NULL) + free_wordlist(addrs); + BZERO(ppp_settings.passwd, sizeof(ppp_settings.passwd)); + BZERO(secret, sizeof(secret)); + + return ret; +} + +/* + * null_login - Check if a username of "" and a password of "" are + * acceptable, and iff so, set the list of acceptable IP addresses + * and return 1. + */ +static int +null_login(unit) + int unit; +{ + char *filename; + FILE *f; + int i, ret; + struct wordlist *addrs, *opts; + char secret[MAXWORDLEN]; + + /* + * Check if a plugin wants to handle this. + */ + ret = -1; + if (null_auth_hook) + ret = (*null_auth_hook)(&addrs, &opts); + + /* + * Open the file of pap secrets and scan for a suitable secret. 
+ */ + if (ret <= 0) { + filename = _PATH_UPAPFILE; + addrs = NULL; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + check_access(f, filename); + + i = scan_authfile(f, "", our_name, secret, &addrs, &opts, filename, 0); + ret = i >= 0 && secret[0] == 0; + BZERO(secret, sizeof(secret)); + fclose(f); + } + + if (ret) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + + return ret; +} + +/* + * get_pap_passwd - get a password for authenticating ourselves with + * our peer using PAP. Returns 1 on success, 0 if no suitable password + * could be found. + * Assumes passwd points to MAXSECRETLEN bytes of space (if non-null). + */ +static int +get_pap_passwd(passwd) + char *passwd; +{ + char *filename; + FILE *f; + int ret; + char secret[MAXWORDLEN]; + + /* + * Check whether a plugin wants to supply this. + */ + if (pap_passwd_hook) { + ret = (*pap_passwd_hook)(ppp_settings,user, ppp_settings.passwd); + if (ret >= 0) + return ret; + } + + filename = _PATH_UPAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + check_access(f, filename); + ret = scan_authfile(f, user, + (remote_name[0]? remote_name: NULL), + secret, NULL, NULL, filename, 0); + fclose(f); + if (ret < 0) + return 0; + if (passwd != NULL) + strlcpy(passwd, secret, MAXSECRETLEN); + BZERO(secret, sizeof(secret)); + return 1; +} + +/* + * have_pap_secret - check whether we have a PAP file with any + * secrets that we could possibly use for authenticating the peer. + */ +static int +have_pap_secret(lacks_ipp) + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + /* let the plugin decide, if there is one */ + if (pap_check_hook) { + ret = (*pap_check_hook)(); + if (ret >= 0) + return ret; + } + + filename = _PATH_UPAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + ret = scan_authfile(f, (explicit_remote? remote_name: NULL), our_name, + NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} + +/* + * have_chap_secret - check whether we have a CHAP file with a + * secret that we could possibly use for authenticating `client' + * on `server'. Either can be the null string, meaning we don't + * know the identity yet. + */ +static int +have_chap_secret(client, server, need_ip, lacks_ipp) + char *client; + char *server; + int need_ip; + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + if (chap_check_hook) { + ret = (*chap_check_hook)(); + if (ret >= 0) { + return ret; + } + } + + filename = _PATH_CHAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + if (client != NULL && client[0] == 0) + client = NULL; + else if (server != NULL && server[0] == 0) + server = NULL; + + ret = scan_authfile(f, client, server, NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && need_ip && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} + +/* + * have_srp_secret - check whether we have a SRP file with a + * secret that we could possibly use for authenticating `client' + * on `server'. Either can be the null string, meaning we don't + * know the identity yet. 
+ */ +static int +have_srp_secret(client, server, need_ip, lacks_ipp) + char *client; + char *server; + int need_ip; + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + filename = _PATH_SRPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + if (client != NULL && client[0] == 0) + client = NULL; + else if (server != NULL && server[0] == 0) + server = NULL; + + ret = scan_authfile(f, client, server, NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && need_ip && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} +#endif /* UNUSED */ + +#if PPP_AUTH_SUPPORT +/* + * get_secret - open the CHAP secret file and return the secret + * for authenticating the given client on the given server. + * (We could be either client or server). + */ +int get_secret(ppp_pcb *pcb, const char *client, const char *server, char *secret, int *secret_len, int am_server) { + int len; + LWIP_UNUSED_ARG(server); + LWIP_UNUSED_ARG(am_server); + + if (!client || !client[0] || !pcb->settings.user || !pcb->settings.passwd || strcmp(client, pcb->settings.user)) { + return 0; + } + + len = (int)strlen(pcb->settings.passwd); + if (len > MAXSECRETLEN) { + ppp_error("Secret for %s on %s is too long", client, server); + len = MAXSECRETLEN; + } + + MEMCPY(secret, pcb->settings.passwd, len); + *secret_len = len; + return 1; + +#if 0 /* UNUSED */ + FILE *f; + int ret, len; + char *filename; + struct wordlist *addrs, *opts; + char secbuf[MAXWORDLEN]; + struct wordlist *addrs; + addrs = NULL; + + if (!am_server && ppp_settings.passwd[0] != 0) { + strlcpy(secbuf, ppp_settings.passwd, sizeof(secbuf)); + } else if (!am_server && chap_passwd_hook) { + if ( (*chap_passwd_hook)(client, secbuf) < 0) { + ppp_error("Unable to obtain CHAP password for %s on %s from plugin", + client, server); + return 0; + } + } else { + filename = _PATH_CHAPFILE; + addrs = NULL; + secbuf[0] = 0; + + f = fopen(filename, "r"); + if (f == NULL) { + ppp_error("Can't open chap secret file %s: %m", filename); + return 0; + } + check_access(f, filename); + + ret = scan_authfile(f, client, server, secbuf, &addrs, &opts, filename, 0); + fclose(f); + if (ret < 0) + return 0; + + if (am_server) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + } + + len = strlen(secbuf); + if (len > MAXSECRETLEN) { + ppp_error("Secret for %s on %s is too long", client, server); + len = MAXSECRETLEN; + } + MEMCPY(secret, secbuf, len); + BZERO(secbuf, sizeof(secbuf)); + *secret_len = len; + + return 1; +#endif /* UNUSED */ +} +#endif /* PPP_AUTH_SUPPORT */ + + +#if 0 /* UNUSED */ +/* + * get_srp_secret - open the SRP secret file and return the secret + * for authenticating the given client on the given server. + * (We could be either client or server). 
+ */ +int +get_srp_secret(unit, client, server, secret, am_server) + int unit; + char *client; + char *server; + char *secret; + int am_server; +{ + FILE *fp; + int ret; + char *filename; + struct wordlist *addrs, *opts; + + if (!am_server && ppp_settings.passwd[0] != '\0') { + strlcpy(secret, ppp_settings.passwd, MAXWORDLEN); + } else { + filename = _PATH_SRPFILE; + addrs = NULL; + + fp = fopen(filename, "r"); + if (fp == NULL) { + ppp_error("Can't open srp secret file %s: %m", filename); + return 0; + } + check_access(fp, filename); + + secret[0] = '\0'; + ret = scan_authfile(fp, client, server, secret, &addrs, &opts, + filename, am_server); + fclose(fp); + if (ret < 0) + return 0; + + if (am_server) + set_allowed_addrs(unit, addrs, opts); + else if (opts != NULL) + free_wordlist(opts); + if (addrs != NULL) + free_wordlist(addrs); + } + + return 1; +} + +/* + * set_allowed_addrs() - set the list of allowed addresses. + * Also looks for `--' indicating options to apply for this peer + * and leaves the following words in extra_options. + */ +static void +set_allowed_addrs(unit, addrs, opts) + int unit; + struct wordlist *addrs; + struct wordlist *opts; +{ + int n; + struct wordlist *ap, **plink; + struct permitted_ip *ip; + char *ptr_word, *ptr_mask; + struct hostent *hp; + struct netent *np; + u32_t a, mask, ah, offset; + struct ipcp_options *wo = &ipcp_wantoptions[unit]; + u32_t suggested_ip = 0; + + if (addresses[unit] != NULL) + free(addresses[unit]); + addresses[unit] = NULL; + if (extra_options != NULL) + free_wordlist(extra_options); + extra_options = opts; + + /* + * Count the number of IP addresses given. + */ + n = wordlist_count(addrs) + wordlist_count(noauth_addrs); + if (n == 0) + return; + ip = (struct permitted_ip *) malloc((n + 1) * sizeof(struct permitted_ip)); + if (ip == 0) + return; + + /* temporarily append the noauth_addrs list to addrs */ + for (plink = &addrs; *plink != NULL; plink = &(*plink)->next) + ; + *plink = noauth_addrs; + + n = 0; + for (ap = addrs; ap != NULL; ap = ap->next) { + /* "-" means no addresses authorized, "*" means any address allowed */ + ptr_word = ap->word; + if (strcmp(ptr_word, "-") == 0) + break; + if (strcmp(ptr_word, "*") == 0) { + ip[n].permit = 1; + ip[n].base = ip[n].mask = 0; + ++n; + break; + } + + ip[n].permit = 1; + if (*ptr_word == '!') { + ip[n].permit = 0; + ++ptr_word; + } + + mask = ~ (u32_t) 0; + offset = 0; + ptr_mask = strchr (ptr_word, '/'); + if (ptr_mask != NULL) { + int bit_count; + char *endp; + + bit_count = (int) strtol (ptr_mask+1, &endp, 10); + if (bit_count <= 0 || bit_count > 32) { + ppp_warn("invalid address length %v in auth. 
address list", + ptr_mask+1); + continue; + } + bit_count = 32 - bit_count; /* # bits in host part */ + if (*endp == '+') { + offset = ifunit + 1; + ++endp; + } + if (*endp != 0) { + ppp_warn("invalid address length syntax: %v", ptr_mask+1); + continue; + } + *ptr_mask = '\0'; + mask <<= bit_count; + } + + hp = gethostbyname(ptr_word); + if (hp != NULL && hp->h_addrtype == AF_INET) { + a = *(u32_t *)hp->h_addr; + } else { + np = getnetbyname (ptr_word); + if (np != NULL && np->n_addrtype == AF_INET) { + a = lwip_htonl ((u32_t)np->n_net); + if (ptr_mask == NULL) { + /* calculate appropriate mask for net */ + ah = lwip_ntohl(a); + if (IN_CLASSA(ah)) + mask = IN_CLASSA_NET; + else if (IN_CLASSB(ah)) + mask = IN_CLASSB_NET; + else if (IN_CLASSC(ah)) + mask = IN_CLASSC_NET; + } + } else { + a = inet_addr (ptr_word); + } + } + + if (ptr_mask != NULL) + *ptr_mask = '/'; + + if (a == (u32_t)-1L) { + ppp_warn("unknown host %s in auth. address list", ap->word); + continue; + } + if (offset != 0) { + if (offset >= ~mask) { + ppp_warn("interface unit %d too large for subnet %v", + ifunit, ptr_word); + continue; + } + a = lwip_htonl((lwip_ntohl(a) & mask) + offset); + mask = ~(u32_t)0; + } + ip[n].mask = lwip_htonl(mask); + ip[n].base = a & ip[n].mask; + ++n; + if (~mask == 0 && suggested_ip == 0) + suggested_ip = a; + } + *plink = NULL; + + ip[n].permit = 0; /* make the last entry forbid all addresses */ + ip[n].base = 0; /* to terminate the list */ + ip[n].mask = 0; + + addresses[unit] = ip; + + /* + * If the address given for the peer isn't authorized, or if + * the user hasn't given one, AND there is an authorized address + * which is a single host, then use that if we find one. + */ + if (suggested_ip != 0 + && (wo->hisaddr == 0 || !auth_ip_addr(unit, wo->hisaddr))) { + wo->hisaddr = suggested_ip; + /* + * Do we insist on this address? No, if there are other + * addresses authorized than the suggested one. + */ + if (n > 1) + wo->accept_remote = 1; + } +} + +/* + * auth_ip_addr - check whether the peer is authorized to use + * a given IP address. Returns 1 if authorized, 0 otherwise. + */ +int +auth_ip_addr(unit, addr) + int unit; + u32_t addr; +{ + int ok; + + /* don't allow loopback or multicast address */ + if (bad_ip_adrs(addr)) + return 0; + + if (allowed_address_hook) { + ok = allowed_address_hook(addr); + if (ok >= 0) return ok; + } + + if (addresses[unit] != NULL) { + ok = ip_addr_check(addr, addresses[unit]); + if (ok >= 0) + return ok; + } + + if (auth_required) + return 0; /* no addresses authorized */ + return allow_any_ip || privileged || !have_route_to(addr); +} + +static int +ip_addr_check(addr, addrs) + u32_t addr; + struct permitted_ip *addrs; +{ + for (; ; ++addrs) + if ((addr & addrs->mask) == addrs->base) + return addrs->permit; +} + +/* + * bad_ip_adrs - return 1 if the IP address is one we don't want + * to use, such as an address in the loopback net or a multicast address. + * addr is in network byte order. + */ +int +bad_ip_adrs(addr) + u32_t addr; +{ + addr = lwip_ntohl(addr); + return (addr >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET + || IN_MULTICAST(addr) || IN_BADCLASS(addr); +} + +/* + * some_ip_ok - check a wordlist to see if it authorizes any + * IP address(es). 
+ */ +static int +some_ip_ok(addrs) + struct wordlist *addrs; +{ + for (; addrs != 0; addrs = addrs->next) { + if (addrs->word[0] == '-') + break; + if (addrs->word[0] != '!') + return 1; /* some IP address is allowed */ + } + return 0; +} + +/* + * auth_number - check whether the remote number is allowed to connect. + * Returns 1 if authorized, 0 otherwise. + */ +int +auth_number() +{ + struct wordlist *wp = permitted_numbers; + int l; + + /* Allow all if no authorization list. */ + if (!wp) + return 1; + + /* Allow if we have a match in the authorization list. */ + while (wp) { + /* trailing '*' wildcard */ + l = strlen(wp->word); + if ((wp->word)[l - 1] == '*') + l--; + if (!strncasecmp(wp->word, remote_number, l)) + return 1; + wp = wp->next; + } + + return 0; +} + +/* + * check_access - complain if a secret file has too-liberal permissions. + */ +static void +check_access(f, filename) + FILE *f; + char *filename; +{ + struct stat sbuf; + + if (fstat(fileno(f), &sbuf) < 0) { + ppp_warn("cannot stat secret file %s: %m", filename); + } else if ((sbuf.st_mode & (S_IRWXG | S_IRWXO)) != 0) { + ppp_warn("Warning - secret file %s has world and/or group access", + filename); + } +} + +/* + * scan_authfile - Scan an authorization file for a secret suitable + * for authenticating `client' on `server'. The return value is -1 + * if no secret is found, otherwise >= 0. The return value has + * NONWILD_CLIENT set if the secret didn't have "*" for the client, and + * NONWILD_SERVER set if the secret didn't have "*" for the server. + * Any following words on the line up to a "--" (i.e. address authorization + * info) are placed in a wordlist and returned in *addrs. Any + * following words (extra options) are placed in a wordlist and + * returned in *opts. + * We assume secret is NULL or points to MAXWORDLEN bytes of space. + * Flags are non-zero if we need two colons in the secret in order to + * match. + */ +static int +scan_authfile(f, client, server, secret, addrs, opts, filename, flags) + FILE *f; + char *client; + char *server; + char *secret; + struct wordlist **addrs; + struct wordlist **opts; + char *filename; + int flags; +{ + int newline, xxx; + int got_flag, best_flag; + FILE *sf; + struct wordlist *ap, *addr_list, *alist, **app; + char word[MAXWORDLEN]; + char atfile[MAXWORDLEN]; + char lsecret[MAXWORDLEN]; + char *cp; + + if (addrs != NULL) + *addrs = NULL; + if (opts != NULL) + *opts = NULL; + addr_list = NULL; + if (!getword(f, word, &newline, filename)) + return -1; /* file is empty??? */ + newline = 1; + best_flag = -1; + for (;;) { + /* + * Skip until we find a word at the start of a line. + */ + while (!newline && getword(f, word, &newline, filename)) + ; + if (!newline) + break; /* got to end of file */ + + /* + * Got a client - check if it's a match or a wildcard. + */ + got_flag = 0; + if (client != NULL && strcmp(word, client) != 0 && !ISWILD(word)) { + newline = 0; + continue; + } + if (!ISWILD(word)) + got_flag = NONWILD_CLIENT; + + /* + * Now get a server and check if it matches. + */ + if (!getword(f, word, &newline, filename)) + break; + if (newline) + continue; + if (!ISWILD(word)) { + if (server != NULL && strcmp(word, server) != 0) + continue; + got_flag |= NONWILD_SERVER; + } + + /* + * Got some sort of a match - see if it's better than what + * we have already. + */ + if (got_flag <= best_flag) + continue; + + /* + * Get the secret. 
+ */ + if (!getword(f, word, &newline, filename)) + break; + if (newline) + continue; + + /* + * SRP-SHA1 authenticator should never be reading secrets from + * a file. (Authenticatee may, though.) + */ + if (flags && ((cp = strchr(word, ':')) == NULL || + strchr(cp + 1, ':') == NULL)) + continue; + + if (secret != NULL) { + /* + * Special syntax: @/pathname means read secret from file. + */ + if (word[0] == '@' && word[1] == '/') { + strlcpy(atfile, word+1, sizeof(atfile)); + if ((sf = fopen(atfile, "r")) == NULL) { + ppp_warn("can't open indirect secret file %s", atfile); + continue; + } + check_access(sf, atfile); + if (!getword(sf, word, &xxx, atfile)) { + ppp_warn("no secret in indirect secret file %s", atfile); + fclose(sf); + continue; + } + fclose(sf); + } + strlcpy(lsecret, word, sizeof(lsecret)); + } + + /* + * Now read address authorization info and make a wordlist. + */ + app = &alist; + for (;;) { + if (!getword(f, word, &newline, filename) || newline) + break; + ap = (struct wordlist *) + malloc(sizeof(struct wordlist) + strlen(word) + 1); + if (ap == NULL) + novm("authorized addresses"); + ap->word = (char *) (ap + 1); + strcpy(ap->word, word); + *app = ap; + app = &ap->next; + } + *app = NULL; + + /* + * This is the best so far; remember it. + */ + best_flag = got_flag; + if (addr_list) + free_wordlist(addr_list); + addr_list = alist; + if (secret != NULL) + strlcpy(secret, lsecret, MAXWORDLEN); + + if (!newline) + break; + } + + /* scan for a -- word indicating the start of options */ + for (app = &addr_list; (ap = *app) != NULL; app = &ap->next) + if (strcmp(ap->word, "--") == 0) + break; + /* ap = start of options */ + if (ap != NULL) { + ap = ap->next; /* first option */ + free(*app); /* free the "--" word */ + *app = NULL; /* terminate addr list */ + } + if (opts != NULL) + *opts = ap; + else if (ap != NULL) + free_wordlist(ap); + if (addrs != NULL) + *addrs = addr_list; + else if (addr_list != NULL) + free_wordlist(addr_list); + + return best_flag; +} + +/* + * wordlist_count - return the number of items in a wordlist + */ +static int +wordlist_count(wp) + struct wordlist *wp; +{ + int n; + + for (n = 0; wp != NULL; wp = wp->next) + ++n; + return n; +} + +/* + * free_wordlist - release memory allocated for a wordlist. + */ +static void +free_wordlist(wp) + struct wordlist *wp; +{ + struct wordlist *next; + + while (wp != NULL) { + next = wp->next; + free(wp); + wp = next; + } +} +#endif /* UNUSED */ + +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c new file mode 100644 index 000000000..f8519ebec --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c @@ -0,0 +1,1740 @@ +/* + * ccp.c - PPP Compression Control Protocol. + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. 
Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CCP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ccp.h" + +#if MPPE_SUPPORT +#include "netif/ppp/lcp.h" /* lcp_close(), lcp_fsm */ +#include "netif/ppp/mppe.h" /* mppe_init() */ +#endif /* MPPE_SUPPORT */ + +/* + * Unfortunately there is a bug in zlib which means that using a + * size of 8 (window size = 256) for Deflate compression will cause + * buffer overruns and kernel crashes in the deflate module. + * Until this is fixed we only accept sizes in the range 9 .. 15. + * Thanks to James Carlson for pointing this out. + */ +#define DEFLATE_MIN_WORKS 9 + +/* + * Command-line options. + */ +#if PPP_OPTIONS +static int setbsdcomp (char **); +static int setdeflate (char **); +static char bsd_value[8]; +static char deflate_value[8]; + +/* + * Option variables. + */ +#if MPPE_SUPPORT +bool refuse_mppe_stateful = 1; /* Allow stateful mode? */ +#endif /* MPPE_SUPPORT */ + +static option_t ccp_option_list[] = { + { "noccp", o_bool, &ccp_protent.enabled_flag, + "Disable CCP negotiation" }, + { "-ccp", o_bool, &ccp_protent.enabled_flag, + "Disable CCP negotiation", OPT_ALIAS }, + + { "bsdcomp", o_special, (void *)setbsdcomp, + "Request BSD-Compress packet compression", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, bsd_value }, + { "nobsdcomp", o_bool, &ccp_wantoptions[0].bsd_compress, + "don't allow BSD-Compress", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].bsd_compress }, + { "-bsdcomp", o_bool, &ccp_wantoptions[0].bsd_compress, + "don't allow BSD-Compress", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].bsd_compress }, + + { "deflate", o_special, (void *)setdeflate, + "request Deflate compression", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, deflate_value }, + { "nodeflate", o_bool, &ccp_wantoptions[0].deflate, + "don't allow Deflate compression", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].deflate }, + { "-deflate", o_bool, &ccp_wantoptions[0].deflate, + "don't allow Deflate compression", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].deflate }, + + { "nodeflatedraft", o_bool, &ccp_wantoptions[0].deflate_draft, + "don't use draft deflate #", OPT_A2COPY, + &ccp_allowoptions[0].deflate_draft }, + + { "predictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "request Predictor-1", OPT_PRIO | 1 }, + { "nopredictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "don't allow Predictor-1", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].predictor_1 }, + { "-predictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "don't allow Predictor-1", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].predictor_1 }, + +#if MPPE_SUPPORT + /* MPPE options are symmetrical ... 
we only set wantoptions here */ + { "require-mppe", o_bool, &ccp_wantoptions[0].mppe, + "require MPPE encryption", + OPT_PRIO | MPPE_OPT_40 | MPPE_OPT_128 }, + { "+mppe", o_bool, &ccp_wantoptions[0].mppe, + "require MPPE encryption", + OPT_ALIAS | OPT_PRIO | MPPE_OPT_40 | MPPE_OPT_128 }, + { "nomppe", o_bool, &ccp_wantoptions[0].mppe, + "don't allow MPPE encryption", OPT_PRIO }, + { "-mppe", o_bool, &ccp_wantoptions[0].mppe, + "don't allow MPPE encryption", OPT_ALIAS | OPT_PRIO }, + + /* We use ccp_allowoptions[0].mppe as a junk var ... it is reset later */ + { "require-mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 40-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + { "+mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 40-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + { "nomppe-40", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 40-bit encryption", + OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_40, &ccp_wantoptions[0].mppe }, + { "-mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 40-bit encryption", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + + { "require-mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 128-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + { "+mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 128-bit encryption", + OPT_ALIAS | OPT_PRIO | OPT_A2OR | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + { "nomppe-128", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 128-bit encryption", + OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_128, &ccp_wantoptions[0].mppe }, + { "-mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 128-bit encryption", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + + /* strange one; we always request stateless, but will we allow stateful? */ + { "mppe-stateful", o_bool, &refuse_mppe_stateful, + "allow MPPE stateful mode", OPT_PRIO }, + { "nomppe-stateful", o_bool, &refuse_mppe_stateful, + "disallow MPPE stateful mode", OPT_PRIO | 1 }, +#endif /* MPPE_SUPPORT */ + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. + */ +static void ccp_init(ppp_pcb *pcb); +static void ccp_open(ppp_pcb *pcb); +static void ccp_close(ppp_pcb *pcb, const char *reason); +static void ccp_lowerup(ppp_pcb *pcb); +static void ccp_lowerdown(ppp_pcb *pcb); +static void ccp_input(ppp_pcb *pcb, u_char *pkt, int len); +static void ccp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int ccp_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT +static void ccp_datainput(ppp_pcb *pcb, u_char *pkt, int len); +#endif /* PPP_DATAINPUT */ + +const struct protent ccp_protent = { + PPP_CCP, + ccp_init, + ccp_input, + ccp_protrej, + ccp_lowerup, + ccp_lowerdown, + ccp_open, + ccp_close, +#if PRINTPKT_SUPPORT + ccp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + ccp_datainput, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "CCP", + "Compressed", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ccp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +/* + * Callbacks for fsm code. 
+ */ +static void ccp_resetci (fsm *); +static int ccp_cilen (fsm *); +static void ccp_addci (fsm *, u_char *, int *); +static int ccp_ackci (fsm *, u_char *, int); +static int ccp_nakci (fsm *, u_char *, int, int); +static int ccp_rejci (fsm *, u_char *, int); +static int ccp_reqci (fsm *, u_char *, int *, int); +static void ccp_up (fsm *); +static void ccp_down (fsm *); +static int ccp_extcode (fsm *, int, int, u_char *, int); +static void ccp_rack_timeout (void *); +static const char *method_name (ccp_options *, ccp_options *); + +static const fsm_callbacks ccp_callbacks = { + ccp_resetci, + ccp_cilen, + ccp_addci, + ccp_ackci, + ccp_nakci, + ccp_rejci, + ccp_reqci, + ccp_up, + ccp_down, + NULL, + NULL, + NULL, + NULL, + ccp_extcode, + "CCP" +}; + +/* + * Do we want / did we get any compression? + */ +static int ccp_anycompress(ccp_options *opt) { + return (0 +#if DEFLATE_SUPPORT + || (opt)->deflate +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + || (opt)->bsd_compress +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + || (opt)->predictor_1 || (opt)->predictor_2 +#endif /* PREDICTOR_SUPPORT */ +#if MPPE_SUPPORT + || (opt)->mppe +#endif /* MPPE_SUPPORT */ + ); +} + +/* + * Local state (mainly for handling reset-reqs and reset-acks). + */ +#define RACK_PENDING 1 /* waiting for reset-ack */ +#define RREQ_REPEAT 2 /* send another reset-req if no reset-ack */ + +#define RACKTIMEOUT 1 /* second */ + +#if PPP_OPTIONS +/* + * Option parsing + */ +static int +setbsdcomp(argv) + char **argv; +{ + int rbits, abits; + char *str, *endp; + + str = *argv; + abits = rbits = strtol(str, &endp, 0); + if (endp != str && *endp == ',') { + str = endp + 1; + abits = strtol(str, &endp, 0); + } + if (*endp != 0 || endp == str) { + option_error("invalid parameter '%s' for bsdcomp option", *argv); + return 0; + } + if ((rbits != 0 && (rbits < BSD_MIN_BITS || rbits > BSD_MAX_BITS)) + || (abits != 0 && (abits < BSD_MIN_BITS || abits > BSD_MAX_BITS))) { + option_error("bsdcomp option values must be 0 or %d .. %d", + BSD_MIN_BITS, BSD_MAX_BITS); + return 0; + } + if (rbits > 0) { + ccp_wantoptions[0].bsd_compress = 1; + ccp_wantoptions[0].bsd_bits = rbits; + } else + ccp_wantoptions[0].bsd_compress = 0; + if (abits > 0) { + ccp_allowoptions[0].bsd_compress = 1; + ccp_allowoptions[0].bsd_bits = abits; + } else + ccp_allowoptions[0].bsd_compress = 0; + ppp_slprintf(bsd_value, sizeof(bsd_value), + rbits == abits? "%d": "%d,%d", rbits, abits); + + return 1; +} + +static int +setdeflate(argv) + char **argv; +{ + int rbits, abits; + char *str, *endp; + + str = *argv; + abits = rbits = strtol(str, &endp, 0); + if (endp != str && *endp == ',') { + str = endp + 1; + abits = strtol(str, &endp, 0); + } + if (*endp != 0 || endp == str) { + option_error("invalid parameter '%s' for deflate option", *argv); + return 0; + } + if ((rbits != 0 && (rbits < DEFLATE_MIN_SIZE || rbits > DEFLATE_MAX_SIZE)) + || (abits != 0 && (abits < DEFLATE_MIN_SIZE + || abits > DEFLATE_MAX_SIZE))) { + option_error("deflate option values must be 0 or %d .. 
%d", + DEFLATE_MIN_SIZE, DEFLATE_MAX_SIZE); + return 0; + } + if (rbits == DEFLATE_MIN_SIZE || abits == DEFLATE_MIN_SIZE) { + if (rbits == DEFLATE_MIN_SIZE) + rbits = DEFLATE_MIN_WORKS; + if (abits == DEFLATE_MIN_SIZE) + abits = DEFLATE_MIN_WORKS; + warn("deflate option value of %d changed to %d to avoid zlib bug", + DEFLATE_MIN_SIZE, DEFLATE_MIN_WORKS); + } + if (rbits > 0) { + ccp_wantoptions[0].deflate = 1; + ccp_wantoptions[0].deflate_size = rbits; + } else + ccp_wantoptions[0].deflate = 0; + if (abits > 0) { + ccp_allowoptions[0].deflate = 1; + ccp_allowoptions[0].deflate_size = abits; + } else + ccp_allowoptions[0].deflate = 0; + ppp_slprintf(deflate_value, sizeof(deflate_value), + rbits == abits? "%d": "%d,%d", rbits, abits); + + return 1; +} +#endif /* PPP_OPTIONS */ + +/* + * ccp_init - initialize CCP. + */ +static void ccp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + + f->pcb = pcb; + f->protocol = PPP_CCP; + f->callbacks = &ccp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(go, 0, sizeof(*go)); + memset(ao, 0, sizeof(*ao)); + memset(ho, 0, sizeof(*ho)); +#endif /* 0 */ + +#if DEFLATE_SUPPORT + wo->deflate = 1; + wo->deflate_size = DEFLATE_MAX_SIZE; + wo->deflate_correct = 1; + wo->deflate_draft = 1; + ao->deflate = 1; + ao->deflate_size = DEFLATE_MAX_SIZE; + ao->deflate_correct = 1; + ao->deflate_draft = 1; +#endif /* DEFLATE_SUPPORT */ + +#if BSDCOMPRESS_SUPPORT + wo->bsd_compress = 1; + wo->bsd_bits = BSD_MAX_BITS; + ao->bsd_compress = 1; + ao->bsd_bits = BSD_MAX_BITS; +#endif /* BSDCOMPRESS_SUPPORT */ + +#if PREDICTOR_SUPPORT + ao->predictor_1 = 1; +#endif /* PREDICTOR_SUPPORT */ +} + +/* + * ccp_open - CCP is allowed to come up. + */ +static void ccp_open(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + ccp_options *go = &pcb->ccp_gotoptions; + + if (f->state != PPP_FSM_OPENED) + ccp_set(pcb, 1, 0, 0, 0); + + /* + * Find out which compressors the kernel supports before + * deciding whether to open in silent mode. + */ + ccp_resetci(f); + if (!ccp_anycompress(go)) + f->flags |= OPT_SILENT; + + fsm_open(f); +} + +/* + * ccp_close - Terminate CCP. + */ +static void ccp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->ccp_fsm; + ccp_set(pcb, 0, 0, 0, 0); + fsm_close(f, reason); +} + +/* + * ccp_lowerup - we may now transmit CCP packets. + */ +static void ccp_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + fsm_lowerup(f); +} + +/* + * ccp_lowerdown - we may not transmit CCP packets. + */ +static void ccp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + fsm_lowerdown(f); +} + +/* + * ccp_input - process a received CCP packet. + */ +static void ccp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->ccp_fsm; + ccp_options *go = &pcb->ccp_gotoptions; + int oldstate; + + /* + * Check for a terminate-request so we can print a message. + */ + oldstate = f->state; + fsm_input(f, p, len); + if (oldstate == PPP_FSM_OPENED && p[0] == TERMREQ && f->state != PPP_FSM_OPENED) { + ppp_notice("Compression disabled by peer."); +#if MPPE_SUPPORT + if (go->mppe) { + ppp_error("MPPE disabled, closing LCP"); + lcp_close(pcb, "MPPE disabled by peer"); + } +#endif /* MPPE_SUPPORT */ + } + + /* + * If we get a terminate-ack and we're not asking for compression, + * close CCP. + */ + if (oldstate == PPP_FSM_REQSENT && p[0] == TERMACK + && !ccp_anycompress(go)) + ccp_close(pcb, "No compression negotiated"); +} + +/* + * Handle a CCP-specific code. 
+ */ +static int ccp_extcode(fsm *f, int code, int id, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(len); + + switch (code) { + case CCP_RESETREQ: + if (f->state != PPP_FSM_OPENED) + break; + ccp_reset_comp(pcb); + /* send a reset-ack, which the transmitter will see and + reset its compression state. */ + fsm_sdata(f, CCP_RESETACK, id, NULL, 0); + break; + + case CCP_RESETACK: + if ((pcb->ccp_localstate & RACK_PENDING) && id == f->reqid) { + pcb->ccp_localstate &= ~(RACK_PENDING | RREQ_REPEAT); + UNTIMEOUT(ccp_rack_timeout, f); + ccp_reset_decomp(pcb); + } + break; + + default: + return 0; + } + + return 1; +} + +/* + * ccp_protrej - peer doesn't talk CCP. + */ +static void ccp_protrej(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + + ccp_set(pcb, 0, 0, 0, 0); + fsm_lowerdown(f); + +#if MPPE_SUPPORT + if (go->mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } +#endif /* MPPE_SUPPORT */ + +} + +/* + * ccp_resetci - initialize at start of negotiation. + */ +static void ccp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options *wo = &pcb->ccp_wantoptions; +#if MPPE_SUPPORT + ccp_options *ao = &pcb->ccp_allowoptions; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT + u_char opt_buf[CCP_MAX_OPTION_LENGTH]; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT */ +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT + int res; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT */ + +#if MPPE_SUPPORT + if (pcb->settings.require_mppe) { + wo->mppe = ao->mppe = + (pcb->settings.refuse_mppe_40 ? 0 : MPPE_OPT_40) + | (pcb->settings.refuse_mppe_128 ? 0 : MPPE_OPT_128); + } +#endif /* MPPE_SUPPORT */ + + *go = *wo; + pcb->ccp_all_rejected = 0; + +#if MPPE_SUPPORT + if (go->mppe) { + int auth_mschap_bits = pcb->auth_done; + int numbits; + + /* + * Start with a basic sanity check: mschap[v2] auth must be in + * exactly one direction. RFC 3079 says that the keys are + * 'derived from the credentials of the peer that initiated the call', + * however the PPP protocol doesn't have such a concept, and pppd + * cannot get this info externally. Instead we do the best we can. + * NB: If MPPE is required, all other compression opts are invalid. + * So, we return right away if we can't do it. + */ + + /* Leave only the mschap auth bits set */ + auth_mschap_bits &= (CHAP_MS_WITHPEER | CHAP_MS_PEER | + CHAP_MS2_WITHPEER | CHAP_MS2_PEER); + /* Count the mschap auths */ + auth_mschap_bits >>= CHAP_MS_SHIFT; + numbits = 0; + do { + numbits += auth_mschap_bits & 1; + auth_mschap_bits >>= 1; + } while (auth_mschap_bits); + if (numbits > 1) { + ppp_error("MPPE required, but auth done in both directions."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + if (!numbits) { + ppp_error("MPPE required, but MS-CHAP[v2] auth not performed."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* A plugin (eg radius) may not have obtained key material. */ + if (!pcb->mppe_keys_set) { + ppp_error("MPPE required, but keys are not available. 
" + "Possible plugin problem?"); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* LM auth not supported for MPPE */ + if (pcb->auth_done & (CHAP_MS_WITHPEER | CHAP_MS_PEER)) { + /* This might be noise */ + if (go->mppe & MPPE_OPT_40) { + ppp_notice("Disabling 40-bit MPPE; MS-CHAP LM not supported"); + go->mppe &= ~MPPE_OPT_40; + wo->mppe &= ~MPPE_OPT_40; + } + } + + /* Last check: can we actually negotiate something? */ + if (!(go->mppe & (MPPE_OPT_40 | MPPE_OPT_128))) { + /* Could be misconfig, could be 40-bit disabled above. */ + ppp_error("MPPE required, but both 40-bit and 128-bit disabled."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* sync options */ + ao->mppe = go->mppe; + /* MPPE is not compatible with other compression types */ +#if BSDCOMPRESS_SUPPORT + ao->bsd_compress = go->bsd_compress = 0; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + ao->predictor_1 = go->predictor_1 = 0; + ao->predictor_2 = go->predictor_2 = 0; +#endif /* PREDICTOR_SUPPORT */ +#if DEFLATE_SUPPORT + ao->deflate = go->deflate = 0; +#endif /* DEFLATE_SUPPORT */ + } +#endif /* MPPE_SUPPORT */ + + /* + * Check whether the kernel knows about the various + * compression methods we might request. + */ +#if BSDCOMPRESS_SUPPORT + /* FIXME: we don't need to test if BSD compress is available + * if BSDCOMPRESS_SUPPORT is set, it is. + */ + if (go->bsd_compress) { + opt_buf[0] = CI_BSD_COMPRESS; + opt_buf[1] = CILEN_BSD_COMPRESS; + for (;;) { + if (go->bsd_bits < BSD_MIN_BITS) { + go->bsd_compress = 0; + break; + } + opt_buf[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits); + res = ccp_test(pcb, opt_buf, CILEN_BSD_COMPRESS, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->bsd_compress = 0; + break; + } + go->bsd_bits--; + } + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + /* FIXME: we don't need to test if deflate is available + * if DEFLATE_SUPPORT is set, it is. + */ + if (go->deflate) { + if (go->deflate_correct) { + opt_buf[0] = CI_DEFLATE; + opt_buf[1] = CILEN_DEFLATE; + opt_buf[3] = DEFLATE_CHK_SEQUENCE; + for (;;) { + if (go->deflate_size < DEFLATE_MIN_WORKS) { + go->deflate_correct = 0; + break; + } + opt_buf[2] = DEFLATE_MAKE_OPT(go->deflate_size); + res = ccp_test(pcb, opt_buf, CILEN_DEFLATE, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->deflate_correct = 0; + break; + } + go->deflate_size--; + } + } + if (go->deflate_draft) { + opt_buf[0] = CI_DEFLATE_DRAFT; + opt_buf[1] = CILEN_DEFLATE; + opt_buf[3] = DEFLATE_CHK_SEQUENCE; + for (;;) { + if (go->deflate_size < DEFLATE_MIN_WORKS) { + go->deflate_draft = 0; + break; + } + opt_buf[2] = DEFLATE_MAKE_OPT(go->deflate_size); + res = ccp_test(pcb, opt_buf, CILEN_DEFLATE, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->deflate_draft = 0; + break; + } + go->deflate_size--; + } + } + if (!go->deflate_correct && !go->deflate_draft) + go->deflate = 0; + } +#endif /* DEFLATE_SUPPORT */ +#if PREDICTOR_SUPPORT + /* FIXME: we don't need to test if predictor is available, + * if PREDICTOR_SUPPORT is set, it is. 
+ */ + if (go->predictor_1) { + opt_buf[0] = CI_PREDICTOR_1; + opt_buf[1] = CILEN_PREDICTOR_1; + if (ccp_test(pcb, opt_buf, CILEN_PREDICTOR_1, 0) <= 0) + go->predictor_1 = 0; + } + if (go->predictor_2) { + opt_buf[0] = CI_PREDICTOR_2; + opt_buf[1] = CILEN_PREDICTOR_2; + if (ccp_test(pcb, opt_buf, CILEN_PREDICTOR_2, 0) <= 0) + go->predictor_2 = 0; + } +#endif /* PREDICTOR_SUPPORT */ +} + +/* + * ccp_cilen - Return total length of our configuration info. + */ +static int ccp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + + return 0 +#if BSDCOMPRESS_SUPPORT + + (go->bsd_compress? CILEN_BSD_COMPRESS: 0) +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + + (go->deflate && go->deflate_correct? CILEN_DEFLATE: 0) + + (go->deflate && go->deflate_draft? CILEN_DEFLATE: 0) +#endif /* DEFLATE_SUPPORT */ +#if PREDICTOR_SUPPORT + + (go->predictor_1? CILEN_PREDICTOR_1: 0) + + (go->predictor_2? CILEN_PREDICTOR_2: 0) +#endif /* PREDICTOR_SUPPORT */ +#if MPPE_SUPPORT + + (go->mppe? CILEN_MPPE: 0) +#endif /* MPPE_SUPPORT */ + ; +} + +/* + * ccp_addci - put our requests in a packet. + */ +static void ccp_addci(fsm *f, u_char *p, int *lenp) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + u_char *p0 = p; + + /* + * Add the compression types that we can receive, in decreasing + * preference order. + */ +#if MPPE_SUPPORT + if (go->mppe) { + p[0] = CI_MPPE; + p[1] = CILEN_MPPE; + MPPE_OPTS_TO_CI(go->mppe, &p[2]); + mppe_init(pcb, &pcb->mppe_decomp, go->mppe); + p += CILEN_MPPE; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate) { + if (go->deflate_correct) { + p[0] = CI_DEFLATE; + p[1] = CILEN_DEFLATE; + p[2] = DEFLATE_MAKE_OPT(go->deflate_size); + p[3] = DEFLATE_CHK_SEQUENCE; + p += CILEN_DEFLATE; + } + if (go->deflate_draft) { + p[0] = CI_DEFLATE_DRAFT; + p[1] = CILEN_DEFLATE; + p[2] = p[2 - CILEN_DEFLATE]; + p[3] = DEFLATE_CHK_SEQUENCE; + p += CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress) { + p[0] = CI_BSD_COMPRESS; + p[1] = CILEN_BSD_COMPRESS; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits); + p += CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + /* XXX Should Predictor 2 be preferable to Predictor 1? */ + if (go->predictor_1) { + p[0] = CI_PREDICTOR_1; + p[1] = CILEN_PREDICTOR_1; + p += CILEN_PREDICTOR_1; + } + if (go->predictor_2) { + p[0] = CI_PREDICTOR_2; + p[1] = CILEN_PREDICTOR_2; + p += CILEN_PREDICTOR_2; + } +#endif /* PREDICTOR_SUPPORT */ + + go->method = (p > p0)? p0[0]: 0; + + *lenp = p - p0; +} + +/* + * ccp_ackci - process a received configure-ack, and return + * 1 iff the packet was OK. + */ +static int ccp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; +#if BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT + u_char *p0 = p; +#endif /* BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT */ + +#if MPPE_SUPPORT + if (go->mppe) { + u_char opt_buf[CILEN_MPPE]; + + opt_buf[0] = CI_MPPE; + opt_buf[1] = CILEN_MPPE; + MPPE_OPTS_TO_CI(go->mppe, &opt_buf[2]); + if (len < CILEN_MPPE || memcmp(opt_buf, p, CILEN_MPPE)) + return 0; + p += CILEN_MPPE; + len -= CILEN_MPPE; + /* XXX Cope with first/fast ack */ + if (len == 0) + return 1; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate) { + if (len < CILEN_DEFLATE + || p[0] != (go->deflate_correct? 
CI_DEFLATE: CI_DEFLATE_DRAFT) + || p[1] != CILEN_DEFLATE + || p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + /* XXX Cope with first/fast ack */ + if (len == 0) + return 1; + if (go->deflate_correct && go->deflate_draft) { + if (len < CILEN_DEFLATE + || p[0] != CI_DEFLATE_DRAFT + || p[1] != CILEN_DEFLATE + || p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress) { + if (len < CILEN_BSD_COMPRESS + || p[0] != CI_BSD_COMPRESS || p[1] != CILEN_BSD_COMPRESS + || p[2] != BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits)) + return 0; + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + if (go->predictor_1) { + if (len < CILEN_PREDICTOR_1 + || p[0] != CI_PREDICTOR_1 || p[1] != CILEN_PREDICTOR_1) + return 0; + p += CILEN_PREDICTOR_1; + len -= CILEN_PREDICTOR_1; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } + if (go->predictor_2) { + if (len < CILEN_PREDICTOR_2 + || p[0] != CI_PREDICTOR_2 || p[1] != CILEN_PREDICTOR_2) + return 0; + p += CILEN_PREDICTOR_2; + len -= CILEN_PREDICTOR_2; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } +#endif /* PREDICTOR_SUPPORT */ + + if (len != 0) + return 0; + return 1; +} + +/* + * ccp_nakci - process received configure-nak. + * Returns 1 iff the nak was OK. + */ +static int ccp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options no; /* options we've seen already */ + ccp_options try_; /* options to ask for next time */ + LWIP_UNUSED_ARG(treat_as_reject); +#if !MPPE_SUPPORT && !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(len); +#endif /* !MPPE_SUPPORT && !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT */ + + memset(&no, 0, sizeof(no)); + try_ = *go; + +#if MPPE_SUPPORT + if (go->mppe && len >= CILEN_MPPE + && p[0] == CI_MPPE && p[1] == CILEN_MPPE) { + no.mppe = 1; + /* + * Peer wants us to use a different strength or other setting. + * Fail if we aren't willing to use his suggestion. + */ + MPPE_CI_TO_OPTS(&p[2], try_.mppe); + if ((try_.mppe & MPPE_OPT_STATEFUL) && pcb->settings.refuse_mppe_stateful) { + ppp_error("Refusing MPPE stateful mode offered by peer"); + try_.mppe = 0; + } else if (((go->mppe | MPPE_OPT_STATEFUL) & try_.mppe) != try_.mppe) { + /* Peer must have set options we didn't request (suggest) */ + try_.mppe = 0; + } + + if (!try_.mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate && len >= CILEN_DEFLATE + && p[0] == (go->deflate_correct? CI_DEFLATE: CI_DEFLATE_DRAFT) + && p[1] == CILEN_DEFLATE) { + no.deflate = 1; + /* + * Peer wants us to use a different code size or something. + * Stop asking for Deflate if we don't understand his suggestion. 
+ */ + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL + || DEFLATE_SIZE(p[2]) < DEFLATE_MIN_WORKS + || p[3] != DEFLATE_CHK_SEQUENCE) + try_.deflate = 0; + else if (DEFLATE_SIZE(p[2]) < go->deflate_size) + try_.deflate_size = DEFLATE_SIZE(p[2]); + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + if (go->deflate_correct && go->deflate_draft + && len >= CILEN_DEFLATE && p[0] == CI_DEFLATE_DRAFT + && p[1] == CILEN_DEFLATE) { + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress && len >= CILEN_BSD_COMPRESS + && p[0] == CI_BSD_COMPRESS && p[1] == CILEN_BSD_COMPRESS) { + no.bsd_compress = 1; + /* + * Peer wants us to use a different number of bits + * or a different version. + */ + if (BSD_VERSION(p[2]) != BSD_CURRENT_VERSION) + try_.bsd_compress = 0; + else if (BSD_NBITS(p[2]) < go->bsd_bits) + try_.bsd_bits = BSD_NBITS(p[2]); + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ + + /* + * Predictor-1 and 2 have no options, so they can't be Naked. + * + * There may be remaining options but we ignore them. + */ + + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; +} + +/* + * ccp_rejci - reject some of our suggested compression methods. + */ +static int ccp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options try_; /* options to request next time */ + + try_ = *go; + + /* + * Cope with empty configure-rejects by ceasing to send + * configure-requests. + */ + if (len == 0 && pcb->ccp_all_rejected) + return -1; + +#if MPPE_SUPPORT + if (go->mppe && len >= CILEN_MPPE + && p[0] == CI_MPPE && p[1] == CILEN_MPPE) { + ppp_error("MPPE required but peer refused"); + lcp_close(pcb, "MPPE required but peer refused"); + p += CILEN_MPPE; + len -= CILEN_MPPE; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate_correct && len >= CILEN_DEFLATE + && p[0] == CI_DEFLATE && p[1] == CILEN_DEFLATE) { + if (p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; /* Rej is bad */ + try_.deflate_correct = 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + if (go->deflate_draft && len >= CILEN_DEFLATE + && p[0] == CI_DEFLATE_DRAFT && p[1] == CILEN_DEFLATE) { + if (p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; /* Rej is bad */ + try_.deflate_draft = 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + if (!try_.deflate_correct && !try_.deflate_draft) + try_.deflate = 0; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress && len >= CILEN_BSD_COMPRESS + && p[0] == CI_BSD_COMPRESS && p[1] == CILEN_BSD_COMPRESS) { + if (p[2] != BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits)) + return 0; + try_.bsd_compress = 0; + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + if (go->predictor_1 && len >= CILEN_PREDICTOR_1 + && p[0] == CI_PREDICTOR_1 && p[1] == CILEN_PREDICTOR_1) { + try_.predictor_1 = 0; + p += CILEN_PREDICTOR_1; + len -= CILEN_PREDICTOR_1; + } + if (go->predictor_2 && len >= CILEN_PREDICTOR_2 + && p[0] == CI_PREDICTOR_2 && p[1] == CILEN_PREDICTOR_2) { + try_.predictor_2 = 0; + p += CILEN_PREDICTOR_2; + len -= CILEN_PREDICTOR_2; + } +#endif /* PREDICTOR_SUPPORT */ + + if (len != 0) + return 0; + + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; +} + +/* + * ccp_reqci - processed a received configure-request. 
+ * Returns CONFACK, CONFNAK or CONFREJ and the packet modified + * appropriately. + */ +static int ccp_reqci(fsm *f, u_char *p, int *lenp, int dont_nak) { + ppp_pcb *pcb = f->pcb; + ccp_options *ho = &pcb->ccp_hisoptions; + ccp_options *ao = &pcb->ccp_allowoptions; + int ret, newret; +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT + int res; + int nb; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT */ + u_char *p0, *retp; + int len, clen, type; +#if MPPE_SUPPORT + u8_t rej_for_ci_mppe = 1; /* Are we rejecting based on a bad/missing */ + /* CI_MPPE, or due to other options? */ +#endif /* MPPE_SUPPORT */ + + ret = CONFACK; + retp = p0 = p; + len = *lenp; + + memset(ho, 0, sizeof(ccp_options)); + ho->method = (len > 0)? p[0]: 0; + + while (len > 0) { + newret = CONFACK; + if (len < 2 || p[1] < 2 || p[1] > len) { + /* length is bad */ + clen = len; + newret = CONFREJ; + + } else { + type = p[0]; + clen = p[1]; + + switch (type) { +#if MPPE_SUPPORT + case CI_MPPE: + if (!ao->mppe || clen != CILEN_MPPE) { + newret = CONFREJ; + break; + } + MPPE_CI_TO_OPTS(&p[2], ho->mppe); + + /* Nak if anything unsupported or unknown are set. */ + if (ho->mppe & MPPE_OPT_UNSUPPORTED) { + newret = CONFNAK; + ho->mppe &= ~MPPE_OPT_UNSUPPORTED; + } + if (ho->mppe & MPPE_OPT_UNKNOWN) { + newret = CONFNAK; + ho->mppe &= ~MPPE_OPT_UNKNOWN; + } + + /* Check state opt */ + if (ho->mppe & MPPE_OPT_STATEFUL) { + /* + * We can Nak and request stateless, but it's a + * lot easier to just assume the peer will request + * it if he can do it; stateful mode is bad over + * the Internet -- which is where we expect MPPE. + */ + if (pcb->settings.refuse_mppe_stateful) { + ppp_error("Refusing MPPE stateful mode offered by peer"); + newret = CONFREJ; + break; + } + } + + /* Find out which of {S,L} are set. */ + if ((ho->mppe & MPPE_OPT_128) + && (ho->mppe & MPPE_OPT_40)) { + /* Both are set, negotiate the strongest. */ + newret = CONFNAK; + if (ao->mppe & MPPE_OPT_128) + ho->mppe &= ~MPPE_OPT_40; + else if (ao->mppe & MPPE_OPT_40) + ho->mppe &= ~MPPE_OPT_128; + else { + newret = CONFREJ; + break; + } + } else if (ho->mppe & MPPE_OPT_128) { + if (!(ao->mppe & MPPE_OPT_128)) { + newret = CONFREJ; + break; + } + } else if (ho->mppe & MPPE_OPT_40) { + if (!(ao->mppe & MPPE_OPT_40)) { + newret = CONFREJ; + break; + } + } else { + /* Neither are set. */ + /* We cannot accept this. */ + newret = CONFNAK; + /* Give the peer our idea of what can be used, + so it can choose and confirm */ + ho->mppe = ao->mppe; + } + + /* rebuild the opts */ + MPPE_OPTS_TO_CI(ho->mppe, &p[2]); + if (newret == CONFACK) { + int mtu; + + mppe_init(pcb, &pcb->mppe_comp, ho->mppe); + /* + * We need to decrease the interface MTU by MPPE_PAD + * because MPPE frames **grow**. The kernel [must] + * allocate MPPE_PAD extra bytes in xmit buffers. + */ + mtu = netif_get_mtu(pcb); + if (mtu) + netif_set_mtu(pcb, mtu - MPPE_PAD); + else + newret = CONFREJ; + } + + /* + * We have accepted MPPE or are willing to negotiate + * MPPE parameters. A CONFREJ is due to subsequent + * (non-MPPE) processing. 
+ */ + rej_for_ci_mppe = 0; + break; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (!ao->deflate || clen != CILEN_DEFLATE + || (!ao->deflate_correct && type == CI_DEFLATE) + || (!ao->deflate_draft && type == CI_DEFLATE_DRAFT)) { + newret = CONFREJ; + break; + } + + ho->deflate = 1; + ho->deflate_size = nb = DEFLATE_SIZE(p[2]); + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL + || p[3] != DEFLATE_CHK_SEQUENCE + || nb > ao->deflate_size || nb < DEFLATE_MIN_WORKS) { + newret = CONFNAK; + if (!dont_nak) { + p[2] = DEFLATE_MAKE_OPT(ao->deflate_size); + p[3] = DEFLATE_CHK_SEQUENCE; + /* fall through to test this #bits below */ + } else + break; + } + + /* + * Check whether we can do Deflate with the window + * size they want. If the window is too big, reduce + * it until the kernel can cope and nak with that. + * We only check this for the first option. + */ + if (p == p0) { + for (;;) { + res = ccp_test(pcb, p, CILEN_DEFLATE, 1); + if (res > 0) + break; /* it's OK now */ + if (res < 0 || nb == DEFLATE_MIN_WORKS || dont_nak) { + newret = CONFREJ; + p[2] = DEFLATE_MAKE_OPT(ho->deflate_size); + break; + } + newret = CONFNAK; + --nb; + p[2] = DEFLATE_MAKE_OPT(nb); + } + } + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (!ao->bsd_compress || clen != CILEN_BSD_COMPRESS) { + newret = CONFREJ; + break; + } + + ho->bsd_compress = 1; + ho->bsd_bits = nb = BSD_NBITS(p[2]); + if (BSD_VERSION(p[2]) != BSD_CURRENT_VERSION + || nb > ao->bsd_bits || nb < BSD_MIN_BITS) { + newret = CONFNAK; + if (!dont_nak) { + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, ao->bsd_bits); + /* fall through to test this #bits below */ + } else + break; + } + + /* + * Check whether we can do BSD-Compress with the code + * size they want. If the code size is too big, reduce + * it until the kernel can cope and nak with that. + * We only check this for the first option. 
+ */ + if (p == p0) { + for (;;) { + res = ccp_test(pcb, p, CILEN_BSD_COMPRESS, 1); + if (res > 0) + break; + if (res < 0 || nb == BSD_MIN_BITS || dont_nak) { + newret = CONFREJ; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, + ho->bsd_bits); + break; + } + newret = CONFNAK; + --nb; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, nb); + } + } + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + if (!ao->predictor_1 || clen != CILEN_PREDICTOR_1) { + newret = CONFREJ; + break; + } + + ho->predictor_1 = 1; + if (p == p0 + && ccp_test(pcb, p, CILEN_PREDICTOR_1, 1) <= 0) { + newret = CONFREJ; + } + break; + + case CI_PREDICTOR_2: + if (!ao->predictor_2 || clen != CILEN_PREDICTOR_2) { + newret = CONFREJ; + break; + } + + ho->predictor_2 = 1; + if (p == p0 + && ccp_test(pcb, p, CILEN_PREDICTOR_2, 1) <= 0) { + newret = CONFREJ; + } + break; +#endif /* PREDICTOR_SUPPORT */ + + default: + newret = CONFREJ; + } + } + + if (newret == CONFNAK && dont_nak) + newret = CONFREJ; + if (!(newret == CONFACK || (newret == CONFNAK && ret == CONFREJ))) { + /* we're returning this option */ + if (newret == CONFREJ && ret == CONFNAK) + retp = p0; + ret = newret; + if (p != retp) + MEMCPY(retp, p, clen); + retp += clen; + } + + p += clen; + len -= clen; + } + + if (ret != CONFACK) { + if (ret == CONFREJ && *lenp == retp - p0) + pcb->ccp_all_rejected = 1; + else + *lenp = retp - p0; + } +#if MPPE_SUPPORT + if (ret == CONFREJ && ao->mppe && rej_for_ci_mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } +#endif /* MPPE_SUPPORT */ + return ret; +} + +/* + * Make a string name for a compression method (or 2). + */ +static const char *method_name(ccp_options *opt, ccp_options *opt2) { + static char result[64]; +#if !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT + LWIP_UNUSED_ARG(opt2); +#endif /* !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT */ + + if (!ccp_anycompress(opt)) + return "(none)"; + switch (opt->method) { +#if MPPE_SUPPORT + case CI_MPPE: + { + char *p = result; + char *q = result + sizeof(result); /* 1 past result */ + + ppp_slprintf(p, q - p, "MPPE "); + p += 5; + if (opt->mppe & MPPE_OPT_128) { + ppp_slprintf(p, q - p, "128-bit "); + p += 8; + } + if (opt->mppe & MPPE_OPT_40) { + ppp_slprintf(p, q - p, "40-bit "); + p += 7; + } + if (opt->mppe & MPPE_OPT_STATEFUL) + ppp_slprintf(p, q - p, "stateful"); + else + ppp_slprintf(p, q - p, "stateless"); + + break; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (opt2 != NULL && opt2->deflate_size != opt->deflate_size) + ppp_slprintf(result, sizeof(result), "Deflate%s (%d/%d)", + (opt->method == CI_DEFLATE_DRAFT? "(old#)": ""), + opt->deflate_size, opt2->deflate_size); + else + ppp_slprintf(result, sizeof(result), "Deflate%s (%d)", + (opt->method == CI_DEFLATE_DRAFT? 
"(old#)": ""), + opt->deflate_size); + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (opt2 != NULL && opt2->bsd_bits != opt->bsd_bits) + ppp_slprintf(result, sizeof(result), "BSD-Compress (%d/%d)", + opt->bsd_bits, opt2->bsd_bits); + else + ppp_slprintf(result, sizeof(result), "BSD-Compress (%d)", + opt->bsd_bits); + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + return "Predictor 1"; + case CI_PREDICTOR_2: + return "Predictor 2"; +#endif /* PREDICTOR_SUPPORT */ + default: + ppp_slprintf(result, sizeof(result), "Method %d", opt->method); + } + return result; +} + +/* + * CCP has come up - inform the kernel driver and log a message. + */ +static void ccp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options *ho = &pcb->ccp_hisoptions; + char method1[64]; + + ccp_set(pcb, 1, 1, go->method, ho->method); + if (ccp_anycompress(go)) { + if (ccp_anycompress(ho)) { + if (go->method == ho->method) { + ppp_notice("%s compression enabled", method_name(go, ho)); + } else { + ppp_strlcpy(method1, method_name(go, NULL), sizeof(method1)); + ppp_notice("%s / %s compression enabled", + method1, method_name(ho, NULL)); + } + } else + ppp_notice("%s receive compression enabled", method_name(go, NULL)); + } else if (ccp_anycompress(ho)) + ppp_notice("%s transmit compression enabled", method_name(ho, NULL)); +#if MPPE_SUPPORT + if (go->mppe) { + continue_networks(pcb); /* Bring up IP et al */ + } +#endif /* MPPE_SUPPORT */ +} + +/* + * CCP has gone down - inform the kernel driver. + */ +static void ccp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + + if (pcb->ccp_localstate & RACK_PENDING) + UNTIMEOUT(ccp_rack_timeout, f); + pcb->ccp_localstate = 0; + ccp_set(pcb, 1, 0, 0, 0); +#if MPPE_SUPPORT + if (go->mppe) { + go->mppe = 0; + if (pcb->lcp_fsm.state == PPP_FSM_OPENED) { + /* If LCP is not already going down, make sure it does. */ + ppp_error("MPPE disabled"); + lcp_close(pcb, "MPPE disabled"); + } + } +#endif /* MPPE_SUPPORT */ +} + +#if PRINTPKT_SUPPORT +/* + * Print the contents of a CCP packet. + */ +static const char* const ccp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej", + NULL, NULL, NULL, NULL, NULL, NULL, + "ResetReq", "ResetAck", +}; + +static int ccp_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg) { + const u_char *p0, *optend; + int code, id, len; + int optlen; + + p0 = p; + if (plen < HEADERLEN) + return 0; + code = p[0]; + id = p[1]; + len = (p[2] << 8) + p[3]; + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ccp_codenames) && ccp_codenames[code-1] != NULL) + printer(arg, " %s", ccp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + p += HEADERLEN; + + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print list of possible compression methods */ + while (len >= 2) { + code = p[0]; + optlen = p[1]; + if (optlen < 2 || optlen > len) + break; + printer(arg, " <"); + len -= optlen; + optend = p + optlen; + switch (code) { +#if MPPE_SUPPORT + case CI_MPPE: + if (optlen >= CILEN_MPPE) { + u_char mppe_opts; + + MPPE_CI_TO_OPTS(&p[2], mppe_opts); + printer(arg, "mppe %s %s %s %s %s %s%s", + (p[2] & MPPE_H_BIT)? "+H": "-H", + (p[5] & MPPE_M_BIT)? 
"+M": "-M", + (p[5] & MPPE_S_BIT)? "+S": "-S", + (p[5] & MPPE_L_BIT)? "+L": "-L", + (p[5] & MPPE_D_BIT)? "+D": "-D", + (p[5] & MPPE_C_BIT)? "+C": "-C", + (mppe_opts & MPPE_OPT_UNKNOWN)? " +U": ""); + if (mppe_opts & MPPE_OPT_UNKNOWN) + printer(arg, " (%.2x %.2x %.2x %.2x)", + p[2], p[3], p[4], p[5]); + p += CILEN_MPPE; + } + break; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (optlen >= CILEN_DEFLATE) { + printer(arg, "deflate%s %d", + (code == CI_DEFLATE_DRAFT? "(old#)": ""), + DEFLATE_SIZE(p[2])); + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL) + printer(arg, " method %d", DEFLATE_METHOD(p[2])); + if (p[3] != DEFLATE_CHK_SEQUENCE) + printer(arg, " check %d", p[3]); + p += CILEN_DEFLATE; + } + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (optlen >= CILEN_BSD_COMPRESS) { + printer(arg, "bsd v%d %d", BSD_VERSION(p[2]), + BSD_NBITS(p[2])); + p += CILEN_BSD_COMPRESS; + } + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + if (optlen >= CILEN_PREDICTOR_1) { + printer(arg, "predictor 1"); + p += CILEN_PREDICTOR_1; + } + break; + case CI_PREDICTOR_2: + if (optlen >= CILEN_PREDICTOR_2) { + printer(arg, "predictor 2"); + p += CILEN_PREDICTOR_2; + } + break; +#endif /* PREDICTOR_SUPPORT */ + default: + break; + } + while (p < optend) + printer(arg, " %.2x", *p++); + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* dump out the rest of the packet in hex */ + while (--len >= 0) + printer(arg, " %.2x", *p++); + + return p - p0; +} +#endif /* PRINTPKT_SUPPORT */ + +#if PPP_DATAINPUT +/* + * We have received a packet that the decompressor failed to + * decompress. Here we would expect to issue a reset-request, but + * Motorola has a patent on resetting the compressor as a result of + * detecting an error in the decompressed data after decompression. + * (See US patent 5,130,993; international patent publication number + * WO 91/10289; Australian patent 73296/91.) + * + * So we ask the kernel whether the error was detected after + * decompression; if it was, we take CCP down, thus disabling + * compression :-(, otherwise we issue the reset-request. + */ +static void ccp_datainput(ppp_pcb *pcb, u_char *pkt, int len) { + fsm *f; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + LWIP_UNUSED_ARG(pkt); + LWIP_UNUSED_ARG(len); + + f = &pcb->ccp_fsm; + if (f->state == PPP_FSM_OPENED) { + if (ccp_fatal_error(pcb)) { + /* + * Disable compression by taking CCP down. + */ + ppp_error("Lost compression sync: disabling compression"); + ccp_close(pcb, "Lost compression sync"); +#if MPPE_SUPPORT + /* + * If we were doing MPPE, we must also take the link down. + */ + if (go->mppe) { + ppp_error("Too many MPPE errors, closing LCP"); + lcp_close(pcb, "Too many MPPE errors"); + } +#endif /* MPPE_SUPPORT */ + } else { + /* + * Send a reset-request to reset the peer's compressor. + * We don't do that if we are still waiting for an + * acknowledgement to a previous reset-request. 
+ */ + if (!(pcb->ccp_localstate & RACK_PENDING)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid = ++f->id, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate |= RACK_PENDING; + } else + pcb->ccp_localstate |= RREQ_REPEAT; + } + } +} +#endif /* PPP_DATAINPUT */ + +/* + * We have received a packet that the decompressor failed to + * decompress. Issue a reset-request. + */ +void ccp_resetrequest(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + + if (f->state != PPP_FSM_OPENED) + return; + + /* + * Send a reset-request to reset the peer's compressor. + * We don't do that if we are still waiting for an + * acknowledgement to a previous reset-request. + */ + if (!(pcb->ccp_localstate & RACK_PENDING)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid = ++f->id, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate |= RACK_PENDING; + } else + pcb->ccp_localstate |= RREQ_REPEAT; +} + +/* + * Timeout waiting for reset-ack. + */ +static void ccp_rack_timeout(void *arg) { + fsm *f = (fsm*)arg; + ppp_pcb *pcb = f->pcb; + + if (f->state == PPP_FSM_OPENED && (pcb->ccp_localstate & RREQ_REPEAT)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate &= ~RREQ_REPEAT; + } else + pcb->ccp_localstate &= ~RACK_PENDING; +} + +#endif /* PPP_SUPPORT && CCP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c new file mode 100644 index 000000000..88f069f03 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c @@ -0,0 +1,126 @@ +/* + * chap-md5.c - New CHAP/MD5 implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap-md5.h" +#include "netif/ppp/magic.h" +#include "netif/ppp/pppcrypt.h" + +#define MD5_HASH_SIZE 16 +#define MD5_MIN_CHALLENGE 17 +#define MD5_MAX_CHALLENGE 24 +#define MD5_MIN_MAX_POWER_OF_TWO_CHALLENGE 3 /* 2^3-1 = 7, 17+7 = 24 */ + +#if PPP_SERVER +static void chap_md5_generate_challenge(ppp_pcb *pcb, unsigned char *cp) { + int clen; + LWIP_UNUSED_ARG(pcb); + + clen = MD5_MIN_CHALLENGE + magic_pow(MD5_MIN_MAX_POWER_OF_TWO_CHALLENGE); + *cp++ = clen; + magic_random_bytes(cp, clen); +} + +static int chap_md5_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + lwip_md5_context ctx; + unsigned char idbyte = id; + unsigned char hash[MD5_HASH_SIZE]; + int challenge_len, response_len; + LWIP_UNUSED_ARG(name); + LWIP_UNUSED_ARG(pcb); + + challenge_len = *challenge++; + response_len = *response++; + if (response_len == MD5_HASH_SIZE) { + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&ctx); + lwip_md5_starts(&ctx); + lwip_md5_update(&ctx, &idbyte, 1); + lwip_md5_update(&ctx, secret, secret_len); + lwip_md5_update(&ctx, challenge, challenge_len); + lwip_md5_finish(&ctx, hash); + lwip_md5_free(&ctx); + + /* Test if our hash matches the peer's response */ + if (memcmp(hash, response, MD5_HASH_SIZE) == 0) { + ppp_slprintf(message, message_space, "Access granted"); + return 1; + } + } + ppp_slprintf(message, message_space, "Access denied"); + return 0; +} +#endif /* PPP_SERVER */ + +static void chap_md5_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + lwip_md5_context ctx; + unsigned char idbyte = id; + int challenge_len = *challenge++; + LWIP_UNUSED_ARG(our_name); + LWIP_UNUSED_ARG(private_); + LWIP_UNUSED_ARG(pcb); + + lwip_md5_init(&ctx); + lwip_md5_starts(&ctx); + lwip_md5_update(&ctx, &idbyte, 1); + lwip_md5_update(&ctx, (const u_char *)secret, secret_len); + lwip_md5_update(&ctx, challenge, challenge_len); + lwip_md5_finish(&ctx, &response[1]); + lwip_md5_free(&ctx); + response[0] = MD5_HASH_SIZE; +} + +const struct chap_digest_type md5_digest = { + CHAP_MD5, /* code */ +#if PPP_SERVER + chap_md5_generate_challenge, + chap_md5_verify_response, +#endif /* PPP_SERVER */ + chap_md5_make_response, + NULL, /* check_success */ + NULL, /* handle_failure */ +}; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c new file mode 100644 index 000000000..485122d27 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c @@ -0,0 +1,677 @@ +/* + * chap-new.c - New CHAP implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#if 0 /* UNUSED */ +#include "session.h" +#endif /* UNUSED */ + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap-md5.h" +#if MSCHAP_SUPPORT +#include "netif/ppp/chap_ms.h" +#endif +#include "netif/ppp/magic.h" + +#if 0 /* UNUSED */ +/* Hook for a plugin to validate CHAP challenge */ +int (*chap_verify_hook)(const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) = NULL; +#endif /* UNUSED */ + +#if PPP_OPTIONS +/* + * Command-line options. + */ +static option_t chap_option_list[] = { + { "chap-restart", o_int, &chap_timeout_time, + "Set timeout for CHAP", OPT_PRIO }, + { "chap-max-challenge", o_int, &pcb->settings.chap_max_transmits, + "Set max #xmits for challenge", OPT_PRIO }, + { "chap-interval", o_int, &pcb->settings.chap_rechallenge_time, + "Set interval for rechallenge", OPT_PRIO }, + { NULL } +}; +#endif /* PPP_OPTIONS */ + + +/* Values for flags in chap_client_state and chap_server_state */ +#define LOWERUP 1 +#define AUTH_STARTED 2 +#define AUTH_DONE 4 +#define AUTH_FAILED 8 +#define TIMEOUT_PENDING 0x10 +#define CHALLENGE_VALID 0x20 + +/* + * Prototypes. 
+ */ +static void chap_init(ppp_pcb *pcb); +static void chap_lowerup(ppp_pcb *pcb); +static void chap_lowerdown(ppp_pcb *pcb); +#if PPP_SERVER +static void chap_timeout(void *arg); +static void chap_generate_challenge(ppp_pcb *pcb); +static void chap_handle_response(ppp_pcb *pcb, int code, + unsigned char *pkt, int len); +static int chap_verify_response(ppp_pcb *pcb, const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space); +#endif /* PPP_SERVER */ +static void chap_respond(ppp_pcb *pcb, int id, + unsigned char *pkt, int len); +static void chap_handle_status(ppp_pcb *pcb, int code, int id, + unsigned char *pkt, int len); +static void chap_protrej(ppp_pcb *pcb); +static void chap_input(ppp_pcb *pcb, unsigned char *pkt, int pktlen); +#if PRINTPKT_SUPPORT +static int chap_print_pkt(const unsigned char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +/* List of digest types that we know about */ +static const struct chap_digest_type* const chap_digests[] = { + &md5_digest, +#if MSCHAP_SUPPORT + &chapms_digest, + &chapms2_digest, +#endif /* MSCHAP_SUPPORT */ + NULL +}; + +/* + * chap_init - reset to initial state. + */ +static void chap_init(ppp_pcb *pcb) { + LWIP_UNUSED_ARG(pcb); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(&pcb->chap_client, 0, sizeof(chap_client_state)); +#if PPP_SERVER + memset(&pcb->chap_server, 0, sizeof(chap_server_state)); +#endif /* PPP_SERVER */ +#endif /* 0 */ +} + +/* + * chap_lowerup - we can start doing stuff now. + */ +static void chap_lowerup(ppp_pcb *pcb) { + + pcb->chap_client.flags |= LOWERUP; +#if PPP_SERVER + pcb->chap_server.flags |= LOWERUP; + if (pcb->chap_server.flags & AUTH_STARTED) + chap_timeout(pcb); +#endif /* PPP_SERVER */ +} + +static void chap_lowerdown(ppp_pcb *pcb) { + + pcb->chap_client.flags = 0; +#if PPP_SERVER + if (pcb->chap_server.flags & TIMEOUT_PENDING) + UNTIMEOUT(chap_timeout, pcb); + pcb->chap_server.flags = 0; +#endif /* PPP_SERVER */ +} + +#if PPP_SERVER +/* + * chap_auth_peer - Start authenticating the peer. + * If the lower layer is already up, we start sending challenges, + * otherwise we wait for the lower layer to come up. + */ +void chap_auth_peer(ppp_pcb *pcb, const char *our_name, int digest_code) { + const struct chap_digest_type *dp; + int i; + + if (pcb->chap_server.flags & AUTH_STARTED) { + ppp_error("CHAP: peer authentication already started!"); + return; + } + for (i = 0; (dp = chap_digests[i]) != NULL; ++i) + if (dp->code == digest_code) + break; + if (dp == NULL) + ppp_fatal("CHAP digest 0x%x requested but not available", + digest_code); + + pcb->chap_server.digest = dp; + pcb->chap_server.name = our_name; + /* Start with a random ID value */ + pcb->chap_server.id = magic(); + pcb->chap_server.flags |= AUTH_STARTED; + if (pcb->chap_server.flags & LOWERUP) + chap_timeout(pcb); +} +#endif /* PPP_SERVER */ + +/* + * chap_auth_with_peer - Prepare to authenticate ourselves to the peer. + * There isn't much to do until we receive a challenge. 
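+ * We just record the digest type and our name; the actual response is
+ * built in chap_respond() once the peer's Challenge arrives.  As an
+ * illustration (not from the original source), a caller would invoke
+ * e.g. chap_auth_with_peer(pcb, user, CHAP_MD5) to select plain MD5,
+ * where "user" stands for the local authentication name.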
+ */ +void chap_auth_with_peer(ppp_pcb *pcb, const char *our_name, int digest_code) { + const struct chap_digest_type *dp; + int i; + + if(NULL == our_name) + return; + + if (pcb->chap_client.flags & AUTH_STARTED) { + ppp_error("CHAP: authentication with peer already started!"); + return; + } + for (i = 0; (dp = chap_digests[i]) != NULL; ++i) + if (dp->code == digest_code) + break; + + if (dp == NULL) + ppp_fatal("CHAP digest 0x%x requested but not available", + digest_code); + + pcb->chap_client.digest = dp; + pcb->chap_client.name = our_name; + pcb->chap_client.flags |= AUTH_STARTED; +} + +#if PPP_SERVER +/* + * chap_timeout - It's time to send another challenge to the peer. + * This could be either a retransmission of a previous challenge, + * or a new challenge to start re-authentication. + */ +static void chap_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + struct pbuf *p; + + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + if ((pcb->chap_server.flags & CHALLENGE_VALID) == 0) { + pcb->chap_server.challenge_xmits = 0; + chap_generate_challenge(pcb); + pcb->chap_server.flags |= CHALLENGE_VALID; + } else if (pcb->chap_server.challenge_xmits >= pcb->settings.chap_max_transmits) { + pcb->chap_server.flags &= ~CHALLENGE_VALID; + pcb->chap_server.flags |= AUTH_DONE | AUTH_FAILED; + auth_peer_fail(pcb, PPP_CHAP); + return; + } + + p = pbuf_alloc(PBUF_RAW, (u16_t)(pcb->chap_server.challenge_pktlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + MEMCPY(p->payload, pcb->chap_server.challenge, pcb->chap_server.challenge_pktlen); + ppp_write(pcb, p); + ++pcb->chap_server.challenge_xmits; + pcb->chap_server.flags |= TIMEOUT_PENDING; + TIMEOUT(chap_timeout, arg, pcb->settings.chap_timeout_time); +} + +/* + * chap_generate_challenge - generate a challenge string and format + * the challenge packet in pcb->chap_server.challenge_pkt. + */ +static void chap_generate_challenge(ppp_pcb *pcb) { + int clen = 1, nlen, len; + unsigned char *p; + + p = pcb->chap_server.challenge; + MAKEHEADER(p, PPP_CHAP); + p += CHAP_HDRLEN; + pcb->chap_server.digest->generate_challenge(pcb, p); + clen = *p; + nlen = strlen(pcb->chap_server.name); + memcpy(p + 1 + clen, pcb->chap_server.name, nlen); + + len = CHAP_HDRLEN + 1 + clen + nlen; + pcb->chap_server.challenge_pktlen = PPP_HDRLEN + len; + + p = pcb->chap_server.challenge + PPP_HDRLEN; + p[0] = CHAP_CHALLENGE; + p[1] = ++pcb->chap_server.id; + p[2] = len >> 8; + p[3] = len; +} + +/* + * chap_handle_response - check the response to our challenge. 
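+ * On entry pkt points at the Value-Size octet of the CHAP Response
+ * (RFC 1994): one length byte, then the response value itself, with the
+ * peer's Name occupying the remainder of the packet.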
+ */ +static void chap_handle_response(ppp_pcb *pcb, int id, + unsigned char *pkt, int len) { + int response_len, ok, mlen; + const unsigned char *response; + unsigned char *outp; + struct pbuf *p; + const char *name = NULL; /* initialized to shut gcc up */ +#if 0 /* UNUSED */ + int (*verifier)(const char *, const char *, int, const struct chap_digest_type *, + const unsigned char *, const unsigned char *, char *, int); +#endif /* UNUSED */ + char rname[MAXNAMELEN+1]; + char message[256]; + + if ((pcb->chap_server.flags & LOWERUP) == 0) + return; + if (id != pcb->chap_server.challenge[PPP_HDRLEN+1] || len < 2) + return; + if (pcb->chap_server.flags & CHALLENGE_VALID) { + response = pkt; + GETCHAR(response_len, pkt); + len -= response_len + 1; /* length of name */ + name = (char *)pkt + response_len; + if (len < 0) + return; + + if (pcb->chap_server.flags & TIMEOUT_PENDING) { + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + UNTIMEOUT(chap_timeout, pcb); + } +#if PPP_REMOTENAME + if (pcb->settings.explicit_remote) { + name = pcb->remote_name; + } else +#endif /* PPP_REMOTENAME */ + { + /* Null terminate and clean remote name. */ + ppp_slprintf(rname, sizeof(rname), "%.*v", len, name); + name = rname; + } + +#if 0 /* UNUSED */ + if (chap_verify_hook) + verifier = chap_verify_hook; + else + verifier = chap_verify_response; + ok = (*verifier)(name, pcb->chap_server.name, id, pcb->chap_server.digest, + pcb->chap_server.challenge + PPP_HDRLEN + CHAP_HDRLEN, + response, pcb->chap_server.message, sizeof(pcb->chap_server.message)); +#endif /* UNUSED */ + ok = chap_verify_response(pcb, name, pcb->chap_server.name, id, pcb->chap_server.digest, + pcb->chap_server.challenge + PPP_HDRLEN + CHAP_HDRLEN, + response, message, sizeof(message)); +#if 0 /* UNUSED */ + if (!ok || !auth_number()) { +#endif /* UNUSED */ + if (!ok) { + pcb->chap_server.flags |= AUTH_FAILED; + ppp_warn("Peer %q failed CHAP authentication", name); + } + } else if ((pcb->chap_server.flags & AUTH_DONE) == 0) + return; + + /* send the response */ + mlen = strlen(message); + len = CHAP_HDRLEN + mlen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +len), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (unsigned char *)p->payload; + MAKEHEADER(outp, PPP_CHAP); + + outp[0] = (pcb->chap_server.flags & AUTH_FAILED)? CHAP_FAILURE: CHAP_SUCCESS; + outp[1] = id; + outp[2] = len >> 8; + outp[3] = len; + if (mlen > 0) + memcpy(outp + CHAP_HDRLEN, message, mlen); + ppp_write(pcb, p); + + if (pcb->chap_server.flags & CHALLENGE_VALID) { + pcb->chap_server.flags &= ~CHALLENGE_VALID; + if (!(pcb->chap_server.flags & AUTH_DONE) && !(pcb->chap_server.flags & AUTH_FAILED)) { + +#if 0 /* UNUSED */ + /* + * Auth is OK, so now we need to check session restrictions + * to ensure everything is OK, but only if we used a + * plugin, and only if we're configured to check. This + * allows us to do PAM checks on PPP servers that + * authenticate against ActiveDirectory, and use AD for + * account info (like when using Winbind integrated with + * PAM). 
+ */ + if (session_mgmt && + session_check(name, NULL, devnam, NULL) == 0) { + pcb->chap_server.flags |= AUTH_FAILED; + ppp_warn("Peer %q failed CHAP Session verification", name); + } +#endif /* UNUSED */ + + } + if (pcb->chap_server.flags & AUTH_FAILED) { + auth_peer_fail(pcb, PPP_CHAP); + } else { + if ((pcb->chap_server.flags & AUTH_DONE) == 0) + auth_peer_success(pcb, PPP_CHAP, + pcb->chap_server.digest->code, + name, strlen(name)); + if (pcb->settings.chap_rechallenge_time) { + pcb->chap_server.flags |= TIMEOUT_PENDING; + TIMEOUT(chap_timeout, pcb, + pcb->settings.chap_rechallenge_time); + } + } + pcb->chap_server.flags |= AUTH_DONE; + } +} + +/* + * chap_verify_response - check whether the peer's response matches + * what we think it should be. Returns 1 if it does (authentication + * succeeded), or 0 if it doesn't. + */ +static int chap_verify_response(ppp_pcb *pcb, const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + int ok; + unsigned char secret[MAXSECRETLEN]; + int secret_len; + + /* Get the secret that the peer is supposed to know */ + if (!get_secret(pcb, name, ourname, (char *)secret, &secret_len, 1)) { + ppp_error("No CHAP secret found for authenticating %q", name); + return 0; + } + ok = digest->verify_response(pcb, id, name, secret, secret_len, challenge, + response, message, message_space); + memset(secret, 0, sizeof(secret)); + + return ok; +} +#endif /* PPP_SERVER */ + +/* + * chap_respond - Generate and send a response to a challenge. + */ +static void chap_respond(ppp_pcb *pcb, int id, + unsigned char *pkt, int len) { + int clen, nlen; + int secret_len; + struct pbuf *p; + u_char *outp; + char rname[MAXNAMELEN+1]; + char secret[MAXSECRETLEN+1]; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(RESP_MAX_PKTLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + if ((pcb->chap_client.flags & (LOWERUP | AUTH_STARTED)) != (LOWERUP | AUTH_STARTED)) + return; /* not ready */ + if (len < 2 || len < pkt[0] + 1) + return; /* too short */ + clen = pkt[0]; + nlen = len - (clen + 1); + + /* Null terminate and clean remote name. 
*/ + ppp_slprintf(rname, sizeof(rname), "%.*v", nlen, pkt + clen + 1); + +#if PPP_REMOTENAME + /* Microsoft doesn't send their name back in the PPP packet */ + if (pcb->settings.explicit_remote || (pcb->settings.remote_name[0] != 0 && rname[0] == 0)) + strlcpy(rname, pcb->settings.remote_name, sizeof(rname)); +#endif /* PPP_REMOTENAME */ + + /* get secret for authenticating ourselves with the specified host */ + if (!get_secret(pcb, pcb->chap_client.name, rname, secret, &secret_len, 0)) { + secret_len = 0; /* assume null secret if can't find one */ + ppp_warn("No CHAP secret found for authenticating us to %q", rname); + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_CHAP); + outp += CHAP_HDRLEN; + + pcb->chap_client.digest->make_response(pcb, outp, id, pcb->chap_client.name, pkt, + secret, secret_len, pcb->chap_client.priv); + memset(secret, 0, secret_len); + + clen = *outp; + nlen = strlen(pcb->chap_client.name); + memcpy(outp + clen + 1, pcb->chap_client.name, nlen); + + outp = (u_char*)p->payload + PPP_HDRLEN; + len = CHAP_HDRLEN + clen + 1 + nlen; + outp[0] = CHAP_RESPONSE; + outp[1] = id; + outp[2] = len >> 8; + outp[3] = len; + + pbuf_realloc(p, PPP_HDRLEN + len); + ppp_write(pcb, p); +} + +static void chap_handle_status(ppp_pcb *pcb, int code, int id, + unsigned char *pkt, int len) { + const char *msg = NULL; + LWIP_UNUSED_ARG(id); + + if ((pcb->chap_client.flags & (AUTH_DONE|AUTH_STARTED|LOWERUP)) + != (AUTH_STARTED|LOWERUP)) + return; + pcb->chap_client.flags |= AUTH_DONE; + + if (code == CHAP_SUCCESS) { + /* used for MS-CHAP v2 mutual auth, yuck */ + if (pcb->chap_client.digest->check_success != NULL) { + if (!(*pcb->chap_client.digest->check_success)(pcb, pkt, len, pcb->chap_client.priv)) + code = CHAP_FAILURE; + } else + msg = "CHAP authentication succeeded"; + } else { + if (pcb->chap_client.digest->handle_failure != NULL) + (*pcb->chap_client.digest->handle_failure)(pcb, pkt, len); + else + msg = "CHAP authentication failed"; + } + if (msg) { + if (len > 0) + ppp_info("%s: %.*v", msg, len, pkt); + else + ppp_info("%s", msg); + } + if (code == CHAP_SUCCESS) + auth_withpeer_success(pcb, PPP_CHAP, pcb->chap_client.digest->code); + else { + pcb->chap_client.flags |= AUTH_FAILED; + ppp_error("CHAP authentication failed"); + auth_withpeer_fail(pcb, PPP_CHAP); + } +} + +static void chap_input(ppp_pcb *pcb, unsigned char *pkt, int pktlen) { + unsigned char code, id; + int len; + + if (pktlen < CHAP_HDRLEN) + return; + GETCHAR(code, pkt); + GETCHAR(id, pkt); + GETSHORT(len, pkt); + if (len < CHAP_HDRLEN || len > pktlen) + return; + len -= CHAP_HDRLEN; + + switch (code) { + case CHAP_CHALLENGE: + chap_respond(pcb, id, pkt, len); + break; +#if PPP_SERVER + case CHAP_RESPONSE: + chap_handle_response(pcb, id, pkt, len); + break; +#endif /* PPP_SERVER */ + case CHAP_FAILURE: + case CHAP_SUCCESS: + chap_handle_status(pcb, code, id, pkt, len); + break; + default: + break; + } +} + +static void chap_protrej(ppp_pcb *pcb) { + +#if PPP_SERVER + if (pcb->chap_server.flags & TIMEOUT_PENDING) { + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + UNTIMEOUT(chap_timeout, pcb); + } + if (pcb->chap_server.flags & AUTH_STARTED) { + pcb->chap_server.flags = 0; + auth_peer_fail(pcb, PPP_CHAP); + } +#endif /* PPP_SERVER */ + if ((pcb->chap_client.flags & (AUTH_STARTED|AUTH_DONE)) == AUTH_STARTED) { + pcb->chap_client.flags &= ~AUTH_STARTED; + ppp_error("CHAP authentication failed due to protocol-reject"); + auth_withpeer_fail(pcb, PPP_CHAP); + } +} + +#if PRINTPKT_SUPPORT +/* + * chap_print_pkt - 
print the contents of a CHAP packet. + */ +static const char* const chap_code_names[] = { + "Challenge", "Response", "Success", "Failure" +}; + +static int chap_print_pkt(const unsigned char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len; + int clen, nlen; + unsigned char x; + + if (plen < CHAP_HDRLEN) + return 0; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < CHAP_HDRLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(chap_code_names)) + printer(arg, " %s", chap_code_names[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= CHAP_HDRLEN; + switch (code) { + case CHAP_CHALLENGE: + case CHAP_RESPONSE: + if (len < 1) + break; + clen = p[0]; + if (len < clen + 1) + break; + ++p; + nlen = len - clen - 1; + printer(arg, " <"); + for (; clen > 0; --clen) { + GETCHAR(x, p); + printer(arg, "%.2x", x); + } + printer(arg, ">, name = "); + ppp_print_string(p, nlen, printer, arg); + break; + case CHAP_FAILURE: + case CHAP_SUCCESS: + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + break; + default: + for (clen = len; clen > 0; --clen) { + GETCHAR(x, p); + printer(arg, " %.2x", x); + } + /* no break */ + } + + return len + CHAP_HDRLEN; +} +#endif /* PRINTPKT_SUPPORT */ + +const struct protent chap_protent = { + PPP_CHAP, + chap_init, + chap_input, + chap_protrej, + chap_lowerup, + chap_lowerdown, + NULL, /* open */ + NULL, /* close */ +#if PRINTPKT_SUPPORT + chap_print_pkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* datainput */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "CHAP", /* name */ + NULL, /* data_name */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + chap_option_list, + NULL, /* check_options */ +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c new file mode 100644 index 000000000..5a989c9b7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c @@ -0,0 +1,962 @@ +/* + * chap_ms.c - Microsoft MS-CHAP compatible implementation. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. 
+ * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Modifications by Lauri Pesonen / lpesonen@clinet.fi, april 1997 + * + * Implemented LANManager type password response to MS-CHAP challenges. + * Now pppd provides both NT style and LANMan style blocks, and the + * prefered is set by option "ms-lanman". Default is to use NT. + * The hash text (StdText) was taken from Win95 RASAPI32.DLL. + * + * You should also use DOMAIN\\USERNAME as described in README.MSCHAP80 + */ + +/* + * Modifications by Frank Cusack, frank@google.com, March 2002. + * + * Implemented MS-CHAPv2 functionality, heavily based on sample + * implementation in RFC 2759. Implemented MPPE functionality, + * heavily based on sample implementation in RFC 3079. + * + * Copyright (c) 2002 Google, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap_ms.h" +#include "netif/ppp/pppcrypt.h" +#include "netif/ppp/magic.h" +#if MPPE_SUPPORT +#include "netif/ppp/mppe.h" /* For mppe_sha1_pad*, mppe_set_key() */ +#endif /* MPPE_SUPPORT */ + +#define SHA1_SIGNATURE_SIZE 20 +#define MD4_SIGNATURE_SIZE 16 /* 16 bytes in a MD4 message digest */ +#define MAX_NT_PASSWORD 256 /* Max (Unicode) chars in an NT pass */ + +#define MS_CHAP_RESPONSE_LEN 49 /* Response length for MS-CHAP */ +#define MS_CHAP2_RESPONSE_LEN 49 /* Response length for MS-CHAPv2 */ +#define MS_AUTH_RESPONSE_LENGTH 40 /* MS-CHAPv2 authenticator response, */ + /* as ASCII */ + +/* Error codes for MS-CHAP failure messages. 
*/ +#define MS_CHAP_ERROR_RESTRICTED_LOGON_HOURS 646 +#define MS_CHAP_ERROR_ACCT_DISABLED 647 +#define MS_CHAP_ERROR_PASSWD_EXPIRED 648 +#define MS_CHAP_ERROR_NO_DIALIN_PERMISSION 649 +#define MS_CHAP_ERROR_AUTHENTICATION_FAILURE 691 +#define MS_CHAP_ERROR_CHANGING_PASSWORD 709 + +/* + * Offsets within the response field for MS-CHAP + */ +#define MS_CHAP_LANMANRESP 0 +#define MS_CHAP_LANMANRESP_LEN 24 +#define MS_CHAP_NTRESP 24 +#define MS_CHAP_NTRESP_LEN 24 +#define MS_CHAP_USENT 48 + +/* + * Offsets within the response field for MS-CHAP2 + */ +#define MS_CHAP2_PEER_CHALLENGE 0 +#define MS_CHAP2_PEER_CHAL_LEN 16 +#define MS_CHAP2_RESERVED_LEN 8 +#define MS_CHAP2_NTRESP 24 +#define MS_CHAP2_NTRESP_LEN 24 +#define MS_CHAP2_FLAGS 48 + +#if MPPE_SUPPORT +#if 0 /* UNUSED */ +/* These values are the RADIUS attribute values--see RFC 2548. */ +#define MPPE_ENC_POL_ENC_ALLOWED 1 +#define MPPE_ENC_POL_ENC_REQUIRED 2 +#define MPPE_ENC_TYPES_RC4_40 2 +#define MPPE_ENC_TYPES_RC4_128 4 + +/* used by plugins (using above values) */ +extern void set_mppe_enc_types(int, int); +#endif /* UNUSED */ +#endif /* MPPE_SUPPORT */ + +/* Are we the authenticator or authenticatee? For MS-CHAPv2 key derivation. */ +#define MS_CHAP2_AUTHENTICATEE 0 +#define MS_CHAP2_AUTHENTICATOR 1 + +static void ascii2unicode (const char[], int, u_char[]); +static void NTPasswordHash (u_char *, int, u_char[MD4_SIGNATURE_SIZE]); +static void ChallengeResponse (const u_char *, const u_char *, u_char[24]); +static void ChallengeHash (const u_char[16], const u_char *, const char *, u_char[8]); +static void ChapMS_NT (const u_char *, const char *, int, u_char[24]); +static void ChapMS2_NT (const u_char *, const u_char[16], const char *, const char *, int, + u_char[24]); +static void GenerateAuthenticatorResponsePlain + (const char*, int, u_char[24], const u_char[16], const u_char *, + const char *, u_char[41]); +#ifdef MSLANMAN +static void ChapMS_LANMan (u_char *, char *, int, u_char *); +#endif + +static void GenerateAuthenticatorResponse(const u_char PasswordHashHash[MD4_SIGNATURE_SIZE], + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]); + +#if MPPE_SUPPORT +static void Set_Start_Key (ppp_pcb *pcb, const u_char *, const char *, int); +static void SetMasterKeys (ppp_pcb *pcb, const char *, int, u_char[24], int); +#endif /* MPPE_SUPPORT */ + +static void ChapMS (ppp_pcb *pcb, const u_char *, const char *, int, u_char *); +static void ChapMS2 (ppp_pcb *pcb, const u_char *, const u_char *, const char *, const char *, int, + u_char *, u_char[MS_AUTH_RESPONSE_LENGTH+1], int); + +#ifdef MSLANMAN +bool ms_lanman = 0; /* Use LanMan password instead of NT */ + /* Has meaning only with MS-CHAP challenges */ +#endif + +#if MPPE_SUPPORT +#ifdef DEBUGMPPEKEY +/* For MPPE debug */ +/* Use "[]|}{?/><,`!2&&(" (sans quotes) for RFC 3079 MS-CHAPv2 test value */ +static char *mschap_challenge = NULL; +/* Use "!@\#$%^&*()_+:3|~" (sans quotes, backslash is to escape #) for ... */ +static char *mschap2_peer_challenge = NULL; +#endif + +#include "netif/ppp/fsm.h" /* Need to poke MPPE options */ +#include "netif/ppp/ccp.h" +#endif /* MPPE_SUPPORT */ + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static option_t chapms_option_list[] = { +#ifdef MSLANMAN + { "ms-lanman", o_bool, &ms_lanman, + "Use LanMan passwd when using MS-CHAP", 1 }, +#endif +#ifdef DEBUGMPPEKEY + { "mschap-challenge", o_string, &mschap_challenge, + "specify CHAP challenge" }, + { "mschap2-peer-challenge", o_string, &mschap2_peer_challenge, + "specify CHAP peer challenge" }, +#endif + { NULL } +}; +#endif /* PPP_OPTIONS */ + +#if PPP_SERVER +/* + * chapms_generate_challenge - generate a challenge for MS-CHAP. + * For MS-CHAP the challenge length is fixed at 8 bytes. + * The length goes in challenge[0] and the actual challenge starts + * at challenge[1]. + */ +static void chapms_generate_challenge(ppp_pcb *pcb, unsigned char *challenge) { + LWIP_UNUSED_ARG(pcb); + + *challenge++ = 8; +#ifdef DEBUGMPPEKEY + if (mschap_challenge && strlen(mschap_challenge) == 8) + memcpy(challenge, mschap_challenge, 8); + else +#endif + magic_random_bytes(challenge, 8); +} + +static void chapms2_generate_challenge(ppp_pcb *pcb, unsigned char *challenge) { + LWIP_UNUSED_ARG(pcb); + + *challenge++ = 16; +#ifdef DEBUGMPPEKEY + if (mschap_challenge && strlen(mschap_challenge) == 16) + memcpy(challenge, mschap_challenge, 16); + else +#endif + magic_random_bytes(challenge, 16); +} + +static int chapms_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + unsigned char md[MS_CHAP_RESPONSE_LEN]; + int diff; + int challenge_len, response_len; + LWIP_UNUSED_ARG(id); + LWIP_UNUSED_ARG(name); + + challenge_len = *challenge++; /* skip length, is 8 */ + response_len = *response++; + if (response_len != MS_CHAP_RESPONSE_LEN) + goto bad; + +#ifndef MSLANMAN + if (!response[MS_CHAP_USENT]) { + /* Should really propagate this into the error packet. */ + ppp_notice("Peer request for LANMAN auth not supported"); + goto bad; + } +#endif + + /* Generate the expected response. */ + ChapMS(pcb, (const u_char *)challenge, (const char *)secret, secret_len, md); + +#ifdef MSLANMAN + /* Determine which part of response to verify against */ + if (!response[MS_CHAP_USENT]) + diff = memcmp(&response[MS_CHAP_LANMANRESP], + &md[MS_CHAP_LANMANRESP], MS_CHAP_LANMANRESP_LEN); + else +#endif + diff = memcmp(&response[MS_CHAP_NTRESP], &md[MS_CHAP_NTRESP], + MS_CHAP_NTRESP_LEN); + + if (diff == 0) { + ppp_slprintf(message, message_space, "Access granted"); + return 1; + } + + bad: + /* See comments below for MS-CHAP V2 */ + ppp_slprintf(message, message_space, "E=691 R=1 C=%0.*B V=0", + challenge_len, challenge); + return 0; +} + +static int chapms2_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + unsigned char md[MS_CHAP2_RESPONSE_LEN]; + char saresponse[MS_AUTH_RESPONSE_LENGTH+1]; + int challenge_len, response_len; + LWIP_UNUSED_ARG(id); + + challenge_len = *challenge++; /* skip length, is 16 */ + response_len = *response++; + if (response_len != MS_CHAP2_RESPONSE_LEN) + goto bad; /* not even the right length */ + + /* Generate the expected response and our mutual auth. 
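+	 * ChapMS2() runs in authenticator mode here: md receives the 49-byte
+	 * response we expect from the peer, and saresponse receives the
+	 * "authenticator response" string used for mutual authentication below.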
*/ + ChapMS2(pcb, (const u_char*)challenge, (const u_char*)&response[MS_CHAP2_PEER_CHALLENGE], name, + (const char *)secret, secret_len, md, + (unsigned char *)saresponse, MS_CHAP2_AUTHENTICATOR); + + /* compare MDs and send the appropriate status */ + /* + * Per RFC 2759, success message must be formatted as + * "S= M=" + * where + * is the Authenticator Response (mutual auth) + * is a text message + * + * However, some versions of Windows (win98 tested) do not know + * about the M= part (required per RFC 2759) and flag + * it as an error (reported incorrectly as an encryption error + * to the user). Since the RFC requires it, and it can be + * useful information, we supply it if the peer is a conforming + * system. Luckily (?), win98 sets the Flags field to 0x04 + * (contrary to RFC requirements) so we can use that to + * distinguish between conforming and non-conforming systems. + * + * Special thanks to Alex Swiridov for + * help debugging this. + */ + if (memcmp(&md[MS_CHAP2_NTRESP], &response[MS_CHAP2_NTRESP], + MS_CHAP2_NTRESP_LEN) == 0) { + if (response[MS_CHAP2_FLAGS]) + ppp_slprintf(message, message_space, "S=%s", saresponse); + else + ppp_slprintf(message, message_space, "S=%s M=%s", + saresponse, "Access granted"); + return 1; + } + + bad: + /* + * Failure message must be formatted as + * "E=e R=r C=c V=v M=m" + * where + * e = error code (we use 691, ERROR_AUTHENTICATION_FAILURE) + * r = retry (we use 1, ok to retry) + * c = challenge to use for next response, we reuse previous + * v = Change Password version supported, we use 0 + * m = text message + * + * The M=m part is only for MS-CHAPv2. Neither win2k nor + * win98 (others untested) display the message to the user anyway. + * They also both ignore the E=e code. + * + * Note that it's safe to reuse the same challenge as we don't + * actually accept another response based on the error message + * (and no clients try to resend a response anyway). + * + * Basically, this whole bit is useless code, even the small + * implementation here is only because of overspecification. 
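+	 * For illustration, a reply built below reads roughly:
+	 *   "E=691 R=1 C=<challenge in hex> V=0 M=Access denied"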
+ */ + ppp_slprintf(message, message_space, "E=691 R=1 C=%0.*B V=0 M=%s", + challenge_len, challenge, "Access denied"); + return 0; +} +#endif /* PPP_SERVER */ + +static void chapms_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + LWIP_UNUSED_ARG(id); + LWIP_UNUSED_ARG(our_name); + LWIP_UNUSED_ARG(private_); + challenge++; /* skip length, should be 8 */ + *response++ = MS_CHAP_RESPONSE_LEN; + ChapMS(pcb, challenge, secret, secret_len, response); +} + +static void chapms2_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + LWIP_UNUSED_ARG(id); + challenge++; /* skip length, should be 16 */ + *response++ = MS_CHAP2_RESPONSE_LEN; + ChapMS2(pcb, challenge, +#ifdef DEBUGMPPEKEY + mschap2_peer_challenge, +#else + NULL, +#endif + our_name, secret, secret_len, response, private_, + MS_CHAP2_AUTHENTICATEE); +} + +static int chapms2_check_success(ppp_pcb *pcb, unsigned char *msg, int len, unsigned char *private_) { + LWIP_UNUSED_ARG(pcb); + + if ((len < MS_AUTH_RESPONSE_LENGTH + 2) || + strncmp((char *)msg, "S=", 2) != 0) { + /* Packet does not start with "S=" */ + ppp_error("MS-CHAPv2 Success packet is badly formed."); + return 0; + } + msg += 2; + len -= 2; + if (len < MS_AUTH_RESPONSE_LENGTH + || memcmp(msg, private_, MS_AUTH_RESPONSE_LENGTH)) { + /* Authenticator Response did not match expected. */ + ppp_error("MS-CHAPv2 mutual authentication failed."); + return 0; + } + /* Authenticator Response matches. */ + msg += MS_AUTH_RESPONSE_LENGTH; /* Eat it */ + len -= MS_AUTH_RESPONSE_LENGTH; + if ((len >= 3) && !strncmp((char *)msg, " M=", 3)) { + msg += 3; /* Eat the delimiter */ + } else if (len) { + /* Packet has extra text which does not begin " M=" */ + ppp_error("MS-CHAPv2 Success packet is badly formed."); + return 0; + } + return 1; +} + +static void chapms_handle_failure(ppp_pcb *pcb, unsigned char *inp, int len) { + int err; + const char *p; + char msg[64]; + LWIP_UNUSED_ARG(pcb); + + /* We want a null-terminated string for strxxx(). */ + len = LWIP_MIN(len, 63); + MEMCPY(msg, inp, len); + msg[len] = 0; + p = msg; + + /* + * Deal with MS-CHAP formatted failure messages; just print the + * M= part (if any). For MS-CHAP we're not really supposed + * to use M=, but it shouldn't hurt. See + * chapms[2]_verify_response. + */ + if (!strncmp(p, "E=", 2)) + err = strtol(p+2, NULL, 10); /* Remember the error code. */ + else + goto print_msg; /* Message is badly formatted. */ + + if (len && ((p = strstr(p, " M=")) != NULL)) { + /* M= field found. */ + p += 3; + } else { + /* No M=; use the error code. */ + switch (err) { + case MS_CHAP_ERROR_RESTRICTED_LOGON_HOURS: + p = "E=646 Restricted logon hours"; + break; + + case MS_CHAP_ERROR_ACCT_DISABLED: + p = "E=647 Account disabled"; + break; + + case MS_CHAP_ERROR_PASSWD_EXPIRED: + p = "E=648 Password expired"; + break; + + case MS_CHAP_ERROR_NO_DIALIN_PERMISSION: + p = "E=649 No dialin permission"; + break; + + case MS_CHAP_ERROR_AUTHENTICATION_FAILURE: + p = "E=691 Authentication failure"; + break; + + case MS_CHAP_ERROR_CHANGING_PASSWORD: + /* Should never see this, we don't support Change Password. 
*/ + p = "E=709 Error changing password"; + break; + + default: + ppp_error("Unknown MS-CHAP authentication failure: %.*v", + len, inp); + return; + } + } +print_msg: + if (p != NULL) + ppp_error("MS-CHAP authentication failed: %v", p); +} + +static void ChallengeResponse(const u_char *challenge, + const u_char PasswordHash[MD4_SIGNATURE_SIZE], + u_char response[24]) { + u_char ZPasswordHash[21]; + lwip_des_context des; + u_char des_key[8]; + + BZERO(ZPasswordHash, sizeof(ZPasswordHash)); + MEMCPY(ZPasswordHash, PasswordHash, MD4_SIGNATURE_SIZE); + +#if 0 + dbglog("ChallengeResponse - ZPasswordHash %.*B", + sizeof(ZPasswordHash), ZPasswordHash); +#endif + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 0, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +0); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 7, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +8); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 14, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +16); + lwip_des_free(&des); + +#if 0 + dbglog("ChallengeResponse - response %.24B", response); +#endif +} + +static void ChallengeHash(const u_char PeerChallenge[16], const u_char *rchallenge, + const char *username, u_char Challenge[8]) { + lwip_sha1_context sha1Context; + u_char sha1Hash[SHA1_SIGNATURE_SIZE]; + const char *user; + + /* remove domain from "domain\username" */ + if ((user = strrchr(username, '\\')) != NULL) + ++user; + else + user = username; + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PeerChallenge, 16); + lwip_sha1_update(&sha1Context, rchallenge, 16); + lwip_sha1_update(&sha1Context, (const unsigned char*)user, strlen(user)); + lwip_sha1_finish(&sha1Context, sha1Hash); + lwip_sha1_free(&sha1Context); + + MEMCPY(Challenge, sha1Hash, 8); +} + +/* + * Convert the ASCII version of the password to Unicode. + * This implicitly supports 8-bit ISO8859/1 characters. + * This gives us the little-endian representation, which + * is assumed by all M$ CHAP RFCs. (Unicode byte ordering + * is machine-dependent.) + */ +static void ascii2unicode(const char ascii[], int ascii_len, u_char unicode[]) { + int i; + + BZERO(unicode, ascii_len * 2); + for (i = 0; i < ascii_len; i++) + unicode[i * 2] = (u_char) ascii[i]; +} + +static void NTPasswordHash(u_char *secret, int secret_len, u_char hash[MD4_SIGNATURE_SIZE]) { + lwip_md4_context md4Context; + + lwip_md4_init(&md4Context); + lwip_md4_starts(&md4Context); + lwip_md4_update(&md4Context, secret, secret_len); + lwip_md4_finish(&md4Context, hash); + lwip_md4_free(&md4Context); +} + +static void ChapMS_NT(const u_char *rchallenge, const char *secret, int secret_len, + u_char NTResponse[24]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + + /* Hash the Unicode version of the secret (== password). 
*/ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + + ChallengeResponse(rchallenge, PasswordHash, NTResponse); +} + +static void ChapMS2_NT(const u_char *rchallenge, const u_char PeerChallenge[16], const char *username, + const char *secret, int secret_len, u_char NTResponse[24]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char Challenge[8]; + + ChallengeHash(PeerChallenge, rchallenge, username, Challenge); + + /* Hash the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + + ChallengeResponse(Challenge, PasswordHash, NTResponse); +} + +#ifdef MSLANMAN +static u_char *StdText = (u_char *)"KGS!@#$%"; /* key from rasapi32.dll */ + +static void ChapMS_LANMan(u_char *rchallenge, char *secret, int secret_len, + unsigned char *response) { + int i; + u_char UcasePassword[MAX_NT_PASSWORD]; /* max is actually 14 */ + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + lwip_des_context des; + u_char des_key[8]; + + /* LANMan password is case insensitive */ + BZERO(UcasePassword, sizeof(UcasePassword)); + for (i = 0; i < secret_len; i++) + UcasePassword[i] = (u_char)toupper(secret[i]); + + pppcrypt_56_to_64_bit_key(UcasePassword +0, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, StdText, PasswordHash +0); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(UcasePassword +7, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, StdText, PasswordHash +8); + lwip_des_free(&des); + + ChallengeResponse(rchallenge, PasswordHash, &response[MS_CHAP_LANMANRESP]); +} +#endif + + +static void GenerateAuthenticatorResponse(const u_char PasswordHashHash[MD4_SIGNATURE_SIZE], + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]) { + /* + * "Magic" constants used in response generation, from RFC 2759. 
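+     * They are the ASCII bytes of the two pad strings quoted beside the
+     * arrays below.  Two SHA-1 passes over them, the double password hash,
+     * the NT response and the challenge hash produce the 20-byte Digest,
+     * which is hex-encoded into the 40-character authenticator response.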
+ */ + static const u_char Magic1[39] = /* "Magic server to client signing constant" */ + { 0x4D, 0x61, 0x67, 0x69, 0x63, 0x20, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x20, 0x74, 0x6F, 0x20, 0x63, 0x6C, 0x69, 0x65, + 0x6E, 0x74, 0x20, 0x73, 0x69, 0x67, 0x6E, 0x69, 0x6E, 0x67, + 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x74, 0x61, 0x6E, 0x74 }; + static const u_char Magic2[41] = /* "Pad to make it do more than one iteration" */ + { 0x50, 0x61, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x6D, 0x61, 0x6B, + 0x65, 0x20, 0x69, 0x74, 0x20, 0x64, 0x6F, 0x20, 0x6D, 0x6F, + 0x72, 0x65, 0x20, 0x74, 0x68, 0x61, 0x6E, 0x20, 0x6F, 0x6E, + 0x65, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6F, + 0x6E }; + + int i; + lwip_sha1_context sha1Context; + u_char Digest[SHA1_SIGNATURE_SIZE]; + u_char Challenge[8]; + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, NTResponse, 24); + lwip_sha1_update(&sha1Context, Magic1, sizeof(Magic1)); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + ChallengeHash(PeerChallenge, rchallenge, username, Challenge); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, Digest, sizeof(Digest)); + lwip_sha1_update(&sha1Context, Challenge, sizeof(Challenge)); + lwip_sha1_update(&sha1Context, Magic2, sizeof(Magic2)); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + /* Convert to ASCII hex string. */ + for (i = 0; i < LWIP_MAX((MS_AUTH_RESPONSE_LENGTH / 2), (int)sizeof(Digest)); i++) + sprintf((char *)&authResponse[i * 2], "%02X", Digest[i]); +} + + +static void GenerateAuthenticatorResponsePlain( + const char *secret, int secret_len, + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + + /* Hash (x2) the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), + PasswordHashHash); + + GenerateAuthenticatorResponse(PasswordHashHash, NTResponse, PeerChallenge, + rchallenge, username, authResponse); +} + + +#if MPPE_SUPPORT +/* + * Set mppe_xxxx_key from MS-CHAP credentials. (see RFC 3079) + */ +static void Set_Start_Key(ppp_pcb *pcb, const u_char *rchallenge, const char *secret, int secret_len) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + lwip_sha1_context sha1Context; + u_char Digest[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + + /* Hash (x2) the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), PasswordHashHash); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, rchallenge, 8); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + /* Same key in both directions. 
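+     *
+     * Illustrative note, not part of the upstream lwIP file: per RFC 3079 the
+     * MS-CHAPv1 start key is taken from SHA-1(PasswordHashHash |
+     * PasswordHashHash | Challenge), and the same value seeds both the
+     * compressor and decompressor keys set below.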
*/ + mppe_set_key(pcb, &pcb->mppe_comp, Digest); + mppe_set_key(pcb, &pcb->mppe_decomp, Digest); + + pcb->mppe_keys_set = 1; +} + +/* + * Set mppe_xxxx_key from MS-CHAPv2 credentials. (see RFC 3079) + */ +static void SetMasterKeys(ppp_pcb *pcb, const char *secret, int secret_len, u_char NTResponse[24], int IsServer) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + lwip_sha1_context sha1Context; + u_char MasterKey[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + u_char Digest[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + const u_char *s; + + /* "This is the MPPE Master Key" */ + static const u_char Magic1[27] = + { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x4d, 0x50, 0x50, 0x45, 0x20, 0x4d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x20, 0x4b, 0x65, 0x79 }; + /* "On the client side, this is the send key; " + "on the server side, it is the receive key." */ + static const u_char Magic2[84] = + { 0x4f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x64, 0x65, 0x2c, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, 0x6b, 0x65, 0x79, + 0x3b, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x73, 0x69, 0x64, 0x65, + 0x2c, 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x2e }; + /* "On the client side, this is the receive key; " + "on the server side, it is the send key." */ + static const u_char Magic3[84] = + { 0x4f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x64, 0x65, 0x2c, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x3b, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x73, + 0x69, 0x64, 0x65, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, + 0x6b, 0x65, 0x79, 0x2e }; + + /* Hash (x2) the Unicode version of the secret (== password). 
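+     *
+     * Illustrative note, not part of the upstream lwIP file: from the double
+     * hash computed below, RFC 3079 derives
+     *   MasterKey      = first 16 octets of SHA-1(PasswordHashHash | NTResponse | Magic1)
+     *   SessionKey_dir = SHA-1(MasterKey | SHApad1 | Magic2-or-Magic3 | SHApad2)
+     * where Magic2/Magic3 select the send or receive direction depending on
+     * whether we are the server.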
*/ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), PasswordHashHash); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, NTResponse, 24); + lwip_sha1_update(&sha1Context, Magic1, sizeof(Magic1)); + lwip_sha1_finish(&sha1Context, MasterKey); + lwip_sha1_free(&sha1Context); + + /* + * generate send key + */ + if (IsServer) + s = Magic3; + else + s = Magic2; + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, MasterKey, 16); + lwip_sha1_update(&sha1Context, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1Context, s, 84); + lwip_sha1_update(&sha1Context, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + mppe_set_key(pcb, &pcb->mppe_comp, Digest); + + /* + * generate recv key + */ + if (IsServer) + s = Magic2; + else + s = Magic3; + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, MasterKey, 16); + lwip_sha1_update(&sha1Context, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1Context, s, 84); + lwip_sha1_update(&sha1Context, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + mppe_set_key(pcb, &pcb->mppe_decomp, Digest); + + pcb->mppe_keys_set = 1; +} + +#endif /* MPPE_SUPPORT */ + + +static void ChapMS(ppp_pcb *pcb, const u_char *rchallenge, const char *secret, int secret_len, + unsigned char *response) { +#if !MPPE_SUPPORT + LWIP_UNUSED_ARG(pcb); +#endif /* !MPPE_SUPPORT */ + BZERO(response, MS_CHAP_RESPONSE_LEN); + + ChapMS_NT(rchallenge, secret, secret_len, &response[MS_CHAP_NTRESP]); + +#ifdef MSLANMAN + ChapMS_LANMan(rchallenge, secret, secret_len, + &response[MS_CHAP_LANMANRESP]); + + /* preferred method is set by option */ + response[MS_CHAP_USENT] = !ms_lanman; +#else + response[MS_CHAP_USENT] = 1; +#endif + +#if MPPE_SUPPORT + Set_Start_Key(pcb, rchallenge, secret, secret_len); +#endif /* MPPE_SUPPORT */ +} + + +/* + * If PeerChallenge is NULL, one is generated and the PeerChallenge + * field of response is filled in. Call this way when generating a response. + * If PeerChallenge is supplied, it is copied into the PeerChallenge field. + * Call this way when verifying a response (or debugging). + * Do not call with PeerChallenge = response. + * + * The PeerChallenge field of response is then used for calculation of the + * Authenticator Response. + */ +static void ChapMS2(ppp_pcb *pcb, const u_char *rchallenge, const u_char *PeerChallenge, + const char *user, const char *secret, int secret_len, unsigned char *response, + u_char authResponse[], int authenticator) { + /* ARGSUSED */ + LWIP_UNUSED_ARG(authenticator); +#if !MPPE_SUPPORT + LWIP_UNUSED_ARG(pcb); +#endif /* !MPPE_SUPPORT */ + + BZERO(response, MS_CHAP2_RESPONSE_LEN); + + /* Generate the Peer-Challenge if requested, or copy it if supplied. */ + if (!PeerChallenge) + magic_random_bytes(&response[MS_CHAP2_PEER_CHALLENGE], MS_CHAP2_PEER_CHAL_LEN); + else + MEMCPY(&response[MS_CHAP2_PEER_CHALLENGE], PeerChallenge, + MS_CHAP2_PEER_CHAL_LEN); + + /* Generate the NT-Response */ + ChapMS2_NT(rchallenge, &response[MS_CHAP2_PEER_CHALLENGE], user, + secret, secret_len, &response[MS_CHAP2_NTRESP]); + + /* Generate the Authenticator Response. 
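+     *
+     * Illustrative note, not part of the upstream lwIP file: authResponse is
+     * the 40-digit hex string that the authenticator later echoes back as
+     * "S=<authResponse>" in its Success packet; the peer recomputes it and
+     * compares the two values (RFC 2759).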
*/ + GenerateAuthenticatorResponsePlain(secret, secret_len, + &response[MS_CHAP2_NTRESP], + &response[MS_CHAP2_PEER_CHALLENGE], + rchallenge, user, authResponse); + +#if MPPE_SUPPORT + SetMasterKeys(pcb, secret, secret_len, + &response[MS_CHAP2_NTRESP], authenticator); +#endif /* MPPE_SUPPORT */ +} + +#if 0 /* UNUSED */ +#if MPPE_SUPPORT +/* + * Set MPPE options from plugins. + */ +void set_mppe_enc_types(int policy, int types) { + /* Early exit for unknown policies. */ + if (policy != MPPE_ENC_POL_ENC_ALLOWED || + policy != MPPE_ENC_POL_ENC_REQUIRED) + return; + + /* Don't modify MPPE if it's optional and wasn't already configured. */ + if (policy == MPPE_ENC_POL_ENC_ALLOWED && !ccp_wantoptions[0].mppe) + return; + + /* + * Disable undesirable encryption types. Note that we don't ENABLE + * any encryption types, to avoid overriding manual configuration. + */ + switch(types) { + case MPPE_ENC_TYPES_RC4_40: + ccp_wantoptions[0].mppe &= ~MPPE_OPT_128; /* disable 128-bit */ + break; + case MPPE_ENC_TYPES_RC4_128: + ccp_wantoptions[0].mppe &= ~MPPE_OPT_40; /* disable 40-bit */ + break; + default: + break; + } +} +#endif /* MPPE_SUPPORT */ +#endif /* UNUSED */ + +const struct chap_digest_type chapms_digest = { + CHAP_MICROSOFT, /* code */ +#if PPP_SERVER + chapms_generate_challenge, + chapms_verify_response, +#endif /* PPP_SERVER */ + chapms_make_response, + NULL, /* check_success */ + chapms_handle_failure, +}; + +const struct chap_digest_type chapms2_digest = { + CHAP_MICROSOFT_V2, /* code */ +#if PPP_SERVER + chapms2_generate_challenge, + chapms2_verify_response, +#endif /* PPP_SERVER */ + chapms2_make_response, + chapms2_check_success, + chapms_handle_failure, +}; + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c new file mode 100644 index 000000000..26c6c30db --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c @@ -0,0 +1,465 @@ +/* + * demand.c - Support routines for demand-dialling. + * + * Copyright (c) 1996-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && DEMAND_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PPP_FILTER +#include +#endif + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/lcp.h" + +char *frame; +int framelen; +int framemax; +int escape_flag; +int flush_flag; +int fcs; + +struct packet { + int length; + struct packet *next; + unsigned char data[1]; +}; + +struct packet *pend_q; +struct packet *pend_qtail; + +static int active_packet (unsigned char *, int); + +/* + * demand_conf - configure the interface for doing dial-on-demand. + */ +void +demand_conf() +{ + int i; + const struct protent *protp; + +/* framemax = lcp_allowoptions[0].mru; + if (framemax < PPP_MRU) */ + framemax = PPP_MRU; + framemax += PPP_HDRLEN + PPP_FCSLEN; + frame = malloc(framemax); + if (frame == NULL) + novm("demand frame"); + framelen = 0; + pend_q = NULL; + escape_flag = 0; + flush_flag = 0; + fcs = PPP_INITFCS; + + netif_set_mtu(pcb, LWIP_MIN(lcp_allowoptions[0].mru, PPP_MRU)); + if (ppp_send_config(pcb, PPP_MRU, (u32_t) 0, 0, 0) < 0 + || ppp_recv_config(pcb, PPP_MRU, (u32_t) 0, 0, 0) < 0) + fatal("Couldn't set up demand-dialled PPP interface: %m"); + +#ifdef PPP_FILTER + set_filters(&pass_filter, &active_filter); +#endif + + /* + * Call the demand_conf procedure for each protocol that's got one. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + ((*protp->demand_conf)(pcb)); +/* FIXME: find a way to die() here */ +#if 0 + if (!((*protp->demand_conf)(pcb))) + die(1); +#endif +} + + +/* + * demand_block - set each network protocol to block further packets. + */ +void +demand_block() +{ + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_QUEUE); + get_loop_output(); +} + +/* + * demand_discard - set each network protocol to discard packets + * with an error. + */ +void +demand_discard() +{ + struct packet *pkt, *nextpkt; + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_ERROR); + get_loop_output(); + + /* discard all saved packets */ + for (pkt = pend_q; pkt != NULL; pkt = nextpkt) { + nextpkt = pkt->next; + free(pkt); + } + pend_q = NULL; + framelen = 0; + flush_flag = 0; + escape_flag = 0; + fcs = PPP_INITFCS; +} + +/* + * demand_unblock - set each enabled network protocol to pass packets. + */ +void +demand_unblock() +{ + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_PASS); +} + +/* + * FCS lookup table as calculated by genfcstab. 
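+ * Illustrative note, not part of the upstream lwIP file: the table implements
+ * the PPP FCS-16 (CRC-CCITT, polynomial x^16 + x^12 + x^5 + 1, bit-reversed),
+ * matching the sample code in RFC 1662; loop_chars() below folds each received
+ * octet into the running FCS with PPP_FCS() and accepts a frame only when the
+ * accumulated value equals PPP_GOODFCS.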
+ */ +static u_short fcstab[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; + +/* + * loop_chars - process characters received from the loopback. + * Calls loop_frame when a complete frame has been accumulated. + * Return value is 1 if we need to bring up the link, 0 otherwise. + */ +int +loop_chars(p, n) + unsigned char *p; + int n; +{ + int c, rv; + + rv = 0; + +/* check for synchronous connection... */ + + if ( (p[0] == 0xFF) && (p[1] == 0x03) ) { + rv = loop_frame(p,n); + return rv; + } + + for (; n > 0; --n) { + c = *p++; + if (c == PPP_FLAG) { + if (!escape_flag && !flush_flag + && framelen > 2 && fcs == PPP_GOODFCS) { + framelen -= 2; + if (loop_frame((unsigned char *)frame, framelen)) + rv = 1; + } + framelen = 0; + flush_flag = 0; + escape_flag = 0; + fcs = PPP_INITFCS; + continue; + } + if (flush_flag) + continue; + if (escape_flag) { + c ^= PPP_TRANS; + escape_flag = 0; + } else if (c == PPP_ESCAPE) { + escape_flag = 1; + continue; + } + if (framelen >= framemax) { + flush_flag = 1; + continue; + } + frame[framelen++] = c; + fcs = PPP_FCS(fcs, c); + } + return rv; +} + +/* + * loop_frame - given a frame obtained from the loopback, + * decide whether to bring up the link or not, and, if we want + * to transmit this frame later, put it on the pending queue. + * Return value is 1 if we need to bring up the link, 0 otherwise. + * We assume that the kernel driver has already applied the + * pass_filter, so we won't get packets it rejected. 
+ * We apply the active_filter to see if we want this packet to + * bring up the link. + */ +int +loop_frame(frame, len) + unsigned char *frame; + int len; +{ + struct packet *pkt; + + /* dbglog("from loop: %P", frame, len); */ + if (len < PPP_HDRLEN) + return 0; + if ((PPP_PROTOCOL(frame) & 0x8000) != 0) + return 0; /* shouldn't get any of these anyway */ + if (!active_packet(frame, len)) + return 0; + + pkt = (struct packet *) malloc(sizeof(struct packet) + len); + if (pkt != NULL) { + pkt->length = len; + pkt->next = NULL; + memcpy(pkt->data, frame, len); + if (pend_q == NULL) + pend_q = pkt; + else + pend_qtail->next = pkt; + pend_qtail = pkt; + } + return 1; +} + +/* + * demand_rexmit - Resend all those frames which we got via the + * loopback, now that the real serial link is up. + */ +void +demand_rexmit(proto, newip) + int proto; + u32_t newip; +{ + struct packet *pkt, *prev, *nextpkt; + unsigned short checksum; + unsigned short pkt_checksum = 0; + unsigned iphdr; + struct timeval tv; + char cv = 0; + char ipstr[16]; + + prev = NULL; + pkt = pend_q; + pend_q = NULL; + tv.tv_sec = 1; + tv.tv_usec = 0; + select(0,NULL,NULL,NULL,&tv); /* Sleep for 1 Seconds */ + for (; pkt != NULL; pkt = nextpkt) { + nextpkt = pkt->next; + if (PPP_PROTOCOL(pkt->data) == proto) { + if ( (proto == PPP_IP) && newip ) { + /* Get old checksum */ + + iphdr = (pkt->data[4] & 15) << 2; + checksum = *((unsigned short *) (pkt->data+14)); + if (checksum == 0xFFFF) { + checksum = 0; + } + + + if (pkt->data[13] == 17) { + pkt_checksum = *((unsigned short *) (pkt->data+10+iphdr)); + if (pkt_checksum) { + cv = 1; + if (pkt_checksum == 0xFFFF) { + pkt_checksum = 0; + } + } + else { + cv = 0; + } + } + + if (pkt->data[13] == 6) { + pkt_checksum = *((unsigned short *) (pkt->data+20+iphdr)); + cv = 1; + if (pkt_checksum == 0xFFFF) { + pkt_checksum = 0; + } + } + + /* Delete old Source-IP-Address */ + checksum -= *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + checksum -= *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + pkt_checksum -= *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + pkt_checksum -= *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + /* Change Source-IP-Address */ + * ((u32_t *) (pkt->data + 16)) = newip; + + /* Add new Source-IP-Address */ + checksum += *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + checksum += *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + pkt_checksum += *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + pkt_checksum += *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + /* Write new checksum */ + if (!checksum) { + checksum = 0xFFFF; + } + *((unsigned short *) (pkt->data+14)) = checksum; + if (pkt->data[13] == 6) { + *((unsigned short *) (pkt->data+20+iphdr)) = pkt_checksum; + } + if (cv && (pkt->data[13] == 17) ) { + *((unsigned short *) (pkt->data+10+iphdr)) = pkt_checksum; + } + + /* Log Packet */ + strcpy(ipstr,inet_ntoa(*( (struct in_addr *) (pkt->data+16)))); + if (pkt->data[13] == 1) { + syslog(LOG_INFO,"Open ICMP %s -> %s\n", + ipstr, + inet_ntoa(*( (struct in_addr *) (pkt->data+20)))); + } else { + syslog(LOG_INFO,"Open %s %s:%d -> %s:%d\n", + pkt->data[13] == 6 ? 
"TCP" : "UDP", + ipstr, + ntohs(*( (short *) (pkt->data+iphdr+4))), + inet_ntoa(*( (struct in_addr *) (pkt->data+20))), + ntohs(*( (short *) (pkt->data+iphdr+6)))); + } + } + output(pcb, pkt->data, pkt->length); + free(pkt); + } else { + if (prev == NULL) + pend_q = pkt; + else + prev->next = pkt; + prev = pkt; + } + } + pend_qtail = prev; + if (prev != NULL) + prev->next = NULL; +} + +/* + * Scan a packet to decide whether it is an "active" packet, + * that is, whether it is worth bringing up the link for. + */ +static int +active_packet(p, len) + unsigned char *p; + int len; +{ + int proto, i; + const struct protent *protp; + + if (len < PPP_HDRLEN) + return 0; + proto = PPP_PROTOCOL(p); +#ifdef PPP_FILTER + p[0] = 1; /* outbound packet indicator */ + if ((pass_filter.bf_len != 0 + && bpf_filter(pass_filter.bf_insns, p, len, len) == 0) + || (active_filter.bf_len != 0 + && bpf_filter(active_filter.bf_insns, p, len, len) == 0)) { + p[0] = 0xff; + return 0; + } + p[0] = 0xff; +#endif + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol < 0xC000 && (protp->protocol & ~0x8000) == proto) { + if (protp->active_pkt == NULL) + return 1; + return (*protp->active_pkt)(p, len); + } + } + return 0; /* not a supported protocol !!?? */ +} + +#endif /* PPP_SUPPORT && DEMAND_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c new file mode 100644 index 000000000..8fb56368e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c @@ -0,0 +1,2423 @@ +/* + * eap.c - Extensible Authentication Protocol for PPP (RFC 2284) + * + * Copyright (c) 2001 by Sun Microsystems, Inc. + * All rights reserved. + * + * Non-exclusive rights to redistribute, modify, translate, and use + * this software in source and binary forms, in whole or in part, is + * hereby granted, provided that the above copyright notice is + * duplicated in any source form, and that neither the name of the + * copyright holder nor the author is used to endorse or promote + * products derived from this software. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Original version by James Carlson + * + * This implementation of EAP supports MD5-Challenge and SRP-SHA1 + * authentication styles. Note that support of MD5-Challenge is a + * requirement of RFC 2284, and that it's essentially just a + * reimplementation of regular RFC 1994 CHAP using EAP messages. + * + * As an authenticator ("server"), there are multiple phases for each + * style. In the first phase of each style, the unauthenticated peer + * name is queried using the EAP Identity request type. If the + * "remotename" option is used, then this phase is skipped, because + * the peer's name is presumed to be known. + * + * For MD5-Challenge, there are two phases, and the second phase + * consists of sending the challenge itself and handling the + * associated response. + * + * For SRP-SHA1, there are four phases. The second sends 's', 'N', + * and 'g'. The reply contains 'A'. The third sends 'B', and the + * reply contains 'M1'. The forth sends the 'M2' value. + * + * As an authenticatee ("client"), there's just a single phase -- + * responding to the queries generated by the peer. EAP is an + * authenticator-driven protocol. 
+ * + * Based on draft-ietf-pppext-eap-srp-03.txt. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && EAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/eap.h" +#include "netif/ppp/magic.h" +#include "netif/ppp/pppcrypt.h" + +#ifdef USE_SRP +#include +#include +#include +#endif /* USE_SRP */ + +#ifndef SHA_DIGESTSIZE +#define SHA_DIGESTSIZE 20 +#endif + +#ifdef USE_SRP +static char *pn_secret = NULL; /* Pseudonym generating secret */ +#endif + +#if PPP_OPTIONS +/* + * Command-line options. + */ +static option_t eap_option_list[] = { + { "eap-restart", o_int, &eap_states[0].es_server.ea_timeout, + "Set retransmit timeout for EAP Requests (server)" }, + { "eap-max-sreq", o_int, &eap_states[0].es_server.ea_maxrequests, + "Set max number of EAP Requests sent (server)" }, + { "eap-timeout", o_int, &eap_states[0].es_client.ea_timeout, + "Set time limit for peer EAP authentication" }, + { "eap-max-rreq", o_int, &eap_states[0].es_client.ea_maxrequests, + "Set max number of EAP Requests allows (client)" }, + { "eap-interval", o_int, &eap_states[0].es_rechallenge, + "Set interval for EAP rechallenge" }, +#ifdef USE_SRP + { "srp-interval", o_int, &eap_states[0].es_lwrechallenge, + "Set interval for SRP lightweight rechallenge" }, + { "srp-pn-secret", o_string, &pn_secret, + "Long term pseudonym generation secret" }, + { "srp-use-pseudonym", o_bool, &eap_states[0].es_usepseudo, + "Use pseudonym if offered one by server", 1 }, +#endif + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points. + */ +static void eap_init(ppp_pcb *pcb); +static void eap_input(ppp_pcb *pcb, u_char *inp, int inlen); +static void eap_protrej(ppp_pcb *pcb); +static void eap_lowerup(ppp_pcb *pcb); +static void eap_lowerdown(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int eap_printpkt(const u_char *inp, int inlen, + void (*)(void *arg, const char *fmt, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent eap_protent = { + PPP_EAP, /* protocol number */ + eap_init, /* initialization procedure */ + eap_input, /* process a received packet */ + eap_protrej, /* process a received protocol-reject */ + eap_lowerup, /* lower layer has gone up */ + eap_lowerdown, /* lower layer has gone down */ + NULL, /* open the protocol */ + NULL, /* close the protocol */ +#if PRINTPKT_SUPPORT + eap_printpkt, /* print a packet in readable form */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* process a received data packet */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "EAP", /* text name of protocol */ + NULL, /* text name of corresponding data protocol */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + eap_option_list, /* list of command-line options */ + NULL, /* check requested options; assign defaults */ +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, /* configure interface for demand-dial */ + NULL /* say whether to bring up link for this pkt */ +#endif /* DEMAND_SUPPORT */ +}; + +#ifdef USE_SRP +/* + * A well-known 2048 bit modulus. 
+ */ +static const u_char wkmodulus[] = { + 0xAC, 0x6B, 0xDB, 0x41, 0x32, 0x4A, 0x9A, 0x9B, + 0xF1, 0x66, 0xDE, 0x5E, 0x13, 0x89, 0x58, 0x2F, + 0xAF, 0x72, 0xB6, 0x65, 0x19, 0x87, 0xEE, 0x07, + 0xFC, 0x31, 0x92, 0x94, 0x3D, 0xB5, 0x60, 0x50, + 0xA3, 0x73, 0x29, 0xCB, 0xB4, 0xA0, 0x99, 0xED, + 0x81, 0x93, 0xE0, 0x75, 0x77, 0x67, 0xA1, 0x3D, + 0xD5, 0x23, 0x12, 0xAB, 0x4B, 0x03, 0x31, 0x0D, + 0xCD, 0x7F, 0x48, 0xA9, 0xDA, 0x04, 0xFD, 0x50, + 0xE8, 0x08, 0x39, 0x69, 0xED, 0xB7, 0x67, 0xB0, + 0xCF, 0x60, 0x95, 0x17, 0x9A, 0x16, 0x3A, 0xB3, + 0x66, 0x1A, 0x05, 0xFB, 0xD5, 0xFA, 0xAA, 0xE8, + 0x29, 0x18, 0xA9, 0x96, 0x2F, 0x0B, 0x93, 0xB8, + 0x55, 0xF9, 0x79, 0x93, 0xEC, 0x97, 0x5E, 0xEA, + 0xA8, 0x0D, 0x74, 0x0A, 0xDB, 0xF4, 0xFF, 0x74, + 0x73, 0x59, 0xD0, 0x41, 0xD5, 0xC3, 0x3E, 0xA7, + 0x1D, 0x28, 0x1E, 0x44, 0x6B, 0x14, 0x77, 0x3B, + 0xCA, 0x97, 0xB4, 0x3A, 0x23, 0xFB, 0x80, 0x16, + 0x76, 0xBD, 0x20, 0x7A, 0x43, 0x6C, 0x64, 0x81, + 0xF1, 0xD2, 0xB9, 0x07, 0x87, 0x17, 0x46, 0x1A, + 0x5B, 0x9D, 0x32, 0xE6, 0x88, 0xF8, 0x77, 0x48, + 0x54, 0x45, 0x23, 0xB5, 0x24, 0xB0, 0xD5, 0x7D, + 0x5E, 0xA7, 0x7A, 0x27, 0x75, 0xD2, 0xEC, 0xFA, + 0x03, 0x2C, 0xFB, 0xDB, 0xF5, 0x2F, 0xB3, 0x78, + 0x61, 0x60, 0x27, 0x90, 0x04, 0xE5, 0x7A, 0xE6, + 0xAF, 0x87, 0x4E, 0x73, 0x03, 0xCE, 0x53, 0x29, + 0x9C, 0xCC, 0x04, 0x1C, 0x7B, 0xC3, 0x08, 0xD8, + 0x2A, 0x56, 0x98, 0xF3, 0xA8, 0xD0, 0xC3, 0x82, + 0x71, 0xAE, 0x35, 0xF8, 0xE9, 0xDB, 0xFB, 0xB6, + 0x94, 0xB5, 0xC8, 0x03, 0xD8, 0x9F, 0x7A, 0xE4, + 0x35, 0xDE, 0x23, 0x6D, 0x52, 0x5F, 0x54, 0x75, + 0x9B, 0x65, 0xE3, 0x72, 0xFC, 0xD6, 0x8E, 0xF2, + 0x0F, 0xA7, 0x11, 0x1F, 0x9E, 0x4A, 0xFF, 0x73 +}; +#endif + +#if PPP_SERVER +/* Local forward declarations. */ +static void eap_server_timeout(void *arg); +#endif /* PPP_SERVER */ + +/* + * Convert EAP state code to printable string for debug. + */ +static const char * eap_state_name(enum eap_state_code esc) +{ + static const char *state_names[] = { EAP_STATES }; + + return (state_names[(int)esc]); +} + +/* + * eap_init - Initialize state for an EAP user. This is currently + * called once by main() during start-up. + */ +static void eap_init(ppp_pcb *pcb) { + + BZERO(&pcb->eap, sizeof(eap_state)); +#if PPP_SERVER + pcb->eap.es_server.ea_id = magic(); +#endif /* PPP_SERVER */ +} + +/* + * eap_client_timeout - Give up waiting for the peer to send any + * Request messages. + */ +static void eap_client_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (!eap_client_active(pcb)) + return; + + ppp_error("EAP: timeout waiting for Request from peer"); + auth_withpeer_fail(pcb, PPP_EAP); + pcb->eap.es_client.ea_state = eapBadAuth; +} + +/* + * eap_authwithpeer - Authenticate to our peer (behave as client). + * + * Start client state and wait for requests. This is called only + * after eap_lowerup. + */ +void eap_authwithpeer(ppp_pcb *pcb, const char *localname) { + + if(NULL == localname) + return; + + /* Save the peer name we're given */ + pcb->eap.es_client.ea_name = localname; + pcb->eap.es_client.ea_namelen = strlen(localname); + + pcb->eap.es_client.ea_state = eapListen; + + /* + * Start a timer so that if the other end just goes + * silent, we don't sit here waiting forever. + */ + if (pcb->settings.eap_req_time > 0) + TIMEOUT(eap_client_timeout, pcb, + pcb->settings.eap_req_time); +} + +#if PPP_SERVER +/* + * Format a standard EAP Failure message and send it to the peer. 
+ * (Server operation) + */ +static void eap_send_failure(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + EAP_HEADERLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_FAILURE, outp); + pcb->eap.es_server.ea_id++; + PUTCHAR(pcb->eap.es_server.ea_id, outp); + PUTSHORT(EAP_HEADERLEN, outp); + + ppp_write(pcb, p); + + pcb->eap.es_server.ea_state = eapBadAuth; + auth_peer_fail(pcb, PPP_EAP); +} + +/* + * Format a standard EAP Success message and send it to the peer. + * (Server operation) + */ +static void eap_send_success(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + EAP_HEADERLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_SUCCESS, outp); + pcb->eap.es_server.ea_id++; + PUTCHAR(pcb->eap.es_server.ea_id, outp); + PUTSHORT(EAP_HEADERLEN, outp); + + ppp_write(pcb, p); + + auth_peer_success(pcb, PPP_EAP, 0, + pcb->eap.es_server.ea_peer, pcb->eap.es_server.ea_peerlen); +} +#endif /* PPP_SERVER */ + +#ifdef USE_SRP +/* + * Set DES key according to pseudonym-generating secret and current + * date. + */ +static bool +pncrypt_setkey(int timeoffs) +{ + struct tm *tp; + char tbuf[9]; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + time_t reftime; + + if (pn_secret == NULL) + return (0); + reftime = time(NULL) + timeoffs; + tp = localtime(&reftime); + SHA1Init(&ctxt); + SHA1Update(&ctxt, pn_secret, strlen(pn_secret)); + strftime(tbuf, sizeof (tbuf), "%Y%m%d", tp); + SHA1Update(&ctxt, tbuf, strlen(tbuf)); + SHA1Final(dig, &ctxt); + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + return (DesSetkey(dig)); +} + +static char base64[] = +"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +struct b64state { + u32_t bs_bits; + int bs_offs; +}; + +static int +b64enc(bs, inp, inlen, outp) +struct b64state *bs; +u_char *inp; +int inlen; +u_char *outp; +{ + int outlen = 0; + + while (inlen > 0) { + bs->bs_bits = (bs->bs_bits << 8) | *inp++; + inlen--; + bs->bs_offs += 8; + if (bs->bs_offs >= 24) { + *outp++ = base64[(bs->bs_bits >> 18) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 12) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 6) & 0x3F]; + *outp++ = base64[bs->bs_bits & 0x3F]; + outlen += 4; + bs->bs_offs = 0; + bs->bs_bits = 0; + } + } + return (outlen); +} + +static int +b64flush(bs, outp) +struct b64state *bs; +u_char *outp; +{ + int outlen = 0; + + if (bs->bs_offs == 8) { + *outp++ = base64[(bs->bs_bits >> 2) & 0x3F]; + *outp++ = base64[(bs->bs_bits << 4) & 0x3F]; + outlen = 2; + } else if (bs->bs_offs == 16) { + *outp++ = base64[(bs->bs_bits >> 10) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 4) & 0x3F]; + *outp++ = base64[(bs->bs_bits << 2) & 0x3F]; + outlen = 3; + } + bs->bs_offs = 0; + bs->bs_bits = 0; + return (outlen); +} + +static int +b64dec(bs, inp, inlen, outp) +struct b64state *bs; +u_char *inp; +int inlen; +u_char *outp; +{ + int outlen = 0; + char *cp; + + while (inlen > 0) { + if ((cp = strchr(base64, *inp++)) == NULL) + break; + bs->bs_bits = (bs->bs_bits << 6) | (cp - base64); + inlen--; + bs->bs_offs += 6; + if (bs->bs_offs >= 8) { + *outp++ = bs->bs_bits >> (bs->bs_offs - 8); + outlen++; + bs->bs_offs -= 8; + } + } + 
return (outlen); +} +#endif /* USE_SRP */ + +#if PPP_SERVER +/* + * Assume that current waiting server state is complete and figure + * next state to use based on available authentication data. 'status' + * indicates if there was an error in handling the last query. It is + * 0 for success and non-zero for failure. + */ +static void eap_figure_next_state(ppp_pcb *pcb, int status) { +#ifdef USE_SRP + unsigned char secbuf[MAXSECRETLEN], clear[8], *sp, *dp; + struct t_pw tpw; + struct t_confent *tce, mytce; + char *cp, *cp2; + struct t_server *ts; + int id, i, plen, toffs; + u_char vals[2]; + struct b64state bs; +#endif /* USE_SRP */ + + pcb->settings.eap_timeout_time = pcb->eap.es_savedtime; + switch (pcb->eap.es_server.ea_state) { + case eapBadAuth: + return; + + case eapIdentify: +#ifdef USE_SRP + /* Discard any previous session. */ + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0) { + pcb->eap.es_server.ea_state = eapBadAuth; + break; + } +#ifdef USE_SRP + /* If we've got a pseudonym, try to decode to real name. */ + if (pcb->eap.es_server.ea_peerlen > SRP_PSEUDO_LEN && + strncmp(pcb->eap.es_server.ea_peer, SRP_PSEUDO_ID, + SRP_PSEUDO_LEN) == 0 && + (pcb->eap.es_server.ea_peerlen - SRP_PSEUDO_LEN) * 3 / 4 < + sizeof (secbuf)) { + BZERO(&bs, sizeof (bs)); + plen = b64dec(&bs, + pcb->eap.es_server.ea_peer + SRP_PSEUDO_LEN, + pcb->eap.es_server.ea_peerlen - SRP_PSEUDO_LEN, + secbuf); + toffs = 0; + for (i = 0; i < 5; i++) { + pncrypt_setkey(toffs); + toffs -= 86400; + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + if (!DesDecrypt(secbuf, clear)) { + ppp_dbglog("no DES here; cannot decode " + "pseudonym"); + return; + } + id = *(unsigned char *)clear; + if (id + 1 <= plen && id + 9 > plen) + break; + } + if (plen % 8 == 0 && i < 5) { + /* + * Note that this is always shorter than the + * original stored string, so there's no need + * to realloc. + */ + if ((i = plen = *(unsigned char *)clear) > 7) + i = 7; + pcb->eap.es_server.ea_peerlen = plen; + dp = (unsigned char *)pcb->eap.es_server.ea_peer; + MEMCPY(dp, clear + 1, i); + plen -= i; + dp += i; + sp = secbuf + 8; + while (plen > 0) { + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesDecrypt(sp, dp); + sp += 8; + dp += 8; + plen -= 8; + } + pcb->eap.es_server.ea_peer[ + pcb->eap.es_server.ea_peerlen] = '\0'; + ppp_dbglog("decoded pseudonym to \"%.*q\"", + pcb->eap.es_server.ea_peerlen, + pcb->eap.es_server.ea_peer); + } else { + ppp_dbglog("failed to decode real name"); + /* Stay in eapIdentfy state; requery */ + break; + } + } + /* Look up user in secrets database. 
*/ + if (get_srp_secret(pcb->eap.es_unit, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_name, (char *)secbuf, 1) != 0) { + /* Set up default in case SRP entry is bad */ + pcb->eap.es_server.ea_state = eapMD5Chall; + /* Get t_confent based on index in srp-secrets */ + id = strtol((char *)secbuf, &cp, 10); + if (*cp++ != ':' || id < 0) + break; + if (id == 0) { + mytce.index = 0; + mytce.modulus.data = (u_char *)wkmodulus; + mytce.modulus.len = sizeof (wkmodulus); + mytce.generator.data = (u_char *)"\002"; + mytce.generator.len = 1; + tce = &mytce; + } else if ((tce = gettcid(id)) != NULL) { + /* + * Client will have to verify this modulus/ + * generator combination, and that will take + * a while. Lengthen the timeout here. + */ + if (pcb->settings.eap_timeout_time > 0 && + pcb->settings.eap_timeout_time < 30) + pcb->settings.eap_timeout_time = 30; + } else { + break; + } + if ((cp2 = strchr(cp, ':')) == NULL) + break; + *cp2++ = '\0'; + tpw.pebuf.name = pcb->eap.es_server.ea_peer; + tpw.pebuf.password.len = t_fromb64((char *)tpw.pwbuf, + cp); + tpw.pebuf.password.data = tpw.pwbuf; + tpw.pebuf.salt.len = t_fromb64((char *)tpw.saltbuf, + cp2); + tpw.pebuf.salt.data = tpw.saltbuf; + if ((ts = t_serveropenraw(&tpw.pebuf, tce)) == NULL) + break; + pcb->eap.es_server.ea_session = (void *)ts; + pcb->eap.es_server.ea_state = eapSRP1; + vals[0] = pcb->eap.es_server.ea_id + 1; + vals[1] = EAPT_SRP; + t_serveraddexdata(ts, vals, 2); + /* Generate B; must call before t_servergetkey() */ + t_servergenexp(ts); + break; + } +#endif /* USE_SRP */ + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + + case eapSRP1: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status == 1) { + pcb->eap.es_server.ea_state = eapMD5Chall; + } else if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapSRP2; + } + break; + + case eapSRP2: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapSRP3; + } + break; + + case eapSRP3: + case eapSRP4: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapOpen; + } + break; + + case eapMD5Chall: + if (status != 0) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapOpen; + } + break; + + default: + pcb->eap.es_server.ea_state = eapBadAuth; + break; + } + if (pcb->eap.es_server.ea_state == eapBadAuth) + eap_send_failure(pcb); +} + +/* + * Format an EAP Request message and send it to the peer. Message + * type depends on current state. 
(Server operation) + */ +static void eap_send_request(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + u_char *lenloc; + int outlen; + int len; + const char *str; +#ifdef USE_SRP + struct t_server *ts; + u_char clear[8], cipher[8], dig[SHA_DIGESTSIZE], *optr, *cp; + int i, j; + struct b64state b64; + SHA1_CTX ctxt; +#endif /* USE_SRP */ + + /* Handle both initial auth and restart */ + if (pcb->eap.es_server.ea_state < eapIdentify && + pcb->eap.es_server.ea_state != eapInitial) { + pcb->eap.es_server.ea_state = eapIdentify; +#if PPP_REMOTENAME + if (pcb->settings.explicit_remote && pcb->remote_name) { + /* + * If we already know the peer's + * unauthenticated name, then there's no + * reason to ask. Go to next state instead. + */ + int len = (int)strlen(pcb->remote_name); + if (len > MAXNAMELEN) { + len = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_server.ea_peer, pcb->remote_name, len); + pcb->eap.es_server.ea_peer[len] = '\0'; + pcb->eap.es_server.ea_peerlen = len; + eap_figure_next_state(pcb, 0); + } +#endif /* PPP_REMOTENAME */ + } + + if (pcb->settings.eap_max_transmits > 0 && + pcb->eap.es_server.ea_requests >= pcb->settings.eap_max_transmits) { + if (pcb->eap.es_server.ea_responses > 0) + ppp_error("EAP: too many Requests sent"); + else + ppp_error("EAP: no response to Requests"); + eap_send_failure(pcb); + return; + } + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_CTRL_PBUF_MAX_SIZE), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_REQUEST, outp); + PUTCHAR(pcb->eap.es_server.ea_id, outp); + lenloc = outp; + INCPTR(2, outp); + + switch (pcb->eap.es_server.ea_state) { + case eapIdentify: + PUTCHAR(EAPT_IDENTITY, outp); + str = "Name"; + len = strlen(str); + MEMCPY(outp, str, len); + INCPTR(len, outp); + break; + + case eapMD5Chall: + PUTCHAR(EAPT_MD5CHAP, outp); + /* + * pick a random challenge length between + * EAP_MIN_CHALLENGE_LENGTH and EAP_MAX_CHALLENGE_LENGTH + */ + pcb->eap.es_challen = EAP_MIN_CHALLENGE_LENGTH + + magic_pow(EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH); + PUTCHAR(pcb->eap.es_challen, outp); + magic_random_bytes(pcb->eap.es_challenge, pcb->eap.es_challen); + MEMCPY(outp, pcb->eap.es_challenge, pcb->eap.es_challen); + INCPTR(pcb->eap.es_challen, outp); + MEMCPY(outp, pcb->eap.es_server.ea_name, pcb->eap.es_server.ea_namelen); + INCPTR(pcb->eap.es_server.ea_namelen, outp); + break; + +#ifdef USE_SRP + case eapSRP1: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_CHALLENGE, outp); + + PUTCHAR(pcb->eap.es_server.ea_namelen, outp); + MEMCPY(outp, pcb->eap.es_server.ea_name, pcb->eap.es_server.ea_namelen); + INCPTR(pcb->eap.es_server.ea_namelen, outp); + + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + PUTCHAR(ts->s.len, outp); + MEMCPY(outp, ts->s.data, ts->s.len); + INCPTR(ts->s.len, outp); + + if (ts->g.len == 1 && ts->g.data[0] == 2) { + PUTCHAR(0, outp); + } else { + PUTCHAR(ts->g.len, outp); + MEMCPY(outp, ts->g.data, ts->g.len); + INCPTR(ts->g.len, outp); + } + + if (ts->n.len != sizeof (wkmodulus) || + BCMP(ts->n.data, wkmodulus, sizeof (wkmodulus)) != 0) { + MEMCPY(outp, ts->n.data, ts->n.len); + INCPTR(ts->n.len, outp); + } + break; + + case eapSRP2: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_SKEY, outp); + + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + MEMCPY(outp, ts->B.data, ts->B.len); + INCPTR(ts->B.len, outp); + break; + + case eapSRP3: + PUTCHAR(EAPT_SRP, outp); + 
PUTCHAR(EAPSRP_SVALIDATOR, outp); + PUTLONG(SRPVAL_EBIT, outp); + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + MEMCPY(outp, t_serverresponse(ts), SHA_DIGESTSIZE); + INCPTR(SHA_DIGESTSIZE, outp); + + if (pncrypt_setkey(0)) { + /* Generate pseudonym */ + optr = outp; + cp = (unsigned char *)pcb->eap.es_server.ea_peer; + if ((j = i = pcb->eap.es_server.ea_peerlen) > 7) + j = 7; + clear[0] = i; + MEMCPY(clear + 1, cp, j); + i -= j; + cp += j; + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + if (!DesEncrypt(clear, cipher)) { + ppp_dbglog("no DES here; not generating pseudonym"); + break; + } + BZERO(&b64, sizeof (b64)); + outp++; /* space for pseudonym length */ + outp += b64enc(&b64, cipher, 8, outp); + while (i >= 8) { + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesEncrypt(cp, cipher); + outp += b64enc(&b64, cipher, 8, outp); + cp += 8; + i -= 8; + } + if (i > 0) { + MEMCPY(clear, cp, i); + cp += i; + magic_random_bytes(cp, 8-i); + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesEncrypt(clear, cipher); + outp += b64enc(&b64, cipher, 8, outp); + } + outp += b64flush(&b64, outp); + + /* Set length and pad out to next 20 octet boundary */ + i = outp - optr - 1; + *optr = i; + i %= SHA_DIGESTSIZE; + if (i != 0) { + magic_random_bytes(outp, SHA_DIGESTSIZE-i); + INCPTR(SHA_DIGESTSIZE-i, outp); + } + + /* Obscure the pseudonym with SHA1 hash */ + SHA1Init(&ctxt); + SHA1Update(&ctxt, &pcb->eap.es_server.ea_id, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_peerlen); + while (optr < outp) { + SHA1Final(dig, &ctxt); + cp = dig; + while (cp < dig + SHA_DIGESTSIZE) + *optr++ ^= *cp++; + SHA1Init(&ctxt); + SHA1Update(&ctxt, &pcb->eap.es_server.ea_id, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, optr - SHA_DIGESTSIZE, + SHA_DIGESTSIZE); + } + } + break; + + case eapSRP4: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_LWRECHALLENGE, outp); + pcb->eap.es_challen = EAP_MIN_CHALLENGE_LENGTH + + magic_pow(EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH); + magic_random_bytes(pcb->eap.es_challenge, pcb->eap.es_challen); + MEMCPY(outp, pcb->eap.es_challenge, pcb->eap.es_challen); + INCPTR(pcb->eap.es_challen, outp); + break; +#endif /* USE_SRP */ + + default: + return; + } + + outlen = (outp - (unsigned char*)p->payload) - PPP_HDRLEN; + PUTSHORT(outlen, lenloc); + + pbuf_realloc(p, outlen + PPP_HDRLEN); + ppp_write(pcb, p); + + pcb->eap.es_server.ea_requests++; + + if (pcb->settings.eap_timeout_time > 0) + TIMEOUT(eap_server_timeout, pcb, pcb->settings.eap_timeout_time); +} + +/* + * eap_authpeer - Authenticate our peer (behave as server). + * + * Start server state and send first request. This is called only + * after eap_lowerup. + */ +void eap_authpeer(ppp_pcb *pcb, const char *localname) { + + /* Save the name we're given. */ + pcb->eap.es_server.ea_name = localname; + pcb->eap.es_server.ea_namelen = strlen(localname); + + pcb->eap.es_savedtime = pcb->settings.eap_timeout_time; + + /* Lower layer up yet? 
*/ + if (pcb->eap.es_server.ea_state == eapInitial || + pcb->eap.es_server.ea_state == eapPending) { + pcb->eap.es_server.ea_state = eapPending; + return; + } + + pcb->eap.es_server.ea_state = eapPending; + + /* ID number not updated here intentionally; hashed into M1 */ + eap_send_request(pcb); +} + +/* + * eap_server_timeout - Retransmission timer for sending Requests + * expired. + */ +static void eap_server_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (!eap_server_active(pcb)) + return; + + /* EAP ID number must not change on timeout. */ + eap_send_request(pcb); +} + +/* + * When it's time to send rechallenge the peer, this timeout is + * called. Once the rechallenge is successful, the response handler + * will restart the timer. If it fails, then the link is dropped. + */ +static void eap_rechallenge(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->eap.es_server.ea_state != eapOpen && + pcb->eap.es_server.ea_state != eapSRP4) + return; + + pcb->eap.es_server.ea_requests = 0; + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); +} + +static void srp_lwrechallenge(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->eap.es_server.ea_state != eapOpen || + pcb->eap.es_server.ea_type != EAPT_SRP) + return; + + pcb->eap.es_server.ea_requests = 0; + pcb->eap.es_server.ea_state = eapSRP4; + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); +} +#endif /* PPP_SERVER */ + +/* + * eap_lowerup - The lower layer is now up. + * + * This is called before either eap_authpeer or eap_authwithpeer. See + * link_established() in auth.c. All that's necessary here is to + * return to closed state so that those two routines will do the right + * thing. + */ +static void eap_lowerup(ppp_pcb *pcb) { + pcb->eap.es_client.ea_state = eapClosed; +#if PPP_SERVER + pcb->eap.es_server.ea_state = eapClosed; +#endif /* PPP_SERVER */ +} + +/* + * eap_lowerdown - The lower layer is now down. + * + * Cancel all timeouts and return to initial state. + */ +static void eap_lowerdown(ppp_pcb *pcb) { + + if (eap_client_active(pcb) && pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } +#if PPP_SERVER + if (eap_server_active(pcb)) { + if (pcb->settings.eap_timeout_time > 0) { + UNTIMEOUT(eap_server_timeout, pcb); + } + } else { + if ((pcb->eap.es_server.ea_state == eapOpen || + pcb->eap.es_server.ea_state == eapSRP4) && + pcb->eap.es_rechallenge > 0) { + UNTIMEOUT(eap_rechallenge, (void *)pcb); + } + if (pcb->eap.es_server.ea_state == eapOpen && + pcb->eap.es_lwrechallenge > 0) { + UNTIMEOUT(srp_lwrechallenge, (void *)pcb); + } + } + + pcb->eap.es_client.ea_state = pcb->eap.es_server.ea_state = eapInitial; + pcb->eap.es_client.ea_requests = pcb->eap.es_server.ea_requests = 0; +#endif /* PPP_SERVER */ +} + +/* + * eap_protrej - Peer doesn't speak this protocol. + * + * This shouldn't happen. If it does, it represents authentication + * failure. + */ +static void eap_protrej(ppp_pcb *pcb) { + + if (eap_client_active(pcb)) { + ppp_error("EAP authentication failed due to Protocol-Reject"); + auth_withpeer_fail(pcb, PPP_EAP); + } +#if PPP_SERVER + if (eap_server_active(pcb)) { + ppp_error("EAP authentication of peer failed on Protocol-Reject"); + auth_peer_fail(pcb, PPP_EAP); + } +#endif /* PPP_SERVER */ + eap_lowerdown(pcb); +} + +/* + * Format and send a regular EAP Response message. 
+ */ +static void eap_send_response(ppp_pcb *pcb, u_char id, u_char typenum, const u_char *str, int lenstr) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + sizeof (u_char) + lenstr; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(typenum, outp); + if (lenstr > 0) { + MEMCPY(outp, str, lenstr); + } + + ppp_write(pcb, p); +} + +/* + * Format and send an MD5-Challenge EAP Response message. + */ +static void eap_chap_response(ppp_pcb *pcb, u_char id, u_char *hash, const char *name, int namelen) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + MD5_SIGNATURE_SIZE + + namelen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_MD5CHAP, outp); + PUTCHAR(MD5_SIGNATURE_SIZE, outp); + MEMCPY(outp, hash, MD5_SIGNATURE_SIZE); + INCPTR(MD5_SIGNATURE_SIZE, outp); + if (namelen > 0) { + MEMCPY(outp, name, namelen); + } + + ppp_write(pcb, p); +} + +#ifdef USE_SRP +/* + * Format and send a SRP EAP Response message. + */ +static void +eap_srp_response(esp, id, subtypenum, str, lenstr) +eap_state *esp; +u_char id; +u_char subtypenum; +u_char *str; +int lenstr; +{ + ppp_pcb *pcb = &ppp_pcb_list[pcb->eap.es_unit]; + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + lenstr; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(subtypenum, outp); + if (lenstr > 0) { + MEMCPY(outp, str, lenstr); + } + + ppp_write(pcb, p); +} + +/* + * Format and send a SRP EAP Client Validator Response message. 
+ */ +static void +eap_srpval_response(esp, id, flags, str) +eap_state *esp; +u_char id; +u32_t flags; +u_char *str; +{ + ppp_pcb *pcb = &ppp_pcb_list[pcb->eap.es_unit]; + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + sizeof (u32_t) + + SHA_DIGESTSIZE; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_CVALIDATOR, outp); + PUTLONG(flags, outp); + MEMCPY(outp, str, SHA_DIGESTSIZE); + + ppp_write(pcb, p); +} +#endif /* USE_SRP */ + +static void eap_send_nak(ppp_pcb *pcb, u_char id, u_char type) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char); + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_NAK, outp); + PUTCHAR(type, outp); + + ppp_write(pcb, p); +} + +#ifdef USE_SRP +static char * +name_of_pn_file() +{ + char *user, *path, *file; + struct passwd *pw; + size_t pl; + static bool pnlogged = 0; + + pw = getpwuid(getuid()); + if (pw == NULL || (user = pw->pw_dir) == NULL || user[0] == 0) { + errno = EINVAL; + return (NULL); + } + file = _PATH_PSEUDONYM; + pl = strlen(user) + strlen(file) + 2; + path = malloc(pl); + if (path == NULL) + return (NULL); + (void) slprintf(path, pl, "%s/%s", user, file); + if (!pnlogged) { + ppp_dbglog("pseudonym file: %s", path); + pnlogged = 1; + } + return (path); +} + +static int +open_pn_file(modebits) +mode_t modebits; +{ + char *path; + int fd, err; + + if ((path = name_of_pn_file()) == NULL) + return (-1); + fd = open(path, modebits, S_IRUSR | S_IWUSR); + err = errno; + free(path); + errno = err; + return (fd); +} + +static void +remove_pn_file() +{ + char *path; + + if ((path = name_of_pn_file()) != NULL) { + (void) unlink(path); + (void) free(path); + } +} + +static void +write_pseudonym(esp, inp, len, id) +eap_state *esp; +u_char *inp; +int len, id; +{ + u_char val; + u_char *datp, *digp; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + int dsize, fd, olen = len; + + /* + * Do the decoding by working backwards. This eliminates the need + * to save the decoded output in a separate buffer. 
+ */ + val = id; + while (len > 0) { + if ((dsize = len % SHA_DIGESTSIZE) == 0) + dsize = SHA_DIGESTSIZE; + len -= dsize; + datp = inp + len; + SHA1Init(&ctxt); + SHA1Update(&ctxt, &val, 1); + SHA1Update(&ctxt, pcb->eap.es_client.ea_skey, SESSION_KEY_LEN); + if (len > 0) { + SHA1Update(&ctxt, datp, SHA_DIGESTSIZE); + } else { + SHA1Update(&ctxt, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + } + SHA1Final(dig, &ctxt); + for (digp = dig; digp < dig + SHA_DIGESTSIZE; digp++) + *datp++ ^= *digp; + } + + /* Now check that the result is sane */ + if (olen <= 0 || *inp + 1 > olen) { + ppp_dbglog("EAP: decoded pseudonym is unusable <%.*B>", olen, inp); + return; + } + + /* Save it away */ + fd = open_pn_file(O_WRONLY | O_CREAT | O_TRUNC); + if (fd < 0) { + ppp_dbglog("EAP: error saving pseudonym: %m"); + return; + } + len = write(fd, inp + 1, *inp); + if (close(fd) != -1 && len == *inp) { + ppp_dbglog("EAP: saved pseudonym"); + pcb->eap.es_usedpseudo = 0; + } else { + ppp_dbglog("EAP: failed to save pseudonym"); + remove_pn_file(); + } +} +#endif /* USE_SRP */ + +/* + * eap_request - Receive EAP Request message (client mode). + */ +static void eap_request(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char typenum; + u_char vallen; + int secret_len; + char secret[MAXSECRETLEN]; + char rhostname[MAXNAMELEN]; + lwip_md5_context mdContext; + u_char hash[MD5_SIGNATURE_SIZE]; +#ifdef USE_SRP + struct t_client *tc; + struct t_num sval, gval, Nval, *Ap, Bval; + u_char vals[2]; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + int fd; +#endif /* USE_SRP */ + + /* + * Note: we update es_client.ea_id *only if* a Response + * message is being generated. Otherwise, we leave it the + * same for duplicate detection purposes. + */ + + pcb->eap.es_client.ea_requests++; + if (pcb->settings.eap_allow_req != 0 && + pcb->eap.es_client.ea_requests > pcb->settings.eap_allow_req) { + ppp_info("EAP: received too many Request messages"); + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + auth_withpeer_fail(pcb, PPP_EAP); + return; + } + + if (len <= 0) { + ppp_error("EAP: empty Request message discarded"); + return; + } + + GETCHAR(typenum, inp); + len--; + + switch (typenum) { + case EAPT_IDENTITY: + if (len > 0) + ppp_info("EAP: Identity prompt \"%.*q\"", len, inp); +#ifdef USE_SRP + if (pcb->eap.es_usepseudo && + (pcb->eap.es_usedpseudo == 0 || + (pcb->eap.es_usedpseudo == 1 && + id == pcb->eap.es_client.ea_id))) { + pcb->eap.es_usedpseudo = 1; + /* Try to get a pseudonym */ + if ((fd = open_pn_file(O_RDONLY)) >= 0) { + strcpy(rhostname, SRP_PSEUDO_ID); + len = read(fd, rhostname + SRP_PSEUDO_LEN, + sizeof (rhostname) - SRP_PSEUDO_LEN); + /* XXX NAI unsupported */ + if (len > 0) { + eap_send_response(pcb, id, typenum, + rhostname, len + SRP_PSEUDO_LEN); + } + (void) close(fd); + if (len > 0) + break; + } + } + /* Stop using pseudonym now. */ + if (pcb->eap.es_usepseudo && pcb->eap.es_usedpseudo != 2) { + remove_pn_file(); + pcb->eap.es_usedpseudo = 2; + } +#endif /* USE_SRP */ + eap_send_response(pcb, id, typenum, (const u_char*)pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + break; + + case EAPT_NOTIFICATION: + if (len > 0) + ppp_info("EAP: Notification \"%.*q\"", len, inp); + eap_send_response(pcb, id, typenum, NULL, 0); + break; + + case EAPT_NAK: + /* + * Avoid the temptation to send Response Nak in reply + * to Request Nak here. It can only lead to trouble. 
+ */ + ppp_warn("EAP: unexpected Nak in Request; ignored"); + /* Return because we're waiting for something real. */ + return; + + case EAPT_MD5CHAP: + if (len < 1) { + ppp_error("EAP: received MD5-Challenge with no data"); + /* Bogus request; wait for something real. */ + return; + } + GETCHAR(vallen, inp); + len--; + if (vallen < 8 || vallen > len) { + ppp_error("EAP: MD5-Challenge with bad length %d (8..%d)", + vallen, len); + /* Try something better. */ + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + + /* Not so likely to happen. */ + if (vallen >= len + sizeof (rhostname)) { + ppp_dbglog("EAP: trimming really long peer name down"); + MEMCPY(rhostname, inp + vallen, sizeof (rhostname) - 1); + rhostname[sizeof (rhostname) - 1] = '\0'; + } else { + MEMCPY(rhostname, inp + vallen, len - vallen); + rhostname[len - vallen] = '\0'; + } + +#if PPP_REMOTENAME + /* In case the remote doesn't give us his name. */ + if (pcb->settings.explicit_remote || + (pcb->settings.remote_name[0] != '\0' && vallen == len)) + strlcpy(rhostname, pcb->settings.remote_name, sizeof (rhostname)); +#endif /* PPP_REMOTENAME */ + + /* + * Get the secret for authenticating ourselves with + * the specified host. + */ + if (!get_secret(pcb, pcb->eap.es_client.ea_name, + rhostname, secret, &secret_len, 0)) { + ppp_dbglog("EAP: no MD5 secret for auth to %q", rhostname); + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + lwip_md5_init(&mdContext); + lwip_md5_starts(&mdContext); + typenum = id; + lwip_md5_update(&mdContext, &typenum, 1); + lwip_md5_update(&mdContext, (u_char *)secret, secret_len); + BZERO(secret, sizeof (secret)); + lwip_md5_update(&mdContext, inp, vallen); + lwip_md5_finish(&mdContext, hash); + lwip_md5_free(&mdContext); + eap_chap_response(pcb, id, hash, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + break; + +#ifdef USE_SRP + case EAPT_SRP: + if (len < 1) { + ppp_error("EAP: received empty SRP Request"); + /* Bogus request; wait for something real. */ + return; + } + + /* Get subtype */ + GETCHAR(vallen, inp); + len--; + switch (vallen) { + case EAPSRP_CHALLENGE: + tc = NULL; + if (pcb->eap.es_client.ea_session != NULL) { + tc = (struct t_client *)pcb->eap.es_client. + ea_session; + /* + * If this is a new challenge, then start + * over with a new client session context. + * Otherwise, just resend last response. + */ + if (id != pcb->eap.es_client.ea_id) { + t_clientclose(tc); + pcb->eap.es_client.ea_session = NULL; + tc = NULL; + } + } + /* No session key just yet */ + pcb->eap.es_client.ea_skey = NULL; + if (tc == NULL) { + int rhostnamelen; + + GETCHAR(vallen, inp); + len--; + if (vallen >= len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (name)"); + /* Ignore badly-formed messages */ + return; + } + MEMCPY(rhostname, inp, vallen); + rhostname[vallen] = '\0'; + INCPTR(vallen, inp); + len -= vallen; + + /* + * In case the remote doesn't give us his name, + * use configured name. 
+ */ + if (explicit_remote || + (remote_name[0] != '\0' && vallen == 0)) { + strlcpy(rhostname, remote_name, + sizeof (rhostname)); + } + + rhostnamelen = (int)strlen(rhostname); + if (rhostnamelen > MAXNAMELEN) { + rhostnamelen = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_client.ea_peer, rhostname, rhostnamelen); + pcb->eap.es_client.ea_peer[rhostnamelen] = '\0'; + pcb->eap.es_client.ea_peerlen = rhostnamelen; + + GETCHAR(vallen, inp); + len--; + if (vallen >= len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (s)"); + /* Ignore badly-formed messages */ + return; + } + sval.data = inp; + sval.len = vallen; + INCPTR(vallen, inp); + len -= vallen; + + GETCHAR(vallen, inp); + len--; + if (vallen > len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (g)"); + /* Ignore badly-formed messages */ + return; + } + /* If no generator present, then use value 2 */ + if (vallen == 0) { + gval.data = (u_char *)"\002"; + gval.len = 1; + } else { + gval.data = inp; + gval.len = vallen; + } + INCPTR(vallen, inp); + len -= vallen; + + /* + * If no modulus present, then use well-known + * value. + */ + if (len == 0) { + Nval.data = (u_char *)wkmodulus; + Nval.len = sizeof (wkmodulus); + } else { + Nval.data = inp; + Nval.len = len; + } + tc = t_clientopen(pcb->eap.es_client.ea_name, + &Nval, &gval, &sval); + if (tc == NULL) { + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + pcb->eap.es_client.ea_session = (void *)tc; + + /* Add Challenge ID & type to verifier */ + vals[0] = id; + vals[1] = EAPT_SRP; + t_clientaddexdata(tc, vals, 2); + } + Ap = t_clientgenexp(tc); + eap_srp_response(esp, id, EAPSRP_CKEY, Ap->data, + Ap->len); + break; + + case EAPSRP_SKEY: + tc = (struct t_client *)pcb->eap.es_client.ea_session; + if (tc == NULL) { + ppp_warn("EAP: peer sent Subtype 2 without 1"); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + if (pcb->eap.es_client.ea_skey != NULL) { + /* + * ID number should not change here. Warn + * if it does (but otherwise ignore). + */ + if (id != pcb->eap.es_client.ea_id) { + ppp_warn("EAP: ID changed from %d to %d " + "in SRP Subtype 2 rexmit", + pcb->eap.es_client.ea_id, id); + } + } else { + if (get_srp_secret(pcb->eap.es_unit, + pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_peer, secret, 0) == 0) { + /* + * Can't work with this peer because + * the secret is missing. Just give + * up. + */ + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + Bval.data = inp; + Bval.len = len; + t_clientpasswd(tc, secret); + BZERO(secret, sizeof (secret)); + pcb->eap.es_client.ea_skey = + t_clientgetkey(tc, &Bval); + if (pcb->eap.es_client.ea_skey == NULL) { + /* Server is rogue; stop now */ + ppp_error("EAP: SRP server is rogue"); + goto client_failure; + } + } + eap_srpval_response(esp, id, SRPVAL_EBIT, + t_clientresponse(tc)); + break; + + case EAPSRP_SVALIDATOR: + tc = (struct t_client *)pcb->eap.es_client.ea_session; + if (tc == NULL || pcb->eap.es_client.ea_skey == NULL) { + ppp_warn("EAP: peer sent Subtype 3 without 1/2"); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + /* + * If we're already open, then this ought to be a + * duplicate. Otherwise, check that the server is + * who we think it is. 
+ */ + if (pcb->eap.es_client.ea_state == eapOpen) { + if (id != pcb->eap.es_client.ea_id) { + ppp_warn("EAP: ID changed from %d to %d " + "in SRP Subtype 3 rexmit", + pcb->eap.es_client.ea_id, id); + } + } else { + len -= sizeof (u32_t) + SHA_DIGESTSIZE; + if (len < 0 || t_clientverify(tc, inp + + sizeof (u32_t)) != 0) { + ppp_error("EAP: SRP server verification " + "failed"); + goto client_failure; + } + GETLONG(pcb->eap.es_client.ea_keyflags, inp); + /* Save pseudonym if user wants it. */ + if (len > 0 && pcb->eap.es_usepseudo) { + INCPTR(SHA_DIGESTSIZE, inp); + write_pseudonym(esp, inp, len, id); + } + } + /* + * We've verified our peer. We're now mostly done, + * except for waiting on the regular EAP Success + * message. + */ + eap_srp_response(esp, id, EAPSRP_ACK, NULL, 0); + break; + + case EAPSRP_LWRECHALLENGE: + if (len < 4) { + ppp_warn("EAP: malformed Lightweight rechallenge"); + return; + } + SHA1Init(&ctxt); + vals[0] = id; + SHA1Update(&ctxt, vals, 1); + SHA1Update(&ctxt, pcb->eap.es_client.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, inp, len); + SHA1Update(&ctxt, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + SHA1Final(dig, &ctxt); + eap_srp_response(esp, id, EAPSRP_LWRECHALLENGE, dig, + SHA_DIGESTSIZE); + break; + + default: + ppp_error("EAP: unknown SRP Subtype %d", vallen); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + break; +#endif /* USE_SRP */ + + default: + ppp_info("EAP: unknown authentication type %d; Naking", typenum); + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + TIMEOUT(eap_client_timeout, pcb, + pcb->settings.eap_req_time); + } + return; + +#ifdef USE_SRP +client_failure: + pcb->eap.es_client.ea_state = eapBadAuth; + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, (void *)esp); + } + pcb->eap.es_client.ea_session = NULL; + t_clientclose(tc); + auth_withpeer_fail(pcb, PPP_EAP); +#endif /* USE_SRP */ +} + +#if PPP_SERVER +/* + * eap_response - Receive EAP Response message (server mode). 
+ */ +static void eap_response(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char typenum; + u_char vallen; + int secret_len; + char secret[MAXSECRETLEN]; + char rhostname[MAXNAMELEN]; + lwip_md5_context mdContext; + u_char hash[MD5_SIGNATURE_SIZE]; +#ifdef USE_SRP + struct t_server *ts; + struct t_num A; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; +#endif /* USE_SRP */ + + if (pcb->eap.es_server.ea_id != id) { + ppp_dbglog("EAP: discarding Response %d; expected ID %d", id, + pcb->eap.es_server.ea_id); + return; + } + + pcb->eap.es_server.ea_responses++; + + if (len <= 0) { + ppp_error("EAP: empty Response message discarded"); + return; + } + + GETCHAR(typenum, inp); + len--; + + switch (typenum) { + case EAPT_IDENTITY: + if (pcb->eap.es_server.ea_state != eapIdentify) { + ppp_dbglog("EAP discarding unwanted Identify \"%.q\"", len, + inp); + break; + } + ppp_info("EAP: unauthenticated peer name \"%.*q\"", len, inp); + if (len > MAXNAMELEN) { + len = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_server.ea_peer, inp, len); + pcb->eap.es_server.ea_peer[len] = '\0'; + pcb->eap.es_server.ea_peerlen = len; + eap_figure_next_state(pcb, 0); + break; + + case EAPT_NOTIFICATION: + ppp_dbglog("EAP unexpected Notification; response discarded"); + break; + + case EAPT_NAK: + if (len < 1) { + ppp_info("EAP: Nak Response with no suggested protocol"); + eap_figure_next_state(pcb, 1); + break; + } + + GETCHAR(vallen, inp); + len--; + + if ( +#if PPP_REMOTENAME + !pcb->explicit_remote && +#endif /* PPP_REMOTENAME */ + pcb->eap.es_server.ea_state == eapIdentify){ + /* Peer cannot Nak Identify Request */ + eap_figure_next_state(pcb, 1); + break; + } + + switch (vallen) { + case EAPT_SRP: + /* Run through SRP validator selection again. */ + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + break; + + case EAPT_MD5CHAP: + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + + default: + ppp_dbglog("EAP: peer requesting unknown Type %d", vallen); + switch (pcb->eap.es_server.ea_state) { + case eapSRP1: + case eapSRP2: + case eapSRP3: + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + case eapMD5Chall: + case eapSRP4: + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + break; + default: + break; + } + break; + } + break; + + case EAPT_MD5CHAP: + if (pcb->eap.es_server.ea_state != eapMD5Chall) { + ppp_error("EAP: unexpected MD5-Response"); + eap_figure_next_state(pcb, 1); + break; + } + if (len < 1) { + ppp_error("EAP: received MD5-Response with no data"); + eap_figure_next_state(pcb, 1); + break; + } + GETCHAR(vallen, inp); + len--; + if (vallen != 16 || vallen > len) { + ppp_error("EAP: MD5-Response with bad length %d", vallen); + eap_figure_next_state(pcb, 1); + break; + } + + /* Not so likely to happen. */ + if (vallen >= len + sizeof (rhostname)) { + ppp_dbglog("EAP: trimming really long peer name down"); + MEMCPY(rhostname, inp + vallen, sizeof (rhostname) - 1); + rhostname[sizeof (rhostname) - 1] = '\0'; + } else { + MEMCPY(rhostname, inp + vallen, len - vallen); + rhostname[len - vallen] = '\0'; + } + +#if PPP_REMOTENAME + /* In case the remote doesn't give us his name. */ + if (explicit_remote || + (remote_name[0] != '\0' && vallen == len)) + strlcpy(rhostname, remote_name, sizeof (rhostname)); +#endif /* PPP_REMOTENAME */ + + /* + * Get the secret for authenticating the specified + * host. 
+ */ + if (!get_secret(pcb, rhostname, + pcb->eap.es_server.ea_name, secret, &secret_len, 1)) { + ppp_dbglog("EAP: no MD5 secret for auth of %q", rhostname); + eap_send_failure(pcb); + break; + } + lwip_md5_init(&mdContext); + lwip_md5_starts(&mdContext); + lwip_md5_update(&mdContext, &pcb->eap.es_server.ea_id, 1); + lwip_md5_update(&mdContext, (u_char *)secret, secret_len); + BZERO(secret, sizeof (secret)); + lwip_md5_update(&mdContext, pcb->eap.es_challenge, pcb->eap.es_challen); + lwip_md5_finish(&mdContext, hash); + lwip_md5_free(&mdContext); + if (BCMP(hash, inp, MD5_SIGNATURE_SIZE) != 0) { + eap_send_failure(pcb); + break; + } + pcb->eap.es_server.ea_type = EAPT_MD5CHAP; + eap_send_success(pcb); + eap_figure_next_state(pcb, 0); + if (pcb->eap.es_rechallenge != 0) + TIMEOUT(eap_rechallenge, pcb, pcb->eap.es_rechallenge); + break; + +#ifdef USE_SRP + case EAPT_SRP: + if (len < 1) { + ppp_error("EAP: empty SRP Response"); + eap_figure_next_state(pcb, 1); + break; + } + GETCHAR(typenum, inp); + len--; + switch (typenum) { + case EAPSRP_CKEY: + if (pcb->eap.es_server.ea_state != eapSRP1) { + ppp_error("EAP: unexpected SRP Subtype 1 Response"); + eap_figure_next_state(pcb, 1); + break; + } + A.data = inp; + A.len = len; + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + pcb->eap.es_server.ea_skey = t_servergetkey(ts, &A); + if (pcb->eap.es_server.ea_skey == NULL) { + /* Client's A value is bogus; terminate now */ + ppp_error("EAP: bogus A value from client"); + eap_send_failure(pcb); + } else { + eap_figure_next_state(pcb, 0); + } + break; + + case EAPSRP_CVALIDATOR: + if (pcb->eap.es_server.ea_state != eapSRP2) { + ppp_error("EAP: unexpected SRP Subtype 2 Response"); + eap_figure_next_state(pcb, 1); + break; + } + if (len < sizeof (u32_t) + SHA_DIGESTSIZE) { + ppp_error("EAP: M1 length %d < %d", len, + sizeof (u32_t) + SHA_DIGESTSIZE); + eap_figure_next_state(pcb, 1); + break; + } + GETLONG(pcb->eap.es_server.ea_keyflags, inp); + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + if (t_serververify(ts, inp)) { + ppp_info("EAP: unable to validate client identity"); + eap_send_failure(pcb); + break; + } + eap_figure_next_state(pcb, 0); + break; + + case EAPSRP_ACK: + if (pcb->eap.es_server.ea_state != eapSRP3) { + ppp_error("EAP: unexpected SRP Subtype 3 Response"); + eap_send_failure(esp); + break; + } + pcb->eap.es_server.ea_type = EAPT_SRP; + eap_send_success(pcb, esp); + eap_figure_next_state(pcb, 0); + if (pcb->eap.es_rechallenge != 0) + TIMEOUT(eap_rechallenge, pcb, + pcb->eap.es_rechallenge); + if (pcb->eap.es_lwrechallenge != 0) + TIMEOUT(srp_lwrechallenge, pcb, + pcb->eap.es_lwrechallenge); + break; + + case EAPSRP_LWRECHALLENGE: + if (pcb->eap.es_server.ea_state != eapSRP4) { + ppp_info("EAP: unexpected SRP Subtype 4 Response"); + return; + } + if (len != SHA_DIGESTSIZE) { + ppp_error("EAP: bad Lightweight rechallenge " + "response"); + return; + } + SHA1Init(&ctxt); + vallen = id; + SHA1Update(&ctxt, &vallen, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, pcb->eap.es_challenge, pcb->eap.es_challen); + SHA1Update(&ctxt, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_peerlen); + SHA1Final(dig, &ctxt); + if (BCMP(dig, inp, SHA_DIGESTSIZE) != 0) { + ppp_error("EAP: failed Lightweight rechallenge"); + eap_send_failure(pcb); + break; + } + pcb->eap.es_server.ea_state = eapOpen; + if (pcb->eap.es_lwrechallenge != 0) + TIMEOUT(srp_lwrechallenge, esp, + pcb->eap.es_lwrechallenge); + 
break; + } + break; +#endif /* USE_SRP */ + + default: + /* This can't happen. */ + ppp_error("EAP: unknown Response type %d; ignored", typenum); + return; + } + + if (pcb->settings.eap_timeout_time > 0) { + UNTIMEOUT(eap_server_timeout, pcb); + } + + if (pcb->eap.es_server.ea_state != eapBadAuth && + pcb->eap.es_server.ea_state != eapOpen) { + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); + } +} +#endif /* PPP_SERVER */ + +/* + * eap_success - Receive EAP Success message (client mode). + */ +static void eap_success(ppp_pcb *pcb, u_char *inp, int id, int len) { + LWIP_UNUSED_ARG(id); + + if (pcb->eap.es_client.ea_state != eapOpen && !eap_client_active(pcb)) { + ppp_dbglog("EAP unexpected success message in state %s (%d)", + eap_state_name(pcb->eap.es_client.ea_state), + pcb->eap.es_client.ea_state); + return; + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + + if (len > 0) { + /* This is odd. The spec doesn't allow for this. */ + PRINTMSG(inp, len); + } + + pcb->eap.es_client.ea_state = eapOpen; + auth_withpeer_success(pcb, PPP_EAP, 0); +} + +/* + * eap_failure - Receive EAP Failure message (client mode). + */ +static void eap_failure(ppp_pcb *pcb, u_char *inp, int id, int len) { + LWIP_UNUSED_ARG(id); + + if (!eap_client_active(pcb)) { + ppp_dbglog("EAP unexpected failure message in state %s (%d)", + eap_state_name(pcb->eap.es_client.ea_state), + pcb->eap.es_client.ea_state); + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + + if (len > 0) { + /* This is odd. The spec doesn't allow for this. */ + PRINTMSG(inp, len); + } + + pcb->eap.es_client.ea_state = eapBadAuth; + + ppp_error("EAP: peer reports authentication failure"); + auth_withpeer_fail(pcb, PPP_EAP); +} + +/* + * eap_input - Handle received EAP message. + */ +static void eap_input(ppp_pcb *pcb, u_char *inp, int inlen) { + u_char code, id; + int len; + + /* + * Parse header (code, id and length). If packet too short, + * drop it. + */ + if (inlen < EAP_HEADERLEN) { + ppp_error("EAP: packet too short: %d < %d", inlen, EAP_HEADERLEN); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < EAP_HEADERLEN || len > inlen) { + ppp_error("EAP: packet has illegal length field %d (%d..%d)", len, + EAP_HEADERLEN, inlen); + return; + } + len -= EAP_HEADERLEN; + + /* Dispatch based on message code */ + switch (code) { + case EAP_REQUEST: + eap_request(pcb, inp, id, len); + break; + +#if PPP_SERVER + case EAP_RESPONSE: + eap_response(pcb, inp, id, len); + break; +#endif /* PPP_SERVER */ + + case EAP_SUCCESS: + eap_success(pcb, inp, id, len); + break; + + case EAP_FAILURE: + eap_failure(pcb, inp, id, len); + break; + + default: /* XXX Need code reject */ + /* Note: it's not legal to send EAP Nak here. */ + ppp_warn("EAP: unknown code %d received", code); + break; + } +} + +#if PRINTPKT_SUPPORT +/* + * eap_printpkt - print the contents of an EAP packet. 
+ */ +static const char* const eap_codenames[] = { + "Request", "Response", "Success", "Failure" +}; + +static const char* const eap_typenames[] = { + "Identity", "Notification", "Nak", "MD5-Challenge", + "OTP", "Generic-Token", NULL, NULL, + "RSA", "DSS", "KEA", "KEA-Validate", + "TLS", "Defender", "Windows 2000", "Arcot", + "Cisco", "Nokia", "SRP" +}; + +static int eap_printpkt(const u_char *inp, int inlen, void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, rtype, vallen; + const u_char *pstart; + u32_t uval; + + if (inlen < EAP_HEADERLEN) + return (0); + pstart = inp; + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < EAP_HEADERLEN || len > inlen) + return (0); + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(eap_codenames)) + printer(arg, " %s", eap_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= EAP_HEADERLEN; + switch (code) { + case EAP_REQUEST: + if (len < 1) { + printer(arg, " "); + break; + } + GETCHAR(rtype, inp); + len--; + if (rtype >= 1 && rtype <= (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " %s", eap_typenames[rtype-1]); + else + printer(arg, " type=0x%x", rtype); + switch (rtype) { + case EAPT_IDENTITY: + case EAPT_NOTIFICATION: + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_MD5CHAP: + if (len <= 0) + break; + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_SRP: + if (len < 3) + goto truncated; + GETCHAR(vallen, inp); + len--; + printer(arg, "-%d", vallen); + switch (vallen) { + case EAPSRP_CHALLENGE: + GETCHAR(vallen, inp); + len--; + if (vallen >= len) + goto truncated; + if (vallen > 0) { + printer(arg, " "); + } else { + printer(arg, " "); + } + INCPTR(vallen, inp); + len -= vallen; + GETCHAR(vallen, inp); + len--; + if (vallen >= len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + if (vallen == 0) { + printer(arg, " "); + } else { + printer(arg, " ", vallen, inp); + } + INCPTR(vallen, inp); + len -= vallen; + if (len == 0) { + printer(arg, " "); + } else { + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPSRP_SKEY: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_SVALIDATOR: + if (len < (int)sizeof (u32_t)) + break; + GETLONG(uval, inp); + len -= sizeof (u32_t); + if (uval & SRPVAL_EBIT) { + printer(arg, " E"); + uval &= ~SRPVAL_EBIT; + } + if (uval != 0) { + printer(arg, " f<%X>", uval); + } + if ((vallen = len) > SHA_DIGESTSIZE) + vallen = SHA_DIGESTSIZE; + printer(arg, " ", len, inp, + len < SHA_DIGESTSIZE ? "?" 
: ""); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPSRP_LWRECHALLENGE: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + default: + break; + } + break; + default: + break; + } + break; + + case EAP_RESPONSE: + if (len < 1) + break; + GETCHAR(rtype, inp); + len--; + if (rtype >= 1 && rtype <= (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " %s", eap_typenames[rtype-1]); + else + printer(arg, " type=0x%x", rtype); + switch (rtype) { + case EAPT_IDENTITY: + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPT_NAK: + if (len <= 0) { + printer(arg, " "); + break; + } + GETCHAR(rtype, inp); + len--; + printer(arg, " = 1 && rtype < (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " (%s)", eap_typenames[rtype-1]); + printer(arg, ">"); + break; + + case EAPT_MD5CHAP: + if (len <= 0) { + printer(arg, " "); + break; + } + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_SRP: + if (len < 1) + goto truncated; + GETCHAR(vallen, inp); + len--; + printer(arg, "-%d", vallen); + switch (vallen) { + case EAPSRP_CKEY: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_CVALIDATOR: + if (len < (int)sizeof (u32_t)) + break; + GETLONG(uval, inp); + len -= sizeof (u32_t); + if (uval & SRPVAL_EBIT) { + printer(arg, " E"); + uval &= ~SRPVAL_EBIT; + } + if (uval != 0) { + printer(arg, " f<%X>", uval); + } + printer(arg, " ", len, inp, + len == SHA_DIGESTSIZE ? "" : "?"); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_ACK: + break; + + case EAPSRP_LWRECHALLENGE: + printer(arg, " ", len, inp, + len == SHA_DIGESTSIZE ? "" : "?"); + if ((vallen = len) > SHA_DIGESTSIZE) + vallen = SHA_DIGESTSIZE; + INCPTR(vallen, inp); + len -= vallen; + break; + default: + break; + } + break; + default: + break; + } + break; + + case EAP_SUCCESS: /* No payload expected for these! */ + case EAP_FAILURE: + default: + break; + + truncated: + printer(arg, " "); + break; + } + + if (len > 8) + printer(arg, "%8B...", inp); + else if (len > 0) + printer(arg, "%.*B", len, inp); + INCPTR(len, inp); + + return (inp - pstart); +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && EAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c new file mode 100644 index 000000000..4d84f6093 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c @@ -0,0 +1,191 @@ +/* + * ecp.c - PPP Encryption Control Protocol. + * + * Copyright (c) 2002 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
The name(s) of the authors of this software must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission.
+ *
+ * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO
+ * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Derived from ccp.c, which is:
+ *
+ * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. The name(s) of the authors of this software must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission.
+ *
+ * 3. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by Paul Mackerras
+ * <paulus@samba.org>".
+ *
+ * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO
+ * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "netif/ppp/ppp_opts.h"
+#if PPP_SUPPORT && ECP_SUPPORT /* don't build if not configured for use in lwipopts.h */
+
+#include <string.h>
+
+#include "netif/ppp/ppp_impl.h"
+
+#include "netif/ppp/fsm.h"
+#include "netif/ppp/ecp.h"
+
+#if PPP_OPTIONS
+static option_t ecp_option_list[] = {
+ { "noecp", o_bool, &ecp_protent.enabled_flag,
+ "Disable ECP negotiation" },
+ { "-ecp", o_bool, &ecp_protent.enabled_flag,
+ "Disable ECP negotiation", OPT_ALIAS },
+
+ { NULL }
+};
+#endif /* PPP_OPTIONS */
+
+/*
+ * Protocol entry points from main code.
+ */ +static void ecp_init (int unit); +/* +static void ecp_open (int unit); +static void ecp_close (int unit, char *); +static void ecp_lowerup (int unit); +static void ecp_lowerdown (int); +static void ecp_input (int unit, u_char *pkt, int len); +static void ecp_protrej (int unit); +*/ +#if PRINTPKT_SUPPORT +static int ecp_printpkt (const u_char *pkt, int len, + void (*printer) (void *, char *, ...), + void *arg); +#endif /* PRINTPKT_SUPPORT */ +/* +static void ecp_datainput (int unit, u_char *pkt, int len); +*/ + +const struct protent ecp_protent = { + PPP_ECP, + ecp_init, + NULL, /* ecp_input, */ + NULL, /* ecp_protrej, */ + NULL, /* ecp_lowerup, */ + NULL, /* ecp_lowerdown, */ + NULL, /* ecp_open, */ + NULL, /* ecp_close, */ +#if PRINTPKT_SUPPORT + ecp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* ecp_datainput, */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "ECP", + "Encrypted", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ecp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +fsm ecp_fsm[NUM_PPP]; +ecp_options ecp_wantoptions[NUM_PPP]; /* what to request the peer to use */ +ecp_options ecp_gotoptions[NUM_PPP]; /* what the peer agreed to do */ +ecp_options ecp_allowoptions[NUM_PPP]; /* what we'll agree to do */ +ecp_options ecp_hisoptions[NUM_PPP]; /* what we agreed to do */ + +static const fsm_callbacks ecp_callbacks = { + NULL, /* ecp_resetci, */ + NULL, /* ecp_cilen, */ + NULL, /* ecp_addci, */ + NULL, /* ecp_ackci, */ + NULL, /* ecp_nakci, */ + NULL, /* ecp_rejci, */ + NULL, /* ecp_reqci, */ + NULL, /* ecp_up, */ + NULL, /* ecp_down, */ + NULL, + NULL, + NULL, + NULL, + NULL, /* ecp_extcode, */ + "ECP" +}; + +/* + * ecp_init - initialize ECP. + */ +static void +ecp_init(unit) + int unit; +{ + fsm *f = &ecp_fsm[unit]; + + f->unit = unit; + f->protocol = PPP_ECP; + f->callbacks = &ecp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(&ecp_wantoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_gotoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_allowoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_hisoptions[unit], 0, sizeof(ecp_options)); +#endif /* 0 */ + +} + + +#if PRINTPKT_SUPPORT +static int +ecp_printpkt(p, plen, printer, arg) + const u_char *p; + int plen; + void (*printer) (void *, char *, ...); + void *arg; +{ + return 0; +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && ECP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c new file mode 100644 index 000000000..01493bc1f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c @@ -0,0 +1,56 @@ +/* + * eui64.c - EUI64 routines for IPv6CP. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: eui64.c,v 1.6 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/eui64.h" + +/* + * eui64_ntoa - Make an ascii representation of an interface identifier + */ +char *eui64_ntoa(eui64_t e) { + static char buf[20]; + + sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x", + e.e8[0], e.e8[1], e.e8[2], e.e8[3], + e.e8[4], e.e8[5], e.e8[6], e.e8[7]); + return buf; +} + +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c new file mode 100644 index 000000000..b1f08afff --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c @@ -0,0 +1,799 @@ +/* + * fsm.c - {Link, IP} Control Protocol Finite State Machine. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + * Randomize fsm id on link/init. + * Deal with variable outgoing MTU. + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" + +static void fsm_timeout (void *); +static void fsm_rconfreq(fsm *f, u_char id, u_char *inp, int len); +static void fsm_rconfack(fsm *f, int id, u_char *inp, int len); +static void fsm_rconfnakrej(fsm *f, int code, int id, u_char *inp, int len); +static void fsm_rtermreq(fsm *f, int id, u_char *p, int len); +static void fsm_rtermack(fsm *f); +static void fsm_rcoderej(fsm *f, u_char *inp, int len); +static void fsm_sconfreq(fsm *f, int retransmit); + +#define PROTO_NAME(f) ((f)->callbacks->proto_name) + +/* + * fsm_init - Initialize fsm. + * + * Initialize fsm state. + */ +void fsm_init(fsm *f) { + ppp_pcb *pcb = f->pcb; + f->state = PPP_FSM_INITIAL; + f->flags = 0; + f->id = 0; /* XXX Start with random id? */ + f->maxnakloops = pcb->settings.fsm_max_nak_loops; + f->term_reason_len = 0; +} + + +/* + * fsm_lowerup - The lower layer is up. + */ +void fsm_lowerup(fsm *f) { + switch( f->state ){ + case PPP_FSM_INITIAL: + f->state = PPP_FSM_CLOSED; + break; + + case PPP_FSM_STARTING: + if( f->flags & OPT_SILENT ) + f->state = PPP_FSM_STOPPED; + else { + /* Send an initial configure-request */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + } + break; + + default: + FSMDEBUG(("%s: Up event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_lowerdown - The lower layer is down. + * + * Cancel all timeouts and inform upper layers. + */ +void fsm_lowerdown(fsm *f) { + switch( f->state ){ + case PPP_FSM_CLOSED: + f->state = PPP_FSM_INITIAL; + break; + + case PPP_FSM_STOPPED: + f->state = PPP_FSM_STARTING; + if( f->callbacks->starting ) + (*f->callbacks->starting)(f); + break; + + case PPP_FSM_CLOSING: + f->state = PPP_FSM_INITIAL; + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + break; + + case PPP_FSM_STOPPING: + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + f->state = PPP_FSM_STARTING; + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + break; + + case PPP_FSM_OPENED: + if( f->callbacks->down ) + (*f->callbacks->down)(f); + f->state = PPP_FSM_STARTING; + break; + + default: + FSMDEBUG(("%s: Down event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_open - Link is allowed to come up. 
+ */ +void fsm_open(fsm *f) { + switch( f->state ){ + case PPP_FSM_INITIAL: + f->state = PPP_FSM_STARTING; + if( f->callbacks->starting ) + (*f->callbacks->starting)(f); + break; + + case PPP_FSM_CLOSED: + if( f->flags & OPT_SILENT ) + f->state = PPP_FSM_STOPPED; + else { + /* Send an initial configure-request */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + } + break; + + case PPP_FSM_CLOSING: + f->state = PPP_FSM_STOPPING; + /* fall through */ + /* no break */ + case PPP_FSM_STOPPED: + case PPP_FSM_OPENED: + if( f->flags & OPT_RESTART ){ + fsm_lowerdown(f); + fsm_lowerup(f); + } + break; + default: + break; + } +} + +/* + * terminate_layer - Start process of shutting down the FSM + * + * Cancel any timeout running, notify upper layers we're done, and + * send a terminate-request message as configured. + */ +static void terminate_layer(fsm *f, int nextstate) { + ppp_pcb *pcb = f->pcb; + + if( f->state != PPP_FSM_OPENED ) + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + else if( f->callbacks->down ) + (*f->callbacks->down)(f); /* Inform upper layers we're down */ + + /* Init restart counter and send Terminate-Request */ + f->retransmits = pcb->settings.fsm_max_term_transmits; + fsm_sdata(f, TERMREQ, f->reqid = ++f->id, + (const u_char *) f->term_reason, f->term_reason_len); + + if (f->retransmits == 0) { + /* + * User asked for no terminate requests at all; just close it. + * We've already fired off one Terminate-Request just to be nice + * to the peer, but we're not going to wait for a reply. + */ + f->state = nextstate == PPP_FSM_CLOSING ? PPP_FSM_CLOSED : PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + return; + } + + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + --f->retransmits; + + f->state = nextstate; +} + +/* + * fsm_close - Start closing connection. + * + * Cancel timeouts and either initiate close or possibly go directly to + * the PPP_FSM_CLOSED state. + */ +void fsm_close(fsm *f, const char *reason) { + f->term_reason = reason; + f->term_reason_len = (reason == NULL? 0: (u8_t)LWIP_MIN(strlen(reason), 0xFF) ); + switch( f->state ){ + case PPP_FSM_STARTING: + f->state = PPP_FSM_INITIAL; + break; + case PPP_FSM_STOPPED: + f->state = PPP_FSM_CLOSED; + break; + case PPP_FSM_STOPPING: + f->state = PPP_FSM_CLOSING; + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + case PPP_FSM_OPENED: + terminate_layer(f, PPP_FSM_CLOSING); + break; + default: + break; + } +} + + +/* + * fsm_timeout - Timeout expired. + */ +static void fsm_timeout(void *arg) { + fsm *f = (fsm *) arg; + ppp_pcb *pcb = f->pcb; + + switch (f->state) { + case PPP_FSM_CLOSING: + case PPP_FSM_STOPPING: + if( f->retransmits <= 0 ){ + /* + * We've waited for an ack long enough. Peer probably heard us. + */ + f->state = (f->state == PPP_FSM_CLOSING)? 
PPP_FSM_CLOSED: PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + } else { + /* Send Terminate-Request */ + fsm_sdata(f, TERMREQ, f->reqid = ++f->id, + (const u_char *) f->term_reason, f->term_reason_len); + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + --f->retransmits; + } + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + if (f->retransmits <= 0) { + ppp_warn("%s: timeout sending Config-Requests", PROTO_NAME(f)); + f->state = PPP_FSM_STOPPED; + if( (f->flags & OPT_PASSIVE) == 0 && f->callbacks->finished ) + (*f->callbacks->finished)(f); + + } else { + /* Retransmit the configure-request */ + if (f->callbacks->retransmit) + (*f->callbacks->retransmit)(f); + fsm_sconfreq(f, 1); /* Re-send Configure-Request */ + if( f->state == PPP_FSM_ACKRCVD ) + f->state = PPP_FSM_REQSENT; + } + break; + + default: + FSMDEBUG(("%s: Timeout event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_input - Input packet. + */ +void fsm_input(fsm *f, u_char *inpacket, int l) { + u_char *inp; + u_char code, id; + int len; + + /* + * Parse header (code, id and length). + * If packet too short, drop it. + */ + inp = inpacket; + if (l < HEADERLEN) { + FSMDEBUG(("fsm_input(%x): Rcvd short header.", f->protocol)); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < HEADERLEN) { + FSMDEBUG(("fsm_input(%x): Rcvd illegal length.", f->protocol)); + return; + } + if (len > l) { + FSMDEBUG(("fsm_input(%x): Rcvd short packet.", f->protocol)); + return; + } + len -= HEADERLEN; /* subtract header length */ + + if( f->state == PPP_FSM_INITIAL || f->state == PPP_FSM_STARTING ){ + FSMDEBUG(("fsm_input(%x): Rcvd packet in state %d.", + f->protocol, f->state)); + return; + } + + /* + * Action depends on code. + */ + switch (code) { + case CONFREQ: + fsm_rconfreq(f, id, inp, len); + break; + + case CONFACK: + fsm_rconfack(f, id, inp, len); + break; + + case CONFNAK: + case CONFREJ: + fsm_rconfnakrej(f, code, id, inp, len); + break; + + case TERMREQ: + fsm_rtermreq(f, id, inp, len); + break; + + case TERMACK: + fsm_rtermack(f); + break; + + case CODEREJ: + fsm_rcoderej(f, inp, len); + break; + + default: + if( !f->callbacks->extcode + || !(*f->callbacks->extcode)(f, code, id, inp, len) ) + fsm_sdata(f, CODEREJ, ++f->id, inpacket, len + HEADERLEN); + break; + } +} + + +/* + * fsm_rconfreq - Receive Configure-Request. + */ +static void fsm_rconfreq(fsm *f, u_char id, u_char *inp, int len) { + int code, reject_if_disagree; + + switch( f->state ){ + case PPP_FSM_CLOSED: + /* Go away, we're closed */ + fsm_sdata(f, TERMACK, id, NULL, 0); + return; + case PPP_FSM_CLOSING: + case PPP_FSM_STOPPING: + return; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if( f->callbacks->down ) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_STOPPED: + /* Negotiation started by our peer */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } + + /* + * Pass the requested configuration options + * to protocol-specific code for checking. 
+ */ + if (f->callbacks->reqci){ /* Check CI */ + reject_if_disagree = (f->nakloops >= f->maxnakloops); + code = (*f->callbacks->reqci)(f, inp, &len, reject_if_disagree); + } else if (len) + code = CONFREJ; /* Reject all CI */ + else + code = CONFACK; + + /* send the Ack, Nak or Rej to the peer */ + fsm_sdata(f, code, id, inp, len); + + if (code == CONFACK) { + if (f->state == PPP_FSM_ACKRCVD) { + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + f->state = PPP_FSM_OPENED; + if (f->callbacks->up) + (*f->callbacks->up)(f); /* Inform upper layers */ + } else + f->state = PPP_FSM_ACKSENT; + f->nakloops = 0; + + } else { + /* we sent CONFACK or CONFREJ */ + if (f->state != PPP_FSM_ACKRCVD) + f->state = PPP_FSM_REQSENT; + if( code == CONFNAK ) + ++f->nakloops; + } +} + + +/* + * fsm_rconfack - Receive Configure-Ack. + */ +static void fsm_rconfack(fsm *f, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + + if (id != f->reqid || f->seen_ack) /* Expected id? */ + return; /* Nope, toss... */ + if( !(f->callbacks->ackci? (*f->callbacks->ackci)(f, inp, len): + (len == 0)) ){ + /* Ack is bad - ignore it */ + ppp_error("Received bad configure-ack: %P", inp, len); + return; + } + f->seen_ack = 1; + f->rnakloops = 0; + + switch (f->state) { + case PPP_FSM_CLOSED: + case PPP_FSM_STOPPED: + fsm_sdata(f, TERMACK, id, NULL, 0); + break; + + case PPP_FSM_REQSENT: + f->state = PPP_FSM_ACKRCVD; + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + break; + + case PPP_FSM_ACKRCVD: + /* Huh? an extra valid Ack? oh well... */ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_ACKSENT: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + f->state = PPP_FSM_OPENED; + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + if (f->callbacks->up) + (*f->callbacks->up)(f); /* Inform upper layers */ + break; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rconfnakrej - Receive Configure-Nak or Configure-Reject. + */ +static void fsm_rconfnakrej(fsm *f, int code, int id, u_char *inp, int len) { + int ret; + int treat_as_reject; + + if (id != f->reqid || f->seen_ack) /* Expected id? */ + return; /* Nope, toss... */ + + if (code == CONFNAK) { + ++f->rnakloops; + treat_as_reject = (f->rnakloops >= f->maxnakloops); + if (f->callbacks->nakci == NULL + || !(ret = f->callbacks->nakci(f, inp, len, treat_as_reject))) { + ppp_error("Received bad configure-nak: %P", inp, len); + return; + } + } else { + f->rnakloops = 0; + if (f->callbacks->rejci == NULL + || !(ret = f->callbacks->rejci(f, inp, len))) { + ppp_error("Received bad configure-rej: %P", inp, len); + return; + } + } + + f->seen_ack = 1; + + switch (f->state) { + case PPP_FSM_CLOSED: + case PPP_FSM_STOPPED: + fsm_sdata(f, TERMACK, id, NULL, 0); + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKSENT: + /* They didn't agree to what we wanted - try another request */ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + if (ret < 0) + f->state = PPP_FSM_STOPPED; /* kludge for stopping CCP */ + else + fsm_sconfreq(f, 0); /* Send Configure-Request */ + break; + + case PPP_FSM_ACKRCVD: + /* Got a Nak/reject when we had already had an Ack?? oh well... 
*/ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rtermreq - Receive Terminate-Req. + */ +static void fsm_rtermreq(fsm *f, int id, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + + switch (f->state) { + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + f->state = PPP_FSM_REQSENT; /* Start over but keep trying */ + break; + + case PPP_FSM_OPENED: + if (len > 0) { + ppp_info("%s terminated by peer (%0.*v)", PROTO_NAME(f), len, p); + } else + ppp_info("%s terminated by peer", PROTO_NAME(f)); + f->retransmits = 0; + f->state = PPP_FSM_STOPPING; + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + break; + default: + break; + } + + fsm_sdata(f, TERMACK, id, NULL, 0); +} + + +/* + * fsm_rtermack - Receive Terminate-Ack. + */ +static void fsm_rtermack(fsm *f) { + switch (f->state) { + case PPP_FSM_CLOSING: + UNTIMEOUT(fsm_timeout, f); + f->state = PPP_FSM_CLOSED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + case PPP_FSM_STOPPING: + UNTIMEOUT(fsm_timeout, f); + f->state = PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_ACKRCVD: + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_OPENED: + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rcoderej - Receive an Code-Reject. + */ +static void fsm_rcoderej(fsm *f, u_char *inp, int len) { + u_char code, id; + + if (len < HEADERLEN) { + FSMDEBUG(("fsm_rcoderej: Rcvd short Code-Reject packet!")); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + ppp_warn("%s: Rcvd Code-Reject for code %d, id %d", PROTO_NAME(f), code, id); + + if( f->state == PPP_FSM_ACKRCVD ) + f->state = PPP_FSM_REQSENT; +} + + +/* + * fsm_protreject - Peer doesn't speak this protocol. + * + * Treat this as a catastrophic error (RXJ-). + */ +void fsm_protreject(fsm *f) { + switch( f->state ){ + case PPP_FSM_CLOSING: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + /* fall through */ + /* no break */ + case PPP_FSM_CLOSED: + f->state = PPP_FSM_CLOSED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_STOPPING: + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + /* fall through */ + /* no break */ + case PPP_FSM_STOPPED: + f->state = PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_OPENED: + terminate_layer(f, PPP_FSM_STOPPING); + break; + + default: + FSMDEBUG(("%s: Protocol-reject event in state %d!", + PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_sconfreq - Send a Configure-Request. 
+ */ +static void fsm_sconfreq(fsm *f, int retransmit) { + ppp_pcb *pcb = f->pcb; + struct pbuf *p; + u_char *outp; + int cilen; + + if( f->state != PPP_FSM_REQSENT && f->state != PPP_FSM_ACKRCVD && f->state != PPP_FSM_ACKSENT ){ + /* Not currently negotiating - reset options */ + if( f->callbacks->resetci ) + (*f->callbacks->resetci)(f); + f->nakloops = 0; + f->rnakloops = 0; + } + + if( !retransmit ){ + /* New request - reset retransmission counter, use new ID */ + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + f->reqid = ++f->id; + } + + f->seen_ack = 0; + + /* + * Make up the request packet + */ + if( f->callbacks->cilen && f->callbacks->addci ){ + cilen = (*f->callbacks->cilen)(f); + if( cilen > pcb->peer_mru - HEADERLEN ) + cilen = pcb->peer_mru - HEADERLEN; + } else + cilen = 0; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(cilen + HEADERLEN + PPP_HDRLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + /* send the request to our peer */ + outp = (u_char*)p->payload; + MAKEHEADER(outp, f->protocol); + PUTCHAR(CONFREQ, outp); + PUTCHAR(f->reqid, outp); + PUTSHORT(cilen + HEADERLEN, outp); + if (cilen != 0) { + (*f->callbacks->addci)(f, outp, &cilen); + LWIP_ASSERT("cilen == p->len - HEADERLEN - PPP_HDRLEN", cilen == p->len - HEADERLEN - PPP_HDRLEN); + } + + ppp_write(pcb, p); + + /* start the retransmit timer */ + --f->retransmits; + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); +} + + +/* + * fsm_sdata - Send some data. + * + * Used for all packets sent to our peer by this module. + */ +void fsm_sdata(fsm *f, u_char code, u_char id, const u_char *data, int datalen) { + ppp_pcb *pcb = f->pcb; + struct pbuf *p; + u_char *outp; + int outlen; + + /* Adjust length to be smaller than MTU */ + if (datalen > pcb->peer_mru - HEADERLEN) + datalen = pcb->peer_mru - HEADERLEN; + outlen = datalen + HEADERLEN; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(outlen + PPP_HDRLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + if (datalen) /* && data != outp + PPP_HDRLEN + HEADERLEN) -- was only for fsm_sconfreq() */ + MEMCPY(outp + PPP_HDRLEN + HEADERLEN, data, datalen); + MAKEHEADER(outp, f->protocol); + PUTCHAR(code, outp); + PUTCHAR(id, outp); + PUTSHORT(outlen, outp); + ppp_write(pcb, p); +} + +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c new file mode 100644 index 000000000..b7c766eb0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c @@ -0,0 +1,2418 @@ +/* + * ipcp.c - PPP IP Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV4_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" + +#if 0 /* UNUSED */ +/* global vars */ +u32_t netmask = 0; /* IP netmask to set on interface */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +bool disable_defaultip = 0; /* Don't use hostname for default IP adrs */ +#endif /* UNUSED */ + +#if 0 /* moved to ppp_settings */ +bool noremoteip = 0; /* Let him have no IP address */ +#endif /* moved to ppp_setting */ + +#if 0 /* UNUSED */ +/* Hook for a plugin to know when IP protocol has come up */ +void (*ip_up_hook) (void) = NULL; + +/* Hook for a plugin to know when IP protocol has come down */ +void (*ip_down_hook) (void) = NULL; + +/* Hook for a plugin to choose the remote IP address */ +void (*ip_choose_hook) (u32_t *) = NULL; +#endif /* UNUSED */ + +#if PPP_NOTIFY +/* Notifiers for when IPCP goes up and down */ +struct notifier *ip_up_notifier = NULL; +struct notifier *ip_down_notifier = NULL; +#endif /* PPP_NOTIFY */ + +/* local vars */ +#if 0 /* moved to ppp_pcb */ +static int default_route_set[NUM_PPP]; /* Have set up a default route */ +static int proxy_arp_set[NUM_PPP]; /* Have created proxy arp entry */ +static int ipcp_is_up; /* have called np_up() */ +static int ipcp_is_open; /* haven't called np_finished() */ +static bool ask_for_local; /* request our address from peer */ +#endif /* moved to ppp_pcb */ +#if 0 /* UNUSED */ +static char vj_value[8]; /* string form of vj option value */ +static char netmask_str[20]; /* string form of netmask value */ +#endif /* UNUSED */ + +/* + * Callbacks for fsm code. 
(CI = Configuration Information) + */ +static void ipcp_resetci(fsm *f); /* Reset our CI */ +static int ipcp_cilen(fsm *f); /* Return length of our CI */ +static void ipcp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI */ +static int ipcp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int ipcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject);/* Peer nak'd our CI */ +static int ipcp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int ipcp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree); /* Rcv CI */ +static void ipcp_up(fsm *f); /* We're UP */ +static void ipcp_down(fsm *f); /* We're DOWN */ +static void ipcp_finished(fsm *f); /* Don't need lower layer */ + +static const fsm_callbacks ipcp_callbacks = { /* IPCP callback routines */ + ipcp_resetci, /* Reset our Configuration Information */ + ipcp_cilen, /* Length of our Configuration Information */ + ipcp_addci, /* Add our Configuration Information */ + ipcp_ackci, /* ACK our Configuration Information */ + ipcp_nakci, /* NAK our Configuration Information */ + ipcp_rejci, /* Reject our Configuration Information */ + ipcp_reqci, /* Request peer's Configuration Information */ + ipcp_up, /* Called when fsm reaches OPENED state */ + ipcp_down, /* Called when fsm leaves OPENED state */ + NULL, /* Called when we want the lower layer up */ + ipcp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + NULL, /* Called to handle protocol-specific codes */ + "IPCP" /* String name of protocol */ +}; + +/* + * Command-line options. + */ +#if PPP_OPTIONS +static int setvjslots (char **); +static int setdnsaddr (char **); +static int setwinsaddr (char **); +static int setnetmask (char **); +int setipaddr (char *, char **, int); + +static void printipaddr (option_t *, void (*)(void *, char *,...),void *); + +static option_t ipcp_option_list[] = { + { "noip", o_bool, &ipcp_protent.enabled_flag, + "Disable IP and IPCP" }, + { "-ip", o_bool, &ipcp_protent.enabled_flag, + "Disable IP and IPCP", OPT_ALIAS }, + + { "novj", o_bool, &ipcp_wantoptions[0].neg_vj, + "Disable VJ compression", OPT_A2CLR, &ipcp_allowoptions[0].neg_vj }, + { "-vj", o_bool, &ipcp_wantoptions[0].neg_vj, + "Disable VJ compression", OPT_ALIAS | OPT_A2CLR, + &ipcp_allowoptions[0].neg_vj }, + + { "novjccomp", o_bool, &ipcp_wantoptions[0].cflag, + "Disable VJ connection-ID compression", OPT_A2CLR, + &ipcp_allowoptions[0].cflag }, + { "-vjccomp", o_bool, &ipcp_wantoptions[0].cflag, + "Disable VJ connection-ID compression", OPT_ALIAS | OPT_A2CLR, + &ipcp_allowoptions[0].cflag }, + + { "vj-max-slots", o_special, (void *)setvjslots, + "Set maximum VJ header slots", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, vj_value }, + + { "ipcp-accept-local", o_bool, &ipcp_wantoptions[0].accept_local, + "Accept peer's address for us", 1 }, + { "ipcp-accept-remote", o_bool, &ipcp_wantoptions[0].accept_remote, + "Accept peer's address for it", 1 }, + + { "ipparam", o_string, &ipparam, + "Set ip script parameter", OPT_PRIO }, + + { "noipdefault", o_bool, &disable_defaultip, + "Don't use name for default IP adrs", 1 }, + + { "ms-dns", 1, (void *)setdnsaddr, + "DNS address for the peer's use" }, + { "ms-wins", 1, (void *)setwinsaddr, + "Nameserver for SMB over TCP/IP for peer" }, + + { "ipcp-restart", o_int, &ipcp_fsm[0].timeouttime, + "Set timeout for IPCP", OPT_PRIO }, + { "ipcp-max-terminate", o_int, &ipcp_fsm[0].maxtermtransmits, + "Set max #xmits for 
term-reqs", OPT_PRIO }, + { "ipcp-max-configure", o_int, &ipcp_fsm[0].maxconfreqtransmits, + "Set max #xmits for conf-reqs", OPT_PRIO }, + { "ipcp-max-failure", o_int, &ipcp_fsm[0].maxnakloops, + "Set max #conf-naks for IPCP", OPT_PRIO }, + + { "defaultroute", o_bool, &ipcp_wantoptions[0].default_route, + "Add default route", OPT_ENABLE|1, &ipcp_allowoptions[0].default_route }, + { "nodefaultroute", o_bool, &ipcp_allowoptions[0].default_route, + "disable defaultroute option", OPT_A2CLR, + &ipcp_wantoptions[0].default_route }, + { "-defaultroute", o_bool, &ipcp_allowoptions[0].default_route, + "disable defaultroute option", OPT_ALIAS | OPT_A2CLR, + &ipcp_wantoptions[0].default_route }, + + { "replacedefaultroute", o_bool, + &ipcp_wantoptions[0].replace_default_route, + "Replace default route", 1 + }, + { "noreplacedefaultroute", o_bool, + &ipcp_allowoptions[0].replace_default_route, + "Never replace default route", OPT_A2COPY, + &ipcp_wantoptions[0].replace_default_route }, + { "proxyarp", o_bool, &ipcp_wantoptions[0].proxy_arp, + "Add proxy ARP entry", OPT_ENABLE|1, &ipcp_allowoptions[0].proxy_arp }, + { "noproxyarp", o_bool, &ipcp_allowoptions[0].proxy_arp, + "disable proxyarp option", OPT_A2CLR, + &ipcp_wantoptions[0].proxy_arp }, + { "-proxyarp", o_bool, &ipcp_allowoptions[0].proxy_arp, + "disable proxyarp option", OPT_ALIAS | OPT_A2CLR, + &ipcp_wantoptions[0].proxy_arp }, + + { "usepeerdns", o_bool, &usepeerdns, + "Ask peer for DNS address(es)", 1 }, + + { "netmask", o_special, (void *)setnetmask, + "set netmask", OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, netmask_str }, + + { "ipcp-no-addresses", o_bool, &ipcp_wantoptions[0].old_addrs, + "Disable old-style IP-Addresses usage", OPT_A2CLR, + &ipcp_allowoptions[0].old_addrs }, + { "ipcp-no-address", o_bool, &ipcp_wantoptions[0].neg_addr, + "Disable IP-Address usage", OPT_A2CLR, + &ipcp_allowoptions[0].neg_addr }, + + { "noremoteip", o_bool, &noremoteip, + "Allow peer to have no IP address", 1 }, + + { "nosendip", o_bool, &ipcp_wantoptions[0].neg_addr, + "Don't send our IP address to peer", OPT_A2CLR, + &ipcp_wantoptions[0].old_addrs}, + + { "IP addresses", o_wild, (void *) &setipaddr, + "set local and remote IP addresses", + OPT_NOARG | OPT_A2PRINTER, (void *) &printipaddr }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. 
+ */ +static void ipcp_init(ppp_pcb *pcb); +static void ipcp_open(ppp_pcb *pcb); +static void ipcp_close(ppp_pcb *pcb, const char *reason); +static void ipcp_lowerup(ppp_pcb *pcb); +static void ipcp_lowerdown(ppp_pcb *pcb); +static void ipcp_input(ppp_pcb *pcb, u_char *p, int len); +static void ipcp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int ipcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS +static void ip_check_options (void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT +static int ip_demand_conf (int); +static int ip_active_pkt (u_char *, int); +#endif /* DEMAND_SUPPORT */ +#if 0 /* UNUSED */ +static void create_resolv (u32_t, u32_t); +#endif /* UNUSED */ + +const struct protent ipcp_protent = { + PPP_IPCP, + ipcp_init, + ipcp_input, + ipcp_protrej, + ipcp_lowerup, + ipcp_lowerdown, + ipcp_open, + ipcp_close, +#if PRINTPKT_SUPPORT + ipcp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "IPCP", + "IP", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ipcp_option_list, + ip_check_options, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + ip_demand_conf, + ip_active_pkt +#endif /* DEMAND_SUPPORT */ +}; + +static void ipcp_clear_addrs(ppp_pcb *pcb, u32_t ouraddr, u32_t hisaddr, u8_t replacedefaultroute); + +/* + * Lengths of configuration options. + */ +#define CILEN_VOID 2 +#define CILEN_COMPRESS 4 /* min length for compression protocol opt. */ +#define CILEN_VJ 6 /* length for RFC1332 Van-Jacobson opt. */ +#define CILEN_ADDR 6 /* new-style single address option */ +#define CILEN_ADDRS 10 /* old-style dual address option */ + + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? "NAK" : "REJ") + +#if 0 /* UNUSED, already defined by lwIP */ +/* + * Make a string representation of a network IP address. + */ +char * +ip_ntoa(ipaddr) +u32_t ipaddr; +{ + static char b[64]; + + slprintf(b, sizeof(b), "%I", ipaddr); + return b; +} +#endif /* UNUSED, already defined by lwIP */ + +/* + * Option parsing. + */ +#if PPP_OPTIONS +/* + * setvjslots - set maximum number of connection slots for VJ compression + */ +static int +setvjslots(argv) + char **argv; +{ + int value; + + if (!int_option(*argv, &value)) + return 0; + + if (value < 2 || value > 16) { + option_error("vj-max-slots value must be between 2 and 16"); + return 0; + } + ipcp_wantoptions [0].maxslotindex = + ipcp_allowoptions[0].maxslotindex = value - 1; + slprintf(vj_value, sizeof(vj_value), "%d", value); + return 1; +} + +/* + * setdnsaddr - set the dns address(es) + */ +static int +setdnsaddr(argv) + char **argv; +{ + u32_t dns; + struct hostent *hp; + + dns = inet_addr(*argv); + if (dns == (u32_t) -1) { + if ((hp = gethostbyname(*argv)) == NULL) { + option_error("invalid address parameter '%s' for ms-dns option", + *argv); + return 0; + } + dns = *(u32_t *)hp->h_addr; + } + + /* We take the last 2 values given, the 2nd-last as the primary + and the last as the secondary. If only one is given it + becomes both primary and secondary. */ + if (ipcp_allowoptions[0].dnsaddr[1] == 0) + ipcp_allowoptions[0].dnsaddr[0] = dns; + else + ipcp_allowoptions[0].dnsaddr[0] = ipcp_allowoptions[0].dnsaddr[1]; + + /* always set the secondary address value. 
*/ + ipcp_allowoptions[0].dnsaddr[1] = dns; + + return (1); +} + +/* + * setwinsaddr - set the wins address(es) + * This is primrarly used with the Samba package under UNIX or for pointing + * the caller to the existing WINS server on a Windows NT platform. + */ +static int +setwinsaddr(argv) + char **argv; +{ + u32_t wins; + struct hostent *hp; + + wins = inet_addr(*argv); + if (wins == (u32_t) -1) { + if ((hp = gethostbyname(*argv)) == NULL) { + option_error("invalid address parameter '%s' for ms-wins option", + *argv); + return 0; + } + wins = *(u32_t *)hp->h_addr; + } + + /* We take the last 2 values given, the 2nd-last as the primary + and the last as the secondary. If only one is given it + becomes both primary and secondary. */ + if (ipcp_allowoptions[0].winsaddr[1] == 0) + ipcp_allowoptions[0].winsaddr[0] = wins; + else + ipcp_allowoptions[0].winsaddr[0] = ipcp_allowoptions[0].winsaddr[1]; + + /* always set the secondary address value. */ + ipcp_allowoptions[0].winsaddr[1] = wins; + + return (1); +} + +/* + * setipaddr - Set the IP address + * If doit is 0, the call is to check whether this option is + * potentially an IP address specification. + * Not static so that plugins can call it to set the addresses + */ +int +setipaddr(arg, argv, doit) + char *arg; + char **argv; + int doit; +{ + struct hostent *hp; + char *colon; + u32_t local, remote; + ipcp_options *wo = &ipcp_wantoptions[0]; + static int prio_local = 0, prio_remote = 0; + + /* + * IP address pair separated by ":". + */ + if ((colon = strchr(arg, ':')) == NULL) + return 0; + if (!doit) + return 1; + + /* + * If colon first character, then no local addr. + */ + if (colon != arg && option_priority >= prio_local) { + *colon = '\0'; + if ((local = inet_addr(arg)) == (u32_t) -1) { + if ((hp = gethostbyname(arg)) == NULL) { + option_error("unknown host: %s", arg); + return 0; + } + local = *(u32_t *)hp->h_addr; + } + if (bad_ip_adrs(local)) { + option_error("bad local IP address %s", ip_ntoa(local)); + return 0; + } + if (local != 0) + wo->ouraddr = local; + *colon = ':'; + prio_local = option_priority; + } + + /* + * If colon last character, then no remote addr. + */ + if (*++colon != '\0' && option_priority >= prio_remote) { + if ((remote = inet_addr(colon)) == (u32_t) -1) { + if ((hp = gethostbyname(colon)) == NULL) { + option_error("unknown host: %s", colon); + return 0; + } + remote = *(u32_t *)hp->h_addr; + if (remote_name[0] == 0) + strlcpy(remote_name, colon, sizeof(remote_name)); + } + if (bad_ip_adrs(remote)) { + option_error("bad remote IP address %s", ip_ntoa(remote)); + return 0; + } + if (remote != 0) + wo->hisaddr = remote; + prio_remote = option_priority; + } + + return 1; +} + +static void +printipaddr(opt, printer, arg) + option_t *opt; + void (*printer) (void *, char *, ...); + void *arg; +{ + ipcp_options *wo = &ipcp_wantoptions[0]; + + if (wo->ouraddr != 0) + printer(arg, "%I", wo->ouraddr); + printer(arg, ":"); + if (wo->hisaddr != 0) + printer(arg, "%I", wo->hisaddr); +} + +/* + * setnetmask - set the netmask to be used on the interface. + */ +static int +setnetmask(argv) + char **argv; +{ + u32_t mask; + int n; + char *p; + + /* + * Unfortunately, if we use inet_addr, we can't tell whether + * a result of all 1s is an error or a valid 255.255.255.255. 
+ */ + p = *argv; + n = parse_dotted_ip(p, &mask); + + mask = lwip_htonl(mask); + + if (n == 0 || p[n] != 0 || (netmask & ~mask) != 0) { + option_error("invalid netmask value '%s'", *argv); + return 0; + } + + netmask = mask; + slprintf(netmask_str, sizeof(netmask_str), "%I", mask); + + return (1); +} + +int +parse_dotted_ip(p, vp) + char *p; + u32_t *vp; +{ + int n; + u32_t v, b; + char *endp, *p0 = p; + + v = 0; + for (n = 3;; --n) { + b = strtoul(p, &endp, 0); + if (endp == p) + return 0; + if (b > 255) { + if (n < 3) + return 0; + /* accept e.g. 0xffffff00 */ + *vp = b; + return endp - p0; + } + v |= b << (n * 8); + p = endp; + if (n == 0) + break; + if (*p != '.') + return 0; + ++p; + } + *vp = v; + return p - p0; +} +#endif /* PPP_OPTIONS */ + +/* + * ipcp_init - Initialize IPCP. + */ +static void ipcp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_IPCP; + f->callbacks = &ipcp_callbacks; + fsm_init(f); + + /* + * Some 3G modems use repeated IPCP NAKs as a way of stalling + * until they can contact a server on the network, so we increase + * the default number of NAKs we accept before we start treating + * them as rejects. + */ + f->maxnakloops = 100; + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(ao, 0, sizeof(*ao)); +#endif /* 0 */ + + wo->neg_addr = wo->old_addrs = 1; +#if VJ_SUPPORT + wo->neg_vj = 1; + wo->vj_protocol = IPCP_VJ_COMP; + wo->maxslotindex = MAX_STATES - 1; /* really max index */ + wo->cflag = 1; +#endif /* VJ_SUPPORT */ + +#if 0 /* UNUSED */ + /* wanting default route by default */ + wo->default_route = 1; +#endif /* UNUSED */ + + ao->neg_addr = ao->old_addrs = 1; +#if VJ_SUPPORT + /* max slots and slot-id compression are currently hardwired in */ + /* ppp_if.c to 16 and 1, this needs to be changed (among other */ + /* things) gmc */ + + ao->neg_vj = 1; + ao->maxslotindex = MAX_STATES - 1; + ao->cflag = 1; +#endif /* #if VJ_SUPPORT */ + +#if 0 /* UNUSED */ + /* + * XXX These control whether the user may use the proxyarp + * and defaultroute options. + */ + ao->proxy_arp = 1; + ao->default_route = 1; +#endif /* UNUSED */ +} + + +/* + * ipcp_open - IPCP is allowed to come up. + */ +static void ipcp_open(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_open(f); + pcb->ipcp_is_open = 1; +} + + +/* + * ipcp_close - Take IPCP down. + */ +static void ipcp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->ipcp_fsm; + fsm_close(f, reason); +} + + +/* + * ipcp_lowerup - The lower layer is up. + */ +static void ipcp_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerup(f); +} + + +/* + * ipcp_lowerdown - The lower layer is down. + */ +static void ipcp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerdown(f); +} + + +/* + * ipcp_input - Input IPCP packet. + */ +static void ipcp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->ipcp_fsm; + fsm_input(f, p, len); +} + + +/* + * ipcp_protrej - A Protocol-Reject was received for IPCP. + * + * Pretend the lower layer went down, so we shut up. + */ +static void ipcp_protrej(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerdown(f); +} + + +/* + * ipcp_resetci - Reset our CI. + * Called by fsm_sconfreq, Send Configure Request. 
+ */ +static void ipcp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + + wo->req_addr = (wo->neg_addr || wo->old_addrs) && + (ao->neg_addr || ao->old_addrs); + if (wo->ouraddr == 0) + wo->accept_local = 1; + if (wo->hisaddr == 0) + wo->accept_remote = 1; +#if LWIP_DNS + wo->req_dns1 = wo->req_dns2 = pcb->settings.usepeerdns; /* Request DNS addresses from the peer */ +#endif /* LWIP_DNS */ + *go = *wo; + if (!pcb->ask_for_local) + go->ouraddr = 0; +#if 0 /* UNUSED */ + if (ip_choose_hook) { + ip_choose_hook(&wo->hisaddr); + if (wo->hisaddr) { + wo->accept_remote = 0; + } + } +#endif /* UNUSED */ + BZERO(&pcb->ipcp_hisoptions, sizeof(ipcp_options)); +} + + +/* + * ipcp_cilen - Return length of our CI. + * Called by fsm_sconfreq, Send Configure Request. + */ +static int ipcp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; +#if VJ_SUPPORT + ipcp_options *wo = &pcb->ipcp_wantoptions; +#endif /* VJ_SUPPORT */ + ipcp_options *ho = &pcb->ipcp_hisoptions; + +#define LENCIADDRS(neg) (neg ? CILEN_ADDRS : 0) +#if VJ_SUPPORT +#define LENCIVJ(neg, old) (neg ? (old? CILEN_COMPRESS : CILEN_VJ) : 0) +#endif /* VJ_SUPPORT */ +#define LENCIADDR(neg) (neg ? CILEN_ADDR : 0) +#if LWIP_DNS +#define LENCIDNS(neg) LENCIADDR(neg) +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ +#define LENCIWINS(neg) LENCIADDR(neg) +#endif /* UNUSED - WINS */ + + /* + * First see if we want to change our options to the old + * forms because we have received old forms from the peer. + */ + if (go->neg_addr && go->old_addrs && !ho->neg_addr && ho->old_addrs) + go->neg_addr = 0; + +#if VJ_SUPPORT + if (wo->neg_vj && !go->neg_vj && !go->old_vj) { + /* try an older style of VJ negotiation */ + /* use the old style only if the peer did */ + if (ho->neg_vj && ho->old_vj) { + go->neg_vj = 1; + go->old_vj = 1; + go->vj_protocol = ho->vj_protocol; + } + } +#endif /* VJ_SUPPORT */ + + return (LENCIADDRS(!go->neg_addr && go->old_addrs) + +#if VJ_SUPPORT + LENCIVJ(go->neg_vj, go->old_vj) + +#endif /* VJ_SUPPORT */ + LENCIADDR(go->neg_addr) + +#if LWIP_DNS + LENCIDNS(go->req_dns1) + + LENCIDNS(go->req_dns2) + +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + LENCIWINS(go->winsaddr[0]) + + LENCIWINS(go->winsaddr[1]) + +#endif /* UNUSED - WINS */ + 0); +} + + +/* + * ipcp_addci - Add our desired CIs to a packet. + * Called by fsm_sconfreq, Send Configure Request. + */ +static void ipcp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + int len = *lenp; + +#define ADDCIADDRS(opt, neg, val1, val2) \ + if (neg) { \ + if (len >= CILEN_ADDRS) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDRS, ucp); \ + l = lwip_ntohl(val1); \ + PUTLONG(l, ucp); \ + l = lwip_ntohl(val2); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDRS; \ + } else \ + go->old_addrs = 0; \ + } + +#if VJ_SUPPORT +#define ADDCIVJ(opt, neg, val, old, maxslotindex, cflag) \ + if (neg) { \ + int vjlen = old? 
CILEN_COMPRESS : CILEN_VJ; \ + if (len >= vjlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(vjlen, ucp); \ + PUTSHORT(val, ucp); \ + if (!old) { \ + PUTCHAR(maxslotindex, ucp); \ + PUTCHAR(cflag, ucp); \ + } \ + len -= vjlen; \ + } else \ + neg = 0; \ + } +#endif /* VJ_SUPPORT */ + +#define ADDCIADDR(opt, neg, val) \ + if (neg) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(val); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + neg = 0; \ + } + +#if LWIP_DNS +#define ADDCIDNS(opt, neg, addr) \ + if (neg) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(addr); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + neg = 0; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define ADDCIWINS(opt, addr) \ + if (addr) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(addr); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + addr = 0; \ + } +#endif /* UNUSED - WINS */ + + ADDCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, go->ouraddr, + go->hisaddr); + +#if VJ_SUPPORT + ADDCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + ADDCIADDR(CI_ADDR, go->neg_addr, go->ouraddr); + +#if LWIP_DNS + ADDCIDNS(CI_MS_DNS1, go->req_dns1, go->dnsaddr[0]); + + ADDCIDNS(CI_MS_DNS2, go->req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + ADDCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + ADDCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + *lenp -= len; +} + + +/* + * ipcp_ackci - Ack our CIs. + * Called by fsm_rconfack, Receive Configure ACK. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int ipcp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_short cilen, citype; + u32_t cilong; +#if VJ_SUPPORT + u_short cishort; + u_char cimaxslotindex, cicflag; +#endif /* VJ_SUPPORT */ + + /* + * CIs must be in exactly the same order that we sent... + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ + +#define ACKCIADDRS(opt, neg, val1, val2) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDRS) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDRS || \ + citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val1 != cilong) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val2 != cilong) \ + goto bad; \ + } + +#if VJ_SUPPORT +#define ACKCIVJ(opt, neg, val, old, maxslotindex, cflag) \ + if (neg) { \ + int vjlen = old? 
CILEN_COMPRESS : CILEN_VJ; \ + if ((len -= vjlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != vjlen || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + if (!old) { \ + GETCHAR(cimaxslotindex, p); \ + if (cimaxslotindex != maxslotindex) \ + goto bad; \ + GETCHAR(cicflag, p); \ + if (cicflag != cflag) \ + goto bad; \ + } \ + } +#endif /* VJ_SUPPORT */ + +#define ACKCIADDR(opt, neg, val) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || \ + citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val != cilong) \ + goto bad; \ + } + +#if LWIP_DNS +#define ACKCIDNS(opt, neg, addr) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (addr != cilong) \ + goto bad; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define ACKCIWINS(opt, addr) \ + if (addr) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (addr != cilong) \ + goto bad; \ + } +#endif /* UNUSED - WINS */ + + ACKCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, go->ouraddr, + go->hisaddr); + +#if VJ_SUPPORT + ACKCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + ACKCIADDR(CI_ADDR, go->neg_addr, go->ouraddr); + +#if LWIP_DNS + ACKCIDNS(CI_MS_DNS1, go->req_dns1, go->dnsaddr[0]); + + ACKCIDNS(CI_MS_DNS2, go->req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + ACKCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + ACKCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + return (1); + +bad: + IPCPDEBUG(("ipcp_ackci: received bad Ack!")); + return (0); +} + +/* + * ipcp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if IPCP is in the OPENED state. + * Calback from fsm_rconfnakrej - Receive Configure-Nak or Configure-Reject. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int ipcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_char citype, cilen, *next; +#if VJ_SUPPORT + u_char cimaxslotindex, cicflag; + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t ciaddr1, ciaddr2, l; +#if LWIP_DNS + u32_t cidnsaddr; +#endif /* LWIP_DNS */ + ipcp_options no; /* options we've seen Naks for */ + ipcp_options try_; /* options to request next time */ + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. 
+ */ +#define NAKCIADDRS(opt, neg, code) \ + if ((neg) && \ + (cilen = p[1]) == CILEN_ADDRS && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + ciaddr1 = lwip_htonl(l); \ + GETLONG(l, p); \ + ciaddr2 = lwip_htonl(l); \ + no.old_addrs = 1; \ + code \ + } + +#if VJ_SUPPORT +#define NAKCIVJ(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_COMPRESS || cilen == CILEN_VJ) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#endif /* VJ_SUPPORT */ + +#define NAKCIADDR(opt, neg, code) \ + if (go->neg && \ + (cilen = p[1]) == CILEN_ADDR && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + ciaddr1 = lwip_htonl(l); \ + no.neg = 1; \ + code \ + } + +#if LWIP_DNS +#define NAKCIDNS(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cidnsaddr = lwip_htonl(l); \ + no.neg = 1; \ + code \ + } +#endif /* LWIP_DNS */ + + /* + * Accept the peer's idea of {our,his} address, if different + * from our idea, only if the accept_{local,remote} flag is set. + */ + NAKCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, + if (treat_as_reject) { + try_.old_addrs = 0; + } else { + if (go->accept_local && ciaddr1) { + /* take his idea of our address */ + try_.ouraddr = ciaddr1; + } + if (go->accept_remote && ciaddr2) { + /* take his idea of his address */ + try_.hisaddr = ciaddr2; + } + } + ); + +#if VJ_SUPPORT + /* + * Accept the peer's value of maxslotindex provided that it + * is less than what we asked for. Turn off slot-ID compression + * if the peer wants. Send old-style compress-type option if + * the peer wants. + */ + NAKCIVJ(CI_COMPRESSTYPE, neg_vj, + if (treat_as_reject) { + try_.neg_vj = 0; + } else if (cilen == CILEN_VJ) { + GETCHAR(cimaxslotindex, p); + GETCHAR(cicflag, p); + if (cishort == IPCP_VJ_COMP) { + try_.old_vj = 0; + if (cimaxslotindex < go->maxslotindex) + try_.maxslotindex = cimaxslotindex; + if (!cicflag) + try_.cflag = 0; + } else { + try_.neg_vj = 0; + } + } else { + if (cishort == IPCP_VJ_COMP || cishort == IPCP_VJ_COMP_OLD) { + try_.old_vj = 1; + try_.vj_protocol = cishort; + } else { + try_.neg_vj = 0; + } + } + ); +#endif /* VJ_SUPPORT */ + + NAKCIADDR(CI_ADDR, neg_addr, + if (treat_as_reject) { + try_.neg_addr = 0; + try_.old_addrs = 0; + } else if (go->accept_local && ciaddr1) { + /* take his idea of our address */ + try_.ouraddr = ciaddr1; + } + ); + +#if LWIP_DNS + NAKCIDNS(CI_MS_DNS1, req_dns1, + if (treat_as_reject) { + try_.req_dns1 = 0; + } else { + try_.dnsaddr[0] = cidnsaddr; + } + ); + + NAKCIDNS(CI_MS_DNS2, req_dns2, + if (treat_as_reject) { + try_.req_dns2 = 0; + } else { + try_.dnsaddr[1] = cidnsaddr; + } + ); +#endif /* #if LWIP_DNS */ + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If they want to negotiate about IP addresses, we comply. + * If they want us to ask for compression, we refuse. + * If they want us to ask for ms-dns, we do that, since some + * peers get huffy if we don't. 
+ */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if ( cilen < CILEN_VOID || (len -= cilen) < 0 ) + goto bad; + next = p + cilen - 2; + + switch (citype) { +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (go->neg_vj || no.neg_vj || + (cilen != CILEN_VJ && cilen != CILEN_COMPRESS)) + goto bad; + no.neg_vj = 1; + break; +#endif /* VJ_SUPPORT */ + case CI_ADDRS: + if ((!go->neg_addr && go->old_addrs) || no.old_addrs + || cilen != CILEN_ADDRS) + goto bad; + try_.neg_addr = 0; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1 && go->accept_local) + try_.ouraddr = ciaddr1; + GETLONG(l, p); + ciaddr2 = lwip_htonl(l); + if (ciaddr2 && go->accept_remote) + try_.hisaddr = ciaddr2; + no.old_addrs = 1; + break; + case CI_ADDR: + if (go->neg_addr || no.neg_addr || cilen != CILEN_ADDR) + goto bad; + try_.old_addrs = 0; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1 && go->accept_local) + try_.ouraddr = ciaddr1; + if (try_.ouraddr != 0) + try_.neg_addr = 1; + no.neg_addr = 1; + break; +#if LWIP_DNS + case CI_MS_DNS1: + if (go->req_dns1 || no.req_dns1 || cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + try_.dnsaddr[0] = lwip_htonl(l); + try_.req_dns1 = 1; + no.req_dns1 = 1; + break; + case CI_MS_DNS2: + if (go->req_dns2 || no.req_dns2 || cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + try_.dnsaddr[1] = lwip_htonl(l); + try_.req_dns2 = 1; + no.req_dns2 = 1; + break; +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + if (cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1) + try_.winsaddr[citype == CI_MS_WINS2] = ciaddr1; + break; +#endif /* UNUSED - WINS */ + default: + break; + } + p = next; + } + + /* + * OK, the Nak is good. Now we can update state. + * If there are any remaining options, we ignore them. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; + +bad: + IPCPDEBUG(("ipcp_nakci: received bad Nak!")); + return 0; +} + + +/* + * ipcp_rejci - Reject some of our CIs. + * Callback from fsm_rconfnakrej. + */ +static int ipcp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_char cilen; +#if VJ_SUPPORT + u_char cimaxslotindex, ciflag; + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t cilong; + ipcp_options try_; /* options to request next time */ + + try_ = *go; + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIADDRS(opt, neg, val1, val2) \ + if ((neg) && \ + (cilen = p[1]) == CILEN_ADDRS && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val1) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val2) \ + goto bad; \ + try_.old_addrs = 0; \ + } + +#if VJ_SUPPORT +#define REJCIVJ(opt, neg, val, old, maxslot, cflag) \ + if (go->neg && \ + p[1] == (old? CILEN_COMPRESS : CILEN_VJ) && \ + len >= p[1] && \ + p[0] == opt) { \ + len -= p[1]; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. 
*/ \ + if (cishort != val) \ + goto bad; \ + if (!old) { \ + GETCHAR(cimaxslotindex, p); \ + if (cimaxslotindex != maxslot) \ + goto bad; \ + GETCHAR(ciflag, p); \ + if (ciflag != cflag) \ + goto bad; \ + } \ + try_.neg = 0; \ + } +#endif /* VJ_SUPPORT */ + +#define REJCIADDR(opt, neg, val) \ + if (go->neg && \ + (cilen = p[1]) == CILEN_ADDR && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } + +#if LWIP_DNS +#define REJCIDNS(opt, neg, dnsaddr) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != dnsaddr) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define REJCIWINS(opt, addr) \ + if (addr && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != addr) \ + goto bad; \ + try_.winsaddr[opt == CI_MS_WINS2] = 0; \ + } +#endif /* UNUSED - WINS */ + + REJCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, + go->ouraddr, go->hisaddr); + +#if VJ_SUPPORT + REJCIVJ(CI_COMPRESSTYPE, neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + REJCIADDR(CI_ADDR, neg_addr, go->ouraddr); + +#if LWIP_DNS + REJCIDNS(CI_MS_DNS1, req_dns1, go->dnsaddr[0]); + + REJCIDNS(CI_MS_DNS2, req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + REJCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + REJCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + IPCPDEBUG(("ipcp_rejci: received bad Reject!")); + return 0; +} + + +/* + * ipcp_reqci - Check the peer's requested CIs and send appropriate response. + * Callback from fsm_rconfreq, Receive Configure Request + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * len = Length of requested CIs + */ +static int ipcp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + u_short cilen, citype; /* Parsed len, type */ +#if VJ_SUPPORT + u_short cishort; /* Parsed short value */ +#endif /* VJ_SUPPORT */ + u32_t tl, ciaddr1, ciaddr2;/* Parsed address values */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *ucp = inp; /* Pointer to current output char */ + int l = *len; /* Length left */ +#if VJ_SUPPORT + u_char maxslotindex, cflag; +#endif /* VJ_SUPPORT */ +#if LWIP_DNS + int d; +#endif /* LWIP_DNS */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. 
+ */ + next = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? */ + IPCPDEBUG(("ipcp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_ADDRS: + if (!ao->old_addrs || ho->neg_addr || + cilen != CILEN_ADDRS) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no address, or if we both have his address but + * disagree about it, then NAK it with our idea. + * In particular, if we don't know his address, but he does, + * then accept it. + */ + GETLONG(tl, p); /* Parse source address (his) */ + ciaddr1 = lwip_htonl(tl); + if (ciaddr1 != wo->hisaddr + && (ciaddr1 == 0 || !wo->accept_remote)) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, p); + } + } else if (ciaddr1 == 0 && wo->hisaddr == 0) { + /* + * If neither we nor he knows his address, reject the option. + */ + orc = CONFREJ; + wo->req_addr = 0; /* don't NAK with 0.0.0.0 later */ + break; + } + + /* + * If he doesn't know our address, or if we both have our address + * but disagree about it, then NAK it with our idea. + */ + GETLONG(tl, p); /* Parse desination address (ours) */ + ciaddr2 = lwip_htonl(tl); + if (ciaddr2 != wo->ouraddr) { + if (ciaddr2 == 0 || !wo->accept_local) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->ouraddr); + PUTLONG(tl, p); + } + } else { + wo->ouraddr = ciaddr2; /* accept peer's idea */ + } + } + + ho->old_addrs = 1; + ho->hisaddr = ciaddr1; + ho->ouraddr = ciaddr2; + break; + + case CI_ADDR: + if (!ao->neg_addr || ho->old_addrs || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no address, or if we both have his address but + * disagree about it, then NAK it with our idea. + * In particular, if we don't know his address, but he does, + * then accept it. + */ + GETLONG(tl, p); /* Parse source address (his) */ + ciaddr1 = lwip_htonl(tl); + if (ciaddr1 != wo->hisaddr + && (ciaddr1 == 0 || !wo->accept_remote)) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, p); + } + } else if (ciaddr1 == 0 && wo->hisaddr == 0) { + /* + * Don't ACK an address of 0.0.0.0 - reject it instead. 
+ */ + orc = CONFREJ; + wo->req_addr = 0; /* don't NAK with 0.0.0.0 later */ + break; + } + + ho->neg_addr = 1; + ho->hisaddr = ciaddr1; + break; + +#if LWIP_DNS + case CI_MS_DNS1: + case CI_MS_DNS2: + /* Microsoft primary or secondary DNS request */ + d = citype == CI_MS_DNS2; + + /* If we do not have a DNS address then we cannot send it */ + if (ao->dnsaddr[d] == 0 || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETLONG(tl, p); + if (lwip_htonl(tl) != ao->dnsaddr[d]) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(ao->dnsaddr[d]); + PUTLONG(tl, p); + orc = CONFNAK; + } + break; +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + /* Microsoft primary or secondary WINS request */ + d = citype == CI_MS_WINS2; + + /* If we do not have a DNS address then we cannot send it */ + if (ao->winsaddr[d] == 0 || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETLONG(tl, p); + if (lwip_htonl(tl) != ao->winsaddr[d]) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(ao->winsaddr[d]); + PUTLONG(tl, p); + orc = CONFNAK; + } + break; +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (!ao->neg_vj || + (cilen != CILEN_VJ && cilen != CILEN_COMPRESS)) { + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + + if (!(cishort == IPCP_VJ_COMP || + (cishort == IPCP_VJ_COMP_OLD && cilen == CILEN_COMPRESS))) { + orc = CONFREJ; + break; + } + + ho->neg_vj = 1; + ho->vj_protocol = cishort; + if (cilen == CILEN_VJ) { + GETCHAR(maxslotindex, p); + if (maxslotindex > ao->maxslotindex) { + orc = CONFNAK; + if (!reject_if_disagree){ + DECPTR(1, p); + PUTCHAR(ao->maxslotindex, p); + } + } + GETCHAR(cflag, p); + if (cflag && !ao->cflag) { + orc = CONFNAK; + if (!reject_if_disagree){ + DECPTR(1, p); + PUTCHAR(wo->cflag, p); + } + } + ho->maxslotindex = maxslotindex; + ho->cflag = cflag; + } else { + ho->old_vj = 1; + ho->maxslotindex = MAX_STATES - 1; + ho->cflag = 1; + } + break; +#endif /* VJ_SUPPORT */ + + default: + orc = CONFREJ; + break; + } +endswitch: + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree) /* Getting fed up with sending NAKs? */ + orc = CONFREJ; /* Get tough if so */ + else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + if (rc == CONFACK) { /* Ack'd all prior CIs? */ + rc = CONFNAK; /* Not anymore... */ + ucp = inp; /* Backup */ + } + } + } + + if (orc == CONFREJ && /* Reject this CI */ + rc != CONFREJ) { /* but no prior ones? */ + rc = CONFREJ; + ucp = inp; /* Backup */ + } + + /* Need to move CI? */ + if (ucp != cip) + MEMCPY(ucp, cip, cilen); /* Move it */ + + /* Update output pointer */ + INCPTR(cilen, ucp); + } + + /* + * If we aren't rejecting this packet, and we want to negotiate + * their address, and they didn't send their address, then we + * send a NAK with a CI_ADDR option appended. We assume the + * input buffer is long enough that we can append the extra + * option safely. 
+ */ + if (rc != CONFREJ && !ho->neg_addr && !ho->old_addrs && + wo->req_addr && !reject_if_disagree && !pcb->settings.noremoteip) { + if (rc == CONFACK) { + rc = CONFNAK; + ucp = inp; /* reset pointer */ + wo->req_addr = 0; /* don't ask again */ + } + PUTCHAR(CI_ADDR, ucp); + PUTCHAR(CILEN_ADDR, ucp); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, ucp); + } + + *len = ucp - inp; /* Compute output length */ + IPCPDEBUG(("ipcp: returning Configure-%s", CODENAME(rc))); + return (rc); /* Return final code */ +} + + +#if 0 /* UNUSED */ +/* + * ip_check_options - check that any IP-related options are OK, + * and assign appropriate defaults. + */ +static void +ip_check_options() +{ + struct hostent *hp; + u32_t local; + ipcp_options *wo = &ipcp_wantoptions[0]; + + /* + * Default our local IP address based on our hostname. + * If local IP address already given, don't bother. + */ + if (wo->ouraddr == 0 && !disable_defaultip) { + /* + * Look up our hostname (possibly with domain name appended) + * and take the first IP address as our local IP address. + * If there isn't an IP address for our hostname, too bad. + */ + wo->accept_local = 1; /* don't insist on this default value */ + if ((hp = gethostbyname(hostname)) != NULL) { + local = *(u32_t *)hp->h_addr; + if (local != 0 && !bad_ip_adrs(local)) + wo->ouraddr = local; + } + } + ask_for_local = wo->ouraddr != 0 || !disable_defaultip; +} +#endif /* UNUSED */ + +#if DEMAND_SUPPORT +/* + * ip_demand_conf - configure the interface as though + * IPCP were up, for use with dial-on-demand. + */ +static int +ip_demand_conf(u) + int u; +{ + ppp_pcb *pcb = &ppp_pcb_list[u]; + ipcp_options *wo = &ipcp_wantoptions[u]; + + if (wo->hisaddr == 0 && !pcb->settings.noremoteip) { + /* make up an arbitrary address for the peer */ + wo->hisaddr = lwip_htonl(0x0a707070 + ifunit); + wo->accept_remote = 1; + } + if (wo->ouraddr == 0) { + /* make up an arbitrary address for us */ + wo->ouraddr = lwip_htonl(0x0a404040 + ifunit); + wo->accept_local = 1; + ask_for_local = 0; /* don't tell the peer this address */ + } + if (!sifaddr(pcb, wo->ouraddr, wo->hisaddr, get_mask(wo->ouraddr))) + return 0; + if (!sifup(pcb)) + return 0; + if (!sifnpmode(pcb, PPP_IP, NPMODE_QUEUE)) + return 0; +#if 0 /* UNUSED */ + if (wo->default_route) + if (sifdefaultroute(pcb, wo->ouraddr, wo->hisaddr, + wo->replace_default_route)) + default_route_set[u] = 1; +#endif /* UNUSED */ +#if 0 /* UNUSED - PROXY ARP */ + if (wo->proxy_arp) + if (sifproxyarp(pcb, wo->hisaddr)) + proxy_arp_set[u] = 1; +#endif /* UNUSED - PROXY ARP */ + + ppp_notice("local IP address %I", wo->ouraddr); + if (wo->hisaddr) + ppp_notice("remote IP address %I", wo->hisaddr); + + return 1; +} +#endif /* DEMAND_SUPPORT */ + +/* + * ipcp_up - IPCP has come UP. + * + * Configure the IP network interface appropriately and bring it up. + */ +static void ipcp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + u32_t mask; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + ipcp_options *wo = &pcb->ipcp_wantoptions; + + IPCPDEBUG(("ipcp: up")); + + /* + * We must have a non-zero IP address for both ends of the link. 
+ */ + if (!ho->neg_addr && !ho->old_addrs) + ho->hisaddr = wo->hisaddr; + + if (!(go->neg_addr || go->old_addrs) && (wo->neg_addr || wo->old_addrs) + && wo->ouraddr != 0) { + ppp_error("Peer refused to agree to our IP address"); + ipcp_close(f->pcb, "Refused our IP address"); + return; + } + if (go->ouraddr == 0) { + ppp_error("Could not determine local IP address"); + ipcp_close(f->pcb, "Could not determine local IP address"); + return; + } + if (ho->hisaddr == 0 && !pcb->settings.noremoteip) { + ho->hisaddr = lwip_htonl(0x0a404040); + ppp_warn("Could not determine remote IP address: defaulting to %I", + ho->hisaddr); + } +#if 0 /* UNUSED */ + script_setenv("IPLOCAL", ip_ntoa(go->ouraddr), 0); + if (ho->hisaddr != 0) + script_setenv("IPREMOTE", ip_ntoa(ho->hisaddr), 1); +#endif /* UNUSED */ + +#if LWIP_DNS + if (!go->req_dns1) + go->dnsaddr[0] = 0; + if (!go->req_dns2) + go->dnsaddr[1] = 0; +#if 0 /* UNUSED */ + if (go->dnsaddr[0]) + script_setenv("DNS1", ip_ntoa(go->dnsaddr[0]), 0); + if (go->dnsaddr[1]) + script_setenv("DNS2", ip_ntoa(go->dnsaddr[1]), 0); +#endif /* UNUSED */ + if (pcb->settings.usepeerdns && (go->dnsaddr[0] || go->dnsaddr[1])) { + sdns(pcb, go->dnsaddr[0], go->dnsaddr[1]); +#if 0 /* UNUSED */ + script_setenv("USEPEERDNS", "1", 0); + create_resolv(go->dnsaddr[0], go->dnsaddr[1]); +#endif /* UNUSED */ + } +#endif /* LWIP_DNS */ + + /* + * Check that the peer is allowed to use the IP address it wants. + */ + if (ho->hisaddr != 0) { + u32_t addr = lwip_ntohl(ho->hisaddr); + if ((addr >> IP_CLASSA_NSHIFT) == IP_LOOPBACKNET + || IP_MULTICAST(addr) || IP_BADCLASS(addr) + /* + * For now, consider that PPP in server mode with peer required + * to authenticate must provide the peer IP address, reject any + * IP address wanted by peer different than the one we wanted. + */ +#if PPP_SERVER && PPP_AUTH_SUPPORT + || (pcb->settings.auth_required && wo->hisaddr != ho->hisaddr) +#endif /* PPP_SERVER && PPP_AUTH_SUPPORT */ + ) { + ppp_error("Peer is not authorized to use remote address %I", ho->hisaddr); + ipcp_close(pcb, "Unauthorized remote IP address"); + return; + } + } +#if 0 /* Unused */ + /* Upstream checking code */ + if (ho->hisaddr != 0 && !auth_ip_addr(f->unit, ho->hisaddr)) { + ppp_error("Peer is not authorized to use remote address %I", ho->hisaddr); + ipcp_close(f->unit, "Unauthorized remote IP address"); + return; + } +#endif /* Unused */ + +#if VJ_SUPPORT + /* set tcp compression */ + sifvjcomp(pcb, ho->neg_vj, ho->cflag, ho->maxslotindex); +#endif /* VJ_SUPPORT */ + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, the interface is already + * configured, so we put out any saved-up packets, then set the + * interface to pass IP packets. 
+ */ + if (demand) { + if (go->ouraddr != wo->ouraddr || ho->hisaddr != wo->hisaddr) { + ipcp_clear_addrs(f->unit, wo->ouraddr, wo->hisaddr, + wo->replace_default_route); + if (go->ouraddr != wo->ouraddr) { + ppp_warn("Local IP address changed to %I", go->ouraddr); + script_setenv("OLDIPLOCAL", ip_ntoa(wo->ouraddr), 0); + wo->ouraddr = go->ouraddr; + } else + script_unsetenv("OLDIPLOCAL"); + if (ho->hisaddr != wo->hisaddr && wo->hisaddr != 0) { + ppp_warn("Remote IP address changed to %I", ho->hisaddr); + script_setenv("OLDIPREMOTE", ip_ntoa(wo->hisaddr), 0); + wo->hisaddr = ho->hisaddr; + } else + script_unsetenv("OLDIPREMOTE"); + + /* Set the interface to the new addresses */ + mask = get_mask(go->ouraddr); + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->unit, "Interface configuration failed"); + return; + } + + /* assign a default route through the interface if required */ + if (ipcp_wantoptions[f->unit].default_route) + if (sifdefaultroute(pcb, go->ouraddr, ho->hisaddr, + wo->replace_default_route)) + default_route_set[f->unit] = 1; + +#if 0 /* UNUSED - PROXY ARP */ + /* Make a proxy ARP entry if requested. */ + if (ho->hisaddr != 0 && ipcp_wantoptions[f->unit].proxy_arp) + if (sifproxyarp(pcb, ho->hisaddr)) + proxy_arp_set[f->unit] = 1; +#endif /* UNUSED - PROXY ARP */ + + } + demand_rexmit(PPP_IP,go->ouraddr); + sifnpmode(pcb, PPP_IP, NPMODE_PASS); + + } else +#endif /* DEMAND_SUPPORT */ + { + /* + * Set IP addresses and (if specified) netmask. + */ + mask = get_mask(go->ouraddr); + +#if !(defined(SVR4) && (defined(SNI) || defined(__USLC__))) + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->pcb, "Interface configuration failed"); + return; + } +#endif + + /* bring the interface up for IP */ + if (!sifup(pcb)) { +#if PPP_DEBUG + ppp_warn("Interface failed to come up"); +#endif /* PPP_DEBUG */ + ipcp_close(f->pcb, "Interface configuration failed"); + return; + } + +#if (defined(SVR4) && (defined(SNI) || defined(__USLC__))) + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->unit, "Interface configuration failed"); + return; + } +#endif +#if DEMAND_SUPPORT + sifnpmode(pcb, PPP_IP, NPMODE_PASS); +#endif /* DEMAND_SUPPORT */ + +#if 0 /* UNUSED */ + /* assign a default route through the interface if required */ + if (wo->default_route) + if (sifdefaultroute(pcb, go->ouraddr, ho->hisaddr, + wo->replace_default_route)) + pcb->default_route_set = 1; +#endif /* UNUSED */ + +#if 0 /* UNUSED - PROXY ARP */ + /* Make a proxy ARP entry if requested. 
*/ + if (ho->hisaddr != 0 && wo->proxy_arp) + if (sifproxyarp(pcb, ho->hisaddr)) + pcb->proxy_arp_set = 1; +#endif /* UNUSED - PROXY ARP */ + + wo->ouraddr = go->ouraddr; + + ppp_notice("local IP address %I", go->ouraddr); + if (ho->hisaddr != 0) + ppp_notice("remote IP address %I", ho->hisaddr); +#if LWIP_DNS + if (go->dnsaddr[0]) + ppp_notice("primary DNS address %I", go->dnsaddr[0]); + if (go->dnsaddr[1]) + ppp_notice("secondary DNS address %I", go->dnsaddr[1]); +#endif /* LWIP_DNS */ + } + +#if PPP_STATS_SUPPORT + reset_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ + + np_up(pcb, PPP_IP); + pcb->ipcp_is_up = 1; + +#if PPP_NOTIFY + notify(ip_up_notifier, 0); +#endif /* PPP_NOTIFY */ +#if 0 /* UNUSED */ + if (ip_up_hook) + ip_up_hook(); +#endif /* UNUSED */ +} + + +/* + * ipcp_down - IPCP has gone DOWN. + * + * Take the IP network interface down, clear its addresses + * and delete routes through it. + */ +static void ipcp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + + IPCPDEBUG(("ipcp: down")); +#if PPP_STATS_SUPPORT + /* XXX a bit IPv4-centric here, we only need to get the stats + * before the interface is marked down. */ + /* XXX more correct: we must get the stats before running the notifiers, + * at least for the radius plugin */ + update_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ +#if PPP_NOTIFY + notify(ip_down_notifier, 0); +#endif /* PPP_NOTIFY */ +#if 0 /* UNUSED */ + if (ip_down_hook) + ip_down_hook(); +#endif /* UNUSED */ + if (pcb->ipcp_is_up) { + pcb->ipcp_is_up = 0; + np_down(pcb, PPP_IP); + } +#if VJ_SUPPORT + sifvjcomp(pcb, 0, 0, 0); +#endif /* VJ_SUPPORT */ + +#if PPP_STATS_SUPPORT + print_link_stats(); /* _after_ running the notifiers and ip_down_hook(), + * because print_link_stats() sets link_stats_valid + * to 0 (zero) */ +#endif /* PPP_STATS_SUPPORT */ + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, set the interface + * to queue up outgoing packets (for now). + */ + if (demand) { + sifnpmode(pcb, PPP_IP, NPMODE_QUEUE); + } else +#endif /* DEMAND_SUPPORT */ + { +#if DEMAND_SUPPORT + sifnpmode(pcb, PPP_IP, NPMODE_DROP); +#endif /* DEMAND_SUPPORT */ + sifdown(pcb); + ipcp_clear_addrs(pcb, go->ouraddr, + ho->hisaddr, 0); +#if LWIP_DNS + cdns(pcb, go->dnsaddr[0], go->dnsaddr[1]); +#endif /* LWIP_DNS */ + } +} + + +/* + * ipcp_clear_addrs() - clear the interface addresses, routes, + * proxy arp entries, etc. + */ +static void ipcp_clear_addrs(ppp_pcb *pcb, u32_t ouraddr, u32_t hisaddr, u8_t replacedefaultroute) { + LWIP_UNUSED_ARG(replacedefaultroute); + +#if 0 /* UNUSED - PROXY ARP */ + if (pcb->proxy_arp_set) { + cifproxyarp(pcb, hisaddr); + pcb->proxy_arp_set = 0; + } +#endif /* UNUSED - PROXY ARP */ +#if 0 /* UNUSED */ + /* If replacedefaultroute, sifdefaultroute will be called soon + * with replacedefaultroute set and that will overwrite the current + * default route. This is the case only when doing demand, otherwise + * during demand, this cifdefaultroute would restore the old default + * route which is not what we want in this case. In the non-demand + * case, we'll delete the default route and restore the old if there + * is one saved by an sifdefaultroute with replacedefaultroute. + */ + if (!replacedefaultroute && pcb->default_route_set) { + cifdefaultroute(pcb, ouraddr, hisaddr); + pcb->default_route_set = 0; + } +#endif /* UNUSED */ + cifaddr(pcb, ouraddr, hisaddr); +} + + +/* + * ipcp_finished - possibly shut down the lower layers. 
+ */ +static void ipcp_finished(fsm *f) { + ppp_pcb *pcb = f->pcb; + if (pcb->ipcp_is_open) { + pcb->ipcp_is_open = 0; + np_finished(pcb, PPP_IP); + } +} + + +#if 0 /* UNUSED */ +/* + * create_resolv - create the replacement resolv.conf file + */ +static void +create_resolv(peerdns1, peerdns2) + u32_t peerdns1, peerdns2; +{ + +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ipcp_printpkt - print the contents of an IPCP packet. + */ +static const char* const ipcp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej" +}; + +static int ipcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, olen; + const u_char *pstart, *optend; +#if VJ_SUPPORT + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t cilong; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ipcp_codenames)) + printer(arg, " %s", ipcp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { + case CI_ADDRS: + if (olen == CILEN_ADDRS) { + p += 2; + GETLONG(cilong, p); + printer(arg, "addrs %I", lwip_htonl(cilong)); + GETLONG(cilong, p); + printer(arg, " %I", lwip_htonl(cilong)); + } + break; +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (olen >= CILEN_COMPRESS) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "compress "); + switch (cishort) { + case IPCP_VJ_COMP: + printer(arg, "VJ"); + break; + case IPCP_VJ_COMP_OLD: + printer(arg, "old-VJ"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; +#endif /* VJ_SUPPORT */ + case CI_ADDR: + if (olen == CILEN_ADDR) { + p += 2; + GETLONG(cilong, p); + printer(arg, "addr %I", lwip_htonl(cilong)); + } + break; +#if LWIP_DNS + case CI_MS_DNS1: + case CI_MS_DNS2: + p += 2; + GETLONG(cilong, p); + printer(arg, "ms-dns%d %I", (code == CI_MS_DNS1? 1: 2), + htonl(cilong)); + break; +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + p += 2; + GETLONG(cilong, p); + printer(arg, "ms-wins %I", lwip_htonl(cilong)); + break; +#endif /* UNUSED - WINS */ + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * ip_active_pkt - see if this IP packet is worth bringing the link up for. + * We don't bring the link up for IP fragments or for TCP FIN packets + * with no data. 
+ */ +#define IP_HDRLEN 20 /* bytes */ +#define IP_OFFMASK 0x1fff +#ifndef IPPROTO_TCP +#define IPPROTO_TCP 6 +#endif +#define TCP_HDRLEN 20 +#define TH_FIN 0x01 + +/* + * We use these macros because the IP header may be at an odd address, + * and some compilers might use word loads to get th_off or ip_hl. + */ + +#define net_short(x) (((x)[0] << 8) + (x)[1]) +#define get_iphl(x) (((unsigned char *)(x))[0] & 0xF) +#define get_ipoff(x) net_short((unsigned char *)(x) + 6) +#define get_ipproto(x) (((unsigned char *)(x))[9]) +#define get_tcpoff(x) (((unsigned char *)(x))[12] >> 4) +#define get_tcpflags(x) (((unsigned char *)(x))[13]) + +static int +ip_active_pkt(pkt, len) + u_char *pkt; + int len; +{ + u_char *tcp; + int hlen; + + len -= PPP_HDRLEN; + pkt += PPP_HDRLEN; + if (len < IP_HDRLEN) + return 0; + if ((get_ipoff(pkt) & IP_OFFMASK) != 0) + return 0; + if (get_ipproto(pkt) != IPPROTO_TCP) + return 1; + hlen = get_iphl(pkt) * 4; + if (len < hlen + TCP_HDRLEN) + return 0; + tcp = pkt + hlen; + if ((get_tcpflags(tcp) & TH_FIN) != 0 && len == hlen + get_tcpoff(tcp) * 4) + return 0; + return 1; +} +#endif /* DEMAND_SUPPORT */ + +#endif /* PPP_SUPPORT && PPP_IPV4_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c new file mode 100644 index 000000000..11c18df74 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c @@ -0,0 +1,1533 @@ +/* + * ipv6cp.c - PPP IPV6 Control Protocol. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* Original version, based on RFC2023 : + + Copyright (c) 1995, 1996, 1997 Francis.Dupont@inria.fr, INRIA Rocquencourt, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Copyright (c) 1998, 1999 Francis.Dupont@inria.fr, GIE DYADE, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Ce travail a été fait au sein du GIE DYADE (Groupement d'Intérêt + Économique ayant pour membres BULL S.A. et l'INRIA). 
+ + Ce logiciel informatique est disponible aux conditions + usuelles dans la recherche, c'est-à-dire qu'il peut + être utilisé, copié, modifié, distribué à l'unique + condition que ce texte soit conservé afin que + l'origine de ce logiciel soit reconnue. + + Le nom de l'Institut National de Recherche en Informatique + et en Automatique (INRIA), de l'IMAG, ou d'une personne morale + ou physique ayant participé à l'élaboration de ce logiciel ne peut + être utilisé sans son accord préalable explicite. + + Ce logiciel est fourni tel quel sans aucune garantie, + support ou responsabilité d'aucune sorte. + Ce logiciel est dérivé de sources d'origine + "University of California at Berkeley" et + "Digital Equipment Corporation" couvertes par des copyrights. + + L'Institut d'Informatique et de Mathématiques Appliquées de Grenoble (IMAG) + est une fédération d'unités mixtes de recherche du CNRS, de l'Institut National + Polytechnique de Grenoble et de l'Université Joseph Fourier regroupant + sept laboratoires dont le laboratoire Logiciels, Systèmes, Réseaux (LSR). + + This work has been done in the context of GIE DYADE (joint R & D venture + between BULL S.A. and INRIA). + + This software is available with usual "research" terms + with the aim of retain credits of the software. + Permission to use, copy, modify and distribute this software for any + purpose and without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies, + and the name of INRIA, IMAG, or any contributor not be used in advertising + or publicity pertaining to this material without the prior explicit + permission. The software is provided "as is" without any + warranties, support or liabilities of any kind. + This software is derived from source code from + "University of California at Berkeley" and + "Digital Equipment Corporation" protected by copyrights. + + Grenoble's Institute of Computer Science and Applied Mathematics (IMAG) + is a federation of seven research units funded by the CNRS, National + Polytechnic Institute of Grenoble and University Joseph Fourier. + The research unit in Software, Systems, Networks (LSR) is member of IMAG. +*/ + +/* + * Derived from : + * + * + * ipcp.c - PPP IP Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipv6cp.c,v 1.21 2005/08/25 23:59:34 paulus Exp $ + */ + +/* + * @todo: + * + * Proxy Neighbour Discovery. + * + * Better defines for selecting the ordering of + * interface up / set address. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/ipv6cp.h" +#include "netif/ppp/magic.h" + +/* global vars */ +#if 0 /* UNUSED */ +int no_ifaceid_neg = 0; +#endif /* UNUSED */ + +/* + * Callbacks for fsm code. (CI = Configuration Information) + */ +static void ipv6cp_resetci(fsm *f); /* Reset our CI */ +static int ipv6cp_cilen(fsm *f); /* Return length of our CI */ +static void ipv6cp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI */ +static int ipv6cp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int ipv6cp_nakci(fsm *f, u_char *p, int len, int treat_as_reject); /* Peer nak'd our CI */ +static int ipv6cp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int ipv6cp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree); /* Rcv CI */ +static void ipv6cp_up(fsm *f); /* We're UP */ +static void ipv6cp_down(fsm *f); /* We're DOWN */ +static void ipv6cp_finished(fsm *f); /* Don't need lower layer */ + +static const fsm_callbacks ipv6cp_callbacks = { /* IPV6CP callback routines */ + ipv6cp_resetci, /* Reset our Configuration Information */ + ipv6cp_cilen, /* Length of our Configuration Information */ + ipv6cp_addci, /* Add our Configuration Information */ + ipv6cp_ackci, /* ACK our Configuration Information */ + ipv6cp_nakci, /* NAK our Configuration Information */ + ipv6cp_rejci, /* Reject our Configuration Information */ + ipv6cp_reqci, /* Request peer's Configuration Information */ + ipv6cp_up, /* Called when fsm reaches OPENED state */ + ipv6cp_down, /* Called when fsm leaves OPENED state */ + NULL, /* Called when we want the lower layer up */ + ipv6cp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + NULL, /* Called to handle protocol-specific codes */ + "IPV6CP" /* String name of protocol */ +}; + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static int setifaceid(char **arg)); +static void printifaceid(option_t *, + void (*)(void *, char *, ...), void *)); + +static option_t ipv6cp_option_list[] = { + { "ipv6", o_special, (void *)setifaceid, + "Set interface identifiers for IPV6", + OPT_A2PRINTER, (void *)printifaceid }, + + { "+ipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Enable IPv6 and IPv6CP", OPT_PRIO | 1 }, + { "noipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Disable IPv6 and IPv6CP", OPT_PRIOSUB }, + { "-ipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Disable IPv6 and IPv6CP", OPT_PRIOSUB | OPT_ALIAS }, + + { "ipv6cp-accept-local", o_bool, &ipv6cp_allowoptions[0].accept_local, + "Accept peer's interface identifier for us", 1 }, + + { "ipv6cp-use-ipaddr", o_bool, &ipv6cp_allowoptions[0].use_ip, + "Use (default) IPv4 address as interface identifier", 1 }, + + { "ipv6cp-use-persistent", o_bool, &ipv6cp_wantoptions[0].use_persistent, + "Use uniquely-available persistent value for link local address", 1 }, + + { "ipv6cp-restart", o_int, &ipv6cp_fsm[0].timeouttime, + "Set timeout for IPv6CP", OPT_PRIO }, + { "ipv6cp-max-terminate", o_int, &ipv6cp_fsm[0].maxtermtransmits, + "Set max #xmits for term-reqs", OPT_PRIO }, + { "ipv6cp-max-configure", o_int, &ipv6cp_fsm[0].maxconfreqtransmits, + "Set max #xmits for conf-reqs", OPT_PRIO }, + { "ipv6cp-max-failure", o_int, &ipv6cp_fsm[0].maxnakloops, + "Set max #conf-naks for IPv6CP", OPT_PRIO }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. + */ +static void ipv6cp_init(ppp_pcb *pcb); +static void ipv6cp_open(ppp_pcb *pcb); +static void ipv6cp_close(ppp_pcb *pcb, const char *reason); +static void ipv6cp_lowerup(ppp_pcb *pcb); +static void ipv6cp_lowerdown(ppp_pcb *pcb); +static void ipv6cp_input(ppp_pcb *pcb, u_char *p, int len); +static void ipv6cp_protrej(ppp_pcb *pcb); +#if PPP_OPTIONS +static void ipv6_check_options(void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT +static int ipv6_demand_conf(int u); +#endif /* DEMAND_SUPPORT */ +#if PRINTPKT_SUPPORT +static int ipv6cp_printpkt(const u_char *p, int plen, + void (*printer)(void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if DEMAND_SUPPORT +static int ipv6_active_pkt(u_char *pkt, int len); +#endif /* DEMAND_SUPPORT */ + +const struct protent ipv6cp_protent = { + PPP_IPV6CP, + ipv6cp_init, + ipv6cp_input, + ipv6cp_protrej, + ipv6cp_lowerup, + ipv6cp_lowerdown, + ipv6cp_open, + ipv6cp_close, +#if PRINTPKT_SUPPORT + ipv6cp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "IPV6CP", + "IPV6", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ipv6cp_option_list, + ipv6_check_options, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + ipv6_demand_conf, + ipv6_active_pkt +#endif /* DEMAND_SUPPORT */ +}; + +static void ipv6cp_clear_addrs(ppp_pcb *pcb, eui64_t ourid, eui64_t hisid); +#if 0 /* UNUSED */ +static void ipv6cp_script(char *)); +static void ipv6cp_script_done(void *)); +#endif /* UNUSED */ + +/* + * Lengths of configuration options. + */ +#define CILEN_VOID 2 +#define CILEN_COMPRESS 4 /* length for RFC2023 compress opt. */ +#define CILEN_IFACEID 10 /* RFC2472, interface identifier */ + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? "NAK" : "REJ") + +#if 0 /* UNUSED */ +/* + * This state variable is used to ensure that we don't + * run an ipcp-up/down script while one is already running. 
+ */ +static enum script_state { + s_down, + s_up, +} ipv6cp_script_state; +static pid_t ipv6cp_script_pid; +#endif /* UNUSED */ + +static char *llv6_ntoa(eui64_t ifaceid); + +#if PPP_OPTIONS +/* + * setifaceid - set the interface identifiers manually + */ +static int +setifaceid(argv) + char **argv; +{ + char *comma, *arg, c; + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + struct in6_addr addr; + static int prio_local, prio_remote; + +#define VALIDID(a) ( (((a).s6_addr32[0] == 0) && ((a).s6_addr32[1] == 0)) && \ + (((a).s6_addr32[2] != 0) || ((a).s6_addr32[3] != 0)) ) + + arg = *argv; + if ((comma = strchr(arg, ',')) == NULL) + comma = arg + strlen(arg); + + /* + * If comma first character, then no local identifier + */ + if (comma != arg) { + c = *comma; + *comma = '\0'; + + if (inet_pton(AF_INET6, arg, &addr) == 0 || !VALIDID(addr)) { + option_error("Illegal interface identifier (local): %s", arg); + return 0; + } + + if (option_priority >= prio_local) { + eui64_copy(addr.s6_addr32[2], wo->ourid); + wo->opt_local = 1; + prio_local = option_priority; + } + *comma = c; + } + + /* + * If comma last character, the no remote identifier + */ + if (*comma != 0 && *++comma != '\0') { + if (inet_pton(AF_INET6, comma, &addr) == 0 || !VALIDID(addr)) { + option_error("Illegal interface identifier (remote): %s", comma); + return 0; + } + if (option_priority >= prio_remote) { + eui64_copy(addr.s6_addr32[2], wo->hisid); + wo->opt_remote = 1; + prio_remote = option_priority; + } + } + + if (override_value("+ipv6", option_priority, option_source)) + ipv6cp_protent.enabled_flag = 1; + return 1; +} + +static void +printifaceid(opt, printer, arg) + option_t *opt; + void (*printer)(void *, char *, ...)); + void *arg; +{ + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + + if (wo->opt_local) + printer(arg, "%s", llv6_ntoa(wo->ourid)); + printer(arg, ","); + if (wo->opt_remote) + printer(arg, "%s", llv6_ntoa(wo->hisid)); +} +#endif /* PPP_OPTIONS */ + +/* + * Make a string representation of a network address. + */ +static char * +llv6_ntoa(eui64_t ifaceid) +{ + static char b[26]; + + sprintf(b, "fe80::%02x%02x:%02x%02x:%02x%02x:%02x%02x", + ifaceid.e8[0], ifaceid.e8[1], ifaceid.e8[2], ifaceid.e8[3], + ifaceid.e8[4], ifaceid.e8[5], ifaceid.e8[6], ifaceid.e8[7]); + + return b; +} + + +/* + * ipv6cp_init - Initialize IPV6CP. + */ +static void ipv6cp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ipv6cp_fsm; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_IPV6CP; + f->callbacks = &ipv6cp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(ao, 0, sizeof(*ao)); +#endif /* 0 */ + + wo->accept_local = 1; + wo->neg_ifaceid = 1; + ao->neg_ifaceid = 1; + +#ifdef IPV6CP_COMP + wo->neg_vj = 1; + ao->neg_vj = 1; + wo->vj_protocol = IPV6CP_COMP; +#endif + +} + + +/* + * ipv6cp_open - IPV6CP is allowed to come up. + */ +static void ipv6cp_open(ppp_pcb *pcb) { + fsm_open(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_close - Take IPV6CP down. + */ +static void ipv6cp_close(ppp_pcb *pcb, const char *reason) { + fsm_close(&pcb->ipv6cp_fsm, reason); +} + + +/* + * ipv6cp_lowerup - The lower layer is up. + */ +static void ipv6cp_lowerup(ppp_pcb *pcb) { + fsm_lowerup(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_lowerdown - The lower layer is down. + */ +static void ipv6cp_lowerdown(ppp_pcb *pcb) { + fsm_lowerdown(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_input - Input IPV6CP packet. 
+ */ +static void ipv6cp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm_input(&pcb->ipv6cp_fsm, p, len); +} + + +/* + * ipv6cp_protrej - A Protocol-Reject was received for IPV6CP. + * + * Pretend the lower layer went down, so we shut up. + */ +static void ipv6cp_protrej(ppp_pcb *pcb) { + fsm_lowerdown(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_resetci - Reset our CI. + */ +static void ipv6cp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + + wo->req_ifaceid = wo->neg_ifaceid && ao->neg_ifaceid; + + if (!wo->opt_local) { + eui64_magic_nz(wo->ourid); + } + + *go = *wo; + eui64_zero(go->hisid); /* last proposed interface identifier */ +} + + +/* + * ipv6cp_cilen - Return length of our CI. + */ +static int ipv6cp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + +#ifdef IPV6CP_COMP +#define LENCIVJ(neg) (neg ? CILEN_COMPRESS : 0) +#endif /* IPV6CP_COMP */ +#define LENCIIFACEID(neg) (neg ? CILEN_IFACEID : 0) + + return (LENCIIFACEID(go->neg_ifaceid) + +#ifdef IPV6CP_COMP + LENCIVJ(go->neg_vj) + +#endif /* IPV6CP_COMP */ + 0); +} + + +/* + * ipv6cp_addci - Add our desired CIs to a packet. + */ +static void ipv6cp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + int len = *lenp; + +#ifdef IPV6CP_COMP +#define ADDCIVJ(opt, neg, val) \ + if (neg) { \ + int vjlen = CILEN_COMPRESS; \ + if (len >= vjlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(vjlen, ucp); \ + PUTSHORT(val, ucp); \ + len -= vjlen; \ + } else \ + neg = 0; \ + } +#endif /* IPV6CP_COMP */ + +#define ADDCIIFACEID(opt, neg, val1) \ + if (neg) { \ + int idlen = CILEN_IFACEID; \ + if (len >= idlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(idlen, ucp); \ + eui64_put(val1, ucp); \ + len -= idlen; \ + } else \ + neg = 0; \ + } + + ADDCIIFACEID(CI_IFACEID, go->neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + ADDCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + *lenp -= len; +} + + +/* + * ipv6cp_ackci - Ack our CIs. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int ipv6cp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_short cilen, citype; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + + /* + * CIs must be in exactly the same order that we sent... + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ + +#ifdef IPV6CP_COMP +#define ACKCIVJ(opt, neg, val) \ + if (neg) { \ + int vjlen = CILEN_COMPRESS; \ + if ((len -= vjlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != vjlen || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + } +#endif /* IPV6CP_COMP */ + +#define ACKCIIFACEID(opt, neg, val1) \ + if (neg) { \ + int idlen = CILEN_IFACEID; \ + if ((len -= idlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != idlen || \ + citype != opt) \ + goto bad; \ + eui64_get(ifaceid, p); \ + if (! eui64_equals(val1, ifaceid)) \ + goto bad; \ + } + + ACKCIIFACEID(CI_IFACEID, go->neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + ACKCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + /* + * If there are any remaining CIs, then this packet is bad. 
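+ *
+ * As a sketch of what "bad" means here (assuming our Configure-Request
+ * carried only the Interface-Identifier option): the Configure-Ack must
+ * echo exactly CILEN_IFACEID octets, namely the CI_IFACEID type, the
+ * length octet, and go->ourid unchanged; a different identifier, a
+ * reordered option list, or any trailing octets sends us to the bad label.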
+ */ + if (len != 0) + goto bad; + return (1); + +bad: + IPV6CPDEBUG(("ipv6cp_ackci: received bad Ack!")); + return (0); +} + +/* + * ipv6cp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if IPV6CP is in the OPENED state. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int ipv6cp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char citype, cilen, *next; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + ipv6cp_options no; /* options we've seen Naks for */ + ipv6cp_options try_; /* options to request next time */ + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define NAKCIIFACEID(opt, neg, code) \ + if (go->neg && \ + len >= (cilen = CILEN_IFACEID) && \ + p[1] == cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + eui64_get(ifaceid, p); \ + no.neg = 1; \ + code \ + } + +#ifdef IPV6CP_COMP +#define NAKCIVJ(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_COMPRESS) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#endif /* IPV6CP_COMP */ + + /* + * Accept the peer's idea of {our,his} interface identifier, if different + * from our idea, only if the accept_{local,remote} flag is set. + */ + NAKCIIFACEID(CI_IFACEID, neg_ifaceid, + if (treat_as_reject) { + try_.neg_ifaceid = 0; + } else if (go->accept_local) { + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->hisid)) /* bad luck */ + eui64_magic(ifaceid); + try_.ourid = ifaceid; + IPV6CPDEBUG(("local LL address %s", llv6_ntoa(ifaceid))); + } + ); + +#ifdef IPV6CP_COMP + NAKCIVJ(CI_COMPRESSTYPE, neg_vj, + { + if (cishort == IPV6CP_COMP && !treat_as_reject) { + try_.vj_protocol = cishort; + } else { + try_.neg_vj = 0; + } + } + ); +#endif /* IPV6CP_COMP */ + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If they want to negotiate about interface identifier, we comply. + * If they want us to ask for compression, we refuse. + */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if ( cilen < CILEN_VOID || (len -= cilen) < 0 ) + goto bad; + next = p + cilen - 2; + + switch (citype) { +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + if (go->neg_vj || no.neg_vj || + (cilen != CILEN_COMPRESS)) + goto bad; + no.neg_vj = 1; + break; +#endif /* IPV6CP_COMP */ + case CI_IFACEID: + if (go->neg_ifaceid || no.neg_ifaceid || cilen != CILEN_IFACEID) + goto bad; + try_.neg_ifaceid = 1; + eui64_get(ifaceid, p); + if (go->accept_local) { + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->hisid)) /* bad luck */ + eui64_magic(ifaceid); + try_.ourid = ifaceid; + } + no.neg_ifaceid = 1; + break; + default: + break; + } + p = next; + } + + /* If there is still anything left, this packet is bad. */ + if (len != 0) + goto bad; + + /* + * OK, the Nak is good. Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; + +bad: + IPV6CPDEBUG(("ipv6cp_nakci: received bad Nak!")); + return 0; +} + + +/* + * ipv6cp_rejci - Reject some of our CIs. 
+ */ +static int ipv6cp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char cilen; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + ipv6cp_options try_; /* options to request next time */ + + try_ = *go; + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIIFACEID(opt, neg, val1) \ + if (go->neg && \ + len >= (cilen = CILEN_IFACEID) && \ + p[1] == cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + eui64_get(ifaceid, p); \ + /* Check rejected value. */ \ + if (! eui64_equals(ifaceid, val1)) \ + goto bad; \ + try_.neg = 0; \ + } + +#ifdef IPV6CP_COMP +#define REJCIVJ(opt, neg, val) \ + if (go->neg && \ + p[1] == CILEN_COMPRESS && \ + len >= p[1] && \ + p[0] == opt) { \ + len -= p[1]; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. */ \ + if (cishort != val) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* IPV6CP_COMP */ + + REJCIIFACEID(CI_IFACEID, neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + REJCIVJ(CI_COMPRESSTYPE, neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + IPV6CPDEBUG(("ipv6cp_rejci: received bad Reject!")); + return 0; +} + + +/* + * ipv6cp_reqci - Check the peer's requested CIs and send appropriate response. + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * len = Length of requested CIs + * + */ +static int ipv6cp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + u_short cilen, citype; /* Parsed len, type */ +#ifdef IPV6CP_COMP + u_short cishort; /* Parsed short value */ +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; /* Parsed interface identifier */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *ucp = inp; /* Pointer to current output char */ + int l = *len; /* Length left */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. + */ + next = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? 
*/ + IPV6CPDEBUG(("ipv6cp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_IFACEID: + IPV6CPDEBUG(("ipv6cp: received interface identifier ")); + + if (!ao->neg_ifaceid || + cilen != CILEN_IFACEID) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no interface identifier, or if we both have same + * identifier then NAK it with new idea. + * In particular, if we don't know his identifier, but he does, + * then accept it. + */ + eui64_get(ifaceid, p); + IPV6CPDEBUG(("(%s)", llv6_ntoa(ifaceid))); + if (eui64_iszero(ifaceid) && eui64_iszero(go->ourid)) { + orc = CONFREJ; /* Reject CI */ + break; + } + if (!eui64_iszero(wo->hisid) && + !eui64_equals(ifaceid, wo->hisid) && + eui64_iszero(go->hisid)) { + + orc = CONFNAK; + ifaceid = wo->hisid; + go->hisid = ifaceid; + DECPTR(sizeof(ifaceid), p); + eui64_put(ifaceid, p); + } else + if (eui64_iszero(ifaceid) || eui64_equals(ifaceid, go->ourid)) { + orc = CONFNAK; + if (eui64_iszero(go->hisid)) /* first time, try option */ + ifaceid = wo->hisid; + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->ourid)) /* bad luck */ + eui64_magic(ifaceid); + go->hisid = ifaceid; + DECPTR(sizeof(ifaceid), p); + eui64_put(ifaceid, p); + } + + ho->neg_ifaceid = 1; + ho->hisid = ifaceid; + break; + +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + IPV6CPDEBUG(("ipv6cp: received COMPRESSTYPE ")); + if (!ao->neg_vj || + (cilen != CILEN_COMPRESS)) { + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + IPV6CPDEBUG(("(%d)", cishort)); + + if (!(cishort == IPV6CP_COMP)) { + orc = CONFREJ; + break; + } + + ho->neg_vj = 1; + ho->vj_protocol = cishort; + break; +#endif /* IPV6CP_COMP */ + + default: + orc = CONFREJ; + break; + } + +endswitch: + IPV6CPDEBUG((" (%s)\n", CODENAME(orc))); + + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree) /* Getting fed up with sending NAKs? */ + orc = CONFREJ; /* Get tough if so */ + else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + if (rc == CONFACK) { /* Ack'd all prior CIs? */ + rc = CONFNAK; /* Not anymore... */ + ucp = inp; /* Backup */ + } + } + } + + if (orc == CONFREJ && /* Reject this CI */ + rc != CONFREJ) { /* but no prior ones? */ + rc = CONFREJ; + ucp = inp; /* Backup */ + } + + /* Need to move CI? */ + if (ucp != cip) + MEMCPY(ucp, cip, cilen); /* Move it */ + + /* Update output pointer */ + INCPTR(cilen, ucp); + } + + /* + * If we aren't rejecting this packet, and we want to negotiate + * their identifier and they didn't send their identifier, then we + * send a NAK with a CI_IFACEID option appended. We assume the + * input buffer is long enough that we can append the extra + * option safely. 
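+ *
+ * Concretely, the bytes appended by the code below are CILEN_IFACEID
+ * (i.e. 10) octets: one octet of option type (CI_IFACEID), one octet of
+ * length, and the eight octets of wo->hisid, the identifier we would
+ * like the peer to adopt.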
+ */ + if (rc != CONFREJ && !ho->neg_ifaceid && + wo->req_ifaceid && !reject_if_disagree) { + if (rc == CONFACK) { + rc = CONFNAK; + ucp = inp; /* reset pointer */ + wo->req_ifaceid = 0; /* don't ask again */ + } + PUTCHAR(CI_IFACEID, ucp); + PUTCHAR(CILEN_IFACEID, ucp); + eui64_put(wo->hisid, ucp); + } + + *len = ucp - inp; /* Compute output length */ + IPV6CPDEBUG(("ipv6cp: returning Configure-%s", CODENAME(rc))); + return (rc); /* Return final code */ +} + +#if PPP_OPTIONS +/* + * ipv6_check_options - check that any IP-related options are OK, + * and assign appropriate defaults. + */ +static void ipv6_check_options() { + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + + if (!ipv6cp_protent.enabled_flag) + return; + + /* + * Persistent link-local id is only used when user has not explicitly + * configure/hard-code the id + */ + if ((wo->use_persistent) && (!wo->opt_local) && (!wo->opt_remote)) { + + /* + * On systems where there are no Ethernet interfaces used, there + * may be other ways to obtain a persistent id. Right now, it + * will fall back to using magic [see eui64_magic] below when + * an EUI-48 from MAC address can't be obtained. Other possibilities + * include obtaining EEPROM serial numbers, or some other unique + * yet persistent number. On Sparc platforms, this is possible, + * but too bad there's no standards yet for x86 machines. + */ + if (ether_to_eui64(&wo->ourid)) { + wo->opt_local = 1; + } + } + + if (!wo->opt_local) { /* init interface identifier */ + if (wo->use_ip && eui64_iszero(wo->ourid)) { + eui64_setlo32(wo->ourid, lwip_ntohl(ipcp_wantoptions[0].ouraddr)); + if (!eui64_iszero(wo->ourid)) + wo->opt_local = 1; + } + + while (eui64_iszero(wo->ourid)) + eui64_magic(wo->ourid); + } + + if (!wo->opt_remote) { + if (wo->use_ip && eui64_iszero(wo->hisid)) { + eui64_setlo32(wo->hisid, lwip_ntohl(ipcp_wantoptions[0].hisaddr)); + if (!eui64_iszero(wo->hisid)) + wo->opt_remote = 1; + } + } + + if (demand && (eui64_iszero(wo->ourid) || eui64_iszero(wo->hisid))) { + option_error("local/remote LL address required for demand-dialling\n"); + exit(1); + } +} +#endif /* PPP_OPTIONS */ + +#if DEMAND_SUPPORT +/* + * ipv6_demand_conf - configure the interface as though + * IPV6CP were up, for use with dial-on-demand. + */ +static int ipv6_demand_conf(int u) { + ipv6cp_options *wo = &ipv6cp_wantoptions[u]; + + if (!sif6up(u)) + return 0; + + if (!sif6addr(u, wo->ourid, wo->hisid)) + return 0; + + if (!sifnpmode(u, PPP_IPV6, NPMODE_QUEUE)) + return 0; + + ppp_notice("ipv6_demand_conf"); + ppp_notice("local LL address %s", llv6_ntoa(wo->ourid)); + ppp_notice("remote LL address %s", llv6_ntoa(wo->hisid)); + + return 1; +} +#endif /* DEMAND_SUPPORT */ + + +/* + * ipv6cp_up - IPV6CP has come UP. + * + * Configure the IPv6 network interface appropriately and bring it up. + */ +static void ipv6cp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + + IPV6CPDEBUG(("ipv6cp: up")); + + /* + * We must have a non-zero LL address for both ends of the link. 
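+ *
+ * For illustration (an assumed identifier, not a value negotiated here):
+ * an interface identifier whose eight octets are 02 11 22 ff fe 33 44 55
+ * is rendered by llv6_ntoa() as "fe80::0211:22ff:fe33:4455", i.e. the
+ * identifier becomes the low 64 bits of the fe80::/64 link-local address
+ * that sif6addr() configures on each end of the link below.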
+ */ + if (!ho->neg_ifaceid) + ho->hisid = wo->hisid; + +#if 0 /* UNUSED */ + if(!no_ifaceid_neg) { +#endif /* UNUSED */ + if (eui64_iszero(ho->hisid)) { + ppp_error("Could not determine remote LL address"); + ipv6cp_close(f->pcb, "Could not determine remote LL address"); + return; + } + if (eui64_iszero(go->ourid)) { + ppp_error("Could not determine local LL address"); + ipv6cp_close(f->pcb, "Could not determine local LL address"); + return; + } + if (eui64_equals(go->ourid, ho->hisid)) { + ppp_error("local and remote LL addresses are equal"); + ipv6cp_close(f->pcb, "local and remote LL addresses are equal"); + return; + } +#if 0 /* UNUSED */ + } +#endif /* UNUSED */ +#if 0 /* UNUSED */ + script_setenv("LLLOCAL", llv6_ntoa(go->ourid), 0); + script_setenv("LLREMOTE", llv6_ntoa(ho->hisid), 0); +#endif /* UNUSED */ + +#ifdef IPV6CP_COMP + /* set tcp compression */ + sif6comp(f->unit, ho->neg_vj); +#endif + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, the interface is already + * configured, so we put out any saved-up packets, then set the + * interface to pass IPv6 packets. + */ + if (demand) { + if (! eui64_equals(go->ourid, wo->ourid) || + ! eui64_equals(ho->hisid, wo->hisid)) { + if (! eui64_equals(go->ourid, wo->ourid)) + warn("Local LL address changed to %s", + llv6_ntoa(go->ourid)); + if (! eui64_equals(ho->hisid, wo->hisid)) + warn("Remote LL address changed to %s", + llv6_ntoa(ho->hisid)); + ipv6cp_clear_addrs(f->pcb, go->ourid, ho->hisid); + + /* Set the interface to the new addresses */ + if (!sif6addr(f->pcb, go->ourid, ho->hisid)) { + if (debug) + warn("sif6addr failed"); + ipv6cp_close(f->unit, "Interface configuration failed"); + return; + } + + } + demand_rexmit(PPP_IPV6); + sifnpmode(f->unit, PPP_IPV6, NPMODE_PASS); + + } else +#endif /* DEMAND_SUPPORT */ + { + /* + * Set LL addresses + */ + if (!sif6addr(f->pcb, go->ourid, ho->hisid)) { + PPPDEBUG(LOG_DEBUG, ("sif6addr failed")); + ipv6cp_close(f->pcb, "Interface configuration failed"); + return; + } + + /* bring the interface up for IPv6 */ + if (!sif6up(f->pcb)) { + PPPDEBUG(LOG_DEBUG, ("sif6up failed (IPV6)")); + ipv6cp_close(f->pcb, "Interface configuration failed"); + return; + } +#if DEMAND_SUPPORT + sifnpmode(f->pcb, PPP_IPV6, NPMODE_PASS); +#endif /* DEMAND_SUPPORT */ + + ppp_notice("local LL address %s", llv6_ntoa(go->ourid)); + ppp_notice("remote LL address %s", llv6_ntoa(ho->hisid)); + } + + np_up(f->pcb, PPP_IPV6); + pcb->ipv6cp_is_up = 1; + +#if 0 /* UNUSED */ + /* + * Execute the ipv6-up script, like this: + * /etc/ppp/ipv6-up interface tty speed local-LL remote-LL + */ + if (ipv6cp_script_state == s_down && ipv6cp_script_pid == 0) { + ipv6cp_script_state = s_up; + ipv6cp_script(_PATH_IPV6UP); + } +#endif /* UNUSED */ +} + + +/* + * ipv6cp_down - IPV6CP has gone DOWN. + * + * Take the IPv6 network interface down, clear its addresses + * and delete routes through it. + */ +static void ipv6cp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + + IPV6CPDEBUG(("ipv6cp: down")); +#if PPP_STATS_SUPPORT + update_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ + if (pcb->ipv6cp_is_up) { + pcb->ipv6cp_is_up = 0; + np_down(f->pcb, PPP_IPV6); + } +#ifdef IPV6CP_COMP + sif6comp(f->unit, 0); +#endif + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, set the interface + * to queue up outgoing packets (for now). 
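+ *
+ * A sketch of the dial-on-demand lifecycle as implemented in this file
+ * (see ipv6_demand_conf() and ipv6cp_up() above):
+ *
+ *   link down, demand dialling:   sifnpmode(..., PPP_IPV6, NPMODE_QUEUE)
+ *   IPV6CP reaches OPENED:        sifnpmode(..., PPP_IPV6, NPMODE_PASS)
+ *   IPV6CP goes back down:        NPMODE_QUEUE (demand) or NPMODE_DROP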
+ */ + if (demand) { + sifnpmode(f->pcb, PPP_IPV6, NPMODE_QUEUE); + } else +#endif /* DEMAND_SUPPORT */ + { +#if DEMAND_SUPPORT + sifnpmode(f->pcb, PPP_IPV6, NPMODE_DROP); +#endif /* DEMAND_SUPPORT */ + ipv6cp_clear_addrs(f->pcb, + go->ourid, + ho->hisid); + sif6down(f->pcb); + } + +#if 0 /* UNUSED */ + /* Execute the ipv6-down script */ + if (ipv6cp_script_state == s_up && ipv6cp_script_pid == 0) { + ipv6cp_script_state = s_down; + ipv6cp_script(_PATH_IPV6DOWN); + } +#endif /* UNUSED */ +} + + +/* + * ipv6cp_clear_addrs() - clear the interface addresses, routes, + * proxy neighbour discovery entries, etc. + */ +static void ipv6cp_clear_addrs(ppp_pcb *pcb, eui64_t ourid, eui64_t hisid) { + cif6addr(pcb, ourid, hisid); +} + + +/* + * ipv6cp_finished - possibly shut down the lower layers. + */ +static void ipv6cp_finished(fsm *f) { + np_finished(f->pcb, PPP_IPV6); +} + + +#if 0 /* UNUSED */ +/* + * ipv6cp_script_done - called when the ipv6-up or ipv6-down script + * has finished. + */ +static void +ipv6cp_script_done(arg) + void *arg; +{ + ipv6cp_script_pid = 0; + switch (ipv6cp_script_state) { + case s_up: + if (ipv6cp_fsm[0].state != PPP_FSM_OPENED) { + ipv6cp_script_state = s_down; + ipv6cp_script(_PATH_IPV6DOWN); + } + break; + case s_down: + if (ipv6cp_fsm[0].state == PPP_FSM_OPENED) { + ipv6cp_script_state = s_up; + ipv6cp_script(_PATH_IPV6UP); + } + break; + } +} + + +/* + * ipv6cp_script - Execute a script with arguments + * interface-name tty-name speed local-LL remote-LL. + */ +static void +ipv6cp_script(script) + char *script; +{ + char strspeed[32], strlocal[32], strremote[32]; + char *argv[8]; + + sprintf(strspeed, "%d", baud_rate); + strcpy(strlocal, llv6_ntoa(ipv6cp_gotoptions[0].ourid)); + strcpy(strremote, llv6_ntoa(ipv6cp_hisoptions[0].hisid)); + + argv[0] = script; + argv[1] = ifname; + argv[2] = devnam; + argv[3] = strspeed; + argv[4] = strlocal; + argv[5] = strremote; + argv[6] = ipparam; + argv[7] = NULL; + + ipv6cp_script_pid = run_program(script, argv, 0, ipv6cp_script_done, + NULL, 0); +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ipv6cp_printpkt - print the contents of an IPV6CP packet. 
+ */ +static const char* const ipv6cp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej" +}; + +static int ipv6cp_printpkt(const u_char *p, int plen, + void (*printer)(void *, const char *, ...), void *arg) { + int code, id, len, olen; + const u_char *pstart, *optend; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ipv6cp_codenames)) + printer(arg, " %s", ipv6cp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + if (olen >= CILEN_COMPRESS) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "compress "); + printer(arg, "0x%x", cishort); + } + break; +#endif /* IPV6CP_COMP */ + case CI_IFACEID: + if (olen == CILEN_IFACEID) { + p += 2; + eui64_get(ifaceid, p); + printer(arg, "addr %s", llv6_ntoa(ifaceid)); + } + break; + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * ipv6_active_pkt - see if this IP packet is worth bringing the link up for. + * We don't bring the link up for IP fragments or for TCP FIN packets + * with no data. + */ +#define IP6_HDRLEN 40 /* bytes */ +#define IP6_NHDR_FRAG 44 /* fragment IPv6 header */ +#define TCP_HDRLEN 20 +#define TH_FIN 0x01 + +/* + * We use these macros because the IP header may be at an odd address, + * and some compilers might use word loads to get th_off or ip_hl. + */ + +#define get_ip6nh(x) (((unsigned char *)(x))[6]) +#define get_tcpoff(x) (((unsigned char *)(x))[12] >> 4) +#define get_tcpflags(x) (((unsigned char *)(x))[13]) + +static int ipv6_active_pkt(u_char *pkt, int len) { + u_char *tcp; + + len -= PPP_HDRLEN; + pkt += PPP_HDRLEN; + if (len < IP6_HDRLEN) + return 0; + if (get_ip6nh(pkt) == IP6_NHDR_FRAG) + return 0; + if (get_ip6nh(pkt) != IPPROTO_TCP) + return 1; + if (len < IP6_HDRLEN + TCP_HDRLEN) + return 0; + tcp = pkt + IP6_HDRLEN; + if ((get_tcpflags(tcp) & TH_FIN) != 0 && len == IP6_HDRLEN + get_tcpoff(tcp) * 4) + return 0; + return 1; +} +#endif /* DEMAND_SUPPORT */ + +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c new file mode 100644 index 000000000..90ed183b7 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c @@ -0,0 +1,2790 @@ +/* + * lcp.c - PPP Link Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#include "netif/ppp/magic.h" + +/* + * When the link comes up we want to be able to wait for a short while, + * or until seeing some input from the peer, before starting to send + * configure-requests. We do this by delaying the fsm_lowerup call. + */ +/* steal a bit in fsm flags word */ +#define DELAYED_UP 0x80 + +static void lcp_delayed_up(void *arg); + +/* + * LCP-related command-line options. 
+ */ +#if 0 /* UNUSED */ +int lcp_echo_interval = 0; /* Interval between LCP echo-requests */ +int lcp_echo_fails = 0; /* Tolerance to unanswered echo-requests */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* options */ +static u_int lcp_echo_interval = LCP_ECHOINTERVAL; /* Interval between LCP echo-requests */ +static u_int lcp_echo_fails = LCP_MAXECHOFAILS; /* Tolerance to unanswered echo-requests */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +#if PPP_LCP_ADAPTIVE +bool lcp_echo_adaptive = 0; /* request echo only if the link was idle */ +#endif +bool lax_recv = 0; /* accept control chars in asyncmap */ +bool noendpoint = 0; /* don't send/accept endpoint discriminator */ +#endif /* UNUSED */ + +#if PPP_OPTIONS +static int noopt (char **); +#endif /* PPP_OPTIONS */ + +#ifdef HAVE_MULTILINK +static int setendpoint (char **); +static void printendpoint (option_t *, void (*)(void *, char *, ...), + void *); +#endif /* HAVE_MULTILINK */ + +#if PPP_OPTIONS +static option_t lcp_option_list[] = { + /* LCP options */ + { "-all", o_special_noarg, (void *)noopt, + "Don't request/allow any LCP options" }, + + { "noaccomp", o_bool, &lcp_wantoptions[0].neg_accompression, + "Disable address/control compression", + OPT_A2CLR, &lcp_allowoptions[0].neg_accompression }, + { "-ac", o_bool, &lcp_wantoptions[0].neg_accompression, + "Disable address/control compression", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_accompression }, + + { "asyncmap", o_uint32, &lcp_wantoptions[0].asyncmap, + "Set asyncmap (for received packets)", + OPT_OR, &lcp_wantoptions[0].neg_asyncmap }, + { "-as", o_uint32, &lcp_wantoptions[0].asyncmap, + "Set asyncmap (for received packets)", + OPT_ALIAS | OPT_OR, &lcp_wantoptions[0].neg_asyncmap }, + { "default-asyncmap", o_uint32, &lcp_wantoptions[0].asyncmap, + "Disable asyncmap negotiation", + OPT_OR | OPT_NOARG | OPT_VAL(~0U) | OPT_A2CLR, + &lcp_allowoptions[0].neg_asyncmap }, + { "-am", o_uint32, &lcp_wantoptions[0].asyncmap, + "Disable asyncmap negotiation", + OPT_ALIAS | OPT_OR | OPT_NOARG | OPT_VAL(~0U) | OPT_A2CLR, + &lcp_allowoptions[0].neg_asyncmap }, + + { "nomagic", o_bool, &lcp_wantoptions[0].neg_magicnumber, + "Disable magic number negotiation (looped-back line detection)", + OPT_A2CLR, &lcp_allowoptions[0].neg_magicnumber }, + { "-mn", o_bool, &lcp_wantoptions[0].neg_magicnumber, + "Disable magic number negotiation (looped-back line detection)", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_magicnumber }, + + { "mru", o_int, &lcp_wantoptions[0].mru, + "Set MRU (maximum received packet size) for negotiation", + OPT_PRIO, &lcp_wantoptions[0].neg_mru }, + { "default-mru", o_bool, &lcp_wantoptions[0].neg_mru, + "Disable MRU negotiation (use default 1500)", + OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_mru }, + { "-mru", o_bool, &lcp_wantoptions[0].neg_mru, + "Disable MRU negotiation (use default 1500)", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_mru }, + + { "mtu", o_int, &lcp_allowoptions[0].mru, + "Set our MTU", OPT_LIMITS, NULL, MAXMRU, MINMRU }, + + { "nopcomp", o_bool, &lcp_wantoptions[0].neg_pcompression, + "Disable protocol field compression", + OPT_A2CLR, &lcp_allowoptions[0].neg_pcompression }, + { "-pc", o_bool, &lcp_wantoptions[0].neg_pcompression, + "Disable protocol field compression", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_pcompression }, + + { "passive", o_bool, &lcp_wantoptions[0].passive, + "Set passive mode", 1 }, + { "-p", o_bool, &lcp_wantoptions[0].passive, + "Set passive mode", OPT_ALIAS | 1 }, + + { 
"silent", o_bool, &lcp_wantoptions[0].silent, + "Set silent mode", 1 }, + + { "lcp-echo-failure", o_int, &lcp_echo_fails, + "Set number of consecutive echo failures to indicate link failure", + OPT_PRIO }, + { "lcp-echo-interval", o_int, &lcp_echo_interval, + "Set time in seconds between LCP echo requests", OPT_PRIO }, +#if PPP_LCP_ADAPTIVE + { "lcp-echo-adaptive", o_bool, &lcp_echo_adaptive, + "Suppress LCP echo requests if traffic was received", 1 }, +#endif + { "lcp-restart", o_int, &lcp_fsm[0].timeouttime, + "Set time in seconds between LCP retransmissions", OPT_PRIO }, + { "lcp-max-terminate", o_int, &lcp_fsm[0].maxtermtransmits, + "Set maximum number of LCP terminate-request transmissions", OPT_PRIO }, + { "lcp-max-configure", o_int, &lcp_fsm[0].maxconfreqtransmits, + "Set maximum number of LCP configure-request transmissions", OPT_PRIO }, + { "lcp-max-failure", o_int, &lcp_fsm[0].maxnakloops, + "Set limit on number of LCP configure-naks", OPT_PRIO }, + + { "receive-all", o_bool, &lax_recv, + "Accept all received control characters", 1 }, + +#ifdef HAVE_MULTILINK + { "mrru", o_int, &lcp_wantoptions[0].mrru, + "Maximum received packet size for multilink bundle", + OPT_PRIO, &lcp_wantoptions[0].neg_mrru }, + + { "mpshortseq", o_bool, &lcp_wantoptions[0].neg_ssnhf, + "Use short sequence numbers in multilink headers", + OPT_PRIO | 1, &lcp_allowoptions[0].neg_ssnhf }, + { "nompshortseq", o_bool, &lcp_wantoptions[0].neg_ssnhf, + "Don't use short sequence numbers in multilink headers", + OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_ssnhf }, + + { "endpoint", o_special, (void *) setendpoint, + "Endpoint discriminator for multilink", + OPT_PRIO | OPT_A2PRINTER, (void *) printendpoint }, +#endif /* HAVE_MULTILINK */ + + { "noendpoint", o_bool, &noendpoint, + "Don't send or accept multilink endpoint discriminator", 1 }, + + {NULL} +}; +#endif /* PPP_OPTIONS */ + +/* + * Callbacks for fsm code. 
(CI = Configuration Information) + */ +static void lcp_resetci(fsm *f); /* Reset our CI */ +static int lcp_cilen(fsm *f); /* Return length of our CI */ +static void lcp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI to pkt */ +static int lcp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int lcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject); /* Peer nak'd our CI */ +static int lcp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int lcp_reqci(fsm *f, u_char *inp, int *lenp, int reject_if_disagree); /* Rcv peer CI */ +static void lcp_up(fsm *f); /* We're UP */ +static void lcp_down(fsm *f); /* We're DOWN */ +static void lcp_starting (fsm *); /* We need lower layer up */ +static void lcp_finished (fsm *); /* We need lower layer down */ +static int lcp_extcode(fsm *f, int code, int id, u_char *inp, int len); +static void lcp_rprotrej(fsm *f, u_char *inp, int len); + +/* + * routines to send LCP echos to peer + */ + +static void lcp_echo_lowerup(ppp_pcb *pcb); +static void lcp_echo_lowerdown(ppp_pcb *pcb); +static void LcpEchoTimeout(void *arg); +static void lcp_received_echo_reply(fsm *f, int id, u_char *inp, int len); +static void LcpSendEchoRequest(fsm *f); +static void LcpLinkFailure(fsm *f); +static void LcpEchoCheck(fsm *f); + +static const fsm_callbacks lcp_callbacks = { /* LCP callback routines */ + lcp_resetci, /* Reset our Configuration Information */ + lcp_cilen, /* Length of our Configuration Information */ + lcp_addci, /* Add our Configuration Information */ + lcp_ackci, /* ACK our Configuration Information */ + lcp_nakci, /* NAK our Configuration Information */ + lcp_rejci, /* Reject our Configuration Information */ + lcp_reqci, /* Request peer's Configuration Information */ + lcp_up, /* Called when fsm reaches OPENED state */ + lcp_down, /* Called when fsm leaves OPENED state */ + lcp_starting, /* Called when we want the lower layer up */ + lcp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + lcp_extcode, /* Called to handle LCP-specific codes */ + "LCP" /* String name of protocol */ +}; + +/* + * Protocol entry points. + * Some of these are called directly. + */ + +static void lcp_init(ppp_pcb *pcb); +static void lcp_input(ppp_pcb *pcb, u_char *p, int len); +static void lcp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int lcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent lcp_protent = { + PPP_LCP, + lcp_init, + lcp_input, + lcp_protrej, + lcp_lowerup, + lcp_lowerdown, + lcp_open, + lcp_close, +#if PRINTPKT_SUPPORT + lcp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "LCP", + NULL, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + lcp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +/* + * Length of each type of configuration option (in octets) + */ +#define CILEN_VOID 2 +#define CILEN_CHAR 3 +#define CILEN_SHORT 4 /* CILEN_VOID + 2 */ +#if CHAP_SUPPORT +#define CILEN_CHAP 5 /* CILEN_VOID + 2 + 1 */ +#endif /* CHAP_SUPPORT */ +#define CILEN_LONG 6 /* CILEN_VOID + 4 */ +#if LQR_SUPPORT +#define CILEN_LQR 8 /* CILEN_VOID + 2 + 4 */ +#endif /* LQR_SUPPORT */ +#define CILEN_CBCP 3 + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? 
"NAK" : "REJ") + +#if PPP_OPTIONS +/* + * noopt - Disable all options (why?). + */ +static int +noopt(argv) + char **argv; +{ + BZERO((char *) &lcp_wantoptions[0], sizeof (struct lcp_options)); + BZERO((char *) &lcp_allowoptions[0], sizeof (struct lcp_options)); + + return (1); +} +#endif /* PPP_OPTIONS */ + +#ifdef HAVE_MULTILINK +static int +setendpoint(argv) + char **argv; +{ + if (str_to_epdisc(&lcp_wantoptions[0].endpoint, *argv)) { + lcp_wantoptions[0].neg_endpoint = 1; + return 1; + } + option_error("Can't parse '%s' as an endpoint discriminator", *argv); + return 0; +} + +static void +printendpoint(opt, printer, arg) + option_t *opt; + void (*printer) (void *, char *, ...); + void *arg; +{ + printer(arg, "%s", epdisc_to_str(&lcp_wantoptions[0].endpoint)); +} +#endif /* HAVE_MULTILINK */ + +/* + * lcp_init - Initialize LCP. + */ +static void lcp_init(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_LCP; + f->callbacks = &lcp_callbacks; + + fsm_init(f); + + BZERO(wo, sizeof(*wo)); + wo->neg_mru = 1; + wo->mru = PPP_DEFMRU; + wo->neg_asyncmap = 1; + wo->neg_magicnumber = 1; + wo->neg_pcompression = 1; + wo->neg_accompression = 1; + + BZERO(ao, sizeof(*ao)); + ao->neg_mru = 1; + ao->mru = PPP_MAXMRU; + ao->neg_asyncmap = 1; +#if CHAP_SUPPORT + ao->neg_chap = 1; + ao->chap_mdtype = CHAP_MDTYPE_SUPPORTED; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + ao->neg_upap = 1; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + ao->neg_eap = 1; +#endif /* EAP_SUPPORT */ + ao->neg_magicnumber = 1; + ao->neg_pcompression = 1; + ao->neg_accompression = 1; + ao->neg_endpoint = 1; +} + + +/* + * lcp_open - LCP is allowed to come up. + */ +void lcp_open(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + lcp_options *wo = &pcb->lcp_wantoptions; + + f->flags &= ~(OPT_PASSIVE | OPT_SILENT); + if (wo->passive) + f->flags |= OPT_PASSIVE; + if (wo->silent) + f->flags |= OPT_SILENT; + fsm_open(f); +} + + +/* + * lcp_close - Take LCP down. + */ +void lcp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->lcp_fsm; + int oldstate; + + if (pcb->phase != PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + && pcb->phase != PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + new_phase(pcb, PPP_PHASE_TERMINATE); + + if (f->flags & DELAYED_UP) { + UNTIMEOUT(lcp_delayed_up, f); + f->state = PPP_FSM_STOPPED; + } + oldstate = f->state; + + fsm_close(f, reason); + if (oldstate == PPP_FSM_STOPPED && (f->flags & (OPT_PASSIVE|OPT_SILENT|DELAYED_UP))) { + /* + * This action is not strictly according to the FSM in RFC1548, + * but it does mean that the program terminates if you do a + * lcp_close() when a connection hasn't been established + * because we are in passive/silent mode or because we have + * delayed the fsm_lowerup() call and it hasn't happened yet. + */ + f->flags &= ~DELAYED_UP; + lcp_finished(f); + } +} + + +/* + * lcp_lowerup - The lower layer is up. + */ +void lcp_lowerup(ppp_pcb *pcb) { + lcp_options *wo = &pcb->lcp_wantoptions; + fsm *f = &pcb->lcp_fsm; + /* + * Don't use A/C or protocol compression on transmission, + * but accept A/C and protocol compressed packets + * if we are going to ask for A/C and protocol compression. + */ + if (ppp_send_config(pcb, PPP_MRU, 0xffffffff, 0, 0) < 0 + || ppp_recv_config(pcb, PPP_MRU, (pcb->settings.lax_recv? 
0: 0xffffffff), + wo->neg_pcompression, wo->neg_accompression) < 0) + return; + pcb->peer_mru = PPP_MRU; + + if (pcb->settings.listen_time != 0) { + f->flags |= DELAYED_UP; + TIMEOUTMS(lcp_delayed_up, f, pcb->settings.listen_time); + } else + fsm_lowerup(f); +} + + +/* + * lcp_lowerdown - The lower layer is down. + */ +void lcp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + UNTIMEOUT(lcp_delayed_up, f); + } else + fsm_lowerdown(f); +} + + +/* + * lcp_delayed_up - Bring the lower layer up now. + */ +static void lcp_delayed_up(void *arg) { + fsm *f = (fsm*)arg; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + fsm_lowerup(f); + } +} + + +/* + * lcp_input - Input LCP packet. + */ +static void lcp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->lcp_fsm; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + UNTIMEOUT(lcp_delayed_up, f); + fsm_lowerup(f); + } + fsm_input(f, p, len); +} + +/* + * lcp_extcode - Handle a LCP-specific code. + */ +static int lcp_extcode(fsm *f, int code, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char *magp; + + switch( code ){ + case PROTREJ: + lcp_rprotrej(f, inp, len); + break; + + case ECHOREQ: + if (f->state != PPP_FSM_OPENED) + break; + magp = inp; + PUTLONG(go->magicnumber, magp); + fsm_sdata(f, ECHOREP, id, inp, len); + break; + + case ECHOREP: + lcp_received_echo_reply(f, id, inp, len); + break; + + case DISCREQ: + case IDENTIF: + case TIMEREM: + break; + + default: + return 0; + } + return 1; +} + + +/* + * lcp_rprotrej - Receive an Protocol-Reject. + * + * Figure out which protocol is rejected and inform it. + */ +static void lcp_rprotrej(fsm *f, u_char *inp, int len) { + int i; + const struct protent *protp; + u_short prot; +#if PPP_PROTOCOLNAME + const char *pname; +#endif /* PPP_PROTOCOLNAME */ + + if (len < 2) { + LCPDEBUG(("lcp_rprotrej: Rcvd short Protocol-Reject packet!")); + return; + } + + GETSHORT(prot, inp); + + /* + * Protocol-Reject packets received in any state other than the LCP + * OPENED state SHOULD be silently discarded. + */ + if( f->state != PPP_FSM_OPENED ){ + LCPDEBUG(("Protocol-Reject discarded: LCP in state %d", f->state)); + return; + } + +#if PPP_PROTOCOLNAME + pname = protocol_name(prot); +#endif /* PPP_PROTOCOLNAME */ + + /* + * Upcall the proper Protocol-Reject routine. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol == prot) { +#if PPP_PROTOCOLNAME + if (pname != NULL) + ppp_dbglog("Protocol-Reject for '%s' (0x%x) received", pname, + prot); + else +#endif /* PPP_PROTOCOLNAME */ + ppp_dbglog("Protocol-Reject for 0x%x received", prot); + (*protp->protrej)(f->pcb); + return; + } + +#if PPP_PROTOCOLNAME + if (pname != NULL) + ppp_warn("Protocol-Reject for unsupported protocol '%s' (0x%x)", pname, + prot); + else +#endif /* #if PPP_PROTOCOLNAME */ + ppp_warn("Protocol-Reject for unsupported protocol 0x%x", prot); +} + + +/* + * lcp_protrej - A Protocol-Reject was received. + */ +/*ARGSUSED*/ +static void lcp_protrej(ppp_pcb *pcb) { + /* + * Can't reject LCP! + */ + ppp_error("Received Protocol-Reject for LCP!"); + fsm_protreject(&pcb->lcp_fsm); +} + + +/* + * lcp_sprotrej - Send a Protocol-Reject for some protocol. + */ +void lcp_sprotrej(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->lcp_fsm; + /* + * Send back the protocol and the information field of the + * rejected packet. We only get here if LCP is in the OPENED state. 
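+ *
+ * As a sketch (field names per RFC 1661, contents illustrative): the
+ * frame sent by fsm_sdata() below has Code = PROTREJ, a fresh Identifier,
+ * and a Data field beginning with the two octets of the rejected protocol
+ * number followed by a copy of the offending packet's Information field,
+ * i.e. exactly the p/len bytes handed to us by the caller.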
+ */ +#if 0 + p += 2; + len -= 2; +#endif + + fsm_sdata(f, PROTREJ, ++f->id, + p, len); +} + + +/* + * lcp_resetci - Reset our CI. + */ +static void lcp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + +#if PPP_AUTH_SUPPORT + + /* note: default value is true for allow options */ + if (pcb->settings.user && pcb->settings.passwd) { +#if PAP_SUPPORT + if (pcb->settings.refuse_pap) { + ao->neg_upap = 0; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (pcb->settings.refuse_chap) { + ao->chap_mdtype &= ~MDTYPE_MD5; + } +#if MSCHAP_SUPPORT + if (pcb->settings.refuse_mschap) { + ao->chap_mdtype &= ~MDTYPE_MICROSOFT; + } + if (pcb->settings.refuse_mschap_v2) { + ao->chap_mdtype &= ~MDTYPE_MICROSOFT_V2; + } +#endif /* MSCHAP_SUPPORT */ + ao->neg_chap = (ao->chap_mdtype != MDTYPE_NONE); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (pcb->settings.refuse_eap) { + ao->neg_eap = 0; + } +#endif /* EAP_SUPPORT */ + +#if PPP_SERVER + /* note: default value is false for wanted options */ + if (pcb->settings.auth_required) { +#if PAP_SUPPORT + if (!pcb->settings.refuse_pap) { + wo->neg_upap = 1; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (!pcb->settings.refuse_chap) { + wo->chap_mdtype |= MDTYPE_MD5; + } +#if MSCHAP_SUPPORT + if (!pcb->settings.refuse_mschap) { + wo->chap_mdtype |= MDTYPE_MICROSOFT; + } + if (!pcb->settings.refuse_mschap_v2) { + wo->chap_mdtype |= MDTYPE_MICROSOFT_V2; + } +#endif /* MSCHAP_SUPPORT */ + wo->neg_chap = (wo->chap_mdtype != MDTYPE_NONE); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (!pcb->settings.refuse_eap) { + wo->neg_eap = 1; + } +#endif /* EAP_SUPPORT */ + } +#endif /* PPP_SERVER */ + + } else { +#if PAP_SUPPORT + ao->neg_upap = 0; +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + ao->neg_chap = 0; + ao->chap_mdtype = MDTYPE_NONE; +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + ao->neg_eap = 0; +#endif /* EAP_SUPPORT */ + } + + PPPDEBUG(LOG_DEBUG, ("ppp: auth protocols:")); +#if PAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" PAP=%d", ao->neg_upap)); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" CHAP=%d CHAP_MD5=%d", ao->neg_chap, !!(ao->chap_mdtype&MDTYPE_MD5))); +#if MSCHAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" CHAP_MS=%d CHAP_MS2=%d", !!(ao->chap_mdtype&MDTYPE_MICROSOFT), !!(ao->chap_mdtype&MDTYPE_MICROSOFT_V2))); +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" EAP=%d", ao->neg_eap)); +#endif /* EAP_SUPPORT */ + PPPDEBUG(LOG_DEBUG, ("\n")); + +#endif /* PPP_AUTH_SUPPORT */ + + wo->magicnumber = magic(); + wo->numloops = 0; + *go = *wo; +#ifdef HAVE_MULTILINK + if (!multilink) { + go->neg_mrru = 0; +#endif /* HAVE_MULTILINK */ + go->neg_ssnhf = 0; + go->neg_endpoint = 0; +#ifdef HAVE_MULTILINK + } +#endif /* HAVE_MULTILINK */ + if (pcb->settings.noendpoint) + ao->neg_endpoint = 0; + pcb->peer_mru = PPP_MRU; +#if 0 /* UNUSED */ + auth_reset(pcb); +#endif /* UNUSED */ +} + + +/* + * lcp_cilen - Return length of our CI. + */ +static int lcp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + +#define LENCIVOID(neg) ((neg) ? CILEN_VOID : 0) +#if CHAP_SUPPORT +#define LENCICHAP(neg) ((neg) ? CILEN_CHAP : 0) +#endif /* CHAP_SUPPORT */ +#define LENCISHORT(neg) ((neg) ? CILEN_SHORT : 0) +#define LENCILONG(neg) ((neg) ? CILEN_LONG : 0) +#if LQR_SUPPORT +#define LENCILQR(neg) ((neg) ? 
CILEN_LQR: 0) +#endif /* LQR_SUPPORT */ +#define LENCICBCP(neg) ((neg) ? CILEN_CBCP: 0) + /* + * NB: we only ask for one of CHAP, UPAP, or EAP, even if we will + * accept more than one. We prefer EAP first, then CHAP, then + * PAP. + */ + return (LENCISHORT(go->neg_mru && go->mru != PPP_DEFMRU) + + LENCILONG(go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) + +#if EAP_SUPPORT + LENCISHORT(go->neg_eap) + +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + LENCICHAP(!go->neg_eap && go->neg_chap) + +#endif /* EAP_SUPPORT */ +#if !EAP_SUPPORT + LENCICHAP(go->neg_chap) + +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + LENCISHORT(!go->neg_eap && !go->neg_chap && go->neg_upap) + +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + LENCISHORT(!go->neg_eap && go->neg_upap) + +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + LENCISHORT(!go->neg_chap && go->neg_upap) + +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + LENCISHORT(go->neg_upap) + +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT + LENCILQR(go->neg_lqr) + +#endif /* LQR_SUPPORT */ + LENCICBCP(go->neg_cbcp) + + LENCILONG(go->neg_magicnumber) + + LENCIVOID(go->neg_pcompression) + + LENCIVOID(go->neg_accompression) + +#ifdef HAVE_MULTILINK + LENCISHORT(go->neg_mrru) + +#endif /* HAVE_MULTILINK */ + LENCIVOID(go->neg_ssnhf) + + (go->neg_endpoint? CILEN_CHAR + go->endpoint.length: 0)); +} + + +/* + * lcp_addci - Add our desired CIs to a packet. + */ +static void lcp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char *start_ucp = ucp; + +#define ADDCIVOID(opt, neg) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_VOID, ucp); \ + } +#define ADDCISHORT(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_SHORT, ucp); \ + PUTSHORT(val, ucp); \ + } +#if CHAP_SUPPORT +#define ADDCICHAP(opt, neg, val) \ + if (neg) { \ + PUTCHAR((opt), ucp); \ + PUTCHAR(CILEN_CHAP, ucp); \ + PUTSHORT(PPP_CHAP, ucp); \ + PUTCHAR((CHAP_DIGEST(val)), ucp); \ + } +#endif /* CHAP_SUPPORT */ +#define ADDCILONG(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_LONG, ucp); \ + PUTLONG(val, ucp); \ + } +#if LQR_SUPPORT +#define ADDCILQR(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_LQR, ucp); \ + PUTSHORT(PPP_LQR, ucp); \ + PUTLONG(val, ucp); \ + } +#endif /* LQR_SUPPORT */ +#define ADDCICHAR(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_CHAR, ucp); \ + PUTCHAR(val, ucp); \ + } +#define ADDCIENDP(opt, neg, class, val, len) \ + if (neg) { \ + int i; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_CHAR + len, ucp); \ + PUTCHAR(class, ucp); \ + for (i = 0; i < len; ++i) \ + PUTCHAR(val[i], ucp); \ + } + + ADDCISHORT(CI_MRU, go->neg_mru && go->mru != PPP_DEFMRU, go->mru); + ADDCILONG(CI_ASYNCMAP, go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF, + go->asyncmap); +#if EAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, go->neg_eap, PPP_EAP); +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + ADDCICHAP(CI_AUTHTYPE, !go->neg_eap && go->neg_chap, go->chap_mdtype); +#endif /* EAP_SUPPORT */ +#if 
!EAP_SUPPORT + ADDCICHAP(CI_AUTHTYPE, go->neg_chap, go->chap_mdtype); +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_eap && !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_eap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT + ADDCILQR(CI_QUALITY, go->neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + ADDCICHAR(CI_CALLBACK, go->neg_cbcp, CBCP_OPT); + ADDCILONG(CI_MAGICNUMBER, go->neg_magicnumber, go->magicnumber); + ADDCIVOID(CI_PCOMPRESSION, go->neg_pcompression); + ADDCIVOID(CI_ACCOMPRESSION, go->neg_accompression); +#ifdef HAVE_MULTILINK + ADDCISHORT(CI_MRRU, go->neg_mrru, go->mrru); +#endif + ADDCIVOID(CI_SSNHF, go->neg_ssnhf); + ADDCIENDP(CI_EPDISC, go->neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + if (ucp - start_ucp != *lenp) { + /* this should never happen, because peer_mtu should be 1500 */ + ppp_error("Bug in lcp_addci: wrong length"); + } +} + + +/* + * lcp_ackci - Ack our CIs. + * This should not modify any state if the Ack is bad. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int lcp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char cilen, citype, cichar; + u_short cishort; + u32_t cilong; + + /* + * CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. 
+ */ +#define ACKCIVOID(opt, neg) \ + if (neg) { \ + if ((len -= CILEN_VOID) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_VOID || \ + citype != opt) \ + goto bad; \ + } +#define ACKCISHORT(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_SHORT) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_SHORT || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + } +#define ACKCICHAR(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_CHAR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAR || \ + citype != opt) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != val) \ + goto bad; \ + } +#if CHAP_SUPPORT +#define ACKCICHAP(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_CHAP) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAP || \ + citype != (opt)) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != PPP_CHAP) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != (CHAP_DIGEST(val))) \ + goto bad; \ + } +#endif /* CHAP_SUPPORT */ +#define ACKCILONG(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_LONG) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_LONG || \ + citype != opt) \ + goto bad; \ + GETLONG(cilong, p); \ + if (cilong != val) \ + goto bad; \ + } +#if LQR_SUPPORT +#define ACKCILQR(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_LQR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_LQR || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != PPP_LQR) \ + goto bad; \ + GETLONG(cilong, p); \ + if (cilong != val) \ + goto bad; \ + } +#endif /* LQR_SUPPORT */ +#define ACKCIENDP(opt, neg, class, val, vlen) \ + if (neg) { \ + int i; \ + if ((len -= CILEN_CHAR + vlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAR + vlen || \ + citype != opt) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != class) \ + goto bad; \ + for (i = 0; i < vlen; ++i) { \ + GETCHAR(cichar, p); \ + if (cichar != val[i]) \ + goto bad; \ + } \ + } + + ACKCISHORT(CI_MRU, go->neg_mru && go->mru != PPP_DEFMRU, go->mru); + ACKCILONG(CI_ASYNCMAP, go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF, + go->asyncmap); +#if EAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, go->neg_eap, PPP_EAP); +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + ACKCICHAP(CI_AUTHTYPE, !go->neg_eap && go->neg_chap, go->chap_mdtype); +#endif /* EAP_SUPPORT */ +#if !EAP_SUPPORT + ACKCICHAP(CI_AUTHTYPE, go->neg_chap, go->chap_mdtype); +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_eap && !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_eap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ 
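+
+#if 0
+    /*
+     * Illustration only, not part of the negotiation logic: every LCP
+     * option is a type/length/value triple whose length byte counts the
+     * type and length bytes themselves.  The ACKCI* macros above are
+     * hand-unrolled versions of a generic walk such as this sketch
+     * (reusing this function's local names purely for illustration):
+     */
+    while (len >= 2) {
+        u_char type = p[0];
+        u_char olen = p[1];
+        if (olen < 2 || olen > len)
+            goto bad;               /* malformed option framing */
+        /* ...each macro additionally compares the body with what we sent... */
+        (void)type;
+        p   += olen;
+        len -= olen;
+    }
+#endif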
+#if LQR_SUPPORT + ACKCILQR(CI_QUALITY, go->neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + ACKCICHAR(CI_CALLBACK, go->neg_cbcp, CBCP_OPT); + ACKCILONG(CI_MAGICNUMBER, go->neg_magicnumber, go->magicnumber); + ACKCIVOID(CI_PCOMPRESSION, go->neg_pcompression); + ACKCIVOID(CI_ACCOMPRESSION, go->neg_accompression); +#ifdef HAVE_MULTILINK + ACKCISHORT(CI_MRRU, go->neg_mrru, go->mrru); +#endif /* HAVE_MULTILINK */ + ACKCIVOID(CI_SSNHF, go->neg_ssnhf); + ACKCIENDP(CI_EPDISC, go->neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + return (1); +bad: + LCPDEBUG(("lcp_acki: received bad Ack!")); + return (0); +} + + +/* + * lcp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if LCP is in the OPENED state. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int lcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *wo = &pcb->lcp_wantoptions; + u_char citype, cichar, *next; + u_short cishort; + u32_t cilong; + lcp_options no; /* options we've seen Naks for */ + lcp_options try_; /* options to request next time */ + int looped_back = 0; + int cilen; + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define NAKCIVOID(opt, neg) \ + if (go->neg && \ + len >= CILEN_VOID && \ + p[1] == CILEN_VOID && \ + p[0] == opt) { \ + len -= CILEN_VOID; \ + INCPTR(CILEN_VOID, p); \ + no.neg = 1; \ + try_.neg = 0; \ + } +#if CHAP_SUPPORT +#define NAKCICHAP(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + no.neg = 1; \ + code \ + } +#endif /* CHAP_SUPPORT */ +#define NAKCICHAR(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_CHAR && \ + p[1] == CILEN_CHAR && \ + p[0] == opt) { \ + len -= CILEN_CHAR; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + no.neg = 1; \ + code \ + } +#define NAKCISHORT(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_SHORT && \ + p[1] == CILEN_SHORT && \ + p[0] == opt) { \ + len -= CILEN_SHORT; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#define NAKCILONG(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_LONG && \ + p[1] == CILEN_LONG && \ + p[0] == opt) { \ + len -= CILEN_LONG; \ + INCPTR(2, p); \ + GETLONG(cilong, p); \ + no.neg = 1; \ + code \ + } +#if LQR_SUPPORT +#define NAKCILQR(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_LQR && \ + p[1] == CILEN_LQR && \ + p[0] == opt) { \ + len -= CILEN_LQR; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETLONG(cilong, p); \ + no.neg = 1; \ + code \ + } +#endif /* LQR_SUPPORT */ +#define NAKCIENDP(opt, neg) \ + if (go->neg && \ + len >= CILEN_CHAR && \ + p[0] == opt && \ + p[1] >= CILEN_CHAR && \ + p[1] <= len) { \ + len -= p[1]; \ + INCPTR(p[1], p); \ + no.neg = 1; \ + try_.neg = 0; \ + } + + /* + * NOTE! There must be no assignments to individual fields of *go in + * the code below. Any such assignment is a BUG! + */ + /* + * We don't care if they want to send us smaller packets than + * we want. 
Therefore, accept any MRU less than what we asked for, + * but then ignore the new value when setting the MRU in the kernel. + * If they send us a bigger MRU than what we asked, accept it, up to + * the limit of the default MRU we'd get if we didn't negotiate. + */ + if (go->neg_mru && go->mru != PPP_DEFMRU) { + NAKCISHORT(CI_MRU, neg_mru, + if (cishort <= wo->mru || cishort <= PPP_DEFMRU) + try_.mru = cishort; + ); + } + + /* + * Add any characters they want to our (receive-side) asyncmap. + */ + if (go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) { + NAKCILONG(CI_ASYNCMAP, neg_asyncmap, + try_.asyncmap = go->asyncmap | cilong; + ); + } + + /* + * If they've nak'd our authentication-protocol, check whether + * they are proposing a different protocol, or a different + * hash algorithm for CHAP. + */ + if ((0 +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + ) + && len >= CILEN_SHORT + && p[0] == CI_AUTHTYPE && p[1] >= CILEN_SHORT && p[1] <= len) { + cilen = p[1]; + len -= cilen; +#if CHAP_SUPPORT + no.neg_chap = go->neg_chap; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + no.neg_upap = go->neg_upap; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + no.neg_eap = go->neg_eap; +#endif /* EAP_SUPPORT */ + INCPTR(2, p); + GETSHORT(cishort, p); + +#if PAP_SUPPORT + if (cishort == PPP_PAP && cilen == CILEN_SHORT) { +#if EAP_SUPPORT + /* If we were asking for EAP, then we need to stop that. */ + if (go->neg_eap) + try_.neg_eap = 0; + else +#endif /* EAP_SUPPORT */ + +#if CHAP_SUPPORT + /* If we were asking for CHAP, then we need to stop that. */ + if (go->neg_chap) + try_.neg_chap = 0; + else +#endif /* CHAP_SUPPORT */ + + /* + * If we weren't asking for CHAP or EAP, then we were asking for + * PAP, in which case this Nak is bad. + */ + goto bad; + } else +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + if (cishort == PPP_CHAP && cilen == CILEN_CHAP) { + GETCHAR(cichar, p); +#if EAP_SUPPORT + /* Stop asking for EAP, if we were. */ + if (go->neg_eap) { + try_.neg_eap = 0; + /* Try to set up to use their suggestion, if possible */ + if (CHAP_CANDIGEST(go->chap_mdtype, cichar)) + try_.chap_mdtype = CHAP_MDTYPE_D(cichar); + } else +#endif /* EAP_SUPPORT */ + if (go->neg_chap) { + /* + * We were asking for our preferred algorithm, they must + * want something different. + */ + if (cichar != CHAP_DIGEST(go->chap_mdtype)) { + if (CHAP_CANDIGEST(go->chap_mdtype, cichar)) { + /* Use their suggestion if we support it ... */ + try_.chap_mdtype = CHAP_MDTYPE_D(cichar); + } else { + /* ... otherwise, try our next-preferred algorithm. */ + try_.chap_mdtype &= ~(CHAP_MDTYPE(try_.chap_mdtype)); + if (try_.chap_mdtype == MDTYPE_NONE) /* out of algos */ + try_.neg_chap = 0; + } + } else { + /* + * Whoops, they Nak'd our algorithm of choice + * but then suggested it back to us. + */ + goto bad; + } + } else { + /* + * Stop asking for PAP if we were asking for it. + */ +#if PAP_SUPPORT + try_.neg_upap = 0; +#endif /* PAP_SUPPORT */ + } + + } else +#endif /* CHAP_SUPPORT */ + { + +#if EAP_SUPPORT + /* + * If we were asking for EAP, and they're Conf-Naking EAP, + * well, that's just strange. Nobody should do that. + */ + if (cishort == PPP_EAP && cilen == CILEN_SHORT && go->neg_eap) + ppp_dbglog("Unexpected Conf-Nak for EAP"); + + /* + * We don't recognize what they're suggesting. + * Stop asking for what we were asking for. 
+ */ + if (go->neg_eap) + try_.neg_eap = 0; + else +#endif /* EAP_SUPPORT */ + +#if CHAP_SUPPORT + if (go->neg_chap) + try_.neg_chap = 0; + else +#endif /* CHAP_SUPPORT */ + +#if PAP_SUPPORT + if(1) + try_.neg_upap = 0; + else +#endif /* PAP_SUPPORT */ + {} + + p += cilen - CILEN_SHORT; + } + } + +#if LQR_SUPPORT + /* + * If they can't cope with our link quality protocol, we'll have + * to stop asking for LQR. We haven't got any other protocol. + * If they Nak the reporting period, take their value XXX ? + */ + NAKCILQR(CI_QUALITY, neg_lqr, + if (cishort != PPP_LQR) + try_.neg_lqr = 0; + else + try_.lqr_period = cilong; + ); +#endif /* LQR_SUPPORT */ + + /* + * Only implementing CBCP...not the rest of the callback options + */ + NAKCICHAR(CI_CALLBACK, neg_cbcp, + try_.neg_cbcp = 0; + (void)cichar; /* if CHAP support is not compiled, cichar is set but not used, which makes some compilers complaining */ + ); + + /* + * Check for a looped-back line. + */ + NAKCILONG(CI_MAGICNUMBER, neg_magicnumber, + try_.magicnumber = magic(); + looped_back = 1; + ); + + /* + * Peer shouldn't send Nak for protocol compression or + * address/control compression requests; they should send + * a Reject instead. If they send a Nak, treat it as a Reject. + */ + NAKCIVOID(CI_PCOMPRESSION, neg_pcompression); + NAKCIVOID(CI_ACCOMPRESSION, neg_accompression); + +#ifdef HAVE_MULTILINK + /* + * Nak for MRRU option - accept their value if it is smaller + * than the one we want. + */ + if (go->neg_mrru) { + NAKCISHORT(CI_MRRU, neg_mrru, + if (treat_as_reject) + try_.neg_mrru = 0; + else if (cishort <= wo->mrru) + try_.mrru = cishort; + ); + } +#else /* HAVE_MULTILINK */ + LWIP_UNUSED_ARG(treat_as_reject); +#endif /* HAVE_MULTILINK */ + + /* + * Nak for short sequence numbers shouldn't be sent, treat it + * like a reject. + */ + NAKCIVOID(CI_SSNHF, neg_ssnhf); + + /* + * Nak of the endpoint discriminator option is not permitted, + * treat it like a reject. + */ + NAKCIENDP(CI_EPDISC, neg_endpoint); + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If we see an option that we requested, or one we've already seen + * in this packet, then this packet is bad. + * If we wanted to respond by starting to negotiate on the requested + * option(s), we could, but we don't, because except for the + * authentication type and quality protocol, if we are not negotiating + * an option, it is because we were told not to. + * For the authentication type, the Nak from the peer means + * `let me authenticate myself with you' which is a bit pointless. + * For the quality protocol, the Nak means `ask me to send you quality + * reports', but if we didn't ask for them, we don't want them. + * An option we don't recognize represents the peer asking to + * negotiate some option we don't support, so ignore it. 
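+ *
+ * Note, however, that the loop below does take up two of the peer's
+ * hints: an unrequested MRU Nak with a value below PPP_DEFMRU turns on
+ * try_.neg_mru with that value, and an unrequested CI_SSNHF Nak turns
+ * on try_.neg_ssnhf.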
+ */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if (cilen < CILEN_VOID || (len -= cilen) < 0) + goto bad; + next = p + cilen - 2; + + switch (citype) { + case CI_MRU: + if ((go->neg_mru && go->mru != PPP_DEFMRU) + || no.neg_mru || cilen != CILEN_SHORT) + goto bad; + GETSHORT(cishort, p); + if (cishort < PPP_DEFMRU) { + try_.neg_mru = 1; + try_.mru = cishort; + } + break; + case CI_ASYNCMAP: + if ((go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) + || no.neg_asyncmap || cilen != CILEN_LONG) + goto bad; + break; + case CI_AUTHTYPE: + if (0 +#if CHAP_SUPPORT + || go->neg_chap || no.neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap || no.neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap || no.neg_eap +#endif /* EAP_SUPPORT */ + ) + goto bad; + break; + case CI_MAGICNUMBER: + if (go->neg_magicnumber || no.neg_magicnumber || + cilen != CILEN_LONG) + goto bad; + break; + case CI_PCOMPRESSION: + if (go->neg_pcompression || no.neg_pcompression + || cilen != CILEN_VOID) + goto bad; + break; + case CI_ACCOMPRESSION: + if (go->neg_accompression || no.neg_accompression + || cilen != CILEN_VOID) + goto bad; + break; +#if LQR_SUPPORT + case CI_QUALITY: + if (go->neg_lqr || no.neg_lqr || cilen != CILEN_LQR) + goto bad; + break; +#endif /* LQR_SUPPORT */ +#ifdef HAVE_MULTILINK + case CI_MRRU: + if (go->neg_mrru || no.neg_mrru || cilen != CILEN_SHORT) + goto bad; + break; +#endif /* HAVE_MULTILINK */ + case CI_SSNHF: + if (go->neg_ssnhf || no.neg_ssnhf || cilen != CILEN_VOID) + goto bad; + try_.neg_ssnhf = 1; + break; + case CI_EPDISC: + if (go->neg_endpoint || no.neg_endpoint || cilen < CILEN_CHAR) + goto bad; + break; + default: + break; + } + p = next; + } + + /* + * OK, the Nak is good. Now we can update state. + * If there are any options left we ignore them. + */ + if (f->state != PPP_FSM_OPENED) { + if (looped_back) { + if (++try_.numloops >= pcb->settings.lcp_loopbackfail) { + ppp_notice("Serial line is looped back."); + pcb->err_code = PPPERR_LOOPBACK; + lcp_close(f->pcb, "Loopback detected"); + } + } else + try_.numloops = 0; + *go = try_; + } + + return 1; + +bad: + LCPDEBUG(("lcp_nakci: received bad Nak!")); + return 0; +} + + +/* + * lcp_rejci - Peer has Rejected some of our CIs. + * This should not modify any state if the Reject is bad + * or if LCP is in the OPENED state. + * + * Returns: + * 0 - Reject was bad. + * 1 - Reject was good. + */ +static int lcp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char cichar; + u_short cishort; + u32_t cilong; + lcp_options try_; /* options to request next time */ + + try_ = *go; + + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIVOID(opt, neg) \ + if (go->neg && \ + len >= CILEN_VOID && \ + p[1] == CILEN_VOID && \ + p[0] == opt) { \ + len -= CILEN_VOID; \ + INCPTR(CILEN_VOID, p); \ + try_.neg = 0; \ + } +#define REJCISHORT(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_SHORT && \ + p[1] == CILEN_SHORT && \ + p[0] == opt) { \ + len -= CILEN_SHORT; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. 
*/ \ + if (cishort != val) \ + goto bad; \ + try_.neg = 0; \ + } + +#if CHAP_SUPPORT && EAP_SUPPORT && PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_eap = try_.neg_upap = 0; \ + } +#endif /* CHAP_SUPPORT && EAP_SUPPORT && PAP_SUPPORT */ + +#if CHAP_SUPPORT && !EAP_SUPPORT && PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_upap = 0; \ + } +#endif /* CHAP_SUPPORT && !EAP_SUPPORT && PAP_SUPPORT */ + +#if CHAP_SUPPORT && EAP_SUPPORT && !PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_eap = 0; \ + } +#endif /* CHAP_SUPPORT && EAP_SUPPORT && !PAP_SUPPORT */ + +#if CHAP_SUPPORT && !EAP_SUPPORT && !PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* CHAP_SUPPORT && !EAP_SUPPORT && !PAP_SUPPORT */ + +#define REJCILONG(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_LONG && \ + p[1] == CILEN_LONG && \ + p[0] == opt) { \ + len -= CILEN_LONG; \ + INCPTR(2, p); \ + GETLONG(cilong, p); \ + /* Check rejected value. */ \ + if (cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } +#if LQR_SUPPORT +#define REJCILQR(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_LQR && \ + p[1] == CILEN_LQR && \ + p[0] == opt) { \ + len -= CILEN_LQR; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETLONG(cilong, p); \ + /* Check rejected value. */ \ + if (cishort != PPP_LQR || cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* LQR_SUPPORT */ +#define REJCICBCP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CBCP && \ + p[1] == CILEN_CBCP && \ + p[0] == opt) { \ + len -= CILEN_CBCP; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. 
*/ \ + if (cichar != val) \ + goto bad; \ + try_.neg = 0; \ + } +#define REJCIENDP(opt, neg, class, val, vlen) \ + if (go->neg && \ + len >= CILEN_CHAR + vlen && \ + p[0] == opt && \ + p[1] == CILEN_CHAR + vlen) { \ + int i; \ + len -= CILEN_CHAR + vlen; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + if (cichar != class) \ + goto bad; \ + for (i = 0; i < vlen; ++i) { \ + GETCHAR(cichar, p); \ + if (cichar != val[i]) \ + goto bad; \ + } \ + try_.neg = 0; \ + } + + REJCISHORT(CI_MRU, neg_mru, go->mru); + REJCILONG(CI_ASYNCMAP, neg_asyncmap, go->asyncmap); +#if EAP_SUPPORT + REJCISHORT(CI_AUTHTYPE, neg_eap, PPP_EAP); + if (!go->neg_eap) { +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + REJCICHAP(CI_AUTHTYPE, neg_chap, go->chap_mdtype); + if (!go->neg_chap) { +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + REJCISHORT(CI_AUTHTYPE, neg_upap, PPP_PAP); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + } +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + } +#endif /* EAP_SUPPORT */ +#if LQR_SUPPORT + REJCILQR(CI_QUALITY, neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + REJCICBCP(CI_CALLBACK, neg_cbcp, CBCP_OPT); + REJCILONG(CI_MAGICNUMBER, neg_magicnumber, go->magicnumber); + REJCIVOID(CI_PCOMPRESSION, neg_pcompression); + REJCIVOID(CI_ACCOMPRESSION, neg_accompression); +#ifdef HAVE_MULTILINK + REJCISHORT(CI_MRRU, neg_mrru, go->mrru); +#endif /* HAVE_MULTILINK */ + REJCIVOID(CI_SSNHF, neg_ssnhf); + REJCIENDP(CI_EPDISC, neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + LCPDEBUG(("lcp_rejci: received bad Reject!")); + return 0; +} + + +/* + * lcp_reqci - Check the peer's requested CIs and send appropriate response. + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * lenp = Length of requested CIs + */ +static int lcp_reqci(fsm *f, u_char *inp, int *lenp, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ho = &pcb->lcp_hisoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + int cilen, citype, cichar; /* Parsed len, type, char value */ + u_short cishort; /* Parsed short value */ + u32_t cilong; /* Parse long value */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *rejp; /* Pointer to next char in reject frame */ + struct pbuf *nakp; /* Nak buffer */ + u_char *nakoutp; /* Pointer to next char in Nak frame */ + int l = *lenp; /* Length left */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. + */ + next = inp; + nakp = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_CTRL_PBUF_MAX_SIZE), PPP_CTRL_PBUF_TYPE); + if(NULL == nakp) + return 0; + if(nakp->tot_len != nakp->len) { + pbuf_free(nakp); + return 0; + } + + nakoutp = (u_char*)nakp->payload; + rejp = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? 
*/ + LCPDEBUG(("lcp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + citype = 0; + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_MRU: + if (!ao->neg_mru || /* Allow option? */ + cilen != CILEN_SHORT) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETSHORT(cishort, p); /* Parse MRU */ + + /* + * He must be able to receive at least our minimum. + * No need to check a maximum. If he sends a large number, + * we'll just ignore it. + */ + if (cishort < PPP_MINMRU) { + orc = CONFNAK; /* Nak CI */ + PUTCHAR(CI_MRU, nakoutp); + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_MINMRU, nakoutp); /* Give him a hint */ + break; + } + ho->neg_mru = 1; /* Remember he sent MRU */ + ho->mru = cishort; /* And remember value */ + break; + + case CI_ASYNCMAP: + if (!ao->neg_asyncmap || + cilen != CILEN_LONG) { + orc = CONFREJ; + break; + } + GETLONG(cilong, p); + + /* + * Asyncmap must have set at least the bits + * which are set in lcp_allowoptions[unit].asyncmap. + */ + if ((ao->asyncmap & ~cilong) != 0) { + orc = CONFNAK; + PUTCHAR(CI_ASYNCMAP, nakoutp); + PUTCHAR(CILEN_LONG, nakoutp); + PUTLONG(ao->asyncmap | cilong, nakoutp); + break; + } + ho->neg_asyncmap = 1; + ho->asyncmap = cilong; + break; + + case CI_AUTHTYPE: + if (cilen < CILEN_SHORT || + !(0 +#if PAP_SUPPORT + || ao->neg_upap +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || ao->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || ao->neg_eap +#endif /* EAP_SUPPORT */ + )) { + /* + * Reject the option if we're not willing to authenticate. + */ + ppp_dbglog("No auth is possible"); + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + + /* + * Authtype must be PAP, CHAP, or EAP. + * + * Note: if more than one of ao->neg_upap, ao->neg_chap, and + * ao->neg_eap are set, and the peer sends a Configure-Request + * with two or more authenticate-protocol requests, then we will + * reject the second request. + * Whether we end up doing CHAP, UPAP, or EAP depends then on + * the ordering of the CIs in the peer's Configure-Request. 
+ */ + +#if PAP_SUPPORT + if (cishort == PPP_PAP) { + /* we've already accepted CHAP or EAP */ + if (0 +#if CHAP_SUPPORT + || ho->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || ho->neg_eap +#endif /* EAP_SUPPORT */ + || cilen != CILEN_SHORT) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE PAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_upap) { /* we don't want to do PAP */ + orc = CONFNAK; /* NAK it and suggest CHAP or EAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_EAP, nakoutp); + } else { +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + } +#endif /* EAP_SUPPORT */ + break; + } + ho->neg_upap = 1; + break; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (cishort == PPP_CHAP) { + /* we've already accepted PAP or EAP */ + if ( +#if PAP_SUPPORT + ho->neg_upap || +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + ho->neg_eap || +#endif /* EAP_SUPPORT */ + cilen != CILEN_CHAP) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE CHAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_chap) { /* we don't want to do CHAP */ + orc = CONFNAK; /* NAK it and suggest EAP or PAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); + PUTCHAR(CILEN_SHORT, nakoutp); +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTSHORT(PPP_EAP, nakoutp); + } else +#endif /* EAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTSHORT(PPP_PAP, nakoutp); + } + else +#endif /* PAP_SUPPORT */ + {} + break; + } + GETCHAR(cichar, p); /* get digest type */ + if (!(CHAP_CANDIGEST(ao->chap_mdtype, cichar))) { + /* + * We can't/won't do the requested type, + * suggest something else. + */ + orc = CONFNAK; + PUTCHAR(CI_AUTHTYPE, nakoutp); + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + break; + } + ho->chap_mdtype = CHAP_MDTYPE_D(cichar); /* save md type */ + ho->neg_chap = 1; + break; + } +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (cishort == PPP_EAP) { + /* we've already accepted CHAP or PAP */ + if ( +#if CHAP_SUPPORT + ho->neg_chap || +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + ho->neg_upap || +#endif /* PAP_SUPPORT */ + cilen != CILEN_SHORT) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE EAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_eap) { /* we don't want to do EAP */ + orc = CONFNAK; /* NAK it and suggest CHAP or PAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); +#if CHAP_SUPPORT + if (ao->neg_chap) { + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_PAP, nakoutp); + } else +#endif /* PAP_SUPPORT */ + {} + break; + } + ho->neg_eap = 1; + break; + } +#endif /* EAP_SUPPORT */ + + /* + * We don't recognize the protocol they're asking for. + * Nak it with something we're willing to do. + * (At this point we know ao->neg_upap || ao->neg_chap || + * ao->neg_eap.) 
+ */ + orc = CONFNAK; + PUTCHAR(CI_AUTHTYPE, nakoutp); + +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_EAP, nakoutp); + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (ao->neg_chap) { + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_PAP, nakoutp); + } else +#endif /* PAP_SUPPORT */ + {} + break; + +#if LQR_SUPPORT + case CI_QUALITY: + if (!ao->neg_lqr || + cilen != CILEN_LQR) { + orc = CONFREJ; + break; + } + + GETSHORT(cishort, p); + GETLONG(cilong, p); + + /* + * Check the protocol and the reporting period. + * XXX When should we Nak this, and what with? + */ + if (cishort != PPP_LQR) { + orc = CONFNAK; + PUTCHAR(CI_QUALITY, nakoutp); + PUTCHAR(CILEN_LQR, nakoutp); + PUTSHORT(PPP_LQR, nakoutp); + PUTLONG(ao->lqr_period, nakoutp); + break; + } + break; +#endif /* LQR_SUPPORT */ + + case CI_MAGICNUMBER: + if (!(ao->neg_magicnumber || go->neg_magicnumber) || + cilen != CILEN_LONG) { + orc = CONFREJ; + break; + } + GETLONG(cilong, p); + + /* + * He must have a different magic number. + */ + if (go->neg_magicnumber && + cilong == go->magicnumber) { + cilong = magic(); /* Don't put magic() inside macro! */ + orc = CONFNAK; + PUTCHAR(CI_MAGICNUMBER, nakoutp); + PUTCHAR(CILEN_LONG, nakoutp); + PUTLONG(cilong, nakoutp); + break; + } + ho->neg_magicnumber = 1; + ho->magicnumber = cilong; + break; + + + case CI_PCOMPRESSION: + if (!ao->neg_pcompression || + cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_pcompression = 1; + break; + + case CI_ACCOMPRESSION: + if (!ao->neg_accompression || + cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_accompression = 1; + break; + +#ifdef HAVE_MULTILINK + case CI_MRRU: + if (!ao->neg_mrru + || !multilink + || cilen != CILEN_SHORT) { + orc = CONFREJ; + break; + } + + GETSHORT(cishort, p); + /* possibly should insist on a minimum/maximum MRRU here */ + ho->neg_mrru = 1; + ho->mrru = cishort; + break; +#endif /* HAVE_MULTILINK */ + + case CI_SSNHF: + if (!ao->neg_ssnhf +#ifdef HAVE_MULTILINK + || !multilink +#endif /* HAVE_MULTILINK */ + || cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_ssnhf = 1; + break; + + case CI_EPDISC: + if (!ao->neg_endpoint || + cilen < CILEN_CHAR || + cilen > CILEN_CHAR + MAX_ENDP_LEN) { + orc = CONFREJ; + break; + } + GETCHAR(cichar, p); + cilen -= CILEN_CHAR; + ho->neg_endpoint = 1; + ho->endpoint.class_ = cichar; + ho->endpoint.length = cilen; + MEMCPY(ho->endpoint.value, p, cilen); + INCPTR(cilen, p); + break; + + default: + LCPDEBUG(("lcp_reqci: rcvd unknown option %d", citype)); + orc = CONFREJ; + break; + } + +endswitch: + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree /* Getting fed up with sending NAKs? */ + && citype != CI_MAGICNUMBER) { + orc = CONFREJ; /* Get tough if so */ + } else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + rc = CONFNAK; + } + } + if (orc == CONFREJ) { /* Reject this CI */ + rc = CONFREJ; + if (cip != rejp) /* Need to move rejected CI? */ + MEMCPY(rejp, cip, cilen); /* Move it */ + INCPTR(cilen, rejp); /* Update output pointer */ + } + } + + /* + * If we wanted to send additional NAKs (for unsent CIs), the + * code would go here. 
The extra NAKs would go at *nakoutp. + * At present there are no cases where we want to ask the + * peer to negotiate an option. + */ + + switch (rc) { + case CONFACK: + *lenp = next - inp; + break; + case CONFNAK: + /* + * Copy the Nak'd options from the nak buffer to the caller's buffer. + */ + *lenp = nakoutp - (u_char*)nakp->payload; + MEMCPY(inp, nakp->payload, *lenp); + break; + case CONFREJ: + *lenp = rejp - inp; + break; + default: + break; + } + + pbuf_free(nakp); + LCPDEBUG(("lcp_reqci: returning CONF%s.", CODENAME(rc))); + return (rc); /* Return final code */ +} + + +/* + * lcp_up - LCP has come UP. + */ +static void lcp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *ho = &pcb->lcp_hisoptions; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + int mtu, mru; + + if (!go->neg_magicnumber) + go->magicnumber = 0; + if (!ho->neg_magicnumber) + ho->magicnumber = 0; + + /* + * Set our MTU to the smaller of the MTU we wanted and + * the MRU our peer wanted. If we negotiated an MRU, + * set our MRU to the larger of value we wanted and + * the value we got in the negotiation. + * Note on the MTU: the link MTU can be the MRU the peer wanted, + * the interface MTU is set to the lowest of that, the + * MTU we want to use, and our link MRU. + */ + mtu = ho->neg_mru? ho->mru: PPP_MRU; + mru = go->neg_mru? LWIP_MAX(wo->mru, go->mru): PPP_MRU; +#ifdef HAVE_MULTILINK + if (!(multilink && go->neg_mrru && ho->neg_mrru)) +#endif /* HAVE_MULTILINK */ + netif_set_mtu(pcb, LWIP_MIN(LWIP_MIN(mtu, mru), ao->mru)); + ppp_send_config(pcb, mtu, + (ho->neg_asyncmap? ho->asyncmap: 0xffffffff), + ho->neg_pcompression, ho->neg_accompression); + ppp_recv_config(pcb, mru, + (pcb->settings.lax_recv? 0: go->neg_asyncmap? go->asyncmap: 0xffffffff), + go->neg_pcompression, go->neg_accompression); + + if (ho->neg_mru) + pcb->peer_mru = ho->mru; + + lcp_echo_lowerup(f->pcb); /* Enable echo messages */ + + link_established(pcb); +} + + +/* + * lcp_down - LCP has gone DOWN. + * + * Alert other protocols. + */ +static void lcp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + + lcp_echo_lowerdown(f->pcb); + + link_down(pcb); + + ppp_send_config(pcb, PPP_MRU, 0xffffffff, 0, 0); + ppp_recv_config(pcb, PPP_MRU, + (go->neg_asyncmap? go->asyncmap: 0xffffffff), + go->neg_pcompression, go->neg_accompression); + pcb->peer_mru = PPP_MRU; +} + + +/* + * lcp_starting - LCP needs the lower layer up. + */ +static void lcp_starting(fsm *f) { + ppp_pcb *pcb = f->pcb; + link_required(pcb); +} + + +/* + * lcp_finished - LCP has finished with the lower layer. + */ +static void lcp_finished(fsm *f) { + ppp_pcb *pcb = f->pcb; + link_terminated(pcb); +} + + +#if PRINTPKT_SUPPORT +/* + * lcp_printpkt - print the contents of an LCP packet. 
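+ *
+ * The rendered text looks, for example, like
+ *
+ *     ConfReq id=0x1 <mru 1500> <asyncmap 0x0> <magic 0x12345678> <pcomp> <accomp>
+ *
+ * with unrecognized codes and option bytes dumped as hex; the values
+ * above are purely illustrative.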
+ */ +static const char* const lcp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej", "ProtRej", + "EchoReq", "EchoRep", "DiscReq", "Ident", + "TimeRem" +}; + +static int lcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, olen, i; + const u_char *pstart, *optend; + u_short cishort; + u32_t cilong; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(lcp_codenames)) + printer(arg, " %s", lcp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { + case CI_MRU: + if (olen == CILEN_SHORT) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "mru %d", cishort); + } + break; + case CI_ASYNCMAP: + if (olen == CILEN_LONG) { + p += 2; + GETLONG(cilong, p); + printer(arg, "asyncmap 0x%x", cilong); + } + break; + case CI_AUTHTYPE: + if (olen >= CILEN_SHORT) { + p += 2; + printer(arg, "auth "); + GETSHORT(cishort, p); + switch (cishort) { +#if PAP_SUPPORT + case PPP_PAP: + printer(arg, "pap"); + break; +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + case PPP_CHAP: + printer(arg, "chap"); + if (p < optend) { + switch (*p) { + case CHAP_MD5: + printer(arg, " MD5"); + ++p; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + printer(arg, " MS"); + ++p; + break; + + case CHAP_MICROSOFT_V2: + printer(arg, " MS-v2"); + ++p; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + } + break; +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + printer(arg, "eap"); + break; +#endif /* EAP_SUPPORT */ + default: + printer(arg, "0x%x", cishort); + } + } + break; +#if LQR_SUPPORT + case CI_QUALITY: + if (olen >= CILEN_SHORT) { + p += 2; + printer(arg, "quality "); + GETSHORT(cishort, p); + switch (cishort) { + case PPP_LQR: + printer(arg, "lqr"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; +#endif /* LQR_SUPPORT */ + case CI_CALLBACK: + if (olen >= CILEN_CHAR) { + p += 2; + printer(arg, "callback "); + GETCHAR(cishort, p); + switch (cishort) { + case CBCP_OPT: + printer(arg, "CBCP"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; + case CI_MAGICNUMBER: + if (olen == CILEN_LONG) { + p += 2; + GETLONG(cilong, p); + printer(arg, "magic 0x%x", cilong); + } + break; + case CI_PCOMPRESSION: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "pcomp"); + } + break; + case CI_ACCOMPRESSION: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "accomp"); + } + break; + case CI_MRRU: + if (olen == CILEN_SHORT) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "mrru %d", cishort); + } + break; + case CI_SSNHF: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "ssnhf"); + } + break; + case CI_EPDISC: +#ifdef HAVE_MULTILINK + if (olen >= CILEN_CHAR) { + struct epdisc epd; + p += 2; + GETCHAR(epd.class, p); + epd.length = olen - CILEN_CHAR; + if (epd.length > MAX_ENDP_LEN) + epd.length = MAX_ENDP_LEN; + if (epd.length > 0) { + MEMCPY(epd.value, p, epd.length); + p += epd.length; + } + printer(arg, "endpoint [%s]", epdisc_to_str(&epd)); + } 
+#else + printer(arg, "endpoint"); +#endif + break; + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + + case ECHOREQ: + case ECHOREP: + case DISCREQ: + if (len >= 4) { + GETLONG(cilong, p); + printer(arg, " magic=0x%x", cilong); + len -= 4; + } + break; + + case IDENTIF: + case TIMEREM: + if (len >= 4) { + GETLONG(cilong, p); + printer(arg, " magic=0x%x", cilong); + len -= 4; + } + if (code == TIMEREM) { + if (len < 4) + break; + GETLONG(cilong, p); + printer(arg, " seconds=%u", cilong); + len -= 4; + } + if (len > 0) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (i = 0; i < len && i < 32; ++i) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + if (i < len) { + printer(arg, " ..."); + p += len - i; + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +/* + * Time to shut down the link because there is nothing out there. + */ + +static void LcpLinkFailure(fsm *f) { + ppp_pcb *pcb = f->pcb; + if (f->state == PPP_FSM_OPENED) { + ppp_info("No response to %d echo-requests", pcb->lcp_echos_pending); + ppp_notice("Serial link appears to be disconnected."); + pcb->err_code = PPPERR_PEERDEAD; + lcp_close(pcb, "Peer not responding"); + } +} + +/* + * Timer expired for the LCP echo requests from this process. + */ + +static void LcpEchoCheck(fsm *f) { + ppp_pcb *pcb = f->pcb; + + LcpSendEchoRequest (f); + if (f->state != PPP_FSM_OPENED) + return; + + /* + * Start the timer for the next interval. + */ + if (pcb->lcp_echo_timer_running) + ppp_warn("assertion lcp_echo_timer_running==0 failed"); + TIMEOUT (LcpEchoTimeout, f, pcb->settings.lcp_echo_interval); + pcb->lcp_echo_timer_running = 1; +} + +/* + * LcpEchoTimeout - Timer expired on the LCP echo + */ + +static void LcpEchoTimeout(void *arg) { + fsm *f = (fsm*)arg; + ppp_pcb *pcb = f->pcb; + if (pcb->lcp_echo_timer_running != 0) { + pcb->lcp_echo_timer_running = 0; + LcpEchoCheck ((fsm *) arg); + } +} + +/* + * LcpEchoReply - LCP has received a reply to the echo + */ + +static void lcp_received_echo_reply(fsm *f, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u32_t magic_val; + LWIP_UNUSED_ARG(id); + + /* Check the magic number - don't count replies from ourselves. */ + if (len < 4) { + ppp_dbglog("lcp: received short Echo-Reply, length %d", len); + return; + } + GETLONG(magic_val, inp); + if (go->neg_magicnumber + && magic_val == go->magicnumber) { + ppp_warn("appear to have received our own echo-reply!"); + return; + } + + /* Reset the number of outstanding echo frames */ + pcb->lcp_echos_pending = 0; +} + +/* + * LcpSendEchoRequest - Send an echo request frame to the peer + */ + +static void LcpSendEchoRequest(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u32_t lcp_magic; + u_char pkt[4], *pktp; + + /* + * Detect the failure of the peer at this point. 
+ */ + if (pcb->settings.lcp_echo_fails != 0) { + if (pcb->lcp_echos_pending >= pcb->settings.lcp_echo_fails) { + LcpLinkFailure(f); + pcb->lcp_echos_pending = 0; + } + } + +#if PPP_LCP_ADAPTIVE + /* + * If adaptive echos have been enabled, only send the echo request if + * no traffic was received since the last one. + */ + if (pcb->settings.lcp_echo_adaptive) { + static unsigned int last_pkts_in = 0; + +#if PPP_STATS_SUPPORT + update_link_stats(f->unit); + link_stats_valid = 0; +#endif /* PPP_STATS_SUPPORT */ + + if (link_stats.pkts_in != last_pkts_in) { + last_pkts_in = link_stats.pkts_in; + return; + } + } +#endif + + /* + * Make and send the echo request frame. + */ + if (f->state == PPP_FSM_OPENED) { + lcp_magic = go->magicnumber; + pktp = pkt; + PUTLONG(lcp_magic, pktp); + fsm_sdata(f, ECHOREQ, pcb->lcp_echo_number++, pkt, pktp - pkt); + ++pcb->lcp_echos_pending; + } +} + +/* + * lcp_echo_lowerup - Start the timer for the LCP frame + */ + +static void lcp_echo_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + /* Clear the parameters for generating echo frames */ + pcb->lcp_echos_pending = 0; + pcb->lcp_echo_number = 0; + pcb->lcp_echo_timer_running = 0; + + /* If a timeout interval is specified then start the timer */ + if (pcb->settings.lcp_echo_interval != 0) + LcpEchoCheck (f); +} + +/* + * lcp_echo_lowerdown - Stop the timer for the LCP frame + */ + +static void lcp_echo_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + if (pcb->lcp_echo_timer_running != 0) { + UNTIMEOUT (LcpEchoTimeout, f); + pcb->lcp_echo_timer_running = 0; + } +} + +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c new file mode 100644 index 000000000..d0d87c5e5 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c @@ -0,0 +1,294 @@ +/* + * magic.c - PPP Magic Number routines. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/***************************************************************************** +* randm.c - Random number generator program file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* Copyright (c) 1998 by Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-06-03 Guy Lancaster , Global Election Systems Inc. +* Extracted from avos. +*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/magic.h" + +#if PPP_MD5_RANDM /* Using MD5 for better randomness if enabled */ + +#include "netif/ppp/pppcrypt.h" + +#define MD5_HASH_SIZE 16 +static char magic_randpool[MD5_HASH_SIZE]; /* Pool of randomness. */ +static long magic_randcount; /* Pseudo-random incrementer */ +static u32_t magic_randomseed; /* Seed used for random number generation. */ + +/* + * Churn the randomness pool on a random event. Call this early and often + * on random and semi-random system events to build randomness in time for + * usage. For randomly timed events, pass a null pointer and a zero length + * and this will use the system timer and other sources to add randomness. + * If new random data is available, pass a pointer to that and it will be + * included. + * + * Ref: Applied Cryptography 2nd Ed. by Bruce Schneier p. 
427 + */ +static void magic_churnrand(char *rand_data, u32_t rand_len) { + lwip_md5_context md5_ctx; + + /* LWIP_DEBUGF(LOG_INFO, ("magic_churnrand: %u@%P\n", rand_len, rand_data)); */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + lwip_md5_update(&md5_ctx, (u_char *)magic_randpool, sizeof(magic_randpool)); + if (rand_data) { + lwip_md5_update(&md5_ctx, (u_char *)rand_data, rand_len); + } else { + struct { + /* INCLUDE fields for any system sources of randomness */ + u32_t jiffies; +#ifdef LWIP_RAND + u32_t rand; +#endif /* LWIP_RAND */ + } sys_data; + magic_randomseed += sys_jiffies(); + sys_data.jiffies = magic_randomseed; +#ifdef LWIP_RAND + sys_data.rand = LWIP_RAND(); +#endif /* LWIP_RAND */ + /* Load sys_data fields here. */ + lwip_md5_update(&md5_ctx, (u_char *)&sys_data, sizeof(sys_data)); + } + lwip_md5_finish(&md5_ctx, (u_char *)magic_randpool); + lwip_md5_free(&md5_ctx); +/* LWIP_DEBUGF(LOG_INFO, ("magic_churnrand: -> 0\n")); */ +} + +/* + * Initialize the random number generator. + */ +void magic_init(void) { + magic_churnrand(NULL, 0); +} + +/* + * Randomize our random seed value. + */ +void magic_randomize(void) { + magic_churnrand(NULL, 0); +} + +/* + * magic_random_bytes - Fill a buffer with random bytes. + * + * Use the random pool to generate random data. This degrades to pseudo + * random when used faster than randomness is supplied using magic_churnrand(). + * Note: It's important that there be sufficient randomness in magic_randpool + * before this is called for otherwise the range of the result may be + * narrow enough to make a search feasible. + * + * Ref: Applied Cryptography 2nd Ed. by Bruce Schneier p. 427 + * + * XXX Why does he not just call magic_churnrand() for each block? Probably + * so that you don't ever publish the seed which could possibly help + * predict future values. + * XXX Why don't we preserve md5 between blocks and just update it with + * magic_randcount each time? Probably there is a weakness but I wish that + * it was documented. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len) { + lwip_md5_context md5_ctx; + u_char tmp[MD5_HASH_SIZE]; + u32_t n; + + while (buf_len > 0) { + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + lwip_md5_update(&md5_ctx, (u_char *)magic_randpool, sizeof(magic_randpool)); + lwip_md5_update(&md5_ctx, (u_char *)&magic_randcount, sizeof(magic_randcount)); + lwip_md5_finish(&md5_ctx, tmp); + lwip_md5_free(&md5_ctx); + magic_randcount++; + n = LWIP_MIN(buf_len, MD5_HASH_SIZE); + MEMCPY(buf, tmp, n); + buf += n; + buf_len -= n; + } +} + +/* + * Return a new random number. + */ +u32_t magic(void) { + u32_t new_rand; + + magic_random_bytes((unsigned char *)&new_rand, sizeof(new_rand)); + + return new_rand; +} + +#else /* PPP_MD5_RANDM */ + +/*****************************/ +/*** LOCAL DATA STRUCTURES ***/ +/*****************************/ +#ifndef LWIP_RAND +static int magic_randomized; /* Set when truely randomized. */ +#endif /* LWIP_RAND */ +static u32_t magic_randomseed; /* Seed used for random number generation. */ + + +/***********************************/ +/*** PUBLIC FUNCTION DEFINITIONS ***/ +/***********************************/ + +/* + * Initialize the random number generator. + * + * Here we attempt to compute a random number seed but even if + * it isn't random, we'll randomize it later. + * + * The current method uses the fields from the real time clock, + * the idle process counter, the millisecond counter, and the + * hardware timer tick counter. 
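
The #ifdef LWIP_RAND paths above and below bypass the weak rand()-based fallback entirely when the port maps LWIP_RAND to a real random source in lwipopts.h. A sketch of such a hook, where hw_rng_read32() is a hypothetical port function (for example a thin wrapper around a hardware RNG peripheral):

    /* lwipopts.h (sketch): let the PPP magic-number code draw from a hardware RNG.
     * hw_rng_read32() is assumed to be provided elsewhere by the port and to
     * return 32 random bits per call. */
    #define LWIP_RAND() (hw_rng_read32())
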
When this is invoked
+ * in startup(), then the idle counter and timer values may
+ * repeat after each boot and the real time clock may not be
+ * operational.  Thus we call it again on the first random
+ * event.
+ */
+void magic_init(void) {
+  magic_randomseed += sys_jiffies();
+#ifndef LWIP_RAND
+  /* Initialize the Borland random number generator. */
+  srand((unsigned)magic_randomseed);
+#endif /* LWIP_RAND */
+}
+
+/*
+ * magic_init - Initialize the magic number generator.
+ *
+ * Randomize our random seed value.  Here we use the fact that
+ * this function is called at *truely random* times by the polling
+ * and network functions.  Here we only get 16 bits of new random
+ * value but we use the previous value to randomize the other 16
+ * bits.
+ */
+void magic_randomize(void) {
+#ifndef LWIP_RAND
+  if (!magic_randomized) {
+    magic_randomized = !0;
+    magic_init();
+    /* The initialization function also updates the seed. */
+  } else {
+#endif /* LWIP_RAND */
+    magic_randomseed += sys_jiffies();
+#ifndef LWIP_RAND
+  }
+#endif /* LWIP_RAND */
+}
+
+/*
+ * Return a new random number.
+ *
+ * Here we use the Borland rand() function to supply a pseudo random
+ * number which we make truely random by combining it with our own
+ * seed which is randomized by truely random events.
+ * Thus the numbers will be truely random unless there have been no
+ * operator or network events in which case it will be pseudo random
+ * seeded by the real time clock.
+ */
+u32_t magic(void) {
+#ifdef LWIP_RAND
+  return LWIP_RAND() + magic_randomseed;
+#else /* LWIP_RAND */
+  return ((u32_t)rand() << 16) + (u32_t)rand() + magic_randomseed;
+#endif /* LWIP_RAND */
+}
+
+/*
+ * magic_random_bytes - Fill a buffer with random bytes.
+ */
+void magic_random_bytes(unsigned char *buf, u32_t buf_len) {
+  u32_t new_rand, n;
+
+  while (buf_len > 0) {
+    new_rand = magic();
+    n = LWIP_MIN(buf_len, sizeof(new_rand));
+    MEMCPY(buf, &new_rand, n);
+    buf += n;
+    buf_len -= n;
+  }
+}
+#endif /* PPP_MD5_RANDM */
+
+/*
+ * Return a new random number between 0 and (2^pow)-1 included.
+ */
+u32_t magic_pow(u8_t pow) {
+  return magic() & ~(~0UL << pow);
+}
+
+#endif /* PPP_SUPPORT */
diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c
new file mode 100644
--- /dev/null
+++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c
+/*
+ * mppe.c - interface MPPE to the PPP code.
+ *
+ * By Frank Cusack <fcusack@fcusack.com>.
+ * Copyright (c) 2002,2003,2004 Google, Inc.
+ * All rights reserved.
+ *
+ * License:
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation is hereby granted, provided that the above copyright
+ * notice appears in all copies.  This software is provided without any
+ * warranty, express or implied.
+ *
+ * Changelog:
+ *      08/12/05 - Matt Domsch
+ *                 Only need extra skb padding on transmit, not receive.
+ *      06/18/04 - Matt Domsch, Oleg Makarenko
+ *                 Use Linux kernel 2.6 arc4 and sha1 routines rather than
+ *                 providing our own.
+ * 2/15/04 - TS: added #include and testing for Kernel + * version before using + * MOD_DEC_USAGE_COUNT/MOD_INC_USAGE_COUNT which are + * deprecated in 2.6 + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MPPE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include + +#include "lwip/err.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/ccp.h" +#include "netif/ppp/mppe.h" +#include "netif/ppp/pppdebug.h" +#include "netif/ppp/pppcrypt.h" + +#define SHA1_SIGNATURE_SIZE 20 + +/* ppp_mppe_state.bits definitions */ +#define MPPE_BIT_A 0x80 /* Encryption table were (re)inititalized */ +#define MPPE_BIT_B 0x40 /* MPPC only (not implemented) */ +#define MPPE_BIT_C 0x20 /* MPPC only (not implemented) */ +#define MPPE_BIT_D 0x10 /* This is an encrypted frame */ + +#define MPPE_BIT_FLUSHED MPPE_BIT_A +#define MPPE_BIT_ENCRYPTED MPPE_BIT_D + +#define MPPE_BITS(p) ((p)[0] & 0xf0) +#define MPPE_CCOUNT(p) ((((p)[0] & 0x0f) << 8) + (p)[1]) +#define MPPE_CCOUNT_SPACE 0x1000 /* The size of the ccount space */ + +#define MPPE_OVHD 2 /* MPPE overhead/packet */ +#define SANITY_MAX 1600 /* Max bogon factor we will tolerate */ + +/* + * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3. + * Well, not what's written there, but rather what they meant. + */ +static void mppe_rekey(ppp_mppe_state * state, int initial_key) +{ + lwip_sha1_context sha1_ctx; + u8_t sha1_digest[SHA1_SIGNATURE_SIZE]; + + /* + * Key Derivation, from RFC 3078, RFC 3079. + * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. + */ + lwip_sha1_init(&sha1_ctx); + lwip_sha1_starts(&sha1_ctx); + lwip_sha1_update(&sha1_ctx, state->master_key, state->keylen); + lwip_sha1_update(&sha1_ctx, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1_ctx, state->session_key, state->keylen); + lwip_sha1_update(&sha1_ctx, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1_ctx, sha1_digest); + lwip_sha1_free(&sha1_ctx); + MEMCPY(state->session_key, sha1_digest, state->keylen); + + if (!initial_key) { + lwip_arc4_init(&state->arc4); + lwip_arc4_setup(&state->arc4, sha1_digest, state->keylen); + lwip_arc4_crypt(&state->arc4, state->session_key, state->keylen); + lwip_arc4_free(&state->arc4); + } + if (state->keylen == 8) { + /* See RFC 3078 */ + state->session_key[0] = 0xd1; + state->session_key[1] = 0x26; + state->session_key[2] = 0x9e; + } + lwip_arc4_init(&state->arc4); + lwip_arc4_setup(&state->arc4, state->session_key, state->keylen); +} + +/* + * Set key, used by MSCHAP before mppe_init() is actually called by CCP so we + * don't have to keep multiple copies of keys. + */ +void mppe_set_key(ppp_pcb *pcb, ppp_mppe_state *state, u8_t *key) { + LWIP_UNUSED_ARG(pcb); + MEMCPY(state->master_key, key, MPPE_MAX_KEY_LEN); +} + +/* + * Initialize (de)compressor state. + */ +void +mppe_init(ppp_pcb *pcb, ppp_mppe_state *state, u8_t options) +{ +#if PPP_DEBUG + const u8_t *debugstr = (const u8_t*)"mppe_comp_init"; + if (&pcb->mppe_decomp == state) { + debugstr = (const u8_t*)"mppe_decomp_init"; + } +#endif /* PPP_DEBUG */ + + /* Save keys. */ + MEMCPY(state->session_key, state->master_key, sizeof(state->master_key)); + + if (options & MPPE_OPT_128) + state->keylen = 16; + else if (options & MPPE_OPT_40) + state->keylen = 8; + else { + PPPDEBUG(LOG_DEBUG, ("%s[%d]: unknown key length\n", debugstr, + pcb->netif->num)); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + return; + } + if (options & MPPE_OPT_STATEFUL) + state->stateful = 1; + + /* Generate the initial session key. 
*/ + mppe_rekey(state, 1); + +#if PPP_DEBUG + { + int i; + char mkey[sizeof(state->master_key) * 2 + 1]; + char skey[sizeof(state->session_key) * 2 + 1]; + + PPPDEBUG(LOG_DEBUG, ("%s[%d]: initialized with %d-bit %s mode\n", + debugstr, pcb->netif->num, (state->keylen == 16) ? 128 : 40, + (state->stateful) ? "stateful" : "stateless")); + + for (i = 0; i < (int)sizeof(state->master_key); i++) + sprintf(mkey + i * 2, "%02x", state->master_key[i]); + for (i = 0; i < (int)sizeof(state->session_key); i++) + sprintf(skey + i * 2, "%02x", state->session_key[i]); + PPPDEBUG(LOG_DEBUG, + ("%s[%d]: keys: master: %s initial session: %s\n", + debugstr, pcb->netif->num, mkey, skey)); + } +#endif /* PPP_DEBUG */ + + /* + * Initialize the coherency count. The initial value is not specified + * in RFC 3078, but we can make a reasonable assumption that it will + * start at 0. Setting it to the max here makes the comp/decomp code + * do the right thing (determined through experiment). + */ + state->ccount = MPPE_CCOUNT_SPACE - 1; + + /* + * Note that even though we have initialized the key table, we don't + * set the FLUSHED bit. This is contrary to RFC 3078, sec. 3.1. + */ + state->bits = MPPE_BIT_ENCRYPTED; +} + +/* + * We received a CCP Reset-Request (actually, we are sending a Reset-Ack), + * tell the compressor to rekey. Note that we MUST NOT rekey for + * every CCP Reset-Request; we only rekey on the next xmit packet. + * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost. + * So, rekeying for every CCP Reset-Request is broken as the peer will not + * know how many times we've rekeyed. (If we rekey and THEN get another + * CCP Reset-Request, we must rekey again.) + */ +void mppe_comp_reset(ppp_pcb *pcb, ppp_mppe_state *state) +{ + LWIP_UNUSED_ARG(pcb); + state->bits |= MPPE_BIT_FLUSHED; +} + +/* + * Compress (encrypt) a packet. + * It's strange to call this a compressor, since the output is always + * MPPE_OVHD + 2 bytes larger than the input. + */ +err_t +mppe_compress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb, u16_t protocol) +{ + struct pbuf *n, *np; + u8_t *pl; + err_t err; + + LWIP_UNUSED_ARG(pcb); + + /* TCP stack requires that we don't change the packet payload, therefore we copy + * the whole packet before encryption. 
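
Before the copy described in this comment, it may help to keep the on-wire layout in mind: mppe_compress() below emits two MPPE header octets (the A/B/C/D flag bits plus a 12-bit coherency count), after which the original PPP protocol number and payload follow in encrypted form, and mppe_decompress() parses the header with the MPPE_BITS()/MPPE_CCOUNT() macros defined earlier in this file. A small worked example, purely illustrative:

    /* Header octets 0x9f 0xff, decoded with the macros defined above: */
    u8_t hdr[2] = { 0x9f, 0xff };
    /* MPPE_BITS(hdr)   == 0x90   -> MPPE_BIT_FLUSHED | MPPE_BIT_ENCRYPTED      */
    /* MPPE_CCOUNT(hdr) == 0x0fff -> coherency count 4095, i.e. a "flag" packet */
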
+ */ + np = pbuf_alloc(PBUF_RAW, MPPE_OVHD + sizeof(protocol) + (*pb)->tot_len, PBUF_RAM); + if (!np) { + return ERR_MEM; + } + + /* Hide MPPE header + protocol */ + pbuf_remove_header(np, MPPE_OVHD + sizeof(protocol)); + + if ((err = pbuf_copy(np, *pb)) != ERR_OK) { + pbuf_free(np); + return err; + } + + /* Reveal MPPE header + protocol */ + pbuf_add_header(np, MPPE_OVHD + sizeof(protocol)); + + *pb = np; + pl = (u8_t*)np->payload; + + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + PPPDEBUG(LOG_DEBUG, ("mppe_compress[%d]: ccount %d\n", pcb->netif->num, state->ccount)); + /* FIXME: use PUT* macros */ + pl[0] = state->ccount>>8; + pl[1] = state->ccount; + + if (!state->stateful || /* stateless mode */ + ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ + (state->bits & MPPE_BIT_FLUSHED)) { /* CCP Reset-Request */ + /* We must rekey */ + if (state->stateful) { + PPPDEBUG(LOG_DEBUG, ("mppe_compress[%d]: rekeying\n", pcb->netif->num)); + } + mppe_rekey(state, 0); + state->bits |= MPPE_BIT_FLUSHED; + } + pl[0] |= state->bits; + state->bits &= ~MPPE_BIT_FLUSHED; /* reset for next xmit */ + pl += MPPE_OVHD; + + /* Add protocol */ + /* FIXME: add PFC support */ + pl[0] = protocol >> 8; + pl[1] = protocol; + + /* Hide MPPE header */ + pbuf_remove_header(np, MPPE_OVHD); + + /* Encrypt packet */ + for (n = np; n != NULL; n = n->next) { + lwip_arc4_crypt(&state->arc4, (u8_t*)n->payload, n->len); + if (n->tot_len == n->len) { + break; + } + } + + /* Reveal MPPE header */ + pbuf_add_header(np, MPPE_OVHD); + + return ERR_OK; +} + +/* + * We received a CCP Reset-Ack. Just ignore it. + */ +void mppe_decomp_reset(ppp_pcb *pcb, ppp_mppe_state *state) +{ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(state); + return; +} + +/* + * Decompress (decrypt) an MPPE packet. + */ +err_t +mppe_decompress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb) +{ + struct pbuf *n0 = *pb, *n; + u8_t *pl; + u16_t ccount; + u8_t flushed; + + /* MPPE Header */ + if (n0->len < MPPE_OVHD) { + PPPDEBUG(LOG_DEBUG, + ("mppe_decompress[%d]: short pkt (%d)\n", + pcb->netif->num, n0->len)); + state->sanity_errors += 100; + goto sanity_error; + } + + pl = (u8_t*)n0->payload; + flushed = MPPE_BITS(pl) & MPPE_BIT_FLUSHED; + ccount = MPPE_CCOUNT(pl); + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: ccount %d\n", + pcb->netif->num, ccount)); + + /* sanity checks -- terminate with extreme prejudice */ + if (!(MPPE_BITS(pl) & MPPE_BIT_ENCRYPTED)) { + PPPDEBUG(LOG_DEBUG, + ("mppe_decompress[%d]: ENCRYPTED bit not set!\n", + pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + if (!state->stateful && !flushed) { + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: FLUSHED bit not set in " + "stateless mode!\n", pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: FLUSHED bit not set on " + "flag packet!\n", pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + + /* + * Check the coherency count. + */ + + if (!state->stateful) { + /* Discard late packet */ + if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE > MPPE_CCOUNT_SPACE / 2) { + state->sanity_errors++; + goto sanity_error; + } + + /* RFC 3078, sec 8.1. Rekey for every packet. */ + while (state->ccount != ccount) { + mppe_rekey(state, 0); + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + } + } else { + /* RFC 3078, sec 8.2. 
*/ + if (!state->discard) { + /* normal state */ + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + if (ccount != state->ccount) { + /* + * (ccount > state->ccount) + * Packet loss detected, enter the discard state. + * Signal the peer to rekey (by sending a CCP Reset-Request). + */ + state->discard = 1; + ccp_resetrequest(pcb); + return ERR_BUF; + } + } else { + /* discard state */ + if (!flushed) { + /* ccp.c will be silent (no additional CCP Reset-Requests). */ + return ERR_BUF; + } else { + /* Rekey for every missed "flag" packet. */ + while ((ccount & ~0xff) != + (state->ccount & ~0xff)) { + mppe_rekey(state, 0); + state->ccount = + (state->ccount + + 256) % MPPE_CCOUNT_SPACE; + } + + /* reset */ + state->discard = 0; + state->ccount = ccount; + /* + * Another problem with RFC 3078 here. It implies that the + * peer need not send a Reset-Ack packet. But RFC 1962 + * requires it. Hopefully, M$ does send a Reset-Ack; even + * though it isn't required for MPPE synchronization, it is + * required to reset CCP state. + */ + } + } + if (flushed) + mppe_rekey(state, 0); + } + + /* Hide MPPE header */ + pbuf_remove_header(n0, MPPE_OVHD); + + /* Decrypt the packet. */ + for (n = n0; n != NULL; n = n->next) { + lwip_arc4_crypt(&state->arc4, (u8_t*)n->payload, n->len); + if (n->tot_len == n->len) { + break; + } + } + + /* good packet credit */ + state->sanity_errors >>= 1; + + return ERR_OK; + +sanity_error: + if (state->sanity_errors >= SANITY_MAX) { + /* + * Take LCP down if the peer is sending too many bogons. + * We don't want to do this for a single or just a few + * instances since it could just be due to packet corruption. + */ + lcp_close(pcb, "Too many MPPE errors"); + } + return ERR_BUF; +} + +#endif /* PPP_SUPPORT && MPPE_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c new file mode 100644 index 000000000..62014e8c8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c @@ -0,0 +1,609 @@ +/* + * multilink.c - support routines for multilink. + * + * Copyright (c) 2000-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && defined(HAVE_MULTILINK) /* don't build if not configured for use in lwipopts.h */ + +/* Multilink support + * + * Multilink uses Samba TDB (Trivial Database Library), which + * we cannot port, because it needs a filesystem. + * + * We have to choose between doing a memory-shared TDB-clone, + * or dropping multilink support at all. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/tdb.h" + +bool endpoint_specified; /* user gave explicit endpoint discriminator */ +char *bundle_id; /* identifier for our bundle */ +char *blinks_id; /* key for the list of links */ +bool doing_multilink; /* multilink was enabled and agreed to */ +bool multilink_master; /* we own the multilink bundle */ + +extern TDB_CONTEXT *pppdb; +extern char db_key[]; + +static void make_bundle_links (int append); +static void remove_bundle_link (void); +static void iterate_bundle_links (void (*func) (char *)); + +static int get_default_epdisc (struct epdisc *); +static int parse_num (char *str, const char *key, int *valp); +static int owns_unit (TDB_DATA pid, int unit); + +#define set_ip_epdisc(ep, addr) do { \ + ep->length = 4; \ + ep->value[0] = addr >> 24; \ + ep->value[1] = addr >> 16; \ + ep->value[2] = addr >> 8; \ + ep->value[3] = addr; \ +} while (0) + +#define LOCAL_IP_ADDR(addr) \ + (((addr) & 0xff000000) == 0x0a000000 /* 10.x.x.x */ \ + || ((addr) & 0xfff00000) == 0xac100000 /* 172.16.x.x */ \ + || ((addr) & 0xffff0000) == 0xc0a80000) /* 192.168.x.x */ + +#define process_exists(n) (kill((n), 0) == 0 || errno != ESRCH) + +void +mp_check_options() +{ + lcp_options *wo = &lcp_wantoptions[0]; + lcp_options *ao = &lcp_allowoptions[0]; + + doing_multilink = 0; + if (!multilink) + return; + /* if we're doing multilink, we have to negotiate MRRU */ + if (!wo->neg_mrru) { + /* mrru not specified, default to mru */ + wo->mrru = wo->mru; + wo->neg_mrru = 1; + } + ao->mrru = ao->mru; + ao->neg_mrru = 1; + + if (!wo->neg_endpoint && !noendpoint) { + /* get a default endpoint value */ + wo->neg_endpoint = get_default_epdisc(&wo->endpoint); + } +} + +/* + * Make a new bundle or join us to an existing bundle + * if we are doing multilink. + */ +int +mp_join_bundle() +{ + lcp_options *go = &lcp_gotoptions[0]; + lcp_options *ho = &lcp_hisoptions[0]; + lcp_options *ao = &lcp_allowoptions[0]; + int unit, pppd_pid; + int l, mtu; + char *p; + TDB_DATA key, pid, rec; + + if (doing_multilink) { + /* have previously joined a bundle */ + if (!go->neg_mrru || !ho->neg_mrru) { + notice("oops, didn't get multilink on renegotiation"); + lcp_close(pcb, "multilink required"); + return 0; + } + /* XXX should check the peer_authname and ho->endpoint + are the same as previously */ + return 0; + } + + if (!go->neg_mrru || !ho->neg_mrru) { + /* not doing multilink */ + if (go->neg_mrru) + notice("oops, multilink negotiated only for receive"); + mtu = ho->neg_mru? ho->mru: PPP_MRU; + if (mtu > ao->mru) + mtu = ao->mru; + if (demand) { + /* already have a bundle */ + cfg_bundle(0, 0, 0, 0); + netif_set_mtu(pcb, mtu); + return 0; + } + make_new_bundle(0, 0, 0, 0); + set_ifunit(1); + netif_set_mtu(pcb, mtu); + return 0; + } + + doing_multilink = 1; + + /* + * Find the appropriate bundle or join a new one. + * First we make up a name for the bundle. + * The length estimate is worst-case assuming every + * character has to be quoted. 
+ */ + l = 4 * strlen(peer_authname) + 10; + if (ho->neg_endpoint) + l += 3 * ho->endpoint.length + 8; + if (bundle_name) + l += 3 * strlen(bundle_name) + 2; + bundle_id = malloc(l); + if (bundle_id == 0) + novm("bundle identifier"); + + p = bundle_id; + p += slprintf(p, l-1, "BUNDLE=\"%q\"", peer_authname); + if (ho->neg_endpoint || bundle_name) + *p++ = '/'; + if (ho->neg_endpoint) + p += slprintf(p, bundle_id+l-p, "%s", + epdisc_to_str(&ho->endpoint)); + if (bundle_name) + p += slprintf(p, bundle_id+l-p, "/%v", bundle_name); + + /* Make the key for the list of links belonging to the bundle */ + l = p - bundle_id; + blinks_id = malloc(l + 7); + if (blinks_id == NULL) + novm("bundle links key"); + slprintf(blinks_id, l + 7, "BUNDLE_LINKS=%s", bundle_id + 7); + + /* + * For demand mode, we only need to configure the bundle + * and attach the link. + */ + mtu = LWIP_MIN(ho->mrru, ao->mru); + if (demand) { + cfg_bundle(go->mrru, ho->mrru, go->neg_ssnhf, ho->neg_ssnhf); + netif_set_mtu(pcb, mtu); + script_setenv("BUNDLE", bundle_id + 7, 1); + return 0; + } + + /* + * Check if the bundle ID is already in the database. + */ + unit = -1; + lock_db(); + key.dptr = bundle_id; + key.dsize = p - bundle_id; + pid = tdb_fetch(pppdb, key); + if (pid.dptr != NULL) { + /* bundle ID exists, see if the pppd record exists */ + rec = tdb_fetch(pppdb, pid); + if (rec.dptr != NULL && rec.dsize > 0) { + /* make sure the string is null-terminated */ + rec.dptr[rec.dsize-1] = 0; + /* parse the interface number */ + parse_num(rec.dptr, "IFNAME=ppp", &unit); + /* check the pid value */ + if (!parse_num(rec.dptr, "PPPD_PID=", &pppd_pid) + || !process_exists(pppd_pid) + || !owns_unit(pid, unit)) + unit = -1; + free(rec.dptr); + } + free(pid.dptr); + } + + if (unit >= 0) { + /* attach to existing unit */ + if (bundle_attach(unit)) { + set_ifunit(0); + script_setenv("BUNDLE", bundle_id + 7, 0); + make_bundle_links(1); + unlock_db(); + info("Link attached to %s", ifname); + return 1; + } + /* attach failed because bundle doesn't exist */ + } + + /* we have to make a new bundle */ + make_new_bundle(go->mrru, ho->mrru, go->neg_ssnhf, ho->neg_ssnhf); + set_ifunit(1); + netif_set_mtu(pcb, mtu); + script_setenv("BUNDLE", bundle_id + 7, 1); + make_bundle_links(pcb); + unlock_db(); + info("New bundle %s created", ifname); + multilink_master = 1; + return 0; +} + +void mp_exit_bundle() +{ + lock_db(); + remove_bundle_link(); + unlock_db(); +} + +static void sendhup(char *str) +{ + int pid; + + if (parse_num(str, "PPPD_PID=", &pid) && pid != getpid()) { + if (debug) + dbglog("sending SIGHUP to process %d", pid); + kill(pid, SIGHUP); + } +} + +void mp_bundle_terminated() +{ + TDB_DATA key; + + bundle_terminating = 1; + upper_layers_down(pcb); + notice("Connection terminated."); +#if PPP_STATS_SUPPORT + print_link_stats(); +#endif /* PPP_STATS_SUPPORT */ + if (!demand) { + remove_pidfiles(); + script_unsetenv("IFNAME"); + } + + lock_db(); + destroy_bundle(); + iterate_bundle_links(sendhup); + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + tdb_delete(pppdb, key); + unlock_db(); + + new_phase(PPP_PHASE_DEAD); + + doing_multilink = 0; + multilink_master = 0; +} + +static void make_bundle_links(int append) +{ + TDB_DATA key, rec; + char *p; + char entry[32]; + int l; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + slprintf(entry, sizeof(entry), "%s;", db_key); + p = entry; + if (append) { + rec = tdb_fetch(pppdb, key); + if (rec.dptr != NULL && rec.dsize > 0) { + rec.dptr[rec.dsize-1] = 0; + if (strstr(rec.dptr, 
db_key) != NULL) { + /* already in there? strange */ + warn("link entry already exists in tdb"); + return; + } + l = rec.dsize + strlen(entry); + p = malloc(l); + if (p == NULL) + novm("bundle link list"); + slprintf(p, l, "%s%s", rec.dptr, entry); + } else { + warn("bundle link list not found"); + } + if (rec.dptr != NULL) + free(rec.dptr); + } + rec.dptr = p; + rec.dsize = strlen(p) + 1; + if (tdb_store(pppdb, key, rec, TDB_REPLACE)) + error("couldn't %s bundle link list", + append? "update": "create"); + if (p != entry) + free(p); +} + +static void remove_bundle_link() +{ + TDB_DATA key, rec; + char entry[32]; + char *p, *q; + int l; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + slprintf(entry, sizeof(entry), "%s;", db_key); + + rec = tdb_fetch(pppdb, key); + if (rec.dptr == NULL || rec.dsize <= 0) { + if (rec.dptr != NULL) + free(rec.dptr); + return; + } + rec.dptr[rec.dsize-1] = 0; + p = strstr(rec.dptr, entry); + if (p != NULL) { + q = p + strlen(entry); + l = strlen(q) + 1; + memmove(p, q, l); + rec.dsize = p - rec.dptr + l; + if (tdb_store(pppdb, key, rec, TDB_REPLACE)) + error("couldn't update bundle link list (removal)"); + } + free(rec.dptr); +} + +static void iterate_bundle_links(void (*func)(char *)) +{ + TDB_DATA key, rec, pp; + char *p, *q; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + rec = tdb_fetch(pppdb, key); + if (rec.dptr == NULL || rec.dsize <= 0) { + error("bundle link list not found (iterating list)"); + if (rec.dptr != NULL) + free(rec.dptr); + return; + } + p = rec.dptr; + p[rec.dsize-1] = 0; + while ((q = strchr(p, ';')) != NULL) { + *q = 0; + key.dptr = p; + key.dsize = q - p; + pp = tdb_fetch(pppdb, key); + if (pp.dptr != NULL && pp.dsize > 0) { + pp.dptr[pp.dsize-1] = 0; + func(pp.dptr); + } + if (pp.dptr != NULL) + free(pp.dptr); + p = q + 1; + } + free(rec.dptr); +} + +static int +parse_num(str, key, valp) + char *str; + const char *key; + int *valp; +{ + char *p, *endp; + int i; + + p = strstr(str, key); + if (p != 0) { + p += strlen(key); + i = strtol(p, &endp, 10); + if (endp != p && (*endp == 0 || *endp == ';')) { + *valp = i; + return 1; + } + } + return 0; +} + +/* + * Check whether the pppd identified by `key' still owns ppp unit `unit'. + */ +static int +owns_unit(key, unit) + TDB_DATA key; + int unit; +{ + char ifkey[32]; + TDB_DATA kd, vd; + int ret = 0; + + slprintf(ifkey, sizeof(ifkey), "IFNAME=ppp%d", unit); + kd.dptr = ifkey; + kd.dsize = strlen(ifkey); + vd = tdb_fetch(pppdb, kd); + if (vd.dptr != NULL) { + ret = vd.dsize == key.dsize + && memcmp(vd.dptr, key.dptr, vd.dsize) == 0; + free(vd.dptr); + } + return ret; +} + +static int +get_default_epdisc(ep) + struct epdisc *ep; +{ + char *p; + struct hostent *hp; + u32_t addr; + + /* First try for an ethernet MAC address */ + p = get_first_ethernet(); + if (p != 0 && get_if_hwaddr(ep->value, p) >= 0) { + ep->class = EPD_MAC; + ep->length = 6; + return 1; + } + + /* see if our hostname corresponds to a reasonable IP address */ + hp = gethostbyname(hostname); + if (hp != NULL) { + addr = *(u32_t *)hp->h_addr; + if (!bad_ip_adrs(addr)) { + addr = lwip_ntohl(addr); + if (!LOCAL_IP_ADDR(addr)) { + ep->class = EPD_IP; + set_ip_epdisc(ep, addr); + return 1; + } + } + } + + return 0; +} + +/* + * epdisc_to_str - make a printable string from an endpoint discriminator. 
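
For orientation, the helper below and str_to_epdisc() further down convert between struct epdisc and a printable form: the class name, a separator, then the discriminator bytes in hex. This whole file is only compiled when HAVE_MULTILINK is defined, which this port does not do, so the following round trip is purely illustrative:

    /* Expected behaviour for a MAC-class discriminator (sketch): */
    struct epdisc ep;
    str_to_epdisc(&ep, "MAC:00:11:22:33:44:55");  /* ep.class == EPD_MAC, ep.length == 6 */
    epdisc_to_str(&ep);                           /* -> "MAC:00:11:22:33:44:55"          */
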
+ */ + +static char *endp_class_names[] = { + "null", "local", "IP", "MAC", "magic", "phone" +}; + +char * +epdisc_to_str(ep) + struct epdisc *ep; +{ + static char str[MAX_ENDP_LEN*3+8]; + u_char *p = ep->value; + int i, mask = 0; + char *q, c, c2; + + if (ep->class == EPD_NULL && ep->length == 0) + return "null"; + if (ep->class == EPD_IP && ep->length == 4) { + u32_t addr; + + GETLONG(addr, p); + slprintf(str, sizeof(str), "IP:%I", lwip_htonl(addr)); + return str; + } + + c = ':'; + c2 = '.'; + if (ep->class == EPD_MAC && ep->length == 6) + c2 = ':'; + else if (ep->class == EPD_MAGIC && (ep->length % 4) == 0) + mask = 3; + q = str; + if (ep->class <= EPD_PHONENUM) + q += slprintf(q, sizeof(str)-1, "%s", + endp_class_names[ep->class]); + else + q += slprintf(q, sizeof(str)-1, "%d", ep->class); + c = ':'; + for (i = 0; i < ep->length && i < MAX_ENDP_LEN; ++i) { + if ((i & mask) == 0) { + *q++ = c; + c = c2; + } + q += slprintf(q, str + sizeof(str) - q, "%.2x", ep->value[i]); + } + return str; +} + +static int hexc_val(int c) +{ + if (c >= 'a') + return c - 'a' + 10; + if (c >= 'A') + return c - 'A' + 10; + return c - '0'; +} + +int +str_to_epdisc(ep, str) + struct epdisc *ep; + char *str; +{ + int i, l; + char *p, *endp; + + for (i = EPD_NULL; i <= EPD_PHONENUM; ++i) { + int sl = strlen(endp_class_names[i]); + if (strncasecmp(str, endp_class_names[i], sl) == 0) { + str += sl; + break; + } + } + if (i > EPD_PHONENUM) { + /* not a class name, try a decimal class number */ + i = strtol(str, &endp, 10); + if (endp == str) + return 0; /* can't parse class number */ + str = endp; + } + ep->class = i; + if (*str == 0) { + ep->length = 0; + return 1; + } + if (*str != ':' && *str != '.') + return 0; + ++str; + + if (i == EPD_IP) { + u32_t addr; + i = parse_dotted_ip(str, &addr); + if (i == 0 || str[i] != 0) + return 0; + set_ip_epdisc(ep, addr); + return 1; + } + if (i == EPD_MAC && get_if_hwaddr(ep->value, str) >= 0) { + ep->length = 6; + return 1; + } + + p = str; + for (l = 0; l < MAX_ENDP_LEN; ++l) { + if (*str == 0) + break; + if (p <= str) + for (p = str; isxdigit(*p); ++p) + ; + i = p - str; + if (i == 0) + return 0; + ep->value[l] = hexc_val(*str++); + if ((i & 1) == 0) + ep->value[l] = (ep->value[l] << 4) + hexc_val(*str++); + if (*str == ':' || *str == '.') + ++str; + } + if (*str != 0 || (ep->class == EPD_MAC && l != 6)) + return 0; + ep->length = l; + return 1; +} + +#endif /* PPP_SUPPORT && HAVE_MULTILINK */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c new file mode 100644 index 000000000..a9c18e304 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c @@ -0,0 +1,1628 @@ +/***************************************************************************** +* ppp.c - Network Point to Point Protocol program file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 by Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. 
+* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original. +*****************************************************************************/ + +/* + * ppp_defs.h - PPP definitions. + * + * if_pppvar.h - private structures and declarations for PPP. + * + * Copyright (c) 1994 The Australian National University. + * All rights reserved. + * + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. The Australian National University + * makes no representations about the suitability of this software for + * any purpose. + * + * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY + * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF + * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO + * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, + * OR MODIFICATIONS. + */ + +/* + * if_ppp.h - Point-to-Point Protocol definitions. + * + * Copyright (c) 1989 Carnegie Mellon University. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by Carnegie Mellon University. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ */ + +/** + * @defgroup ppp PPP + * @ingroup netifs + * @verbinclude "ppp.txt" + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/tcpip.h" +#include "lwip/api.h" +#include "lwip/snmp.h" +#include "lwip/ip4.h" /* for ip4_input() */ +#if PPP_IPV6_SUPPORT +#include "lwip/ip6.h" /* for ip6_input() */ +#endif /* PPP_IPV6_SUPPORT */ +#include "lwip/dns.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppos.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/magic.h" + +#if PAP_SUPPORT +#include "netif/ppp/upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "netif/ppp/eap.h" +#endif /* EAP_SUPPORT */ +#if CCP_SUPPORT +#include "netif/ppp/ccp.h" +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT +#include "netif/ppp/mppe.h" +#endif /* MPPE_SUPPORT */ +#if ECP_SUPPORT +#include "netif/ppp/ecp.h" +#endif /* EAP_SUPPORT */ +#if VJ_SUPPORT +#include "netif/ppp/vj.h" +#endif /* VJ_SUPPORT */ +#if PPP_IPV4_SUPPORT +#include "netif/ppp/ipcp.h" +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +#include "netif/ppp/ipv6cp.h" +#endif /* PPP_IPV6_SUPPORT */ + +/*************************/ +/*** LOCAL DEFINITIONS ***/ +/*************************/ + +/* Memory pools */ +#if PPPOS_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOS_PCB); +#endif +#if PPPOE_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOE_IF); +#endif +#if PPPOL2TP_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOL2TP_PCB); +#endif +#if LWIP_PPP_API && LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL_PROTOTYPE(PPPAPI_MSG); +#endif +LWIP_MEMPOOL_DECLARE(PPP_PCB, MEMP_NUM_PPP_PCB, sizeof(ppp_pcb), "PPP_PCB") + +/* FIXME: add stats per PPP session */ +#if PPP_STATS_SUPPORT +static struct timeval start_time; /* Time when link was started. */ +static struct pppd_stats old_link_stats; +struct pppd_stats link_stats; +unsigned link_connect_time; +int link_stats_valid; +#endif /* PPP_STATS_SUPPORT */ + +/* + * PPP Data Link Layer "protocol" table. + * One entry per supported protocol. + * The last entry must be NULL. + */ +const struct protent* const protocols[] = { + &lcp_protent, +#if PAP_SUPPORT + &pap_protent, +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + &chap_protent, +#endif /* CHAP_SUPPORT */ +#if CBCP_SUPPORT + &cbcp_protent, +#endif /* CBCP_SUPPORT */ +#if PPP_IPV4_SUPPORT + &ipcp_protent, +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + &ipv6cp_protent, +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT + &ccp_protent, +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT + &ecp_protent, +#endif /* ECP_SUPPORT */ +#ifdef AT_CHANGE + &atcp_protent, +#endif /* AT_CHANGE */ +#if EAP_SUPPORT + &eap_protent, +#endif /* EAP_SUPPORT */ + NULL +}; + +/* Prototypes for procedures local to this file. 
*/ +static void ppp_do_connect(void *arg); +static err_t ppp_netif_init_cb(struct netif *netif); +#if PPP_IPV4_SUPPORT +static err_t ppp_netif_output_ip4(struct netif *netif, struct pbuf *pb, const ip4_addr_t *ipaddr); +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +static err_t ppp_netif_output_ip6(struct netif *netif, struct pbuf *pb, const ip6_addr_t *ipaddr); +#endif /* PPP_IPV6_SUPPORT */ +static err_t ppp_netif_output(struct netif *netif, struct pbuf *pb, u16_t protocol); + +/***********************************/ +/*** PUBLIC FUNCTION DEFINITIONS ***/ +/***********************************/ +#if PPP_AUTH_SUPPORT +void ppp_set_auth(ppp_pcb *pcb, u8_t authtype, const char *user, const char *passwd) { + LWIP_ASSERT_CORE_LOCKED(); +#if PAP_SUPPORT + pcb->settings.refuse_pap = !(authtype & PPPAUTHTYPE_PAP); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + pcb->settings.refuse_chap = !(authtype & PPPAUTHTYPE_CHAP); +#if MSCHAP_SUPPORT + pcb->settings.refuse_mschap = !(authtype & PPPAUTHTYPE_MSCHAP); + pcb->settings.refuse_mschap_v2 = !(authtype & PPPAUTHTYPE_MSCHAP_V2); +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + pcb->settings.refuse_eap = !(authtype & PPPAUTHTYPE_EAP); +#endif /* EAP_SUPPORT */ + pcb->settings.user = user; + pcb->settings.passwd = passwd; +} +#endif /* PPP_AUTH_SUPPORT */ + +#if MPPE_SUPPORT +/* Set MPPE configuration */ +void ppp_set_mppe(ppp_pcb *pcb, u8_t flags) { + if (flags == PPP_MPPE_DISABLE) { + pcb->settings.require_mppe = 0; + return; + } + + pcb->settings.require_mppe = 1; + pcb->settings.refuse_mppe_stateful = !(flags & PPP_MPPE_ALLOW_STATEFUL); + pcb->settings.refuse_mppe_40 = !!(flags & PPP_MPPE_REFUSE_40); + pcb->settings.refuse_mppe_128 = !!(flags & PPP_MPPE_REFUSE_128); +} +#endif /* MPPE_SUPPORT */ + +#if PPP_NOTIFY_PHASE +void ppp_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb) { + pcb->notify_phase_cb = notify_phase_cb; + notify_phase_cb(pcb, pcb->phase, pcb->ctx_cb); +} +#endif /* PPP_NOTIFY_PHASE */ + +/* + * Initiate a PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * Holdoff is the time to wait (in seconds) before initiating + * the connection. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_connect(ppp_pcb *pcb, u16_t holdoff) { + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_ALREADY; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_connect[%d]: holdoff=%d\n", pcb->netif->num, holdoff)); + + magic_randomize(); + + if (holdoff == 0) { + ppp_do_connect(pcb); + return ERR_OK; + } + + new_phase(pcb, PPP_PHASE_HOLDOFF); + sys_timeout((u32_t)(holdoff*1000), ppp_do_connect, pcb); + return ERR_OK; +} + +#if PPP_SERVER +/* + * Listen for an incoming PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_listen(ppp_pcb *pcb) { + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_ALREADY; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_listen[%d]\n", pcb->netif->num)); + + magic_randomize(); + + if (pcb->link_cb->listen) { + new_phase(pcb, PPP_PHASE_INITIALIZE); + pcb->link_cb->listen(pcb, pcb->link_ctx_cb); + return ERR_OK; + } + return ERR_IF; +} +#endif /* PPP_SERVER */ + +/* + * Initiate the end of a PPP connection. + * Any outstanding packets in the queues are dropped. 
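
ppp_set_auth(), ppp_set_mppe() and ppp_connect()/ppp_listen() above are the application-facing entry points of this file. A minimal PPPoS client sketch; pppos_create() comes from netif/ppp/pppos.h (not part of this hunk), uart_write() stands in for whatever the serial driver provides, and the callback names are illustrative:

    #include "netif/ppp/pppos.h"

    static struct netif ppp_netif;

    u32_t uart_write(u8_t *data, u32_t len);   /* hypothetical UART driver function */

    /* Output glue: hand framed PPP bytes to the (assumed) UART driver. */
    static u32_t ppp_output_cb(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx) {
      LWIP_UNUSED_ARG(pcb);
      LWIP_UNUSED_ARG(ctx);
      return uart_write(data, len);
    }

    /* Link status callback: PPPERR_NONE means the session came up. */
    static void ppp_status_cb(ppp_pcb *pcb, int err_code, void *ctx) {
      LWIP_UNUSED_ARG(pcb);
      LWIP_UNUSED_ARG(ctx);
      if (err_code == PPPERR_NONE) {
        /* link is up, the netif now has addresses */
      }
    }

    void ppp_client_start(void) {
      ppp_pcb *pcb = pppos_create(&ppp_netif, ppp_output_cb, ppp_status_cb, NULL);
      if (pcb == NULL) {
        return;
      }
      ppp_set_auth(pcb, PPPAUTHTYPE_PAP, "user", "secret");
      ppp_connect(pcb, 0);            /* no holdoff, start negotiating now */
    }
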
+ * + * Setting nocarrier to 1 close the PPP connection without initiating the + * shutdown procedure. Always using nocarrier = 0 is still recommended, + * this is going to take a little longer time if your link is down, but + * is a safer choice for the PPP state machine. + * + * Return 0 on success, an error code on failure. + */ +err_t +ppp_close(ppp_pcb *pcb, u8_t nocarrier) +{ + LWIP_ASSERT_CORE_LOCKED(); + + pcb->err_code = PPPERR_USER; + + /* holdoff phase, cancel the reconnection */ + if (pcb->phase == PPP_PHASE_HOLDOFF) { + sys_untimeout(ppp_do_connect, pcb); + new_phase(pcb, PPP_PHASE_DEAD); + } + + /* dead phase, nothing to do, call the status callback to be consistent */ + if (pcb->phase == PPP_PHASE_DEAD) { + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return ERR_OK; + } + + /* Already terminating, nothing to do */ + if (pcb->phase >= PPP_PHASE_TERMINATE) { + return ERR_INPROGRESS; + } + + /* LCP not open, close link protocol */ + if (pcb->phase < PPP_PHASE_ESTABLISH) { + new_phase(pcb, PPP_PHASE_DISCONNECT); + ppp_link_terminated(pcb); + return ERR_OK; + } + + /* + * Only accept carrier lost signal on the stable running phase in order + * to prevent changing the PPP phase FSM in transition phases. + * + * Always using nocarrier = 0 is still recommended, this is going to + * take a little longer time, but is a safer choice from FSM point of view. + */ + if (nocarrier && pcb->phase == PPP_PHASE_RUNNING) { + PPPDEBUG(LOG_DEBUG, ("ppp_close[%d]: carrier lost -> lcp_lowerdown\n", pcb->netif->num)); + lcp_lowerdown(pcb); + /* forced link termination, this will force link protocol to disconnect. */ + link_terminated(pcb); + return ERR_OK; + } + + /* Disconnect */ + PPPDEBUG(LOG_DEBUG, ("ppp_close[%d]: kill_link -> lcp_close\n", pcb->netif->num)); + /* LCP soft close request. */ + lcp_close(pcb, "User request"); + return ERR_OK; +} + +/* + * Release the control block. + * + * This can only be called if PPP is in the dead phase. + * + * You must use ppp_close() before if you wish to terminate + * an established PPP session. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_free(ppp_pcb *pcb) { + err_t err; + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_CONN; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_free[%d]\n", pcb->netif->num)); + + netif_remove(pcb->netif); + + err = pcb->link_cb->free(pcb, pcb->link_ctx_cb); + + LWIP_MEMPOOL_FREE(PPP_PCB, pcb); + return err; +} + +/* Get and set parameters for the given connection. + * Return 0 on success, an error code on failure. */ +err_t +ppp_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb == NULL) { + return ERR_VAL; + } + + switch(cmd) { + case PPPCTLG_UPSTATUS: /* Get the PPP up status. */ + if (!arg) { + goto fail; + } + *(int *)arg = (int)(0 +#if PPP_IPV4_SUPPORT + || pcb->if4_up +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + || pcb->if6_up +#endif /* PPP_IPV6_SUPPORT */ + ); + return ERR_OK; + + case PPPCTLG_ERRCODE: /* Get the PPP error code. 
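
The comments above encode an ordering rule: ppp_free() is only legal once the session is back in the dead phase, which the status callback signals after ppp_close() has completed. A sketch of an orderly teardown, reusing the status callback idea from the earlier sketch:

    void ppp_client_stop(ppp_pcb *pcb) {
      ppp_close(pcb, 0);   /* 0 = orderly LCP shutdown, not a carrier-lost close */
    }

    /* ...and in the link status callback, once the close has finished: */
    /*   if (err_code == PPPERR_USER) { ppp_free(pcb); }                 */
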
*/ + if (!arg) { + goto fail; + } + *(int *)arg = (int)(pcb->err_code); + return ERR_OK; + + default: + goto fail; + } + +fail: + return ERR_VAL; +} + + +/**********************************/ +/*** LOCAL FUNCTION DEFINITIONS ***/ +/**********************************/ + +static void ppp_do_connect(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + LWIP_ASSERT("pcb->phase == PPP_PHASE_DEAD || pcb->phase == PPP_PHASE_HOLDOFF", pcb->phase == PPP_PHASE_DEAD || pcb->phase == PPP_PHASE_HOLDOFF); + + new_phase(pcb, PPP_PHASE_INITIALIZE); + pcb->link_cb->connect(pcb, pcb->link_ctx_cb); +} + +/* + * ppp_netif_init_cb - netif init callback + */ +static err_t ppp_netif_init_cb(struct netif *netif) { + netif->name[0] = 'p'; + netif->name[1] = 'p'; +#if PPP_IPV4_SUPPORT + netif->output = ppp_netif_output_ip4; +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + netif->output_ip6 = ppp_netif_output_ip6; +#endif /* PPP_IPV6_SUPPORT */ + netif->flags = NETIF_FLAG_UP; +#if LWIP_NETIF_HOSTNAME + /* @todo: Initialize interface hostname */ + /* netif_set_hostname(netif, "lwip"); */ +#endif /* LWIP_NETIF_HOSTNAME */ + return ERR_OK; +} + +#if PPP_IPV4_SUPPORT +/* + * Send an IPv4 packet on the given connection. + */ +static err_t ppp_netif_output_ip4(struct netif *netif, struct pbuf *pb, const ip4_addr_t *ipaddr) { + LWIP_UNUSED_ARG(ipaddr); + return ppp_netif_output(netif, pb, PPP_IP); +} +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +/* + * Send an IPv6 packet on the given connection. + */ +static err_t ppp_netif_output_ip6(struct netif *netif, struct pbuf *pb, const ip6_addr_t *ipaddr) { + LWIP_UNUSED_ARG(ipaddr); + return ppp_netif_output(netif, pb, PPP_IPV6); +} +#endif /* PPP_IPV6_SUPPORT */ + +static err_t ppp_netif_output(struct netif *netif, struct pbuf *pb, u16_t protocol) { + ppp_pcb *pcb = (ppp_pcb*)netif->state; + err_t err; + struct pbuf *fpb = NULL; + + /* Check that the link is up. */ + if (0 +#if PPP_IPV4_SUPPORT + || (protocol == PPP_IP && !pcb->if4_up) +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + || (protocol == PPP_IPV6 && !pcb->if6_up) +#endif /* PPP_IPV6_SUPPORT */ + ) { + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: link not up\n", pcb->netif->num)); + goto err_rte_drop; + } + +#if MPPE_SUPPORT + /* If MPPE is required, refuse any IP packet until we are able to crypt them. */ + if (pcb->settings.require_mppe && pcb->ccp_transmit_method != CI_MPPE) { + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: MPPE required, not up\n", pcb->netif->num)); + goto err_rte_drop; + } +#endif /* MPPE_SUPPORT */ + +#if VJ_SUPPORT + /* + * Attempt Van Jacobson header compression if VJ is configured and + * this is an IP packet. + */ + if (protocol == PPP_IP && pcb->vj_enabled) { + switch (vj_compress_tcp(&pcb->vj_comp, &pb)) { + case TYPE_IP: + /* No change... 
+ protocol = PPP_IP; */ + break; + case TYPE_COMPRESSED_TCP: + /* vj_compress_tcp() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_VJC_COMP; + break; + case TYPE_UNCOMPRESSED_TCP: + /* vj_compress_tcp() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_VJC_UNCOMP; + break; + default: + PPPDEBUG(LOG_WARNING, ("ppp_netif_output[%d]: bad IP packet\n", pcb->netif->num)); + LINK_STATS_INC(link.proterr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pcb->netif, ifoutdiscards); + return ERR_VAL; + } + } +#endif /* VJ_SUPPORT */ + +#if CCP_SUPPORT + switch (pcb->ccp_transmit_method) { + case 0: + break; /* Don't compress */ +#if MPPE_SUPPORT + case CI_MPPE: + if ((err = mppe_compress(pcb, &pcb->mppe_comp, &pb, protocol)) != ERR_OK) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + goto err; + } + /* if VJ compressor returned a new allocated pbuf, free it */ + if (fpb) { + pbuf_free(fpb); + } + /* mppe_compress() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_COMP; + break; +#endif /* MPPE_SUPPORT */ + default: + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: bad CCP transmit method\n", pcb->netif->num)); + goto err_rte_drop; /* Cannot really happen, we only negotiate what we are able to do */ + } +#endif /* CCP_SUPPORT */ + + err = pcb->link_cb->netif_output(pcb, pcb->link_ctx_cb, pb, protocol); + goto err; + +err_rte_drop: + err = ERR_RTE; + LINK_STATS_INC(link.rterr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); +err: + if (fpb) { + pbuf_free(fpb); + } + return err; +} + +/************************************/ +/*** PRIVATE FUNCTION DEFINITIONS ***/ +/************************************/ + +/* Initialize the PPP subsystem. */ +int ppp_init(void) +{ +#if PPPOS_SUPPORT + LWIP_MEMPOOL_INIT(PPPOS_PCB); +#endif +#if PPPOE_SUPPORT + LWIP_MEMPOOL_INIT(PPPOE_IF); +#endif +#if PPPOL2TP_SUPPORT + LWIP_MEMPOOL_INIT(PPPOL2TP_PCB); +#endif +#if LWIP_PPP_API && LWIP_MPU_COMPATIBLE + LWIP_MEMPOOL_INIT(PPPAPI_MSG); +#endif + + LWIP_MEMPOOL_INIT(PPP_PCB); + + /* + * Initialize magic number generator now so that protocols may + * use magic numbers in initialization. + */ + magic_init(); + + return 0; +} + +/* + * Create a new PPP control block. + * + * This initializes the PPP control block but does not + * attempt to negotiate the LCP session. + * + * Return a new PPP connection control block pointer + * on success or a null pointer on failure. + */ +ppp_pcb *ppp_new(struct netif *pppif, const struct link_callbacks *callbacks, void *link_ctx_cb, ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { + ppp_pcb *pcb; + const struct protent *protp; + int i; + + /* PPP is single-threaded: without a callback, + * there is no way to know when the link is up. 
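
None of the pools initialized in ppp_init() above exist unless PPP is switched on in lwipopts.h. A sketch of the options an application might set for a single PPPoS interface; the values are illustrative and the defaults live in netif/ppp/ppp_opts.h:

    /* lwipopts.h (sketch): enable the PPP core built by this file */
    #define PPP_SUPPORT       1   /* build ppp.c and friends               */
    #define PPPOS_SUPPORT     1   /* PPP over serial framing (pppos.c)     */
    #define MEMP_NUM_PPP_PCB  1   /* one pcb from the PPP_PCB pool above   */
    #define PAP_SUPPORT       1   /* allow PAP for ppp_set_auth() above    */
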
*/ + if (link_status_cb == NULL) { + return NULL; + } + + pcb = (ppp_pcb*)LWIP_MEMPOOL_ALLOC(PPP_PCB); + if (pcb == NULL) { + return NULL; + } + + memset(pcb, 0, sizeof(ppp_pcb)); + + /* default configuration */ +#if PAP_SUPPORT + pcb->settings.pap_timeout_time = UPAP_DEFTIMEOUT; + pcb->settings.pap_max_transmits = UPAP_DEFTRANSMITS; +#if PPP_SERVER + pcb->settings.pap_req_timeout = UPAP_DEFREQTIME; +#endif /* PPP_SERVER */ +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + pcb->settings.chap_timeout_time = CHAP_DEFTIMEOUT; + pcb->settings.chap_max_transmits = CHAP_DEFTRANSMITS; +#if PPP_SERVER + pcb->settings.chap_rechallenge_time = CHAP_DEFRECHALLENGETIME; +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPPORT */ + +#if EAP_SUPPORT + pcb->settings.eap_req_time = EAP_DEFREQTIME; + pcb->settings.eap_allow_req = EAP_DEFALLOWREQ; +#if PPP_SERVER + pcb->settings.eap_timeout_time = EAP_DEFTIMEOUT; + pcb->settings.eap_max_transmits = EAP_DEFTRANSMITS; +#endif /* PPP_SERVER */ +#endif /* EAP_SUPPORT */ + + pcb->settings.lcp_loopbackfail = LCP_DEFLOOPBACKFAIL; + pcb->settings.lcp_echo_interval = LCP_ECHOINTERVAL; + pcb->settings.lcp_echo_fails = LCP_MAXECHOFAILS; + + pcb->settings.fsm_timeout_time = FSM_DEFTIMEOUT; + pcb->settings.fsm_max_conf_req_transmits = FSM_DEFMAXCONFREQS; + pcb->settings.fsm_max_term_transmits = FSM_DEFMAXTERMREQS; + pcb->settings.fsm_max_nak_loops = FSM_DEFMAXNAKLOOPS; + + pcb->netif = pppif; + MIB2_INIT_NETIF(pppif, snmp_ifType_ppp, 0); + if (!netif_add(pcb->netif, +#if LWIP_IPV4 + IP4_ADDR_ANY4, IP4_ADDR_BROADCAST, IP4_ADDR_ANY4, +#endif /* LWIP_IPV4 */ + (void *)pcb, ppp_netif_init_cb, NULL)) { + LWIP_MEMPOOL_FREE(PPP_PCB, pcb); + PPPDEBUG(LOG_ERR, ("ppp_new: netif_add failed\n")); + return NULL; + } + + pcb->link_cb = callbacks; + pcb->link_ctx_cb = link_ctx_cb; + pcb->link_status_cb = link_status_cb; + pcb->ctx_cb = ctx_cb; + + /* + * Initialize each protocol. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + (*protp->init)(pcb); + } + + new_phase(pcb, PPP_PHASE_DEAD); + return pcb; +} + +/** Initiate LCP open request */ +void ppp_start(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_start[%d]\n", pcb->netif->num)); + + /* Clean data not taken care by anything else, mostly shared data. */ +#if PPP_STATS_SUPPORT + link_stats_valid = 0; +#endif /* PPP_STATS_SUPPORT */ +#if MPPE_SUPPORT + pcb->mppe_keys_set = 0; + memset(&pcb->mppe_comp, 0, sizeof(pcb->mppe_comp)); + memset(&pcb->mppe_decomp, 0, sizeof(pcb->mppe_decomp)); +#endif /* MPPE_SUPPORT */ +#if VJ_SUPPORT + vj_compress_init(&pcb->vj_comp); +#endif /* VJ_SUPPORT */ + + /* Start protocol */ + new_phase(pcb, PPP_PHASE_ESTABLISH); + lcp_open(pcb); + lcp_lowerup(pcb); + PPPDEBUG(LOG_DEBUG, ("ppp_start[%d]: finished\n", pcb->netif->num)); +} + +/** Called when link failed to setup */ +void ppp_link_failed(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_failed[%d]\n", pcb->netif->num)); + new_phase(pcb, PPP_PHASE_DEAD); + pcb->err_code = PPPERR_OPEN; + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); +} + +/** Called when link is normally down (i.e. it was asked to end) */ +void ppp_link_end(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_end[%d]\n", pcb->netif->num)); + new_phase(pcb, PPP_PHASE_DEAD); + if (pcb->err_code == PPPERR_NONE) { + pcb->err_code = PPPERR_CONNECT; + } + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); +} + +/* + * Pass the processed input packet to the appropriate handler. 
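
ppp_start(), ppp_link_failed() and ppp_link_end() above all go through new_phase(), which invokes the optional callback registered with ppp_set_notify_phase_callback() when PPP_NOTIFY_PHASE is enabled. A small sketch of such a callback; led_set() is a hypothetical board support function:

    #if PPP_NOTIFY_PHASE
    void led_set(int on);   /* hypothetical */

    static void ppp_phase_cb(ppp_pcb *pcb, u8_t phase, void *ctx) {
      LWIP_UNUSED_ARG(pcb);
      LWIP_UNUSED_ARG(ctx);
      switch (phase) {
      case PPP_PHASE_RUNNING: led_set(1); break;   /* link fully up   */
      case PPP_PHASE_DEAD:    led_set(0); break;   /* link down again */
      default:                            break;
      }
    }
    /* registered once after the pcb is created:
     *   ppp_set_notify_phase_callback(pcb, ppp_phase_cb); */
    #endif /* PPP_NOTIFY_PHASE */
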
+ * This function and all handlers run in the context of the tcpip_thread + */ +void ppp_input(ppp_pcb *pcb, struct pbuf *pb) { + u16_t protocol; +#if PPP_DEBUG && PPP_PROTOCOLNAME + const char *pname; +#endif /* PPP_DEBUG && PPP_PROTOCOLNAME */ + + magic_randomize(); + + if (pb->len < 2) { + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: packet too short\n", pcb->netif->num)); + goto drop; + } + protocol = (((u8_t *)pb->payload)[0] << 8) | ((u8_t*)pb->payload)[1]; + +#if PRINTPKT_SUPPORT + ppp_dump_packet(pcb, "rcvd", (unsigned char *)pb->payload, pb->len); +#endif /* PRINTPKT_SUPPORT */ + + pbuf_remove_header(pb, sizeof(protocol)); + + LINK_STATS_INC(link.recv); + MIB2_STATS_NETIF_INC(pcb->netif, ifinucastpkts); + MIB2_STATS_NETIF_ADD(pcb->netif, ifinoctets, pb->tot_len); + + /* + * Toss all non-LCP packets unless LCP is OPEN. + */ + if (protocol != PPP_LCP && pcb->lcp_fsm.state != PPP_FSM_OPENED) { + ppp_dbglog("Discarded non-LCP packet when LCP not open"); + goto drop; + } + + /* + * Until we get past the authentication phase, toss all packets + * except LCP, LQR and authentication packets. + */ + if (pcb->phase <= PPP_PHASE_AUTHENTICATE + && !(protocol == PPP_LCP +#if LQR_SUPPORT + || protocol == PPP_LQR +#endif /* LQR_SUPPORT */ +#if PAP_SUPPORT + || protocol == PPP_PAP +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || protocol == PPP_CHAP +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || protocol == PPP_EAP +#endif /* EAP_SUPPORT */ + )) { + ppp_dbglog("discarding proto 0x%x in phase %d", protocol, pcb->phase); + goto drop; + } + +#if CCP_SUPPORT +#if MPPE_SUPPORT + /* + * MPPE is required and unencrypted data has arrived (this + * should never happen!). We should probably drop the link if + * the protocol is in the range of what should be encrypted. + * At the least, we drop this packet. + */ + if (pcb->settings.require_mppe && protocol != PPP_COMP && protocol < 0x8000) { + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: MPPE required, received unencrypted data!\n", pcb->netif->num)); + goto drop; + } +#endif /* MPPE_SUPPORT */ + + if (protocol == PPP_COMP) { + u8_t *pl; + + switch (pcb->ccp_receive_method) { +#if MPPE_SUPPORT + case CI_MPPE: + if (mppe_decompress(pcb, &pcb->mppe_decomp, &pb) != ERR_OK) { + goto drop; + } + break; +#endif /* MPPE_SUPPORT */ + default: + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: bad CCP receive method\n", pcb->netif->num)); + goto drop; /* Cannot really happen, we only negotiate what we are able to do */ + } + + /* Assume no PFC */ + if (pb->len < 2) { + goto drop; + } + + /* Extract and hide protocol (do PFC decompression if necessary) */ + pl = (u8_t*)pb->payload; + if (pl[0] & 0x01) { + protocol = pl[0]; + pbuf_remove_header(pb, 1); + } else { + protocol = (pl[0] << 8) | pl[1]; + pbuf_remove_header(pb, 2); + } + } +#endif /* CCP_SUPPORT */ + + switch(protocol) { + +#if PPP_IPV4_SUPPORT + case PPP_IP: /* Internet Protocol */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: ip in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + ip4_input(pb, pcb->netif); + return; +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT + case PPP_IPV6: /* Internet Protocol Version 6 */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: ip6 in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + ip6_input(pb, pcb->netif); + return; +#endif /* PPP_IPV6_SUPPORT */ + +#if VJ_SUPPORT + case PPP_VJC_COMP: /* VJ compressed TCP */ + /* + * Clip off the VJ header and prepend the rebuilt TCP/IP header and + * pass the result to IP. 
+ */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: vj_comp in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + if (pcb->vj_enabled && vj_uncompress_tcp(&pb, &pcb->vj_comp) >= 0) { + ip4_input(pb, pcb->netif); + return; + } + /* Something's wrong so drop it. */ + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping VJ compressed\n", pcb->netif->num)); + break; + + case PPP_VJC_UNCOMP: /* VJ uncompressed TCP */ + /* + * Process the TCP/IP header for VJ header compression and then pass + * the packet to IP. + */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: vj_un in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + if (pcb->vj_enabled && vj_uncompress_uncomp(pb, &pcb->vj_comp) >= 0) { + ip4_input(pb, pcb->netif); + return; + } + /* Something's wrong so drop it. */ + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping VJ uncompressed\n", pcb->netif->num)); + break; +#endif /* VJ_SUPPORT */ + + default: { + int i; + const struct protent *protp; + + /* + * Upcall the proper protocol input routine. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol == protocol) { + pb = pbuf_coalesce(pb, PBUF_RAW); + (*protp->input)(pcb, (u8_t*)pb->payload, pb->len); + goto out; + } +#if 0 /* UNUSED + * + * This is actually a (hacked?) way for the Linux kernel to pass a data + * packet to pppd. pppd in normal condition only do signaling + * (LCP, PAP, CHAP, IPCP, ...) and does not handle any data packet at all. + * + * We don't even need this interface, which is only there because of PPP + * interface limitation between Linux kernel and pppd. For MPPE, which uses + * CCP to negotiate although it is not really a (de)compressor, we added + * ccp_resetrequest() in CCP and MPPE input data flow is calling either + * ccp_resetrequest() or lcp_close() if the issue is, respectively, non-fatal + * or fatal, this is what ccp_datainput() really do. + */ + if (protocol == (protp->protocol & ~0x8000) + && protp->datainput != NULL) { + (*protp->datainput)(pcb, pb->payload, pb->len); + goto out; + } +#endif /* UNUSED */ + } + +#if PPP_DEBUG +#if PPP_PROTOCOLNAME + pname = protocol_name(protocol); + if (pname != NULL) { + ppp_warn("Unsupported protocol '%s' (0x%x) received", pname, protocol); + } else +#endif /* PPP_PROTOCOLNAME */ + ppp_warn("Unsupported protocol 0x%x received", protocol); +#endif /* PPP_DEBUG */ + if (pbuf_add_header(pb, sizeof(protocol))) { + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping (pbuf_add_header failed)\n", pcb->netif->num)); + goto drop; + } + lcp_sprotrej(pcb, (u8_t*)pb->payload, pb->len); + } + break; + } + +drop: + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pcb->netif, ifindiscards); + +out: + pbuf_free(pb); +} + +/* + * Write a pbuf to a ppp link, only used from PPP functions + * to send PPP packets. + * + * IPv4 and IPv6 packets from lwIP are sent, respectively, + * with ppp_netif_output_ip4() and ppp_netif_output_ip6() + * functions (which are callbacks of the netif PPP interface). 
+ */ +err_t ppp_write(ppp_pcb *pcb, struct pbuf *p) { +#if PRINTPKT_SUPPORT + ppp_dump_packet(pcb, "sent", (unsigned char *)p->payload+2, p->len-2); +#endif /* PRINTPKT_SUPPORT */ + return pcb->link_cb->write(pcb, pcb->link_ctx_cb, p); +} + +void ppp_link_terminated(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_terminated[%d]\n", pcb->netif->num)); + pcb->link_cb->disconnect(pcb, pcb->link_ctx_cb); + PPPDEBUG(LOG_DEBUG, ("ppp_link_terminated[%d]: finished.\n", pcb->netif->num)); +} + + +/************************************************************************ + * Functions called by various PPP subsystems to configure + * the PPP interface or change the PPP phase. + */ + +/* + * new_phase - signal the start of a new phase of pppd's operation. + */ +void new_phase(ppp_pcb *pcb, int p) { + pcb->phase = p; + PPPDEBUG(LOG_DEBUG, ("ppp phase changed[%d]: phase=%d\n", pcb->netif->num, pcb->phase)); +#if PPP_NOTIFY_PHASE + if (pcb->notify_phase_cb != NULL) { + pcb->notify_phase_cb(pcb, p, pcb->ctx_cb); + } +#endif /* PPP_NOTIFY_PHASE */ +} + +/* + * ppp_send_config - configure the transmit-side characteristics of + * the ppp interface. + */ +int ppp_send_config(ppp_pcb *pcb, int mtu, u32_t accm, int pcomp, int accomp) { + LWIP_UNUSED_ARG(mtu); + /* pcb->mtu = mtu; -- set correctly with netif_set_mtu */ + + if (pcb->link_cb->send_config) { + pcb->link_cb->send_config(pcb, pcb->link_ctx_cb, accm, pcomp, accomp); + } + + PPPDEBUG(LOG_INFO, ("ppp_send_config[%d]\n", pcb->netif->num) ); + return 0; +} + +/* + * ppp_recv_config - configure the receive-side characteristics of + * the ppp interface. + */ +int ppp_recv_config(ppp_pcb *pcb, int mru, u32_t accm, int pcomp, int accomp) { + LWIP_UNUSED_ARG(mru); + + if (pcb->link_cb->recv_config) { + pcb->link_cb->recv_config(pcb, pcb->link_ctx_cb, accm, pcomp, accomp); + } + + PPPDEBUG(LOG_INFO, ("ppp_recv_config[%d]\n", pcb->netif->num)); + return 0; +} + +#if PPP_IPV4_SUPPORT +/* + * sifaddr - Config the interface IP addresses and netmask. + */ +int sifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr, u32_t netmask) { + ip4_addr_t ip, nm, gw; + + ip4_addr_set_u32(&ip, our_adr); + ip4_addr_set_u32(&nm, netmask); + ip4_addr_set_u32(&gw, his_adr); + netif_set_addr(pcb->netif, &ip, &nm, &gw); + return 1; +} + +/******************************************************************** + * + * cifaddr - Clear the interface IP addresses, and delete routes + * through the interface if possible. + */ +int cifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr) { + LWIP_UNUSED_ARG(our_adr); + LWIP_UNUSED_ARG(his_adr); + + netif_set_addr(pcb->netif, IP4_ADDR_ANY4, IP4_ADDR_BROADCAST, IP4_ADDR_ANY4); + return 1; +} + +#if 0 /* UNUSED - PROXY ARP */ +/******************************************************************** + * + * sifproxyarp - Make a proxy ARP entry for the peer. + */ + +int sifproxyarp(ppp_pcb *pcb, u32_t his_adr) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(his_adr); + return 0; +} + +/******************************************************************** + * + * cifproxyarp - Delete the proxy ARP entry for the peer. 
+ */ + +int cifproxyarp(ppp_pcb *pcb, u32_t his_adr) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(his_adr); + return 0; +} +#endif /* UNUSED - PROXY ARP */ + +#if LWIP_DNS +/* + * sdns - Config the DNS servers + */ +int sdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2) { + ip_addr_t ns; + LWIP_UNUSED_ARG(pcb); + + ip_addr_set_ip4_u32_val(ns, ns1); + dns_setserver(0, &ns); + ip_addr_set_ip4_u32_val(ns, ns2); + dns_setserver(1, &ns); + return 1; +} + +/******************************************************************** + * + * cdns - Clear the DNS servers + */ +int cdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2) { + const ip_addr_t *nsa; + ip_addr_t nsb; + LWIP_UNUSED_ARG(pcb); + + nsa = dns_getserver(0); + ip_addr_set_ip4_u32_val(nsb, ns1); + if (ip_addr_cmp(nsa, &nsb)) { + dns_setserver(0, IP_ADDR_ANY); + } + nsa = dns_getserver(1); + ip_addr_set_ip4_u32_val(nsb, ns2); + if (ip_addr_cmp(nsa, &nsb)) { + dns_setserver(1, IP_ADDR_ANY); + } + return 1; +} +#endif /* LWIP_DNS */ + +#if VJ_SUPPORT +/******************************************************************** + * + * sifvjcomp - config tcp header compression + */ +int sifvjcomp(ppp_pcb *pcb, int vjcomp, int cidcomp, int maxcid) { + pcb->vj_enabled = vjcomp; + pcb->vj_comp.compressSlot = cidcomp; + pcb->vj_comp.maxSlotIndex = maxcid; + PPPDEBUG(LOG_INFO, ("sifvjcomp[%d]: VJ compress enable=%d slot=%d max slot=%d\n", + pcb->netif->num, vjcomp, cidcomp, maxcid)); + return 0; +} +#endif /* VJ_SUPPORT */ + +/* + * sifup - Config the interface up and enable IP packets to pass. + */ +int sifup(ppp_pcb *pcb) { + pcb->if4_up = 1; + pcb->err_code = PPPERR_NONE; + netif_set_link_up(pcb->netif); + + PPPDEBUG(LOG_DEBUG, ("sifup[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return 1; +} + +/******************************************************************** + * + * sifdown - Disable the indicated protocol and config the interface + * down if there are no remaining protocols. + */ +int sifdown(ppp_pcb *pcb) { + + pcb->if4_up = 0; + + if (1 +#if PPP_IPV6_SUPPORT + /* set the interface down if IPv6 is down as well */ + && !pcb->if6_up +#endif /* PPP_IPV6_SUPPORT */ + ) { + /* make sure the netif link callback is called */ + netif_set_link_down(pcb->netif); + } + PPPDEBUG(LOG_DEBUG, ("sifdown[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + return 1; +} + +/******************************************************************** + * + * Return user specified netmask, modified by any mask we might determine + * for address `addr' (in network byte order). + * Here we scan through the system's list of interfaces, looking for + * any non-point-to-point interfaces which might appear to be on the same + * network as `addr'. If we find any, we OR in their netmask to the + * user-specified netmask. + */ +u32_t get_mask(u32_t addr) { +#if 0 + u32_t mask, nmask; + + addr = lwip_htonl(addr); + if (IP_CLASSA(addr)) { /* determine network mask for address class */ + nmask = IP_CLASSA_NET; + } else if (IP_CLASSB(addr)) { + nmask = IP_CLASSB_NET; + } else { + nmask = IP_CLASSC_NET; + } + + /* class D nets are disallowed by bad_ip_adrs */ + mask = PP_HTONL(0xffffff00UL) | lwip_htonl(nmask); + + /* XXX + * Scan through the system's network interfaces. + * Get each netmask and OR them into our mask. 
+ */ + /* return mask; */ + return mask; +#endif /* 0 */ + LWIP_UNUSED_ARG(addr); + return IPADDR_BROADCAST; +} +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +#define IN6_LLADDR_FROM_EUI64(ip6, eui64) do { \ + ip6.addr[0] = PP_HTONL(0xfe800000); \ + ip6.addr[1] = 0; \ + eui64_copy(eui64, ip6.addr[2]); \ + } while (0) + +/******************************************************************** + * + * sif6addr - Config the interface with an IPv6 link-local address + */ +int sif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64) { + ip6_addr_t ip6; + LWIP_UNUSED_ARG(his_eui64); + + IN6_LLADDR_FROM_EUI64(ip6, our_eui64); + netif_ip6_addr_set(pcb->netif, 0, &ip6); + netif_ip6_addr_set_state(pcb->netif, 0, IP6_ADDR_PREFERRED); + /* FIXME: should we add an IPv6 static neighbor using his_eui64 ? */ + return 1; +} + +/******************************************************************** + * + * cif6addr - Remove IPv6 address from interface + */ +int cif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64) { + LWIP_UNUSED_ARG(our_eui64); + LWIP_UNUSED_ARG(his_eui64); + + netif_ip6_addr_set_state(pcb->netif, 0, IP6_ADDR_INVALID); + netif_ip6_addr_set(pcb->netif, 0, IP6_ADDR_ANY6); + return 1; +} + +/* + * sif6up - Config the interface up and enable IPv6 packets to pass. + */ +int sif6up(ppp_pcb *pcb) { + + pcb->if6_up = 1; + pcb->err_code = PPPERR_NONE; + netif_set_link_up(pcb->netif); + + PPPDEBUG(LOG_DEBUG, ("sif6up[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return 1; +} + +/******************************************************************** + * + * sif6down - Disable the indicated protocol and config the interface + * down if there are no remaining protocols. + */ +int sif6down(ppp_pcb *pcb) { + + pcb->if6_up = 0; + + if (1 +#if PPP_IPV4_SUPPORT + /* set the interface down if IPv4 is down as well */ + && !pcb->if4_up +#endif /* PPP_IPV4_SUPPORT */ + ) { + /* make sure the netif link callback is called */ + netif_set_link_down(pcb->netif); + } + PPPDEBUG(LOG_DEBUG, ("sif6down[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + return 1; +} +#endif /* PPP_IPV6_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * sifnpmode - Set the mode for handling packets for a given NP. + */ +int sifnpmode(ppp_pcb *pcb, int proto, enum NPmode mode) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(proto); + LWIP_UNUSED_ARG(mode); + return 0; +} +#endif /* DEMAND_SUPPORT */ + +/* + * netif_set_mtu - set the MTU on the PPP network interface. + */ +void netif_set_mtu(ppp_pcb *pcb, int mtu) { + + pcb->netif->mtu = mtu; + PPPDEBUG(LOG_INFO, ("netif_set_mtu[%d]: mtu=%d\n", pcb->netif->num, mtu)); +} + +/* + * netif_get_mtu - get PPP interface MTU + */ +int netif_get_mtu(ppp_pcb *pcb) { + + return pcb->netif->mtu; +} + +#if CCP_SUPPORT +#if 0 /* unused */ +/* + * ccp_test - whether a given compression method is acceptable for use. + */ +int +ccp_test(ppp_pcb *pcb, u_char *opt_ptr, int opt_len, int for_transmit) +{ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(opt_ptr); + LWIP_UNUSED_ARG(opt_len); + LWIP_UNUSED_ARG(for_transmit); + return -1; +} +#endif /* unused */ + +/* + * ccp_set - inform about the current state of CCP. 
+ */ +void +ccp_set(ppp_pcb *pcb, u8_t isopen, u8_t isup, u8_t receive_method, u8_t transmit_method) +{ + LWIP_UNUSED_ARG(isopen); + LWIP_UNUSED_ARG(isup); + pcb->ccp_receive_method = receive_method; + pcb->ccp_transmit_method = transmit_method; + PPPDEBUG(LOG_DEBUG, ("ccp_set[%d]: is_open=%d, is_up=%d, receive_method=%u, transmit_method=%u\n", + pcb->netif->num, isopen, isup, receive_method, transmit_method)); +} + +void +ccp_reset_comp(ppp_pcb *pcb) +{ + switch (pcb->ccp_transmit_method) { +#if MPPE_SUPPORT + case CI_MPPE: + mppe_comp_reset(pcb, &pcb->mppe_comp); + break; +#endif /* MPPE_SUPPORT */ + default: + break; + } +} + +void +ccp_reset_decomp(ppp_pcb *pcb) +{ + switch (pcb->ccp_receive_method) { +#if MPPE_SUPPORT + case CI_MPPE: + mppe_decomp_reset(pcb, &pcb->mppe_decomp); + break; +#endif /* MPPE_SUPPORT */ + default: + break; + } +} + +#if 0 /* unused */ +/* + * ccp_fatal_error - returns 1 if decompression was disabled as a + * result of an error detected after decompression of a packet, + * 0 otherwise. This is necessary because of patent nonsense. + */ +int +ccp_fatal_error(ppp_pcb *pcb) +{ + LWIP_UNUSED_ARG(pcb); + return 1; +} +#endif /* unused */ +#endif /* CCP_SUPPORT */ + +#if PPP_IDLETIMELIMIT +/******************************************************************** + * + * get_idle_time - return how long the link has been idle. + */ +int get_idle_time(ppp_pcb *pcb, struct ppp_idle *ip) { + /* FIXME: add idle time support and make it optional */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(ip); + return 1; +} +#endif /* PPP_IDLETIMELIMIT */ + +#if DEMAND_SUPPORT +/******************************************************************** + * + * get_loop_output - get outgoing packets from the ppp device, + * and detect when we want to bring the real link up. + * Return value is 1 if we need to bring up the link, 0 otherwise. + */ +int get_loop_output(void) { + return 0; +} +#endif /* DEMAND_SUPPORT */ + +#if PPP_PROTOCOLNAME +/* List of protocol names, to make our messages a little more informative. 
*/ +struct protocol_list { + u_short proto; + const char *name; +} const protocol_list[] = { + { 0x21, "IP" }, + { 0x23, "OSI Network Layer" }, + { 0x25, "Xerox NS IDP" }, + { 0x27, "DECnet Phase IV" }, + { 0x29, "Appletalk" }, + { 0x2b, "Novell IPX" }, + { 0x2d, "VJ compressed TCP/IP" }, + { 0x2f, "VJ uncompressed TCP/IP" }, + { 0x31, "Bridging PDU" }, + { 0x33, "Stream Protocol ST-II" }, + { 0x35, "Banyan Vines" }, + { 0x39, "AppleTalk EDDP" }, + { 0x3b, "AppleTalk SmartBuffered" }, + { 0x3d, "Multi-Link" }, + { 0x3f, "NETBIOS Framing" }, + { 0x41, "Cisco Systems" }, + { 0x43, "Ascom Timeplex" }, + { 0x45, "Fujitsu Link Backup and Load Balancing (LBLB)" }, + { 0x47, "DCA Remote Lan" }, + { 0x49, "Serial Data Transport Protocol (PPP-SDTP)" }, + { 0x4b, "SNA over 802.2" }, + { 0x4d, "SNA" }, + { 0x4f, "IP6 Header Compression" }, + { 0x51, "KNX Bridging Data" }, + { 0x53, "Encryption" }, + { 0x55, "Individual Link Encryption" }, + { 0x57, "IPv6" }, + { 0x59, "PPP Muxing" }, + { 0x5b, "Vendor-Specific Network Protocol" }, + { 0x61, "RTP IPHC Full Header" }, + { 0x63, "RTP IPHC Compressed TCP" }, + { 0x65, "RTP IPHC Compressed non-TCP" }, + { 0x67, "RTP IPHC Compressed UDP 8" }, + { 0x69, "RTP IPHC Compressed RTP 8" }, + { 0x6f, "Stampede Bridging" }, + { 0x73, "MP+" }, + { 0xc1, "NTCITS IPI" }, + { 0xfb, "single-link compression" }, + { 0xfd, "Compressed Datagram" }, + { 0x0201, "802.1d Hello Packets" }, + { 0x0203, "IBM Source Routing BPDU" }, + { 0x0205, "DEC LANBridge100 Spanning Tree" }, + { 0x0207, "Cisco Discovery Protocol" }, + { 0x0209, "Netcs Twin Routing" }, + { 0x020b, "STP - Scheduled Transfer Protocol" }, + { 0x020d, "EDP - Extreme Discovery Protocol" }, + { 0x0211, "Optical Supervisory Channel Protocol" }, + { 0x0213, "Optical Supervisory Channel Protocol" }, + { 0x0231, "Luxcom" }, + { 0x0233, "Sigma Network Systems" }, + { 0x0235, "Apple Client Server Protocol" }, + { 0x0281, "MPLS Unicast" }, + { 0x0283, "MPLS Multicast" }, + { 0x0285, "IEEE p1284.4 standard - data packets" }, + { 0x0287, "ETSI TETRA Network Protocol Type 1" }, + { 0x0289, "Multichannel Flow Treatment Protocol" }, + { 0x2063, "RTP IPHC Compressed TCP No Delta" }, + { 0x2065, "RTP IPHC Context State" }, + { 0x2067, "RTP IPHC Compressed UDP 16" }, + { 0x2069, "RTP IPHC Compressed RTP 16" }, + { 0x4001, "Cray Communications Control Protocol" }, + { 0x4003, "CDPD Mobile Network Registration Protocol" }, + { 0x4005, "Expand accelerator protocol" }, + { 0x4007, "ODSICP NCP" }, + { 0x4009, "DOCSIS DLL" }, + { 0x400B, "Cetacean Network Detection Protocol" }, + { 0x4021, "Stacker LZS" }, + { 0x4023, "RefTek Protocol" }, + { 0x4025, "Fibre Channel" }, + { 0x4027, "EMIT Protocols" }, + { 0x405b, "Vendor-Specific Protocol (VSP)" }, + { 0x8021, "Internet Protocol Control Protocol" }, + { 0x8023, "OSI Network Layer Control Protocol" }, + { 0x8025, "Xerox NS IDP Control Protocol" }, + { 0x8027, "DECnet Phase IV Control Protocol" }, + { 0x8029, "Appletalk Control Protocol" }, + { 0x802b, "Novell IPX Control Protocol" }, + { 0x8031, "Bridging NCP" }, + { 0x8033, "Stream Protocol Control Protocol" }, + { 0x8035, "Banyan Vines Control Protocol" }, + { 0x803d, "Multi-Link Control Protocol" }, + { 0x803f, "NETBIOS Framing Control Protocol" }, + { 0x8041, "Cisco Systems Control Protocol" }, + { 0x8043, "Ascom Timeplex" }, + { 0x8045, "Fujitsu LBLB Control Protocol" }, + { 0x8047, "DCA Remote Lan Network Control Protocol (RLNCP)" }, + { 0x8049, "Serial Data Control Protocol (PPP-SDCP)" }, + { 0x804b, "SNA over 802.2 Control 
Protocol" }, + { 0x804d, "SNA Control Protocol" }, + { 0x804f, "IP6 Header Compression Control Protocol" }, + { 0x8051, "KNX Bridging Control Protocol" }, + { 0x8053, "Encryption Control Protocol" }, + { 0x8055, "Individual Link Encryption Control Protocol" }, + { 0x8057, "IPv6 Control Protocol" }, + { 0x8059, "PPP Muxing Control Protocol" }, + { 0x805b, "Vendor-Specific Network Control Protocol (VSNCP)" }, + { 0x806f, "Stampede Bridging Control Protocol" }, + { 0x8073, "MP+ Control Protocol" }, + { 0x80c1, "NTCITS IPI Control Protocol" }, + { 0x80fb, "Single Link Compression Control Protocol" }, + { 0x80fd, "Compression Control Protocol" }, + { 0x8207, "Cisco Discovery Protocol Control" }, + { 0x8209, "Netcs Twin Routing" }, + { 0x820b, "STP - Control Protocol" }, + { 0x820d, "EDPCP - Extreme Discovery Protocol Ctrl Prtcl" }, + { 0x8235, "Apple Client Server Protocol Control" }, + { 0x8281, "MPLSCP" }, + { 0x8285, "IEEE p1284.4 standard - Protocol Control" }, + { 0x8287, "ETSI TETRA TNP1 Control Protocol" }, + { 0x8289, "Multichannel Flow Treatment Protocol" }, + { 0xc021, "Link Control Protocol" }, + { 0xc023, "Password Authentication Protocol" }, + { 0xc025, "Link Quality Report" }, + { 0xc027, "Shiva Password Authentication Protocol" }, + { 0xc029, "CallBack Control Protocol (CBCP)" }, + { 0xc02b, "BACP Bandwidth Allocation Control Protocol" }, + { 0xc02d, "BAP" }, + { 0xc05b, "Vendor-Specific Authentication Protocol (VSAP)" }, + { 0xc081, "Container Control Protocol" }, + { 0xc223, "Challenge Handshake Authentication Protocol" }, + { 0xc225, "RSA Authentication Protocol" }, + { 0xc227, "Extensible Authentication Protocol" }, + { 0xc229, "Mitsubishi Security Info Exch Ptcl (SIEP)" }, + { 0xc26f, "Stampede Bridging Authorization Protocol" }, + { 0xc281, "Proprietary Authentication Protocol" }, + { 0xc283, "Proprietary Authentication Protocol" }, + { 0xc481, "Proprietary Node ID Authentication Protocol" }, + { 0, NULL }, +}; + +/* + * protocol_name - find a name for a PPP protocol. + */ +const char * protocol_name(int proto) { + const struct protocol_list *lp; + + for (lp = protocol_list; lp->proto != 0; ++lp) { + if (proto == lp->proto) { + return lp->name; + } + } + return NULL; +} +#endif /* PPP_PROTOCOLNAME */ + +#if PPP_STATS_SUPPORT + +/* ---- Note on PPP Stats support ---- + * + * The one willing link stats support should add the get_ppp_stats() + * to fetch statistics from lwIP. + */ + +/* + * reset_link_stats - "reset" stats when link goes up. + */ +void reset_link_stats(int u) { + if (!get_ppp_stats(u, &old_link_stats)) { + return; + } + gettimeofday(&start_time, NULL); +} + +/* + * update_link_stats - get stats at link termination. + */ +void update_link_stats(int u) { + struct timeval now; + char numbuf[32]; + + if (!get_ppp_stats(u, &link_stats) || gettimeofday(&now, NULL) < 0) { + return; + } + link_connect_time = now.tv_sec - start_time.tv_sec; + link_stats_valid = 1; + + link_stats.bytes_in -= old_link_stats.bytes_in; + link_stats.bytes_out -= old_link_stats.bytes_out; + link_stats.pkts_in -= old_link_stats.pkts_in; + link_stats.pkts_out -= old_link_stats.pkts_out; +} + +void print_link_stats() { + /* + * Print connect time and statistics. 
+ */ + if (link_stats_valid) { + int t = (link_connect_time + 5) / 6; /* 1/10ths of minutes */ + info("Connect time %d.%d minutes.", t/10, t%10); + info("Sent %u bytes, received %u bytes.", link_stats.bytes_out, link_stats.bytes_in); + link_stats_valid = 0; + } +} +#endif /* PPP_STATS_SUPPORT */ + +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c new file mode 100644 index 000000000..947f7ba8c --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c @@ -0,0 +1,427 @@ +/** + * @file + * Point To Point Protocol Sequential API module + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" + +#if LWIP_PPP_API /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/pppapi.h" +#include "lwip/priv/tcpip_priv.h" +#include "netif/ppp/pppoe.h" +#include "netif/ppp/pppol2tp.h" +#include "netif/ppp/pppos.h" + +#if LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL_DECLARE(PPPAPI_MSG, MEMP_NUM_PPP_API_MSG, sizeof(struct pppapi_msg), "PPPAPI_MSG") +#endif + +#define PPPAPI_VAR_REF(name) API_VAR_REF(name) +#define PPPAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct pppapi_msg, name) +#define PPPAPI_VAR_ALLOC(name) API_VAR_ALLOC_POOL(struct pppapi_msg, PPPAPI_MSG, name, ERR_MEM) +#define PPPAPI_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC_POOL(struct pppapi_msg, PPPAPI_MSG, name, NULL) +#define PPPAPI_VAR_FREE(name) API_VAR_FREE_POOL(PPPAPI_MSG, name) + +/** + * Call ppp_set_default() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_set_default(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + ppp_set_default(msg->msg.ppp); + return ERR_OK; +} + +/** + * Call ppp_set_default() in a thread-safe way by running that function inside the + * tcpip_thread context. 
+ */ +err_t +pppapi_set_default(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_set_default, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +#if PPP_NOTIFY_PHASE +/** + * Call ppp_set_notify_phase_callback() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_set_notify_phase_callback(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + ppp_set_notify_phase_callback(msg->msg.ppp, msg->msg.msg.setnotifyphasecb.notify_phase_cb); + return ERR_OK; +} + +/** + * Call ppp_set_notify_phase_callback() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.setnotifyphasecb.notify_phase_cb = notify_phase_cb; + err = tcpip_api_call(pppapi_do_ppp_set_notify_phase_callback, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} +#endif /* PPP_NOTIFY_PHASE */ + + +#if PPPOS_SUPPORT +/** + * Call pppos_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppos_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppos_create(msg->msg.msg.serialcreate.pppif, msg->msg.msg.serialcreate.output_cb, + msg->msg.msg.serialcreate.link_status_cb, msg->msg.msg.serialcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppos_create() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +ppp_pcb* +pppapi_pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.output_cb = output_cb; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppos_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOS_SUPPORT */ + + +#if PPPOE_SUPPORT +/** + * Call pppoe_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppoe_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppoe_create(msg->msg.msg.ethernetcreate.pppif, msg->msg.msg.ethernetcreate.ethif, + msg->msg.msg.ethernetcreate.service_name, msg->msg.msg.ethernetcreate.concentrator_name, + msg->msg.msg.ethernetcreate.link_status_cb, msg->msg.msg.ethernetcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppoe_create() in a thread-safe way by running that function inside the + * tcpip_thread context. 
+ */ +ppp_pcb* +pppapi_pppoe_create(struct netif *pppif, struct netif *ethif, const char *service_name, + const char *concentrator_name, ppp_link_status_cb_fn link_status_cb, + void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.ethif = ethif; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.service_name = service_name; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.concentrator_name = concentrator_name; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppoe_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOE_SUPPORT */ + + +#if PPPOL2TP_SUPPORT +/** + * Call pppol2tp_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppol2tp_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppol2tp_create(msg->msg.msg.l2tpcreate.pppif, + msg->msg.msg.l2tpcreate.netif, API_EXPR_REF(msg->msg.msg.l2tpcreate.ipaddr), msg->msg.msg.l2tpcreate.port, +#if PPPOL2TP_AUTH_SUPPORT + msg->msg.msg.l2tpcreate.secret, + msg->msg.msg.l2tpcreate.secret_len, +#else /* PPPOL2TP_AUTH_SUPPORT */ + NULL, + 0, +#endif /* PPPOL2TP_AUTH_SUPPORT */ + msg->msg.msg.l2tpcreate.link_status_cb, msg->msg.msg.l2tpcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppol2tp_create() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +ppp_pcb* +pppapi_pppol2tp_create(struct netif *pppif, struct netif *netif, ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); +#if !PPPOL2TP_AUTH_SUPPORT + LWIP_UNUSED_ARG(secret); + LWIP_UNUSED_ARG(secret_len); +#endif /* !PPPOL2TP_AUTH_SUPPORT */ + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.netif = netif; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.ipaddr = PPPAPI_VAR_REF(ipaddr); + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.port = port; +#if PPPOL2TP_AUTH_SUPPORT + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.secret = secret; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.secret_len = secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppol2tp_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOL2TP_SUPPORT */ + + +/** + * Call ppp_connect() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_connect(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_connect(msg->msg.ppp, msg->msg.msg.connect.holdoff); +} + +/** + * Call ppp_connect() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_connect(ppp_pcb *pcb, u16_t holdoff) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.connect.holdoff = holdoff; + err = tcpip_api_call(pppapi_do_ppp_connect, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +#if PPP_SERVER +/** + * Call ppp_listen() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_listen(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_listen(msg->msg.ppp); +} + +/** + * Call ppp_listen() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_listen(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_listen, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} +#endif /* PPP_SERVER */ + + +/** + * Call ppp_close() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_close(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_close(msg->msg.ppp, msg->msg.msg.close.nocarrier); +} + +/** + * Call ppp_close() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_close(ppp_pcb *pcb, u8_t nocarrier) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.close.nocarrier = nocarrier; + err = tcpip_api_call(pppapi_do_ppp_close, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +/** + * Call ppp_free() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_free(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_free(msg->msg.ppp); +} + +/** + * Call ppp_free() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_free(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_free, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +/** + * Call ppp_ioctl() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_ioctl(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_ioctl(msg->msg.ppp, msg->msg.msg.ioctl.cmd, msg->msg.msg.ioctl.arg); +} + +/** + * Call ppp_ioctl() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.ioctl.cmd = cmd; + PPPAPI_VAR_REF(msg).msg.msg.ioctl.arg = arg; + err = tcpip_api_call(pppapi_do_ppp_ioctl, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + +#endif /* LWIP_PPP_API */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c new file mode 100644 index 000000000..82d78c13a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c @@ -0,0 +1,66 @@ +/* + * pppcrypt.c - PPP/DES linkage for MS-CHAP and EAP SRP-SHA1 + * + * Extracted from chap_ms.c by James Carlson. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not necessary */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/pppcrypt.h" + + +static u_char pppcrypt_get_7bits(u_char *input, int startBit) { + unsigned int word; + + word = (unsigned)input[startBit / 8] << 8; + word |= (unsigned)input[startBit / 8 + 1]; + + word >>= 15 - (startBit % 8 + 7); + + return word & 0xFE; +} + +/* IN 56 bit DES key missing parity bits + * OUT 64 bit DES key with parity bits added + */ +void pppcrypt_56_to_64_bit_key(u_char *key, u_char * des_key) { + des_key[0] = pppcrypt_get_7bits(key, 0); + des_key[1] = pppcrypt_get_7bits(key, 7); + des_key[2] = pppcrypt_get_7bits(key, 14); + des_key[3] = pppcrypt_get_7bits(key, 21); + des_key[4] = pppcrypt_get_7bits(key, 28); + des_key[5] = pppcrypt_get_7bits(key, 35); + des_key[6] = pppcrypt_get_7bits(key, 42); + des_key[7] = pppcrypt_get_7bits(key, 49); +} + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c new file mode 100644 index 000000000..8ed2d638f --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c @@ -0,0 +1,1201 @@ +/***************************************************************************** +* pppoe.c - PPP Over Ethernet implementation for lwIP. +* +* Copyright (c) 2006 by Marc Boucher, Services Informatiques (MBSI) inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 06-01-01 Marc Boucher +* Ported to lwIP. +*****************************************************************************/ + + + +/* based on NetBSD: if_pppoe.c,v 1.64 2006/01/31 23:50:15 martin Exp */ + +/*- + * Copyright (c) 2002 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Martin Husemann . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "lwip/timeouts.h" +#include "lwip/memp.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" + +#include "netif/ethernet.h" +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/pppoe.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOE_IF, MEMP_NUM_PPPOE_INTERFACES, sizeof(struct pppoe_softc), "PPPOE_IF") + +/* Add a 16 bit unsigned value to a buffer pointed to by PTR */ +#define PPPOE_ADD_16(PTR, VAL) \ + *(PTR)++ = (u8_t)((VAL) / 256); \ + *(PTR)++ = (u8_t)((VAL) % 256) + +/* Add a complete PPPoE header to the buffer pointed to by PTR */ +#define PPPOE_ADD_HEADER(PTR, CODE, SESS, LEN) \ + *(PTR)++ = PPPOE_VERTYPE; \ + *(PTR)++ = (CODE); \ + PPPOE_ADD_16(PTR, SESS); \ + PPPOE_ADD_16(PTR, LEN) + +#define PPPOE_DISC_TIMEOUT (5*1000) /* base for quick timeout calculation */ +#define PPPOE_SLOW_RETRY (60*1000) /* persistent retry interval */ +#define PPPOE_DISC_MAXPADI 4 /* retry PADI four times (quickly) */ +#define PPPOE_DISC_MAXPADR 2 /* retry PADR twice */ + +#ifdef PPPOE_SERVER +#error "PPPOE_SERVER is not yet supported under lwIP!" 
+/* from if_spppsubr.c */ +#define IFF_PASSIVE IFF_LINK0 /* wait passively for connection */ +#endif + +#define PPPOE_ERRORSTRING_LEN 64 + + +/* callbacks called from PPP core */ +static err_t pppoe_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppoe_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol); +static void pppoe_connect(ppp_pcb *ppp, void *ctx); +static void pppoe_disconnect(ppp_pcb *ppp, void *ctx); +static err_t pppoe_destroy(ppp_pcb *ppp, void *ctx); + +/* management routines */ +static void pppoe_abort_connect(struct pppoe_softc *); +#if 0 /* UNUSED */ +static void pppoe_clear_softc(struct pppoe_softc *, const char *); +#endif /* UNUSED */ + +/* internal timeout handling */ +static void pppoe_timeout(void *); + +/* sending actual protocol controll packets */ +static err_t pppoe_send_padi(struct pppoe_softc *); +static err_t pppoe_send_padr(struct pppoe_softc *); +#ifdef PPPOE_SERVER +static err_t pppoe_send_pado(struct pppoe_softc *); +static err_t pppoe_send_pads(struct pppoe_softc *); +#endif +static err_t pppoe_send_padt(struct netif *, u_int, const u8_t *); + +/* internal helper functions */ +static err_t pppoe_xmit(struct pppoe_softc *sc, struct pbuf *pb); +static struct pppoe_softc* pppoe_find_softc_by_session(u_int session, struct netif *rcvif); +static struct pppoe_softc* pppoe_find_softc_by_hunique(u8_t *token, size_t len, struct netif *rcvif); + +/** linked list of created pppoe interfaces */ +static struct pppoe_softc *pppoe_softc_list; + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppoe_callbacks = { + pppoe_connect, +#if PPP_SERVER + NULL, +#endif /* PPP_SERVER */ + pppoe_disconnect, + pppoe_destroy, + pppoe_write, + pppoe_netif_output, + NULL, + NULL +}; + +/* + * Create a new PPP Over Ethernet (PPPoE) connection. + * + * Return 0 on success, an error code on failure. 
+ */ +ppp_pcb *pppoe_create(struct netif *pppif, + struct netif *ethif, + const char *service_name, const char *concentrator_name, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb *ppp; + struct pppoe_softc *sc; + LWIP_UNUSED_ARG(service_name); + LWIP_UNUSED_ARG(concentrator_name); + LWIP_ASSERT_CORE_LOCKED(); + + sc = (struct pppoe_softc *)LWIP_MEMPOOL_ALLOC(PPPOE_IF); + if (sc == NULL) { + return NULL; + } + + ppp = ppp_new(pppif, &pppoe_callbacks, sc, link_status_cb, ctx_cb); + if (ppp == NULL) { + LWIP_MEMPOOL_FREE(PPPOE_IF, sc); + return NULL; + } + + memset(sc, 0, sizeof(struct pppoe_softc)); + sc->pcb = ppp; + sc->sc_ethif = ethif; + /* put the new interface at the head of the list */ + sc->next = pppoe_softc_list; + pppoe_softc_list = sc; + return ppp; +} + +/* Called by PPP core */ +static err_t pppoe_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) { + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pbuf *ph; /* Ethernet + PPPoE header */ + err_t ret; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* skip address & flags */ + pbuf_remove_header(p, 2); + + ph = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN), PBUF_RAM); + if(!ph) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + pbuf_remove_header(ph, PPPOE_HEADERLEN); /* hide PPPoE header */ + pbuf_cat(ph, p); +#if MIB2_STATS + tot_len = ph->tot_len; +#endif /* MIB2_STATS */ + + ret = pppoe_xmit(sc, ph); + if (ret != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ret; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, (u16_t)tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Called by PPP core */ +static err_t pppoe_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol) { + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pbuf *pb; + u8_t *pl; + err_t err; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* @todo: try to use pbuf_header() here! 
*/ + pb = pbuf_alloc(PBUF_LINK, PPPOE_HEADERLEN + sizeof(protocol), PBUF_RAM); + if(!pb) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + pbuf_remove_header(pb, PPPOE_HEADERLEN); + + pl = (u8_t*)pb->payload; + PUTSHORT(protocol, pl); + + pbuf_chain(pb, p); +#if MIB2_STATS + tot_len = pb->tot_len; +#endif /* MIB2_STATS */ + + if( (err = pppoe_xmit(sc, pb)) != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return err; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +static err_t +pppoe_destroy(ppp_pcb *ppp, void *ctx) +{ + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pppoe_softc **copp, *freep; + LWIP_UNUSED_ARG(ppp); + + sys_untimeout(pppoe_timeout, sc); + + /* remove interface from list */ + for (copp = &pppoe_softc_list; (freep = *copp); copp = &freep->next) { + if (freep == sc) { + *copp = freep->next; + break; + } + } + +#ifdef PPPOE_TODO + if (sc->sc_concentrator_name) { + mem_free(sc->sc_concentrator_name); + } + if (sc->sc_service_name) { + mem_free(sc->sc_service_name); + } +#endif /* PPPOE_TODO */ + LWIP_MEMPOOL_FREE(PPPOE_IF, sc); + + return ERR_OK; +} + +/* + * Find the interface handling the specified session. + * Note: O(number of sessions open), this is a client-side only, mean + * and lean implementation, so number of open sessions typically should + * be 1. + */ +static struct pppoe_softc* pppoe_find_softc_by_session(u_int session, struct netif *rcvif) { + struct pppoe_softc *sc; + + for (sc = pppoe_softc_list; sc != NULL; sc = sc->next) { + if (sc->sc_state == PPPOE_STATE_SESSION + && sc->sc_session == session + && sc->sc_ethif == rcvif) { + return sc; + } + } + return NULL; +} + +/* Check host unique token passed and return appropriate softc pointer, + * or NULL if token is bogus. 
*/ +static struct pppoe_softc* pppoe_find_softc_by_hunique(u8_t *token, size_t len, struct netif *rcvif) { + struct pppoe_softc *sc, *t; + + if (len != sizeof sc) { + return NULL; + } + MEMCPY(&t, token, len); + + for (sc = pppoe_softc_list; sc != NULL; sc = sc->next) { + if (sc == t) { + break; + } + } + + if (sc == NULL) { + PPPDEBUG(LOG_DEBUG, ("pppoe: alien host unique tag, no session found\n")); + return NULL; + } + + /* should be safe to access *sc now */ + if (sc->sc_state < PPPOE_STATE_PADI_SENT || sc->sc_state >= PPPOE_STATE_SESSION) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": host unique tag found, but it belongs to a connection in state %d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, sc->sc_state)); + return NULL; + } + if (sc->sc_ethif != rcvif) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": wrong interface, not accepting host unique\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + return NULL; + } + return sc; +} + +/* analyze and handle a single received packet while not in session state */ +void +pppoe_disc_input(struct netif *netif, struct pbuf *pb) +{ + u16_t tag, len, off; + u16_t session, plen; + struct pppoe_softc *sc; +#if PPP_DEBUG + const char *err_msg = NULL; +#endif /* PPP_DEBUG */ + u8_t *ac_cookie; + u16_t ac_cookie_len; +#ifdef PPPOE_SERVER + u8_t *hunique; + size_t hunique_len; +#endif + struct pppoehdr *ph; + struct pppoetag pt; + int err; + struct eth_hdr *ethhdr; + + /* don't do anything if there is not a single PPPoE instance */ + if (pppoe_softc_list == NULL) { + pbuf_free(pb); + return; + } + + pb = pbuf_coalesce(pb, PBUF_RAW); + + ethhdr = (struct eth_hdr *)pb->payload; + + ac_cookie = NULL; + ac_cookie_len = 0; +#ifdef PPPOE_SERVER + hunique = NULL; + hunique_len = 0; +#endif + session = 0; + off = sizeof(struct eth_hdr) + sizeof(struct pppoehdr); + if (pb->len < off) { + PPPDEBUG(LOG_DEBUG, ("pppoe: packet too short: %d\n", pb->len)); + goto done; + } + + ph = (struct pppoehdr *) (ethhdr + 1); + if (ph->vertype != PPPOE_VERTYPE) { + PPPDEBUG(LOG_DEBUG, ("pppoe: unknown version/type packet: 0x%x\n", ph->vertype)); + goto done; + } + session = lwip_ntohs(ph->session); + plen = lwip_ntohs(ph->plen); + + if (plen > (pb->len - off)) { + PPPDEBUG(LOG_DEBUG, ("pppoe: packet content does not fit: data available = %d, packet size = %u\n", + pb->len - off, plen)); + goto done; + } + if(pb->tot_len == pb->len) { + u16_t framelen = off + plen; + if (framelen < pb->len) { + /* ignore trailing garbage */ + pb->tot_len = pb->len = framelen; + } + } + tag = 0; + len = 0; + sc = NULL; + while (off + sizeof(pt) <= pb->len) { + MEMCPY(&pt, (u8_t*)pb->payload + off, sizeof(pt)); + tag = lwip_ntohs(pt.tag); + len = lwip_ntohs(pt.len); + if (off + sizeof(pt) + len > pb->len) { + PPPDEBUG(LOG_DEBUG, ("pppoe: tag 0x%x len 0x%x is too long\n", tag, len)); + goto done; + } + switch (tag) { + case PPPOE_TAG_EOL: + goto breakbreak; + case PPPOE_TAG_SNAME: + break; /* ignored */ + case PPPOE_TAG_ACNAME: + break; /* ignored */ + case PPPOE_TAG_HUNIQUE: + if (sc != NULL) { + break; + } +#ifdef PPPOE_SERVER + hunique = (u8_t*)pb->payload + off + sizeof(pt); + hunique_len = len; +#endif + sc = pppoe_find_softc_by_hunique((u8_t*)pb->payload + off + sizeof(pt), len, netif); + break; + case PPPOE_TAG_ACCOOKIE: + if (ac_cookie == NULL) { + if (len > PPPOE_MAX_AC_COOKIE_LEN) { + PPPDEBUG(LOG_DEBUG, ("pppoe: AC cookie is too long: len = %d, max = %d\n", len, PPPOE_MAX_AC_COOKIE_LEN)); + goto done; + } + ac_cookie = (u8_t*)pb->payload + off + 
sizeof(pt); + ac_cookie_len = len; + } + break; +#if PPP_DEBUG + case PPPOE_TAG_SNAME_ERR: + err_msg = "SERVICE NAME ERROR"; + break; + case PPPOE_TAG_ACSYS_ERR: + err_msg = "AC SYSTEM ERROR"; + break; + case PPPOE_TAG_GENERIC_ERR: + err_msg = "GENERIC ERROR"; + break; +#endif /* PPP_DEBUG */ + default: + break; + } +#if PPP_DEBUG + if (err_msg != NULL) { + char error_tmp[PPPOE_ERRORSTRING_LEN]; + u16_t error_len = LWIP_MIN(len, sizeof(error_tmp)-1); + strncpy(error_tmp, (char*)pb->payload + off + sizeof(pt), error_len); + error_tmp[error_len] = '\0'; + if (sc) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": %s: %s\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err_msg, error_tmp)); + } else { + PPPDEBUG(LOG_DEBUG, ("pppoe: %s: %s\n", err_msg, error_tmp)); + } + } +#endif /* PPP_DEBUG */ + off += sizeof(pt) + len; + } + +breakbreak:; + switch (ph->code) { + case PPPOE_CODE_PADI: +#ifdef PPPOE_SERVER + /* + * got service name, concentrator name, and/or host unique. + * ignore if we have no interfaces with IFF_PASSIVE|IFF_UP. + */ + if (LIST_EMPTY(&pppoe_softc_list)) { + goto done; + } + LIST_FOREACH(sc, &pppoe_softc_list, sc_list) { + if (!(sc->sc_sppp.pp_if.if_flags & IFF_UP)) { + continue; + } + if (!(sc->sc_sppp.pp_if.if_flags & IFF_PASSIVE)) { + continue; + } + if (sc->sc_state == PPPOE_STATE_INITIAL) { + break; + } + } + if (sc == NULL) { + /* PPPDEBUG(LOG_DEBUG, ("pppoe: free passive interface is not found\n")); */ + goto done; + } + if (hunique) { + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + } + sc->sc_hunique = mem_malloc(hunique_len); + if (sc->sc_hunique == NULL) { + goto done; + } + sc->sc_hunique_len = hunique_len; + MEMCPY(sc->sc_hunique, hunique, hunique_len); + } + MEMCPY(&sc->sc_dest, eh->ether_shost, sizeof sc->sc_dest); + sc->sc_state = PPPOE_STATE_PADO_SENT; + pppoe_send_pado(sc); + break; +#endif /* PPPOE_SERVER */ + case PPPOE_CODE_PADR: +#ifdef PPPOE_SERVER + /* + * get sc from ac_cookie if IFF_PASSIVE + */ + if (ac_cookie == NULL) { + /* be quiet if there is not a single pppoe instance */ + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADR but not includes ac_cookie\n")); + goto done; + } + sc = pppoe_find_softc_by_hunique(ac_cookie, ac_cookie_len, netif); + if (sc == NULL) { + /* be quiet if there is not a single pppoe instance */ + if (!LIST_EMPTY(&pppoe_softc_list)) { + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADR but could not find request for it\n")); + } + goto done; + } + if (sc->sc_state != PPPOE_STATE_PADO_SENT) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": received unexpected PADR\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + goto done; + } + if (hunique) { + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + } + sc->sc_hunique = mem_malloc(hunique_len); + if (sc->sc_hunique == NULL) { + goto done; + } + sc->sc_hunique_len = hunique_len; + MEMCPY(sc->sc_hunique, hunique, hunique_len); + } + pppoe_send_pads(sc); + sc->sc_state = PPPOE_STATE_SESSION; + ppp_start(sc->pcb); /* notify upper layers */ + break; +#else + /* ignore, we are no access concentrator */ + goto done; +#endif /* PPPOE_SERVER */ + case PPPOE_CODE_PADO: + if (sc == NULL) { + /* be quiet if there is not a single pppoe instance */ + if (pppoe_softc_list != NULL) { + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADO but could not find request for it\n")); + } + goto done; + } + if (sc->sc_state != PPPOE_STATE_PADI_SENT) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": received unexpected PADO\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + goto 
done; + } + if (ac_cookie) { + sc->sc_ac_cookie_len = ac_cookie_len; + MEMCPY(sc->sc_ac_cookie, ac_cookie, ac_cookie_len); + } + MEMCPY(&sc->sc_dest, ethhdr->src.addr, sizeof(sc->sc_dest.addr)); + sys_untimeout(pppoe_timeout, sc); + sc->sc_padr_retried = 0; + sc->sc_state = PPPOE_STATE_PADR_SENT; + if ((err = pppoe_send_padr(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADR, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padr_retried), pppoe_timeout, sc); + break; + case PPPOE_CODE_PADS: + if (sc == NULL) { + goto done; + } + sc->sc_session = session; + sys_untimeout(pppoe_timeout, sc); + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": session 0x%x connected\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, session)); + sc->sc_state = PPPOE_STATE_SESSION; + ppp_start(sc->pcb); /* notify upper layers */ + break; + case PPPOE_CODE_PADT: + /* Don't disconnect here, we let the LCP Echo/Reply find the fact + * that PPP session is down. Asking the PPP stack to end the session + * require strict checking about the PPP phase to prevent endless + * disconnection loops. + */ +#if 0 /* UNUSED */ + if (sc == NULL) { /* PADT frames are rarely sent with a hunique tag, this is actually almost always true */ + goto done; + } + pppoe_clear_softc(sc, "received PADT"); +#endif /* UNUSED */ + break; + default: + if(sc) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": unknown code (0x%"X16_F") session = 0x%"X16_F"\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, + (u16_t)ph->code, session)); + } else { + PPPDEBUG(LOG_DEBUG, ("pppoe: unknown code (0x%"X16_F") session = 0x%"X16_F"\n", (u16_t)ph->code, session)); + } + break; + } + +done: + pbuf_free(pb); + return; +} + +void +pppoe_data_input(struct netif *netif, struct pbuf *pb) +{ + u16_t session, plen; + struct pppoe_softc *sc; + struct pppoehdr *ph; +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + u8_t shost[ETHER_ADDR_LEN]; +#endif + +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + MEMCPY(shost, ((struct eth_hdr *)pb->payload)->src.addr, sizeof(shost)); +#endif + if (pbuf_remove_header(pb, sizeof(struct eth_hdr)) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe_data_input: pbuf_remove_header failed\n")); + LINK_STATS_INC(link.lenerr); + goto drop; + } + + if (pb->len < sizeof(*ph)) { + PPPDEBUG(LOG_DEBUG, ("pppoe_data_input: could not get PPPoE header\n")); + goto drop; + } + ph = (struct pppoehdr *)pb->payload; + + if (ph->vertype != PPPOE_VERTYPE) { + PPPDEBUG(LOG_DEBUG, ("pppoe (data): unknown version/type packet: 0x%x\n", ph->vertype)); + goto drop; + } + if (ph->code != 0) { + goto drop; + } + + session = lwip_ntohs(ph->session); + sc = pppoe_find_softc_by_session(session, netif); + if (sc == NULL) { +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + PPPDEBUG(LOG_DEBUG, ("pppoe: input for unknown session 0x%x, sending PADT\n", session)); + pppoe_send_padt(netif, session, shost); +#endif + goto drop; + } + + plen = lwip_ntohs(ph->plen); + + if (pbuf_remove_header(pb, PPPOE_HEADERLEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe_data_input: pbuf_remove_header PPPOE_HEADERLEN failed\n")); + LINK_STATS_INC(link.lenerr); + goto drop; + } + + PPPDEBUG(LOG_DEBUG, ("pppoe_data_input: %c%c%"U16_F": pkthdr.len=%d, pppoe.len=%d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, + pb->len, plen)); + + if (pb->tot_len < plen) { + goto drop; + } + + /* Dispatch the packet 
thereby consuming it. */ + ppp_input(sc->pcb, pb); + return; + +drop: + pbuf_free(pb); +} + +static err_t +pppoe_output(struct pppoe_softc *sc, struct pbuf *pb) +{ + struct eth_hdr *ethhdr; + u16_t etype; + err_t res; + + /* make room for Ethernet header - should not fail */ + if (pbuf_add_header(pb, sizeof(struct eth_hdr)) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe: %c%c%"U16_F": pppoe_output: could not allocate room for Ethernet header\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + ethhdr = (struct eth_hdr *)pb->payload; + etype = sc->sc_state == PPPOE_STATE_SESSION ? ETHTYPE_PPPOE : ETHTYPE_PPPOEDISC; + ethhdr->type = lwip_htons(etype); + MEMCPY(&ethhdr->dest.addr, &sc->sc_dest.addr, sizeof(ethhdr->dest.addr)); + MEMCPY(&ethhdr->src.addr, &sc->sc_ethif->hwaddr, sizeof(ethhdr->src.addr)); + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F" (%x) state=%d, session=0x%x output -> %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F", len=%d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, etype, + sc->sc_state, sc->sc_session, + sc->sc_dest.addr[0], sc->sc_dest.addr[1], sc->sc_dest.addr[2], sc->sc_dest.addr[3], sc->sc_dest.addr[4], sc->sc_dest.addr[5], + pb->tot_len)); + + res = sc->sc_ethif->linkoutput(sc->sc_ethif, pb); + + pbuf_free(pb); + + return res; +} + +static err_t +pppoe_send_padi(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + int len; +#ifdef PPPOE_TODO + int l1 = 0, l2 = 0; /* XXX: gcc */ +#endif /* PPPOE_TODO */ + + /* calculate length of frame (excluding ethernet header + pppoe header) */ + len = 2 + 2 + 2 + 2 + sizeof sc; /* service name tag is required, host unique is send too */ +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + l1 = (int)strlen(sc->sc_service_name); + len += l1; + } + if (sc->sc_concentrator_name != NULL) { + l2 = (int)strlen(sc->sc_concentrator_name); + len += 2 + 2 + l2; + } +#endif /* PPPOE_TODO */ + LWIP_ASSERT("sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff", + sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff); + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADI, 0, (u16_t)len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, sc->sc_service_name, l1); + p += l1; + } else +#endif /* PPPOE_TODO */ + { + PPPOE_ADD_16(p, 0); + } +#ifdef PPPOE_TODO + if (sc->sc_concentrator_name != NULL) { + PPPOE_ADD_16(p, PPPOE_TAG_ACNAME); + PPPOE_ADD_16(p, l2); + MEMCPY(p, sc->sc_concentrator_name, l2); + p += l2; + } +#endif /* PPPOE_TODO */ + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof sc); + + /* send pkt */ + return pppoe_output(sc, pb); +} + +static void +pppoe_timeout(void *arg) +{ + u32_t retry_wait; + int err; + struct pppoe_softc *sc = (struct pppoe_softc*)arg; + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": timeout\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + + switch (sc->sc_state) { + case PPPOE_STATE_PADI_SENT: + /* + * We have two basic ways of retrying: + * - Quick retry mode: try a few times in short sequence + * - Slow retry mode: we already had a connection successfully + * established and will try infinitely
(without user + * intervention) + * We only enter slow retry mode if IFF_LINK1 (aka autodial) + * is not set. + */ + if (sc->sc_padi_retried < 0xff) { + sc->sc_padi_retried++; + } + if (!sc->pcb->settings.persist && sc->sc_padi_retried >= PPPOE_DISC_MAXPADI) { +#if 0 + if ((sc->sc_sppp.pp_if.if_flags & IFF_LINK1) == 0) { + /* slow retry mode */ + retry_wait = PPPOE_SLOW_RETRY; + } else +#endif + { + pppoe_abort_connect(sc); + return; + } + } + /* initialize for quick retry mode */ + retry_wait = LWIP_MIN(PPPOE_DISC_TIMEOUT * sc->sc_padi_retried, PPPOE_SLOW_RETRY); + if ((err = pppoe_send_padi(sc)) != 0) { + sc->sc_padi_retried--; + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to transmit PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(retry_wait, pppoe_timeout, sc); + break; + + case PPPOE_STATE_PADR_SENT: + sc->sc_padr_retried++; + if (sc->sc_padr_retried >= PPPOE_DISC_MAXPADR) { + MEMCPY(&sc->sc_dest, ethbroadcast.addr, sizeof(sc->sc_dest)); + sc->sc_state = PPPOE_STATE_PADI_SENT; + sc->sc_padr_retried = 0; + if ((err = pppoe_send_padi(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padi_retried), pppoe_timeout, sc); + return; + } + if ((err = pppoe_send_padr(sc)) != 0) { + sc->sc_padr_retried--; + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADR, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padr_retried), pppoe_timeout, sc); + break; + default: + return; /* all done, work in peace */ + } +} + +/* Start a connection (i.e. 
initiate discovery phase) */ +static void +pppoe_connect(ppp_pcb *ppp, void *ctx) +{ + err_t err; + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + lcp_options *lcp_wo; + lcp_options *lcp_ao; +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_options *ipcp_wo; + ipcp_options *ipcp_ao; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + sc->sc_session = 0; + sc->sc_ac_cookie_len = 0; + sc->sc_padi_retried = 0; + sc->sc_padr_retried = 0; + /* changed to real address later */ + MEMCPY(&sc->sc_dest, ethbroadcast.addr, sizeof(sc->sc_dest)); +#ifdef PPPOE_SERVER + /* wait PADI if IFF_PASSIVE */ + if ((sc->sc_sppp.pp_if.if_flags & IFF_PASSIVE)) { + return 0; + } +#endif + + lcp_wo = &ppp->lcp_wantoptions; + lcp_wo->mru = sc->sc_ethif->mtu-PPPOE_HEADERLEN-2; /* two byte PPP protocol discriminator, then IP data */ + lcp_wo->neg_asyncmap = 0; + lcp_wo->neg_pcompression = 0; + lcp_wo->neg_accompression = 0; + lcp_wo->passive = 0; + lcp_wo->silent = 0; + + lcp_ao = &ppp->lcp_allowoptions; + lcp_ao->mru = sc->sc_ethif->mtu-PPPOE_HEADERLEN-2; /* two byte PPP protocol discriminator, then IP data */ + lcp_ao->neg_asyncmap = 0; + lcp_ao->neg_pcompression = 0; + lcp_ao->neg_accompression = 0; + +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_wo = &ppp->ipcp_wantoptions; + ipcp_wo->neg_vj = 0; + ipcp_wo->old_vj = 0; + + ipcp_ao = &ppp->ipcp_allowoptions; + ipcp_ao->neg_vj = 0; + ipcp_ao->old_vj = 0; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + /* save state, in case we fail to send PADI */ + sc->sc_state = PPPOE_STATE_PADI_SENT; + if ((err = pppoe_send_padi(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + } + sys_timeout(PPPOE_DISC_TIMEOUT, pppoe_timeout, sc); +} + +/* disconnect */ +static void +pppoe_disconnect(ppp_pcb *ppp, void *ctx) +{ + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": disconnecting\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + if (sc->sc_state == PPPOE_STATE_SESSION) { + pppoe_send_padt(sc->sc_ethif, sc->sc_session, (const u8_t *)&sc->sc_dest); + } + + /* stop any timer, disconnect can be called while initiating is in progress */ + sys_untimeout(pppoe_timeout, sc); + sc->sc_state = PPPOE_STATE_INITIAL; +#ifdef PPPOE_SERVER + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + sc->sc_hunique = NULL; /* probably not necessary, if state is initial we shouldn't have to access hunique anyway */ + } + sc->sc_hunique_len = 0; /* probably not necessary, if state is initial we shouldn't have to access hunique anyway */ +#endif + ppp_link_end(ppp); /* notify upper layers */ + return; +} + +/* Connection attempt aborted */ +static void +pppoe_abort_connect(struct pppoe_softc *sc) +{ + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": could not establish connection\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + sc->sc_state = PPPOE_STATE_INITIAL; + ppp_link_failed(sc->pcb); /* notify upper layers */ +} + +/* Send a PADR packet */ +static err_t +pppoe_send_padr(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len; +#ifdef PPPOE_TODO + size_t l1 = 0; /* XXX: gcc */ +#endif /* PPPOE_TODO */ + + len = 2 + 2 + 2 + 2 + sizeof(sc); /* service name, host unique */ +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { /* service name tag maybe empty */ + l1 = strlen(sc->sc_service_name); + len += l1; + } +#endif /* PPPOE_TODO */ + if (sc->sc_ac_cookie_len > 0) { + len += 2 + 2 + 
sc->sc_ac_cookie_len; /* AC cookie */ + } + LWIP_ASSERT("sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff", + sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff); + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADR, 0, len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, sc->sc_service_name, l1); + p += l1; + } else +#endif /* PPPOE_TODO */ + { + PPPOE_ADD_16(p, 0); + } + if (sc->sc_ac_cookie_len > 0) { + PPPOE_ADD_16(p, PPPOE_TAG_ACCOOKIE); + PPPOE_ADD_16(p, sc->sc_ac_cookie_len); + MEMCPY(p, sc->sc_ac_cookie, sc->sc_ac_cookie_len); + p += sc->sc_ac_cookie_len; + } + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof sc); + + return pppoe_output(sc, pb); +} + +/* send a PADT packet */ +static err_t +pppoe_send_padt(struct netif *outgoing_if, u_int session, const u8_t *dest) +{ + struct pbuf *pb; + struct eth_hdr *ethhdr; + err_t res; + u8_t *p; + + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + if (pbuf_add_header(pb, sizeof(struct eth_hdr))) { + PPPDEBUG(LOG_ERR, ("pppoe: pppoe_send_padt: could not allocate room for PPPoE header\n")); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + ethhdr = (struct eth_hdr *)pb->payload; + ethhdr->type = PP_HTONS(ETHTYPE_PPPOEDISC); + MEMCPY(&ethhdr->dest.addr, dest, sizeof(ethhdr->dest.addr)); + MEMCPY(&ethhdr->src.addr, &outgoing_if->hwaddr, sizeof(ethhdr->src.addr)); + + p = (u8_t*)(ethhdr + 1); + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADT, session, 0); + + res = outgoing_if->linkoutput(outgoing_if, pb); + + pbuf_free(pb); + + return res; +} + +#ifdef PPPOE_SERVER +static err_t +pppoe_send_pado(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len; + + /* calc length */ + len = 0; + /* include ac_cookie */ + len += 2 + 2 + sizeof(sc); + /* include hunique */ + len += 2 + 2 + sc->sc_hunique_len; + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADO, 0, len); + PPPOE_ADD_16(p, PPPOE_TAG_ACCOOKIE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof(sc)); + p += sizeof(sc); + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sc->sc_hunique_len); + MEMCPY(p, sc->sc_hunique, sc->sc_hunique_len); + return pppoe_output(sc, pb); +} + +static err_t +pppoe_send_pads(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len, l1 = 0; /* XXX: gcc */ + + sc->sc_session = mono_time.tv_sec % 0xff + 1; + /* calc length */ + len = 0; + /* include hunique */ + len += 2 + 2 + 2 + 2 + sc->sc_hunique_len; /* service name, host unique*/ + if (sc->sc_service_name != NULL) { /* service name tag maybe empty */ + l1 = strlen(sc->sc_service_name); + len += l1; + } + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADS, sc->sc_session, len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p,
sc->sc_service_name, l1); + p += l1; + } else { + PPPOE_ADD_16(p, 0); + } + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sc->sc_hunique_len); + MEMCPY(p, sc->sc_hunique, sc->sc_hunique_len); + return pppoe_output(sc, pb); +} +#endif + +static err_t +pppoe_xmit(struct pppoe_softc *sc, struct pbuf *pb) +{ + u8_t *p; + size_t len; + + len = pb->tot_len; + + /* make room for PPPoE header - should not fail */ + if (pbuf_add_header(pb, PPPOE_HEADERLEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe: %c%c%"U16_F": pppoe_xmit: could not allocate room for PPPoE header\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, 0, sc->sc_session, len); + + return pppoe_output(sc, pb); +} + +#if 0 /*def PFIL_HOOKS*/ +static int +pppoe_ifattach_hook(void *arg, struct pbuf **mp, struct netif *ifp, int dir) +{ + struct pppoe_softc *sc; + int s; + + if (mp != (struct pbuf **)PFIL_IFNET_DETACH) { + return 0; + } + + LIST_FOREACH(sc, &pppoe_softc_list, sc_list) { + if (sc->sc_ethif != ifp) { + continue; + } + if (sc->sc_sppp.pp_if.if_flags & IFF_UP) { + sc->sc_sppp.pp_if.if_flags &= ~(IFF_UP|IFF_RUNNING); + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": ethernet interface detached, going down\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + } + sc->sc_ethif = NULL; + pppoe_clear_softc(sc, "ethernet interface detached"); + } + + return 0; +} +#endif + +#if 0 /* UNUSED */ +static void +pppoe_clear_softc(struct pppoe_softc *sc, const char *message) +{ + LWIP_UNUSED_ARG(message); + + /* stop timer */ + sys_untimeout(pppoe_timeout, sc); + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": session 0x%x terminated, %s\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, sc->sc_session, message)); + sc->sc_state = PPPOE_STATE_INITIAL; + ppp_link_end(sc->pcb); /* notify upper layers - /!\ dangerous /!\ - see pppoe_disc_input() */ +} +#endif /* UNUSED */ +#endif /* PPP_SUPPORT && PPPOE_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c new file mode 100644 index 000000000..4c4557fca --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c @@ -0,0 +1,1159 @@ +/** + * @file + * Network Point to Point Protocol over Layer 2 Tunneling Protocol program file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +/* + * L2TP Support status: + * + * Supported: + * - L2TPv2 (PPP over L2TP, a.k.a. UDP tunnels) + * - LAC + * + * Not supported: + * - LNS (require PPP server support) + * - L2TPv3 ethernet pseudowires + * - L2TPv3 VLAN pseudowire + * - L2TPv3 PPP pseudowires + * - L2TPv3 IP encapsulation + * - L2TPv3 IP pseudowire + * - L2TP tunnel switching - http://tools.ietf.org/html/draft-ietf-l2tpext-tunnel-switching-08 + * - Multiple tunnels per UDP socket, as well as multiple sessions per tunnel + * - Hidden AVPs + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOL2TP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/memp.h" +#include "lwip/netif.h" +#include "lwip/udp.h" +#include "lwip/snmp.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/pppol2tp.h" +#include "netif/ppp/pppcrypt.h" +#include "netif/ppp/magic.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOL2TP_PCB, MEMP_NUM_PPPOL2TP_INTERFACES, sizeof(pppol2tp_pcb), "PPPOL2TP_PCB") + +/* callbacks called from PPP core */ +static err_t pppol2tp_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppol2tp_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol); +static err_t pppol2tp_destroy(ppp_pcb *ppp, void *ctx); /* Destroy a L2TP control block */ +static void pppol2tp_connect(ppp_pcb *ppp, void *ctx); /* Be a LAC, connect to a LNS. */ +static void pppol2tp_disconnect(ppp_pcb *ppp, void *ctx); /* Disconnect */ + + /* Prototypes for procedures local to this file. */ +static void pppol2tp_input(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); +static void pppol2tp_dispatch_control_packet(pppol2tp_pcb *l2tp, u16_t port, struct pbuf *p, u16_t ns, u16_t nr); +static void pppol2tp_timeout(void *arg); +static void pppol2tp_abort_connect(pppol2tp_pcb *l2tp); +static err_t pppol2tp_send_sccrq(pppol2tp_pcb *l2tp); +static err_t pppol2tp_send_scccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_icrq(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_iccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_zlb(pppol2tp_pcb *l2tp, u16_t ns, u16_t nr); +static err_t pppol2tp_send_stopccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_xmit(pppol2tp_pcb *l2tp, struct pbuf *pb); +static err_t pppol2tp_udp_send(pppol2tp_pcb *l2tp, struct pbuf *pb); + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppol2tp_callbacks = { + pppol2tp_connect, +#if PPP_SERVER + NULL, +#endif /* PPP_SERVER */ + pppol2tp_disconnect, + pppol2tp_destroy, + pppol2tp_write, + pppol2tp_netif_output, + NULL, + NULL +}; + + +/* Create a new L2TP session. 
*/ +ppp_pcb *pppol2tp_create(struct netif *pppif, + struct netif *netif, const ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { + ppp_pcb *ppp; + pppol2tp_pcb *l2tp; + struct udp_pcb *udp; +#if !PPPOL2TP_AUTH_SUPPORT + LWIP_UNUSED_ARG(secret); + LWIP_UNUSED_ARG(secret_len); +#endif /* !PPPOL2TP_AUTH_SUPPORT */ + + if (ipaddr == NULL) { + goto ipaddr_check_failed; + } + + l2tp = (pppol2tp_pcb *)LWIP_MEMPOOL_ALLOC(PPPOL2TP_PCB); + if (l2tp == NULL) { + goto memp_malloc_l2tp_failed; + } + + udp = udp_new_ip_type(IP_GET_TYPE(ipaddr)); + if (udp == NULL) { + goto udp_new_failed; + } + udp_recv(udp, pppol2tp_input, l2tp); + + ppp = ppp_new(pppif, &pppol2tp_callbacks, l2tp, link_status_cb, ctx_cb); + if (ppp == NULL) { + goto ppp_new_failed; + } + + memset(l2tp, 0, sizeof(pppol2tp_pcb)); + l2tp->phase = PPPOL2TP_STATE_INITIAL; + l2tp->ppp = ppp; + l2tp->udp = udp; + l2tp->netif = netif; + ip_addr_copy(l2tp->remote_ip, *ipaddr); + l2tp->remote_port = port; +#if PPPOL2TP_AUTH_SUPPORT + l2tp->secret = secret; + l2tp->secret_len = secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return ppp; + +ppp_new_failed: + udp_remove(udp); +udp_new_failed: + LWIP_MEMPOOL_FREE(PPPOL2TP_PCB, l2tp); +memp_malloc_l2tp_failed: +ipaddr_check_failed: + return NULL; +} + +/* Called by PPP core */ +static err_t pppol2tp_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + struct pbuf *ph; /* UDP + L2TP header */ + err_t ret; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + ph = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(PPPOL2TP_OUTPUT_DATA_HEADER_LEN), PBUF_RAM); + if(!ph) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + pbuf_remove_header(ph, PPPOL2TP_OUTPUT_DATA_HEADER_LEN); /* hide L2TP header */ + pbuf_cat(ph, p); +#if MIB2_STATS + tot_len = ph->tot_len; +#endif /* MIB2_STATS */ + + ret = pppol2tp_xmit(l2tp, ph); + if (ret != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ret; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, (u16_t)tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Called by PPP core */ +static err_t pppol2tp_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + struct pbuf *pb; + u8_t *pl; + err_t err; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* @todo: try to use pbuf_header() here! 
*/ + pb = pbuf_alloc(PBUF_TRANSPORT, PPPOL2TP_OUTPUT_DATA_HEADER_LEN + sizeof(protocol), PBUF_RAM); + if(!pb) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + pbuf_remove_header(pb, PPPOL2TP_OUTPUT_DATA_HEADER_LEN); + + pl = (u8_t*)pb->payload; + PUTSHORT(protocol, pl); + + pbuf_chain(pb, p); +#if MIB2_STATS + tot_len = pb->tot_len; +#endif /* MIB2_STATS */ + + if( (err = pppol2tp_xmit(l2tp, pb)) != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return err; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Destroy a L2TP control block */ +static err_t pppol2tp_destroy(ppp_pcb *ppp, void *ctx) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + + sys_untimeout(pppol2tp_timeout, l2tp); + udp_remove(l2tp->udp); + LWIP_MEMPOOL_FREE(PPPOL2TP_PCB, l2tp); + return ERR_OK; +} + +/* Be a LAC, connect to a LNS. */ +static void pppol2tp_connect(ppp_pcb *ppp, void *ctx) { + err_t err; + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + lcp_options *lcp_wo; + lcp_options *lcp_ao; +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_options *ipcp_wo; + ipcp_options *ipcp_ao; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + l2tp->tunnel_port = l2tp->remote_port; + l2tp->our_ns = 0; + l2tp->peer_nr = 0; + l2tp->peer_ns = 0; + l2tp->source_tunnel_id = 0; + l2tp->remote_tunnel_id = 0; + l2tp->source_session_id = 0; + l2tp->remote_session_id = 0; + /* l2tp->*_retried are cleared when used */ + + lcp_wo = &ppp->lcp_wantoptions; + lcp_wo->mru = PPPOL2TP_DEFMRU; + lcp_wo->neg_asyncmap = 0; + lcp_wo->neg_pcompression = 0; + lcp_wo->neg_accompression = 0; + lcp_wo->passive = 0; + lcp_wo->silent = 0; + + lcp_ao = &ppp->lcp_allowoptions; + lcp_ao->mru = PPPOL2TP_DEFMRU; + lcp_ao->neg_asyncmap = 0; + lcp_ao->neg_pcompression = 0; + lcp_ao->neg_accompression = 0; + +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_wo = &ppp->ipcp_wantoptions; + ipcp_wo->neg_vj = 0; + ipcp_wo->old_vj = 0; + + ipcp_ao = &ppp->ipcp_allowoptions; + ipcp_ao->neg_vj = 0; + ipcp_ao->old_vj = 0; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + /* Listen to a random source port, we need to do that instead of using udp_connect() + * because the L2TP LNS might answer with its own random source port (!= 1701) + */ +#if LWIP_IPV6 + if (IP_IS_V6_VAL(l2tp->udp->local_ip)) { + udp_bind(l2tp->udp, IP6_ADDR_ANY, 0); + } else +#endif /* LWIP_IPV6 */ + udp_bind(l2tp->udp, IP_ADDR_ANY, 0); + +#if PPPOL2TP_AUTH_SUPPORT + /* Generate random vector */ + if (l2tp->secret != NULL) { + magic_random_bytes(l2tp->secret_rv, sizeof(l2tp->secret_rv)); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + do { + l2tp->remote_tunnel_id = magic(); + } while(l2tp->remote_tunnel_id == 0); + /* save state, in case we fail to send SCCRQ */ + l2tp->sccrq_retried = 0; + l2tp->phase = PPPOL2TP_STATE_SCCRQ_SENT; + if ((err = pppol2tp_send_sccrq(l2tp)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCRQ, error=%d\n", err)); + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); +} + +/* Disconnect */ +static void pppol2tp_disconnect(ppp_pcb *ppp, void *ctx) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + + l2tp->our_ns++; + pppol2tp_send_stopccn(l2tp, l2tp->our_ns); + + /* stop any timer, disconnect can be called while initiating is in progress */ + sys_untimeout(pppol2tp_timeout, l2tp); + l2tp->phase = 
PPPOL2TP_STATE_INITIAL; + ppp_link_end(ppp); /* notify upper layers */ +} + +/* UDP Callback for incoming IPv4 L2TP frames */ +static void pppol2tp_input(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb*)arg; + u16_t hflags, hlen, len=0, tunnel_id=0, session_id=0, ns=0, nr=0, offset=0; + u8_t *inp; + LWIP_UNUSED_ARG(pcb); + + /* we can't unbound a UDP pcb, thus we can still receive UDP frames after the link is closed */ + if (l2tp->phase < PPPOL2TP_STATE_SCCRQ_SENT) { + goto free_and_return; + } + + if (!ip_addr_cmp(&l2tp->remote_ip, addr)) { + goto free_and_return; + } + + /* discard packet if port mismatch, but only if we received a SCCRP */ + if (l2tp->phase > PPPOL2TP_STATE_SCCRQ_SENT && l2tp->tunnel_port != port) { + goto free_and_return; + } + + /* printf("-----------\nL2TP INPUT, %d\n", p->len); */ + + /* L2TP header */ + if (p->len < sizeof(hflags) + sizeof(tunnel_id) + sizeof(session_id) ) { + goto packet_too_short; + } + + inp = (u8_t*)p->payload; + GETSHORT(hflags, inp); + + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL) { + /* check mandatory flags for a control packet */ + if ( (hflags & PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY) != PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: mandatory header flags for control packet not set\n")); + goto free_and_return; + } + /* check forbidden flags for a control packet */ + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL_FORBIDDEN) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: forbidden header flags for control packet found\n")); + goto free_and_return; + } + } else { + /* check mandatory flags for a data packet */ + if ( (hflags & PPPOL2TP_HEADERFLAG_DATA_MANDATORY) != PPPOL2TP_HEADERFLAG_DATA_MANDATORY) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: mandatory header flags for data packet not set\n")); + goto free_and_return; + } + } + + /* Expected header size */ + hlen = sizeof(hflags) + sizeof(tunnel_id) + sizeof(session_id); + if (hflags & PPPOL2TP_HEADERFLAG_LENGTH) { + hlen += sizeof(len); + } + if (hflags & PPPOL2TP_HEADERFLAG_SEQUENCE) { + hlen += sizeof(ns) + sizeof(nr); + } + if (hflags & PPPOL2TP_HEADERFLAG_OFFSET) { + hlen += sizeof(offset); + } + if (p->len < hlen) { + goto packet_too_short; + } + + if (hflags & PPPOL2TP_HEADERFLAG_LENGTH) { + GETSHORT(len, inp); + if (p->len < len || len < hlen) { + goto packet_too_short; + } + } + GETSHORT(tunnel_id, inp); + GETSHORT(session_id, inp); + if (hflags & PPPOL2TP_HEADERFLAG_SEQUENCE) { + GETSHORT(ns, inp); + GETSHORT(nr, inp); + } + if (hflags & PPPOL2TP_HEADERFLAG_OFFSET) { + GETSHORT(offset, inp) + if (offset > 4096) { /* don't be fooled with large offset which might overflow hlen */ + PPPDEBUG(LOG_DEBUG, ("pppol2tp: strange packet received, offset=%d\n", offset)); + goto free_and_return; + } + hlen += offset; + if (p->len < hlen) { + goto packet_too_short; + } + INCPTR(offset, inp); + } + + /* printf("HLEN = %d\n", hlen); */ + + /* skip L2TP header */ + if (pbuf_remove_header(p, hlen) != 0) { + goto free_and_return; + } + + /* printf("LEN=%d, TUNNEL_ID=%d, SESSION_ID=%d, NS=%d, NR=%d, OFFSET=%d\n", len, tunnel_id, session_id, ns, nr, offset); */ + PPPDEBUG(LOG_DEBUG, ("pppol2tp: input packet, len=%"U16_F", tunnel=%"U16_F", session=%"U16_F", ns=%"U16_F", nr=%"U16_F"\n", + len, tunnel_id, session_id, ns, nr)); + + /* Control packet */ + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL) { + pppol2tp_dispatch_control_packet(l2tp, port, p, ns, nr); + goto free_and_return; + } + + /* Data packet */ + if(l2tp->phase != 
PPPOL2TP_STATE_DATA) { + goto free_and_return; + } + if(tunnel_id != l2tp->remote_tunnel_id) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: tunnel ID mismatch, assigned=%d, received=%d\n", l2tp->remote_tunnel_id, tunnel_id)); + goto free_and_return; + } + if(session_id != l2tp->remote_session_id) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: session ID mismatch, assigned=%d, received=%d\n", l2tp->remote_session_id, session_id)); + goto free_and_return; + } + /* + * skip address & flags if necessary + * + * RFC 2661 does not specify whether the PPP frame in the L2TP payload should + * have a HDLC header or not. We handle both cases for compatibility. + */ + if (p->len >= 2) { + GETSHORT(hflags, inp); + if (hflags == 0xff03) { + pbuf_remove_header(p, 2); + } + } + /* Dispatch the packet thereby consuming it. */ + ppp_input(l2tp->ppp, p); + return; + +packet_too_short: + PPPDEBUG(LOG_DEBUG, ("pppol2tp: packet too short: %d\n", p->len)); +free_and_return: + pbuf_free(p); +} + +/* L2TP Control packet entry point */ +static void pppol2tp_dispatch_control_packet(pppol2tp_pcb *l2tp, u16_t port, struct pbuf *p, u16_t ns, u16_t nr) { + u8_t *inp; + u16_t avplen, avpflags, vendorid, attributetype, messagetype=0; + err_t err; +#if PPPOL2TP_AUTH_SUPPORT + lwip_md5_context md5_ctx; + u8_t md5_hash[16]; + u8_t challenge_id = 0; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* printf("L2TP CTRL INPUT, ns=%d, nr=%d, len=%d\n", ns, nr, p->len); */ + + /* Drop unexpected packet */ + if (ns != l2tp->peer_ns) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: drop unexpected packet: received NS=%d, expected NS=%d\n", ns, l2tp->peer_ns)); + /* + * In order to ensure that all messages are acknowledged properly + * (particularly in the case of a lost ZLB ACK message), receipt + * of duplicate messages MUST be acknowledged. + * + * In this very special case we Ack a packet we previously received. + * Therefore our NS is the NR we just received. And our NR is the + * NS we just received plus one. 
+ */ + if ((s16_t)(ns - l2tp->peer_ns) < 0) { + pppol2tp_send_zlb(l2tp, nr, ns+1); + } + return; + } + + l2tp->peer_nr = nr; + + /* Handle the special case of the ICCN acknowledge */ + if (l2tp->phase == PPPOL2TP_STATE_ICCN_SENT && (s16_t)(l2tp->peer_nr - l2tp->our_ns) > 0) { + l2tp->phase = PPPOL2TP_STATE_DATA; + sys_untimeout(pppol2tp_timeout, l2tp); + ppp_start(l2tp->ppp); /* notify upper layers */ + } + + /* ZLB packets */ + if (p->tot_len == 0) { + return; + } + /* A ZLB packet does not consume a NS slot thus we don't record the NS value for ZLB packets */ + l2tp->peer_ns = ns+1; + + p = pbuf_coalesce(p, PBUF_RAW); + inp = (u8_t*)p->payload; + /* Decode AVPs */ + while (p->len > 0) { + if (p->len < sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype) ) { + goto packet_too_short; + } + GETSHORT(avpflags, inp); + avplen = avpflags & PPPOL2TP_AVPHEADERFLAG_LENGTHMASK; + /* printf("AVPLEN = %d\n", avplen); */ + if (p->len < avplen || avplen < sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype)) { + goto packet_too_short; + } + GETSHORT(vendorid, inp); + GETSHORT(attributetype, inp); + avplen -= sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype); + + /* Message type must be the first AVP */ + if (messagetype == 0) { + if (attributetype != 0 || vendorid != 0 || avplen != sizeof(messagetype) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: message type must be the first AVP\n")); + return; + } + GETSHORT(messagetype, inp); + /* printf("Message type = %d\n", messagetype); */ + switch(messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + /* Only accept SCCRP packet if we sent a SCCRQ */ + if (l2tp->phase != PPPOL2TP_STATE_SCCRQ_SENT) { + goto send_zlb; + } + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + /* Only accept ICRP packet if we sent a IRCQ */ + if (l2tp->phase != PPPOL2TP_STATE_ICRQ_SENT) { + goto send_zlb; + } + break; + /* Stop Control Connection Notification */ + case PPPOL2TP_MESSAGETYPE_STOPCCN: + pppol2tp_send_zlb(l2tp, l2tp->our_ns+1, l2tp->peer_ns); /* Ack the StopCCN before we switch to down state */ + if (l2tp->phase < PPPOL2TP_STATE_DATA) { + pppol2tp_abort_connect(l2tp); + } else if (l2tp->phase == PPPOL2TP_STATE_DATA) { + /* Don't disconnect here, we let the LCP Echo/Reply find the fact + * that PPP session is down. Asking the PPP stack to end the session + * require strict checking about the PPP phase to prevent endless + * disconnection loops. 
+ */ + } + return; + default: + break; + } + goto nextavp; + } + + /* Skip proprietary L2TP extensions */ + if (vendorid != 0) { + goto skipavp; + } + + switch (messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + switch (attributetype) { + case PPPOL2TP_AVPTYPE_TUNNELID: + if (avplen != sizeof(l2tp->source_tunnel_id) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Assign tunnel ID length check failed\n")); + return; + } + GETSHORT(l2tp->source_tunnel_id, inp); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Assigned tunnel ID %"U16_F"\n", l2tp->source_tunnel_id)); + goto nextavp; +#if PPPOL2TP_AUTH_SUPPORT + case PPPOL2TP_AVPTYPE_CHALLENGE: + if (avplen == 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Challenge length check failed\n")); + return; + } + if (l2tp->secret == NULL) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Received challenge from peer and no secret key available\n")); + pppol2tp_abort_connect(l2tp); + return; + } + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + challenge_id = PPPOL2TP_MESSAGETYPE_SCCCN; + lwip_md5_update(&md5_ctx, &challenge_id, 1); + lwip_md5_update(&md5_ctx, l2tp->secret, l2tp->secret_len); + lwip_md5_update(&md5_ctx, inp, avplen); + lwip_md5_finish(&md5_ctx, l2tp->challenge_hash); + lwip_md5_free(&md5_ctx); + l2tp->send_challenge = 1; + goto skipavp; + case PPPOL2TP_AVPTYPE_CHALLENGERESPONSE: + if (avplen != PPPOL2TP_AVPTYPE_CHALLENGERESPONSE_SIZE) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Challenge Response length check failed\n")); + return; + } + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + challenge_id = PPPOL2TP_MESSAGETYPE_SCCRP; + lwip_md5_update(&md5_ctx, &challenge_id, 1); + lwip_md5_update(&md5_ctx, l2tp->secret, l2tp->secret_len); + lwip_md5_update(&md5_ctx, l2tp->secret_rv, sizeof(l2tp->secret_rv)); + lwip_md5_finish(&md5_ctx, md5_hash); + lwip_md5_free(&md5_ctx); + if ( memcmp(inp, md5_hash, sizeof(md5_hash)) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Received challenge response from peer and secret key do not match\n")); + pppol2tp_abort_connect(l2tp); + return; + } + goto skipavp; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + default: + break; + } + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + switch (attributetype) { + case PPPOL2TP_AVPTYPE_SESSIONID: + if (avplen != sizeof(l2tp->source_session_id) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Assign session ID length check failed\n")); + return; + } + GETSHORT(l2tp->source_session_id, inp); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Assigned session ID %"U16_F"\n", l2tp->source_session_id)); + goto nextavp; + default: + break; + } + break; + default: + break; + } + +skipavp: + INCPTR(avplen, inp); +nextavp: + /* printf("AVP Found, vendor=%d, attribute=%d, len=%d\n", vendorid, attributetype, avplen); */ + /* next AVP */ + if (pbuf_remove_header(p, avplen + sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype)) != 0) { + return; + } + } + + switch(messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + do { + l2tp->remote_session_id = magic(); + } while(l2tp->remote_session_id == 0); + l2tp->tunnel_port = port; /* LNS server might have chosen its own local port */ + l2tp->icrq_retried = 0; + l2tp->phase = PPPOL2TP_STATE_ICRQ_SENT; + l2tp->our_ns++; + if ((err = pppol2tp_send_scccn(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ 
+ } + l2tp->our_ns++; + if ((err = pppol2tp_send_icrq(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_untimeout(pppol2tp_timeout, l2tp); + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + l2tp->iccn_retried = 0; + l2tp->phase = PPPOL2TP_STATE_ICCN_SENT; + l2tp->our_ns++; + if ((err = pppol2tp_send_iccn(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_untimeout(pppol2tp_timeout, l2tp); + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + /* Unhandled packet, send ZLB ACK */ + default: + goto send_zlb; + } + return; + +send_zlb: + pppol2tp_send_zlb(l2tp, l2tp->our_ns+1, l2tp->peer_ns); + return; +packet_too_short: + PPPDEBUG(LOG_DEBUG, ("pppol2tp: packet too short: %d\n", p->len)); +} + +/* L2TP Timeout handler */ +static void pppol2tp_timeout(void *arg) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb*)arg; + err_t err; + u32_t retry_wait; + + PPPDEBUG(LOG_DEBUG, ("pppol2tp: timeout\n")); + + switch (l2tp->phase) { + case PPPOL2TP_STATE_SCCRQ_SENT: + /* backoff wait */ + if (l2tp->sccrq_retried < 0xff) { + l2tp->sccrq_retried++; + } + if (!l2tp->ppp->settings.persist && l2tp->sccrq_retried >= PPPOL2TP_MAXSCCRQ) { + pppol2tp_abort_connect(l2tp); + return; + } + retry_wait = LWIP_MIN(PPPOL2TP_CONTROL_TIMEOUT * l2tp->sccrq_retried, PPPOL2TP_SLOW_RETRY); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: sccrq_retried=%d\n", l2tp->sccrq_retried)); + if ((err = pppol2tp_send_sccrq(l2tp)) != 0) { + l2tp->sccrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(retry_wait, pppol2tp_timeout, l2tp); + break; + + case PPPOL2TP_STATE_ICRQ_SENT: + l2tp->icrq_retried++; + if (l2tp->icrq_retried >= PPPOL2TP_MAXICRQ) { + pppol2tp_abort_connect(l2tp); + return; + } + PPPDEBUG(LOG_DEBUG, ("pppol2tp: icrq_retried=%d\n", l2tp->icrq_retried)); + if ((s16_t)(l2tp->peer_nr - l2tp->our_ns) < 0) { /* the SCCCN was not acknowledged */ + if ((err = pppol2tp_send_scccn(l2tp, l2tp->our_ns -1)) != 0) { + l2tp->icrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + } + } + if ((err = pppol2tp_send_icrq(l2tp, l2tp->our_ns)) != 0) { + l2tp->icrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + + case PPPOL2TP_STATE_ICCN_SENT: + l2tp->iccn_retried++; + if (l2tp->iccn_retried >= PPPOL2TP_MAXICCN) { + pppol2tp_abort_connect(l2tp); + return; + } + PPPDEBUG(LOG_DEBUG, ("pppol2tp: iccn_retried=%d\n", l2tp->iccn_retried)); + if ((err = pppol2tp_send_iccn(l2tp, l2tp->our_ns)) != 0) { + l2tp->iccn_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + + default: + return; /* all done, work in peace */ + } +} + +/* Connection attempt aborted */ +static void pppol2tp_abort_connect(pppol2tp_pcb *l2tp) { + 
PPPDEBUG(LOG_DEBUG, ("pppol2tp: could not establish connection\n")); + l2tp->phase = PPPOL2TP_STATE_INITIAL; + ppp_link_failed(l2tp->ppp); /* notify upper layers */ +} + +/* Initiate a new tunnel */ +static err_t pppol2tp_send_sccrq(pppol2tp_pcb *l2tp) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +8 +10 +10 +6+sizeof(PPPOL2TP_HOSTNAME)-1 +6+sizeof(PPPOL2TP_VENDORNAME)-1 +8 +8; +#if PPPOL2TP_AUTH_SUPPORT + if (l2tp->secret != NULL) { + len += 6 + sizeof(l2tp->secret_rv); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(0, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(0, p); /* NS Sequence number - to peer */ + PUTSHORT(0, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_SCCRQ, p); /* Attribute value: Message type: SCCRQ */ + + /* AVP - L2TP Version */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_VERSION, p); /* Attribute type: Version */ + PUTSHORT(PPPOL2TP_VERSION, p); /* Attribute value: L2TP Version */ + + /* AVP - Framing capabilities */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_FRAMINGCAPABILITIES, p); /* Attribute type: Framing capabilities */ + PUTLONG(PPPOL2TP_FRAMINGCAPABILITIES, p); /* Attribute value: Framing capabilities */ + + /* AVP - Bearer capabilities */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_BEARERCAPABILITIES, p); /* Attribute type: Bearer capabilities */ + PUTLONG(PPPOL2TP_BEARERCAPABILITIES, p); /* Attribute value: Bearer capabilities */ + + /* AVP - Host name */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6+sizeof(PPPOL2TP_HOSTNAME)-1, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_HOSTNAME, p); /* Attribute type: Hostname */ + MEMCPY(p, PPPOL2TP_HOSTNAME, sizeof(PPPOL2TP_HOSTNAME)-1); /* Attribute value: Hostname */ + INCPTR(sizeof(PPPOL2TP_HOSTNAME)-1, p); + + /* AVP - Vendor name */ + PUTSHORT(6+sizeof(PPPOL2TP_VENDORNAME)-1, p); /* len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_VENDORNAME, p); /* Attribute type: Vendor name */ + MEMCPY(p, PPPOL2TP_VENDORNAME, sizeof(PPPOL2TP_VENDORNAME)-1); /* Attribute value: Vendor name */ + INCPTR(sizeof(PPPOL2TP_VENDORNAME)-1, p); + + /* AVP - Assign tunnel ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TUNNELID, p); /* Attribute type: Tunnel ID */ + PUTSHORT(l2tp->remote_tunnel_id, p); /* Attribute value: Tunnel ID */ + + /* AVP - Receive window size */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* 
Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_RECEIVEWINDOWSIZE, p); /* Attribute type: Receive window size */ + PUTSHORT(PPPOL2TP_RECEIVEWINDOWSIZE, p); /* Attribute value: Receive window size */ + +#if PPPOL2TP_AUTH_SUPPORT + /* AVP - Challenge */ + if (l2tp->secret != NULL) { + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6 + sizeof(l2tp->secret_rv), p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CHALLENGE, p); /* Attribute type: Challenge */ + MEMCPY(p, l2tp->secret_rv, sizeof(l2tp->secret_rv)); /* Attribute value: Random vector */ + INCPTR(sizeof(l2tp->secret_rv), p); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Complete tunnel establishment */ +static err_t pppol2tp_send_scccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8; +#if PPPOL2TP_AUTH_SUPPORT + if (l2tp->send_challenge) { + len += 6 + sizeof(l2tp->challenge_hash); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_SCCCN, p); /* Attribute value: Message type: SCCCN */ + +#if PPPOL2TP_AUTH_SUPPORT + /* AVP - Challenge response */ + if (l2tp->send_challenge) { + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6 + sizeof(l2tp->challenge_hash), p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CHALLENGERESPONSE, p); /* Attribute type: Challenge response */ + MEMCPY(p, l2tp->challenge_hash, sizeof(l2tp->challenge_hash)); /* Attribute value: Computed challenge */ + INCPTR(sizeof(l2tp->challenge_hash), p); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Initiate a new session */ +static err_t pppol2tp_send_icrq(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + u32_t serialnumber; + + /* calculate UDP packet length */ + len = 12 +8 +8 +10; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_ICRQ, p); /* 
Attribute value: Message type: ICRQ */ + + /* AVP - Assign session ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_SESSIONID, p); /* Attribute type: Session ID */ + PUTSHORT(l2tp->remote_session_id, p); /* Attribute value: Session ID */ + + /* AVP - Call Serial Number */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CALLSERIALNUMBER, p); /* Attribute type: Serial number */ + serialnumber = magic(); + PUTLONG(serialnumber, p); /* Attribute value: Serial number */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Complete tunnel establishment */ +static err_t pppol2tp_send_iccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +10 +10; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(l2tp->source_session_id, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_ICCN, p); /* Attribute value: Message type: ICCN */ + + /* AVP - Framing type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_FRAMINGTYPE, p); /* Attribute type: Framing type */ + PUTLONG(PPPOL2TP_FRAMINGTYPE, p); /* Attribute value: Framing type */ + + /* AVP - TX Connect speed */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TXCONNECTSPEED, p); /* Attribute type: TX Connect speed */ + PUTLONG(PPPOL2TP_TXCONNECTSPEED, p); /* Attribute value: TX Connect speed */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Send a ZLB ACK packet */ +static err_t pppol2tp_send_zlb(pppol2tp_pcb *l2tp, u16_t ns, u16_t nr) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(nr, p); /* NR Sequence number - expected for peer */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Send a StopCCN packet */ +static err_t pppol2tp_send_stopccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +8 +8; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, 
PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_STOPCCN, p); /* Attribute value: Message type: StopCCN */ + + /* AVP - Assign tunnel ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TUNNELID, p); /* Attribute type: Tunnel ID */ + PUTSHORT(l2tp->remote_tunnel_id, p); /* Attribute value: Tunnel ID */ + + /* AVP - Result code */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_RESULTCODE, p); /* Attribute type: Result code */ + PUTSHORT(PPPOL2TP_RESULTCODE, p); /* Attribute value: Result code */ + + return pppol2tp_udp_send(l2tp, pb); +} + +static err_t pppol2tp_xmit(pppol2tp_pcb *l2tp, struct pbuf *pb) { + u8_t *p; + + /* make room for L2TP header - should not fail */ + if (pbuf_add_header(pb, PPPOL2TP_OUTPUT_DATA_HEADER_LEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppol2tp: pppol2tp_pcb: could not allocate room for L2TP header\n")); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + + p = (u8_t*)pb->payload; + PUTSHORT(PPPOL2TP_HEADERFLAG_DATA_MANDATORY, p); + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(l2tp->source_session_id, p); /* Session Id */ + + return pppol2tp_udp_send(l2tp, pb); +} + +static err_t pppol2tp_udp_send(pppol2tp_pcb *l2tp, struct pbuf *pb) { + err_t err; + if (l2tp->netif) { + err = udp_sendto_if(l2tp->udp, pb, &l2tp->remote_ip, l2tp->tunnel_port, l2tp->netif); + } else { + err = udp_sendto(l2tp->udp, pb, &l2tp->remote_ip, l2tp->tunnel_port); + } + pbuf_free(pb); + return err; +} + +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c new file mode 100644 index 000000000..dff025544 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c @@ -0,0 +1,895 @@ +/** + * @file + * Network Point to Point Protocol over Serial file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include "netif/ppp/ppp_opts.h"
+#if PPP_SUPPORT && PPPOS_SUPPORT /* don't build if not configured for use in lwipopts.h */
+
+#include <string.h>
+
+#include "lwip/arch.h"
+#include "lwip/err.h"
+#include "lwip/pbuf.h"
+#include "lwip/sys.h"
+#include "lwip/memp.h"
+#include "lwip/netif.h"
+#include "lwip/snmp.h"
+#include "lwip/priv/tcpip_priv.h"
+#include "lwip/api.h"
+#include "lwip/ip4.h" /* for ip4_input() */
+
+#include "netif/ppp/ppp_impl.h"
+#include "netif/ppp/pppos.h"
+#include "netif/ppp/vj.h"
+
+/* Memory pool */
+LWIP_MEMPOOL_DECLARE(PPPOS_PCB, MEMP_NUM_PPPOS_INTERFACES, sizeof(pppos_pcb), "PPPOS_PCB")
+
+/* callbacks called from PPP core */
+static err_t pppos_write(ppp_pcb *ppp, void *ctx, struct pbuf *p);
+static err_t pppos_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *pb, u16_t protocol);
+static void pppos_connect(ppp_pcb *ppp, void *ctx);
+#if PPP_SERVER
+static void pppos_listen(ppp_pcb *ppp, void *ctx);
+#endif /* PPP_SERVER */
+static void pppos_disconnect(ppp_pcb *ppp, void *ctx);
+static err_t pppos_destroy(ppp_pcb *ppp, void *ctx);
+static void pppos_send_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp);
+static void pppos_recv_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp);
+
+/* Prototypes for procedures local to this file. */
+#if PPP_INPROC_IRQ_SAFE
+static void pppos_input_callback(void *arg);
+#endif /* PPP_INPROC_IRQ_SAFE */
+static void pppos_input_free_current_packet(pppos_pcb *pppos);
+static void pppos_input_drop(pppos_pcb *pppos);
+static err_t pppos_output_append(pppos_pcb *pppos, err_t err, struct pbuf *nb, u8_t c, u8_t accm, u16_t *fcs);
+static err_t pppos_output_last(pppos_pcb *pppos, err_t err, struct pbuf *nb, u16_t *fcs);
+
+/* Callbacks structure for PPP core */
+static const struct link_callbacks pppos_callbacks = {
+  pppos_connect,
+#if PPP_SERVER
+  pppos_listen,
+#endif /* PPP_SERVER */
+  pppos_disconnect,
+  pppos_destroy,
+  pppos_write,
+  pppos_netif_output,
+  pppos_send_config,
+  pppos_recv_config
+};
+
+/* PPP's Asynchronous-Control-Character-Map. The mask array is used
+ * to select the specific bit for a character. */
+#define ESCAPE_P(accm, c) ((accm)[(c) >> 3] & 1 << (c & 0x07))
+
+#if PPP_FCS_TABLE
+/*
+ * FCS lookup table as calculated by genfcstab.
+ */ +static const u16_t fcstab[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; +#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]) +#else /* PPP_FCS_TABLE */ +/* The HDLC polynomial: X**0 + X**5 + X**12 + X**16 (0x8408) */ +#define PPP_FCS_POLYNOMIAL 0x8408 +static u16_t +ppp_get_fcs(u8_t byte) +{ + unsigned int octet; + int bit; + octet = byte; + for (bit = 8; bit-- > 0; ) { + octet = (octet & 0x01) ? ((octet >> 1) ^ PPP_FCS_POLYNOMIAL) : (octet >> 1); + } + return octet & 0xffff; +} +#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ ppp_get_fcs(((fcs) ^ (c)) & 0xff)) +#endif /* PPP_FCS_TABLE */ + +/* + * Values for FCS calculations. + */ +#define PPP_INITFCS 0xffff /* Initial FCS value */ +#define PPP_GOODFCS 0xf0b8 /* Good final FCS value */ + +#if PPP_INPROC_IRQ_SAFE +#define PPPOS_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define PPPOS_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define PPPOS_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +#else +#define PPPOS_DECL_PROTECT(lev) +#define PPPOS_PROTECT(lev) +#define PPPOS_UNPROTECT(lev) +#endif /* PPP_INPROC_IRQ_SAFE */ + + +/* + * Create a new PPP connection using the given serial I/O device. + * + * Return 0 on success, an error code on failure. 
+ */ +ppp_pcb *pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + pppos_pcb *pppos; + ppp_pcb *ppp; + LWIP_ASSERT_CORE_LOCKED(); + + pppos = (pppos_pcb *)LWIP_MEMPOOL_ALLOC(PPPOS_PCB); + if (pppos == NULL) { + return NULL; + } + + ppp = ppp_new(pppif, &pppos_callbacks, pppos, link_status_cb, ctx_cb); + if (ppp == NULL) { + LWIP_MEMPOOL_FREE(PPPOS_PCB, pppos); + return NULL; + } + + memset(pppos, 0, sizeof(pppos_pcb)); + pppos->ppp = ppp; + pppos->output_cb = output_cb; + return ppp; +} + +/* Called by PPP core */ +static err_t +pppos_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + u8_t *s; + struct pbuf *nb; + u16_t n; + u16_t fcs_out; + err_t err; + LWIP_UNUSED_ARG(ppp); + + /* Grab an output buffer. Using PBUF_POOL here for tx is ok since the pbuf + gets freed by 'pppos_output_last' before this function returns and thus + cannot starve rx. */ + nb = pbuf_alloc(PBUF_RAW, 0, PBUF_POOL); + if (nb == NULL) { + PPPDEBUG(LOG_WARNING, ("pppos_write[%d]: alloc fail\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + /* Set nb->tot_len to actual payload length */ + nb->tot_len = p->len; + + /* If the link has been idle, we'll send a fresh flag character to + * flush any noise. */ + err = ERR_OK; + if ((sys_now() - pppos->last_xmit) >= PPP_MAXIDLEFLAG) { + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + } + + /* Load output buffer. */ + fcs_out = PPP_INITFCS; + s = (u8_t*)p->payload; + n = p->len; + while (n-- > 0) { + err = pppos_output_append(pppos, err, nb, *s++, 1, &fcs_out); + } + + err = pppos_output_last(pppos, err, nb, &fcs_out); + if (err == ERR_OK) { + PPPDEBUG(LOG_INFO, ("pppos_write[%d]: len=%d\n", ppp->netif->num, p->len)); + } else { + PPPDEBUG(LOG_WARNING, ("pppos_write[%d]: output failed len=%d\n", ppp->netif->num, p->len)); + } + pbuf_free(p); + return err; +} + +/* Called by PPP core */ +static err_t +pppos_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *pb, u16_t protocol) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + struct pbuf *nb, *p; + u16_t fcs_out; + err_t err; + LWIP_UNUSED_ARG(ppp); + + /* Grab an output buffer. Using PBUF_POOL here for tx is ok since the pbuf + gets freed by 'pppos_output_last' before this function returns and thus + cannot starve rx. */ + nb = pbuf_alloc(PBUF_RAW, 0, PBUF_POOL); + if (nb == NULL) { + PPPDEBUG(LOG_WARNING, ("pppos_netif_output[%d]: alloc fail\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + /* Set nb->tot_len to actual payload length */ + nb->tot_len = pb->tot_len; + + /* If the link has been idle, we'll send a fresh flag character to + * flush any noise. */ + err = ERR_OK; + if ((sys_now() - pppos->last_xmit) >= PPP_MAXIDLEFLAG) { + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + } + + fcs_out = PPP_INITFCS; + if (!pppos->accomp) { + err = pppos_output_append(pppos, err, nb, PPP_ALLSTATIONS, 1, &fcs_out); + err = pppos_output_append(pppos, err, nb, PPP_UI, 1, &fcs_out); + } + if (!pppos->pcomp || protocol > 0xFF) { + err = pppos_output_append(pppos, err, nb, (protocol >> 8) & 0xFF, 1, &fcs_out); + } + err = pppos_output_append(pppos, err, nb, protocol & 0xFF, 1, &fcs_out); + + /* Load packet. 
*/ + for(p = pb; p; p = p->next) { + u16_t n = p->len; + u8_t *s = (u8_t*)p->payload; + + while (n-- > 0) { + err = pppos_output_append(pppos, err, nb, *s++, 1, &fcs_out); + } + } + + err = pppos_output_last(pppos, err, nb, &fcs_out); + if (err == ERR_OK) { + PPPDEBUG(LOG_INFO, ("pppos_netif_output[%d]: proto=0x%"X16_F", len = %d\n", ppp->netif->num, protocol, pb->tot_len)); + } else { + PPPDEBUG(LOG_WARNING, ("pppos_netif_output[%d]: output failed proto=0x%"X16_F", len = %d\n", ppp->netif->num, protocol, pb->tot_len)); + } + return err; +} + +static void +pppos_connect(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left over from last session? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + /* reset PPPoS control block to its initial state */ + memset(&pppos->last_xmit, 0, sizeof(pppos_pcb) - offsetof(pppos_pcb, last_xmit)); + + /* + * Default the in and out accm so that escape and flag characters + * are always escaped. + */ + pppos->in_accm[15] = 0x60; /* no need to protect since RX is not running */ + pppos->out_accm[15] = 0x60; + PPPOS_PROTECT(lev); + pppos->open = 1; + PPPOS_UNPROTECT(lev); + + /* + * Start the connection and handle incoming events (packet or timeout). + */ + PPPDEBUG(LOG_INFO, ("pppos_connect: unit %d: connecting\n", ppp->netif->num)); + ppp_start(ppp); /* notify upper layers */ +} + +#if PPP_SERVER +static void +pppos_listen(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left over from last session? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + /* reset PPPoS control block to its initial state */ + memset(&pppos->last_xmit, 0, sizeof(pppos_pcb) - offsetof(pppos_pcb, last_xmit)); + + /* + * Default the in and out accm so that escape and flag characters + * are always escaped. + */ + pppos->in_accm[15] = 0x60; /* no need to protect since RX is not running */ + pppos->out_accm[15] = 0x60; + PPPOS_PROTECT(lev); + pppos->open = 1; + PPPOS_UNPROTECT(lev); + + /* + * Wait for something to happen. + */ + PPPDEBUG(LOG_INFO, ("pppos_listen: unit %d: listening\n", ppp->netif->num)); + ppp_start(ppp); /* notify upper layers */ +} +#endif /* PPP_SERVER */ + +static void +pppos_disconnect(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + + PPPOS_PROTECT(lev); + pppos->open = 0; + PPPOS_UNPROTECT(lev); + + /* If PPP_INPROC_IRQ_SAFE is used we cannot call + * pppos_input_free_current_packet() here because + * rx IRQ might still call pppos_input(). + */ +#if !PPP_INPROC_IRQ_SAFE + /* input pbuf left ? */ + pppos_input_free_current_packet(pppos); +#endif /* !PPP_INPROC_IRQ_SAFE */ + + ppp_link_end(ppp); /* notify upper layers */ +} + +static err_t +pppos_destroy(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left ? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + LWIP_MEMPOOL_FREE(PPPOS_PCB, pppos); + return ERR_OK; +} + +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +/** Pass received raw characters to PPPoS to be decoded through lwIP TCPIP thread. + * + * This is one of the only functions that may be called outside of the TCPIP thread! 
+ * + * @param ppp PPP descriptor index, returned by pppos_create() + * @param s received data + * @param l length of received data + */ +err_t +pppos_input_tcpip(ppp_pcb *ppp, u8_t *s, int l) +{ + struct pbuf *p; + err_t err; + + p = pbuf_alloc(PBUF_RAW, l, PBUF_POOL); + if (!p) { + return ERR_MEM; + } + pbuf_take(p, s, l); + + err = tcpip_inpkt(p, ppp_netif(ppp), pppos_input_sys); + if (err != ERR_OK) { + pbuf_free(p); + } + return err; +} + +/* called from TCPIP thread */ +err_t pppos_input_sys(struct pbuf *p, struct netif *inp) { + ppp_pcb *ppp = (ppp_pcb*)inp->state; + struct pbuf *n; + LWIP_ASSERT_CORE_LOCKED(); + + for (n = p; n; n = n->next) { + pppos_input(ppp, (u8_t*)n->payload, n->len); + } + pbuf_free(p); + return ERR_OK; +} +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +/** PPPoS input helper struct, must be packed since it is stored + * to pbuf->payload, which might be unaligned. */ +#if PPP_INPROC_IRQ_SAFE +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppos_input_header { + PACK_STRUCT_FIELD(ppp_pcb *ppp); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#endif /* PPP_INPROC_IRQ_SAFE */ + +/** Pass received raw characters to PPPoS to be decoded. + * + * @param ppp PPP descriptor index, returned by pppos_create() + * @param s received data + * @param l length of received data + */ +void +pppos_input(ppp_pcb *ppp, u8_t *s, int l) +{ + pppos_pcb *pppos = (pppos_pcb *)ppp->link_ctx_cb; + struct pbuf *next_pbuf; + u8_t cur_char; + u8_t escaped; + PPPOS_DECL_PROTECT(lev); +#if !PPP_INPROC_IRQ_SAFE + LWIP_ASSERT_CORE_LOCKED(); +#endif + + PPPDEBUG(LOG_DEBUG, ("pppos_input[%d]: got %d bytes\n", ppp->netif->num, l)); + while (l-- > 0) { + cur_char = *s++; + + PPPOS_PROTECT(lev); + /* ppp_input can disconnect the interface, we need to abort to prevent a memory + * leak if there are remaining bytes because pppos_connect and pppos_listen + * functions expect input buffer to be free. Furthermore there are no real + * reason to continue reading bytes if we are disconnected. + */ + if (!pppos->open) { + PPPOS_UNPROTECT(lev); + return; + } + escaped = ESCAPE_P(pppos->in_accm, cur_char); + PPPOS_UNPROTECT(lev); + /* Handle special characters. */ + if (escaped) { + /* Check for escape sequences. */ + /* XXX Note that this does not handle an escaped 0x5d character which + * would appear as an escape character. Since this is an ASCII ']' + * and there is no reason that I know of to escape it, I won't complicate + * the code to handle this case. GLL */ + if (cur_char == PPP_ESCAPE) { + pppos->in_escaped = 1; + /* Check for the flag character. */ + } else if (cur_char == PPP_FLAG) { + /* If this is just an extra flag character, ignore it. */ + if (pppos->in_state <= PDADDRESS) { + /* ignore it */; + /* If we haven't received the packet header, drop what has come in. */ + } else if (pppos->in_state < PDDATA) { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Dropping incomplete packet %d\n", + ppp->netif->num, pppos->in_state)); + LINK_STATS_INC(link.lenerr); + pppos_input_drop(pppos); + /* If the fcs is invalid, drop the packet. 
*/ + } else if (pppos->in_fcs != PPP_GOODFCS) { + PPPDEBUG(LOG_INFO, + ("pppos_input[%d]: Dropping bad fcs 0x%"X16_F" proto=0x%"X16_F"\n", + ppp->netif->num, pppos->in_fcs, pppos->in_protocol)); + /* Note: If you get lots of these, check for UART frame errors or try different baud rate */ + LINK_STATS_INC(link.chkerr); + pppos_input_drop(pppos); + /* Otherwise it's a good packet so pass it on. */ + } else { + struct pbuf *inp; + /* Trim off the checksum. */ + if(pppos->in_tail->len > 2) { + pppos->in_tail->len -= 2; + + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + } + } else { + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + } + + pbuf_realloc(pppos->in_head, pppos->in_head->tot_len - 2); + } + + /* Dispatch the packet thereby consuming it. */ + inp = pppos->in_head; + /* Packet consumed, release our references. */ + pppos->in_head = NULL; + pppos->in_tail = NULL; +#if IP_FORWARD || LWIP_IPV6_FORWARD + /* hide the room for Ethernet forwarding header */ + pbuf_remove_header(inp, PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN); +#endif /* IP_FORWARD || LWIP_IPV6_FORWARD */ +#if PPP_INPROC_IRQ_SAFE + if(tcpip_try_callback(pppos_input_callback, inp) != ERR_OK) { + PPPDEBUG(LOG_ERR, ("pppos_input[%d]: tcpip_callback() failed, dropping packet\n", ppp->netif->num)); + pbuf_free(inp); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifindiscards); + } +#else /* PPP_INPROC_IRQ_SAFE */ + ppp_input(ppp, inp); +#endif /* PPP_INPROC_IRQ_SAFE */ + } + + /* Prepare for a new packet. */ + pppos->in_fcs = PPP_INITFCS; + pppos->in_state = PDADDRESS; + pppos->in_escaped = 0; + /* Other characters are usually control characters that may have + * been inserted by the physical layer so here we just drop them. */ + } else { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Dropping ACCM char <%d>\n", ppp->netif->num, cur_char)); + } + /* Process other characters. */ + } else { + /* Unencode escaped characters. */ + if (pppos->in_escaped) { + pppos->in_escaped = 0; + cur_char ^= PPP_TRANS; + } + + /* Process character relative to current state. */ + switch(pppos->in_state) { + case PDIDLE: /* Idle state - waiting. */ + /* Drop the character if it's not 0xff + * we would have processed a flag character above. */ + if (cur_char != PPP_ALLSTATIONS) { + break; + } + /* no break */ + /* Fall through */ + + case PDSTART: /* Process start flag. */ + /* Prepare for a new packet. */ + pppos->in_fcs = PPP_INITFCS; + /* no break */ + /* Fall through */ + + case PDADDRESS: /* Process address field. */ + if (cur_char == PPP_ALLSTATIONS) { + pppos->in_state = PDCONTROL; + break; + } + /* no break */ + + /* Else assume compressed address and control fields so + * fall through to get the protocol... */ + /* Fall through */ + case PDCONTROL: /* Process control field. */ + /* If we don't get a valid control code, restart. */ + if (cur_char == PPP_UI) { + pppos->in_state = PDPROTOCOL1; + break; + } + /* no break */ + +#if 0 + else { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Invalid control <%d>\n", ppp->netif->num, cur_char)); + pppos->in_state = PDSTART; + } +#endif + /* Fall through */ + + case PDPROTOCOL1: /* Process protocol field 1. */ + /* If the lower bit is set, this is the end of the protocol + * field. 
*/ + if (cur_char & 1) { + pppos->in_protocol = cur_char; + pppos->in_state = PDDATA; + } else { + pppos->in_protocol = (u16_t)cur_char << 8; + pppos->in_state = PDPROTOCOL2; + } + break; + case PDPROTOCOL2: /* Process protocol field 2. */ + pppos->in_protocol |= cur_char; + pppos->in_state = PDDATA; + break; + case PDDATA: /* Process data byte. */ + /* Make space to receive processed data. */ + if (pppos->in_tail == NULL || pppos->in_tail->len == PBUF_POOL_BUFSIZE) { + u16_t pbuf_alloc_len; + if (pppos->in_tail != NULL) { + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + /* give up the in_tail reference now */ + pppos->in_tail = NULL; + } + } + /* If we haven't started a packet, we need a packet header. */ + pbuf_alloc_len = 0; +#if IP_FORWARD || LWIP_IPV6_FORWARD + /* If IP forwarding is enabled we are reserving PBUF_LINK_ENCAPSULATION_HLEN + * + PBUF_LINK_HLEN bytes so the packet is being allocated with enough header + * space to be forwarded (to Ethernet for example). + */ + if (pppos->in_head == NULL) { + pbuf_alloc_len = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; + } +#endif /* IP_FORWARD || LWIP_IPV6_FORWARD */ + next_pbuf = pbuf_alloc(PBUF_RAW, pbuf_alloc_len, PBUF_POOL); + if (next_pbuf == NULL) { + /* No free buffers. Drop the input packet and let the + * higher layers deal with it. Continue processing + * the received pbuf chain in case a new packet starts. */ + PPPDEBUG(LOG_ERR, ("pppos_input[%d]: NO FREE PBUFS!\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + pppos_input_drop(pppos); + pppos->in_state = PDSTART; /* Wait for flag sequence. */ + break; + } + if (pppos->in_head == NULL) { + u8_t *payload = ((u8_t*)next_pbuf->payload) + pbuf_alloc_len; +#if PPP_INPROC_IRQ_SAFE + ((struct pppos_input_header*)payload)->ppp = ppp; + payload += sizeof(struct pppos_input_header); + next_pbuf->len += sizeof(struct pppos_input_header); +#endif /* PPP_INPROC_IRQ_SAFE */ + next_pbuf->len += sizeof(pppos->in_protocol); + *(payload++) = pppos->in_protocol >> 8; + *(payload) = pppos->in_protocol & 0xFF; + pppos->in_head = next_pbuf; + } + pppos->in_tail = next_pbuf; + } + /* Load character into buffer. */ + ((u8_t*)pppos->in_tail->payload)[pppos->in_tail->len++] = cur_char; + break; + default: + break; + } + + /* update the frame check sequence number. */ + pppos->in_fcs = PPP_FCS(pppos->in_fcs, cur_char); + } + } /* while (l-- > 0), all bytes processed */ +} + +#if PPP_INPROC_IRQ_SAFE +/* PPPoS input callback using one input pointer + */ +static void pppos_input_callback(void *arg) { + struct pbuf *pb = (struct pbuf*)arg; + ppp_pcb *ppp; + + ppp = ((struct pppos_input_header*)pb->payload)->ppp; + if(pbuf_remove_header(pb, sizeof(struct pppos_input_header))) { + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto drop; + } + + /* Dispatch the packet thereby consuming it. */ + ppp_input(ppp, pb); + return; + +drop: + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifindiscards); + pbuf_free(pb); +} +#endif /* PPP_INPROC_IRQ_SAFE */ + +static void +pppos_send_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp) +{ + int i; + pppos_pcb *pppos = (pppos_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + + pppos->pcomp = pcomp; + pppos->accomp = accomp; + + /* Load the ACCM bits for the 32 control codes. 
*/ + for (i = 0; i < 32/8; i++) { + pppos->out_accm[i] = (u8_t)((accm >> (8 * i)) & 0xFF); + } + + PPPDEBUG(LOG_INFO, ("pppos_send_config[%d]: out_accm=%X %X %X %X\n", + pppos->ppp->netif->num, + pppos->out_accm[0], pppos->out_accm[1], pppos->out_accm[2], pppos->out_accm[3])); +} + +static void +pppos_recv_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp) +{ + int i; + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + LWIP_UNUSED_ARG(ppp); + LWIP_UNUSED_ARG(pcomp); + LWIP_UNUSED_ARG(accomp); + + /* Load the ACCM bits for the 32 control codes. */ + PPPOS_PROTECT(lev); + for (i = 0; i < 32 / 8; i++) { + pppos->in_accm[i] = (u8_t)(accm >> (i * 8)); + } + PPPOS_UNPROTECT(lev); + + PPPDEBUG(LOG_INFO, ("pppos_recv_config[%d]: in_accm=%X %X %X %X\n", + pppos->ppp->netif->num, + pppos->in_accm[0], pppos->in_accm[1], pppos->in_accm[2], pppos->in_accm[3])); +} + +/* + * Drop the input packet. + */ +static void +pppos_input_free_current_packet(pppos_pcb *pppos) +{ + if (pppos->in_head != NULL) { + if (pppos->in_tail && (pppos->in_tail != pppos->in_head)) { + pbuf_free(pppos->in_tail); + } + pbuf_free(pppos->in_head); + pppos->in_head = NULL; + } + pppos->in_tail = NULL; +} + +/* + * Drop the input packet and increase error counters. + */ +static void +pppos_input_drop(pppos_pcb *pppos) +{ + if (pppos->in_head != NULL) { +#if 0 + PPPDEBUG(LOG_INFO, ("pppos_input_drop: %d:%.*H\n", pppos->in_head->len, min(60, pppos->in_head->len * 2), pppos->in_head->payload)); +#endif + PPPDEBUG(LOG_INFO, ("pppos_input_drop: pbuf len=%d, addr %p\n", pppos->in_head->len, (void*)pppos->in_head)); + } + pppos_input_free_current_packet(pppos); +#if VJ_SUPPORT + vj_uncompress_err(&pppos->ppp->vj_comp); +#endif /* VJ_SUPPORT */ + + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pppos->ppp->netif, ifindiscards); +} + +/* + * pppos_output_append - append given character to end of given pbuf. + * If out_accm is not 0 and the character needs to be escaped, do so. + * If pbuf is full, send the pbuf and reuse it. + * Return the current pbuf. + */ +static err_t +pppos_output_append(pppos_pcb *pppos, err_t err, struct pbuf *nb, u8_t c, u8_t accm, u16_t *fcs) +{ + if (err != ERR_OK) { + return err; + } + + /* Make sure there is room for the character and an escape code. + * Sure we don't quite fill the buffer if the character doesn't + * get escaped but is one character worth complicating this? */ + if ((PBUF_POOL_BUFSIZE - nb->len) < 2) { + u32_t l = pppos->output_cb(pppos->ppp, (u8_t*)nb->payload, nb->len, pppos->ppp->ctx_cb); + if (l != nb->len) { + return ERR_IF; + } + nb->len = 0; + } + + /* Update FCS before checking for special characters. */ + if (fcs) { + *fcs = PPP_FCS(*fcs, c); + } + + /* Copy to output buffer escaping special characters. */ + if (accm && ESCAPE_P(pppos->out_accm, c)) { + *((u8_t*)nb->payload + nb->len++) = PPP_ESCAPE; + *((u8_t*)nb->payload + nb->len++) = c ^ PPP_TRANS; + } else { + *((u8_t*)nb->payload + nb->len++) = c; + } + + return ERR_OK; +} + +static err_t +pppos_output_last(pppos_pcb *pppos, err_t err, struct pbuf *nb, u16_t *fcs) +{ + ppp_pcb *ppp = pppos->ppp; + + /* Add FCS and trailing flag. 
*/ + err = pppos_output_append(pppos, err, nb, ~(*fcs) & 0xFF, 1, NULL); + err = pppos_output_append(pppos, err, nb, (~(*fcs) >> 8) & 0xFF, 1, NULL); + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + + if (err != ERR_OK) { + goto failed; + } + + /* Send remaining buffer if not empty */ + if (nb->len > 0) { + u32_t l = pppos->output_cb(ppp, (u8_t*)nb->payload, nb->len, ppp->ctx_cb); + if (l != nb->len) { + err = ERR_IF; + goto failed; + } + } + + pppos->last_xmit = sys_now(); + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, nb->tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + pbuf_free(nb); + return ERR_OK; + +failed: + pppos->last_xmit = 0; /* prepend PPP_FLAG to next packet */ + LINK_STATS_INC(link.err); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(nb); + return err; +} + +#endif /* PPP_SUPPORT && PPPOS_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c new file mode 100644 index 000000000..3b2399dc2 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c @@ -0,0 +1,677 @@ +/* + * upap.c - User/Password Authentication Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/upap.h" + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static option_t pap_option_list[] = { + { "hide-password", o_bool, &hide_password, + "Don't output passwords to log", OPT_PRIO | 1 }, + { "show-password", o_bool, &hide_password, + "Show password string in debug log messages", OPT_PRIOSUB | 0 }, + + { "pap-restart", o_int, &upap[0].us_timeouttime, + "Set retransmit timeout for PAP", OPT_PRIO }, + { "pap-max-authreq", o_int, &upap[0].us_maxtransmits, + "Set max number of transmissions for auth-reqs", OPT_PRIO }, + { "pap-timeout", o_int, &upap[0].us_reqtimeout, + "Set time limit for peer PAP authentication", OPT_PRIO }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points. + */ +static void upap_init(ppp_pcb *pcb); +static void upap_lowerup(ppp_pcb *pcb); +static void upap_lowerdown(ppp_pcb *pcb); +static void upap_input(ppp_pcb *pcb, u_char *inpacket, int l); +static void upap_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int upap_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent pap_protent = { + PPP_PAP, + upap_init, + upap_input, + upap_protrej, + upap_lowerup, + upap_lowerdown, + NULL, + NULL, +#if PRINTPKT_SUPPORT + upap_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "PAP", + NULL, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + pap_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +static void upap_timeout(void *arg); +#if PPP_SERVER +static void upap_reqtimeout(void *arg); +static void upap_rauthreq(ppp_pcb *pcb, u_char *inp, int id, int len); +#endif /* PPP_SERVER */ +static void upap_rauthack(ppp_pcb *pcb, u_char *inp, int id, int len); +static void upap_rauthnak(ppp_pcb *pcb, u_char *inp, int id, int len); +static void upap_sauthreq(ppp_pcb *pcb); +#if PPP_SERVER +static void upap_sresp(ppp_pcb *pcb, u_char code, u_char id, const char *msg, int msglen); +#endif /* PPP_SERVER */ + + +/* + * upap_init - Initialize a UPAP unit. + */ +static void upap_init(ppp_pcb *pcb) { + pcb->upap.us_user = NULL; + pcb->upap.us_userlen = 0; + pcb->upap.us_passwd = NULL; + pcb->upap.us_passwdlen = 0; + pcb->upap.us_clientstate = UPAPCS_INITIAL; +#if PPP_SERVER + pcb->upap.us_serverstate = UPAPSS_INITIAL; +#endif /* PPP_SERVER */ + pcb->upap.us_id = 0; +} + + +/* + * upap_authwithpeer - Authenticate us with our peer (start client). + * + * Set new state and send authenticate's. + */ +void upap_authwithpeer(ppp_pcb *pcb, const char *user, const char *password) { + + if(!user || !password) + return; + + /* Save the username and password we're given */ + pcb->upap.us_user = user; + pcb->upap.us_userlen = (u8_t)LWIP_MIN(strlen(user), 0xff); + pcb->upap.us_passwd = password; + pcb->upap.us_passwdlen = (u8_t)LWIP_MIN(strlen(password), 0xff); + pcb->upap.us_transmits = 0; + + /* Lower layer up yet? */ + if (pcb->upap.us_clientstate == UPAPCS_INITIAL || + pcb->upap.us_clientstate == UPAPCS_PENDING) { + pcb->upap.us_clientstate = UPAPCS_PENDING; + return; + } + + upap_sauthreq(pcb); /* Start protocol */ +} + +#if PPP_SERVER +/* + * upap_authpeer - Authenticate our peer (start server). + * + * Set new state. + */ +void upap_authpeer(ppp_pcb *pcb) { + + /* Lower layer up yet? 
*/ + if (pcb->upap.us_serverstate == UPAPSS_INITIAL || + pcb->upap.us_serverstate == UPAPSS_PENDING) { + pcb->upap.us_serverstate = UPAPSS_PENDING; + return; + } + + pcb->upap.us_serverstate = UPAPSS_LISTEN; + if (pcb->settings.pap_req_timeout > 0) + TIMEOUT(upap_reqtimeout, pcb, pcb->settings.pap_req_timeout); +} +#endif /* PPP_SERVER */ + +/* + * upap_timeout - Retransmission timer for sending auth-reqs expired. + */ +static void upap_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) + return; + + if (pcb->upap.us_transmits >= pcb->settings.pap_max_transmits) { + /* give up in disgust */ + ppp_error("No response to PAP authenticate-requests"); + pcb->upap.us_clientstate = UPAPCS_BADAUTH; + auth_withpeer_fail(pcb, PPP_PAP); + return; + } + + upap_sauthreq(pcb); /* Send Authenticate-Request */ +} + + +#if PPP_SERVER +/* + * upap_reqtimeout - Give up waiting for the peer to send an auth-req. + */ +static void upap_reqtimeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->upap.us_serverstate != UPAPSS_LISTEN) + return; /* huh?? */ + + auth_peer_fail(pcb, PPP_PAP); + pcb->upap.us_serverstate = UPAPSS_BADAUTH; +} +#endif /* PPP_SERVER */ + + +/* + * upap_lowerup - The lower layer is up. + * + * Start authenticating if pending. + */ +static void upap_lowerup(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_INITIAL) + pcb->upap.us_clientstate = UPAPCS_CLOSED; + else if (pcb->upap.us_clientstate == UPAPCS_PENDING) { + upap_sauthreq(pcb); /* send an auth-request */ + } + +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_INITIAL) + pcb->upap.us_serverstate = UPAPSS_CLOSED; + else if (pcb->upap.us_serverstate == UPAPSS_PENDING) { + pcb->upap.us_serverstate = UPAPSS_LISTEN; + if (pcb->settings.pap_req_timeout > 0) + TIMEOUT(upap_reqtimeout, pcb, pcb->settings.pap_req_timeout); + } +#endif /* PPP_SERVER */ +} + + +/* + * upap_lowerdown - The lower layer is down. + * + * Cancel all timeouts. + */ +static void upap_lowerdown(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_AUTHREQ) /* Timeout pending? */ + UNTIMEOUT(upap_timeout, pcb); /* Cancel timeout */ +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_LISTEN && pcb->settings.pap_req_timeout > 0) + UNTIMEOUT(upap_reqtimeout, pcb); +#endif /* PPP_SERVER */ + + pcb->upap.us_clientstate = UPAPCS_INITIAL; +#if PPP_SERVER + pcb->upap.us_serverstate = UPAPSS_INITIAL; +#endif /* PPP_SERVER */ +} + + +/* + * upap_protrej - Peer doesn't speak this protocol. + * + * This shouldn't happen. In any case, pretend lower layer went down. + */ +static void upap_protrej(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_AUTHREQ) { + ppp_error("PAP authentication failed due to protocol-reject"); + auth_withpeer_fail(pcb, PPP_PAP); + } +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_LISTEN) { + ppp_error("PAP authentication of peer failed (protocol-reject)"); + auth_peer_fail(pcb, PPP_PAP); + } +#endif /* PPP_SERVER */ + upap_lowerdown(pcb); +} + + +/* + * upap_input - Input UPAP packet. + */ +static void upap_input(ppp_pcb *pcb, u_char *inpacket, int l) { + u_char *inp; + u_char code, id; + int len; + + /* + * Parse header (code, id and length). + * If packet too short, drop it. 
+ */ + inp = inpacket; + if (l < UPAP_HEADERLEN) { + UPAPDEBUG(("pap_input: rcvd short header.")); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < UPAP_HEADERLEN) { + UPAPDEBUG(("pap_input: rcvd illegal length.")); + return; + } + if (len > l) { + UPAPDEBUG(("pap_input: rcvd short packet.")); + return; + } + len -= UPAP_HEADERLEN; + + /* + * Action depends on code. + */ + switch (code) { + case UPAP_AUTHREQ: +#if PPP_SERVER + upap_rauthreq(pcb, inp, id, len); +#endif /* PPP_SERVER */ + break; + + case UPAP_AUTHACK: + upap_rauthack(pcb, inp, id, len); + break; + + case UPAP_AUTHNAK: + upap_rauthnak(pcb, inp, id, len); + break; + + default: /* XXX Need code reject */ + break; + } +} + +#if PPP_SERVER +/* + * upap_rauth - Receive Authenticate. + */ +static void upap_rauthreq(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char ruserlen, rpasswdlen; + char *ruser; + char *rpasswd; + char rhostname[256]; + int retcode; + const char *msg; + int msglen; + + if (pcb->upap.us_serverstate < UPAPSS_LISTEN) + return; + + /* + * If we receive a duplicate authenticate-request, we are + * supposed to return the same status as for the first request. + */ + if (pcb->upap.us_serverstate == UPAPSS_OPEN) { + upap_sresp(pcb, UPAP_AUTHACK, id, "", 0); /* return auth-ack */ + return; + } + if (pcb->upap.us_serverstate == UPAPSS_BADAUTH) { + upap_sresp(pcb, UPAP_AUTHNAK, id, "", 0); /* return auth-nak */ + return; + } + + /* + * Parse user/passwd. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + GETCHAR(ruserlen, inp); + len -= sizeof (u_char) + ruserlen + sizeof (u_char); + if (len < 0) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + ruser = (char *) inp; + INCPTR(ruserlen, inp); + GETCHAR(rpasswdlen, inp); + if (len < rpasswdlen) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + + rpasswd = (char *) inp; + + /* + * Check the username and password given. + */ + retcode = UPAP_AUTHNAK; + if (auth_check_passwd(pcb, ruser, ruserlen, rpasswd, rpasswdlen, &msg, &msglen)) { + retcode = UPAP_AUTHACK; + } + BZERO(rpasswd, rpasswdlen); + +#if 0 /* UNUSED */ + /* + * Check remote number authorization. A plugin may have filled in + * the remote number or added an allowed number, and rather than + * return an authenticate failure, is leaving it for us to verify. + */ + if (retcode == UPAP_AUTHACK) { + if (!auth_number()) { + /* We do not want to leak info about the pap result. */ + retcode = UPAP_AUTHNAK; /* XXX exit value will be "wrong" */ + warn("calling number %q is not authorized", remote_number); + } + } + + msglen = strlen(msg); + if (msglen > 255) + msglen = 255; +#endif /* UNUSED */ + + upap_sresp(pcb, retcode, id, msg, msglen); + + /* Null terminate and clean remote name. */ + ppp_slprintf(rhostname, sizeof(rhostname), "%.*v", ruserlen, ruser); + + if (retcode == UPAP_AUTHACK) { + pcb->upap.us_serverstate = UPAPSS_OPEN; + ppp_notice("PAP peer authentication succeeded for %q", rhostname); + auth_peer_success(pcb, PPP_PAP, 0, ruser, ruserlen); + } else { + pcb->upap.us_serverstate = UPAPSS_BADAUTH; + ppp_warn("PAP peer authentication failed for %q", rhostname); + auth_peer_fail(pcb, PPP_PAP); + } + + if (pcb->settings.pap_req_timeout > 0) + UNTIMEOUT(upap_reqtimeout, pcb); +} +#endif /* PPP_SERVER */ + +/* + * upap_rauthack - Receive Authenticate-Ack. 
+ */ +static void upap_rauthack(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char msglen; + char *msg; + LWIP_UNUSED_ARG(id); + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) /* XXX */ + return; + + /* + * Parse message. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauthack: ignoring missing msg-length.")); + } else { + GETCHAR(msglen, inp); + if (msglen > 0) { + len -= sizeof (u_char); + if (len < msglen) { + UPAPDEBUG(("pap_rauthack: rcvd short packet.")); + return; + } + msg = (char *) inp; + PRINTMSG(msg, msglen); + } + } + + pcb->upap.us_clientstate = UPAPCS_OPEN; + + auth_withpeer_success(pcb, PPP_PAP, 0); +} + + +/* + * upap_rauthnak - Receive Authenticate-Nak. + */ +static void upap_rauthnak(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char msglen; + char *msg; + LWIP_UNUSED_ARG(id); + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) /* XXX */ + return; + + /* + * Parse message. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauthnak: ignoring missing msg-length.")); + } else { + GETCHAR(msglen, inp); + if (msglen > 0) { + len -= sizeof (u_char); + if (len < msglen) { + UPAPDEBUG(("pap_rauthnak: rcvd short packet.")); + return; + } + msg = (char *) inp; + PRINTMSG(msg, msglen); + } + } + + pcb->upap.us_clientstate = UPAPCS_BADAUTH; + + ppp_error("PAP authentication failed"); + auth_withpeer_fail(pcb, PPP_PAP); +} + + +/* + * upap_sauthreq - Send an Authenticate-Request. + */ +static void upap_sauthreq(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + int outlen; + + outlen = UPAP_HEADERLEN + 2 * sizeof (u_char) + + pcb->upap.us_userlen + pcb->upap.us_passwdlen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +outlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_PAP); + + PUTCHAR(UPAP_AUTHREQ, outp); + PUTCHAR(++pcb->upap.us_id, outp); + PUTSHORT(outlen, outp); + PUTCHAR(pcb->upap.us_userlen, outp); + MEMCPY(outp, pcb->upap.us_user, pcb->upap.us_userlen); + INCPTR(pcb->upap.us_userlen, outp); + PUTCHAR(pcb->upap.us_passwdlen, outp); + MEMCPY(outp, pcb->upap.us_passwd, pcb->upap.us_passwdlen); + + ppp_write(pcb, p); + + TIMEOUT(upap_timeout, pcb, pcb->settings.pap_timeout_time); + ++pcb->upap.us_transmits; + pcb->upap.us_clientstate = UPAPCS_AUTHREQ; +} + +#if PPP_SERVER +/* + * upap_sresp - Send a response (ack or nak). + */ +static void upap_sresp(ppp_pcb *pcb, u_char code, u_char id, const char *msg, int msglen) { + struct pbuf *p; + u_char *outp; + int outlen; + + outlen = UPAP_HEADERLEN + sizeof (u_char) + msglen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +outlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_PAP); + + PUTCHAR(code, outp); + PUTCHAR(id, outp); + PUTSHORT(outlen, outp); + PUTCHAR(msglen, outp); + MEMCPY(outp, msg, msglen); + + ppp_write(pcb, p); +} +#endif /* PPP_SERVER */ + +#if PRINTPKT_SUPPORT +/* + * upap_printpkt - print the contents of a PAP packet. 
+ */ +static const char* const upap_codenames[] = { + "AuthReq", "AuthAck", "AuthNak" +}; + +static int upap_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len; + int mlen, ulen, wlen; + const u_char *user, *pwd, *msg; + const u_char *pstart; + + if (plen < UPAP_HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < UPAP_HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(upap_codenames)) + printer(arg, " %s", upap_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= UPAP_HEADERLEN; + switch (code) { + case UPAP_AUTHREQ: + if (len < 1) + break; + ulen = p[0]; + if (len < ulen + 2) + break; + wlen = p[ulen + 1]; + if (len < ulen + wlen + 2) + break; + user = (const u_char *) (p + 1); + pwd = (const u_char *) (p + ulen + 2); + p += ulen + wlen + 2; + len -= ulen + wlen + 2; + printer(arg, " user="); + ppp_print_string(user, ulen, printer, arg); + printer(arg, " password="); +/* FIXME: require ppp_pcb struct as printpkt() argument */ +#if 0 + if (!pcb->settings.hide_password) +#endif + ppp_print_string(pwd, wlen, printer, arg); +#if 0 + else + printer(arg, ""); +#endif + break; + case UPAP_AUTHACK: + case UPAP_AUTHNAK: + if (len < 1) + break; + mlen = p[0]; + if (len < mlen + 1) + break; + msg = (const u_char *) (p + 1); + p += mlen + 1; + len -= mlen + 1; + printer(arg, " "); + ppp_print_string(msg, mlen, printer, arg); + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && PAP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c new file mode 100644 index 000000000..f1366dae4 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c @@ -0,0 +1,957 @@ +/* + * utils.c - various utility functions used in pppd. + * + * Copyright (c) 1999-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef SVR4 +#include +#endif +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" + +#if defined(SUNOS4) +extern char *strerror(); +#endif + +static void ppp_logit(int level, const char *fmt, va_list args); +static void ppp_log_write(int level, char *buf); +#if PRINTPKT_SUPPORT +static void ppp_vslp_printer(void *arg, const char *fmt, ...); +static void ppp_format_packet(const u_char *p, int len, + void (*printer) (void *, const char *, ...), void *arg); + +struct buffer_info { + char *ptr; + int len; +}; +#endif /* PRINTPKT_SUPPORT */ + +/* + * ppp_strlcpy - like strcpy/strncpy, doesn't overflow destination buffer, + * always leaves destination null-terminated (for len > 0). + */ +size_t ppp_strlcpy(char *dest, const char *src, size_t len) { + size_t ret = strlen(src); + + if (len != 0) { + if (ret < len) + strcpy(dest, src); + else { + strncpy(dest, src, len - 1); + dest[len-1] = 0; + } + } + return ret; +} + +/* + * ppp_strlcat - like strcat/strncat, doesn't overflow destination buffer, + * always leaves destination null-terminated (for len > 0). + */ +size_t ppp_strlcat(char *dest, const char *src, size_t len) { + size_t dlen = strlen(dest); + + return dlen + ppp_strlcpy(dest + dlen, src, (len > dlen? len - dlen: 0)); +} + + +/* + * ppp_slprintf - format a message into a buffer. Like sprintf except we + * also specify the length of the output buffer, and we handle + * %m (error message), %v (visible string), + * %q (quoted string), %t (current time) and %I (IP address) formats. + * Doesn't do floating-point formats. + * Returns the number of chars put into buf. + */ +int ppp_slprintf(char *buf, int buflen, const char *fmt, ...) { + va_list args; + int n; + + va_start(args, fmt); + n = ppp_vslprintf(buf, buflen, fmt, args); + va_end(args); + return n; +} + +/* + * ppp_vslprintf - like ppp_slprintf, takes a va_list instead of a list of args. + */ +#define OUTCHAR(c) (buflen > 0? 
(--buflen, *buf++ = (c)): 0) + +int ppp_vslprintf(char *buf, int buflen, const char *fmt, va_list args) { + int c, i, n; + int width, prec, fillch; + int base, len, neg, quoted; + unsigned long val = 0; + const char *f; + char *str, *buf0; + const unsigned char *p; + char num[32]; +#if 0 /* need port */ + time_t t; +#endif /* need port */ + u32_t ip; + static char hexchars[] = "0123456789abcdef"; +#if PRINTPKT_SUPPORT + struct buffer_info bufinfo; +#endif /* PRINTPKT_SUPPORT */ + + buf0 = buf; + --buflen; + while (buflen > 0) { + for (f = fmt; *f != '%' && *f != 0; ++f) + ; + if (f > fmt) { + len = f - fmt; + if (len > buflen) + len = buflen; + memcpy(buf, fmt, len); + buf += len; + buflen -= len; + fmt = f; + } + if (*fmt == 0) + break; + c = *++fmt; + width = 0; + prec = -1; + fillch = ' '; + if (c == '0') { + fillch = '0'; + c = *++fmt; + } + if (c == '*') { + width = va_arg(args, int); + c = *++fmt; + } else { + while (lwip_isdigit(c)) { + width = width * 10 + c - '0'; + c = *++fmt; + } + } + if (c == '.') { + c = *++fmt; + if (c == '*') { + prec = va_arg(args, int); + c = *++fmt; + } else { + prec = 0; + while (lwip_isdigit(c)) { + prec = prec * 10 + c - '0'; + c = *++fmt; + } + } + } + str = 0; + base = 0; + neg = 0; + ++fmt; + switch (c) { + case 'l': + c = *fmt++; + switch (c) { + case 'd': + val = va_arg(args, long); + if ((long)val < 0) { + neg = 1; + val = (unsigned long)-(long)val; + } + base = 10; + break; + case 'u': + val = va_arg(args, unsigned long); + base = 10; + break; + default: + OUTCHAR('%'); + OUTCHAR('l'); + --fmt; /* so %lz outputs %lz etc. */ + continue; + } + break; + case 'd': + i = va_arg(args, int); + if (i < 0) { + neg = 1; + val = -i; + } else + val = i; + base = 10; + break; + case 'u': + val = va_arg(args, unsigned int); + base = 10; + break; + case 'o': + val = va_arg(args, unsigned int); + base = 8; + break; + case 'x': + case 'X': + val = va_arg(args, unsigned int); + base = 16; + break; +#if 0 /* unused (and wrong on LLP64 systems) */ + case 'p': + val = (unsigned long) va_arg(args, void *); + base = 16; + neg = 2; + break; +#endif /* unused (and wrong on LLP64 systems) */ + case 's': + str = va_arg(args, char *); + break; + case 'c': + num[0] = va_arg(args, int); + num[1] = 0; + str = num; + break; +#if 0 /* do we always have strerror() in embedded ? */ + case 'm': + str = strerror(errno); + break; +#endif /* do we always have strerror() in embedded ? 
*/ + case 'I': + ip = va_arg(args, u32_t); + ip = lwip_ntohl(ip); + ppp_slprintf(num, sizeof(num), "%d.%d.%d.%d", (ip >> 24) & 0xff, + (ip >> 16) & 0xff, (ip >> 8) & 0xff, ip & 0xff); + str = num; + break; +#if 0 /* need port */ + case 't': + time(&t); + str = ctime(&t); + str += 4; /* chop off the day name */ + str[15] = 0; /* chop off year and newline */ + break; +#endif /* need port */ + case 'v': /* "visible" string */ + case 'q': /* quoted string */ + quoted = c == 'q'; + p = va_arg(args, unsigned char *); + if (p == NULL) + p = (const unsigned char *)""; + if (fillch == '0' && prec >= 0) { + n = prec; + } else { + n = strlen((const char *)p); + if (prec >= 0 && n > prec) + n = prec; + } + while (n > 0 && buflen > 0) { + c = *p++; + --n; + if (!quoted && c >= 0x80) { + OUTCHAR('M'); + OUTCHAR('-'); + c -= 0x80; + } + if (quoted && (c == '"' || c == '\\')) + OUTCHAR('\\'); + if (c < 0x20 || (0x7f <= c && c < 0xa0)) { + if (quoted) { + OUTCHAR('\\'); + switch (c) { + case '\t': OUTCHAR('t'); break; + case '\n': OUTCHAR('n'); break; + case '\b': OUTCHAR('b'); break; + case '\f': OUTCHAR('f'); break; + default: + OUTCHAR('x'); + OUTCHAR(hexchars[c >> 4]); + OUTCHAR(hexchars[c & 0xf]); + } + } else { + if (c == '\t') + OUTCHAR(c); + else { + OUTCHAR('^'); + OUTCHAR(c ^ 0x40); + } + } + } else + OUTCHAR(c); + } + continue; +#if PRINTPKT_SUPPORT + case 'P': /* print PPP packet */ + bufinfo.ptr = buf; + bufinfo.len = buflen + 1; + p = va_arg(args, unsigned char *); + n = va_arg(args, int); + ppp_format_packet(p, n, ppp_vslp_printer, &bufinfo); + buf = bufinfo.ptr; + buflen = bufinfo.len - 1; + continue; +#endif /* PRINTPKT_SUPPORT */ + case 'B': + p = va_arg(args, unsigned char *); + for (n = prec; n > 0; --n) { + c = *p++; + if (fillch == ' ') + OUTCHAR(' '); + OUTCHAR(hexchars[(c >> 4) & 0xf]); + OUTCHAR(hexchars[c & 0xf]); + } + continue; + default: + *buf++ = '%'; + if (c != '%') + --fmt; /* so %z outputs %z etc. */ + --buflen; + continue; + } + if (base != 0) { + str = num + sizeof(num); + *--str = 0; + while (str > num + neg) { + *--str = hexchars[val % base]; + val = val / base; + if (--prec <= 0 && val == 0) + break; + } + switch (neg) { + case 1: + *--str = '-'; + break; + case 2: + *--str = 'x'; + *--str = '0'; + break; + default: + break; + } + len = num + sizeof(num) - 1 - str; + } else { + len = strlen(str); + if (prec >= 0 && len > prec) + len = prec; + } + if (width > 0) { + if (width > buflen) + width = buflen; + if ((n = width - len) > 0) { + buflen -= n; + for (; n > 0; --n) + *buf++ = fillch; + } + } + if (len > buflen) + len = buflen; + memcpy(buf, str, len); + buf += len; + buflen -= len; + } + *buf = 0; + return buf - buf0; +} + +#if PRINTPKT_SUPPORT +/* + * vslp_printer - used in processing a %P format + */ +static void ppp_vslp_printer(void *arg, const char *fmt, ...) { + int n; + va_list pvar; + struct buffer_info *bi; + + va_start(pvar, fmt); + bi = (struct buffer_info *) arg; + n = ppp_vslprintf(bi->ptr, bi->len, fmt, pvar); + va_end(pvar); + + bi->ptr += n; + bi->len -= n; +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* UNUSED */ +/* + * log_packet - format a packet and log it. + */ + +void +log_packet(p, len, prefix, level) + u_char *p; + int len; + char *prefix; + int level; +{ + init_pr_log(prefix, level); + ppp_format_packet(p, len, pr_log, &level); + end_pr_log(); +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ppp_format_packet - make a readable representation of a packet, + * calling `printer(arg, format, ...)' to output it. 
+ */ +static void ppp_format_packet(const u_char *p, int len, + void (*printer) (void *, const char *, ...), void *arg) { + int i, n; + u_short proto; + const struct protent *protp; + + if (len >= 2) { + GETSHORT(proto, p); + len -= 2; + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (proto == protp->protocol) + break; + if (protp != NULL) { + printer(arg, "[%s", protp->name); + n = (*protp->printpkt)(p, len, printer, arg); + printer(arg, "]"); + p += n; + len -= n; + } else { + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (proto == (protp->protocol & ~0x8000)) + break; + if (protp != 0 && protp->data_name != 0) { + printer(arg, "[%s data]", protp->data_name); + if (len > 8) + printer(arg, "%.8B ...", p); + else + printer(arg, "%.*B", len, p); + len = 0; + } else + printer(arg, "[proto=0x%x]", proto); + } + } + + if (len > 32) + printer(arg, "%.32B ...", p); + else + printer(arg, "%.*B", len, p); +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* UNUSED */ +/* + * init_pr_log, end_pr_log - initialize and finish use of pr_log. + */ + +static char line[256]; /* line to be logged accumulated here */ +static char *linep; /* current pointer within line */ +static int llevel; /* level for logging */ + +void +init_pr_log(prefix, level) + const char *prefix; + int level; +{ + linep = line; + if (prefix != NULL) { + ppp_strlcpy(line, prefix, sizeof(line)); + linep = line + strlen(line); + } + llevel = level; +} + +void +end_pr_log() +{ + if (linep != line) { + *linep = 0; + ppp_log_write(llevel, line); + } +} + +/* + * pr_log - printer routine for outputting to log + */ +void +pr_log (void *arg, const char *fmt, ...) +{ + int l, n; + va_list pvar; + char *p, *eol; + char buf[256]; + + va_start(pvar, fmt); + n = ppp_vslprintf(buf, sizeof(buf), fmt, pvar); + va_end(pvar); + + p = buf; + eol = strchr(buf, '\n'); + if (linep != line) { + l = (eol == NULL)? n: eol - buf; + if (linep + l < line + sizeof(line)) { + if (l > 0) { + memcpy(linep, buf, l); + linep += l; + } + if (eol == NULL) + return; + p = eol + 1; + eol = strchr(p, '\n'); + } + *linep = 0; + ppp_log_write(llevel, line); + linep = line; + } + + while (eol != NULL) { + *eol = 0; + ppp_log_write(llevel, p); + p = eol + 1; + eol = strchr(p, '\n'); + } + + /* assumes sizeof(buf) <= sizeof(line) */ + l = buf + n - p; + if (l > 0) { + memcpy(line, p, n); + linep = line + l; + } +} +#endif /* UNUSED */ + +/* + * ppp_print_string - print a readable representation of a string using + * printer. + */ +void ppp_print_string(const u_char *p, int len, void (*printer) (void *, const char *, ...), void *arg) { + int c; + + printer(arg, "\""); + for (; len > 0; --len) { + c = *p++; + if (' ' <= c && c <= '~') { + if (c == '\\' || c == '"') + printer(arg, "\\"); + printer(arg, "%c", c); + } else { + switch (c) { + case '\n': + printer(arg, "\\n"); + break; + case '\r': + printer(arg, "\\r"); + break; + case '\t': + printer(arg, "\\t"); + break; + default: + printer(arg, "\\%.3o", (u8_t)c); + /* no break */ + } + } + } + printer(arg, "\""); +} + +/* + * ppp_logit - does the hard work for fatal et al. 
+ */ +static void ppp_logit(int level, const char *fmt, va_list args) { + char buf[1024]; + + ppp_vslprintf(buf, sizeof(buf), fmt, args); + ppp_log_write(level, buf); +} + +static void ppp_log_write(int level, char *buf) { + LWIP_UNUSED_ARG(level); /* necessary if PPPDEBUG is defined to an empty function */ + LWIP_UNUSED_ARG(buf); + PPPDEBUG(level, ("%s\n", buf) ); +#if 0 + if (log_to_fd >= 0 && (level != LOG_DEBUG || debug)) { + int n = strlen(buf); + + if (n > 0 && buf[n-1] == '\n') + --n; + if (write(log_to_fd, buf, n) != n + || write(log_to_fd, "\n", 1) != 1) + log_to_fd = -1; + } +#endif +} + +/* + * ppp_fatal - log an error message and die horribly. + */ +void ppp_fatal(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_ERR, fmt, pvar); + va_end(pvar); + + LWIP_ASSERT("ppp_fatal", 0); /* as promised */ +} + +/* + * ppp_error - log an error message. + */ +void ppp_error(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_ERR, fmt, pvar); + va_end(pvar); +#if 0 /* UNUSED */ + ++error_count; +#endif /* UNUSED */ +} + +/* + * ppp_warn - log a warning message. + */ +void ppp_warn(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_WARNING, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_notice - log a notice-level message. + */ +void ppp_notice(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_NOTICE, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_info - log an informational message. + */ +void ppp_info(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_INFO, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_dbglog - log a debug message. + */ +void ppp_dbglog(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_DEBUG, fmt, pvar); + va_end(pvar); +} + +#if PRINTPKT_SUPPORT +/* + * ppp_dump_packet - print out a packet in readable form if it is interesting. + * Assumes len >= PPP_HDRLEN. + */ +void ppp_dump_packet(ppp_pcb *pcb, const char *tag, unsigned char *p, int len) { + int proto; + + /* + * don't print data packets, i.e. IPv4, IPv6, VJ, and compressed packets. + */ + proto = (p[0] << 8) + p[1]; + if (proto < 0xC000 && (proto & ~0x8000) == proto) + return; + + /* + * don't print valid LCP echo request/reply packets if the link is up. + */ + if (proto == PPP_LCP && pcb->phase == PPP_PHASE_RUNNING && len >= 2 + HEADERLEN) { + unsigned char *lcp = p + 2; + int l = (lcp[2] << 8) + lcp[3]; + + if ((lcp[0] == ECHOREQ || lcp[0] == ECHOREP) + && l >= HEADERLEN && l <= len - 2) + return; + } + + ppp_dbglog("%s %P", tag, p, len); +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* Unused */ + +/* + * complete_read - read a full `count' bytes from fd, + * unless end-of-file or an error other than EINTR is encountered. + */ +ssize_t +complete_read(int fd, void *buf, size_t count) +{ + size_t done; + ssize_t nb; + char *ptr = buf; + + for (done = 0; done < count; ) { + nb = read(fd, ptr, count - done); + if (nb < 0) { + if (errno == EINTR) + continue; + return -1; + } + if (nb == 0) + break; + done += nb; + ptr += nb; + } + return done; +} + +/* Procedures for locking the serial device using a lock file. 
*/ +#ifndef LOCK_DIR +#ifdef __linux__ +#define LOCK_DIR "/var/lock" +#else +#ifdef SVR4 +#define LOCK_DIR "/var/spool/locks" +#else +#define LOCK_DIR "/var/spool/lock" +#endif +#endif +#endif /* LOCK_DIR */ + +static char lock_file[MAXPATHLEN]; + +/* + * lock - create a lock file for the named device + */ +int +lock(dev) + char *dev; +{ +#ifdef LOCKLIB + int result; + + result = mklock (dev, (void *) 0); + if (result == 0) { + ppp_strlcpy(lock_file, dev, sizeof(lock_file)); + return 0; + } + + if (result > 0) + ppp_notice("Device %s is locked by pid %d", dev, result); + else + ppp_error("Can't create lock file %s", lock_file); + return -1; + +#else /* LOCKLIB */ + + char lock_buffer[12]; + int fd, pid, n; + +#ifdef SVR4 + struct stat sbuf; + + if (stat(dev, &sbuf) < 0) { + ppp_error("Can't get device number for %s: %m", dev); + return -1; + } + if ((sbuf.st_mode & S_IFMT) != S_IFCHR) { + ppp_error("Can't lock %s: not a character device", dev); + return -1; + } + ppp_slprintf(lock_file, sizeof(lock_file), "%s/LK.%03d.%03d.%03d", + LOCK_DIR, major(sbuf.st_dev), + major(sbuf.st_rdev), minor(sbuf.st_rdev)); +#else + char *p; + char lockdev[MAXPATHLEN]; + + if ((p = strstr(dev, "dev/")) != NULL) { + dev = p + 4; + strncpy(lockdev, dev, MAXPATHLEN-1); + lockdev[MAXPATHLEN-1] = 0; + while ((p = strrchr(lockdev, '/')) != NULL) { + *p = '_'; + } + dev = lockdev; + } else + if ((p = strrchr(dev, '/')) != NULL) + dev = p + 1; + + ppp_slprintf(lock_file, sizeof(lock_file), "%s/LCK..%s", LOCK_DIR, dev); +#endif + + while ((fd = open(lock_file, O_EXCL | O_CREAT | O_RDWR, 0644)) < 0) { + if (errno != EEXIST) { + ppp_error("Can't create lock file %s: %m", lock_file); + break; + } + + /* Read the lock file to find out who has the device locked. */ + fd = open(lock_file, O_RDONLY, 0); + if (fd < 0) { + if (errno == ENOENT) /* This is just a timing problem. */ + continue; + ppp_error("Can't open existing lock file %s: %m", lock_file); + break; + } +#ifndef LOCK_BINARY + n = read(fd, lock_buffer, 11); +#else + n = read(fd, &pid, sizeof(pid)); +#endif /* LOCK_BINARY */ + close(fd); + fd = -1; + if (n <= 0) { + ppp_error("Can't read pid from lock file %s", lock_file); + break; + } + + /* See if the process still exists. */ +#ifndef LOCK_BINARY + lock_buffer[n] = 0; + pid = atoi(lock_buffer); +#endif /* LOCK_BINARY */ + if (pid == getpid()) + return 1; /* somebody else locked it for us */ + if (pid == 0 + || (kill(pid, 0) == -1 && errno == ESRCH)) { + if (unlink (lock_file) == 0) { + ppp_notice("Removed stale lock on %s (pid %d)", dev, pid); + continue; + } + ppp_warn("Couldn't remove stale lock on %s", dev); + } else + ppp_notice("Device %s is locked by pid %d", dev, pid); + break; + } + + if (fd < 0) { + lock_file[0] = 0; + return -1; + } + + pid = getpid(); +#ifndef LOCK_BINARY + ppp_slprintf(lock_buffer, sizeof(lock_buffer), "%10d\n", pid); + write (fd, lock_buffer, 11); +#else + write(fd, &pid, sizeof (pid)); +#endif + close(fd); + return 0; + +#endif +} + +/* + * relock - called to update our lockfile when we are about to detach, + * thus changing our pid (we fork, the child carries on, and the parent dies). + * Note that this is called by the parent, with pid equal to the pid + * of the child. This avoids a potential race which would exist if + * we had the child rewrite the lockfile (the parent might die first, + * and another process could think the lock was stale if it checked + * between when the parent died and the child rewrote the lockfile). 
+ */ +int +relock(pid) + int pid; +{ +#ifdef LOCKLIB + /* XXX is there a way to do this? */ + return -1; +#else /* LOCKLIB */ + + int fd; + char lock_buffer[12]; + + if (lock_file[0] == 0) + return -1; + fd = open(lock_file, O_WRONLY, 0); + if (fd < 0) { + ppp_error("Couldn't reopen lock file %s: %m", lock_file); + lock_file[0] = 0; + return -1; + } + +#ifndef LOCK_BINARY + ppp_slprintf(lock_buffer, sizeof(lock_buffer), "%10d\n", pid); + write (fd, lock_buffer, 11); +#else + write(fd, &pid, sizeof(pid)); +#endif /* LOCK_BINARY */ + close(fd); + return 0; + +#endif /* LOCKLIB */ +} + +/* + * unlock - remove our lockfile + */ +void +unlock() +{ + if (lock_file[0]) { +#ifdef LOCKLIB + (void) rmlock(lock_file, (void *) 0); +#else + unlink(lock_file); +#endif + lock_file[0] = 0; + } +} + +#endif /* Unused */ + +#endif /* PPP_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c new file mode 100644 index 000000000..3fecba699 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c @@ -0,0 +1,685 @@ +/* + * Routines to compress and uncompess tcp packets (for transmission + * over low speed serial lines. + * + * Copyright (c) 1989 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: + * Initial distribution. + * + * Modified June 1993 by Paul Mackerras, paulus@cs.anu.edu.au, + * so that the entire packet being decompressed doesn't have + * to be in contiguous memory (just the compressed header). + * + * Modified March 1998 by Guy Lancaster, glanca@gesn.com, + * for a 16 bit processor. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && VJ_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppdebug.h" + +#include "netif/ppp/vj.h" + +#include + +#if LINK_STATS +#define INCR(counter) ++comp->stats.counter +#else +#define INCR(counter) +#endif + +void +vj_compress_init(struct vjcompress *comp) +{ + u8_t i; + struct cstate *tstate = comp->tstate; + +#if MAX_SLOTS == 0 + memset((char *)comp, 0, sizeof(*comp)); +#endif + comp->maxSlotIndex = MAX_SLOTS - 1; + comp->compressSlot = 0; /* Disable slot ID compression by default. */ + for (i = MAX_SLOTS - 1; i > 0; --i) { + tstate[i].cs_id = i; + tstate[i].cs_next = &tstate[i - 1]; + } + tstate[0].cs_next = &tstate[MAX_SLOTS - 1]; + tstate[0].cs_id = 0; + comp->last_cs = &tstate[0]; + comp->last_recv = 255; + comp->last_xmit = 255; + comp->flags = VJF_TOSS; +} + + +/* ENCODE encodes a number that is known to be non-zero. ENCODEZ + * checks for zero (since zero has to be encoded in the long, 3 byte + * form). 
+ */ +#define ENCODE(n) { \ + if ((u16_t)(n) >= 256) { \ + *cp++ = 0; \ + cp[1] = (u8_t)(n); \ + cp[0] = (u8_t)((n) >> 8); \ + cp += 2; \ + } else { \ + *cp++ = (u8_t)(n); \ + } \ +} +#define ENCODEZ(n) { \ + if ((u16_t)(n) >= 256 || (u16_t)(n) == 0) { \ + *cp++ = 0; \ + cp[1] = (u8_t)(n); \ + cp[0] = (u8_t)((n) >> 8); \ + cp += 2; \ + } else { \ + *cp++ = (u8_t)(n); \ + } \ +} + +#define DECODEL(f) { \ + if (*cp == 0) {\ + u32_t tmp_ = lwip_ntohl(f) + ((cp[1] << 8) | cp[2]); \ + (f) = lwip_htonl(tmp_); \ + cp += 3; \ + } else { \ + u32_t tmp_ = lwip_ntohl(f) + (u32_t)*cp++; \ + (f) = lwip_htonl(tmp_); \ + } \ +} + +#define DECODES(f) { \ + if (*cp == 0) {\ + u16_t tmp_ = lwip_ntohs(f) + (((u16_t)cp[1] << 8) | cp[2]); \ + (f) = lwip_htons(tmp_); \ + cp += 3; \ + } else { \ + u16_t tmp_ = lwip_ntohs(f) + (u16_t)*cp++; \ + (f) = lwip_htons(tmp_); \ + } \ +} + +#define DECODEU(f) { \ + if (*cp == 0) {\ + (f) = lwip_htons(((u16_t)cp[1] << 8) | cp[2]); \ + cp += 3; \ + } else { \ + (f) = lwip_htons((u16_t)*cp++); \ + } \ +} + +/* Helper structures for unaligned *u32_t and *u16_t accesses */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct vj_u32_t { + PACK_STRUCT_FIELD(u32_t v); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct vj_u16_t { + PACK_STRUCT_FIELD(u16_t v); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* + * vj_compress_tcp - Attempt to do Van Jacobson header compression on a + * packet. This assumes that nb and comp are not null and that the first + * buffer of the chain contains a valid IP header. + * Return the VJ type code indicating whether or not the packet was + * compressed. + */ +u8_t +vj_compress_tcp(struct vjcompress *comp, struct pbuf **pb) +{ + struct pbuf *np = *pb; + struct ip_hdr *ip = (struct ip_hdr *)np->payload; + struct cstate *cs = comp->last_cs->cs_next; + u16_t ilen = IPH_HL(ip); + u16_t hlen; + struct tcp_hdr *oth; + struct tcp_hdr *th; + u16_t deltaS, deltaA = 0; + u32_t deltaL; + u32_t changes = 0; + u8_t new_seq[16]; + u8_t *cp = new_seq; + + /* + * Check that the packet is IP proto TCP. + */ + if (IPH_PROTO(ip) != IP_PROTO_TCP) { + return (TYPE_IP); + } + + /* + * Bail if this is an IP fragment or if the TCP packet isn't + * `compressible' (i.e., ACK isn't set or some other control bit is + * set). + */ + if ((IPH_OFFSET(ip) & PP_HTONS(0x3fff)) || np->tot_len < 40) { + return (TYPE_IP); + } + th = (struct tcp_hdr *)&((struct vj_u32_t*)ip)[ilen]; + if ((TCPH_FLAGS(th) & (TCP_SYN|TCP_FIN|TCP_RST|TCP_ACK)) != TCP_ACK) { + return (TYPE_IP); + } + + /* Check that the TCP/IP headers are contained in the first buffer. */ + hlen = ilen + TCPH_HDRLEN(th); + hlen <<= 2; + if (np->len < hlen) { + PPPDEBUG(LOG_INFO, ("vj_compress_tcp: header len %d spans buffers\n", hlen)); + return (TYPE_IP); + } + + /* TCP stack requires that we don't change the packet payload, therefore we copy + * the whole packet before compression. */ + np = pbuf_clone(PBUF_RAW, PBUF_RAM, *pb); + if (!np) { + return (TYPE_IP); + } + + *pb = np; + ip = (struct ip_hdr *)np->payload; + + /* + * Packet is compressible -- we're going to send either a + * COMPRESSED_TCP or UNCOMPRESSED_TCP packet. Either way we need + * to locate (or create) the connection state. 
Special case the + * most recently used connection since it's most likely to be used + * again & we don't have to do any reordering if it's used. + */ + INCR(vjs_packets); + if (!ip4_addr_cmp(&ip->src, &cs->cs_ip.src) + || !ip4_addr_cmp(&ip->dest, &cs->cs_ip.dest) + || (*(struct vj_u32_t*)th).v != (((struct vj_u32_t*)&cs->cs_ip)[IPH_HL(&cs->cs_ip)]).v) { + /* + * Wasn't the first -- search for it. + * + * States are kept in a circularly linked list with + * last_cs pointing to the end of the list. The + * list is kept in lru order by moving a state to the + * head of the list whenever it is referenced. Since + * the list is short and, empirically, the connection + * we want is almost always near the front, we locate + * states via linear search. If we don't find a state + * for the datagram, the oldest state is (re-)used. + */ + struct cstate *lcs; + struct cstate *lastcs = comp->last_cs; + + do { + lcs = cs; cs = cs->cs_next; + INCR(vjs_searches); + if (ip4_addr_cmp(&ip->src, &cs->cs_ip.src) + && ip4_addr_cmp(&ip->dest, &cs->cs_ip.dest) + && (*(struct vj_u32_t*)th).v == (((struct vj_u32_t*)&cs->cs_ip)[IPH_HL(&cs->cs_ip)]).v) { + goto found; + } + } while (cs != lastcs); + + /* + * Didn't find it -- re-use oldest cstate. Send an + * uncompressed packet that tells the other side what + * connection number we're using for this conversation. + * Note that since the state list is circular, the oldest + * state points to the newest and we only need to set + * last_cs to update the lru linkage. + */ + INCR(vjs_misses); + comp->last_cs = lcs; + goto uncompressed; + + found: + /* + * Found it -- move to the front on the connection list. + */ + if (cs == lastcs) { + comp->last_cs = lcs; + } else { + lcs->cs_next = cs->cs_next; + cs->cs_next = lastcs->cs_next; + lastcs->cs_next = cs; + } + } + + oth = (struct tcp_hdr *)&((struct vj_u32_t*)&cs->cs_ip)[ilen]; + deltaS = ilen; + + /* + * Make sure that only what we expect to change changed. The first + * line of the `if' checks the IP protocol version, header length & + * type of service. The 2nd line checks the "Don't fragment" bit. + * The 3rd line checks the time-to-live and protocol (the protocol + * check is unnecessary but costless). The 4th line checks the TCP + * header length. The 5th line checks IP options, if any. The 6th + * line checks TCP options, if any. If any of these things are + * different between the previous & current datagram, we send the + * current datagram `uncompressed'. + */ + if ((((struct vj_u16_t*)ip)[0]).v != (((struct vj_u16_t*)&cs->cs_ip)[0]).v + || (((struct vj_u16_t*)ip)[3]).v != (((struct vj_u16_t*)&cs->cs_ip)[3]).v + || (((struct vj_u16_t*)ip)[4]).v != (((struct vj_u16_t*)&cs->cs_ip)[4]).v + || TCPH_HDRLEN(th) != TCPH_HDRLEN(oth) + || (deltaS > 5 && BCMP(ip + 1, &cs->cs_ip + 1, (deltaS - 5) << 2)) + || (TCPH_HDRLEN(th) > 5 && BCMP(th + 1, oth + 1, (TCPH_HDRLEN(th) - 5) << 2))) { + goto uncompressed; + } + + /* + * Figure out which of the changing fields changed. The + * receiver expects changes in the order: urgent, window, + * ack, seq (the order minimizes the number of temporaries + * needed in this section of code). + */ + if (TCPH_FLAGS(th) & TCP_URG) { + deltaS = lwip_ntohs(th->urgp); + ENCODEZ(deltaS); + changes |= NEW_U; + } else if (th->urgp != oth->urgp) { + /* argh! URG not set but urp changed -- a sensible + * implementation should never do this but RFC793 + * doesn't prohibit the change so we have to deal + * with it. 
*/ + goto uncompressed; + } + + if ((deltaS = (u16_t)(lwip_ntohs(th->wnd) - lwip_ntohs(oth->wnd))) != 0) { + ENCODE(deltaS); + changes |= NEW_W; + } + + if ((deltaL = lwip_ntohl(th->ackno) - lwip_ntohl(oth->ackno)) != 0) { + if (deltaL > 0xffff) { + goto uncompressed; + } + deltaA = (u16_t)deltaL; + ENCODE(deltaA); + changes |= NEW_A; + } + + if ((deltaL = lwip_ntohl(th->seqno) - lwip_ntohl(oth->seqno)) != 0) { + if (deltaL > 0xffff) { + goto uncompressed; + } + deltaS = (u16_t)deltaL; + ENCODE(deltaS); + changes |= NEW_S; + } + + switch(changes) { + case 0: + /* + * Nothing changed. If this packet contains data and the + * last one didn't, this is probably a data packet following + * an ack (normal on an interactive connection) and we send + * it compressed. Otherwise it's probably a retransmit, + * retransmitted ack or window probe. Send it uncompressed + * in case the other side missed the compressed version. + */ + if (IPH_LEN(ip) != IPH_LEN(&cs->cs_ip) && + lwip_ntohs(IPH_LEN(&cs->cs_ip)) == hlen) { + break; + } + /* no break */ + /* fall through */ + + case SPECIAL_I: + case SPECIAL_D: + /* + * actual changes match one of our special case encodings -- + * send packet uncompressed. + */ + goto uncompressed; + + case NEW_S|NEW_A: + if (deltaS == deltaA && deltaS == lwip_ntohs(IPH_LEN(&cs->cs_ip)) - hlen) { + /* special case for echoed terminal traffic */ + changes = SPECIAL_I; + cp = new_seq; + } + break; + + case NEW_S: + if (deltaS == lwip_ntohs(IPH_LEN(&cs->cs_ip)) - hlen) { + /* special case for data xfer */ + changes = SPECIAL_D; + cp = new_seq; + } + break; + default: + break; + } + + deltaS = (u16_t)(lwip_ntohs(IPH_ID(ip)) - lwip_ntohs(IPH_ID(&cs->cs_ip))); + if (deltaS != 1) { + ENCODEZ(deltaS); + changes |= NEW_I; + } + if (TCPH_FLAGS(th) & TCP_PSH) { + changes |= TCP_PUSH_BIT; + } + /* + * Grab the cksum before we overwrite it below. Then update our + * state with this packet's header. + */ + deltaA = lwip_ntohs(th->chksum); + MEMCPY(&cs->cs_ip, ip, hlen); + + /* + * We want to use the original packet as our compressed packet. + * (cp - new_seq) is the number of bytes we need for compressed + * sequence numbers. In addition we need one byte for the change + * mask, one for the connection id and two for the tcp checksum. + * So, (cp - new_seq) + 4 bytes of header are needed. hlen is how + * many bytes of the original packet to toss so subtract the two to + * get the new packet size. + */ + deltaS = (u16_t)(cp - new_seq); + if (!comp->compressSlot || comp->last_xmit != cs->cs_id) { + comp->last_xmit = cs->cs_id; + hlen -= deltaS + 4; + if (pbuf_remove_header(np, hlen)){ + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + } + cp = (u8_t*)np->payload; + *cp++ = (u8_t)(changes | NEW_C); + *cp++ = cs->cs_id; + } else { + hlen -= deltaS + 3; + if (pbuf_remove_header(np, hlen)) { + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + } + cp = (u8_t*)np->payload; + *cp++ = (u8_t)changes; + } + *cp++ = (u8_t)(deltaA >> 8); + *cp++ = (u8_t)deltaA; + MEMCPY(cp, new_seq, deltaS); + INCR(vjs_compressed); + return (TYPE_COMPRESSED_TCP); + + /* + * Update connection state cs & send uncompressed packet (that is, + * a regular ip/tcp packet but with the 'conversation id' we hope + * to use on future compressed packets in the protocol field). 
+ */ +uncompressed: + MEMCPY(&cs->cs_ip, ip, hlen); + IPH_PROTO_SET(ip, cs->cs_id); + comp->last_xmit = cs->cs_id; + return (TYPE_UNCOMPRESSED_TCP); +} + +/* + * Called when we may have missed a packet. + */ +void +vj_uncompress_err(struct vjcompress *comp) +{ + comp->flags |= VJF_TOSS; + INCR(vjs_errorin); +} + +/* + * "Uncompress" a packet of type TYPE_UNCOMPRESSED_TCP. + * Return 0 on success, -1 on failure. + */ +int +vj_uncompress_uncomp(struct pbuf *nb, struct vjcompress *comp) +{ + u32_t hlen; + struct cstate *cs; + struct ip_hdr *ip; + + ip = (struct ip_hdr *)nb->payload; + hlen = IPH_HL(ip) << 2; + if (IPH_PROTO(ip) >= MAX_SLOTS + || hlen + sizeof(struct tcp_hdr) > nb->len + || (hlen += TCPH_HDRLEN_BYTES((struct tcp_hdr *)&((char *)ip)[hlen])) + > nb->len + || hlen > MAX_HDR) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_uncomp: bad cid=%d, hlen=%d buflen=%d\n", + IPH_PROTO(ip), hlen, nb->len)); + vj_uncompress_err(comp); + return -1; + } + cs = &comp->rstate[comp->last_recv = IPH_PROTO(ip)]; + comp->flags &=~ VJF_TOSS; + IPH_PROTO_SET(ip, IP_PROTO_TCP); + /* copy from/to bigger buffers checked above instead of cs->cs_ip and ip + just to help static code analysis to see this is correct ;-) */ + MEMCPY(&cs->cs_hdr, nb->payload, hlen); + cs->cs_hlen = (u16_t)hlen; + INCR(vjs_uncompressedin); + return 0; +} + +/* + * Uncompress a packet of type TYPE_COMPRESSED_TCP. + * The packet is composed of a buffer chain and the first buffer + * must contain an accurate chain length. + * The first buffer must include the entire compressed TCP/IP header. + * This procedure replaces the compressed header with the uncompressed + * header and returns the length of the VJ header. + */ +int +vj_uncompress_tcp(struct pbuf **nb, struct vjcompress *comp) +{ + u8_t *cp; + struct tcp_hdr *th; + struct cstate *cs; + struct vj_u16_t *bp; + struct pbuf *n0 = *nb; + u32_t tmp; + u32_t vjlen, hlen, changes; + + INCR(vjs_compressedin); + cp = (u8_t*)n0->payload; + changes = *cp++; + if (changes & NEW_C) { + /* + * Make sure the state index is in range, then grab the state. + * If we have a good state index, clear the 'discard' flag. + */ + if (*cp >= MAX_SLOTS) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: bad cid=%d\n", *cp)); + goto bad; + } + + comp->flags &=~ VJF_TOSS; + comp->last_recv = *cp++; + } else { + /* + * this packet has an implicit state index. If we've + * had a line error since the last time we got an + * explicit state index, we have to toss the packet. + */ + if (comp->flags & VJF_TOSS) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: tossing\n")); + INCR(vjs_tossed); + return (-1); + } + } + cs = &comp->rstate[comp->last_recv]; + hlen = IPH_HL(&cs->cs_ip) << 2; + th = (struct tcp_hdr *)&((u8_t*)&cs->cs_ip)[hlen]; + th->chksum = lwip_htons((*cp << 8) | cp[1]); + cp += 2; + if (changes & TCP_PUSH_BIT) { + TCPH_SET_FLAG(th, TCP_PSH); + } else { + TCPH_UNSET_FLAG(th, TCP_PSH); + } + + switch (changes & SPECIALS_MASK) { + case SPECIAL_I: + { + u32_t i = lwip_ntohs(IPH_LEN(&cs->cs_ip)) - cs->cs_hlen; + /* some compilers can't nest inline assembler.. */ + tmp = lwip_ntohl(th->ackno) + i; + th->ackno = lwip_htonl(tmp); + tmp = lwip_ntohl(th->seqno) + i; + th->seqno = lwip_htonl(tmp); + } + break; + + case SPECIAL_D: + /* some compilers can't nest inline assembler.. 
*/ + tmp = lwip_ntohl(th->seqno) + lwip_ntohs(IPH_LEN(&cs->cs_ip)) - cs->cs_hlen; + th->seqno = lwip_htonl(tmp); + break; + + default: + if (changes & NEW_U) { + TCPH_SET_FLAG(th, TCP_URG); + DECODEU(th->urgp); + } else { + TCPH_UNSET_FLAG(th, TCP_URG); + } + if (changes & NEW_W) { + DECODES(th->wnd); + } + if (changes & NEW_A) { + DECODEL(th->ackno); + } + if (changes & NEW_S) { + DECODEL(th->seqno); + } + break; + } + if (changes & NEW_I) { + DECODES(cs->cs_ip._id); + } else { + IPH_ID_SET(&cs->cs_ip, lwip_ntohs(IPH_ID(&cs->cs_ip)) + 1); + IPH_ID_SET(&cs->cs_ip, lwip_htons(IPH_ID(&cs->cs_ip))); + } + + /* + * At this point, cp points to the first byte of data in the + * packet. Fill in the IP total length and update the IP + * header checksum. + */ + vjlen = (u16_t)(cp - (u8_t*)n0->payload); + if (n0->len < vjlen) { + /* + * We must have dropped some characters (crc should detect + * this but the old slip framing won't) + */ + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: head buffer %d too short %d\n", + n0->len, vjlen)); + goto bad; + } + +#if BYTE_ORDER == LITTLE_ENDIAN + tmp = n0->tot_len - vjlen + cs->cs_hlen; + IPH_LEN_SET(&cs->cs_ip, lwip_htons((u16_t)tmp)); +#else + IPH_LEN_SET(&cs->cs_ip, lwip_htons(n0->tot_len - vjlen + cs->cs_hlen)); +#endif + + /* recompute the ip header checksum */ + bp = (struct vj_u16_t*) &cs->cs_ip; + IPH_CHKSUM_SET(&cs->cs_ip, 0); + for (tmp = 0; hlen > 0; hlen -= 2) { + tmp += (*bp++).v; + } + tmp = (tmp & 0xffff) + (tmp >> 16); + tmp = (tmp & 0xffff) + (tmp >> 16); + IPH_CHKSUM_SET(&cs->cs_ip, (u16_t)(~tmp)); + + /* Remove the compressed header and prepend the uncompressed header. */ + if (pbuf_remove_header(n0, vjlen)) { + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto bad; + } + + if(LWIP_MEM_ALIGN(n0->payload) != n0->payload) { + struct pbuf *np; + +#if IP_FORWARD + /* If IP forwarding is enabled we are using a PBUF_LINK packet type so + * the packet is being allocated with enough header space to be + * forwarded (to Ethernet for example). + */ + np = pbuf_alloc(PBUF_LINK, n0->len + cs->cs_hlen, PBUF_POOL); +#else /* IP_FORWARD */ + np = pbuf_alloc(PBUF_RAW, n0->len + cs->cs_hlen, PBUF_POOL); +#endif /* IP_FORWARD */ + if(!np) { + PPPDEBUG(LOG_WARNING, ("vj_uncompress_tcp: realign failed\n")); + goto bad; + } + + if (pbuf_remove_header(np, cs->cs_hlen)) { + /* Can we cope with this failing? 
Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto bad; + } + + pbuf_take(np, n0->payload, n0->len); + + if(n0->next) { + pbuf_chain(np, n0->next); + pbuf_dechain(n0); + } + pbuf_free(n0); + n0 = np; + } + + if (pbuf_add_header(n0, cs->cs_hlen)) { + struct pbuf *np; + + LWIP_ASSERT("vj_uncompress_tcp: cs->cs_hlen <= PBUF_POOL_BUFSIZE", cs->cs_hlen <= PBUF_POOL_BUFSIZE); + np = pbuf_alloc(PBUF_RAW, cs->cs_hlen, PBUF_POOL); + if(!np) { + PPPDEBUG(LOG_WARNING, ("vj_uncompress_tcp: prepend failed\n")); + goto bad; + } + pbuf_cat(np, n0); + n0 = np; + } + LWIP_ASSERT("n0->len >= cs->cs_hlen", n0->len >= cs->cs_hlen); + MEMCPY(n0->payload, &cs->cs_ip, cs->cs_hlen); + + *nb = n0; + + return vjlen; + +bad: + vj_uncompress_err(comp); + return (-1); +} + +#endif /* PPP_SUPPORT && VJ_SUPPORT */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/slipif.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/slipif.c new file mode 100644 index 000000000..9b175dc3e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/slipif.c @@ -0,0 +1,558 @@ +/** + * @file + * SLIP Interface + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is built upon the file: src/arch/rtxc/netif/sioslip.c + * + * Author: Magnus Ivarsson + * Simon Goldschmidt + */ + + +/** + * @defgroup slipif SLIP + * @ingroup netifs + * + * This is an arch independent SLIP netif. The specific serial hooks must be + * provided by another file. They are sio_open, sio_read/sio_tryread and sio_send + * + * Usage: This netif can be used in three ways:\n + * 1) For NO_SYS==0, an RX thread can be used which blocks on sio_read() + * until data is received.\n + * 2) In your main loop, call slipif_poll() to check for new RX bytes, + * completed packets are fed into netif->input().\n + * 3) Call slipif_received_byte[s]() from your serial RX ISR and + * slipif_process_rxqueue() from your main loop. 
ISR level decodes + * packets and puts completed packets on a queue which is fed into + * the stack from the main loop (needs SYS_LIGHTWEIGHT_PROT for + * pbuf_alloc to work on ISR level!). + * + */ + +#include "netif/slipif.h" +#include "lwip/opt.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/sys.h" +#include "lwip/sio.h" + +#define SLIP_END 0xC0 /* 0300: start and end of every packet */ +#define SLIP_ESC 0xDB /* 0333: escape start (one byte escaped data follows) */ +#define SLIP_ESC_END 0xDC /* 0334: following escape: original byte is 0xC0 (END) */ +#define SLIP_ESC_ESC 0xDD /* 0335: following escape: original byte is 0xDB (ESC) */ + +/** Maximum packet size that is received by this netif */ +#ifndef SLIP_MAX_SIZE +#define SLIP_MAX_SIZE 1500 +#endif + +/** Define this to the interface speed for SNMP + * (sio_fd is the sio_fd_t returned by sio_open). + * The default value of zero means 'unknown'. + */ +#ifndef SLIP_SIO_SPEED +#define SLIP_SIO_SPEED(sio_fd) 0 +#endif + +enum slipif_recv_state { + SLIP_RECV_NORMAL, + SLIP_RECV_ESCAPE +}; + +struct slipif_priv { + sio_fd_t sd; + /* q is the whole pbuf chain for a packet, p is the current pbuf in the chain */ + struct pbuf *p, *q; + u8_t state; + u16_t i, recved; +#if SLIP_RX_FROM_ISR + struct pbuf *rxpackets; +#endif +}; + +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output(struct netif *netif, struct pbuf *p) +{ + struct slipif_priv *priv; + struct pbuf *q; + u16_t i; + u8_t c; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + LWIP_ASSERT("p != NULL", (p != NULL)); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_output: sending %"U16_F" bytes\n", p->tot_len)); + priv = (struct slipif_priv *)netif->state; + + /* Send pbuf out on the serial I/O device. */ + /* Start with packet delimiter. */ + sio_send(SLIP_END, priv->sd); + + for (q = p; q != NULL; q = q->next) { + for (i = 0; i < q->len; i++) { + c = ((u8_t *)q->payload)[i]; + switch (c) { + case SLIP_END: + /* need to escape this byte (0xC0 -> 0xDB, 0xDC) */ + sio_send(SLIP_ESC, priv->sd); + sio_send(SLIP_ESC_END, priv->sd); + break; + case SLIP_ESC: + /* need to escape this byte (0xDB -> 0xDB, 0xDD) */ + sio_send(SLIP_ESC, priv->sd); + sio_send(SLIP_ESC_ESC, priv->sd); + break; + default: + /* normal byte - no need for escaping */ + sio_send(c, priv->sd); + break; + } + } + } + /* End with packet delimiter. 
*/ + sio_send(SLIP_END, priv->sd); + return ERR_OK; +} + +#if LWIP_IPV4 +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @param ipaddr the ip address to send the packet to (not used for slipif) + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output_v4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(ipaddr); + return slipif_output(netif, p); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @param ipaddr the ip address to send the packet to (not used for slipif) + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output_v6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(ipaddr); + return slipif_output(netif, p); +} +#endif /* LWIP_IPV6 */ + +/** + * Handle the incoming SLIP stream character by character + * + * @param netif the lwip network interface structure for this slipif + * @param c received character (multiple calls to this function will + * return a complete packet, NULL is returned before - used for polling) + * @return The IP packet when SLIP_END is received + */ +static struct pbuf * +slipif_rxbyte(struct netif *netif, u8_t c) +{ + struct slipif_priv *priv; + struct pbuf *t; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + switch (priv->state) { + case SLIP_RECV_NORMAL: + switch (c) { + case SLIP_END: + if (priv->recved > 0) { + /* Received whole packet. */ + /* Trim the pbuf to the size of the received packet. */ + pbuf_realloc(priv->q, priv->recved); + + LINK_STATS_INC(link.recv); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif: Got packet (%"U16_F" bytes)\n", priv->recved)); + t = priv->q; + priv->p = priv->q = NULL; + priv->i = priv->recved = 0; + return t; + } + return NULL; + case SLIP_ESC: + priv->state = SLIP_RECV_ESCAPE; + return NULL; + default: + break; + } /* end switch (c) */ + break; + case SLIP_RECV_ESCAPE: + /* un-escape END or ESC bytes, leave other bytes + (although that would be a protocol error) */ + switch (c) { + case SLIP_ESC_END: + c = SLIP_END; + break; + case SLIP_ESC_ESC: + c = SLIP_ESC; + break; + default: + break; + } + priv->state = SLIP_RECV_NORMAL; + break; + default: + break; + } /* end switch (priv->state) */ + + /* byte received, packet not yet completely received */ + if (priv->p == NULL) { + /* allocate a new pbuf */ + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_input: alloc\n")); + priv->p = pbuf_alloc(PBUF_LINK, (PBUF_POOL_BUFSIZE - PBUF_LINK_HLEN - PBUF_LINK_ENCAPSULATION_HLEN), PBUF_POOL); + + if (priv->p == NULL) { + LINK_STATS_INC(link.drop); + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_input: no new pbuf! 
(DROP)\n")); + /* don't process any further since we got no pbuf to receive to */ + return NULL; + } + + if (priv->q != NULL) { + /* 'chain' the pbuf to the existing chain */ + pbuf_cat(priv->q, priv->p); + } else { + /* p is the first pbuf in the chain */ + priv->q = priv->p; + } + } + + /* this automatically drops bytes if > SLIP_MAX_SIZE */ + if ((priv->p != NULL) && (priv->recved <= SLIP_MAX_SIZE)) { + ((u8_t *)priv->p->payload)[priv->i] = c; + priv->recved++; + priv->i++; + if (priv->i >= priv->p->len) { + /* on to the next pbuf */ + priv->i = 0; + if (priv->p->next != NULL && priv->p->next->len > 0) { + /* p is a chain, on to the next in the chain */ + priv->p = priv->p->next; + } else { + /* p is a single pbuf, set it to NULL so next time a new + * pbuf is allocated */ + priv->p = NULL; + } + } + } + return NULL; +} + +/** Like slipif_rxbyte, but passes completed packets to netif->input + * + * @param netif The lwip network interface structure for this slipif + * @param c received character + */ +static void +slipif_rxbyte_input(struct netif *netif, u8_t c) +{ + struct pbuf *p; + p = slipif_rxbyte(netif, c); + if (p != NULL) { + if (netif->input(p, netif) != ERR_OK) { + pbuf_free(p); + } + } +} + +#if SLIP_USE_RX_THREAD +/** + * The SLIP input thread. + * + * Feed the IP layer with incoming packets + * + * @param nf the lwip network interface structure for this slipif + */ +static void +slipif_loop_thread(void *nf) +{ + u8_t c; + struct netif *netif = (struct netif *)nf; + struct slipif_priv *priv = (struct slipif_priv *)netif->state; + + while (1) { + if (sio_read(priv->sd, &c, 1) > 0) { + slipif_rxbyte_input(netif, c); + } + } +} +#endif /* SLIP_USE_RX_THREAD */ + +/** + * @ingroup slipif + * SLIP netif initialization + * + * Call the arch specific sio_open and remember + * the opened device in the state field of the netif. + * + * @param netif the lwip network interface structure for this slipif + * @return ERR_OK if serial line could be opened, + * ERR_MEM if no memory could be allocated, + * ERR_IF is serial line couldn't be opened + * + * @note If netif->state is interpreted as an u8_t serial port number. + * + */ +err_t +slipif_init(struct netif *netif) +{ + struct slipif_priv *priv; + u8_t sio_num; + + LWIP_ASSERT("slipif needs an input callback", netif->input != NULL); + + /* netif->state contains serial port number */ + sio_num = LWIP_PTR_NUMERIC_CAST(u8_t, netif->state); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_init: netif->num=%"U16_F"\n", (u16_t)sio_num)); + + /* Allocate private data */ + priv = (struct slipif_priv *)mem_malloc(sizeof(struct slipif_priv)); + if (!priv) { + return ERR_MEM; + } + + netif->name[0] = 's'; + netif->name[1] = 'l'; +#if LWIP_IPV4 + netif->output = slipif_output_v4; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + netif->output_ip6 = slipif_output_v6; +#endif /* LWIP_IPV6 */ + netif->mtu = SLIP_MAX_SIZE; + + /* Try to open the serial port. */ + priv->sd = sio_open(sio_num); + if (!priv->sd) { + /* Opening the serial port failed. */ + mem_free(priv); + return ERR_IF; + } + + /* Initialize private data */ + priv->p = NULL; + priv->q = NULL; + priv->state = SLIP_RECV_NORMAL; + priv->i = 0; + priv->recved = 0; +#if SLIP_RX_FROM_ISR + priv->rxpackets = NULL; +#endif + + netif->state = priv; + + /* initialize the snmp variables and counters inside the struct netif */ + MIB2_INIT_NETIF(netif, snmp_ifType_slip, SLIP_SIO_SPEED(priv->sd)); + +#if SLIP_USE_RX_THREAD + /* Create a thread to poll the serial line. 
*/ + sys_thread_new(SLIPIF_THREAD_NAME, slipif_loop_thread, netif, + SLIPIF_THREAD_STACKSIZE, SLIPIF_THREAD_PRIO); +#endif /* SLIP_USE_RX_THREAD */ + return ERR_OK; +} + +/** + * @ingroup slipif + * Polls the serial device and feeds the IP layer with incoming packets. + * + * @param netif The lwip network interface structure for this slipif + */ +void +slipif_poll(struct netif *netif) +{ + u8_t c; + struct slipif_priv *priv; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + while (sio_tryread(priv->sd, &c, 1) > 0) { + slipif_rxbyte_input(netif, c); + } +} + +#if SLIP_RX_FROM_ISR +/** + * @ingroup slipif + * Feeds the IP layer with incoming packets that were receive + * + * @param netif The lwip network interface structure for this slipif + */ +void +slipif_process_rxqueue(struct netif *netif) +{ + struct slipif_priv *priv; + SYS_ARCH_DECL_PROTECT(old_level); + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + SYS_ARCH_PROTECT(old_level); + while (priv->rxpackets != NULL) { + struct pbuf *p = priv->rxpackets; +#if SLIP_RX_QUEUE + /* dequeue packet */ + struct pbuf *q = p; + while ((q->len != q->tot_len) && (q->next != NULL)) { + q = q->next; + } + priv->rxpackets = q->next; + q->next = NULL; +#else /* SLIP_RX_QUEUE */ + priv->rxpackets = NULL; +#endif /* SLIP_RX_QUEUE */ + SYS_ARCH_UNPROTECT(old_level); + if (netif->input(p, netif) != ERR_OK) { + pbuf_free(p); + } + SYS_ARCH_PROTECT(old_level); + } + SYS_ARCH_UNPROTECT(old_level); +} + +/** Like slipif_rxbyte, but queues completed packets. + * + * @param netif The lwip network interface structure for this slipif + * @param data Received serial byte + */ +static void +slipif_rxbyte_enqueue(struct netif *netif, u8_t data) +{ + struct pbuf *p; + struct slipif_priv *priv = (struct slipif_priv *)netif->state; + SYS_ARCH_DECL_PROTECT(old_level); + + p = slipif_rxbyte(netif, data); + if (p != NULL) { + SYS_ARCH_PROTECT(old_level); + if (priv->rxpackets != NULL) { +#if SLIP_RX_QUEUE + /* queue multiple pbufs */ + struct pbuf *q = p; + while (q->next != NULL) { + q = q->next; + } + q->next = p; + } else { +#else /* SLIP_RX_QUEUE */ + pbuf_free(priv->rxpackets); + } + { +#endif /* SLIP_RX_QUEUE */ + priv->rxpackets = p; + } + SYS_ARCH_UNPROTECT(old_level); + } +} + +/** + * @ingroup slipif + * Process a received byte, completed packets are put on a queue that is + * fed into IP through slipif_process_rxqueue(). + * + * This function can be called from ISR if SYS_LIGHTWEIGHT_PROT is enabled. + * + * @param netif The lwip network interface structure for this slipif + * @param data received character + */ +void +slipif_received_byte(struct netif *netif, u8_t data) +{ + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + slipif_rxbyte_enqueue(netif, data); +} + +/** + * @ingroup slipif + * Process multiple received byte, completed packets are put on a queue that is + * fed into IP through slipif_process_rxqueue(). + * + * This function can be called from ISR if SYS_LIGHTWEIGHT_PROT is enabled. 
+ * + * @param netif The lwip network interface structure for this slipif + * @param data received character + * @param len Number of received characters + */ +void +slipif_received_bytes(struct netif *netif, u8_t *data, u8_t len) +{ + u8_t i; + u8_t *rxdata = data; + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + for (i = 0; i < len; i++, rxdata++) { + slipif_rxbyte_enqueue(netif, *rxdata); + } +} +#endif /* SLIP_RX_FROM_ISR */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/zepif.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/zepif.c new file mode 100644 index 000000000..b4033030b --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/src/netif/zepif.c @@ -0,0 +1,300 @@ +/** + * @file + * + * @defgroup zepif ZEP - ZigBee Encapsulation Protocol + * @ingroup netifs + * A netif implementing the ZigBee Encapsulation Protocol (ZEP). + * This is used to tunnel 6LowPAN over UDP. + * + * Usage (there must be a default netif before!): + * @code{.c} + * netif_add(&zep_netif, NULL, NULL, NULL, NULL, zepif_init, tcpip_6lowpan_input); + * netif_create_ip6_linklocal_address(&zep_netif, 1); + * netif_set_up(&zep_netif); + * netif_set_link_up(&zep_netif); + * @endcode + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ + +#include "netif/zepif.h" + +#if LWIP_IPV6 && LWIP_UDP + +#include "netif/lowpan6.h" +#include "lwip/udp.h" +#include "lwip/timeouts.h" +#include + +/** Define this to 1 to loop back TX packets for testing */ +#ifndef ZEPIF_LOOPBACK +#define ZEPIF_LOOPBACK 0 +#endif + +#define ZEP_MAX_DATA_LEN 127 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct zep_hdr { + PACK_STRUCT_FLD_8(u8_t prot_id[2]); + PACK_STRUCT_FLD_8(u8_t prot_version); + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t channel_id); + PACK_STRUCT_FIELD(u16_t device_id); + PACK_STRUCT_FLD_8(u8_t crc_mode); + PACK_STRUCT_FLD_8(u8_t unknown_1); + PACK_STRUCT_FIELD(u32_t timestamp[2]); + PACK_STRUCT_FIELD(u32_t seq_num); + PACK_STRUCT_FLD_8(u8_t unknown_2[10]); + PACK_STRUCT_FLD_8(u8_t len); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +struct zepif_state { + struct zepif_init init; + struct udp_pcb *pcb; + u32_t seqno; +}; + +static u8_t zep_lowpan_timer_running; + +/* Helper function that calls the 6LoWPAN timer and reschedules itself */ +static void +zep_lowpan_timer(void *arg) +{ + lowpan6_tmr(); + if (zep_lowpan_timer_running) { + sys_timeout(LOWPAN6_TMR_INTERVAL, zep_lowpan_timer, arg); + } +} + +/* Pass received pbufs into 6LowPAN netif */ +static void +zepif_udp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port) +{ + err_t err; + struct netif *netif_lowpan6 = (struct netif *)arg; + struct zep_hdr *zep; + + LWIP_ASSERT("arg != NULL", arg != NULL); + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_UNUSED_ARG(pcb); /* for LWIP_NOASSERT */ + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + if (p == NULL) { + return; + } + + /* Parse and hide the ZEP header */ + if (p->len < sizeof(struct zep_hdr)) { + /* need the zep_hdr in one piece */ + goto err_return; + } + zep = (struct zep_hdr *)p->payload; + if (zep->prot_id[0] != 'E') { + goto err_return; + } + if (zep->prot_id[1] != 'X') { + goto err_return; + } + if (zep->prot_version != 2) { + /* we only support this version for now */ + goto err_return; + } + if (zep->type != 1) { + goto err_return; + } + if (zep->crc_mode != 1) { + goto err_return; + } + if (zep->len != p->tot_len - sizeof(struct zep_hdr)) { + goto err_return; + } + /* everything seems to be OK, hide the ZEP header */ + if (pbuf_remove_header(p, sizeof(struct zep_hdr))) { + goto err_return; + } + /* TODO Check CRC? */ + /* remove CRC trailer */ + pbuf_realloc(p, p->tot_len - 2); + + /* Call into 6LoWPAN code. 
*/ + err = netif_lowpan6->input(p, netif_lowpan6); + if (err == ERR_OK) { + return; + } +err_return: + pbuf_free(p); +} + +/* Send 6LoWPAN TX packets as UDP broadcast */ +static err_t +zepif_linkoutput(struct netif *netif, struct pbuf *p) +{ + err_t err; + struct pbuf *q; + struct zep_hdr *zep; + struct zepif_state *state; + + LWIP_ASSERT("invalid netif", netif != NULL); + LWIP_ASSERT("invalid pbuf", p != NULL); + + if (p->tot_len > ZEP_MAX_DATA_LEN) { + return ERR_VAL; + } + LWIP_ASSERT("TODO: support chained pbufs", p->next == NULL); + + state = (struct zepif_state *)netif->state; + LWIP_ASSERT("state->pcb != NULL", state->pcb != NULL); + + q = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct zep_hdr) + p->tot_len, PBUF_RAM); + if (q == NULL) { + return ERR_MEM; + } + zep = (struct zep_hdr *)q->payload; + memset(zep, 0, sizeof(struct zep_hdr)); + zep->prot_id[0] = 'E'; + zep->prot_id[1] = 'X'; + zep->prot_version = 2; + zep->type = 1; /* Data */ + zep->channel_id = 0; /* whatever */ + zep->device_id = lwip_htons(1); /* whatever */ + zep->crc_mode = 1; + zep->unknown_1 = 0xff; + zep->seq_num = lwip_htonl(state->seqno); + state->seqno++; + zep->len = (u8_t)p->tot_len; + + err = pbuf_take_at(q, p->payload, p->tot_len, sizeof(struct zep_hdr)); + if (err == ERR_OK) { +#if ZEPIF_LOOPBACK + zepif_udp_recv(netif, state->pcb, pbuf_clone(PBUF_RAW, PBUF_RAM, q), NULL, 0); +#endif + err = udp_sendto(state->pcb, q, state->init.zep_dst_ip_addr, state->init.zep_dst_udp_port); + } + pbuf_free(q); + + return err; +} + +/** + * @ingroup zepif + * Set up a raw 6LowPAN netif and surround it with input- and output + * functions for ZEP + */ +err_t +zepif_init(struct netif *netif) +{ + err_t err; + struct zepif_init *init_state = (struct zepif_init *)netif->state; + struct zepif_state *state = (struct zepif_state *)mem_malloc(sizeof(struct zepif_state)); + + LWIP_ASSERT("zepif needs an input callback", netif->input != NULL); + + if (state == NULL) { + return ERR_MEM; + } + memset(state, 0, sizeof(struct zepif_state)); + if (init_state != NULL) { + memcpy(&state->init, init_state, sizeof(struct zepif_init)); + } + if (state->init.zep_src_udp_port == 0) { + state->init.zep_src_udp_port = ZEPIF_DEFAULT_UDP_PORT; + } + if (state->init.zep_dst_udp_port == 0) { + state->init.zep_dst_udp_port = ZEPIF_DEFAULT_UDP_PORT; + } +#if LWIP_IPV4 + if (state->init.zep_dst_ip_addr == NULL) { + /* With IPv4 enabled, default to broadcasting packets if no address is set */ + state->init.zep_dst_ip_addr = IP_ADDR_BROADCAST; + } +#endif /* LWIP_IPV4 */ + + netif->state = NULL; + + state->pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + if (state->pcb == NULL) { + err = ERR_MEM; + goto err_ret; + } + err = udp_bind(state->pcb, state->init.zep_src_ip_addr, state->init.zep_src_udp_port); + if (err != ERR_OK) { + goto err_ret; + } + if (state->init.zep_netif != NULL) { + udp_bind_netif(state->pcb, state->init.zep_netif); + } + LWIP_ASSERT("udp_bind(lowpan6_broadcast_pcb) failed", err == ERR_OK); + ip_set_option(state->pcb, SOF_BROADCAST); + udp_recv(state->pcb, zepif_udp_recv, netif); + + err = lowpan6_if_init(netif); + LWIP_ASSERT("lowpan6_if_init set a state", netif->state == NULL); + if (err == ERR_OK) { + netif->state = state; + netif->hwaddr_len = 6; + if (init_state != NULL) { + memcpy(netif->hwaddr, init_state->addr, 6); + } else { + u8_t i; + for (i = 0; i < 6; i++) { + netif->hwaddr[i] = i; + } + netif->hwaddr[0] &= 0xfc; + } + netif->linkoutput = zepif_linkoutput; + + if (!zep_lowpan_timer_running) { + sys_timeout(LOWPAN6_TMR_INTERVAL, 
zep_lowpan_timer, NULL); + zep_lowpan_timer_running = 1; + } + + return ERR_OK; + } + +err_ret: + if (state->pcb != NULL) { + udp_remove(state->pcb); + } + mem_free(state); + return err; +} + +#endif /* LWIP_IPV6 && LWIP_UDP */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/OS/sys_arch.c b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/OS/sys_arch.c new file mode 100644 index 000000000..65d9bf906 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/OS/sys_arch.c @@ -0,0 +1,512 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* lwIP includes. */ +#include "lwip/debug.h" +#include "lwip/def.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/stats.h" + +#if !NO_SYS + +#include "cmsis_os.h" + +#if defined(LWIP_PROVIDE_ERRNO) +int errno; +#endif + +/*-----------------------------------------------------------------------------------*/ +// Creates an empty mailbox. +err_t sys_mbox_new(sys_mbox_t *mbox, int size) +{ +#if (osCMSIS < 0x20000U) + osMessageQDef(QUEUE, size, void *); + *mbox = osMessageCreate(osMessageQ(QUEUE), NULL); +#else + *mbox = osMessageQueueNew(size, sizeof(void *), NULL); +#endif +#if SYS_STATS + ++lwip_stats.sys.mbox.used; + if(lwip_stats.sys.mbox.max < lwip_stats.sys.mbox.used) + { + lwip_stats.sys.mbox.max = lwip_stats.sys.mbox.used; + } +#endif /* SYS_STATS */ + if(*mbox == NULL) + return ERR_MEM; + + return ERR_OK; +} + +/*-----------------------------------------------------------------------------------*/ +/* + Deallocates a mailbox. If there are messages still present in the + mailbox when the mailbox is deallocated, it is an indication of a + programming error in lwIP and the developer should be notified. +*/ +void sys_mbox_free(sys_mbox_t *mbox) +{ +#if (osCMSIS < 0x20000U) + if(osMessageWaiting(*mbox)) +#else + if(osMessageQueueGetCount(*mbox)) +#endif + { + /* Line for breakpoint. Should never break here! 
*/ + portNOP(); +#if SYS_STATS + lwip_stats.sys.mbox.err++; +#endif /* SYS_STATS */ + + } +#if (osCMSIS < 0x20000U) + osMessageDelete(*mbox); +#else + osMessageQueueDelete(*mbox); +#endif +#if SYS_STATS + --lwip_stats.sys.mbox.used; +#endif /* SYS_STATS */ +} + +/*-----------------------------------------------------------------------------------*/ +// Posts the "msg" to the mailbox. +void sys_mbox_post(sys_mbox_t *mbox, void *data) +{ +#if (osCMSIS < 0x20000U) + while(osMessagePut(*mbox, (uint32_t)data, osWaitForever) != osOK); +#else + while(osMessageQueuePut(*mbox, &data, 0, osWaitForever) != osOK); +#endif +} + + +/*-----------------------------------------------------------------------------------*/ +// Try to post the "msg" to the mailbox. +err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg) +{ + err_t result; +#if (osCMSIS < 0x20000U) + if(osMessagePut(*mbox, (uint32_t)msg, 0) == osOK) +#else + if(osMessageQueuePut(*mbox, &msg, 0, 0) == osOK) +#endif + { + result = ERR_OK; + } + else + { + // could not post, queue must be full + result = ERR_MEM; + +#if SYS_STATS + lwip_stats.sys.mbox.err++; +#endif /* SYS_STATS */ + } + + return result; +} + + +/*-----------------------------------------------------------------------------------*/ +// Try to post the "msg" to the mailbox. +err_t sys_mbox_trypost_fromisr(sys_mbox_t *mbox, void *msg) +{ + return sys_mbox_trypost(mbox, msg); +} + +/*-----------------------------------------------------------------------------------*/ +/* + Blocks the thread until a message arrives in the mailbox, but does + not block the thread longer than "timeout" milliseconds (similar to + the sys_arch_sem_wait() function). The "msg" argument is a result + parameter that is set by the function (i.e., by doing "*msg = + ptr"). The "msg" parameter maybe NULL to indicate that the message + should be dropped. + + The return values are the same as for the sys_arch_sem_wait() function: + Number of milliseconds spent waiting or SYS_ARCH_TIMEOUT if there was a + timeout. + + Note that a function with a similar name, sys_mbox_fetch(), is + implemented by lwIP. +*/ +u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout) +{ +#if (osCMSIS < 0x20000U) + osEvent event; + uint32_t starttime = osKernelSysTick(); +#else + osStatus_t status; + uint32_t starttime = osKernelGetTickCount(); +#endif + if(timeout != 0) + { +#if (osCMSIS < 0x20000U) + event = osMessageGet (*mbox, timeout); + + if(event.status == osEventMessage) + { + *msg = (void *)event.value.v; + return (osKernelSysTick() - starttime); + } +#else + status = osMessageQueueGet(*mbox, msg, 0, timeout); + if (status == osOK) + { + return (osKernelGetTickCount() - starttime); + } +#endif + else + { + return SYS_ARCH_TIMEOUT; + } + } + else + { +#if (osCMSIS < 0x20000U) + event = osMessageGet (*mbox, osWaitForever); + *msg = (void *)event.value.v; + return (osKernelSysTick() - starttime); +#else + osMessageQueueGet(*mbox, msg, 0, osWaitForever ); + return (osKernelGetTickCount() - starttime); +#endif + } +} + +/*-----------------------------------------------------------------------------------*/ +/* + Similar to sys_arch_mbox_fetch, but if message is not ready immediately, we'll + return with SYS_MBOX_EMPTY. On success, 0 is returned. 
+*/ +u32_t sys_arch_mbox_tryfetch(sys_mbox_t *mbox, void **msg) +{ +#if (osCMSIS < 0x20000U) + osEvent event; + + event = osMessageGet (*mbox, 0); + + if(event.status == osEventMessage) + { + *msg = (void *)event.value.v; +#else + if (osMessageQueueGet(*mbox, msg, 0, 0) == osOK) + { +#endif + return ERR_OK; + } + else + { + return SYS_MBOX_EMPTY; + } +} +/*----------------------------------------------------------------------------------*/ +int sys_mbox_valid(sys_mbox_t *mbox) +{ + if (*mbox == SYS_MBOX_NULL) + return 0; + else + return 1; +} +/*-----------------------------------------------------------------------------------*/ +void sys_mbox_set_invalid(sys_mbox_t *mbox) +{ + *mbox = SYS_MBOX_NULL; +} + +/*-----------------------------------------------------------------------------------*/ +// Creates a new semaphore. The "count" argument specifies +// the initial state of the semaphore. +err_t sys_sem_new(sys_sem_t *sem, u8_t count) +{ +#if (osCMSIS < 0x20000U) + osSemaphoreDef(SEM); + *sem = osSemaphoreCreate (osSemaphore(SEM), 1); +#else + *sem = osSemaphoreNew(UINT16_MAX, count, NULL); +#endif + + if(*sem == NULL) + { +#if SYS_STATS + ++lwip_stats.sys.sem.err; +#endif /* SYS_STATS */ + return ERR_MEM; + } + + if(count == 0) // Means it can't be taken + { +#if (osCMSIS < 0x20000U) + osSemaphoreWait(*sem, 0); +#else + osSemaphoreAcquire(*sem, 0); +#endif + } + +#if SYS_STATS + ++lwip_stats.sys.sem.used; + if (lwip_stats.sys.sem.max < lwip_stats.sys.sem.used) { + lwip_stats.sys.sem.max = lwip_stats.sys.sem.used; + } +#endif /* SYS_STATS */ + + return ERR_OK; +} + +/*-----------------------------------------------------------------------------------*/ +/* + Blocks the thread while waiting for the semaphore to be + signaled. If the "timeout" argument is non-zero, the thread should + only be blocked for the specified time (measured in + milliseconds). + + If the timeout argument is non-zero, the return value is the number of + milliseconds spent waiting for the semaphore to be signaled. If the + semaphore wasn't signaled within the specified time, the return value is + SYS_ARCH_TIMEOUT. If the thread didn't have to wait for the semaphore + (i.e., it was already signaled), the function may return zero. + + Notice that lwIP implements a function with a similar name, + sys_sem_wait(), that uses the sys_arch_sem_wait() function. 
+*/ +u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout) +{ +#if (osCMSIS < 0x20000U) + uint32_t starttime = osKernelSysTick(); +#else + uint32_t starttime = osKernelGetTickCount(); +#endif + if(timeout != 0) + { +#if (osCMSIS < 0x20000U) + if(osSemaphoreWait (*sem, timeout) == osOK) + { + return (osKernelSysTick() - starttime); +#else + if(osSemaphoreAcquire(*sem, timeout) == osOK) + { + return (osKernelGetTickCount() - starttime); +#endif + } + else + { + return SYS_ARCH_TIMEOUT; + } + } + else + { +#if (osCMSIS < 0x20000U) + while(osSemaphoreWait (*sem, osWaitForever) != osOK); + return (osKernelSysTick() - starttime); +#else + while(osSemaphoreAcquire(*sem, osWaitForever) != osOK); + return (osKernelGetTickCount() - starttime); +#endif + } +} + +/*-----------------------------------------------------------------------------------*/ +// Signals a semaphore +void sys_sem_signal(sys_sem_t *sem) +{ + osSemaphoreRelease(*sem); +} + +/*-----------------------------------------------------------------------------------*/ +// Deallocates a semaphore +void sys_sem_free(sys_sem_t *sem) +{ +#if SYS_STATS + --lwip_stats.sys.sem.used; +#endif /* SYS_STATS */ + + osSemaphoreDelete(*sem); +} +/*-----------------------------------------------------------------------------------*/ +int sys_sem_valid(sys_sem_t *sem) +{ + if (*sem == SYS_SEM_NULL) + return 0; + else + return 1; +} + +/*-----------------------------------------------------------------------------------*/ +void sys_sem_set_invalid(sys_sem_t *sem) +{ + *sem = SYS_SEM_NULL; +} + +/*-----------------------------------------------------------------------------------*/ +#if (osCMSIS < 0x20000U) +osMutexId lwip_sys_mutex; +osMutexDef(lwip_sys_mutex); +#else +osMutexId_t lwip_sys_mutex; +#endif +// Initialize sys arch +void sys_init(void) +{ +#if (osCMSIS < 0x20000U) + lwip_sys_mutex = osMutexCreate(osMutex(lwip_sys_mutex)); +#else + lwip_sys_mutex = osMutexNew(NULL); +#endif +} +/*-----------------------------------------------------------------------------------*/ + /* Mutexes*/ +/*-----------------------------------------------------------------------------------*/ +/*-----------------------------------------------------------------------------------*/ +#if LWIP_COMPAT_MUTEX == 0 +/* Create a new mutex*/ +err_t sys_mutex_new(sys_mutex_t *mutex) { + +#if (osCMSIS < 0x20000U) + osMutexDef(MUTEX); + *mutex = osMutexCreate(osMutex(MUTEX)); +#else + *mutex = osMutexNew(NULL); +#endif + + if(*mutex == NULL) + { +#if SYS_STATS + ++lwip_stats.sys.mutex.err; +#endif /* SYS_STATS */ + return ERR_MEM; + } + +#if SYS_STATS + ++lwip_stats.sys.mutex.used; + if (lwip_stats.sys.mutex.max < lwip_stats.sys.mutex.used) { + lwip_stats.sys.mutex.max = lwip_stats.sys.mutex.used; + } +#endif /* SYS_STATS */ + return ERR_OK; +} +/*-----------------------------------------------------------------------------------*/ +/* Deallocate a mutex*/ +void sys_mutex_free(sys_mutex_t *mutex) +{ +#if SYS_STATS + --lwip_stats.sys.mutex.used; +#endif /* SYS_STATS */ + + osMutexDelete(*mutex); +} +/*-----------------------------------------------------------------------------------*/ +/* Lock a mutex*/ +void sys_mutex_lock(sys_mutex_t *mutex) +{ +#if (osCMSIS < 0x20000U) + osMutexWait(*mutex, osWaitForever); +#else + osMutexAcquire(*mutex, osWaitForever); +#endif +} + +/*-----------------------------------------------------------------------------------*/ +/* Unlock a mutex*/ +void sys_mutex_unlock(sys_mutex_t *mutex) +{ + osMutexRelease(*mutex); +} +#endif /*LWIP_COMPAT_MUTEX*/ 
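Editorial note: the functions above are the CMSIS-RTOS glue that lwIP's core calls through for mailboxes, semaphores and mutexes. A minimal, hypothetical sketch of lwIP-side code exercising these wrappers is shown below; it is not part of the generated port, and the demo_* names, stack size and priority are illustrative only.

/* Hypothetical producer/consumer pair built on the sys_arch wrappers above. */
#include "lwip/sys.h"
#include "cmsis_os.h"

static sys_mbox_t demo_mbox;

static void demo_producer(void *arg)
{
  (void)arg;
  for (;;) {
    /* sys_mbox_post() blocks until the underlying CMSIS queue accepts the message. */
    sys_mbox_post(&demo_mbox, (void *)42);
    osDelay(1000);
  }
}

static void demo_consumer(void *arg)
{
  void *msg;
  (void)arg;
  for (;;) {
    /* Waits up to 500 ms; SYS_ARCH_TIMEOUT means nothing arrived in time. */
    if (sys_arch_mbox_fetch(&demo_mbox, &msg, 500) != SYS_ARCH_TIMEOUT) {
      /* handle msg */
    }
  }
}

void demo_start(void)
{
  if (sys_mbox_new(&demo_mbox, 8) == ERR_OK) {
    sys_thread_new("demo_prod", demo_producer, NULL, 512, osPriorityNormal);
    sys_thread_new("demo_cons", demo_consumer, NULL, 512, osPriorityNormal);
  }
}

The two sys_thread_new() calls end up in sys_thread_new() defined just below, so the sketch runs unchanged on either the CMSIS-RTOS v1 or v2 path selected by the osCMSIS version checks in this file.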
+/*-----------------------------------------------------------------------------------*/ +// TODO +/*-----------------------------------------------------------------------------------*/ +/* + Starts a new thread with priority "prio" that will begin its execution in the + function "thread()". The "arg" argument will be passed as an argument to the + thread() function. The id of the new thread is returned. Both the id and + the priority are system dependent. +*/ +sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread , void *arg, int stacksize, int prio) +{ +#if (osCMSIS < 0x20000U) + const osThreadDef_t os_thread_def = { (char *)name, (os_pthread)thread, (osPriority)prio, 0, stacksize}; + return osThreadCreate(&os_thread_def, arg); +#else + const osThreadAttr_t attributes = { + .name = name, + .stack_size = stacksize, + .priority = (osPriority_t)prio, + }; + return osThreadNew(thread, arg, &attributes); +#endif +} + +/* + This optional function does a "fast" critical region protection and returns + the previous protection level. This function is only called during very short + critical regions. An embedded system which supports ISR-based drivers might + want to implement this function by disabling interrupts. Task-based systems + might want to implement this by using a mutex or disabling tasking. This + function should support recursive calls from the same task or interrupt. In + other words, sys_arch_protect() could be called while already protected. In + that case the return value indicates that it is already protected. + + sys_arch_protect() is only required if your port is supporting an operating + system. + + Note: This function is based on FreeRTOS API, because no equivalent CMSIS-RTOS + API is available +*/ +sys_prot_t sys_arch_protect(void) +{ +#if (osCMSIS < 0x20000U) + osMutexWait(lwip_sys_mutex, osWaitForever); +#else + osMutexAcquire(lwip_sys_mutex, osWaitForever); +#endif + return (sys_prot_t)1; +} + + +/* + This optional function does a "fast" set of critical region protection to the + value specified by pval. See the documentation for sys_arch_protect() for + more information. This function is only required if your port is supporting + an operating system. + + Note: This function is based on FreeRTOS API, because no equivalent CMSIS-RTOS + API is available +*/ +void sys_arch_unprotect(sys_prot_t pval) +{ + ( void ) pval; + osMutexRelease(lwip_sys_mutex); +} + +#endif /* !NO_SYS */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h new file mode 100644 index 000000000..177758c5e --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#if defined(__IAR_SYSTEMS_ICC__) +#pragma pack(1) +#endif + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/cc.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/cc.h new file mode 100644 index 000000000..3065318b3 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/cc.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef __CC_H__ +#define __CC_H__ + +#include "cpu.h" +#include <stdio.h> +#include <stdlib.h> + +typedef int sys_prot_t; + +#define LWIP_PROVIDE_ERRNO + +#if defined (__GNUC__) & !defined (__CC_ARM) + +#define LWIP_TIMEVAL_PRIVATE 0 +#include <sys/time.h> + +#endif + +/* define compiler specific symbols */ +#if defined (__ICCARM__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x +#define PACK_STRUCT_USE_INCLUDES + +#elif defined (__GNUC__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT __attribute__ ((__packed__)) +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#elif defined (__CC_ARM) + +#define PACK_STRUCT_BEGIN __packed +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#elif defined (__TASKING__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#endif + +#define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \ + x, __LINE__, __FILE__); } while(0) + +/* Define random number generator function */ +#define LWIP_RAND() ((u32_t)rand()) + +#endif /* __CC_H__ */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/cpu.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/cpu.h new file mode 100644 index 000000000..bfdf8811a --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/cpu.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef __CPU_H__ +#define __CPU_H__ + +#ifndef BYTE_ORDER +#define BYTE_ORDER LITTLE_ENDIAN +#endif + +#endif /* __CPU_H__ */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/epstruct.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/epstruct.h new file mode 100644 index 000000000..1e1a049cb --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/epstruct.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#if defined(__IAR_SYSTEMS_ICC__) +#pragma pack() +#endif + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/init.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/init.h new file mode 100644 index 000000000..e622c73ae --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/init.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __ARCH_INIT_H__ +#define __ARCH_INIT_H__ + +#define TCPIP_INIT_DONE(arg) tcpip_init_done(arg) + +void tcpip_init_done(void *); +int wait_for_tcpip_init(void); + +#endif /* __ARCH_INIT_H__ */ + + + + diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/lib.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/lib.h new file mode 100644 index 000000000..378f25b88 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/lib.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __LIB_H__ +#define __LIB_H__ + +#include + + +#endif /* __LIB_H__ */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/perf.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/perf.h new file mode 100644 index 000000000..334d42af8 --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/perf.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __PERF_H__ +#define __PERF_H__ + +#define PERF_START /* null definition */ +#define PERF_STOP(x) /* null definition */ + +#endif /* __PERF_H__ */ diff --git a/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h new file mode 100644 index 000000000..49bfd4ebc --- /dev/null +++ b/src/third_party/stm32_cubeide/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef __SYS_ARCH_H__ +#define __SYS_ARCH_H__ + +#include "lwip/opt.h" + +#if (NO_SYS != 0) +#error "NO_SYS need to be set to 0 to use threaded API" +#endif + +#include "cmsis_os.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if (osCMSIS < 0x20000U) + +#define SYS_MBOX_NULL (osMessageQId)0 +#define SYS_SEM_NULL (osSemaphoreId)0 + +typedef osSemaphoreId sys_sem_t; +typedef osSemaphoreId sys_mutex_t; +typedef osMessageQId sys_mbox_t; +typedef osThreadId sys_thread_t; +#else + +#define SYS_MBOX_NULL (osMessageQueueId_t)0 +#define SYS_SEM_NULL (osSemaphoreId_t)0 + +typedef osSemaphoreId_t sys_sem_t; +typedef osSemaphoreId_t sys_mutex_t; +typedef osMessageQueueId_t sys_mbox_t; +typedef osThreadId_t sys_thread_t; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __SYS_ARCH_H__ */ + diff --git a/src/third_party/stm32_cubeide/STM32F769IGTX_FLASH.ld b/src/third_party/stm32_cubeide/STM32F769IGTX_FLASH.ld new file mode 100644 index 000000000..620b01c62 --- /dev/null +++ b/src/third_party/stm32_cubeide/STM32F769IGTX_FLASH.ld @@ -0,0 +1,175 @@ +/** + ****************************************************************************** + * @file LinkerScript.ld + * @author Auto-generated by STM32CubeIDE + * @brief Linker script for STM32F769IGTx Device from STM32F7 series + * 1024Kbytes FLASH + * 512Kbytes RAM + * + * Set heap size, stack size and stack location according + * to application requirements. + * + * Set memory bank area and size if external memory is used + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Entry Point */ +ENTRY(Reset_Handler) + +/* Highest address of the user mode stack */ +_estack = ORIGIN(RAM) + LENGTH(RAM); /* end of "RAM" Ram type memory */ + +_Min_Heap_Size = 0x200; /* required amount of heap */ +_Min_Stack_Size = 0x400; /* required amount of stack */ + +/* Memories definition */ +MEMORY +{ + RAM (xrw) : ORIGIN = 0x20000000, LENGTH = 512K + FLASH (rx) : ORIGIN = 0x8000000, LENGTH = 2048K +} + +/* Sections */ +SECTIONS +{ + /* The startup code into "FLASH" Rom type memory */ + .isr_vector : + { + . = ALIGN(4); + KEEP(*(.isr_vector)) /* Startup code */ + . = ALIGN(4); + } >FLASH + + /* The program code and other data into "FLASH" Rom type memory */ + .text : + { + . = ALIGN(4); + *(.text) /* .text sections (code) */ + *(.text*) /* .text* sections (code) */ + *(.glue_7) /* glue arm to thumb code */ + *(.glue_7t) /* glue thumb to arm code */ + *(.eh_frame) + + KEEP (*(.init)) + KEEP (*(.fini)) + + . = ALIGN(4); + _etext = .; /* define a global symbols at end of code */ + } >FLASH + + /* Constant data into "FLASH" Rom type memory */ + .rodata : + { + . = ALIGN(4); + *(.rodata) /* .rodata sections (constants, strings, etc.) */ + *(.rodata*) /* .rodata* sections (constants, strings, etc.) */ + . = ALIGN(4); + } >FLASH + + .ARM.extab : { + . = ALIGN(4); + *(.ARM.extab* .gnu.linkonce.armextab.*) + . = ALIGN(4); + } >FLASH + + .ARM : { + . = ALIGN(4); + __exidx_start = .; + *(.ARM.exidx*) + __exidx_end = .; + . = ALIGN(4); + } >FLASH + + .preinit_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array*)) + PROVIDE_HIDDEN (__preinit_array_end = .); + . = ALIGN(4); + } >FLASH + + .init_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(SORT(.init_array.*))) + KEEP (*(.init_array*)) + PROVIDE_HIDDEN (__init_array_end = .); + . = ALIGN(4); + } >FLASH + + .fini_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(SORT(.fini_array.*))) + KEEP (*(.fini_array*)) + PROVIDE_HIDDEN (__fini_array_end = .); + . = ALIGN(4); + } >FLASH + + /* Used by the startup to initialize data */ + _sidata = LOADADDR(.data); + + /* Initialized data sections into "RAM" Ram type memory */ + .data : + { + . = ALIGN(4); + _sdata = .; /* create a global symbol at data start */ + *(.data) /* .data sections */ + *(.data*) /* .data* sections */ + + . = ALIGN(4); + _edata = .; /* define a global symbol at data end */ + + } >RAM AT> FLASH + + /* Uninitialized data section into "RAM" Ram type memory */ + . = ALIGN(4); + .bss : + { + /* This is used by the startup in order to initialize the .bss section */ + _sbss = .; /* define a global symbol at bss start */ + __bss_start__ = _sbss; + *(.bss) + *(.bss*) + *(COMMON) + + . = ALIGN(4); + _ebss = .; /* define a global symbol at bss end */ + __bss_end__ = _ebss; + } >RAM + + /* User_heap_stack section, used to check that there is enough "RAM" Ram type memory left */ + ._user_heap_stack : + { + . = ALIGN(8); + PROVIDE ( end = . ); + PROVIDE ( _end = . ); + . = . + _Min_Heap_Size; + . = . + _Min_Stack_Size; + . 
= ALIGN(8); + } >RAM + + /* Remove information from the compiler libraries */ + /DISCARD/ : + { + libc.a ( * ) + libm.a ( * ) + libgcc.a ( * ) + } + + .ARM.attributes 0 : { *(.ARM.attributes) } +} diff --git a/src/third_party/stm32_cubeide/STM32F769IGTX_RAM.ld b/src/third_party/stm32_cubeide/STM32F769IGTX_RAM.ld new file mode 100644 index 000000000..4690005bd --- /dev/null +++ b/src/third_party/stm32_cubeide/STM32F769IGTX_RAM.ld @@ -0,0 +1,175 @@ +/** + ****************************************************************************** + * @file LinkerScript.ld + * @author Auto-generated by STM32CubeIDE + * @brief Linker script for STM32F769IGTx Device from STM32F7 series + * 1024Kbytes FLASH + * 512Kbytes RAM + * + * Set heap size, stack size and stack location according + * to application requirements. + * + * Set memory bank area and size if external memory is used + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Entry Point */ +ENTRY(Reset_Handler) + +/* Highest address of the user mode stack */ +_estack = ORIGIN(RAM) + LENGTH(RAM); /* end of "RAM" Ram type memory */ + +_Min_Heap_Size = 0x200; /* required amount of heap */ +_Min_Stack_Size = 0x400; /* required amount of stack */ + +/* Memories definition */ +MEMORY +{ + RAM (xrw) : ORIGIN = 0x20000000, LENGTH = 512K + FLASH (rx) : ORIGIN = 0x8000000, LENGTH = 2048K +} + +/* Sections */ +SECTIONS +{ + /* The startup code into "RAM" Ram type memory */ + .isr_vector : + { + . = ALIGN(4); + KEEP(*(.isr_vector)) /* Startup code */ + . = ALIGN(4); + } >RAM + + /* The program code and other data into "RAM" Ram type memory */ + .text : + { + . = ALIGN(4); + *(.text) /* .text sections (code) */ + *(.text*) /* .text* sections (code) */ + *(.glue_7) /* glue arm to thumb code */ + *(.glue_7t) /* glue thumb to arm code */ + *(.eh_frame) + + KEEP (*(.init)) + KEEP (*(.fini)) + + . = ALIGN(4); + _etext = .; /* define a global symbols at end of code */ + } >RAM + + /* Constant data into "RAM" Ram type memory */ + .rodata : + { + . = ALIGN(4); + *(.rodata) /* .rodata sections (constants, strings, etc.) */ + *(.rodata*) /* .rodata* sections (constants, strings, etc.) */ + . = ALIGN(4); + } >RAM + + .ARM.extab : { + . = ALIGN(4); + *(.ARM.extab* .gnu.linkonce.armextab.*) + . = ALIGN(4); + } >RAM + + .ARM : { + . = ALIGN(4); + __exidx_start = .; + *(.ARM.exidx*) + __exidx_end = .; + . = ALIGN(4); + } >RAM + + .preinit_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array*)) + PROVIDE_HIDDEN (__preinit_array_end = .); + . = ALIGN(4); + } >RAM + + .init_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(SORT(.init_array.*))) + KEEP (*(.init_array*)) + PROVIDE_HIDDEN (__init_array_end = .); + . = ALIGN(4); + } >RAM + + .fini_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(SORT(.fini_array.*))) + KEEP (*(.fini_array*)) + PROVIDE_HIDDEN (__fini_array_end = .); + . = ALIGN(4); + } >RAM + + /* Used by the startup to initialize data */ + _sidata = LOADADDR(.data); + + /* Initialized data sections into "RAM" Ram type memory */ + .data : + { + . = ALIGN(4); + _sdata = .; /* create a global symbol at data start */ + *(.data) /* .data sections */ + *(.data*) /* .data* sections */ + + . = ALIGN(4); + _edata = .; /* define a global symbol at data end */ + + } >RAM + + /* Uninitialized data section into "RAM" Ram type memory */ + . = ALIGN(4); + .bss : + { + /* This is used by the startup in order to initialize the .bss section */ + _sbss = .; /* define a global symbol at bss start */ + __bss_start__ = _sbss; + *(.bss) + *(.bss*) + *(COMMON) + + . = ALIGN(4); + _ebss = .; /* define a global symbol at bss end */ + __bss_end__ = _ebss; + } >RAM + + /* User_heap_stack section, used to check that there is enough "RAM" Ram type memory left */ + ._user_heap_stack : + { + . = ALIGN(8); + PROVIDE ( end = . ); + PROVIDE ( _end = . ); + . = . + _Min_Heap_Size; + . = . + _Min_Stack_Size; + . 
= ALIGN(8); + } >RAM + + /* Remove information from the compiler libraries */ + /DISCARD/ : + { + libc.a ( * ) + libm.a ( * ) + libgcc.a ( * ) + } + + .ARM.attributes 0 : { *(.ARM.attributes) } +} diff --git a/src/third_party/stm32_cubeide/USB_DEVICE/App/usb_device.c b/src/third_party/stm32_cubeide/USB_DEVICE/App/usb_device.c new file mode 100644 index 000000000..f0a95c9e0 --- /dev/null +++ b/src/third_party/stm32_cubeide/USB_DEVICE/App/usb_device.c @@ -0,0 +1,102 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : usb_device.c + * @version : v1.0_Cube + * @brief : This file implements the USB Device + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ + +#include "usb_device.h" +#include "usbd_core.h" +#include "usbd_desc.h" +#include "usbd_cdc.h" +#include "usbd_cdc_if.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* USER CODE BEGIN PV */ +/* Private variables ---------------------------------------------------------*/ + +/* USER CODE END PV */ + +/* USER CODE BEGIN PFP */ +/* Private function prototypes -----------------------------------------------*/ + +/* USER CODE END PFP */ + +/* USB Device Core handle declaration. */ +USBD_HandleTypeDef hUsbDeviceFS; + +/* + * -- Insert your variables declaration here -- + */ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* + * -- Insert your external function declaration here -- + */ +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/** + * Init USB device Library, add supported class and start the library + * @retval None + */ +void MX_USB_DEVICE_Init(void) +{ + /* USER CODE BEGIN USB_DEVICE_Init_PreTreatment */ + + /* USER CODE END USB_DEVICE_Init_PreTreatment */ + + /* Init Device Library, add supported class and start the library. */ + if (USBD_Init(&hUsbDeviceFS, &FS_Desc, DEVICE_FS) != USBD_OK) + { + Error_Handler(); + } + if (USBD_RegisterClass(&hUsbDeviceFS, &USBD_CDC) != USBD_OK) + { + Error_Handler(); + } + if (USBD_CDC_RegisterInterface(&hUsbDeviceFS, &USBD_Interface_fops_FS) != USBD_OK) + { + Error_Handler(); + } + if (USBD_Start(&hUsbDeviceFS) != USBD_OK) + { + Error_Handler(); + } + + /* USER CODE BEGIN USB_DEVICE_Init_PostTreatment */ + + /* USER CODE END USB_DEVICE_Init_PostTreatment */ +} + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/USB_DEVICE/App/usb_device.h b/src/third_party/stm32_cubeide/USB_DEVICE/App/usb_device.h new file mode 100644 index 000000000..cdf1eeffc --- /dev/null +++ b/src/third_party/stm32_cubeide/USB_DEVICE/App/usb_device.h @@ -0,0 +1,105 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : usb_device.h + * @version : v1.0_Cube + * @brief : Header for usb_device.c file. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __USB_DEVICE__H__ +#define __USB_DEVICE__H__ + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx.h" +#include "stm32f7xx_hal.h" +#include "usbd_def.h" + +/* USER CODE BEGIN INCLUDE */ + +/* USER CODE END INCLUDE */ + +/** @addtogroup USBD_OTG_DRIVER + * @{ + */ + +/** @defgroup USBD_DEVICE USBD_DEVICE + * @brief Device file for Usb otg low level driver. + * @{ + */ + +/** @defgroup USBD_DEVICE_Exported_Variables USBD_DEVICE_Exported_Variables + * @brief Public variables. + * @{ + */ + +/* Private variables ---------------------------------------------------------*/ +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* + * -- Insert your variables declaration here -- + */ +/* USER CODE BEGIN VARIABLES */ + +/* USER CODE END VARIABLES */ +/** + * @} + */ + +/** @defgroup USBD_DEVICE_Exported_FunctionsPrototype USBD_DEVICE_Exported_FunctionsPrototype + * @brief Declaration of public functions for Usb device. + * @{ + */ + +/** USB Device initialization function. */ +void MX_USB_DEVICE_Init(void); + +/* + * -- Insert functions declaration here -- + */ +/* USER CODE BEGIN FD */ + +/* USER CODE END FD */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __USB_DEVICE__H__ */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_cdc_if.c b/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_cdc_if.c new file mode 100644 index 000000000..c32eedeb4 --- /dev/null +++ b/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_cdc_if.c @@ -0,0 +1,368 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : usbd_cdc_if.c + * @version : v1.0_Cube + * @brief : Usb device for Virtual Com Port. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_cdc_if.h" + +/* USER CODE BEGIN INCLUDE */ +#include +#include "cmsis_os.h" +/* USER CODE END INCLUDE */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ + +/* USER CODE BEGIN PV */ +/* Private variables ---------------------------------------------------------*/ +USBD_CDC_LineCodingTypeDef LineCoding = + { + 115200, /* baud rate*/ + 0x00, /* stop bits-1*/ + 0x00, /* parity - none*/ + 0x08 /* nb. of bits 8*/ + }; +uint8_t g_serialLineState; +extern void notifySerialLineStateChanged(uint8_t serialLineState); +extern void notifySerialInput(uint8_t *buffer, uint32_t length); +/* USER CODE END PV */ + +/** @addtogroup STM32_USB_OTG_DEVICE_LIBRARY + * @brief Usb device library. + * @{ + */ + +/** @addtogroup USBD_CDC_IF + * @{ + */ + +/** @defgroup USBD_CDC_IF_Private_TypesDefinitions USBD_CDC_IF_Private_TypesDefinitions + * @brief Private types. + * @{ + */ + +/* USER CODE BEGIN PRIVATE_TYPES */ + +/* USER CODE END PRIVATE_TYPES */ + +/** + * @} + */ + +/** @defgroup USBD_CDC_IF_Private_Defines USBD_CDC_IF_Private_Defines + * @brief Private defines. + * @{ + */ + +/* USER CODE BEGIN PRIVATE_DEFINES */ +/* Define size for the receive and transmit buffer over CDC */ +/* It's up to user to redefine and/or remove those define */ +#define APP_RX_DATA_SIZE 4096 +#define APP_TX_DATA_SIZE 4096 +/* USER CODE END PRIVATE_DEFINES */ + +/** + * @} + */ + +/** @defgroup USBD_CDC_IF_Private_Macros USBD_CDC_IF_Private_Macros + * @brief Private macros. + * @{ + */ + +/* USER CODE BEGIN PRIVATE_MACRO */ + +/* USER CODE END PRIVATE_MACRO */ + +/** + * @} + */ + +/** @defgroup USBD_CDC_IF_Private_Variables USBD_CDC_IF_Private_Variables + * @brief Private variables. + * @{ + */ +/* Create buffer for reception and transmission */ +/* It's up to user to redefine and/or remove those define */ +/** Received data over USB are stored in this buffer */ +uint8_t UserRxBufferFS[APP_RX_DATA_SIZE]; + +/** Data to send over USB CDC are stored in this buffer */ +uint8_t UserTxBufferFS[APP_TX_DATA_SIZE]; + +/* USER CODE BEGIN PRIVATE_VARIABLES */ + +/* USER CODE END PRIVATE_VARIABLES */ + +/** + * @} + */ + +/** @defgroup USBD_CDC_IF_Exported_Variables USBD_CDC_IF_Exported_Variables + * @brief Public variables. + * @{ + */ + +extern USBD_HandleTypeDef hUsbDeviceFS; + +/* USER CODE BEGIN EXPORTED_VARIABLES */ + +/* USER CODE END EXPORTED_VARIABLES */ + +/** + * @} + */ + +/** @defgroup USBD_CDC_IF_Private_FunctionPrototypes USBD_CDC_IF_Private_FunctionPrototypes + * @brief Private functions declaration. 
+ * @{ + */ + +static int8_t CDC_Init_FS(void); +static int8_t CDC_DeInit_FS(void); +static int8_t CDC_Control_FS(uint8_t cmd, uint8_t* pbuf, uint16_t length); +static int8_t CDC_Receive_FS(uint8_t* pbuf, uint32_t *Len); +static int8_t CDC_TransmitCplt_FS(uint8_t *pbuf, uint32_t *Len, uint8_t epnum); + +/* USER CODE BEGIN PRIVATE_FUNCTIONS_DECLARATION */ + +/* USER CODE END PRIVATE_FUNCTIONS_DECLARATION */ + +/** + * @} + */ + +USBD_CDC_ItfTypeDef USBD_Interface_fops_FS = +{ + CDC_Init_FS, + CDC_DeInit_FS, + CDC_Control_FS, + CDC_Receive_FS, + CDC_TransmitCplt_FS +}; + +/* Private functions ---------------------------------------------------------*/ +/** + * @brief Initializes the CDC media low layer over the FS USB IP + * @retval USBD_OK if all operations are OK else USBD_FAIL + */ +static int8_t CDC_Init_FS(void) +{ + /* USER CODE BEGIN 3 */ + /* Set Application Buffers */ + USBD_CDC_SetTxBuffer(&hUsbDeviceFS, UserTxBufferFS, 0); + USBD_CDC_SetRxBuffer(&hUsbDeviceFS, UserRxBufferFS); + return (USBD_OK); + /* USER CODE END 3 */ +} + +/** + * @brief DeInitializes the CDC media low layer + * @retval USBD_OK if all operations are OK else USBD_FAIL + */ +static int8_t CDC_DeInit_FS(void) +{ + /* USER CODE BEGIN 4 */ + return (USBD_OK); + /* USER CODE END 4 */ +} + +/** + * @brief Manage the CDC class requests + * @param cmd: Command code + * @param pbuf: Buffer containing command data (request parameters) + * @param length: Number of data to be sent (in bytes) + * @retval Result of the operation: USBD_OK if all operations are OK else USBD_FAIL + */ +static int8_t CDC_Control_FS(uint8_t cmd, uint8_t* pbuf, uint16_t length) +{ + /* USER CODE BEGIN 5 */ + switch(cmd) + { + case CDC_SEND_ENCAPSULATED_COMMAND: + + break; + + case CDC_GET_ENCAPSULATED_RESPONSE: + + break; + + case CDC_SET_COMM_FEATURE: + + break; + + case CDC_GET_COMM_FEATURE: + + break; + + case CDC_CLEAR_COMM_FEATURE: + + break; + + /*******************************************************************************/ + /* Line Coding Structure */ + /*-----------------------------------------------------------------------------*/ + /* Offset | Field | Size | Value | Description */ + /* 0 | dwDTERate | 4 | Number |Data terminal rate, in bits per second*/ + /* 4 | bCharFormat | 1 | Number | Stop bits */ + /* 0 - 1 Stop bit */ + /* 1 - 1.5 Stop bits */ + /* 2 - 2 Stop bits */ + /* 5 | bParityType | 1 | Number | Parity */ + /* 0 - None */ + /* 1 - Odd */ + /* 2 - Even */ + /* 3 - Mark */ + /* 4 - Space */ + /* 6 | bDataBits | 1 | Number Data bits (5, 6, 7, 8 or 16). */ + /*******************************************************************************/ + case CDC_SET_LINE_CODING: + + break; + + case CDC_GET_LINE_CODING: + pbuf[0] = (uint8_t)(LineCoding.bitrate); + pbuf[1] = (uint8_t)(LineCoding.bitrate >> 8); + pbuf[2] = (uint8_t)(LineCoding.bitrate >> 16); + pbuf[3] = (uint8_t)(LineCoding.bitrate >> 24); + pbuf[4] = LineCoding.format; + pbuf[5] = LineCoding.paritytype; + pbuf[6] = LineCoding.datatype; + break; + + case CDC_SET_CONTROL_LINE_STATE: + g_serialLineState = pbuf[2]; + notifySerialLineStateChanged(g_serialLineState); + break; + + case CDC_SEND_BREAK: + + break; + + default: + break; + } + + return (USBD_OK); + /* USER CODE END 5 */ +} + +/** + * @brief Data received over USB OUT endpoint are sent over CDC interface + * through this function. + * + * @note + * This function will block any OUT packet reception on USB endpoint + * untill exiting this function. 
If you exit this function before transfer + * is complete on CDC interface (ie. using DMA controller) it will result + * in receiving more data while previous ones are still not sent. + * + * @param Buf: Buffer of data to be received + * @param Len: Number of data received (in bytes) + * @retval Result of the operation: USBD_OK if all operations are OK else USBD_FAIL + */ +static int8_t CDC_Receive_FS(uint8_t* Buf, uint32_t *Len) +{ + /* USER CODE BEGIN 6 */ + if (hUsbDeviceFS.dev_state != USBD_STATE_CONFIGURED) { + return USBD_FAIL; + } + + if (((Buf == NULL) || (Len == NULL)) || (*Len <= 0)) { + return USBD_FAIL; + } + + notifySerialInput(Buf, *Len); + + return (USBD_OK); + /* USER CODE END 6 */ +} + +/** + * @brief CDC_Transmit_FS + * Data to send over USB IN endpoint are sent over CDC interface + * through this function. + * @note + * + * + * @param Buf: Buffer of data to be sent + * @param Len: Number of data to be sent (in bytes) + * @retval USBD_OK if all operations are OK else USBD_FAIL or USBD_BUSY + */ +uint8_t CDC_Transmit_FS(uint8_t* Buf, uint16_t Len) +{ + uint8_t result = USBD_OK; + /* USER CODE BEGIN 7 */ + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef*)hUsbDeviceFS.pClassData; + + for (int i = 0; (i < 10 || g_serialLineState) && hcdc->TxState != 0; i++) { + osDelay(0); + } + + if (hcdc->TxState == 0){ + assert(Len <= APP_TX_DATA_SIZE); + memcpy(UserTxBufferFS, Buf, Len); + USBD_CDC_SetTxBuffer(&hUsbDeviceFS, UserTxBufferFS, Len); + result = USBD_CDC_TransmitPacket(&hUsbDeviceFS); + } else { + result = USBD_BUSY; + } + + /* USER CODE END 7 */ + return result; +} + +/** + * @brief CDC_TransmitCplt_FS + * Data transmited callback + * + * @note + * This function is IN transfer complete callback used to inform user that + * the submitted Data is successfully sent over USB. + * + * @param Buf: Buffer of data to be received + * @param Len: Number of data received (in bytes) + * @retval Result of the operation: USBD_OK if all operations are OK else USBD_FAIL + */ +static int8_t CDC_TransmitCplt_FS(uint8_t *Buf, uint32_t *Len, uint8_t epnum) +{ + uint8_t result = USBD_OK; + /* USER CODE BEGIN 13 */ + UNUSED(Buf); + UNUSED(Len); + UNUSED(epnum); + /* USER CODE END 13 */ + return result; +} + +/* USER CODE BEGIN PRIVATE_FUNCTIONS_IMPLEMENTATION */ + +/* USER CODE END PRIVATE_FUNCTIONS_IMPLEMENTATION */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_cdc_if.h b/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_cdc_if.h new file mode 100644 index 000000000..cb3ab6a35 --- /dev/null +++ b/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_cdc_if.h @@ -0,0 +1,130 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : usbd_cdc_if.h + * @version : v1.0_Cube + * @brief : Header for usbd_cdc_if.c file. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __USBD_CDC_IF_H__ +#define __USBD_CDC_IF_H__ + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_cdc.h" + +/* USER CODE BEGIN INCLUDE */ + +/* USER CODE END INCLUDE */ + +/** @addtogroup STM32_USB_OTG_DEVICE_LIBRARY + * @brief For Usb device. + * @{ + */ + +/** @defgroup USBD_CDC_IF USBD_CDC_IF + * @brief Usb VCP device module + * @{ + */ + +/** @defgroup USBD_CDC_IF_Exported_Defines USBD_CDC_IF_Exported_Defines + * @brief Defines. + * @{ + */ +/* USER CODE BEGIN EXPORTED_DEFINES */ + +/* USER CODE END EXPORTED_DEFINES */ + +/** + * @} + */ + +/** @defgroup USBD_CDC_IF_Exported_Types USBD_CDC_IF_Exported_Types + * @brief Types. + * @{ + */ + +/* USER CODE BEGIN EXPORTED_TYPES */ + +/* USER CODE END EXPORTED_TYPES */ + +/** + * @} + */ + +/** @defgroup USBD_CDC_IF_Exported_Macros USBD_CDC_IF_Exported_Macros + * @brief Aliases. + * @{ + */ + +/* USER CODE BEGIN EXPORTED_MACRO */ + +/* USER CODE END EXPORTED_MACRO */ + +/** + * @} + */ + +/** @defgroup USBD_CDC_IF_Exported_Variables USBD_CDC_IF_Exported_Variables + * @brief Public variables. + * @{ + */ + +/** CDC Interface callback. */ +extern USBD_CDC_ItfTypeDef USBD_Interface_fops_FS; + +/* USER CODE BEGIN EXPORTED_VARIABLES */ +extern uint8_t g_serialLineState; +/* USER CODE END EXPORTED_VARIABLES */ + +/** + * @} + */ + +/** @defgroup USBD_CDC_IF_Exported_FunctionsPrototype USBD_CDC_IF_Exported_FunctionsPrototype + * @brief Public functions declaration. + * @{ + */ + +uint8_t CDC_Transmit_FS(uint8_t* Buf, uint16_t Len); + +/* USER CODE BEGIN EXPORTED_FUNCTIONS */ + +/* USER CODE END EXPORTED_FUNCTIONS */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __USBD_CDC_IF_H__ */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_desc.c b/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_desc.c new file mode 100644 index 000000000..9beb4e397 --- /dev/null +++ b/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_desc.c @@ -0,0 +1,446 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : App/usbd_desc.c + * @version : v1.0_Cube + * @brief : This file implements the USB device descriptors. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_core.h" +#include "usbd_desc.h" +#include "usbd_conf.h" + +/* USER CODE BEGIN INCLUDE */ + +/* USER CODE END INCLUDE */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ + +/* USER CODE BEGIN PV */ +/* Private variables ---------------------------------------------------------*/ + +/* USER CODE END PV */ + +/** @addtogroup STM32_USB_OTG_DEVICE_LIBRARY + * @{ + */ + +/** @addtogroup USBD_DESC + * @{ + */ + +/** @defgroup USBD_DESC_Private_TypesDefinitions USBD_DESC_Private_TypesDefinitions + * @brief Private types. + * @{ + */ + +/* USER CODE BEGIN PRIVATE_TYPES */ + +/* USER CODE END PRIVATE_TYPES */ + +/** + * @} + */ + +/** @defgroup USBD_DESC_Private_Defines USBD_DESC_Private_Defines + * @brief Private defines. + * @{ + */ + +#define USBD_VID 4617 +#define USBD_LANGID_STRING 1033 +#define USBD_MANUFACTURER_STRING "EEZ" +#define USBD_PID_FS 8216 +#define USBD_PRODUCT_STRING_FS "MCU Virtual ComPort" +#define USBD_CONFIGURATION_STRING_FS "CDC Config" +#define USBD_INTERFACE_STRING_FS "CDC Interface" + +#define USB_SIZ_BOS_DESC 0x0C + +/* USER CODE BEGIN PRIVATE_DEFINES */ + +/* USER CODE END PRIVATE_DEFINES */ + +/** + * @} + */ + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/** @defgroup USBD_DESC_Private_Macros USBD_DESC_Private_Macros + * @brief Private macros. + * @{ + */ + +/* USER CODE BEGIN PRIVATE_MACRO */ + +/* USER CODE END PRIVATE_MACRO */ + +/** + * @} + */ + +/** @defgroup USBD_DESC_Private_FunctionPrototypes USBD_DESC_Private_FunctionPrototypes + * @brief Private functions declaration. + * @{ + */ + +static void Get_SerialNum(void); +static void IntToUnicode(uint32_t value, uint8_t * pbuf, uint8_t len); + +/** + * @} + */ + + +/** @defgroup USBD_DESC_Private_FunctionPrototypes USBD_DESC_Private_FunctionPrototypes + * @brief Private functions declaration for FS. + * @{ + */ + +uint8_t * USBD_FS_DeviceDescriptor(USBD_SpeedTypeDef speed, uint16_t *length); +uint8_t * USBD_FS_LangIDStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length); +uint8_t * USBD_FS_ManufacturerStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length); +uint8_t * USBD_FS_ProductStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length); +uint8_t * USBD_FS_SerialStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length); +uint8_t * USBD_FS_ConfigStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length); +uint8_t * USBD_FS_InterfaceStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length); +#if (USBD_LPM_ENABLED == 1) +uint8_t * USBD_FS_USR_BOSDescriptor(USBD_SpeedTypeDef speed, uint16_t *length); +#endif /* (USBD_LPM_ENABLED == 1) */ + +/** + * @} + */ + +/** @defgroup USBD_DESC_Private_Variables USBD_DESC_Private_Variables + * @brief Private variables. 
+ * @{ + */ + +USBD_DescriptorsTypeDef FS_Desc = +{ + USBD_FS_DeviceDescriptor +, USBD_FS_LangIDStrDescriptor +, USBD_FS_ManufacturerStrDescriptor +, USBD_FS_ProductStrDescriptor +, USBD_FS_SerialStrDescriptor +, USBD_FS_ConfigStrDescriptor +, USBD_FS_InterfaceStrDescriptor +#if (USBD_LPM_ENABLED == 1) +, USBD_FS_USR_BOSDescriptor +#endif /* (USBD_LPM_ENABLED == 1) */ +}; + +#if defined ( __ICCARM__ ) /* IAR Compiler */ + #pragma data_alignment=4 +#endif /* defined ( __ICCARM__ ) */ +/** USB standard device descriptor. */ +__ALIGN_BEGIN uint8_t USBD_FS_DeviceDesc[USB_LEN_DEV_DESC] __ALIGN_END = +{ + 0x12, /*bLength */ + USB_DESC_TYPE_DEVICE, /*bDescriptorType*/ +#if (USBD_LPM_ENABLED == 1) + 0x01, /*bcdUSB */ /* changed to USB version 2.01 + in order to support LPM L1 suspend + resume test of USBCV3.0*/ +#else + 0x00, /*bcdUSB */ +#endif /* (USBD_LPM_ENABLED == 1) */ + 0x02, + 0x02, /*bDeviceClass*/ + 0x02, /*bDeviceSubClass*/ + 0x00, /*bDeviceProtocol*/ + USB_MAX_EP0_SIZE, /*bMaxPacketSize*/ + LOBYTE(USBD_VID), /*idVendor*/ + HIBYTE(USBD_VID), /*idVendor*/ + LOBYTE(USBD_PID_FS), /*idProduct*/ + HIBYTE(USBD_PID_FS), /*idProduct*/ + 0x00, /*bcdDevice rel. 2.00*/ + 0x02, + USBD_IDX_MFC_STR, /*Index of manufacturer string*/ + USBD_IDX_PRODUCT_STR, /*Index of product string*/ + USBD_IDX_SERIAL_STR, /*Index of serial number string*/ + USBD_MAX_NUM_CONFIGURATION /*bNumConfigurations*/ +}; + +/* USB_DeviceDescriptor */ +/** BOS descriptor. */ +#if (USBD_LPM_ENABLED == 1) +#if defined ( __ICCARM__ ) /* IAR Compiler */ + #pragma data_alignment=4 +#endif /* defined ( __ICCARM__ ) */ +__ALIGN_BEGIN uint8_t USBD_FS_BOSDesc[USB_SIZ_BOS_DESC] __ALIGN_END = +{ + 0x5, + USB_DESC_TYPE_BOS, + 0xC, + 0x0, + 0x1, /* 1 device capability*/ + /* device capability*/ + 0x7, + USB_DEVICE_CAPABITY_TYPE, + 0x2, + 0x2, /* LPM capability bit set*/ + 0x0, + 0x0, + 0x0 +}; +#endif /* (USBD_LPM_ENABLED == 1) */ + +/** + * @} + */ + +/** @defgroup USBD_DESC_Private_Variables USBD_DESC_Private_Variables + * @brief Private variables. + * @{ + */ + +#if defined ( __ICCARM__ ) /* IAR Compiler */ + #pragma data_alignment=4 +#endif /* defined ( __ICCARM__ ) */ + +/** USB lang indentifier descriptor. */ +__ALIGN_BEGIN uint8_t USBD_LangIDDesc[USB_LEN_LANGID_STR_DESC] __ALIGN_END = +{ + USB_LEN_LANGID_STR_DESC, + USB_DESC_TYPE_STRING, + LOBYTE(USBD_LANGID_STRING), + HIBYTE(USBD_LANGID_STRING) +}; + +#if defined ( __ICCARM__ ) /* IAR Compiler */ + #pragma data_alignment=4 +#endif /* defined ( __ICCARM__ ) */ +/* Internal string descriptor. */ +__ALIGN_BEGIN uint8_t USBD_StrDesc[USBD_MAX_STR_DESC_SIZ] __ALIGN_END; + +#if defined ( __ICCARM__ ) /*!< IAR Compiler */ + #pragma data_alignment=4 +#endif +__ALIGN_BEGIN uint8_t USBD_StringSerial[USB_SIZ_STRING_SERIAL] __ALIGN_END = { + USB_SIZ_STRING_SERIAL, + USB_DESC_TYPE_STRING, +}; + +/** + * @} + */ + +/** @defgroup USBD_DESC_Private_Functions USBD_DESC_Private_Functions + * @brief Private functions. 
+ * @{ + */ + +/** + * @brief Return the device descriptor + * @param speed : Current device speed + * @param length : Pointer to data length variable + * @retval Pointer to descriptor buffer + */ +uint8_t * USBD_FS_DeviceDescriptor(USBD_SpeedTypeDef speed, uint16_t *length) +{ + UNUSED(speed); + *length = sizeof(USBD_FS_DeviceDesc); + return USBD_FS_DeviceDesc; +} + +/** + * @brief Return the LangID string descriptor + * @param speed : Current device speed + * @param length : Pointer to data length variable + * @retval Pointer to descriptor buffer + */ +uint8_t * USBD_FS_LangIDStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length) +{ + UNUSED(speed); + *length = sizeof(USBD_LangIDDesc); + return USBD_LangIDDesc; +} + +/** + * @brief Return the product string descriptor + * @param speed : Current device speed + * @param length : Pointer to data length variable + * @retval Pointer to descriptor buffer + */ +uint8_t * USBD_FS_ProductStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length) +{ + if(speed == 0) + { + USBD_GetString((uint8_t *)USBD_PRODUCT_STRING_FS, USBD_StrDesc, length); + } + else + { + USBD_GetString((uint8_t *)USBD_PRODUCT_STRING_FS, USBD_StrDesc, length); + } + return USBD_StrDesc; +} + +/** + * @brief Return the manufacturer string descriptor + * @param speed : Current device speed + * @param length : Pointer to data length variable + * @retval Pointer to descriptor buffer + */ +uint8_t * USBD_FS_ManufacturerStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length) +{ + UNUSED(speed); + USBD_GetString((uint8_t *)USBD_MANUFACTURER_STRING, USBD_StrDesc, length); + return USBD_StrDesc; +} + +/** + * @brief Return the serial number string descriptor + * @param speed : Current device speed + * @param length : Pointer to data length variable + * @retval Pointer to descriptor buffer + */ +uint8_t * USBD_FS_SerialStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length) +{ + UNUSED(speed); + *length = USB_SIZ_STRING_SERIAL; + + /* Update the serial number string descriptor with the data from the unique + * ID */ + Get_SerialNum(); + /* USER CODE BEGIN USBD_FS_SerialStrDescriptor */ + + /* USER CODE END USBD_FS_SerialStrDescriptor */ + return (uint8_t *) USBD_StringSerial; +} + +/** + * @brief Return the configuration string descriptor + * @param speed : Current device speed + * @param length : Pointer to data length variable + * @retval Pointer to descriptor buffer + */ +uint8_t * USBD_FS_ConfigStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length) +{ + if(speed == USBD_SPEED_HIGH) + { + USBD_GetString((uint8_t *)USBD_CONFIGURATION_STRING_FS, USBD_StrDesc, length); + } + else + { + USBD_GetString((uint8_t *)USBD_CONFIGURATION_STRING_FS, USBD_StrDesc, length); + } + return USBD_StrDesc; +} + +/** + * @brief Return the interface string descriptor + * @param speed : Current device speed + * @param length : Pointer to data length variable + * @retval Pointer to descriptor buffer + */ +uint8_t * USBD_FS_InterfaceStrDescriptor(USBD_SpeedTypeDef speed, uint16_t *length) +{ + if(speed == 0) + { + USBD_GetString((uint8_t *)USBD_INTERFACE_STRING_FS, USBD_StrDesc, length); + } + else + { + USBD_GetString((uint8_t *)USBD_INTERFACE_STRING_FS, USBD_StrDesc, length); + } + return USBD_StrDesc; +} + +#if (USBD_LPM_ENABLED == 1) +/** + * @brief Return the BOS descriptor + * @param speed : Current device speed + * @param length : Pointer to data length variable + * @retval Pointer to descriptor buffer + */ +uint8_t * USBD_FS_USR_BOSDescriptor(USBD_SpeedTypeDef speed, uint16_t *length) +{ + 
UNUSED(speed); + *length = sizeof(USBD_FS_BOSDesc); + return (uint8_t*)USBD_FS_BOSDesc; +} +#endif /* (USBD_LPM_ENABLED == 1) */ + +/** + * @brief Create the serial number string descriptor + * @param None + * @retval None + */ +static void Get_SerialNum(void) +{ + uint32_t deviceserial0, deviceserial1, deviceserial2; + + deviceserial0 = *(uint32_t *) DEVICE_ID1; + deviceserial1 = *(uint32_t *) DEVICE_ID2; + deviceserial2 = *(uint32_t *) DEVICE_ID3; + + deviceserial0 += deviceserial2; + + if (deviceserial0 != 0) + { + IntToUnicode(deviceserial0, &USBD_StringSerial[2], 8); + IntToUnicode(deviceserial1, &USBD_StringSerial[18], 4); + } +} + +/** + * @brief Convert Hex 32Bits value into char + * @param value: value to convert + * @param pbuf: pointer to the buffer + * @param len: buffer length + * @retval None + */ +static void IntToUnicode(uint32_t value, uint8_t * pbuf, uint8_t len) +{ + uint8_t idx = 0; + + for (idx = 0; idx < len; idx++) + { + if (((value >> 28)) < 0xA) + { + pbuf[2 * idx] = (value >> 28) + '0'; + } + else + { + pbuf[2 * idx] = (value >> 28) + 'A' - 10; + } + + value = value << 4; + + pbuf[2 * idx + 1] = 0; + } +} +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_desc.h b/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_desc.h new file mode 100644 index 000000000..42f4d3466 --- /dev/null +++ b/src/third_party/stm32_cubeide/USB_DEVICE/App/usbd_desc.h @@ -0,0 +1,145 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : usbd_desc.c + * @version : v1.0_Cube + * @brief : Header for usbd_conf.c file. + ****************************************************************************** + * @attention + * + *
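Get_SerialNum() above derives the serial-number string from the MCU's 96-bit unique device ID: it adds UID words 0 and 2, then IntToUnicode() writes 8 hex digits of that sum and 4 digits of word 1 as UTF-16LE characters into USBD_StringSerial, whose first two bytes are the descriptor length and type. That is 2 + (8 + 4) * 2 = 26 bytes, matching USB_SIZ_STRING_SERIAL (0x1A). A worked example with made-up UID values:

/* Worked example with hypothetical UID contents (would run inside usbd_desc.c,
   where IntToUnicode() and USBD_StringSerial are visible). */
void Example_SerialNum(void)
{
  uint32_t id0 = 0x12345678u, id1 = 0x9ABCDEF0u, id2 = 0x00000001u;

  IntToUnicode(id0 + id2, &USBD_StringSerial[2],  8);  /* "12345679" in UTF-16LE */
  IntToUnicode(id1,       &USBD_StringSerial[18], 4);  /* "9ABC" in UTF-16LE     */
  /* USBD_StringSerial[0] == 0x1A (bLength), [1] == USB_DESC_TYPE_STRING. */
}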

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __USBD_DESC__C__ +#define __USBD_DESC__C__ + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "usbd_def.h" + +/* USER CODE BEGIN INCLUDE */ + +/* USER CODE END INCLUDE */ + +/** @addtogroup STM32_USB_OTG_DEVICE_LIBRARY + * @{ + */ + +/** @defgroup USBD_DESC USBD_DESC + * @brief Usb device descriptors module. + * @{ + */ + +/** @defgroup USBD_DESC_Exported_Constants USBD_DESC_Exported_Constants + * @brief Constants. + * @{ + */ +#define DEVICE_ID1 (UID_BASE) +#define DEVICE_ID2 (UID_BASE + 0x4) +#define DEVICE_ID3 (UID_BASE + 0x8) + +#define USB_SIZ_STRING_SERIAL 0x1A + +/* USER CODE BEGIN EXPORTED_CONSTANTS */ + +/* USER CODE END EXPORTED_CONSTANTS */ + +/** + * @} + */ + +/** @defgroup USBD_DESC_Exported_Defines USBD_DESC_Exported_Defines + * @brief Defines. + * @{ + */ + +/* USER CODE BEGIN EXPORTED_DEFINES */ + +/* USER CODE END EXPORTED_DEFINES */ + +/** + * @} + */ + +/** @defgroup USBD_DESC_Exported_TypesDefinitions USBD_DESC_Exported_TypesDefinitions + * @brief Types. + * @{ + */ + +/* USER CODE BEGIN EXPORTED_TYPES */ + +/* USER CODE END EXPORTED_TYPES */ + +/** + * @} + */ + +/** @defgroup USBD_DESC_Exported_Macros USBD_DESC_Exported_Macros + * @brief Aliases. + * @{ + */ + +/* USER CODE BEGIN EXPORTED_MACRO */ + +/* USER CODE END EXPORTED_MACRO */ + +/** + * @} + */ + +/** @defgroup USBD_DESC_Exported_Variables USBD_DESC_Exported_Variables + * @brief Public variables. + * @{ + */ + +/** Descriptor for the Usb device. */ +extern USBD_DescriptorsTypeDef FS_Desc; + +/* USER CODE BEGIN EXPORTED_VARIABLES */ + +/* USER CODE END EXPORTED_VARIABLES */ + +/** + * @} + */ + +/** @defgroup USBD_DESC_Exported_FunctionsPrototype USBD_DESC_Exported_FunctionsPrototype + * @brief Public functions declaration. + * @{ + */ + +/* USER CODE BEGIN EXPORTED_FUNCTIONS */ + +/* USER CODE END EXPORTED_FUNCTIONS */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __USBD_DESC__C__ */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/USB_DEVICE/Target/usbd_conf.c b/src/third_party/stm32_cubeide/USB_DEVICE/Target/usbd_conf.c new file mode 100644 index 000000000..850863310 --- /dev/null +++ b/src/third_party/stm32_cubeide/USB_DEVICE/Target/usbd_conf.c @@ -0,0 +1,697 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : Target/usbd_conf.c + * @version : v1.0_Cube + * @brief : This file implements the board support package for the USB device library + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx.h" +#include "stm32f7xx_hal.h" +#include "usbd_def.h" +#include "usbd_core.h" + +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ + +/* USER CODE BEGIN PV */ +/* Private variables ---------------------------------------------------------*/ + +/* USER CODE END PV */ + +PCD_HandleTypeDef hpcd_USB_OTG_FS; +void Error_Handler(void); + +/* External functions --------------------------------------------------------*/ +void SystemClock_Config(void); + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* USER CODE BEGIN PFP */ +/* Private function prototypes -----------------------------------------------*/ +USBD_StatusTypeDef USBD_Get_USB_Status(HAL_StatusTypeDef hal_status); + +/* USER CODE END PFP */ + +/* Private functions ---------------------------------------------------------*/ + +/* USER CODE BEGIN 1 */ +static void SystemClockConfig_Resume(void); +/* USER CODE END 1 */ + +/******************************************************************************* + LL Driver Callbacks (PCD -> USB Device Library) +*******************************************************************************/ +/* MSP Init */ + +void HAL_PCD_MspInit(PCD_HandleTypeDef* pcdHandle) +{ + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(pcdHandle->Instance==USB_OTG_FS) + { + /* USER CODE BEGIN USB_OTG_FS_MspInit 0 */ + + /* USER CODE END USB_OTG_FS_MspInit 0 */ + + __HAL_RCC_GPIOA_CLK_ENABLE(); + /**USB_OTG_FS GPIO Configuration + PA9 ------> USB_OTG_FS_VBUS + PA11 ------> USB_OTG_FS_DM + PA12 ------> USB_OTG_FS_DP + */ + GPIO_InitStruct.Pin = GPIO_PIN_9; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = GPIO_PIN_11|GPIO_PIN_12; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF10_OTG_FS; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + /* Peripheral clock enable */ + __HAL_RCC_USB_OTG_FS_CLK_ENABLE(); + + /* Peripheral interrupt init */ + HAL_NVIC_SetPriority(OTG_FS_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(OTG_FS_IRQn); + /* USER CODE BEGIN USB_OTG_FS_MspInit 1 */ + + /* USER CODE END USB_OTG_FS_MspInit 1 */ + } +} + +void HAL_PCD_MspDeInit(PCD_HandleTypeDef* pcdHandle) +{ + if(pcdHandle->Instance==USB_OTG_FS) + { + /* USER CODE BEGIN USB_OTG_FS_MspDeInit 0 */ + + /* USER CODE END USB_OTG_FS_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_USB_OTG_FS_CLK_DISABLE(); + + /**USB_OTG_FS GPIO Configuration + PA9 ------> USB_OTG_FS_VBUS + PA11 ------> USB_OTG_FS_DM + PA12 ------> USB_OTG_FS_DP + */ + HAL_GPIO_DeInit(GPIOA, GPIO_PIN_9|GPIO_PIN_11|GPIO_PIN_12); + + /* Peripheral interrupt Deinit*/ + HAL_NVIC_DisableIRQ(OTG_FS_IRQn); + + /* USER CODE BEGIN USB_OTG_FS_MspDeInit 1 */ + + /* USER CODE END 
USB_OTG_FS_MspDeInit 1 */ + } +} + +/** + * @brief Setup stage callback + * @param hpcd: PCD handle + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_SetupStageCallback(PCD_HandleTypeDef *hpcd) +#else +void HAL_PCD_SetupStageCallback(PCD_HandleTypeDef *hpcd) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + USBD_LL_SetupStage((USBD_HandleTypeDef*)hpcd->pData, (uint8_t *)hpcd->Setup); +} + +/** + * @brief Data Out stage callback. + * @param hpcd: PCD handle + * @param epnum: Endpoint number + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_DataOutStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +#else +void HAL_PCD_DataOutStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + USBD_LL_DataOutStage((USBD_HandleTypeDef*)hpcd->pData, epnum, hpcd->OUT_ep[epnum].xfer_buff); +} + +/** + * @brief Data In stage callback. + * @param hpcd: PCD handle + * @param epnum: Endpoint number + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_DataInStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +#else +void HAL_PCD_DataInStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + USBD_LL_DataInStage((USBD_HandleTypeDef*)hpcd->pData, epnum, hpcd->IN_ep[epnum].xfer_buff); +} + +/** + * @brief SOF callback. + * @param hpcd: PCD handle + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_SOFCallback(PCD_HandleTypeDef *hpcd) +#else +void HAL_PCD_SOFCallback(PCD_HandleTypeDef *hpcd) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + USBD_LL_SOF((USBD_HandleTypeDef*)hpcd->pData); +} + +/** + * @brief Reset callback. + * @param hpcd: PCD handle + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_ResetCallback(PCD_HandleTypeDef *hpcd) +#else +void HAL_PCD_ResetCallback(PCD_HandleTypeDef *hpcd) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + USBD_SpeedTypeDef speed = USBD_SPEED_FULL; + + if ( hpcd->Init.speed == PCD_SPEED_HIGH) + { + speed = USBD_SPEED_HIGH; + } + else if ( hpcd->Init.speed == PCD_SPEED_FULL) + { + speed = USBD_SPEED_FULL; + } + else + { + Error_Handler(); + } + /* Set Speed. */ + USBD_LL_SetSpeed((USBD_HandleTypeDef*)hpcd->pData, speed); + + /* Reset Device. */ + USBD_LL_Reset((USBD_HandleTypeDef*)hpcd->pData); +} + +/** + * @brief Suspend callback. + * When Low power mode is enabled the debug cannot be used (IAR, Keil doesn't support it) + * @param hpcd: PCD handle + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_SuspendCallback(PCD_HandleTypeDef *hpcd) +#else +void HAL_PCD_SuspendCallback(PCD_HandleTypeDef *hpcd) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + /* Inform USB library that core enters in suspend Mode. */ + USBD_LL_Suspend((USBD_HandleTypeDef*)hpcd->pData); + __HAL_PCD_GATE_PHYCLOCK(hpcd); + /* Enter in STOP mode. */ + /* USER CODE BEGIN 2 */ + if (hpcd->Init.low_power_enable) + { + /* Set SLEEPDEEP bit and SleepOnExit of Cortex System Control Register. */ + SCB->SCR |= (uint32_t)((uint32_t)(SCB_SCR_SLEEPDEEP_Msk | SCB_SCR_SLEEPONEXIT_Msk)); + } + /* USER CODE END 2 */ +} + +/** + * @brief Resume callback. 
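Each HAL_PCD_*Callback above, and the Resume/ISO/Connect/Disconnect callbacks that follow, simply forwards the event into the USB device library through the matching USBD_LL_* call. The hand-off relies on the two handles being cross-linked, which USBD_LL_Init() below performs; a comment-only sketch of that relationship:

/* Cross-link established in USBD_LL_Init():                                  */
/*   hpcd_USB_OTG_FS.pData = pdev;              PCD  -> device library handle */
/*   pdev->pData           = &hpcd_USB_OTG_FS;  device library -> PCD handle  */
/* which is why a callback can forward with                                   */
/*   USBD_LL_SetupStage((USBD_HandleTypeDef *)hpcd->pData, ...);              */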
+ * When Low power mode is enabled the debug cannot be used (IAR, Keil doesn't support it) + * @param hpcd: PCD handle + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_ResumeCallback(PCD_HandleTypeDef *hpcd) +#else +void HAL_PCD_ResumeCallback(PCD_HandleTypeDef *hpcd) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + /* USER CODE BEGIN 3 */ + + /* USER CODE END 3 */ + USBD_LL_Resume((USBD_HandleTypeDef*)hpcd->pData); +} + +/** + * @brief ISOOUTIncomplete callback. + * @param hpcd: PCD handle + * @param epnum: Endpoint number + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_ISOOUTIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +#else +void HAL_PCD_ISOOUTIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + USBD_LL_IsoOUTIncomplete((USBD_HandleTypeDef*)hpcd->pData, epnum); +} + +/** + * @brief ISOINIncomplete callback. + * @param hpcd: PCD handle + * @param epnum: Endpoint number + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_ISOINIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +#else +void HAL_PCD_ISOINIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + USBD_LL_IsoINIncomplete((USBD_HandleTypeDef*)hpcd->pData, epnum); +} + +/** + * @brief Connect callback. + * @param hpcd: PCD handle + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_ConnectCallback(PCD_HandleTypeDef *hpcd) +#else +void HAL_PCD_ConnectCallback(PCD_HandleTypeDef *hpcd) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + USBD_LL_DevConnected((USBD_HandleTypeDef*)hpcd->pData); +} + +/** + * @brief Disconnect callback. + * @param hpcd: PCD handle + * @retval None + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +static void PCD_DisconnectCallback(PCD_HandleTypeDef *hpcd) +#else +void HAL_PCD_DisconnectCallback(PCD_HandleTypeDef *hpcd) +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + USBD_LL_DevDisconnected((USBD_HandleTypeDef*)hpcd->pData); +} + +/******************************************************************************* + LL Driver Interface (USB Device Library --> PCD) +*******************************************************************************/ + +/** + * @brief Initializes the low level portion of the device driver. + * @param pdev: Device handle + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_Init(USBD_HandleTypeDef *pdev) +{ + /* Init USB Ip. */ + if (pdev->id == DEVICE_FS) { + /* Link the driver to the stack. 
*/ + hpcd_USB_OTG_FS.pData = pdev; + pdev->pData = &hpcd_USB_OTG_FS; + + hpcd_USB_OTG_FS.Instance = USB_OTG_FS; + hpcd_USB_OTG_FS.Init.dev_endpoints = 6; + hpcd_USB_OTG_FS.Init.speed = PCD_SPEED_FULL; + hpcd_USB_OTG_FS.Init.dma_enable = DISABLE; + hpcd_USB_OTG_FS.Init.phy_itface = PCD_PHY_EMBEDDED; + hpcd_USB_OTG_FS.Init.Sof_enable = DISABLE; + hpcd_USB_OTG_FS.Init.low_power_enable = DISABLE; + hpcd_USB_OTG_FS.Init.lpm_enable = DISABLE; + hpcd_USB_OTG_FS.Init.vbus_sensing_enable = ENABLE; + hpcd_USB_OTG_FS.Init.use_dedicated_ep1 = DISABLE; + if (HAL_PCD_Init(&hpcd_USB_OTG_FS) != HAL_OK) + { + Error_Handler( ); + } + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + /* Register USB PCD CallBacks */ + HAL_PCD_RegisterCallback(&hpcd_USB_OTG_FS, HAL_PCD_SOF_CB_ID, PCD_SOFCallback); + HAL_PCD_RegisterCallback(&hpcd_USB_OTG_FS, HAL_PCD_SETUPSTAGE_CB_ID, PCD_SetupStageCallback); + HAL_PCD_RegisterCallback(&hpcd_USB_OTG_FS, HAL_PCD_RESET_CB_ID, PCD_ResetCallback); + HAL_PCD_RegisterCallback(&hpcd_USB_OTG_FS, HAL_PCD_SUSPEND_CB_ID, PCD_SuspendCallback); + HAL_PCD_RegisterCallback(&hpcd_USB_OTG_FS, HAL_PCD_RESUME_CB_ID, PCD_ResumeCallback); + HAL_PCD_RegisterCallback(&hpcd_USB_OTG_FS, HAL_PCD_CONNECT_CB_ID, PCD_ConnectCallback); + HAL_PCD_RegisterCallback(&hpcd_USB_OTG_FS, HAL_PCD_DISCONNECT_CB_ID, PCD_DisconnectCallback); + + HAL_PCD_RegisterDataOutStageCallback(&hpcd_USB_OTG_FS, PCD_DataOutStageCallback); + HAL_PCD_RegisterDataInStageCallback(&hpcd_USB_OTG_FS, PCD_DataInStageCallback); + HAL_PCD_RegisterIsoOutIncpltCallback(&hpcd_USB_OTG_FS, PCD_ISOOUTIncompleteCallback); + HAL_PCD_RegisterIsoInIncpltCallback(&hpcd_USB_OTG_FS, PCD_ISOINIncompleteCallback); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + HAL_PCDEx_SetRxFiFo(&hpcd_USB_OTG_FS, 0x80); + HAL_PCDEx_SetTxFiFo(&hpcd_USB_OTG_FS, 0, 0x40); + HAL_PCDEx_SetTxFiFo(&hpcd_USB_OTG_FS, 1, 0x80); + } + return USBD_OK; +} + +/** + * @brief De-Initializes the low level portion of the device driver. + * @param pdev: Device handle + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_DeInit(USBD_HandleTypeDef *pdev) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_DeInit(pdev->pData); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Starts the low level portion of the device driver. + * @param pdev: Device handle + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_Start(USBD_HandleTypeDef *pdev) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_Start(pdev->pData); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Stops the low level portion of the device driver. + * @param pdev: Device handle + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_Stop(USBD_HandleTypeDef *pdev) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_Stop(pdev->pData); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Opens an endpoint of the low level driver. 
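The FIFO sizes set at the end of USBD_LL_Init() above are in 32-bit words, and on the STM32F7 the OTG_FS core has 1.25 KB (320 words) of dedicated FIFO RAM that the shared RX FIFO and the per-endpoint TX FIFOs must fit into. The values chosen here fill it exactly:

/* RX FIFO     : 0x80 = 128 words (512 B, shared by all OUT endpoints + setup) */
/* TX FIFO EP0 : 0x40 =  64 words (256 B, control IN)                          */
/* TX FIFO EP1 : 0x80 = 128 words (512 B, CDC data IN endpoint 0x81)           */
/* Total       : 320 words = 1280 B = 1.25 KB of OTG_FS FIFO RAM               */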
+ * @param pdev: Device handle + * @param ep_addr: Endpoint number + * @param ep_type: Endpoint type + * @param ep_mps: Endpoint max packet size + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_OpenEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr, uint8_t ep_type, uint16_t ep_mps) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_EP_Open(pdev->pData, ep_addr, ep_mps, ep_type); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Closes an endpoint of the low level driver. + * @param pdev: Device handle + * @param ep_addr: Endpoint number + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_CloseEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_EP_Close(pdev->pData, ep_addr); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Flushes an endpoint of the Low Level Driver. + * @param pdev: Device handle + * @param ep_addr: Endpoint number + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_FlushEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_EP_Flush(pdev->pData, ep_addr); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Sets a Stall condition on an endpoint of the Low Level Driver. + * @param pdev: Device handle + * @param ep_addr: Endpoint number + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_StallEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_EP_SetStall(pdev->pData, ep_addr); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Clears a Stall condition on an endpoint of the Low Level Driver. + * @param pdev: Device handle + * @param ep_addr: Endpoint number + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_ClearStallEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_EP_ClrStall(pdev->pData, ep_addr); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Returns Stall condition. + * @param pdev: Device handle + * @param ep_addr: Endpoint number + * @retval Stall (1: Yes, 0: No) + */ +uint8_t USBD_LL_IsStallEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr) +{ + PCD_HandleTypeDef *hpcd = (PCD_HandleTypeDef*) pdev->pData; + + if((ep_addr & 0x80) == 0x80) + { + return hpcd->IN_ep[ep_addr & 0x7F].is_stall; + } + else + { + return hpcd->OUT_ep[ep_addr & 0x7F].is_stall; + } +} + +/** + * @brief Assigns a USB address to the device. + * @param pdev: Device handle + * @param dev_addr: Device address + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_SetUSBAddress(USBD_HandleTypeDef *pdev, uint8_t dev_addr) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_SetAddress(pdev->pData, dev_addr); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Transmits data over an endpoint. 
+ * @param pdev: Device handle + * @param ep_addr: Endpoint number + * @param pbuf: Pointer to data to be sent + * @param size: Data size + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_Transmit(USBD_HandleTypeDef *pdev, uint8_t ep_addr, uint8_t *pbuf, uint32_t size) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_EP_Transmit(pdev->pData, ep_addr, pbuf, size); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Prepares an endpoint for reception. + * @param pdev: Device handle + * @param ep_addr: Endpoint number + * @param pbuf: Pointer to data to be received + * @param size: Data size + * @retval USBD status + */ +USBD_StatusTypeDef USBD_LL_PrepareReceive(USBD_HandleTypeDef *pdev, uint8_t ep_addr, uint8_t *pbuf, uint32_t size) +{ + HAL_StatusTypeDef hal_status = HAL_OK; + USBD_StatusTypeDef usb_status = USBD_OK; + + hal_status = HAL_PCD_EP_Receive(pdev->pData, ep_addr, pbuf, size); + + usb_status = USBD_Get_USB_Status(hal_status); + + return usb_status; +} + +/** + * @brief Returns the last transfered packet size. + * @param pdev: Device handle + * @param ep_addr: Endpoint number + * @retval Recived Data Size + */ +uint32_t USBD_LL_GetRxDataSize(USBD_HandleTypeDef *pdev, uint8_t ep_addr) +{ + return HAL_PCD_EP_GetRxCount((PCD_HandleTypeDef*) pdev->pData, ep_addr); +} + +/** + * @brief Send LPM message to user layer + * @param hpcd: PCD handle + * @param msg: LPM message + * @retval None + */ +void HAL_PCDEx_LPM_Callback(PCD_HandleTypeDef *hpcd, PCD_LPM_MsgTypeDef msg) +{ + switch (msg) + { + case PCD_LPM_L0_ACTIVE: + if (hpcd->Init.low_power_enable) + { + SystemClockConfig_Resume(); + + /* Reset SLEEPDEEP bit of Cortex System Control Register. */ + SCB->SCR &= (uint32_t)~((uint32_t)(SCB_SCR_SLEEPDEEP_Msk | SCB_SCR_SLEEPONEXIT_Msk)); + } + __HAL_PCD_UNGATE_PHYCLOCK(hpcd); + USBD_LL_Resume(hpcd->pData); + break; + + case PCD_LPM_L1_ACTIVE: + __HAL_PCD_GATE_PHYCLOCK(hpcd); + USBD_LL_Suspend(hpcd->pData); + + /* Enter in STOP mode. */ + if (hpcd->Init.low_power_enable) + { + /* Set SLEEPDEEP bit and SleepOnExit of Cortex System Control Register. */ + SCB->SCR |= (uint32_t)((uint32_t)(SCB_SCR_SLEEPDEEP_Msk | SCB_SCR_SLEEPONEXIT_Msk)); + } + break; + } +} + +/** + * @brief Delays routine for the USB device library. + * @param Delay: Delay in ms + * @retval None + */ +void USBD_LL_Delay(uint32_t Delay) +{ + HAL_Delay(Delay); +} + +/* USER CODE BEGIN 5 */ +/** + * @brief Configures system clock after wake-up from USB resume callBack: + * enable HSI, PLL and select PLL as system clock source. 
+ * @retval None + */ +static void SystemClockConfig_Resume(void) +{ + SystemClock_Config(); +} +/* USER CODE END 5 */ +/** + * @brief Retuns the USB status depending on the HAL status: + * @param hal_status: HAL status + * @retval USB status + */ +USBD_StatusTypeDef USBD_Get_USB_Status(HAL_StatusTypeDef hal_status) +{ + USBD_StatusTypeDef usb_status = USBD_OK; + + switch (hal_status) + { + case HAL_OK : + usb_status = USBD_OK; + break; + case HAL_ERROR : + usb_status = USBD_FAIL; + break; + case HAL_BUSY : + usb_status = USBD_BUSY; + break; + case HAL_TIMEOUT : + usb_status = USBD_FAIL; + break; + default : + usb_status = USBD_FAIL; + break; + } + return usb_status; +} + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/USB_DEVICE/Target/usbd_conf.h b/src/third_party/stm32_cubeide/USB_DEVICE/Target/usbd_conf.h new file mode 100644 index 000000000..aa1300571 --- /dev/null +++ b/src/third_party/stm32_cubeide/USB_DEVICE/Target/usbd_conf.h @@ -0,0 +1,173 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : usbd_conf.h + * @version : v1.0_Cube + * @brief : Header for usbd_conf.c file. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __USBD_CONF__H__ +#define __USBD_CONF__H__ + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include +#include +#include +#include "main.h" +#include "stm32f7xx.h" +#include "stm32f7xx_hal.h" + +/* USER CODE BEGIN INCLUDE */ + +/* USER CODE END INCLUDE */ + +/** @addtogroup USBD_OTG_DRIVER + * @{ + */ + +/** @defgroup USBD_CONF USBD_CONF + * @brief Configuration file for Usb otg low level driver. + * @{ + */ + +/** @defgroup USBD_CONF_Exported_Variables USBD_CONF_Exported_Variables + * @brief Public variables. + * @{ + */ + +/** + * @} + */ + +/** @defgroup USBD_CONF_Exported_Defines USBD_CONF_Exported_Defines + * @brief Defines for configuration of the Usb device. + * @{ + */ + +/*---------- -----------*/ +#define USBD_MAX_NUM_INTERFACES 1U +/*---------- -----------*/ +#define USBD_MAX_NUM_CONFIGURATION 1U +/*---------- -----------*/ +#define USBD_MAX_STR_DESC_SIZ 512U +/*---------- -----------*/ +#define USBD_DEBUG_LEVEL 0U +/*---------- -----------*/ +#define USBD_LPM_ENABLED 1U +/*---------- -----------*/ +#define USBD_SELF_POWERED 1U + +/****************************************/ +/* #define for FS and HS identification */ +#define DEVICE_FS 0 +#define DEVICE_HS 1 + +/** + * @} + */ + +/** @defgroup USBD_CONF_Exported_Macros USBD_CONF_Exported_Macros + * @brief Aliases. + * @{ + */ + +/* Memory management macros */ + +/** Alias for memory allocation. */ +#define USBD_malloc malloc + +/** Alias for memory release. */ +#define USBD_free free + +/** Alias for memory set. */ +#define USBD_memset memset + +/** Alias for memory copy. */ +#define USBD_memcpy memcpy + +/** Alias for delay. */ +#define USBD_Delay HAL_Delay + +/* DEBUG macros */ + +#if (USBD_DEBUG_LEVEL > 0) +#define USBD_UsrLog(...) printf(__VA_ARGS__);\ + printf("\n"); +#else +#define USBD_UsrLog(...) +#endif + +#if (USBD_DEBUG_LEVEL > 1) + +#define USBD_ErrLog(...) printf("ERROR: ") ;\ + printf(__VA_ARGS__);\ + printf("\n"); +#else +#define USBD_ErrLog(...) +#endif + +#if (USBD_DEBUG_LEVEL > 2) +#define USBD_DbgLog(...) printf("DEBUG : ") ;\ + printf(__VA_ARGS__);\ + printf("\n"); +#else +#define USBD_DbgLog(...) +#endif + +/** + * @} + */ + +/** @defgroup USBD_CONF_Exported_Types USBD_CONF_Exported_Types + * @brief Types. + * @{ + */ + +/** + * @} + */ + +/** @defgroup USBD_CONF_Exported_FunctionsPrototype USBD_CONF_Exported_FunctionsPrototype + * @brief Declaration of public functions for Usb device. 
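usbd_conf.h maps the USB device library's service hooks onto the C standard library: USBD_malloc/USBD_free alias malloc/free, USBD_memset/USBD_memcpy alias the string.h routines, and the USBD_UsrLog/ErrLog/DbgLog macros print through printf. In the standard CubeMX template those aliases are backed by three system includes at the top of the file alongside "main.h" (listed here as an assumption about the original names):

#include <stdio.h>    /* printf, used by the USBD_*Log debug macros        */
#include <stdlib.h>   /* malloc/free, behind USBD_malloc/USBD_free         */
#include <string.h>   /* memset/memcpy, behind USBD_memset/USBD_memcpy     */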
+ * @{ + */ + +/* Exported functions -------------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __USBD_CONF__H__ */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Utilities/JPEG/jpeg_utils.c b/src/third_party/stm32_cubeide/Utilities/JPEG/jpeg_utils.c new file mode 100644 index 000000000..72f88451b --- /dev/null +++ b/src/third_party/stm32_cubeide/Utilities/JPEG/jpeg_utils.c @@ -0,0 +1,1992 @@ +/** + ****************************************************************************** + * @file jpeg_utils.c + * @author MCD Application Team + * @version V2.0.0 + * @date 3-June-2016 + * @brief This driver provides JPEG MCU (Minimum Coded Unit) blocks to RGB and RGB to JPEG MCU conversion functions. + * + * The STM32 HW JPEG decoder/encoder peripheral allows to decode/encode jpeg images. + * In decoding, the STM32 HW JPEG data output are organized in blocks called MCU (Minimum Coded Unit) + * as specified in the jpeg standard. + * In encoding, the STM32 HW JPEG data input must be organized in MCU blocks. + * Depending of the image color space and chrominance sampling, an MCU is generally organized in : + * N luminance (Y) blocks + a Blue chrominance (Cb) block + a Red chrominance (Cr) block. + * Each block size is 8x8 samples. + * + * The STM32 HW JPEG supports 3 possible color space : + * - YCbCr : an MCU is composed of 3 color components : Y , Cb and Cr + * - GrayScale : an MCU is composed of 1 single color component : Y + * - CMYK : an MCU is composed of 4 color components : Cyan, Magenta, Yellow, and Key (blacK) + * + * This utility file should be used on top of the STM32 HAL JPEG driver. + * In decoding, it allows to convert MCU blocks to RGB888 or ARGB8888 pixels stored to a destination + * frame buffer that can be displayed. + * In encoding, it allows to convert RGB888 or ARGB8888 pixels to MCU blocks. + * + * This utility supports following MCU Chroma sampling format and color space : + * - YCbCr 4:2:0 : Each MCU is composed of 4 Y 8x8 blocks + 1 Cb 8x8 block + Cr 8x8 block + * - YCbCr 4:2:2 : Each MCU is composed of 2 Y 8x8 blocks + 1 Cb 8x8 block + Cr 8x8 block + * - YCbCr 4:4:4 : Each MCU is composed of 1 Y 8x8 block + 1 Cb 8x8 block + Cr 8x8 block + * - GrayScale : Each MCU is composed of 1 Y 8x8 block + * - CMYK : Each MCU is composed of 1 Cyan 8x8 block + 1 Magenta 8x8 block + 1 Yellow 8x8 block + 1 Key 8x8 block. + * + * + * @How to use this driver + * + * - The configuration file "jpeg_utils_conf_template.h" is used to configure this utility + * providing some useful flexibilities. + * This file should be copied to the application folder and modified as follows: + * - Rename it to "jpeg_utils_conf.h". + * - Update the name of the JPEG driver's header file, depending on the EVAL board you are using. + * + * Example if using the STM32F769I-EVAL board : + * - Copy the file "jpeg_utils_conf_template.h" to the application folder and rename it to "jpeg_utils_conf.h" + * - Edit the "jpeg_utils_conf.h" and change lines 51 and 52 as follow : + * #include "stm32f7xx_hal.h" + * #include "stm32f7xx_hal_jpeg.h" + * + * Using this configuration file, user can change the following settings: + * - Use RGB888 or ARGB8888 or RGB565 by setting the constant JPEG_RGB_FORMAT respectively to JPEG_RGB888, JPEG_ARGB8888 JPEG_RGB565. 
+ * - Swap RED, and Blue offsets if user needs to change the color order to BGR (instead of RGB) by setting: + * #define JPEG_SWAP_RB 1 + * - Enable or disable the decoding post-processing functions (YCbCr to RGB conversion functions) by setting the define USE_JPEG_DECODER + * respectively to 0 or 1. + * - Enable or disable the encoding pre-processing functions (RGB to YCbCr conversion functions) by setting the define USE_JPEG_ENCODER + * respectively to 0 or 1. + * + * * For Decoding: + * - First, function "JPEG_InitColorTables" should be called to initialize the YCbCr to RGB color + * conversion tables. This function needs to be called only one time at the beginning of the + * program whatever the number of jpeg files to be decoded. + * + * - As soon as the JPEG HW peripheral finished parsing the header of the JPEG input file, + * the HAL JPEG callback "HAL_JPEG_InfoReadyCallback" is launched providing the jpeg file + * characteristics found in its header. + * User can then call the utility function "JPEG_GetDecodeColorConvertFunc" with these + * information and retrieve the corresponding color conversion function and number of MCUs. + * + * Then each time an integer number of MCUs are available (from the HW JPEG output), user + * can call the retrieved function to convert these HW JPEG output data to RGB888 or + * ARGB8888 pixel stored to the specified destination buffer. + * + * * For Encoding: + * - First, function "JPEG_InitColorTables" should be called to initialize the YCbCr/RGB color + * conversion tables. This function needs to be called only one time at the beginning of the + * program whatever the number of jpeg files to be encoded or decoded. + * + * - First Use the utility function "JPEG_GetEncodeColorConvertFunc" with the input image informations + * to retrieve the corresponding color conversion function and number of MCUs. + * + * Then each time an RGB input buffer is available, user can call the retrieved function to convert + * RGB data to MCU blocks stored to the specified destination buffer. + * + * + ****************************************************************************** + * @attention + * + *
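Concretely, the decoding flow described above amounts to three calls into this utility. The sketch below assumes the usual prototypes published in jpeg_utils.h (JPEG_InitColorTables(), JPEG_GetDecodeColorConvertFunc() and the JPEG_YCbCrToRGB_Convert_Function pointer type); the chunk-handling function and frame-buffer handling are purely illustrative:

#include "jpeg_utils.h"

static JPEG_YCbCrToRGB_Convert_Function pConvert;  /* chosen per image        */
static uint32_t totalMCUs, decodedMCUs;

void App_JPEG_Init(void)
{
  JPEG_InitColorTables();                           /* once, at program start  */
}

void HAL_JPEG_InfoReadyCallback(JPEG_HandleTypeDef *hjpeg, JPEG_ConfTypeDef *pInfo)
{
  (void)hjpeg;
  /* Header parsed by the HW codec: pick the matching YCbCr->RGB routine. */
  JPEG_GetDecodeColorConvertFunc(pInfo, &pConvert, &totalMCUs);
}

/* Call for every whole number of MCUs output by the HW JPEG decoder. */
void App_JPEG_McuChunk(uint8_t *mcuData, uint32_t mcuBytes, uint8_t *rgbFrame)
{
  uint32_t convertedBytes;
  decodedMCUs += pConvert(mcuData, rgbFrame, decodedMCUs, mcuBytes, &convertedBytes);
}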

© COPYRIGHT(c) 2016 STMicroelectronics

+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of STMicroelectronics nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "jpeg_utils.h" + +/** @addtogroup Utilities + * @{ + */ + +/** @defgroup JPEG JPEG +* @brief JPEG YCbCr blocks <-> RGB conversion utilties +* @{ +*/ + +/** @defgroup JPEG_Private_Types JPEG Private Types +* @{ +*/ +/* Private typedef -----------------------------------------------------------*/ +typedef struct __JPEG_MCU_RGB_ConvertorTypeDef +{ + uint32_t ColorSpace; + uint32_t ChromaSubsampling; + + uint32_t ImageWidth; + uint32_t ImageHeight; + uint32_t ImageSize_Bytes; + + uint32_t LineOffset; + uint32_t BlockSize; + + uint32_t H_factor; + uint32_t V_factor; + + uint32_t WidthExtend; + uint32_t ScaledWidth; + + uint32_t MCU_Total_Nb; + + uint16_t *Y_MCU_LUT; + uint16_t *Cb_MCU_LUT; + uint16_t *Cr_MCU_LUT; + uint16_t *K_MCU_LUT; + +}JPEG_MCU_RGB_ConvertorTypeDef; +/** +* @} +*/ + +/** @defgroup JPEG_Private_Defines JPEG Private Defines +* @{ +*/ +/* Private define ------------------------------------------------------------*/ +#define YCBCR_420_BLOCK_SIZE 384 /* YCbCr 4:2:0 MCU : 4 8x8 blocks of Y + 1 8x8 block of Cb + 1 8x8 block of Cr */ +#define YCBCR_422_BLOCK_SIZE 256 /* YCbCr 4:2:2 MCU : 2 8x8 blocks of Y + 1 8x8 block of Cb + 1 8x8 block of Cr */ +#define YCBCR_444_BLOCK_SIZE 192 /* YCbCr 4:4:4 MCU : 1 8x8 block of Y + 1 8x8 block of Cb + 1 8x8 block of Cr */ + +#define GRAY_444_BLOCK_SIZE 64 /* GrayScale MCU : 1 8x8 block of Y */ + +#define CMYK_444_BLOCK_SIZE 256 /* CMYK MCU : 1 8x8 blocks of Cyan + 1 8x8 block Magenta + 1 8x8 block of Yellow and 1 8x8 block of BlacK */ + +#if (JPEG_RGB_FORMAT == JPEG_ARGB8888) + #define JPEG_GREEN_OFFSET 8 /* Offset of the GREEN color in a pixel */ + #define JPEG_ALPHA_OFFSET 24 /* Offset of the Transparency Alpha in a pixel */ + #define JPEG_BYTES_PER_PIXEL 4 /* Number of bytes in a pixel */ + #if (JPEG_SWAP_RB == 0) + #define JPEG_RED_OFFSET 16 /* Offset of the RED color in a pixel 
*/ + #define JPEG_BLUE_OFFSET 0 /* Offset of the BLUE color in a pixel */ + #else + #define JPEG_RED_OFFSET 0 /* Offset of the RED color in a pixel */ + #define JPEG_BLUE_OFFSET 16 /* Offset of the BLUE color in a pixel */ + #endif /* JPEG_SWAP_RB */ + +#elif(JPEG_RGB_FORMAT == JPEG_RGB888) + #define JPEG_GREEN_OFFSET 8 /* Offset of the GREEN color in a pixel */ + #define JPEG_BYTES_PER_PIXEL 3 /* Number of bytes in a pixel */ + + #if (JPEG_SWAP_RB == 0) + #define JPEG_RED_OFFSET 16 /* Offset of the RED color in a pixel */ + #define JPEG_BLUE_OFFSET 0 /* Offset of the BLUE color in a pixel */ + #else + #define JPEG_RED_OFFSET 0 /* Offset of the RED color in a pixel */ + #define JPEG_BLUE_OFFSET 16 /* Offset of the BLUE color in a pixel */ + #endif /* JPEG_SWAP_RB */ + +#elif (JPEG_RGB_FORMAT == JPEG_RGB565) + #define JPEG_GREEN_OFFSET 5 /* Offset of the GREEN color in a pixel */ + #define JPEG_BYTES_PER_PIXEL 2 /* Number of bytes in a pixel */ + #define JPEG_RGB565_GREEN_MASK 0x07E0 /* Mask of Green component in RGB565 Format */ + + #if (JPEG_SWAP_RB == 0) + #define JPEG_RED_OFFSET 11 /* Offset of the RED color in a pixel */ + #define JPEG_BLUE_OFFSET 0 /* Offset of the BLUE color in a pixel */ + #define JPEG_RGB565_RED_MASK 0xF800 /* Mask of Red component in RGB565 Format */ + #define JPEG_RGB565_BLUE_MASK 0x001F /* Mask of Blue component in RGB565 Format */ + #else + #define JPEG_RED_OFFSET 0 /* Offset of the RED color in a pixel */ + #define JPEG_BLUE_OFFSET 11 /* Offset of the BLUE color in a pixel */ + #define JPEG_RGB565_RED_MASK 0x001F /* Mask of Red component in RGB565 Format */ + #define JPEG_RGB565_BLUE_MASK 0xF800 /* Mask of Blue component in RGB565 Format */ + #endif /* JPEG_SWAP_RB */ + +#else + + #error "unknown JPEG_RGB_FORMAT " + +#endif /* JPEG_RGB_FORMAT */ + +/** +* @} +*/ + +/** @defgroup JPEG_Private_Macros JPEG Private Macros +* @{ +*/ +/* Private macro -------------------------------------------------------------*/ +#if (USE_JPEG_DECODER == 1) +#define CLAMP(value) CLAMP_LUT[(value) + 0x100] /* Range limitting macro */ +#endif +#if (USE_JPEG_ENCODER == 1) +#define MAX(val1,val2) ((val1 > val2) ? 
val1 : val2) +#endif +/** +* @} +*/ + +/** @defgroup JPEG_Private_Variables JPEG Private Variables +* @{ +*/ +/* Private variables ---------------------------------------------------------*/ + +static JPEG_MCU_RGB_ConvertorTypeDef JPEG_ConvertorParams; + +#if (USE_JPEG_DECODER == 1) +static int32_t CR_RED_LUT[256]; /* Cr to Red color conversion Look Up Table */ +static int32_t CB_BLUE_LUT[256]; /* Cb to Blue color conversion Look Up Table */ +static int32_t CR_GREEN_LUT[256]; /* Cr to Green color conversion Look Up Table*/ +static int32_t CB_GREEN_LUT[256]; /* Cb to Green color conversion Look Up Table*/ +#endif /* USE_JPEG_DECODER == 1 */ + +#if (USE_JPEG_ENCODER == 1) +static int32_t RED_Y_LUT[256]; /* Red to Y color conversion Look Up Table */ +static int32_t RED_CB_LUT[256]; /* Red to Cb color conversion Look Up Table */ +static int32_t BLUE_CB_RED_CR_LUT[256]; /* Red to Cr and Blue to Cb color conversion Look Up Table */ +static int32_t GREEN_Y_LUT[256]; /* Green to Y color conversion Look Up Table*/ +static int32_t GREEN_CR_LUT[256]; /* Green to Cr color conversion Look Up Table*/ +static int32_t GREEN_CB_LUT[256]; /* Green to Cb color conversion Look Up Table*/ +static int32_t BLUE_Y_LUT[256]; /* Blue to Y color conversion Look Up Table */ +static int32_t BLUE_CR_LUT[256]; /* Blue to Cr color conversion Look Up Table */ + +/* Different MCU Look Up Table */ +static uint16_t JPEG_Y_MCU_LUT[256]; +static uint16_t JPEG_Y_MCU_444_LUT[64]; + +static uint16_t JPEG_Cb_MCU_420_LUT[256]; +static uint16_t JPEG_Cb_MCU_422_LUT[256]; +static uint16_t JPEG_Cb_MCU_444_LUT[64]; + +static uint16_t JPEG_Cr_MCU_420_LUT[256]; +static uint16_t JPEG_Cr_MCU_422_LUT[256]; +static uint16_t JPEG_Cr_MCU_444_LUT[64]; + +static uint16_t JPEG_K_MCU_420_LUT[256]; +static uint16_t JPEG_K_MCU_422_LUT[256]; +static uint16_t JPEG_K_MCU_444_LUT[64]; + +/* YCCK format blocks */ +uint8_t kBlocks[16][16]; +#endif /* USE_JPEG_ENCODER == 1 */ + +#if (USE_JPEG_DECODER == 1) +/* color clamp table : used for range limitting */ +static const uint8_t CLAMP_LUT[] = { +/* clamp range 0xffffffff to 0xffffff00 */ +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* clamp range 0x00 to 0xff */ +0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, +0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, +0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, +0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, +0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, +0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, +0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, +0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, +0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, +0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, +0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, +0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, +0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, +0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, +0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, +0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, +/* clamp range 0x100 to 0x1ff */ +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +#endif /* USE_JPEG_DECODER == 1 */ +/** +* @} +*/ + +/** @defgroup JPEG_Private_FunctionPrototypes JPEG Private FunctionPrototypes +* @{ +*/ +/* Private function prototypes -----------------------------------------------*/ +#if (USE_JPEG_ENCODER == 1) +static uint32_t 
JPEG_ARGB_MCU_YCbCr420_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount); + +static uint32_t JPEG_ARGB_MCU_YCbCr422_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount); + +static uint32_t JPEG_ARGB_MCU_YCbCr444_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount); + + +static uint32_t JPEG_ARGB_MCU_Gray_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount); + +static uint32_t JPEG_ARGB_MCU_YCCK_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount); + +static void JPEG_Init_MCU_LUT(void); +static void JPEG_InitPreProcColorTables(void); +static uint8_t *JPEG_Set_K_Blocks(uint8_t *pMCUBuffer, uint8_t pKBlocks[16][16], uint32_t ChromaSampling); + +#endif /* USE_JPEG_ENCODER == 1 */ + +#if (USE_JPEG_DECODER == 1) +static uint32_t JPEG_MCU_YCbCr420_ARGB_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount); + +static uint32_t JPEG_MCU_YCbCr422_ARGB_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount); + +static uint32_t JPEG_MCU_YCbCr444_ARGB_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount); + + +static uint32_t JPEG_MCU_Gray_ARGB_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount); + +static uint32_t JPEG_MCU_YCCK_ARGB_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount); +static void JPEG_InitPostProcColorTables(void); +#endif /* USE_JPEG_DECODER == 1 */ + +/** +* @} +*/ + +/** @defgroup JPEG_Private_Functions JPEG Private Functions +* @{ +*/ + +#if (USE_JPEG_ENCODER == 1) +/** + * @brief Convert RGB to YCbCr 4:2:0 blocks pixels + * @param pInBuffer : pointer to input RGB888/ARGB8888 frame buffer. + * @param pOutBuffer : pointer to output YCbCr blocks buffer. + * @param BlockIndex : index of the input buffer first block in the final image. + * @param DataCount : number of bytes in the input buffer . + * @param ConvertedDataCount : number of converted bytes from input buffer. 
+ * @retval Number of blcoks converted from RGB to YCbCr + */ +static uint32_t JPEG_ARGB_MCU_YCbCr420_ConvertBlocks (uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount) +{ + uint32_t numberMCU; + uint32_t i,j, currentMCU, xRef,yRef, colones; + + uint32_t refline; + int32_t ycomp, crcomp, cbcomp, offset; + + uint32_t red, green, blue; + + uint8_t *pOutAddr; + uint8_t *pInAddr; + + + numberMCU = ((3 * DataCount) / ( 2 * JPEG_BYTES_PER_PIXEL * YCBCR_420_BLOCK_SIZE)); + + currentMCU = BlockIndex; + *ConvertedDataCount = numberMCU * JPEG_ConvertorParams.BlockSize; + + pOutAddr = &pOutBuffer[0]; + + while(currentMCU < (numberMCU + BlockIndex)) + { + xRef = ((currentMCU *JPEG_ConvertorParams.H_factor) / JPEG_ConvertorParams.WidthExtend)*JPEG_ConvertorParams.V_factor; + + yRef = ((currentMCU *JPEG_ConvertorParams.H_factor) % JPEG_ConvertorParams.WidthExtend); + + + refline = JPEG_ConvertorParams.ScaledWidth * xRef + (JPEG_BYTES_PER_PIXEL*yRef); + + currentMCU++; + + if(((currentMCU *JPEG_ConvertorParams.H_factor) % JPEG_ConvertorParams.WidthExtend) == 0) + { + colones = JPEG_ConvertorParams.H_factor - JPEG_ConvertorParams.LineOffset; + } + else + { + colones = JPEG_ConvertorParams.H_factor; + } + offset = 0; + + for(i= 0; i < JPEG_ConvertorParams.V_factor; i+=2) + { + + pInAddr = &pInBuffer[0] ; + + for(j=0; j < colones; j+=2) + { + /* First Pixel */ +#if (JPEG_RGB_FORMAT == JPEG_RGB565) + red = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_RED_MASK) >> JPEG_RED_OFFSET) ; + green = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_GREEN_MASK) >> JPEG_GREEN_OFFSET) ; + blue = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_BLUE_MASK) >> JPEG_BLUE_OFFSET) ; + red = (red << 3) | (red >> 2); + green = (green << 2) | (green >> 4); + blue = (blue << 3) | (blue >> 2); +#else + red = (*(pInAddr + refline + JPEG_RED_OFFSET/8)) ; + green = (*(pInAddr + refline + JPEG_GREEN_OFFSET/8)) ; + blue = (*(pInAddr + refline + JPEG_BLUE_OFFSET/8)) ; +#endif + ycomp = (int32_t)(*(RED_Y_LUT + red)) + (int32_t)(*(GREEN_Y_LUT + green)) + (int32_t)(*(BLUE_Y_LUT + blue)); + cbcomp = (int32_t)(*(RED_CB_LUT + red)) + (int32_t)(*(GREEN_CB_LUT + green)) + (int32_t)(*(BLUE_CB_RED_CR_LUT + blue)) + 128; + crcomp = (int32_t)(*(BLUE_CB_RED_CR_LUT + red)) + (int32_t)(*(GREEN_CR_LUT + green)) + (int32_t)(*(BLUE_CR_LUT + blue)) + 128; + + (*(pOutAddr + JPEG_ConvertorParams.Y_MCU_LUT[offset])) = (ycomp); + (*(pOutAddr + JPEG_ConvertorParams.Cb_MCU_LUT[offset])) = (cbcomp); + (*(pOutAddr + JPEG_ConvertorParams.Cr_MCU_LUT[offset])) = (crcomp); + + /* Second Pixel */ +#if (JPEG_RGB_FORMAT == JPEG_RGB565) + red = (((*(__IO uint16_t *)(pInAddr + JPEG_BYTES_PER_PIXEL + refline)) & JPEG_RGB565_RED_MASK) >> JPEG_RED_OFFSET) ; + green = (((*(__IO uint16_t *)(pInAddr + JPEG_BYTES_PER_PIXEL + refline)) & JPEG_RGB565_GREEN_MASK) >> JPEG_GREEN_OFFSET) ; + blue = (((*(__IO uint16_t *)(pInAddr + JPEG_BYTES_PER_PIXEL + refline)) & JPEG_RGB565_BLUE_MASK) >> JPEG_BLUE_OFFSET) ; + red = (red << 3) | (red >> 2); + green = (green << 2) | (green >> 4); + blue = (blue << 3) | (blue >> 2); +#else + red = (*(pInAddr + refline + JPEG_BYTES_PER_PIXEL + JPEG_RED_OFFSET/8)) ; + green = (*(pInAddr + refline + JPEG_BYTES_PER_PIXEL + JPEG_GREEN_OFFSET/8)) ; + blue = (*(pInAddr + refline + JPEG_BYTES_PER_PIXEL + JPEG_BLUE_OFFSET/8)) ; +#endif + ycomp = (int32_t)(*(RED_Y_LUT + red)) + (int32_t)(*(GREEN_Y_LUT + green)) + (int32_t)(*(BLUE_Y_LUT + blue)); + (*(pOutAddr + 
JPEG_ConvertorParams.Y_MCU_LUT[offset + 1])) = (ycomp); + + /* Third Pixel */ +#if (JPEG_RGB_FORMAT == JPEG_RGB565) + red = (((*(__IO uint16_t *)(pInAddr + JPEG_ConvertorParams.ScaledWidth + refline)) & JPEG_RGB565_RED_MASK) >> JPEG_RED_OFFSET) ; + green = (((*(__IO uint16_t *)(pInAddr + JPEG_ConvertorParams.ScaledWidth + refline)) & JPEG_RGB565_GREEN_MASK) >> JPEG_GREEN_OFFSET) ; + blue = (((*(__IO uint16_t *)(pInAddr + JPEG_ConvertorParams.ScaledWidth + refline)) & JPEG_RGB565_BLUE_MASK) >> JPEG_BLUE_OFFSET) ; + red = (red << 3) | (red >> 2); + green = (green << 2) | (green >> 4); + blue = (blue << 3) | (blue >> 2); +#else + red = (*(pInAddr + refline + JPEG_ConvertorParams.ScaledWidth + JPEG_RED_OFFSET/8)) ; + green = (*(pInAddr + refline + JPEG_ConvertorParams.ScaledWidth + JPEG_GREEN_OFFSET/8)) ; + blue = (*(pInAddr + refline + JPEG_ConvertorParams.ScaledWidth + JPEG_BLUE_OFFSET/8)) ; +#endif + ycomp = (int32_t)(*(RED_Y_LUT + red)) + (int32_t)(*(GREEN_Y_LUT + green)) + (int32_t)(*(BLUE_Y_LUT + blue)); + + (*(pOutAddr + JPEG_ConvertorParams.Y_MCU_LUT[offset + JPEG_ConvertorParams.H_factor])) = (ycomp); + + /* Fourth Pixel */ +#if (JPEG_RGB_FORMAT == JPEG_RGB565) + red = (((*(__IO uint16_t *)(pInAddr + refline + JPEG_ConvertorParams.ScaledWidth + JPEG_BYTES_PER_PIXEL)) & JPEG_RGB565_RED_MASK) >> JPEG_RED_OFFSET) ; + green = (((*(__IO uint16_t *)(pInAddr + refline + JPEG_ConvertorParams.ScaledWidth + JPEG_BYTES_PER_PIXEL)) & JPEG_RGB565_GREEN_MASK) >> JPEG_GREEN_OFFSET) ; + blue = (((*(__IO uint16_t *)(pInAddr + refline + JPEG_ConvertorParams.ScaledWidth + JPEG_BYTES_PER_PIXEL)) & JPEG_RGB565_BLUE_MASK) >> JPEG_BLUE_OFFSET) ; + red = (red << 3) | (red >> 2); + green = (green << 2) | (green >> 4); + blue = (blue << 3) | (blue >> 2); +#else + red = (*(pInAddr + refline + JPEG_ConvertorParams.ScaledWidth + JPEG_BYTES_PER_PIXEL + JPEG_RED_OFFSET/8)) ; + green = (*(pInAddr + refline + JPEG_ConvertorParams.ScaledWidth + JPEG_BYTES_PER_PIXEL + JPEG_GREEN_OFFSET/8)) ; + blue = (*(pInAddr + refline + JPEG_ConvertorParams.ScaledWidth + JPEG_BYTES_PER_PIXEL + JPEG_BLUE_OFFSET/8)) ; +#endif + ycomp = (int32_t)(*(RED_Y_LUT + red)) + (int32_t)(*(GREEN_Y_LUT + green)) + (int32_t)(*(BLUE_Y_LUT + blue)); + (*(pOutAddr + JPEG_ConvertorParams.Y_MCU_LUT[offset + JPEG_ConvertorParams.H_factor + 1])) = (ycomp); + + /****************/ + + pInAddr += JPEG_BYTES_PER_PIXEL * 2; + offset+=2; + } + offset += JPEG_ConvertorParams.H_factor + (JPEG_ConvertorParams.H_factor - colones); + refline += JPEG_ConvertorParams.ScaledWidth * 2 ; + } + pOutAddr += JPEG_ConvertorParams.BlockSize ; + } + return numberMCU; +} + +/** + * @brief Convert RGB to YCbCr 4:2:2 blocks pixels + * @param pInBuffer : pointer to input RGB888/ARGB8888 frame buffer. + * @param pOutBuffer : pointer to output YCbCr blocks buffer. + * @param BlockIndex : index of the input buffer first block in the final image. + * @param DataCount : number of bytes in the input buffer . + * @param ConvertedDataCount : number of converted bytes from input buffer. 
+ * @retval Number of blcoks converted from RGB to YCbCr + */ +static uint32_t JPEG_ARGB_MCU_YCbCr422_ConvertBlocks (uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount) +{ + uint32_t numberMCU; + uint32_t i,j, currentMCU, xRef,yRef, colones; + + uint32_t refline; + int32_t ycomp, crcomp, cbcomp, offset; + + uint32_t red, green, blue; + + uint8_t *pOutAddr; + uint8_t *pInAddr; + + numberMCU = ((2 * DataCount) / (JPEG_BYTES_PER_PIXEL * YCBCR_422_BLOCK_SIZE)); + + currentMCU = BlockIndex; + *ConvertedDataCount = numberMCU * JPEG_ConvertorParams.BlockSize; + + pOutAddr = &pOutBuffer[0]; + + while(currentMCU < (numberMCU + BlockIndex)) + { + xRef = ((currentMCU *JPEG_ConvertorParams.H_factor) / JPEG_ConvertorParams.WidthExtend)*JPEG_ConvertorParams.V_factor; + + yRef = ((currentMCU *JPEG_ConvertorParams.H_factor) % JPEG_ConvertorParams.WidthExtend); + + + refline = JPEG_ConvertorParams.ScaledWidth * xRef + (JPEG_BYTES_PER_PIXEL*yRef); + + currentMCU++; + + if(((currentMCU *JPEG_ConvertorParams.H_factor) % JPEG_ConvertorParams.WidthExtend) == 0) + { + colones = JPEG_ConvertorParams.H_factor - JPEG_ConvertorParams.LineOffset; + } + else + { + colones = JPEG_ConvertorParams.H_factor; + } + offset = 0; + + for(i= 0; i < JPEG_ConvertorParams.V_factor; i+=1) + { + + pInAddr = &pInBuffer[0] ; + + for(j=0; j < colones; j+=2) + { + /* First Pixel */ +#if (JPEG_RGB_FORMAT == JPEG_RGB565) + red = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_RED_MASK) >> JPEG_RED_OFFSET) ; + green = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_GREEN_MASK) >> JPEG_GREEN_OFFSET) ; + blue = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_BLUE_MASK) >> JPEG_BLUE_OFFSET) ; + red = (red << 3) | (red >> 2); + green = (green << 2) | (green >> 4); + blue = (blue << 3) | (blue >> 2); +#else + red = (*(pInAddr + refline + JPEG_RED_OFFSET/8)) ; + green = (*(pInAddr + refline + JPEG_GREEN_OFFSET/8)) ; + blue = (*(pInAddr + refline + JPEG_BLUE_OFFSET/8)) ; +#endif + ycomp = (int32_t)(*(RED_Y_LUT + red)) + (int32_t)(*(GREEN_Y_LUT + green)) + (int32_t)(*(BLUE_Y_LUT + blue)); + cbcomp = (int32_t)(*(RED_CB_LUT + red)) + (int32_t)(*(GREEN_CB_LUT + green)) + (int32_t)(*(BLUE_CB_RED_CR_LUT + blue)) + 128; + crcomp = (int32_t)(*(BLUE_CB_RED_CR_LUT + red)) + (int32_t)(*(GREEN_CR_LUT + green)) + (int32_t)(*(BLUE_CR_LUT + blue)) + 128; + + (*(pOutAddr + JPEG_ConvertorParams.Y_MCU_LUT[offset])) = ycomp; + (*(pOutAddr + JPEG_ConvertorParams.Cb_MCU_LUT[offset])) = cbcomp; + (*(pOutAddr + JPEG_ConvertorParams.Cr_MCU_LUT[offset])) = crcomp; + + /* Second Pixel */ +#if (JPEG_RGB_FORMAT == JPEG_RGB565) + red = (((*(__IO uint16_t *)(pInAddr + refline + JPEG_BYTES_PER_PIXEL)) & JPEG_RGB565_RED_MASK) >> JPEG_RED_OFFSET) ; + green = (((*(__IO uint16_t *)(pInAddr + refline + JPEG_BYTES_PER_PIXEL)) & JPEG_RGB565_GREEN_MASK) >> JPEG_GREEN_OFFSET) ; + blue = (((*(__IO uint16_t *)(pInAddr + refline + JPEG_BYTES_PER_PIXEL)) & JPEG_RGB565_BLUE_MASK) >> JPEG_BLUE_OFFSET) ; + red = (red << 3) | (red >> 2); + green = (green << 2) | (green >> 4); + blue = (blue << 3) | (blue >> 2); +#else + red = (*(pInAddr + refline + JPEG_BYTES_PER_PIXEL + JPEG_RED_OFFSET/8)) ; + green = (*(pInAddr + refline + JPEG_BYTES_PER_PIXEL + JPEG_GREEN_OFFSET/8)) ; + blue = (*(pInAddr + refline + JPEG_BYTES_PER_PIXEL + JPEG_BLUE_OFFSET/8)) ; +#endif + ycomp = (int32_t)(*(RED_Y_LUT + red)) + (int32_t)(*(GREEN_Y_LUT + green)) + (int32_t)(*(BLUE_Y_LUT + blue)); + (*(pOutAddr + 
JPEG_ConvertorParams.Y_MCU_LUT[offset + 1])) = ycomp; + + /****************/ + + pInAddr += JPEG_BYTES_PER_PIXEL * 2; + offset+=2; + } + offset += (JPEG_ConvertorParams.H_factor - colones); + refline += JPEG_ConvertorParams.ScaledWidth ; + + } + pOutAddr += JPEG_ConvertorParams.BlockSize; + } + + return numberMCU; +} + +/** + * @brief Convert RGB to YCbCr 4:4:4 blocks pixels + * @param pInBuffer : pointer to input RGB888/ARGB8888 frame buffer. + * @param pOutBuffer : pointer to output YCbCr blocks buffer. + * @param BlockIndex : index of the input buffer first block in the final image. + * @param DataCount : number of bytes in the input buffer . + * @param ConvertedDataCount : number of converted bytes from input buffer. + * @retval Number of blcoks converted from RGB to YCbCr + */ +static uint32_t JPEG_ARGB_MCU_YCbCr444_ConvertBlocks (uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount) +{ + uint32_t numberMCU; + uint32_t i,j, currentMCU, xRef,yRef, colones; + + uint32_t refline; + int32_t ycomp, crcomp, cbcomp, offset; + + uint32_t red, green, blue; + + uint8_t *pOutAddr; + uint8_t *pInAddr; + + numberMCU = ((3 * DataCount) / (JPEG_BYTES_PER_PIXEL * YCBCR_444_BLOCK_SIZE)); + + currentMCU = BlockIndex; + *ConvertedDataCount = numberMCU * JPEG_ConvertorParams.BlockSize; + + pOutAddr = &pOutBuffer[0]; + + while(currentMCU < (numberMCU + BlockIndex)) + { + xRef = ((currentMCU *JPEG_ConvertorParams.H_factor) / JPEG_ConvertorParams.WidthExtend)*JPEG_ConvertorParams.V_factor; + + yRef = ((currentMCU *JPEG_ConvertorParams.H_factor) % JPEG_ConvertorParams.WidthExtend); + + refline = JPEG_ConvertorParams.ScaledWidth * xRef + (JPEG_BYTES_PER_PIXEL*yRef); + + currentMCU++; + + if(((currentMCU *JPEG_ConvertorParams.H_factor) % JPEG_ConvertorParams.WidthExtend) == 0) + { + colones = JPEG_ConvertorParams.H_factor - JPEG_ConvertorParams.LineOffset; + } + else + { + colones = JPEG_ConvertorParams.H_factor; + } + offset = 0; + + for(i= 0; i < JPEG_ConvertorParams.V_factor; i++) + { + pInAddr = &pInBuffer[0] ; + + for(j=0; j < colones; j++) + { +#if (JPEG_RGB_FORMAT == JPEG_RGB565) + red = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_RED_MASK) >> JPEG_RED_OFFSET) ; + green = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_GREEN_MASK) >> JPEG_GREEN_OFFSET) ; + blue = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_BLUE_MASK) >> JPEG_BLUE_OFFSET) ; + red = (red << 3) | (red >> 2); + green = (green << 2) | (green >> 4); + blue = (blue << 3) | (blue >> 2); +#else + red = (*(pInAddr + refline + JPEG_RED_OFFSET/8)) ; + green = (*(pInAddr + refline + JPEG_GREEN_OFFSET/8)) ; + blue = (*(pInAddr + refline + JPEG_BLUE_OFFSET/8)) ; +#endif + ycomp = (int32_t)(*(RED_Y_LUT + red)) + (int32_t)(*(GREEN_Y_LUT + green)) + (int32_t)(*(BLUE_Y_LUT + blue)); + cbcomp = (int32_t)(*(RED_CB_LUT + red)) + (int32_t)(*(GREEN_CB_LUT + green)) + (int32_t)(*(BLUE_CB_RED_CR_LUT + blue)) + 128; + crcomp = (int32_t)(*(BLUE_CB_RED_CR_LUT + red)) + (int32_t)(*(GREEN_CR_LUT + green)) + (int32_t)(*(BLUE_CR_LUT + blue)) + 128; + + (*(pOutAddr + JPEG_ConvertorParams.Y_MCU_LUT[offset])) = (ycomp); + (*(pOutAddr + JPEG_ConvertorParams.Cb_MCU_LUT[offset])) = (cbcomp); + (*(pOutAddr + JPEG_ConvertorParams.Cr_MCU_LUT[offset])) = (crcomp); + + pInAddr += JPEG_BYTES_PER_PIXEL; + offset++; + } + offset += (JPEG_ConvertorParams.H_factor - colones); + refline += JPEG_ConvertorParams.ScaledWidth; + } + pOutAddr += JPEG_ConvertorParams.BlockSize; + } + + return 
numberMCU; +} + +/** + * @brief Convert RGB to Gray blocks pixels + * @param pInBuffer : pointer to input RGB888/ARGB8888 blocks. + * @param pOutBuffer : pointer to output Gray blocks buffer. + * @param BlockIndex : index of the input buffer first block in the final image. + * @param DataCount : number of bytes in the input buffer . + * @param ConvertedDataCount : number of converted bytes from input buffer. + * @retval Number of blcoks converted from RGB to Gray + */ +static uint32_t JPEG_ARGB_MCU_Gray_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount) +{ + uint32_t numberMCU; + uint32_t i,j, currentMCU, xRef,yRef, colones; + + uint32_t refline; + int32_t offset; + + uint32_t red, green, blue; + + uint8_t *pOutAddr; + uint8_t *pInAddr; + uint8_t ycomp; + + numberMCU = (DataCount / (JPEG_BYTES_PER_PIXEL * GRAY_444_BLOCK_SIZE)); + + currentMCU = BlockIndex; + *ConvertedDataCount = numberMCU * GRAY_444_BLOCK_SIZE; + + pOutAddr = &pOutBuffer[0]; + + while(currentMCU < (numberMCU + BlockIndex)) + { + xRef = ((currentMCU *JPEG_ConvertorParams.H_factor) / JPEG_ConvertorParams.WidthExtend)*JPEG_ConvertorParams.V_factor; + + yRef = ((currentMCU *JPEG_ConvertorParams.H_factor) % JPEG_ConvertorParams.WidthExtend); + + refline = JPEG_ConvertorParams.ScaledWidth * xRef + (JPEG_BYTES_PER_PIXEL*yRef); + + currentMCU++; + + if(((currentMCU *JPEG_ConvertorParams.H_factor) % JPEG_ConvertorParams.WidthExtend) == 0) + { + colones = JPEG_ConvertorParams.H_factor - JPEG_ConvertorParams.LineOffset; + } + else + { + colones = JPEG_ConvertorParams.H_factor; + } + offset = 0; + + for(i= 0; i < JPEG_ConvertorParams.V_factor; i++) + { + pInAddr = &pInBuffer[0] ; + + for(j=0; j < colones; j++) + { +#if (JPEG_RGB_FORMAT == JPEG_RGB565) + red = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_RED_MASK) >> JPEG_RED_OFFSET) ; + green = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_GREEN_MASK) >> JPEG_GREEN_OFFSET) ; + blue = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_BLUE_MASK) >> JPEG_BLUE_OFFSET) ; + red = (red << 3) | (red >> 2); + green = (green << 2) | (green >> 4); + blue = (blue << 3) | (blue >> 2); +#else + red = (*(pInAddr + refline + JPEG_RED_OFFSET/8)) ; + green = (*(pInAddr + refline + JPEG_GREEN_OFFSET/8)) ; + blue = (*(pInAddr + refline + JPEG_BLUE_OFFSET/8)) ; +#endif + ycomp = (uint8_t)((int32_t)(*(RED_Y_LUT + red)) + (int32_t)(*(GREEN_Y_LUT + green)) + (int32_t)(*(BLUE_Y_LUT + blue))); + + (*(pOutAddr + offset)) = (ycomp); + + pInAddr += JPEG_BYTES_PER_PIXEL; + offset++; + } + offset += (JPEG_ConvertorParams.H_factor - colones); + refline += JPEG_ConvertorParams.ScaledWidth; + } + pOutAddr += JPEG_ConvertorParams.BlockSize; + } + + return numberMCU; +} + +/** + * @brief Convert RGB to YCCK blocks pixels + * @param pInBuffer : pointer to input RGB888/ARGB8888 blocks. + * @param pOutBuffer : pointer to output YCCK blocks buffer. + * @param BlockIndex : index of the input buffer first block in the final image. + * @param DataCount : number of bytes in the input buffer . + * @param ConvertedDataCount : number of converted bytes from input buffer. 
+ * @retval Number of blcoks converted from RGB to YCCK + */ +static uint32_t JPEG_ARGB_MCU_YCCK_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount) +{ + uint32_t numberMCU; + uint32_t i,j, currentMCU, xRef,yRef, colones; + + uint32_t refline; + uint32_t offset; + + int32_t red, green, blue, color_k ; + + uint8_t *pOutAddr; + uint8_t *pInAddr; + + numberMCU = ((3 * DataCount) / (JPEG_BYTES_PER_PIXEL * CMYK_444_BLOCK_SIZE)); + *ConvertedDataCount = numberMCU * CMYK_444_BLOCK_SIZE; + + currentMCU = BlockIndex; + + pOutAddr = &pOutBuffer[0]; + + while(currentMCU < (numberMCU + BlockIndex)) + { + xRef = ((currentMCU *JPEG_ConvertorParams.H_factor) / JPEG_ConvertorParams.WidthExtend)*JPEG_ConvertorParams.V_factor; + + yRef = ((currentMCU *JPEG_ConvertorParams.H_factor) % JPEG_ConvertorParams.WidthExtend); + + refline = JPEG_ConvertorParams.ScaledWidth * xRef + (JPEG_BYTES_PER_PIXEL*yRef); + + currentMCU++; + + if(((currentMCU *JPEG_ConvertorParams.H_factor) % JPEG_ConvertorParams.WidthExtend) == 0) + { + colones = JPEG_ConvertorParams.H_factor - JPEG_ConvertorParams.LineOffset; + } + else + { + colones = JPEG_ConvertorParams.H_factor; + } + offset = 0; + for(i= 0; i < JPEG_ConvertorParams.V_factor; i++) + { + pInAddr = &pInBuffer[refline]; + + for(j=0; j < colones; j++) + { +#if (JPEG_RGB_FORMAT == JPEG_RGB565) + red = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_RED_MASK) >> JPEG_RED_OFFSET) ; + green = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_GREEN_MASK) >> JPEG_GREEN_OFFSET) ; + blue = (((*(__IO uint16_t *)(pInAddr + refline)) & JPEG_RGB565_BLUE_MASK) >> JPEG_BLUE_OFFSET) ; + red = (red << 3) | (red >> 2); + green = (green << 2) | (green >> 4); + blue = (blue << 3) | (blue >> 2); +#else + red = (*(pInAddr + JPEG_RED_OFFSET/8)) ; + green = (*(pInAddr + JPEG_GREEN_OFFSET/8)) ; + blue = (*(pInAddr + JPEG_BLUE_OFFSET/8)) ; +#endif + color_k = MAX(MAX(red,green),blue); + kBlocks[i][j]=color_k; + + (*(pOutAddr + JPEG_ConvertorParams.Y_MCU_LUT[offset])) = red * 255 / color_k; + (*(pOutAddr + JPEG_ConvertorParams.Cb_MCU_LUT[offset])) = green * 255 / color_k; + (*(pOutAddr + JPEG_ConvertorParams.Cr_MCU_LUT[offset])) = blue * 255 / color_k; + + pInAddr += JPEG_BYTES_PER_PIXEL; + offset++; + } + offset += (JPEG_ConvertorParams.H_factor - colones); + refline += JPEG_ConvertorParams.ScaledWidth; + } + JPEG_Set_K_Blocks(pOutAddr, kBlocks, JPEG_ConvertorParams.ChromaSubsampling); + pOutAddr += JPEG_ConvertorParams.BlockSize; + } + + return numberMCU; +} + +/** + * @brief Retrive Encoding RGB to YCbCr color conversion function and block number + * @param pJpegInfo : JPEG_ConfTypeDef that contains the JPEG image informations. + * These info are available in the HAL callback "HAL_JPEG_InfoReadyCallback". + * @param pFunction : pointer to JPEG_RGBToYCbCr_Convert_Function , used to retrive the color conversion function + * depending of the jpeg image color space and chroma sampling info. + * @param ImageNbMCUs : pointer to uint32_t, used to retrive the total number of MCU blocks in the jpeg image. + * @retval HAL status : HAL_OK or HAL_ERROR. 
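 + * @note  Illustrative call sequence (a sketch only; JpegInfo, pConvertFunc, totalMCUs,
 + *        blockIndex, pRgbBuffer, pMcuBuffer, rgbBytes and convertedBytes are hypothetical
 + *        application-side names, not part of this module):
 + *          JPEG_RGBToYCbCr_Convert_Function pConvertFunc;
 + *          uint32_t totalMCUs, convertedBytes, blockIndex = 0;
 + *          JPEG_InitColorTables();                       // build the conversion LUTs once at start-up
 + *          JPEG_GetEncodeColorConvertFunc(&JpegInfo, &pConvertFunc, &totalMCUs);
 + *          blockIndex += pConvertFunc(pRgbBuffer, pMcuBuffer, blockIndex, rgbBytes, &convertedBytes);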
+ */ +HAL_StatusTypeDef JPEG_GetEncodeColorConvertFunc(JPEG_ConfTypeDef *pJpegInfo, JPEG_RGBToYCbCr_Convert_Function *pFunction, uint32_t *ImageNbMCUs) +{ + uint32_t hMCU, vMCU; + + JPEG_ConvertorParams.ColorSpace = pJpegInfo->ColorSpace; + JPEG_ConvertorParams.ChromaSubsampling = pJpegInfo->ChromaSubsampling; + + if(JPEG_ConvertorParams.ColorSpace == JPEG_YCBCR_COLORSPACE) + { + if(JPEG_ConvertorParams.ChromaSubsampling == JPEG_420_SUBSAMPLING) + { + *pFunction = JPEG_ARGB_MCU_YCbCr420_ConvertBlocks; + } + else if (JPEG_ConvertorParams.ChromaSubsampling == JPEG_422_SUBSAMPLING) + { + *pFunction = JPEG_ARGB_MCU_YCbCr422_ConvertBlocks; + } + else if (JPEG_ConvertorParams.ChromaSubsampling == JPEG_444_SUBSAMPLING) + { + *pFunction = JPEG_ARGB_MCU_YCbCr444_ConvertBlocks; + } + else + { + return HAL_ERROR; /* Chroma SubSampling Not supported*/ + } + } + else if(JPEG_ConvertorParams.ColorSpace == JPEG_GRAYSCALE_COLORSPACE) + { + *pFunction = JPEG_ARGB_MCU_Gray_ConvertBlocks; + } + else if(JPEG_ConvertorParams.ColorSpace == JPEG_CMYK_COLORSPACE) + { + *pFunction = JPEG_ARGB_MCU_YCCK_ConvertBlocks; + } + else + { + return HAL_ERROR; /* Color space Not supported*/ + } + + JPEG_ConvertorParams.ImageWidth = pJpegInfo->ImageWidth; + JPEG_ConvertorParams.ImageHeight = pJpegInfo->ImageHeight; + JPEG_ConvertorParams.ImageSize_Bytes = pJpegInfo->ImageWidth * pJpegInfo->ImageHeight * JPEG_BYTES_PER_PIXEL; + + if((JPEG_ConvertorParams.ChromaSubsampling == JPEG_420_SUBSAMPLING) || (JPEG_ConvertorParams.ChromaSubsampling == JPEG_422_SUBSAMPLING)) + { + JPEG_ConvertorParams.LineOffset = JPEG_ConvertorParams.ImageWidth % 16; + + JPEG_ConvertorParams.Y_MCU_LUT = JPEG_Y_MCU_LUT; + + if(JPEG_ConvertorParams.LineOffset != 0) + { + JPEG_ConvertorParams.LineOffset = 16 - JPEG_ConvertorParams.LineOffset; + } + + JPEG_ConvertorParams.H_factor = 16; + + if(JPEG_ConvertorParams.ChromaSubsampling == JPEG_420_SUBSAMPLING) + { + JPEG_ConvertorParams.V_factor = 16; + + if(JPEG_ConvertorParams.ColorSpace == JPEG_YCBCR_COLORSPACE) + { + JPEG_ConvertorParams.BlockSize = YCBCR_420_BLOCK_SIZE; + } + + JPEG_ConvertorParams.Cb_MCU_LUT = JPEG_Cb_MCU_420_LUT; + JPEG_ConvertorParams.Cr_MCU_LUT = JPEG_Cr_MCU_420_LUT; + + JPEG_ConvertorParams.K_MCU_LUT = JPEG_K_MCU_420_LUT; + } + else /* 4:2:2*/ + { + JPEG_ConvertorParams.V_factor = 8; + + if(JPEG_ConvertorParams.ColorSpace == JPEG_YCBCR_COLORSPACE) + { + JPEG_ConvertorParams.BlockSize = YCBCR_422_BLOCK_SIZE; + } + + JPEG_ConvertorParams.Cb_MCU_LUT = JPEG_Cb_MCU_422_LUT; + JPEG_ConvertorParams.Cr_MCU_LUT = JPEG_Cr_MCU_422_LUT; + + JPEG_ConvertorParams.K_MCU_LUT = JPEG_K_MCU_422_LUT; + } + } + else if(JPEG_ConvertorParams.ChromaSubsampling == JPEG_444_SUBSAMPLING) + { + JPEG_ConvertorParams.LineOffset = JPEG_ConvertorParams.ImageWidth % 8; + + JPEG_ConvertorParams.Y_MCU_LUT = JPEG_Y_MCU_444_LUT; + + JPEG_ConvertorParams.Cb_MCU_LUT = JPEG_Cb_MCU_444_LUT; + JPEG_ConvertorParams.Cr_MCU_LUT = JPEG_Cr_MCU_444_LUT; + + JPEG_ConvertorParams.K_MCU_LUT = JPEG_K_MCU_444_LUT; + + if(JPEG_ConvertorParams.LineOffset != 0) + { + JPEG_ConvertorParams.LineOffset = 8 - JPEG_ConvertorParams.LineOffset; + } + JPEG_ConvertorParams.H_factor = 8; + JPEG_ConvertorParams.V_factor = 8; + + if(JPEG_ConvertorParams.ColorSpace == JPEG_YCBCR_COLORSPACE) + { + JPEG_ConvertorParams.BlockSize = YCBCR_444_BLOCK_SIZE; + } + if(JPEG_ConvertorParams.ColorSpace == JPEG_CMYK_COLORSPACE) + { + JPEG_ConvertorParams.BlockSize = CMYK_444_BLOCK_SIZE; + } + else if(JPEG_ConvertorParams.ColorSpace == JPEG_GRAYSCALE_COLORSPACE) + { 
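 +        /* Grayscale MCUs hold a single 8x8 luminance block, with no chrominance data */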
+ JPEG_ConvertorParams.BlockSize = GRAY_444_BLOCK_SIZE; + } + + } + else + { + return HAL_ERROR; /* Not supported*/ + } + + + JPEG_ConvertorParams.WidthExtend = JPEG_ConvertorParams.ImageWidth + JPEG_ConvertorParams.LineOffset; + JPEG_ConvertorParams.ScaledWidth = JPEG_BYTES_PER_PIXEL * JPEG_ConvertorParams.ImageWidth; + + hMCU = (JPEG_ConvertorParams.ImageWidth / JPEG_ConvertorParams.H_factor); + if((JPEG_ConvertorParams.ImageWidth % JPEG_ConvertorParams.H_factor) != 0) + { + hMCU++; /*+1 for horizenatl incomplete MCU */ + } + + vMCU = (JPEG_ConvertorParams.ImageHeight / JPEG_ConvertorParams.V_factor); + if((JPEG_ConvertorParams.ImageHeight % JPEG_ConvertorParams.V_factor) != 0) + { + vMCU++; /*+1 for vertical incomplete MCU */ + } + JPEG_ConvertorParams.MCU_Total_Nb = (hMCU * vMCU); + *ImageNbMCUs = JPEG_ConvertorParams.MCU_Total_Nb; + + return HAL_OK; +} +#endif /* USE_JPEG_ENCODER == 1 */ + +#if (USE_JPEG_DECODER == 1) +/** + * @brief Convert YCbCr 4:2:0 blocks to RGB pixels + * @param pInBuffer : pointer to input YCbCr blocks buffer. + * @param pOutBuffer : pointer to output RGB888/ARGB8888 frame buffer. + * @param BlockIndex : index of the input buffer first block in the final image. + * @param DataCount : number of bytes in the input buffer . + * @param ConvertedDataCount : number of converted bytes from input buffer. + * @retval Number of blcoks converted from YCbCr to RGB + */ + +static uint32_t JPEG_MCU_YCbCr420_ARGB_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount) +{ + uint32_t numberMCU; + uint32_t i,j,k, currentMCU, xRef,yRef; + + uint32_t refline; + int32_t ycomp, crcomp, cbcomp; + + int32_t c_red, c_blue, c_green; + + uint8_t *pOutAddr, *pOutAddr2; + uint8_t *pChrom, *pLum; + + numberMCU = DataCount / YCBCR_420_BLOCK_SIZE; + currentMCU = BlockIndex; + + + while(currentMCU < (numberMCU + BlockIndex)) + { + xRef = ((currentMCU *16) / JPEG_ConvertorParams.WidthExtend)*16; + + yRef = ((currentMCU *16) % JPEG_ConvertorParams.WidthExtend); + + refline = JPEG_ConvertorParams.ScaledWidth * xRef + (JPEG_BYTES_PER_PIXEL*yRef); + + currentMCU++; + + pChrom = pInBuffer + 256; /* pChroma = pInBuffer + 4*64 */ + + pLum = pInBuffer; + + for(i= 0; i < 16; i+=2) + { + if(i == 8) + { + pLum = pInBuffer + 128; + } + + if(refline < JPEG_ConvertorParams.ImageSize_Bytes) + { + pOutAddr = pOutBuffer + refline; + pOutAddr2 = pOutAddr + JPEG_ConvertorParams.ScaledWidth; + + for(k= 0; k<2; k++) + { + for(j=0; j < 8; j+=2) + { + cbcomp = (int32_t)(*(pChrom)); + c_blue = (int32_t)(*(CB_BLUE_LUT + cbcomp)); + + crcomp = (int32_t)(*(pChrom + 64)); + c_red = (int32_t)(*(CR_RED_LUT + crcomp)); + + c_green = ((int32_t)(*(CR_GREEN_LUT + crcomp)) + (int32_t)(*(CB_GREEN_LUT + cbcomp))) >> 16; + + + +#if (JPEG_RGB_FORMAT == JPEG_ARGB8888) + + ycomp = (int32_t)(*(pLum +j)); + + *(__IO uint32_t *)pOutAddr = + (CLAMP(ycomp + c_red) << JPEG_RED_OFFSET) | \ + (CLAMP( ycomp + c_green) << JPEG_GREEN_OFFSET) | \ + (CLAMP(ycomp + c_blue) << JPEG_BLUE_OFFSET); + /**********/ + ycomp = (int32_t)(*(pLum +j +1)); + + *((__IO uint32_t *)(pOutAddr + 4)) = + (CLAMP(ycomp + c_red) << JPEG_RED_OFFSET) | \ + (CLAMP( ycomp + c_green) << JPEG_GREEN_OFFSET) | \ + (CLAMP(ycomp + c_blue) << JPEG_BLUE_OFFSET); + + /**********/ + ycomp = (int32_t)(*(pLum +j +8)); + + *(__IO uint32_t *)pOutAddr2 = + (CLAMP(ycomp + c_red) << JPEG_RED_OFFSET) | \ + (CLAMP( ycomp + c_green) << JPEG_GREEN_OFFSET) | \ + (CLAMP(ycomp + c_blue) << JPEG_BLUE_OFFSET); + + 
/**********/ + ycomp = (int32_t)(*(pLum +j +8 +1)); + + *((__IO uint32_t *)(pOutAddr2 +4)) = + (CLAMP(ycomp + c_red) << JPEG_RED_OFFSET) | \ + (CLAMP( ycomp + c_green) << JPEG_GREEN_OFFSET) | \ + (CLAMP(ycomp + c_blue) << JPEG_BLUE_OFFSET); + + +#elif (JPEG_RGB_FORMAT == JPEG_RGB888) + + ycomp = (int32_t)(*(pLum +j)); + + pOutAddr[JPEG_RED_OFFSET/8] = CLAMP(ycomp + c_red); + pOutAddr[JPEG_GREEN_OFFSET/8] = CLAMP(ycomp + c_green); + pOutAddr[JPEG_BLUE_OFFSET/8] = CLAMP(ycomp + c_blue); + + /**********/ + ycomp = (int32_t)(*(pLum +j +1)); + + pOutAddr[3 + JPEG_RED_OFFSET/8] = CLAMP(ycomp + c_red); + pOutAddr[3 + JPEG_GREEN_OFFSET/8] = CLAMP(ycomp + c_green); + pOutAddr[3 + JPEG_BLUE_OFFSET/8] = CLAMP(ycomp + c_blue); + + /**********/ + ycomp = (int32_t)(*(pLum +j +8)); + + pOutAddr2[JPEG_RED_OFFSET/8] = CLAMP(ycomp + c_red); + pOutAddr2[JPEG_GREEN_OFFSET/8] = CLAMP(ycomp + c_green); + pOutAddr2[JPEG_BLUE_OFFSET/8] = CLAMP(ycomp + c_blue); + + /**********/ + ycomp = (int32_t)(*(pLum +j +8 +1)); + + pOutAddr2[3+ JPEG_RED_OFFSET/8] = CLAMP(ycomp + c_red); + pOutAddr2[3 + JPEG_GREEN_OFFSET/8] = CLAMP(ycomp + c_green); + pOutAddr2[3 + JPEG_BLUE_OFFSET/8] = CLAMP(ycomp + c_blue); + +#elif (JPEG_RGB_FORMAT == JPEG_RGB565) + + ycomp = (int32_t)(*(pLum +j)); + + *(__IO uint16_t *)pOutAddr = + ((CLAMP(ycomp + c_red) >> 3) << JPEG_RED_OFFSET) | \ + ((CLAMP( ycomp + c_green) >> 2) << JPEG_GREEN_OFFSET) | \ + ((CLAMP(ycomp + c_blue) >> 3) << JPEG_BLUE_OFFSET); + /**********/ + ycomp = (int32_t)(*(pLum +j +1)); + + *((__IO uint16_t *)(pOutAddr + 2)) = + ((CLAMP(ycomp + c_red) >> 3) << JPEG_RED_OFFSET) | \ + ((CLAMP( ycomp + c_green) >> 2) << JPEG_GREEN_OFFSET) | \ + ((CLAMP(ycomp + c_blue) >> 3) << JPEG_BLUE_OFFSET); + + /**********/ + ycomp = (int32_t)(*(pLum +j +8)); + + *(__IO uint16_t *)pOutAddr2 = + ((CLAMP(ycomp + c_red) >> 3) << JPEG_RED_OFFSET) | \ + ((CLAMP( ycomp + c_green) >> 2) << JPEG_GREEN_OFFSET) | \ + ((CLAMP(ycomp + c_blue) >> 3) << JPEG_BLUE_OFFSET); + + /**********/ + ycomp = (int32_t)(*(pLum +j +8 +1)); + + *((__IO uint16_t *)(pOutAddr2 +2)) = + ((CLAMP(ycomp + c_red) >> 3) << JPEG_RED_OFFSET) | \ + ((CLAMP( ycomp + c_green) >> 2) << JPEG_GREEN_OFFSET) | \ + ((CLAMP(ycomp + c_blue) >> 3) << JPEG_BLUE_OFFSET); +#endif /* JPEG_RGB_FORMAT */ + + pOutAddr += JPEG_BYTES_PER_PIXEL * 2; + pOutAddr2 += JPEG_BYTES_PER_PIXEL * 2; + + pChrom++; + } + pLum += 64; + } + + pLum = pLum - 128 + 16; + + refline += 2*JPEG_ConvertorParams.ScaledWidth; + } + } + + pInBuffer += YCBCR_420_BLOCK_SIZE; + } + return numberMCU; +} + +/** + * @brief Convert YCbCr 4:2:2 blocks to RGB pixels + * @param pInBuffer : pointer to input YCbCr blocks buffer. + * @param pOutBuffer : pointer to output RGB888/ARGB8888 frame buffer. + * @param BlockIndex : index of the input buffer first block in the final image. + * @param DataCount : number of bytes in the input buffer . + * @param ConvertedDataCount : number of converted bytes from input buffer. 
+ * @retval Number of blcoks converted from YCbCr to RGB + */ +static uint32_t JPEG_MCU_YCbCr422_ARGB_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount) +{ + uint32_t numberMCU; + uint32_t i,j,k, currentMCU, xRef,yRef; + + uint32_t refline; + int32_t ycomp, crcomp, cbcomp; + + int32_t c_red, c_blue, c_green; + + uint8_t *pOutAddr; + uint8_t *pChrom, *pLum; + + numberMCU = DataCount / YCBCR_422_BLOCK_SIZE; + currentMCU = BlockIndex; + + + while(currentMCU < (numberMCU + BlockIndex)) + { + xRef = ((currentMCU *16) / JPEG_ConvertorParams.WidthExtend)*8; + + yRef = ((currentMCU *16) % JPEG_ConvertorParams.WidthExtend); + + refline = JPEG_ConvertorParams.ScaledWidth * xRef + (JPEG_BYTES_PER_PIXEL*yRef); + + currentMCU++; + + pChrom = pInBuffer + 128; /* pChroma = pInBuffer + 2*64 */ + + pLum = pInBuffer; + + for(i= 0; i < 8; i++) + { + if(refline < JPEG_ConvertorParams.ImageSize_Bytes) + { + pOutAddr = pOutBuffer + refline; + + for(k= 0; k<2; k++) + { + for(j=0; j < 8; j+=2) + { + cbcomp = (int32_t)(*(pChrom)); + c_blue = (int32_t)(*(CB_BLUE_LUT + cbcomp)); + + crcomp = (int32_t)(*(pChrom + 64)); + c_red = (int32_t)(*(CR_RED_LUT + crcomp)); + + c_green = ((int32_t)(*(CR_GREEN_LUT + crcomp)) + (int32_t)(*(CB_GREEN_LUT + cbcomp))) >> 16; + + + +#if (JPEG_RGB_FORMAT == JPEG_ARGB8888) + + ycomp = (int32_t)(*(pLum +j)); + + *(__IO uint32_t *)pOutAddr = + (CLAMP(ycomp + c_red) << JPEG_RED_OFFSET) | \ + (CLAMP( ycomp + c_green) << JPEG_GREEN_OFFSET) | \ + (CLAMP(ycomp + c_blue) << JPEG_BLUE_OFFSET); + /**********/ + ycomp = (int32_t)(*(pLum +j +1)); + + *((__IO uint32_t *)(pOutAddr + 4)) = + (CLAMP(ycomp + c_red) << JPEG_RED_OFFSET) | \ + (CLAMP( ycomp + c_green) << JPEG_GREEN_OFFSET) | \ + (CLAMP(ycomp + c_blue) << JPEG_BLUE_OFFSET); + +#elif (JPEG_RGB_FORMAT == JPEG_RGB888) + + ycomp = (int32_t)(*(pLum +j)); + + pOutAddr[JPEG_RED_OFFSET/8] = CLAMP(ycomp + c_red); + pOutAddr[JPEG_GREEN_OFFSET/8] = CLAMP(ycomp + c_green); + pOutAddr[JPEG_BLUE_OFFSET/8] = CLAMP(ycomp + c_blue); + + /**********/ + ycomp = (int32_t)(*(pLum +j +1)); + + pOutAddr[3 + JPEG_RED_OFFSET/8] = CLAMP(ycomp + c_red); + pOutAddr[3 + JPEG_GREEN_OFFSET/8] = CLAMP(ycomp + c_green); + pOutAddr[3 + JPEG_BLUE_OFFSET/8] = CLAMP(ycomp + c_blue); + +#elif (JPEG_RGB_FORMAT == JPEG_RGB565) + + ycomp = (int32_t)(*(pLum +j)); + + *(__IO uint16_t *)pOutAddr = + ((CLAMP(ycomp + c_red) >> 3) << JPEG_RED_OFFSET) | \ + ((CLAMP( ycomp + c_green) >> 2) << JPEG_GREEN_OFFSET) | \ + ((CLAMP(ycomp + c_blue) >> 3) << JPEG_BLUE_OFFSET); + /**********/ + ycomp = (int32_t)(*(pLum +j +1)); + + *((__IO uint16_t *)(pOutAddr + 2)) = + ((CLAMP(ycomp + c_red) >> 3) << JPEG_RED_OFFSET) | \ + ((CLAMP( ycomp + c_green) >> 2) << JPEG_GREEN_OFFSET) | \ + ((CLAMP(ycomp + c_blue) >> 3) << JPEG_BLUE_OFFSET); + +#endif /* JPEG_RGB_FORMAT*/ + + pOutAddr += JPEG_BYTES_PER_PIXEL * 2; + + pChrom++; + } + pLum += 64; + } + + pLum = pLum - 128 + 8; + + refline += JPEG_ConvertorParams.ScaledWidth; + } + } + + pInBuffer += YCBCR_422_BLOCK_SIZE; + } + return numberMCU; +} + +/** + * @brief Convert YCbCr 4:4:4 blocks to RGB pixels + * @param pInBuffer : pointer to input YCbCr blocks buffer. + * @param pOutBuffer : pointer to output RGB888/ARGB8888 frame buffer. + * @param BlockIndex : index of the input buffer first block in the final image. + * @param DataCount : number of bytes in the input buffer . + * @param ConvertedDataCount : number of converted bytes from input buffer. 
+ * @retval Number of blcoks converted from YCbCr to RGB + */ +static uint32_t JPEG_MCU_YCbCr444_ARGB_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount) +{ + uint32_t numberMCU; + uint32_t i,j, currentMCU, xRef,yRef; + + uint32_t refline; + int32_t ycomp, crcomp, cbcomp; + + int32_t c_red, c_blue, c_green; + + uint8_t *pOutAddr; + uint8_t *pChrom, *pLum; + + numberMCU = DataCount / YCBCR_444_BLOCK_SIZE; + currentMCU = BlockIndex; + + + while(currentMCU < (numberMCU + BlockIndex)) + { + + xRef = ((currentMCU *8) / JPEG_ConvertorParams.WidthExtend)*8; + + yRef = ((currentMCU *8) % JPEG_ConvertorParams.WidthExtend); + + refline = JPEG_ConvertorParams.ScaledWidth * xRef + (JPEG_BYTES_PER_PIXEL*yRef); + + currentMCU++; + + pChrom = pInBuffer + 64; /* pChroma = pInBuffer + 4*64 */ + + pLum = pInBuffer; + + for(i= 0; i < 8; i++) + { + if(refline < JPEG_ConvertorParams.ImageSize_Bytes) + { + pOutAddr = pOutBuffer+ refline; + + + for(j=0; j < 8; j++) + { + cbcomp = (int32_t)(*pChrom); + c_blue = (int32_t)(*(CB_BLUE_LUT + cbcomp)); + + crcomp = (int32_t)(*(pChrom + 64)); + c_red = (int32_t)(*(CR_RED_LUT + crcomp)); + + c_green = ((int32_t)(*(CR_GREEN_LUT + crcomp)) + (int32_t)(*(CB_GREEN_LUT + cbcomp))) >> 16; + +#if (JPEG_RGB_FORMAT == JPEG_ARGB8888) + + ycomp = (int32_t)(*(pLum +j)); + + *(__IO uint32_t *)pOutAddr = + (CLAMP(ycomp + c_red) << JPEG_RED_OFFSET) | \ + (CLAMP( ycomp + c_green) << JPEG_GREEN_OFFSET) | \ + (CLAMP(ycomp + c_blue) << JPEG_BLUE_OFFSET); + +#elif (JPEG_RGB_FORMAT == JPEG_RGB888) + + ycomp = (int32_t)(*(pLum +j)); + + pOutAddr[JPEG_RED_OFFSET/8] = CLAMP(ycomp + c_red); + pOutAddr[JPEG_GREEN_OFFSET/8] = CLAMP(ycomp + c_green); + pOutAddr[JPEG_BLUE_OFFSET/8] = CLAMP(ycomp + c_blue); + +#elif (JPEG_RGB_FORMAT == JPEG_RGB565) + + ycomp = (int32_t)(*(pLum +j)); + + *(__IO uint16_t *)pOutAddr = + ((CLAMP(ycomp + c_red) >> 3) << JPEG_RED_OFFSET) | \ + ((CLAMP( ycomp + c_green) >> 2) << JPEG_GREEN_OFFSET) | \ + ((CLAMP(ycomp + c_blue) >> 3) << JPEG_BLUE_OFFSET); + +#endif /* JPEG_RGB_FORMAT */ + + pOutAddr += JPEG_BYTES_PER_PIXEL; + + pChrom++; + } + pLum += 8; + + refline += JPEG_ConvertorParams.ScaledWidth; + } + } + + pInBuffer += YCBCR_444_BLOCK_SIZE; + } + return numberMCU; +} + +/** + * @brief Convert Y Gray blocks to RGB pixels + * @param pInBuffer : pointer to input Luminance Y blocks buffer. + * @param pOutBuffer : pointer to output RGB888/ARGB8888 frame buffer. + * @param BlockIndex : index of the input buffer first block in the final image. + * @param DataCount : number of bytes in the input buffer . + * @param ConvertedDataCount : number of converted bytes from input buffer. 
+ * @retval Number of blcoks converted from YCbCr to RGB + */ +static uint32_t JPEG_MCU_Gray_ARGB_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount) +{ + uint32_t numberMCU; + uint32_t currentMCU, xRef,yRef; + uint32_t refline; + + + uint32_t i,j, ySample; + uint8_t *pOutAddr, *pLum; + + + numberMCU = DataCount / GRAY_444_BLOCK_SIZE; + currentMCU = BlockIndex; + + while(currentMCU < (numberMCU + BlockIndex)) + { + xRef = ((currentMCU *8) / JPEG_ConvertorParams.WidthExtend)*8; + + yRef = ((currentMCU *8) % JPEG_ConvertorParams.WidthExtend); + + refline = JPEG_ConvertorParams.ScaledWidth * xRef + (JPEG_BYTES_PER_PIXEL*yRef); + + currentMCU++; + + pLum = pInBuffer; + + for(i= 0; i < 8; i++) + { + pOutAddr = pOutBuffer + refline; + if(refline < JPEG_ConvertorParams.ImageSize_Bytes) + { + for(j=0; j < 8; j++) + { + ySample = (uint32_t)(*pLum); + +#if (JPEG_RGB_FORMAT == JPEG_ARGB8888) + + *(__IO uint32_t *)pOutAddr = ySample | (ySample << 8) | (ySample << 16); + +#elif (JPEG_RGB_FORMAT == JPEG_RGB888) + + pOutAddr[0] = ySample; + pOutAddr[1] = ySample; + pOutAddr[2] = ySample; + +#elif (JPEG_RGB_FORMAT == JPEG_RGB565) + + *(__IO uint16_t *)pOutAddr = ((ySample >> 3) << 11) | ((ySample >> 2) << 5) | (ySample >> 3); + +#endif /* JPEG_RGB_FORMAT */ + + pOutAddr += JPEG_BYTES_PER_PIXEL; + pLum++; + } + + refline += JPEG_ConvertorParams.ScaledWidth; + } + } + + pInBuffer += GRAY_444_BLOCK_SIZE; + } + return numberMCU; +} + +/** + * @brief Convert CMYK blocks to RGB pixels + * @param pInBuffer : pointer to input CMYK blocks buffer. + * @param pOutBuffer : pointer to output RGB888/ARGB8888 frame buffer. + * @param BlockIndex : index of the input buffer first block in the final image. + * @param DataCount : number of bytes in the input buffer . + * @param ConvertedDataCount : number of converted bytes from input buffer. 
+ * @retval Number of blcoks converted from CMYK to RGB + */ +static uint32_t JPEG_MCU_YCCK_ARGB_ConvertBlocks(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount) +{ + uint32_t numberMCU; + uint32_t i,j, currentMCU, xRef,yRef; + + uint32_t refline; + int32_t color_k; + + int32_t c_red, c_blue, c_green; + + uint8_t *pOutAddr, *pChrom; + + numberMCU = DataCount / CMYK_444_BLOCK_SIZE; + currentMCU = BlockIndex; + + + while(currentMCU < (numberMCU + BlockIndex)) + { + + xRef = ((currentMCU *8) / JPEG_ConvertorParams.WidthExtend)*8; + + yRef = ((currentMCU *8) % JPEG_ConvertorParams.WidthExtend); + + refline = JPEG_ConvertorParams.ScaledWidth * xRef + (JPEG_BYTES_PER_PIXEL*yRef); + + currentMCU++; + + pChrom = pInBuffer; + + for(i= 0; i < 8; i++) + { + if(refline < JPEG_ConvertorParams.ImageSize_Bytes) + { + pOutAddr = pOutBuffer+ refline; + + for(j=0; j < 8; j++) + { + color_k = (int32_t)(*(pChrom + 192)); + c_red = (color_k * ((int32_t)(*pChrom)))/255; + + c_green = (color_k * (int32_t)(*(pChrom + 64)))/255; + + c_blue = (color_k * (int32_t)(*(pChrom + 128)))/255; + +#if (JPEG_RGB_FORMAT == JPEG_ARGB8888) + + *(__IO uint32_t *)pOutAddr = + (c_red << JPEG_RED_OFFSET) | \ + (c_green << JPEG_GREEN_OFFSET) | \ + (c_blue << JPEG_BLUE_OFFSET); + +#elif (JPEG_RGB_FORMAT == JPEG_RGB888) + + pOutAddr[JPEG_RED_OFFSET/8] = c_red; + pOutAddr[JPEG_GREEN_OFFSET/8] = c_green; + pOutAddr[JPEG_BLUE_OFFSET/8] = c_blue; + +#elif (JPEG_RGB_FORMAT == JPEG_RGB565) + + *(__IO uint16_t *)pOutAddr = + ((c_red >> 3) << JPEG_RED_OFFSET) | \ + ((c_green >> 2) << JPEG_GREEN_OFFSET) | \ + ((c_blue >> 3) << JPEG_BLUE_OFFSET); + +#endif /* JPEG_RGB_FORMAT */ + + pOutAddr += JPEG_BYTES_PER_PIXEL; + + pChrom++; + } + + refline += JPEG_ConvertorParams.ScaledWidth; + } + } + + pInBuffer += CMYK_444_BLOCK_SIZE; + } + return numberMCU; +} + +/** + * @brief Retrive Decoding YCbCr to RGB color conversion function and block number + * @param pJpegInfo : JPEG_ConfTypeDef that contains the JPEG image informations. + * These info are available in the HAL callback "HAL_JPEG_InfoReadyCallback". + * @param pFunction : pointer to JPEG_YCbCrToRGB_Convert_Function , used to retrive the color conversion function + * depending of the jpeg image color space and chroma sampling info. + * @param ImageNbMCUs : pointer to uint32_t, used to retrive the total number of MCU blocks in the jpeg image. + * @retval HAL status : HAL_OK or HAL_ERROR. 
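 + * @note  Illustrative call sequence (a sketch only; JpegInfo, pConvertFunc, totalMCUs,
 + *        blockIndex, pMcuBuffer, pFrameBuffer, mcuBytes and convertedBytes are hypothetical
 + *        application-side names, not part of this module):
 + *          JPEG_YCbCrToRGB_Convert_Function pConvertFunc;
 + *          uint32_t totalMCUs, convertedBytes, blockIndex = 0;
 + *          JPEG_InitColorTables();                       // build the conversion LUTs once at start-up
 + *          JPEG_GetDecodeColorConvertFunc(&JpegInfo, &pConvertFunc, &totalMCUs);
 + *          blockIndex += pConvertFunc(pMcuBuffer, pFrameBuffer, blockIndex, mcuBytes, &convertedBytes);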
+ */ +HAL_StatusTypeDef JPEG_GetDecodeColorConvertFunc(JPEG_ConfTypeDef *pJpegInfo, JPEG_YCbCrToRGB_Convert_Function *pFunction, uint32_t *ImageNbMCUs) +{ + uint32_t hMCU, vMCU; + + JPEG_ConvertorParams.ColorSpace = pJpegInfo->ColorSpace; + JPEG_ConvertorParams.ImageWidth = pJpegInfo->ImageWidth; + JPEG_ConvertorParams.ImageHeight = pJpegInfo->ImageHeight; + JPEG_ConvertorParams.ImageSize_Bytes = pJpegInfo->ImageWidth * pJpegInfo->ImageHeight * JPEG_BYTES_PER_PIXEL; + + JPEG_ConvertorParams.ChromaSubsampling = pJpegInfo->ChromaSubsampling; + if(JPEG_ConvertorParams.ColorSpace == JPEG_YCBCR_COLORSPACE) + { + if(JPEG_ConvertorParams.ChromaSubsampling == JPEG_420_SUBSAMPLING) + { + *pFunction = JPEG_MCU_YCbCr420_ARGB_ConvertBlocks; + + JPEG_ConvertorParams.LineOffset = JPEG_ConvertorParams.ImageWidth % 16; + + if(JPEG_ConvertorParams.LineOffset != 0) + { + JPEG_ConvertorParams.LineOffset = 16 - JPEG_ConvertorParams.LineOffset; + } + JPEG_ConvertorParams.H_factor = 16; + JPEG_ConvertorParams.V_factor = 16; + } + else if(JPEG_ConvertorParams.ChromaSubsampling == JPEG_422_SUBSAMPLING) + { + *pFunction = JPEG_MCU_YCbCr422_ARGB_ConvertBlocks; + + JPEG_ConvertorParams.LineOffset = JPEG_ConvertorParams.ImageWidth % 16; + + if(JPEG_ConvertorParams.LineOffset != 0) + { + JPEG_ConvertorParams.LineOffset = 16 - JPEG_ConvertorParams.LineOffset; + } + JPEG_ConvertorParams.H_factor = 16; + JPEG_ConvertorParams.V_factor = 8; + } + else /*4:4:4*/ + { + *pFunction = JPEG_MCU_YCbCr444_ARGB_ConvertBlocks; + + JPEG_ConvertorParams.LineOffset = JPEG_ConvertorParams.ImageWidth % 8; + + if(JPEG_ConvertorParams.LineOffset != 0) + { + JPEG_ConvertorParams.LineOffset = 8 - JPEG_ConvertorParams.LineOffset; + } + JPEG_ConvertorParams.H_factor = 8; + JPEG_ConvertorParams.V_factor = 8; + } + } + else if(JPEG_ConvertorParams.ColorSpace == JPEG_GRAYSCALE_COLORSPACE) + { + *pFunction = JPEG_MCU_Gray_ARGB_ConvertBlocks; + + JPEG_ConvertorParams.LineOffset = JPEG_ConvertorParams.ImageWidth % 8; + + if(JPEG_ConvertorParams.LineOffset != 0) + { + JPEG_ConvertorParams.LineOffset = 8 - JPEG_ConvertorParams.LineOffset; + } + JPEG_ConvertorParams.H_factor = 8; + JPEG_ConvertorParams.V_factor = 8; + } + else if(JPEG_ConvertorParams.ColorSpace == JPEG_CMYK_COLORSPACE) + { + *pFunction = JPEG_MCU_YCCK_ARGB_ConvertBlocks; + + JPEG_ConvertorParams.LineOffset = JPEG_ConvertorParams.ImageWidth % 8; + + if(JPEG_ConvertorParams.LineOffset != 0) + { + JPEG_ConvertorParams.LineOffset = 8 - JPEG_ConvertorParams.LineOffset; + } + JPEG_ConvertorParams.H_factor = 8; + JPEG_ConvertorParams.V_factor = 8; + } + else + { + return HAL_ERROR; /* Color space Not supported*/ + } + + JPEG_ConvertorParams.WidthExtend = JPEG_ConvertorParams.ImageWidth + JPEG_ConvertorParams.LineOffset; + JPEG_ConvertorParams.ScaledWidth = JPEG_BYTES_PER_PIXEL * JPEG_ConvertorParams.ImageWidth; + + hMCU = (JPEG_ConvertorParams.ImageWidth / JPEG_ConvertorParams.H_factor); + if((JPEG_ConvertorParams.ImageWidth % JPEG_ConvertorParams.H_factor) != 0) + { + hMCU++; /*+1 for horizenatl incomplete MCU */ + } + + vMCU = (JPEG_ConvertorParams.ImageHeight / JPEG_ConvertorParams.V_factor); + if((JPEG_ConvertorParams.ImageHeight % JPEG_ConvertorParams.V_factor) != 0) + { + vMCU++; /*+1 for vertical incomplete MCU */ + } + JPEG_ConvertorParams.MCU_Total_Nb = (hMCU * vMCU); + *ImageNbMCUs = JPEG_ConvertorParams.MCU_Total_Nb; + + return HAL_OK; +} + +/** + * @brief Initializes the YCbCr -> RGB colors conversion Look Up Tables + * @param None + * @retval None + */ +void 
JPEG_InitPostProcColorTables(void) +{ + int32_t index, i; + + for (i = 0; i <= 255; i++) + { + index = (i * 2) - 256; + CR_RED_LUT[i] = ( (((int32_t) ((1.40200 / 2) * (1L << 16))) * index) + ((int32_t) 1 << (16 - 1))) >> 16; + + CB_BLUE_LUT[i] = ( (((int32_t) ((1.77200 / 2) * (1L << 16))) * index) + ((int32_t) 1 << (16 - 1))) >> 16; + + CR_GREEN_LUT[i] = (-((int32_t) ((0.71414 / 2) * (1L << 16)))) * index; + CB_GREEN_LUT[i] = (-((int32_t) ((0.34414 / 2) * (1L << 16)))) * index; + } +} +#endif /* USE_JPEG_DECODER == 1 */ + +/** + * @brief Initializes the colors conversion Look Up Tables + * @param None + * @retval None + */ +void JPEG_InitColorTables(void) +{ + +#if (USE_JPEG_ENCODER == 1) + JPEG_InitPreProcColorTables(); + JPEG_Init_MCU_LUT(); +#endif + +#if (USE_JPEG_DECODER == 1) + JPEG_InitPostProcColorTables(); +#endif + +} + +#if (USE_JPEG_ENCODER == 1) +/** + * @brief Initializes the RGB -> YCbCr colors conversion Look Up Tables + * @param None + * @retval None + */ +void JPEG_InitPreProcColorTables(void) +{ + int32_t i; + + for (i = 0; i <= 255; i++) + { + RED_Y_LUT[i] = (( ((int32_t) ((0.299 ) * (1L << 16))) * i) + ((int32_t) 1 << (16 - 1))) >> 16 ; + GREEN_Y_LUT[i] = (( ((int32_t) ((0.587 ) * (1L << 16))) * i) + ((int32_t) 1 << (16 - 1))) >> 16 ; + BLUE_Y_LUT[i] = (( ((int32_t) ((0.114 ) * (1L << 16))) * i) + ((int32_t) 1 << (16 - 1))) >> 16 ; + + RED_CB_LUT[i] = (((-((int32_t) ((0.1687 ) * (1L << 16)))) * i) + ((int32_t) 1 << (16 - 1))) >> 16 ; + GREEN_CB_LUT[i] = (((-((int32_t) ((0.3313 ) * (1L << 16)))) * i) + ((int32_t) 1 << (16 - 1))) >> 16 ; + + /* BLUE_CB_LUT and RED_CR_LUT are identical */ + BLUE_CB_RED_CR_LUT[i] = (( ((int32_t) ((0.5 ) * (1L << 16))) * i) + ((int32_t) 1 << (16 - 1))) >> 16 ; + + GREEN_CR_LUT[i] = (((-((int32_t) ((0.4187 ) * (1L << 16)))) * i) + ((int32_t) 1 << (16 - 1))) >> 16 ; + BLUE_CR_LUT[i] = (((-((int32_t) ((0.0813 ) * (1L << 16)))) * i) + ((int32_t) 1 << (16 - 1))) >> 16 ; + } +} + +/** + * @brief Initializes the MCU Look Up Tables + * @param None + * @retval None + */ +void JPEG_Init_MCU_LUT(void) +{ + uint32_t i, j, offset; + + /*Y LUT */ + for(i = 0; i < 16; i++) + { + for(j = 0; j < 16; j++) + { + offset = j + (i*8); + if((j>=8) && (i>=8)) offset+= 120; + else if((j>=8) && (i<8)) offset+= 56; + else if((j<8) && (i>=8)) offset+= 64; + + JPEG_Y_MCU_LUT[i*16 + j] = offset; + } + } + + /*Cb Cr K LUT*/ + for(i = 0; i < 16; i++) + { + for(j = 0; j < 16; j++) + { + offset = i*16 + j; + + JPEG_Cb_MCU_420_LUT[offset] = (j/2) + ((i/2)*8) + 256; + JPEG_Cb_MCU_422_LUT[offset] = (j/2) + (i*8) + 128; + + JPEG_Cr_MCU_420_LUT[offset] = (j/2) + ((i/2)*8) + 320; + JPEG_Cr_MCU_422_LUT[offset] = (j/2) + (i*8) + 192; + + JPEG_K_MCU_420_LUT[offset] = (j/2) + ((i/2)*8) + 384; + JPEG_K_MCU_422_LUT[offset] = (j/2) + ((i/2)*8) + 256; + } + } + + for(i = 0; i < 8; i++) + { + for(j = 0; j < 8; j++) + { + offset = i*8 + j; + + JPEG_Y_MCU_444_LUT[offset] = offset; + JPEG_Cb_MCU_444_LUT[offset] = offset + 64 ; + JPEG_Cr_MCU_444_LUT[offset] = offset + 128 ; + JPEG_K_MCU_444_LUT[offset] = offset + 192 ; + } + } +} + +/** + * @brief Initializes the YCCK format K Blocks + * @param ChromaSampling + * @retval pMCUBuffer + */ +static uint8_t *JPEG_Set_K_Blocks(uint8_t *pMCUBuffer, uint8_t pKBlocks[16][16], uint32_t ChromaSampling) +{ + uint32_t i,j; + + if(ChromaSampling == JPEG_420_SUBSAMPLING) + { + /* 4:2:0 4Y + Cb + Cr + K*/ + + pMCUBuffer = pMCUBuffer + 384; + /* K block */ + for(i=0;i<16;i+=2) + { + for(j=0;j<16;j+=2) + { + *pMCUBuffer = pKBlocks[i][j]; + pMCUBuffer++; + } + 
} + } + else if(ChromaSampling == JPEG_422_SUBSAMPLING) + { + /* 4:2:2 2Y horizental + Cb + Cr + K*/ + pMCUBuffer = pMCUBuffer + 256; + + /* K block */ + for(i=0;i<8;i++) + { + for(j=0;j<16;j+=2) + { + *pMCUBuffer = pKBlocks[i][j]; + pMCUBuffer++; + } + } + } + else if(ChromaSampling == JPEG_444_SUBSAMPLING) + { + /* 4:4:4 Y + Cb + Cr + K*/ + + /* K block */ + pMCUBuffer = pMCUBuffer + 192; + for(i=0;i<8;i++) + { + for(j=0;j<8;j++) + { + *pMCUBuffer = pKBlocks[i][j]; + pMCUBuffer++; + } + } + } + return pMCUBuffer; +} +#endif /* USE_JPEG_ENCODER == 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/src/third_party/stm32_cubeide/Utilities/JPEG/jpeg_utils.h b/src/third_party/stm32_cubeide/Utilities/JPEG/jpeg_utils.h new file mode 100644 index 000000000..28df78a37 --- /dev/null +++ b/src/third_party/stm32_cubeide/Utilities/JPEG/jpeg_utils.h @@ -0,0 +1,122 @@ +/** + ****************************************************************************** + * @file jpeg_utils.h + * @author MCD Application Team + * @version V2.0.0 + * @date 3-June-2016 + * @brief Header for jpeg_utils.c module + ****************************************************************************** + * @attention + * + *

<h2><center>&copy; COPYRIGHT(c) 2016 STMicroelectronics</center></h2>

+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of STMicroelectronics nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __JPEG_UTILS_H +#define __JPEG_UTILS_H + +/* Includes ------------------------------------------------------------------*/ +#include "jpeg_utils_conf.h" + +/** @addtogroup Utilities + * @{ + */ + +/** @addtogroup JPEG + * @{ + */ + +/** @defgroup JPEG_Exported_Defines JPEG Exported Defines + * @{ + */ +/** +* @} +*/ + +/** @defgroup JPEG_Exported_Types JPEG Exported Types + * @{ + */ +#if (USE_JPEG_DECODER == 1) +typedef uint32_t (* JPEG_YCbCrToRGB_Convert_Function)(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount ); +#endif + +#if (USE_JPEG_ENCODER == 1) +typedef uint32_t (* JPEG_RGBToYCbCr_Convert_Function)(uint8_t *pInBuffer, + uint8_t *pOutBuffer, + uint32_t BlockIndex, + uint32_t DataCount, + uint32_t *ConvertedDataCount ); +#endif +/** +* @} +*/ + +/** @defgroup JPEG_Exported_Macros JPEG Exported Macros + * @{ + */ +/** +* @} +*/ + +/** @defgroup JPEG_Exported_Variables JPEG Exported Variables + * @{ + */ +/** +* @} +*/ + +/** @defgroup JPEG_Exported_FunctionsPrototype JPEG Exported FunctionsPrototype + * @{ + */ +void JPEG_InitColorTables(void); + +#if (USE_JPEG_DECODER == 1) +HAL_StatusTypeDef JPEG_GetDecodeColorConvertFunc(JPEG_ConfTypeDef *pJpegInfo, JPEG_YCbCrToRGB_Convert_Function *pFunction, uint32_t *ImageNbMCUs); +#endif + +#if (USE_JPEG_ENCODER == 1) +HAL_StatusTypeDef JPEG_GetEncodeColorConvertFunc(JPEG_ConfTypeDef *pJpegInfo, JPEG_RGBToYCbCr_Convert_Function *pFunction, uint32_t *ImageNbMCUs); +#endif +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* __JPEG_UTILS_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/src/third_party/stm32_cubeide/Utilities/JPEG/jpeg_utils_conf_template.h b/src/third_party/stm32_cubeide/Utilities/JPEG/jpeg_utils_conf_template.h new file mode 
100644 index 000000000..2d63e6df0 --- /dev/null +++ b/src/third_party/stm32_cubeide/Utilities/JPEG/jpeg_utils_conf_template.h @@ -0,0 +1,85 @@ +/** + ****************************************************************************** + * @file jpeg_utils_conf_template.h + * @author MCD Application Team + * @version V2.0.0 + * @date 3-June-2016 + * @brief jpeg_utils configuration template file. + * This file should be copied to the application folder and modified + * as follows: + * - Rename it to 'jpeg_utils_conf.h'. + * - Update the name of the JPEG driver's header file, depending on + * the EVAL board you are using, see lines 51 and 52 below (by default this + * file will generate compile error unless you do this modification). + ****************************************************************************** + * @attention + * + *

<h2><center>&copy; COPYRIGHT(c) 2016 STMicroelectronics</center></h2>

+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. Neither the name of STMicroelectronics nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ + +#ifndef __JPEG_UTILS_CONF_H__ +#define __JPEG_UTILS_CONF_H__ + + +/* Includes ------------------------------------------------------------------*/ +/*#include "stm32fXxx_hal.h" */ +/* #include "stm32fXxx_hal_jpeg.h" */ + +/* Private define ------------------------------------------------------------*/ +/** @addtogroup JPEG_Private_Defines + * @{ + */ +/* RGB Color format definition for JPEG encoding/Decoding : Should not be modified*/ +#define JPEG_ARGB8888 0 /* ARGB8888 Color Format */ +#define JPEG_RGB888 1 /* RGB888 Color Format */ +#define JPEG_RGB565 2 /* RGB565 Color Format */ + +/** +* @} +*/ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @addtogroup JPEG_Exported_Defines + * @{ + */ +#define USE_JPEG_DECODER 1 /* Enable Decoding Post-Processing functions (YCbCr to RGB conversion) */ +#define USE_JPEG_ENCODER 1 /* Enable Encoding Pre-Processing functions (RGB to YCbCr conversion)*/ + +#define JPEG_RGB_FORMAT JPEG_ARGB8888 /* Select RGB format: ARGB8888, RGB888, RBG_565 */ +#define JPEG_SWAP_RB 0 /* Change color order to BGR */ + +/** +* @} +*/ + +/* Exported macro ------------------------------------------------------------*/ +/* Exported functions ------------------------------------------------------- */ + +#endif /* __JPEG_UTILS_CONF_H__ */ diff --git a/src/third_party/stm32_cubeide/bb3.ioc b/src/third_party/stm32_cubeide/bb3.ioc new file mode 100644 index 000000000..ed0fa1b95 --- /dev/null +++ b/src/third_party/stm32_cubeide/bb3.ioc @@ -0,0 +1,942 @@ +#MicroXplorer Configuration settings - do not modify +ADC1.Channel-0\#ChannelRegularConversion=ADC_CHANNEL_VBAT +ADC1.ContinuousConvMode=DISABLE 
+ADC1.IPParameters=Rank-0\#ChannelRegularConversion,Channel-0\#ChannelRegularConversion,SamplingTime-0\#ChannelRegularConversion,NbrOfConversionFlag,master,ContinuousConvMode,ScanConvMode +ADC1.NbrOfConversionFlag=1 +ADC1.Rank-0\#ChannelRegularConversion=1 +ADC1.SamplingTime-0\#ChannelRegularConversion=ADC_SAMPLETIME_15CYCLES +ADC1.ScanConvMode=ADC_SCAN_DISABLE +ADC1.master=1 +DAC.DAC_Trigger=DAC_TRIGGER_T6_TRGO +DAC.IPParameters=DAC_Trigger +DMA2D.ColorMode=DMA2D_OUTPUT_RGB565 +DMA2D.IPParameters=ColorMode +Dma.DAC1.0.Direction=DMA_MEMORY_TO_PERIPH +Dma.DAC1.0.FIFOMode=DMA_FIFOMODE_DISABLE +Dma.DAC1.0.Instance=DMA1_Stream5 +Dma.DAC1.0.MemDataAlignment=DMA_MDATAALIGN_BYTE +Dma.DAC1.0.MemInc=DMA_MINC_ENABLE +Dma.DAC1.0.Mode=DMA_NORMAL +Dma.DAC1.0.PeriphDataAlignment=DMA_PDATAALIGN_BYTE +Dma.DAC1.0.PeriphInc=DMA_PINC_DISABLE +Dma.DAC1.0.Priority=DMA_PRIORITY_LOW +Dma.DAC1.0.RequestParameters=Instance,Direction,PeriphInc,MemInc,PeriphDataAlignment,MemDataAlignment,Mode,Priority,FIFOMode +Dma.Request0=DAC1 +Dma.Request1=SDMMC1_RX +Dma.Request2=SDMMC1_TX +Dma.RequestsNb=3 +Dma.SDMMC1_RX.1.Direction=DMA_PERIPH_TO_MEMORY +Dma.SDMMC1_RX.1.FIFOMode=DMA_FIFOMODE_ENABLE +Dma.SDMMC1_RX.1.FIFOThreshold=DMA_FIFO_THRESHOLD_FULL +Dma.SDMMC1_RX.1.Instance=DMA2_Stream3 +Dma.SDMMC1_RX.1.MemBurst=DMA_MBURST_INC4 +Dma.SDMMC1_RX.1.MemDataAlignment=DMA_MDATAALIGN_WORD +Dma.SDMMC1_RX.1.MemInc=DMA_MINC_ENABLE +Dma.SDMMC1_RX.1.Mode=DMA_PFCTRL +Dma.SDMMC1_RX.1.PeriphBurst=DMA_PBURST_INC4 +Dma.SDMMC1_RX.1.PeriphDataAlignment=DMA_PDATAALIGN_WORD +Dma.SDMMC1_RX.1.PeriphInc=DMA_PINC_DISABLE +Dma.SDMMC1_RX.1.Priority=DMA_PRIORITY_LOW +Dma.SDMMC1_RX.1.RequestParameters=Instance,Direction,PeriphInc,MemInc,PeriphDataAlignment,MemDataAlignment,Mode,Priority,FIFOMode,FIFOThreshold,MemBurst,PeriphBurst +Dma.SDMMC1_TX.2.Direction=DMA_MEMORY_TO_PERIPH +Dma.SDMMC1_TX.2.FIFOMode=DMA_FIFOMODE_ENABLE +Dma.SDMMC1_TX.2.FIFOThreshold=DMA_FIFO_THRESHOLD_FULL +Dma.SDMMC1_TX.2.Instance=DMA2_Stream6 +Dma.SDMMC1_TX.2.MemBurst=DMA_MBURST_INC4 +Dma.SDMMC1_TX.2.MemDataAlignment=DMA_MDATAALIGN_WORD +Dma.SDMMC1_TX.2.MemInc=DMA_MINC_ENABLE +Dma.SDMMC1_TX.2.Mode=DMA_PFCTRL +Dma.SDMMC1_TX.2.PeriphBurst=DMA_PBURST_INC4 +Dma.SDMMC1_TX.2.PeriphDataAlignment=DMA_PDATAALIGN_WORD +Dma.SDMMC1_TX.2.PeriphInc=DMA_PINC_DISABLE +Dma.SDMMC1_TX.2.Priority=DMA_PRIORITY_LOW +Dma.SDMMC1_TX.2.RequestParameters=Instance,Direction,PeriphInc,MemInc,PeriphDataAlignment,MemDataAlignment,Mode,Priority,FIFOMode,FIFOThreshold,MemBurst,PeriphBurst +ETH.AutoNegotiation=ETH_AUTONEGOTIATION_DISABLE +ETH.IPParameters=MediaInterface,PhyAddress,AutoNegotiation +ETH.MediaInterface=ETH_MEDIA_INTERFACE_MII +ETH.PhyAddress=2 +FATFS.IPParameters=_USE_FIND,_USE_LFN,_FS_LOCK,_FS_REENTRANT,_USE_STRFUNC +FATFS._FS_LOCK=0 +FATFS._FS_REENTRANT=1 +FATFS._USE_FIND=1 +FATFS._USE_LFN=2 +FATFS._USE_STRFUNC=0 +FMC.CASLatency1=FMC_SDRAM_CAS_LATENCY_3 +FMC.ExitSelfRefreshDelay1=7 +FMC.IPParameters=CASLatency1,SDClockPeriod1,SDClockPeriod2,ReadPipeDelay1,ReadPipeDelay2,LoadToActiveDelay1,ExitSelfRefreshDelay1,SelfRefreshTime1,RowCycleDelay1,RowCycleDelay2,WriteRecoveryTime1,RPDelay1,RPDelay2,RCDDelay1 +FMC.LoadToActiveDelay1=2 +FMC.RCDDelay1=2 +FMC.RPDelay1=2 +FMC.RPDelay2=2 +FMC.ReadPipeDelay1=FMC_SDRAM_RPIPE_DELAY_1 +FMC.ReadPipeDelay2=FMC_SDRAM_RPIPE_DELAY_1 +FMC.RowCycleDelay1=7 +FMC.RowCycleDelay2=7 +FMC.SDClockPeriod1=FMC_SDRAM_CLOCK_PERIOD_2 +FMC.SDClockPeriod2=FMC_SDRAM_CLOCK_PERIOD_2 +FMC.SelfRefreshTime1=4 +FMC.WriteRecoveryTime1=3 
+FREERTOS.IPParameters=Tasks01,configTOTAL_HEAP_SIZE,configMINIMAL_STACK_SIZE +FREERTOS.Tasks01=defaultTask,0,128,StartDefaultTask,Default,NULL +FREERTOS.configMINIMAL_STACK_SIZE=1024 +FREERTOS.configTOTAL_HEAP_SIZE=131072 +File.Version=6 +GPIO.groupedBy=Group By Peripherals +I2C1.I2C_Speed_Mode=I2C_Fast +I2C1.IPParameters=Timing,Speed,I2C_Speed_Mode +I2C1.Speed=400 +I2C1.Timing=0x6000030D +IWDG.IPParameters=Prescaler +IWDG.Prescaler=IWDG_PRESCALER_16 +JPEG.IPParameters=JPEG_RGB_FORMAT +JPEG.JPEG_RGB_FORMAT=JPEG_RGB565 +KeepUserPlacement=false +LIBJPEG.IPParameters=LIBJPEG_FS_type +LIBJPEG.LIBJPEG_FS_type=NoFS +LTDC.ActiveH=272 +LTDC.ActiveW=480 +LTDC.Alpha0_L0=255 +LTDC.Alpha_L0=255 +LTDC.HBP=43 +LTDC.HFP=8 +LTDC.HSPolarity=LTDC_HSPOLARITY_AH +LTDC.HSync=51 +LTDC.IPParameters=ActiveW,ActiveH,HSync,HBP,HFP,VSync,VBP,VFP,HSPolarity,Layers,WindowX1_L0,WindowY1_L0,PixelFormat_L0,Alpha_L0,Alpha0_L0,ImageWidth_L0,ImageHeight_L0 +LTDC.ImageHeight_L0=272 +LTDC.ImageWidth_L0=480 +LTDC.Layers=0 +LTDC.PixelFormat_L0=LTDC_PIXEL_FORMAT_RGB565 +LTDC.VBP=12 +LTDC.VFP=8 +LTDC.VSync=20 +LTDC.WindowX1_L0=480 +LTDC.WindowY1_L0=272 +LWIP.IPParameters=LWIP_DNS,LWIP_SNTP +LWIP.LWIP_DNS=1 +LWIP.LWIP_SNTP=1 +LWIP.Version=v2.1.2_Cube +Mcu.Family=STM32F7 +Mcu.IP0=ADC1 +Mcu.IP1=CORTEX_M7 +Mcu.IP10=I2C1 +Mcu.IP11=IWDG +Mcu.IP12=JPEG +Mcu.IP13=LIBJPEG +Mcu.IP14=LTDC +Mcu.IP15=LWIP +Mcu.IP16=NVIC +Mcu.IP17=RCC +Mcu.IP18=RNG +Mcu.IP19=RTC +Mcu.IP2=CRC +Mcu.IP20=SDMMC1 +Mcu.IP21=SPI2 +Mcu.IP22=SPI4 +Mcu.IP23=SPI5 +Mcu.IP24=SYS +Mcu.IP25=TIM3 +Mcu.IP26=TIM6 +Mcu.IP27=TIM8 +Mcu.IP28=TIM12 +Mcu.IP29=UART7 +Mcu.IP3=DAC +Mcu.IP30=USB_DEVICE +Mcu.IP31=USB_OTG_FS +Mcu.IP4=DMA +Mcu.IP5=DMA2D +Mcu.IP6=ETH +Mcu.IP7=FATFS +Mcu.IP8=FMC +Mcu.IP9=FREERTOS +Mcu.IPNb=32 +Mcu.Name=STM32F769IGTx +Mcu.Package=LQFP176 +Mcu.Pin0=PE2 +Mcu.Pin1=PE3 +Mcu.Pin10=PI11 +Mcu.Pin100=PD0 +Mcu.Pin101=PD1 +Mcu.Pin102=PD2 +Mcu.Pin103=PD3 +Mcu.Pin104=PD4 +Mcu.Pin105=PD5 +Mcu.Pin106=PD7 +Mcu.Pin107=PG9 +Mcu.Pin108=PG10 +Mcu.Pin109=PG11 +Mcu.Pin11=PF0 +Mcu.Pin110=PG12 +Mcu.Pin111=PG13 +Mcu.Pin112=PG14 +Mcu.Pin113=PG15 +Mcu.Pin114=PB3 +Mcu.Pin115=PB4 +Mcu.Pin116=PB5 +Mcu.Pin117=PB6 +Mcu.Pin118=PB7 +Mcu.Pin119=PB8 +Mcu.Pin12=PF1 +Mcu.Pin120=PB9 +Mcu.Pin121=PE0 +Mcu.Pin122=PE1 +Mcu.Pin123=PI4 +Mcu.Pin124=PI5 +Mcu.Pin125=PI6 +Mcu.Pin126=PI7 +Mcu.Pin127=VP_ADC1_Vbat_Input +Mcu.Pin128=VP_CRC_VS_CRC +Mcu.Pin129=VP_DMA2D_VS_DMA2D +Mcu.Pin13=PF2 +Mcu.Pin130=VP_FATFS_VS_SDIO +Mcu.Pin131=VP_FREERTOS_VS_CMSIS_V1 +Mcu.Pin132=VP_IWDG_VS_IWDG +Mcu.Pin133=VP_JPEG_VS_JPEG +Mcu.Pin134=VP_LIBJPEG_VS_JPEGLIB +Mcu.Pin135=VP_LWIP_VS_Enabled +Mcu.Pin136=VP_RNG_VS_RNG +Mcu.Pin137=VP_RTC_VS_RTC_Activate +Mcu.Pin138=VP_SYS_VS_tim10 +Mcu.Pin139=VP_TIM6_VS_ClockSourceINT +Mcu.Pin14=PF3 +Mcu.Pin140=VP_USB_DEVICE_VS_USB_DEVICE_CDC_FS +Mcu.Pin15=PF4 +Mcu.Pin16=PF5 +Mcu.Pin17=PF6 +Mcu.Pin18=PF7 +Mcu.Pin19=PF8 +Mcu.Pin2=PE4 +Mcu.Pin20=PF9 +Mcu.Pin21=PF10 +Mcu.Pin22=PH0/OSC_IN +Mcu.Pin23=PH1/OSC_OUT +Mcu.Pin24=PC0 +Mcu.Pin25=PC1 +Mcu.Pin26=PC2 +Mcu.Pin27=PC3 +Mcu.Pin28=PA0/WKUP +Mcu.Pin29=PA1 +Mcu.Pin3=PE5 +Mcu.Pin30=PA2 +Mcu.Pin31=PH2 +Mcu.Pin32=PH3 +Mcu.Pin33=PH4 +Mcu.Pin34=PH5 +Mcu.Pin35=PA3 +Mcu.Pin36=PA4 +Mcu.Pin37=PA5 +Mcu.Pin38=PA6 +Mcu.Pin39=PA7 +Mcu.Pin4=PE6 +Mcu.Pin40=PC4 +Mcu.Pin41=PC5 +Mcu.Pin42=PB0 +Mcu.Pin43=PB1 +Mcu.Pin44=PB2 +Mcu.Pin45=PF11 +Mcu.Pin46=PF12 +Mcu.Pin47=PF13 +Mcu.Pin48=PF14 +Mcu.Pin49=PF15 +Mcu.Pin5=PC13 +Mcu.Pin50=PG0 +Mcu.Pin51=PG1 +Mcu.Pin52=PE7 +Mcu.Pin53=PE8 +Mcu.Pin54=PE9 +Mcu.Pin55=PE10 +Mcu.Pin56=PE11 +Mcu.Pin57=PE12 +Mcu.Pin58=PE13 +Mcu.Pin59=PE14 
+Mcu.Pin6=PC14/OSC32_IN +Mcu.Pin60=PE15 +Mcu.Pin61=PB10 +Mcu.Pin62=PB11 +Mcu.Pin63=PH6 +Mcu.Pin64=PH7 +Mcu.Pin65=PB12 +Mcu.Pin66=PB13 +Mcu.Pin67=PB14 +Mcu.Pin68=PB15 +Mcu.Pin69=PD8 +Mcu.Pin7=PC15/OSC32_OUT +Mcu.Pin70=PD9 +Mcu.Pin71=PD10 +Mcu.Pin72=PD11 +Mcu.Pin73=PD12 +Mcu.Pin74=PD13 +Mcu.Pin75=PD14 +Mcu.Pin76=PD15 +Mcu.Pin77=PG2 +Mcu.Pin78=PG3 +Mcu.Pin79=PG4 +Mcu.Pin8=PI9 +Mcu.Pin80=PG5 +Mcu.Pin81=PG6 +Mcu.Pin82=PG7 +Mcu.Pin83=PG8 +Mcu.Pin84=PC6 +Mcu.Pin85=PC7 +Mcu.Pin86=PC8 +Mcu.Pin87=PC9 +Mcu.Pin88=PA8 +Mcu.Pin89=PA9 +Mcu.Pin9=PI10 +Mcu.Pin90=PA11 +Mcu.Pin91=PA12 +Mcu.Pin92=PA13 +Mcu.Pin93=PI0 +Mcu.Pin94=PI1 +Mcu.Pin95=PI3 +Mcu.Pin96=PA14 +Mcu.Pin97=PA15 +Mcu.Pin98=PC11 +Mcu.Pin99=PC12 +Mcu.PinsNb=141 +Mcu.ThirdPartyNb=0 +Mcu.UserConstants= +Mcu.UserName=STM32F769IGTx +MxCube.Version=5.6.0 +MxDb.Version=DB.5.0.60 +NVIC.ADC_IRQn=true\:5\:0\:false\:false\:true\:true\:true\:true +NVIC.BusFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +NVIC.DMA1_Stream5_IRQn=true\:5\:0\:false\:false\:true\:true\:true\:true +NVIC.DMA2_Stream3_IRQn=true\:5\:0\:false\:false\:true\:true\:false\:true +NVIC.DMA2_Stream6_IRQn=true\:5\:0\:false\:false\:true\:true\:false\:true +NVIC.DebugMonitor_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +NVIC.ETH_IRQn=true\:5\:0\:false\:false\:true\:true\:true\:true +NVIC.EXTI15_10_IRQn=true\:5\:0\:false\:false\:true\:true\:true\:true +NVIC.EXTI9_5_IRQn=true\:5\:0\:false\:false\:true\:true\:true\:true +NVIC.ForceEnableDMAVector=true +NVIC.HardFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +NVIC.MemoryManagement_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +NVIC.NonMaskableInt_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:true +NVIC.OTG_FS_IRQn=true\:5\:0\:false\:false\:true\:true\:false\:true +NVIC.PendSV_IRQn=true\:15\:0\:false\:false\:false\:true\:true\:false +NVIC.PriorityGroup=NVIC_PRIORITYGROUP_4 +NVIC.SDMMC1_IRQn=true\:5\:0\:false\:false\:true\:true\:true\:true +NVIC.SVCall_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false +NVIC.SysTick_IRQn=true\:15\:0\:false\:false\:false\:true\:true\:true +NVIC.TIM1_UP_TIM10_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:true +NVIC.TimeBase=TIM1_UP_TIM10_IRQn +NVIC.TimeBaseIP=TIM10 +NVIC.UART7_IRQn=true\:5\:0\:false\:false\:true\:true\:true\:true +NVIC.UsageFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false +PA0/WKUP.Mode=MII +PA0/WKUP.Signal=ETH_CRS +PA1.Mode=MII +PA1.Signal=ETH_RX_CLK +PA11.Mode=Device_Only +PA11.Signal=USB_OTG_FS_DM +PA12.Mode=Device_Only +PA12.Signal=USB_OTG_FS_DP +PA13.Mode=Trace_Asynchronous_SW +PA13.Signal=SYS_JTMS-SWDIO +PA14.Mode=Trace_Asynchronous_SW +PA14.Signal=SYS_JTCK-SWCLK +PA15.GPIOParameters=GPIO_PuPd,GPIO_Label +PA15.GPIO_Label=SPI5_IRQ +PA15.GPIO_PuPd=GPIO_PULLUP +PA15.Locked=true +PA15.Signal=GPXTI15 +PA2.Mode=MII +PA2.Signal=ETH_MDIO +PA3.Mode=MII +PA3.Signal=ETH_COL +PA4.Signal=COMP_DAC1_group +PA5.GPIOParameters=GPIO_Speed +PA5.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PA5.Mode=RGB565 +PA5.Signal=LTDC_R4 +PA6.GPIOParameters=GPIO_Speed +PA6.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PA6.Mode=RGB565 +PA6.Signal=LTDC_G2 +PA7.Mode=MII +PA7.Signal=ETH_RX_DV +PA8.GPIOParameters=GPIO_PuPd,GPIO_Label +PA8.GPIO_Label=SPI2_IRQ +PA8.GPIO_PuPd=GPIO_PULLUP +PA8.Locked=true +PA8.Signal=GPXTI8 +PA9.Mode=Activate_VBUS +PA9.Signal=USB_OTG_FS_VBUS +PB0.GPIOParameters=GPIO_Speed +PB0.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PB0.Mode=RGB565 +PB0.Signal=LTDC_R3 +PB1.GPIOParameters=GPIO_Speed +PB1.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PB1.Mode=RGB565 
+PB1.Signal=LTDC_R6 +PB10.Mode=Activate Rx Err signal +PB10.Signal=ETH_RX_ER +PB11.Mode=MII +PB11.Signal=ETH_TX_EN +PB12.Mode=MII +PB12.Signal=ETH_TXD0 +PB13.Mode=MII +PB13.Signal=ETH_TXD1 +PB14.Mode=Full_Duplex_Master +PB14.Signal=SPI2_MISO +PB15.GPIOParameters=GPIO_Label +PB15.GPIO_Label=TFT_BRIGHTNESS +PB15.Signal=S_TIM12_CH2 +PB2.GPIOParameters=GPIO_PuPd,GPIO_Label +PB2.GPIO_Label=NFAULT +PB2.GPIO_PuPd=GPIO_PULLUP +PB2.Locked=true +PB2.Signal=GPIO_Input +PB3.Mode=Trace_Asynchronous_SW +PB3.Signal=SYS_JTDO-SWO +PB4.GPIOParameters=GPIO_Label +PB4.GPIO_Label=UART_TX/DOUT1 +PB4.Mode=Asynchronous +PB4.Signal=UART7_TX +PB5.GPIOParameters=GPIO_Label +PB5.GPIO_Label=DOUT2 +PB5.Locked=true +PB5.Signal=S_TIM3_CH2 +PB6.Mode=I2C +PB6.Signal=I2C1_SCL +PB7.Mode=I2C +PB7.Signal=I2C1_SDA +PB8.Mode=MII +PB8.Signal=ETH_TXD3 +PB9.GPIOParameters=GPIO_PuPd,GPIO_Label +PB9.GPIO_Label=SPI4_IRQ +PB9.GPIO_PuPd=GPIO_PULLUP +PB9.Locked=true +PB9.Signal=GPXTI9 +PC0.GPIOParameters=GPIO_Speed +PC0.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PC0.Mode=RGB565 +PC0.Signal=LTDC_R5 +PC1.Mode=MII +PC1.Signal=ETH_MDC +PC11.GPIOParameters=GPIO_PuPd,GPIO_Label,GPIO_ModeDefaultEXTI +PC11.GPIO_Label=SD_DETECT +PC11.GPIO_ModeDefaultEXTI=GPIO_MODE_IT_FALLING +PC11.GPIO_PuPd=GPIO_PULLUP +PC11.Locked=true +PC11.Signal=GPXTI11 +PC12.Mode=SD_1_bit +PC12.Signal=SDMMC1_CK +PC13.GPIOParameters=GPIO_Label +PC13.GPIO_Label=DIN2 +PC13.Locked=true +PC13.Signal=GPIO_Input +PC14/OSC32_IN.Locked=true +PC14/OSC32_IN.Mode=LSE-External-Oscillator +PC14/OSC32_IN.Signal=RCC_OSC32_IN +PC15/OSC32_OUT.Locked=true +PC15/OSC32_OUT.Mode=LSE-External-Oscillator +PC15/OSC32_OUT.Signal=RCC_OSC32_OUT +PC2.Mode=MII +PC2.Signal=ETH_TXD2 +PC3.Mode=MII +PC3.Signal=ETH_TX_CLK +PC4.Mode=MII +PC4.Signal=ETH_RXD0 +PC5.Mode=MII +PC5.Signal=ETH_RXD1 +PC6.GPIOParameters=GPIO_PuPd,GPIO_Label +PC6.GPIO_Label=ENC_A +PC6.GPIO_PuPd=GPIO_PULLUP +PC6.Signal=S_TIM8_CH1 +PC7.GPIOParameters=GPIO_PuPd,GPIO_Label +PC7.GPIO_Label=ENC_B +PC7.GPIO_PuPd=GPIO_PULLUP +PC7.Signal=S_TIM8_CH2 +PC8.Mode=SD_1_bit +PC8.Signal=SDMMC1_D0 +PC9.GPIOParameters=GPIO_Speed,GPIO_Label +PC9.GPIO_Label=MCLK_25 +PC9.GPIO_Speed=GPIO_SPEED_FREQ_VERY_HIGH +PC9.Mode=Clock-out-2 +PC9.Signal=RCC_MCO_2 +PD0.Signal=FMC_D2_DA2 +PD1.Signal=FMC_D3_DA3 +PD10.Signal=FMC_D15_DA15 +PD11.GPIOParameters=GPIO_Speed,GPIO_Label +PD11.GPIO_Label=SPI2_CSB +PD11.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PD11.Locked=true +PD11.Signal=GPIO_Output +PD12.GPIOParameters=GPIO_Speed,GPIO_Label +PD12.GPIO_Label=SPI2_CSA +PD12.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PD12.Locked=true +PD12.Signal=GPIO_Output +PD13.GPIOParameters=GPIO_PuPd,GPIO_Label +PD13.GPIO_Label=OE_SYNC +PD13.GPIO_PuPd=GPIO_NOPULL +PD13.Locked=true +PD13.Signal=GPIO_Output +PD14.Signal=FMC_D0_DA0 +PD15.Signal=FMC_D1_DA1 +PD2.Mode=SD_1_bit +PD2.Signal=SDMMC1_CMD +PD3.GPIOParameters=GPIO_Speed +PD3.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PD3.Mode=RGB565 +PD3.Signal=LTDC_G7 +PD4.GPIOParameters=GPIO_Label +PD4.GPIO_Label=USB_OTG_FS_OC +PD4.Locked=true +PD4.Signal=GPIO_Input +PD5.GPIOParameters=GPIO_Label +PD5.GPIO_Label=USB_OTG_FS_PSO +PD5.Locked=true +PD5.Signal=GPIO_Output +PD7.GPIOParameters=GPIO_Label +PD7.GPIO_Label=IRQ_TOUCH +PD7.Locked=true +PD7.Signal=GPIO_Input +PD8.Signal=FMC_D13_DA13 +PD9.Signal=FMC_D14_DA14 +PE0.Signal=FMC_NBL0 +PE1.Signal=FMC_NBL1 +PE10.Signal=FMC_D7_DA7 +PE11.Signal=FMC_D8_DA8 +PE12.Signal=FMC_D9_DA9 +PE13.Signal=FMC_D10_DA10 +PE14.Signal=FMC_D11_DA11 +PE15.Signal=FMC_D12_DA12 +PE2.Mode=Full_Duplex_Master +PE2.Signal=SPI4_SCK +PE3.GPIOParameters=GPIO_Speed,GPIO_Label 
+PE3.GPIO_Label=SPI4_CSA +PE3.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PE3.Locked=true +PE3.Signal=GPIO_Output +PE4.GPIOParameters=GPIO_Speed,GPIO_Label +PE4.GPIO_Label=SPI4_CSB +PE4.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PE4.Locked=true +PE4.Signal=GPIO_Output +PE5.Mode=Full_Duplex_Master +PE5.Signal=SPI4_MISO +PE6.Mode=Full_Duplex_Master +PE6.Signal=SPI4_MOSI +PE7.Signal=FMC_D4_DA4 +PE8.Signal=FMC_D5_DA5 +PE9.Signal=FMC_D6_DA6 +PF0.Signal=FMC_A0 +PF1.Signal=FMC_A1 +PF10.GPIOParameters=GPIO_Speed +PF10.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PF10.Mode=RGB565 +PF10.Signal=LTDC_DE +PF11.Signal=FMC_SDNRAS +PF12.Signal=FMC_A6 +PF13.Signal=FMC_A7 +PF14.Signal=FMC_A8 +PF15.Signal=FMC_A9 +PF2.Signal=FMC_A2 +PF3.Signal=FMC_A3 +PF4.Signal=FMC_A4 +PF5.Signal=FMC_A5 +PF6.GPIOParameters=GPIO_Label +PF6.GPIO_Label=UART_RX/DIN1 +PF6.Mode=Asynchronous +PF6.Signal=UART7_RX +PF7.Mode=Full_Duplex_Master +PF7.Signal=SPI5_SCK +PF8.Mode=Full_Duplex_Master +PF8.Signal=SPI5_MISO +PF9.Mode=Full_Duplex_Master +PF9.Signal=SPI5_MOSI +PG0.Signal=FMC_A10 +PG1.Signal=FMC_A11 +PG10.GPIOParameters=GPIO_Speed +PG10.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PG10.Mode=RGB565 +PG10.Signal=LTDC_G3 +PG11.GPIOParameters=GPIO_Speed +PG11.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PG11.Mode=RGB565 +PG11.Signal=LTDC_B3 +PG12.GPIOParameters=GPIO_Speed +PG12.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PG12.Mode=RGB565 +PG12.Signal=LTDC_B4 +PG13.GPIOParameters=GPIO_Speed,GPIO_Label +PG13.GPIO_Label=SPI5_CSB +PG13.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PG13.Locked=true +PG13.Signal=GPIO_Output +PG14.GPIOParameters=GPIO_Speed,GPIO_Label +PG14.GPIO_Label=SPI5_CSA +PG14.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PG14.Locked=true +PG14.Signal=GPIO_Output +PG15.Signal=FMC_SDNCAS +PG2.GPIOParameters=GPIO_PuPd,GPIO_Label +PG2.GPIO_Label=PWR_DIRECT +PG2.GPIO_PuPd=GPIO_PULLDOWN +PG2.Locked=true +PG2.Signal=GPIO_Output +PG3.GPIOParameters=GPIO_PuPd,GPIO_Label +PG3.GPIO_Label=PWR_SSTART +PG3.GPIO_PuPd=GPIO_PULLDOWN +PG3.Locked=true +PG3.Signal=GPIO_Output +PG4.Signal=FMC_A14_BA0 +PG5.Signal=FMC_A15_BA1 +PG6.GPIOParameters=GPIO_Speed +PG6.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PG6.Mode=RGB565 +PG6.Signal=LTDC_R7 +PG7.GPIOParameters=GPIO_Speed +PG7.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PG7.Mode=RGB565 +PG7.Signal=LTDC_CLK +PG8.Signal=FMC_SDCLK +PG9.GPIOParameters=GPIO_PuPd,GPIO_Label +PG9.GPIO_Label=USER_SW +PG9.GPIO_PuPd=GPIO_PULLUP +PG9.Locked=true +PG9.Signal=GPIO_Input +PH0/OSC_IN.Mode=HSE-External-Oscillator +PH0/OSC_IN.Signal=RCC_OSC_IN +PH1/OSC_OUT.Mode=HSE-External-Oscillator +PH1/OSC_OUT.Signal=RCC_OSC_OUT +PH2.Mode=SdramChipSelect1_1 +PH2.Signal=FMC_SDCKE0 +PH3.Mode=SdramChipSelect1_1 +PH3.Signal=FMC_SDNE0 +PH4.GPIOParameters=GPIO_Speed +PH4.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PH4.Mode=RGB565 +PH4.Signal=LTDC_G4 +PH5.Signal=FMC_SDNWE +PH6.Mode=MII +PH6.Signal=ETH_RXD2 +PH7.Mode=MII +PH7.Signal=ETH_RXD3 +PI0.GPIOParameters=GPIO_Speed +PI0.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PI0.Mode=RGB565 +PI0.Signal=LTDC_G5 +PI1.Mode=Full_Duplex_Master +PI1.Signal=SPI2_SCK +PI10.GPIOParameters=GPIO_Speed +PI10.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PI10.Mode=RGB565 +PI10.Signal=LTDC_HSYNC +PI11.GPIOParameters=GPIO_Speed +PI11.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PI11.Mode=RGB565 +PI11.Signal=LTDC_G6 +PI3.Mode=Full_Duplex_Master +PI3.Signal=SPI2_MOSI +PI4.GPIOParameters=GPIO_PuPd,GPIO_Label +PI4.GPIO_Label=ENC_SW +PI4.GPIO_PuPd=GPIO_PULLUP +PI4.Locked=true +PI4.Signal=GPIO_Input +PI5.GPIOParameters=GPIO_Speed +PI5.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PI5.Mode=RGB565 +PI5.Signal=LTDC_B5 +PI6.GPIOParameters=GPIO_Speed 
+PI6.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PI6.Mode=RGB565 +PI6.Signal=LTDC_B6 +PI7.GPIOParameters=GPIO_Speed +PI7.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PI7.Mode=RGB565 +PI7.Signal=LTDC_B7 +PI9.GPIOParameters=GPIO_Speed +PI9.GPIO_Speed=GPIO_SPEED_FREQ_HIGH +PI9.Mode=RGB565 +PI9.Signal=LTDC_VSYNC +PinOutPanel.RotationAngle=0 +ProjectManager.AskForMigrate=true +ProjectManager.BackupPrevious=false +ProjectManager.CompilerOptimize=3 +ProjectManager.ComputerToolchain=false +ProjectManager.CoupleFile=true +ProjectManager.CustomerFirmwarePackage= +ProjectManager.DefaultFWLocation=true +ProjectManager.DeletePrevious=true +ProjectManager.DeviceId=STM32F769IGTx +ProjectManager.FirmwarePackage=STM32Cube FW_F7 V1.16.0 +ProjectManager.FreePins=true +ProjectManager.HalAssertFull=true +ProjectManager.HeapSize=0x200 +ProjectManager.KeepUserCode=true +ProjectManager.LastFirmware=true +ProjectManager.LibraryCopy=1 +ProjectManager.MainLocation=Core/Src +ProjectManager.NoMain=true +ProjectManager.PreviousToolchain=STM32CubeIDE +ProjectManager.ProjectBuild=false +ProjectManager.ProjectFileName=bb3.ioc +ProjectManager.ProjectName=bb3 +ProjectManager.StackSize=0x400 +ProjectManager.TargetToolchain=STM32CubeIDE +ProjectManager.ToolChainLocation= +ProjectManager.UnderRoot=true +ProjectManager.functionlistsort=1-MX_GPIO_Init-GPIO-false-HAL-true,2-MX_DMA_Init-DMA-false-HAL-true,3-SystemClock_Config-RCC-false-HAL-false,4-MX_ADC1_Init-ADC1-false-HAL-true,5-MX_CORTEX_M7_Init-CORTEX_M7-false-HAL-true,6-MX_CRC_Init-CRC-false-HAL-true,7-MX_DAC_Init-DAC-false-HAL-true,8-MX_DMA2D_Init-DMA2D-false-HAL-true,9-MX_FMC_Init-FMC-false-HAL-true,10-MX_I2C1_Init-I2C1-false-HAL-true,11-MX_LTDC_Init-LTDC-false-HAL-true,12-MX_RNG_Init-RNG-false-HAL-true,13-MX_RTC_Init-RTC-false-HAL-true,14-MX_SDMMC1_SD_Init-SDMMC1-false-HAL-true,15-MX_SPI2_Init-SPI2-false-HAL-true,16-MX_SPI5_Init-SPI5-false-HAL-true,17-MX_TIM8_Init-TIM8-false-HAL-true,18-MX_TIM12_Init-TIM12-false-HAL-true,19-MX_SPI4_Init-SPI4-false-HAL-true,20-MX_LWIP_Init-LWIP-false-HAL-false,21-MX_TIM6_Init-TIM6-false-HAL-true,22-MX_FATFS_Init-FATFS-false-HAL-false,23-MX_UART7_Init-UART7-false-HAL-true,24-MX_USB_DEVICE_Init-USB_DEVICE-false-HAL-false,25-MX_IWDG_Init-IWDG-false-HAL-true,26-MX_LIBJPEG_Init-LIBJPEG-false-HAL-false,27-MX_JPEG_Init-JPEG-false-HAL-true +RCC.AHBFreq_Value=216000000 +RCC.APB1CLKDivider=RCC_HCLK_DIV4 +RCC.APB1Freq_Value=54000000 +RCC.APB1TimFreq_Value=108000000 +RCC.APB2CLKDivider=RCC_HCLK_DIV4 +RCC.APB2Freq_Value=54000000 +RCC.APB2TimFreq_Value=108000000 +RCC.CECFreq_Value=32786.88524590164 +RCC.CortexFreq_Value=216000000 +RCC.DFSDMAudioFreq_Value=100000000 +RCC.DFSDMFreq_Value=54000000 +RCC.DSIFreq_Value=62500000 +RCC.DSITXEscFreq_Value=15625000 +RCC.EnbaleCSS=true +RCC.EthernetFreq_Value=216000000 +RCC.FCLKCortexFreq_Value=216000000 +RCC.FamilyName=M +RCC.HCLKFreq_Value=216000000 +RCC.HSE_VALUE=25000000 +RCC.HSI_VALUE=16000000 +RCC.I2C1Freq_Value=54000000 +RCC.I2C2Freq_Value=54000000 +RCC.I2C3Freq_Value=54000000 +RCC.I2C4CLockSelection=RCC_I2C4CLKSOURCE_HSI +RCC.I2C4Freq_Value=16000000 +RCC.I2SFreq_Value=100000000 
+RCC.IPParameters=AHBFreq_Value,APB1CLKDivider,APB1Freq_Value,APB1TimFreq_Value,APB2CLKDivider,APB2Freq_Value,APB2TimFreq_Value,CECFreq_Value,CortexFreq_Value,DFSDMAudioFreq_Value,DFSDMFreq_Value,DSIFreq_Value,DSITXEscFreq_Value,EnbaleCSS,EthernetFreq_Value,FCLKCortexFreq_Value,FamilyName,HCLKFreq_Value,HSE_VALUE,HSI_VALUE,I2C1Freq_Value,I2C2Freq_Value,I2C3Freq_Value,I2C4CLockSelection,I2C4Freq_Value,I2SFreq_Value,LCDTFTFreq_Value,LPTIM1Freq_Value,LSI_VALUE,MCO1PinFreq_Value,MCO2PinFreq_Value,PLLCLKFreq_Value,PLLDSIFreq_Value,PLLDSIVCOFreq_Value,PLLI2SN,PLLI2SPCLKFreq_Value,PLLI2SQCLKFreq_Value,PLLI2SRCLKFreq_Value,PLLI2SRoutputFreq_Value,PLLM,PLLN,PLLQ,PLLQCLKFreq_Value,PLLQoutputFreq_Value,PLLRFreq_Value,PLLSAIN,PLLSAIPCLKFreq_Value,PLLSAIQCLKFreq_Value,PLLSAIR,PLLSAIRCLKFreq_Value,PLLSAIRDiv,PLLSAIoutputFreq_Value,RCC_MCO1Source,RCC_MCO2Source,RCC_RTC_Clock_Source_FROM_HSE,RNGFreq_Value,RTCClockSelection,RTCFreq_Value,SAI1Freq_Value,SAI2Freq_Value,SDMMC2Freq_Value,SDMMCClockSelection,SDMMCFreq_Value,SPDIFRXFreq_Value,SYSCLKFreq_VALUE,SYSCLKSource,UART4Freq_Value,UART5Freq_Value,UART7Freq_Value,UART8Freq_Value,USART1Freq_Value,USART2Freq_Value,USART3Freq_Value,USART6Freq_Value,USBFreq_Value,VCOI2SOutputFreq_Value,VCOInputFreq_Value,VCOOutputFreq_Value,VCOSAIOutputFreq_Value +RCC.LCDTFTFreq_Value=10000000 +RCC.LPTIM1Freq_Value=54000000 +RCC.LSI_VALUE=32000 +RCC.MCO1PinFreq_Value=25000000 +RCC.MCO2PinFreq_Value=25000000 +RCC.PLLCLKFreq_Value=216000000 +RCC.PLLDSIFreq_Value=500000000 +RCC.PLLDSIVCOFreq_Value=1000000000 +RCC.PLLI2SN=200 +RCC.PLLI2SPCLKFreq_Value=100000000 +RCC.PLLI2SQCLKFreq_Value=100000000 +RCC.PLLI2SRCLKFreq_Value=100000000 +RCC.PLLI2SRoutputFreq_Value=100000000 +RCC.PLLM=25 +RCC.PLLN=432 +RCC.PLLQ=9 +RCC.PLLQCLKFreq_Value=48000000 +RCC.PLLQoutputFreq_Value=48000000 +RCC.PLLRFreq_Value=216000000 +RCC.PLLSAIN=200 +RCC.PLLSAIPCLKFreq_Value=100000000 +RCC.PLLSAIQCLKFreq_Value=100000000 +RCC.PLLSAIR=5 +RCC.PLLSAIRCLKFreq_Value=40000000 +RCC.PLLSAIRDiv=RCC_PLLSAIDIVR_4 +RCC.PLLSAIoutputFreq_Value=100000000 +RCC.RCC_MCO1Source=RCC_MCO1SOURCE_HSE +RCC.RCC_MCO2Source=RCC_MCO2SOURCE_HSE +RCC.RCC_RTC_Clock_Source_FROM_HSE=RCC_RTCCLKSOURCE_HSE_DIV25 +RCC.RNGFreq_Value=48000000 +RCC.RTCClockSelection=RCC_RTCCLKSOURCE_LSE +RCC.RTCFreq_Value=32768 +RCC.SAI1Freq_Value=100000000 +RCC.SAI2Freq_Value=100000000 +RCC.SDMMC2Freq_Value=216000000 +RCC.SDMMCClockSelection=RCC_SDMMC1CLKSOURCE_CLK48 +RCC.SDMMCFreq_Value=48000000 +RCC.SPDIFRXFreq_Value=100000000 +RCC.SYSCLKFreq_VALUE=216000000 +RCC.SYSCLKSource=RCC_SYSCLKSOURCE_PLLCLK +RCC.UART4Freq_Value=54000000 +RCC.UART5Freq_Value=54000000 +RCC.UART7Freq_Value=54000000 +RCC.UART8Freq_Value=54000000 +RCC.USART1Freq_Value=54000000 +RCC.USART2Freq_Value=54000000 +RCC.USART3Freq_Value=54000000 +RCC.USART6Freq_Value=54000000 +RCC.USBFreq_Value=48000000 +RCC.VCOI2SOutputFreq_Value=200000000 +RCC.VCOInputFreq_Value=1000000 +RCC.VCOOutputFreq_Value=432000000 +RCC.VCOSAIOutputFreq_Value=200000000 +RTC.AsynchPrediv=127 +RTC.IPParameters=AsynchPrediv,SynchPrediv +RTC.SynchPrediv=255 +SDMMC1.ClockDiv=0 +SDMMC1.HardwareFlowControl=SDMMC_HARDWARE_FLOW_CONTROL_ENABLE +SDMMC1.IPParameters=HardwareFlowControl,ClockDiv +SH.COMP_DAC1_group.0=DAC_OUT1,DAC_OUT1 +SH.COMP_DAC1_group.ConfNb=1 +SH.FMC_A0.0=FMC_A0,12b-sda1 +SH.FMC_A0.ConfNb=1 +SH.FMC_A1.0=FMC_A1,12b-sda1 +SH.FMC_A1.ConfNb=1 +SH.FMC_A10.0=FMC_A10,12b-sda1 +SH.FMC_A10.ConfNb=1 +SH.FMC_A11.0=FMC_A11,12b-sda1 +SH.FMC_A11.ConfNb=1 +SH.FMC_A14_BA0.0=FMC_BA0,FourSdramBanks1 +SH.FMC_A14_BA0.ConfNb=1 
+SH.FMC_A15_BA1.0=FMC_BA1,FourSdramBanks1 +SH.FMC_A15_BA1.ConfNb=1 +SH.FMC_A2.0=FMC_A2,12b-sda1 +SH.FMC_A2.ConfNb=1 +SH.FMC_A3.0=FMC_A3,12b-sda1 +SH.FMC_A3.ConfNb=1 +SH.FMC_A4.0=FMC_A4,12b-sda1 +SH.FMC_A4.ConfNb=1 +SH.FMC_A5.0=FMC_A5,12b-sda1 +SH.FMC_A5.ConfNb=1 +SH.FMC_A6.0=FMC_A6,12b-sda1 +SH.FMC_A6.ConfNb=1 +SH.FMC_A7.0=FMC_A7,12b-sda1 +SH.FMC_A7.ConfNb=1 +SH.FMC_A8.0=FMC_A8,12b-sda1 +SH.FMC_A8.ConfNb=1 +SH.FMC_A9.0=FMC_A9,12b-sda1 +SH.FMC_A9.ConfNb=1 +SH.FMC_D0_DA0.0=FMC_D0,sd-16b-d1 +SH.FMC_D0_DA0.ConfNb=1 +SH.FMC_D10_DA10.0=FMC_D10,sd-16b-d1 +SH.FMC_D10_DA10.ConfNb=1 +SH.FMC_D11_DA11.0=FMC_D11,sd-16b-d1 +SH.FMC_D11_DA11.ConfNb=1 +SH.FMC_D12_DA12.0=FMC_D12,sd-16b-d1 +SH.FMC_D12_DA12.ConfNb=1 +SH.FMC_D13_DA13.0=FMC_D13,sd-16b-d1 +SH.FMC_D13_DA13.ConfNb=1 +SH.FMC_D14_DA14.0=FMC_D14,sd-16b-d1 +SH.FMC_D14_DA14.ConfNb=1 +SH.FMC_D15_DA15.0=FMC_D15,sd-16b-d1 +SH.FMC_D15_DA15.ConfNb=1 +SH.FMC_D1_DA1.0=FMC_D1,sd-16b-d1 +SH.FMC_D1_DA1.ConfNb=1 +SH.FMC_D2_DA2.0=FMC_D2,sd-16b-d1 +SH.FMC_D2_DA2.ConfNb=1 +SH.FMC_D3_DA3.0=FMC_D3,sd-16b-d1 +SH.FMC_D3_DA3.ConfNb=1 +SH.FMC_D4_DA4.0=FMC_D4,sd-16b-d1 +SH.FMC_D4_DA4.ConfNb=1 +SH.FMC_D5_DA5.0=FMC_D5,sd-16b-d1 +SH.FMC_D5_DA5.ConfNb=1 +SH.FMC_D6_DA6.0=FMC_D6,sd-16b-d1 +SH.FMC_D6_DA6.ConfNb=1 +SH.FMC_D7_DA7.0=FMC_D7,sd-16b-d1 +SH.FMC_D7_DA7.ConfNb=1 +SH.FMC_D8_DA8.0=FMC_D8,sd-16b-d1 +SH.FMC_D8_DA8.ConfNb=1 +SH.FMC_D9_DA9.0=FMC_D9,sd-16b-d1 +SH.FMC_D9_DA9.ConfNb=1 +SH.FMC_NBL0.0=FMC_NBL0,Sd2ByteEnable1 +SH.FMC_NBL0.ConfNb=1 +SH.FMC_NBL1.0=FMC_NBL1,Sd2ByteEnable1 +SH.FMC_NBL1.ConfNb=1 +SH.FMC_SDCLK.0=FMC_SDCLK,12b-sda1 +SH.FMC_SDCLK.ConfNb=1 +SH.FMC_SDNCAS.0=FMC_SDNCAS,12b-sda1 +SH.FMC_SDNCAS.ConfNb=1 +SH.FMC_SDNRAS.0=FMC_SDNRAS,12b-sda1 +SH.FMC_SDNRAS.ConfNb=1 +SH.FMC_SDNWE.0=FMC_SDNWE,12b-sda1 +SH.FMC_SDNWE.ConfNb=1 +SH.GPXTI11.0=GPIO_EXTI11 +SH.GPXTI11.ConfNb=1 +SH.GPXTI15.0=GPIO_EXTI15 +SH.GPXTI15.ConfNb=1 +SH.GPXTI8.0=GPIO_EXTI8 +SH.GPXTI8.ConfNb=1 +SH.GPXTI9.0=GPIO_EXTI9 +SH.GPXTI9.ConfNb=1 +SH.S_TIM12_CH2.0=TIM12_CH2,PWM Generation2 CH2 +SH.S_TIM12_CH2.ConfNb=1 +SH.S_TIM3_CH2.0=TIM3_CH2,PWM Generation2 CH2 +SH.S_TIM3_CH2.ConfNb=1 +SH.S_TIM8_CH1.0=TIM8_CH1,Encoder_Interface +SH.S_TIM8_CH1.ConfNb=1 +SH.S_TIM8_CH2.0=TIM8_CH2,Encoder_Interface +SH.S_TIM8_CH2.ConfNb=1 +SPI2.BaudRatePrescaler=SPI_BAUDRATEPRESCALER_16 +SPI2.CalculateBaudRate=3.375 MBits/s +SPI2.DataSize=SPI_DATASIZE_8BIT +SPI2.Direction=SPI_DIRECTION_2LINES +SPI2.IPParameters=VirtualType,Mode,Direction,CalculateBaudRate,BaudRatePrescaler,DataSize +SPI2.Mode=SPI_MODE_MASTER +SPI2.VirtualType=VM_MASTER +SPI4.BaudRatePrescaler=SPI_BAUDRATEPRESCALER_16 +SPI4.CalculateBaudRate=3.375 MBits/s +SPI4.DataSize=SPI_DATASIZE_8BIT +SPI4.Direction=SPI_DIRECTION_2LINES +SPI4.IPParameters=VirtualType,Mode,Direction,CalculateBaudRate,BaudRatePrescaler,DataSize +SPI4.Mode=SPI_MODE_MASTER +SPI4.VirtualType=VM_MASTER +SPI5.BaudRatePrescaler=SPI_BAUDRATEPRESCALER_16 +SPI5.CalculateBaudRate=3.375 MBits/s +SPI5.DataSize=SPI_DATASIZE_8BIT +SPI5.Direction=SPI_DIRECTION_2LINES +SPI5.IPParameters=VirtualType,Mode,Direction,CalculateBaudRate,BaudRatePrescaler,DataSize +SPI5.Mode=SPI_MODE_MASTER +SPI5.VirtualType=VM_MASTER +TIM12.Channel-PWM\ Generation2\ CH2=TIM_CHANNEL_2 +TIM12.IPParameters=Channel-PWM Generation2 CH2,Prescaler,Period,Pulse-PWM Generation2 CH2 +TIM12.Period=999 +TIM12.Prescaler=3 +TIM12.Pulse-PWM\ Generation2\ CH2=499 +TIM3.Channel-PWM\ Generation2\ CH2=TIM_CHANNEL_2 +TIM3.IPParameters=Channel-PWM Generation2 CH2,Prescaler,Period +TIM3.Period=1000 +TIM3.Prescaler=100 
+TIM6.AutoReloadPreload=TIM_AUTORELOAD_PRELOAD_ENABLE +TIM6.IPParameters=Period,AutoReloadPreload,TIM_MasterOutputTrigger +TIM6.Period=2249 +TIM6.TIM_MasterOutputTrigger=TIM_TRGO_UPDATE +TIM8.EncoderMode=TIM_ENCODERMODE_TI12 +TIM8.IC1Filter=5 +TIM8.IC1Polarity=TIM_ICPOLARITY_FALLING +TIM8.IC2Filter=5 +TIM8.IC2Polarity=TIM_ICPOLARITY_FALLING +TIM8.IPParameters=Period,IC1Polarity,IC1Filter,IC2Filter,IC2Polarity,EncoderMode +TIM8.Period=65535 +UART7.IPParameters=WordLength,Parity +UART7.Parity=PARITY_EVEN +UART7.WordLength=WORDLENGTH_9B +USB_DEVICE.CLASS_NAME_FS=CDC +USB_DEVICE.IPParameters=VirtualMode-CDC_FS,VirtualModeFS,CLASS_NAME_FS,VID-CDC_FS,MANUFACTURER_STRING-CDC_FS,PID_CDC_FS,PRODUCT_STRING_CDC_FS +USB_DEVICE.MANUFACTURER_STRING-CDC_FS=EEZ +USB_DEVICE.PID_CDC_FS=8216 +USB_DEVICE.PRODUCT_STRING_CDC_FS=MCU Virtual ComPort +USB_DEVICE.VID-CDC_FS=4617 +USB_DEVICE.VirtualMode-CDC_FS=Cdc +USB_DEVICE.VirtualModeFS=Cdc_FS +USB_OTG_FS.IPParameters=VirtualMode +USB_OTG_FS.VirtualMode=Device_Only +VP_ADC1_Vbat_Input.Mode=IN-Vbat +VP_ADC1_Vbat_Input.Signal=ADC1_Vbat_Input +VP_CRC_VS_CRC.Mode=CRC_Activate +VP_CRC_VS_CRC.Signal=CRC_VS_CRC +VP_DMA2D_VS_DMA2D.Mode=DMA2D_Activate +VP_DMA2D_VS_DMA2D.Signal=DMA2D_VS_DMA2D +VP_FATFS_VS_SDIO.Mode=SDIO +VP_FATFS_VS_SDIO.Signal=FATFS_VS_SDIO +VP_FREERTOS_VS_CMSIS_V1.Mode=CMSIS_V1 +VP_FREERTOS_VS_CMSIS_V1.Signal=FREERTOS_VS_CMSIS_V1 +VP_IWDG_VS_IWDG.Mode=IWDG_Activate +VP_IWDG_VS_IWDG.Signal=IWDG_VS_IWDG +VP_JPEG_VS_JPEG.Mode=JPEG_Activate +VP_JPEG_VS_JPEG.Signal=JPEG_VS_JPEG +VP_LIBJPEG_VS_JPEGLIB.Mode=Enabled +VP_LIBJPEG_VS_JPEGLIB.Signal=LIBJPEG_VS_JPEGLIB +VP_LWIP_VS_Enabled.Mode=Enabled +VP_LWIP_VS_Enabled.Signal=LWIP_VS_Enabled +VP_RNG_VS_RNG.Mode=RNG_Activate +VP_RNG_VS_RNG.Signal=RNG_VS_RNG +VP_RTC_VS_RTC_Activate.Mode=RTC_Enabled +VP_RTC_VS_RTC_Activate.Signal=RTC_VS_RTC_Activate +VP_SYS_VS_tim10.Mode=TIM10 +VP_SYS_VS_tim10.Signal=SYS_VS_tim10 +VP_TIM6_VS_ClockSourceINT.Mode=Enable_Timer +VP_TIM6_VS_ClockSourceINT.Signal=TIM6_VS_ClockSourceINT +VP_USB_DEVICE_VS_USB_DEVICE_CDC_FS.Mode=CDC_FS +VP_USB_DEVICE_VS_USB_DEVICE_CDC_FS.Signal=USB_DEVICE_VS_USB_DEVICE_CDC_FS +board=EEZ BB3 MCU board v0.3 +isbadioc=false
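The bb3.ioc values above encode the whole clock tree (25 MHz HSE, PLLM=25, PLLN=432, PLLQ=9, APB1/APB2 divided by 4) plus the timer and RTC settings that depend on it. The following stand-alone C sketch is not part of the generated project; it simply recomputes those derived frequencies and compares them against the numbers CubeMX recorded in the same file. PLLP and the TIM6 prescaler are not stored in the .ioc, so the values used here (PLLP = 2, TIM6 prescaler = 0) are assumptions consistent with the recorded 216 MHz SYSCLK.

/*
 * Hedged sanity-check sketch: recompute the frequencies implied by bb3.ioc.
 * Assumptions (not present in the .ioc): PLLP = 2, TIM6 prescaler = 0.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    const unsigned hse  = 25000000u;          /* RCC.HSE_VALUE                  */
    const unsigned pllm = 25u, plln = 432u;   /* RCC.PLLM, RCC.PLLN             */
    const unsigned pllp = 2u;                 /* assumed; not stored in .ioc    */
    const unsigned pllq = 9u;                 /* RCC.PLLQ                       */

    const unsigned vco_in  = hse / pllm;      /* 1 MHz   (VCOInputFreq_Value)   */
    const unsigned vco_out = vco_in * plln;   /* 432 MHz (VCOOutputFreq_Value)  */
    const unsigned sysclk  = vco_out / pllp;  /* 216 MHz (SYSCLKFreq_VALUE)     */
    const unsigned clk48   = vco_out / pllq;  /* 48 MHz: USB, SDMMC1, RNG       */

    const unsigned apb1     = sysclk / 4u;    /* RCC_HCLK_DIV4 -> 54 MHz        */
    const unsigned apb1_tim = apb1 * 2u;      /* APB timer clocks -> 108 MHz    */

    /* TIM6 triggers the DAC (DAC_TRIGGER_T6_TRGO); Period = 2249 with an
       assumed prescaler of 0 gives 108 MHz / 2250 = 48 kHz update rate. */
    const unsigned dac_trigger_hz = apb1_tim / (2249u + 1u);

    /* TIM12 CH2 is the TFT_BRIGHTNESS PWM: Prescaler = 3, Period = 999,
       Pulse = 499 -> 27 kHz at roughly 50 % duty. */
    const unsigned pwm_hz = apb1_tim / (3u + 1u) / (999u + 1u);

    /* RTC on LSE: 32768 / (127 + 1) / (255 + 1) = 1 Hz calendar tick. */
    const unsigned rtc_hz = 32768u / (127u + 1u) / (255u + 1u);

    assert(vco_in == 1000000u && vco_out == 432000000u);
    assert(sysclk == 216000000u && clk48 == 48000000u);
    printf("SYSCLK %u Hz, 48 MHz domain %u Hz, DAC trig %u Hz, PWM %u Hz, RTC %u Hz\n",
           sysclk, clk48, dac_trigger_hz, pwm_hz, rtc_hz);
    return 0;
}

The 48 MHz PLLQ output matters because the same file routes SDMMC1 to CLK48 (RCC.SDMMCClockSelection=RCC_SDMMC1CLKSOURCE_CLK48) and enables the USB FS device, which requires exactly 48 MHz.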